-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathname_classifier.py
More file actions
153 lines (137 loc) · 4.19 KB
/
name_classifier.py
File metadata and controls
153 lines (137 loc) · 4.19 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from nltk import tokenize
import nltk
#from nltk.tokenize import RegexpTokenizer
from nltk.util import ngrams
#from rdflib.graph import Graph
#import numpy as np
#import csv
import os.path
#import json
import sys
def MSG(s):
    # Log a diagnostic line to stderr so stdout stays clean for results.
    sys.stderr.write('{0}\n'.format(s))
def wait(s=""):
    # Show an optional message, then block until the user presses Enter.
    # (Python 2 only: relies on raw_input.)
    sys.stdout.write("%s \n" % s + "\n")
    raw_input("Press Enter to continue...")
def word_ngrams(words, min=2, max=4):
    """Return every n-gram of *words* as a space-joined string.

    n ranges over [min, max) -- note *max* is EXCLUSIVE. The parameter
    names shadow the builtins; kept as-is for keyword-arg compatibility.
    When *words* is a string, this yields character n-grams.
    """
    joined = []
    for size in range(min, max):
        for gram in ngrams(words, size):
            joined.append(' '.join(str(piece) for piece in gram))
    return joined
def saveResutls(fileName, dict, th=0):
    """Write each (key, val) pair with val >= th to *fileName*, one per line.

    *dict* is an iterable of (key, value) pairs (e.g. sorted items).
    """
    with open(fileName, 'w') as outFile:
        outFile.writelines(
            '{} {}\n'.format(key, val) for key, val in dict if val >= th)
    MSG("Results are saved in a file")
    MSG(fileName)
def printResutls(dict, th=0):
    """Print every (key, val) pair: '+ key' if val >= th, '- key' otherwise.

    dict -- an iterable of (key, value) pairs (e.g. sorted dict items).
    th   -- score threshold separating accepted from rejected keys.
    """
    # Parenthesized print(...) with a single argument behaves identically
    # on Python 2 and Python 3 (the original py2-only print statements did not).
    for key, val in dict:
        if val >= th:
            print('+ {}'.format(key))
        else:
            print('- {}'.format(key))
def saveDict(fileName, dict, th):
    """Serialize (key, value) pairs to *fileName*, skipping values below *th*.

    Same 'key value' line format as saveResutls; readable back via loadDict.
    """
    with open(fileName, 'w') as outFile:
        outFile.writelines(
            '{} {}\n'.format(key, val) for key, val in dict if val >= th)
    MSG("File is saved.")
    MSG(fileName)
def loadDict(fileName, dict):
    """Load a 'key count' file (as written by saveDict) into *dict* in place.

    Each line is 'ngram count'. The ngram key may itself contain spaces, so
    the count is the LAST whitespace-separated token and the key is everything
    before it. Counts are restored as ints (saveDict wrote integer occurrence
    counts; the original stored them back as strings).
    """
    import io  # local import keeps this fix self-contained
    # io.open decodes UTF-8 on both Python 2 and 3 (the original used the
    # py2-only str.decode).
    with io.open(fileName, 'r', encoding='utf-8') as inFile:
        text = inFile.read()
    for line in text.strip().split('\n'):
        tokens = line.split(' ')
        dict[' '.join(tokens[:-1])] = int(tokens[-1])
def buildNgram(fileName, dict, n1=3, n2=7):
    """Count character n-grams (sizes n1..n2-1) per word token of each name.

    fileName -- UTF-8 file with one name per line.
    dict     -- mutated in place: ngram string -> occurrence count.

    Fixes vs. original: dict.has_key (removed in Python 3) replaced with
    dict.get; print statement replaced with print(); io.open replaces the
    py2-only str.decode.
    """
    import io
    with io.open(fileName, 'r', encoding='utf-8') as inFile:
        names = inFile.read().strip().split('\n')
    for name in names:
        for token in nltk.word_tokenize(name):
            for ngram in word_ngrams(token, n1, n2):
                # word_ngrams already returns strings; join is a no-op kept
                # for parity with the original.
                ng = ''.join(ngram)
                dict[ng] = dict.get(ng, 0) + 1
    print(len(dict))
# to model multi word names
# replace space with _
def buildNgram2(fileName, dict, n1=3, n2=7):
    """Count character n-grams over WHOLE names (multi-word aware).

    Unlike buildNgram, each name is treated as a single token: spaces are
    replaced with '_' so word boundaries become part of the n-grams.
    Mutates *dict* (ngram string -> occurrence count) in place.
    """
    import io
    with io.open(fileName, 'r', encoding='utf-8') as inFile:
        names = inFile.read().strip().split('\n')
    for name in names:
        # BUG FIX: str.replace returns a new string; the original discarded
        # the result, so spaces were never actually replaced.
        name = name.replace(' ', '_')
        for ngram in word_ngrams(name, n1, n2):
            ng = ''.join(ngram)
            # has_key was removed in Python 3; dict.get covers both branches.
            dict[ng] = dict.get(ng, 0) + 1
    print(len(dict))
# Positive training data: one person name per line (DBpedia persondata dump).
#trainFileName="data/persondata_en.nt.names.train.txt"
trainFileName="data/persondata_en.nt.names"
# Negative training data: plain English word list (one word per line).
negativeFileName="data/corncob_lowercase.txt"
# ngram -> occurrence count for the positive (name) model.
ngrams_stat = {}
ngrams_stat_sorted = {}
# ngram -> occurrence count for the negative (common-word) model.
neg_ngrams_stat = {}
# NOTE(review): this tokenizer is built but never used below (tokenization
# is done via nltk.word_tokenize / str.split) -- confirm before removing.
tokenizer = tokenize.RegexpTokenizer("[a-zA-Z'`éèî]+")
# --- command-line argument: path to the file of candidate names to classify ---
if len(sys.argv) > 1:
    testFileName = sys.argv[1]
    if not os.path.isfile(testFileName):
        MSG(" ## Test file does not exist." )
        MSG(testFileName)
        quit()
else:
    # BUG FIX: previously testFileName was simply left undefined here, which
    # caused a NameError much later when the test file is read. Fail fast
    # with a usage hint instead.
    MSG(" ## Usage: name_classifier.py <test-file>")
    quit()
# read data: load cached ngram models if present, otherwise build and cache them
if os.path.isfile('data/train_ngram2.csv'):
    MSG(" ## Reading name model ...")
    loadDict('data/train_ngram2.csv', ngrams_stat)
else:
    # BUG FIX: was pMSG(...), a NameError whenever the cached model is absent.
    MSG(" ## Building name model ...")
    buildNgram2(trainFileName, ngrams_stat, 2, 7)
    # Sort by count so saveDict's threshold keeps only frequent ngrams (>= 10).
    ngrams_stat_sorted = sorted(ngrams_stat.iteritems(), key=lambda x:x[1], reverse=True)
    saveDict('data/train_ngram2.csv', ngrams_stat_sorted, th=10)
if os.path.isfile('data/negative_ngram2.csv'):
    MSG(" ## Loading negative ngram model ...")
    loadDict('data/negative_ngram2.csv', neg_ngrams_stat)
else:
    MSG(" ## Building negative ngram model ...")
    buildNgram2(negativeFileName, neg_ngrams_stat, 3, 7)
    neg_ngrams_stat_sorted = sorted(neg_ngrams_stat.iteritems(), key=lambda x:x[1], reverse=True)
    saveDict('data/negative_ngram2.csv', neg_ngrams_stat_sorted, th=10)
# test: score each candidate name by length-weighted ngram overlap with the
# positive (name) model minus overlap with the negative (common-word) model.
MSG(" ## Reading test file ...")
# NOTE(review): py2-only str.decode; under py3 this line would fail.
text = open(testFileName).read().decode('utf-8')
names = text.strip().split('\n')
MSG(" ## Recognizing test names ...")
# name -> integer score in roughly [-100, 100] (percentage of ngram mass
# matched; negative-model hits subtract).
recognized_names = {}
for name in names:
    tokens = name.split(' ')
    tot_found=0
    tot_ngram=0
    for token in tokens:
        # character n-grams of sizes 3..6 (upper bound 7 is exclusive)
        generated_ngrams = word_ngrams(token, 3,7)
        for ngram in generated_ngrams:
            # word_ngrams returns strings already; join is a no-op
            ng = ''.join(ngram)
            # weight hits by ngram length so longer matches count more
            if ng in ngrams_stat:
                tot_found += len(ng)
            if ng in neg_ngrams_stat:
                tot_found -= len(ng)
            tot_ngram+=len(ng)
    if(tot_ngram > 0):
        # py2 integer division: score is the floor of the percentage
        recognized_names[name] = int(100*tot_found/tot_ngram)
    else:
        # name too short to produce any ngram of size >= 3
        recognized_names[name] = 0
# iteritems is py2-only; sort best-scoring names first
reco_name_sorted = sorted(recognized_names.iteritems(), key=lambda x:x[1], reverse=True)
#saveResutls(testFileName+'.reco2.txt',reco_name_sorted)
# print '+' for names scoring >= 10, '-' otherwise
printResutls(reco_name_sorted, 10)