# coding: utf-8
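"""
Batch STR generation: read numbered .txt documents, detect each document's
language, build one STR per document through the language-specific pipelines,
and save each graph as GEXF along with JSON metadata.
"""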
import argparse
import glob
import json
import logging
import os
import re
import string
import time
import warnings
from concurrent.futures import ThreadPoolExecutor

import networkx as nx
from langdetect import detect
from progressbar import ProgressBar, Timer, Bar, ETA, Counter

from strpython.nlp.disambiguator.share_prop import *
from strpython.pipeline import *

logging.basicConfig(format='%(asctime)s %(message)s')
def filter_nonprintable(text):
    """Strip non-printable ASCII characters from `text`."""
    # Get the difference of all ASCII characters from the set of printable characters
    nonprintable = set(chr(i) for i in range(128)).difference(string.printable)
    # Use translate to remove all non-printable characters
    return text.translate({ord(character): None for character in nonprintable})
parser = argparse.ArgumentParser()
parser.add_argument("texts_input_dir")
parser.add_argument("graphs_output_dir")
parser.add_argument("metadata_output_fn")

subparsers = parser.add_subparsers(help='commands')

normal = subparsers.add_parser(
    'normal', help='Basic STR generation. No arguments are necessary.')
normal.set_defaults(which="norm")

gen_parser = subparsers.add_parser(
    'generalisation', help='Apply a generalisation transformation to the generated STRs')
gen_parser.set_defaults(which="gene")
gen_parser.add_argument(
    '-t', '--type_gen', help='Type of generalisation', default="all")
# NOTE: the original help text read 'Language'; given the integer default,
# this presumably counts generalisation levels.
gen_parser.add_argument(
    '-n', help='Number of generalisation levels', default=1)
gen_parser.add_argument(
    '-b', '--bound', help='If the generalisation is bounded, this argument corresponds '
    'to the maximal spatial scale (e.g. country)', default="country")

ext_parser = subparsers.add_parser(
    'extension', help='Apply an extension process to the STRs')
ext_parser.set_defaults(which="ext")
ext_parser.add_argument(
    '-d', '--distance', help='radius distance', default=150)
ext_parser.add_argument(
    '-u', '--unit', help='unit used for the radius distance', default="km")
ext_parser.add_argument(
    '-a', '--adjacent_count', help='number of adjacent spatial entities (SE) added to the STR', default=1)
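# Example invocation (script and directory names here are hypothetical):
#   python generate_str.py corpus_txt/ output_graphs/ metadata.json extension -d 150 -u km -a 1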
args = parser.parse_args()
if "which" in args:
    if args.which == "gene":
        args.type_trans = "gen"
    elif args.which == "ext":
        args.type_trans = "ext"

print("Parameters entered:", args)
start = time.time()
class_ = StanfordNER
# Initialise Graphs Transformers: one pipeline (tagger + NER) per supported language
# NOTE: the original read lang="espagnol"; "spanish" is assumed here to match
# the other English-language names.
pipeline = {
    "en": Pipeline(lang="english", tagger=Tagger(), ner=class_(lang="en")),
    "fr": Pipeline(lang="french", tagger=Tagger(), ner=class_(lang="fr")),
    "es": Pipeline(lang="spanish", tagger=Tagger(), ner=class_(lang="es"))
}
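# Any document whose detected language has no pipeline above is routed to the
# English pipeline further down.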
# Read Input Files
texts_ = []
if os.path.exists(args.texts_input_dir):
    files_glob = glob.glob(args.texts_input_dir + "/*.txt")
    # Files are numbered; slot each one at the index encoded in its filename
    files_ = [""] * len(files_glob)
    for fn in files_glob:
        id = int(re.findall(r"\d+", fn)[-1])
        files_[id] = fn
    if not files_:
        print("No .txt files found in {0}".format(args.texts_input_dir))
        exit()
    for fn in files_:
        try:
            with open(fn) as f:
                tex = f.read()
            # lang = detect(tex)  # disabled: encoding bug
            texts_.append(tex)
        except Exception:
            print("{0} couldn't be read! Adding Lorem Ipsum instead".format(fn))
            texts_.append("Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.")
# Create the output dir if it doesn't exist
if not os.path.exists(args.graphs_output_dir):
    os.makedirs(args.graphs_output_dir)

if not texts_:
    print("No text files were loaded!")
    exit()
data = {}
logging.info("Identify Document(s) language(s)")
with ProgressBar(max_value=len(texts_), widgets=[' [', Timer(), '] ', Bar(), '(', Counter(), ')', '(', ETA(), ')']) as pg:
    for idx in range(len(texts_)):
        pg.update(idx)
        if not texts_[idx]:
            # Empty document: language detection would fail, default to English
            lang = "en"
        else:
            try:
                lang = detect(texts_[idx])
            except Exception:
                lang = "en"
        if lang not in data and lang in pipeline:
            data[lang] = []
        if lang in pipeline:
            data[lang].append(idx)
        else:
            # This can happen! Unsupported language: fall back to the English pipeline
            if "en" not in data:
                data["en"] = []
            data["en"].append(idx)
associated_es={}
count_per_doc={}
list_gs=[]
i=0
def workSTR(id_doc, text, count_per_doc, associated_es, list_gs, pg, lang):
    """Build the STR for one document and write it to disk as GEXF."""
    global i
    if not text:
        # Empty document: record empty metadata and an empty graph
        count_per_doc[id_doc] = {}
        associated_es[id_doc] = {}
        g = nx.MultiDiGraph()
        list_gs.append(g)
    else:
        t = filter_nonprintable(text)
        try:
            str_, count, se_identified = pipeline[lang].build(t, None, **vars(args))
            list_gs.append(str_.graph)
            # Save Metadata
            count_per_doc[id_doc] = count
            associated_es[id_doc] = se_identified
        except Exception as e:  # NER bug: fall back to an empty graph
            warnings.warn(repr(e))
            count_per_doc[id_doc] = {}
            associated_es[id_doc] = {}
            g = nx.MultiDiGraph()
            list_gs.append(g)
    # Save Graph structure
    nx.write_gexf(list_gs[-1], args.graphs_output_dir + "/{0}.gexf".format(id_doc))
    i += 1
    pg.update(i)
logging.info("Extracting Toponyms and Building STR...")
with ProgressBar(max_value=len(texts_), widgets=[' [', Timer(), '] ', Bar(), '(', Counter(), ')', '(', ETA(), ')']) as pg:
    with ThreadPoolExecutor(max_workers=4) as executor:
        for lang in data:
            for id_doc in data[lang]:
                executor.submit(workSTR, id_doc, texts_[id_doc], count_per_doc, associated_es, list_gs, pg, lang)
# Save Metadata
with open(os.path.join(args.graphs_output_dir, args.metadata_output_fn), 'w') as f:
    f.write(json.dumps([associated_es, count_per_doc], indent=4))
print("--- %s seconds ---" % (time.time() - start))