# generate_data.py
# -*- coding: utf-8 -*-
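"""Build STR (spatial textual representation) graphs from a directory of text files.

For each ``<id>.txt`` in ``texts_input_dir``, the script detects the document
language, runs the matching NLP pipeline (POS tagger + Stanford NER), and writes
the resulting graph to ``graphs_output_dir/<id>.gexf``, plus a JSON metadata
file. Optional subcommands apply a generalisation or extension transformation.
"""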
import argparse, glob, json, logging, os, re, string, threading, time, warnings
from concurrent.futures import ThreadPoolExecutor

from langdetect import detect
from progressbar import ProgressBar, Timer, Bar, ETA, Counter

from strpython.nlp.disambiguator.share_prop import *
from strpython.pipeline import *
import networkx as nx


logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO)

def filter_nonprintable(text):
    """Strip non-printable ASCII characters from ``text``."""
    # ASCII characters that are not in the set of printable characters
    nonprintable = set(chr(i) for i in range(128)).difference(string.printable)
    # Use translate to remove all non-printable characters
    return text.translate({ord(character): None for character in nonprintable})
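# Example: filter_nonprintable("caf\x07e") returns "cafe". Characters outside
# ASCII (e.g. accented letters) are left untouched, since only code points
# below 128 appear in the translation table.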

parser = argparse.ArgumentParser()
parser.add_argument("texts_input_dir", help="directory containing the input .txt files")
parser.add_argument("graphs_output_dir", help="directory where the .gexf graphs are written")
parser.add_argument("metadata_output_fn", help="filename of the JSON metadata output")

subparsers = parser.add_subparsers(help='commands')

normal = subparsers.add_parser(
    'normal', help='Basic STR generation. No arguments are necessary!')
normal.set_defaults(which="norm")


gen_parser = subparsers.add_parser(
    'generalisation', help='Apply a generalisation transformation to the generated STRs')
gen_parser.set_defaults(which="gene")
gen_parser.add_argument(
    '-t', '--type_gen', help='Type of generalisation', default="all")
gen_parser.add_argument(
    '-n', help='Language', default=1)
gen_parser.add_argument(
    '-b', '--bound', help='If the generalisation is bounded, this argument corresponds'
                          ' to the maximal level', default="country")

ext_parser = subparsers.add_parser(
    'extension', help='Apply an extension process to the STRs')
ext_parser.set_defaults(which="ext")
ext_parser.add_argument(
    '-d', '--distance', help='radius distance', type=int, default=150)
ext_parser.add_argument(
    '-u', '--unit', help='unit used for the radius distance', default="km")
ext_parser.add_argument(
    '-a', '--adjacent_count', help='number of adjacent spatial entities (SE) added to the STR',
    type=int, default=1)
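# Example invocations (paths are illustrative):
#   python generate_data.py corpus/ graphs/ meta.json normal
#   python generate_data.py corpus/ graphs/ meta.json generalisation -t all -b country
#   python generate_data.py corpus/ graphs/ meta.json extension -d 150 -u km -a 1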

args = parser.parse_args()
if "which" in args:
    if args.which =="gene":
        args.type_trans="gen"
    elif args.which =="ext":
        args.type_trans="ext"

print("Parameters entered : ",args)


start = time.time()
class_ = StanfordNER
# Initialise the language-specific pipelines
pipeline = {
    "en": Pipeline(lang="english", tagger=Tagger(), ner=class_(lang="en")),
    "fr": Pipeline(lang="french", tagger=Tagger(), ner=class_(lang="fr")),
    "es": Pipeline(lang="espagnol", tagger=Tagger(), ner=class_(lang="es"))
}
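# Each entry pairs a language code with a Pipeline combining a POS tagger and a
# Stanford NER model; pipeline[lang].build(...) is called below to turn one
# document into an STR graph.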



# Read Input Files
texts_ = []
if os.path.exists(args.texts_input_dir):
    files_glob = glob.glob(os.path.join(args.texts_input_dir, "*.txt"))
    # Input files are expected to be named <id>.txt: order them by numeric id
    files_ = [""] * len(files_glob)
    for fn in files_glob:
        idx = int(re.findall(r"\d+", fn)[-1])
        files_[idx] = fn
    if not files_:
        print("No .txt files found in {0}".format(args.texts_input_dir))
        exit()
    for fn in files_:
        try:
            with open(fn) as f:
                texts_.append(f.read())
        except (OSError, UnicodeDecodeError):
            # Keep document ids aligned by inserting placeholder text instead
            print("{0} couldn't be read! Adding Lorem Ipsum instead".format(fn))
            texts_.append("Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.")


# Create the output dir if it doesn't exist
if not os.path.exists(args.graphs_output_dir):
    os.makedirs(args.graphs_output_dir)

if not texts_:
    print("No text files were loaded!")
    exit()




data = {}
logging.info("Identifying document language(s)")
with ProgressBar(max_value=len(texts_), widgets=[' [', Timer(), '] ', Bar(), '(', Counter(), ')', '(', ETA(), ')']) as pg:
    for idx in range(len(texts_)):
        pg.update(idx)
        if not texts_[idx]:
            lang = "en"  # empty document, nothing to detect
        else:
            try:
                lang = detect(texts_[idx])
            except Exception:
                lang = "en"  # detection failed (e.g. encoding issue)
        if lang not in pipeline:
            lang = "en"  # unsupported language: fall back to English (it can happen!)
        if lang not in data:
            data[lang] = []
        data[lang].append(idx)
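# ``data`` maps a language code to the list of document indices written in that
# language, e.g. {"en": [0, 2], "fr": [1]}.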

associated_es = {}
count_per_doc = {}
list_gs = []
i = 0
i_lock = threading.Lock()  # guards the shared progress counter across workers



def workSTR(id_doc, text, count_per_doc, associated_es, list_gs, pg, lang):
    """Build the STR for one document and write it to disk as GEXF."""
    global i
    if not text:
        # Empty document: store an empty graph and empty metadata
        count_per_doc[id_doc] = {}
        associated_es[id_doc] = {}
        g = nx.MultiDiGraph()
        list_gs.append(g)
    else:
        t = filter_nonprintable(text)
        try:
            str_, count, se_identified = pipeline[lang].build(t, None, **vars(args))
            g = str_.graph
            list_gs.append(g)
            # Save metadata
            count_per_doc[id_doc] = count
            associated_es[id_doc] = se_identified
        except Exception as e:  # NER bug: fall back to an empty graph
            warnings.warn(repr(e))
            count_per_doc[id_doc] = {}
            associated_es[id_doc] = {}
            g = nx.MultiDiGraph()
            list_gs.append(g)

    # Save the graph structure (write the local graph, not list_gs[-1],
    # which is racy when several workers run concurrently)
    nx.write_gexf(g, os.path.join(args.graphs_output_dir, "{0}.gexf".format(id_doc)))
    with i_lock:
        i += 1
        pg.update(i)


logging.info("Extracting Toponyms and Building STR...")
queue=[]
with  ThreadPoolExecutor(max_workers=4) as executor:
    with ProgressBar(max_value=len(texts_),widgets=[' [', Timer(), '] ',Bar(),'(', Counter(),')','(', ETA(), ')']) as pg:
        pg.start()
        for lang in data:
            for id_doc in data[lang]:
                workSTR(id_doc,texts_[id_doc],count_per_doc,associated_es, list_gs,pg,lang)


# Save metadata
with open(os.path.join(args.graphs_output_dir, args.metadata_output_fn), 'w') as f:
    json.dump([associated_es, count_per_doc], f, indent=4)


print("--- %s seconds ---" % (time.time() - start))