diff --git a/Attention.py b/SENEGAL/Attention.py
similarity index 100%
rename from Attention.py
rename to SENEGAL/Attention.py
diff --git a/B_LSTM.py b/SENEGAL/B_LSTM.py
similarity index 100%
rename from B_LSTM.py
rename to SENEGAL/B_LSTM.py
diff --git a/CLF.py b/SENEGAL/CLF.py
similarity index 100%
rename from CLF.py
rename to SENEGAL/CLF.py
diff --git a/CNN.py b/SENEGAL/CNN.py
similarity index 100%
rename from CNN.py
rename to SENEGAL/CNN.py
diff --git a/GRU.py b/SENEGAL/GRU.py
similarity index 100%
rename from GRU.py
rename to SENEGAL/GRU.py
diff --git a/LSTM.py b/SENEGAL/LSTM.py
similarity index 100%
rename from LSTM.py
rename to SENEGAL/LSTM.py
diff --git a/ResNet18.py b/SENEGAL/ResNet18.py
similarity index 100%
rename from ResNet18.py
rename to SENEGAL/ResNet18.py
diff --git a/restoreRnn.py b/SENEGAL/RestoreRnn.py
similarity index 100%
rename from restoreRnn.py
rename to SENEGAL/RestoreRnn.py
diff --git a/SaveRnn.py b/SENEGAL/SaveRnn.py
similarity index 100%
rename from SaveRnn.py
rename to SENEGAL/SaveRnn.py
diff --git a/combo.py b/SENEGAL/combo.py
similarity index 100%
rename from combo.py
rename to SENEGAL/combo.py
diff --git a/concatH.py b/SENEGAL/concatH.py
similarity index 100%
rename from concatH.py
rename to SENEGAL/concatH.py
diff --git a/dualLSTM.py b/SENEGAL/dualLSTM.py
similarity index 100%
rename from dualLSTM.py
rename to SENEGAL/dualLSTM.py
diff --git a/mapDS.py b/SENEGAL/mapDS.py
similarity index 100%
rename from mapDS.py
rename to SENEGAL/mapDS.py
diff --git a/saveDS.py b/SENEGAL/saveDS.py
similarity index 100%
rename from saveDS.py
rename to SENEGAL/saveDS.py
diff --git a/simpleDenseNet.py b/SENEGAL/simpleDenseNet.py
similarity index 100%
rename from simpleDenseNet.py
rename to SENEGAL/simpleDenseNet.py
diff --git a/splitRandDino.py b/SENEGAL/splitRandDino.py
similarity index 100%
rename from splitRandDino.py
rename to SENEGAL/splitRandDino.py
diff --git a/measuresC.py b/measuresC.py
deleted file mode 100644
index c8577b8e06d9230d9d51f0b8bd293258f3292b67..0000000000000000000000000000000000000000
--- a/measuresC.py
+++ /dev/null
@@ -1,58 +0,0 @@
-import numpy as np
-import sys
-from sklearn.metrics import accuracy_score
-from sklearn.metrics import precision_recall_fscore_support
-from sklearn.metrics import precision_score,accuracy_score,recall_score,f1_score
-
-
-
-p_split = 70
-n_split=10
-
-#python measuresC.py RFC,GBC,SVC 1
-#python measuresC.py RFC,GBC,SVC 2
-
-arr_C=sys.argv[1].split(',')
-norm=int(sys.argv[2])
-
-for class_ in arr_C:
-    #reset the per-class accumulators for each classifier
-    precision = np.zeros(3)
-    recall = np.zeros(3)
-    fscore = np.zeros(3)
-    for i in range(n_split):
-
-        var_totpred = './dataset/N%d/%s_truthpred_%d%s%d%s'%(norm,class_,p_split,'p/totpred',i,'.npy')
-        var_gt='./dataset/N%d/%s_truthpred_%d%s%d%s'%(norm,class_,p_split,'p/gt',i,'.npy')
-
-        C_pred = np.load(var_totpred)
-        test_y = np.load(var_gt)
-
-
-        var_prec,var_rec,var_fsc,_ = precision_recall_fscore_support(test_y, C_pred)
-
-        #Sum P, R and FS values for each class
-        precision = np.add(precision, np.array(var_prec))
-        recall = np.add(recall, np.array(var_rec))
-        fscore = np.add(fscore, np.array(var_fsc))
-
-    #get the mean values of P,R,FS
-    precision = np.divide(precision,n_split)
-    recall = np.divide(recall,n_split)
-    fscore = np.divide(fscore,n_split)
-
-    #get other measures
-    acc_score = accuracy_score(test_y, C_pred)
-    prec_score = precision_score(test_y, C_pred, average='weighted')
-    rec_score = recall_score(test_y, C_pred, average='weighted')
-    fsc_score = f1_score(test_y, C_pred, average='weighted')
-
-
-
-    np.save('./dataset/N%d/%s_truthpred_%d%s'%(norm,class_,p_split,'p/precision.npy'), precision)
-    np.save('./dataset/N%d/%s_truthpred_%d%s'%(norm,class_,p_split,'p/recall.npy'), recall)
-    np.save('./dataset/N%d/%s_truthpred_%d%s'%(norm,class_,p_split,'p/fscore.npy'), fscore)
-    np.save('./dataset/N%d/%s_truthpred_%d%s'%(norm,class_,p_split,'p/accuracy_score.npy'), acc_score)
-    np.save('./dataset/N%d/%s_truthpred_%d%s'%(norm,class_,p_split,'p/precision_score.npy'), prec_score)
-    np.save('./dataset/N%d/%s_truthpred_%d%s'%(norm,class_,p_split,'p/recall_score.npy'), rec_score)
-    np.save('./dataset/N%d/%s_truthpred_%d%s'%(norm,class_,p_split,'p/fscore_score.npy'), fsc_score)
diff --git a/measuresDUAL.py b/measuresDUAL.py
deleted file mode 100644
index d6d557b6ff262a59935cb7bb77f490f877e938f5..0000000000000000000000000000000000000000
--- a/measuresDUAL.py
+++ /dev/null
@@ -1,113 +0,0 @@
-import numpy as np
-import sys
-from sklearn.metrics import accuracy_score
-from sklearn.metrics import precision_recall_fscore_support
-from sklearn.metrics import precision_score,accuracy_score,recall_score,f1_score,confusion_matrix
-
-
-timesteps = 22
-ninput = 13
-p_split = 70
-n_split=10
-
-# Usage: python measuresDUAL.py <models> <batch_size> <n_units> <n_layers> <norm> <+|c>
-
-arrT=sys.argv[1].split(',')
-batchsz=int(sys.argv[2])
-nunits=int(sys.argv[3])
-nlayer=int(sys.argv[4])
-norm=int(sys.argv[5])
-opH=sys.argv[6]
-
-
-for T_lstm in arrT:
-
-    #reset the per-class accumulators for each model
-    precision = np.zeros(3)
-    recall = np.zeros(3)
-    fscore = np.zeros(3)
-    for i in range(n_split):
-
-        TP=[]
-        totalLayer=[]
-        totalClass=[]
-
-        if opH=='+':
-            var_totpred= './dataset/N%d/%s%d%s%d%s%d%s%d%s%d%s'%(norm,T_lstm,nlayer,'l_truthpred_',p_split,'p',nunits,'u',batchsz,'b+/totpred',i,'.npy')
-            var_gt= './dataset/N%d/%s%d%s%d%s%d%s%d%s%d%s'%(norm,T_lstm,nlayer,'l_truthpred_',p_split,'p',nunits,'u',batchsz,'b+/gt',i,'.npy')
-        if opH == 'c':
-            var_totpred= './dataset/N%d/%s%d%s%d%s%d%s%d%s%d%s'%(norm,T_lstm,nlayer,'l_truthpred_',p_split,'p',nunits,'u',batchsz,'bc/totpred',i,'.npy')
-            var_gt= './dataset/N%d/%s%d%s%d%s%d%s%d%s%d%s'%(norm,T_lstm,nlayer,'l_truthpred_',p_split,'p',nunits,'u',batchsz,'bc/gt',i,'.npy')
-
-        LSTM_pred = np.load(var_totpred)
-        test_y = np.load(var_gt)
-
-        #get confusion matrix
-        C = confusion_matrix(test_y, LSTM_pred)
-
-        #get true positive values for each class
-        TP.append(C[0][0])
-        TP.append(C[1][1])
-        TP.append(C[2][2])
-
-        #row sums: number of relevant (true) instances per class
-        totalLayer.append(C[0][0]+C[0][1]+C[0][2])
-        totalLayer.append(C[1][0]+C[1][1]+C[1][2])
-        totalLayer.append(C[2][0]+C[2][1]+C[2][2])
-
-        #column sums: number of retrieved (predicted) instances per class
-        totalClass.append(C[0][0]+C[1][0]+C[2][0])
-        totalClass.append(C[0][1]+C[1][1]+C[2][1])
-        totalClass.append(C[0][2]+C[1][2]+C[2][2])
-
-        # PRECISION = TP / RETRIEVED INSTANCES (column sums)
-        prec_temp=np.divide(TP,totalClass,dtype="float32")
-        # RECALL = TP / RELEVANT INSTANCES (row sums)
-        recall_temp=np.divide(TP,totalLayer,dtype="float32")
-        # FSCORE = 2 * (P*R / (P+R))
-        fscore_temp=2*(np.multiply(prec_temp,recall_temp,dtype="float32")/np.add(prec_temp,recall_temp,dtype="float32"))
-
-        # sum values per class
-        for j in range(0,3):
-
-            precision[j] = precision[j]+prec_temp[j]
-            recall[j] = recall[j]+recall_temp[j]
-            fscore[j] = fscore[j]+fscore_temp[j]
-
-
-    # get P,R and FS mean values per class
-    precision = np.divide(precision,n_split)
-    recall = np.divide(recall,n_split)
-    fscore = np.divide(fscore,n_split)
-
-    # get other measures
-    acc_score = accuracy_score(test_y, LSTM_pred)
-    prec_score = precision_score(test_y, LSTM_pred, average='weighted')
-    rec_score = recall_score(test_y, LSTM_pred, average='weighted')
-    fsc_score = f1_score(test_y, LSTM_pred, average='weighted')
-
-#print 'precision',precision
-#print 'recall',recall
-#print 'fscore',fscore
-#print 'acc_score',acc_score
-#print 'prec_score',prec_score
-#print 'rec_score',rec_score
-#print 'fsc_score',fsc_score
-
-if opH=='+':
-    np.save('./dataset/N%d/%s%d%s%d%s%d%s%d%s'%(norm,T_lstm,nlayer,'l_truthpred_',p_split,'p',nunits,'u',batchsz,'b+/precision.npy'), precision)
-    np.save('./dataset/N%d/%s%d%s%d%s%d%s%d%s'%(norm,T_lstm,nlayer,'l_truthpred_',p_split,'p',nunits,'u',batchsz,'b+/recall.npy'), recall)
-    np.save('./dataset/N%d/%s%d%s%d%s%d%s%d%s'%(norm,T_lstm,nlayer,'l_truthpred_',p_split,'p',nunits,'u',batchsz,'b+/fscore.npy'), fscore)
-    np.save('./dataset/N%d/%s%d%s%d%s%d%s%d%s'%(norm,T_lstm,nlayer,'l_truthpred_',p_split,'p',nunits,'u',batchsz,'b+/accuracy_score.npy'), acc_score)
-    np.save('./dataset/N%d/%s%d%s%d%s%d%s%d%s'%(norm,T_lstm,nlayer,'l_truthpred_',p_split,'p',nunits,'u',batchsz,'b+/precision_score.npy'), prec_score)
-    np.save('./dataset/N%d/%s%d%s%d%s%d%s%d%s'%(norm,T_lstm,nlayer,'l_truthpred_',p_split,'p',nunits,'u',batchsz,'b+/recall_score.npy'), rec_score)
-    np.save('./dataset/N%d/%s%d%s%d%s%d%s%d%s'%(norm,T_lstm,nlayer,'l_truthpred_',p_split,'p',nunits,'u',batchsz,'b+/fscore_score.npy'), fsc_score)
-
-if opH == 'c':
-    np.save('./dataset/N%d/%s%d%s%d%s%d%s%d%s'%(norm,T_lstm,nlayer,'l_truthpred_',p_split,'p',nunits,'u',batchsz,'bc/precision.npy'), precision)
-    np.save('./dataset/N%d/%s%d%s%d%s%d%s%d%s'%(norm,T_lstm,nlayer,'l_truthpred_',p_split,'p',nunits,'u',batchsz,'bc/recall.npy'), recall)
-    np.save('./dataset/N%d/%s%d%s%d%s%d%s%d%s'%(norm,T_lstm,nlayer,'l_truthpred_',p_split,'p',nunits,'u',batchsz,'bc/fscore.npy'), fscore)
-    np.save('./dataset/N%d/%s%d%s%d%s%d%s%d%s'%(norm,T_lstm,nlayer,'l_truthpred_',p_split,'p',nunits,'u',batchsz,'bc/accuracy_score.npy'), acc_score)
-    np.save('./dataset/N%d/%s%d%s%d%s%d%s%d%s'%(norm,T_lstm,nlayer,'l_truthpred_',p_split,'p',nunits,'u',batchsz,'bc/precision_score.npy'), prec_score)
-    np.save('./dataset/N%d/%s%d%s%d%s%d%s%d%s'%(norm,T_lstm,nlayer,'l_truthpred_',p_split,'p',nunits,'u',batchsz,'bc/recall_score.npy'), rec_score)
-    np.save('./dataset/N%d/%s%d%s%d%s%d%s%d%s'%(norm,T_lstm,nlayer,'l_truthpred_',p_split,'p',nunits,'u',batchsz,'bc/fscore_score.npy'), fsc_score)
diff --git a/measuresLSTM.py b/measuresLSTM.py
deleted file mode 100644
index de20caaff22f9039ca3038d11262c0ab2914ab98..0000000000000000000000000000000000000000
--- a/measuresLSTM.py
+++ /dev/null
@@ -1,101 +0,0 @@
-import numpy as np
-import sys
-from sklearn.metrics import accuracy_score
-from sklearn.metrics import precision_recall_fscore_support
-from sklearn.metrics import precision_score,accuracy_score,recall_score,f1_score,confusion_matrix
-
-
-timesteps = 22
-ninput = 13
-p_split = 70
-n_split=10
-
-# Usage: python measuresLSTM.py <unused> <batch_size> <n_units> <n_layers> <norm> <model>
-
-
-batchsz=int(sys.argv[2])
-nunits=int(sys.argv[3])
-nlayer=int(sys.argv[4])
-norm=int(sys.argv[5])
-Tlstm=sys.argv[6]
-
-precision = np.zeros(3)
-recall = np.zeros(3)
-fscore = np.zeros(3)
-
-g_path = './dataset/N%d/%s_%dl_truthpred_%dp%du%db/'%(norm,Tlstm,nlayer,p_split,nunits,batchsz)
-
-for i in range(n_split):
-
-    TP=[]
-    totalClass=[]
-    totalLayer=[]
-
-
-    var_totpred= g_path+'totpred%d.npy'% i
-    var_gt= g_path+'gt%d.npy'% i
-
-    LSTM_pred = np.load(var_totpred)
-    test_y = np.load(var_gt)
-
-    #get confusion matrix
-    C = confusion_matrix(test_y, LSTM_pred)
-
-    #get true positive values for each class
-    TP.append(C[0][0])
-    TP.append(C[1][1])
-    TP.append(C[2][2])
-
-    #row sums: number of relevant (true) instances per class
-    totalLayer.append(C[0][0]+C[0][1]+C[0][2])
-    totalLayer.append(C[1][0]+C[1][1]+C[1][2])
-    totalLayer.append(C[2][0]+C[2][1]+C[2][2])
-
-    #column sums: number of retrieved (predicted) instances per class
-    totalClass.append(C[0][0]+C[1][0]+C[2][0])
-    totalClass.append(C[0][1]+C[1][1]+C[2][1])
-    totalClass.append(C[0][2]+C[1][2]+C[2][2])
-
-
-    # PRECISION = TP / RETRIEVED INSTANCES (column sums)
-    prec_temp=np.divide(TP,totalClass,dtype="float32")
-    # RECALL = TP / RELEVANT INSTANCES (row sums)
-    recall_temp=np.divide(TP,totalLayer,dtype="float32")
-    # FSCORE = 2 * (P*R / (P+R))
-    fscore_temp=2*(np.multiply(prec_temp,recall_temp,dtype="float32")/np.add(prec_temp,recall_temp,dtype="float32"))
-
-    # sum values per class
-    for j in range(0,3):
-
-        precision[j] = precision[j]+prec_temp[j]
-        recall[j] = recall[j]+recall_temp[j]
-        fscore[j] = fscore[j]+fscore_temp[j]
-
-
-# get P,R and FS mean values per class
-precision = np.divide(precision,n_split,dtype="float32")
-recall = np.divide(recall,n_split,dtype="float32")
-fscore = np.divide(fscore,n_split,dtype="float32")
-
-# get other measures
-acc_score = accuracy_score(test_y, LSTM_pred)
-prec_score = precision_score(test_y, LSTM_pred, average='weighted')
-rec_score = recall_score(test_y, LSTM_pred, average='weighted')
-fsc_score = f1_score(test_y, LSTM_pred, average='weighted')
-
-#print 'precision',precision
-#print 'recall',recall
-#print 'fscore',fscore
-#print 'acc_score',acc_score
-#print 'prec_score',prec_score
-#print 'rec_score',rec_score
-#print 'fsc_score',fsc_score
-
-
-np.save(g_path+'precision.npy', precision)
-np.save(g_path+'recall.npy', recall)
-np.save(g_path+'fscore.npy', fscore)
-np.save(g_path+'accuracy_score.npy', acc_score)
-np.save(g_path+'precision_score.npy', prec_score)
-np.save(g_path+'recall_score.npy', rec_score)
-np.save(g_path+'fscore_score.npy', fsc_score)
diff --git a/plot_measures.py b/plot_measures.py
deleted file mode 100644
index 9e02b9b80c50c27983fb82b0060f8b3ff010bd5c..0000000000000000000000000000000000000000
--- a/plot_measures.py
+++ /dev/null
@@ -1,210 +0,0 @@
-import numpy as np
-import pandas as pd
-
-'''
-#FSCORE 64-128-256 HIDDEN UNITS N1 1LIV
-var_1_64_8N1 = './dataset/N1/LSTM1l_truthpred_70p64u8b/fscore_score.npy'
-var_1_128_8N1 = './dataset/N1/LSTM1l_truthpred_70p128u8b/fscore_score.npy'
-var_1_256_8N1 = './dataset/N1/LSTM1l_truthpred_70p256u8b/fscore_score.npy'
-
-#FSCORE 64-128-256 HIDDEN UNITS N2 1LIV
-var_1_64_8N2 = './dataset/N2/LSTM1l_truthpred_70p64u8b/fscore_score.npy'
-var_1_128_8N2 = './dataset/N2/LSTM1l_truthpred_70p128u8b/fscore_score.npy'
-var_1_256_8N2 = './dataset/N2/LSTM1l_truthpred_70p256u8b/fscore_score.npy'
-var_N1 = './dataset/N1/RFC_truthpred_70p/fscore_score.npy'
-
-#FSCORE 64-128-256 HIDDEN UNITS N1 3LIV
-var_3_64_8N1 = './dataset/N1/LSTM3l_truthpred_70p64u8b/fscore_score.npy'
-var_3_128_8N1 = './dataset/N1/LSTM3l_truthpred_70p128u8b/fscore_score.npy'
-var_3_256_8N1 = './dataset/N1/LSTM3l_truthpred_70p256u8b/fscore_score.npy'
-
-#FSCORE 64-128-256 HIDDEN UNITS N2 3LIV
-var_3_64_8N2 = './dataset/N2/LSTM3l_truthpred_70p64u8b/fscore_score.npy'
-var_3_128_8N2 = './dataset/N2/LSTM3l_truthpred_70p128u8b/fscore_score.npy'
-var_3_256_8N2 = './dataset/N2/LSTM3l_truthpred_70p256u8b/fscore_score.npy'
-
-#FSCORE RFC
-var_N2 = './dataset/N2/RFC_truthpred_70p/fscore_score.npy'
-
-#FSCORE 64-128 HIDDEN UNITS N1 1LIV
-var_B_1_64_8N1 = './dataset/N1/B_LSTM1l_truthpred_70p64u8b/fscore_score.npy'
-var_B_1_128_8N1 = './dataset/N1/B_LSTM1l_truthpred_70p128u8b/fscore_score.npy'
-
-#FSCORE 64-128 HIDDEN UNITS N1 3LIV
-var_B_3_64_8N1 = './dataset/N1/B_LSTM3l_truthpred_70p64u8b/fscore_score.npy'
-var_B_3_128_8N1 = './dataset/N1/B_LSTM3l_truthpred_70p128u8b/fscore_score.npy'
-
-
-
-#FSCOREs N1
-fsN1=[]
-#LSTM N1 1LIV
-fsN1.append(np.load(var_1_64_8N1))
-fsN1.append(np.load(var_1_128_8N1))
-fsN1.append(np.load(var_1_256_8N1))
-#LSTM N1 L3
-fsN1.append(np.load(var_3_64_8N1))
-fsN1.append(np.load(var_3_128_8N1))
-fsN1.append(np.load(var_3_256_8N1))
-#RFC
-fsN1.append(np.load(var_N1))
-#BI LSTM
-fsN1.append(np.load(var_B_1_64_8N1))
-fsN1.append(np.load(var_B_1_128_8N1))
-fsN1.append(np.load(var_B_3_64_8N1))
-fsN1.append(np.load(var_B_3_128_8N1))
-
-
-
-
-
-#FSCORES N2
-fsN2=[]
-#LSTM N2 1LIV
-fsN2.append(np.load(var_1_64_8N2))
-fsN2.append(np.load(var_1_128_8N2))
-fsN2.append(np.load(var_1_256_8N2))
-#LSTM N2 L3
-fsN2.append(np.load(var_3_64_8N2))
-fsN2.append(np.load(var_3_128_8N2))
-fsN2.append(np.load(var_3_256_8N2))
-#RFC
-fsN2.append(np.load(var_N2))
-fsN2.append('-')
-fsN2.append('-')
-fsN2.append('-')
-fsN2.append('-')
-
-
-print "Fscores"
-print "N1:",np.array(fsN1)
-print "N2:",np.array(fsN2)
-
-fsN1N2=np.vstack(( np.array(fsN1),np.array(fsN2) ))
-
-fsLSTM_rfc = pd.DataFrame(np.array(fsN1N2),columns = ['1_64_8','1_128_8','1_256_8','3_64_8','3_128_8','3_256_8','RFC','bi_1_64_8','bi_1_128_8','bi_3_64_8','bi_3_128_8'])
-fsLSTM_rfc.to_csv("fscoreLSTM-RFC_N1N2.csv")
-
-
-#SINGLE LAYER ACCURACY 64-128-256 HIDDEN UNITS N1 1LIV
-a_1_64_8N1 = './dataset/N1/LSTM1l_truthpred_70p64u8b/accuracy_score.npy'
-a_1_128_8N1 = './dataset/N1/LSTM1l_truthpred_70p128u8b/accuracy_score.npy'
-a_1_256_8N1 = './dataset/N1/LSTM1l_truthpred_70p256u8b/accuracy_score.npy'
-
-#MULTILAYER ACCURACY 64-128-256 HIDDEN UNITS N1 3LIV
-a_3_64_8N1 = './dataset/N1/LSTM3l_truthpred_70p64u8b/accuracy_score.npy'
-a_3_128_8N1 = './dataset/N1/LSTM3l_truthpred_70p128u8b/accuracy_score.npy'
-a_3_256_8N1 = './dataset/N1/LSTM3l_truthpred_70p256u8b/accuracy_score.npy'
-
-#SINGLE LAYER ACCURACY 64-128-256 HIDDEN UNITS N2 1LIV
-a_1_64_8N2 = './dataset/N2/LSTM1l_truthpred_70p64u8b/accuracy_score.npy'
-a_1_128_8N2 = './dataset/N2/LSTM1l_truthpred_70p128u8b/accuracy_score.npy'
-a_1_256_8N2 = './dataset/N2/LSTM1l_truthpred_70p256u8b/accuracy_score.npy'
-a_N1 = './dataset/N1/RFC_truthpred_70p/accuracy_score.npy'
-
-#MULTILAYER ACCURACY 64-128-256 HIDDEN UNITS N2 3LIV
-a_3_64_8N2 = './dataset/N2/LSTM3l_truthpred_70p64u8b/accuracy_score.npy'
-a_3_128_8N2 = './dataset/N2/LSTM3l_truthpred_70p128u8b/accuracy_score.npy'
-a_3_256_8N2 = './dataset/N2/LSTM3l_truthpred_70p256u8b/accuracy_score.npy'
-a_N2 = './dataset/N2/RFC_truthpred_70p/accuracy_score.npy'
-
-#BIDIRECTIONAL ACCURACY 64-128 HIDDEN UNITS N1 1LIV
-a_B_1_64_8N1 = './dataset/N1/B_LSTM1l_truthpred_70p64u8b/accuracy_score.npy'
-a_B_1_128_8N1 = './dataset/N1/B_LSTM1l_truthpred_70p128u8b/accuracy_score.npy'
-
-#BIDIRECTIONAL ACCURACY 64-128 HIDDEN UNITS N1 3LIV
-a_B_3_64_8N1 = './dataset/N1/B_LSTM3l_truthpred_70p64u8b/accuracy_score.npy'
-a_B_3_128_8N1 = './dataset/N1/B_LSTM3l_truthpred_70p128u8b/accuracy_score.npy'
-
-#ATTENTION ACCURACY 64-128 HU N1 1L 
-a_Attention_1_64_8N1 = './dataset/N1/Attention1l_truthpred_70p64u8b/accuracy_score.npy'
-a_Attention_1_128_8N1 = './dataset/N1/Attention1l_truthpred_70p128u8b/accuracy_score.npy'
-
-#ATTENTION ACCURACY 64-128 HU N1 3L 
-a_Attention_3_64_8N1 = './dataset/N1/Attention3l_truthpred_70p64u8b/accuracy_score.npy'
-a_Attention_3_128_8N1 = './dataset/N1/Attention3l_truthpred_70p128u8b/accuracy_score.npy'
-
-
-#ACCURACY N1
-aN1=[]
-#LSTM 1LIV
-aN1.append(np.load(a_1_64_8N1))
-aN1.append(np.load(a_1_128_8N1))
-aN1.append(np.load(a_1_256_8N1))
-#LSTM 3LIV
-aN1.append(np.load(a_3_64_8N1))
-aN1.append(np.load(a_3_128_8N1))
-aN1.append(np.load(a_3_256_8N1))
-#RFC
-aN1.append(np.load(a_N1))
-#BI LSTM
-aN1.append(np.load(a_B_1_64_8N1))
-aN1.append(np.load(a_B_1_128_8N1))
-aN1.append(np.load(a_B_3_64_8N1))
-aN1.append(np.load(a_B_3_128_8N1))
-
-#ACCURACY N2
-aN2=[]
-aN2.append(np.load(a_1_64_8N2))
-aN2.append(np.load(a_1_128_8N2))
-aN2.append(np.load(a_1_256_8N2))
-
-aN2.append(np.load(a_3_64_8N2))
-aN2.append(np.load(a_3_128_8N2))
-aN2.append(np.load(a_3_256_8N2))
-aN2.append(np.load(a_N2))
-
-aN2.append('-')
-aN2.append('-')
-aN2.append('-')
-aN2.append('-')
-
-
-print "Accuracy"
-print "N1:",np.array(fsN1)
-print "N2:",np.array(fsN2)
-
-
-
-aN1N2=np.vstack(( np.array(aN1),np.array(aN2) ))
-
-aLSTM_rfc = pd.DataFrame(np.array(aN1N2),columns = ['1_64_8','1_128_8','1_256_8','3_64_8','3_128_8','3_256_8','RFC','bi_1_64_8','bi_1_128_8','bi_3_64_8','bi_3_128_8'])
-aLSTM_rfc.to_csv("accuracyLSTM-RFC_N1N2.csv")
-
-
-
-
-#FSCORE 64-128 HU N1 1L
-var_Attention_1_64_8N1 = './dataset/N1/Attention1l_truthpred_70p64u8b/fscore_score.npy'
-var_Attention_1_128_8N1 = './dataset/N1/Attention1l_truthpred_70p128u8b/fscore_score.npy'
-
-#FSCORE 64-128 HU N1 3L
-var_Attention_3_64_8N1 = './dataset/N1/Attention3l_truthpred_70p64u8b/fscore_score.npy'
-var_Attention_3_128_8N1 = './dataset/N1/Attention3l_truthpred_70p128u8b/fscore_score.npy'
-
-#FSCORE 64-128 HU N1 1L
-var_concatH_1_64_8N1 = './dataset/N1/concatH1l_truthpred_70p64u8b/fscore_score.npy'
-var_concatH_1_128_8N1 = './dataset/N1/concatH1l_truthpred_70p128u8b/fscore_score.npy'
-
-#FSCORE 64-128 HU N1 3L
-var_concatH_3_64_8N1 = './dataset/N1/concatH3l_truthpred_70p64u8b/fscore_score.npy'
-var_concatH_3_128_8N1 = './dataset/N1/concatH3l_truthpred_70p128u8b/fscore_score.npy'
-
-
-fsN1=[]
-#Attention / concatH
-fsN1.append(np.load(var_Attention_1_64_8N1))
-fsN1.append(np.load(var_Attention_1_128_8N1))
-fsN1.append(np.load(var_Attention_3_64_8N1))
-fsN1.append(np.load(var_Attention_3_128_8N1))
-
-fsN1.append(np.load(var_concatH_1_64_8N1))
-fsN1.append(np.load(var_concatH_1_128_8N1))
-fsN1.append(np.load(var_concatH_3_64_8N1))
-fsN1.append(np.load(var_concatH_3_128_8N1))
-
-fsAtt_concat = pd.DataFrame(np.array(fsN1),columns = ['A1_64_8','A1_128_8','A3_64_8','A3_128_8','conc_1_64_8','conc_1_128_8','conc_3_64_8','conc_3_128_8'])
-fsAtt_concat.to_csv("fscoreAttentionConcat.csv")
-'''
-
-
diff --git a/tables.py b/tables.py
deleted file mode 100644
index 264d48d95b52ae0d3623d682b2fc84638b65e75c..0000000000000000000000000000000000000000
--- a/tables.py
+++ /dev/null
@@ -1,68 +0,0 @@
-import numpy as np
-import pandas as pd
-
-#FSCORE  64-128 HU N1 1L 
-var_Attention_1_64_8N1 = './dataset/N1/Attention1l_truthpred_70p64u8b/fscore_score.npy'
-var_Attention_1_128_8N1 = './dataset/N1/Attention1l_truthpred_70p128u8b/fscore_score.npy'
-
-#FSCORE  64-128 HU N1 3L 
-var_Attention_3_64_8N1 = './dataset/N1/Attention3l_truthpred_70p64u8b/fscore_score.npy'
-var_Attention_3_128_8N1 = './dataset/N1/Attention3l_truthpred_70p128u8b/fscore_score.npy'
-
-#FSCORE 64 HU N1 1L
-var_concatH_1_64_8N1 = './dataset/N1/concatH1l_truthpred_70p64u8b/fscore_score.npy'
-
-#FSCORE 64 HU N1 3L
-var_concatH_3_64_8N1 = './dataset/N1/concatH3l_truthpred_70p64u8b/fscore_score.npy'
-
-
-
-fsN1=[]
-#Attention / concatH
-fsN1.append(np.load(var_Attention_1_64_8N1))
-fsN1.append(np.load(var_Attention_1_128_8N1))
-fsN1.append(np.load(var_Attention_3_64_8N1))
-fsN1.append(np.load(var_Attention_3_128_8N1))
-
-fsN1.append(np.load(var_concatH_1_64_8N1))
-
-fsN1.append(np.load(var_concatH_3_64_8N1))
-
-
-fsAtt_concat = pd.DataFrame(np.array(fsN1))
-fsAtt_concat.to_csv("fscoreAttentionConcat.csv")
-
-# ACCURACY 64-128 HU N1 1L 
-a_Attention_1_64_8N1 = './dataset/N1/Attention1l_truthpred_70p64u8b/accuracy_score.npy'
-a_Attention_1_128_8N1 = './dataset/N1/Attention1l_truthpred_70p128u8b/accuracy_score.npy'
-
-# ACCURACY 64-128 HU N1 3L 
-a_Attention_3_64_8N1 = './dataset/N1/Attention3l_truthpred_70p64u8b/accuracy_score.npy'
-a_Attention_3_128_8N1 = './dataset/N1/Attention3l_truthpred_70p128u8b/accuracy_score.npy'
-
-# ACCURACY 64 HU N1 1L
-a_concatH_1_64_8N1 = './dataset/N1/concatH1l_truthpred_70p64u8b/accuracy_score.npy'
-
-
-# ACCURACY 64 HU N1 3L
-a_concatH_3_64_8N1 = './dataset/N1/concatH3l_truthpred_70p64u8b/accuracy_score.npy'
-
-
-
-aN1=[]
-#Attention / concatH
-aN1.append(np.load(a_Attention_1_64_8N1))
-aN1.append(np.load(a_Attention_1_128_8N1))
-aN1.append(np.load(a_Attention_3_64_8N1))
-aN1.append(np.load(a_Attention_3_128_8N1))
-
-aN1.append(np.load(a_concatH_1_64_8N1))
-
-aN1.append(np.load(a_concatH_3_64_8N1))
-
-
-aAtt_concat = pd.DataFrame(np.array(aN1))
-aAtt_concat.to_csv("accuracyAttentionConcat.csv")
-
-
-
diff --git a/tablexclassN1.py b/tablexclassN1.py
deleted file mode 100644
index 371d9038911663c7f4b91f3cbb97d2a8df83c06b..0000000000000000000000000000000000000000
--- a/tablexclassN1.py
+++ /dev/null
@@ -1,90 +0,0 @@
-import numpy as np
-import pandas as pd
-
-
-
-#FSCORE 64-128-256 HIDDEN UNITS N1 1LIV
-var_1_64_8N1 = './dataset/N1/LSTM1l_truthpred_70p64u8b/fscore.npy'
-var_1_128_8N1 = './dataset/N1/LSTM1l_truthpred_70p128u8b/fscore.npy'
-var_1_256_8N1 = './dataset/N1/LSTM1l_truthpred_70p256u8b/fscore.npy'
-
-#FSCORE 64-128-256 HIDDEN UNITS N1 3LIV
-var_3_64_8N1 = './dataset/N1/LSTM3l_truthpred_70p64u8b/fscore.npy'
-var_3_128_8N1 = './dataset/N1/LSTM3l_truthpred_70p128u8b/fscore.npy'
-var_3_256_8N1 = './dataset/N1/LSTM3l_truthpred_70p256u8b/fscore.npy'
-
-#FSCORE RFC
-var_N1 = './dataset/N1/RFC_truthpred_70p/fscore.npy'
-
-#FSCORE 64-128 HIDDEN UNITS N1 1LIV
-var_B_1_64_8N1 = './dataset/N1/B_LSTM1l_truthpred_70p64u8b/fscore.npy'
-var_B_1_128_8N1 = './dataset/N1/B_LSTM1l_truthpred_70p128u8b/fscore.npy'
-
-#FSCORE 64-128 HIDDEN UNITS N1 3LIV
-var_B_3_64_8N1 = './dataset/N1/B_LSTM3l_truthpred_70p64u8b/fscore.npy'
-var_B_3_128_8N1 = './dataset/N1/B_LSTM3l_truthpred_70p128u8b/fscore.npy'
-
-var_attention_1_64_8N1 = './dataset/N1/Attention1l_truthpred_70p64u8b/fscore.npy'
-var_attention_1_128_8N1 ='./dataset/N1/Attention1l_truthpred_70p128u8b/fscore.npy'
-var_attention_1_256_8N1 ='./dataset/N1/Attention1l_truthpred_70p256u8b/fscore.npy'
-
-var_attention_3_64_8N1 ='./dataset/N1/Attention3l_truthpred_70p64u8b/fscore.npy'
-var_attention_3_128_8N1 ='./dataset/N1/Attention3l_truthpred_70p128u8b/fscore.npy'
-var_attention_3_256_8N1 ='./dataset/N1/Attention3l_truthpred_70p256u8b/fscore.npy'
-
-#FSCORE CONCAT
-var_concat_1_64_8N1 = './dataset/N1/concatH1l_truthpred_70p64u8b/fscore.npy'
-var_concat_1_128_8N1 = './dataset/N1/concatH1l_truthpred_70p128u8b/fscore.npy'
-var_concat_1_256_8N1 = './dataset/N1/concatH1l_truthpred_70p256u8b/fscore.npy'
-
-#FSCORE CONCAT
-var_concat_3_64_8N1 = './dataset/N1/concatH3l_truthpred_70p64u8b/fscore.npy'
-var_concat_3_128_8N1 = './dataset/N1/concatH3l_truthpred_70p128u8b/fscore.npy'
-var_concat_3_256_8N1 = './dataset/N1/concatH3l_truthpred_70p256u8b/fscore.npy'
-
-#FSCORE DUAL
-var_dual_3_32_8N1 = './dataset/N1/dualLSTM3l_truthpred_70p32u8b+/fscore.npy'
-var_dual_3_64_8N1 = './dataset/N1/dualLSTM3l_truthpred_70p64u8b+/fscore.npy'
-
-#FSCOREs N1
-fsN1=[]
-#LSTM N1 1LIV
-fsN1.append(np.load(var_1_64_8N1))
-fsN1.append(np.load(var_1_128_8N1))
-fsN1.append(np.load(var_1_256_8N1))
-#LSTM N1 L3
-fsN1.append(np.load(var_3_64_8N1))
-fsN1.append(np.load(var_3_128_8N1))
-fsN1.append(np.load(var_3_256_8N1))
-#RFC
-fsN1.append(np.load(var_N1))
-#BI LSTM
-fsN1.append(np.load(var_B_1_64_8N1))
-fsN1.append(np.load(var_B_1_128_8N1))
-fsN1.append(np.load(var_B_3_64_8N1))
-fsN1.append(np.load(var_B_3_128_8N1))
-#Attention N1
-fsN1.append(np.load(var_attention_1_64_8N1))
-fsN1.append(np.load(var_attention_1_128_8N1))
-fsN1.append(np.load(var_attention_1_256_8N1))
-fsN1.append(np.load(var_attention_3_64_8N1))
-fsN1.append(np.load(var_attention_3_128_8N1))
-fsN1.append(np.load(var_attention_3_256_8N1))
-
-fsN1.append(np.load(var_concat_1_64_8N1))
-fsN1.append(np.load(var_concat_1_128_8N1))
-fsN1.append(np.load(var_concat_1_256_8N1))
-#concat N1 L3
-fsN1.append(np.load(var_concat_3_64_8N1))
-fsN1.append(np.load(var_concat_3_128_8N1))
-fsN1.append(np.load(var_concat_3_256_8N1))
-#dual N1 L3
-fsN1.append(np.load(var_dual_3_32_8N1))
-fsN1.append(np.load(var_dual_3_64_8N1))
-
-
-print "fscore"
-print "N1:",np.array(fsN1)
-
-fsLSTM_rfc = pd.DataFrame(np.array(fsN1))
-fsLSTM_rfc.to_csv("fscorexclasses.csv")
diff --git a/tablexclassN2.py b/tablexclassN2.py
deleted file mode 100644
index 08ab0d27c7768d9f6df5e21b9afe85990496ec31..0000000000000000000000000000000000000000
--- a/tablexclassN2.py
+++ /dev/null
@@ -1,36 +0,0 @@
-import numpy as np
-import pandas as pd
-
-
-
-#PRECISION 64-128-256 HIDDEN UNITS N2 1LIV
-var_1_64_8N2 = './dataset/N2/LSTM1l_truthpred_70p64u8b/precision.npy'
-var_1_128_8N2 = './dataset/N2/LSTM1l_truthpred_70p128u8b/precision.npy'
-var_1_256_8N2 = './dataset/N2/LSTM1l_truthpred_70p256u8b/precision.npy'
-
-#PRECISION 64-128-256 HIDDEN UNITS N2 3LIV
-var_3_64_8N2 = './dataset/N2/LSTM3l_truthpred_70p64u8b/precision.npy'
-var_3_128_8N2 = './dataset/N2/LSTM3l_truthpred_70p128u8b/precision.npy'
-var_3_256_8N2 = './dataset/N2/LSTM3l_truthpred_70p256u8b/precision.npy'
-
-#PRECISION RFC
-var_N2 = './dataset/N2/RFC_truthpred_70p/precision.npy'
-
-#PRECISION values N2
-fsN2=[]
-#LSTM N2 1LIV
-fsN2.append(np.load(var_1_64_8N2))
-fsN2.append(np.load(var_1_128_8N2))
-fsN2.append(np.load(var_1_256_8N2))
-#LSTM N2 L3
-fsN2.append(np.load(var_3_64_8N2))
-fsN2.append(np.load(var_3_128_8N2))
-fsN2.append(np.load(var_3_256_8N2))
-#RFC
-fsN2.append(np.load(var_N2))
-
-print "precision"
-print "N2:",np.array(fsN2)
-
-fsLSTM_rfc = pd.DataFrame(np.array(fsN2))
-fsLSTM_rfc.to_csv("precisionxclassesN2.csv")