diff --git a/classificationWorkflow.py b/classificationWorkflow.py
index d16e445bc935bf48843af9608e331279d06a6b34..b2898aa2d68acf90ee83951dade9d5e34926eae0 100644
--- a/classificationWorkflow.py
+++ b/classificationWorkflow.py
@@ -128,6 +128,174 @@ def baseClassifyCmd(shp,stat_file,model_file,code,flds,out_file,compute_confiden
     else:
         sys.exit('Platform not supported!')
 
+def baseDeepTraining(shp, code, flds, params, model_file, out_scaler, csv_classes, epochs=20):
+    import geopandas
+    import numpy as np
+    import tensorflow as tf
+    from sklearn.preprocessing import StandardScaler
+    import joblib
+    import csv
+
+    # Read the training shapefile(s) with geopandas; a list of paths is concatenated
+    if isinstance(shp, str):
+        ds = geopandas.read_file(shp)
+    elif isinstance(shp, list):
+        ds = geopandas.read_file(shp[0])
+        for s in shp[1:]:
+            ds = ds.append(geopandas.read_file(s))
+    # Extract the feature columns (flds is assumed column-ordered) and the target labels
+    feats = ds.truncate(before=flds[0],axis="columns").truncate(after=flds[-1],axis="columns")
+    targets = ds[code]
+    classes = np.array(sorted(targets.unique()))
+    nb_class = len(classes)
+    # Remap class labels to contiguous indices 0..nb_class-1, as required by
+    # sparse_categorical_crossentropy
+    ctargets = np.array([np.where(classes==i)[0][0] for i in targets])
+
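+    # Standardize features to zero mean / unit variance; the fitted scaler is
+    # saved alongside the model so the same scaling is applied at prediction time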
+    scaler = StandardScaler()
+    scaler.fit(feats)
+    cfeats = scaler.transform(feats)
+
+    if params[1] == 'standard':
+        # Model init
+        model = tf.keras.models.Sequential()
+
+        # Funnel MLP: halve the layer width at each hidden layer (integer
+        # division keeps the unit counts valid), softmax output over the classes
+        model.add(tf.keras.layers.Dense(len(flds), activation="relu"))
+        model.add(tf.keras.layers.Dense(len(flds)//2, activation="relu"))
+        model.add(tf.keras.layers.Dense(len(flds)//4, activation="relu"))
+        model.add(tf.keras.layers.Dense(nb_class, activation="softmax"))
+
+        model.compile(loss="sparse_categorical_crossentropy",
+            optimizer="sgd",
+            metrics=["accuracy"])
+    else:
+        sys.exit('ERROR: deep model type ' + str(params[1]) + ' not supported.')
+
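+    # Train, then persist the network, the scaler and the index-to-class mapping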
+    history = model.fit(cfeats, ctargets, epochs=epochs)
+    model.save(model_file)
+    joblib.dump(scaler,out_scaler)
+    with open(csv_classes, 'w', newline='') as file:
+        writer = csv.writer(file)
+        for i,c in enumerate(classes):
+            writer.writerow([i,c])
+
+
+def baseDeepClassify(shp, model_file, code, flds, out_file, compute_confidence=False):
+    import geopandas
+    import numpy as np
+    import tensorflow as tf
+    import joblib
+    import csv
+
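+    # The scaler and class-list paths are derived from the model filename,
+    # mirroring the naming scheme used when the model was trained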
+    scaler_file = os.path.join(os.path.dirname(model_file),os.path.basename(model_file).replace(code,code + '_scaler').replace('.h5','.joblib'))
+    csv_classes = os.path.join(os.path.dirname(model_file),os.path.basename(model_file).replace(code,code + '_class').replace('.h5','.csv'))
+
+    ds = geopandas.read_file(shp)
+    feats = ds.truncate(before=flds[0],axis='columns').truncate(after=flds[-1],axis='columns')
+
+    scaler = joblib.load(scaler_file)
+    cfeats = scaler.transform(feats)
+
+    model=tf.keras.models.load_model(model_file)
+    predict = model.predict(cfeats)
+
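+    # Map network output indices back to the original class codes saved at training time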
+    dict_classes = {}
+    with open(csv_classes,'r') as file:
+        rdr = csv.reader(file)
+        for row in rdr:
+            dict_classes[int(row[0])]=int(row[1])
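+    # For each sample, take the class with the highest softmax score (argmax)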
+    class_predict = [dict_classes[np.where(p==np.max(p))[0][0]] for p in predict]
+    ds.insert(ds.shape[1]-1,'p'+code,class_predict)
+    if compute_confidence:
+        confmap = [np.max(p) for p in predict]
+        ds.insert(ds.shape[1]-1,'conf_'+code,confmap)
+    if out_file is None:
+        out_file = shp
+    ds.to_file(out_file)
+
+def deepTraining(shp, code, model_fld, params, feat, feat_mode='list', epochs=20):
+    import geopandas
+    import numpy as np
+    import tensorflow as tf
+    from sklearn.preprocessing import StandardScaler
+    import joblib
+    import csv
+
+    if params[1] == 'standard':
+        ext = "tfDense"
+    else:
+        sys.exit('ERROR: deep model type ' + str(params[1]) + ' not supported.')
+
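+    # Model, scaler and class list share a common naming scheme so they can be
+    # located again from the model path alone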
+    model_file = os.path.join(model_fld, ext + '_' + code + '.h5')
+    scaler_file = os.path.join(model_fld, ext + '_scaler_' + code + '.joblib')
+    csv_classes = os.path.join(model_fld, ext + '_class_'+ code + '.csv')
+
+    # Read the training shapefile with geopandas
+    ds = geopandas.read_file(shp)
+    # Extract the feature columns (feat is assumed column-ordered) and the target labels
+    feats = ds.truncate(before=feat[0],axis="columns").truncate(after=feat[-1],axis="columns")
+    targets = ds[code]
+    classes = np.array(sorted(targets.unique()))
+    nb_class = len(classes)
+    # Remap class labels to contiguous indices 0..nb_class-1, as required by
+    # sparse_categorical_crossentropy
+    ctargets = np.array([np.where(classes==i)[0][0] for i in targets])
+
+    # Standardize features; the fitted scaler is saved for reuse at prediction time
+    scaler = StandardScaler()
+    scaler.fit(feats)
+    cfeats = scaler.transform(feats)
+
+    if params[1] == 'standard':
+        # Model init
+        model = tf.keras.models.Sequential()
+
+        # Funnel MLP: halve the layer width at each hidden layer (integer
+        # division keeps the unit counts valid), softmax output over the classes
+        model.add(tf.keras.layers.Dense(len(feat), activation="relu"))
+        model.add(tf.keras.layers.Dense(len(feat)//2, activation="relu"))
+        model.add(tf.keras.layers.Dense(len(feat)//4, activation="relu"))
+        model.add(tf.keras.layers.Dense(nb_class, activation="softmax"))
+
+        model.compile(loss="sparse_categorical_crossentropy",
+            optimizer="sgd",
+            metrics=["accuracy"])
+
+    history = model.fit(cfeats, ctargets, epochs=epochs)
+    model.save(model_file)
+    joblib.dump(scaler,scaler_file)
+    with open(csv_classes, 'w', newline='') as file:
+        writer = csv.writer(file)
+        for i,c in enumerate(classes):
+            writer.writerow([i,c])
+
+    return model_file, scaler_file, csv_classes
+
+def deepClassify(shp_list, code, scaler, model_file, csv_classes, out_fld, out_ext, feat, feat_mode='list', Nproc=1, compute_confidence=False):
+    import geopandas
+    import numpy as np
+    import tensorflow as tf
+    import joblib
+    import csv
+
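+    # Load the fitted scaler, trained network and class mapping produced by deepTraining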
+    scaler = joblib.load(scaler)
+    model = tf.keras.models.load_model(model_file)
+    dict_classes = {}
+    with open(csv_classes,'r') as file:
+        rdr = csv.reader(file)
+        for row in rdr:
+            dict_classes[int(row[0])]=int(row[1])
+
+    out_file_list = []
+    for shp in shp_list:
+        out_file = os.path.join(out_fld, os.path.basename(shp).replace('.shp', out_ext + '.shp'))
+        ds = geopandas.read_file(shp)
+        feats = ds.truncate(before=feat[0],axis='columns').truncate(after=feat[-1],axis='columns')
+
+        cfeats = scaler.transform(feats)
+        predict = model.predict(cfeats)
+        class_predict = [dict_classes[np.where(p==np.max(p))[0][0]] for p in predict]
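+        # Keep only the segment id, the reference field and the geometry, then append predictions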
+        out = ds.filter(items=['Segment_ID',code[1:],'geometry'])
+        out.insert(out.shape[1]-1,code,class_predict)
+        if compute_confidence:
+            confmap = [np.max(p) for p in predict]
+            out.insert(out.shape[1]-1,'confidence',confmap)
+        out.to_file(out_file)
+        out_file_list.append(out_file)
+    return out_file_list
+
 def training(shp,code,model_fld,params,feat,feat_mode = 'list'):
 
     # Platform dependent parameters
@@ -383,12 +551,13 @@ def Htraining(shp,code_list,model_fld,params,feat,feat_mode = 'list'):
 
     if '-classifier' in params:
         classifier = params[params.index('-classifier') + 1]
+    elif '-tensorflow' in params:
+        classifier = 'tfDense'
     else:
         classifier = 'libsvm'
     h_model_fld = model_fld + '/' + 'H-MODEL_' + '_'.join(code_list)
     if not os.path.exists(h_model_fld):
         os.mkdir(h_model_fld)
-    stat_file = model_fld + '/GT_stats.xml'
 
     if feat_mode == 'prefix':
         flds = getFeaturesFields(shp, feat)
@@ -397,40 +566,65 @@ def Htraining(shp,code_list,model_fld,params,feat,feat_mode = 'list'):
     else:
         sys.exit('ERROR: mode ' + feat_mode + ' not valid.')
 
-    # Statistics unskew
-    if platform.system() == 'Linux':
-        cmd = ['otbcli_ComputeVectorFeaturesStatistics','-io.vd',shp,'-io.stats',stat_file,'-feat'] + flds
-        subprocess.call(cmd,shell=sh)
-    elif platform.system() == 'Windows':
-        import otbApplication
-        app = otbApplication.Registry.CreateApplication('ComputeVectorFeaturesStatistics')
-        app.SetParameterStringList('io.vd', [shp])
-        app.SetParameterString('io.stats', stat_file)
-        app.UpdateParameters()
-        app.SetParameterStringList('feat',flds)
-        app.ExecuteAndWriteOutput()
-    else:
-        sys.exit("Platform not supported!")
+    if classifier != 'tfDense':
+        stat_file = model_fld + '/GT_stats.xml'
+        # Statistics unskew
+        if platform.system() == 'Linux':
+            cmd = ['otbcli_ComputeVectorFeaturesStatistics','-io.vd',shp,'-io.stats',stat_file,'-feat'] + flds
+            subprocess.call(cmd,shell=sh)
+        elif platform.system() == 'Windows':
+            import otbApplication
+            app = otbApplication.Registry.CreateApplication('ComputeVectorFeaturesStatistics')
+            app.SetParameterStringList('io.vd', [shp])
+            app.SetParameterString('io.stats', stat_file)
+            app.UpdateParameters()
+            app.SetParameterStringList('feat',flds)
+            app.ExecuteAndWriteOutput()
+        else:
+            sys.exit("Platform not supported!")
+
+        # Computes classes hierarchy in a dictionary
+        h_dict, ds_list = retrieveClassHierarchy(shp,code_list)
+        # Create models for each level
+        with open(h_model_fld + '/h-model.csv',mode='w') as h_model_file:
+            writer = csv.writer(h_model_file)
+            level = 'ROOT'
+            last = list(h_dict.keys())[-1]
+            for code in list(h_dict.keys()):
+                Nsb = len(h_dict[code][0])
+                for i in range(Nsb):
+                    model_file = h_model_fld + '/' + classifier + '_' + code + '_' + str(h_dict[code][0][i]) + '.model'
+                    confmat_file = h_model_fld + '/' + classifier + '_' + code + '_' + str(h_dict[code][0][i]) + '.confmat.txt'
+                    baseTrainingCmd(h_dict[code][1][i],stat_file,code,feat,params,model_file,confmat_file)
+                    writer.writerow([level,str(h_dict[code][0][i]),model_file,code,str(code!=last)])
+                level = code
 
-    # Computes classes hierarchy in a dictionnary
-    h_dict, ds_list = retrieveClassHierarchy(shp,code_list)
-    # Create models for each level
-    with open(h_model_fld + '/h-model.csv',mode='w') as h_model_file:
-        writer = csv.writer(h_model_file)
-        level = 'ROOT'
-        last = list(h_dict.keys())[-1]
-        for code in list(h_dict.keys()):
-            Nsb = len(h_dict[code][0])
-            for i in range(Nsb):
-                model_file = h_model_fld + '/' + classifier + '_' + code + '_' + str(h_dict[code][0][i]) + '.model'
-                confmat_file = h_model_fld + '/' + classifier + '_' + code + '_' + str(h_dict[code][0][i]) + '.confmat.txt'
-                baseTrainingCmd(h_dict[code][1][i],stat_file,code,feat,params,model_file,confmat_file)
-                writer.writerow([level,str(h_dict[code][0][i]),model_file,code,str(code!=last)])
-            level = code
-
-    drv = ogr.GetDriverByName('ESRI Shapefile')
-    for fn in ds_list:
-        drv.DeleteDataSource(fn)
+        drv = ogr.GetDriverByName('ESRI Shapefile')
+        for fn in ds_list:
+            drv.DeleteDataSource(fn)
+
+    elif classifier == 'tfDense':
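+        # 'scaler' is a sentinel value: Hclassify compares stat_file against it to
+        # select the deep-learning classification path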
+        stat_file = "scaler"
+        # Computes classes hierarchy in a dictionary
+        h_dict, ds_list = retrieveClassHierarchy(shp,code_list)
+        # Create models for each level
+        with open(h_model_fld + '/h-model.csv',mode='w') as h_model_file:
+            writer = csv.writer(h_model_file)
+            level = 'ROOT'
+            last = list(h_dict.keys())[-1]
+            for code in list(h_dict.keys()):
+                Nsb = len(h_dict[code][0])
+                for i in range(Nsb):
+                    model_file = h_model_fld + '/' + classifier + '_' + code + '_' + str(h_dict[code][0][i]) + '.h5'
+                    scaler = h_model_fld + '/' + classifier + '_' + code + '_scaler_' + str(h_dict[code][0][i]) + '.joblib'
+                    csv_classes = h_model_fld + '/' + classifier + '_' + code + '_class_' + str(h_dict[code][0][i]) + '.csv'
+                    baseDeepTraining(h_dict[code][1][i],code,feat,params,model_file,scaler,csv_classes)
+                    writer.writerow([level,str(h_dict[code][0][i]),model_file,code,str(code!=last)])
+                level = code
+
+        drv = ogr.GetDriverByName('ESRI Shapefile')
+        for fn in ds_list:
+            drv.DeleteDataSource(fn)
 
     return stat_file, h_model_fld
 
@@ -489,15 +683,25 @@ def Hclassify(shp_list,stat_file,h_model_fld,feat,out_fld,out_ext,feat_mode = 'l
                 if os.path.exists(in_shp):
                     out_shp = in_shp.replace('.shp','_ROOT.shp') if row[0] == 'ROOT' else None
                     split_shp = out_shp if row[0] == 'ROOT' else in_shp
-                    # Classification attempt with only one class raised an error,
-                    # if so, apply the value class in consequence
-                    with open(row[2]) as model:
-                        lines = model.readlines()
-                        to_classify = int(lines[1].split(' ')[0]) != 1
-                    if to_classify :
-                        baseClassifyCmd(in_shp,stat_file,row[2],'p'+row[3],flds,out_shp)
-                    else :
-                        addField(in_shp,'p'+row[3],int(lines[1].split(' ')[1]))
+                    if stat_file != 'scaler':
+                        # Classification attempt with only one class raised an error,
+                        # if so, apply the value class in consequence
+                        with open(row[2]) as model:
+                            lines = model.readlines()
+                            to_classify = int(lines[1].split(' ')[0]) != 1
+                        if to_classify :
+                            baseClassifyCmd(in_shp,stat_file,row[2],'p'+row[3],flds,out_shp)
+                        else :
+                            addField(in_shp,'p'+row[3],int(lines[1].split(' ')[1]))
+                    else:
+                        # The class-list CSV has one row per class; a single row means a
+                        # one-class model, so assign that class directly instead of predicting
+                        csv_classes = os.path.join(os.path.dirname(row[2]),os.path.basename(row[2]).replace(row[3],row[3] + '_class').replace('.h5','.csv'))
+                        with open(csv_classes) as file:
+                            rdr = csv.reader(file)
+                            data = list(rdr)
+                            to_classify = True if len(data) != 1 else int(data[0][1])
+                        if to_classify is True:
+                            baseDeepClassify(in_shp,row[2],row[3],flds,out_shp)
+                        else :
+                            addField(in_shp,'p'+row[3],to_classify)
                     if out_shp is not None:
                         toDelete.append(out_shp)
                     #if there is a more detailed level than the current one, split the current 
diff --git a/launchChain.py b/launchChain.py
index 3511c638b19d83066fca85bde8f16720681254aa..7b7ddc7c1964e50e37b1e28dd5a822b5435d46dc 100644
--- a/launchChain.py
+++ b/launchChain.py
@@ -14,7 +14,7 @@ import csv
 from math import floor,log10
 from computeFeatures import featureComputation, readConfigFile
 from segmentationWorkflow import segmentationWorkflow, generateGTSamples, generateVALSamples
-from classificationWorkflow import training, classify
+from classificationWorkflow import training, classify, deepTraining, deepClassify
 from mtdUtils import checkSRS, getRasterInfo, getFieldNames, keepFields, queuedProcess
 from validationFramework import pixelValidation,surfaceValidation,formatValidationTxt, genKFolds, kFoldRefToSamples, kFoldReport, getVariableImportance
 import time
@@ -670,8 +670,7 @@ def main(argv):
     rfimpnodesize = int(config.get('TRAINING CONFIGURATION', 'rfimpnodesize'))
     rfimpmaxfeat = int(config.get('TRAINING CONFIGURATION', 'rfimpmaxfeat'))
     rfimpnruns = int(config.get('TRAINING CONFIGURATION', 'rfimpnruns'))
-    hierarchical_classif = config.get('TRAINING CONFIGURATION','hierarchicalclassif')
-
+    hierarchical_classif = config.get('TRAINING CONFIGURATION','hierarchicalclassif') == 'YES'
     model_fld = output_fld + '/MODEL_' + setup_name
     if not os.path.exists(model_fld):
         os.mkdir(model_fld)
@@ -706,14 +705,23 @@ def main(argv):
                         if not os.path.exists(mfld):
                             os.mkdir(mfld)
 
-                        sfn, mfn = training(kfold_train_samples[i], cfield, mfld, params, var_list)
-                        val_list = [kfold_test_samples[i]]
-                        val_out = mfld + '/' + os.path.basename(kfold_test_samples[i]).replace('.shp','_' + cfield + '.shp')
-                        val_out_check = mfld + '/' + os.path.basename(kfold_test_samples[i]).replace('.shp','_' + cfield + '_check.shp')
-                        val_out_tmp_list = classify(val_list, 'p' + cfield, sfn, mfn, mfld,
-                                                    '_' + cfield + '_tmp', var_list)
-                        keepFields(val_out_tmp_list[0], val_out, ['Segment_ID', cfield, 'p' + cfield, 'confidence'])
-                        shpd.DeleteDataSource(val_out_tmp_list[0])
+                        if '-classifier' in params:
+                            sfn, mfn = training(kfold_train_samples[i], cfield, mfld, params, var_list)
+                            val_list = [kfold_test_samples[i]]
+                            val_out = mfld + '/' + os.path.basename(kfold_test_samples[i]).replace('.shp','_' + cfield + '.shp')
+                            val_out_check = mfld + '/' + os.path.basename(kfold_test_samples[i]).replace('.shp','_' + cfield + '_check.shp')
+                            val_out_tmp_list = classify(val_list, 'p' + cfield, sfn, mfn, mfld,
+                                                        '_' + cfield + '_tmp', var_list)
+                            keepFields(val_out_tmp_list[0], val_out, ['Segment_ID', cfield, 'p' + cfield, 'confidence'])
+                            shpd.DeleteDataSource(val_out_tmp_list[0])
+                        elif '-tensorflow' in params:
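+                            # TensorFlow path: deepTraining also returns the scaler and
+                            # class-list files that deepClassify needs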
+                            mfn, scaler, csv_classes = deepTraining(kfold_train_samples[i], cfield, mfld, params, var_list)
+                            val_list = [kfold_test_samples[i]]
+                            val_out = mfld + '/' + os.path.basename(kfold_test_samples[i]).replace('.shp','_' + cfield + '.shp')
+                            val_out_check = mfld + '/' + os.path.basename(kfold_test_samples[i]).replace('.shp','_' + cfield + '_check.shp')
+                            val_out_list = deepClassify(val_list, 'p' + cfield, scaler, mfn, csv_classes, mfld,
+                                                        '_' + cfield, var_list)
+                            # A shapefile spans several files; rename the sidecars too, not just the .shp
+                            for sfx in ('.shp', '.shx', '.dbf', '.prj'):
+                                src = val_out_list[0].replace('.shp', sfx)
+                                if os.path.exists(src):
+                                    os.rename(src, val_out.replace('.shp', sfx))
                         txt_out = mfld + '/' + os.path.basename(kfold_test_samples[i]).replace('.shp','_' + cfield + '_report.txt')
                         classes, cm, acc, kappa, prf = surfaceValidation(test_folds[i], val_out, val_out_check, cfield)
                         formatValidationTxt(classes, cm, acc, kappa, prf, txt_out)
@@ -728,7 +736,10 @@ def main(argv):
                     kFoldReport(fscores,accuracies,kappas,model_fld + '/kFold_report_' + cfield + '.txt')
 
             for cfield in cfieldlist:
-                training(samples_fld + '/GT_samples.shp',cfield,model_fld,params,var_list)
+                if '-classifier' in params:
+                    training(samples_fld + '/GT_samples.shp',cfield,model_fld,params,var_list)
+                elif '-tensorflow' in params:
+                    deepTraining(samples_fld + '/GT_samples.shp',cfield,model_fld,params,var_list)
 
             if rfimp == True:
                 for cfield in cfieldlist:
@@ -810,76 +821,113 @@ def main(argv):
 
     if '-classifier' in params:
         classifier = params[params.index('-classifier') + 1]
+    elif '-tensorflow' in params:
+        classifier = 'tfDense'
     else:
         classifier = 'libsvm'
-    stat_file = model_fld + '/GT_stats.xml'
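+    # With TensorFlow there is no OTB statistics XML; 'scaler' is a sentinel understood by Hclassify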
+    stat_file = model_fld + '/GT_stats.xml' if '-tensorflow' not in params else 'scaler'
 
     shpd = ogr.GetDriverByName('ESRI Shapefile')
 
     if input_runlevel < 7:
         if hierarchical_classif is False :
             for cfield in cfieldlist:
-                model_file = model_fld + '/' + classifier + '_' + cfield + '.model'
-                if not os.path.exists(model_file):
-                    warnings.warn('Error: Model file ' + model_file + ' not found. Skipping.')
-                    continue
-
-                # Validation step
-                if ch_mode == 0 or ch_mode == 2:
-                    ref_shp = config.get('GENERAL CONFIGURATION', 'validation')
-                    val_mode = int(config.get('GENERAL CONFIGURATION', 'validmode'))
-                    val_list = [val_fld + '/VAL_samples.shp']
-                    val_out = final_fld + '/VAL_samples_' + cfield + '.shp'
-                    val_out_check = final_fld + '/VAL_samples_' + cfield + '_check.shp'
-                    val_out_tmp_list = classify(val_list, 'p'+cfield, stat_file, model_file, final_fld, '_' + cfield + '_tmp', var_list, compute_confidence=comp_conf)
-                    keepFields(val_out_tmp_list[0],val_out,['Segment_ID',cfield,'p'+cfield,'confidence'])
-                    shpd.DeleteDataSource(val_out_tmp_list[0])
-                    txt_out = final_fld + '/VAL_samples.' + cfield + '.report.txt'
-                    if val_mode == 0:
-                        pixelValidation(ref_shp, val_out, seg_output, txt_out, cfield)
-                    elif val_mode == 1:
-                        classes,cm,acc,kappa,prf = surfaceValidation(ref_shp, val_out, val_out_check, cfield)
-                        formatValidationTxt(classes, cm, acc, kappa, prf, txt_out)
-
-                # Classification and map production steps
-                shp_list = glob.glob(test_fld + '/segmentation_*.shp')
-                if ch_mode > 0 or (ch_mode < 0 and len(shp_list) > 0):
-                    if not os.path.exists(final_fld + '/MAPS'):
-                        os.mkdir(final_fld + '/MAPS')
-                    if not os.path.exists(final_fld + '/MAPS/VECTOR_' + cfield):
-                        os.mkdir(final_fld + '/MAPS/VECTOR_' + cfield)
-                    map_list = []
-                    ref_list = []
-
-                    for cshp in shp_list:
-                        ref_list.append(cshp.replace('.shp', '.tif'))
-
-                    map_tmp_list = classify(shp_list,'p'+cfield,stat_file,model_file,final_fld + '/MAPS/VECTOR_' + cfield + '/','_' + cfield + '_tmp',var_list,Nproc=N_proc,compute_confidence=comp_conf)
-
-                    for cshp in map_tmp_list:
-                        map_out = cshp.replace('_tmp.shp','.shp')
-                        keepFields(cshp,map_out,['Segment_ID','p'+cfield,'confidence'])
-                        shpd.DeleteDataSource(cshp)
-                        map_list.append(map_out)
-
-                    if rasout == 'VRT':
-                        if not os.path.exists(final_fld + '/MAPS/RASTER_' + cfield):
-                            os.mkdir(final_fld + '/MAPS/RASTER_' + cfield)
-                        ras_list = []
-                        cmd_list = []
-                        for map,ref in zip(map_list,ref_list):
-                            ras_list.append(final_fld + '/MAPS/RASTER_' + cfield + '/' + os.path.basename(map).replace('.shp', '.tif'))
-                            cmd = ['otbcli_Rasterization', '-in', map, '-im', ref, '-mode', 'attribute', '-mode.attribute.field', 'p'+cfield, '-out', ras_list[-1]]
-                            cmd_list.append(cmd)
-                        queuedProcess(cmd_list,N_processes=N_proc,shell=sh)
+                if '-tensorflow' not in params:
+                    model_file = os.path.join(model_fld, classifier + '_' + cfield + '.model')
+                    if not os.path.exists(model_file):
+                        warnings.warn('Error: Model file ' + model_file + ' not found. Skipping.')
+                        continue
+
+                    # Validation step
+                    if ch_mode == 0 or ch_mode == 2:
+                        ref_shp = config.get('GENERAL CONFIGURATION', 'validation')
+                        val_mode = int(config.get('GENERAL CONFIGURATION', 'validmode'))
+                        val_list = [val_fld + '/VAL_samples.shp']
+                        val_out = final_fld + '/VAL_samples_' + cfield + '.shp'
+                        val_out_check = final_fld + '/VAL_samples_' + cfield + '_check.shp'
+                        val_out_tmp_list = classify(val_list, 'p'+cfield, stat_file, model_file, final_fld, '_' + cfield + '_tmp', var_list, compute_confidence=comp_conf)
+                        keepFields(val_out_tmp_list[0],val_out,['Segment_ID',cfield,'p'+cfield,'confidence'])
+                        shpd.DeleteDataSource(val_out_tmp_list[0])
+                        txt_out = final_fld + '/VAL_samples.' + cfield + '.report.txt'
+                        if val_mode == 0:
+                            pixelValidation(ref_shp, val_out, seg_output, txt_out, cfield)
+                        elif val_mode == 1:
+                            classes,cm,acc,kappa,prf = surfaceValidation(ref_shp, val_out, val_out_check, cfield)
+                            formatValidationTxt(classes, cm, acc, kappa, prf, txt_out)
+
+                    # Classification and map production steps
+                    shp_list = glob.glob(test_fld + '/segmentation_*.shp')
+                    if ch_mode > 0 or (ch_mode < 0 and len(shp_list) > 0):
+                        if not os.path.exists(final_fld + '/MAPS'):
+                            os.mkdir(final_fld + '/MAPS')
+                        if not os.path.exists(final_fld + '/MAPS/VECTOR_' + cfield):
+                            os.mkdir(final_fld + '/MAPS/VECTOR_' + cfield)
+                        map_list = []
+                        ref_list = []
+
+                        for cshp in shp_list:
+                            ref_list.append(cshp.replace('.shp', '.tif'))
+
+                        map_tmp_list = classify(shp_list,'p'+cfield,stat_file,model_file,final_fld + '/MAPS/VECTOR_' + cfield + '/','_' + cfield + '_tmp',var_list,Nproc=N_proc,compute_confidence=comp_conf)
+
+                        for cshp in map_tmp_list:
+                            map_out = cshp.replace('_tmp.shp','.shp')
+                            keepFields(cshp,map_out,['Segment_ID','p'+cfield,'confidence'])
+                            shpd.DeleteDataSource(cshp)
+                            map_list.append(map_out)
+                else :
+                    model_file = os.path.join(model_fld, classifier + '_' + cfield + '.h5')
+                    scaler_file = os.path.join(model_fld, classifier + '_scaler_' + cfield + '.joblib')
+                    csv_classes = os.path.join(model_fld, classifier + '_class_'+ cfield +'.csv')
+                    
+                    # Validation step
+                    if ch_mode == 0 or ch_mode == 2:
+                        ref_shp = config.get('GENERAL CONFIGURATION', 'validation')
+                        val_mode = int(config.get('GENERAL CONFIGURATION', 'validmode'))
+                        val_list = [val_fld + '/VAL_samples.shp']
+                        val_out = final_fld + '/VAL_samples_' + cfield + '.shp'
+                        val_out_check = final_fld + '/VAL_samples_' + cfield + '_check.shp'
+                        val_out_list = deepClassify(val_list, 'p'+cfield, scaler_file, model_file, csv_classes, final_fld, '_' + cfield, var_list, compute_confidence=comp_conf)
+                        # A shapefile spans several files; rename the sidecars too, not just the .shp
+                        for sfx in ('.shp', '.shx', '.dbf', '.prj'):
+                            src = val_out_list[0].replace('.shp', sfx)
+                            if os.path.exists(src):
+                                os.rename(src, val_out.replace('.shp', sfx))
+                        txt_out = final_fld + '/VAL_samples.' + cfield + '.report.txt'
+                        if val_mode == 0:
+                            pixelValidation(ref_shp, val_out, seg_output, txt_out, cfield)
+                        elif val_mode == 1:
+                            classes,cm,acc,kappa,prf = surfaceValidation(ref_shp, val_out, val_out_check, cfield)
+                            formatValidationTxt(classes, cm, acc, kappa, prf, txt_out)
+
+                    # Classification and map production steps
+                    shp_list = glob.glob(test_fld + '/segmentation_*.shp')
+                    if ch_mode > 0 or (ch_mode < 0 and len(shp_list) > 0):
+                        if not os.path.exists(final_fld + '/MAPS'):
+                            os.mkdir(final_fld + '/MAPS')
+                        if not os.path.exists(final_fld + '/MAPS/VECTOR_' + cfield):
+                            os.mkdir(final_fld + '/MAPS/VECTOR_' + cfield)
+                        ref_list = []
+
+                        for cshp in shp_list:
+                            ref_list.append(cshp.replace('.shp', '.tif'))
+
+                        map_list = deepClassify(shp_list,'p'+cfield,scaler_file,model_file,csv_classes,final_fld + '/MAPS/VECTOR_' + cfield + '/','_' + cfield,var_list,Nproc=N_proc,compute_confidence=comp_conf)
 
-                        cmd = ['gdalbuildvrt', '-srcnodata', '0', '-vrtnodata', '0', final_fld + '/MAPS/RASTER_' + cfield + '/Classif_' + cfield + '.vrt'] + ras_list
-                        subprocess.call(cmd, shell=sh)
+                # Rasterize only when maps were actually produced above
+                if rasout == 'VRT' and (ch_mode > 0 or (ch_mode < 0 and len(shp_list) > 0)):
+                    if not os.path.exists(final_fld + '/MAPS/RASTER_' + cfield):
+                        os.mkdir(final_fld + '/MAPS/RASTER_' + cfield)
+                    ras_list = []
+                    cmd_list = []
+                    for map,ref in zip(map_list,ref_list):
+                        ras_list.append(final_fld + '/MAPS/RASTER_' + cfield + '/' + os.path.basename(map).replace('.shp', '.tif'))
+                        cmd = ['otbcli_Rasterization', '-in', map, '-im', ref, '-mode', 'attribute', '-mode.attribute.field', 'p'+cfield, '-out', ras_list[-1]]
+                        cmd_list.append(cmd)
+                    queuedProcess(cmd_list,N_processes=N_proc,shell=sh)
+
+                    cmd = ['gdalbuildvrt', '-srcnodata', '0', '-vrtnodata', '0', final_fld + '/MAPS/RASTER_' + cfield + '/Classif_' + cfield + '.vrt'] + ras_list
+                    subprocess.call(cmd, shell=sh)
 
         else :
             from classificationWorkflow import Hclassify
             h_model_fld = os.path.join(model_fld,'H-MODEL_' + '_'.join(cfieldlist))
-            if len(glob.glob(os.path.join(h_model_fld,'*.model'))) == 0 :
+            model_ext = '*.h5' if '-tensorflow' in params else '*.model'
+            if len(glob.glob(os.path.join(h_model_fld,model_ext))) == 0 :
                 sys.exit('Error: There is no model file in ' + h_model_fld + ' folder. Skipping.')
 
             # Validation step