diff --git a/classificationWorkflow.py b/classificationWorkflow.py
index b2898aa2d68acf90ee83951dade9d5e34926eae0..d0b298e29ea696f5ace7f5cc79dcb6e79b0964b5 100644
--- a/classificationWorkflow.py
+++ b/classificationWorkflow.py
@@ -143,7 +143,7 @@ def baseDeepTraining(shp,code,flds, params, model_file, out_scaler, csv_classes,
         for s in shp[1:]:
             ds = ds.append(geopandas.read_file(s))
     #Extract feats and targets
-    feats = ds.truncate(before=flds[0],axis="columns").truncate(after=flds[-1],axis="columns")
+    feats = ds.filter(flds,axis=1)
     targets = ds[code]
     nb_class = len(targets.unique())
     classes = np.array(sorted(targets.unique()))
@@ -188,7 +188,7 @@ def baseDeepClassify(shp, model_file, code,flds, out_file, compute_confidence=Fa
     csv_classes = os.path.join(os.path.dirname(model_file),os.path.basename(model_file).replace(code,code + '_class').replace('.h5','.csv'))
 
     ds = geopandas.read_file(shp)
-    feats = ds.truncate(before=flds[0],axis='columns').truncate(after=flds[-1],axis='columns')
+    feats = ds.filter(flds,axis=1)
 
     scaler = joblib.load(scaler_file)
     cfeats = scaler.transform(feats)
@@ -227,7 +227,7 @@ def deepTraining(shp,code,model_fld,params,feat,feat_mode = 'list', epochs=20):
     #Read training shp with geopandas
     ds = geopandas.read_file(shp)
     #Extract feats and targets
-    feats = ds.truncate(before=feat[0],axis="columns").truncate(after=feat[-1],axis="columns")
+    feats = ds.filter(feat,axis=1)
     targets = ds[code]
     nb_class = len(targets.unique())
     classes = np.array(sorted(targets.unique()))
@@ -235,7 +235,6 @@ def deepTraining(shp,code,model_fld,params,feat,feat_mode = 'list', epochs=20):
     ctargets = np.array([np.where(classes==i)[0][0] for i in targets])
 
     scaler = StandardScaler()
-    print (feats)
     scaler.fit(feats)
     cfeats = scaler.transform(feats)
 
@@ -282,7 +281,7 @@ def deepClassify(shp_list,code,scaler,model_file, csv_classes,out_fld,out_ext,fe
     for shp in shp_list:
         out_file = os.path.join(out_fld, os.path.basename(shp).replace('.shp', out_ext + '.shp'))
         ds = geopandas.read_file(shp)
-        feats = ds.truncate(before=feat[0],axis='columns').truncate(after=feat[-1],axis='columns')
+        feats = ds.filter(feat,axis=1)
 
         cfeats = scaler.transform(feats)
         predict = model.predict(cfeats)