diff --git a/Learning/ObjectBased.py b/Learning/ObjectBased.py
index 93effce4b23119aa2320ea42a11d115249a340ee..3d23e444f615cfd3f50cd104ee922651172c7863 100644
--- a/Learning/ObjectBased.py
+++ b/Learning/ObjectBased.py
@@ -9,17 +9,19 @@ from sklearn.ensemble import RandomForestClassifier
 from sklearn.metrics import confusion_matrix, accuracy_score, cohen_kappa_score, precision_recall_fscore_support
 
 class ObjectBasedClassifier:
-    def __init__(self, object_layer, reference_data, time_series_list, user_feature_list, ref_class_field='class',
-                 ref_id_field='id'):
+    def __init__(self, object_layer, time_series_list, user_feature_list,
+                 reference_data=None, ref_class_field='class', ref_id_field='id'):
         self.obia_base = OBIABase(object_layer, ref_data=reference_data, ref_class_field=ref_class_field,
                                   ref_id_field=ref_id_field)
         for lst in time_series_list:
             self.obia_base.add_raster_time_series_for_stats(lst)
         for ras in user_feature_list:
             self.obia_base.add_raster_for_stats(ras)
-        self.obia_base.populate_ref_db()
-        self.training_base = self.obia_base.get_reference_db_as_training_base(class_field=ref_class_field)
-        self.training_base['folds'] = []
+        # Stays None in classification-only use; training entry points must check for it.
+        self.training_base = None
+        if reference_data is not None:
+            self.obia_base.populate_ref_db()
+            self.training_base = self.obia_base.get_reference_db_as_training_base(class_field=ref_class_field)
+            self.training_base['folds'] = []
         return
 
     def gen_k_folds(self, k, class_field='class'):
@@ -96,11 +97,10 @@ class ObjectBasedClassifier:
         }
         return models, summary, results
 
-    def classify(self, model, output_file=None, compress='NONE'):
+    def classify(self, model, output_file=None, compress='NONE', perc=None):
         prg = tqdm(desc='Classification', total=len(self.obia_base.tiles))
         if isinstance(model, list):
-            for t, L, X in self.obia_base.tiled_data(normalize=[self.training_base['perc2'],
-                                                                self.training_base['perc98']]):
+            for t, L, X in self.obia_base.tiled_data(normalize=perc):
                 prob = []
                 for m in model:
                     prob.append(m.predict_proba(X))
@@ -109,8 +109,7 @@ class ObjectBasedClassifier:
                 self.obia_base.populate_map(t, L, c, output_file, compress)
                 prg.update(1)
         else:
-            for t,L,X in self.obia_base.tiled_data(normalize=[self.training_base['perc2'],
-                                                              self.training_base['perc98']]):
+            for t,L,X in self.obia_base.tiled_data(normalize=perc):
                 c = model.predict(X)
                 self.obia_base.populate_map(t, L, c, output_file, compress)
                 prg.update(1)
diff --git a/Workflows/basic.py b/Workflows/basic.py
index 2fd290506f95550ef004ebbd56ca53fdfc82de6d..e8191481278ddffec8f85156d6e25c89af5dff7a 100644
--- a/Workflows/basic.py
+++ b/Workflows/basic.py
@@ -3,6 +3,7 @@ import json
 import pickle
 from moringa import preprocess_s2, run_segmentation
 from Learning.ObjectBased import ObjectBasedClassifier
+from Postprocessing import Report
 
 def process_timeseries(oroot, d, ts_lst_pkl):
     ts_lst = []
@@ -44,20 +45,58 @@ def train_valid_workflow(seg, ts_lst_pkl, d, m_file):
     with open(ts_lst_pkl, 'rb') as ts_save:
         ts_lst = pickle.load(ts_save)
     obc = ObjectBasedClassifier(seg,
-                                d['ref_db']['path'],
                                 ts_lst,
                                 d['userfeat'],
+                                reference_data=d['ref_db']['path'],
                                 ref_class_field=d['ref_db']['fields'])
 
     obc.gen_k_folds(5, class_field=d['ref_db']['fields'][-1])
 
-    for cf in d['ref_db']['fields']:
+    for i,cf in enumerate(d['ref_db']['fields']):
         if d['training']['classifier'] == 'rf':
             m, s, r = obc.train_RF(d['training']['parameters']['n_trees'], class_field=cf, return_true_vs_pred=True)
-            m_dict = {'model': m, 'results': r, 'summary': s}
-            os.makedirs(m_file[cf], exist_ok=True)
-            with open(m_file[cf], 'wb') as mf:
+            m_dict = {'model': m, 'results': r, 'summary': s,
+                      'perc2': obc.training_base['perc2'], 'perc98': obc.training_base['perc98']}
+            os.makedirs(os.path.dirname(m_file[i]), exist_ok=True)
+            with open(m_file[i], 'wb') as mf:
                 pickle.dump(m_dict, mf)
+    return
+
+def classify(seg, ts_lst_pkl, m_files, d, map_files):
+    assert (os.path.exists(seg))
+    assert (os.path.exists(ts_lst_pkl))
+    for m_file in m_files:
+        assert (os.path.exists(m_file))
+    print('[MORINGA-INFO] : Performing classification')
+    with open(ts_lst_pkl, 'rb') as ts_save:
+        ts_lst = pickle.load(ts_save)
+    obc = ObjectBasedClassifier(seg,
+                                ts_lst,
+                                d['userfeat'])
+    for m_file, map_file in zip(m_files, map_files):
+        with open(m_file, 'rb') as mf:
+            m_dict = pickle.load(mf)
+        obc.classify(m_dict['model'], perc=[m_dict['perc2'], m_dict['perc98']], output_file=map_file)
+    return
+
+def report(map_files, m_files, d, report_files):
+    print('[MORINGA-INFO] : Generating report(s)')
+    for map_file, palette_fn, m_file, report_file in zip(map_files, d['map_output']['palette_files'], m_files, report_files):
+        assert os.path.exists(map_file)
+        assert os.path.exists(m_file)
+        os.makedirs(os.path.splitext(report_file)[0]+'_figures', exist_ok=True)
+        with open(m_file, 'rb') as mf:
+            m_dict = pickle.load(mf)
+        of = Report.generate_report_figures(
+            map_file,
+            palette_fn,
+            m_dict['results'],
+            m_dict['summary'],
+            os.path.splitext(report_file)[0]+'_figures',
+            d['chain_name'])
+        Report.generate_pdf(of, report_file, d['chain_name'])
+    return
+
 def basic(cfg, runlevel=1, single_step=False):
     os.environ['OTB_LOGGER_LEVEL'] = 'CRITICAL'
     with open(cfg,'r') as f:
@@ -84,15 +123,32 @@ def basic(cfg, runlevel=1, single_step=False):
             return
 
     # Training/Validation Workflow
-    m_file = {}
+    m_files = []
     for cf in d['ref_db']['fields']:
-        m_file[cf] = os.path.join(oroot, 'model/model_{}.pkl'.format(cf))
+        m_files.append(os.path.join(oroot, 'model/model_{}.pkl'.format(cf)))
     if step == 3:
-        train_valid_workflow(seg, ts_lst_pkl, d, m_file)
+        train_valid_workflow(seg, ts_lst_pkl, d, m_files)
+        step += 1
+        if single_step:
+            return
+
+    # Classification
+    map_files = []
+    for cf in d['ref_db']['fields']:
+        map_files.append(os.path.join(oroot, 'maps/{}_map_{}.tif'.format(d['chain_name'],cf)))
+    if step == 4:
+        classify(seg, ts_lst_pkl, m_files, d, map_files)
         step += 1
         if single_step:
             return
 
+    # Report
+    report_fn = []
+    for cf in d['ref_db']['fields']:
+        report_fn.append(os.path.join(oroot, 'reports/{}_report_{}.pdf'.format(d['chain_name'],cf)))
+    if step == 5:
+        report(map_files, m_files, d, report_fn)
+
     return