diff --git a/Learning/ObjectBased.py b/Learning/ObjectBased.py
index f1dd96c4a70a74ad43ebe74ef4af33bd124e0af3..93effce4b23119aa2320ea42a11d115249a340ee 100644
--- a/Learning/ObjectBased.py
+++ b/Learning/ObjectBased.py
@@ -9,12 +9,11 @@ from sklearn.ensemble import RandomForestClassifier
 from sklearn.metrics import confusion_matrix, accuracy_score, cohen_kappa_score, precision_recall_fscore_support
 
 class ObjectBasedClassifier:
-    def __init__(self, object_layer, reference_data, time_series_patterns, user_feature_list, ref_class_field='class',
+    def __init__(self, object_layer, reference_data, time_series_list, user_feature_list, ref_class_field='class',
                  ref_id_field='id'):
         self.obia_base = OBIABase(object_layer, ref_data=reference_data, ref_class_field=ref_class_field,
                                   ref_id_field=ref_id_field)
-        for ptrn in time_series_patterns:
-            lst = sorted(glob.glob(ptrn))
+        for lst in time_series_list:
             self.obia_base.add_raster_time_series_for_stats(lst)
         for ras in user_feature_list:
             self.obia_base.add_raster_for_stats(ras)
@@ -119,17 +118,13 @@ class ObjectBasedClassifier:
 
 #TEST CODE
 def run_test(sample_folder):
+    lst1 = sorted(glob.glob('{}/output/S2_processed/T31PDL/*/*FEAT.tif'.format(sample_folder)))
     obc = ObjectBasedClassifier('{}/output/segmentation/segmentation.tif'.format(sample_folder),
                                 '{}/input/REF/ref_l2.shp'.format(sample_folder),
-                                ['{}/output/S2_processed/T31PDL/*/*FEAT.tif'.format(sample_folder)],
+                                [lst1],
                                 ['{}/input/THR/THR_SPOT6.tif'.format(sample_folder)],
                                 ref_class_field=['class', 'Class_L1a'])
-    '''
-    obc = ObjectBasedClassifier('/DATA/Benin/OBSYDYA_data/MORINGA/SEGMENTATION/segmentation.tif',
-                                '/DATA/Benin/OBSYDYA_data/MORINGA/reference/BD_OBSYDYA_2022_ParakouNdali_v0.2.shp',
-                                ['/DATA/Benin/OBSYDYA_data/MORINGA/basefolder/FEAT/S2_THEIA_FEAT/S2_THEIA_MOSAIC_*.tif'],
-                                glob.glob('/DATA/Benin/OBSYDYA_data/MORINGA/ext_features'))
-    '''
+
     obc.gen_k_folds(5, class_field='class')
     m, s, r = obc.train_RF(100, return_true_vs_pred=True)
     obc.classify(m, '{}/output/classification/firstmap_l1.tif'.format(sample_folder))
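Note on the constructor change above: `ObjectBasedClassifier` no longer expands glob patterns itself, so callers are expected to resolve each time series into an explicit, ordered file list first. A minimal sketch of the new calling convention (all paths below are placeholders, not from the repository):

```python
import glob

# Each entry of time_series_list is an explicit, sorted list of rasters;
# the classifier no longer globs patterns internally.
s2_stack = sorted(glob.glob('/data/sample/output/S2_processed/T31PDL/*/*FEAT.tif'))

obc = ObjectBasedClassifier('/data/sample/output/segmentation/segmentation.tif',
                            '/data/sample/input/REF/ref_l2.shp',
                            [s2_stack],                               # list of file lists
                            ['/data/sample/input/THR/THR_SPOT6.tif'],
                            ref_class_field=['class', 'Class_L1a'])
```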
diff --git a/TimeSeries/s2theia.py b/TimeSeries/s2theia.py
index 119f4bf844b33733c3c33902729a37382f5f09ad..949d25c13b58508879d9b267fe80a86587be3f28 100644
--- a/TimeSeries/s2theia.py
+++ b/TimeSeries/s2theia.py
@@ -806,7 +806,7 @@ class S2TheiaPipeline:
                         t.write_outputs(output_warped, update_pipe=True, flag_nodata=True)
                 t.gapfill(self.output_dates, store_gapfill)
                 stack_name = t.generate_feature_stack(feat_list)
-                out.append(t.write_outputs(out_fld))
+                out.extend(t.write_outputs(out_fld))
                 t.reset()
             if len(self.tiles) > 1 and mosaicking == 'vrt':
                 out_mos = []
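The `append` → `extend` fix matters because `write_outputs` returns a list of paths per tile: `append` nested one list per tile into `out`, while `extend` keeps the accumulator flat. A tiny illustration (file names are hypothetical):

```python
tile_outputs = ['T31PDL_STACK.tif', 'T31PDL_FEAT.tif']  # hypothetical write_outputs() return

out = []
out.append(tile_outputs)  # -> [['T31PDL_STACK.tif', 'T31PDL_FEAT.tif']]  (nested list)

out = []
out.extend(tile_outputs)  # -> ['T31PDL_STACK.tif', 'T31PDL_FEAT.tif']    (flat, one path per entry)
```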
diff --git a/Workflows/basic.py b/Workflows/basic.py
new file mode 100644
index 0000000000000000000000000000000000000000..2fd290506f95550ef004ebbd56ca53fdfc82de6d
--- /dev/null
+++ b/Workflows/basic.py
@@ -0,0 +1,99 @@
+import os
+import json
+import pickle
+from moringa import preprocess_s2, run_segmentation
+from Learning.ObjectBased import ObjectBasedClassifier
+
+def process_timeseries(oroot, d, ts_lst_pkl):
+    ts_lst = []
+    for ts in d['timeseries']:
+        print('[MORINGA-INFO] : Preprocessing {} from {}'.format(ts['type'], ts['provider']))
+        if ts['type'] == 's2':
+            ots = os.path.join(oroot, 'timeseries', ts['type'] + ts['provider'])
+            os.makedirs(ots, exist_ok=True)
+            ts_lst.append(preprocess_s2(ts['path'],
+                                        ots,
+                                        roi=d['roi'],
+                                        output_dates_file=ts['output_dates_file'],
+                                        provider=ts['provider']))
+        else:
+            raise ValueError('TimeSeries type not yet supported.')
+    with open(ts_lst_pkl, 'wb') as ts_save:
+        pickle.dump(ts_lst, ts_save)
+    return
+
+def perform_segmentation(ofn, d):
+    print('[MORINGA-INFO] : Performing segmentation')
+    os.makedirs(os.path.dirname(ofn), exist_ok=True)
+    run_segmentation(d['segmentation']['src'],
+                     d['segmentation']['th'],
+                     d['segmentation']['cw'],
+                     d['segmentation']['sw'],
+                     ofn,
+                     n_first_iter=d['segmentation']['n_first_iter'],
+                     margin=d['segmentation']['margin'],
+                     roi=d['roi'],
+                     n_proc=d['segmentation']['n_proc'],
+                     light=d['segmentation']['lightmode'])
+    return
+
+def train_valid_workflow(seg, ts_lst_pkl, d, m_file):
+    assert os.path.exists(seg)
+    assert os.path.exists(ts_lst_pkl)
+    print('[MORINGA-INFO] : Running Training/Validation Workflow')
+    with open(ts_lst_pkl, 'rb') as ts_save:
+        ts_lst = pickle.load(ts_save)
+    obc = ObjectBasedClassifier(seg,
+                                d['ref_db']['path'],
+                                ts_lst,
+                                d['userfeat'],
+                                ref_class_field=d['ref_db']['fields'])
+
+    obc.gen_k_folds(5, class_field=d['ref_db']['fields'][-1])
+
+    for cf in d['ref_db']['fields']:
+        if d['training']['classifier'] == 'rf':
+            m, s, r = obc.train_RF(d['training']['parameters']['n_trees'], class_field=cf, return_true_vs_pred=True)
+            m_dict = {'model': m, 'results': r, 'summary': s}
+            os.makedirs(os.path.dirname(m_file[cf]), exist_ok=True)
+            with open(m_file[cf], 'wb') as mf:
+                pickle.dump(m_dict, mf)
+        else:
+            raise ValueError('Classifier not yet supported.')
+
+def basic(cfg, runlevel=1, single_step=False):
+    os.environ['OTB_LOGGER_LEVEL'] = 'CRITICAL'
+    with open(cfg, 'r') as f:
+        d = json.load(f)
+
+    oroot = os.path.join(d['output_path'], d['chain_name'])
+
+    step = runlevel
+
+    # Preprocess timeseries
+    ts_lst_pkl = os.path.join(oroot, 'time_series_list.pkl')
+    if step == 1:
+        process_timeseries(oroot, d, ts_lst_pkl)
+        step += 1
+        if single_step:
+            return
+
+    # Segmentation
+    seg = os.path.join(oroot, 'segmentation/{}_obj_layer.tif'.format(d['chain_name']))
+    if step == 2:
+        perform_segmentation(seg, d)
+        step += 1
+        if single_step:
+            return
+
+    # Training/Validation Workflow
+    m_file = {}
+    for cf in d['ref_db']['fields']:
+        m_file[cf] = os.path.join(oroot, 'model/model_{}.pkl'.format(cf))
+    if step == 3:
+        train_valid_workflow(seg, ts_lst_pkl, d, m_file)
+        step += 1
+        if single_step:
+            return
+
+    return
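For reference, `basic()` drives the whole chain from a single JSON config. The sketch below reconstructs the structure implied by the keys read in `Workflows/basic.py`; the key names come from the code above, but every value is a placeholder:

```python
import json

cfg = {
    'output_path': '/data/run',                    # placeholder values throughout
    'chain_name': 'demo_chain',
    'roi': '/data/roi.shp',
    'timeseries': [{'type': 's2', 'provider': 'theia',
                    'path': '/data/S2_L2A', 'output_dates_file': '/data/dates.txt'}],
    'segmentation': {'src': '/data/vhr.tif', 'th': 300, 'cw': 0.5, 'sw': 0.5,
                     'n_first_iter': 4, 'margin': 100, 'n_proc': 8, 'lightmode': False},
    'ref_db': {'path': '/data/ref.shp', 'fields': ['class', 'Class_L1a']},
    'userfeat': ['/data/THR_SPOT6.tif'],
    'training': {'classifier': 'rf', 'parameters': {'n_trees': 100}},
}

with open('config.json', 'w') as f:
    json.dump(cfg, f, indent=2)
```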
diff --git a/moringa.py b/moringa.py
index 67ec90c6cb9ca09592c57a0ea7378183255c4bd3..9f6c93fb44b33e47c976cd02bc7b0ef0c5e19461 100755
--- a/moringa.py
+++ b/moringa.py
@@ -6,8 +6,8 @@ import VHR.vhrbase
 from TimeSeries import s2theia, s2planetary, s1base, s1planetary, planet_mosaics
 
 def run_segmentation(img, threshold, cw, sw , out_seg,
-                     n_first_iter, margin, roi, n_proc, memory,
-                     remove_graph, force_parallel, light):
+                     n_first_iter, margin, roi, n_proc, memory=None,
+                     remove_graph=True, force_parallel=False, light=False):
     if not os.path.exists(os.path.dirname(out_seg)):
         os.makedirs(os.path.dirname(out_seg))
     params = OBIA.segmentation.LSGRMParams(threshold, cw, sw, n_first_iter, margin)
@@ -15,10 +15,10 @@ def run_segmentation(img, threshold, cw, sw , out_seg,
         OBIA.segmentation.lsgrm_light(img, params, out_seg, n_proc, memory, roi, force_parallel)
     else:
         OBIA.segmentation.lsgrm(img, params, out_seg, n_proc, memory, roi, remove_graph, force_parallel)
-    return
+    return out_seg
 
 def preprocess_spot67(in_fld, out_fld, dem_fld, geoid_file, skip_ps, compress,
-                      clip, align_to, align_to_band, align_using_band):
+                      clip, align_to=None, align_to_band=3, align_using_band=1):
     sp = VHR.vhrbase.SPOT67RasterPipeline(in_fld)
     sp.to_toa()
     sp.orthorectify(dem_fld, geoid_file)
@@ -77,7 +77,7 @@ def preprocess_s1(in_fld, roi, out_fld, dem_fld=None, geoid=None, direction=None
     s1.compute_features()
     return s1.write_outputs(out_fld)
 
-def fetch(imagery, shp, dt, out_fld, auth):
+def fetch(imagery, shp, dt, out_fld, auth=None):
     assert(imagery in ['s2theia', 's2planetary', 's1grd', 's1rtc', 'planetmosaics'])
     if imagery != 's2planetary' and auth is None:
         raise ValueError("Please provide authentication information.")