diff --git a/Common/geotools.py b/Common/geotools.py
new file mode 100644
index 0000000000000000000000000000000000000000..646c2d2db27795f091f71e784baf0a9defcf8526
--- /dev/null
+++ b/Common/geotools.py
@@ -0,0 +1,23 @@
+from osgeo import gdal
+from osgeo import ogr
+from pyproj import Transformer as T
+
+def get_query_bbox(shp, query_srs=4326, return_all=False):
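+    """Return the extent of the vector file `shp` as a bounding box.
+
+    The box is given as [xmin, ymin, xmax, ymax], expressed in the `query_srs`
+    CRS (an EPSG code, 4326 by default). With `return_all=True`, the native
+    extent and the native EPSG code are also returned, as
+    (bbox, native_bbox, native_epsg).
+    """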
+    ds = ogr.Open(shp)
+    ly = ds.GetLayer(0)
+    xt = ly.GetExtent()
+    shp_srs = int(ly.GetSpatialRef().GetAuthorityCode(None))
+
+    i_bbox = [xt[0], xt[2], xt[1], xt[3]]
+    if shp_srs == query_srs:
+        bbox = i_bbox
+    else:
+        tr = T.from_crs(shp_srs, query_srs, always_xy=True)
+        lon_min, lat_min = tr.transform(xt[0], xt[2])
+        lon_max, lat_max = tr.transform(xt[1], xt[3])
+        bbox = [lon_min, lat_min, lon_max, lat_max]
+
+    if return_all:
+        return bbox, i_bbox, shp_srs
+    else:
+        return bbox
\ No newline at end of file
diff --git a/Learning/ObjectBased.py b/Learning/ObjectBased.py
index 893455d8c58557e65a5427ddfee5db1fc47d62eb..f1dd96c4a70a74ad43ebe74ef4af33bd124e0af3 100644
--- a/Learning/ObjectBased.py
+++ b/Learning/ObjectBased.py
@@ -9,8 +9,10 @@ from sklearn.ensemble import RandomForestClassifier
 from sklearn.metrics import confusion_matrix, accuracy_score, cohen_kappa_score, precision_recall_fscore_support
 
 class ObjectBasedClassifier:
-    def __init__(self, object_layer, reference_data, time_series_patterns, user_feature_list, ref_class_field='class'):
-        self.obia_base = OBIABase(object_layer, ref_data = reference_data, ref_class_field=ref_class_field)
+    def __init__(self, object_layer, reference_data, time_series_patterns, user_feature_list, ref_class_field='class',
+                 ref_id_field='id'):
+        self.obia_base = OBIABase(object_layer, ref_data=reference_data, ref_class_field=ref_class_field,
+                                  ref_id_field=ref_id_field)
         for ptrn in time_series_patterns:
             lst = sorted(glob.glob(ptrn))
             self.obia_base.add_raster_time_series_for_stats(lst)
diff --git a/OBIA/OBIABase.py b/OBIA/OBIABase.py
index 48d8889d6514d4e2988c69d91741cb1088d13d79..90abe476d58620e9522c903483d6347bccd490c4 100644
--- a/OBIA/OBIABase.py
+++ b/OBIA/OBIABase.py
@@ -174,11 +174,14 @@ class OBIABase:
             tile_obj = np.squeeze(clip_obj['array']).astype(np.uint32)
         else:
             assert (self.ref_obj_layer_pipe is not None)
-            self.ref_obj_layer_pipe[-1].PropagateRequestedRegion('out', r)
-            tile_obj = self.ref_obj_layer_pipe[-1].GetImageAsNumpyArray('out').astype(np.uint32)
-
-            #tile_obj = self.ref_obj_layer[r['index'][1]:r['index'][1]+r['size'][1],
-            #           r['index'][0]:r['index'][0]+r['size'][0]]
+            tmp_er = otb.Registry.CreateApplication('ExtractROI')
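+            # Pull only the current tile region out of the in-memory object layer pipeline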
+            tmp_er.SetParameterInputImage('in', self.ref_obj_layer_pipe[-1].GetParameterOutputImage('out'))
+            tmp_er.SetParameterInt('startx', r['index'][0])
+            tmp_er.SetParameterInt('starty', r['index'][1])
+            tmp_er.SetParameterInt('sizex', r['size'][0])
+            tmp_er.SetParameterInt('sizey', r['size'][1])
+            tmp_er.Execute()
+            tile_obj = tmp_er.GetImageAsNumpyArray('out').astype(np.uint32)
 
         si = otb.Registry.CreateApplication('Superimpose')
         si.SetParameterString('inm', input_image)
diff --git a/Postprocessing/Report.py b/Postprocessing/Report.py
index ad57dca9f7fa1c4170794fdac7fcb62ec7e2f391..d3b74bf0a319a1b0147f665c008d26479cc2be62 100644
--- a/Postprocessing/Report.py
+++ b/Postprocessing/Report.py
@@ -30,7 +30,7 @@ def parse_colormap_file(fn):
 
     return labels, class_names, colors
 
-def generate_report_figures(map, palette_fn, results, summary, out_dir, map_name=None):
+def generate_report_figures(map, palette_fn, results, summary, out_dir, map_name=None, importance_perc=0.75):
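+    # importance_perc: fraction of the cumulated feature importances kept in the variable importance plot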
     labels, class_names, colors = parse_colormap_file(palette_fn)
     colors_norm = [(x[0]/255,x[1]/255,x[2]/255,x[3]/255) for x in colors]
     with plt.ioff():
@@ -81,7 +81,7 @@ def generate_report_figures(map, palette_fn, results, summary, out_dir, map_name
         imp_s = [x for _, x in sorted(zip(imp_m, imp_s), reverse=True)]
         imp_m = sorted(imp_m, reverse=True)
         c_imp = np.cumsum(imp_m)
-        idx = np.where(c_imp<0.75*c_imp[-1])[0][-1]
+        idx = np.where(c_imp < importance_perc * c_imp[-1])[0][-1]
         imp_m = imp_m[:idx]
         imp_s = imp_s[:idx]
         imp_n = imp_n[:idx]
diff --git a/TimeSeries/planet_mosaics.py b/TimeSeries/planet_mosaics.py
index 8d0563f6685a7c23b58d53068485691af7c7f843..c7ec32b4de46fe3dd4cca59a89e6389494b04579 100644
--- a/TimeSeries/planet_mosaics.py
+++ b/TimeSeries/planet_mosaics.py
@@ -49,44 +49,46 @@ def fetch(shp, dt, out_fld, api_key):
         }
         res = session.get(API_URL, params=parameters)
         mosaic = res.json()
-        mosaic_id = mosaic['mosaics'][0]['id']
-
-        search_parameters = {
-            'bbox': str_bbox,
-            'minimal': True
-        }
-        quads_url = "{}/{}/quads".format(API_URL, mosaic_id)
-        res = session.get(quads_url, params=search_parameters, stream=True)
-        quads = res.json()
-
-        l_quads = []
-        for q in tqdm(quads['items'], desc='Downloading quads for {}'.format(md)):
-            link = q['_links']['download']
-            l_quads.append(os.path.join(out_fld, q['id'] + '.tif'))
-            if not os.path.exists(l_quads[-1]):
-                urllib.request.urlretrieve(link, l_quads[-1])
-
-        app_mos = otb.Registry.CreateApplication('Mosaic')
-        app_mos.SetParameterStringList('il', l_quads)
-        app_mos.Execute()
-        app_roi = otb.Registry.CreateApplication('ExtractROI')
-        app_roi.SetParameterInputImage('in', app_mos.GetParameterOutputImage('out'))
-        fn = 'PlanetMosaic_{}_{}.tif'.format(os.path.splitext(os.path.basename(shp))[0], md)
-        app_roi.SetParameterString('mode', 'fit')
-        app_roi.SetParameterString('mode.fit.vect', shp)
-        app_roi.SetParameterString('out', os.path.join(out_fld, fn))
-        app_roi.SetParameterOutputImagePixelType('out', otb.ImagePixelType_uint16)
-        app_roi.ExecuteAndWriteOutput()
-
-        [os.remove(f) for f in l_quads]
-
-    return
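+        # Some months may have no published mosaic; skip them instead of failing on an empty result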
+        if len(mosaic['mosaics']) > 0:
+            mosaic_id = mosaic['mosaics'][0]['id']
+
+            search_parameters = {
+                'bbox': str_bbox,
+                'minimal': True
+            }
+            quads_url = "{}/{}/quads".format(API_URL, mosaic_id)
+            res = session.get(quads_url, params=search_parameters, stream=True)
+            quads = res.json()
+
+            l_quads = []
+            for q in tqdm(quads['items'], desc='Downloading quads for {}'.format(md)):
+                link = q['_links']['download']
+                l_quads.append(os.path.join(out_fld, q['id'] + '.tif'))
+                if not os.path.exists(l_quads[-1]):
+                    urllib.request.urlretrieve(link, l_quads[-1])
+
+            app_mos = otb.Registry.CreateApplication('Mosaic')
+            app_mos.SetParameterStringList('il', l_quads)
+            app_mos.Execute()
+            app_roi = otb.Registry.CreateApplication('ExtractROI')
+            app_roi.SetParameterInputImage('in', app_mos.GetParameterOutputImage('out'))
+            fn = 'PlanetMosaic_{}_{}.tif'.format(os.path.splitext(os.path.basename(shp))[0], md)
+            app_roi.SetParameterString('mode', 'fit')
+            app_roi.SetParameterString('mode.fit.vect', shp)
+            app_roi.SetParameterString('out', os.path.join(out_fld, fn))
+            app_roi.SetParameterOutputImagePixelType('out', otb.ImagePixelType_uint16)
+            app_roi.ExecuteAndWriteOutput()
+
+            [os.remove(f) for f in l_quads]
+
+    return PlanetMosaicPipeline(out_fld)
 
 class PlanetMosaicPipeline:
     # --- BEGIN SENSOR PROTOTYPE ---
 
     NAME = 'PLANET-MOSAICS'
     REF_TYPE = otb.ImagePixelType_uint16
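+    # Derived features (e.g. NDVI) are written as float, unlike the uint16 reflectance mosaics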
+    FEAT_TYPE = otb.ImagePixelType_float
     PTRN_name = 'PlanetMosaic_*.tif'
     FEAT_exp = {
         'B': 'im1b1',
@@ -142,14 +144,23 @@ class PlanetMosaicPipeline:
         return
 
     def parse_folder(self, fld):
-        img_list = [os.path.abspath(x) for x in glob.glob(os.path.join(fld, self.PTRN_dir))]
+        img_list = [os.path.abspath(x) for x in glob.glob(os.path.join(fld, self.PTRN_name))]
         return sorted(img_list, key=lambda x: self._img_id(x))
 
+    def append(self, app, fname=None, ftype=None, outp=None, is_output=False):
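+        # Register an application in the pipeline, keeping track of its output file name,
+        # pixel type and output parameter key; optionally flag it as a pipeline output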
+        if is_output:
+            self.out_idx.append(len(self.pipe))
+        self.pipe.append(app)
+        self.files.append(fname)
+        self.types.append(ftype)
+        self.out_p.append(outp)
+
     def compute_features(self, feat_list=['B', 'G', 'R', 'NIR', 'NDVI']):
         proc_idx = self.out_idx.copy()
         self.out_idx = []
         for k in range(len(proc_idx)):
             bm = otb.Registry.CreateApplication('BandMathX')
+            bm.AddImageToParameterInputImageList('il', self.pipe[proc_idx[k]].GetParameterOutputImage(self.out_p[proc_idx[k]]))
             expr = []
             for f in feat_list:
                 expr.append(self.FEAT_exp[f])
@@ -157,7 +168,7 @@ class PlanetMosaicPipeline:
             bm.SetParameterString('exp', expr)
             bm.Execute()
             fn = self.files[proc_idx[k]].replace('.tif', '_FEAT.tif')
-            self.append(bm, fn, self.REF_TYPE, 'out', is_output=True)
+            self.append(bm, fn, self.FEAT_TYPE, 'out', is_output=True)
 
     def write_outputs(self, fld, update_pipe=False, compress=False):
         out = []
diff --git a/TimeSeries/s1base.py b/TimeSeries/s1base.py
index d0cbd0977f20ecc56b809f40338e67e3eecc813f..923b2f0d8eeb741340599eb3ba5d74ed2cd3b0af 100644
--- a/TimeSeries/s1base.py
+++ b/TimeSeries/s1base.py
@@ -7,13 +7,34 @@ import uuid
 import xml.etree.ElementTree as ET
 import shutil
 from itertools import groupby
+from Common.geotools import get_query_bbox
+from eodag import EODataAccessGateway
+
+def fetch(shp, dt, output_fld, credentials):
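+    # Query the PEPS provider through EODAG for S1 IW GRD products intersecting the
+    # shapefile extent over the requested date range, and download them into output_fld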
+    bbox = get_query_bbox(shp)
+    dag = EODataAccessGateway(user_conf_file_path=credentials)
+    dag.set_preferred_provider("peps")
+    search_criteria = {
+        "productType": "S1_SAR_GRD",
+        "start": dt.split("/")[0],
+        "end": dt.split("/")[1],
+        "geom": {"lonmin": bbox[0], "latmin": bbox[1], "lonmax": bbox[2], "latmax": bbox[3]}
+    }
+    res = dag.search_all(**search_criteria)
+    res = res.filter_property(sensorMode='IW')
+    if len(res) > 0:
+        os.makedirs(output_fld, exist_ok=True)
+        dag.download_all(res, outputs_prefix=output_fld, extract=True)
+
+    return S1GRDPipeline(output_fld)
+
 
 class S1GRDPipeline:
     # --- BEGIN SENSOR PROTOTYPE ---
     NAME = 'S1-IW-GRD'
     VAL_TYPE = otb.ImagePixelType_int16
     TMP_TYPE = otb.ImagePixelType_float
-    PTRN_dir = 'S1*_IW_GRD*.SAFE'
+    PTRN_dir = 'S1*_IW_GRD*/S1*_IW_GRD*.SAFE'
     PTRN_ref = '-iw-grd-'
     VH_name = 'vh'
     PTRN = ['measurement/s1*-iw-grd-vh-*.tiff', 'measurement/s1*-iw-grd-vv-*.tiff']
diff --git a/TimeSeries/s1planetary.py b/TimeSeries/s1planetary.py
index e104c9303c0fcb4567c183525a81c6e229b21ae8..28e04c2685f4fd36684e5ef2a3a85b013e3eb0eb 100644
--- a/TimeSeries/s1planetary.py
+++ b/TimeSeries/s1planetary.py
@@ -1,9 +1,7 @@
 import os.path
-
 from TimeSeries.s1base import *
 import planetary_computer as PC
 from pystac_client import Client
-from osgeo import ogr
 from pyproj import Transformer as T
 from shapely.geometry import Polygon
 import rasterio
@@ -11,24 +9,12 @@ import rasterio.mask
 import tqdm
 import otbApplication as otb
 import json
+from Common.geotools import get_query_bbox
 
 def fetch(shp, dt, output_fld, api_key, direction=None):
 
     os.environ['PC_SDK_SUBSCRIPTION_KEY'] = api_key
-    ds = ogr.Open(shp)
-    ly = ds.GetLayer(0)
-    xt = ly.GetExtent()
-    shp_srs = int(ly.GetSpatialRef().GetAuthorityCode(None))
-    qry_srs = 4326
-
-    i_bbox = [xt[0], xt[2], xt[1], xt[3]]
-    if shp_srs == qry_srs:
-        bbox = i_bbox
-    else:
-        tr = T.from_crs(shp_srs, qry_srs, always_xy=True)
-        lon_min, lat_min = tr.transform(xt[0], xt[2])
-        lon_max, lat_max = tr.transform(xt[1], xt[3])
-        bbox = [lon_min, lat_min, lon_max, lat_max]
+    bbox, i_bbox, shp_srs = get_query_bbox(shp, return_all=True)
 
     api = Client.open('https://planetarycomputer.microsoft.com/api/stac/v1', modifier=PC.sign_inplace)
     res = api.search(collections="sentinel-1-rtc", bbox=bbox, datetime=dt)
diff --git a/TimeSeries/s2planetary.py b/TimeSeries/s2planetary.py
index a0de40366d7655da645181a4762821b2376245fa..928ce5399ea04c0120d91aec304639f6924d6e33 100644
--- a/TimeSeries/s2planetary.py
+++ b/TimeSeries/s2planetary.py
@@ -1,31 +1,16 @@
-import warnings
-
 from TimeSeries.s2sen2cor import *
 import planetary_computer as PC
 from pystac_client import Client
 import rasterio
 import rasterio.mask
-from osgeo import ogr
+from Common.geotools import get_query_bbox
 from pyproj import Transformer as T
 from shapely.geometry import Polygon
 import tqdm
 
 def fetch(shp, dt, output_fld, band_list=None):
 
-    ds = ogr.Open(shp)
-    ly = ds.GetLayer(0)
-    xt = ly.GetExtent()
-    shp_srs = int(ly.GetSpatialRef().GetAuthorityCode(None))
-    qry_srs = 4326
-
-    i_bbox = [xt[0], xt[2], xt[1], xt[3]]
-    if shp_srs == qry_srs:
-        bbox = i_bbox
-    else:
-        tr = T.from_crs(shp_srs,qry_srs,always_xy=True)
-        lon_min, lat_min = tr.transform(xt[0], xt[2])
-        lon_max, lat_max = tr.transform(xt[1], xt[3])
-        bbox = [lon_min, lat_min, lon_max, lat_max]
+    bbox, i_bbox, shp_srs = get_query_bbox(shp, return_all=True)
 
     api = Client.open('https://planetarycomputer.microsoft.com/api/stac/v1', modifier=PC.sign_inplace)
     res = api.search(collections="sentinel-2-l2a", bbox=bbox, datetime=dt)
diff --git a/TimeSeries/s2theia.py b/TimeSeries/s2theia.py
index 39593e2c02b22f55eae467783853a3f5bbdeecc6..bd9ff9f0b2035064dbc3eb296f1fb2426d5ecfce 100644
--- a/TimeSeries/s2theia.py
+++ b/TimeSeries/s2theia.py
@@ -1,11 +1,7 @@
-import sys
 import warnings
-
-from osgeo import gdal,ogr
+from osgeo import gdal
 import otbApplication as otb
 from theia_picker import TheiaCatalog
-from pyproj import Transformer as T
-import tqdm
 
 from Common.otb_numpy_proc import to_otb_pipeline
 import numpy as np
@@ -17,25 +13,11 @@ from osgeo import osr
 import datetime
 import uuid
 import shutil
-
+from Common.geotools import get_query_bbox
 from Common.geometry import get_displacements_to_ref, get_clearest_central_image
 
 def fetch(shp, dt, output_fld, credentials):
-
-    ds = ogr.Open(shp)
-    ly = ds.GetLayer(0)
-    xt = ly.GetExtent()
-    shp_srs = int(ly.GetSpatialRef().GetAuthorityCode(None))
-    qry_srs = 4326
-
-    i_bbox = [xt[0], xt[2], xt[1], xt[3]]
-    if shp_srs == qry_srs:
-        bbox = i_bbox
-    else:
-        tr = T.from_crs(shp_srs,qry_srs,always_xy=True)
-        lon_min, lat_min = tr.transform(xt[0], xt[2])
-        lon_max, lat_max = tr.transform(xt[1], xt[3])
-        bbox = [lon_min, lat_min, lon_max, lat_max]
+    bbox = get_query_bbox(shp)
 
     theia = TheiaCatalog(credentials)
     features = theia.search(
@@ -48,11 +30,8 @@ def fetch(shp, dt, output_fld, credentials):
     lst = ['FRE_B2','FRE_B3','FRE_B4','FRE_B5','FRE_B6','FRE_B7','FRE_B8','FRE_B8A',
            'FRE_B11','FRE_B12','EDG_R1','SAT_R1','CLM_R1']
 
-    #prg = tqdm.tqdm(total=len(features) * 13, desc="Fetching from Theia")
     for f in features:
         f.download_files(matching=lst, download_dir=output_fld)
-        #prg.update()
-    #prg.close()
 
     return S2TheiaPipeline(output_fld)
 
diff --git a/docker/dockerfile b/docker/dockerfile
index 38766075c4abfa6bab8bdef5beba6a8453ce4cad..47c533127a544f19c49781e5ee1ced33e1e492a4 100644
--- a/docker/dockerfile
+++ b/docker/dockerfile
@@ -115,7 +115,8 @@ RUN pip install gdal==3.4.2 \
       planetary_computer \
       theia_picker \
       fpdf2 \
-      planet
+      planet \
+      eodag
 
 RUN mkdir moringav2
 COPY --chown=ubuntu . /home/ubuntu/moringav2
\ No newline at end of file
diff --git a/moringa.py b/moringa.py
index 9b66d04ba7cdfd8d1d0af8516e0b8fd7a5844e1b..e57911a86b5cad493f4cb11fcac7b8e42ce041a7 100755
--- a/moringa.py
+++ b/moringa.py
@@ -3,15 +3,18 @@ import sys
 import argparse
 import OBIA.segmentation
 import VHR.vhrbase
-from TimeSeries import s2theia, s2planetary
+from TimeSeries import s2theia, s2planetary, s1base, s1planetary
 
 def run_segmentation(img, threshold, cw, sw , out_seg,
                      n_first_iter, margin, roi, n_proc, memory,
-                     remove_graph, force_parallel):
+                     remove_graph, force_parallel, light):
     if not os.path.exists(os.path.dirname(out_seg)):
         os.makedirs(os.path.dirname(out_seg))
     params = OBIA.segmentation.LSGRMParams(threshold, cw, sw, n_first_iter, margin)
-    OBIA.segmentation.lsgrm(img, params, out_seg, n_proc, memory, roi, remove_graph, force_parallel)
+    if light:
+        OBIA.segmentation.lsgrm_light(img, params, out_seg, n_proc, memory, roi, force_parallel)
+    else:
+        OBIA.segmentation.lsgrm(img, params, out_seg, n_proc, memory, roi, remove_graph, force_parallel)
     return
 
 def preprocess_spot67(in_fld, out_fld, dem_fld, geoid_file, skip_ps, compress,
@@ -30,8 +33,7 @@ def preprocess_spot67(in_fld, out_fld, dem_fld, geoid_file, skip_ps, compress,
     return
 
 def preprocess_s2(in_fld, out_fld, output_dates_file=None, roi=None,
-                  align=False, align_to=None, align_to_band=3, align_using_band=3,
-                  provider='theia'):
+                  align_to=None, align_to_band=3, align_using_band=3, provider='theia'):
     S2Processor = None
     if provider == 'theia':
         S2Processor = s2theia.S2TheiaPipeline
@@ -48,26 +50,50 @@ def preprocess_s2(in_fld, out_fld, output_dates_file=None, roi=None,
     else:
         raise ValueError("Please provide path to a text file containing output dates.")
 
-    align_flag = align is not None
-    s2.extract_feature_set(out_fld, store_gapfill=True, mosaicking='vrt', align=align_flag, align_to=align_to,
-                           align_to_band=align_to_band, align_using_band=align_using_band, output_aligned=align)
+    align = align_to is not None
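+    # '--align_to self' requests within-series alignment without an external reference image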
+    if align_to == 'self':
+        align_to = None
+
+    s2.extract_feature_set(out_fld, store_gapfill=True, mosaicking='vrt', align=align,
+                           align_to=align_to, align_to_band=align_to_band, align_using_band=align_using_band)
+    return
+
+def preprocess_s1(in_fld, roi, out_fld, dem_fld=None, geoid=None, direction=None, satellite=None,
+                  skip_despeckle=False, provider='native'):
+    S1Processor = None
+    if provider == 'native':
+        S1Processor = s1base.S1GRDPipeline
+    elif provider == 'planetary':
+        S1Processor = s1planetary.S1RTCPipeline
+    else:
+        raise ValueError("Unsupported/non-valid provider")
+
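+    # Only 'native' (ESA/PEPS) products need calibration and orthorectification; both providers
+    # are then stitched, optionally despeckled, and turned into features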
+    s1 = S1Processor(in_fld, roi, out_fld, direction=direction, satellite=satellite)
+    if provider == 'native':
+        assert(os.path.exists(dem_fld) and os.path.exists(geoid))
+        s1.calibrate()
+        s1.orthorectify(dem_fld, geoid)
+    s1.stitch()
+    if not skip_despeckle:
+        s1.multitemp_speckle_filter()
+    s1.compute_features()
+    s1.write_outputs(out_fld)
     return
 
 def main(args):
     parser = argparse.ArgumentParser(prog="moringa", add_help=False)
     subpar = parser.add_subparsers(dest="cmd")
 
-    prepr = subpar.add_parser("preprocess_s2", help="Performs Moringa preset time series preprocessing.",
+    prepr = subpar.add_parser("preprocess_s2", help="Performs Moringa preset time series preprocessing for Sentinel-2.",
                               formatter_class=argparse.ArgumentDefaultsHelpFormatter)
     prepr.add_argument("in_folder", type=str, help="Path to the folder containing (de-zipped) S2 images.")
     prepr.add_argument("out_folder", type=str, help="Path to the folder in which preprocessed stacks will be stored.")
     prepr.add_argument("--output_dates_file", type=str, default=None, help="Path to the text file containing output dates for temporal interpolation.")
     prepr.add_argument("--roi", type=str, default=None, help="Path to the ROI vector file.")
-    prepr.add_argument("--align", type=str, default=None, help="To perform within-series image alignment, set this as output path for the aligned series.")
-    prepr.add_argument("--align_to", type=str, default=None, help="Path to a (optional)) reference image to which the stacks must be aligned.")
-    prepr.add_argument("--align_to_band", type=int, default=1, help="Band of reference image used for alignment.")
-    prepr.add_argument("--align_using_band", type=int, default=3, help="Band of current stack used for alignment.")
-    prepr.add_argument("--provider", type=str, default='theia', help="S2 image provider. Supported: 'theia', 'theial3a', 'sen2cor', 'planetary'")
+    prepr.add_argument("--align_to", type=str, default=None, help="Optional strategy for S2 spatial alignment (self/<date>/path to reference image).")
+    prepr.add_argument("--align_to_band", type=int, default=3, help="Band of reference image used for co-registration.")
+    prepr.add_argument("--align_using_band", type=int, default=3, help="Band of current stack used for co-registration.")
+    prepr.add_argument("--provider", type=str, default='theia', help="S2 image provider. Currently supported: 'theia', 'planetary'")
 
     segmt = subpar.add_parser("segment", help="Performs (large scale Baatz-Shape) segmentation of an input image.",
                               formatter_class=argparse.ArgumentDefaultsHelpFormatter)
@@ -76,11 +102,12 @@ def main(args):
     segmt.add_argument("outimg", type=str, help="Path to the output segmentation file (.tif, .shp, .gpkg, .gml).")
     segmt.add_argument("--cw", type=float, default=0.5, help="Color weight in Baatz-Shape criterion.")
     segmt.add_argument("--sw", type=float, default=0.5, help="Spatial weight in Baatz-Shape criterion.")
-    segmt.add_argument("--n_first_iter", type=int, default=12, help="Number of iterations for parallel tile processing.")
+    segmt.add_argument("--n_first_iter", type=int, default=12, help="Number of iterations for parallel tile processing (no use in light mode).")
     segmt.add_argument("--tile_margin", type=int, default=100, help="Margin for tile overlap.")
     segmt.add_argument("--roi", type=str, default=None, help="Vector file containing an ROI.")
     segmt.add_argument("--n_proc", type=int, help="Number of cores to use.")
     segmt.add_argument("--mem_limit", type=int, help="Memory limit in MB.")
+    segmt.add_argument("--use_light_alg", help="Use the sub-obtimal version of the algorithm. Faster but may have artefacts.", action='store_true')
     segmt.add_argument("--keep_graph", help="Keep the graph files (.bin) after segmentation.", action='store_true')
     segmt.add_argument("--force_parallel", help="Force the spot6/7 preprocess one-liner parallelization of the process even if the full graph fits in memory.", action='store_true')
 
@@ -97,6 +124,19 @@ def main(args):
     vhrprep.add_argument("--skip_ps", help="Skip pansharpening step.", action='store_true')
     vhrprep.add_argument("--compress", help="Use lossless compression on outputs.", action='store_true')
 
+    s1prepr = subpar.add_parser("preprocess_s1", help="Performs Moringa preset time series preprocessing for Sentinel-1.",
+                                formatter_class=argparse.ArgumentDefaultsHelpFormatter)
+    s1prepr.add_argument("in_folder", type=str, help="Path to the folder containing (de-zipped) S1 images.")
+    s1prepr.add_argument("roi", type=str, default=None, help="Path to an image whose geometry has to be fit (e.g. a S2 scene).")
+    s1prepr.add_argument("out_folder", type=str, help="Path to the folder in which preprocessed stacks will be stored.")
+    s1prepr.add_argument("--dem_fld", type=str, default=None, help="Path to the folder containing DEM covering the scene in WGS84 projection.")
+    s1prepr.add_argument("--geoid", type=str, default=None, help="Path to the geoid file.")
+    s1prepr.add_argument("--direction", type=str, default=None, help="Filter direction (ascending/descending)")
+    s1prepr.add_argument("--satellite", type=str, default=None, help="Filter satellite (s1a/s1b)")
+    s1prepr.add_argument("--skip_despeckle", help="Skip despeckling step", action='store_true')
+    s1prepr.add_argument("--provider", type=str, default='native',
+                         help="S1 image provider. Currently supported: 'native' (e.g. esa/peps), 'planetary'")
+
     if len(args) == 1:
         parser.print_help()
         sys.exit(0)
@@ -105,17 +145,21 @@ def main(args):
 
     if arg.cmd == "preprocess_s2":
         preprocess_s2(arg.in_folder, arg.out_folder, output_dates_file=arg.output_dates_file, roi=arg.roi,
-                      align=arg.align, align_to=arg.align_to, align_to_band=arg.align_to_band,
-                      align_using_band=arg.align_using_band, provider=arg.provider)
+                      align_to=arg.align_to, align_to_band=arg.align_to_band, align_using_band=arg.align_using_band,
+                      provider=arg.provider)
 
     if arg.cmd == "segment":
         run_segmentation(arg.img, arg.threshold, arg.cw, arg.sw, arg.outimg, arg.n_first_iter, arg.tile_margin,
-                         arg.roi, arg.n_proc, arg.mem_limit, not arg.keep_graph, arg.force_parallel)
+                         arg.roi, arg.n_proc, arg.mem_limit, not arg.keep_graph, arg.force_parallel, arg.use_light_alg)
 
     if arg.cmd == "preprocess_spot67":
         preprocess_spot67(arg.fld, arg.out_fld, arg.dem_fld, arg.geoid, arg.skip_ps, arg.compress,
                           arg.clip, arg.align_to, arg.align_to_band, arg.align_using_band)
 
+    if arg.cmd == "preprocess_s1":
+        preprocess_s1(arg.in_folder, arg.roi, arg.out_folder, arg.dem_fld, arg.geoid, arg.direction, arg.satellite,
+                      arg.skip_despeckle, arg.provider)
+
     return 0
 
 if __name__ == "__main__":