diff --git a/Learning/ObjectBased.py b/Learning/ObjectBased.py
index 3d23e444f615cfd3f50cd104ee922651172c7863..5936243e5eb6a5fddd4a21b9d7f38d24909e1bf8 100644
--- a/Learning/ObjectBased.py
+++ b/Learning/ObjectBased.py
@@ -97,21 +97,25 @@ class ObjectBasedClassifier:
         }
         return models, summary, results
 
-    def classify(self, model, perc=None, output_file=None, compress='NONE'):
-        prg = tqdm(desc='Classification', total=len(self.obia_base.tiles))
-        if isinstance(model, list):
-            for t, L, X in self.obia_base.tiled_data(normalize=perc):
-                prob = []
-                for m in model:
-                    prob.append(m.predict_proba(X))
-                prob = np.prod(prob, axis=0)
-                c = model[0].classes_[np.argmax(prob, axis=1)]
-                self.obia_base.populate_map(t, L, c, output_file, compress)
-                prg.update(1)
-        else:
-            for t,L,X in self.obia_base.tiled_data(normalize=perc):
-                c = model.predict(X)
-                self.obia_base.populate_map(t, L, c, output_file, compress)
+    def classify(self, models, perc=None, output_files=None, compress='NONE'):
+        # Accept a single model/output pair as well as parallel lists of them.
+        if isinstance(output_files, str) or output_files is None:
+            models, output_files = [models], [output_files]
+        prg = tqdm(desc='Classification', total=len(self.obia_base.tiles) * len(models))
+        for t, L, X in self.obia_base.tiled_data(normalize=perc):
+            for model, output_file in zip(models, output_files):
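+                # A model entry that is itself a list is treated as an ensemble:
+                # per-class probabilities are multiplied and the argmax class kept.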
+                if isinstance(model, list):
+                    prob = []
+                    for m in model:
+                        prob.append(m.predict_proba(X))
+                    prob = np.prod(prob, axis=0)
+                    c = model[0].classes_[np.argmax(prob, axis=1)]
+                    self.obia_base.populate_map(t, L, c, output_file, compress)
+                else:
+                    c = model.predict(X)
+                    self.obia_base.populate_map(t, L, c, output_file, compress)
                 prg.update(1)
         return
 
diff --git a/OBIA/OBIABase.py b/OBIA/OBIABase.py
index f8ded1389fed88d26dc269bcb812bcb5eb9ec2b8..d93b55b0a829eb52ed6e37e68c73f6feddbda7bb 100644
--- a/OBIA/OBIABase.py
+++ b/OBIA/OBIABase.py
@@ -54,7 +54,7 @@ class OBIABase:
         self.n_vars = 0
 
         # Output raster
-        self.output_map = None
+        self.output_maps = []
 
     def init_ref_db(self, vector_file, id_field, class_field):
         if isinstance(class_field, str):
@@ -321,6 +321,12 @@
                 X = (X - normalize[0]) / (normalize[1] - normalize[0])
             yield tilenum,L,X
 
+    def create_new_map(self):
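+        # Start a fresh in-memory class map; populate_map() accumulates into
+        # the most recently created one when no output file is given.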
+        self.output_maps.append(np.zeros((self.H, self.W), dtype=int))
+        return
+
     def populate_map(self, tilenum, obj_id, classes, output_file=None, compress='NONE'):
         r = otb.itkRegion()
         r['index'][0], r['index'][1], r['size'][0], r['size'][1] = self.tiles[tilenum]
@@ -331,9 +337,10 @@
         tmp = np.zeros(np.max(tile_obj) + 1, dtype=int)
         tmp[obj_id] = classes
         if output_file is None:
-            if self.output_map is None:
-                self.output_map = np.zeros((self.H,self.W), dtype=np.int)
-            self.output_map[self.tiles[tilenum][1]:self.tiles[tilenum][1]+self.tiles[tilenum][3],
+            # Fall back to a lazily created in-memory map when no file is given.
+            if not self.output_maps:
+                self.create_new_map()
+            self.output_maps[-1][self.tiles[tilenum][1]:self.tiles[tilenum][1]+self.tiles[tilenum][3],
             self.tiles[tilenum][0]:self.tiles[tilenum][0]+self.tiles[tilenum][2]] += tmp[tile_obj]
         else:
             if not os.path.exists(os.path.dirname(output_file)):
diff --git a/Workflows/basic.py b/Workflows/basic.py
index 935ff991d603ac3ca43c24c66faf1c5927edbf6f..37af4e17428181e6f0319afd6cd29fe8de08ad17 100644
--- a/Workflows/basic.py
+++ b/Workflows/basic.py
@@ -80,10 +80,15 @@ def classify(seg, ts_lst_pkl, m_files, d, map_files):
     obc = ObjectBasedClassifier(seg,
                                 ts_lst,
                                 unroll_file_list(d['userfeat']))
-    for m_file, map_file in zip(m_files, map_files):
+    models = []
+    for m_file in m_files:
         with open(m_file, 'rb') as mf:
             m_dict = pickle.load(mf)
-        obc.classify(m_dict['model'], perc=[m_dict['perc2'], m_dict['perc98']], output_file=map_file)
+        models.append(m_dict['model'])
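+    # Normalization percentiles are taken from the last loaded model dictionary,
+    # assuming every model was trained with the same perc2/perc98 values.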
+    perc = [m_dict['perc2'], m_dict['perc98']]
+    obc.classify(models, perc=perc, output_files=map_files)
     return
 
 def report(map_files, m_files, d, report_files):