diff --git a/Learning/ObjectBased.py b/Learning/ObjectBased.py
index 7a0d4eb474a6a772ce5425f78c0d7d380ffc9705..59dd4018db494029490800049fdbde3ebf7b8382 100644
--- a/Learning/ObjectBased.py
+++ b/Learning/ObjectBased.py
@@ -68,34 +68,37 @@ class ObjectBasedClassifier:
             y_true, y_pred = self.obia_base.true_pred_bypixel(l, c, class_field)
             if return_true_vs_pred:
                 yt_yp.append((y_true, y_pred))
-            else:
-                results.append(
-                    {
-                        'conf_matrix': confusion_matrix(y_true, y_pred),
-                        'accuracy': accuracy_score(y_true, y_pred),
-                        'kappa' : cohen_kappa_score(y_true, y_pred),
-                        'p_r_f1': precision_recall_fscore_support(y_true, y_pred, zero_division=0),
-                        'importances' : models[-1].feature_importances_
-                    }
-                )
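+            # Collect per-fold evaluation metrics and feature importances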
+            results.append(
+                {
+                    'conf_matrix': confusion_matrix(y_true, y_pred),
+                    'accuracy': accuracy_score(y_true, y_pred),
+                    'kappa': cohen_kappa_score(y_true, y_pred),
+                    'p_r_f1': precision_recall_fscore_support(y_true, y_pred, zero_division=0),
+                    'importances': models[-1].feature_importances_
+                }
+            )
+
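+        # Stack per-fold importances and aggregate metrics into mean/std summaries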
+        all_imp = np.vstack([x['importances'] for x in results])
+        summary = {
+            'accuracy_mean': np.mean([x['accuracy'] for x in results]),
+            'accuracy_std': np.std([x['accuracy'] for x in results]),
+            'kappa_mean': np.mean([x['kappa'] for x in results]),
+            'kappa_std': np.std([x['kappa'] for x in results]),
+            'prec_mean': np.mean([x['p_r_f1'][0] for x in results], axis=0),
+            'prec_std': np.std([x['p_r_f1'][0] for x in results], axis=0),
+            'rec_mean': np.mean([x['p_r_f1'][1] for x in results], axis=0),
+            'rec_std': np.std([x['p_r_f1'][1] for x in results], axis=0),
+            'f1_mean': np.mean([x['p_r_f1'][2] for x in results], axis=0),
+            'f1_std': np.std([x['p_r_f1'][2] for x in results], axis=0),
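+            # Map each variable name to its importance statistics across folds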
+            'importance_mean': dict(zip(self.obia_base.get_vars(), np.mean(all_imp, axis=0))),
+            'importance_std': dict(zip(self.obia_base.get_vars(), np.std(all_imp, axis=0)))
+        }
         if return_true_vs_pred:
-            return models, yt_yp
+            return models, summary, results, yt_yp
         else:
-            all_imp = np.vstack([x['importances'] for x in results])
-            summary = {
-                'accuracy_mean': np.mean([x['accuracy'] for x in results]),
-                'accuracy_std': np.std([x['accuracy'] for x in results]),
-                'kappa_mean': np.mean([x['kappa'] for x in results]),
-                'kappa_std': np.std([x['kappa'] for x in results]),
-                'prec_mean': np.mean([x['p_r_f1'][0] for x in results], axis=0),
-                'prec_std': np.std([x['p_r_f1'][0] for x in results], axis=0),
-                'rec_mean': np.mean([x['p_r_f1'][1] for x in results], axis=0),
-                'rec_std': np.std([x['p_r_f1'][1] for x in results], axis=0),
-                'f1_mean': np.mean([x['p_r_f1'][2] for x in results], axis=0),
-                'f1_std': np.std([x['p_r_f1'][2] for x in results], axis=0),
-                'importance_mean': {k:v for k, v in zip(self.obia_base.get_vars(), np.mean(all_imp, axis=0))},
-                'importance_std': {k:v for k, v in zip(self.obia_base.get_vars(), np.std(all_imp, axis=0))}
-            }
             return models, summary, results
 
     def classify(self, model, output_file=None, compress='NONE'):