diff --git a/Archive.py b/Archive.py
index e87ecaf30b51d389906265baf9564832b58640b3..7c83929204ca445d79f806d81339db7c82fc1426 100644
--- a/Archive.py
+++ b/Archive.py
@@ -327,7 +327,7 @@ class Archive():
                     token=data_file.readline()
             except Exception as e:
                 self.logger.error("Authentification is probably wrong : {0}".format(e))
-                sys.exit(-1)
+                # sys.exit(-1)
 
         #====================
         # Download
@@ -435,6 +435,5 @@ class Archive():
                     # Create a list with dates without duplicates
                     if not di in self.single_date:
                         self.single_date.append(di)
-    
-        self.logger.info("End of unpack archives.")
-    
+
+        self.logger.info("End of unpack archives.")
\ No newline at end of file
diff --git a/Constantes.py b/Constantes.py
index c313d6441365972cc06125c421ac3136b20ac2f9..35adb0d269297ea0ec45febb1785366270c5c7eb 100644
--- a/Constantes.py
+++ b/Constantes.py
@@ -1,3 +1,5 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
 
 import logging
 
@@ -9,3 +11,6 @@ EXPERT_MODE = 1
 MULTIPROCESSING_ENABLE = True
 MULTIPROCESSING_DISABLE = False
 
+CLOUD_THRESHOLD = 0.4
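+# Maximum cloud fraction tolerated when selecting an image
+# (compared against RasterSat_by_date.pourc_cloud() in Processing.i_img_sat).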
+
+EPSG_PHYMOBAT = 2154
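+# EPSG:2154 is RGF93 / Lambert-93; presumably the working projection for PHYMOBAT outputs.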
diff --git a/PHYMOBAT.py b/PHYMOBAT.py
index dc9fee000df8a7e9ecee18ca1f78209e050f4178..13804e1b73c73ab7a3f174612f6a36441052e5e8 100644
--- a/PHYMOBAT.py
+++ b/PHYMOBAT.py
@@ -821,7 +821,8 @@ class PHYMOBAT(QtWidgets.QMainWindow, Processing):
 		"""
 			Function to launch the processing. This function take account :
 			
-			- The ``Multi-processing`` check box if the processing has launched with multi process. By default, this is checked. It need a computer with minimum 12Go memory.
+			- The ``Multi-processing`` check box, to launch the processing with several processes.
+				By default, this is checked. It needs a computer with at least 12 GB of memory.
 			- Append a few system value with :func:`get_variable`.
 			- There are 3 principal check boxes :
 				- to get number download available images
@@ -930,7 +931,7 @@ class PHYMOBAT(QtWidgets.QMainWindow, Processing):
 		if self.current_mode == Constantes.SIMPLE_MODE:
 			
 			# Compute a output slope raster 
-			if self.path_mnt:
+			if self.path_mnt :
 				self.i_slope()
 		
 			# Save the sample features
@@ -942,40 +943,36 @@ class PHYMOBAT(QtWidgets.QMainWindow, Processing):
 			# function to launch the image processing	
 			self.i_images_processing()
 
-			raise("Phymobat")
-
-			# To launch texture processing only
-			self.i_vhrs()
-
 			# Compute optimal threshold 
 			self.i_sample_rf()
-					
+				
 			# Classification processing 
 			self.out_fieldname_carto = self.out_fieldname_carto
 			self.out_fieldtype_carto = self.out_fieldtype_carto 
 			self.i_classifier_rf()
+
 			self.i_validate()
-			
+
 		# Clear variables after processing
 		#self.clear_sample()
 		self.out_fieldname_carto = ['ID', 'AREA']
 		self.out_fieldtype_carto = [ogr.OFTString, ogr.OFTReal]
 		# Images after processing images
-		self.out_ndvistats_folder_
-		tab = defaultdict(list)
+		self.out_ndvistats_folder_tab = defaultdict(list)
 		
 		Processing.__init__(self)# Initialize variable without to close and launch again the application
 		
 		# End of the processus
-		endTime = time.time() # Tps : Terminé
-		self.logger.info('...........' + ' Outputted to File in ' + str(endTime - startTime) + ' secondes')
+		endTime = time.time()
+		self.logger.info('........... Outputted to File in {0} seconds'.format(endTime - startTime))
 		nb_day_processing = int(time.strftime('%d', time.gmtime(endTime - startTime))) - 1
-		self.logger.info("That is, " + str(nb_day_processing) + ' day(s) ' + time.strftime('%Hh %Mmin%S', time.gmtime(endTime - startTime)))
+		self.logger.info("That is {0} day(s) {1}".format(nb_day_processing, time.strftime('%Hh %Mmin%S', time.gmtime(endTime - startTime))))
 		
 	def open_backup(self, test=None):
 		"""
-		Function to load input text in every fields. The input file must be a XML file.
+			Function to load input text in every field. The input file must be an XML file.
 		"""
+		
 		if test is None :
 			in_backup = QtWidgets.QFileDialog.getOpenFileName(self, "Open backup", os.getcwd(), '*.xml')[0]
 		else :
@@ -1175,13 +1172,14 @@ class PHYMOBAT(QtWidgets.QMainWindow, Processing):
 					   
 	def save_backup(self):
 		"""
-		Function to save input text in every fields. The output file must be a XML file.
+			Function to save input text in every field.
+			The output file must be an XML file.
 		"""
 		
 		out_backup = QtWidgets.QFileDialog.getSaveFileName(self, "Save backup", os.getcwd(), '*.xml')[0]
 
 		# if the user has forgotten to put .shp at the end of the output xml
-		if out_backup[-4:] != '.xml':
+		if not out_backup.endswith('.xml'):
 			out_backup = out_backup + '.xml'
 		
 		root = ET.Element("Data_filled")
diff --git a/Precision_moba.py b/Precision_moba.py
index be00495c0882647df15f268d31238161a6a3c0f0..b0678bd7da61230f8ca5d41a7e84230badac8afa 100644
--- a/Precision_moba.py
+++ b/Precision_moba.py
@@ -21,6 +21,7 @@ import os, subprocess
 import numpy as np
 
 from Toolbox import Toolbox
+import Outils
 from Vector import Vector
 from RasterSat_by_date import RasterSat_by_date
 
@@ -37,170 +38,183 @@ from sklearn.metrics import precision_score
 from sklearn.metrics import f1_score 
 
 class Precision_moba():
-    """
-    Classification precision class. This class build the whole of processing to extract the precision
-    of the classification from the MOBA method.
-    
-    :param path_cut: Study area shapefile
-    :type path_cut: str
-    :param path_save: Main folder path. To save precision file in txt
-    :type path_save: str
-    :param complete_validation_shp: Shapefile path of the validation
-    :type complete_validation_shp: str
-    :param ex_raster: Raster model path to the rasterization
-    :type ex_raster: str
-    :param img_pr: To store the rasterization path of the classification [0] and the validation [1]
-    :type img_pr: list
-    :param complete_img: To store info raster of the classification [0] and the validation [1]
-    :type complete_img: list
-    
-    """
-    
-    def __init__(self, path_cut, path_save):
-        """Create a new 'Precision' instance
-        
-        """
-        
-        self.path_cut = path_cut
-        self.path_save = path_save
-        
-        self.complete_validation_shp = ''
-        self.ex_raster = ''
-        
-        self.img_pr = []
-        self.complete_img = []
-    
-    def preprocess_to_raster_precision(self, shp_pr, field_pr):
-        """
-        Function to extract data in pixel of vectors. For that, it need to
-        save a shapefile, then rasterize it and get a final data matrix.
-        
-        :param shp_pr: Input shapefile path
-        :type shp_pr: str 
-        :param field_pr: Field name of the shapefile to rasterize
-        :type field_pr: str
-        """
-        
-        kwargs = {}
-        kwargs['rm_rast'] = 1 # To remove existing raster in the clip_raster function
-        opt = {}
-        opt['Remove'] = 1 # To remove existing vector
-        # Define vector
-        vector_pr = Vector(shp_pr, self.path_cut, **opt)
-        # Define Toolbox used
-        current_img = Toolbox()
-        # Create the raster output path
-        img_pr = vector_pr.layer_rasterization(self.ex_raster, field_pr)
-        current_img.imag = img_pr
-        current_img.vect = self.complete_validation_shp
-        self.img_pr.append(current_img.clip_raster(**kwargs))
-        
-        # Call the raster class to extract the image data
-        self.complete_img.append(RasterSat_by_date('', '', [0]))
-    
-    def confus_matrix(self, data_class, data_val):
-        """
-        Function to compute and save a confusion matrix, precision, recall, 
-        f_scrore and overall accuracy of the classification. 
-        It compute all statistics because of sklearn module.
-        At the final step, it save in the principal folder.
-        
-        :param data_class: Classification data
-        :type data_class: matrix array
-        :param data_val: Validation data
-        :type data_val: matrix array
-        """
-        
-        data_class = data_class.flatten() # to convert a matrix array in list array
-        data_val = data_val.flatten()
-        
-        # Add pixel value in a list without no data
-        fb_stats_2 = [[int(data_class[x]),int(data_val[x])] for x in range(len(data_val)) if data_val[x] != -10000]
-        fb_stats_2 = list(map(list, zip(*fb_stats_2)))# transpose list
+	"""
+		Classification precision class. This class builds the whole processing chain to extract
+		the precision of a classification produced by the MOBA method.
+		
+		@param path_cut: Study area shapefile
+		@type path_cut: str
 
-        fb_stats_in = fb_stats_2[0]
-        fb_stats_val = fb_stats_2[1]
-        
-        # Open a file (txt) to save results
-        with open(self.path_save + "/ConfusionMatrix.txt", "w") as f :
-            # Compute statistics on the classification
-            cm = confusion_matrix(fb_stats_val, fb_stats_in)
-            f.write("Confusion Matrix :\n")
-            for out in cm:
-                f.write(str(out) + "\n")
-            
-            all_pr = classification_report(fb_stats_val, fb_stats_in)
-            f.write("\n")
-            f.write(all_pr)    
-            
-            accur = accuracy_score(fb_stats_val, fb_stats_in)
-            f.write("\n")
-            f.write("Accuracy : " + str(accur) + "\n")
-            
-            recall = recall_score(fb_stats_val, fb_stats_in)
-            f.write("\n")
-            f.write("Recall : " + str(recall) + "\n")
-            
-            pr = precision_score(fb_stats_val, fb_stats_in)
-            f.write("\n")
-            f.write("Precision : " + str(pr) + "\n")
-            
-            f_scor = f1_score(fb_stats_val, fb_stats_in)
-            f.write("\n")
-            f.write("F_score : " + str(f_scor) + "\n")
-            
-            kappa = self.cohen_kappa_score(fb_stats_val, fb_stats_in)
-            f.write("\n")
-            f.write("Kappa : " + str(kappa) + "\n")
-            
-            self.logger.info('\n\n Accuracy : {0}, Kappa : {1}, Recall : {2}, Precision : {3},\
-                F_score : {4} \n {5} \n Confusion matrix : \n {6} \
-                '.format(accur, kappa, recall, pr, f_scor, all_pr, cm))  
-        
-    def cohen_kappa_score(self, y1, y2, labels=None):
-        """
-        Ref : https://github.com/scikit-learn/scikit-learn/blob/51a765a/sklearn/metrics/classification.py#L260 
-        
-        Cohen's kappa: a statistic that measures inter-annotator agreement.
-        This function computes Cohen's kappa [1], a score that expresses the level
-        of agreement between two annotators on a classification problem. It is
-        defined as
-        .. math::
-            \kappa = (p_o - p_e) / (1 - p_e)
-        where :math:`p_o` is the empirical probability of agreement on the label
-        assigned to any sample (the observed agreement ratio), and :math:`p_e` is
-        the expected agreement when both annotators assign labels randomly.
-        :math:`p_e` is estimated using a per-annotator empirical prior over the
-        class labels [2].
-        Parameters
-        ----------
-        y1 : array, shape = [n_samples]
-            Labels assigned by the first annotator.
-        y2 : array, shape = [n_samples]
-            Labels assigned by the second annotator. The kappa statistic is
-            symmetric, so swapping ``y1`` and ``y2`` doesn't change the value.
-        labels : array, shape = [n_classes], optional
-            List of labels to index the matrix. This may be used to select a
-            subset of labels. If None, all labels that appear at least once in
-            ``y1`` or ``y2`` are used.
-        Returns
-        -------
-        kappa : float
-            The kappa statistic, which is a number between -1 and 1. The maximum
-            value means complete agreement; zero or lower means chance agreement.
-        References
-        ----------
-        .. [1] J. Cohen (1960). "A coefficient of agreement for nominal scales".
-               Educational and Psychological Measurement 20(1):37-46.
-               doi:10.1177/001316446002000104.
-        .. [2] R. Artstein and M. Poesio (2008). "Inter-coder agreement for
-               computational linguistics". Computational Linguistic 34(4):555-596.
-        """
-        
-        confusion = confusion_matrix(y1, y2, labels=labels)
-        P = confusion / float(confusion.sum())
-        p_observed = np.trace(P)
-        p_expected = np.dot(P.sum(axis=0), P.sum(axis=1))
-        
-        return (p_observed - p_expected) / (1 - p_expected)
-        
\ No newline at end of file
+		@param path_save: Main folder path. To save precision file in txt
+		@type path_save: str
+
+		@param complete_validation_shp: Shapefile path of the validation
+		@type complete_validation_shp: str
+
+		@param ex_raster: Raster model path to the rasterization
+		@type ex_raster: str
+
+		@param img_pr: To store the rasterization path of the classification [0] and the validation [1]
+		@type img_pr: list
+
+		@param complete_img: To store info raster of the classification [0] and the validation [1]
+		@type complete_img: list
+	"""
+	
+	def __init__(self, path_cut, path_save):
+		"""
+			Create a new 'Precision' instance   
+		"""
+		
+		self.path_cut = path_cut
+		self.path_save = path_save
+		
+		self.complete_validation_shp = ''
+		self.ex_raster = ''
+		
+		self.img_pr = []
+		self.complete_img = []
+
+		self.logger = Outils.Log("Log", "Precision_moba")
+	
+	def preprocess_to_raster_precision(self, shp_pr, field_pr):
+		"""
+			Function to extract vector data as pixels. To do so, it needs to
+			save a shapefile, rasterize it and extract the final data matrix.
+			
+			@param shp_pr: Input shapefile path
+			@type shp_pr: str 
+
+			@param field_pr: Field name of the shapefile to rasterize
+			@type field_pr: str
+		"""
+		
+		kwargs = {}
+		kwargs['rm_rast'] = 1 # To remove existing raster in the clip_raster function
+		
+		opt = {}
+		opt['Remove'] = 1 # To remove existing vector
+		
+		# Define vector
+		vector_pr = Vector(shp_pr, self.path_cut, **opt)
+		
+		# Define Toolbox used
+		current_img = Toolbox()
+		
+		# Create the raster output path
+		img_pr = vector_pr.layer_rasterization(self.ex_raster, field_pr)
+		current_img.image = img_pr
+		current_img.vect = self.complete_validation_shp
+		self.img_pr.append(current_img.clip_raster(**kwargs))
+		
+		# Call the raster class to extract the image data
+		self.complete_img.append(RasterSat_by_date('', ''))
+
+		vector_pr.logger.close()
+		current_img.logger.close()
+		self.complete_img[-1].logger.close()
+
+	def confus_matrix(self, data_class, data_val):
+		"""
+			Function to compute and save the confusion matrix, precision, recall, 
+			f_score and overall accuracy of the classification. 
+			All statistics are computed with the sklearn module.
+			At the final step, results are saved in the main folder.
+			
+			@param data_class: Classification data
+			@type data_class: matrix array
+		
+			@param data_val: Validation data
+			@type data_val: matrix array
+		"""
+		
+		data_class = data_class.flatten() # flatten the matrix into a 1-D array
+		data_val = data_val.flatten()
+		
+		# Add pixel value in a list without no data
+		fb_stats_2 = [[int(data_class[x]),int(data_val[x])] for x in range(len(data_val)) if data_val[x] != -10000]
+		fb_stats_2 = list(map(list, zip(*fb_stats_2)))# transpose list
+
+		fb_stats_in = fb_stats_2[0]
+		fb_stats_val = fb_stats_2[1]
+
+		self.logger.debug(self.path_save)
+		
+		# Open a file (txt) to save results
+		with open("{0}/ConfusionMatrix.txt".format(self.path_save), "w") as f :
+			# Compute statistics on the classification
+			cm = confusion_matrix(fb_stats_val, fb_stats_in)
+			
+			f.write("Confusion Matrix :\n")
+			
+			for out in cm:
+				f.write("{0}\n".format(out))
+			
+			all_pr = classification_report(fb_stats_val, fb_stats_in)
+			f.write("\n{0}".format(all_pr))
+			
+			accur = accuracy_score(fb_stats_val, fb_stats_in)
+			f.write("\nAccuracy : {0}\n".format(accur))
+			
+			recall = recall_score(fb_stats_val, fb_stats_in, average=None)
+			f.write("\nRecall : {0}\n".format(recall))
+			
+			pr = precision_score(fb_stats_val, fb_stats_in, average=None)
+			f.write("\nPrecision : {0}\n".format(pr))
+			
+			f_scor = f1_score(fb_stats_val, fb_stats_in, average=None)
+			f.write("\nF_score : {0}\n".format(f_scor))
+			
+			kappa = self.cohen_kappa_score(fb_stats_val, fb_stats_in)
+			f.write("\nKappa : {0}\n".format(kappa))
+			
+			self.logger.info('\n\n Accuracy : {0}, Kappa : {1}, Recall : {2}, Precision : {3}, F_score : {4}\n\
+			{5} \n Confusion matrix : \n {6}'.format(accur, kappa, recall, pr, f_scor, all_pr, cm))  
+		
+	def cohen_kappa_score(self, y1, y2, labels=None):
+		"""
+			Ref : https://github.com/scikit-learn/scikit-learn/blob/51a765a/sklearn/metrics/classification.py#L260 
+			
+			Cohen's kappa: a statistic that measures inter-annotator agreement.
+			This function computes Cohen's kappa [1], a score that expresses the level
+			of agreement between two annotators on a classification problem. It is
+			defined as
+			.. math::
+				\kappa = (p_o - p_e) / (1 - p_e)
+			where :math:`p_o` is the empirical probability of agreement on the label
+			assigned to any sample (the observed agreement ratio), and :math:`p_e` is
+			the expected agreement when both annotators assign labels randomly.
+			:math:`p_e` is estimated using a per-annotator empirical prior over the
+			class labels [2].
+			Parameters
+			----------
+			y1 : array, shape = [n_samples]
+				Labels assigned by the first annotator.
+			y2 : array, shape = [n_samples]
+				Labels assigned by the second annotator. The kappa statistic is symmetric,
+				so swapping ``y1`` and ``y2`` doesn't change the value.
+			labels : array, shape = [n_classes], optional
+				List of labels to index the matrix. This may be used to select a
+				subset of labels. If None, all labels that appear at least once in
+				``y1`` or ``y2`` are used.
+			Returns
+			-------
+			kappa : float
+				The kappa statistic, which is a number between -1 and 1. The maximum
+				value means complete agreement; zero or lower means chance agreement.
+			References
+			----------
+			.. [1] J. Cohen (1960). "A coefficient of agreement for nominal scales".
+				   Educational and Psychological Measurement 20(1):37-46.
+				   doi:10.1177/001316446002000104.
+			.. [2] R. Artstein and M. Poesio (2008). "Inter-coder agreement for
+				   computational linguistics". Computational Linguistic 34(4):555-596.
+		"""
+		
+		confusion = confusion_matrix(y1, y2, labels=labels)
+		P = confusion / float(confusion.sum())
+		p_observed = np.trace(P)
+		p_expected = np.dot(P.sum(axis=0), P.sum(axis=1))
+		
+		return (p_observed - p_expected) / (1 - p_expected)
+		
\ No newline at end of file
diff --git a/Processing.py b/Processing.py
index f3e885eee2d223ced555571bc5cebf95e3137c59..3c115aad20e6236d33c65b615768b575890f1374 100644
--- a/Processing.py
+++ b/Processing.py
@@ -41,7 +41,7 @@ from Slope import Slope
 # Class group vector
 from Vector import Vector
 from Sample import Sample
-from Segmentation import Segmentation
+import Segmentation
 from Rpg import Rpg
 
 import Constantes
@@ -139,13 +139,10 @@ class Processing(object):
 		self.password = ''
 
 		# List of output raster path
-		self.raster_path = []
-		self.list_band_outraster = []
-		
-		# Class name
+		self.raster_path = defaultdict(dict)
 		
 		# TODO : Change index of the classes -> Harbacées 6 / Ligneux 7 by Agriculuture 4 / Eboulis 5
-		
+		# Class name
 		self.in_class_name = ['Non Vegetation semi-naturelle', 'Vegetation semi-naturelle',\
 						 'Herbacees', 'Ligneux', \
 						 'Ligneux mixtes', 'Ligneux denses',\
@@ -234,17 +231,16 @@ class Processing(object):
 		self.check_download.download_auto(self.user, self.password)
 		self.check_download.decompress()
 		 
-	def i_img_sat(self):
-		
+	def i_img_sat(self):	
 		"""
 			Interface function to processing satellite images:
 			
 				1. Clip archive images and modify Archive class to integrate clip image path.
 				With :func:`Toolbox.clip_raster` in ``Toolbox`` module.
 			
-				2. Search cloud's percentage :func:`RasterSat_by_date.RasterSat_by_date.pourc_cloud`, select 
-				image and compute ndvi index :func:`RasterSat_by_date.RasterSat_by_date.calcul_ndvi`. If cloud's percentage is 
-				greater than 40%, then not select and compute ndvi index.
+				2. Search the cloud percentage :func:`RasterSat_by_date.RasterSat_by_date.pourc_cloud`, 
+				select the image and compute the ndvi index :func:`RasterSat_by_date.RasterSat_by_date.calcul_ndvi`.
+				If the cloud percentage is greater than 40%, the image is not selected and no ndvi index is computed.
 			
 				3. Compute temporal stats on ndvi index [min, max, std, min-max]. With :func:`Toolbox.calc_serie_stats` 
 				in ``Toolbox`` module.
@@ -255,7 +251,7 @@ class Processing(object):
 				>>> stats_test = RasterSat_by_date(class_archive, big_folder, one_date)
 				>>> stats_test.complete_raster(stats_test.create_raster(in_raster, stats_data, in_ds), stats_data)
 		"""
-		
+
 		# Map projection of the image and the cloud mask
 		for clip_index, clip in enumerate(self.check_download.list_img):
 			current_list = Toolbox()
@@ -274,37 +270,43 @@ class Processing(object):
 		check_L8 = RasterSat_by_date(self.check_download, self.folder_processing)
 
 		for date in self.check_download.single_date:
-			   
-			# check_L8 = RasterSat_by_date(self.check_download, self.folder_processing, date)
-			check_L8.mosaic_by_date(date)
 
-			raise("STOP")
+			check_L8.mosaic_by_date(date)
 			 
-			# Search cloud's percentage, select image and compute ndvi index if > cl
-			cl = check_L8.pourc_cloud(check_L8._one_date[3], check_L8._one_date[4])
-			if cl > 0.60:
-				check_L8.calcul_ndvi(check_L8._one_date[3])
-				spectral_out.append(check_L8._one_date)
+			# Search the cloud percentage, select the image and compute the ndvi index 
+			# if cloud_pourcentage < CLOUD_THRESHOLD
+
+			if check_L8.pourc_cloud() < Constantes.CLOUD_THRESHOLD :
+				tmp = list(date) # copy, so extend() below doesn't mutate the entry in single_date
+				check_L8.calcul_ndvi()
+				tmp.extend(check_L8.cloudiness_pourcentage)
+				spectral_out.append(tmp)
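+				# tmp layout : [date fields..., spectral GTiff, cloud GTiff,
+				# cloud pixel data, NDVI path, NDVI pixel data]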
 
 		# Compute temporal stats on ndvi index [min, max, std, min-max]
 		spectral_trans = np.transpose(np.array(spectral_out, dtype=object))
+
 		stats_name = ['Min', 'Date', 'Max', 'Std', 'MaxMin']
 		stats_ndvi, stats_cloud = current_list.calc_serie_stats(spectral_trans)
-		
-		# Create stats ndvi raster and stats cloud raster
-		stats_L8 = RasterSat_by_date(self.check_download, self.folder_processing, [0])
-		
-		# Stats cloud raster
-		out_cloud_folder = stats_L8._class_archive._folder + '/' + stats_L8._big_folder + '/' + self.captor_project + '/Cloud_number_' + self.captor_project + '.TIF'
-		
-		stats_L8.complete_raster(*stats_L8.create_raster(out_cloud_folder, stats_cloud, stats_L8.raster_data(self.check_download.list_img[0][4])[1]), stats_cloud)
-		
+
+		# Stats cloud raster		
+		stats_cloud_raster = "{0}/{1}/{2}/Cloud_number_{3}.TIF".format(check_L8._class_archive._folder, check_L8._big_folder, self.captor_project, self.captor_project)
+
+		check_L8.complete_raster(\
+			*check_L8.create_empty_raster(stats_cloud_raster, stats_cloud,\
+			 check_L8.raster_data(self.check_download.list_img[0][4])[1]),\
+			 stats_cloud)
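+
+		# create_empty_raster() returns (nbband, e); the '*' unpacks them as the
+		# first two arguments of complete_raster(nbband, e, data).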
+
 		# Stats ndvi rasters		
 		for stats_index in range(len(stats_ndvi)):
-			out_ndvistats_folder = stats_L8._class_archive._folder + '/' + stats_L8._big_folder + '/' + self.captor_project + '/' + stats_name[stats_index] + '_' + self.captor_project + '.TIF'
-			self.out_ndvistats_folder_tab[stats_index] = out_ndvistats_folder
-			stats_L8.complete_raster(*stats_L8.create_raster(out_ndvistats_folder, stats_ndvi[stats_index], stats_L8.raster_data(self.check_download.list_img[0][4])[1]), stats_ndvi[stats_index])
-		
+			out_ndvistats_raster = "{0}/{1}/{2}/{3}_{4}.TIF".format(check_L8._class_archive._folder, check_L8._big_folder, self.captor_project, stats_name[stats_index], self.captor_project)
+			self.out_ndvistats_folder_tab[stats_index] = out_ndvistats_raster
+			check_L8.complete_raster(\
+				*check_L8.create_empty_raster(out_ndvistats_raster, stats_ndvi[stats_index],\
+				 check_L8.raster_data(self.check_download.list_img[0][4])[1]),\
+				 stats_ndvi[stats_index])
+
+		check_L8.logger.close()
+
 	def i_slope(self):
 		"""
 			Interface function to processing slope raster. From a MNT, and with a command line gdal,
@@ -312,42 +314,50 @@ class Processing(object):
 		"""
 		
 		current_path_mnt = Toolbox()
-		current_path_mnt.imag = self.path_mnt
+		current_path_mnt.image = self.path_mnt
 		current_path_mnt.vect = self.path_area
 		path_mnt = current_path_mnt.clip_raster()
 		
 		study_slope = Slope(path_mnt)
-		study_slope.extract_slope()# Call this function to compute slope raster
+
+		# Call this function to compute slope raster
+		study_slope.extract_slope()
 		self.path_mnt = study_slope.out_mnt
+
+		current_path_mnt.logger.close()
 	
 	def i_vhrs(self):
 		"""
-			Interface function to processing VHRS images. It create two OTB texture images :func:`Vhrs.Vhrs` : SFS Texture and Haralick Texture
+			Interface function to process VHRS images. 
+			It creates two OTB texture images with :func:`Vhrs.Vhrs` : 
+			SFS texture and Haralick texture.
 		"""
 
-		# Create texture image
+		############################## Create texture image ##############################
+		
 		# Clip orthography image 
+
 		current_path_ortho = Toolbox()
-		current_path_ortho.imag = self.path_ortho
+
+		current_path_ortho.image = self.path_ortho
 		current_path_ortho.vect = self.path_area
 		path_ortho = current_path_ortho.clip_raster()
 		
 		texture_irc = Vhrs(path_ortho, self.mp)
 		self.out_ndvistats_folder_tab['sfs'] = texture_irc.out_sfs
 		self.out_ndvistats_folder_tab['haralick'] = texture_irc.out_haralick
+
+		current_path_ortho.logger.close()
+		texture_irc.logger.close()
+
 		
-	def i_images_processing(self, vs=1): 
+	def i_images_processing(self): 
 		"""
-			Interface function to launch processing VHRS images :
-				func:`i_vhrs` and satellite images :func:`i_img_sat` in multi-processing.
-			
-			:param vs: Boolean variable to launch texture processing because of interface checkbox -> 0 or 1.
-				- 0 means, not texture processing
-				- 1 means, launch texture processing
-			:type vs: int
+			Interface function to launch the processing of VHRS images :func:`i_vhrs` 
+			and satellite images :func:`i_img_sat` in multi-processing.
 		"""
-		
-		######################### Multiprocessing #########################
+
+		######################### Multiprocessing ? #########################
 
 		mgr = BaseManager()
 		mgr.register('defaultdict', defaultdict, DictProxy)
@@ -356,48 +366,41 @@ class Processing(object):
 		self.out_ndvistats_folder_tab = mgr.defaultdict(list)
 		
 		p_img_sat = Process(target=self.i_img_sat)
-		p_img_sat.start()
-
-		raise ("Process")
+		p_vhrs = Process(target=self.i_vhrs)
 
+		self.logger.debug("Multiprocessing : {0}".format(self.mp))
+		
 		if self.mp == Constantes.MULTIPROCESSING_DISABLE:
+			p_img_sat.start()
 			p_img_sat.join()
-		
-		if vs == 1:
-			p_vhrs = Process(target=self.i_vhrs)
 			p_vhrs.start()
-			p_vhrs.join()
-		
-		if self.mp == Constantes.MULTIPROCESSING_ENABLE:
+			p_vhrs.join()	
+		else :
+			p_img_sat.start()
+			p_vhrs.start()
 			p_img_sat.join()
+			p_vhrs.join()
 
 		###################################################################
+
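+		# self.raster_path maps a layer name to its raster path and band, e.g.
+		# self.raster_path["Min"] == {"PATH": ".../Min_2014.TIF", "BAND": 1}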
+		self.raster_path["Min"]["PATH"] = self.out_ndvistats_folder_tab[0]
+		self.raster_path["Min"]["BAND"] = 1
 		
-		# List of output raster path
-		self.raster_path.append(self.out_ndvistats_folder_tab[0])
-		# List of output raster band
-		self.list_band_outraster.append(1)
-		
-		if vs == 1:
-			self.raster_path.append(self.out_ndvistats_folder_tab['sfs'])
-			self.list_band_outraster.append(4)
-			self.raster_path.append(self.out_ndvistats_folder_tab['haralick'])
-			self.list_band_outraster.append(2)
+		for i in range(1,7) :
+			self.raster_path["SFS_{0}".format(i)]["PATH"] = self.out_ndvistats_folder_tab['sfs']
+			self.raster_path["SFS_{0}".format(i)]["BAND"] = i
+
+		for i in range(1,9) :
+			self.raster_path["HARALICK_{0}".format(i)]["PATH"] = self.out_ndvistats_folder_tab['haralick']
+			self.raster_path["HARALICK_{0}".format(i)]["BAND"] = i
 		
 		# To slope, to extract scree
-		if self.path_mnt != '':
-			self.raster_path.append(self.path_mnt)
-			self.list_band_outraster.append(1)
+		if self.path_mnt :
+			self.raster_path["MNT"]["PATH"] = self.path_mnt
+			self.raster_path["MNT"]["BAND"] = 1
 			
-		self.raster_path.append(self.out_ndvistats_folder_tab[2])
-		# example raster path tab :
-		#				[path_folder_dpt + '/' + folder_processing + '/' + classif_year + '/Min_2014.TIF',\
-		#				os.path.dirname(path_ortho) + '/Clip_buffer_surface_dep_18_IRCOrtho65_2m_sfs.TIF',\
-		#				os.path.dirname(path_ortho) + '/Clip_buffer_surface_dep_18_IRCOrtho65_2m_haralick.TIF',\
-		#				path_folder_dpt + '/' + folder_processing + '/' + classif_year + '/Max_2014.TIF']
-		
-		# List of output raster band
-		self.list_band_outraster.append(1) #[1, 4, 2, 1]
+		self.raster_path["MAX"]["PATH"] = self.out_ndvistats_folder_tab[2]
+		self.raster_path["MAX"]["BAND"] = 1
 		
 		self.logger.info("End of images processing !")
 		
@@ -405,10 +408,10 @@ class Processing(object):
 		"""
 			Interface function to extract mono rpg crops.
 			
-			:param path_rpg: Input RPG shapefile.
-			:type path_rpg: str
+			@param path_rpg: Input RPG shapefile.
+			@type path_rpg: str
 			
-			:returns: str -- variable **Rpg.vector_used**, output no duplicated crops shapefile (path).
+			@returns: str -- variable **Rpg.vector_used**, output no duplicated crops shapefile (path).
 		"""
 			   
 		# Extract rpg crops
@@ -423,30 +426,37 @@ class Processing(object):
 			mono_sample.vector_used = "{0}/MONO_{1}".format(mono_sample.vector_folder, mono_sample.vector_name)
 
 		self.logger.info('End of RPG processing')
+
+		mono_sample.logger.close()
 		
 		return mono_sample.vector_used
 		 
 	def i_sample(self):
 		"""
-		Interface function to compute threshold with various sample. It also extract a list of validation layer (shapefile) 
-		to compute the precision of the next classification :func:`i_validate`. 
-		
-		It create samples 2 by 2 with kwargs field names and class :func:`Sample.Sample.create_sample`. 
-		Then, it compute zonal statistics by polygons :func:`Vector.Sample.zonal_stats`.
-		
-		With zonal statistics computed, a optimal threshold is determined :func:`Seath.Seath.separability_and_threshold` that
-		will print in a text file .lg in the main folder.
-		
-		.. warning:: :func:`Seath.Seath.separability_and_threshold` does not always allow to discriminate optimal threshold. 
-					Then, this function will be launch at least ten time until it reaches a optimal threshold.
+			Interface function to compute thresholds with various samples. 
+			It also extracts a list of validation layers (shapefile) 
+			to compute the precision of the next classification :func:`i_validate`. 
+			
+			It creates samples 2 by 2 with kwargs field names and class :func:`Sample.Sample.create_sample`. 
+			Then, it computes zonal statistics by polygons :func:`Vector.Sample.zonal_stats`.
+			
+			With zonal statistics computed, an optimal threshold is determined by
+			:func:`Seath.Seath.separability_and_threshold`, and printed in a .lg 
+			text file in the main folder.
+			
+			.. warning:: :func:`Seath.Seath.separability_and_threshold` does not always
+			 manage to discriminate an optimal threshold. Hence, this function is launched
+			 up to ten times until it reaches an optimal threshold.
 		"""
 		
 		# Compute threshold with various sample
 		i_s = 0
 		while i_s < 10:
+
 			try :
 				self.valid_shp = []
 				sample_rd = {}
+
 				for sple in range(len(self.sample_name) * 2):
 					kwargs = {}
 					kwargs['fieldname'] = self.fieldname_args[sple]
@@ -460,85 +470,90 @@ class Processing(object):
 				
 				# Search the optimal threshold by class 
 				# Open a text file to print stats of Seath method
-				file_J = self.path_folder_dpt + '/log_J.lg'
-				f = open(file_J, "wb")
-				for th_seath in range(len(self.sample_name)):
-					self.decis[th_seath] = Seath()
-					self.decis[th_seath].value_1 = sample_rd[th_seath*2].stats_dict
-					self.decis[th_seath].value_2 = sample_rd[th_seath*2 + 1].stats_dict
-					self.decis[th_seath].separability_and_threshold()
-					
-					# Print the J value in the text file .lg
-					f.write('For ' + str(self.sample_name[th_seath]) + ' :\n')
-					f.write('J = ' + str(self.decis[th_seath].J[0]) +'\n')
-					f.write('The class 1 ' + str(self.decis[th_seath].threshold[0]) +'\n')
-					
-				f.close()	
+				with open("{0}/log_J.lg".format(self.path_folder_dpt), "wb") as f : 
+					for th_seath in range(len(self.sample_name)):
+						self.decis[th_seath] = Seath()
+						self.decis[th_seath].value_1 = sample_rd[th_seath*2].stats_dict
+						self.decis[th_seath].value_2 = sample_rd[th_seath*2 + 1].stats_dict
+						self.decis[th_seath].separability_and_threshold()
+						
+						# Print the J value in the text file .lg
+						f.write('For {0} :\n'.format(self.sample_name[th_seath]))
+						f.write('J = {0}\n'.format(self.decis[th_seath].J[0]))
+						f.write('The class 1 {0}\n'.format(self.decis[th_seath].threshold[0]))
+	
 				i_s = 20
 			except:
 				i_s = i_s + 1
-		# Method to stop the processus if there is not found a valid threshold
+
+		# Stop the process if no valid threshold was found
 		if i_s != 20:
-			self.logger.error('Problem in the sample processing !!!')
+			self.logger.error('Problem during the sample processing.')
 			sys.exit(1)
 	
 	def i_sample_rf(self):
 		"""
 			This function build a random forest trees like model to create a final classification.
-			All of This using the method described in the :func:`i_validate` function and because
+			All of this using the method described in the :func:`i_validate` function and because
 			of sklearn module.
 		"""
 		
 		X_rf = []
 		y_rf = []
-		sample_rd = {}			
-		# Tricks to add all textural indexes
-		rm_index = 1
-		self.raster_path.remove(self.raster_path[rm_index]) # Remove SFS layer
-		self.raster_path.remove(self.raster_path[rm_index]) # Remove Haralick layer
-		self.list_band_outraster.remove(self.list_band_outraster[rm_index]) # Remove band of the layer
-		self.list_band_outraster.remove(self.list_band_outraster[rm_index]) # Remove band of the layer
-		# Add all layers in the simple index haralick
-		for add_layer in range(8):
-			self.raster_path.insert(add_layer+1, self.out_ndvistats_folder_tab['haralick'])
-			self.list_band_outraster.insert(add_layer+1, add_layer+1)
-		# Add all layers in the SFS index
-		for add_layer in range(6):
-			self.raster_path.insert(add_layer+1, self.out_ndvistats_folder_tab['sfs'])
-			self.list_band_outraster.insert(add_layer+1, add_layer+1)
-			
+		sample_rd = {}	
+
+		self.logger.debug("Taille : {0}".format(len(self.raster_path)))
+		self.logger.debug("Rasters : {0}".format(self.raster_path))
+
 		# Extract value mean from polygons
 		for sple in range(len(self.sample_name) * 2):
 			kwargs = {}
 			kwargs['fieldname'] = self.fieldname_args[sple]
 			kwargs['class'] = self.class_args[sple]
-			sample_rd[sple] = Sample(self.sample_name[round(sple/2)], self.path_area, self.list_nb_sample[round(sple/2)])
+
+			self.logger.debug("Creation Sample : {0} - {1} - {2}".format( \
+				self.sample_name[round(sple/2)], self.path_area, self.list_nb_sample[round(sple/2)]) )
+
+			sample_rd[sple] = Sample(self.sample_name[round(sple/2)], self.path_area,\
+			 self.list_nb_sample[round(sple/2)])
+
+			self.logger.debug("kwargs : {0}".format(kwargs))
 
 			sample_rd[sple].create_sample(**kwargs)
 			
 			# Add the validation shapefile
 			self.valid_shp.append([sample_rd[sple].vector_val, kwargs['fieldname'], kwargs['class']])
 
+			self.logger.debug(self.valid_shp)
+
+			items = list(self.raster_path.items())
+
 			for lbo in range(len(self.raster_path)):
+				self.logger.debug("lbo : {0}".format(lbo))
 				kwargs['rank'] = lbo
 				kwargs['nb_img'] = len(self.raster_path)
-				sample_rd[sple].zonal_stats(self.raster_path[lbo], self.list_band_outraster[lbo], **kwargs)
+
+				self.logger.debug("item : {0}".format(items[lbo][1]))
+				sample_rd[sple].zonal_stats(items[lbo][1]["PATH"], items[lbo][1]["BAND"], **kwargs)
 			
+
 			# To convert the dictionnary in a list
 			# for key, value in sample_rd[sple].stats_dict.iteritems():
 			for key, value in sample_rd[sple].stats_dict.items():
-#				 X_rf.append([-10000 if (math.isnan(x) or math.isinf(x)) else x for x in value])
+				# X_rf.append([-10000 if (math.isnan(x) or math.isinf(x)) else x for x in value])
 				# To set the grassland class of the RPG and PIAO (same class)			
 				if sple == 2:
-#					 y_rf.append(1)
+					# y_rf.append(1)
 					pass
 				elif sple == 3:
-#					 y_rf.append(4)
+					# y_rf.append(4)
 					pass
 				else:
 					y_rf.append(sple)
 					X_rf.append([-10000 if (x is None or math.isnan(x) or math.isinf(x)) else x for x in value])
-					
+		
+			sample_rd[sple].logger.close()
+
 		# Build a forest of trees from the samples				 
 		self.rf = self.rf.fit(X_rf, y_rf)
 		
@@ -547,7 +562,8 @@ class Processing(object):
 		importance = [(importance[x],x+1) for x in range(len(importance))]
 		importance.sort()
 		
-		file_feat_import = os.path.dirname(str(self.raster_path[0])) + '/Feature_important_RF.ft'
+		file_feat_import = "{0}/Feature_important_RF.ft".format(os.path.dirname(items[0][1]["PATH"]))
+
 		if os.path.exists(file_feat_import):
 			os.remove(file_feat_import)
 		
@@ -555,22 +571,24 @@ class Processing(object):
 			f_out.write(str(importance))
 		
 		# Print in a file decision tree
-		file_decisiontree = os.path.dirname(str(self.raster_path[0])) + '/Decision_tree.dot'
+		file_decisiontree = "{0}/Decision_tree.dot".format(os.path.dirname(items[0][1]["PATH"]))
 		
 		if os.path.exists(file_decisiontree):
 			os.remove(file_decisiontree)
 		
 		tree_in_forest = self.rf.estimators_[499]
+
 		with open(file_decisiontree, 'w') as my_file:
 			my_file = tree.export_graphviz(tree_in_forest, out_file = my_file)
 
 	def i_classifier_rf(self): 
 		"""
-			Interface function to launch random forest classification with a input segmentation :func:`Segmentation.Segmentation`.
+			Interface function to launch random forest classification with an input segmentation 
+			:func:`Segmentation.Segmentation`.
 		
 			This function use the sklearn module to build the best of decision tree to extract classes.
-			The optimal threshold are stored by class **rf** variable in :func:`Processing.i_sample_rf`. Then it computes zonal statistics by polygons
-			for every images in multi-processing (if **mp** = 1).
+			The optimal thresholds are stored by class in the **rf** variable by :func:`Processing.i_sample_rf`.
+			Then it computes zonal statistics by polygons for every image in multi-processing (if **mp** = 1).
 		"""
 		
 		# Multiprocessing
@@ -580,17 +598,21 @@ class Processing(object):
 		multi_process_var = [] # Multi processing variable
 		  
 		# Extract final cartography
-		out_carto = Segmentation(self.path_segm, self.path_area) 
+		out_carto = Segmentation.Segmentation(self.path_segm, self.path_area) 
 		out_carto.output_file = self.output_name_moba
 		out_carto.out_class_name = self.in_class_name
-#		 out_carto.out_threshold = []
 
-		for ind_th in range(len(self.raster_path)):
-			multi_process_var.append([self.raster_path[ind_th], self.list_band_outraster[ind_th]])
+		items = list(self.raster_path.items())
+
+		for ind_th in range(len(items)):
+			multi_process_var.append([items[ind_th][1]["PATH"], items[ind_th][1]["BAND"]])
 
 		# Compute zonal stats with multi processing
 		exist_stats = 1 # By default, the stats file exists already
-		file_stats = os.path.dirname(str(self.raster_path[0])) + '/Stats_raster_spectral_texture.stats' # Stats backup file
+		
+		# Stats backup file
+		file_stats = "{0}/Stats_raster_spectral_texture.stats".format(os.path.dirname(items[0][1]["PATH"]))
+		
 		if not os.path.exists(file_stats):
 			exist_stats = 0 # The sats file doesn't exist
 			# Open a stats backup to avoid computing again (Gain of time)
@@ -602,7 +624,7 @@ class Processing(object):
 		if exist_stats == 0:
 			out_carto.stats_dict = mgr.defaultdict(list)
 			
-			for i in range(len(multi_process_var)):
+			for i in range(len(multi_process_var)) :
 				kwargs['rank'] = i
 				kwargs['nb_img'] = len(multi_process_var)
 				p.append(Process(target=out_carto.zonal_stats, args=(*multi_process_var[i], ), kwargs=kwargs))
@@ -616,11 +638,10 @@ class Processing(object):
 					p[i].join()
 					
 			for key, value_seg in out_carto.stats_dict.items():
-				true_value = [-10000 if (x == None or math.isnan(x) or math.isinf(x)) else x for x in value_seg]
+				true_value = [-10000 if (x is None or math.isnan(x) or math.isinf(x)) else x for x in value_seg]
 				X_out_rf.append(true_value)
 				
-				# Print rasters stats value in the text file .lg
-				f_out.write(str(true_value) + '\n')
+				f_out.write("{0}\n".format(true_value))
 			
 			# Close the output file
 			f_out.close()
@@ -636,12 +657,14 @@ class Processing(object):
 					X_out_rf.append(eval(x_in.strip('\n')))
 		
 		predicted_rf = self.rf.predict(X_out_rf)
+
+		self.logger.debug("Taille sample_name : {0}".format(len(self.sample_name)))
 			 
 		# For the higher than level 1
 		if len(self.sample_name) > 2:
 			# Compute the biomass and density distribution
-			# Use 'out_carto.out_threshold' to konw predicted in the segmentation class
-			out_carto.out_threshold = predicted_rf
+			# Use 'out_carto.out_threshold' to pass the predictions to the segmentation class
+			out_carto.out_threshold["PREDICTION"] = predicted_rf
 			# In the compute_biomass_density function, this variable used normally to define 
 			# threshold of the classification with SEATH method is initialized
 			out_carto.compute_biomass_density('RF')
@@ -679,7 +702,7 @@ class Processing(object):
 		opt = {}
 		opt['Remove'] = 1
 		rpg_tif = Vector(self.sample_name[0], self.path_area, **opt)
-#		 if not os.path.exists(str(rpg_tif.vector_used[:-3]+'TIF')): 
+
 		kwargs['choice_nb_b'] = 1
 		out_carto.stats_rpg_tif = out_carto.zonal_stats_pp(rpg_tif.layer_rasterization(self.path_ortho, 'CODE_GROUP', **kwargs))
 
@@ -688,10 +711,11 @@ class Processing(object):
 		
 	def i_classifier_s(self): 
 		"""
-		Interface function to launch decision tree classification with a input segmentation :func:`Segmentation.Segmentation`.
+		Interface function to launch decision tree classification with an input 
+		segmentation :func:`Segmentation.Segmentation`.
 		
-		This function store optimal threshold by class **Segmentation.out_threshold**. Then it computes zonal statistics by polygons
-		for every images in multi-processing (if **mp** = 1).
+		This function stores the optimal threshold by class in **Segmentation.out_threshold**.
+		Then it computes zonal statistics by polygons for every image in multi-processing (if **mp** = 1).
 		""" 
 		
 		# Multiprocessing
@@ -704,14 +728,15 @@ class Processing(object):
 		out_carto = Segmentation(self.path_segm, self.path_area) 
 		out_carto.output_file = self.output_name_moba
 		out_carto.out_class_name = self.in_class_name
-		out_carto.out_threshold = []
+		
 		for ind_th in range(len(self.sample_name)):
 			out_carto.out_threshold.append(self.decis[ind_th].threshold[0])
 			if '>' in self.decis[ind_th].threshold[0]:
 				out_carto.out_threshold.append(self.decis[ind_th].threshold[0].replace('>', '<='))
 			elif '<' in self.decis[ind_th].threshold[0]:
 				out_carto.out_threshold.append(self.decis[ind_th].threshold[0].replace('<', '>='))
-		#	 out_carto.zonal_stats((raster_path[ind_th], list_band_outraster[ind_th]))
+			# out_carto.zonal_stats((raster_path[ind_th], list_band_outraster[ind_th]))
+
 			multi_process_var.append([self.raster_path[ind_th], self.list_band_outraster[ind_th]])
 		 
 		# Compute zonal stats on slope raster
@@ -802,23 +827,25 @@ class Processing(object):
 		rpg_tif.layer_rasterization(self.path_ortho, 'CODE_GROUP')
 		  
 		# Final cartography
-		out_carto.mono_rpg_tif = self.sample_name[0][:-4] + '.TIF'
+		out_carto.mono_rpg_tif = "{0}.TIF".format(self.sample_name[0][:-4])
 		out_carto.create_cartography(self.out_fieldname_carto, self.out_fieldtype_carto)
 	   
 	def i_validate(self):
 		"""
-		Interface to validate a classification. It going to rasterize the validation shapefile and the 
-		classification shapefile with :func:`layer_rasterization`. Next, to compare pixel by pixel, the classification
-		quality to built a confusion matrix in a csv file.
-		
+			Interface to validate a classification. It rasterizes the validation shapefile and the 
+			classification shapefile with :func:`layer_rasterization`. 
+			Next, it compares them pixel by pixel to build a confusion matrix in a csv file.
 		"""
+
+		self.logger.info('"Validation')
+
 		# Variable to convert the input classname to an individual interger
 		# Only for the validate sample
 		class_validate = 0
-		complete_validate_shp = os.path.dirname(str(self.valid_shp[0][0])) + '/validate.shp'
 
-		# TODO: Set this method in the Precision_moba class
+		complete_validate_shp = "{0}/validate.shp".format(os.path.dirname(self.valid_shp[0][0]))
 
+		# TODO: Set this method in the Precision_moba class
 		
 		# Processing to rasterize the validate shapefile. 1) Merge sahpefiles 2) Rasterization
 		for val in self.valid_shp:
@@ -838,6 +865,7 @@ class Processing(object):
 					opt['add_fieldname'] = 1 
 					opt['fieldname'] = 'CLASS_CODE'
 					opt['class'] = str(class_validate) # Add integer classes
+					
 					# Set the new shapefile
 					val[0] = val[0][:-4] + '_.shp'
 					val[1] = opt['fieldname']
@@ -851,18 +879,26 @@ class Processing(object):
 						process_tocall_merge =  ['ogr2ogr', '-update', '-append', complete_validate_shp, \
 												 val[0], '-nln', os.path.basename(str(complete_validate_shp[:-4]))]
 					subprocess.call(process_tocall_merge)
+
+					sample_val.logger.close()
+			
 			# Increrment variable
 			class_validate = self.valid_shp.index(val) + 1
 		
 		# Compute precision of the classification
 		valid = Precision_moba(self.path_area, self.path_folder_dpt)	 
 		valid.complete_validation_shp = complete_validate_shp
-		valid.ex_raster = self.raster_path[0]
-		
+
+		valid.ex_raster = list(self.raster_path.items())[0][1]["PATH"]
+
 		# TODO: Call the RasterSat_by_Date class here instead of the Precision_moba class
+
+		self.logger.debug("Name moba : {0}".format(self.output_name_moba))
 		
 		valid.preprocess_to_raster_precision(self.output_name_moba, 'FBPHY_CODE') # To the classification's data
 		valid.preprocess_to_raster_precision(complete_validate_shp, val[1]) # To the validation's data
 		
 		# Compute precision on the output classification
 		valid.confus_matrix(valid.complete_img[0].raster_data(valid.img_pr[0])[0], valid.complete_img[1].raster_data(valid.img_pr[1])[0])
+
+		valid.logger.close()
diff --git a/RasterSat_by_date.py b/RasterSat_by_date.py
index ec2292bc9e38ae6bbd5795d7dbc79ebba973c710..77416dc150bccb0672a04f6436435e7704da02f2 100644
--- a/RasterSat_by_date.py
+++ b/RasterSat_by_date.py
@@ -25,7 +25,9 @@ except :
 	from osgeo import gdal
 
 import Outils
+import Satellites
 import numpy as np
+from libtiff import TIFF
 
 class RasterSat_by_date(object):
 	"""
@@ -77,8 +79,6 @@ class RasterSat_by_date(object):
 		
 		# Take images with the same date
 
-		# self.logger.debug(self._class_archive.list_img)
-
 		for d_dup in self._class_archive.list_img:
 			if d_dup[:3] == d_uni:
 				group.append(d_dup)
@@ -87,15 +87,17 @@ class RasterSat_by_date(object):
 	
 	def vrt_translate_gdal(self, vrt_translate, src_data, dst_data):
 		"""
-		Function to launch gdal tools in command line. With ``gdalbuildvrt`` and ``gdal_translate``.
-		This function is used :func:`mosaic_by_date` to mosaic image by date.
-		
-		:param vrt_translate: ``vrt`` or ``translate``
-		:type vrt_translate: str
-		:param src_data: Data source. Several data for vrt process and one data (vrt data) for gdal_translate
-		:type src_data: list (process ``vrt``) or str (process ``translate``)  
-		:param dst_data: Output path
-		:type dst_data: str
+			Function to launch gdal tools in command line. With ``gdalbuildvrt`` and ``gdal_translate``.
+			This function is used :func:`mosaic_by_date` to mosaic image by date.
+			
+			@param vrt_translate: ``vrt`` or ``translate``
+			@type vrt_translate: str
+			
+			@param src_data: Data source. Several data for vrt process and one data (vrt data) for gdal_translate
+			@type src_data: list (process ``vrt``) or str (process ``translate``)  
+			
+			@param dst_data: Output path
+			@type dst_data: str
 		"""
 		
 		if os.path.exists(dst_data):
@@ -185,30 +187,30 @@ class RasterSat_by_date(object):
 		if not os.path.exists(gtifcloud_out):
 			self.vrt_translate_gdal('translate', vrtcloud_out, gtifcloud_out)
 
-		raise("STOP")
-		self.images_gtif = [gtif_out, gtifcloud_out]
-		# self._one_date.append(gtifcloud_out)
+		self.cloudiness_pourcentage = [gtif_out, gtifcloud_out]
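+		# Index layout, grown later by pourc_cloud() and calcul_ndvi() :
+		# [0] spectral GTiff, [1] cloud GTiff, [2] cloud pixel data,
+		# [3] NDVI image path, [4] NDVI pixel data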
 	
 	def raster_data(self, img):
 		"""
-		Function to extract raster information.
-		Return table of pixel values and raster information like line number, pixel size, ... (gdal pointer)
-		
-		:param img: Raster path
-		:type img: str
+			Function to extract raster information.
+			Return table of pixel values and raster information like line number, pixel size, ... (gdal pointer)
+			
+			@param img 	: Raster path
+			@type img  	: str
 
-		:returns: numpy.array -- variable **data**, Pixel value matrix of a raster.
-				  
-				  gdal pointer -- variable **_in_ds**, Raster information.
+			@returns	: numpy.array -- variable **data**, Pixel value matrix of a raster.
+					  
+					  	  gdal pointer -- variable **_in_ds**, Raster information.
 		"""
 		
 		# Load Gdal's drivers
 		gdal.AllRegister()
+
+		self.logger.debug("Img : {0}".format(img))
 		
 		# Loading input raster
-		self.logger.info('Loading input raster : {0}'.format(os.path.split(str(img))[1][:-4]))
-		in_ds = gdal.Open(str(img), gdal.GA_ReadOnly)
-		
+		self.logger.info('Loading input raster : {0}'.format(os.path.basename(img)))
+		in_ds = gdal.Open(img, gdal.GA_ReadOnly)
+
 		# if it doesn't exist
 		if in_ds is None:
 			self.logger.error('Could not open ')
@@ -219,20 +221,21 @@ class RasterSat_by_date(object):
 			nbband = in_ds.RasterCount # Spectral band number
 		else:
 			nbband = self.choice_nb_b
+
 		rows = in_ds.RasterYSize # Rows number
 		cols = in_ds.RasterXSize # Columns number
 		
 		# Table's declaration 
-		data = [] # np.float32([[0]*cols for i in xrange(rows)])
+		data = [] 
 		for band in range(nbband):
 			
 			canal = in_ds.GetRasterBand(band + 1) # Select a band
+			
 			if nbband == 1:
-				data = canal.ReadAsArray(0, 0, cols, rows).astype(np.float32) # Assign pixel values at the data
+				data = canal.ReadAsArray(0, 0, cols, rows).astype(np.float32) # Assign pixel values to the data
 			else:
 				data.append(canal.ReadAsArray(0, 0, cols, rows).astype(np.float32))
-#			 print('Copie des pixels du raster ! Bande :',  (band + 1))
-		
+				
 		###################################
 		# Close input raster
 		_in_ds = in_ds
@@ -240,117 +243,130 @@ class RasterSat_by_date(object):
 		
 		return data, _in_ds
 	
-	def pourc_cloud(self, img_spec, img_cloud):
+	def pourc_cloud(self):
 		"""
-		Return clear pixel percentage on the image **img_spec** because of a cloud image **img_cloud**.
-		
-		:param img_spec: Spectral image path
-		:type img_spec: str
-		:param img_cloud: Cloud image path
-		:type img_cloud: str
-		
-		:returns: float -- variable **nb0**, clear pixel percentage.
-		:Example:
-		
-		>>> import RasterSat_by_date
-		>>> Landsat_test = RasterSat_by_date(class_archive, big_folder, one_date)
-		>>> nb0_test = Landsat_test.pourc_cloud(Landsat_test._one_date[3], Landsat_test._one_date[4])
-		>>> nb0_test
-		98
+			Return the cloudy pixel fraction of the image self.cloudiness_pourcentage[0],
+			computed with the cloud image self.cloudiness_pourcentage[1].
+					
+			:returns: float -- the cloudy fraction, i.e. 1 - **nb0** where **nb0** is the clear pixel fraction.
+			:Example:
+			
+			>>> import RasterSat_by_date
+			>>> Landsat_test = RasterSat_by_date(class_archive, big_folder)
+			>>> Landsat_test.pourc_cloud()
+			0.02
 		"""
 		
 		# Extract raster's information
-		data_spec, info_spec = self.raster_data(img_spec)
-		data_cloud, info_cloud = self.raster_data(img_cloud)
-		self._one_date.append(data_cloud) # Add cloud pixel value table
-		
-		# Extent of the images
-		mask_spec = np.in1d(data_spec[0], [-10000, math.isnan], invert=True) # ex : array([ True,  True,  True,  True, False,  True,  True,  True,  True], dtype=bool) -> False where there is -10000 ou NaN
-		
+		data_spec, info_spec = self.raster_data(self.cloudiness_pourcentage[0])
+		data_cloud, info_cloud = self.raster_data(self.cloudiness_pourcentage[1])
+
+		# Add cloud pixel value table
+		self.cloudiness_pourcentage.append(data_cloud) 
+	
+		# Extent of the images : 
+		# ex : array([ True,  True,  True,  True, False,  True,  True,  True,  True], dtype=bool)
+		# -> False where there is -10000 or NaN
+		mask_spec = np.isin(data_spec[0], [-10000, math.isnan], invert=True) 
+
 		# Print area account of the pixel size 'info_spec.GetGeoTransform()'
-		self.logger.info('Area = ' + str(float((np.sum(mask_spec)  * info_spec.GetGeoTransform()[1] * abs(info_spec.GetGeoTransform()[-1]) )/10000)) + 'ha' )
-		
-		# Cloud mask
-		mask_cloud = np.in1d(data_cloud, 0) # This is the same opposite False where there is 0
-		cloud = np.choose(mask_cloud, (False, mask_spec)) # If True in cloud mask, it take spectral image else False
-		dist = np.sum(cloud) # Sum of True. True is cloud
-		
+		area = float((np.sum(mask_spec) * info_spec.GetGeoTransform()[1] * abs(info_spec.GetGeoTransform()[-1]) )/10000)
+		self.logger.info("Area = {0} ha".format(area))
+
+		############################## Cloud mask ##############################
+
+		# This is the same opposite False where there is 0
+		mask_cloud = np.isin(data_cloud, 0) 
+
+		# If True in cloud mask, it take spectral image else False
+		cloud = np.choose(mask_cloud, (False, mask_spec))
+
+		# Sum of True. True is cloud 
+		dist = np.sum(cloud) 
+
 		# Computer cloud's percentage with dist (sum of cloud) by sum of the image's extent
 		try :
 			nb0 = float(dist)/(np.sum(mask_spec))
-			self.logger.info('For ' + os.path.split(str(img_spec))[1][:-4] + ', cloudy cover ' + str(100 - round(nb0*100, 2)) + "%")
+			self.logger.info('For {0}, cloudy cover = {1}%'.format(os.path.basename(self.cloudiness_pourcentage[0]), 100 - round(nb0*100, 2) ) )
 		except ZeroDivisionError:
 			nb0 = 0
 			self.logger.info("The raster isn\'t in the area !")
 		
-		return nb0
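+		# Return the cloudy fraction (1 - clear fraction); callers compare it
+		# against Constantes.CLOUD_THRESHOLD (see Processing.i_img_sat).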
+		return 1.0-nb0
 	
-	def calcul_ndvi(self, img_spec):
+	def calcul_ndvi(self):
 		"""
-		Computer NDVI index for a Landsat image.
-		
-		NDVI = (band4 - band3) / (band4 + band3) with nb_captor = 0
-		
-		or for Sentinel 2 image (2A) : NDVI = band3 - band2 / band3 + band2 with nb_captor = -1
-		
-		:param img_spec: Spectral image path
-		:type img_spec: str
+			Compute the NDVI index of a Landsat image.
+			
+			NDVI = (PIR - R) / (PIR + R)
+		"""	
+
+		np.seterr(invalid='ignore')
 
-		"""
-		
-		# NDVI formula index for 2 captor (L8 or S2A Theia)
-		if self._class_archive._captor == 'SENTINEL2':
-			n_captor = -1
-		else :
-			n_captor = 0
-		
 		# Extract raster's information
-		data, in_ds = self.raster_data(img_spec)
-		
+		data, in_ds = self.raster_data(self.cloudiness_pourcentage[0])
+		data_cloud, in_ds_cloud = self.raster_data(self.cloudiness_pourcentage[1])
+
+		canal_PIR = Satellites.SATELLITE[self._class_archive._captor]["PIR"]
+		canal_R = Satellites.SATELLITE[self._class_archive._captor]["R"]
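+
+		# Satellites.SATELLITE is assumed to map each captor name to its band
+		# indexes, e.g. SATELLITE[captor]["PIR"] -> index of the near-infrared band.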
+
 		# Computer NDVI
-		mask = np.greater(data[0], -10000)
-		ndvi = np.choose(mask, (-10000, eval('(data[4+n_captor]-data[3+n_captor])') / eval('(data[4+n_captor]+data[3+n_captor])'))) # If True, -10000 (NaN) else compute mathematical operation
+		mask = np.greater(data_cloud, -10000)
+
+		# Where the mask is True (valid pixel), keep the computed NDVI, else -10000 (NaN)
+		ndvi = np.choose(mask, (-10000, (data[canal_PIR] - data[canal_R]) / (data[canal_PIR] + data[canal_R])))
 		
 		# Outfile name
-		img_ndvi = img_spec[:-4] + '_ndvi.TIF'
-		self._one_date.append(img_ndvi) # Add ndvi image path
-		self._one_date.append(ndvi) # Add ndvi pixel value table
-		self.create_raster(img_ndvi, ndvi, in_ds)
-		
-	def create_raster(self, out_raster, data, in_ds):
+		img_ndvi = "{0}_ndvi.TIF".format(self.cloudiness_pourcentage[0][:-4])
+
+		self.cloudiness_pourcentage.append(img_ndvi) # Add ndvi image path
+		self.cloudiness_pourcentage.append(ndvi) # Add ndvi pixel value table
+
+		self.create_empty_raster(img_ndvi, ndvi, in_ds)
+
+	def create_empty_raster(self, out_raster_path, data, in_ds):
 		"""
-		Create a raster empty with the input raster property
-		
-		:param out_raster: Output image path
-		:type out_raster: str
-		:param data: Pixel value matrix. Matrix size equal to that of a raster.
-		:type data: numpy.array
-		:param in_ds: Raster information
-		:type in_ds: gdal pointer
-		
-		:returns: gdal pointer -- variable **out_ds**, Raster out information.
-				  
-				  int -- variable **nbband**, Band number of the out layer. 
-				  
-				  int -- variable **e**, Index to know if the raster exists. If it doesn't exists e = 0 else e = 1 (by default).
+			Create an empty raster with the input raster properties
+			
+			@param out_raster_path: Output image path
+			@type out_raster_path: str
+
+			@param data: Pixel value matrix. Matrix size equal to that of a raster.
+			@type data: numpy.array
+
+			@param in_ds: Raster information
+			@type in_ds: gdal pointer
+			
+			@returns: gdal pointer -- variable **out_ds**, Raster out information.
+					  
+					  int -- variable **nbband**, Band number of the out layer. 
+					  
+					  int -- variable **e**, Index to know if the raster exists.
+					  If it doesn't exist, e = 0, else e = 1 (by default).
 		"""
 		
-#		 if os.path.exists(str(out_raster)):
-#			 os.remove(str(out_raster))
 		e = 1 # Raster out exists by default 
+		
 		# Verify if the processing take input band or one spectral band	
 		if data.ndim == 2 or self.choice_nb_b == 1:
 			nbband = 1
 		else:
 			nbband = in_ds.RasterCount 
 			
-		driver = gdal.GetDriverByName('GTiff')  
-		if not os.path.exists(str(out_raster)):
+		driver = gdal.GetDriverByName('GTiff') 
+
+		if os.path.exists(out_raster_path):
+			os.remove(out_raster_path)
+
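+		# After the removal above, this test always succeeds : the raster is recreated from scratch (e = 0)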
+		if not os.path.exists(out_raster_path):
+
 			e = 0	
 			# Create outfile
-			self.out_ds = driver.Create(str(out_raster), in_ds.RasterXSize, in_ds.RasterYSize, nbband, gdal.GDT_Float32)
+			self.out_ds = driver.Create(out_raster_path, in_ds.RasterXSize, in_ds.RasterYSize, nbband, gdal.GDT_Float32)
+
 			if self.out_ds is None:
-				self.logger.error('Could not create ' + os.path.split(str(out_raster))[1])
+				self.logger.error('Could not create {0}'.format(os.path.basename(out_raster_path)))
 				sys.exit(1)
 				
 			# Get extent coordinates and raster resolution
@@ -370,31 +386,34 @@ class RasterSat_by_date(object):
 			# Set the geo-traking and outfile projection
 			self.out_ds.SetGeoTransform(geotransform)
 			self.out_ds.SetProjection(def_projection)
-		
+
 		else:
-			
-			self.out_ds = gdal.Open(str(out_raster), gdal.GA_ReadOnly)
+			self.out_ds = gdal.Open(out_raster_path, gdal.GA_ReadOnly)
 			
 		return nbband, e
 	
 	def complete_raster(self, nbband, e, data): 
 		"""
-		  This function complete the function above :func:`create_raster()`. It 
-		  fills the raster table and close the layer.
+			This function completes the function above, :func:`create_empty_raster`. 
+			It fills the raster table and closes the layer.
 		
-		  :param out_ds: Raster out information
-		  :type out_ds: gdal pointer
-		  :param nbband: Band number of the out layer
-		  :type nbband: int
-		  :param e: Index to know if the raster existed. If it didn't exist e = 0.
-		  :type e: int
-		  :param data: Pixel value matrix. Matrix size equal to that of a raster.
-		  :type data: numpy.array
+			@param out_ds: Raster out information
+			@type out_ds: gdal pointer
+
+			@param nbband: Band number of the out layer
+			@type nbband: int
+
+			@param e: Index to know if the raster existed. If it didn't exist e = 0.
+			@type e: int
+
+			@param data: Pixel value matrix. Matrix size equal to that of a raster.
+			@type data: numpy.array
 		"""
 		
 		# The e index to verify if the layer existed already because of the 
-		# function :func:`create_raster()`
+		# function :func:`create_empty_raster()`
 		if e == 0 :   
+			
 			p = 0 # Increment for the number band
 			while p < nbband:
 				#Incrementation
@@ -417,6 +436,4 @@ class RasterSat_by_date(object):
 				out_band = None	
 			
 		# Close open data
-		self.out_ds = None
-
-			
\ No newline at end of file
+		self.out_ds = None
\ No newline at end of file
diff --git a/Sample.py b/Sample.py
index c7125468742f09629b959ba77e726b39d6e54c44..ae7a078077328fc897e60489a076d6ffc556d6f7 100644
--- a/Sample.py
+++ b/Sample.py
@@ -30,20 +30,23 @@ except :
 	from osgeo import ogr
 
 class Sample(Vector):
-	
 	"""
-		Vector class inherits the super vector class properties. This class create training sample.
+		Sample class inherits the super Vector class properties. 
+		This class creates training samples.
 	
-		:param vector_used: Input/Output shapefile to clip (path)
-		:type vector_used: str
-		:param vector_cut: Area shapefile (path)
-		:type vector_cut: str
-		:param nb_sample: Number of polygons for every sample
-		:type nb_sample: int
-		:param vector_val: Output shapefile to validate the futur classification
-		:type vector_val: str
+		@param vector_used: Input/Output shapefile to clip (path)
+		@type vector_used: str
+
+		@param vector_cut: Area shapefile (path)
+		@type vector_cut: str
+
+		@param nb_sample: Number of polygons for every sample
+		@type nb_sample: int
+
+		@param vector_val: Output shapefile to validate the future classification
+		@type vector_val: str
 	
-		:opt: Refer to the Vector class
+		@opt: Refer to the Vector class
 	"""
 	
 	def __init__(self, used, cut, nb_sample, **opt):
@@ -51,14 +54,13 @@ class Sample(Vector):
 			Create a new 'Sample' instance   
 		"""
 
-		self.logger = Outils.Log("Log", "Sample")
-
+		self.logger = Outils.Log('Log', 'Sample')
+		
 		Vector.__init__(self, used, cut, **opt)
 		
 		self._nb_sample = nb_sample
 		self.vector_val = ''
 	
-	
 	def create_sample(self, **kwargs):
 		"""
 			Function to create a sample shapefile of a specific class
@@ -71,7 +73,10 @@ class Sample(Vector):
 		
 		kw_field = kwargs['fieldname'] if kwargs.get('fieldname') else ''
 		kw_classes = kwargs['class'] if kwargs.get('class') else ''
-		
+
+		self.logger.debug("kw_field : {0}".format(kw_field))
+		self.logger.debug("kw_classes : {0}".format(kw_classes))
+
 		# If the user wants to select polygons with a certain class name
 		if kw_field and kw_classes:
 			# The random sample with class name selected only
@@ -80,30 +85,44 @@ class Sample(Vector):
 			# The random sample without class name selected
 			random_sample = np.array(random.sample(range(self.data_source.GetLayer().GetFeatureCount()), self._nb_sample))
 			
+		self.logger.debug("random_sample : {0}".format(random_sample))
+
 		# Output shapefile of the sample's polygons (path)
-		self.vector_used = self.vector_used[:-4] + '_' + kw_classes.replace(',','').replace(' ','') + 'rd.shp'
+		self.vector_used = "{0}_{1}_rd.shp".format(self.vector_used[:-4], kw_classes.replace(',','').replace(' ',''))
+
+		self.logger.debug("vector_used : {0}".format(self.vector_used))
+
+		self.logger.debug("Fill sample : {0} - {1}".format(self.vector_used, random_sample[:round(len(random_sample)/2)]))
+
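+		# The random polygons are split in half : first half for training, second half for validation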
 		# Fill and create the sample shapefile
 		self.fill_sample(self.vector_used, random_sample[:round(len(random_sample)/2)])
+	
 		# Output shapefile of the validate polygon (path)
-		self.vector_val = self.vector_used[:-6] + 'val.shp'
+		self.vector_val = "{0}val.shp".format(self.vector_used[:-6]) 
+		
 		# Fill and create the validate polygons shapefile
 		self.fill_sample(self.vector_val, random_sample[round(len(random_sample)/2):])
 
 	def select_random_sample(self, kw_field, kw_classes):		
 		"""
-			Function to select id with class name specific only. This function is used in :func:`create_sample`
+			Function to select polygon ids with a specific class name only.
+			This function is used in :func:`create_sample`
 
-			:param kw_field: Field name in the input shapefile
-			:type kw_field: str
-			:param kw_classes: Class names in the input shapefile like this --> 'classname1, classname2'
-			:type kw_classes: str
-			:returns: list -- variable **select_id**, List of id with a class name specific.
+			@param kw_field: Field name in the input shapefile
+			@type kw_field: str
+			
+			@param kw_classes: Class names in the input shapefile like this --> 'classname1, classname2'
+			@type kw_classes: str
+			
+			@returns: list -- variable **select_id**, List of id with a class name specific.
 		"""
 		
 		# Convert the string into a list : remove the spaces and split on commas
 		# (adapt everywhere if the script is modified, because the process
 		# works with an input string chain)
+
 		kw_classes = kw_classes.replace(' ','').split(',')
+		self.logger.debug('kw_classes : {0}'.format(kw_classes))
 		
 		# List of class name id
 		select_id = []
@@ -113,11 +132,13 @@ class Sample(Vector):
 		# Loop on input polygons
 		in_feature = shp_ogr.SetNextByIndex(0) # Initialization
 		in_feature = shp_ogr.GetNextFeature()
+		
 		while in_feature:			
 			# if polygon is a defined class name 
 			## .replace('0','') to remove '0' in front of for example '1' (RPG -> '01')
 			table_name_class = in_feature.GetField(self.field_names[self.field_names.index(kw_field)])
-			# To avoid that the process crashed this part of the algorithm will be launch if the field is contains characters
+			# To avoid crashing the process, this part of the algorithm is only run
+			# if the field actually contains characters
 			if table_name_class != None :
 				if in_feature.GetField(self.field_names[self.field_names.index(kw_field)]).replace('0','') in kw_classes:
 					
@@ -127,26 +148,29 @@ class Sample(Vector):
 					in_feature.Destroy()
 					   
 			in_feature = shp_ogr.GetNextFeature()
+
 		return select_id
 	
 	def fill_sample(self, output_sample, polygon, **opt):
-		
 		"""
-			Function to fill and create the output sample shapefile. This function is used in :func:`create_sample`
-			to create samples polygons and validated polygons (to the take out the precision of the classification)
+			Function to fill and create the output sample shapefile.
+			This function is used in :func:`create_sample` to create sample polygons
+			and validation polygons (to assess the precision of the classification).
 
-			:param output_sample: Path of the output shapefile
-			:type output_sample: str
-			:param polygon: Identity of the selected random polygons. If this variable = 0, the processing will take all polygons 
-			:type polygon: list or int	  
+			@param output_sample: Path of the output shapefile
+			@type output_sample: str
+			
+			@param polygon: Identity of the selected random polygons.
+			 If this variable = 0, the processing will take all polygons 
+			@type polygon: list or int	  
 		
-			:opt: **add_fieldname** (int) - Variable to kown if add a field. By default non (0), if it have to add (1)
+			@opt: **add_fieldname** (int) - Whether to add a field. By default no (0), set to 1 to add it
 		
 					**fieldname** (str) - Fieldname to add in the input shapefile
 				
 					**class** (int) - class names in integer to add in the input shapefile
 		"""
-		
+
 		# Option to add an integer field
 		add_field = opt['add_fieldname'] if opt.get('add_fieldname') else 0
 		opt_field = opt['fieldname'] if opt.get('fieldname') else ''
@@ -167,6 +191,7 @@ class Sample(Vector):
 		## Remove the output shapefile if it exists
 		if os.path.exists(output_sample):
 			self.data_source.GetDriver().DeleteDataSource(output_sample)
+			
 		out_ds = self.data_source.GetDriver().CreateDataSource(output_sample)
 		
 		if out_ds is None:
diff --git a/Satellites.py b/Satellites.py
index 814082a83beba3b69af67512f979e330cafd51f6..ecb33f425b117f84c9a3ec0dc9107e9dce43ca22 100644
--- a/Satellites.py
+++ b/Satellites.py
@@ -12,12 +12,17 @@ SATELLITE = collections.defaultdict(dict)
 SATELLITE["SENTINEL2"]["server"] = "https://theia.cnes.fr/atdistrib"
 SATELLITE["SENTINEL2"]["resto"] = "resto2"
 SATELLITE["SENTINEL2"]["token_type"] = "text"
+SATELLITE["SENTINEL2"]["R"] = 2
+SATELLITE["SENTINEL2"]["PIR"] = 3
 
 ########################## LANDSAT #################################
 
 SATELLITE["Landsat"]["server"] = "https://theia-landsat.cnes.fr"
 SATELLITE["Landsat"]["resto"] = "resto"
 SATELLITE["Landsat"]["token_type"] = "json"
+SATELLITE["Landsat"]["R"] = 3
+SATELLITE["Landsat"]["PIR"] = 4
+
 
 
 
diff --git a/Segmentation.py b/Segmentation.py
index c95ed4acd972417bf6719c2f9b4fda6fabfbcffc..5bc3b462a3d4222990779b24c6ab5ec744589e3e 100644
--- a/Segmentation.py
+++ b/Segmentation.py
@@ -23,325 +23,336 @@ from Vector import Vector
 import Outils
 
 try :
-    import ogr, gdal
+	import ogr, gdal
 except :
-    from osgeo import ogr, gdal
+	from osgeo import ogr, gdal
 
 from collections import *
 
 class Segmentation(Vector):
-    
-    """
-        Vector class inherits the super vector class properties. This class create the final shapefile : Cartography 
-        on a input segmentation by decision tree.
-        
-        The output classname are (**out_class_name** variable):
-            - Vegetation non naturelle
-            - Vegetation semi-naturelle
-            - Herbacees
-            - Ligneux
-            - Ligneux mixtes
-            - Ligneux denses
-            - Forte phytomasse
-            - Moyenne phytomasse
-            - Faible phytomasse
-        
-        :param vector_used: Input/Output shapefile to clip (path)
-        :type vector_used: str
-        :param vector_cut: Area shapefile (path)
-        :type vector_cut: str
-        :param output_file: Output shapefile cartography. This path is the same than the segmentation path.
-        :type output_file: str
-        :param out_class_name: List of output class name
-        :type out_class_name: list of str
-        :param out_threshold: List of output threshold
-        :type out_threshold: list of str
-        :param max_...: Biomass and density (IDM, SFS index) maximum
-        :type max_...: float
-        :param class_tab_final: Final decision tree table
-        :type class_tab_final: dict
-        :param stats_rpg_tif: Dictionnary with pxl percent in every polygon of class RPG. Mainly 'Maj_count' (Majority Value) and 'Maj_count_perc' (Majority Percent)
-        :type stats_rpg_tif: dict
-    """
-    
-    def __init__(self, used, cut):
-        """
-            Create a new 'Clustering' instance
-        """
+	
+	"""
+		Segmentation class inherits the super Vector class properties. This class creates the final shapefile : the cartography 
+		on an input segmentation by decision tree.
+		
+		The output classname are (**out_class_name** variable):
+			- Vegetation non naturelle
+			- Vegetation semi-naturelle
+			- Herbacees
+			- Ligneux
+			- Ligneux mixtes
+			- Ligneux denses
+			- Forte phytomasse
+			- Moyenne phytomasse
+			- Faible phytomasse
+		
+		@param vector_used: Input/Output shapefile to clip (path)
+		@type vector_used: str
 
-        self.logger = Outils.Log("Log", "Segmentation")
+		@param vector_cut: Area shapefile (path)
+		@type vector_cut: str
 
-        Vector.__init__(self, used, cut)
-       
-        self.stats_rpg_tif = {}
-        self.output_file =  'Final_classification.shp'
-        
-        self.out_class_name = []
-        self.out_threshold = []
-        
-        self.class_tab_final = defaultdict(list)
-      
-        self.max_wood_idm = 0
-        self.max_wood_sfs = 0
-        self.max_bio = 0
-    
-    def create_cartography(self, out_fieldnames, out_fieldtype):
-        """
-        Function to create a output shapefile. In this output file,
-        there is the final cartography. With output defined field names
-        and field type in the main process.
+		@param output_file: Output shapefile cartography. This path is the same as the segmentation path.
+		@type output_file: str
 
-        :param out_fieldnames: List of output field names
-        :type out_fieldnames: list of str
-        :param out_fieldtype: List of outpu field type
-        :type out_fieldtype: list of str
-        """
-        
-        shp_ogr_ds = self.data_source
-        shp_ogr = self.data_source.GetLayer()
-                        
-        # Projection
-        # Import input shapefile projection
-        srsObj = shp_ogr.GetSpatialRef()
-        # Conversion to syntax ESRI
-        srsObj.MorphToESRI()
-        
-        ## Remove the output final shapefile if it exists
-        self.vector_used = self.output_file
-        if os.path.exists(self.vector_used):
-            self.data_source.GetDriver().DeleteDataSource(self.vector_used)
-        out_ds = self.data_source.GetDriver().CreateDataSource(self.vector_used)
-        
-        if out_ds is None:
-            self.logger.error('Could not create file')
-            sys.exit(1)
-            
-        #  Specific output layer
-        out_layer = out_ds.CreateLayer(str(self.vector_used), srsObj, geom_type=ogr.wkbMultiPolygon)
-        
-        # Add new fields
-        # To add RPG_CODE field
-        out_fieldnames.insert(2,'RPG_CODE')
-        out_fieldtype.insert(2,ogr.OFTInteger)
-        # To add the others
-        for i in range(0, len(out_fieldnames)):
-            fieldDefn = ogr.FieldDefn(str(out_fieldnames[i]), out_fieldtype[i])
-            out_layer.CreateField(fieldDefn)
-        # Add 2 fields to convert class string in code to confusion matrix
-        fieldDefn = ogr.FieldDefn('FBPHY_CODE', ogr.OFTInteger)
-        out_layer.CreateField(fieldDefn)
-        out_fieldnames.append('FBPHY_CODE')
-        fieldDefn = ogr.FieldDefn('FBPHY_SUB', ogr.OFTInteger)
-        out_layer.CreateField(fieldDefn)
-        out_fieldnames.append('FBPHY_SUB')
-        
-        # Feature for the ouput shapefile
-        featureDefn = out_layer.GetLayerDefn()
-        
-        in_feature = shp_ogr.SetNextByIndex(0) # Polygons initialisation
-        in_feature = shp_ogr.GetNextFeature()
-        
-        # Loop on input polygons
-        while in_feature:
-            
-            geom = in_feature.GetGeometryRef() # Extract input geometry
+		@param out_class_name: List of output class names
+		@type out_class_name: list of str
 
-            # Create a new polygon
-            out_feature = ogr.Feature(featureDefn)
-        
-            # Set the polygon geometry and attribute
-            out_feature.SetGeometry(geom)
-            # Set the existing ID 
-            out_feature.SetField(out_fieldnames[0], in_feature.GetField(self.field_names[2]))
-            # Set the area
-            out_feature.SetField(out_fieldnames[1], geom.GetArea()/10000)
-            
-            # Set the RPG column
-            recouv_crops_RPG = 0
-            pourc_inter = self.stats_rpg_tif[in_feature.GetFID()]['Maj_count_perc']
-            if pourc_inter >= 85:
-                recouv_crops_RPG = self.stats_rpg_tif[in_feature.GetFID()]['Maj_count']
-               
-            out_feature.SetField('RPG_CODE', int(recouv_crops_RPG))
-            
-            # Set the others polygons fields with the decision tree dictionnary
-            for i in range(3, len(out_fieldnames)):
-                # If list stopped it on the second level, complete by empty case
-                if len(self.class_tab_final[in_feature.GetFID()]) < len(out_fieldnames)-3 and \
-                                                    self.class_tab_final[in_feature.GetFID()] != []:
-                    self.class_tab_final[in_feature.GetFID()].insert(len(self.class_tab_final[in_feature.GetFID()])-3,'') # To 3rd level
-                    self.class_tab_final[in_feature.GetFID()].insert(len(self.class_tab_final[in_feature.GetFID()])-3,0) # To degree
-                    
-                try:
-                    # To the confusion matrix, replace level ++ by level --
-                    if i == len(out_fieldnames)-1:
-                        if self.class_tab_final[in_feature.GetFID()][i-3] == 6:
-                            # Crops to artificial vegetation
-                            self.class_tab_final[in_feature.GetFID()][i-4] = 0
-#                         if self.class_tab_final[in_feature.GetFID()][i-3] == 2:
-#                             # Grassland to natural vegetation
-#                             self.class_tab_final[in_feature.GetFID()][i-3] = 1
-                        if self.class_tab_final[in_feature.GetFID()][i-3] > 7:
-                            # Phytomass to natural vegetation
-                            self.class_tab_final[in_feature.GetFID()][i-4] = 1
-                            
-                    out_feature.SetField(str(out_fieldnames[i]), self.class_tab_final[in_feature.GetFID()][i-3])
-                except:
-#                     pass
-                    for i in range(3, len(out_fieldnames)-3):
-                        out_feature.SetField(str(out_fieldnames[i]), 'Undefined')
-                    out_feature.SetField('FBPHY_CODE', 255)
-                    out_feature.SetField('FBPHY_SUB', 255)
-#                     sys.exit(1)
-            # Append polygon to the output shapefile
-            out_layer.CreateFeature(out_feature)
-    
-            # Destroy polygons
-            out_feature.Destroy()    
-            in_feature.Destroy()
-            
-            # Next polygon
-            in_feature = shp_ogr.GetNextFeature()
-            
-        # Close data
-        out_ds.Destroy() 
+		@param out_threshold: Output thresholds stored by key (e.g. 'STDMORE', 'STDLESS', 'PREDICTION')
+		@type out_threshold: dict
 
-    def decision_tree(self, combin_tree):
-        """
-        Function to build the decision tree. Taking account output threshold and input 
-        class name.
-        
-        :param combin_tree: Decision tree combination
-        :type combin_tree: list of number class name
-            
-        """
-        
-        # Combination tree on a sentence. Every sentence will be in a table.
-        cond_tab = []
-        for ct in combin_tree:
-            cond_a = '' # Condition Term
-            c = 0
-            while c < len(ct):  
-                # Loop on tree combinaison                    
-                if self.out_threshold[ct[c]] =='':
-                    # For interval condition
-                    cond_a = cond_a + 'self.stats_dict[ind_stats][' + str(ct[c]/2) + ']' +\
-                             self.out_threshold[ct[c]-1].replace('>', '<=') + \
-                             ' and self.stats_dict[ind_stats][' + str(ct[c]/2) + ']' +\
-                             self.out_threshold[ct[c]+1].replace('<', '>=') + ' and '
-                else:
-                    cond_a = cond_a + 'self.stats_dict[ind_stats][' + str(ct[c]/2) + ']' +\
-                            self.out_threshold[ct[c]] + ' and '
-                c = c + 1
-            cond_tab.append(cond_a[:-5]) # Remove the last 'and'
+		@param max_...: Biomass and density (IDM, SFS index) maximum
+		@type max_...: float
 
-        # Loop on every value 
-        for ind_stats in range(len(self.stats_dict)):
-            # Loop on decision tree combination.
-            for cond in cond_tab:
-                # Add class name in the output table
-                try:
-                    if eval(cond):
-                        self.class_tab_final[ind_stats] = [self.out_class_name[s] \
-                                                           for s in combin_tree[cond_tab.index(cond)]] + \
-                                                           [combin_tree[cond_tab.index(cond)][len(combin_tree[cond_tab.index(cond)])-1]] + \
-                                                           [combin_tree[cond_tab.index(cond)][len(combin_tree[cond_tab.index(cond)])-1]]
-                except NameError:
-                    # If there is 'nan' in the table statistics
-                    if eval(cond.replace('nan','-10000')):# If there is 'nan' in the table statistics
-                        self.class_tab_final[ind_stats] = [self.out_class_name[s] \
-                                                           for s in combin_tree[cond_tab.index(cond)]] + \
-                                                           [combin_tree[cond_tab.index(cond)][len(combin_tree[cond_tab.index(cond)])-1]] + \
-                                                           [combin_tree[cond_tab.index(cond)][len(combin_tree[cond_tab.index(cond)])-1]]
-    
-    def compute_biomass_density(self, method='SEATH'):
-        """
-        Function to compute the biomass and density distribution.
-        It returns threshold of biomass level.
-        
-        :param method: Classification method used. It can set 'SEATH' (by default) or 'RF'
-        :type method: str
-        """
-        
-        self.logger.info("Calcul densité : ")
-        if method == 'SEATH':
-            distri = [v[1:] for k, v in self.stats_dict.items() if eval('v[0]' + self.out_threshold[1])]
-            
-            distri_bio = []
-            distri_den = []
-            for b in distri:
-                if eval('b[0]' + self.out_threshold[2]) and b[len(b)-1] != float('inf') and b[len(b)-1] != float('nan') and b[len(b)-1] < 1:
-                    distri_bio.append(b)
-                else:
-                    distri_den.append(b)
-        elif method == 'RF':
+		@param class_tab_final: Final decision tree table
+		@type class_tab_final: dict
 
-            distri = [v[1:] for k, v in self.stats_dict.items() if not self.out_threshold[k] in [0,6,7]]
-            
-            distri_bio = []
-            distri_den = []
+		@param stats_rpg_tif: Dictionary with the pixel percentage in every polygon of class RPG.
+		 Mainly 'Maj_count' (Majority Value) and 'Maj_count_perc' (Majority Percent)
+		@type stats_rpg_tif: dict
+	"""
+	
+	def __init__(self, used, cut):
+		"""
+			Create a new 'Segmentation' instance
+		"""
 
-            for b in distri:
-                if self.out_threshold[distri.index(b)] in [1,2,8,9,10] and b[len(b)-1] != -10000 and b[len(b)-1] < 1:
-                    distri_bio.append(b)
-                else:
-                    distri_den.append(b)
+		self.logger = Outils.Log("Log", "Segmentation")
 
-            # Set this variable used normally to define threshold of the classification with SEATH method
-            self.out_threshold = []
+		Vector.__init__(self, used, cut)
+	   
+		self.stats_rpg_tif = {}
+		self.output_file =  'Final_classification.shp'
+		
+		self.out_class_name = []
+		# self.out_threshold = []
+		self.out_threshold = dict()
 
-        # Transpose table        
-        t_distri_bio = list(map(list, zip(*distri_bio)))
-        t_distri_den = list(map(list, zip(*distri_den)))
-       
-        # Biomass threshold
-        stdmore =  (np.mean(t_distri_bio[2]) + np.std(t_distri_bio[2]))/np.max(t_distri_bio[2])
-        stdless =  (np.mean(t_distri_bio[2]) - np.std(t_distri_bio[2]))/np.max(t_distri_bio[2])
-        self.out_threshold.append('>'+str(stdmore))
-        self.out_threshold.append('')
-        self.out_threshold.append('<'+str(stdless))
-        
-        # Compute density and biomass maximum
-        self.max_wood_idm = np.max(t_distri_den[1])
-        self.max_wood_sfs = np.max(t_distri_den[0])
-        self.max_bio = np.max(t_distri_bio[2])
-        
-    def append_scale(self, select_class, form):
-        """
-        Function to complete the 'class_tab_final' list with density and biomass information.
-        This list will be used to build the final shapefile.
-        
-        :param select_class: Class name to add degree
-        :type select_class: str
-        :param form: Formula to add degree
-        :type form: str
-        
-        """
+		self.class_tab_final = defaultdict(list)
 
-        for ind_stats in range(len(self.stats_dict)):
-            # Only valid on the second level
-            try:
-                if self.class_tab_final[ind_stats][1] == select_class:
-                    self.class_tab_final[ind_stats].insert(len(self.class_tab_final[ind_stats])-2,eval(form))
-                    # To add phytomasse
-                    try :
-                        if self.class_tab_final[ind_stats][self.class_tab_final[ind_stats].index(eval(form))-1] == '' and \
-                            self.class_tab_final[ind_stats][self.class_tab_final[ind_stats].index(eval(form))-2] != '':
-                            # dict[][A.index(real_pourcent)-1] == '' and dict[][A.index(real_pourcent)-2] != ''
-                            # Print phytomasse class in the tab because of self.in_class_name in the Processing class
-                            if not eval(form + self.out_threshold[0]) and not eval(form + self.out_threshold[2]):
-                                self.class_tab_final[ind_stats][self.class_tab_final[ind_stats].index(eval(form))-1] = self.out_class_name[9]
-                                self.class_tab_final[ind_stats][len(self.class_tab_final[ind_stats])-1] = 9
-                            elif eval(form + self.out_threshold[0]):
-                                self.class_tab_final[ind_stats][self.class_tab_final[ind_stats].index(eval(form))-1] = self.out_class_name[8]
-                                self.class_tab_final[ind_stats][len(self.class_tab_final[ind_stats])-1] = 8
-                            elif eval(form + self.out_threshold[2]):
-                                self.class_tab_final[ind_stats][self.class_tab_final[ind_stats].index(eval(form))-1] = self.out_class_name[10]
-                                self.class_tab_final[ind_stats][len(self.class_tab_final[ind_stats])-1] = 10
-                    except ValueError:
-                        pass
-                        
-            except IndexError:
-                pass
-                
-                
\ No newline at end of file
+		self.max_wood_idm = 0
+		self.max_wood_sfs = 0
+		self.max_bio = 0
+	
+	def create_cartography(self, out_fieldnames, out_fieldtype):
+		"""
+		Function to create a output shapefile. In this output file,
+		there is the final cartography. With output defined field names
+		and field type in the main process.
+
+		:param out_fieldnames: List of output field names
+		:type out_fieldnames: list of str
+		:param out_fieldtype: List of outpu field type
+		:type out_fieldtype: list of str
+		"""
+		
+		shp_ogr_ds = self.data_source
+		shp_ogr = self.data_source.GetLayer()
+						
+		# Projection
+		# Import input shapefile projection
+		srsObj = shp_ogr.GetSpatialRef()
+		# Conversion to syntax ESRI
+		srsObj.MorphToESRI()
+		
+		## Remove the output final shapefile if it exists
+		self.vector_used = self.output_file
+		if os.path.exists(self.vector_used):
+			self.data_source.GetDriver().DeleteDataSource(self.vector_used)
+		
+		out_ds = self.data_source.GetDriver().CreateDataSource(self.vector_used)
+		
+		if out_ds is None:
+			self.logger.error('Could not create file')
+			sys.exit(1)
+			
+		#  Specific output layer
+		out_layer = out_ds.CreateLayer(self.vector_used, srsObj, geom_type=ogr.wkbMultiPolygon)
+		
+		# Add new fields
+		# To add RPG_CODE field
+		out_fieldnames.insert(2,'RPG_CODE')
+		out_fieldtype.insert(2,ogr.OFTInteger)
+		# To add the others
+		for i in range(0, len(out_fieldnames)):
+			fieldDefn = ogr.FieldDefn(str(out_fieldnames[i]), out_fieldtype[i])
+			out_layer.CreateField(fieldDefn)
+
+		# Add 2 fields to convert class string in code to confusion matrix
+		fieldDefn = ogr.FieldDefn('FBPHY_CODE', ogr.OFTInteger)
+		out_layer.CreateField(fieldDefn)
+		out_fieldnames.append('FBPHY_CODE')
+		fieldDefn = ogr.FieldDefn('FBPHY_SUB', ogr.OFTInteger)
+		out_layer.CreateField(fieldDefn)
+		out_fieldnames.append('FBPHY_SUB')
+		
+		# Feature definition for the output shapefile
+		featureDefn = out_layer.GetLayerDefn()
+		
+		in_feature = shp_ogr.SetNextByIndex(0) # Polygons initialisation
+		in_feature = shp_ogr.GetNextFeature()
+		
+		# Loop on input polygons
+		while in_feature:
+			
+			geom = in_feature.GetGeometryRef() # Extract input geometry
+
+			# Create a new polygon
+			out_feature = ogr.Feature(featureDefn)
+		
+			# Set the polygon geometry and attribute
+			out_feature.SetGeometry(geom)
+			# Set the existing ID 
+			out_feature.SetField(out_fieldnames[0], in_feature.GetField(self.field_names[2]))
+			# Set the area
+			out_feature.SetField(out_fieldnames[1], geom.GetArea()/10000)
+			
+			# Set the RPG column
+			recouv_crops_RPG = 0
+			pourc_inter = self.stats_rpg_tif[in_feature.GetFID()]['Maj_count_perc']
+			if pourc_inter >= 85:
+				recouv_crops_RPG = self.stats_rpg_tif[in_feature.GetFID()]['Maj_count']
+			   
+			out_feature.SetField('RPG_CODE', int(recouv_crops_RPG))
+			
+			# Set the other polygon fields with the decision tree dictionary
+			for i in range(3, len(out_fieldnames)):
+				# If the list stopped at the second level, complete with an empty case
+				if len(self.class_tab_final[in_feature.GetFID()]) < len(out_fieldnames)-3 and \
+													self.class_tab_final[in_feature.GetFID()] != []:
+					self.class_tab_final[in_feature.GetFID()].insert(len(self.class_tab_final[in_feature.GetFID()])-3,'') # To 3rd level
+					self.class_tab_final[in_feature.GetFID()].insert(len(self.class_tab_final[in_feature.GetFID()])-3,0) # To degree
+					
+				try:
+					# For the confusion matrix, replace level ++ by level --
+					if i == len(out_fieldnames)-1:
+						if self.class_tab_final[in_feature.GetFID()][i-3] == 6:
+							# Crops to artificial vegetation
+							self.class_tab_final[in_feature.GetFID()][i-4] = 0
+						# if self.class_tab_final[in_feature.GetFID()][i-3] == 2:
+						# 	Grassland to natural vegetation
+						# 	self.class_tab_final[in_feature.GetFID()][i-3] = 1
+						if self.class_tab_final[in_feature.GetFID()][i-3] > 7:
+							# Phytomass to natural vegetation
+							self.class_tab_final[in_feature.GetFID()][i-4] = 1
+							
+					out_feature.SetField(str(out_fieldnames[i]), self.class_tab_final[in_feature.GetFID()][i-3])
+				except:
+
+					for i in range(3, len(out_fieldnames)-3):
+						out_feature.SetField(str(out_fieldnames[i]), 'Undefined')
+					
+					out_feature.SetField('FBPHY_CODE', 255)
+					out_feature.SetField('FBPHY_SUB', 255)
+
+			# Append polygon to the output shapefile
+			out_layer.CreateFeature(out_feature)
+	
+			# Destroy polygons
+			out_feature.Destroy()	
+			in_feature.Destroy()
+			
+			# Next polygon
+			in_feature = shp_ogr.GetNextFeature()
+			
+		# Close data
+		out_ds.Destroy() 
+
+	def decision_tree(self, combin_tree):
+		"""
+			Function to build the decision tree, taking into account the output
+			thresholds and the input class names.
+			
+			@param combin_tree: Decision tree combination
+			@type combin_tree: list of class name numbers
+		"""
+		
+		# Combination tree on a sentence. Every sentence will be in a table.
+		cond_tab = []
+		for ct in combin_tree:
+			cond_a = '' # Condition Term
+			c = 0
+			while c < len(ct):  
+				# Loop on tree combinations
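+				# NB : integer division (//) keeps the eval'd index an int under Python 3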
+				if self.out_threshold[ct[c]] == '' :
+					# For interval condition
+					cond_a = cond_a + 'self.stats_dict[ind_stats][' + str(ct[c]//2) + ']' +\
+							 self.out_threshold[ct[c]-1].replace('>', '<=') + \
+							 ' and self.stats_dict[ind_stats][' + str(ct[c]//2) + ']' +\
+							 self.out_threshold[ct[c]+1].replace('<', '>=') + ' and '
+				else:
+					cond_a = cond_a + 'self.stats_dict[ind_stats][' + str(ct[c]//2) + ']' +\
+							self.out_threshold[ct[c]] + ' and '
+				c = c + 1
+			cond_tab.append(cond_a[:-5]) # Remove the last 'and'
+
+		# Loop on every value 
+		for ind_stats in range(len(self.stats_dict)):
+			# Loop on decision tree combination.
+			for cond in cond_tab:
+				# Add class name in the output table
+				try:
+					if eval(cond):
+						self.class_tab_final[ind_stats] = [self.out_class_name[s] \
+														   for s in combin_tree[cond_tab.index(cond)]] + \
+														   [combin_tree[cond_tab.index(cond)][len(combin_tree[cond_tab.index(cond)])-1]] + \
+														   [combin_tree[cond_tab.index(cond)][len(combin_tree[cond_tab.index(cond)])-1]]
+				except NameError:
+					# If there is 'nan' in the table statistics
+					if eval(cond.replace('nan','-10000')):
+						self.class_tab_final[ind_stats] = [self.out_class_name[s] \
+														   for s in combin_tree[cond_tab.index(cond)]] + \
+														   [combin_tree[cond_tab.index(cond)][len(combin_tree[cond_tab.index(cond)])-1]] + \
+														   [combin_tree[cond_tab.index(cond)][len(combin_tree[cond_tab.index(cond)])-1]]
+	
+	def compute_biomass_density(self, method='SEATH'):
+		"""
+			Function to compute the biomass and density distribution.
+			It computes the thresholds of the biomass level.
+			
+			@param method: Classification method used. It can be set to 'SEATH' (by default) or 'RF'
+			@type method: str
+		"""
+		
+		if method == 'SEATH':
+			distri = [v[1:] for k, v in self.stats_dict.items() if eval('v[0]' + self.out_threshold[1])]
+			
+			distri_bio = []
+			distri_den = []
+			for b in distri:
+				if eval('b[0]' + self.out_threshold[2]) and b[len(b)-1] != float('inf') and b[len(b)-1] != float('nan') and b[len(b)-1] < 1:
+					distri_bio.append(b)
+				else:
+					distri_den.append(b)
+		elif method == 'RF':
+
+			distri = [v[1:] for k, v in self.stats_dict.items() if self.out_threshold["PREDICTION"][k] not in [0,6,7]]
+			
+			distri_bio = []
+			distri_den = []
+
+			for b in distri:
+				if self.out_threshold["PREDICTION"][distri.index(b)] in [1,2,8,9,10]\
+				 and b[len(b)-1] != -10000 and b[len(b)-1] < 1:
+					distri_bio.append(b)
+				else:
+					distri_den.append(b)
+
+		# Transpose table		
+		t_distri_bio = list(map(list, zip(*distri_bio)))
+		t_distri_den = list(map(list, zip(*distri_den)))
+	   
+		# Biomass threshold
+		stdmore =  (np.mean(t_distri_bio[2]) + np.std(t_distri_bio[2])) / np.max(t_distri_bio[2])
+		stdless =  (np.mean(t_distri_bio[2]) - np.std(t_distri_bio[2])) / np.max(t_distri_bio[2])
+
+		# self.out_threshold.append('<'+str(stdmore))
+		# self.out_threshold.append('')
+		# self.out_threshold.append('>'+str(stdless))
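+		# Biomass thresholds (mean ± std, normalised by the max), reused in append_scale()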
+		self.out_threshold["STDMORE"] = stdmore
+		self.out_threshold["STDLESS"] = stdless
+
+		# Compute density and biomass maximum
+		self.max_wood_idm = np.max(t_distri_den[1])
+		self.max_wood_sfs = np.max(t_distri_den[0])
+		self.max_bio = np.max(t_distri_bio[2])
+		
+	def append_scale(self, select_class, form):
+		"""
+			Function to complete the 'class_tab_final' list with density and biomass information.
+			This list will be used to build the final shapefile.
+			
+			@param select_class: Class name to add degree
+			@type select_class: str
+
+			@param form: Formula to add degree
+			@type form: str	
+		"""
+
+		for ind_stats in range(len(self.stats_dict)):
+			# Only valid on the second level
+			try:
+				if self.class_tab_final[ind_stats][1] == select_class:
+					self.class_tab_final[ind_stats].insert(len(self.class_tab_final[ind_stats])-2,eval(form))
+					# To add phytomasse
+					try :
+						if self.class_tab_final[ind_stats][self.class_tab_final[ind_stats].index(eval(form))-1] == '' and \
+							self.class_tab_final[ind_stats][self.class_tab_final[ind_stats].index(eval(form))-2] != '':
+							# Write the phytomass class in the tab, based on self.in_class_name
+							# from the Processing class
+							if not eval("{0}>{1}".format(form, self.out_threshold["STDMORE"])) and not \
+							eval("{0}<{1}".format(form, self.out_threshold["STDLESS"])):
+								self.class_tab_final[ind_stats][self.class_tab_final[ind_stats].index(eval(form))-1] = self.out_class_name[9]
+								self.class_tab_final[ind_stats][len(self.class_tab_final[ind_stats])-1] = 9
+							elif eval("{0}>{1}".format(form, self.out_threshold["STDMORE"])):
+								self.class_tab_final[ind_stats][self.class_tab_final[ind_stats].index(eval(form))-1] = self.out_class_name[8]
+								self.class_tab_final[ind_stats][len(self.class_tab_final[ind_stats])-1] = 8
+							elif eval("{0}<{1}".format(form, self.out_threshold["STDLESS"])):
+								self.class_tab_final[ind_stats][self.class_tab_final[ind_stats].index(eval(form))-1] = self.out_class_name[10]
+								self.class_tab_final[ind_stats][len(self.class_tab_final[ind_stats])-1] = 10
+					except ValueError:
+						pass
+			except IndexError:
+				pass
+				
+				
\ No newline at end of file
diff --git a/Toolbox.py b/Toolbox.py
index da50af53cf5f25a252e04f5227c30a399589fb5a..d12d207ae77b4195762570a9a1ab37b1a29311cb 100644
--- a/Toolbox.py
+++ b/Toolbox.py
@@ -22,135 +22,154 @@ from datetime import date
 import os, sys, subprocess, glob
 import numpy as np
 import json
+import Constantes
 import Outils
 
 class Toolbox(object):
-    """
-        Class used to grouped small tools to cut raster or compute statistics on a raster. 
-            
-        :param imag: Input image (path)
-        :type imag: str
-        
-        :param vect: Extent shapefile (path)
-        :type vect: str
-    """
-    
-    def __init__(self):
-        """
-            Create a new 'Toolbox' instance
-        """
-        self.image = ''
-        self.vect = ''
-        self.logger = Outils.Log("Log", "Toolbox")
-    
-    def clip_raster(self, **kwargs):
-        """
-            Function to clip a raster with a vector. 
-            The raster created will be in the same folder than the input raster.
-            With a prefix 'Clip_'.
-            
-            :kwargs: **rm_rast** (int) - 0 (by default) or 1. Variable to remove the output raster. 0 to keep and 1 to remove.
-            
-            :returns: str -- variable **outclip**, output raster clip (path).
-        """
-        
-        rm_rast = kwargs['rm_rast'] if kwargs.get('rm_rast') else 0
-        outclip = os.path.split(str(self.image))[0] + '/Clip_' + os.path.split(str(self.image))[1]
-        if not os.path.exists(outclip) or rm_rast == 1:
-            self.logger.info("Raster clip of {0}".format(os.path.split(str(self.image))[1]))
-            # Command to clip a raster with a shapefile by Gdal
-            process_tocall_clip = ['gdalwarp', '-overwrite', '-dstnodata', '-10000', '-q', '-cutline', self.vect, '-crop_to_cutline', '-of', 'GTiff', self.image, outclip]
-            # This is a trick to remove warning with the polygons that touch themselves
-            try:
-                r = subprocess.call(process_tocall_clip)
-                if r == 1:
-                    sys.exit(1)
-                    
-            except SystemExit:
-                self.logger.info("Dissolve vector cut of the validation !")
-                
-                vect_2 = self.vect[:-4] + '_v2.shp'
-                preprocess_tocall = 'ogr2ogr -overwrite ' + vect_2 + ' ' + self.vect + ' -dialect sqlite -sql "SELECT ST_Union(geometry), * FROM ' + \
-                                        os.path.basename(self.vect)[:-4] +'"'
-                os.system(preprocess_tocall)
-                self.logger.info("Raster clip of {0}".format(os.path.split(str(self.image))[1]))
-                process_tocall_clip = ['gdalwarp', '-overwrite', '-dstnodata', '-10000', '-q', '-cutline', vect_2, '-crop_to_cutline', '-of', 'GTiff', self.image, outclip]
-                subprocess.call(process_tocall_clip)
-                
-                for rem in glob.glob(vect_2[:-4] + '*'):
-                    os.remove(rem)
-                    
-        return outclip
-        
-    def calc_serie_stats(self, table):
-        """
-            Function to compute stats on temporal cloud and ndvi spectral table
-            Ndvi stats : min    max    std    max-min
-            
-            :param table: Spectral data, cloud raster and ndvi raster
-            :type table: numpy.ndarray
-            :returns: list of numpy.ndarray -- variable **account_stats**, list of temporal NDVI stats.
-                          
-                        numpy.ndarray -- variable **account_cloud**, pixel number clear on the area.
-        """ 
-        
-        # Compute stats on these indexes
-        ind = ['np.min(tab_ndvi_masked, axis=2)', 'np.max(tab_ndvi_masked, axis=2)', 'np.std(tab_ndvi_masked, axis=2)', \
-               'np.max(tab_ndvi_masked, axis=2)-np.min(tab_ndvi_masked, axis=2)'] # [Min, Max, Std, Max-Min]     
-        # For the cloud map 
-        # In the input table the cloud floor is the 5th
-        tab_cloud = np.dstack(table[5]) # Stack cloud table (dimension : 12*X*Y to X*Y*12)
-        
-        cloud_true = (tab_cloud == 0) # if tab_cloud = 0 then True else False / Mask cloud
-        account_cloud = np.sum(cloud_true, axis=2) # Account to tab_cloud if != 0 => Sum of True. (Dimension X*Y)
-        
-        # For the ndvi stats
-        # In the input table the ndvi floor is the 7th
-        stack_ndvi = np.dstack(table[7]) # Like cloud table, stack ndvi table
-        # mask_ndvi = np.ma.masked_equal(stack_ndvi, -10000, copy=True) # Mask values -10000
-        mask_cloud = np.ma.masked_where((stack_ndvi == -10000) | (tab_cloud != 0), tab_cloud) # Mask values -10000 and > 0(Not cloud)
-        # mask_cloud = (tab_cloud != 0) | (stack_ndvi == -10000)
-        tab_ndvi_masked = np.ma.array(stack_ndvi, mask=mask_cloud.mask) # mask_cloud.mask) # Ndvi table with clear values 
-        
-        # Stats on the indexes defined above
-        account_stats = []
-        for i in ind:
-            i_stats = eval(i) # Compute stats
-            i_stats.fill_value = -10000 # Substitute default fill value by -10000 
-            account_stats.append(i_stats.filled()) # Add stats table with true fill value
-            # To extract index of the minimum ndvi to find the date
-            if ind.index(i) == 0:
-                i_date = eval(i.replace('np.min','np.argmin'))
-                
-                # To create a matrix with the date of pxl with a ndvi min 
-                tab_date = np.transpose(np.array(table[:3], dtype=object))
-                # Loop on the temporal sequency
-                for d in range(len(tab_date)):
-                    mask_data = np.ma.masked_values(i_date, d)
-                    i_date = mask_data.filled(date(int(tab_date[d][0]), int(tab_date[d][1]), int(tab_date[d][2])).toordinal()) # Date = day for year =1 day = 1 and month = 1
-                    
-                account_stats.append(i_date) # Add the date table
-                
-        return account_stats, account_cloud
-    
-    def check_proj(self):
-        """
-            Function to check if raster's projection is RFG93. 
-            For the moment, PHYMOBAT works with one projection only Lambert 93 EPSG:2154 
-        """
-
-        # Projection for PHYMOBAT
-        epsg_phymobat = '2154'
-        proj_phymobat = 'AUTHORITY["EPSG","' + epsg_phymobat + '"]'
-
-        info_gdal = 'gdalinfo -json ' + self.image
-        info_json = json.loads(subprocess.check_output(info_gdal, shell=True))
-        # Check if projection is in Lambert 93
-        if not proj_phymobat in info_json['coordinateSystem']['wkt']:
-            output_proj = self.image[:-4] + '_L93.tif'
-            reproj = 'gdalwarp -t_srs EPSG:' + epsg_phymobat + ' ' + self.image + ' ' + output_proj
-            os.system(reproj)
-            # Remove old file and rename new file like the old file
-            os.remove(self.image)
-            os.rename(output_proj, self.image)
-        
\ No newline at end of file
+	"""
+		Class used to group small tools to cut a raster or compute statistics on a raster.
+			
+		:param imag: Input image (path)
+		:type imag: str
+		
+		:param vect: Extent shapefile (path)
+		:type vect: str
+	"""
+	
+	def __init__(self):
+		"""
+			Create a new 'Toolbox' instance
+		"""
+		self.image = ''
+		self.vect = ''
+		self.logger = Outils.Log("Log", "Toolbox")
+	
+	def clip_raster(self, **kwargs):
+		"""
+			Function to clip a raster with a vector. 
+			The raster created will be in the same folder as the input raster,
+			with the prefix 'Clip_'.
+			
+			:kwargs: **rm_rast** (int) - 0 (by default) or 1. Variable to remove the output raster. 0 to keep and 1 to remove.
+			
+			:returns: str -- variable **outclip**, output raster clip (path).
+		"""
+		
+		rm_rast = kwargs['rm_rast'] if kwargs.get('rm_rast') else 0
+		outclip = os.path.split(str(self.image))[0] + '/Clip_' + os.path.split(str(self.image))[1]
+		if not os.path.exists(outclip) or rm_rast == 1:
+			self.logger.info("Raster clip of {0}".format(os.path.split(str(self.image))[1]))
+			# Command to clip a raster with a shapefile by Gdal
+			process_tocall_clip = ['gdalwarp', '-overwrite', '-dstnodata', '-10000', '-q', '-cutline', self.vect, '-crop_to_cutline', '-of', 'GTiff', self.image, outclip]
+			# This is a trick to work around warnings raised by polygons that touch themselves
+			try:
+				r = subprocess.call(process_tocall_clip)
+				if r == 1:
+					sys.exit(1)
+					
+			except SystemExit:
+				self.logger.info("Dissolve vector cut of the validation !")
+				
+				vect_2 = self.vect[:-4] + '_v2.shp'
+				preprocess_tocall = 'ogr2ogr -overwrite ' + vect_2 + ' ' + self.vect + ' -dialect sqlite -sql "SELECT ST_Union(geometry), * FROM ' + \
+										os.path.basename(self.vect)[:-4] +'"'
+				os.system(preprocess_tocall)
+				self.logger.info("Raster clip of {0}".format(os.path.split(str(self.image))[1]))
+				process_tocall_clip = ['gdalwarp', '-overwrite', '-dstnodata', '-10000', '-q', '-cutline', vect_2, '-crop_to_cutline', '-of', 'GTiff', self.image, outclip]
+				subprocess.call(process_tocall_clip)
+				
+				for rem in glob.glob(vect_2[:-4] + '*'):
+					os.remove(rem)
+					
+		return outclip
+
+		
+	def calc_serie_stats(self, table):
+		"""
+			Function to compute stats on temporal cloud and ndvi spectral table
+			Ndvi stats : min max std max-min
+			
+			@param table: Spectral data, cloud raster and ndvi raster
+			@type table: numpy.ndarray
+			
+			@returns: list of numpy.ndarray -- variable **account_stats**, list of temporal NDVI stats.
+						  
+						numpy.ndarray -- variable **account_cloud**, number of clear pixels over the area.
+		""" 
+
+		# Compute stats on these indexes
+		ind = ['np.min(tab_ndvi_masked, axis=2)', 'np.max(tab_ndvi_masked, axis=2)', 'np.std(tab_ndvi_masked, axis=2)', \
+			   'np.max(tab_ndvi_masked, axis=2)-np.min(tab_ndvi_masked, axis=2)'] # [Min, Max, Std, Max-Min]	 
+		
+
+		# table = ['Year', 'Month', 'Day', 'image path', 'cloud image path',
+		# 'cloud data', 'ndvi image path', 'ndvi data']
+
+		# For the cloud map 
+		
+		# In the input table the cloud data are the 6th entry (table[5])
+		tab_cloud = np.dstack(table[5]) # Stack cloud table (dimension : 12*X*Y to X*Y*12)
+		
+		# True where tab_cloud == 0 (clear pixel) / cloud mask
+		cloud_true = (tab_cloud == 0)
+
+		# Count clear observations per pixel : sum of True along the time axis (dimension X*Y)
+		account_cloud = np.sum(cloud_true, axis=2)
+		
+		# For the ndvi stats
+
+		# In the input table the ndvi data are the 8th entry (table[7])
+		stack_ndvi = np.dstack(table[7]) # Like cloud table, stack ndvi table
+
+		# Mask nodata values (-10000) and cloudy pixels (tab_cloud != 0)
+		mask_cloud = np.ma.masked_where((stack_ndvi == -10000) | (tab_cloud != 0), tab_cloud) 
+
+		# Ndvi table with clear values
+		tab_ndvi_masked = np.ma.array(stack_ndvi, mask=mask_cloud.mask)   
+		
+		# Stats on the indexes defined above
+		account_stats = []
+
+		for i in ind:
+			i_stats = eval(i) # Compute stats
+			i_stats.fill_value = -10000 # Substitute default fill value by -10000 
+			account_stats.append(i_stats.filled()) # Add stats table with true fill value
+			
+			# To extract index of the minimum ndvi to find the date
+			if ind.index(i) == 0:
+				i_date = eval(i.replace('np.min','np.argmin'))
+				
+				# To create a matrix with the date of pxl with a ndvi min 
+				tab_date = np.transpose(np.array(table[:3], dtype=object))
+
+				# Loop on the temporal sequence
+				for d in range(len(tab_date)):
+					mask_data = np.ma.masked_values(i_date, d)
+					i_date = mask_data.filled(date(int(tab_date[d][0]), int(tab_date[d][1]), int(tab_date[d][2])).toordinal()) # toordinal() : days elapsed since 01/01/0001
+					
+				account_stats.append(i_date) # Add the date table
+				
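+		# account_stats order : [min, date of min, max, std, max - min]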
+		return account_stats, account_cloud
+	
+	def check_proj(self):
+		"""
+			Function to check if the raster's projection is RGF93 / Lambert 93.
+			For the moment, PHYMOBAT works with only one projection : Lambert 93 (EPSG:2154)
+		"""
+
+		# Projection for PHYMOBAT
+
+		proj_phymobat = 'AUTHORITY["EPSG","{0}"]'.format(Constantes.EPSG_PHYMOBAT)
+
+		info_gdal = 'gdalinfo -json {0}'.format(self.image)
+		info_json = json.loads(subprocess.check_output(info_gdal, shell=True))
+		
+		# Check if projection is in Lambert 93
+		if proj_phymobat not in info_json['coordinateSystem']['wkt']:
+			output_proj = "{0}_L93.tif".format(self.image[:-4])
+			reproj = "gdalwarp -t_srs EPSG: {0} {1} {2}".format(Constantes.EPSG_PHYMOBAT, self.image, output_proj)
+			os.system(reproj)
+			# Remove old file and rename new file like the old file
+			os.remove(self.image)
+			os.rename(output_proj, self.image)
+		
\ No newline at end of file
diff --git a/Vector.py b/Vector.py
index cdc08736a88ea61e2b7367114f8c0d5a9165eede..615530b923f14bd03dbdcdaf18190f60fec649a0 100644
--- a/Vector.py
+++ b/Vector.py
@@ -67,7 +67,8 @@ class Vector():
 			Create a new 'Vector' instance   
 		"""
 
-		self.logger = Outils.Log("Log", 'Vector')
+		if not hasattr(self, "logger") :
+			self.logger = Outils.Log("Log", 'vector')
 
 		self.vector_name = os.path.basename(used)
 		self.vector_folder = os.path.dirname(used)
@@ -91,13 +92,14 @@ class Vector():
 		"""
 			Function to clip a vector with a vector
 		"""	
-	
+		
 		outclip = "{0}/Clip_{1}".format(self.vector_folder, self.vector_name)
 
 		if not os.path.exists(outclip) or self.remove_shp == 1:
 			self.logger.info('Clip of {0}'.format(self.vector_name))
 			# Command to clip a vector with a shapefile by OGR
-			process_tocall_clip =  ['ogr2ogr', '-overwrite', '-skipfailures', outclip, self.vector_used, '-clipsrc', self.vector_cut]
+			process_tocall_clip =\
+			 ['ogr2ogr', '-overwrite', '-skipfailures', outclip, self.vector_used, '-clipsrc', self.vector_cut]
 			subprocess.call(process_tocall_clip)
 		
 		# Replace input filename by output filename
@@ -126,8 +128,8 @@ class Vector():
 	
 	def zonal_stats(self, inraster, band, **kwargs):
 		"""
-			Function to compute the mean in every polygons for a raster
-			because of package ``rasterstats`` in */usr/local/lib/python2.7/dist-packages/rasterstats-0.3.2-py2.7.egg/rasterstats/*
+			Function to compute the mean in every polygon of a raster, using the package ``rasterstats`` 
+			 in */usr/local/lib/python2.7/dist-packages/rasterstats-0.3.2-py2.7.egg/rasterstats/*
 			
 			:param (inraster,band): inraster -> Input image path, and band -> band number
 			:type (inraster,band): tuple
@@ -139,8 +141,9 @@ class Vector():
 		ranking = kwargs['rank'] if kwargs.get('rank') else 0
 		nb_img = kwargs['nb_img'] if kwargs.get('nb_img') else 1
 
-		self.logger.info('Compute {0} stats on {1}'.format(os.path.split(self.vector_used)[1], os.path.split(inraster)[1]))
+		self.logger.info('Compute {0} stats on {1} band {2}'.format(os.path.split(self.vector_used)[1], os.path.split(inraster)[1], band))
 		stats = rasterstats.zonal_stats(self.vector_used, inraster,  stats =['mean'], nodata=float('nan'), band=band)
+		
 		for i in range(len(stats)):
 			temp = defaultdict(lambda : [0]*nb_img)
 			for j in range(nb_img):
@@ -152,7 +155,7 @@ class Vector():
 			temp[0][ranking] = list(stats)[i]['mean']
 			self.stats_dict[i] = temp[0]
 			
-		self.logger.info('End of stats on {0}'.format(os.path.split(str(inraster))[1]))
+		self.logger.info('End of stats on {0}'.format(os.path.split(inraster)[1]))
 
 	def zonal_stats_pp(self, inraster):
 		"""
@@ -194,7 +197,7 @@ class Vector():
 		
 		# Export a example of a raster out information
 		# for the validation shapefile
-		example_raster = RasterSat_by_date('', '', [0]) # Call the raster class
+		example_raster = RasterSat_by_date('', '') # Call the raster class
 		example_raster.choice_nb_b = kwargs['choice_nb_b'] if kwargs.get('choice_nb_b') else 0
 		raster_info = example_raster.raster_data(raster_head)# Extract data info
 		
@@ -209,7 +212,7 @@ class Vector():
 			data_raster = raster_info[0][0]
 		else:
 			data_raster = raster_info[0]
-		info_out = example_raster.create_raster(valid_raster, data_raster, raster_info[1])
+		info_out = example_raster.create_empty_raster(valid_raster, data_raster, raster_info[1])
 		self.raster_ds = example_raster.out_ds
 		
 		# Virtual rasterize the vector 
@@ -224,5 +227,6 @@ class Vector():
 		self.raster_ds = None
 		# Complete the raster creation
 		example_raster.complete_raster(*info_out, new_data)
+		example_raster.logger.close()
 		
 		return valid_raster
\ No newline at end of file
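Note: layer_rasterization burns the sample vector onto the grid of a reference raster through the RasterSat_by_date helpers. A hedged GDAL-only sketch of the same idea; the file names and the 'FIELD' attribute are hypothetical, not the class's API:

    from osgeo import gdal, ogr

    vector_ds = ogr.Open('sample.shp')
    layer = vector_ds.GetLayer()

    ref = gdal.Open('template.tif')  # supplies size, geotransform, projection
    out_ds = gdal.GetDriverByName('GTiff').Create('rasterized.tif',
                                                  ref.RasterXSize, ref.RasterYSize,
                                                  1, gdal.GDT_Byte)
    out_ds.SetGeoTransform(ref.GetGeoTransform())
    out_ds.SetProjection(ref.GetProjection())

    # Burn the polygons' FIELD attribute into band 1
    gdal.RasterizeLayer(out_ds, [1], layer, options=['ATTRIBUTE=FIELD'])
    out_ds = None  # close and flush to disk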
diff --git a/Vhrs.py b/Vhrs.py
index 5b02d673fcc93cd6d3e80583bf6495b4462cf964..b35516ad823654b7436e15fbe2b905b479393c0c 100644
--- a/Vhrs.py
+++ b/Vhrs.py
@@ -23,113 +23,116 @@ import Outils
 from multiprocessing import Process
 
 class Vhrs():
-    
-    """
-    Class to compute Haralick and SFS textures because of OTB application in command line
-    
-    :param imag: The input image path to compute texture image
-    :type imag: str
-    :param out_sfs/out_haralick: Output path
-    :type out_sfs/out_haralick: str
-    :param mp: Boolean variable -> 0 or 1.
-    
-            - 0 means, not multi-processing
-            - 1 means, launch process with multi-processing
-    :type mp: int
-    """
-    
-    def __init__(self, imag, mp):
-        """
-        	Create a new 'Texture' instance
-        """
-        
-        self._imag = imag
-        self.mp = mp
-        self.logger = Outils.Log('Log', "VHRS")
-        
-        self.logger.info('SFS image')
-        self.out_sfs = self._imag[:-4] + '_sfs.TIF'
-        if not os.path.exists(self.out_sfs):
-            self.logger.info('SFS image doesn\'t exist !')
-            p_sfs = Process(target=self.sfs_texture_extraction)
-            p_sfs.start()
-            if mp == 0:
-                p_sfs.join()
-#             self.sfs_texture_extraction()
-            
-        self.logger.info('Haralick image')
-        self.out_haralick = self._imag[:-4] + '_haralick.TIF'
-        if not os.path.exists(self.out_haralick):
-            self.logger.info("Haralick image doesn't exist !")
-            p_har = Process(target=self.haralick_texture_extraction, args=('simple', ))
-            p_har.start()
-            if mp == 0:
-                p_har.join()
-#             self.haralick_texture_extraction('simple')
-        
-        if mp == 1:
-            if not os.path.exists(self.out_sfs) and not os.path.exists(self.out_haralick):
-                p_sfs.join()
-                p_har.join()
-        
-    def sfs_texture_extraction(self):
-        
-        """
-        Function to compute SFS texture image with OTB command line.
-            :Example: otbcli_SFSTextureExtraction -in qb_RoadExtract.tif -channel 1 -parameters.spethre 50.0 -parameters.spathre 100 -out SFSTextures.tif
-            
-            - OTB help :
-                * in : Input Image
-                * channel : Selected Channel
-                * parameters : Texture feature parameters. This group of parameters allows to define SFS texture parameters. The available texture features are SFS’Length, SFS’Width, SFS’PSI, SFS’W-Mean, SFS’Ratio and SFS’SD. They are provided in this exact order in the output image.
-                    - parameters.spethre : Spectral Threshold
-                    - parameters.spathre : Spatial Threshold
-                    - parameters.nbdir : Number of Direction
-                    - parameters.alpha : Alpha
-                    - parameters.maxcons : Ratio Maximum Consideration Number
-                * out : Feature Output Image
-                
-            Source : http://otbcb.readthedocs.org/en/latest/Applications/app_SFSTextureExtraction.html
-        """
-              
-        process_tocall = ['otbcli_SFSTextureExtraction', '-in', self._imag, '-channel', '2', '-parameters.spethre', '50.0', \
-                          '-parameters.spathre', '100', '-out', self.out_sfs]
-        
-        self.logger.info(process_tocall)
-        subprocess.call(process_tocall) 
-        
-    def haralick_texture_extraction(self, texture_choice):
-        
-        """
-        Function to compute Haralick texture image with OTB command line.
-            :Example: otbcli_HaralickTextureExtraction -in qb_RoadExtract.tif -channel 2 -parameters.xrad 3 -parameters.yrad 3 -texture simple -out HaralickTextures.tif
-            
-            - OTB help :
-                * in : Input Image
-                * channel : Selected Channel
-                * Texture feature parameters : This group of parameters allows to define texture parameters.
-                    - X Radius : X Radius
-                    - Y Radius : Y Radius
-                    - X Offset : X Offset
-                    - Y Offset : Y Offset
-                * Image Minimum : Image Minimum
-                * Image Maximum : Image Maximum
-                * Histogram number of bin : Histogram number of bin 
-                * Texture Set Selection Choice of The Texture Set Available choices are :
-                    - Simple Haralick Texture Features: This group of parameters defines the 8 local Haralick texture feature output image. The image channels are: Energy, Entropy, Correlation, Inverse Difference Moment, Inertia, Cluster Shade, Cluster Prominence and Haralick Correlation
-                    - Advanced Texture Features: This group of parameters defines the 9 advanced texture feature output image. The image channels are: Mean, Variance, Sum Average, Sum Variance, Sum Entropy, Difference of Entropies, Difference of Variances, IC1 and IC2
-                    - Higher Order Texture Features: This group of parameters defines the 11 higher order texture feature output image. The image channels are: Short Run Emphasis, Long Run Emphasis, Grey-Level Nonuniformity, Run Length Nonuniformity, Run Percentage, Low Grey-Level Run Emphasis, High Grey-Level Run Emphasis, Short Run Low Grey-Level Emphasis, Short Run High Grey-Level Emphasis, Long Run Low Grey-Level Emphasis and Long Run High Grey-Level Emphasis
-                * out : Feature Output Image 
-                
-            Source : http://otbcb.readthedocs.org/en/latest/Applications/app_HaralickTextureExtraction.html
-        
-        :param texture_choice: Order texture choice -> Simple / Advanced / Higher
-        :type texture_choice: str
-        """
-        
-        process_tocall =  ['otbcli_HaralickTextureExtraction', '-in', self._imag, '-channel', '2', '-parameters.xrad', '3', \
-                           '-parameters.yrad', '3', '-texture', texture_choice, '-out', self.out_haralick]
-        
-        self.logger.info(process_tocall)
-        subprocess.call(process_tocall) 
-        
\ No newline at end of file
+	
+	"""
+		Class to compute Haralick and SFS textures because of OTB application in command line
+		
+		@param imag: The input image path to compute texture image
+		@type imag: str
+
+		@param out_sfs/out_haralick: Output path
+		@type out_sfs/out_haralick: str
+
+		@param mp: Boolean variable -> 0 or 1.
+			- 0 means, not multi-processing
+			- 1 means, launch process with multi-processing
+		@type mp: int
+	"""
+	
+	def __init__(self, imag, mp):
+		"""
+			Create a new 'Texture' instance
+		"""
+		
+		self._imag = imag
+		self.mp = mp
+		self.logger = Outils.Log('Log', "VHRS")
+		
+		self.logger.info('SFS image')
+		self.out_sfs = self._imag[:-4] + '_sfs.TIF'
+		
+		p_sfs = None
+		if not os.path.exists(self.out_sfs):
+			self.logger.info("SFS image doesn't exist !")
+			p_sfs = Process(target=self.sfs_texture_extraction)
+			p_sfs.start()
+
+			if mp == 0:
+				p_sfs.join()
+
+		self.logger.info('Haralick image')
+		self.out_haralick = self._imag[:-4] + '_haralick.TIF'
+
+		p_har = None
+		if not os.path.exists(self.out_haralick):
+			self.logger.info("Haralick image doesn't exist !")
+			p_har = Process(target=self.haralick_texture_extraction, args=('simple', ))
+			p_har.start()
+
+			if mp == 0:
+				p_har.join()
+
+		if mp == 1:
+			# Join whichever texture processes were actually launched, so that
+			# both outputs are complete before the constructor returns.
+			if p_sfs is not None:
+				p_sfs.join()
+			if p_har is not None:
+				p_har.join()
+		
+	def sfs_texture_extraction(self):
+		
+		"""
+		Function to compute SFS texture image with OTB command line.
+			:Example: otbcli_SFSTextureExtraction -in qb_RoadExtract.tif -channel 1 -parameters.spethre 50.0 -parameters.spathre 100 -out SFSTextures.tif
+			
+			- OTB help :
+				* in : Input Image
+				* channel : Selected Channel
+				* parameters : Texture feature parameters. This group of parameters allows defining SFS texture parameters. The available texture features are SFS’Length, SFS’Width, SFS’PSI, SFS’W-Mean, SFS’Ratio and SFS’SD. They are provided in this exact order in the output image.
+					- parameters.spethre : Spectral Threshold
+					- parameters.spathre : Spatial Threshold
+					- parameters.nbdir : Number of Direction
+					- parameters.alpha : Alpha
+					- parameters.maxcons : Ratio Maximum Consideration Number
+				* out : Feature Output Image
+				
+			Source : http://otbcb.readthedocs.org/en/latest/Applications/app_SFSTextureExtraction.html
+		"""
+			  
+		process_tocall = ['otbcli_SFSTextureExtraction', '-in', self._imag, '-channel', '2', '-parameters.spethre', '50.0',
+						  '-parameters.spathre', '100', '-out', self.out_sfs]
+		
+		self.logger.info(process_tocall)
+		subprocess.call(process_tocall) 
+		
+	def haralick_texture_extraction(self, texture_choice):
+		
+		"""
+		Function to compute Haralick texture image with OTB command line.
+			:Example: otbcli_HaralickTextureExtraction -in qb_RoadExtract.tif -channel 2 -parameters.xrad 3 -parameters.yrad 3 -texture simple -out HaralickTextures.tif
+			
+			- OTB help :
+				* in : Input Image
+				* channel : Selected Channel
+				* Texture feature parameters : This group of parameters allows defining texture parameters.
+					- X Radius : X Radius
+					- Y Radius : Y Radius
+					- X Offset : X Offset
+					- Y Offset : Y Offset
+				* Image Minimum : Image Minimum
+				* Image Maximum : Image Maximum
+				* Histogram number of bin : Histogram number of bin 
+				* Texture Set Selection : Choice of the texture set. Available choices are :
+					- Simple Haralick Texture Features: This group of parameters defines the 8 local Haralick texture feature output image. The image channels are: Energy, Entropy, Correlation, Inverse Difference Moment, Inertia, Cluster Shade, Cluster Prominence and Haralick Correlation
+					- Advanced Texture Features: This group of parameters defines the 9 advanced texture feature output image. The image channels are: Mean, Variance, Sum Average, Sum Variance, Sum Entropy, Difference of Entropies, Difference of Variances, IC1 and IC2
+					- Higher Order Texture Features: This group of parameters defines the 11 higher order texture feature output image. The image channels are: Short Run Emphasis, Long Run Emphasis, Grey-Level Nonuniformity, Run Length Nonuniformity, Run Percentage, Low Grey-Level Run Emphasis, High Grey-Level Run Emphasis, Short Run Low Grey-Level Emphasis, Short Run High Grey-Level Emphasis, Long Run Low Grey-Level Emphasis and Long Run High Grey-Level Emphasis
+				* out : Feature Output Image 
+				
+			Source : http://otbcb.readthedocs.org/en/latest/Applications/app_HaralickTextureExtraction.html
+		
+		:param texture_choice: Texture set choice -> simple / advanced / higher
+		:type texture_choice: str
+		"""
+		
+		process_tocall = ['otbcli_HaralickTextureExtraction', '-in', self._imag, '-channel', '2', '-parameters.xrad', '3',
+						  '-parameters.yrad', '3', '-texture', texture_choice, '-out', self.out_haralick]
+		
+		self.logger.info(process_tocall)
+		subprocess.call(process_tocall) 
+		
\ No newline at end of file
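Note: both texture methods wrap a single OTB command-line application, and __init__ runs them in background processes. A hedged sketch of that pattern, with a hypothetical image path:

    import subprocess
    from multiprocessing import Process

    def extract_sfs(image, out_sfs):
        # Same OTB call as Vhrs.sfs_texture_extraction
        subprocess.call(['otbcli_SFSTextureExtraction', '-in', image,
                         '-channel', '2', '-parameters.spethre', '50.0',
                         '-parameters.spathre', '100', '-out', out_sfs])

    p = Process(target=extract_sfs, args=('ortho.tif', 'ortho_sfs.TIF'))
    p.start()
    p.join()  # block until OTB has written the output (mp == 0 behaviour)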