diff --git a/app/cbDimensionalityReductionTrainer.cxx b/app/cbDimensionalityReductionTrainer.cxx
index 0f085d4c498a9d59cf0ff4d763561523771c0afb..e83231428d04542a54966e5ae4a3aee0bb78ebd1 100644
--- a/app/cbDimensionalityReductionTrainer.cxx
+++ b/app/cbDimensionalityReductionTrainer.cxx
@@ -105,6 +105,7 @@ private:
 		
 		MeasurementType meanMeasurementVector;
 		MeasurementType stddevMeasurementVector;
+		
 		if (HasValue("io.stats") && IsParameterEnabled("io.stats"))
 		{
 			StatisticsReader::Pointer statisticsReader = StatisticsReader::New();
diff --git a/app/cbDimensionalityReductionVector.cxx b/app/cbDimensionalityReductionVector.cxx
index 25ac852bd47ecfb5f9a11920f80f2c7802fb62d2..f71f45609085fa2f7441ec787de2eef4aeda5837 100644
--- a/app/cbDimensionalityReductionVector.cxx
+++ b/app/cbDimensionalityReductionVector.cxx
@@ -107,6 +107,10 @@ class CbDimensionalityReductionVector : public Application
 		"If not given, the input vector data file is updated.");
 		MandatoryOff("out");
 		
+		AddParameter(ParameterType_Int, "pcadim", "Principal component dimension");
+		SetParameterDescription("pcadim", "This optional parameter can be set to reduce the number of eigenvectors used from the PCA model file.");
+		MandatoryOff("pcadim");
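+		// When set, PCAModel::Load() truncates the stored eigenvector matrix to this many components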
+		
 		// Doc example parameter settings
 		SetDocExampleParameterValue("in", "vectorData.shp");
 		SetDocExampleParameterValue("instat", "meanVar.xml");
@@ -211,13 +215,21 @@ class CbDimensionalityReductionVector : public Application
 			
 			
 			/** Read the model */
-			std::cout << "create the fact ?" << std::endl;
+
 			m_Model = DimensionalityReductionModelFactoryType::CreateDimensionalityReductionModel(GetParameterString("model"),
 			DimensionalityReductionModelFactoryType::ReadMode);
 			if (m_Model.IsNull())
 			{
 				otbAppLogFATAL(<< "Error when loading model " << GetParameterString("model") << " : unsupported model type");
 			}
+			if (HasValue("pcadim") && IsParameterEnabled("pcadim"))
+			{
+				int dimension = GetParameterInt("pcadim");
+				m_Model->SetDimension(dimension);
+				otbAppLogINFO("Output dimension forced to " << dimension);
+			}
+
 			m_Model->Load(GetParameterString("model"));
 			otbAppLogINFO("Model loaded");
 			
diff --git a/include/AutoencoderModel.h b/include/AutoencoderModel.h
index 939f592610212021c1164b9ac3716c4d9fd0d4b1..72dd370568bdaea407b79531d4f626bd749193c9 100644
--- a/include/AutoencoderModel.h
+++ b/include/AutoencoderModel.h
@@ -4,6 +4,8 @@
 #include "otbMachineLearningModelTraits.h"
 #include "otbMachineLearningModel.h"
 
+#include <shark/Algorithms/StoppingCriteria/AbstractStoppingCriterion.h>
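+// Training now takes any shark::AbstractStoppingCriterion, so a fixed iteration
+// budget and a convergence test can share the same optimization loop.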
+
 namespace otb
 {
 template <class TInputValue, class AutoencoderType>
@@ -35,12 +37,15 @@ public:
 	itkNewMacro(Self);
 	itkTypeMacro(AutoencoderModel, DimensionalityReductionModel);
 
-	unsigned int GetDimension() {return m_NumberOfHiddenNeurons[m_net.size()-1];};  // Override the Dimensionality Reduction model method, it is used in the dimensionality reduction filter to set the output image size
+	// GetDimension() is now provided by the DimensionalityReductionModel base class, which stores m_Dimension
 	itkGetMacro(NumberOfHiddenNeurons,itk::Array<unsigned int>);
 	itkSetMacro(NumberOfHiddenNeurons,itk::Array<unsigned int>);
 
 	itkGetMacro(NumberOfIterations,unsigned int);
 	itkSetMacro(NumberOfIterations,unsigned int);
+	
+	itkGetMacro(Epsilon,double);
+	itkSetMacro(Epsilon,double);
 
 	itkGetMacro(Regularization,itk::Array<double>);
 	itkSetMacro(Regularization,itk::Array<double>);
@@ -61,8 +66,12 @@ public:
 	void Load(const std::string & filename, const std::string & name="")  ITK_OVERRIDE;
 
 	void Train() ITK_OVERRIDE;
-	void TrainOneLayer(unsigned int,double, double, shark::Data<shark::RealVector> &);
-	void TrainOneSparseLayer(unsigned int,double, double,double, shark::Data<shark::RealVector> &);
+	
+	template <class T>
+	void TrainOneLayer(shark::AbstractStoppingCriterion<T> & criterion, unsigned int,double, double, shark::Data<shark::RealVector> &);
+	
+	template <class T>
+	void TrainOneSparseLayer(shark::AbstractStoppingCriterion<T> & criterion, unsigned int,double, double,double, shark::Data<shark::RealVector> &);
 	
 protected:
 	AutoencoderModel();	
@@ -80,7 +89,8 @@ private:
 	
 	itk::Array<unsigned int> m_NumberOfHiddenNeurons;
 	/** Training parameters */
-	unsigned int m_NumberOfIterations;
+	unsigned int m_NumberOfIterations; // Stop the training after a fixed number of iterations
+	double m_Epsilon; // Stop the training as soon as the training error stops improving by more than this value
 	itk::Array<double> m_Regularization;  // L2 Regularization parameter
 	itk::Array<double> m_Noise;  // probability for an input to be set to 0 (denosing autoencoder)
 	itk::Array<double> m_Rho; // Sparsity parameter
diff --git a/include/AutoencoderModel.txx b/include/AutoencoderModel.txx
index 3060d8fb30d29c7f8878ca38c5fcadbad1c73ae0..fd71a734520f6b7bf4a5b0240ec009a5722a0575 100644
--- a/include/AutoencoderModel.txx
+++ b/include/AutoencoderModel.txx
@@ -16,6 +16,8 @@
 #include <shark/Models/ImpulseNoiseModel.h> //noise source to corrupt the inputs
 #include <shark/Models/ConcatenatedModel.h>//to concatenate the noise with the model
 
+#include <shark/Algorithms/StoppingCriteria/MaxIterations.h> // A simple stopping criterion that stops after a fixed number of iterations
+#include <shark/Algorithms/StoppingCriteria/TrainingProgress.h> // Stops when the algorithm seems to converge; tracks the progress of the training error over a period of time
+
 namespace otb
 {
 
@@ -35,24 +37,52 @@ template <class TInputValue, class AutoencoderType>
 void AutoencoderModel<TInputValue,AutoencoderType>::Train()
 {
 	std::vector<shark::RealVector> features;
+	std::cout << "Converting the input ListSample to a Shark vector" << std::endl;
 	Shark::ListSampleToSharkVector(this->GetInputListSample(), features);
+	std::cout << "Creating the data vector" << std::endl;
 	shark::Data<shark::RealVector> inputSamples = shark::createDataFromRange( features );
 	
-	for (unsigned int i = 0 ; i < m_NumberOfHiddenNeurons.Size(); ++i)
-	{
-		if (m_Noise[i] != 0)   // Shark doesn't allow to train a layer using a sparsity term AND a noisy input. (shark::SparseAutoencoderError takes an autoen
+	
+	if (m_Epsilon > 0){
+		shark::TrainingProgress<> criterion(5,m_Epsilon);
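+		// Assumed shark::TrainingProgress semantics: stop once the improvement of the
+		// training error measured over the last 5 iterations drops below m_Epsilon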
+		
+		for (unsigned int i = 0 ; i < m_NumberOfHiddenNeurons.Size(); ++i)
 		{
-			TrainOneLayer( m_NumberOfHiddenNeurons[i],m_Noise[i],m_Regularization[i], inputSamples);
+			if (m_Noise[i] != 0) // Shark does not allow training a layer with both a sparsity term and a noisy input (shark::SparseAutoencoderError takes an autoencoder as argument)
+			{
+				TrainOneLayer(criterion, m_NumberOfHiddenNeurons[i], m_Noise[i], m_Regularization[i], inputSamples);
+			}
+			else
+			{
+				TrainOneSparseLayer(criterion, m_NumberOfHiddenNeurons[i], m_Rho[i], m_Beta[i], m_Regularization[i], inputSamples);
+			}
+			criterion.reset();
 		}
-		else
+
+	}
+	else {
+		shark::MaxIterations<> criterion(m_NumberOfIterations);
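+		// Fall back to a fixed budget: stop after m_NumberOfIterations optimizer steps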
+		
+		for (unsigned int i = 0 ; i < m_NumberOfHiddenNeurons.Size(); ++i)
 		{
-			TrainOneSparseLayer( m_NumberOfHiddenNeurons[i],m_Rho[i],m_Beta[i],m_Regularization[i], inputSamples);
+			if (m_Noise[i] != 0) // Shark does not allow training a layer with both a sparsity term and a noisy input (shark::SparseAutoencoderError takes an autoencoder as argument)
+			{
+				TrainOneLayer(criterion, m_NumberOfHiddenNeurons[i], m_Noise[i], m_Regularization[i], inputSamples);
+			}
+			else
+			{
+				TrainOneSparseLayer(criterion, m_NumberOfHiddenNeurons[i], m_Rho[i], m_Beta[i], m_Regularization[i], inputSamples);
+			}
+			criterion.reset();
 		}
+		
 	}
 }
 
 template <class TInputValue, class AutoencoderType>
-void AutoencoderModel<TInputValue,AutoencoderType>::TrainOneLayer(unsigned int nbneuron,double noise_strength,double regularization, shark::Data<shark::RealVector> &samples)
+template <class T>
+void AutoencoderModel<TInputValue,AutoencoderType>::TrainOneLayer(shark::AbstractStoppingCriterion<T> & criterion, unsigned int nbneuron,double noise_strength,double regularization, shark::Data<shark::RealVector> &samples)
 {
 	AutoencoderType net;
 
@@ -64,19 +94,22 @@ void AutoencoderModel<TInputValue,AutoencoderType>::TrainOneLayer(unsigned int n
 	shark::LabeledData<shark::RealVector,shark::RealVector> trainSet(samples,samples);//labels identical to inputs
 	shark::SquaredLoss<shark::RealVector> loss;
 	shark::ErrorFunction error(trainSet, &model, &loss);
-	//shark::SparseAutoencoderError error(trainSet,&model, &loss, m_Rho, m_Beta);
-	//shark::SparseAutoencoderError error(trainSet,&net, &loss, 0.1, 0.1);
+
 	shark::TwoNormRegularizer regularizer(error.numberOfVariables());
 	error.setRegularizer(regularization,&regularizer);
 
 	shark::IRpropPlusFull optimizer;
 	error.init();
 	optimizer.init(error);
-	std::cout<<"Optimizing model: "+net.name()<<std::endl;
-	for(std::size_t i = 0; i != m_NumberOfIterations; ++i){
+	
+	std::cout << "Error before training: " << optimizer.solution().value << std::endl;
+	unsigned int i = 0;
+	do {
+		++i;
 		optimizer.step(error);
-		std::cout<<i<<" "<<optimizer.solution().value<<std::endl;
-	}
+	} while (!criterion.stop(optimizer.solution()));
+	std::cout << "Error after " << i << " iterations: " << optimizer.solution().value << std::endl;
+	
 	net.setParameterVector(optimizer.solution().point);
 	m_net.push_back(net);
 	samples = net.encode(samples);
@@ -84,7 +117,8 @@ void AutoencoderModel<TInputValue,AutoencoderType>::TrainOneLayer(unsigned int n
 
 
 template <class TInputValue, class AutoencoderType>
-void AutoencoderModel<TInputValue,AutoencoderType>::TrainOneSparseLayer(unsigned int nbneuron,double rho,double beta, double regularization, shark::Data<shark::RealVector> &samples)
+template <class T>
+void AutoencoderModel<TInputValue,AutoencoderType>::TrainOneSparseLayer(shark::AbstractStoppingCriterion<T> & criterion, unsigned int nbneuron,double rho,double beta, double regularization, shark::Data<shark::RealVector> &samples)
 {
 	AutoencoderType net;
 
@@ -102,11 +136,14 @@ void AutoencoderModel<TInputValue,AutoencoderType>::TrainOneSparseLayer(unsigned
 	shark::IRpropPlusFull optimizer;
 	error.init();
 	optimizer.init(error);
-	std::cout<<"Optimizing model: "+net.name()<<std::endl;
-	for(std::size_t i = 0; i != m_NumberOfIterations; ++i){
+	std::cout << "Error before training: " << optimizer.solution().value << std::endl;
+	unsigned int i = 0;
+	do {
+		++i;
 		optimizer.step(error);
-		std::cout<<i<<" "<<optimizer.solution().value<<std::endl;
-	}
+	} while (!criterion.stop(optimizer.solution()));
+	std::cout << "Error after " << i << " iterations: " << optimizer.solution().value << std::endl;
+	
 	net.setParameterVector(optimizer.solution().point);
 	m_net.push_back(net);
 	samples = net.encode(samples);
@@ -165,7 +202,7 @@ void AutoencoderModel<TInputValue,AutoencoderType>::Load(const std::string & fil
 	for (int i=0; i<m_net.size(); i++){ 
 		m_NumberOfHiddenNeurons[i] = m_net[i].numberOfHiddenNeurons();
 	}
-	
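+	// Expose the size of the last encoding layer as the model's output dimension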
+	this->m_Dimension = m_NumberOfHiddenNeurons[m_net.size()-1];
 }
 
 
diff --git a/include/DimensionalityReductionModelFactory.txx b/include/DimensionalityReductionModelFactory.txx
index ac8f40c6807c2ece12c9aec4eafaf017aee8ee56..a4cb4b87ed472c7e6a4e994962d461459d2c8327 100644
--- a/include/DimensionalityReductionModelFactory.txx
+++ b/include/DimensionalityReductionModelFactory.txx
@@ -35,11 +35,11 @@ namespace otb
 {
 
 template <class TInputValue, class TTargetValue>
-using AutoencoderModelFactory = AutoencoderModelFactoryBase<TInputValue, TTargetValue, shark::Autoencoder<shark::TanhNeuron, shark::LinearNeuron>>  ;
+using AutoencoderModelFactory = AutoencoderModelFactoryBase<TInputValue, TTargetValue, shark::Autoencoder<shark::LogisticNeuron, shark::LogisticNeuron>>  ;
 
 
 template <class TInputValue, class TTargetValue>
-using TiedAutoencoderModelFactory = AutoencoderModelFactoryBase<TInputValue, TTargetValue, shark::TiedAutoencoder< shark::TanhNeuron, shark::LinearNeuron>>  ;
+using TiedAutoencoderModelFactory = AutoencoderModelFactoryBase<TInputValue, TTargetValue, shark::TiedAutoencoder< shark::LogisticNeuron, shark::LogisticNeuron>>  ;
 
 
 template <class TInputValue, class TTargetValue>
diff --git a/include/PCAModel.h b/include/PCAModel.h
index f26f2233a3118b819e712a1c3a1eeaacbb94f903..bd51913803b1f431305cdee05893dbc8b2246081 100644
--- a/include/PCAModel.h
+++ b/include/PCAModel.h
@@ -36,10 +36,10 @@ public:
 
 	itkNewMacro(Self);
 	itkTypeMacro(PCAModel, DimensionalityReductionModel);
-
+/*
 	unsigned int GetDimension() {return m_Dimension;}; 
 	itkSetMacro(Dimension,unsigned int);
-	
+	*/
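+	// GetDimension()/SetDimension() are now provided by the DimensionalityReductionModel base class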
 	itkSetMacro(Do_resize_flag,bool);
 	
 	bool CanReadFile(const std::string & filename);
@@ -64,7 +64,7 @@ private:
 	shark::LinearModel<> m_encoder;
 	shark::LinearModel<> m_decoder;
 	shark::PCA m_pca;
-	unsigned int m_Dimension;
+	// m_Dimension moved to the DimensionalityReductionModel base class
 	bool m_Do_resize_flag;
 };
 } // end namespace otb
diff --git a/include/PCAModel.txx b/include/PCAModel.txx
index 9cda11ec90e5ce9fd222ec51551f4ea2b5a49f00..ec941e1f2802578781ccf5fb206a9a5d48e89a3b 100644
--- a/include/PCAModel.txx
+++ b/include/PCAModel.txx
@@ -20,6 +20,7 @@ template <class TInputValue>
 PCAModel<TInputValue>::PCAModel()
 {
 	this->m_IsDoPredictBatchMultiThreaded = true;
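+	// 0 means "no dimension requested yet": Load() will fall back to the encoder's output size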
+	this->m_Dimension = 0;
 }
 
 
@@ -39,9 +40,9 @@ void PCAModel<TInputValue>::Train()
 	
 	shark::Data<shark::RealVector> inputSamples = shark::createDataFromRange( features );
 	m_pca.setData(inputSamples);
-	m_pca.encoder(m_encoder, m_Dimension);
+	m_pca.encoder(m_encoder, this->m_Dimension);
 	std::cout << m_encoder.matrix() << std::endl;
-	m_pca.decoder(m_decoder, m_Dimension);
+	m_pca.decoder(m_decoder, this->m_Dimension);
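+	// Only the first m_Dimension principal components are kept in the encoder/decoder pair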
 	
 }
 
@@ -94,11 +95,19 @@ void PCAModel<TInputValue>::Load(const std::string & filename, const std::string
 	boost::archive::polymorphic_text_iarchive ia(ifs);
 	m_encoder.read(ia);
 	ifs.close();
-	m_Dimension = m_encoder.outputSize();
+	if (this->m_Dimension == 0)
+	{
+		// No dimension was requested: keep every component stored in the model file
+		this->m_Dimension = m_encoder.outputSize();
+	}
+	
 	auto eigenvectors = m_encoder.matrix();
-	eigenvectors.resize(2,m_encoder.inputSize());
+	eigenvectors.resize(this->m_Dimension, m_encoder.inputSize()); // keep only the first m_Dimension eigenvectors
 	m_encoder.setStructure(eigenvectors, m_encoder.offset() );
-	std::cout << m_encoder.matrix() << std::endl;
+	std::cout << "Truncated eigenvector matrix:" << std::endl << m_encoder.matrix() << std::endl;
 	//this->m_Size = m_NumberOfHiddenNeurons;
 }
 
@@ -120,9 +129,9 @@ PCAModel<TInputValue>::DoPredict(const InputSampleType & value, ConfidenceValueT
      
 	data = m_encoder(data);
     TargetSampleType target;
-    target.SetSize(m_Dimension);
+    target.SetSize(this->m_Dimension);
 	
-	for(unsigned int a = 0; a < m_Dimension; ++a){
+	for(unsigned int a = 0; a < this->m_Dimension; ++a){
 		target[a]=data.element(0)[a];
 	}
 	return target;
@@ -140,10 +149,10 @@ void PCAModel<TInputValue>
 	TargetSampleType target;
 	data = m_encoder(data);
 	unsigned int id = startIndex;
-	target.SetSize(m_Dimension);
+	target.SetSize(this->m_Dimension);
 	for(const auto& p : data.elements()){
 		
-		for(unsigned int a = 0; a < m_Dimension; ++a){
+		for(unsigned int a = 0; a < this->m_Dimension; ++a){
 			target[a]=p[a];
 			//target[a]=1;
 		
diff --git a/include/SOMModel.h b/include/SOMModel.h
index 846b0f2a2eb4c7f4112117019261cb870890fb5b..88209ed24e0efc5df0c06cd8a3fa7b571e2cdd83 100644
--- a/include/SOMModel.h
+++ b/include/SOMModel.h
@@ -85,7 +85,7 @@ public:
 	void Train() ITK_OVERRIDE;
 	//void Dimensionality_reduction()  {}; // Dimensionality reduction is done by DoPredict
 	 
-	unsigned int GetDimension() { return MapType::ImageDimension;};
+	// GetDimension() is provided by the DimensionalityReductionModel base class; m_Dimension is set in Load()
 protected:
 	SOMModel();	
 	~SOMModel() ITK_OVERRIDE;
diff --git a/include/SOMModel.txx b/include/SOMModel.txx
index 29e226f4dab4cc39c59ca0af02e90a25a4c3e65c..7afbd6f501935b86af55569461e561b733b367d9 100644
--- a/include/SOMModel.txx
+++ b/include/SOMModel.txx
@@ -22,6 +22,7 @@ namespace otb
 template <class TInputValue, unsigned int MapDimension>
 SOMModel<TInputValue,  MapDimension>::SOMModel()
 {
+	// m_Dimension is set in Load(), once the map geometry is known
 }
 
 
@@ -171,6 +172,7 @@ void SOMModel<TInputValue, MapDimension>::Load(const std::string & filename, con
 	}
 
 	ifs.close();
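+	// For a SOM the reduced dimension is fixed by the dimensionality of the map itself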
+	this->m_Dimension = MapType::ImageDimension;
 }
 
 
diff --git a/include/cbLearningApplicationBaseDR.h b/include/cbLearningApplicationBaseDR.h
index b3bf030786dad94d5ff956c90d8169dfad9a94a5..9d0a6c69087397333ba31ce0eaa18fe774ce1294 100644
--- a/include/cbLearningApplicationBaseDR.h
+++ b/include/cbLearningApplicationBaseDR.h
@@ -101,10 +101,10 @@ public:
 	
 
 #ifdef OTB_USE_SHARK
-	typedef shark::Autoencoder< shark::TanhNeuron, shark::LinearNeuron> AutoencoderType;
+	typedef shark::Autoencoder< shark::LogisticNeuron, shark::LogisticNeuron> AutoencoderType;
 	typedef otb::AutoencoderModel<InputValueType, AutoencoderType> AutoencoderModelType;
 	
-	typedef shark::TiedAutoencoder< shark::TanhNeuron, shark::LinearNeuron> TiedAutoencoderType;
+	typedef shark::TiedAutoencoder< shark::LogisticNeuron, shark::LogisticNeuron> TiedAutoencoderType;
 	typedef otb::AutoencoderModel<InputValueType, TiedAutoencoderType> TiedAutoencoderModelType;
 	
 	typedef otb::PCAModel<InputValueType> PCAModelType;
diff --git a/include/cbTrainAutoencoder.txx b/include/cbTrainAutoencoder.txx
index dce0efbe70c493c725fe42930d24a6c51eea586f..4efad5dae532f647cfc50961b37b56dc4cba0ced 100644
--- a/include/cbTrainAutoencoder.txx
+++ b/include/cbTrainAutoencoder.txx
@@ -44,6 +44,13 @@ cbLearningApplicationBaseDR<TInputValue,TOutputValue>
     "model.autoencoder.nbiter",
     "The maximum number of iterations used during training.");
   
+  AddParameter(ParameterType_Float, "model.autoencoder.epsilon",
+               "Convergence threshold");
+  SetParameterFloat("model.autoencoder.epsilon", 0, false);
+  SetParameterDescription(
+    "model.autoencoder.epsilon",
+    "Stop the training once the training error stops improving by more than this value. "
+    "When set to 0 (the default), training runs for the fixed number of iterations given by model.autoencoder.nbiter.");
+  
   
    //Number Of Hidden Neurons
   AddParameter(ParameterType_StringList ,  "model.autoencoder.nbneuron",   "Size");
@@ -134,6 +141,7 @@ void cbLearningApplicationBaseDR<TInputValue,TOutputValue>
 		std::cout << nb_neuron << std::endl;
 		dimredTrainer->SetNumberOfHiddenNeurons(nb_neuron);
 		dimredTrainer->SetNumberOfIterations(GetParameterInt("model.autoencoder.nbiter"));
+		dimredTrainer->SetEpsilon(GetParameterFloat("model.autoencoder.epsilon"));
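+		// A strictly positive epsilon switches training from the fixed-iteration criterion to the convergence test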
 		dimredTrainer->SetRegularization(regularization);
 		dimredTrainer->SetNoise(noise);
 		dimredTrainer->SetRho(rho);