From 8397fec01280010a749ccdd90ba245f9864baf9a Mon Sep 17 00:00:00 2001
From: Guillaume Pasero <guillaume.pasero@c-s.fr>
Date: Tue, 28 Nov 2017 16:05:59 +0100
Subject: [PATCH] WRG: fix numerous warnings from Shark headers, fix
 assignments in if conditions and other warnings

---
 .../app/otbVectorDimensionalityReduction.cxx  |  4 +-
 ...imensionalityReductionTrainAutoencoder.txx |  2 +-
 .../otbDimensionalityReductionTrainSOM.txx    |  4 +-
 .../include/otbAutoencoderModel.h             | 13 ++++++-
 .../include/otbAutoencoderModel.txx           | 39 ++++++++++++-------
 .../include/otbAutoencoderModelFactory.h      |  4 +-
 .../include/otbAutoencoderModelFactory.txx    |  2 +-
 .../otbImageDimensionalityReductionFilter.txx |  8 ++--
 .../include/otbPCAModel.h                     | 10 +++++
 .../include/otbPCAModel.txx                   | 22 ++++++++---
 .../include/otbSOMModel.txx                   | 16 ++++----
 11 files changed, 84 insertions(+), 40 deletions(-)

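Notes (placed below the diffstat, so not part of the commit message): the Shark
headers raise -Wshadow, -Wunused-parameter and -Woverloaded-virtual under GCC
and Clang, so each Shark include in the touched headers is wrapped in the
diagnostic guard sketched below; this is a minimal sketch reusing only includes
that already appear in this patch. The remaining hunks replace "=" with "==" in
the if conditions on m_WriteLearningCurve, comment out unused parameter names
and unused typedefs, and switch loop counters compared against size() or
MapDimension to unsigned int.

  #if defined(__GNUC__) || defined(__clang__)
  #pragma GCC diagnostic push
  #pragma GCC diagnostic ignored "-Wshadow"
  #pragma GCC diagnostic ignored "-Wunused-parameter"
  #pragma GCC diagnostic ignored "-Woverloaded-virtual"
  #endif
  #include "otb_shark.h"                       // OTB wrapper header for Shark, included first as in this patch
  #include <shark/Algorithms/Trainers/PCA.h>   // third-party header that emits the warnings
  #if defined(__GNUC__) || defined(__clang__)
  #pragma GCC diagnostic pop                   // restore the project's own warning settings
  #endif
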
diff --git a/Modules/Applications/AppDimensionalityReduction/app/otbVectorDimensionalityReduction.cxx b/Modules/Applications/AppDimensionalityReduction/app/otbVectorDimensionalityReduction.cxx
index 16e5dfaf17..ffd02b56b8 100644
--- a/Modules/Applications/AppDimensionalityReduction/app/otbVectorDimensionalityReduction.cxx
+++ b/Modules/Applications/AppDimensionalityReduction/app/otbVectorDimensionalityReduction.cxx
@@ -155,7 +155,7 @@ class VectorDimensionalityReduction : public Application
 					/*
 					key.erase( std::remove_if(key.begin(),key.end(),IsNotAlphaNum), key.end());
 					std::transform(key.begin(), key.end(), key.begin(), tolower);*/
-					OGRFieldType fieldType = layerDefn.GetFieldDefn(iField)->GetType();
+					//OGRFieldType fieldType = layerDefn.GetFieldDefn(iField)->GetType();
 				/*	if(fieldType == OFTInteger || ogr::version_proxy::IsOFTInteger64(fieldType) || fieldType == OFTReal)
 					{*/
 						//std::string tmpKey="feat."+key;
@@ -320,7 +320,7 @@ class VectorDimensionalityReduction : public Application
 			
 			// Add the field of prediction in the output layer if field not exist
 			
-			for (int i=0; i<GetParameterStringList("featout").size() ;i++)
+			for (unsigned int i=0; i<GetParameterStringList("featout").size() ;i++)
 			{
 				OGRFeatureDefn &layerDefn = outLayer.GetLayerDefn();
 				int idx = layerDefn.GetFieldIndex(GetParameterStringList("featout")[i].c_str());
diff --git a/Modules/Applications/AppDimensionalityReduction/include/otbDimensionalityReductionTrainAutoencoder.txx b/Modules/Applications/AppDimensionalityReduction/include/otbDimensionalityReductionTrainAutoencoder.txx
index b12a2517e5..a4b8d72d83 100644
--- a/Modules/Applications/AppDimensionalityReduction/include/otbDimensionalityReductionTrainAutoencoder.txx
+++ b/Modules/Applications/AppDimensionalityReduction/include/otbDimensionalityReductionTrainAutoencoder.txx
@@ -148,7 +148,7 @@ void TrainDimensionalityReductionApplicationBase<TInputValue,TOutputValue>::Trai
 		regularization.SetSize(s_nbneuron.size());
 		rho.SetSize(s_nbneuron.size());
 		beta.SetSize(s_nbneuron.size());
-		for (int i=0; i<s_nbneuron.size(); i++){ 
+		for (unsigned int i=0; i<s_nbneuron.size(); i++){
 			nb_neuron[i]=std::stoi(s_nbneuron[i]);
 			noise[i]=std::stof(s_noise[i]);
 			regularization[i]=std::stof(s_regularization[i]);
diff --git a/Modules/Applications/AppDimensionalityReduction/include/otbDimensionalityReductionTrainSOM.txx b/Modules/Applications/AppDimensionalityReduction/include/otbDimensionalityReductionTrainSOM.txx
index 8a05ecccd7..e9c22f3c85 100644
--- a/Modules/Applications/AppDimensionalityReduction/include/otbDimensionalityReductionTrainSOM.txx
+++ b/Modules/Applications/AppDimensionalityReduction/include/otbDimensionalityReductionTrainSOM.txx
@@ -125,14 +125,14 @@ void TrainDimensionalityReductionApplicationBase<TInputValue,TOutputValue>
 		dimredTrainer->SetMaxWeight(GetParameterFloat("algorithm.som.iv"));
 		typename TemplateEstimatorType::SizeType size;
 		std::vector<std::basic_string<char>> s= GetParameterStringList("algorithm.som.s");
-		for (int i=0; i<dim; i++){ 
+		for (unsigned int i=0; i<dim; i++){
 			size[i]=std::stoi(s[i]);
 		}
 		
         dimredTrainer->SetMapSize(size);
         typename TemplateEstimatorType::SizeType radius;
         std::vector<std::basic_string<char>> n= GetParameterStringList("algorithm.som.n");
-        for (int i=0; i<dim; i++){ 
+        for (unsigned int i=0; i<dim; i++){
         radius[i]=std::stoi(n[i]);
         }
         dimredTrainer->SetNeighborhoodSizeInit(radius);
diff --git a/Modules/Learning/DimensionalityReductionLearning/include/otbAutoencoderModel.h b/Modules/Learning/DimensionalityReductionLearning/include/otbAutoencoderModel.h
index 9723a5c27a..6807fe3fe9 100644
--- a/Modules/Learning/DimensionalityReductionLearning/include/otbAutoencoderModel.h
+++ b/Modules/Learning/DimensionalityReductionLearning/include/otbAutoencoderModel.h
@@ -4,10 +4,21 @@
 #include "otbMachineLearningModelTraits.h"
 #include "otbMachineLearningModel.h"
 #include <fstream>
-#include <shark/Algorithms/StoppingCriteria/AbstractStoppingCriterion.h>
 
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wshadow"
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#pragma GCC diagnostic ignored "-Woverloaded-virtual"
+#endif
+#include "otb_shark.h"
+#include <shark/Algorithms/StoppingCriteria/AbstractStoppingCriterion.h>
 #include <shark/Models/FFNet.h>
 #include <shark/Models/Autoencoder.h>
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
+
 namespace otb
 {
 template <class TInputValue, class NeuronType>
diff --git a/Modules/Learning/DimensionalityReductionLearning/include/otbAutoencoderModel.txx b/Modules/Learning/DimensionalityReductionLearning/include/otbAutoencoderModel.txx
index e95748ff3c..163dc88d7d 100644
--- a/Modules/Learning/DimensionalityReductionLearning/include/otbAutoencoderModel.txx
+++ b/Modules/Learning/DimensionalityReductionLearning/include/otbAutoencoderModel.txx
@@ -1,11 +1,19 @@
 #ifndef AutoencoderModel_txx
 #define AutoencoderModel_txx
 
+#include "otbAutoencoderModel.h"
+
+
 #include <fstream>
-#include <shark/Data/Dataset.h>
 #include "itkMacro.h"
-#include "otbSharkUtils.h"
 
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wshadow"
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#pragma GCC diagnostic ignored "-Woverloaded-virtual"
+#endif
+#include "otbSharkUtils.h"
 //include train function
 #include <shark/ObjectiveFunctions/ErrorFunction.h>
 #include <shark/ObjectiveFunctions/SparseAutoencoderError.h>//the error function performing the regularisation of the hidden neurons
@@ -20,6 +28,9 @@
 #include <shark/Algorithms/StoppingCriteria/TrainingProgress.h> //Stops when the algorithm seems to converge, Tracks the progress of the training error over a period of time
 
 #include <shark/Algorithms/GradientDescent/SteepestDescent.h>
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
 
 namespace otb
 {
@@ -46,7 +57,7 @@ void AutoencoderModel<TInputValue,NeuronType>::Train()
 	shark::Data<shark::RealVector> inputSamples_copy = inputSamples;
 	
 	std::ofstream ofs;
-	if (this->m_WriteLearningCurve =true) 
+	if (this->m_WriteLearningCurve == true)
 	{
 		ofs.open(m_LearningCurveFileName);
 		ofs << "learning curve" << std::endl; 
@@ -176,7 +187,7 @@ void AutoencoderModel<TInputValue,NeuronType>::TrainOneLayer(shark::AbstractStop
 	optimizer.init(error);
 	
 	std::cout<<"error before training : " << optimizer.solution().value<<std::endl;
-	if (this->m_WriteLearningCurve =true) 
+	if (this->m_WriteLearningCurve == true)
 	{
 		File << "end layer" << std::endl;
 	}
@@ -185,7 +196,7 @@ void AutoencoderModel<TInputValue,NeuronType>::TrainOneLayer(shark::AbstractStop
 	do{
 		i++;
 		optimizer.step(error);
-		if (this->m_WriteLearningCurve =true) 
+		if (this->m_WriteLearningCurve == true)
 		{	
 		File << optimizer.solution().value << std::endl;
 		}
@@ -252,12 +263,12 @@ void AutoencoderModel<TInputValue,NeuronType>::TrainOneSparseLayer(shark::Abstra
 		i++;
 		optimizer.step(error);
 		std::cout<<"error after " << i << "iterations : " << optimizer.solution().value <<std::endl;
-		if (this->m_WriteLearningCurve =true) 
+		if (this->m_WriteLearningCurve == true) 
 		{	
 		File << optimizer.solution().value << std::endl;
 		}
 	} while( !criterion.stop( optimizer.solution() ) );
-	if (this->m_WriteLearningCurve =true) 
+	if (this->m_WriteLearningCurve == true)
 	{
 		File << "end layer" << std::endl;
 	}
@@ -272,7 +283,7 @@ void AutoencoderModel<TInputValue,NeuronType>::TrainOneSparseLayer(shark::Abstra
 
 template <class TInputValue, class NeuronType>
 template <class T>
-void AutoencoderModel<TInputValue,NeuronType>::TrainNetwork(shark::AbstractStoppingCriterion<T> & criterion,double rho,double beta, double regularization, shark::Data<shark::RealVector> &samples, std::ostream& File)
+void AutoencoderModel<TInputValue,NeuronType>::TrainNetwork(shark::AbstractStoppingCriterion<T> & criterion,double /*rho*/,double /*beta*/, double regularization, shark::Data<shark::RealVector> &samples, std::ostream& File)
 {
 	
 	shark::LabeledData<shark::RealVector,shark::RealVector> trainSet(samples,samples);//labels identical to inputs
@@ -292,7 +303,7 @@ void AutoencoderModel<TInputValue,NeuronType>::TrainNetwork(shark::AbstractStopp
 		i++;
 		optimizer.step(error);
 		std::cout<<"error after " << i << "iterations : " << optimizer.solution().value<<std::endl;
-		if (this->m_WriteLearningCurve =true) 
+		if (this->m_WriteLearningCurve == true)
 		{	
 			File << optimizer.solution().value << std::endl;
 		}
@@ -319,13 +330,13 @@ bool AutoencoderModel<TInputValue,NeuronType>::CanReadFile(const std::string & f
 
 
 template <class TInputValue, class NeuronType>
-bool AutoencoderModel<TInputValue,NeuronType>::CanWriteFile(const std::string & filename)
+bool AutoencoderModel<TInputValue,NeuronType>::CanWriteFile(const std::string & /*filename*/)
 {
 	return true;
 }
 
 template <class TInputValue, class NeuronType>
-void AutoencoderModel<TInputValue,NeuronType>::Save(const std::string & filename, const std::string & name)
+void AutoencoderModel<TInputValue,NeuronType>::Save(const std::string & filename, const std::string & /*name*/)
 {
 	std::cout << "saving model ..." << std::endl;
 	std::ofstream ofs(filename);
@@ -382,7 +393,7 @@ void AutoencoderModel<TInputValue,NeuronType>::Save(const std::string & filename
 }
 
 template <class TInputValue, class NeuronType>
-void AutoencoderModel<TInputValue,NeuronType>::Load(const std::string & filename, const std::string & name)
+void AutoencoderModel<TInputValue,NeuronType>::Load(const std::string & filename, const std::string & /*name*/)
 {
 	
 	NetworkType net;
@@ -421,7 +432,7 @@ void AutoencoderModel<TInputValue,NeuronType>::Load(const std::string & filename
 
 template <class TInputValue, class NeuronType>
 typename AutoencoderModel<TInputValue,NeuronType>::TargetSampleType
-AutoencoderModel<TInputValue,NeuronType>::DoPredict(const InputSampleType & value, ConfidenceValueType * quality) const
+AutoencoderModel<TInputValue,NeuronType>::DoPredict(const InputSampleType & value, ConfidenceValueType * /*quality*/) const
 {  
 	
 	shark::RealVector samples(value.Size());
@@ -453,7 +464,7 @@ AutoencoderModel<TInputValue,NeuronType>::DoPredict(const InputSampleType & valu
 
 template <class TInputValue, class NeuronType>
 void AutoencoderModel<TInputValue,NeuronType>
-::DoPredictBatch(const InputListSampleType *input, const unsigned int & startIndex, const unsigned int & size, TargetListSampleType * targets, ConfidenceListSampleType * quality) const
+::DoPredictBatch(const InputListSampleType *input, const unsigned int & startIndex, const unsigned int & size, TargetListSampleType * targets, ConfidenceListSampleType * /*quality*/) const
 {
 	
 	std::vector<shark::RealVector> features;
diff --git a/Modules/Learning/DimensionalityReductionLearning/include/otbAutoencoderModelFactory.h b/Modules/Learning/DimensionalityReductionLearning/include/otbAutoencoderModelFactory.h
index 5eb6a501ef..e614579f86 100644
--- a/Modules/Learning/DimensionalityReductionLearning/include/otbAutoencoderModelFactory.h
+++ b/Modules/Learning/DimensionalityReductionLearning/include/otbAutoencoderModelFactory.h
@@ -2,8 +2,8 @@
 #define AutoencoderModelFactory_h
 
 
-#include <shark/Models/TiedAutoencoder.h>
-#include <shark/Models/Autoencoder.h>
+//#include <shark/Models/TiedAutoencoder.h>
+//#include <shark/Models/Autoencoder.h>
 #include "itkObjectFactoryBase.h"
 #include "itkImageIOBase.h"
 
diff --git a/Modules/Learning/DimensionalityReductionLearning/include/otbAutoencoderModelFactory.txx b/Modules/Learning/DimensionalityReductionLearning/include/otbAutoencoderModelFactory.txx
index 73dae93bde..b53c42b3a7 100644
--- a/Modules/Learning/DimensionalityReductionLearning/include/otbAutoencoderModelFactory.txx
+++ b/Modules/Learning/DimensionalityReductionLearning/include/otbAutoencoderModelFactory.txx
@@ -20,9 +20,9 @@
 
 
 #include "otbAutoencoderModelFactory.h"
+#include "otbAutoencoderModel.h"
 
 #include "itkCreateObjectFunction.h"
-#include "otbAutoencoderModel.h"
 #include "itkVersion.h"
 
 namespace otb
diff --git a/Modules/Learning/DimensionalityReductionLearning/include/otbImageDimensionalityReductionFilter.txx b/Modules/Learning/DimensionalityReductionLearning/include/otbImageDimensionalityReductionFilter.txx
index 418ecb4fa2..c32c9fd3f7 100644
--- a/Modules/Learning/DimensionalityReductionLearning/include/otbImageDimensionalityReductionFilter.txx
+++ b/Modules/Learning/DimensionalityReductionLearning/include/otbImageDimensionalityReductionFilter.txx
@@ -111,9 +111,9 @@ ImageDimensionalityReductionFilter<TInputImage, TOutputImage, TMaskImage>
 
   // Define iterators
   typedef itk::ImageRegionConstIterator<InputImageType> InputIteratorType;
-  typedef itk::ImageRegionConstIterator<MaskImageType>  MaskIteratorType;
+  //typedef itk::ImageRegionConstIterator<MaskImageType>  MaskIteratorType;
   typedef itk::ImageRegionIterator<OutputImageType>     OutputIteratorType;
-  typedef itk::ImageRegionIterator<ConfidenceImageType> ConfidenceMapIteratorType;
+  //typedef itk::ImageRegionIterator<ConfidenceImageType> ConfidenceMapIteratorType;
 
   InputIteratorType inIt(inputPtr, outputRegionForThread);
   OutputIteratorType outIt(outputPtr, outputRegionForThread);
@@ -155,9 +155,9 @@ ImageDimensionalityReductionFilter<TInputImage, TOutputImage, TMaskImage>
 
   // Define iterators
   typedef itk::ImageRegionConstIterator<InputImageType> InputIteratorType;
-  typedef itk::ImageRegionConstIterator<MaskImageType>  MaskIteratorType;
+  //typedef itk::ImageRegionConstIterator<MaskImageType>  MaskIteratorType;
   typedef itk::ImageRegionIterator<OutputImageType>     OutputIteratorType;
-  typedef itk::ImageRegionIterator<ConfidenceImageType> ConfidenceMapIteratorType;
+  //typedef itk::ImageRegionIterator<ConfidenceImageType> ConfidenceMapIteratorType;
 
   InputIteratorType inIt(inputPtr, outputRegionForThread);
   OutputIteratorType outIt(outputPtr, outputRegionForThread);
diff --git a/Modules/Learning/DimensionalityReductionLearning/include/otbPCAModel.h b/Modules/Learning/DimensionalityReductionLearning/include/otbPCAModel.h
index e39ca88341..72ad59805d 100644
--- a/Modules/Learning/DimensionalityReductionLearning/include/otbPCAModel.h
+++ b/Modules/Learning/DimensionalityReductionLearning/include/otbPCAModel.h
@@ -4,7 +4,17 @@
 #include "otbMachineLearningModelTraits.h"
 #include "otbMachineLearningModel.h"
 
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wshadow"
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#pragma GCC diagnostic ignored "-Woverloaded-virtual"
+#endif
+#include "otb_shark.h"
 #include <shark/Algorithms/Trainers/PCA.h>
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
 
 namespace otb
 {
diff --git a/Modules/Learning/DimensionalityReductionLearning/include/otbPCAModel.txx b/Modules/Learning/DimensionalityReductionLearning/include/otbPCAModel.txx
index c5d274b324..11f07f3eb4 100644
--- a/Modules/Learning/DimensionalityReductionLearning/include/otbPCAModel.txx
+++ b/Modules/Learning/DimensionalityReductionLearning/include/otbPCAModel.txx
@@ -2,9 +2,16 @@
 #ifndef PCAModel_txx
 #define PCAModel_txx
 
+#include "otbPCAModel.h"
+
 #include <fstream>
-#include <shark/Data/Dataset.h>
 #include "itkMacro.h"
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wshadow"
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#pragma GCC diagnostic ignored "-Woverloaded-virtual"
+#endif
 #include "otbSharkUtils.h"
 //include train function
 #include <shark/ObjectiveFunctions/ErrorFunction.h>
@@ -13,6 +20,9 @@
 #include <shark/ObjectiveFunctions/Regularizer.h> //L2 regulariziation
 
 #include <shark/ObjectiveFunctions/ErrorFunction.h>
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
 
 namespace otb
 {
@@ -65,13 +75,13 @@ bool PCAModel<TInputValue>::CanReadFile(const std::string & filename)
 
 
 template <class TInputValue>
-bool PCAModel<TInputValue>::CanWriteFile(const std::string & filename)
+bool PCAModel<TInputValue>::CanWriteFile(const std::string & /*filename*/)
 {
 	return true;
 }
 
 template <class TInputValue>
-void PCAModel<TInputValue>::Save(const std::string & filename, const std::string & name)
+void PCAModel<TInputValue>::Save(const std::string & filename, const std::string & /*name*/)
 {
 	std::ofstream ofs(filename);
 	//ofs << m_encoder.name() << std::endl; //first line
@@ -98,7 +108,7 @@ otxt.close();
 }
 
 template <class TInputValue>
-void PCAModel<TInputValue>::Load(const std::string & filename, const std::string & name)
+void PCAModel<TInputValue>::Load(const std::string & filename, const std::string & /*name*/)
 {
 	std::ifstream ifs(filename);
 	char encoder[256];
@@ -130,7 +140,7 @@ void PCAModel<TInputValue>::Load(const std::string & filename, const std::string
 
 template <class TInputValue>
 typename PCAModel<TInputValue>::TargetSampleType
-PCAModel<TInputValue>::DoPredict(const InputSampleType & value, ConfidenceValueType * quality) const
+PCAModel<TInputValue>::DoPredict(const InputSampleType & value, ConfidenceValueType * /*quality*/) const
 {  
 	shark::RealVector samples(value.Size());
 	for(size_t i = 0; i < value.Size();i++)
@@ -156,7 +166,7 @@ PCAModel<TInputValue>::DoPredict(const InputSampleType & value, ConfidenceValueT
 
 template <class TInputValue>
 void PCAModel<TInputValue>
-::DoPredictBatch(const InputListSampleType *input, const unsigned int & startIndex, const unsigned int & size, TargetListSampleType * targets, ConfidenceListSampleType * quality) const
+::DoPredictBatch(const InputListSampleType *input, const unsigned int & startIndex, const unsigned int & size, TargetListSampleType * targets, ConfidenceListSampleType * /*quality*/) const
 {
 	
 	std::vector<shark::RealVector> features;
diff --git a/Modules/Learning/DimensionalityReductionLearning/include/otbSOMModel.txx b/Modules/Learning/DimensionalityReductionLearning/include/otbSOMModel.txx
index 6195b9f961..2f5a69026d 100644
--- a/Modules/Learning/DimensionalityReductionLearning/include/otbSOMModel.txx
+++ b/Modules/Learning/DimensionalityReductionLearning/include/otbSOMModel.txx
@@ -1,6 +1,8 @@
 #ifndef SOMModel_txx
 #define SOMModel_txx
 
+#include "otbSOMModel.h"
+
 #include "otbImageFileReader.h"
 #include "otbImageFileWriter.h"
 
@@ -68,7 +70,7 @@ bool SOMModel<TInputValue, MapDimension>::CanReadFile(const std::string & filena
 
 
 template <class TInputValue, unsigned int MapDimension>
-bool SOMModel<TInputValue, MapDimension>::CanWriteFile(const std::string & filename)
+bool SOMModel<TInputValue, MapDimension>::CanWriteFile(const std::string & /*filename*/)
 {
 	return true;
 }
@@ -91,7 +93,7 @@ std::istream & binary_read(std::istream& stream, T& value){
 
 
 template <class TInputValue, unsigned int MapDimension>
-void SOMModel<TInputValue, MapDimension>::Save(const std::string & filename, const std::string & name)
+void SOMModel<TInputValue, MapDimension>::Save(const std::string & filename, const std::string & /*name*/)
 {
 	itk::ImageRegionConstIterator<MapType> inputIterator(m_SOMMap,m_SOMMap->GetLargestPossibleRegion());
 	inputIterator.GoToBegin();
@@ -133,7 +135,7 @@ void SOMModel<TInputValue, MapDimension>::Save(const std::string & filename, con
 }
 
 template <class TInputValue, unsigned int MapDimension>
-void SOMModel<TInputValue, MapDimension>::Load(const std::string & filename, const std::string & name)
+void SOMModel<TInputValue, MapDimension>::Load(const std::string & filename, const std::string & /*name*/)
 {
 	
 	std::ifstream ifs(filename, std::ios::binary);
@@ -154,7 +156,7 @@ void SOMModel<TInputValue, MapDimension>::Load(const std::string & filename, con
     
 	SizeType size;
 	itk::Index< MapDimension > index;
-	for (int i=0 ; i<MapDimension; i++)
+	for (unsigned int i=0 ; i<MapDimension; i++)
 	{
 		binary_read(ifs,size[i]);
 		index[i]=0;
@@ -174,7 +176,7 @@ void SOMModel<TInputValue, MapDimension>::Load(const std::string & filename, con
 	std::string value;
 	while(!outputIterator.IsAtEnd()){
 		InputSampleType  vect(numberOfElements);
-		for (int i=0 ; i<numberOfElements; i++)
+		for (unsigned int i=0 ; i<numberOfElements; i++)
 		{
 			float v;    // InputValue type is not the same during training anddimredvector.
 			binary_read(ifs,v);
@@ -191,13 +193,13 @@ void SOMModel<TInputValue, MapDimension>::Load(const std::string & filename, con
 
 template <class TInputValue, unsigned int MapDimension>
 typename SOMModel<TInputValue, MapDimension>::TargetSampleType
-SOMModel<TInputValue, MapDimension>::DoPredict(const InputSampleType & value, ConfidenceValueType * quality) const
+SOMModel<TInputValue, MapDimension>::DoPredict(const InputSampleType & value, ConfidenceValueType * /*quality*/) const
 { 
     TargetSampleType target;
     target.SetSize(this->m_Dimension);
 	
     auto winner =m_SOMMap->GetWinner(value);
-    for (int i=0; i< this->m_Dimension ;i++) {
+    for (unsigned int i=0; i< this->m_Dimension ;i++) {
 		target[i] = winner.GetElement(i); 
 	}
 
-- 
GitLab