diff --git a/Modules/Applications/AppDimensionalityReduction/app/otbVectorDimensionalityReduction.cxx b/Modules/Applications/AppDimensionalityReduction/app/otbVectorDimensionalityReduction.cxx
index 16e5dfaf17459b086ec555d419ee44908e911709..ffd02b56b8ede681c6a392ee398db0d3a1ff1b26 100644
--- a/Modules/Applications/AppDimensionalityReduction/app/otbVectorDimensionalityReduction.cxx
+++ b/Modules/Applications/AppDimensionalityReduction/app/otbVectorDimensionalityReduction.cxx
@@ -155,7 +155,7 @@ class VectorDimensionalityReduction : public Application
       /* key.erase( std::remove_if(key.begin(),key.end(),IsNotAlphaNum), key.end());
       std::transform(key.begin(), key.end(), key.begin(), tolower);*/
-      OGRFieldType fieldType = layerDefn.GetFieldDefn(iField)->GetType();
+      //OGRFieldType fieldType = layerDefn.GetFieldDefn(iField)->GetType();
      /* if(fieldType == OFTInteger || ogr::version_proxy::IsOFTInteger64(fieldType) || fieldType == OFTReal)
      {*/
      //std::string tmpKey="feat."+key;
@@ -320,7 +320,7 @@ class VectorDimensionalityReduction : public Application
 
    // Add the field of prediction in the output layer if field not exist
-   for (int i=0; i<GetParameterStringList("featout").size() ;i++)
+   for (unsigned int i=0; i<GetParameterStringList("featout").size() ;i++)
     {
     OGRFeatureDefn &layerDefn = outLayer.GetLayerDefn();
     int idx = layerDefn.GetFieldIndex(GetParameterStringList("featout")[i].c_str());
diff --git a/Modules/Applications/AppDimensionalityReduction/include/otbDimensionalityReductionTrainAutoencoder.txx b/Modules/Applications/AppDimensionalityReduction/include/otbDimensionalityReductionTrainAutoencoder.txx
index b12a2517e59594544d44589dce03e609abfe384b..a4b8d72d83a4cfb24a9b8c868c0980b0c6943c0d 100644
--- a/Modules/Applications/AppDimensionalityReduction/include/otbDimensionalityReductionTrainAutoencoder.txx
+++ b/Modules/Applications/AppDimensionalityReduction/include/otbDimensionalityReductionTrainAutoencoder.txx
@@ -148,7 +148,7 @@ void TrainDimensionalityReductionApplicationBase<TInputValue,TOutputValue>::Trai
   regularization.SetSize(s_nbneuron.size());
   rho.SetSize(s_nbneuron.size());
   beta.SetSize(s_nbneuron.size());
-  for (int i=0; i<s_nbneuron.size(); i++){
+  for (unsigned int i=0; i<s_nbneuron.size(); i++){
     nb_neuron[i]=std::stoi(s_nbneuron[i]);
     noise[i]=std::stof(s_noise[i]);
     regularization[i]=std::stof(s_regularization[i]);
diff --git a/Modules/Applications/AppDimensionalityReduction/include/otbDimensionalityReductionTrainSOM.txx b/Modules/Applications/AppDimensionalityReduction/include/otbDimensionalityReductionTrainSOM.txx
index 8a05ecccd7ada09798e90b245a62c093d1995706..e9c22f3c85ca2bb87ef89e57782f2e402cff96fe 100644
--- a/Modules/Applications/AppDimensionalityReduction/include/otbDimensionalityReductionTrainSOM.txx
+++ b/Modules/Applications/AppDimensionalityReduction/include/otbDimensionalityReductionTrainSOM.txx
@@ -125,14 +125,14 @@ void TrainDimensionalityReductionApplicationBase<TInputValue,TOutputValue>
   dimredTrainer->SetMaxWeight(GetParameterFloat("algorithm.som.iv"));
   typename TemplateEstimatorType::SizeType size;
   std::vector<std::basic_string<char>> s= GetParameterStringList("algorithm.som.s");
-  for (int i=0; i<dim; i++){
+  for (unsigned int i=0; i<dim; i++){
     size[i]=std::stoi(s[i]);
   }
   dimredTrainer->SetMapSize(size);
 
   typename TemplateEstimatorType::SizeType radius;
   std::vector<std::basic_string<char>> n= GetParameterStringList("algorithm.som.n");
-  for (int i=0; i<dim; i++){
+  for (unsigned int i=0; i<dim; i++){
     radius[i]=std::stoi(n[i]);
   }
   dimredTrainer->SetNeighborhoodSizeInit(radius);
diff --git a/Modules/Learning/DimensionalityReductionLearning/include/otbAutoencoderModel.h b/Modules/Learning/DimensionalityReductionLearning/include/otbAutoencoderModel.h
index 9723a5c27a482677298bdc98188c7abd741404aa..6807fe3fe9cbca54fd716344cf78f42d0ac798f2 100644
--- a/Modules/Learning/DimensionalityReductionLearning/include/otbAutoencoderModel.h
+++ b/Modules/Learning/DimensionalityReductionLearning/include/otbAutoencoderModel.h
@@ -4,10 +4,21 @@
 #include "otbMachineLearningModelTraits.h"
 #include "otbMachineLearningModel.h"
 #include <fstream>
-#include <shark/Algorithms/StoppingCriteria/AbstractStoppingCriterion.h>
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wshadow"
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#pragma GCC diagnostic ignored "-Woverloaded-virtual"
+#endif
+#include "otb_shark.h"
+#include <shark/Algorithms/StoppingCriteria/AbstractStoppingCriterion.h>
 #include <shark/Models/FFNet.h>
 #include <shark/Models/Autoencoder.h>
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
+
 namespace otb
 {
 template <class TInputValue, class NeuronType>
diff --git a/Modules/Learning/DimensionalityReductionLearning/include/otbAutoencoderModel.txx b/Modules/Learning/DimensionalityReductionLearning/include/otbAutoencoderModel.txx
index e95748ff3cf7a67222118a853d7d49d329d781bd..163dc88d7d075c8936155cbbb21f93c47054b7ed 100644
--- a/Modules/Learning/DimensionalityReductionLearning/include/otbAutoencoderModel.txx
+++ b/Modules/Learning/DimensionalityReductionLearning/include/otbAutoencoderModel.txx
@@ -1,11 +1,19 @@
 #ifndef AutoencoderModel_txx
 #define AutoencoderModel_txx
 
+#include "otbAutoencoderModel.h"
+
+
 #include <fstream>
-#include <shark/Data/Dataset.h>
 #include "itkMacro.h"
-#include "otbSharkUtils.h"
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wshadow"
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#pragma GCC diagnostic ignored "-Woverloaded-virtual"
+#endif
+#include "otbSharkUtils.h"
 //include train function
 #include <shark/ObjectiveFunctions/ErrorFunction.h>
 #include <shark/ObjectiveFunctions/SparseAutoencoderError.h>//the error function performing the regularisation of the hidden neurons
@@ -20,6 +28,9 @@
 #include <shark/Algorithms/StoppingCriteria/TrainingProgress.h> //Stops when the algorithm seems to converge, Tracks the progress of the training error over a period of time
 
 #include <shark/Algorithms/GradientDescent/SteepestDescent.h>
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
 
 namespace otb
 {
@@ -46,7 +57,7 @@ void AutoencoderModel<TInputValue,NeuronType>::Train()
   shark::Data<shark::RealVector> inputSamples_copy = inputSamples;
   std::ofstream ofs;
-  if (this->m_WriteLearningCurve =true)
+  if (this->m_WriteLearningCurve == true)
   {
     ofs.open(m_LearningCurveFileName);
     ofs << "learning curve" << std::endl;
   }
@@ -176,7 +187,7 @@ void AutoencoderModel<TInputValue,NeuronType>::TrainOneLayer(shark::AbstractStop
   optimizer.init(error);
 
   std::cout<<"error before training : " << optimizer.solution().value<<std::endl;
-  if (this->m_WriteLearningCurve =true)
+  if (this->m_WriteLearningCurve == true)
   {
     File << "end layer" << std::endl;
   }
@@ -185,7 +196,7 @@
   do{
     i++;
     optimizer.step(error);
-    if (this->m_WriteLearningCurve =true)
+    if (this->m_WriteLearningCurve == true)
     {
       File << optimizer.solution().value << std::endl;
     }
@@ -252,12 +263,12 @@ void AutoencoderModel<TInputValue,NeuronType>::TrainOneSparseLayer(shark::Abstra
     i++;
     optimizer.step(error);
     std::cout<<"error after " << i << "iterations : " << optimizer.solution().value <<std::endl;
-    if (this->m_WriteLearningCurve =true)
+    if (this->m_WriteLearningCurve == true)
     {
       File << optimizer.solution().value << std::endl;
     }
   } while( !criterion.stop( optimizer.solution() ) );
-  if (this->m_WriteLearningCurve =true)
+  if (this->m_WriteLearningCurve == true)
   {
     File << "end layer" << std::endl;
   }
@@ -272,7 +283,7 @@ void AutoencoderModel<TInputValue,NeuronType>::TrainOneSparseLayer(shark::Abstra
 
 template <class TInputValue, class NeuronType>
 template <class T>
-void AutoencoderModel<TInputValue,NeuronType>::TrainNetwork(shark::AbstractStoppingCriterion<T> & criterion,double rho,double beta, double regularization, shark::Data<shark::RealVector> &samples, std::ostream& File)
+void AutoencoderModel<TInputValue,NeuronType>::TrainNetwork(shark::AbstractStoppingCriterion<T> & criterion,double /*rho*/,double /*beta*/, double regularization, shark::Data<shark::RealVector> &samples, std::ostream& File)
 {
   shark::LabeledData<shark::RealVector,shark::RealVector> trainSet(samples,samples);//labels identical to inputs
@@ -292,7 +303,7 @@ void AutoencoderModel<TInputValue,NeuronType>::TrainNetwork(shark::AbstractStopp
     i++;
     optimizer.step(error);
     std::cout<<"error after " << i << "iterations : " << optimizer.solution().value<<std::endl;
-    if (this->m_WriteLearningCurve =true)
+    if (this->m_WriteLearningCurve == true)
     {
       File << optimizer.solution().value << std::endl;
     }
@@ -319,13 +330,13 @@ bool AutoencoderModel<TInputValue,NeuronType>::CanReadFile(const std::string & f
 
 
 template <class TInputValue, class NeuronType>
-bool AutoencoderModel<TInputValue,NeuronType>::CanWriteFile(const std::string & filename)
+bool AutoencoderModel<TInputValue,NeuronType>::CanWriteFile(const std::string & /*filename*/)
 {
   return true;
 }
 
 template <class TInputValue, class NeuronType>
-void AutoencoderModel<TInputValue,NeuronType>::Save(const std::string & filename, const std::string & name)
+void AutoencoderModel<TInputValue,NeuronType>::Save(const std::string & filename, const std::string & /*name*/)
 {
   std::cout << "saving model ..." << std::endl;
   std::ofstream ofs(filename);
@@ -382,7 +393,7 @@ void AutoencoderModel<TInputValue,NeuronType>::Save(const std::string & filename
 }
 
 template <class TInputValue, class NeuronType>
-void AutoencoderModel<TInputValue,NeuronType>::Load(const std::string & filename, const std::string & name)
+void AutoencoderModel<TInputValue,NeuronType>::Load(const std::string & filename, const std::string & /*name*/)
 {
   NetworkType net;
@@ -421,7 +432,7 @@ void AutoencoderModel<TInputValue,NeuronType>::Load(const std::string & filename
 
 template <class TInputValue, class NeuronType>
 typename AutoencoderModel<TInputValue,NeuronType>::TargetSampleType
-AutoencoderModel<TInputValue,NeuronType>::DoPredict(const InputSampleType & value, ConfidenceValueType * quality) const
+AutoencoderModel<TInputValue,NeuronType>::DoPredict(const InputSampleType & value, ConfidenceValueType * /*quality*/) const
 {
   shark::RealVector samples(value.Size());
@@ -453,7 +464,7 @@ AutoencoderModel<TInputValue,NeuronType>::DoPredict(const InputSampleType & valu
 
 template <class TInputValue, class NeuronType>
 void AutoencoderModel<TInputValue,NeuronType>
-::DoPredictBatch(const InputListSampleType *input, const unsigned int & startIndex, const unsigned int & size, TargetListSampleType * targets, ConfidenceListSampleType * quality) const
+::DoPredictBatch(const InputListSampleType *input, const unsigned int & startIndex, const unsigned int & size, TargetListSampleType * targets, ConfidenceListSampleType * /*quality*/) const
 {
   std::vector<shark::RealVector> features;
diff --git a/Modules/Learning/DimensionalityReductionLearning/include/otbAutoencoderModelFactory.h b/Modules/Learning/DimensionalityReductionLearning/include/otbAutoencoderModelFactory.h
index 5eb6a501ef501d9ea4cf58e0f42149312670623b..e614579f862869ab74c2acf7eb66fc475a6367c5 100644
--- a/Modules/Learning/DimensionalityReductionLearning/include/otbAutoencoderModelFactory.h
+++ b/Modules/Learning/DimensionalityReductionLearning/include/otbAutoencoderModelFactory.h
@@ -2,8 +2,8 @@
 #define AutoencoderModelFactory_h
 
-#include <shark/Models/TiedAutoencoder.h>
-#include <shark/Models/Autoencoder.h>
+//#include <shark/Models/TiedAutoencoder.h>
+//#include <shark/Models/Autoencoder.h>
 #include "itkObjectFactoryBase.h"
 #include "itkImageIOBase.h"
diff --git a/Modules/Learning/DimensionalityReductionLearning/include/otbAutoencoderModelFactory.txx b/Modules/Learning/DimensionalityReductionLearning/include/otbAutoencoderModelFactory.txx
index 73dae93bde0e7dd05cb647a6bb8cf55bf6282ffc..b53c42b3a7613404cb3cca36df6b8af4daa9fcc8 100644
--- a/Modules/Learning/DimensionalityReductionLearning/include/otbAutoencoderModelFactory.txx
+++ b/Modules/Learning/DimensionalityReductionLearning/include/otbAutoencoderModelFactory.txx
@@ -20,9 +20,9 @@
 
 #include "otbAutoencoderModelFactory.h"
 
+#include "otbAutoencoderModel.h"
 #include "itkCreateObjectFunction.h"
-#include "otbAutoencoderModel.h"
 #include "itkVersion.h"
 
 namespace otb
diff --git a/Modules/Learning/DimensionalityReductionLearning/include/otbImageDimensionalityReductionFilter.txx b/Modules/Learning/DimensionalityReductionLearning/include/otbImageDimensionalityReductionFilter.txx
index 418ecb4fa24e028cf9f9873ba53c45924801901e..c32c9fd3f7944d09ddf2e1e405cc3e3e2dde2c64 100644
--- a/Modules/Learning/DimensionalityReductionLearning/include/otbImageDimensionalityReductionFilter.txx
+++ b/Modules/Learning/DimensionalityReductionLearning/include/otbImageDimensionalityReductionFilter.txx
@@ -111,9 +111,9 @@ ImageDimensionalityReductionFilter<TInputImage, TOutputImage, TMaskImage>
 
   // Define iterators
   typedef itk::ImageRegionConstIterator<InputImageType> InputIteratorType;
-  typedef itk::ImageRegionConstIterator<MaskImageType> MaskIteratorType;
+  //typedef itk::ImageRegionConstIterator<MaskImageType> MaskIteratorType;
   typedef itk::ImageRegionIterator<OutputImageType> OutputIteratorType;
-  typedef itk::ImageRegionIterator<ConfidenceImageType> ConfidenceMapIteratorType;
+  //typedef itk::ImageRegionIterator<ConfidenceImageType> ConfidenceMapIteratorType;
 
   InputIteratorType inIt(inputPtr, outputRegionForThread);
   OutputIteratorType outIt(outputPtr, outputRegionForThread);
@@ -155,9 +155,9 @@ ImageDimensionalityReductionFilter<TInputImage, TOutputImage, TMaskImage>
 
   // Define iterators
   typedef itk::ImageRegionConstIterator<InputImageType> InputIteratorType;
-  typedef itk::ImageRegionConstIterator<MaskImageType> MaskIteratorType;
+  //typedef itk::ImageRegionConstIterator<MaskImageType> MaskIteratorType;
   typedef itk::ImageRegionIterator<OutputImageType> OutputIteratorType;
-  typedef itk::ImageRegionIterator<ConfidenceImageType> ConfidenceMapIteratorType;
+  //typedef itk::ImageRegionIterator<ConfidenceImageType> ConfidenceMapIteratorType;
 
   InputIteratorType inIt(inputPtr, outputRegionForThread);
   OutputIteratorType outIt(outputPtr, outputRegionForThread);
diff --git a/Modules/Learning/DimensionalityReductionLearning/include/otbPCAModel.h b/Modules/Learning/DimensionalityReductionLearning/include/otbPCAModel.h
index e39ca88341a88c10af9828c06de5b20e87b3e75c..72ad59805d77e8af896f660a5b8054c401a939f8 100644
--- a/Modules/Learning/DimensionalityReductionLearning/include/otbPCAModel.h
+++ b/Modules/Learning/DimensionalityReductionLearning/include/otbPCAModel.h
@@ -4,7 +4,17 @@
 #include "otbMachineLearningModelTraits.h"
 #include "otbMachineLearningModel.h"
 
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wshadow"
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#pragma GCC diagnostic ignored "-Woverloaded-virtual"
+#endif
+#include "otb_shark.h"
 #include <shark/Algorithms/Trainers/PCA.h>
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
 
 namespace otb
 {
diff --git a/Modules/Learning/DimensionalityReductionLearning/include/otbPCAModel.txx b/Modules/Learning/DimensionalityReductionLearning/include/otbPCAModel.txx
index c5d274b324f66b16d49239f32b5df8c4318d834d..11f07f3eb47e956ffa5fdbd8c5e20b3433be1290 100644
--- a/Modules/Learning/DimensionalityReductionLearning/include/otbPCAModel.txx
+++ b/Modules/Learning/DimensionalityReductionLearning/include/otbPCAModel.txx
@@ -2,9 +2,16 @@
 #ifndef PCAModel_txx
 #define PCAModel_txx
 
+#include "otbPCAModel.h"
+
 #include <fstream>
-#include <shark/Data/Dataset.h>
 #include "itkMacro.h"
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wshadow"
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#pragma GCC diagnostic ignored "-Woverloaded-virtual"
+#endif
 #include "otbSharkUtils.h"
 //include train function
 #include <shark/ObjectiveFunctions/ErrorFunction.h>
@@ -13,6 +20,9 @@
 #include <shark/ObjectiveFunctions/Regularizer.h> //L2 regulariziation
 #include <shark/ObjectiveFunctions/ErrorFunction.h>
 
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
 
 namespace otb
 {
@@ -65,13 +75,13 @@ bool PCAModel<TInputValue>::CanReadFile(const std::string & filename)
 
 
 template <class TInputValue>
-bool PCAModel<TInputValue>::CanWriteFile(const std::string & filename)
+bool PCAModel<TInputValue>::CanWriteFile(const std::string & /*filename*/)
 {
   return true;
 }
 
 template <class TInputValue>
-void PCAModel<TInputValue>::Save(const std::string & filename, const std::string & name)
+void PCAModel<TInputValue>::Save(const std::string & filename, const std::string & /*name*/)
 {
   std::ofstream ofs(filename);
   //ofs << m_encoder.name() << std::endl; //first line
@@ -98,7 +108,7 @@
 otxt.close();
 }
 
 template <class TInputValue>
-void PCAModel<TInputValue>::Load(const std::string & filename, const std::string & name)
+void PCAModel<TInputValue>::Load(const std::string & filename, const std::string & /*name*/)
 {
   std::ifstream ifs(filename);
   char encoder[256];
@@ -130,7 +140,7 @@ void PCAModel<TInputValue>::Load(const std::string & filename, const std::string
 
 template <class TInputValue>
 typename PCAModel<TInputValue>::TargetSampleType
-PCAModel<TInputValue>::DoPredict(const InputSampleType & value, ConfidenceValueType * quality) const
+PCAModel<TInputValue>::DoPredict(const InputSampleType & value, ConfidenceValueType * /*quality*/) const
 {
   shark::RealVector samples(value.Size());
   for(size_t i = 0; i < value.Size();i++)
@@ -156,7 +166,7 @@ PCAModel<TInputValue>::DoPredict(const InputSampleType & value, ConfidenceValueT
 
 template <class TInputValue>
 void PCAModel<TInputValue>
-::DoPredictBatch(const InputListSampleType *input, const unsigned int & startIndex, const unsigned int & size, TargetListSampleType * targets, ConfidenceListSampleType * quality) const
+::DoPredictBatch(const InputListSampleType *input, const unsigned int & startIndex, const unsigned int & size, TargetListSampleType * targets, ConfidenceListSampleType * /*quality*/) const
 {
   std::vector<shark::RealVector> features;
diff --git a/Modules/Learning/DimensionalityReductionLearning/include/otbSOMModel.txx b/Modules/Learning/DimensionalityReductionLearning/include/otbSOMModel.txx
index 6195b9f96189f1a781a6db581da79aa131f502e0..2f5a69026d3daf094f46f1a55ad32cdbf3d13b89 100644
--- a/Modules/Learning/DimensionalityReductionLearning/include/otbSOMModel.txx
+++ b/Modules/Learning/DimensionalityReductionLearning/include/otbSOMModel.txx
@@ -1,6 +1,8 @@
 #ifndef SOMModel_txx
 #define SOMModel_txx
 
+#include "otbSOMModel.h"
+
 #include "otbImageFileReader.h"
 #include "otbImageFileWriter.h"
@@ -68,7 +70,7 @@ bool SOMModel<TInputValue, MapDimension>::CanReadFile(const std::string & filena
 
 
 template <class TInputValue, unsigned int MapDimension>
-bool SOMModel<TInputValue, MapDimension>::CanWriteFile(const std::string & filename)
+bool SOMModel<TInputValue, MapDimension>::CanWriteFile(const std::string & /*filename*/)
 {
   return true;
 }
@@ -91,7 +93,7 @@ std::istream & binary_read(std::istream& stream, T& value){
 
 
 template <class TInputValue, unsigned int MapDimension>
-void SOMModel<TInputValue, MapDimension>::Save(const std::string & filename, const std::string & name)
+void SOMModel<TInputValue, MapDimension>::Save(const std::string & filename, const std::string & /*name*/)
 {
   itk::ImageRegionConstIterator<MapType> inputIterator(m_SOMMap,m_SOMMap->GetLargestPossibleRegion());
   inputIterator.GoToBegin();
@@ -133,7 +135,7 @@ void SOMModel<TInputValue, MapDimension>::Save(const std::string & filename, con
 }
 
 template <class TInputValue, unsigned int MapDimension>
-void SOMModel<TInputValue, MapDimension>::Load(const std::string & filename, const std::string & name)
+void SOMModel<TInputValue, MapDimension>::Load(const std::string & filename, const std::string & /*name*/)
 {
   std::ifstream ifs(filename, std::ios::binary);
@@ -154,7 +156,7 @@ void SOMModel<TInputValue, MapDimension>::Load(const std::string & filename, con
   SizeType size;
   itk::Index< MapDimension > index;
-  for (int i=0 ; i<MapDimension; i++)
+  for (unsigned int i=0 ; i<MapDimension; i++)
   {
     binary_read(ifs,size[i]);
     index[i]=0;
   }
@@ -174,7 +176,7 @@ void SOMModel<TInputValue, MapDimension>::Load(const std::string & filename, con
   std::string value;
   while(!outputIterator.IsAtEnd()){
     InputSampleType vect(numberOfElements);
-    for (int i=0 ; i<numberOfElements; i++)
+    for (unsigned int i=0 ; i<numberOfElements; i++)
     {
       float v; // InputValue type is not the same during training anddimredvector.
       binary_read(ifs,v);
@@ -191,13 +193,13 @@ void SOMModel<TInputValue, MapDimension>::Load(const std::string & filename, con
 
 template <class TInputValue, unsigned int MapDimension>
 typename SOMModel<TInputValue, MapDimension>::TargetSampleType
-SOMModel<TInputValue, MapDimension>::DoPredict(const InputSampleType & value, ConfidenceValueType * quality) const
+SOMModel<TInputValue, MapDimension>::DoPredict(const InputSampleType & value, ConfidenceValueType * /*quality*/) const
 {
   TargetSampleType target;
   target.SetSize(this->m_Dimension);
 
   auto winner =m_SOMMap->GetWinner(value);
-  for (int i=0; i< this->m_Dimension ;i++) {
+  for (unsigned int i=0; i< this->m_Dimension ;i++) {
     target[i] = winner.GetElement(i);
   }