Commit 810c681c authored by Jordi Inglada's avatar Jordi Inglada

STYLE: rename parameters of the application
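Note: the short sketch below is not part of the commit; it only illustrates what the rename means for code that drives the application through the OTB wrapper API. The application name "TrainDimensionalityReduction", the file paths and the parameter values are assumptions chosen for illustration; only the renamed keys ("model" becoming "algorithm") come from this change.

// Minimal sketch, assuming the application is registered as "TrainDimensionalityReduction"
// and that the mandatory field list ("feat") and any other required parameters are also set.
#include "otbWrapperApplication.h"
#include "otbWrapperApplicationRegistry.h"

int main()
{
  otb::Wrapper::Application::Pointer app =
    otb::Wrapper::ApplicationRegistry::CreateApplication("TrainDimensionalityReduction");

  app->SetParameterString("io.vd", "samples.sqlite");   // illustrative input path
  app->SetParameterString("io.out", "model.txt");       // illustrative output path

  // Before this commit the group key was "model"; it is now "algorithm".
  app->SetParameterString("algorithm", "pca");          // was: "model"
  app->SetParameterInt("algorithm.pca.dim", 10);        // was: "model.pca.dim"

  return app->ExecuteAndWriteOutput();
}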

Showing with 123 additions and 123 deletions
@@ -59,14 +59,14 @@ private:
   SetParameterDescription("io.vd", "Input geometries used for training (note : all geometries from the layer will be used)");
   AddParameter(ParameterType_OutputFilename, "io.out", "Output model");
-  SetParameterDescription("io.out", "Output file containing the model estimated (.txt format).");
+  SetParameterDescription("io.out", "Output file containing the estimated model (.txt format).");
   AddParameter(ParameterType_InputFilename, "io.stats", "Input XML image statistics file");
   MandatoryOff("io.stats");
   SetParameterDescription("io.stats", "XML file containing mean and variance of each feature.");
-  AddParameter(ParameterType_StringList, "feat", "Field names to be calculated."); //
+  AddParameter(ParameterType_StringList, "feat", "Field names to be used for training."); //
   SetParameterDescription("feat","List of field names in the input vector data used as features for training."); //
   Superclass::DoInit();
...
@@ -16,87 +16,87 @@ TrainDimensionalityReductionApplicationBase<TInputValue,TOutputValue>
 {
-  AddChoice("model.tiedautoencoder", "Shark Tied Autoencoder");
+  AddChoice("algorithm.tiedautoencoder", "Shark Tied Autoencoder");
-  AddChoice("model.autoencoder", "Shark Autoencoder");
+  AddChoice("algorithm.autoencoder", "Shark Autoencoder");
-  SetParameterDescription("model.autoencoder",
+  SetParameterDescription("algorithm.autoencoder",
     "This group of parameters allows setting Shark autoencoder parameters. "
     );
   //Tied Autoencoder
-  AddParameter(ParameterType_Choice, "model.autoencoder.istied",
+  AddParameter(ParameterType_Choice, "algorithm.autoencoder.istied",
     "tied weighth <tied/untied>");
   SetParameterDescription(
-    "model.autoencoder.istied",
+    "algorithm.autoencoder.istied",
     "Parameter that determine if the weights are tied or not <tied/untied>");
-  AddChoice("model.autoencoder.istied.yes","Tied weigths");
+  AddChoice("algorithm.autoencoder.istied.yes","Tied weigths");
-  AddChoice("model.autoencoder.istied.no","Untied weights");
+  AddChoice("algorithm.autoencoder.istied.no","Untied weights");
   //Number Of Iterations
-  AddParameter(ParameterType_Int, "model.autoencoder.nbiter",
+  AddParameter(ParameterType_Int, "algorithm.autoencoder.nbiter",
     "Maximum number of iterations during training");
-  SetParameterInt("model.autoencoder.nbiter",100, false);
+  SetParameterInt("algorithm.autoencoder.nbiter",100, false);
   SetParameterDescription(
-    "model.autoencoder.nbiter",
+    "algorithm.autoencoder.nbiter",
     "The maximum number of iterations used during training.");
-  AddParameter(ParameterType_Int, "model.autoencoder.nbiterfinetuning",
+  AddParameter(ParameterType_Int, "algorithm.autoencoder.nbiterfinetuning",
     "Maximum number of iterations during training");
-  SetParameterInt("model.autoencoder.nbiterfinetuning",0, false);
+  SetParameterInt("algorithm.autoencoder.nbiterfinetuning",0, false);
   SetParameterDescription(
-    "model.autoencoder.nbiterfinetuning",
+    "algorithm.autoencoder.nbiterfinetuning",
     "The maximum number of iterations used during fine tuning of the whole network.");
-  AddParameter(ParameterType_Float, "model.autoencoder.epsilon",
+  AddParameter(ParameterType_Float, "algorithm.autoencoder.epsilon",
     " ");
-  SetParameterFloat("model.autoencoder.epsilon",0, false);
+  SetParameterFloat("algorithm.autoencoder.epsilon",0, false);
   SetParameterDescription(
-    "model.autoencoder.epsilon",
+    "algorithm.autoencoder.epsilon",
     " ");
-  AddParameter(ParameterType_Float, "model.autoencoder.initfactor",
+  AddParameter(ParameterType_Float, "algorithm.autoencoder.initfactor",
     " ");
-  SetParameterFloat("model.autoencoder.initfactor",1, false);
+  SetParameterFloat("algorithm.autoencoder.initfactor",1, false);
   SetParameterDescription(
-    "model.autoencoder.initfactor", "parameter that control the weight initialization of the autoencoder");
+    "algorithm.autoencoder.initfactor", "parameter that control the weight initialization of the autoencoder");
   //Number Of Hidden Neurons
-  AddParameter(ParameterType_StringList , "model.autoencoder.nbneuron", "Size");
+  AddParameter(ParameterType_StringList , "algorithm.autoencoder.nbneuron", "Size");
-  /*AddParameter(ParameterType_Int, "model.autoencoder.nbneuron",
+  /*AddParameter(ParameterType_Int, "algorithm.autoencoder.nbneuron",
     "Number of neurons in the hidden layer");
-  SetParameterInt("model.autoencoder.nbneuron",10, false);*/
+  SetParameterInt("algorithm.autoencoder.nbneuron",10, false);*/
   SetParameterDescription(
-    "model.autoencoder.nbneuron",
+    "algorithm.autoencoder.nbneuron",
     "The number of neurons in each hidden layer.");
   //Regularization
-  AddParameter(ParameterType_StringList, "model.autoencoder.regularization", "Strength of the regularization");
+  AddParameter(ParameterType_StringList, "algorithm.autoencoder.regularization", "Strength of the regularization");
-  SetParameterDescription("model.autoencoder.regularization",
+  SetParameterDescription("algorithm.autoencoder.regularization",
     "Strength of the L2 regularization used during training");
   //Noise strength
-  AddParameter(ParameterType_StringList, "model.autoencoder.noise", "Strength of the noise");
+  AddParameter(ParameterType_StringList, "algorithm.autoencoder.noise", "Strength of the noise");
-  SetParameterDescription("model.autoencoder.noise",
+  SetParameterDescription("algorithm.autoencoder.noise",
     "Strength of the noise");
   // Sparsity parameter
-  AddParameter(ParameterType_StringList, "model.autoencoder.rho", "Sparsity parameter");
+  AddParameter(ParameterType_StringList, "algorithm.autoencoder.rho", "Sparsity parameter");
-  SetParameterDescription("model.autoencoder.rho",
+  SetParameterDescription("algorithm.autoencoder.rho",
     "Sparsity parameter");
   // Sparsity regularization strength
-  AddParameter(ParameterType_StringList, "model.autoencoder.beta", "Sparsity regularization strength");
+  AddParameter(ParameterType_StringList, "algorithm.autoencoder.beta", "Sparsity regularization strength");
-  SetParameterDescription("model.autoencoder.beta",
+  SetParameterDescription("algorithm.autoencoder.beta",
     "Sparsity regularization strength");
-  AddParameter(ParameterType_OutputFilename, "model.autoencoder.learningcurve", "Learning curve");
+  AddParameter(ParameterType_OutputFilename, "algorithm.autoencoder.learningcurve", "Learning curve");
-  SetParameterDescription("model.autoencoder.learningcurve", "Learning error values");
+  SetParameterDescription("algorithm.autoencoder.learningcurve", "Learning error values");
-  MandatoryOff("model.autoencoder.learningcurve");
+  MandatoryOff("algorithm.autoencoder.learningcurve");
 }
@@ -107,10 +107,10 @@ TrainDimensionalityReductionApplicationBase<TInputValue,TOutputValue>
 ::BeforeTrainAutoencoder(typename ListSampleType::Pointer trainingListSample,
     std::string modelPath)
 {
-  std::string TiedWeigth = GetParameterString("model.autoencoder.istied");
+  std::string TiedWeigth = GetParameterString("algorithm.autoencoder.istied");
   std::cout << TiedWeigth << std::endl;
   if(TiedWeigth == "no")
   {
     TrainAutoencoder<AutoencoderModelType>(trainingListSample,modelPath);
   }
@@ -138,11 +138,11 @@ void TrainDimensionalityReductionApplicationBase<TInputValue,TOutputValue>::Trai
   itk::Array<float> regularization;
   itk::Array<float> rho;
   itk::Array<float> beta;
-  std::vector<std::basic_string<char>> s_nbneuron= GetParameterStringList("model.autoencoder.nbneuron");
+  std::vector<std::basic_string<char>> s_nbneuron= GetParameterStringList("algorithm.autoencoder.nbneuron");
-  std::vector<std::basic_string<char>> s_noise= GetParameterStringList("model.autoencoder.noise");
+  std::vector<std::basic_string<char>> s_noise= GetParameterStringList("algorithm.autoencoder.noise");
-  std::vector<std::basic_string<char>> s_regularization= GetParameterStringList("model.autoencoder.regularization");
+  std::vector<std::basic_string<char>> s_regularization= GetParameterStringList("algorithm.autoencoder.regularization");
-  std::vector<std::basic_string<char>> s_rho= GetParameterStringList("model.autoencoder.rho");
+  std::vector<std::basic_string<char>> s_rho= GetParameterStringList("algorithm.autoencoder.rho");
-  std::vector<std::basic_string<char>> s_beta= GetParameterStringList("model.autoencoder.beta");
+  std::vector<std::basic_string<char>> s_beta= GetParameterStringList("algorithm.autoencoder.beta");
   nb_neuron.SetSize(s_nbneuron.size());
   noise.SetSize(s_nbneuron.size());
   regularization.SetSize(s_nbneuron.size());
@@ -156,22 +156,22 @@ void TrainDimensionalityReductionApplicationBase<TInputValue,TOutputValue>::Trai
     beta[i]=std::stof(s_beta[i]);
   }
   dimredTrainer->SetNumberOfHiddenNeurons(nb_neuron);
-  dimredTrainer->SetNumberOfIterations(GetParameterInt("model.autoencoder.nbiter"));
+  dimredTrainer->SetNumberOfIterations(GetParameterInt("algorithm.autoencoder.nbiter"));
-  dimredTrainer->SetNumberOfIterationsFineTuning(GetParameterInt("model.autoencoder.nbiterfinetuning"));
+  dimredTrainer->SetNumberOfIterationsFineTuning(GetParameterInt("algorithm.autoencoder.nbiterfinetuning"));
-  dimredTrainer->SetEpsilon(GetParameterFloat("model.autoencoder.epsilon"));
+  dimredTrainer->SetEpsilon(GetParameterFloat("algorithm.autoencoder.epsilon"));
-  dimredTrainer->SetInitFactor(GetParameterFloat("model.autoencoder.initfactor"));
+  dimredTrainer->SetInitFactor(GetParameterFloat("algorithm.autoencoder.initfactor"));
   dimredTrainer->SetRegularization(regularization);
   dimredTrainer->SetNoise(noise);
   dimredTrainer->SetRho(rho);
   dimredTrainer->SetBeta(beta);
   dimredTrainer->SetWriteWeights(true);
-  if (HasValue("model.autoencoder.learningcurve") && IsParameterEnabled("model.autoencoder.learningcurve"))
+  if (HasValue("algorithm.autoencoder.learningcurve") && IsParameterEnabled("algorithm.autoencoder.learningcurve"))
   {
     std::cout << "yo" << std::endl;
     dimredTrainer->SetWriteLearningCurve(true);
-    dimredTrainer->SetLearningCurveFileName(GetParameterString("model.autoencoder.learningcurve"));
+    dimredTrainer->SetLearningCurveFileName(GetParameterString("algorithm.autoencoder.learningcurve"));
   }
   dimredTrainer->SetInputListSample(trainingListSample);
   std::cout << "before train" << std::endl;
...
@@ -16,18 +16,18 @@ TrainDimensionalityReductionApplicationBase<TInputValue,TOutputValue>
 {
-  AddChoice("model.pca", "Shark PCA");
+  AddChoice("algorithm.pca", "Shark PCA");
-  SetParameterDescription("model.pca",
+  SetParameterDescription("algorithm.pca",
     "This group of parameters allows setting Shark PCA parameters. "
     );
   //Output Dimension
-  AddParameter(ParameterType_Int, "model.pca.dim",
+  AddParameter(ParameterType_Int, "algorithm.pca.dim",
     "Dimension of the output of the pca transformation");
-  SetParameterInt("model.pca.dim",10, false);
+  SetParameterInt("algorithm.pca.dim",10, false);
   SetParameterDescription(
-    "model.pca.dim",
+    "algorithm.pca.dim",
     "Dimension of the output of the pca transformation.");
@@ -38,7 +38,7 @@ void TrainDimensionalityReductionApplicationBase<TInputValue,TOutputValue>
 ::TrainPCA(typename ListSampleType::Pointer trainingListSample,std::string modelPath)
 {
   typename PCAModelType::Pointer dimredTrainer = PCAModelType::New();
-  dimredTrainer->SetDimension(GetParameterInt("model.pca.dim"));
+  dimredTrainer->SetDimension(GetParameterInt("algorithm.pca.dim"));
   dimredTrainer->SetInputListSample(trainingListSample);
   dimredTrainer->SetWriteEigenvectors(true);
   dimredTrainer->Train();
...
@@ -14,62 +14,62 @@ TrainDimensionalityReductionApplicationBase<TInputValue,TOutputValue>
 ::InitSOMParams()
 {
-  AddChoice("model.som", "OTB SOM");
+  AddChoice("algorithm.som", "OTB SOM");
-  SetParameterDescription("model.som",
+  SetParameterDescription("algorithm.som",
     "This group of parameters allows setting SOM parameters. "
     );
-  AddParameter(ParameterType_Int, "model.som.dim","Dimension of the map");
+  AddParameter(ParameterType_Int, "algorithm.som.dim","Dimension of the map");
-  SetParameterDescription("model.som.dim","Dimension of the SOM map.");
+  SetParameterDescription("algorithm.som.dim","Dimension of the SOM map.");
-  AddParameter(ParameterType_StringList , "model.som.s", "Size");
+  AddParameter(ParameterType_StringList , "algorithm.som.s", "Size");
-  SetParameterDescription("model.som.s", "Size of the SOM map");
+  SetParameterDescription("algorithm.som.s", "Size of the SOM map");
-  MandatoryOff("model.som.s");
+  MandatoryOff("algorithm.som.s");
-  AddParameter(ParameterType_StringList , "model.som.n", "Size Neighborhood");
+  AddParameter(ParameterType_StringList , "algorithm.som.n", "Size Neighborhood");
-  SetParameterDescription("model.som.n", "Size of the initial neighborhood in the SOM map");
+  SetParameterDescription("algorithm.som.n", "Size of the initial neighborhood in the SOM map");
-  MandatoryOff("model.som.n");
+  MandatoryOff("algorithm.som.n");
-  AddParameter(ParameterType_Int, "model.som.sx", "SizeX");
+  AddParameter(ParameterType_Int, "algorithm.som.sx", "SizeX");
-  SetParameterDescription("model.som.sx", "X size of the SOM map");
+  SetParameterDescription("algorithm.som.sx", "X size of the SOM map");
-  MandatoryOff("model.som.sx");
+  MandatoryOff("algorithm.som.sx");
-  AddParameter(ParameterType_Int, "model.som.sy", "SizeY");
+  AddParameter(ParameterType_Int, "algorithm.som.sy", "SizeY");
-  SetParameterDescription("model.som.sy", "Y size of the SOM map");
+  SetParameterDescription("algorithm.som.sy", "Y size of the SOM map");
-  MandatoryOff("model.som.sy");
+  MandatoryOff("algorithm.som.sy");
-  AddParameter(ParameterType_Int, "model.som.nx", "NeighborhoodX");
+  AddParameter(ParameterType_Int, "algorithm.som.nx", "NeighborhoodX");
-  SetParameterDescription("model.som.nx", "X size of the initial neighborhood in the SOM map");
+  SetParameterDescription("algorithm.som.nx", "X size of the initial neighborhood in the SOM map");
-  MandatoryOff("model.som.nx");
+  MandatoryOff("algorithm.som.nx");
-  AddParameter(ParameterType_Int, "model.som.ny", "NeighborhoodY");
+  AddParameter(ParameterType_Int, "algorithm.som.ny", "NeighborhoodY");
-  SetParameterDescription("model.som.ny", "Y size of the initial neighborhood in the SOM map");
+  SetParameterDescription("algorithm.som.ny", "Y size of the initial neighborhood in the SOM map");
-  MandatoryOff("model.som.nx");
+  MandatoryOff("algorithm.som.nx");
-  AddParameter(ParameterType_Int, "model.som.ni", "NumberIteration");
+  AddParameter(ParameterType_Int, "algorithm.som.ni", "NumberIteration");
-  SetParameterDescription("model.som.ni", "Number of iterations for SOM learning");
+  SetParameterDescription("algorithm.som.ni", "Number of iterations for SOM learning");
-  MandatoryOff("model.som.ni");
+  MandatoryOff("algorithm.som.ni");
-  AddParameter(ParameterType_Float, "model.som.bi", "BetaInit");
+  AddParameter(ParameterType_Float, "algorithm.som.bi", "BetaInit");
-  SetParameterDescription("model.som.bi", "Initial learning coefficient");
+  SetParameterDescription("algorithm.som.bi", "Initial learning coefficient");
-  MandatoryOff("model.som.bi");
+  MandatoryOff("algorithm.som.bi");
-  AddParameter(ParameterType_Float, "model.som.bf", "BetaFinal");
+  AddParameter(ParameterType_Float, "algorithm.som.bf", "BetaFinal");
-  SetParameterDescription("model.som.bf", "Final learning coefficient");
+  SetParameterDescription("algorithm.som.bf", "Final learning coefficient");
-  MandatoryOff("model.som.bf");
+  MandatoryOff("algorithm.som.bf");
-  AddParameter(ParameterType_Float, "model.som.iv", "InitialValue");
+  AddParameter(ParameterType_Float, "algorithm.som.iv", "InitialValue");
-  SetParameterDescription("model.som.iv", "Maximum initial neuron weight");
+  SetParameterDescription("algorithm.som.iv", "Maximum initial neuron weight");
-  MandatoryOff("model.som.iv");
+  MandatoryOff("algorithm.som.iv");
-  SetDefaultParameterInt("model.som.sx", 32);
+  SetDefaultParameterInt("algorithm.som.sx", 32);
-  SetDefaultParameterInt("model.som.sy", 32);
+  SetDefaultParameterInt("algorithm.som.sy", 32);
-  SetDefaultParameterInt("model.som.nx", 10);
+  SetDefaultParameterInt("algorithm.som.nx", 10);
-  SetDefaultParameterInt("model.som.ny", 10);
+  SetDefaultParameterInt("algorithm.som.ny", 10);
-  SetDefaultParameterInt("model.som.ni", 5);
+  SetDefaultParameterInt("algorithm.som.ni", 5);
-  SetDefaultParameterFloat("model.som.bi", 1.0);
+  SetDefaultParameterFloat("algorithm.som.bi", 1.0);
-  SetDefaultParameterFloat("model.som.bf", 0.1);
+  SetDefaultParameterFloat("algorithm.som.bf", 0.1);
-  SetDefaultParameterFloat("model.som.iv", 10.0);
+  SetDefaultParameterFloat("algorithm.som.iv", 10.0);
 }
@@ -80,10 +80,10 @@ TrainDimensionalityReductionApplicationBase<TInputValue,TOutputValue>
 ::BeforeTrainSOM(typename ListSampleType::Pointer trainingListSample,
     std::string modelPath)
 {
-  int SomDim = GetParameterInt("model.som.dim");
+  int SomDim = GetParameterInt("algorithm.som.dim");
   std::cout << SomDim << std::endl;
   if(SomDim == 2)
   {
     TrainSOM<SOM2DModelType >(trainingListSample,modelPath);
   }
@@ -118,23 +118,23 @@ void TrainDimensionalityReductionApplicationBase<TInputValue,TOutputValue>
   typename somchoice::Pointer dimredTrainer = somchoice::New();
   unsigned int dim = dimredTrainer->GetDimension();
   std::cout << dim << std::endl;
-  dimredTrainer->SetNumberOfIterations(GetParameterInt("model.som.ni"));
+  dimredTrainer->SetNumberOfIterations(GetParameterInt("algorithm.som.ni"));
-  dimredTrainer->SetBetaInit(GetParameterFloat("model.som.bi"));
+  dimredTrainer->SetBetaInit(GetParameterFloat("algorithm.som.bi"));
   dimredTrainer->SetWriteMap(true);
-  dimredTrainer->SetBetaEnd(GetParameterFloat("model.som.bf"));
+  dimredTrainer->SetBetaEnd(GetParameterFloat("algorithm.som.bf"));
-  dimredTrainer->SetMaxWeight(GetParameterFloat("model.som.iv"));
+  dimredTrainer->SetMaxWeight(GetParameterFloat("algorithm.som.iv"));
   typename TemplateEstimatorType::SizeType size;
-  std::vector<std::basic_string<char>> s= GetParameterStringList("model.som.s");
+  std::vector<std::basic_string<char>> s= GetParameterStringList("algorithm.som.s");
   for (int i=0; i<dim; i++){
     size[i]=std::stoi(s[i]);
   }
   dimredTrainer->SetMapSize(size);
   typename TemplateEstimatorType::SizeType radius;
-  std::vector<std::basic_string<char>> n= GetParameterStringList("model.som.n");
+  std::vector<std::basic_string<char>> n= GetParameterStringList("algorithm.som.n");
   for (int i=0; i<dim; i++){
     radius[i]=std::stoi(n[i]);
   }
   dimredTrainer->SetNeighborhoodSizeInit(radius);
   dimredTrainer->SetListSample(trainingListSample);
   dimredTrainer->Train();
...
@@ -45,8 +45,8 @@ TrainDimensionalityReductionApplicationBase<TInputValue,TOutputValue>
   AddDocTag(Tags::Learning);
   // main choice parameter that will contain all dimensionality reduction options
-  AddParameter(ParameterType_Choice, "model", "model to use for the training");
+  AddParameter(ParameterType_Choice, "algorithm", "algorithm to use for the training");
-  SetParameterDescription("model", "Choice of the dimensionality reduction model to use for the training.");
+  SetParameterDescription("algorithm", "Choice of the dimensionality reduction algorithm to use for the training.");
   InitSOMParams();
@@ -73,10 +73,10 @@ TrainDimensionalityReductionApplicationBase<TInputValue,TOutputValue>
 {
   // get the name of the chosen machine learning model
-  const std::string modelName = GetParameterString("model");
+  const std::string modelName = GetParameterString("algorithm");
   // call specific train function
   if(modelName == "som")
   {
     BeforeTrainSOM(trainingListSample,modelPath);
   }
...