Commit bc8b732a authored by Cédric Traizet's avatar Cédric Traizet
Browse files

Multi-layer autoencoders now working (application and reduction filter)

No related merge requests found
Showing with 62 additions and 28 deletions
+62 -28
......@@ -27,9 +27,9 @@ public:
itkNewMacro(Self);
itkTypeMacro(AutoencoderModel, DimensionalityReductionModel);
unsigned int GetDimension() {return m_NumberOfHiddenNeurons;}; // Override the Dimensionality Reduction model method, it is used in the dimensionality reduction filter to set the output image size
itkGetMacro(NumberOfHiddenNeurons,unsigned int);
itkSetMacro(NumberOfHiddenNeurons,unsigned int);
unsigned int GetDimension() {return m_NumberOfHiddenNeurons[m_net.size()-1];}; // Override the Dimensionality Reduction model method, it is used in the dimensionality reduction filter to set the output image size
itkGetMacro(NumberOfHiddenNeurons,itk::Array<unsigned int>);
itkSetMacro(NumberOfHiddenNeurons,itk::Array<unsigned int>);
itkGetMacro(NumberOfIterations,unsigned int);
itkSetMacro(NumberOfIterations,unsigned int);
......@@ -47,6 +47,7 @@ public:
void Load(const std::string & filename, const std::string & name="") ITK_OVERRIDE;
void Train() ITK_OVERRIDE;
void TrainOneLayer(unsigned int, shark::Data<shark::RealVector> &);
protected:
AutoencoderModel();
......@@ -59,8 +60,9 @@ private:
/** Network attributes */
std::vector<AutoencoderType> m_net;
unsigned int m_NumberOfHiddenNeurons;
itk::Array<unsigned int> m_NumberOfHiddenNeurons;
/** Training parameters */
unsigned int m_NumberOfIterations;
double m_Regularization; // L2 Regularization parameter
......
......@@ -31,23 +31,35 @@ AutoencoderModel<TInputValue,AutoencoderType>::~AutoencoderModel()
{
}
template <class TInputValue, class AutoencoderType>
void AutoencoderModel<TInputValue,AutoencoderType>::Train()
{
	// Greedy layer-wise training of the stacked autoencoder.
	// The input list sample is converted once to a Shark data container;
	// TrainOneLayer() encodes `inputSamples` in place after fitting each
	// layer, so every subsequent layer is trained on the codes produced
	// by the previous one.
	// NOTE(review): removed the unused local `AutoencoderType net;` — the
	// network is now constructed inside TrainOneLayer(), so the local here
	// was dead leftover from the refactoring.
	std::vector<shark::RealVector> features;
	Shark::ListSampleToSharkVector(this->GetInputListSample(), features);
	shark::Data<shark::RealVector> inputSamples = shark::createDataFromRange( features );
	// One pass per requested hidden-layer size (see m_NumberOfHiddenNeurons).
	for (unsigned int i = 0 ; i < m_NumberOfHiddenNeurons.Size(); ++i)
	{
		TrainOneLayer( m_NumberOfHiddenNeurons[i], inputSamples);
	}
}
template <class TInputValue, class AutoencoderType>
void AutoencoderModel<TInputValue,AutoencoderType>::TrainOneLayer(unsigned int nbneuron, shark::Data<shark::RealVector> &samples)
{
AutoencoderType net;
/*std::vector<shark::RealVector> features;
Shark::ListSampleToSharkVector(this->GetInputListSample(), features);
shark::Data<shark::RealVector> inputSamples = shark::createDataFromRange( features );
std::size_t inputs = dataDimension(inputSamples);
net.setStructure(inputs, m_NumberOfHiddenNeurons);
*/ //in Train() now
std::size_t inputs = dataDimension(samples);
net.setStructure(inputs, nbneuron);
initRandomUniform(net,-0.1*std::sqrt(1.0/inputs),0.1*std::sqrt(1.0/inputs));
shark::ImpulseNoiseModel noise(m_Noise,0.0); //set an input pixel with probability m_Noise to 0
shark::ConcatenatedModel<shark::RealVector,shark::RealVector> model = noise>> net;
shark::LabeledData<shark::RealVector,shark::RealVector> trainSet(inputSamples,inputSamples);//labels identical to inputs
shark::LabeledData<shark::RealVector,shark::RealVector> trainSet(samples,samples);//labels identical to inputs
shark::SquaredLoss<shark::RealVector> loss;
shark::ErrorFunction error(trainSet, &model, &loss);
shark::TwoNormRegularizer regularizer(error.numberOfVariables());
......@@ -63,7 +75,7 @@ void AutoencoderModel<TInputValue,AutoencoderType>::Train()
}
net.setParameterVector(optimizer.solution().point);
m_net.push_back(net);
samples = net.encode(samples);
}
......@@ -108,16 +120,19 @@ void AutoencoderModel<TInputValue,AutoencoderType>::Load(const std::string & fil
char autoencoder[256];
ifs.getline(autoencoder,256);
std::string autoencoderstr(autoencoder);
std::cout << "oy" << std::endl;
if (autoencoderstr != net.name()){
itkExceptionMacro(<< "Error opening " << filename.c_str() );
}
std::cout << "yo" << std::endl;
boost::archive::polymorphic_text_iarchive ia(ifs);
//m_net.read(ia);
ia >> m_net;
ifs.close();
m_NumberOfHiddenNeurons = m_net[0].numberOfHiddenNeurons();
m_NumberOfHiddenNeurons.SetSize(m_net.size());
for (int i=0; i<m_net.size(); i++){
m_NumberOfHiddenNeurons[i] = m_net[i].numberOfHiddenNeurons();
}
}
......@@ -135,15 +150,18 @@ AutoencoderModel<TInputValue,AutoencoderType>::DoPredict(const InputSampleType &
features.push_back(samples);
shark::Data<shark::RealVector> data = shark::createDataFromRange(features);
data = m_net[0].encode(data);
for (int i=0; i<m_net.size(); i++){ // loop over all autoencoders in m_net
data = m_net[i].encode(data);
}
TargetSampleType target;
target.SetSize(m_NumberOfHiddenNeurons);
target.SetSize(m_NumberOfHiddenNeurons[m_net.size()-1]);
for(unsigned int a = 0; a < m_NumberOfHiddenNeurons; ++a){
for(unsigned int a = 0; a < m_NumberOfHiddenNeurons[m_net.size()-1]; ++a){
target[a]=data.element(0)[a];
}
return target;
}
......@@ -155,12 +173,16 @@ void AutoencoderModel<TInputValue,AutoencoderType>
Shark::ListSampleRangeToSharkVector(input, features,startIndex,size);
shark::Data<shark::RealVector> data = shark::createDataFromRange(features);
TargetSampleType target;
data = m_net[0].encode(data);
for (auto net :m_net ){ // loop over all autoencoders in m_net
data = net.encode(data);
}
unsigned int id = startIndex;
target.SetSize(m_NumberOfHiddenNeurons);
for(const auto& p : data.elements()){
for(unsigned int a = 0; a < m_NumberOfHiddenNeurons; ++a){
target.SetSize(m_NumberOfHiddenNeurons[m_net.size()-1]);
for(const auto& p : data.elements())
{
for(unsigned int a = 0; a < m_NumberOfHiddenNeurons[m_net.size()-1]; ++a){
target[a]=p[a];
}
targets->SetMeasurementVector(id,target);
......
......@@ -46,12 +46,13 @@ cbLearningApplicationBaseDR<TInputValue,TOutputValue>
//Number Of Hidden Neurons
AddParameter(ParameterType_Int, "model.autoencoder.nbneuron",
AddParameter(ParameterType_StringList , "model.autoencoder.nbneuron", "Size");
/*AddParameter(ParameterType_Int, "model.autoencoder.nbneuron",
"Number of neurons in the hidden layer");
SetParameterInt("model.autoencoder.nbneuron",10, false);
SetParameterInt("model.autoencoder.nbneuron",10, false);*/
SetParameterDescription(
"model.autoencoder.nbneuron",
"The number of neurons in the hidden layer.");
"The number of neurons in each hidden layer.");
//Regularization
AddParameter(ParameterType_Float, "model.autoencoder.regularization", "Strength of the regularization");
......@@ -100,7 +101,14 @@ void cbLearningApplicationBaseDR<TInputValue,TOutputValue>
::TrainAutoencoder(typename ListSampleType::Pointer trainingListSample,std::string modelPath)
{
typename autoencoderchoice::Pointer dimredTrainer = autoencoderchoice::New();
dimredTrainer->SetNumberOfHiddenNeurons(GetParameterInt("model.autoencoder.nbneuron"));
itk::Array<unsigned int> nb_neuron;
std::vector<std::basic_string<char>> s= GetParameterStringList("model.autoencoder.nbneuron");
nb_neuron.SetSize(s.size());
for (int i=0; i<s.size(); i++){ // This will be templated later (the 3)
nb_neuron[i]=std::stoi(s[i]);
}
std::cout << nb_neuron << std::endl;
dimredTrainer->SetNumberOfHiddenNeurons(nb_neuron);
dimredTrainer->SetNumberOfIterations(GetParameterInt("model.autoencoder.nbiter"));
dimredTrainer->SetRegularization(GetParameterFloat("model.autoencoder.regularization"));
dimredTrainer->SetRegularization(GetParameterFloat("model.autoencoder.noise"));
......@@ -108,7 +116,9 @@ void cbLearningApplicationBaseDR<TInputValue,TOutputValue>
std::cout << "before train" << std::endl;
dimredTrainer->Train();
std::cout << "after train" << std::endl;
dimredTrainer->Save(modelPath);
dimredTrainer->Save(modelPath);
}
} //end namespace wrapper
......
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment