Commit f669c43d authored by Cresson Remi

WIP: chess board mode

parent e4eac697
......@@ -118,14 +118,17 @@ public:
// Grid
AddParameter(ParameterType_Group, "grid", "grid settings");
AddParameter(ParameterType_Int, "grid.step", "step between patches");
SetMinimumParameterIntValue ("grid.step", 1);
AddParameter(ParameterType_Int, "grid.psize", "patches size");
SetMinimumParameterIntValue ("grid.psize", 1);
// Strategy
AddParameter(ParameterType_Choice, "strategy", "Selection strategy for validation/training patches");
AddChoice("strategy.chessboard", "fifty fifty, like a chess board");
// Output points
AddParameter(ParameterType_OutputVectorData, "outvec", "output set of points");
AddParameter(ParameterType_OutputVectorData, "outtrain", "output set of points (training)");
AddParameter(ParameterType_OutputVectorData, "outvalid", "output set of points (validation)");
AddRAMParameter();
......@@ -139,6 +142,7 @@ public:
// Compute no-data mask
m_NoDataFilter = IsNoDataFilterType::New();
m_NoDataFilter->GetFunctor().SetNoDataValue(GetParameterFloat("nodata"));
m_NoDataFilter->SetInput(GetParameterFloatVectorImage("in"));
m_NoDataFilter->UpdateOutputInformation();
// Padding 1 pixel
......@@ -167,224 +171,119 @@ public:
m_MorphoFilter->SetInput(m_PadFilter->GetOutput());
m_MorphoFilter->SetForegroundValue(1);
m_MorphoFilter->SetBackgroundValue(0);
m_MorphoFilter->UpdateOutputInformation();
// Prepare output vector data
m_OutVectorDataTrain = VectorDataType::New();
m_OutVectorDataValid = VectorDataType::New();
m_OutVectorDataTrain->SetProjectionRef(m_MorphoFilter->GetOutput()->GetProjectionRef());
m_OutVectorDataValid->SetProjectionRef(m_MorphoFilter->GetOutput()->GetProjectionRef());
DataTreeType::Pointer treeTrain = m_OutVectorDataTrain->GetDataTree();
DataTreeType::Pointer treeValid = m_OutVectorDataValid->GetDataTree();
DataNodePointer rootTrain = treeTrain->GetRoot()->Get();
DataNodePointer rootValid = treeValid->GetRoot()->Get();
DataNodePointer documentTrain = DataNodeType::New();
DataNodePointer documentValid = DataNodeType::New();
documentTrain->SetNodeType(DOCUMENT);
documentValid->SetNodeType(DOCUMENT);
treeTrain->Add(documentTrain, rootTrain);
treeValid->Add(documentValid, rootValid);
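// Points will be attached to a DOCUMENT node placed under each tree root (the usual OTB VectorData layout)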
// Explicit streaming over the morphed mask, based on the RAM parameter
typedef otb::RAMDrivenStrippedStreamingManager<UInt8ImageType> StreamingManagerType;
StreamingManagerType::Pointer m_StreamingManager = StreamingManagerType::New();
m_StreamingManager->SetAvailableRAMInMB(GetParameterInt("ram"));
// Restrict the explored region so that the padded border is never sampled
UInt8ImageType::Pointer inputImage = m_MorphoFilter->GetOutput();
UInt8ImageType::RegionType entireRegion = inputImage->GetLargestPossibleRegion();
entireRegion.ShrinkByRadius(rad);
m_StreamingManager->PrepareStreaming(inputImage, entireRegion );
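// Grid origin offset used to map mask indices back onto the sampling grid (presumably the 1-pixel padding plus the radius 'rad' defined in the collapsed code above)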
UInt8ImageType::IndexType start;
start[0] = rad[0] + 1;
start[1] = rad[1] + 1;
// Stream over the mask: keep valid pixels that fall on the sampling grid and dispatch them between training and validation
int m_NumberOfDivisions = m_StreamingManager->GetNumberOfSplits();
UInt8ImageType::IndexType pos;
UInt8ImageType::IndexValueType step = GetParameterInt("grid.step");
pos.Fill(0);
bool black = true;
unsigned int id = 0;
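// 'black' will mark grid cells whose coordinates sum to an even number (the black squares of the chess board); 'id' numbers the selected points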
for (int m_CurrentDivision = 0; m_CurrentDivision < m_NumberOfDivisions; m_CurrentDivision++)
{
otbAppLogINFO("Processing split " << (m_CurrentDivision+1) << "/" << m_NumberOfDivisions);
UInt8ImageType::RegionType streamRegion = m_StreamingManager->GetSplit(m_CurrentDivision);
tf::PropagateRequestedRegion<UInt8ImageType>(inputImage, streamRegion);
itk::ImageRegionConstIterator<UInt8ImageType> inIt (inputImage, streamRegion);
for (inIt.GoToBegin(); !inIt.IsAtEnd(); ++inIt)
{
UInt8ImageType::InternalPixelType pixVal = inIt.Get();
UInt8ImageType::IndexType idx = inIt.GetIndex();
idx[0] -= start[0];
idx[1] -= start[1];
if (idx[0] % step == 0 && idx[1] % step == 0)
{
// The pixel lies on the sampling grid: select it if it is valid
if (pixVal == 1)
{
// Update grid position
pos[0] = idx[0] / step;
pos[1] = idx[1] / step;
// Chess board cell color from the grid cell parity
black = ((pos[0] + pos[1]) % 2 == 0);
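// e.g. grid cell (3, 5): 3 + 5 = 8 is even, so the cell is 'black' and the point goes to the training set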
// Compute coordinates
UInt8ImageType::PointType geo;
inputImage->TransformIndexToPhysicalPoint(inIt.GetIndex(), geo);
DataNodeType::PointType point;
point[0] = geo[0];
point[1] = geo[1];
// Add point to the VectorData tree
DataNodePointer newDataNode = DataNodeType::New();
newDataNode->SetPoint(point);
newDataNode->SetFieldAsInt("id", id);
id++;
// Dispatch the sample: black cells go to the training set, white cells to the validation set
if (black)
{
// Train
treeTrain->Add(newDataNode, documentTrain);
}
else
{
// Valid
treeValid->Add(newDataNode, documentValid);
}
}
}
}
}
//
// // Number of classes
// const LabelImageType::InternalPixelType number_of_classes = class_end - class_begin + 1;
//
// // Number of samples in each class (counted)
// vnl_vector<IndexValueType> number_of_samples = tmp_number_of_samples.extract(number_of_classes, class_begin);
//
// // Number of samples in each class (target)
// vnl_vector<IndexValueType> target_number_of_samples(number_of_classes, 0);
//
// otbAppLogINFO( "Number of classes: " << number_of_classes <<
// " starting from " << class_begin <<
// " to " << class_end << " (no-data is " << nodata << ")");
// otbAppLogINFO( "Number of pixels in each class: " << number_of_samples );
//
// // Check the smallest number of samples amongst classes
// IndexValueType min_elem_in_class = itk::NumericTraits<IndexValueType>::max();
// for (LabelImageType::InternalPixelType classIdx = 0 ; classIdx < number_of_classes ; classIdx++)
// min_elem_in_class = vcl_min(min_elem_in_class, number_of_samples[classIdx]);
//
// // If one class is empty, throw an error
// if (min_elem_in_class == 0)
// {
// otbAppLogFATAL("There is at least one class with no sample!")
// }
//
// // Sampling step for each class
// vnl_vector<IndexValueType> step_for_class(number_of_classes, 0);
//
// // Compute the sampling step for each class, depending on the chosen strategy
// switch (this->GetParameterInt("strategy"))
// {
// // constant
// case 0:
// {
// // Set the target number of samples in each class
// target_number_of_samples.fill(GetParameterInt("strategy.constant.nb"));
//
// // re-adjust the number of samples to select in each class
// if (min_elem_in_class < target_number_of_samples[0])
// {
// otbAppLogWARNING("Smallest class has " << min_elem_in_class <<
// " samples but a number of " << target_number_of_samples[0] <<
// " is given. Using " << min_elem_in_class);
// target_number_of_samples.fill( min_elem_in_class );
// }
//
// // Compute the sampling step
// for (LabelImageType::InternalPixelType classIdx = 0 ; classIdx < number_of_classes ; classIdx++)
// step_for_class[classIdx] = number_of_samples[classIdx] / target_number_of_samples[classIdx];
// }
// break;
//
// // total
// case 1:
// {
// // Compute the sampling step
// IndexValueType step = number_of_samples.sum() / this->GetParameterInt("strategy.total.v");
// if (step == 0)
// {
// otbAppLogWARNING("The number of samples available is smaller than the required number of samples. " <<
// "Setting sampling step to 1.");
// step = 1;
// }
// step_for_class.fill(step);
//
// // Compute the target number of samples
// for (LabelImageType::InternalPixelType classIdx = 0 ; classIdx < number_of_classes ; classIdx++)
// target_number_of_samples[classIdx] = number_of_samples[classIdx] / step;
//
// }
// break;
//
// // smallest
// case 2:
// {
// // Set the target number of samples to the smallest class
// target_number_of_samples.fill( min_elem_in_class );
//
// // Compute the sampling step
// for (LabelImageType::InternalPixelType classIdx = 0 ; classIdx < number_of_classes ; classIdx++)
// step_for_class[classIdx] = number_of_samples[classIdx] / target_number_of_samples[classIdx];
//
// }
// break;
//
// // All
// case 3:
// {
// // Easy
// step_for_class.fill(1);
// target_number_of_samples = number_of_samples;
// }
// break;
// default:
// otbAppLogFATAL("Strategy mode unknown :"<<this->GetParameterString("strategy"));
// break;
// }
//
// // Print quick summary
// otbAppLogINFO("Sampling summary:");
// otbAppLogINFO("\tClass\tStep\tTot");
// for (LabelImageType::InternalPixelType i = 0 ; i < number_of_classes ; i++)
// {
// vnl_vector<int> tmp (3,0);
// tmp[0] = i + class_begin;
// tmp[1] = step_for_class[i];
// tmp[2] = target_number_of_samples[i];
// otbAppLogINFO("\t" << tmp);
// }
//
// // Create a new vector data
// // TODO: how to pre-allocate the datatree?
// m_OutVectorData = VectorDataType::New();
// DataTreeType::Pointer tree = m_OutVectorData->GetDataTree();
// DataNodePointer root = tree->GetRoot()->Get();
// DataNodePointer document = DataNodeType::New();
// document->SetNodeType(DOCUMENT);
// tree->Add(document, root);
//
// // Dunno if this makes sense?
// m_OutVectorData->SetProjectionRef(inputImage->GetProjectionRef());
// m_OutVectorData->SetOrigin(inputImage->GetOrigin());
// m_OutVectorData->SetSpacing(inputImage->GetSpacing());
//
// // Second iteration, to prepare the samples
// vnl_vector<IndexValueType> sampledCount(number_of_classes, 0);
// vnl_vector<IndexValueType> iteratorCount(number_of_classes, 0);
// IndexValueType n_tot = 0;
// const IndexValueType target_n_tot = target_number_of_samples.sum();
// for (int m_CurrentDivision = 0; m_CurrentDivision < m_NumberOfDivisions; m_CurrentDivision++)
// {
// LabelImageType::RegionType streamRegion = m_StreamingManager->GetSplit(m_CurrentDivision);
// tf::PropagateRequestedRegion<LabelImageType>(inputImage, streamRegion);
// itk::ImageRegionConstIterator<LabelImageType> inIt (inputImage, streamRegion);
//
// for (inIt.GoToBegin() ; !inIt.IsAtEnd() ; ++inIt)
// {
// LabelImageType::InternalPixelType classVal = inIt.Get();
//
// if (classVal != nodata)
// {
// classVal -= class_begin;
//
// // Update the current position
// iteratorCount[classVal]++;
//
// // Every Xi samples (Xi is the step for class i)
// if (iteratorCount[classVal] % ((int) step_for_class[classVal]) == 0 &&
// sampledCount[classVal] < target_number_of_samples[classVal])
// {
// // Add this sample
// sampledCount[classVal]++;
// n_tot++;
// ShowProgress(n_tot, target_n_tot);
//
// // Create a point
// LabelImageType::PointType geo;
// inputImage->TransformIndexToPhysicalPoint(inIt.GetIndex(), geo);
// DataNodeType::PointType point;
// point[0] = geo[0];
// point[1] = geo[1];
//
// // Add point to the VectorData tree
// DataNodePointer newDataNode = DataNodeType::New();
// newDataNode->SetPoint(point);
// newDataNode->SetFieldAsInt("class", static_cast<int>(classVal));
// tree->Add(newDataNode, document);
//
// } // sample this one
// }
// } // next pixel
// } // next streaming region
// ShowProgressDone();
//
// otbAppLogINFO( "Number of samples in each class: " << sampledCount );
//
// otbAppLogINFO( "Writing output vector data");
//
// SetParameterOutputVectorData("outvec", m_OutVectorData);
otbAppLogINFO( "Writing output samples positions (Training)");
SetParameterOutputVectorData("outtrain", m_OutVectorDataTrain);
otbAppLogINFO( "Writing output samples positions (Validation)");
SetParameterOutputVectorData("outvalid", m_OutVectorDataValid);
}
private:
IsNoDataFilterType::Pointer m_NoDataFilter;
UInt8ImageType::Pointer m_PadFilter;
MorphoFilterType::Pointer m_MorphoFilter;
VectorDataType::Pointer m_OutVectorData;
PadFilterType::Pointer m_PadFilter;
MorphoFilterType::Pointer m_MorphoFilter;
VectorDataType::Pointer m_OutVectorDataTrain;
VectorDataType::Pointer m_OutVectorDataValid;
}; // end of class
......