Commit 718a8f35 authored by Cresson Remi's avatar Cresson Remi
Browse files

DOC: add documentation in filters classes

parent 35a892a4
......@@ -30,15 +30,36 @@ namespace otb
/**
* \class TensorflowMultisourceModelBase
* \brief This filter is base for TensorFlow model over multiple input images.
* \brief This filter is the base class for all TensorFlow model filters.
*
* The filter takes N input images and feed the TensorFlow model.
* Names of input placeholders must be specified using the
* SetInputPlaceholdersNames method
* This abstract class implements a number of generic methods that are used in
* filters that use the TensorFlow engine.
*
* TODO:
* Replace FOV (Field Of View) --> RF (Receptive Field)
* Replace FEO (Field Of Expr) --> EF (Expression Field)
* The filter has N input images (Input), each one corresponding to a placeholder
 * that will feed the TensorFlow model. For each input, the name of the
* placeholder (InputPlaceholders, a std::vector of std::string) and the
* receptive field (InputReceptiveFields, a std::vector of SizeType) i.e. the
* input space that the model will "see", must be provided. Hence the number of
* input images, and the size of InputPlaceholders and InputReceptiveFields must
* be the same. If not, an exception will be thrown during the method
* GenerateOutputInformation().
*
* The TensorFlow graph and session must be set using the SetGraph() and
* SetSession() methods.
*
* Target nodes names of the TensorFlow graph that must be triggered can be set
* with the SetTargetNodesNames.
*
 * The OutputTensorNames consists of a std::vector of std::string, and
* corresponds to the names of tensors that will be computed during the session.
* As for input placeholders, output tensors field of expression
* (OutputExpressionFields, a std::vector of SizeType), i.e. the output
* space that the TensorFlow model will "generate", must be provided.
*
* Finally, a list of scalar placeholders can be fed in the form of std::vector
 * of std::string, each one expressing the assignment of a single-valued
* placeholder, e.g. "drop_rate=0.5 learning_rate=0.002 toto=true".
* See otb::tf::ExpressionToTensor() to know more about syntax.
*
* \ingroup OTBTensorflow
*/
......@@ -135,8 +156,8 @@ private:
void operator=(const Self&); //purposely not implemented
// Tensorflow graph and session
tensorflow::GraphDef m_Graph; // The tensorflow graph
tensorflow::Session * m_Session; // The tensorflow session
tensorflow::GraphDef m_Graph; // The TensorFlow graph
tensorflow::Session * m_Session; // The TensorFlow session
// Model parameters
StringList m_InputPlaceholders; // Input placeholders names
......@@ -144,9 +165,9 @@ private:
StringList m_OutputTensors; // Output tensors names
SizeListType m_OutputExpressionFields; // Output expression fields
DictType m_UserPlaceholders; // User placeholders
StringList m_TargetNodesNames; // User target tensors
StringList m_TargetNodesNames; // User nodes target
// Read-only
// Internal, read-only
DataTypeListType m_InputTensorsDataTypes; // Input tensors datatype
DataTypeListType m_OutputTensorsDataTypes; // Output tensors datatype
TensorShapeProtoList m_InputTensorsShapes; // Input tensors shapes
......
......@@ -105,9 +105,9 @@ TensorflowMultisourceModelBase<TInputImage, TOutputImage>
{
// Check that the number of the following is the same
// - placeholders names
// - patches sizes
// - input image
// - input placeholders names
// - input receptive fields
// - input images
const unsigned int nbInputs = this->GetNumberOfInputs();
if (nbInputs != m_InputReceptiveFields.size() || nbInputs != m_InputPlaceholders.size())
{
......@@ -116,6 +116,15 @@ TensorflowMultisourceModelBase<TInputImage, TOutputImage>
" and the number of input tensors names is " << m_InputPlaceholders.size());
}
// Check that the number of the following is the same
// - output tensors names
// - output expression fields
if (m_OutputExpressionFields.size() != m_OutputTensors.size())
{
itkExceptionMacro("Number of output tensors names is " << m_OutputTensors.size() <<
" but the number of output fields of expression is " << m_OutputExpressionFields.size());
}
//////////////////////////////////////////////////////////////////////////////////////////
// Get tensors information
//////////////////////////////////////////////////////////////////////////////////////////
......
......@@ -26,17 +26,38 @@ namespace otb
/**
* \class TensorflowMultisourceModelFilter
* \brief This filter apply a TensorFlow model over multiple input images.
 * \brief This filter applies a TensorFlow model over multiple input images and
* generates one output image corresponding to outputs of the model.
*
 * The filter takes N input images and feeds the TensorFlow model to produce
* one output image of desired TF op results.
* one output image corresponding to the desired results of the TensorFlow model.
* Names of input placeholders and output tensors must be specified using the
* SetPlaceholders() and SetTensors() methods.
*
* Example: we have a TensorFlow model which runs the input images "x1" and "x2"
* and produces the output image "y".
* "x1" and "x2" are two TF placeholders, we set InputPlaceholder={"x1","x2"}
* "y1" corresponds to one TF op output, we set OutputTensors={"y1"}
* "x1" and "x2" are two placeholders, we set InputPlaceholder={"x1","x2"}
* "y1" corresponds to one output tensor, we set OutputTensors={"y1"}
*
* The filter can work in two modes:
*
* 1.Patch-based mode:
* Extract and process patches independently at regular intervals.
 * Patch sizes are equal to the receptive field sizes of the inputs. For each input,
* a tensor with a number of elements equal to the number of patches is fed to the
* TensorFlow model.
*
 * 2. Fully-convolutional mode:
* Unlike patch-based mode, it allows the processing of an entire requested region.
* For each input, a tensor composed of one single element, corresponding to the input
 * requested region, is fed to the TF model. This mode requires that receptive fields,
* expression fields and scale factors are consistent with operators implemented in the
* TensorFlow model, input images physical spacing and alignment.
* The filter produces output blocks avoiding any blocking artifact in fully-convolutional
 * mode. This is done by computing input image regions that are aligned to the expression
* field sizes of the model (eventually, input requested regions are enlarged, but still
* aligned), and keeping only the subset of the output corresponding to the requested
* output region.
*
* The reference grid for the output image is the same as the first input image.
* This grid can be scaled by setting the OutputSpacingScale value.
......@@ -48,8 +69,10 @@ namespace otb
* If the number of values in the output tensors (produced by the model) don't
* fit with the output image region, an exception will be thrown.
*
* The TensorFlow Graph is passed using the SetGraph() method
* The TensorFlow Session is passed using the SetSession() method
*
* TODO: the filter must be able to output multiple images eventually at different
* resolutions/sizes/origins.
*
*
* \ingroup OTBTensorflow
*/
......
......@@ -279,10 +279,10 @@ TensorflowMultisourceModelFilter<TInputImage, TOutputImage>
// Set output image origin/spacing/size/projection
ImageType * outputPtr = this->GetOutput();
outputPtr->SetNumberOfComponentsPerPixel(outputPixelSize);
outputPtr->SetProjectionRef ( projectionRef );
outputPtr->SetOrigin ( m_OutputOrigin );
outputPtr->SetSignedSpacing ( m_OutputSpacing );
outputPtr->SetLargestPossibleRegion( largestPossibleRegion);
outputPtr->SetProjectionRef ( projectionRef );
outputPtr->SetOrigin ( m_OutputOrigin );
outputPtr->SetSignedSpacing ( m_OutputSpacing );
outputPtr->SetLargestPossibleRegion( largestPossibleRegion );
// Set null pixel
m_NullPixel.SetSize(outputPtr->GetNumberOfComponentsPerPixel());
......@@ -329,7 +329,7 @@ TensorflowMultisourceModelFilter<TInputImage, TOutputImage>
// We need to avoid some extrapolation when mode is patch-based.
// The reason is that, when some input have a lower spacing than the
// reference image, the requested region of this lower res input image
// can be one pixel larger when the input image regions are not physicaly
// can be one pixel larger when the input image regions are not physically
// aligned.
if (!m_FullyConvolutional)
{
......@@ -341,8 +341,6 @@ TensorflowMultisourceModelFilter<TInputImage, TOutputImage>
// Update the requested region
inputImage->SetRequestedRegion(inRegion);
// std::cout << "Input #" << i << " region starts at " << inRegion.GetIndex() << " with size " << inRegion.GetSize() << std::endl;
} // next image
}
......@@ -398,7 +396,7 @@ TensorflowMultisourceModelFilter<TInputImage, TOutputImage>
// Recopy the whole input
tf::RecopyImageRegionToTensorWithCast<TInputImage>(inputPtr, reqRegion, inputTensor, 0);
// Input #1 : the tensor of patches (aka the batch)
// Input is the tensor representing the subset of image
DictElementType input = { this->GetInputPlaceholders()[i], inputTensor };
inputs.push_back(input);
}
......@@ -431,7 +429,7 @@ TensorflowMultisourceModelFilter<TInputImage, TOutputImage>
elemIndex++;
}
// Input #1 : the tensor of patches (aka the batch)
// Input is the tensor of patches (aka the batch)
DictElementType input = { this->GetInputPlaceholders()[i], inputTensor };
inputs.push_back(input);
......
......@@ -23,7 +23,22 @@ namespace otb
/**
* \class TensorflowMultisourceModelLearningBase
* \brief This filter is the base class for learning filters.
* \brief This filter is the base class for all learning filters.
*
* The batch size can be set using the SetBatchSize() method.
* The streaming can be activated to allow the processing of huge datasets.
* However, it should be noted that the process is significantly slower due to
 * multiple reads of input patches. When streaming is deactivated, the whole
* patches images are read and kept in memory, guaranteeing fast patches access.
*
 * The GenerateData() method implements a loop over batches that calls the ProcessBatch()
* method for each one.
* The ProcessBatch() function is a pure virtual method that must be implemented in
* child classes.
*
* The PopulateInputTensors() method converts input patches images into placeholders
 * that will be fed to the model. It is a method common to learning filters, and
* is intended to be used in child classes, as a kind of helper.
*
* \ingroup OTBTensorflow
*/
......
......@@ -30,9 +30,6 @@ namespace otb
* \class TensorflowMultisourceModelTrain
 * \brief This filter trains a TensorFlow model over multiple input images.
*
* The filter takes N input images and feed the TensorFlow model.
*
*
* \ingroup OTBTensorflow
*/
template <class TInputImage>
......
......@@ -32,7 +32,11 @@ namespace otb
* \class TensorflowMultisourceModelValidate
* \brief This filter validates a TensorFlow model over multiple input images.
*
* The filter takes N input images and feed the TensorFlow model.
* This filter computes confusion matrices for each output tensor.
* The references (i.e. ground truth for validation) must be set using the
* SetReferences() method. References must be provided in the same order as
 * their related output tensors (i.e. names and patch sizes). If the number of
 * references differs from the number of output tensors, an exception is thrown.
*
* \ingroup OTBTensorflow
*/
......@@ -81,7 +85,6 @@ public:
typedef std::vector<ConfMatType> ConfMatListType;
typedef itk::ImageRegionConstIterator<ImageType> IteratorType;
/** Set and Get the input references */
virtual void SetInputReferences(ImageListType input);
ImagePointerType GetInputReference(unsigned int index);
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment