Commit 411bdecd authored by Antoine Regimbeau's avatar Antoine Regimbeau
Browse files

Merge branch 'develop' into ci_gitclonedir

No related merge requests found
Showing with 291 additions and 403 deletions
+291 -403
......@@ -22,22 +22,6 @@
./BandMathFilterExample Input/qb_RoadExtract.tif Output/RoadExtractBandMath.tif Output/qb_BandMath-pretty.jpg
*/
// This filter is based on the mathematical parser library muParser.
// The built in functions and operators list is available at:
// http://muparser.sourceforge.net/mup_features.html.
//
// In order to use this filter, at least one input image should be
// set. An associated variable name can be specified or not by using
// the corresponding SetNthInput method. For the nth input image, if
// no associated variable name has been specified, a default variable
// name is given by concatenating the letter "b" (for band) and the
// corresponding input index.
//
// The next step is to set the expression according to the variable
// names. For example, in the default case with three input images the
// following expression is valid: ``(b1+b2)*b3``.
#include "itkMacro.h"
#include <iostream>
......@@ -65,11 +49,10 @@ int main(int argc, char* argv[])
return EXIT_FAILURE;
}
// We start by the typedef needed for reading and
// We start by the typedefs needed for reading and
// writing the images. The BandMathImageFilter class
// works with Image as input, so we need to define additional
// filters to extract each layer of the multispectral image.
typedef double PixelType;
typedef otb::VectorImage<PixelType, 2> InputImageType;
typedef otb::Image<PixelType, 2> OutputImageType;
......@@ -78,13 +61,12 @@ int main(int argc, char* argv[])
typedef otb::ImageFileReader<InputImageType> ReaderType;
typedef otb::ImageFileWriter<OutputImageType> WriterType;
// We can now define the type for the filter:
// We can now define the type for the filter
typedef otb::BandMathImageFilter<OutputImageType> FilterType;
// We instantiate the filter, the reader, and the writer:
// We instantiate the filter, the reader, and the writer
ReaderType::Pointer reader = ReaderType::New();
WriterType::Pointer writer = WriterType::New();
FilterType::Pointer filter = FilterType::New();
writer->SetInput(filter->GetOutput());
......@@ -93,9 +75,9 @@ int main(int argc, char* argv[])
reader->UpdateOutputInformation();
// We now need to extract each band from the input \doxygen{otb}{VectorImage},
// it illustrates the use of the \doxygen{otb}{VectorImageToImageList}.
// Each extracted layer is an input to the \doxygen{otb}{BandMathImageFilter}:
// We now need to extract each band from the input VectorImage,
// it illustrates the use of the VectorImageToImageList.
// Each extracted layer is an input to the BandMathImageFilter
VectorImageToImageListType::Pointer imageList = VectorImageToImageListType::New();
imageList->SetInput(reader->GetOutput());
......@@ -111,8 +93,8 @@ int main(int argc, char* argv[])
// Now we can define the mathematical expression to perform on the layers (b1, b2, b3, b4).
// The filter takes advantage of the parsing capabilities of the muParser library and
// allows setting the expression as on a digital calculator.
//
// The expression below returns 255 if the ratio $(NIR-RED)/(NIR+RED)$ is greater than 0.4 and 0 if not.
// The expression below returns 255 if the ratio (NIR-RED)/(NIR+RED) is greater than 0.4 and 0 if not.
filter->SetExpression("if((b4-b3)/(b4+b3) > 0.4, 255, 0)");
#ifdef OTB_MUPARSER_HAS_CXX_LOGICAL_OPERATORS
......@@ -125,19 +107,7 @@ int main(int argc, char* argv[])
writer->Update();
// The muParser library also provides the possibility to extend existing built-in functions. For example,
// you can use the OTB expression "ndvi(b3, b4)" with the filter. In this instance, the mathematical expression would be
// \textit{if($ndvi(b3, b4)>0.4$, 255, 0)}, which would return the same result.
// Figure~\ref{fig:BandMathImageFilter} shows the result of the threshold applied to the NDVI index
// of a Quickbird image.
// \begin{figure}
// \center
// \includegraphics[width=0.45\textwidth]{qb_ExtractRoad_pretty.eps}
// \includegraphics[width=0.45\textwidth]{qb_BandMath-pretty.eps}
// \itkcaption[Band Math]{From left to right:
// Original image, thresholded NDVI index.}
// \label{fig:BandMathImageFilter}
// \end{figure}
// you can use the OTB expression "ndvi(b3, b4)" with the filter. In this instance, the mathematical expression would be "if(ndvi(b3, b4)>0.4, 255, 0)", which would return the same result.
typedef otb::Image<unsigned char, 2> OutputPrettyImageType;
typedef otb::ImageFileWriter<OutputPrettyImageType> PrettyImageFileWriterType;
......@@ -151,6 +121,4 @@ int main(int argc, char* argv[])
prettyWriter->SetFileName(argv[3]);
prettyWriter->Update();
return EXIT_SUCCESS;
}
The :doxygen:`BandMathImageFilter` is based on the mathematical parser library muParser.
The built in functions and operators list is available at:
http://muparser.sourceforge.net/mup_features.html.
In order to use this filter, at least one input image should be
set. An associated variable name can be specified or not by using
the corresponding ``SetNthInput`` method. For the nth input image, if
no associated variable name has been specified, a default variable
name is given by concatenating the letter "b" (for band) and the
corresponding input index.
The next step is to set the expression according to the variable
names. For example, in the default case with three input images the
following expression is valid: ``(b1+b2)*b3``.
.. |image1| image:: /Input/qb_ExtractRoad_pretty.png
.. |image2| image:: /Output/qb_BandMath-pretty.jpg
.. _Figure1:
+--------------------------+-------------------------+
| |image1| | |image2| |
+--------------------------+-------------------------+
NDVI of a Quickbird image computed with BandMathImageFilter
......@@ -33,19 +33,6 @@
./DEMToRainbowExample Output/DEMToReliefImageGenerator.png 6.5 45.5 500 500 0.002 -0.002 Input/DEM_srtm relief
*/
// In some situation, it is desirable to represent a gray scale image in color for easier
// interpretation. This is particularly the case if pixel values in the image are used
// to represent some data such as elevation, deformation map,
// interferogram. In this case, it is important to ensure that similar
// values will get similar colors. You can notice how this requirement
// differs from the previous case.
//
// The following example illustrates the use of the \doxygen{otb}{DEMToImageGenerator} class
// combined with the \doxygen{otb}{ScalarToRainbowRGBPixelFunctor}. You can refer to the
// source code or to section \ref{sec:ReadDEM} for the DEM conversion to image,
// we will focus on the color conversion part here.
#include "otbImageFileReader.h"
#include "otbImageFileWriter.h"
......@@ -103,9 +90,9 @@ int main(int argc, char* argv[])
demToImage->SetOutputSpacing(spacing);
// As in the previous example, the \doxygen{itk}{ScalarToRGBColormapImageFilter} is
// The ScalarToRGBColormapImageFilter is
// the filter in charge of calling the functor we specify to do the work for
// each pixel. Here it is the \doxygen{otb}{ScalarToRainbowRGBPixelFunctor}.
// each pixel. Here it is the ScalarToRainbowRGBPixelFunctor.
typedef itk::ScalarToRGBColormapImageFilter<ImageType, RGBImageType> ColorMapFilterType;
ColorMapFilterType::Pointer colormapper = ColorMapFilterType::New();
......@@ -146,34 +133,5 @@ int main(int argc, char* argv[])
writer->SetInput(colormapper->GetOutput());
try
{
writer->Update();
}
catch (itk::ExceptionObject& excep)
{
std::cerr << "Exception caught !" << std::endl;
std::cerr << excep << std::endl;
}
catch (...)
{
std::cout << "Unknown exception !" << std::endl;
return EXIT_FAILURE;
}
// Figure~\ref{fig:RAINBOW_FILTER} shows the effect of applying the filter to
// a gray scale image.
//
// \begin{figure}
// \center
// \includegraphics[width=0.44\textwidth]{pretty_DEMToImageGenerator.eps}
// \includegraphics[width=0.44\textwidth]{DEMToRainbowImageGenerator.eps}
// \includegraphics[width=0.44\textwidth]{DEMToHotImageGenerator.eps}
// \includegraphics[width=0.44\textwidth]{DEMToReliefImageGenerator.eps}
// \itkcaption[Grayscale to color]{The gray level DEM extracted from SRTM
// data (top-left) and the same area represented in color.}
// \label{fig:RAINBOW_FILTER}
// \end{figure}
return EXIT_SUCCESS;
writer->Update();
}
In some situations, it is desirable to represent a gray scale image in color for easier
interpretation. This is particularly the case if pixel values in the image are used
to represent some data such as elevation, deformation map,
interferogram. In this case, it is important to ensure that similar
values will get similar colors. You can notice how this requirement
differs from the previous case.
The following example illustrates the use of the :doxygen:`DEMToImageGenerator`
class combined with the `ScalarToRainbowRGBPixelFunctor`. You can refer to the
source code for the DEM conversion to image, we will focus on the color
conversion part here.
.. |image1| image:: /Output/DEMToRainbowImageGenerator.png
.. |image2| image:: /Output/DEMToHotImageGenerator.png
.. |image3| image:: /Output/DEMToReliefImageGenerator.png
.. _Figure1:
+--------------------------+-------------------------+-------------------------+
| |image1| | |image2| | |image3| |
+--------------------------+-------------------------+-------------------------+
......@@ -23,34 +23,6 @@
./FrostImageFilter Input/GomaSmall.png Output/GomaSmallFrostFiltered.png 5 0.1
*/
// This example illustrates the use of the \doxygen{otb}{FrostImageFilter}.
// This filter belongs to the family of the edge-preserving smoothing
// filters which are usually used for speckle reduction in radar
// images.
//
// This filter uses a negative exponential convolution kernel.
// The output of the filter for pixel p is:
// $ \hat I_{s}=\sum_{p\in\eta_{p}} m_{p}I_{p} $
//
// where : $ m_{p}=\frac{KC_{s}^{2}\exp(-KC_{s}^{2}d_{s, p})}{\sum_{p\in\eta_{p}} KC_{s}^{2}\exp(-KC_{s}^{2}d_{s, p})} $
// and $ d_{s, p}=\sqrt{(i-i_{p})^2+(j-j_{p})^2} $
//
// \begin{itemize}
// \item $ K $ : the decrease coefficient
// \item $ (i, j)$ : the coordinates of the pixel inside the region
// defined by $ \eta_{s} $
// \item $ (i_{p}, j_{p})$ : the coordinates of the pixels belonging to $ \eta_{p} \subset \eta_{s} $
// \item $ C_{s}$ : the variation coefficient computed over $ \eta_{p}$
// \end{itemize}
//
//
//
// Most of this example is similar to the previous one and only the differences
// will be highlighted.
//
// First, we need to include the header:
#include "otbFrostImageFilter.h"
#include "otbImage.h"
......@@ -68,16 +40,12 @@ int main(int argc, char* argv[])
}
typedef unsigned char PixelType;
typedef otb::Image<PixelType, 2> InputImageType;
typedef otb::Image<PixelType, 2> OutputImageType;
// The filter can be instantiated using the image types defined previously.
// The filter can be instantiated using the image types defined previously.
typedef otb::FrostImageFilter<InputImageType, OutputImageType> FilterType;
typedef otb::ImageFileReader<InputImageType> ReaderType;
typedef otb::ImageFileWriter<OutputImageType> WriterType;
ReaderType::Pointer reader = ReaderType::New();
......@@ -87,22 +55,12 @@ int main(int argc, char* argv[])
writer->SetInput(filter->GetOutput());
reader->SetFileName(argv[1]);
// The image obtained with the reader is passed as input to the
// \doxygen{otb}{FrostImageFilter}.
//
// \index{otb::FrostImageFilter!SetInput()}
// \index{otb::FileImageReader!GetOutput()}
// The image obtained with the reader is passed as input to the FrostImageFilter
filter->SetInput(reader->GetOutput());
// The method \code{SetRadius()} defines the size of the window to
// be used for the computation of the local statistics. The method
// \code{SetDeramp()} sets the $K$ coefficient.
//
// \index{otb::FrostImageFilter!SetRadius()}
// \index{otb::FrostImageFilter!SetDeramp()}
// \index{SetDeramp()!otb::FrostImageFilter}
// The method SetRadius() defines the size of the window to
// be used for the computation of the local statistics. The method
// SetDeramp() sets the K coefficient.
FilterType::SizeType Radius;
Radius[0] = atoi(argv[3]);
Radius[1] = atoi(argv[3]);
......@@ -112,22 +70,4 @@ int main(int argc, char* argv[])
writer->SetFileName(argv[2]);
writer->Update();
// Figure~\ref{fig:FROST_FILTER} shows the result of applying the Frost
// filter to a SAR image.
// \begin{figure}
// \center
// \includegraphics[width=0.44\textwidth]{GomaSmall.eps}
// \includegraphics[width=0.44\textwidth]{GomaSmallFrostFiltered.eps}
// \itkcaption[Frost Filter Application]{Result of applying the
// \doxygen{otb}{FrostImageFilter} to a SAR image.}
// \label{fig:FROST_FILTER}
// \end{figure}
//
// \relatedClasses
// \begin{itemize}
// \item \doxygen{otb}{LeeImageFilter}
// \end{itemize}
return EXIT_SUCCESS;
}
This example illustrates the use of the :doxygen:`FrostImageFilter`.
This filter belongs to the family of the edge-preserving smoothing
filters which are usually used for speckle reduction in radar
images.
This filter uses a negative exponential convolution kernel.
The output of the filter for pixel p is:
.. math::
\hat I_{s}=\sum_{p\in\eta_{p}} m_{p}I_{p}
m_{p}=\frac{KC_{s}^{2}\exp(-KC_{s}^{2}d_{s, p})}{\sum_{p\in\eta_{p}} KC_{s}^{2}\exp(-KC_{s}^{2}d_{s, p})}
d_{s, p}=\sqrt{(i-i_{p})^2+(j-j_{p})^2}
where:
* :math:`K`: the decrease coefficient
* :math:`(i, j)`: the coordinates of the pixel inside the region defined by :math:`\eta_{s}`
* :math:`(i_{p}, j_{p})`: the coordinates of the pixels belonging to :math:`\eta_{p} \subset \eta_{s}`
* :math:`C_{s}`: the variation coefficient computed over :math:`\eta_{p}`
.. |image1| image:: /Input/GomaSmall.png
.. |image2| image:: /Output/GomaSmallFrostFiltered.png
.. _Figure1:
+--------------------------+-------------------------+
| |image1| | |image2| |
+--------------------------+-------------------------+
Result of applying the FrostImageFilter to a SAR image.
......@@ -24,18 +24,6 @@
*/
// Visualization of digital elevation models (DEM) is often more intuitive by simulating a
// lighting source and generating the corresponding shadows. This principle is called
// hill shading.
//
// Using a simple functor \doxygen{otb}{HillShadingFunctor} and the DEM image generated
// using the \doxygen{otb}{DEMToImageGenerator} (refer to \ref{sec:ReadDEM}), you can easily
// obtain a representation of the DEM. Better yet, using the
// \doxygen{otb}{ScalarToRainbowRGBPixelFunctor}, combined with the
// \doxygen{otb}{ReliefColormapFunctor} you can easily generate the classic elevation maps.
//
// This example will focus on the shading itself.
#include "otbImageFileReader.h"
#include "otbImageFileWriter.h"
......@@ -50,7 +38,6 @@
int main(int argc, char* argv[])
{
if (argc < 10)
{
std::cout << argv[0] << " <output_filename> <output_color_filename> "
......@@ -162,21 +149,8 @@ int main(int argc, char* argv[])
writer2->SetInput(multiply->GetOutput());
try
{
writer->Update();
writer2->Update();
}
catch (itk::ExceptionObject& excep)
{
std::cerr << "Exception caught !" << std::endl;
std::cerr << excep << std::endl;
}
catch (...)
{
std::cout << "Unknown exception !" << std::endl;
return EXIT_FAILURE;
}
writer->Update();
writer2->Update();
otb::WorldFile::Pointer worldFile = otb::WorldFile::New();
worldFile->SetLonOrigin(origin[0]);
......@@ -188,17 +162,4 @@ int main(int argc, char* argv[])
worldFile->Update();
worldFile->SetImageFilename(argv[2]);
worldFile->Update();
// Figure~\ref{fig:HILL_SHADING} shows the hill shading result from SRTM data.
//
// \begin{figure}
// \center
// \includegraphics[width=0.44\textwidth]{HillShadingExample.eps}
// \includegraphics[width=0.44\textwidth]{HillShadingColorExample.eps}
// \itkcaption[Hill shading]{Hill shading obtained from SRTM data (left) and combined with
// the color representation (right)}
// \label{fig:HILL_SHADING}
// \end{figure}
return EXIT_SUCCESS;
}
Visualization of digital elevation models (DEM) is often more intuitive by
simulating a lighting source and generating the corresponding shadows. This
principle is called hill shading.
Using :doxygen:`HillShadingFilter` and the DEM image generated
using the :doxygen:`DEMToImageGenerator`, you can easily obtain a representation
of the DEM. Better yet, using the :doxygen-itk:`ScalarToRGBColormapImageFilter`
combined with the ``ReliefColormapFunctor`` you can easily generate the
classic elevation maps.
This example will focus on the shading itself.
.. |image1| image:: /Output/HillShadingExample.png
.. |image2| image:: /Output/HillShadingColorExample.png
.. _Figure1:
+--------------------------+-------------------------+
| |image1| | |image2| |
+--------------------------+-------------------------+
Hill shading obtained from SRTM data (left) and combined with the color representation (right)
......@@ -24,25 +24,11 @@
*/
// Some algorithms produce an indexed image as output. In such images,
// each pixel is given a value according to the region number it belongs to.
// This value starting at 0 or 1 is usually an integer value.
// Often, such images are produced by segmentation or classification algorithms.
//
// If such regions are easy to manipulate -- it is easier and faster to compare two integers
// than a RGB value -- it is different when it comes to displaying the results.
//
// Here we present a convient way to convert such indexed image to a color image. In
// such conversion, it is important to ensure that neighborhood region, which are
// likely to have consecutive number have easily dicernable colors. This is done
// randomly using a hash function by the \doxygen{itk}{ScalarToRGBPixelFunctor}.
#include "otbImage.h"
#include "otbImageFileReader.h"
#include "otbImageFileWriter.h"
#include "itkUnaryFunctorImageFilter.h"
#include "itkScalarToRGBPixelFunctor.h"
#include "itkRescaleIntensityImageFilter.h"
int main(int argc, char* argv[])
......@@ -65,10 +51,8 @@ int main(int argc, char* argv[])
reader->SetFileName(inputFilename);
// The \doxygen{itk}{UnaryFunctorImageFilter} is the filter in charge of
// calling the functor we specify to do the work for each pixel. Here it is the
// \doxygen{itk}{ScalarToRGBPixelFunctor}.
// The UnaryFunctorImageFilter is the filter in charge of calling the functor
// we specify to do the work for each pixel. Here it is the ScalarToRGBPixelFunctor
typedef itk::Functor::ScalarToRGBPixelFunctor<unsigned long> ColorMapFunctorType;
typedef itk::UnaryFunctorImageFilter<ImageType, RGBImageType, ColorMapFunctorType> ColorMapFilterType;
ColorMapFilterType::Pointer colormapper = ColorMapFilterType::New();
......@@ -93,17 +77,4 @@ int main(int argc, char* argv[])
writer2->SetFileName(outputScaledFilename);
writer2->SetInput(rescaler->GetOutput());
writer2->Update();
// Figure~\ref{fig:INDEXTORGB_FILTER} shows the result of the conversion
// from an indexed image to a color image.
// \begin{figure}
// \center
// \includegraphics[width=0.44\textwidth]{buildingExtractionIndexed_scaled.eps}
// \includegraphics[width=0.44\textwidth]{buildingExtractionRGB.eps}
// \itkcaption[Scaling images]{The original indexed image (left) and the
// conversion to color image.}
// \label{fig:INDEXTORGB_FILTER}
// \end{figure}
return EXIT_SUCCESS;
}
Some algorithms produce an indexed image as output. In such images,
each pixel is given a value according to the region number it belongs to.
This value, starting at 0 or 1, is usually an integer.
Often, such images are produced by segmentation or classification algorithms.
If such regions are easy to manipulate -- it is easier and faster to compare two integers
than an RGB value -- it is different when it comes to displaying the results.
Here we present a convenient way to convert such an indexed image to a color image. In
such a conversion, it is important to ensure that neighboring regions, which are
likely to have consecutive numbers, have easily discernible colors. This is done
randomly using a hash function by ``ScalarToRGBPixelFunctor``.
.. |image1| image:: /Output/buildingExtractionIndexed_scaled.png
.. |image2| image:: /Output/buildingExtractionRGB.png
.. _Figure1:
+--------------------------+-------------------------+
| |image1| | |image2| |
+--------------------------+-------------------------+
The original indexed image (left) and the conversion to color image.
......@@ -47,16 +47,16 @@ int main(int argc, char* argv[])
// The filter can be instantiated using the image types defined above.
typedef otb::LeeImageFilter<InputImageType, OutputImageType> FilterType;
// An ImageFileReader class is also instantiated in order to read
// image data from a file.
// An ImageFileReader class is also instantiated in order to read
// image data from a file.
typedef otb::ImageFileReader<InputImageType> ReaderType;
// An \doxygen{otb}{ImageFileWriter} is instantiated in order to write the
// An ImageFileWriter is instantiated in order to write the
// output image to a file.
typedef otb::ImageFileWriter<OutputImageType> WriterType;
// Both the filter and the reader are created by invoking their \code{New()}
// methods and assigning the result to SmartPointers.
// Both the filter and the reader are created by invoking their New()
// methods and assigning the result to SmartPointers.
ReaderType::Pointer reader = ReaderType::New();
FilterType::Pointer filter = FilterType::New();
......@@ -64,8 +64,8 @@ int main(int argc, char* argv[])
writer->SetInput(filter->GetOutput());
reader->SetFileName(argv[1]);
// The image obtained with the reader is passed as input to the
// LeeImageFilter.
// The image obtained with the reader is passed as input to the
// LeeImageFilter.
filter->SetInput(reader->GetOutput());
// The method SetRadius() defines the size of the window to
......
This example illustrates the use of the LeeImageFilter.
This example illustrates the use of the :doxygen:`LeeImageFilter`.
This filter belongs to the family of the edge-preserving smoothing
filters which are usually used for speckle reduction in radar
images. The LeeFilter applies a linear regression
which minimizes the mean-square error in the frame of a
multiplicative speckle model.
.. figure:: /Input/GomaSmall.png
.. |image1| image:: /Input/GomaSmall.png
.. figure:: /Output/GomaSmallLeeFiltered.png
.. |image2| image:: /Output/GomaSmallLeeFiltered.png
.. _Figure1:
+--------------------------+-------------------------+
| |image1| | |image2| |
+--------------------------+-------------------------+
Result of applying the Lee filter to a SAR image.
......@@ -32,29 +32,15 @@
0.1
*/
// This example demonstrates the use of the
// \doxygen{otb}{MeanShiftSegmentationFilter} class which implements
// filtering and clustering using the mean shift algorithm
// \cite{Comaniciu2002}. For a given pixel, the mean shift will
// build a set of neighboring pixels within a given spatial radius
// and a color range. The spatial and color center of this set is
// then computed and the algorithm iterates with this new spatial and
// color center. The Mean Shift can be used for edge-preserving
// smoothing, or for clustering.
#include "otbVectorImage.h"
#include "otbImageFileReader.h"
#include "otbImageFileWriter.h"
#include "otbImageFileWriter.h"
#include "otbPrintableImageFilter.h"
#include "itkScalarToRGBPixelFunctor.h"
#include "itkUnaryFunctorImageFilter.h"
// We start by including the needed header file.
#include "otbMeanShiftSegmentationFilter.h"
int main(int argc, char* argv[])
{
if (argc != 11)
......@@ -75,9 +61,6 @@ int main(int argc, char* argv[])
const unsigned int maxiter = atoi(argv[9]);
const double thres = atof(argv[10]);
// We start by the classical \code{typedef}s needed for reading and
// writing the images.
const unsigned int Dimension = 2;
typedef float PixelType;
......@@ -94,36 +77,39 @@ int main(int argc, char* argv[])
typedef otb::MeanShiftSegmentationFilter<ImageType, LabelImageType, ImageType> FilterType;
// We instantiate the filter, the reader, and 2 writers (for the
// labeled and clustered images).
// We instantiate the filter, the reader, and 2 writers (for the
// labeled and clustered images).
FilterType::Pointer filter = FilterType::New();
ReaderType::Pointer reader = ReaderType::New();
WriterType::Pointer writer1 = WriterType::New();
LabelWriterType::Pointer writer2 = LabelWriterType::New();
// We set the file names for the reader and the writers:
// We set the file names for the reader and the writers:
reader->SetFileName(infname);
writer1->SetFileName(clusteredfname);
writer2->SetFileName(labeledfname);
// We can now set the parameters for the filter. There are 3 main
// parameters: the spatial radius used for defining the neighborhood,
// the range radius used for defining the interval in the color space
// and the minimum size for the regions to be kept after clustering.
// We can now set the parameters for the filter. There are 3 main
// parameters: the spatial radius used for defining the neighborhood,
// the range radius used for defining the interval in the color space
// and the minimum size for the regions to be kept after clustering.
filter->SetSpatialBandwidth(spatialRadius);
filter->SetRangeBandwidth(rangeRadius);
filter->SetMinRegionSize(minRegionSize);
// Two another parameters can be set : the maximum iteration number, which defines maximum number of iteration until convergence.
// Algorithm iterative scheme will stop if convergence hasn't been reached after the maximum number of iterations.
// Threshold parameter defines mean-shift vector convergence value. Algorithm iterative scheme will stop if mean-shift vector is below this threshold or if
// iteration number reached maximum number of iterations.
// Two other parameters can be set: the maximum iteration number, which
// defines maximum number of iteration until convergence. Algorithm
// iterative scheme will stop if convergence hasn't been reached after the
// maximum number of iterations. Threshold parameter defines mean-shift
// vector convergence value. Algorithm iterative scheme will stop if
// mean-shift vector is below this threshold or if iteration number reached
// maximum number of iterations.
filter->SetMaxIterationNumber(maxiter);
filter->SetThreshold(thres);
// We can now plug the pipeline and run it.
// We can now plug the pipeline and run it.
filter->SetInput(reader->GetOutput());
writer1->SetInput(filter->GetClusteredOutput());
......@@ -132,19 +118,6 @@ int main(int argc, char* argv[])
writer1->Update();
writer2->Update();
// Figure~\ref{fig:MeanShiftSegmentationFilter} shows the result of applying the mean shift
// to a Quickbird image.
// \begin{figure}
// \center
// \includegraphics[width=0.40\textwidth]{ROI_QB_MUL_1.eps}
// \includegraphics[width=0.40\textwidth]{MSClusteredOutput-pretty.eps}
// \includegraphics[width=0.40\textwidth]{MSLabeledOutput-pretty.eps}
// \itkcaption[Mean Shift]{From top to bottom and left to right:
// Original image, image filtered by
// mean shift after clustering , and labeled image.}
// \label{fig:MeanShiftSegmentationFilter}
// \end{figure}
typedef otb::PrintableImageFilter<ImageType> PrintableFilterType;
PrintableFilterType::Pointer printableImageFilter = PrintableFilterType::New();
......@@ -176,6 +149,4 @@ int main(int argc, char* argv[])
labelRGBWriter->SetFileName(labeledpretty);
labelRGBWriter->SetInput(labelToRGB->GetOutput());
labelRGBWriter->Update();
return EXIT_SUCCESS;
}
This example demonstrates the use of the :doxygen:`MeanShiftSegmentationFilter`
class which implements filtering and clustering using the mean shift algorithm.
For a given pixel, the mean shift will build a set of neighboring pixels within
a given spatial radius and a color range. The spatial and color center of this
set is then computed and the algorithm iterates with this new spatial and color
center. The Mean Shift can be used for edge-preserving smoothing, or for
clustering.
.. |image1| image:: /Input/ROI_QB_MUL_1.png
.. |image2| image:: /Output/MSClusteredOutput-pretty.png
.. |image3| image:: /Output/MSLabeledOutput-pretty.png
.. _Figure1:
+--------------------------+-------------------------+-------------------------+
| |image1| | |image2| | |image3| |
+--------------------------+-------------------------+-------------------------+
Original image, image filtered by mean shift after clustering, and labeled image.
......@@ -29,30 +29,6 @@
*/
// Most of the time, satellite images have more than three spectral bands. As we
// are only able to see three colors (red, green and blue), we have to find a way to
// represent these images using only three bands. This is called creating a color
// composition.
//
// Of course, any color composition will not be able to render all the information
// available in the original image. As a consequence, sometimes, creating more than
// one color composition will be necessary.
//
// If you want to obtain an image with natural colors, you have to match the wavelength
// captured by the satellite with those captured by your eye: thus matching the red band
// with the red color, etc.
//
// Some satellites (SPOT 5 is an example) do not acquire all the {\em human} spectral bands:
// the blue can be missing and replaced by some other wavelength of interest for a specific application.
// In these situations, another mapping has to be created. That's why, the vegetation often appears in
// red in satellite images (see on left of figure~\ref{fig:PRINTABLE_FILTER}).
//
// The band order in the image products can be also quite tricky. It could be in the wavelength order,
// as it is the case for Quickbird (1: Blue, 2: Green, 3: Red, 4: NIR), in this case, you
// have to be careful to reverse the order if you want a natural display. It could also be reverse
// to facilitate direct viewing, as for SPOT5 (1: NIR, 2: Red, 3: Green, 4: SWIR) but in this situations
// you have to be careful when you process the image.
#include "otbVectorImage.h"
#include "otbImageFileReader.h"
#include "otbImageFileWriter.h"
......@@ -84,9 +60,8 @@ int main(int argc, char* argv[])
ReaderType::Pointer reader = ReaderType::New();
reader->SetFileName(inputFilename);
// To easily convert the image to a {\em printable} format, i.e. 3 bands
// \code{unsigned char} value, you can use the \doxygen{otb}{PrintableImageFilter}.
// To easily convert the image to a printable format, i.e. 3 bands
// unsigned char value, you can use the PrintableImageFilter.
typedef otb::PrintableImageFilter<InputImageType> PrintableFilterType;
PrintableFilterType::Pointer printableImageFilter = PrintableFilterType::New();
......@@ -95,9 +70,8 @@ int main(int argc, char* argv[])
printableImageFilter->SetChannel(greenChannelNumber);
printableImageFilter->SetChannel(blueChannelNumber);
// When you create the writer to plug at the output of the \code{printableImageFilter}
// When you create the writer to plug at the output of the printableImageFilter
// you may want to use the direct type definition as it is a good way to avoid mismatch:
typedef PrintableFilterType::OutputImageType OutputImageType;
typedef otb::ImageFileWriter<OutputImageType> WriterType;
......@@ -106,17 +80,4 @@ int main(int argc, char* argv[])
writer->SetInput(printableImageFilter->GetOutput());
writer->Update();
// Figure~\ref{fig:PRINTABLE_FILTER} illustrates different color compositions for a SPOT 5 image.
// \begin{figure}
// \center
// \includegraphics[width=0.44\textwidth]{PrintableExampleOutput1.eps}
// \includegraphics[width=0.44\textwidth]{PrintableExampleOutput2.eps}
// \itkcaption[Scaling images]{On the left, a classic SPOT5
// combination: XS3 in red, XS2 in green and XS1 in blue. On the
// right another composition: XS3 in red, XS4 in green and XS2 in blue.}
// \label{fig:PRINTABLE_FILTER}
// \end{figure}
return EXIT_SUCCESS;
}
Most of the time, satellite images have more than three spectral bands. As we
are only able to see three colors (red, green and blue), we have to find a way
to represent these images using only three bands. This is called creating a
color composition.
Of course, any color composition will not be able to render all the information
available in the original image. As a consequence, sometimes, creating more than
one color composition will be necessary.
If you want to obtain an image with natural colors, you have to match the
wavelength captured by the satellite with those captured by your eye: thus
matching the red band with the red color, etc.
Some satellites (SPOT 5 is an example) do not acquire all the visible
spectral bands: the blue can be missing and replaced by some other wavelength of
interest for a specific application. In these situations, another mapping has
to be created. That's why the vegetation often appears in red in satellite
images.
The band order in the image products can also be quite tricky. It could be in
the wavelength order, as is the case for Quickbird (1: Blue, 2: Green, 3:
Red, 4: NIR); in this case, you have to be careful to reverse the order if you
want a natural display. It could also be reversed to facilitate direct viewing,
as for SPOT5 (1: NIR, 2: Red, 3: Green, 4: SWIR), but in these situations you have
to be careful when you process the image.
.. |image1| image:: /Output/PrintableExampleOutput1.jpg
.. |image2| image:: /Output/PrintableExampleOutput2.jpg
.. _Figure1:
+--------------------------+-------------------------+
| |image1| | |image2| |
+--------------------------+-------------------------+
On the left, a classic SPOT5 combination: XS3 in red, XS2 in green and XS1 in blue. On the right another composition: XS3 in red, XS4 in green and XS2 in blue.
......@@ -23,13 +23,6 @@
./ScalingFilterExample Input/QB_Toulouse_Ortho_PAN.tif Output/QB_Toulouse_Ortho_PAN_rescaled.png Output/QB_Toulouse_Ortho_PAN_casted.png
*/
// On one hand, satellite images are commonly coded on more than 8 bits to provide
// the dynamic range required from shadows to clouds. On the other hand, image formats
// in use for printing and display are usually limited to 8 bits. We need to convert the value
// to enable a proper display. This is usually done using linear scaling. Of course, you have
// to be aware that some information is lost in the process.
#include "otbImage.h"
#include "otbImageFileReader.h"
#include "otbImageFileWriter.h"
......@@ -56,8 +49,7 @@ int main(int argc, char* argv[])
ReaderType::Pointer reader = ReaderType::New();
reader->SetFileName(argv[1]);
// The \doxygen{itk}{RescaleIntensityImageFilter} is used to rescale the value:
// The RescaleIntensityImageFilter is used to rescale the value
typedef itk::RescaleIntensityImageFilter<InputImageType, OutputImageType> RescalerType;
RescalerType::Pointer rescaler = RescalerType::New();
rescaler->SetInput(reader->GetOutput());
......@@ -75,20 +67,4 @@ int main(int argc, char* argv[])
writer->SetFileName(argv[3]);
writer->SetInput(caster->GetOutput());
writer->Update();
// Figure~\ref{fig:SCALING_FILTER} illustrates the difference between a proper scaling and
// a simple truncation of the value and demonstrates why it is
// important to keep this in mind.
// \begin{figure}
// \center
// \includegraphics[width=0.44\textwidth]{QB_Toulouse_Ortho_PAN_casted.eps}
// \includegraphics[width=0.44\textwidth]{QB_Toulouse_Ortho_PAN_rescaled.eps}
// \itkcaption[Scaling images]{On the left, the image obtained by truncating pixel values
// to the dynamic range acceptable for a PNG file (between 0 and 255). On the right,
// the same image with
// a proper rescaling}
// \label{fig:SCALING_FILTER}
// \end{figure}
return EXIT_SUCCESS;
}
On the one hand, satellite images are commonly coded on more than 8 bits to provide
the dynamic range required from shadows to clouds. On the other hand, image formats
in use for printing and display are usually limited to 8 bits. We need to convert the values
to enable a proper display. This is usually done using linear scaling. Of course, you have
to be aware that some information is lost in the process.
.. |image1| image:: /Output/QB_Toulouse_Ortho_PAN_casted.png
.. |image2| image:: /Output/QB_Toulouse_Ortho_PAN_rescaled.png
.. _Figure1:
+--------------------------+-------------------------+
| |image1| | |image2| |
+--------------------------+-------------------------+
On the left, the image obtained by truncating pixel values to the dynamic range acceptable for a PNG file (between 0 and 255). On the right, the same image with a proper rescaling.
......@@ -77,13 +77,22 @@ protected:
MandatoryOff("ts");
AddParameter(ParameterType_Int, "maxit", "Maximum number of iterations");
SetParameterDescription("maxit", "Maximum number of iterations for the learning step.");
SetParameterDescription("maxit",
"Maximum number of iterations for the learning step."
" If this parameter is set to 0, the KMeans algorithm will not stop until convergence");
SetDefaultParameterInt("maxit", 1000);
MandatoryOff("maxit");
AddParameter(ParameterType_OutputFilename, "outmeans", "Centroid filename");
SetParameterDescription("outmeans", "Output text file containing centroid positions");
MandatoryOff("outmeans");
AddParameter(ParameterType_Group, "centroids", "Centroids IO parameters");
SetParameterDescription("centroids", "Group of parameters for centroids IO.");
AddParameter(ParameterType_InputFilename, "centroids.in", "input centroids text file");
SetParameterDescription("centroids.in",
"Input text file containing centroid positions used to initialize the algorithm. "
"Each centroid must be described by p parameters, p being the number of bands in "
"the input image, and the number of centroids must be equal to the number of classes "
"(one centroid per line with values separated by spaces).");
MandatoryOff("centroids.in");
ShareKMSamplingParameters();
ConnectKMSamplingParams();
......@@ -99,6 +108,7 @@ protected:
{
ShareParameter("ram", "polystats.ram");
ShareParameter("sampler", "select.sampler");
ShareParameter("centroids.out", "training.classifier.sharkkm.centroids.out");
ShareParameter("vm", "polystats.mask", "Validity Mask",
"Validity mask, only non-zero pixels will be used to estimate KMeans modes.");
}
......@@ -248,6 +258,14 @@ protected:
GetParameterInt("maxit"));
GetInternalApplication("training")->SetParameterInt("classifier.sharkkm.k",
GetParameterInt("nc"));
if (IsParameterEnabled("centroids.in") && HasValue("centroids.in"))
{
GetInternalApplication("training")->SetParameterString("classifier.sharkkm.centroids.in", GetParameterString("centroids.in"));
GetInternalApplication("training")
->SetParameterString("classifier.sharkkm.centroids.stats", GetInternalApplication("imgstats")->GetParameterString("out"));
}
if( IsParameterEnabled("rand"))
GetInternalApplication("training")->SetParameterInt("rand", GetParameterInt("rand"));
......@@ -276,55 +294,6 @@ protected:
ExecuteInternal( "classif" );
}
/** Write the centroid positions found in the KMeans model file to the
 *  text file given by the "outmeans" parameter (no-op when "outmeans"
 *  is disabled).
 *
 *  \param image         Input image, used only to get the band count.
 *  \param modelFileName Path to the Shark KMeans model file; the centroid
 *                       positions are stored there on a line starting with "2 ".
 *  \param nbClasses     Number of classes (centroids) expected in the model.
 *
 *  Throws (via itkExceptionMacro) if the model file cannot be opened, if no
 *  centroid line is present, or if it holds fewer values than expected.
 */
void CreateOutMeansFile(FloatVectorImageType *image,
                        const std::string &modelFileName,
                        unsigned int nbClasses)
{
  if (IsParameterEnabled("outmeans"))
  {
    unsigned int nbBands = image->GetNumberOfComponentsPerPixel();
    unsigned int nbElements = nbClasses * nbBands;

    // Get the line in the model file that contains the centroid positions
    std::ifstream infile(modelFileName);
    if (!infile)
    {
      itkExceptionMacro(<< "File: " << modelFileName << " couldn't be opened");
    }

    // The centroid line starts with "2 "
    std::string line, centroidLine;
    while (std::getline(infile, line))
    {
      if (line.size() > 2 && line[0] == '2' && line[1] == ' ')
      {
        centroidLine = line;
        break;
      }
    }
    // BUGFIX: the original code did not check that the centroid line was
    // found; an empty line made (nbWord - nbElements) negative and fed a
    // negative offset to vector::erase — undefined behavior.
    if (centroidLine.empty())
    {
      itkExceptionMacro(<< "No centroid line (starting with \"2 \") found in model file: " << modelFileName);
    }

    std::vector<std::string> centroidElm;
    boost::split(centroidElm, centroidLine, boost::is_any_of(" "));

    // The leading tokens are metadata; only the last nbElements tokens are
    // centroid coordinates. Guard against a malformed (too short) line.
    if (centroidElm.size() < static_cast<std::size_t>(nbElements))
    {
      itkExceptionMacro(<< "Model file " << modelFileName << " contains fewer centroid values ("
                        << centroidElm.size() << ") than expected (" << nbElements << ")");
    }
    centroidElm.erase(centroidElm.begin(), centroidElm.end() - nbElements);

    // Write the centroids to the output file, one class per line,
    // bands separated by spaces.
    std::ofstream outfile;
    outfile.open(GetParameterString("outmeans"));
    for (unsigned int i = 0; i < nbClasses; i++)
    {
      for (unsigned int j = 0; j < nbBands; j++)
      {
        outfile << std::setw(8) << centroidElm[i * nbBands + j] << " ";
      }
      outfile << std::endl;
    }
  }
}
class KMeansFileNamesHandler
{
public:
......@@ -495,9 +464,6 @@ private:
// Compute a classification of the input image according to a model file
Superclass::KMeansClassif();
// Create the output text file containing centroids positions
Superclass::CreateOutMeansFile(GetParameterImage("in"), fileNames.modelFile, GetParameterInt("nc"));
// Remove all tempory files
if( GetParameterInt( "cleanup" ) )
{
......
......@@ -122,7 +122,10 @@ LearningApplicationBase<TInputValue,TOutputValue>
::InitUnsupervisedClassifierParams()
{
#ifdef OTB_USE_SHARK
InitSharkKMeansParams();
if (!m_RegressionFlag)
{
InitSharkKMeansParams(); // Regression not supported
}
#endif
}
......
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment