Commit cde5f379 authored by Gaetano Raffaele

Removed MPI, this version is for workstations only

Showing with 0 additions and 234 deletions
@@ -297,12 +297,6 @@ private:
   void AfterExecuteAndWriteOutputs()
   {
-#ifdef OTB_USE_MPI
-    // When MPI is activated, only the master thread proceed
-    if (otb::MPIConfig::Instance()->GetMyRank() != 0)
-      return;
-#endif
     // Delete temporary files
     for (unsigned int i = 0 ; i < m_TemporaryFilesList.size() ; i++)
     {
...
@@ -109,10 +109,6 @@ void Controller<TSegmenter>::RunSegmentation()
       m_Resuming,
       nextTile);
-#ifdef OTB_USE_MPI
-  GatherUsefulVariables(accumulatedMemory, isFusion);
-#endif
   // Time monitoring
   ShowTime(t);
@@ -144,10 +140,6 @@ void Controller<TSegmenter>::RunSegmentation()
   if (m_Resuming) StopResumingMode();
-#ifdef OTB_USE_MPI
-  GatherUsefulVariables(accumulatedMemory, isFusion);
-#endif
   // Time monitoring
   ShowTime(t);
@@ -162,13 +154,6 @@ void Controller<TSegmenter>::RunSegmentation()
     }
   }
-#ifdef OTB_USE_MPI
-  // Only the master process is doing the next part
-  // TODO: Use the MPI process wich has the largest amount of memory
-  if (otb::MPIConfig::Instance()->GetMyRank() != 0)
-    return;
-#endif
   if(accumulatedMemory <= m_Memory)
   {
     // Merge all the graphs
@@ -196,15 +181,6 @@ void Controller<TSegmenter>::RunSegmentation()
   }
   else if (m_TilingMode == LSGRM_TILING_NONE)// tiling_mode is none
   {
-#ifdef OTB_USE_MPI
-    // Only the master process is doing the next part
-    if (otb::MPIConfig::Instance()->GetMyRank() > 0)
-      return;
-    else
-      // Warn that there is some unused MPI processes
-      if (otb::MPIConfig::Instance()->GetNbProcs() > 1)
-        itkWarningMacro(<< "Only 1 MPI process will be used");
-#endif
     // Update input image
     m_InputImage->Update();
@@ -224,7 +200,6 @@ void Controller<TSegmenter>::RunSegmentation()
     itkExceptionMacro(<<"Unknow tiling mode!");
   }
-  // TODO: [MPI] broadcast the graph to other nodes
 }
@@ -331,7 +306,6 @@ void Controller<TSegmenter>::GetAutomaticConfiguration()
   itkDebugMacro(<<"Get automatic configuration");
   // Compute the maximum number of nodes that can fit the memory
-  // TODO: Use the smallest number amongst MPI processes
   unsigned long int maximumNumberOfNodesInMemory = GetMaximumNumberOfNodesInMemory();
   itkDebugMacro(<<"Maximum number of nodes in memory is " << maximumNumberOfNodesInMemory);
@@ -367,13 +341,7 @@ void Controller<TSegmenter>::GetAutomaticConfiguration()
   // Get the multiples of k. For each one, compute the criterion of the tiling
   for (unsigned int layoutNCol = 1; layoutNCol<=nbOfTiles; layoutNCol++)
   {
-#ifdef OTB_USE_MPI
-    // We want number of tiles which is a multiple of the number of MPI processes
-    if (nbOfTiles % layoutNCol == 0 && // Is it a multiple of the nb of Tiles and nProcs?
-        nbOfTiles % otb::MPIConfig::Instance()->GetNbProcs() == 0)
-#else
     if (nbOfTiles % layoutNCol == 0) // Is it a multiple of the nb of Tiles?
-#endif
     {
       // Tiling layout
       unsigned int layoutNRow = nbOfTiles / layoutNCol;
@@ -460,10 +428,6 @@ void Controller<TSegmenter>::SetSpecificParameters(const SegmentationParameterTy
 //typename Controller<TSegmenter>::LabelImageType::Pointer
 //Controller<TSegmenter>::GetLabeledClusteredOutput()
 //{
-//#ifdef OTB_USE_MPI
-//  // Get the label image from the master process (the one which achieves segmentation)
-//  BroadcastImage<typename TSegmenter::LabelImageType>(m_LabelImage);
-//#endif
 //  return m_LabelImage;
 //}
...
@@ -168,9 +168,6 @@ long long unsigned int RunPartialSegmentation(const typename TSegmenter::ParamTy
   {
     for(unsigned int col = 0; col < nbTilesX; col++)
     {
-#ifdef OTB_USE_MPI
-      if (MyTurn(row*nbTilesX + col))
-#endif
       {
         // Get the current tile
         std::cout << "Processing tile " << row << ", " << col << std::endl;
@@ -253,9 +250,6 @@ long long unsigned int RunPartialSegmentation(const typename TSegmenter::ParamTy
   if (resume)
     nextTile = 0;
-#ifdef OTB_USE_MPI
-  otb::MPIConfig::Instance()->barrier();
-#endif
   std::cout << "Add stability margins to graph for the next round..."<< std::endl;
@@ -264,9 +258,6 @@ long long unsigned int RunPartialSegmentation(const typename TSegmenter::ParamTy
   {
     for(unsigned int col = 0; col < nbTilesX; col++)
     {
-#ifdef OTB_USE_MPI
-      if (MyTurn(row*nbTilesX + col))
-#endif
       {
         // Get current tile
         ProcessingTile currentTile = tiles[row*nbTilesX + col];
@@ -682,9 +673,6 @@ long long unsigned int RunFirstPartialSegmentation(
   {
     for(unsigned int col = 0; col < nbTilesX; col++)
     {
-#ifdef OTB_USE_MPI
-      if (MyTurn(row*nbTilesX + col))
-#endif
       {
         // Reading images
         ProcessingTile currentTile = tiles[row*nbTilesX + col];
...
@@ -13,184 +13,6 @@
 #include <boost/progress.hpp>
-#ifdef OTB_USE_MPI
-#include "otbMPIConfig.h"
-#include "mpi.h" // TODO: implement needed methods inside otbMPIConfig.h
-#include "otbExtractROI.h"
-#include "itkImageRegionIterator.h"
-#include "otbImageFileWriter.h"
-#endif
-/* MPI related functions */
-#ifdef OTB_USE_MPI
-/*
- * This function returns TRUE if the process #myrank is assigned
- * to the task #div in a pool of #nprocs processes
- */
-bool MyTurn(int div = 0)
-{
-  otb::MPIConfig::Pointer mpiConfig = otb::MPIConfig::Instance();
-  unsigned int proc = 0;
-  if (mpiConfig->GetNbProcs() != 0)
-    proc = div % mpiConfig->GetNbProcs();
-  return (proc == mpiConfig->GetMyRank());
-}
-/*
- * This function gather the given value in other process, and update it
- * TODO: MPI implementation using OTB MPI Wrapper
- */
-template<typename T>
-void GatherMe(T& x, MPI_Datatype dataType)
-{
-  if (otb::MPIConfig::Instance()->GetMyRank() == 0)
-  {
-    // Master process
-    // Gather
-    for (unsigned int p = 1 ; p < otb::MPIConfig::Instance()->GetNbProcs() ; p++)
-    {
-      T partial_sum;
-      MPI_Recv( &partial_sum, 1, dataType, p, MPI_ANY_TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
-      x += partial_sum;
-    }
-    // Dispatch
-    for (unsigned int p = 1 ; p < otb::MPIConfig::Instance()->GetNbProcs() ; p++)
-      MPI_Send(&x, 1, dataType, p, 0, MPI_COMM_WORLD);
-  }
-  else
-  {
-    // Slave process
-    MPI_Send(&x, 1, dataType, 0, 0, MPI_COMM_WORLD);
-    MPI_Recv(&x, 1, dataType, 0, MPI_ANY_TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
-  }
-}
-/*
- * Function used to broadcast the label image to every MPI process
- */
-template<class TImageType>
-void BroadcastImage(typename TImageType::Pointer & inPtr)
-{
-  otb::MPIConfig::Instance()->barrier();
-  unsigned int width;
-  unsigned int height;
-  unsigned int block_height;
-  unsigned int current_start_y;
-  if (otb::MPIConfig::Instance()->GetMyRank() == 0)
-  {
-    // Master process
-    width = inPtr->GetLargestPossibleRegion().GetSize()[0];
-    height = inPtr->GetLargestPossibleRegion().GetSize()[1];
-  }
-  // Broadcast width and height
-  MPI_Bcast(&width, 1, MPI_UNSIGNED, 0, MPI_COMM_WORLD);
-  MPI_Bcast(&height, 1, MPI_UNSIGNED, 0, MPI_COMM_WORLD);
-  // Slave processes do allocate image
-  typename TImageType::IndexType index;
-  index.Fill(0);
-  typename TImageType::SizeType size;
-  size[0] = width;
-  size[1] = height;
-  typename TImageType::RegionType region(index,size);
-  if (otb::MPIConfig::Instance()->GetMyRank() > 0)
-  {
-    inPtr = TImageType::New();
-    inPtr->SetRegions(region);
-    inPtr->SetNumberOfComponentsPerPixel(1);
-    inPtr->Allocate();
-  }
-  // Maximum data count that mpi can handle
-  unsigned int maximum_count = std::numeric_limits<int>::max();
-  block_height = std::floor((float) maximum_count / width);
-  // Broadcast array block by block (lines)
-  current_start_y = 0;
-  while (current_start_y < height)
-  {
-    if ( current_start_y + block_height > height )
-      block_height = height - current_start_y;
-    // Subregion of image
-    typename TImageType::Pointer tmpPtr = TImageType::New();
-    typename TImageType::IndexType subregion_index;
-    subregion_index[0] = 0;
-    subregion_index[1] = current_start_y;
-    typename TImageType::SizeType subregion_size;
-    subregion_size[0] = width;
-    subregion_size[1] = block_height;
-    typename TImageType::RegionType subregion(subregion_index, subregion_size);
-    // Slave processes do allocate subregion image
-    if (otb::MPIConfig::Instance()->GetMyRank() > 0)
-    {
-      tmpPtr->SetRegions(subregion);
-      tmpPtr->Allocate();
-    }
-    else
-    {
-      typedef typename otb::ExtractROI<typename TImageType::InternalPixelType,
-        typename TImageType::InternalPixelType> ExtractROIFilterType;
-      typename ExtractROIFilterType::Pointer filter = ExtractROIFilterType::New();
-      filter->SetInput(inPtr);
-      filter->SetStartX(0);
-      filter->SetStartY(current_start_y);
-      filter->SetSizeX(width);
-      filter->SetSizeY(block_height);
-      filter->SetReleaseDataFlag(false);
-      filter->Update();
-      tmpPtr = filter->GetOutput();
-    }
-    current_start_y += block_height;
-    // Broadcast buffer
-    MPI_Bcast(tmpPtr->GetBufferPointer(), width*block_height, MPI_UNSIGNED, 0, MPI_COMM_WORLD);
-    // Slave process must recopy the image
-    if (otb::MPIConfig::Instance()->GetMyRank() > 0)
-    {
-      typedef itk::ImageRegionIterator<TImageType> IteratorType;
-      IteratorType it1(inPtr, subregion);
-      IteratorType it2(tmpPtr, subregion);
-      for (it1.GoToBegin(), it2.GoToBegin(); !it1.IsAtEnd(); ++it1, ++it2)
-      {
-        it1.Set(it2.Get());
-      }
-    } // recopy image
-  } // while data to transmit
-}
-/*
- * Gather accumulatedMemory and isFusion variables
- * TODO: MPI implementation using OTB MPI Wrapper
- */
-void GatherUsefulVariables(unsigned long long int& accumulatedMemory, bool& isFusion)
-{
-  otb::MPIConfig::Instance()->barrier();
-  int isFusionInteger = 0;
-  long long int accumulatedMemoryLLI = static_cast<long long int>(accumulatedMemory);
-  if (isFusion)
-    isFusionInteger = 1;
-  GatherMe<int>(isFusionInteger, MPI_INT);
-  GatherMe<long long int>(accumulatedMemoryLLI, MPI_LONG_LONG_INT);
-  accumulatedMemory = static_cast<long long unsigned int>(accumulatedMemoryLLI);
-  if (isFusionInteger>0)
-    isFusion = true;
-}
-#endif
 /*
  * Print time elapsed
  */
...
@@ -6,8 +6,6 @@ otb_module(LSGRM
     OTBCommon
     OTBApplicationEngine
     OTBConversion
-  OPTIONAL_DEPENDS
-    OTBMPI
   TEST_DEPENDS
     OTBTestKernel
     OTBCommandLine
...
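For context, the tile dispatch implemented by the deleted MyTurn() above is a plain round-robin over tile indices; with MPI removed, the workstation-only build processes every tile in a single process. A minimal standalone sketch of that dispatch, where myRank and nbProcs are hypothetical stand-ins for the removed otb::MPIConfig::Instance()->GetMyRank() / GetNbProcs() calls, not part of the remaining code:

// Round-robin assignment: tile index t is handled by the rank satisfying t % nbProcs == myRank.
// With a single process (nbProcs == 1, myRank == 0) the test is always true, which matches
// the behaviour of this workstation-only version.
bool MyTurn(unsigned int tileIndex, unsigned int myRank = 0, unsigned int nbProcs = 1)
{
  if (nbProcs == 0)
    return myRank == 0; // mirrors the degenerate-case guard in the deleted implementation
  return (tileIndex % nbProcs) == myRank;
}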