diff --git a/app/otbLSGRM.cxx b/app/otbLSGRM.cxx
index ac718793e364698d60599a104e6153b8b0ba9a4d..eff753411119b8663a64c6216f7e8b9f0282df4b 100644
--- a/app/otbLSGRM.cxx
+++ b/app/otbLSGRM.cxx
@@ -297,12 +297,6 @@ private:
 
   void AfterExecuteAndWriteOutputs()
   {
-#ifdef OTB_USE_MPI
-    // When MPI is activated, only the master thread proceed
-    if (otb::MPIConfig::Instance()->GetMyRank() != 0)
-      return;
-#endif
-
     // Delete temporary files
     for (unsigned int i = 0 ; i < m_TemporaryFilesList.size() ; i++)
     {
diff --git a/include/lsgrmController.txx b/include/lsgrmController.txx
index d3af8efc837b6f16b84fbf8740ec01115c688c28..73492f425bc7977a84f1dfb60504e92ca5b254f1 100644
--- a/include/lsgrmController.txx
+++ b/include/lsgrmController.txx
@@ -109,10 +109,6 @@ void Controller<TSegmenter>::RunSegmentation()
         m_Resuming,
         nextTile);
 
-#ifdef OTB_USE_MPI
-    GatherUsefulVariables(accumulatedMemory, isFusion);
-#endif
-
     // Time monitoring
     ShowTime(t);
 
@@ -144,10 +140,6 @@ void Controller<TSegmenter>::RunSegmentation()
     if (m_Resuming)
       StopResumingMode();
 
-#ifdef OTB_USE_MPI
-    GatherUsefulVariables(accumulatedMemory, isFusion);
-#endif
-
     // Time monitoring
     ShowTime(t);
 
@@ -162,13 +154,6 @@ void Controller<TSegmenter>::RunSegmentation()
     }
   }
 
-#ifdef OTB_USE_MPI
-  // Only the master process is doing the next part
-  // TODO: Use the MPI process wich has the largest amount of memory
-  if (otb::MPIConfig::Instance()->GetMyRank() != 0)
-    return;
-#endif
-
   if(accumulatedMemory <= m_Memory)
   {
     // Merge all the graphs
@@ -196,15 +181,6 @@ void Controller<TSegmenter>::RunSegmentation()
   }
   else if (m_TilingMode == LSGRM_TILING_NONE)// tiling_mode is none
   {
-#ifdef OTB_USE_MPI
-    // Only the master process is doing the next part
-    if (otb::MPIConfig::Instance()->GetMyRank() > 0)
-      return;
-    else
-      // Warn that there is some unused MPI processes
-      if (otb::MPIConfig::Instance()->GetNbProcs() > 1)
-        itkWarningMacro(<< "Only 1 MPI process will be used");
-#endif
 
     // Update input image
     m_InputImage->Update();
@@ -224,7 +200,6 @@ void Controller<TSegmenter>::RunSegmentation()
     itkExceptionMacro(<<"Unknow tiling mode!");
   }
 
-  // TODO: [MPI] broadcast the graph to other nodes
 }
 
 
@@ -331,7 +306,6 @@ void Controller<TSegmenter>::GetAutomaticConfiguration()
   itkDebugMacro(<<"Get automatic configuration");
 
   // Compute the maximum number of nodes that can fit the memory
-  // TODO: Use the smallest number amongst MPI processes
   unsigned long int maximumNumberOfNodesInMemory = GetMaximumNumberOfNodesInMemory();
   itkDebugMacro(<<"Maximum number of nodes in memory is " << maximumNumberOfNodesInMemory);
 
@@ -367,13 +341,7 @@ void Controller<TSegmenter>::GetAutomaticConfiguration()
   // Get the multiples of k. For each one, compute the criterion of the tiling
   for (unsigned int layoutNCol = 1; layoutNCol<=nbOfTiles; layoutNCol++)
   {
-#ifdef OTB_USE_MPI
-    // We want number of tiles which is a multiple of the number of MPI processes
-    if (nbOfTiles % layoutNCol == 0 && // Is it a multiple of the nb of Tiles and nProcs?
-        nbOfTiles % otb::MPIConfig::Instance()->GetNbProcs() == 0)
-#else
     if (nbOfTiles % layoutNCol == 0) // Is it a multiple of the nb of Tiles?
-#endif
     {
       // Tiling layout
       unsigned int layoutNRow = nbOfTiles / layoutNCol;
@@ -460,10 +428,6 @@ void Controller<TSegmenter>::SetSpecificParameters(const SegmentationParameterTy
 //typename Controller<TSegmenter>::LabelImageType::Pointer
 //Controller<TSegmenter>::GetLabeledClusteredOutput()
 //{
-//#ifdef OTB_USE_MPI
-//  // Get the label image from the master process (the one which achieves segmentation)
-//  BroadcastImage<typename TSegmenter::LabelImageType>(m_LabelImage);
-//#endif
 //  return m_LabelImage;
 //}
 
diff --git a/include/lsgrmGraphOperations.txx b/include/lsgrmGraphOperations.txx
index 76d647b21518ddedcbd438140f5416c9b531c804..6c11069f538836c50ff1a7a139e05e3bc38a1b5f 100644
--- a/include/lsgrmGraphOperations.txx
+++ b/include/lsgrmGraphOperations.txx
@@ -168,9 +168,6 @@ long long unsigned int RunPartialSegmentation(const typename TSegmenter::ParamTy
   {
     for(unsigned int col = 0; col < nbTilesX; col++)
     {
-#ifdef OTB_USE_MPI
-      if (MyTurn(row*nbTilesX + col))
-#endif
       {
         // Get the current tile
         std::cout << "Processing tile " << row << ", " << col << std::endl;
@@ -253,9 +250,6 @@ long long unsigned int RunPartialSegmentation(const typename TSegmenter::ParamTy
   if (resume)
     nextTile = 0;
 
-#ifdef OTB_USE_MPI
-  otb::MPIConfig::Instance()->barrier();
-#endif
 
   std::cout << "Add stability margins to graph for the next round..."<< std::endl;
 
@@ -264,9 +258,6 @@ long long unsigned int RunPartialSegmentation(const typename TSegmenter::ParamTy
   {
     for(unsigned int col = 0; col < nbTilesX; col++)
     {
-#ifdef OTB_USE_MPI
-      if (MyTurn(row*nbTilesX + col))
-#endif
       {
         // Get current tile
         ProcessingTile currentTile = tiles[row*nbTilesX + col];
@@ -682,9 +673,6 @@ long long unsigned int RunFirstPartialSegmentation(
   {
     for(unsigned int col = 0; col < nbTilesX; col++)
     {
-#ifdef OTB_USE_MPI
-      if (MyTurn(row*nbTilesX + col))
-#endif
       {
         // Reading images
         ProcessingTile currentTile = tiles[row*nbTilesX + col];
diff --git a/include/lsgrmHeader.h b/include/lsgrmHeader.h
index 35bca3c9aa54d4e617e55030a480dad44a212584..ae357c52c081a22ddcf2d7ae4046ccfcd143552e 100644
--- a/include/lsgrmHeader.h
+++ b/include/lsgrmHeader.h
@@ -13,184 +13,6 @@
 #include <boost/progress.hpp>
 
-#ifdef OTB_USE_MPI
-#include "otbMPIConfig.h"
-#include "mpi.h" // TODO: implement needed methods inside otbMPIConfig.h
-#include "otbExtractROI.h"
-#include "itkImageRegionIterator.h"
-#include "otbImageFileWriter.h"
-#endif
-
-/* MPI related functions */
-
-#ifdef OTB_USE_MPI
-/*
- * This function returns TRUE if the process #myrank is assigned
- * to the task #div in a pool of #nprocs processes
- */
-bool MyTurn(int div = 0)
-{
-  otb::MPIConfig::Pointer mpiConfig = otb::MPIConfig::Instance();
-  unsigned int proc = 0;
-  if (mpiConfig->GetNbProcs() != 0)
-    proc = div % mpiConfig->GetNbProcs();
-  return (proc == mpiConfig->GetMyRank());
-}
-
-/*
- * This function gather the given value in other process, and update it
- * TODO: MPI implementation using OTB MPI Wrapper
- */
-template<typename T>
-void GatherMe(T& x, MPI_Datatype dataType)
-{
-
-  if (otb::MPIConfig::Instance()->GetMyRank() == 0)
-  {
-    // Master process
-    // Gather
-    for (unsigned int p = 1 ; p < otb::MPIConfig::Instance()->GetNbProcs() ; p++)
-    {
-      T partial_sum;
-      MPI_Recv( &partial_sum, 1, dataType, p, MPI_ANY_TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
-      x += partial_sum;
-    }
-    // Dispatch
-    for (unsigned int p = 1 ; p < otb::MPIConfig::Instance()->GetNbProcs() ; p++)
-      MPI_Send(&x, 1, dataType, p, 0, MPI_COMM_WORLD);
-  }
-  else
-  {
-    // Slave process
-    MPI_Send(&x, 1, dataType, 0, 0, MPI_COMM_WORLD);
-    MPI_Recv(&x, 1, dataType, 0, MPI_ANY_TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
-  }
-}
-
-/*
- * Function used to broadcast the label image to every MPI process
- */
-template<class TImageType>
-void BroadcastImage(typename TImageType::Pointer & inPtr)
-{
-  otb::MPIConfig::Instance()->barrier();
-
-  unsigned int width;
-  unsigned int height;
-  unsigned int block_height;
-  unsigned int current_start_y;
-  if (otb::MPIConfig::Instance()->GetMyRank() == 0)
-  {
-    // Master process
-    width = inPtr->GetLargestPossibleRegion().GetSize()[0];
-    height = inPtr->GetLargestPossibleRegion().GetSize()[1];
-  }
-
-  // Broadcast width and height
-  MPI_Bcast(&width, 1, MPI_UNSIGNED, 0, MPI_COMM_WORLD);
-  MPI_Bcast(&height, 1, MPI_UNSIGNED, 0, MPI_COMM_WORLD);
-
-  // Slave processes do allocate image
-  typename TImageType::IndexType index;
-  index.Fill(0);
-  typename TImageType::SizeType size;
-  size[0] = width;
-  size[1] = height;
-  typename TImageType::RegionType region(index,size);
-  if (otb::MPIConfig::Instance()->GetMyRank() > 0)
-  {
-    inPtr = TImageType::New();
-    inPtr->SetRegions(region);
-    inPtr->SetNumberOfComponentsPerPixel(1);
-    inPtr->Allocate();
-  }
-
-  // Maximum data count that mpi can handle
-  unsigned int maximum_count = std::numeric_limits<int>::max();
-  block_height = std::floor((float) maximum_count / width);
-
-  // Broadcast array block by block (lines)
-  current_start_y = 0;
-  while (current_start_y < height)
-  {
-    if ( current_start_y + block_height > height )
-      block_height = height - current_start_y;
-
-    // Subregion of image
-    typename TImageType::Pointer tmpPtr = TImageType::New();
-
-    typename TImageType::IndexType subregion_index;
-    subregion_index[0] = 0;
-    subregion_index[1] = current_start_y;
-    typename TImageType::SizeType subregion_size;
-    subregion_size[0] = width;
-    subregion_size[1] = block_height;
-    typename TImageType::RegionType subregion(subregion_index, subregion_size);
-
-    // Slave processes do allocate subregion image
-    if (otb::MPIConfig::Instance()->GetMyRank() > 0)
-    {
-      tmpPtr->SetRegions(subregion);
-      tmpPtr->Allocate();
-    }
-    else
-    {
-      typedef typename otb::ExtractROI<typename TImageType::InternalPixelType,
-          typename TImageType::InternalPixelType> ExtractROIFilterType;
-      typename ExtractROIFilterType::Pointer filter = ExtractROIFilterType::New();
-      filter->SetInput(inPtr);
-      filter->SetStartX(0);
-      filter->SetStartY(current_start_y);
-      filter->SetSizeX(width);
-      filter->SetSizeY(block_height);
-      filter->SetReleaseDataFlag(false);
-      filter->Update();
-      tmpPtr = filter->GetOutput();
-    }
-
-    current_start_y += block_height;
-
-    // Broadcast buffer
-    MPI_Bcast(tmpPtr->GetBufferPointer(), width*block_height, MPI_UNSIGNED, 0, MPI_COMM_WORLD);
-
-    // Slave process must recopy the image
-    if (otb::MPIConfig::Instance()->GetMyRank() > 0)
-    {
-      typedef itk::ImageRegionIterator<TImageType> IteratorType;
-      IteratorType it1(inPtr, subregion);
-      IteratorType it2(tmpPtr, subregion);
-
-      for (it1.GoToBegin(), it2.GoToBegin(); !it1.IsAtEnd(); ++it1, ++it2)
-      {
-        it1.Set(it2.Get());
-      }
-    } // recopy image
-  } // while data to transmit
-
-}
-
-
-/*
- * Gather accumulatedMemory and isFusion variables
- * TODO: MPI implementation using OTB MPI Wrapper
- */
-void GatherUsefulVariables(unsigned long long int& accumulatedMemory, bool& isFusion)
-{
-  otb::MPIConfig::Instance()->barrier();
-  int isFusionInteger = 0;
-  long long int accumulatedMemoryLLI = static_cast<long long int>(accumulatedMemory);
-  if (isFusion)
-    isFusionInteger = 1;
-  GatherMe<int>(isFusionInteger, MPI_INT);
-  GatherMe<long long int>(accumulatedMemoryLLI, MPI_LONG_LONG_INT);
-  accumulatedMemory = static_cast<long long unsigned int>(accumulatedMemoryLLI);
-  if (isFusionInteger>0)
-    isFusion = true;
-}
-
-
-#endif
-
 /*
  * Print time elapsed
  */
diff --git a/otb-module.cmake b/otb-module.cmake
index b399ffe0b9fcfc28dbf5b49816830caa3153321d..8913ff9d8ef57a717c1573c1744e24dc554c0952 100644
--- a/otb-module.cmake
+++ b/otb-module.cmake
@@ -6,8 +6,6 @@ otb_module(LSGRM
   OTBCommon
   OTBApplicationEngine
   OTBConversion
-  OPTIONAL_DEPENDS
-  OTBMPI
   TEST_DEPENDS
   OTBTestKernel
   OTBCommandLine