/* lsgrmHeader.h */
#ifndef __LSGRM_HEADER_H
#define __LSGRM_HEADER_H
#include <cassert>
#include <cstdlib>
#include <string>
#include <sstream>
#include <fstream>
#include <algorithm>
#include <vector>
#include <iterator>
#include <stack>
#include <iostream> // std::cout in ShowTime()
#include <limits>   // std::numeric_limits in BroadcastImage()
#include <cmath>    // std::floor
#include <boost/algorithm/string.hpp>
#include <boost/progress.hpp>
#ifdef OTB_USE_MPI
#include "otbMPIConfig.h"
#include "mpi.h" // TODO: implement needed methods inside otbMPIConfig.h
#include "otbExtractROI.h"
#include "itkImageRegionIterator.h"
#include "otbImageFileWriter.h"
#endif
/* MPI related functions */
#ifdef OTB_USE_MPI
/*
 * This function returns TRUE if the process #myrank is assigned
 * to the task #div in a pool of #nprocs processes
 */
bool MyTurn(int div = 0)
{
  otb::MPIConfig::Pointer mpiConfig = otb::MPIConfig::Instance();
  unsigned int proc = 0;
  if (mpiConfig->GetNbProcs() != 0)
    proc = div % mpiConfig->GetNbProcs();
  return (proc == mpiConfig->GetMyRank());
}
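/*
 * Usage sketch (illustration only, not part of the original header):
 * round-robin distribution of tasks across the MPI processes.
 * nbOfTasks and ProcessTask() are hypothetical placeholders.
 *
 *   for (int task = 0; task < nbOfTasks; task++)
 *     if (MyTurn(task))
 *       ProcessTask(task); // each task runs on exactly one rank
 */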
/*
 * This function gathers the given value from every other process, sums the
 * contributions on the master, and updates the value on all processes
 * TODO: MPI implementation using OTB MPI Wrapper
 */
template<typename T>
void GatherMe(T& x, MPI_Datatype dataType)
{
  if (otb::MPIConfig::Instance()->GetMyRank() == 0)
  {
    // Master process
    // Gather
    for (unsigned int p = 1 ; p < otb::MPIConfig::Instance()->GetNbProcs() ; p++)
    {
      T partial_sum;
      MPI_Recv(&partial_sum, 1, dataType, p, MPI_ANY_TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
      x += partial_sum;
    }
    // Dispatch
    for (unsigned int p = 1 ; p < otb::MPIConfig::Instance()->GetNbProcs() ; p++)
      MPI_Send(&x, 1, dataType, p, 0, MPI_COMM_WORLD);
  }
  else
  {
    // Slave process
    MPI_Send(&x, 1, dataType, 0, 0, MPI_COMM_WORLD);
    MPI_Recv(&x, 1, dataType, 0, MPI_ANY_TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
  }
}
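/*
 * Usage sketch (illustration only): summing a per-process counter so that
 * every rank ends up with the global total. localCount and
 * ComputeLocalCount() are hypothetical placeholders.
 *
 *   long long int localCount = ComputeLocalCount(); // value local to this rank
 *   GatherMe<long long int>(localCount, MPI_LONG_LONG_INT);
 *   // localCount now holds the sum over all processes, on every rank
 */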
/*
 * Function used to broadcast the label image to every MPI process
 */
template<class TImageType>
void BroadcastImage(typename TImageType::Pointer & inPtr)
{
  otb::MPIConfig::Instance()->barrier();

  unsigned int width;
  unsigned int height;
  unsigned int block_height;
  unsigned int current_start_y;
  if (otb::MPIConfig::Instance()->GetMyRank() == 0)
  {
    // Master process
    width = inPtr->GetLargestPossibleRegion().GetSize()[0];
    height = inPtr->GetLargestPossibleRegion().GetSize()[1];
  }

  // Broadcast width and height
  MPI_Bcast(&width, 1, MPI_UNSIGNED, 0, MPI_COMM_WORLD);
  MPI_Bcast(&height, 1, MPI_UNSIGNED, 0, MPI_COMM_WORLD);

  // Slave processes allocate the full image
  typename TImageType::IndexType index;
  index.Fill(0);
  typename TImageType::SizeType size;
  size[0] = width;
  size[1] = height;
  typename TImageType::RegionType region(index, size);
  if (otb::MPIConfig::Instance()->GetMyRank() > 0)
  {
    inPtr = TImageType::New();
    inPtr->SetRegions(region);
    inPtr->SetNumberOfComponentsPerPixel(1);
    inPtr->Allocate();
  }

  // Maximum data count that MPI can handle in a single broadcast
  unsigned int maximum_count = std::numeric_limits<int>::max();
  block_height = std::floor((float) maximum_count / width);

  // Broadcast the array block by block (blocks of image lines)
  current_start_y = 0;
  while (current_start_y < height)
  {
    if (current_start_y + block_height > height)
      block_height = height - current_start_y;

    // Subregion of the image
    typename TImageType::Pointer tmpPtr = TImageType::New();
    typename TImageType::IndexType subregion_index;
    subregion_index[0] = 0;
    subregion_index[1] = current_start_y;
    typename TImageType::SizeType subregion_size;
    subregion_size[0] = width;
    subregion_size[1] = block_height;
    typename TImageType::RegionType subregion(subregion_index, subregion_size);

    // Slave processes allocate the subregion image
    if (otb::MPIConfig::Instance()->GetMyRank() > 0)
    {
      tmpPtr->SetRegions(subregion);
      tmpPtr->Allocate();
    }
    else
    {
      // Master process extracts the subregion from the input image
      typedef typename otb::ExtractROI<typename TImageType::InternalPixelType,
          typename TImageType::InternalPixelType> ExtractROIFilterType;
      typename ExtractROIFilterType::Pointer filter = ExtractROIFilterType::New();
      filter->SetInput(inPtr);
      filter->SetStartX(0);
      filter->SetStartY(current_start_y);
      filter->SetSizeX(width);
      filter->SetSizeY(block_height);
      filter->SetReleaseDataFlag(false);
      filter->Update();
      tmpPtr = filter->GetOutput();
    }
    current_start_y += block_height;

    // Broadcast the buffer
    MPI_Bcast(tmpPtr->GetBufferPointer(), width * block_height, MPI_UNSIGNED, 0, MPI_COMM_WORLD);

    // Slave processes must copy the subregion back into the full image
    if (otb::MPIConfig::Instance()->GetMyRank() > 0)
    {
      typedef itk::ImageRegionIterator<TImageType> IteratorType;
      IteratorType it1(inPtr, subregion);
      IteratorType it2(tmpPtr, subregion);
      for (it1.GoToBegin(), it2.GoToBegin(); !it1.IsAtEnd(); ++it1, ++it2)
      {
        it1.Set(it2.Get());
      }
    } // recopy image
  } // while data to transmit
}

/*
 * Gather accumulatedMemory and isFusion variables
 * TODO: MPI implementation using OTB MPI Wrapper
 */
void GatherUsefulVariables(unsigned long long int& accumulatedMemory, bool& isFusion)
{
  otb::MPIConfig::Instance()->barrier();
  int isFusionInteger = 0;
  long long int accumulatedMemoryLLI = static_cast<long long int>(accumulatedMemory);
  if (isFusion)
    isFusionInteger = 1;
  GatherMe<int>(isFusionInteger, MPI_INT);
  GatherMe<long long int>(accumulatedMemoryLLI, MPI_LONG_LONG_INT);
  accumulatedMemory = static_cast<long long unsigned int>(accumulatedMemoryLLI);
  if (isFusionInteger > 0)
    isFusion = true;
}
#endif

/*
 * Print time elapsed (the timer is taken by reference so that restart()
 * affects the caller's timer)
 */
void ShowTime(boost::timer & t)
{
  std::cout << "--- Process duration : " << std::floor(t.elapsed()) << " s" << std::endl;
  t.restart();
}

#endif /* __LSGRM_HEADER_H */
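/*
 * Usage sketch (illustration only, not part of the original module): how a
 * driver could combine the helpers above under OTB_USE_MPI. LabelImageType,
 * labelImage, nbOfTasks and ProcessTile() are hypothetical placeholders.
 *
 *   boost::timer t;
 *   BroadcastImage<LabelImageType>(labelImage);  // every rank gets the labels
 *   unsigned long long int accumulatedMemory = 0;
 *   bool isFusion = false;
 *   for (int task = 0; task < nbOfTasks; task++)
 *     if (MyTurn(task))
 *       ProcessTile(task, accumulatedMemory, isFusion); // placeholder
 *   GatherUsefulVariables(accumulatedMemory, isFusion); // sync all ranks
 *   ShowTime(t);
 */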