#ifndef __LSGRM_HEADER_H
#define __LSGRM_HEADER_H
#include <cassert>
#include <cstdlib>
#include <string>
#include <sstream>
#include <fstream>
#include <algorithm>
#include <vector>
#include <iterator>
#include <stack>
#include <cmath>     // std::floor
#include <iostream>  // std::cout
#include <limits>    // std::numeric_limits
#include <boost/algorithm/string.hpp>

#include <boost/progress.hpp>

#ifdef OTB_USE_MPI
#include "otbMPIConfig.h"
#include "mpi.h" // TODO: implement needed methods inside otbMPIConfig.h
#include "otbExtractROI.h"
#include "itkImageRegionIterator.h"
#include "otbImageFileWriter.h"
#endif

/* MPI related functions */

#ifdef OTB_USE_MPI
/*
 * This function returns TRUE if the task #div is assigned to the
 * process #myrank in a pool of #nprocs processes
 */
bool MyTurn(int div = 0)
{
  otb::MPIConfig::Pointer mpiConfig = otb::MPIConfig::Instance();
  unsigned int proc = 0;
  if (mpiConfig->GetNbProcs() != 0)
    proc = div % mpiConfig->GetNbProcs();
  return (proc == mpiConfig->GetMyRank());
}
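
/*
 * Usage sketch: MyTurn() can distribute a set of independent tasks over the
 * MPI processes in a round-robin fashion (nbTasks and DoTask() below are
 * hypothetical names, used only for illustration).
 *
 *   for (unsigned int task = 0; task < nbTasks; task++)
 *     if (MyTurn(task))
 *       DoTask(task); // executed only by the process of rank task % nbProcs
 */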

/*
 * This function gathers the given value from all processes, sums it,
 * and updates it on every process (sum all-reduce)
 * TODO: MPI implementation using OTB MPI Wrapper
 */
template<typename T>
void GatherMe(T& x, MPI_Datatype dataType)
{

  if (otb::MPIConfig::Instance()->GetMyRank() == 0)
    {
    // Master process
    // Gather
    for (unsigned int p = 1 ; p < otb::MPIConfig::Instance()->GetNbProcs() ; p++)
      {
      T partial_sum;
      MPI_Recv( &partial_sum, 1, dataType, p, MPI_ANY_TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
      x += partial_sum;
      }
    // Dispatch
    for (unsigned int p = 1 ; p < otb::MPIConfig::Instance()->GetNbProcs() ; p++)
      MPI_Send(&x, 1, dataType, p, 0, MPI_COMM_WORLD);
    }
  else
    {
    // Slave process
    MPI_Send(&x, 1, dataType, 0, 0, MPI_COMM_WORLD);
    MPI_Recv(&x, 1, dataType, 0, MPI_ANY_TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
    }
}
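
/*
 * Usage sketch: GatherMe() behaves like a sum all-reduce, i.e. after the call
 * every process holds the sum of the local values (localCount and
 * ComputeLocalCount() below are hypothetical names).
 *
 *   long long int localCount = ComputeLocalCount();
 *   GatherMe<long long int>(localCount, MPI_LONG_LONG_INT);
 *   // localCount now holds the sum over all processes, on every process
 */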

/*
 * Function used to broadcast the label image to every MPI process
 */
template<class TImageType>
void BroadcastImage(typename TImageType::Pointer & inPtr)
{
  otb::MPIConfig::Instance()->barrier();

  unsigned int width;
  unsigned int height;
  unsigned int block_height;
  unsigned int current_start_y;
  if (otb::MPIConfig::Instance()->GetMyRank() == 0)
    {
    // Master process
    width = inPtr->GetLargestPossibleRegion().GetSize()[0];
    height = inPtr->GetLargestPossibleRegion().GetSize()[1];
    }

  // Broadcast width and height
  MPI_Bcast(&width, 1, MPI_UNSIGNED, 0, MPI_COMM_WORLD);
  MPI_Bcast(&height, 1, MPI_UNSIGNED, 0, MPI_COMM_WORLD);

  // Slave processes allocate the image
  typename TImageType::IndexType index;
  index.Fill(0);
  typename TImageType::SizeType size;
  size[0] = width;
  size[1] = height;
  typename TImageType::RegionType region(index,size);
  if (otb::MPIConfig::Instance()->GetMyRank() > 0)
    {
    inPtr = TImageType::New();
    inPtr->SetRegions(region);
    inPtr->SetNumberOfComponentsPerPixel(1);
    inPtr->Allocate();
    }

  // Maximum data count that mpi can handle
  unsigned int maximum_count = std::numeric_limits<int>::max();
  block_height = std::floor((float) maximum_count / width);

  // Broadcast array block by block (lines)
  current_start_y = 0;
  while (current_start_y < height)
    {
    if ( current_start_y + block_height > height )
      block_height = height - current_start_y;

    // Subregion of image
    typename TImageType::Pointer tmpPtr = TImageType::New();

    typename TImageType::IndexType subregion_index;
    subregion_index[0] = 0;
    subregion_index[1] = current_start_y;
    typename TImageType::SizeType subregion_size;
    subregion_size[0] = width;
    subregion_size[1] = block_height;
    typename TImageType::RegionType subregion(subregion_index, subregion_size);

    // Slave processes allocate the subregion image
    if (otb::MPIConfig::Instance()->GetMyRank() > 0)
      {
      tmpPtr->SetRegions(subregion);
      tmpPtr->Allocate();
      }
    else
      {
      typedef typename otb::ExtractROI<typename TImageType::InternalPixelType,
          typename TImageType::InternalPixelType> ExtractROIFilterType;
      typename ExtractROIFilterType::Pointer filter = ExtractROIFilterType::New();
      filter->SetInput(inPtr);
      filter->SetStartX(0);
      filter->SetStartY(current_start_y);
      filter->SetSizeX(width);
      filter->SetSizeY(block_height);
      filter->SetReleaseDataFlag(false);
      filter->Update();
      tmpPtr = filter->GetOutput();
      }

    current_start_y += block_height;

    // Broadcast buffer
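    // (the image's internal pixel type is assumed to be a single-component
    // 32-bit unsigned integer, matching MPI_UNSIGNED below)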
    MPI_Bcast(tmpPtr->GetBufferPointer(), width*block_height, MPI_UNSIGNED, 0, MPI_COMM_WORLD);

    // Slave processes copy the received block back into the full image
    if (otb::MPIConfig::Instance()->GetMyRank() > 0)
      {
      typedef itk::ImageRegionIterator<TImageType> IteratorType;
      IteratorType it1(inPtr, subregion);
      IteratorType it2(tmpPtr, subregion);

      for (it1.GoToBegin(), it2.GoToBegin(); !it1.IsAtEnd(); ++it1, ++it2)
        {
        it1.Set(it2.Get());
        }
      } // recopy image
    } // while data to transmit

}
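
/*
 * Usage sketch: the master (rank 0) produces the label image, then every
 * process receives a full copy of it (LabelImageType and reader below are
 * hypothetical names).
 *
 *   LabelImageType::Pointer labelImage;
 *   if (otb::MPIConfig::Instance()->GetMyRank() == 0)
 *     {
 *     reader->Update();
 *     labelImage = reader->GetOutput();
 *     }
 *   BroadcastImage<LabelImageType>(labelImage);
 *   // labelImage is now allocated and filled on every process
 */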


/*
 * Gather accumulatedMemory and isFusion variables
 * TODO: MPI implementation using OTB MPI Wrapper
 */
void GatherUsefulVariables(unsigned long long int& accumulatedMemory, bool& isFusion)
{
  otb::MPIConfig::Instance()->barrier();
  int isFusionInteger = 0;
  long long int accumulatedMemoryLLI = static_cast<long long int>(accumulatedMemory);
  if (isFusion)
    isFusionInteger = 1;
  GatherMe<int>(isFusionInteger, MPI_INT);
  GatherMe<long long int>(accumulatedMemoryLLI, MPI_LONG_LONG_INT);
  accumulatedMemory = static_cast<long long unsigned int>(accumulatedMemoryLLI);
  if (isFusionInteger>0)
    isFusion = true;
}
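
/*
 * Usage sketch: after a local processing step, each process reports its memory
 * footprint and whether any fusion occurred; after the call both values are
 * global and identical on every process (the two right-hand-side variables
 * below are hypothetical).
 *
 *   unsigned long long int accumulatedMemory = localGraphMemory;
 *   bool isFusion = localFusionOccurred;
 *   GatherUsefulVariables(accumulatedMemory, isFusion);
 *   // accumulatedMemory is now the global sum; isFusion the global OR
 */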


#endif

/*
 * Print time elapsed
 */
void ShowTime(boost::timer & t)
{
  std::cout << "--- Process duration : " << std::floor(t.elapsed()) << " s" << std::endl;
  t.restart();
}
#endif