// lsgrmHeader.h
// Helper functions for LSGRM: MPI task assignment, variable gathering,
// and label-image broadcast across MPI processes.
#ifndef __LSGRM_HEADER_H
#define __LSGRM_HEADER_H

#include <cassert>
#include <cstdlib>
#include <cmath>
#include <limits>
#include <iostream>
#include <string>
#include <sstream>
#include <fstream>
#include <algorithm>
#include <vector>
#include <iterator>
#include <stack>

#include <boost/algorithm/string.hpp>
#include <boost/progress.hpp>

#ifdef OTB_USE_MPI
#include "otbMPIConfig.h"
#include "mpi.h" // TODO: implement needed methods inside otbMPIConfig.h
#include "otbExtractROI.h"
#include "itkImageRegionIterator.h"
#include "otbImageFileWriter.h"
#endif

/*
 * This function returns TRUE if the process #myrank is assigned
 * to the task #div in a pool of #nprocs processes
 */
/**
 * Returns true when the calling MPI process is assigned to task #div.
 * Tasks are distributed round-robin: task #div belongs to rank (div % nprocs).
 *
 * @param div  task index (defaults to 0)
 * @return true if this process should handle task #div; always true in
 *         non-MPI builds (single process handles everything)
 *
 * NOTE: declared `inline` because this is a non-template function defined in
 * a header — without it, including this header in several translation units
 * causes multiple-definition link errors.
 */
inline bool MyTurn(int div = 0)
{
#ifdef OTB_USE_MPI
  otb::MPIConfig::Pointer mpiConfig = otb::MPIConfig::Instance();
  unsigned int proc = 0;
  // Guard against division by zero if the MPI runtime reports 0 processes
  if (mpiConfig->GetNbProcs() != 0)
    proc = div % mpiConfig->GetNbProcs();
  return (proc == mpiConfig->GetMyRank());
#endif
  return true;
}

/*
 * This function gathers the given value from the other processes,
 * sums it, and dispatches the updated value back to every process.
 * TODO: MPI implementation using OTB MPI Wrapper
 */
#ifdef OTB_USE_MPI
template<typename T>
remicres's avatar
remicres committed
46
void GatherMe(T& x, MPI_Datatype dataType)
47
{
remicres's avatar
remicres committed
48

49
  if (otb::MPIConfig::Instance()->GetMyRank() == 0)
50 51 52
    {
    // Master process
    // Gather
53
    for (unsigned int p = 1 ; p < otb::MPIConfig::Instance()->GetNbProcs() ; p++)
54 55
      {
      T partial_sum;
56
      MPI_Recv( &partial_sum, 1, dataType, p, MPI_ANY_TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
57 58 59
      x += partial_sum;
      }
    // Dispatch
60
    for (unsigned int p = 1 ; p < otb::MPIConfig::Instance()->GetNbProcs() ; p++)
remicres's avatar
remicres committed
61
      MPI_Send(&x, 1, dataType, p, 0, MPI_COMM_WORLD);
62 63 64 65
    }
  else
    {
    // Slave process
remicres's avatar
remicres committed
66
    MPI_Send(&x, 1, dataType, 0, 0, MPI_COMM_WORLD);
67
    MPI_Recv(&x, 1, dataType, 0, MPI_ANY_TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
68 69
    }
}

/*
 * Function used to broadcast the label image to every MPI process
 */
// Broadcasts the image held by rank 0 to every other MPI process.
// On entry, inPtr is valid only on rank 0; on exit, every rank holds a full
// copy. The image is transmitted in horizontal bands so that each MPI_Bcast
// stays below MPI's per-call element limit (INT_MAX elements).
template<class TImageType>
void BroadcastImage(typename TImageType::Pointer & inPtr)
{
  // Synchronize all processes before starting the transfer
  otb::MPIConfig::Instance()->barrier();

  unsigned int width;
  unsigned int height;
  unsigned int block_height;    // number of rows per broadcast band
  unsigned int current_start_y; // first row of the current band
  if (otb::MPIConfig::Instance()->GetMyRank() == 0)
    {
    // Master process: only rank 0 knows the image dimensions at this point
    width = inPtr->GetLargestPossibleRegion().GetSize()[0];
    height = inPtr->GetLargestPossibleRegion().GetSize()[1];
    }

  // Broadcast width and height
  MPI_Bcast(&width, 1, MPI_UNSIGNED, 0, MPI_COMM_WORLD);
  MPI_Bcast(&height, 1, MPI_UNSIGNED, 0, MPI_COMM_WORLD);

  // Slave processes do allocate image
  typename TImageType::IndexType index;
  index.Fill(0);
  typename TImageType::SizeType size;
  size[0] = width;
  size[1] = height;
  typename TImageType::RegionType region(index,size);
  if (otb::MPIConfig::Instance()->GetMyRank() > 0)
    {
    // Allocate a single-band image of the broadcast dimensions
    inPtr = TImageType::New();
    inPtr->SetRegions(region);
    inPtr->SetNumberOfComponentsPerPixel(1);
    inPtr->Allocate();
    }

  // Maximum data count that mpi can handle (MPI_Bcast counts are int)
  unsigned int maximum_count = std::numeric_limits<int>::max();
  // Rows per band such that width*block_height never exceeds maximum_count
  block_height = std::floor((float) maximum_count / width);

  // Broadcast array block by block (lines)
  current_start_y = 0;
  while (current_start_y < height)
    {
    // Last band may be shorter than the nominal block height
    if ( current_start_y + block_height > height )
      block_height = height - current_start_y;

    // Subregion of image covering rows [current_start_y, current_start_y+block_height)
    typename TImageType::Pointer tmpPtr = TImageType::New();

    typename TImageType::IndexType subregion_index;
    subregion_index[0] = 0;
    subregion_index[1] = current_start_y;
    typename TImageType::SizeType subregion_size;
    subregion_size[0] = width;
    subregion_size[1] = block_height;
    typename TImageType::RegionType subregion(subregion_index, subregion_size);

    // Slave processes do allocate subregion image (receive buffer)
    if (otb::MPIConfig::Instance()->GetMyRank() > 0)
      {
      tmpPtr->SetRegions(subregion);
      tmpPtr->Allocate();
      }
    else
      {
      // Rank 0 extracts the band from the source image into a contiguous buffer
      typedef typename otb::ExtractROI<typename TImageType::InternalPixelType,
          typename TImageType::InternalPixelType> ExtractROIFilterType;
      typename ExtractROIFilterType::Pointer filter = ExtractROIFilterType::New();
      filter->SetInput(inPtr);
      filter->SetStartX(0);
      filter->SetStartY(current_start_y);
      filter->SetSizeX(width);
      filter->SetSizeY(block_height);
      filter->SetReleaseDataFlag(false);
      filter->Update();
      tmpPtr = filter->GetOutput();
      }

    current_start_y += block_height;

    // Broadcast buffer
    // NOTE(review): MPI_UNSIGNED assumes the pixel type is unsigned int —
    // confirm TImageType's pixel type matches, or the transfer is corrupted.
    MPI_Bcast(tmpPtr->GetBufferPointer(), width*block_height, MPI_UNSIGNED, 0, MPI_COMM_WORLD);

    // Slave process must recopy the image (band buffer -> full image)
    if (otb::MPIConfig::Instance()->GetMyRank() > 0)
      {
      typedef itk::ImageRegionIterator<TImageType> IteratorType;
      IteratorType it1(inPtr, subregion);
      IteratorType it2(tmpPtr, subregion);

      for (it1.GoToBegin(), it2.GoToBegin(); !it1.IsAtEnd(); ++it1, ++it2)
        {
        it1.Set(it2.Get());
        }
      } // recopy image

    } // while data to transmit

}


#endif

/*
 * Gathers the accumulatedMemory and isFusion variables across all MPI
 * processes (sum for accumulatedMemory, logical OR for isFusion).
 * TODO: MPI implementation using OTB MPI Wrapper
 */
/**
 * Reduces the segmentation bookkeeping variables across all MPI processes:
 * accumulatedMemory becomes the sum over all ranks, and isFusion becomes
 * true if any rank had a fusion.
 *
 * @param accumulatedMemory  in/out: local memory count -> global sum
 * @param isFusion           in/out: local flag -> global OR
 *
 * In non-MPI builds this is a no-op. Declared `inline` because this is a
 * non-template function defined in a header (avoids multiple-definition
 * link errors when the header is included in several translation units).
 */
inline void GatherUsefulVariables(unsigned long long int& accumulatedMemory, bool& isFusion)
{
#ifdef OTB_USE_MPI
  otb::MPIConfig::Instance()->barrier();
  // GatherMe sums integers, so the bool is encoded as 0/1: any non-zero
  // total means at least one rank had a fusion.
  int isFusionInteger = 0;
  long long int accumulatedMemoryLLI = static_cast<long long int>(accumulatedMemory);
  if (isFusion)
    isFusionInteger = 1;
  GatherMe<int>(isFusionInteger, MPI_INT);
  GatherMe<long long int>(accumulatedMemoryLLI, MPI_LONG_LONG_INT);
  accumulatedMemory = static_cast<long long unsigned int>(accumulatedMemoryLLI);
  if (isFusionInteger>0)
    isFusion = true;
#endif
}

/*
 * Print time elapsed
 */
/**
 * Prints the elapsed time (whole seconds) of the given boost timer.
 *
 * @param t  timer whose elapsed() value is reported
 *
 * NOTE(review): t is passed BY VALUE, so the trailing restart() only resets
 * the local copy and has no effect on the caller's timer — callers that
 * expect a reset must restart their own timer. Kept as-is to preserve the
 * existing call signature.
 *
 * Declared `inline` because this is a non-template function defined in a
 * header (avoids multiple-definition link errors).
 */
inline void ShowTime(boost::timer t)
{
  std::cout << "--- Process duration : " << std::floor(t.elapsed()) << " s" << std::endl;
  t.restart();
}
#endif