diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 0000000000000000000000000000000000000000..2ca1beb2a56a4cb45c526deab8306417c095f6fe --- /dev/null +++ b/.dockerignore @@ -0,0 +1,2 @@ +.git +python/__pycache__ diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..53e3f777910b643ae8780a14a3d7bbeb64342bf9 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,167 @@ +##### Configurable Dockerfile with multi-stage build - Author: Vincent Delbar +## Mandatory +ARG BASE_IMG + +# ---------------------------------------------------------------------------- +# Init base stage - will be cloned as intermediate build env +FROM $BASE_IMG AS otbtf-base +WORKDIR /tmp + +### System packages +COPY tools/docker/build-deps-*.txt ./ +ARG DEBIAN_FRONTEND=noninteractive +# CLI +RUN apt-get update -y && apt-get upgrade -y \ + && cat build-deps-cli.txt | xargs apt-get install --no-install-recommends -y \ + && apt-get clean && rm -rf /var/lib/apt/lists/* +# Optional GUI +ARG GUI=false +RUN if $GUI; then \ + apt-get update -y \ + && cat build-deps-gui.txt | xargs apt-get install --no-install-recommends -y \ + && apt-get clean && rm -rf /var/lib/apt/lists/* ; fi + +### Python3 links and pip packages +RUN ln -s /usr/bin/python3 /usr/local/bin/python && ln -s /usr/bin/pip3 /usr/local/bin/pip +# NumPy version is conflicting with system's gdal dep and may require venv +ARG NUMPY_SPEC="==1.19.*" +RUN pip install --no-cache-dir -U pip wheel mock six future deprecated "numpy$NUMPY_SPEC" \ + && pip install --no-cache-dir --no-deps keras_applications keras_preprocessing + +# ---------------------------------------------------------------------------- +# Tmp builder stage - dangling cache should persist until "docker builder prune" +FROM otbtf-base AS builder +# A smaller value may be required to avoid OOM errors when building OTB GUI +ARG CPU_RATIO=1 + +RUN mkdir -p /src/tf /opt/otbtf/bin /opt/otbtf/include /opt/otbtf/lib +WORKDIR /src/tf + +RUN git config --global advice.detachedHead false + +### TF +ARG TF=v2.5.0 +# Install bazelisk (will read .bazelversion and download the right bazel binary - latest by default) +RUN wget -qO /opt/otbtf/bin/bazelisk https://github.com/bazelbuild/bazelisk/releases/latest/download/bazelisk-linux-amd64 \ + && chmod +x /opt/otbtf/bin/bazelisk \ + && ln -s /opt/otbtf/bin/bazelisk /opt/otbtf/bin/bazel + +ARG BZL_TARGETS="//tensorflow:libtensorflow_cc.so //tensorflow/tools/pip_package:build_pip_package" +# "--config=opt" will enable 'march=native' (otherwise read comments about CPU compatibilty and edit CC_OPT_FLAGS in build-env-tf.sh) +ARG BZL_CONFIGS="--config=nogcp --config=noaws --config=nohdfs --config=opt" +# "--compilation_mode opt" is already enabled by default (see tf repo .bazelrc and configure.py) +ARG BZL_OPTIONS="--verbose_failures --remote_cache=http://localhost:9090" + +# Build +ARG ZIP_TF_BIN=false +COPY tools/docker/build-env-tf.sh ./ +RUN git clone --single-branch -b $TF https://github.com/tensorflow/tensorflow.git \ + && cd tensorflow \ + && export PATH=$PATH:/opt/otbtf/bin \ + && export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/opt/otbtf/lib \ + && bash -c '\ + source ../build-env-tf.sh \ + && ./configure \ + && export TMP=/tmp/bazel \ + && BZL_CMD="build $BZL_TARGETS $BZL_CONFIGS $BZL_OPTIONS" \ + && bazel $BZL_CMD --jobs="HOST_CPUS*$CPU_RATIO" ' \ +# Installation - split here if you want to check files ^ +#RUN cd tensorflow \ + && ./bazel-bin/tensorflow/tools/pip_package/build_pip_package /tmp/tensorflow_pkg \ + && pip3 
install --no-cache-dir --prefix=/opt/otbtf /tmp/tensorflow_pkg/tensorflow*.whl \ + && ln -s /opt/otbtf/lib/python3.* /opt/otbtf/lib/python3 \ + && cp -P bazel-bin/tensorflow/libtensorflow_cc.so* /opt/otbtf/lib/ \ + && ln -s $(find /opt/otbtf -type d -wholename "*/site-packages/tensorflow/include") /opt/otbtf/include/tf \ + # The only missing header in the wheel + && cp tensorflow/cc/saved_model/tag_constants.h /opt/otbtf/include/tf/tensorflow/cc/saved_model/ \ + # Symlink external libs (required for MKL - libiomp5) + && for f in $(find -L /opt/otbtf/include/tf -wholename "*/external/*/*.so"); do ln -s $f /opt/otbtf/lib/; done \ + # Compress and save TF binaries + && ( ! $ZIP_TF_BIN || zip -9 -j --symlinks /opt/otbtf/tf-$TF.zip tensorflow/cc/saved_model/tag_constants.h bazel-bin/tensorflow/libtensorflow_cc.so* /tmp/tensorflow_pkg/tensorflow*.whl ) \ + # Cleaning + && rm -rf bazel-* /src/tf /root/.cache/ /tmp/* + +### OTB +ARG GUI=false +ARG OTB=7.3.0 + +RUN mkdir /src/otb +WORKDIR /src/otb + +# SuperBuild OTB +COPY tools/docker/build-flags-otb.txt ./ +RUN git clone --single-branch -b $OTB https://gitlab.orfeo-toolbox.org/orfeotoolbox/otb.git \ + && mkdir -p build \ + && cd build \ + # Set GL/Qt build flags + && if $GUI; then \ + sed -i -r "s/-DOTB_USE_(QT|OPENGL|GL[UFE][WT])=OFF/-DOTB_USE_\1=ON/" ../build-flags-otb.txt; fi \ + # Possible ENH: superbuild-all-dependencies switch, with separated build-deps-minimal.txt and build-deps-otbcli.txt) + #&& if $OTB_SUPERBUILD_ALL; then sed -i -r "s/-DUSE_SYSTEM_([A-Z0-9]*)=ON/-DUSE_SYSTEM_\1=OFF/ " ../build-flags-otb.txt; fi \ + && OTB_FLAGS=$(cat "../build-flags-otb.txt") \ + && cmake ../otb/SuperBuild -DCMAKE_INSTALL_PREFIX=/opt/otbtf $OTB_FLAGS \ + && make -j $(python -c "import os; print(round( os.cpu_count() * $CPU_RATIO ))") + +### OTBTF - copy (without .git/) or clone repository +COPY . 
/src/otbtf +#RUN git clone https://github.com/remicres/otbtf.git /src/otbtf +RUN ln -s /src/otbtf /src/otb/otb/Modules/Remote/otbtf + +# Rebuild OTB with module +ARG KEEP_SRC_OTB=false +RUN cd /src/otb/build/OTB/build \ + && export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/opt/otbtf/lib \ + && export PATH=$PATH:/opt/otbtf/bin \ + && cmake /src/otb/otb \ + -DCMAKE_INSTALL_PREFIX=/opt/otbtf \ + -DOTB_WRAP_PYTHON=ON -DPYTHON_EXECUTABLE=/usr/bin/python3 \ + -DOTB_USE_TENSORFLOW=ON -DModule_OTBTensorflow=ON \ + -Dtensorflow_include_dir=/opt/otbtf/include/tf \ + # Forcing TF>=2, this Dockerfile hasn't been tested with v1 + missing link for libtensorflow_framework.so in the wheel + -DTENSORFLOW_CC_LIB=/opt/otbtf/lib/libtensorflow_cc.so.2 \ + -DTENSORFLOW_FRAMEWORK_LIB=/opt/otbtf/lib/python3/site-packages/tensorflow/libtensorflow_framework.so.2 \ + && make install -j $(python -c "import os; print(round( os.cpu_count() * $CPU_RATIO ))") \ + # Cleaning + && ( $GUI || rm -rf /opt/otbtf/bin/otbgui* ) \ + && ( $KEEP_SRC_OTB || rm -rf /src/otb ) \ + && rm -rf /root/.cache /tmp/* + +# Symlink executable python files in PATH +RUN for f in /src/otbtf/python/*.py; do if [ -x $f ]; then ln -s $f /opt/otbtf/bin/; fi; done + +# ---------------------------------------------------------------------------- +# Final stage +FROM otbtf-base +LABEL maintainer="Remi Cresson <remi.cresson[at]inrae[dot]fr>" + +# Copy files from intermediate stage +COPY --from=builder /opt/otbtf /opt/otbtf +COPY --from=builder /src /src + +# System-wide ENV +ENV PATH="/opt/otbtf/bin:$PATH" +ENV LD_LIBRARY_PATH="/opt/otbtf/lib:$LD_LIBRARY_PATH" +ENV PYTHONPATH="/opt/otbtf/lib/python3/site-packages:/opt/otbtf/lib/otb/python:/src/otbtf/python" +ENV OTB_APPLICATION_PATH="/opt/otbtf/lib/otb/applications" + +# Default user, directory and command (bash is the entrypoint when using 'docker create') +RUN useradd -s /bin/bash -m otbuser +WORKDIR /home/otbuser + +# Admin rights without password +ARG SUDO=true +RUN if $SUDO; then \ + usermod -a -G sudo otbuser \ + && echo "otbuser ALL=(ALL) NOPASSWD:ALL" >> /etc/sudoers; fi + +# Set /src/otbtf ownership to otbuser (but you still need 'sudo -i' in order to rebuild TF or OTB) +RUN chown -R otbuser:otbuser /src/otbtf + +# This won't prevent ownership problems with volumes if you're not UID 1000 +USER otbuser +# User-only ENV + +# Test python imports +RUN python -c "import tensorflow" +RUN python -c "import otbtf, tricks" +RUN python -c "import otbApplication as otb; otb.Registry.CreateApplication('ImageClassifierFromDeepFeatures')" diff --git a/README.md b/README.md index c940a651f237d18f6d986cfe19ced3dd4cf9d240..2ce6452f57996552da5abf006d809e7a73d98a19 100644 --- a/README.md +++ b/README.md @@ -1,19 +1,26 @@ -#  OTBTF + -[](https://opensource.org/licenses/Apache-2.0) +# OTBTF: Orfeo ToolBox meets TensorFlow -## Orfeo ToolBox meets TensorFlow +[](https://opensource.org/licenses/Apache-2.0) This remote module of the [Orfeo ToolBox](https://www.orfeo-toolbox.org) provides a generic, multi purpose deep learning framework, targeting remote sensing images processing. It contains a set of new process objects that internally invoke [Tensorflow](https://www.tensorflow.org/), and a bunch of user-oriented applications to perform deep learning with real-world remote sensing images. Applications can be used to build OTB pipelines from Python or C++ APIs. 
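+
+For instance, here is a minimal Python sketch running the `TensorflowModelServe` application through the OTB Python bindings (the input image, model directory, placeholder name `x1` and output tensor name `prediction` below are purely illustrative and depend on your own SavedModel):
+
+```
+import otbApplication
+
+# Create the OTBTF model serving application
+app = otbApplication.Registry.CreateApplication("TensorflowModelServe")
+app.SetParameterStringList("source1.il", ["spot7.tif"])     # input image (illustrative file name)
+app.SetParameterInt("source1.rfieldx", 16)                  # receptive field (width)
+app.SetParameterInt("source1.rfieldy", 16)                  # receptive field (height)
+app.SetParameterString("source1.placeholder", "x1")         # name of the input placeholder
+app.SetParameterString("model.dir", "/path/to/savedmodel")  # SavedModel directory
+app.SetParameterStringList("output.names", ["prediction"])  # name of the output tensor
+app.SetParameterString("out", "map.tif")
+app.ExecuteAndWriteOutput()
+```
+
+The same application object can also be connected in memory to other OTB applications (with `SetParameterInputImage` / `GetParameterOutputImage`) to build a fully streamed pipeline.
+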
-### Highlights - - Sampling, - - Training, supporting save/restore/import operations (a model can be trained from scratch or fine-tuned), - - Serving models with support of OTB streaming mechanism. Meaning (1) not limited by images sizes, (2) can be used as a "lego" in any OTB pipeline and preserve streaming, (3) MPI support available (use multiple processing unit to generate one single output image) +## Features + +### OTB Applications + +- Sample patches in remote sensing images with `PatchesExtraction`, +- Model training, supporting save/restore/import operations (a model can be trained from scratch or fine-tuned) with `TensorflowModelTrain`, +- Inference with support of OTB streaming mechanism with `TensorflowModelServe`. The streaming mechanism means (1) no limitation with images sizes, (2) inference can be used as a "lego" in any OTB pipeline (using C++ or Python APIs) and preserving streaming, (3) MPI support available (use multiple processing unit to generate one single output image) + +### Python -### Portfolio +This is a work in progress. For now, `tricks.py` provides a set of helpers to build deep nets, and `otbtf.py` provides datasets which can be used in Tensorflow pipelines to train networks from python. + +## Portfolio Below are some screen captures of deep learning applications performed at large scale with OTBTF. - Image to image translation (Spot-7 image --> Wikimedia Map using CGAN) @@ -27,411 +34,47 @@ Below are some screen captures of deep learning applications performed at large You can read more details about these applications on [this blog](https://mdl4eo.irstea.fr/2019/) -# How to install +## How to install -For now you have two options: either use the existing **docker image**, or build everything yourself **from source**. +For now you have two options: either use the existing **docker image**, or build everything **from source**. -## Docker image +### Docker Use the latest image from dockerhub: ``` -docker pull mdl4eo/otbtf2.0:cpu -docker run -u otbuser -v $(pwd):/home/otbuser mdl4eo/otbtf2.0:cpu otbcli_PatchesExtraction -help -``` - -Available docker images: - -| Name | Os | TF | OTB | Description | -| ----------------------- | ------------- | ------ | ----- | ---------------------- | -| **mdl4eo/otbtf1.6:cpu** | Ubuntu Xenial | r1.14 | 7.0.0 | CPU, no optimization | -| **mdl4eo/otbtf1.7:cpu** | Ubuntu Xenial | r1.14 | 7.0.0 | CPU, no optimization | -| **mdl4eo/otbtf1.7:gpu** | Ubuntu Xenial | r1.14 | 7.0.0 | GPU | -| **mdl4eo/otbtf2.0:cpu** | Ubuntu Xenial | r2.1 | 7.1.0 | CPU, no optimization | -| **mdl4eo/otbtf2.0:gpu** | Ubuntu Xenial | r2.1 | 7.1.0 | GPU | - -All GPU docker images are suited for **NVIDIA GPUs**. They use CUDA/CUDNN support and are built with compute capabilities 6.1, 5.2, 3.5. To change the compute capabilities, you can edit the dockerfile and build your own docker image (dockerfiles are provided in the `tools/dockerfiles/` path of the repository). -You can find more details on the **GPU docker image** and some **docker tips and tricks** on [this blog](https://mdl4eo.irstea.fr/2019/10/15/otbtf-docker-image-with-gpu/) - -## Build from sources - -See [here](doc/HOWTOBUILD.md) to see how to build the remote module from sources. - -# New applications - -Let's describe quickly the new applications provided. - -## PatchesExtraction - -This application performs the extraction of patches in images from a vector data containing points. -The OTB sampling framework can be used to generate the set of selected points. 
-After that, you can use the **PatchesExtraction** application to perform the sampling of your images. -We denote _input source_ an input image, or a stack of input images (of the same size !). -The user can set the `OTB_TF_NSOURCES` environment variable to select the number of _input sources_ that he wants. -For example, for sampling a Time Series (TS) together with a single Very High Resolution image (VHR), a number of 2 sources is required: 1 input images list for time series and 1 input image for the VHR. -The sampled patches will be extracted at each positions designed by the points, only if they are entirely lying inside all _input sources_ extents. -For each _input source_, patches sizes must be provided. -For each _input source_, the application export all sampled patches as a single multiband raster, stacked in rows. -For instance, for *n* samples of size *16 x 16* from a *4* channels _input source_, the output image will be a raster of size *16 x 16n* with *4* channels. -An optional output is an image of size *1 x n* containing the value of one specific field of the input vector data. -Typically, the *class* field can be used to generate a dataset suitable for a model that performs pixel wise classification. - - - -``` -This application extracts patches in multiple input images. Change the OTB_TF_NSOURCES environment variable to set the number of sources. -Parameters: - -source1 <group> Parameters for source 1 -MISSING -source1.il <string list> Input image(s) 1 (mandatory) -MISSING -source1.out <string> [pixel] Output patches for image 1 [pixel=uint8/uint16/int16/uint32/int32/float/double/cint16/cint32/cfloat/cdouble] (default value is float) (mandatory) -MISSING -source1.patchsizex <int32> X patch size for image 1 (mandatory) -MISSING -source1.patchsizey <int32> Y patch size for image 1 (mandatory) - -source1.nodata <float> No-data value for image 1(used only if "usenodata" is on) (mandatory, default value is 0) -MISSING -vec <string> Positions of the samples (must be in the same projection as input image) (mandatory) - -usenodata <boolean> Reject samples that have no-data value (optional, off by default, default value is false) - -outlabels <string> [pixel] output labels [pixel=uint8/uint16/int16/uint32/int32/float/double/cint16/cint32/cfloat/cdouble] (default value is uint8) (optional, off by default) -MISSING -field <string> field of class in the vector data (mandatory) - -progress <boolean> Report progress - -help <string list> Display long help (empty list), or help for given parameters keys - -Use -help param1 [... paramN] to see detailed documentation of those parameters. - -Examples: -otbcli_PatchesExtraction -vec points.sqlite -source1.il $s2_list -source1.patchsizex 16 -source1.patchsizey 16 -field class -source1.out outpatches_16x16.tif -outlabels outlabels.tif -``` - -## Build your Tensorflow model <a name="buildmodel"></a> - -You can build models using the TensorFlow Python API as shown in the `./python/` directory. -Models must be exported in **SavedModel** format. -When using a model in OTBTF, the important thing is to know the following parameters related to the _placeholders_ (the inputs of your model) and _output tensors_ (the outputs of your model). 
- - For each _input placeholder_: - - Name - - **Receptive field** - - For each _output tensor_: - - Name - - **Expression field** - - **Scale factor** - - - -The **scale factor** descibes the physical change of spacing of the outputs, typically introduced in the model by non unitary strides in pooling or convolution operators. -For each output, it is expressed relatively to one single input of the model called the _reference input source_. -Additionally, the names of the _target nodes_ must be known (e.g. "optimizer"). -Also, the names of _user placeholders_, typically scalars placeholders that are used to control some parameters of the model, must be know (e.g. "dropout_rate"). -The **receptive field** corresponds to the input volume that "sees" the deep net. -The **expression field** corresponds to the output volume that the deep net will create. - -## Train your Tensorflow model - -Here we assume that you have produced patches using the **PatchesExtraction** application, and that you have a **SavedModel** stored in a directory somewhere on your filesystem. -The **TensorflowModelTrain** application performs the training, validation (against test dataset, and against validation dataset) providing the usual metrics that machine learning frameworks provide (confusion matrix, recall, precision, f-score, ...). -You must provide the path of the **SavedModel** to the `model.dir` parameter. -The `model.restorefrom` and `model.saveto` corresponds to the variables of the **SavedModel** used respectively for restoring and saving them. -Set you _input sources_ for training (`training` parameter group) and for validation (`validation` parameter group): the evaluation is performed against training data, and optionally also against the validation data (only if you set `validation.mode` to "class"). -For each _input sources_, the patch size and the placeholder name must be provided. -Regarding validation, if a different name is found in a particular _input source_ of the `validation` parameter group, the application knows that the _input source_ is not fed to the model at inference, but is used as reference to compute evaluation metrics of the validation dataset. -Batch size (`training.batchsize`) and number of epochs (`training.epochs`) can be set. -_User placeholders_ can be set separately for training (`training.userplaceholders`) and validation (`validation.userplaceholders`). -The `validation.userplaceholders` can be useful if you have a model that behaves differently depending the given placeholder. -Let's take the example of dropout: it's nice for training, but you have to disable it to use the model at inference time. -Hence you will pass a placeholder with "dropout\_rate=0.3" for training and "dropout\_rate=0.0" for validation. -Of course, one can train models from handmade python code: to import the patches images, a convenient method consist in reading patches images as numpy arrays using OTB applications (e.g. **ExtractROI**) or GDAL, then do a np.reshape to the dimensions wanted. - - - -``` -Train a multisource deep learning net using Tensorflow. Change the OTB_TF_NSOURCES environment variable to set the number of sources. 
-Parameters: - -model <group> Model parameters -MISSING -model.dir <string> Tensorflow model_save directory (mandatory) - -model.restorefrom <string> Restore model from path (optional, off by default) - -model.saveto <string> Save model to path (optional, off by default) - -training <group> Training parameters - -training.batchsize <int32> Batch size (mandatory, default value is 100) - -training.epochs <int32> Number of epochs (mandatory, default value is 100) - -training.userplaceholders <string list> Additional single-valued placeholders for training. Supported types: int, float, bool. (optional, off by default) -MISSING -training.targetnodes <string list> Names of the target nodes (mandatory) - -training.outputtensors <string list> Names of the output tensors to display (optional, off by default) - -training.usestreaming <boolean> Use the streaming through patches (slower but can process big dataset) (optional, off by default, default value is false) - -training.source1 <group> Parameters for source #1 (training) -MISSING -training.source1.il <string list> Input image (or list to stack) for source #1 (training) (mandatory) -MISSING -training.source1.patchsizex <int32> Patch size (x) for source #1 (mandatory) -MISSING -training.source1.patchsizey <int32> Patch size (y) for source #1 (mandatory) -MISSING -training.source1.placeholder <string> Name of the input placeholder for source #1 (training) (mandatory) - -training.source2 <group> Parameters for source #2 (training) -MISSING -training.source2.il <string list> Input image (or list to stack) for source #2 (training) (mandatory) -MISSING -training.source2.patchsizex <int32> Patch size (x) for source #2 (mandatory) -MISSING -training.source2.patchsizey <int32> Patch size (y) for source #2 (mandatory) -MISSING -training.source2.placeholder <string> Name of the input placeholder for source #2 (training) (mandatory) - -validation <group> Validation parameters - -validation.step <int32> Perform the validation every Nth epochs (mandatory, default value is 10) - -validation.mode <string> Metrics to compute [none/class/rmse] (mandatory, default value is none) - -validation.userplaceholders <string list> Additional single-valued placeholders for validation. Supported types: int, float, bool. (optional, off by default) - -validation.usestreaming <boolean> Use the streaming through patches (slower but can process big dataset) (optional, off by default, default value is false) - -validation.source1 <group> Parameters for source #1 (validation) - -validation.source1.il <string list> Input image (or list to stack) for source #1 (validation) (mandatory) - -validation.source1.name <string> Name of the input placeholder or output tensor for source #1 (validation) (mandatory) - -validation.source2 <group> Parameters for source #2 (validation) - -validation.source2.il <string list> Input image (or list to stack) for source #2 (validation) (mandatory) - -validation.source2.name <string> Name of the input placeholder or output tensor for source #2 (validation) (mandatory) - -progress <boolean> Report progress - -help <string list> Display long help (empty list), or help for given parameters keys - -Use -help param1 [... paramN] to see detailed documentation of those parameters. 
- -Examples: -otbcli_TensorflowModelTrain -source1.il spot6pms.tif -source1.placeholder x1 -source1.patchsizex 16 -source1.patchsizey 16 -source2.il labels.tif -source2.placeholder y1 -source2.patchsizex 1 -source2.patchsizex 1 -model.dir /tmp/my_saved_model/ -training.userplaceholders is_training=true dropout=0.2 -training.targetnodes optimizer -model.saveto /tmp/my_saved_model/variables/variables +docker pull mdl4eo/otbtf2.4:cpu +docker run -u otbuser -v $(pwd):/home/otbuser mdl4eo/otbtf2.4:cpu otbcli_PatchesExtraction -help ``` -As you can note, there is `$OTB_TF_NSOURCES` + 1 sources because we often need at least one more source for the reference data (e.g. terrain truth for land cover mapping). - -## Serve the model +Read more in the [docker use documentation](doc/DOCKERUSE.md). -The **TensorflowModelServe** application perform model serving, it can be used to produce output raster with the desired tensors. -Thanks to the streaming mechanism, very large images can be produced. -The application uses the `TensorflowModelFilter` and a `StreamingFilter` to force the streaming of output. -This last can be optionally disabled by the user, if he prefers using the extended filenames to deal with chunk sizes. -However, it's still very useful when the application is used in other composites applications, or just without extended filename magic. -Some models can consume a lot of memory. -In addition, the native tiling strategy of OTB consists in strips but this might not always the best. -For Convolutional Neural Networks for instance, square tiles are more interesting because the padding required to perform the computation of one single strip of pixels induces to input a lot more pixels that to process the computation of one single tile of pixels. -So, this application takes in input one or multiple _input sources_ (the number of _input sources_ can be changed by setting the `OTB_TF_NSOURCES` to the desired number) and produce one output of the specified tensors. -The user is responsible of giving the **receptive field** and **name** of _input placeholders_, as well as the **expression field**, **scale factor** and **name** of _output tensors_. -The first _input source_ (`source1.il`) corresponds to the _reference input source_. -As explained [previously](#buildmodel), the **scale factor** provided for the _output tensors_ is related to this _reference input source_. -The user can ask for multiple _output tensors_, that will be stack along the channel dimension of the output raster. -However, if the sizes of those _output tensors_ are not consistent (e.g. a different number of (x,y) elements), an exception will be thrown. +### Build from sources - +Read more in the [build from sources documentation](doc/HOWTOBUILD.md). +## How to use -``` -Multisource deep learning classifier using TensorFlow. Change the OTB_TF_NSOURCES environment variable to set the number of sources. -Parameters: - -source1 <group> Parameters for source #1 -MISSING -source1.il <string list> Input image (or list to stack) for source #1 (mandatory) -MISSING -source1.rfieldx <int32> Input receptive field (width) for source #1 (mandatory) -MISSING -source1.rfieldy <int32> Input receptive field (height) for source #1 (mandatory) -MISSING -source1.placeholder <string> Name of the input placeholder for source #1 (mandatory) - -model <group> model parameters -MISSING -model.dir <string> TensorFlow model_save directory (mandatory) - -model.userplaceholders <string list> Additional single-valued placeholders. 
Supported types: int, float, bool. (optional, off by default) - -model.fullyconv <boolean> Fully convolutional (optional, off by default, default value is false) - -output <group> Output tensors parameters - -output.spcscale <float> The output spacing scale, related to the first input (mandatory, default value is 1) -MISSING -output.names <string list> Names of the output tensors (mandatory) - -output.efieldx <int32> The output expression field (width) (mandatory, default value is 1) - -output.efieldy <int32> The output expression field (height) (mandatory, default value is 1) - -optim <group> This group of parameters allows optimization of processing time - -optim.disabletiling <boolean> Disable tiling (optional, off by default, default value is false) - -optim.tilesizex <int32> Tile width used to stream the filter output (mandatory, default value is 16) - -optim.tilesizey <int32> Tile height used to stream the filter output (mandatory, default value is 16) -MISSING -out <string> [pixel] output image [pixel=uint8/uint16/int16/uint32/int32/float/double/cint16/cint32/cfloat/cdouble] (default value is float) (mandatory) - -progress <boolean> Report progress - -help <string list> Display long help (empty list), or help for given parameters keys +- Reading [the applications documentation](doc/APPLICATIONS.md) will help, of course 😉 +- A small [tutorial](https://mdl4eo.irstea.fr/2019/01/04/an-introduction-to-deep-learning-on-remote-sensing-images-tutorial/) on MDL4EO's blog +- in the `python` folder are provided some [ready-to-use deep networks, with documentation and scientific references](doc/EXAMPLES.md). +- A book: *Cresson, R. (2020). Deep Learning for Remote Sensing Images with Open Source Software. CRC Press.* Use QGIS, OTB and Tensorflow to perform various kind of deep learning sorcery on remote sensing images (patch-based classification for landcover mapping, semantic segmentation of buildings, optical image restoration from joint SAR/Optical time series). +- Check [our repository](https://github.com/remicres/otbtf_tutorials_resources) containing stuff (data and models) to begin with with! -Use -help param1 [... paramN] to see detailed documentation of those parameters. +## Contribute -Examples: -otbcli_TensorflowModelServe -source1.il spot6pms.tif -source1.placeholder x1 -source1.rfieldx 16 -source1.rfieldy 16 -model.dir /tmp/my_saved_model/ -model.userplaceholders is_training=false dropout=0.0 -output.names out_predict1 out_proba1 -out "classif128tgt.tif?&streaming:type=tiled&streaming:sizemode=height&streaming:sizevalue=256" -``` +Every one can **contribute** to OTBTF! Don't be shy. -## Composite applications for classification - -Who has never dreamed to use classic classifiers performing on deep learning features? -This is possible thank to two new applications that uses the existing training/classification applications of OTB: - -**TrainClassifierFromDeepFeatures**: is a composite application that wire the **TensorflowModelServe** application output into the existing official **TrainImagesClassifier** application. - -``` -Train a classifier from deep net based features of an image and training vector data. 
-Parameters: - -source1 <group> Parameters for source 1 -MISSING -source1.il <string list> Input image (or list to stack) for source #1 (mandatory) -MISSING -source1.rfieldx <int32> Input receptive field (width) for source #1 (mandatory) -MISSING -source1.rfieldy <int32> Input receptive field (height) for source #1 (mandatory) -MISSING -source1.placeholder <string> Name of the input placeholder for source #1 (mandatory) - -model <group> Deep net inputs parameters -MISSING -model.dir <string> TensorFlow model_save directory (mandatory) - -model.userplaceholders <string list> Additional single-valued placeholders. Supported types: int, float, bool. (optional, off by default) - -model.fullyconv <boolean> Fully convolutional (optional, off by default, default value is false) - -output <group> Deep net outputs parameters - -output.spcscale <float> The output spacing scale, related to the first input (mandatory, default value is 1) -MISSING -output.names <string list> Names of the output tensors (mandatory) - -output.efieldx <int32> The output expression field (width) (mandatory, default value is 1) - -output.efieldy <int32> The output expression field (height) (mandatory, default value is 1) - -optim <group> Processing time optimization - -optim.disabletiling <boolean> Disable tiling (optional, off by default, default value is false) - -optim.tilesizex <int32> Tile width used to stream the filter output (mandatory, default value is 16) - -optim.tilesizey <int32> Tile height used to stream the filter output (mandatory, default value is 16) - -ram <int32> Available RAM (Mb) (optional, off by default, default value is 128) -MISSING -vd <string list> Vector data for training (mandatory) - -valid <string list> Vector data for validation (optional, off by default) -MISSING -out <string> Output classification model (mandatory) - -confmatout <string> Output confusion matrix (optional, off by default) - -sample <group> Sampling parameters - -sample.mt <int32> Maximum training sample size per class (mandatory, default value is 1000) - -sample.mv <int32> Maximum validation sample size per class (mandatory, default value is 1000) - -sample.bm <int32> Bound sample number by minimum (mandatory, default value is 1) - -sample.vtr <float> Training and validation sample ratio (mandatory, default value is 0.5) - -sample.vfn <string> Field containing the class integer label for supervision (mandatory, no default value) - -elev <group> Elevation parameters - -elev.dem <string> DEM directory (optional, off by default) - -elev.geoid <string> Geoid File (optional, off by default) - -elev.default <float> Default elevation (mandatory, default value is 0) - -classifier <string> Classifier parameters [libsvm/boost/dt/gbt/ann/bayes/rf/knn/sharkrf/sharkkm] (mandatory, default value is libsvm) - -classifier.libsvm.k <string> SVM Kernel Type [linear/rbf/poly/sigmoid] (mandatory, default value is linear) - -classifier.libsvm.m <string> SVM Model Type [csvc/nusvc/oneclass] (mandatory, default value is csvc) - -classifier.libsvm.c <float> Cost parameter C (mandatory, default value is 1) - -classifier.libsvm.nu <float> Cost parameter Nu (mandatory, default value is 0.5) - -classifier.libsvm.opt <boolean> Parameters optimization (mandatory, default value is false) - -classifier.libsvm.prob <boolean> Probability estimation (mandatory, default value is false) - -classifier.boost.t <string> Boost Type [discrete/real/logit/gentle] (mandatory, default value is real) - -classifier.boost.w <int32> Weak count (mandatory, default value is 
100) - -classifier.boost.r <float> Weight Trim Rate (mandatory, default value is 0.95) - -classifier.boost.m <int32> Maximum depth of the tree (mandatory, default value is 1) - -classifier.dt.max <int32> Maximum depth of the tree (mandatory, default value is 65535) - -classifier.dt.min <int32> Minimum number of samples in each node (mandatory, default value is 10) - -classifier.dt.ra <float> Termination criteria for regression tree (mandatory, default value is 0.01) - -classifier.dt.cat <int32> Cluster possible values of a categorical variable into K <= cat clusters to find a suboptimal split (mandatory, default value is 10) - -classifier.dt.f <int32> K-fold cross-validations (mandatory, default value is 10) - -classifier.dt.r <boolean> Set Use1seRule flag to false (mandatory, default value is false) - -classifier.dt.t <boolean> Set TruncatePrunedTree flag to false (mandatory, default value is false) - -classifier.gbt.w <int32> Number of boosting algorithm iterations (mandatory, default value is 200) - -classifier.gbt.s <float> Regularization parameter (mandatory, default value is 0.01) - -classifier.gbt.p <float> Portion of the whole training set used for each algorithm iteration (mandatory, default value is 0.8) - -classifier.gbt.max <int32> Maximum depth of the tree (mandatory, default value is 3) - -classifier.ann.t <string> Train Method Type [back/reg] (mandatory, default value is reg) - -classifier.ann.sizes <string list> Number of neurons in each intermediate layer (mandatory) - -classifier.ann.f <string> Neuron activation function type [ident/sig/gau] (mandatory, default value is sig) - -classifier.ann.a <float> Alpha parameter of the activation function (mandatory, default value is 1) - -classifier.ann.b <float> Beta parameter of the activation function (mandatory, default value is 1) - -classifier.ann.bpdw <float> Strength of the weight gradient term in the BACKPROP method (mandatory, default value is 0.1) - -classifier.ann.bpms <float> Strength of the momentum term (the difference between weights on the 2 previous iterations) (mandatory, default value is 0.1) - -classifier.ann.rdw <float> Initial value Delta_0 of update-values Delta_{ij} in RPROP method (mandatory, default value is 0.1) - -classifier.ann.rdwm <float> Update-values lower limit Delta_{min} in RPROP method (mandatory, default value is 1e-07) - -classifier.ann.term <string> Termination criteria [iter/eps/all] (mandatory, default value is all) - -classifier.ann.eps <float> Epsilon value used in the Termination criteria (mandatory, default value is 0.01) - -classifier.ann.iter <int32> Maximum number of iterations used in the Termination criteria (mandatory, default value is 1000) - -classifier.rf.max <int32> Maximum depth of the tree (mandatory, default value is 5) - -classifier.rf.min <int32> Minimum number of samples in each node (mandatory, default value is 10) - -classifier.rf.ra <float> Termination Criteria for regression tree (mandatory, default value is 0) - -classifier.rf.cat <int32> Cluster possible values of a categorical variable into K <= cat clusters to find a suboptimal split (mandatory, default value is 10) - -classifier.rf.var <int32> Size of the randomly selected subset of features at each tree node (mandatory, default value is 0) - -classifier.rf.nbtrees <int32> Maximum number of trees in the forest (mandatory, default value is 100) - -classifier.rf.acc <float> Sufficient accuracy (OOB error) (mandatory, default value is 0.01) - -classifier.knn.k <int32> Number of Neighbors (mandatory, default value 
is 32) - -classifier.sharkrf.nbtrees <int32> Maximum number of trees in the forest (mandatory, default value is 100) - -classifier.sharkrf.nodesize <int32> Min size of the node for a split (mandatory, default value is 25) - -classifier.sharkrf.mtry <int32> Number of features tested at each node (mandatory, default value is 0) - -classifier.sharkrf.oobr <float> Out of bound ratio (mandatory, default value is 0.66) - -classifier.sharkkm.maxiter <int32> Maximum number of iteration for the kmeans algorithm. (mandatory, default value is 10) - -classifier.sharkkm.k <int32> The number of class used for the kmeans algorithm. (mandatory, default value is 2) - -rand <int32> User defined random seed (optional, off by default) - -inxml <string> Load otb application from xml file (optional, off by default) - -progress <boolean> Report progress - -help <string list> Display long help (empty list), or help for given parameters keys - -Use -help param1 [... paramN] to see detailed documentation of those parameters. - -Examples: -None -``` - -**ImageClassifierFromDeepFeatures** same approach with the official **ImageClassifier**. - -``` -Classify image using features from a deep net and an OTB machine learning classification model -Parameters: - -source1 <group> Parameters for source 1 -MISSING -source1.il <string list> Input image (or list to stack) for source #1 (mandatory) -MISSING -source1.rfieldx <int32> Input receptive field (width) for source #1 (mandatory) -MISSING -source1.rfieldy <int32> Input receptive field (height) for source #1 (mandatory) -MISSING -source1.placeholder <string> Name of the input placeholder for source #1 (mandatory) - -deepmodel <group> Deep net model parameters -MISSING -deepmodel.dir <string> TensorFlow model_save directory (mandatory) - -deepmodel.userplaceholders <string list> Additional single-valued placeholders. Supported types: int, float, bool. 
(optional, off by default) - -deepmodel.fullyconv <boolean> Fully convolutional (optional, off by default, default value is false) - -output <group> Deep net outputs parameters - -output.spcscale <float> The output spacing scale, related to the first input (mandatory, default value is 1) -MISSING -output.names <string list> Names of the output tensors (mandatory) - -output.efieldx <int32> The output expression field (width) (mandatory, default value is 1) - -output.efieldy <int32> The output expression field (height) (mandatory, default value is 1) - -optim <group> This group of parameters allows optimization of processing time - -optim.disabletiling <boolean> Disable tiling (optional, off by default, default value is false) - -optim.tilesizex <int32> Tile width used to stream the filter output (mandatory, default value is 16) - -optim.tilesizey <int32> Tile height used to stream the filter output (mandatory, default value is 16) -MISSING -model <string> Model file (mandatory) - -imstat <string> Statistics file (optional, off by default) - -nodatalabel <int32> Label mask value (optional, off by default, default value is 0) -MISSING -out <string> [pixel] Output image [pixel=uint8/uint16/int16/uint32/int32/float/double/cint16/cint32/cfloat/cdouble] (default value is uint8) (mandatory) - -confmap <string> [pixel] Confidence map image [pixel=uint8/uint16/int16/uint32/int32/float/double/cint16/cint32/cfloat/cdouble] (default value is double) (optional, off by default) - -ram <int32> Ram (optional, off by default, default value is 128) - -inxml <string> Load otb application from xml file (optional, off by default) - -progress <boolean> Report progress - -help <string list> Display long help (empty list), or help for given parameters keys - -Use -help param1 [... paramN] to see detailed documentation of those parameters. - -Examples: -None -``` - -Note that you can still set the `OTB_TF_NSOURCES` environment variable. - -# How to use - -## The basics - -Here we will try to provide a simple example of doing a classification using a deep net that performs on one single VHR image. -Our data set consists in one Spot-7 image, *spot7.tif*, and a training vector data, *terrain_truth.shp* that describes sparsely forest / non-forest polygons. -First, we compute statistics of the vector data : how many points can we sample inside objects, and how many objects in each class. -We use the **PolygonClassStatistics** application of OTB. -``` -otbcli_PolygonClassStatistics -vec terrain_truth.shp -field class -in spot7.tif -out vec_stats.xml -``` -Then, we will select some samples with the **SampleSelection** application of the existing machine learning framework of OTB. -Since the terrain truth is sparse, we want to sample randomly points in polygons with the default strategy of the **SampleSelection** OTB application. -``` -otbcli_SampleSelection -in spot7.tif -vec terrain_truth.shp -instats vec_stats.xml -field class -out points.shp -``` -Now we extract the patches with the **PatchesExtraction** application. -We want to produce one image of 16x16 patches, and one image for the corresponding labels. -``` -otbcli_PatchesExtraction -source1.il spot7.tif -source1.patchsizex 16 -source1.patchsizey 16 -vec points.shp -field class -source1.out samp_labels.tif -outpatches samp_patches.tif -``` -Now we have two images for patches and labels. -We can split them to distinguish test/validation groups (with the **ExtractROI** application for instance). -But here, we will just perform some fine tuning of our model. 
-The **SavedModel** is located in the `outmodel` directory. -Our model is quite basic: it has two input placeholders, **x1** and **y1** respectively for input patches (with size 16x16) and input reference labels (with size 1x1). -We named **prediction** the tensor that predict the labels and the optimizer that perform the stochastic gradient descent is an operator named **optimizer**. -We perform the fine tuning and we export the new model variables directly in the _outmodel/variables_ folder, overwritting the existing variables of the model. -We use the **TensorflowModelTrain** application to perform the training of this existing model. -``` -otbcli_TensorflowModelTrain -model.dir /path/to/oursavedmodel -training.targetnodesnames optimizer -training.source1.il samp_patches.tif -training.source1.patchsizex 16 -training.source1.patchsizey 16 -training.source1.placeholder x1 -training.source2.il samp_labels.tif -training.source2.patchsizex 1 -training.source2.patchsizey 1 -training.source2.placeholder y1 -model.saveto /path/to/oursavedmodel/variables/variables -``` -Note that we could also have performed validation in this step. In this case, the `validation.source2.placeholder` would be different than the `training.source2.placeholder`, and would be **prediction**. This way, the program know what is the target tensor to evaluate. +## Cite -After this step, we use the trained model to produce the entire map of forest over the whole Spot-7 image. -For this, we use the **TensorflowModelServe** application to produce the **prediction** tensor output for the entire image. ``` -otbcli_TensorflowModelServe -source1.il spot7.tif -source1.placeholder x1 -source1.rfieldx 16 -source1.rfieldy 16 -model.dir /path/to/oursavedmodel -output.names prediction -out map.tif uint8 +@article{cresson2018framework, + title={A framework for remote sensing images processing using deep learning techniques}, + author={Cresson, R{\'e}mi}, + journal={IEEE Geoscience and Remote Sensing Letters}, + volume={16}, + number={1}, + pages={25--29}, + year={2018}, + publisher={IEEE} +} ``` - -## Begin with provided models - -In the `python` folder are provided some [ready-to-use deep networks, with documentation and scientific references](doc/EXAMPLES.md). -**Feel free to contribute with your own architecture!** - -## Tutorial - -A tutorial is available at [MDL4EO's blog](https://mdl4eo.irstea.fr/2019/01/04/an-introduction-to-deep-learning-on-remote-sensing-images-tutorial/) - -## Book - -A full tutorial is available in [this book](https://g.co/kgs/S77JPT). About 160 pages explain how to use OTBTF (with also OTB, QGIS and TensorFlow) for landcover mapping (patch-based image classification, fully convolutional models, hybrid deep networks X random forest classifiers, semantic segmentation from OSM data, image restoration with joint SAR/Optical sensors). - -## Cite - -Cresson, R. (2018). A framework for remote sensing images processing using deep learning techniques. IEEE Geoscience and Remote Sensing Letters, 16(1), 25-29. diff --git a/doc/APPLICATIONS.md b/doc/APPLICATIONS.md new file mode 100644 index 0000000000000000000000000000000000000000..69282cd543576f66a790f9619f6f75b31b4e1514 --- /dev/null +++ b/doc/APPLICATIONS.md @@ -0,0 +1,372 @@ +# Description of applications + +This section introduces the new OTB applications provided in OTBTF. + +## Patches extraction + +The `PatchesExtraction` application performs the extraction of patches in images from a vector data containing points. 
+Each point locates the **center** of the **central pixel** of the patch.
+For patches with an even size *N*, the **central pixel** corresponds to the pixel index *N/2+1* (index starting at 0).
+We denote as _input source_ either a single input image, or a stack of input images that will be concatenated (they must have the same size).
+The user can set the `OTB_TF_NSOURCES` environment variable to select the number of _input sources_ needed.
+For example, for sampling a Time Series (TS) together with a single Very High Resolution image (VHR), two sources are required:
+ - 1 input image list for the time series,
+ - 1 input image for the VHR.
+
+The sampled patches are extracted at each position designated by the input vector data, only if a patch lies entirely inside the extents of all _input sources_.
+For each _input source_, the patch size must be provided.
+For each _input source_, the application exports all sampled patches as a single multiband raster, stacked in rows.
+For instance, for *n* samples of size *16 x 16* from an _input source_ with *4* channels, the output image will be a raster of size *16 x 16n* with *4* channels.
+An optional output is an image of size *1 x n* containing the value of one specific field of the input vector data.
+Typically, the *class* field can be used to generate a dataset suitable for a model that performs pixel-wise classification.
+
+
+
+```
+This application extracts patches in multiple input images. Change the OTB_TF_NSOURCES environment variable to set the number of sources.
+Parameters:
+ -source1 <group> Parameters for source 1
+MISSING -source1.il <string list> Input image(s) 1 (mandatory)
+MISSING -source1.out <string> [pixel] Output patches for image 1 [pixel=uint8/uint16/int16/uint32/int32/float/double/cint16/cint32/cfloat/cdouble] (default value is float) (mandatory)
+MISSING -source1.patchsizex <int32> X patch size for image 1 (mandatory)
+MISSING -source1.patchsizey <int32> Y patch size for image 1 (mandatory)
+ -source1.nodata <float> No-data value for image 1(used only if "usenodata" is on) (mandatory, default value is 0)
+MISSING -vec <string> Positions of the samples (must be in the same projection as input image) (mandatory)
+ -usenodata <boolean> Reject samples that have no-data value (optional, off by default, default value is false)
+ -outlabels <string> [pixel] output labels [pixel=uint8/uint16/int16/uint32/int32/float/double/cint16/cint32/cfloat/cdouble] (default value is uint8) (optional, off by default)
+MISSING -field <string> field of class in the vector data (mandatory)
+ -progress <boolean> Report progress
+ -help <string list> Display long help (empty list), or help for given parameters keys
+
+Use -help param1 [... paramN] to see detailed documentation of those parameters.
+
+Examples:
+otbcli_PatchesExtraction -vec points.sqlite -source1.il $s2_list -source1.patchsizex 16 -source1.patchsizey 16 -field class -source1.out outpatches_16x16.tif -outlabels outlabels.tif
+```
+
+## Build your Tensorflow model <a name="buildmodel"></a>
+
+You can build models using the TensorFlow Python API as shown in the `./python/` directory.
+Models must be exported in **SavedModel** format.
+When using a model in OTBTF, the important thing is to know the following parameters related to the _placeholders_ (the inputs of your model) and _output tensors_ (the outputs of your model).
+ - For each _input placeholder_:
+   - Name
+   - **Receptive field**
+ - For each _output tensor_:
+   - Name
+   - **Expression field**
+   - **Scale factor**
+
+
+
+The **scale factor** describes the physical change of spacing of the outputs, typically introduced in the model by non-unitary strides in pooling or convolution operators.
+For each output, it is expressed relative to one single input of the model, called the _reference input source_.
+Additionally, the names of the _target nodes_ must be known (e.g. "optimizer").
+Also, the names of the _user placeholders_, typically scalar placeholders used to control some parameters of the model, must be known (e.g. "dropout_rate").
+The **receptive field** corresponds to the input volume that the deep net "sees".
+The **expression field** corresponds to the output volume that the deep net will create.
+
+## Train your Tensorflow model
+
+Here we assume that you have produced patches using the **PatchesExtraction** application, and that you have a **SavedModel** stored in a directory somewhere on your filesystem.
+The **TensorflowModelTrain** application performs the training and the validation (against the test dataset and against the validation dataset), providing the usual metrics that machine learning frameworks provide (confusion matrix, recall, precision, f-score, ...).
+You must provide the path of the **SavedModel** to the `model.dir` parameter.
+The `model.restorefrom` and `model.saveto` parameters correspond to the variables of the **SavedModel**, used respectively for restoring and saving them.
+Set your _input sources_ for training (`training` parameter group) and for validation (`validation` parameter group): the evaluation is performed against the training data, and optionally also against the validation data (only if you set `validation.mode` to "class").
+For each _input source_, the patch size and the placeholder name must be provided.
+Regarding validation, if a different name is found in a particular _input source_ of the `validation` parameter group, the application knows that this _input source_ is not fed to the model at inference, but is used as a reference to compute evaluation metrics on the validation dataset.
+Batch size (`training.batchsize`) and number of epochs (`training.epochs`) can be set.
+_User placeholders_ can be set separately for training (`training.userplaceholders`) and validation (`validation.userplaceholders`).
+The `validation.userplaceholders` parameter can be useful if you have a model that behaves differently depending on the given placeholder.
+Let's take the example of dropout: it's nice for training, but you have to disable it to use the model at inference time.
+Hence you will pass a placeholder with "dropout\_rate=0.3" for training and "dropout\_rate=0.0" for validation.
+Of course, one can also train models from handmade Python code: to import the patch images, a convenient method consists in reading them as numpy arrays using OTB applications (e.g. **ExtractROI**) or GDAL, then reshaping them to the desired dimensions with np.reshape.
+
+
+
+```
+Train a multisource deep learning net using Tensorflow. Change the OTB_TF_NSOURCES environment variable to set the number of sources.
+Parameters: + -model <group> Model parameters +MISSING -model.dir <string> Tensorflow model_save directory (mandatory) + -model.restorefrom <string> Restore model from path (optional, off by default) + -model.saveto <string> Save model to path (optional, off by default) + -training <group> Training parameters + -training.batchsize <int32> Batch size (mandatory, default value is 100) + -training.epochs <int32> Number of epochs (mandatory, default value is 100) + -training.userplaceholders <string list> Additional single-valued placeholders for training. Supported types: int, float, bool. (optional, off by default) +MISSING -training.targetnodes <string list> Names of the target nodes (mandatory) + -training.outputtensors <string list> Names of the output tensors to display (optional, off by default) + -training.usestreaming <boolean> Use the streaming through patches (slower but can process big dataset) (optional, off by default, default value is false) + -training.source1 <group> Parameters for source #1 (training) +MISSING -training.source1.il <string list> Input image (or list to stack) for source #1 (training) (mandatory) +MISSING -training.source1.patchsizex <int32> Patch size (x) for source #1 (mandatory) +MISSING -training.source1.patchsizey <int32> Patch size (y) for source #1 (mandatory) +MISSING -training.source1.placeholder <string> Name of the input placeholder for source #1 (training) (mandatory) + -training.source2 <group> Parameters for source #2 (training) +MISSING -training.source2.il <string list> Input image (or list to stack) for source #2 (training) (mandatory) +MISSING -training.source2.patchsizex <int32> Patch size (x) for source #2 (mandatory) +MISSING -training.source2.patchsizey <int32> Patch size (y) for source #2 (mandatory) +MISSING -training.source2.placeholder <string> Name of the input placeholder for source #2 (training) (mandatory) + -validation <group> Validation parameters + -validation.step <int32> Perform the validation every Nth epochs (mandatory, default value is 10) + -validation.mode <string> Metrics to compute [none/class/rmse] (mandatory, default value is none) + -validation.userplaceholders <string list> Additional single-valued placeholders for validation. Supported types: int, float, bool. (optional, off by default) + -validation.usestreaming <boolean> Use the streaming through patches (slower but can process big dataset) (optional, off by default, default value is false) + -validation.source1 <group> Parameters for source #1 (validation) + -validation.source1.il <string list> Input image (or list to stack) for source #1 (validation) (mandatory) + -validation.source1.name <string> Name of the input placeholder or output tensor for source #1 (validation) (mandatory) + -validation.source2 <group> Parameters for source #2 (validation) + -validation.source2.il <string list> Input image (or list to stack) for source #2 (validation) (mandatory) + -validation.source2.name <string> Name of the input placeholder or output tensor for source #2 (validation) (mandatory) + -progress <boolean> Report progress + -help <string list> Display long help (empty list), or help for given parameters keys + +Use -help param1 [... paramN] to see detailed documentation of those parameters. 
+
+Examples:
+otbcli_TensorflowModelTrain -source1.il spot6pms.tif -source1.placeholder x1 -source1.patchsizex 16 -source1.patchsizey 16 -source2.il labels.tif -source2.placeholder y1 -source2.patchsizex 1 -source2.patchsizey 1 -model.dir /tmp/my_saved_model/ -training.userplaceholders is_training=true dropout=0.2 -training.targetnodes optimizer -model.saveto /tmp/my_saved_model/variables/variables
+```
+
+As you can note, there are `$OTB_TF_NSOURCES` + 1 sources, because we often need at least one more source for the reference data (e.g. terrain truth for land cover mapping).
+
+## Inference
+
+The **TensorflowModelServe** application performs the inference: it can be used to produce an output raster from the specified tensors.
+Thanks to the streaming mechanism, very large images can be produced.
+The application uses the `TensorflowModelFilter` and a `StreamingFilter` to force the streaming of the output.
+The latter can optionally be disabled if you prefer using the extended filenames to deal with chunk sizes.
+However, it is still very useful when the application is used in other composite applications, or simply without extended filename magic.
+Some models can consume a lot of memory.
+In addition, the native tiling strategy of OTB consists of strips, but this might not always be the best choice.
+For Convolutional Neural Networks, for instance, square tiles are more interesting, because computing one single strip of pixels requires padding with many more input pixels than computing one single square tile of pixels.
+So, this application takes as input one or multiple _input sources_ (the number of _input sources_ can be changed by setting `OTB_TF_NSOURCES` to the desired number) and produces one output image from the specified tensors.
+The user is responsible for providing the **receptive field** and **name** of the _input placeholders_, as well as the **expression field**, **scale factor** and **name** of the _output tensors_.
+The first _input source_ (`source1.il`) corresponds to the _reference input source_.
+As explained [previously](#buildmodel), the **scale factor** provided for the _output tensors_ is related to this _reference input source_.
+The user can ask for multiple _output tensors_, which will be stacked along the channel dimension of the output raster.
+However, if the sizes of those _output tensors_ are not consistent (e.g. a different number of (x,y) elements), an exception will be thrown.
+
+
+
+
+```
+Multisource deep learning classifier using TensorFlow. Change the OTB_TF_NSOURCES environment variable to set the number of sources.
+Parameters:
+ -source1 <group> Parameters for source #1
+MISSING -source1.il <string list> Input image (or list to stack) for source #1 (mandatory)
+MISSING -source1.rfieldx <int32> Input receptive field (width) for source #1 (mandatory)
+MISSING -source1.rfieldy <int32> Input receptive field (height) for source #1 (mandatory)
+MISSING -source1.placeholder <string> Name of the input placeholder for source #1 (mandatory)
+ -model <group> model parameters
+MISSING -model.dir <string> TensorFlow model_save directory (mandatory)
+ -model.userplaceholders <string list> Additional single-valued placeholders. Supported types: int, float, bool. (optional, off by default)
+ -model.fullyconv <boolean> Fully convolutional (optional, off by default, default value is false)
+ -output <group> Output tensors parameters
+ -output.spcscale <float> The output spacing scale, related to the first input (mandatory, default value is 1)
+MISSING -output.names <string list> Names of the output tensors (mandatory)
+ -output.efieldx <int32> The output expression field (width) (mandatory, default value is 1)
+ -output.efieldy <int32> The output expression field (height) (mandatory, default value is 1)
+ -optim <group> This group of parameters allows optimization of processing time
+ -optim.disabletiling <boolean> Disable tiling (optional, off by default, default value is false)
+ -optim.tilesizex <int32> Tile width used to stream the filter output (mandatory, default value is 16)
+ -optim.tilesizey <int32> Tile height used to stream the filter output (mandatory, default value is 16)
+MISSING -out <string> [pixel] output image [pixel=uint8/uint16/int16/uint32/int32/float/double/cint16/cint32/cfloat/cdouble] (default value is float) (mandatory)
+ -progress <boolean> Report progress
+ -help <string list> Display long help (empty list), or help for given parameters keys
+
+Use -help param1 [... paramN] to see detailed documentation of those parameters.
+
+Examples:
+otbcli_TensorflowModelServe -source1.il spot6pms.tif -source1.placeholder x1 -source1.rfieldx 16 -source1.rfieldy 16 -model.dir /tmp/my_saved_model/ -model.userplaceholders is_training=false dropout=0.0 -output.names out_predict1 out_proba1 -out "classif128tgt.tif?&streaming:type=tiled&streaming:sizemode=height&streaming:sizevalue=256"
+```
+
+## Composite applications for classification
+
+Who has never dreamed of using classic classifiers on deep learning features?
+This is possible thanks to two new applications that use the existing training/classification applications of OTB:
+
+**TrainClassifierFromDeepFeatures** is a composite application that wires the **TensorflowModelServe** application output into the existing official **TrainImagesClassifier** application.
+
+```
+Train a classifier from deep net based features of an image and training vector data.
+Parameters:
+ -source1 <group> Parameters for source 1
+MISSING -source1.il <string list> Input image (or list to stack) for source #1 (mandatory)
+MISSING -source1.rfieldx <int32> Input receptive field (width) for source #1 (mandatory)
+MISSING -source1.rfieldy <int32> Input receptive field (height) for source #1 (mandatory)
+MISSING -source1.placeholder <string> Name of the input placeholder for source #1 (mandatory)
+ -model <group> Deep net inputs parameters
+MISSING -model.dir <string> TensorFlow model_save directory (mandatory)
+ -model.userplaceholders <string list> Additional single-valued placeholders. Supported types: int, float, bool.
(optional, off by default) + -model.fullyconv <boolean> Fully convolutional (optional, off by default, default value is false) + -output <group> Deep net outputs parameters + -output.spcscale <float> The output spacing scale, related to the first input (mandatory, default value is 1) +MISSING -output.names <string list> Names of the output tensors (mandatory) + -output.efieldx <int32> The output expression field (width) (mandatory, default value is 1) + -output.efieldy <int32> The output expression field (height) (mandatory, default value is 1) + -optim <group> Processing time optimization + -optim.disabletiling <boolean> Disable tiling (optional, off by default, default value is false) + -optim.tilesizex <int32> Tile width used to stream the filter output (mandatory, default value is 16) + -optim.tilesizey <int32> Tile height used to stream the filter output (mandatory, default value is 16) + -ram <int32> Available RAM (Mb) (optional, off by default, default value is 128) +MISSING -vd <string list> Vector data for training (mandatory) + -valid <string list> Vector data for validation (optional, off by default) +MISSING -out <string> Output classification model (mandatory) + -confmatout <string> Output confusion matrix (optional, off by default) + -sample <group> Sampling parameters + -sample.mt <int32> Maximum training sample size per class (mandatory, default value is 1000) + -sample.mv <int32> Maximum validation sample size per class (mandatory, default value is 1000) + -sample.bm <int32> Bound sample number by minimum (mandatory, default value is 1) + -sample.vtr <float> Training and validation sample ratio (mandatory, default value is 0.5) + -sample.vfn <string> Field containing the class integer label for supervision (mandatory, no default value) + -elev <group> Elevation parameters + -elev.dem <string> DEM directory (optional, off by default) + -elev.geoid <string> Geoid File (optional, off by default) + -elev.default <float> Default elevation (mandatory, default value is 0) + -classifier <string> Classifier parameters [libsvm/boost/dt/gbt/ann/bayes/rf/knn/sharkrf/sharkkm] (mandatory, default value is libsvm) + -classifier.libsvm.k <string> SVM Kernel Type [linear/rbf/poly/sigmoid] (mandatory, default value is linear) + -classifier.libsvm.m <string> SVM Model Type [csvc/nusvc/oneclass] (mandatory, default value is csvc) + -classifier.libsvm.c <float> Cost parameter C (mandatory, default value is 1) + -classifier.libsvm.nu <float> Cost parameter Nu (mandatory, default value is 0.5) + -classifier.libsvm.opt <boolean> Parameters optimization (mandatory, default value is false) + -classifier.libsvm.prob <boolean> Probability estimation (mandatory, default value is false) + -classifier.boost.t <string> Boost Type [discrete/real/logit/gentle] (mandatory, default value is real) + -classifier.boost.w <int32> Weak count (mandatory, default value is 100) + -classifier.boost.r <float> Weight Trim Rate (mandatory, default value is 0.95) + -classifier.boost.m <int32> Maximum depth of the tree (mandatory, default value is 1) + -classifier.dt.max <int32> Maximum depth of the tree (mandatory, default value is 65535) + -classifier.dt.min <int32> Minimum number of samples in each node (mandatory, default value is 10) + -classifier.dt.ra <float> Termination criteria for regression tree (mandatory, default value is 0.01) + -classifier.dt.cat <int32> Cluster possible values of a categorical variable into K <= cat clusters to find a suboptimal split (mandatory, default value is 10) + -classifier.dt.f 
<int32> K-fold cross-validations (mandatory, default value is 10) + -classifier.dt.r <boolean> Set Use1seRule flag to false (mandatory, default value is false) + -classifier.dt.t <boolean> Set TruncatePrunedTree flag to false (mandatory, default value is false) + -classifier.gbt.w <int32> Number of boosting algorithm iterations (mandatory, default value is 200) + -classifier.gbt.s <float> Regularization parameter (mandatory, default value is 0.01) + -classifier.gbt.p <float> Portion of the whole training set used for each algorithm iteration (mandatory, default value is 0.8) + -classifier.gbt.max <int32> Maximum depth of the tree (mandatory, default value is 3) + -classifier.ann.t <string> Train Method Type [back/reg] (mandatory, default value is reg) + -classifier.ann.sizes <string list> Number of neurons in each intermediate layer (mandatory) + -classifier.ann.f <string> Neuron activation function type [ident/sig/gau] (mandatory, default value is sig) + -classifier.ann.a <float> Alpha parameter of the activation function (mandatory, default value is 1) + -classifier.ann.b <float> Beta parameter of the activation function (mandatory, default value is 1) + -classifier.ann.bpdw <float> Strength of the weight gradient term in the BACKPROP method (mandatory, default value is 0.1) + -classifier.ann.bpms <float> Strength of the momentum term (the difference between weights on the 2 previous iterations) (mandatory, default value is 0.1) + -classifier.ann.rdw <float> Initial value Delta_0 of update-values Delta_{ij} in RPROP method (mandatory, default value is 0.1) + -classifier.ann.rdwm <float> Update-values lower limit Delta_{min} in RPROP method (mandatory, default value is 1e-07) + -classifier.ann.term <string> Termination criteria [iter/eps/all] (mandatory, default value is all) + -classifier.ann.eps <float> Epsilon value used in the Termination criteria (mandatory, default value is 0.01) + -classifier.ann.iter <int32> Maximum number of iterations used in the Termination criteria (mandatory, default value is 1000) + -classifier.rf.max <int32> Maximum depth of the tree (mandatory, default value is 5) + -classifier.rf.min <int32> Minimum number of samples in each node (mandatory, default value is 10) + -classifier.rf.ra <float> Termination Criteria for regression tree (mandatory, default value is 0) + -classifier.rf.cat <int32> Cluster possible values of a categorical variable into K <= cat clusters to find a suboptimal split (mandatory, default value is 10) + -classifier.rf.var <int32> Size of the randomly selected subset of features at each tree node (mandatory, default value is 0) + -classifier.rf.nbtrees <int32> Maximum number of trees in the forest (mandatory, default value is 100) + -classifier.rf.acc <float> Sufficient accuracy (OOB error) (mandatory, default value is 0.01) + -classifier.knn.k <int32> Number of Neighbors (mandatory, default value is 32) + -classifier.sharkrf.nbtrees <int32> Maximum number of trees in the forest (mandatory, default value is 100) + -classifier.sharkrf.nodesize <int32> Min size of the node for a split (mandatory, default value is 25) + -classifier.sharkrf.mtry <int32> Number of features tested at each node (mandatory, default value is 0) + -classifier.sharkrf.oobr <float> Out of bound ratio (mandatory, default value is 0.66) + -classifier.sharkkm.maxiter <int32> Maximum number of iteration for the kmeans algorithm. (mandatory, default value is 10) + -classifier.sharkkm.k <int32> The number of class used for the kmeans algorithm. 
(mandatory, default value is 2) + -rand <int32> User defined random seed (optional, off by default) + -inxml <string> Load otb application from xml file (optional, off by default) + -progress <boolean> Report progress + -help <string list> Display long help (empty list), or help for given parameters keys + +Use -help param1 [... paramN] to see detailed documentation of those parameters. + +Examples: +None +``` + +**ImageClassifierFromDeepFeatures** same approach with the official **ImageClassifier**. + +``` +Classify image using features from a deep net and an OTB machine learning classification model +Parameters: + -source1 <group> Parameters for source 1 +MISSING -source1.il <string list> Input image (or list to stack) for source #1 (mandatory) +MISSING -source1.rfieldx <int32> Input receptive field (width) for source #1 (mandatory) +MISSING -source1.rfieldy <int32> Input receptive field (height) for source #1 (mandatory) +MISSING -source1.placeholder <string> Name of the input placeholder for source #1 (mandatory) + -deepmodel <group> Deep net model parameters +MISSING -deepmodel.dir <string> TensorFlow model_save directory (mandatory) + -deepmodel.userplaceholders <string list> Additional single-valued placeholders. Supported types: int, float, bool. (optional, off by default) + -deepmodel.fullyconv <boolean> Fully convolutional (optional, off by default, default value is false) + -output <group> Deep net outputs parameters + -output.spcscale <float> The output spacing scale, related to the first input (mandatory, default value is 1) +MISSING -output.names <string list> Names of the output tensors (mandatory) + -output.efieldx <int32> The output expression field (width) (mandatory, default value is 1) + -output.efieldy <int32> The output expression field (height) (mandatory, default value is 1) + -optim <group> This group of parameters allows optimization of processing time + -optim.disabletiling <boolean> Disable tiling (optional, off by default, default value is false) + -optim.tilesizex <int32> Tile width used to stream the filter output (mandatory, default value is 16) + -optim.tilesizey <int32> Tile height used to stream the filter output (mandatory, default value is 16) +MISSING -model <string> Model file (mandatory) + -imstat <string> Statistics file (optional, off by default) + -nodatalabel <int32> Label mask value (optional, off by default, default value is 0) +MISSING -out <string> [pixel] Output image [pixel=uint8/uint16/int16/uint32/int32/float/double/cint16/cint32/cfloat/cdouble] (default value is uint8) (mandatory) + -confmap <string> [pixel] Confidence map image [pixel=uint8/uint16/int16/uint32/int32/float/double/cint16/cint32/cfloat/cdouble] (default value is double) (optional, off by default) + -ram <int32> Ram (optional, off by default, default value is 128) + -inxml <string> Load otb application from xml file (optional, off by default) + -progress <boolean> Report progress + -help <string list> Display long help (empty list), or help for given parameters keys + +Use -help param1 [... paramN] to see detailed documentation of those parameters. + +Examples: +None +``` + +Note that you can still set the `OTB_TF_NSOURCES` environment variable. + +# Basic example + +Below is a minimal example that presents the main steps to train a model, and perform the inference. + +## Sampling + +Here we will try to provide a simple example of doing a classification using a deep net that performs on one single VHR image. 
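+The deep net used throughout this example is described later, in the Training section: it is a **SavedModel** with two input placeholders, **x1** (16x16 patches) and **y1** (1x1 labels), a **prediction** tensor, and a training operator named **optimizer**.
+Purely for illustration, a minimal TF1-style sketch of such a model could look like the following (the architecture, the number of input bands, the loss and the learning rate are assumptions, not the actual model used here):
+
+```python
+#!/usr/bin/env python3
+import tensorflow.compat.v1 as tf
+tf.disable_v2_behavior()
+
+with tf.Graph().as_default():
+    # 16x16 input patches (4 bands assumed) and 1x1 reference labels
+    x1 = tf.placeholder(tf.float32, shape=(None, 16, 16, 4), name="x1")
+    y1 = tf.placeholder(tf.int32, shape=(None, 1, 1, 1), name="y1")
+
+    # Tiny hypothetical CNN: one conv layer, global pooling, 2-class dense layer
+    feat = tf.layers.conv2d(x1, filters=16, kernel_size=5, activation=tf.nn.relu)
+    feat = tf.reduce_mean(feat, axis=[1, 2])
+    logits = tf.layers.dense(feat, 2)
+
+    # Tensor and operator names expected by the commands of this example
+    prediction = tf.reshape(tf.argmax(logits, axis=1), [-1, 1], name="prediction")
+    loss = tf.losses.sparse_softmax_cross_entropy(labels=tf.reshape(y1, [-1]), logits=logits)
+    tf.train.AdamOptimizer(1e-4).minimize(loss, name="optimizer")
+
+    # Export an initial SavedModel in the "outmodel" directory
+    with tf.Session() as sess:
+        sess.run(tf.global_variables_initializer())
+        tf.saved_model.simple_save(sess, "outmodel",
+                                   inputs={"x1": x1, "y1": y1},
+                                   outputs={"prediction": prediction})
+```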
+
+Our data set consists of one Spot-7 image, *spot7.tif*, and a training vector data set, *terrain_truth.shp*, which sparsely describes forest / non-forest polygons.
+First, we compute statistics on the vector data: how many points can we sample inside the objects, and how many objects are there in each class.
+We use the **PolygonClassStatistics** application of OTB.
+```
+otbcli_PolygonClassStatistics -vec terrain_truth.shp -field class -in spot7.tif -out vec_stats.xml
+```
+Then, we select some samples with the **SampleSelection** application of the existing machine learning framework of OTB.
+Since the terrain truth is sparse, we want to randomly sample points inside the polygons, which is the default strategy of the **SampleSelection** OTB application.
+```
+otbcli_SampleSelection -in spot7.tif -vec terrain_truth.shp -instats vec_stats.xml -field class -out points.shp
+```
+Now we extract the patches with the **PatchesExtraction** application.
+We want to produce one image of 16x16 patches, and one image for the corresponding labels.
+```
+otbcli_PatchesExtraction -source1.il spot7.tif -source1.patchsizex 16 -source1.patchsizey 16 -vec points.shp -field class -source1.out samp_labels.tif -outpatches samp_patches.tif
+```
+
+## Training
+
+Now we have two images, one for the patches and one for the labels.
+We can split them to distinguish test/validation groups (with the **ExtractROI** application for instance).
+But here, we will just perform some fine-tuning of our model.
+The **SavedModel** is located in the `outmodel` directory.
+Our model is quite basic: it has two input placeholders, **x1** and **y1**, respectively for the input patches (of size 16x16) and the input reference labels (of size 1x1).
+We named **prediction** the tensor that predicts the labels, and the operator that performs the stochastic gradient descent is named **optimizer**.
+We perform the fine-tuning and export the new model variables directly into the _outmodel/variables_ folder, overwriting the existing variables of the model.
+We use the **TensorflowModelTrain** application to perform the training of this existing model.
+```
+otbcli_TensorflowModelTrain -model.dir /path/to/oursavedmodel -training.targetnodes optimizer -training.source1.il samp_patches.tif -training.source1.patchsizex 16 -training.source1.patchsizey 16 -training.source1.placeholder x1 -training.source2.il samp_labels.tif -training.source2.patchsizex 1 -training.source2.patchsizey 1 -training.source2.placeholder y1 -model.saveto /path/to/oursavedmodel/variables/variables
+```
+Note that we could also have performed validation in this step. In this case, the `validation.source2.placeholder` would be different from the `training.source2.placeholder`, and would be **prediction**. This way, the program knows which target tensor to evaluate.
+
+## Inference
+
+After this step, we use the trained model to produce a forest map over the whole Spot-7 image.
+For this, we use the **TensorflowModelServe** application to produce the **prediction** tensor output for the entire image.
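+If you prefer scripting to the command line interface, the same call can be made through OTB's Python bindings; the following sketch simply mirrors the command given right below (same parameter keys, same assumed paths):
+
+```python
+import otbApplication
+
+# Hedged sketch: same parameters as the otbcli_TensorflowModelServe command below
+app = otbApplication.Registry.CreateApplication("TensorflowModelServe")
+app.SetParameterStringList("source1.il", ["spot7.tif"])
+app.SetParameterString("source1.placeholder", "x1")
+app.SetParameterInt("source1.rfieldx", 16)
+app.SetParameterInt("source1.rfieldy", 16)
+app.SetParameterString("model.dir", "/path/to/oursavedmodel")
+app.SetParameterStringList("output.names", ["prediction"])
+app.SetParameterString("out", "map.tif")
+app.SetParameterOutputImagePixelType("out", otbApplication.ImagePixelType_uint8)
+app.ExecuteAndWriteOutput()
+```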
+
+```
+otbcli_TensorflowModelServe -source1.il spot7.tif -source1.placeholder x1 -source1.rfieldx 16 -source1.rfieldy 16 -model.dir /path/to/oursavedmodel -output.names prediction -out map.tif uint8
+```
diff --git a/doc/CREATEYOUROWN.md b/doc/CREATEYOUROWN.md
new file mode 100644
index 0000000000000000000000000000000000000000..4a22d4ef2d1ba8a22817e05a68d6538ff522d1c0
--- /dev/null
+++ b/doc/CREATEYOUROWN.md
@@ -0,0 +1,37 @@
+# Create your own architecture
+
+This section gives a few tips for creating your own models, ready to be used with OTBTF's `TensorflowModelServe` and `TensorflowModelTrain` applications.
+
+## Model inputs
+
+### Dimensions
+
+All networks must take **4D tensors** as input.
+- **dim 0** is the batch dimension. It is used in the `TensorflowModelTrain` application during training, and in **patch-based mode** during inference: in this mode, `TensorflowModelServe` performs the inference of several patches simultaneously. In **fully-convolutional mode**, a single slice of the batch dimension is used.
+- **dim 1** and **dim 2** are the spatial dimensions,
+- **dim 3** is for the image channels. Even if your image has only one channel, you must set a shape value equal to 1 for the last dimension of the input placeholder.
+
+### Shapes
+
+For nets intended to work in **patch-based** mode, you can stick with a placeholder where your patch size is defined explicitly in **dim 1** and **dim 2**.
+However, for nets intended to work in **fully-convolutional** mode, you must set `None` in **dim 1** and **dim 2** (before Tensorflow 2.X, it was possible to feed placeholders with a tensor of a different size where the dims were defined, but not anymore!).
+For instance, let's consider an input raster with 4 spectral bands: the input shape of the model would be `[None, None, None, 4]` to work in fully-convolutional mode. By doing so, input images of any size can be used (`TensorflowModelServe` will automatically compute the input/output region sizes to process, given the **receptive field** and **expression field** of your net).
+
+## Model outputs
+
+### Dimensions
+
+Supported output tensors must have **between 2 and 4 dimensions**.
+OTBTF always considers that **the size of the last dimension is the number of channels in the output**.
+For instance, you can have a model that outputs 8 channels with a tensor of shape `[None, 8]` or `[None, None, None, 8]`.
+
+### Name your tensors and nodes
+
+Always name your model outputs explicitly. You will need the output tensor name to perform the inference with `TensorflowModelServe`. If you forget to name them, use the graph viewer in `tensorboard` to retrieve the names.
+
+### Training
+
+If you want to enable the training of your network with the `TensorflowModelTrain` application, do not forget to name your optimizers/operators!
+You can build a single operator from multiple ones using the `tf.group` command, which also enables you to name your new operator.
+To trigger nodes sequentially, you can build an operator that does what you want in the desired order using `tf.control_dependencies` (with TF <= 1.15).
+
diff --git a/doc/DOCKERUSE.md b/doc/DOCKERUSE.md
new file mode 100644
index 0000000000000000000000000000000000000000..b510877751d622bbf11b0410c336b2d41dffc654
--- /dev/null
+++ b/doc/DOCKERUSE.md
@@ -0,0 +1,239 @@
+# OTBTF docker images overview
+
+### Available images
+
+Here is the list of OTBTF docker images hosted on [dockerhub](https://hub.docker.com/u/mdl4eo).
+ +| Name | Os | TF | OTB | Description | +| ----------------------------- | ------------- | ------ | ----- | ---------------------- | +| **mdl4eo/otbtf1.6:cpu** | Ubuntu Xenial | r1.14 | 7.0.0 | CPU, no optimization | +| **mdl4eo/otbtf1.7:cpu** | Ubuntu Xenial | r1.14 | 7.0.0 | CPU, no optimization | +| **mdl4eo/otbtf1.7:gpu** | Ubuntu Xenial | r1.14 | 7.0.0 | GPU | +| **mdl4eo/otbtf2.0:cpu** | Ubuntu Xenial | r2.1 | 7.1.0 | CPU, no optimization | +| **mdl4eo/otbtf2.0:gpu** | Ubuntu Xenial | r2.1 | 7.1.0 | GPU | +| **mdl4eo/otbtf2.4:cpu-basic** | Ubuntu Focal | r2.4.1 | 7.2.0 | CPU, no optimization | +| **mdl4eo/otbtf2.4:cpu** | Ubuntu Focal | r2.4.1 | 7.2.0 | CPU, few optimizations | +| **mdl4eo/otbtf2.4:cpu-mkl** | Ubuntu Focal | r2.4.1 | 7.2.0 | CPU, Intel MKL, AVX512 | +| **mdl4eo/otbtf2.4:gpu** | Ubuntu Focal | r2.4.1 | 7.2.0 | GPU | + +- `cpu` tagged docker images are compiled without optimization. +- `gpu` tagged docker images are suited for **NVIDIA GPUs**. They use CUDA/CUDNN support and are built with compute capabilities 5.2, 6.1, 7.0, 7.5. +- `cpu-mkl` tagged docker image is experimental, it is optimized for Intel CPUs with AVX512 flags. + +You can also find plenty of interesting OTBTF flavored images at [LaTelescop gitlab registry](https://gitlab.com/latelescop/docker/otbtf/container_registry/). + +### Development ready images + +Until r2.4, all images are development-ready. For instance, you can recompile the whole OTB from `/work/otb/build/OTB/build`. +Since r2.4, only `gpu` tagged image is development-ready, and you can recompile OTB from `/src/otb/build/OTB/build`. + +### Build your own images + +If you want to use optimization flags, change GPUs compute capability, etc. you can build your own docker image using the provided dockerfile. +See the [docker build documentation](../tools/docker/README.md). + +# Mounting file systems + +You can mount filesystem in the docker image. +For instance, suppose you have some data in `/mnt/my_device/` that you want to use inside the container: + +The following command shows you how to access the folder from the docker image. + +```bash +docker run -v /mnt/my_device/:/data/ -ti mdl4eo/otbtf2.4:cpu bash -c "ls /data" +``` +Beware of ownership issues! see the last section of this doc. + +# GPU enabled docker + +In Linux, this is quite straightforward. +Just follow the steps described in the [nvidia-docker documentation](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html). +You can then use the OTBTF `gpu` tagged docker images with the **NVIDIA runtime** : + +With Docker version earlier than 19.03 : + +```bash +docker run --runtime=nvidia -ti mdl4eo/otbtf2.4:gpu bash +``` + +With Docker version including and after 19.03 : + +```bash +docker run --gpus all -ti mdl4eo/otbtf2.4:gpu bash +``` + +You can find some details on the **GPU docker image** and some **docker tips and tricks** on [this blog](https://mdl4eo.irstea.fr/2019/10/15/otbtf-docker-image-with-gpu/). +Be careful though, these infos might be a bit outdated... + +# Docker Installation + +### Installation and first steps on Windows 10 + +1. Install [WSL2](https://docs.microsoft.com/en-us/windows/wsl/install-win10#manual-installation-steps) (Windows Subsystem for Linux) +2. Install [docker desktop](https://www.docker.com/products/docker-desktop) +3. Start **docker desktop** and **enable WSL2** from *Settings* > *General* then tick the box *Use the WSL2 based engine* +3. 
Open a **cmd.exe** or **PowerShell** terminal, and type `docker create --name otbtf-cpu --interactive --tty mdl4eo/otbtf2.4:cpu` +4. Open **docker desktop**, and check that the docker is running in the **Container/Apps** menu + +5. From **docker desktop**, click on the icon highlighted as shown below, and use the bash terminal that should pop up! + + +Troubleshooting: +- [Docker for windows WSL documentation](https://docs.docker.com/docker-for-windows/wsl) +- [WSL2 installation steps](https://docs.microsoft.com/en-us/windows/wsl/install-win10) + +### Use the GPU with Windows 10 + WSL2 + +*Work in progress* + +Some users have reported to use OTBTF with GPU in windows 10 using WSL2. +How to install WSL2 with Cuda on windows 10: +https://docs.nvidia.com/cuda/wsl-user-guide/index.html +https://docs.docker.com/docker-for-windows/wsl/#gpu-support + + +### Debian and Ubuntu + +See here how to install docker on Ubuntu [here](https://docs.docker.com/engine/install/ubuntu/). + +# Docker Usage + +This section is largely inspired from the [moringa docker help](https://gitlab.irstea.fr/raffaele.gaetano/moringa/blob/develop/docker/README.md). Big thanks to them. + +## Useful diagnostic commands + +Here are some usefull commands. + +```bash +docker info # System info +docker images # List local images +docker container ls # List containers +docker ps # Show running containers +``` + +On Linux, control state with systemd: +```bash +sudo systemctl {status,enable,disable,start,stop} docker +``` + +### Run some commands + +Run a simple command in a one-shot container: + +```bash +docker run mdl4eo/otbtf2.4:cpu otbcli_PatchesExtraction +``` + +You can also use the image in interactive mode with bash: +```bash +docker run -ti mdl4eo/otbtf2.4:cpu bash +``` + +### Persistent container + +Persistent (named) container with volume, here with home dir, but it can be any directory. +Beware of ownership issues, see the last section of this doc. + +```bash +docker create --interactive --tty --volume /home/$USER:/home/otbuser/ \ + --name otbtf mdl4eo/otbtf2.4:cpu /bin/bash +``` + +### Interactive session + +```bash +docker start -i otbtf +``` + +### Background container + +```bash +docker start otbtf +docker exec otbtf ls -alh +docker stop otbtf +``` + +### Running commands with root user + +Background container is the easiest way: + +```bash +docker start otbtf +# Example with apt update (you can't use &&, one docker exec is required for each command) +docker exec --user root otbtf apt-get update +docker exec --user root otbtf apt-get upgrade -y +``` + +### Container-specific commands, especially for background containers: + +```bash +docker inspect otbtf # See full container info dump +docker logs otbtf # See command logs and outputs +docker stats otbtf # Real time container statistics +docker {pause,unpause} otbtf # Freeze container +``` + +### Stop a background container + +Don't forget to stop the container after you have done. + +```bash +docker stop otbtf +``` + +### Remove a persistent container + +```bash +docker rm otbtf +``` + +# Fix volume ownership issue (required if host's UID > 1000) + +When mounting a volume, you may experience errors while trying to write files from within the container. +Since the default user (**otbuser**) is UID 1000, you won't be able to write files into your volume +which is mounted with the same UID than your linux host user (may be UID 1001 or more). +In order to address this, you need to edit the container's user UID and GID to match the right numerical value. 
+This will only persist in a named container, it is required every time you're creating a new one. + + +Create a named container (here with your HOME as volume), Docker will automatically pull image + +```bash +docker create --interactive --tty --volume /home/$USER:/home/otbuser \ + --name otbtf mdl4eo/otbtf2.4:cpu /bin/bash +``` + +Start a background container process: + +```bash +docker start otbtf +``` + +Exec required commands with user root (here with host's ID, replace $UID and $GID with desired values): + +```bash +docker exec --user root otbtf usermod otbuser -u $UID +docker exec --user root otbtf groupmod otbuser -g $GID +``` + +Force reset ownership with updated UID and GID. +Make sure to double check that `docker exec otbtf id` because recursive chown will apply to your volume in `/home/otbuser` + +```bash +docker exec --user root otbtf chown -R otbuser:otbuser /home/otbuser +``` + +Stop the background container and start a new interactive shell: + +```bash +docker stop otbtf +docker start -i otbtf +``` + +Check if ownership is right + +```bash +id +ls -Alh /home/otbuser +touch /home/otbuser/test.txt +``` diff --git a/doc/EXAMPLES.md b/doc/EXAMPLES.md index 2968ef032a87b7383923f0b5a004abaf4b6d9ba9..4184913b21aac2ec605cb0fefa2f8a08f10fb33b 100644 --- a/doc/EXAMPLES.md +++ b/doc/EXAMPLES.md @@ -224,6 +224,8 @@ otbcli_TensorflowModelTrain \ -validation.source2.il $patches_labels_valid -validation.source2.name "estimated" \ ``` +Note that the `userplaceholders` parameter contains the *is_training* placeholder, fed with value *true* because the default value for this placeholder is *false*, and it is used in the batch normalization layers (take a look in the `create_savedmodel_maggiori17_fullyconv.py` code). + ### Inference This model can be used in fully convolutional mode only. diff --git a/doc/images/docker_desktop_1.jpeg b/doc/images/docker_desktop_1.jpeg new file mode 100644 index 0000000000000000000000000000000000000000..9a03bd58cae40dfb428de083531c687020572883 Binary files /dev/null and b/doc/images/docker_desktop_1.jpeg differ diff --git a/doc/images/docker_desktop_2.jpeg b/doc/images/docker_desktop_2.jpeg new file mode 100644 index 0000000000000000000000000000000000000000..e393cdb40d45c1fbcfd69c8983a47aef8731b899 Binary files /dev/null and b/doc/images/docker_desktop_2.jpeg differ diff --git a/include/otbTensorflowCommon.cxx b/include/otbTensorflowCommon.cxx index a9f47a32004b6142f2c83e4e929f0eece9cb570b..b93717ed3aa5806f364ebcaa1d73edfa25c45f6b 100644 --- a/include/otbTensorflowCommon.cxx +++ b/include/otbTensorflowCommon.cxx @@ -1,6 +1,7 @@ /*========================================================================= - Copyright (c) Remi Cresson (IRSTEA). All rights reserved. + Copyright (c) 2018-2019 Remi Cresson (IRSTEA) + Copyright (c) 2020-2021 Remi Cresson (INRAE) This software is distributed WITHOUT ANY WARRANTY; without even diff --git a/include/otbTensorflowCommon.h b/include/otbTensorflowCommon.h index 96a1ec0a84d0cc11f8455984e9ed2c9147073ead..e8db4b8b361e9a1277f96ce50aba4027ffc5d251 100644 --- a/include/otbTensorflowCommon.h +++ b/include/otbTensorflowCommon.h @@ -1,6 +1,7 @@ /*========================================================================= - Copyright (c) Remi Cresson (IRSTEA). All rights reserved. 
+ Copyright (c) 2018-2019 Remi Cresson (IRSTEA) + Copyright (c) 2020-2021 Remi Cresson (INRAE) This software is distributed WITHOUT ANY WARRANTY; without even diff --git a/include/otbTensorflowCopyUtils.cxx b/include/otbTensorflowCopyUtils.cxx index 7081a445312720f529b75668d454072df814f634..116aafb0b5fb1fe5b00f830291c0218fdb3f6bdf 100644 --- a/include/otbTensorflowCopyUtils.cxx +++ b/include/otbTensorflowCopyUtils.cxx @@ -1,6 +1,7 @@ /*========================================================================= - Copyright (c) Remi Cresson (IRSTEA). All rights reserved. + Copyright (c) 2018-2019 Remi Cresson (IRSTEA) + Copyright (c) 2020-2021 Remi Cresson (INRAE) This software is distributed WITHOUT ANY WARRANTY; without even @@ -97,13 +98,23 @@ void RecopyImageRegionToTensorWithCast(const typename TImage::Pointer inputPtr, { tensorflow::DataType dt = tensor.dtype(); if (dt == tensorflow::DT_FLOAT) - RecopyImageRegionToTensor<TImage, float> (inputPtr, region, tensor, elemIdx); + RecopyImageRegionToTensor<TImage, float>(inputPtr, region, tensor, elemIdx); else if (dt == tensorflow::DT_DOUBLE) - RecopyImageRegionToTensor<TImage, double> (inputPtr, region, tensor, elemIdx); + RecopyImageRegionToTensor<TImage, double>(inputPtr, region, tensor, elemIdx); + else if (dt == tensorflow::DT_UINT64) + RecopyImageRegionToTensor<TImage, unsigned long long int>(inputPtr, region, tensor, elemIdx); else if (dt == tensorflow::DT_INT64) RecopyImageRegionToTensor<TImage, long long int>(inputPtr, region, tensor, elemIdx); + else if (dt == tensorflow::DT_UINT32) + RecopyImageRegionToTensor<TImage, unsigned int>(inputPtr, region, tensor, elemIdx); else if (dt == tensorflow::DT_INT32) - RecopyImageRegionToTensor<TImage, int> (inputPtr, region, tensor, elemIdx); + RecopyImageRegionToTensor<TImage, int>(inputPtr, region, tensor, elemIdx); + else if (dt == tensorflow::DT_UINT16) + RecopyImageRegionToTensor<TImage, unsigned short int> (inputPtr, region, tensor, elemIdx); + else if (dt == tensorflow::DT_INT16) + RecopyImageRegionToTensor<TImage, short int>(inputPtr, region, tensor, elemIdx); + else if (dt == tensorflow::DT_UINT8) + RecopyImageRegionToTensor<TImage, unsigned char> (inputPtr, region, tensor, elemIdx); else itkGenericExceptionMacro("TF DataType "<< dt << " not currently implemented !"); } diff --git a/include/otbTensorflowCopyUtils.h b/include/otbTensorflowCopyUtils.h index 1dc876fe56ff75ae898d86f0bee8f32e33c98e0d..47ad6cf2366137ac9719158069ac0d91ffef5813 100644 --- a/include/otbTensorflowCopyUtils.h +++ b/include/otbTensorflowCopyUtils.h @@ -1,6 +1,7 @@ /*========================================================================= - Copyright (c) Remi Cresson (IRSTEA). All rights reserved. + Copyright (c) 2018-2019 Remi Cresson (IRSTEA) + Copyright (c) 2020-2021 Remi Cresson (INRAE) This software is distributed WITHOUT ANY WARRANTY; without even diff --git a/include/otbTensorflowDataTypeBridge.cxx b/include/otbTensorflowDataTypeBridge.cxx index 9b9d52e1987bc9d2342c9779331b3bc4543983b6..5a421c92ae0828a2cd39d91e06b6b0eb9aaa9aab 100644 --- a/include/otbTensorflowDataTypeBridge.cxx +++ b/include/otbTensorflowDataTypeBridge.cxx @@ -1,6 +1,7 @@ /*========================================================================= - Copyright (c) Remi Cresson (IRSTEA). All rights reserved. 
+ Copyright (c) 2018-2019 Remi Cresson (IRSTEA) + Copyright (c) 2020-2021 Remi Cresson (INRAE) This software is distributed WITHOUT ANY WARRANTY; without even diff --git a/include/otbTensorflowDataTypeBridge.h b/include/otbTensorflowDataTypeBridge.h index 8556058936b90acba9aef08f01714939bf122590..16e9dd23e63beff92f092bfe75fe899afe78b478 100644 --- a/include/otbTensorflowDataTypeBridge.h +++ b/include/otbTensorflowDataTypeBridge.h @@ -1,6 +1,7 @@ /*========================================================================= - Copyright (c) Remi Cresson (IRSTEA). All rights reserved. + Copyright (c) 2018-2019 Remi Cresson (IRSTEA) + Copyright (c) 2020-2021 Remi Cresson (INRAE) This software is distributed WITHOUT ANY WARRANTY; without even diff --git a/include/otbTensorflowGraphOperations.cxx b/include/otbTensorflowGraphOperations.cxx index 794a302bf0f8e2e456da82fd95c061b1a843e0bb..16300f6c341a75a65947ed5bb7512d7d1187dbc7 100644 --- a/include/otbTensorflowGraphOperations.cxx +++ b/include/otbTensorflowGraphOperations.cxx @@ -1,6 +1,7 @@ /*========================================================================= - Copyright (c) Remi Cresson (IRSTEA). All rights reserved. + Copyright (c) 2018-2019 Remi Cresson (IRSTEA) + Copyright (c) 2020-2021 Remi Cresson (INRAE) This software is distributed WITHOUT ANY WARRANTY; without even @@ -16,10 +17,10 @@ namespace tf { // // Restore a model from a path // -void RestoreModel(const std::string path, tensorflow::SavedModelBundle & bundle) +void RestoreModel(const tensorflow::tstring path, tensorflow::SavedModelBundle & bundle) { tensorflow::Tensor checkpointPathTensor(tensorflow::DT_STRING, tensorflow::TensorShape()); - checkpointPathTensor.scalar<std::string>()() = path; + checkpointPathTensor.scalar<tensorflow::tstring>()() = path; std::vector<std::pair<std::string, tensorflow::Tensor>> feed_dict = {{bundle.meta_graph_def.saver_def().filename_tensor_name(), checkpointPathTensor}}; auto status = bundle.session->Run(feed_dict, {}, {bundle.meta_graph_def.saver_def().restore_op_name()}, nullptr); @@ -32,10 +33,10 @@ void RestoreModel(const std::string path, tensorflow::SavedModelBundle & bundle) // // Restore a model from a path // -void SaveModel(const std::string path, tensorflow::SavedModelBundle & bundle) +void SaveModel(const tensorflow::tstring path, tensorflow::SavedModelBundle & bundle) { tensorflow::Tensor checkpointPathTensor(tensorflow::DT_STRING, tensorflow::TensorShape()); - checkpointPathTensor.scalar<std::string>()() = path; + checkpointPathTensor.scalar<tensorflow::tstring>()() = path; std::vector<std::pair<std::string, tensorflow::Tensor>> feed_dict = {{bundle.meta_graph_def.saver_def().filename_tensor_name(), checkpointPathTensor}}; auto status = bundle.session->Run(feed_dict, {}, {bundle.meta_graph_def.saver_def().save_tensor_name()}, nullptr); @@ -48,7 +49,7 @@ void SaveModel(const std::string path, tensorflow::SavedModelBundle & bundle) // // Load a session and a graph from a folder // -void LoadModel(const std::string path, tensorflow::SavedModelBundle & bundle) +void LoadModel(const tensorflow::tstring path, tensorflow::SavedModelBundle & bundle) { tensorflow::RunOptions runoptions; diff --git a/include/otbTensorflowGraphOperations.h b/include/otbTensorflowGraphOperations.h index 4e30ae46ec68d7049d3a1d81649586e7322d0ed3..4b4e93c016a09eaab5182ceb6800126665ccad92 100644 --- a/include/otbTensorflowGraphOperations.h +++ b/include/otbTensorflowGraphOperations.h @@ -1,6 +1,7 @@ 
/*========================================================================= - Copyright (c) Remi Cresson (IRSTEA). All rights reserved. + Copyright (c) 2018-2019 Remi Cresson (IRSTEA) + Copyright (c) 2020-2021 Remi Cresson (INRAE) This software is distributed WITHOUT ANY WARRANTY; without even @@ -27,13 +28,13 @@ namespace otb { namespace tf { // Restore a model from a path -void RestoreModel(const std::string path, tensorflow::SavedModelBundle & bundle); +void RestoreModel(const tensorflow::tstring path, tensorflow::SavedModelBundle & bundle); // Restore a model from a path -void SaveModel(const std::string path, tensorflow::SavedModelBundle & bundle); +void SaveModel(const tensorflow::tstring path, tensorflow::SavedModelBundle & bundle); // Load a session and a graph from a folder -void LoadModel(const std::string path, tensorflow::SavedModelBundle & bundle); +void LoadModel(const tensorflow::tstring path, tensorflow::SavedModelBundle & bundle); // Load a graph from a .meta file tensorflow::GraphDef LoadGraph(std::string filename); diff --git a/include/otbTensorflowMultisourceModelBase.h b/include/otbTensorflowMultisourceModelBase.h index 0929f9494fed952c47c36f9703df8a28aca94916..9fb0a79ce7e48144af182dc02063f3c8911c7b7b 100644 --- a/include/otbTensorflowMultisourceModelBase.h +++ b/include/otbTensorflowMultisourceModelBase.h @@ -1,6 +1,7 @@ /*========================================================================= - Copyright (c) Remi Cresson (IRSTEA). All rights reserved. + Copyright (c) 2018-2019 Remi Cresson (IRSTEA) + Copyright (c) 2020-2021 Remi Cresson (INRAE) This software is distributed WITHOUT ANY WARRANTY; without even diff --git a/include/otbTensorflowMultisourceModelBase.hxx b/include/otbTensorflowMultisourceModelBase.hxx index 573aa9ca714f7a163076cc3c9ec4466998e60db5..02c98baeea3ec32f36f6a707a6a99d75fc99e79c 100644 --- a/include/otbTensorflowMultisourceModelBase.hxx +++ b/include/otbTensorflowMultisourceModelBase.hxx @@ -1,6 +1,7 @@ /*========================================================================= - Copyright (c) Remi Cresson (IRSTEA). All rights reserved. + Copyright (c) 2018-2019 Remi Cresson (IRSTEA) + Copyright (c) 2020-2021 Remi Cresson (INRAE) This software is distributed WITHOUT ANY WARRANTY; without even diff --git a/include/otbTensorflowMultisourceModelFilter.h b/include/otbTensorflowMultisourceModelFilter.h index 1da98c97cc96d1c5135c9b671835d6978dd47d2f..46a273af36f9a4aefd6a365ca36b9557eebab37e 100644 --- a/include/otbTensorflowMultisourceModelFilter.h +++ b/include/otbTensorflowMultisourceModelFilter.h @@ -1,6 +1,7 @@ /*========================================================================= - Copyright (c) Remi Cresson (IRSTEA). All rights reserved. + Copyright (c) 2018-2019 Remi Cresson (IRSTEA) + Copyright (c) 2020-2021 Remi Cresson (INRAE) This software is distributed WITHOUT ANY WARRANTY; without even diff --git a/include/otbTensorflowMultisourceModelFilter.hxx b/include/otbTensorflowMultisourceModelFilter.hxx index b59752e66a67dafc4bdef0dd56158a96bddd4c66..91c5384ea1b508fcd359c120a067ed99c4a11126 100644 --- a/include/otbTensorflowMultisourceModelFilter.hxx +++ b/include/otbTensorflowMultisourceModelFilter.hxx @@ -1,6 +1,7 @@ /*========================================================================= - Copyright (c) Remi Cresson (IRSTEA). All rights reserved. 
+ Copyright (c) 2018-2019 Remi Cresson (IRSTEA) + Copyright (c) 2020-2021 Remi Cresson (INRAE) This software is distributed WITHOUT ANY WARRANTY; without even @@ -64,8 +65,8 @@ TensorflowMultisourceModelFilter<TInputImage, TOutputImage> } /** - Compute the input image extent i.e. corners inf & sup - Function taken from "Mosaic" and adapted + Compute the input image extent: corners inf and sup. + Very important: **corners**, not pixel center */ template <class TInputImage, class TOutputImage> void @@ -90,8 +91,8 @@ TensorflowMultisourceModelFilter<TInputImage, TOutputImage> image->TransformIndexToPhysicalPoint(imageFirstIndex, imageOrigin); for(unsigned int dim = 0; dim<OutputImageType::ImageDimension; ++dim) { - extentInf[dim] = vnl_math_min(imageOrigin[dim], imageEnd[dim]); - extentSup[dim] = vnl_math_max(imageOrigin[dim], imageEnd[dim]); + extentInf[dim] = vnl_math_min(imageOrigin[dim], imageEnd[dim]) - 0.5 * image->GetSpacing()[dim]; + extentSup[dim] = vnl_math_max(imageOrigin[dim], imageEnd[dim]) + 0.5 * image->GetSpacing()[dim]; } } @@ -207,7 +208,8 @@ TensorflowMultisourceModelFilter<TInputImage, TOutputImage> extentSup.Fill(itk::NumericTraits<double>::max()); extentInf.Fill(itk::NumericTraits<double>::NonpositiveMin()); - // Compute the extent of each input images and update the global extent + // Compute the extent of each input images and update the extent or the output image. + // The extent of the output image is the intersection of all input images extents. for (unsigned int imageIndex = 0 ; imageIndex < this->GetNumberOfInputs() ; imageIndex++) { ImageType * currentImage = static_cast<ImageType *>( @@ -223,13 +225,21 @@ TensorflowMultisourceModelFilter<TInputImage, TOutputImage> } } - // Set final size - m_OutputSize[0] = std::floor( (extentSup[0] - extentInf[0]) / std::abs(m_OutputSpacing[0]) ) + 1; - m_OutputSize[1] = std::floor( (extentSup[1] - extentInf[1]) / std::abs(m_OutputSpacing[1]) ) + 1; - // Set final origin - m_OutputOrigin[0] = extentInf[0]; - m_OutputOrigin[1] = extentSup[1]; + // Set final origin, aligned to the reference image grid. + // Here we simply get back to the center of the pixel (extents are pixels corners coordinates) + m_OutputOrigin[0] = extentInf[0] + 0.5 * this->GetInput(0)->GetSpacing()[0]; + m_OutputOrigin[1] = extentSup[1] - 0.5 * this->GetInput(0)->GetSpacing()[1]; + + // Set final size + m_OutputSize[0] = std::floor( (extentSup[0] - extentInf[0]) / std::abs(m_OutputSpacing[0]) ); + m_OutputSize[1] = std::floor( (extentSup[1] - extentInf[1]) / std::abs(m_OutputSpacing[1]) ); + + // We should take in account one more thing: the expression field. It enlarge slightly the output image extent. + m_OutputOrigin[0] -= m_OutputSpacing[0] * std::floor(0.5 * this->GetOutputExpressionFields().at(0)[0]); + m_OutputOrigin[1] -= m_OutputSpacing[1] * std::floor(0.5 * this->GetOutputExpressionFields().at(0)[1]); + m_OutputSize[0] += this->GetOutputExpressionFields().at(0)[0] - 1; + m_OutputSize[1] += this->GetOutputExpressionFields().at(0)[1] - 1; // Set output grid size if (!m_ForceOutputGridSize) diff --git a/include/otbTensorflowMultisourceModelLearningBase.h b/include/otbTensorflowMultisourceModelLearningBase.h index f72f376430606a566be9b2970415a5fdc3b90782..ba130453ff73a810e3cb25a15e577bea63c78377 100644 --- a/include/otbTensorflowMultisourceModelLearningBase.h +++ b/include/otbTensorflowMultisourceModelLearningBase.h @@ -1,6 +1,7 @@ /*========================================================================= - Copyright (c) Remi Cresson (IRSTEA). 
All rights reserved. + Copyright (c) 2018-2019 Remi Cresson (IRSTEA) + Copyright (c) 2020-2021 Remi Cresson (INRAE) This software is distributed WITHOUT ANY WARRANTY; without even diff --git a/include/otbTensorflowMultisourceModelLearningBase.hxx b/include/otbTensorflowMultisourceModelLearningBase.hxx index 61affa8edce64e2742723235c53cf0251181f5eb..353478292169c211bc1992549a3b814212f9b59f 100644 --- a/include/otbTensorflowMultisourceModelLearningBase.hxx +++ b/include/otbTensorflowMultisourceModelLearningBase.hxx @@ -1,6 +1,7 @@ /*========================================================================= - Copyright (c) Remi Cresson (IRSTEA). All rights reserved. + Copyright (c) 2018-2019 Remi Cresson (IRSTEA) + Copyright (c) 2020-2021 Remi Cresson (INRAE) This software is distributed WITHOUT ANY WARRANTY; without even diff --git a/include/otbTensorflowMultisourceModelTrain.h b/include/otbTensorflowMultisourceModelTrain.h index f6d8567c1b4a1f8a0e1f5a7ac0e455de69b48e09..8f8983ca0acd20533d9a94936f4d4f1bbec175ea 100644 --- a/include/otbTensorflowMultisourceModelTrain.h +++ b/include/otbTensorflowMultisourceModelTrain.h @@ -1,6 +1,7 @@ /*========================================================================= - Copyright (c) Remi Cresson (IRSTEA). All rights reserved. + Copyright (c) 2018-2019 Remi Cresson (IRSTEA) + Copyright (c) 2020-2021 Remi Cresson (INRAE) This software is distributed WITHOUT ANY WARRANTY; without even diff --git a/include/otbTensorflowMultisourceModelTrain.hxx b/include/otbTensorflowMultisourceModelTrain.hxx index 520c4f3db645a9823402485490930618b23384ee..e7b68dac9567ebb73065a368cee3cc8e4ef94992 100644 --- a/include/otbTensorflowMultisourceModelTrain.hxx +++ b/include/otbTensorflowMultisourceModelTrain.hxx @@ -1,6 +1,7 @@ /*========================================================================= - Copyright (c) Remi Cresson (IRSTEA). All rights reserved. + Copyright (c) 2018-2019 Remi Cresson (IRSTEA) + Copyright (c) 2020-2021 Remi Cresson (INRAE) This software is distributed WITHOUT ANY WARRANTY; without even diff --git a/include/otbTensorflowMultisourceModelValidate.h b/include/otbTensorflowMultisourceModelValidate.h index b4f42536094cb052d3d2a1d60ac37cbc60f152d3..f4a95406f06d5b5c1318cf1e5eb8ca16cdfb995e 100644 --- a/include/otbTensorflowMultisourceModelValidate.h +++ b/include/otbTensorflowMultisourceModelValidate.h @@ -1,6 +1,7 @@ /*========================================================================= - Copyright (c) Remi Cresson (IRSTEA). All rights reserved. + Copyright (c) 2018-2019 Remi Cresson (IRSTEA) + Copyright (c) 2020-2021 Remi Cresson (INRAE) This software is distributed WITHOUT ANY WARRANTY; without even diff --git a/include/otbTensorflowMultisourceModelValidate.hxx b/include/otbTensorflowMultisourceModelValidate.hxx index a8b5a32d107e89dbf748f35ae355ee3be2aacf3a..c7673c8fc79abcfe322b02e5ee765aa9e560914e 100644 --- a/include/otbTensorflowMultisourceModelValidate.hxx +++ b/include/otbTensorflowMultisourceModelValidate.hxx @@ -1,6 +1,7 @@ /*========================================================================= - Copyright (c) Remi Cresson (IRSTEA). All rights reserved. 
+ Copyright (c) 2018-2019 Remi Cresson (IRSTEA) + Copyright (c) 2020-2021 Remi Cresson (INRAE) This software is distributed WITHOUT ANY WARRANTY; without even diff --git a/include/otbTensorflowSampler.h b/include/otbTensorflowSampler.h index eb2645e73075b34c3f404c65e7aae874096f1849..d71eba7af1fc6a9d4e24405e0c087c520d456d9d 100644 --- a/include/otbTensorflowSampler.h +++ b/include/otbTensorflowSampler.h @@ -1,6 +1,7 @@ /*========================================================================= - Copyright (c) Remi Cresson (IRSTEA). All rights reserved. + Copyright (c) 2018-2019 Remi Cresson (IRSTEA) + Copyright (c) 2020-2021 Remi Cresson (INRAE) This software is distributed WITHOUT ANY WARRANTY; without even diff --git a/include/otbTensorflowSampler.hxx b/include/otbTensorflowSampler.hxx index 0129b47b5b927c50a39f2b52487c40e33a965acb..9611d93453b1040aa23602a6d24f7159969902eb 100644 --- a/include/otbTensorflowSampler.hxx +++ b/include/otbTensorflowSampler.hxx @@ -1,6 +1,7 @@ /*========================================================================= - Copyright (c) Remi Cresson (IRSTEA). All rights reserved. + Copyright (c) 2018-2019 Remi Cresson (IRSTEA) + Copyright (c) 2020-2021 Remi Cresson (INRAE) This software is distributed WITHOUT ANY WARRANTY; without even diff --git a/include/otbTensorflowSamplingUtils.cxx b/include/otbTensorflowSamplingUtils.cxx index 303e2253216f204d3aafe1834242417880dc0542..5a8b8e3c03225c855479adc07f371b730cec787e 100644 --- a/include/otbTensorflowSamplingUtils.cxx +++ b/include/otbTensorflowSamplingUtils.cxx @@ -1,6 +1,7 @@ /*========================================================================= - Copyright (c) Remi Cresson (IRSTEA). All rights reserved. + Copyright (c) 2018-2019 Remi Cresson (IRSTEA) + Copyright (c) 2020-2021 Remi Cresson (INRAE) This software is distributed WITHOUT ANY WARRANTY; without even diff --git a/include/otbTensorflowSamplingUtils.h b/include/otbTensorflowSamplingUtils.h index 3b755c23cf4e0f6ddafcbacaf4e5a2d0a1805c39..93879301cf5341a2d479e8752d6af3c4d9ed6ee5 100644 --- a/include/otbTensorflowSamplingUtils.h +++ b/include/otbTensorflowSamplingUtils.h @@ -1,6 +1,7 @@ /*========================================================================= - Copyright (c) Remi Cresson (IRSTEA). All rights reserved. + Copyright (c) 2018-2019 Remi Cresson (IRSTEA) + Copyright (c) 2020-2021 Remi Cresson (INRAE) This software is distributed WITHOUT ANY WARRANTY; without even diff --git a/include/otbTensorflowSource.h b/include/otbTensorflowSource.h index defeac22c2b6260e72c391b55642f83b1627618d..f569c720f2143c81d7608072cd51f6eac746076d 100644 --- a/include/otbTensorflowSource.h +++ b/include/otbTensorflowSource.h @@ -1,6 +1,7 @@ /*========================================================================= - Copyright (c) Remi Cresson (IRSTEA). All rights reserved. + Copyright (c) 2018-2019 Remi Cresson (IRSTEA) + Copyright (c) 2020-2021 Remi Cresson (INRAE) This software is distributed WITHOUT ANY WARRANTY; without even diff --git a/include/otbTensorflowSource.hxx b/include/otbTensorflowSource.hxx index da3608cdafc009d0efc5b36fa59c0f4d165ecc65..bb3de775aa889b3fd0ffaa7324dc2f03ba5159e8 100644 --- a/include/otbTensorflowSource.hxx +++ b/include/otbTensorflowSource.hxx @@ -1,6 +1,7 @@ /*========================================================================= - Copyright (c) Remi Cresson (IRSTEA). All rights reserved. 
+ Copyright (c) 2018-2019 Remi Cresson (IRSTEA) + Copyright (c) 2020-2021 Remi Cresson (INRAE) This software is distributed WITHOUT ANY WARRANTY; without even diff --git a/include/otbTensorflowStreamerFilter.h b/include/otbTensorflowStreamerFilter.h index bb7c982edf5bc66c2d91eb48d93a6b2ea22b6d92..eee73f875f47c7190fde3e3367b0af0381b51ddb 100644 --- a/include/otbTensorflowStreamerFilter.h +++ b/include/otbTensorflowStreamerFilter.h @@ -1,6 +1,7 @@ /*========================================================================= - Copyright (c) Remi Cresson (IRSTEA). All rights reserved. + Copyright (c) 2018-2019 Remi Cresson (IRSTEA) + Copyright (c) 2020-2021 Remi Cresson (INRAE) This software is distributed WITHOUT ANY WARRANTY; without even diff --git a/include/otbTensorflowStreamerFilter.hxx b/include/otbTensorflowStreamerFilter.hxx index 54b8563f7ee49f87477697471458c314c92dd0e9..5e5622be542b4849f016e8d2e8494974c75fd6da 100644 --- a/include/otbTensorflowStreamerFilter.hxx +++ b/include/otbTensorflowStreamerFilter.hxx @@ -1,6 +1,7 @@ /*========================================================================= - Copyright (c) Remi Cresson (IRSTEA). All rights reserved. + Copyright (c) 2018-2019 Remi Cresson (IRSTEA) + Copyright (c) 2020-2021 Remi Cresson (INRAE) This software is distributed WITHOUT ANY WARRANTY; without even diff --git a/python/.idea/workspace.xml b/python/.idea/workspace.xml deleted file mode 100644 index 8ff2cc6a8b2e4216da088ff3d27e1a54ac3c6d3b..0000000000000000000000000000000000000000 --- a/python/.idea/workspace.xml +++ /dev/null @@ -1,59 +0,0 @@ -<?xml version="1.0" encoding="UTF-8"?> -<project version="4"> - <component name="ChangeListManager"> - <list default="true" id="43f3e8f0-5b2f-4088-9291-3ce61be5356e" name="Default Changelist" comment="" /> - <option name="SHOW_DIALOG" value="false" /> - <option name="HIGHLIGHT_CONFLICTS" value="true" /> - <option name="HIGHLIGHT_NON_ACTIVE_CHANGELIST" value="false" /> - <option name="LAST_RESOLUTION" value="IGNORE" /> - </component> - <component name="Git.Settings"> - <option name="RECENT_GIT_ROOT_PATH" value="$PROJECT_DIR$/.." 
/> - </component> - <component name="ProjectId" id="1cRG6FwfqwiBR0g7X3WJU428htz" /> - <component name="ProjectLevelVcsManager" settingsEditedManually="true" /> - <component name="ProjectViewState"> - <option name="hideEmptyMiddlePackages" value="true" /> - <option name="showLibraryContents" value="true" /> - </component> - <component name="PropertiesComponent"> - <property name="RunOnceActivity.OpenProjectViewOnStart" value="true" /> - <property name="RunOnceActivity.ShowReadmeOnStart" value="true" /> - <property name="last_opened_file_path" value="$USER_HOME$/workspace/deeplearning-framework/dl4rs/models/cyclegan/cyclegan.py" /> - <property name="settings.editor.selected.configurable" value="preferences.sourceCode" /> - </component> - <component name="SvnConfiguration"> - <configuration /> - </component> - <component name="TaskManager"> - <task active="true" id="Default" summary="Default task"> - <changelist id="43f3e8f0-5b2f-4088-9291-3ce61be5356e" name="Default Changelist" comment="" /> - <created>1590483814478</created> - <option name="number" value="Default" /> - <option name="presentableId" value="Default" /> - <updated>1590483814478</updated> - </task> - <servers /> - </component> - <component name="WindowStateProjectService"> - <state x="743" y="311" width="424" height="491" key="FileChooserDialogImpl" timestamp="1590492170637"> - <screen x="0" y="27" width="1920" height="1053" /> - </state> - <state x="743" y="311" width="424" height="491" key="FileChooserDialogImpl/0.27.1920.1053@0.27.1920.1053" timestamp="1590492170637" /> - <state x="456" y="188" width="1007" height="737" key="SettingsEditor" timestamp="1590499054377"> - <screen x="0" y="27" width="1920" height="1053" /> - </state> - <state x="456" y="188" width="1007" height="737" key="SettingsEditor/0.27.1920.1053@0.27.1920.1053" timestamp="1590499054377" /> - </component> - <component name="XDebuggerManager"> - <breakpoint-manager> - <breakpoints> - <line-breakpoint enabled="true" suspend="THREAD" type="python-line"> - <url>file://$PROJECT_DIR$/create_savedmodel_pxs_fcn.py</url> - <line>20</line> - <option name="timeStamp" value="1" /> - </line-breakpoint> - </breakpoints> - </breakpoint-manager> - </component> -</project> \ No newline at end of file diff --git a/python/ckpt2savedmodel.py b/python/ckpt2savedmodel.py old mode 100644 new mode 100755 index b7ae2bcde0d4b7b6dfbb30266b08a95e618f5760..cbb72bb941f4fbe4c06af9e3baa5883ca3646887 --- a/python/ckpt2savedmodel.py +++ b/python/ckpt2savedmodel.py @@ -1,3 +1,4 @@ +#!/usr/bin/env python3 # -*- coding: utf-8 -*- # ========================================================================== # diff --git a/python/create_savedmodel_ienco-m3_patchbased.py b/python/create_savedmodel_ienco-m3_patchbased.py old mode 100644 new mode 100755 index e5ef21031e75cac82e3031b8fa2bdd6441979af4..fdb772278bb0ffd9db4844be70a8eae7ef6ff8c6 --- a/python/create_savedmodel_ienco-m3_patchbased.py +++ b/python/create_savedmodel_ienco-m3_patchbased.py @@ -1,7 +1,9 @@ +#!/usr/bin/env python3 # -*- coding: utf-8 -*- # ========================================================================== # -# Copyright Remi Cresson, Dino Ienco (IRSTEA) +# Copyright 2018-2019 Remi Cresson, Dino Ienco (IRSTEA) +# Copyright 2020-2021 Remi Cresson, Dino Ienco (INRAE) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -26,9 +28,9 @@ from tricks import create_savedmodel import tensorflow.compat.v1 as tf - +import tensorflow.compat.v1.nn.rnn_cell as rnn tf.disable_v2_behavior() -from tf.contrib import rnn + import argparse parser = argparse.ArgumentParser() @@ -64,7 +66,7 @@ def RnnAttention(x, nunits, nlayer, n_dims, n_timetamps, is_training_ph): # SIGNLE LAYER: single GRUCell, nunits hidden units each else: cell = rnn.GRUCell(nunits) - outputs, _ = rnn.static_rnn(cell, x, dtype="float32") + outputs, _ = tf.compat.v1.nn.static_rnn(cell, x, dtype="float32") # At this point, outputs is a list of "n_timestamps" tensors [N, B, C] outputs = tf.stack(outputs, axis=1) # At this point, outputs is a tensor of size [N, n_timestamps, B, C] diff --git a/python/create_savedmodel_maggiori17_fullyconv.py b/python/create_savedmodel_maggiori17_fullyconv.py old mode 100644 new mode 100755 index a88ab1cdca0ba972c88ed7d85edaf83797d4cedb..32843e764c22249c36a8089af132e453e42114e9 --- a/python/create_savedmodel_maggiori17_fullyconv.py +++ b/python/create_savedmodel_maggiori17_fullyconv.py @@ -1,8 +1,9 @@ +#!/usr/bin/env python3 # -*- coding: utf-8 -*- #========================================================================== # # Copyright 2018-2019 Remi Cresson (IRSTEA) -# Copyright 2020 Remi Cresson (INRAE) +# Copyright 2020-2021 Remi Cresson (INRAE) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -44,6 +45,8 @@ with tf.compat.v1.Graph().as_default(): # placeholder for images and labels lr = tf.compat.v1.placeholder_with_default(tf.constant(0.0002, dtype=tf.float32, shape=[]), shape=[], name="learning_rate") + training = tf.placeholder_with_default(tf.constant(False, dtype=tf.bool, shape=()), shape=(), + name="is_training") x = tf.compat.v1.placeholder(tf.float32, shape=(None, patch_size_xs, patch_size_xs, params.n_channels), name="x") y = tf.compat.v1.placeholder(tf.int32, shape=(None, patch_size_label, patch_size_label, 1), name="y") @@ -52,7 +55,7 @@ with tf.compat.v1.Graph().as_default(): activation=tf.nn.crelu) # Normalization of output of layer 1 - norm1 = tf.compat.v1.layers.batch_normalization(conv1) + norm1 = tf.compat.v1.layers.batch_normalization(conv1, training=training) # pooling layer #1 pool1 = tf.compat.v1.layers.max_pooling2d(inputs=norm1, pool_size=[4, 4], strides=4) @@ -62,14 +65,14 @@ with tf.compat.v1.Graph().as_default(): activation=tf.nn.crelu) # Normalization of output of layer 2 - norm2 = tf.compat.v1.layers.batch_normalization(conv2) + norm2 = tf.compat.v1.layers.batch_normalization(conv2, training=training) # Convolutional Layer #3 conv3 = tf.compat.v1.layers.conv2d(inputs=norm2, filters=80, kernel_size=[3, 3], padding="valid", activation=tf.nn.crelu) # Normalization of output of layer 3 - norm3 = tf.compat.v1.layers.batch_normalization(conv3) + norm3 = tf.compat.v1.layers.batch_normalization(conv3, training=training) # Convolutional Layer #4 conv4 = tf.compat.v1.layers.conv2d(inputs=norm3, filters=1, kernel_size=[8, 8], padding="valid", diff --git a/python/create_savedmodel_pxs_fcn.py b/python/create_savedmodel_pxs_fcn.py index f1ddac404c33ad0de91b65493d506001cf87b231..9dc6123c2e8d4d693bfbbb675a590a8d6938f96d 100755 --- a/python/create_savedmodel_pxs_fcn.py +++ b/python/create_savedmodel_pxs_fcn.py @@ -1,8 +1,9 @@ +#!/usr/bin/env python3 # -*- coding: utf-8 -*- # ========================================================================== # # Copyright 2018-2019 Remi Cresson (IRSTEA) -# Copyright 2020 Remi 
Cresson (INRAE) +# Copyright 2020-2021 Remi Cresson (INRAE) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/python/create_savedmodel_simple_cnn.py b/python/create_savedmodel_simple_cnn.py index 1bccd92867fce0bc9a223eb198edc15496d9fdf0..942860ee5655a619054d19b36c78c0dace92230c 100755 --- a/python/create_savedmodel_simple_cnn.py +++ b/python/create_savedmodel_simple_cnn.py @@ -1,8 +1,9 @@ +#!/usr/bin/env python3 # -*- coding: utf-8 -*- # ========================================================================== # # Copyright 2018-2019 Remi Cresson (IRSTEA) -# Copyright 2020 Remi Cresson (INRAE) +# Copyright 2020-2021 Remi Cresson (INRAE) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/python/create_savedmodel_simple_fcn.py b/python/create_savedmodel_simple_fcn.py index 94bd1d47b2e2a2e9b0975271dccdb2588fd8c57a..1b6a39613826d347fc7305d3fc56ddba9430306e 100755 --- a/python/create_savedmodel_simple_fcn.py +++ b/python/create_savedmodel_simple_fcn.py @@ -1,8 +1,9 @@ +#!/usr/bin/env python3 # -*- coding: utf-8 -*- # ========================================================================== # # Copyright 2018-2019 Remi Cresson (IRSTEA) -# Copyright 2020 Remi Cresson (INRAE) +# Copyright 2020-2021 Remi Cresson (INRAE) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/python/otbtf.py b/python/otbtf.py new file mode 100644 index 0000000000000000000000000000000000000000..a925d3ea07b3a0d54bca439fd011be5ccb1f62a0 --- /dev/null +++ b/python/otbtf.py @@ -0,0 +1,528 @@ +# -*- coding: utf-8 -*- +# ========================================================================== +# +# Copyright 2018-2019 Remi Cresson (IRSTEA) +# Copyright 2020 Remi Cresson (INRAE) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0.txt +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# ==========================================================================*/ +import threading +import multiprocessing +import time +import numpy as np +import tensorflow as tf +import gdal +import logging +from abc import ABC, abstractmethod + + +""" +------------------------------------------------------- Helpers -------------------------------------------------------- +""" + + +def gdal_open(filename): + """ + Open a GDAL raster + :param filename: raster file + :return: a GDAL ds instance + """ + ds = gdal.Open(filename) + if ds is None: + raise Exception("Unable to open file {}".format(filename)) + return ds + + +def read_as_np_arr(ds, as_patches=True): + """ + Read a GDAL raster as numpy array + :param ds: GDAL ds instance + :param as_patches: if True, the returned numpy array has the following shape (n, psz_x, psz_x, nb_channels). 
If + False, the shape is (1, psz_y, psz_x, nb_channels) + :return: Numpy array of dim 4 + """ + buffer = ds.ReadAsArray() + szx = ds.RasterXSize + if len(buffer.shape) == 3: + buffer = np.transpose(buffer, axes=(1, 2, 0)) + if not as_patches: + n = 1 + szy = ds.RasterYSize + else: + n = int(ds.RasterYSize / szx) + szy = szx + return np.float32(buffer.reshape((n, szy, szx, ds.RasterCount))) + + +""" +---------------------------------------------------- Buffer class ------------------------------------------------------ +""" + + +class Buffer: + """ + Used to store and access list of objects + """ + + def __init__(self, max_length): + self.max_length = max_length + self.container = [] + + def size(self): + return len(self.container) + + def add(self, x): + self.container.append(x) + assert (self.size() <= self.max_length) + + def is_complete(self): + return self.size() == self.max_length + + +""" +------------------------------------------------ PatchesReaderBase class ----------------------------------------------- +""" + + +class PatchesReaderBase(ABC): + """ + Base class for patches delivery + """ + + @abstractmethod + def get_sample(self, index): + """ + Return one sample. + :return One sample instance, whatever the sample structure is (dict, numpy array, ...) + """ + pass + + @abstractmethod + def get_stats(self) -> dict: + """ + Compute some statistics for each source. + Depending if streaming is used, the statistics are computed directly in memory, or chunk-by-chunk. + + :return a dict having the following structure: + { + "src_key_0": + {"min": np.array([...]), + "max": np.array([...]), + "mean": np.array([...]), + "std": np.array([...])}, + ..., + "src_key_M": + {"min": np.array([...]), + "max": np.array([...]), + "mean": np.array([...]), + "std": np.array([...])}, + } + """ + pass + + @abstractmethod + def get_size(self): + """ + Returns the total number of samples + :return: number of samples (int) + """ + pass + + +""" +----------------------------------------------- PatchesImagesReader class ---------------------------------------------- +""" + + +class PatchesImagesReader(PatchesReaderBase): + """ + This class provides a read access to a set of patches images. + + A patches image is an image of patches stacked in rows, as produced from the OTBTF "PatchesExtraction" + application, and is stored in a raster format (e.g. GeoTiff). + A source can be a particular domain in which the patches are extracted (remember that in OTBTF applications, + the number of sources is controlled by the OTB_TF_NSOURCES environment variable). + + This class enables to use: + - multiple sources + - multiple patches images per source + + Each patch can be independently accessed using the get_sample(index) function, with index in [0, self.size), + self.size being the total number of patches (must be the same for each sources). + + :see PatchesReaderBase + """ + + def __init__(self, filenames_dict: dict, use_streaming=False): + """ + :param filenames_dict: A dict() structured as follow: + {src_name1: [src1_patches_image_1.tif, ..., src1_patches_image_N.tif], + src_name2: [src2_patches_image_1.tif, ..., src2_patches_image_N.tif], + ... + src_nameM: [srcM_patches_image_1.tif, ..., srcM_patches_image_N.tif]} + :param use_streaming: if True, the patches are read on the fly from the disc, nothing is kept in memory. 
+ """ + + assert (len(filenames_dict.values()) > 0) + + # ds dict + self.ds = dict() + for src_key, src_filenames in filenames_dict.items(): + self.ds[src_key] = [] + for src_filename in src_filenames: + self.ds[src_key].append(gdal_open(src_filename)) + + if len(set([len(ds_list) for ds_list in self.ds.values()])) != 1: + raise Exception("Each source must have the same number of patches images") + + # streaming on/off + self.use_streaming = use_streaming + + # ds check + nb_of_patches = {key: 0 for key in self.ds} + self.nb_of_channels = dict() + for src_key, ds_list in self.ds.items(): + for ds in ds_list: + nb_of_patches[src_key] += self._get_nb_of_patches(ds) + if src_key not in self.nb_of_channels: + self.nb_of_channels[src_key] = ds.RasterCount + else: + if self.nb_of_channels[src_key] != ds.RasterCount: + raise Exception("All patches images from one source must have the same number of channels!" + "Error happened for source: {}".format(src_key)) + if len(set(nb_of_patches.values())) != 1: + raise Exception("Sources must have the same number of patches! Number of patches: {}".format(nb_of_patches)) + + # ds sizes + src_key_0 = list(self.ds)[0] # first key + self.ds_sizes = [self._get_nb_of_patches(ds) for ds in self.ds[src_key_0]] + self.size = sum(self.ds_sizes) + + # if use_streaming is False, we store in memory all patches images + if not self.use_streaming: + patches_list = {src_key: [read_as_np_arr(ds) for ds in self.ds[src_key]] for src_key in self.ds} + self.patches_buffer = {src_key: np.concatenate(patches_list[src_key], axis=-1) for src_key in self.ds} + + def _get_ds_and_offset_from_index(self, index): + offset = index + for i, ds_size in enumerate(self.ds_sizes): + if offset < ds_size: + break + offset -= ds_size + + return i, offset + + @staticmethod + def _get_nb_of_patches(ds): + return int(ds.RasterYSize / ds.RasterXSize) + + @staticmethod + def _read_extract_as_np_arr(ds, offset): + assert (ds is not None) + psz = ds.RasterXSize + yoff = int(offset * psz) + assert (yoff + psz <= ds.RasterYSize) + buffer = ds.ReadAsArray(0, yoff, psz, psz) + if len(buffer.shape) == 3: + buffer = np.transpose(buffer, axes=(1, 2, 0)) + return np.float32(buffer) + + def get_sample(self, index): + """ + Return one sample of the dataset. + :param index: the sample index. Must be in the [0, self.size) range. + :return: The sample is stored in a dict() with the following structure: + {"src_key_0": np.array((psz_y_0, psz_x_0, nb_ch_0)), + "src_key_1": np.array((psz_y_1, psz_x_1, nb_ch_1)), + ... + "src_key_M": np.array((psz_y_M, psz_x_M, nb_ch_M))} + """ + assert (0 <= index) + assert (index < self.size) + + if not self.use_streaming: + res = {src_key: self.patches_buffer[src_key][index, :, :, :] for src_key in self.ds} + else: + i, offset = self._get_ds_and_offset_from_index(index) + res = {src_key: self._read_extract_as_np_arr(self.ds[src_key][i], offset) for src_key in self.ds} + + return res + + def get_stats(self): + """ + Compute some statistics for each source. + Depending if streaming is used, the statistics are computed directly in memory, or chunk-by-chunk. 
+ + :return statistics dict + """ + logging.info("Computing stats") + if not self.use_streaming: + axis = (0, 1, 2) # (row, col) + stats = {src_key: {"min": np.amin(patches_buffer, axis=axis), + "max": np.amax(patches_buffer, axis=axis), + "mean": np.mean(patches_buffer, axis=axis), + "std": np.std(patches_buffer, axis=axis)} for src_key, patches_buffer in + self.patches_buffer.items()} + else: + axis = (0, 1) # (row, col) + + def _filled(value): + return {src_key: value * np.ones((self.nb_of_channels[src_key])) for src_key in self.ds} + + _maxs = _filled(0.0) + _mins = _filled(float("inf")) + _sums = _filled(0.0) + _sqsums = _filled(0.0) + for index in range(self.size): + sample = self.get_sample(index=index) + for src_key, np_arr in sample.items(): + rnumel = 1.0 / float(np_arr.shape[0] * np_arr.shape[1]) + _mins[src_key] = np.minimum(np.amin(np_arr, axis=axis).flatten(), _mins[src_key]) + _maxs[src_key] = np.maximum(np.amax(np_arr, axis=axis).flatten(), _maxs[src_key]) + _sums[src_key] += rnumel * np.sum(np_arr, axis=axis).flatten() + _sqsums[src_key] += rnumel * np.sum(np.square(np_arr), axis=axis).flatten() + + rsize = 1.0 / float(self.size) + stats = {src_key: {"min": _mins[src_key], + "max": _maxs[src_key], + "mean": rsize * _sums[src_key], + "std": np.sqrt(rsize * _sqsums[src_key] - np.square(rsize * _sums[src_key])) + } for src_key in self.ds} + logging.info("Stats: {}".format(stats)) + return stats + + def get_size(self): + return self.size + + +""" +------------------------------------------------- IteratorBase class --------------------------------------------------- +""" + + +class IteratorBase(ABC): + """ + Base class for iterators + """ + @abstractmethod + def __init__(self, patches_reader: PatchesReaderBase): + pass + + +""" +------------------------------------------------ RandomIterator class -------------------------------------------------- +""" + + +class RandomIterator(IteratorBase): + """ + Pick a random number in the [0, handler.size) range. + """ + + def __init__(self, patches_reader): + super().__init__(patches_reader=patches_reader) + self.indices = np.arange(0, patches_reader.get_size()) + self._shuffle() + self.count = 0 + + def __iter__(self): + return self + + def __next__(self): + current_index = self.indices[self.count] + if self.count < len(self.indices) - 1: + self.count += 1 + else: + self._shuffle() + self.count = 0 + return current_index + + def _shuffle(self): + np.random.shuffle(self.indices) + + +""" +--------------------------------------------------- Dataset class ------------------------------------------------------ +""" + + +class Dataset: + """ + Handles the "mining" of patches. + This class has a thread that extract tuples from the readers, while ensuring the access of already gathered tuples. + + :see PatchesReaderBase + :see Buffer + """ + + def __init__(self, patches_reader: PatchesReaderBase, buffer_length: int = 128, + Iterator: IteratorBase = RandomIterator): + """ + :param patches_reader: The patches reader instance + :param buffer_length: The number of samples that are stored in the buffer + :param Iterator: The iterator class used to generate the sequence of patches indices. 
+ """ + + # patches reader + self.patches_reader = patches_reader + self.size = self.patches_reader.get_size() + + # iterator + self.iterator = Iterator(patches_reader=self.patches_reader) + + # Get patches sizes and type, of the first sample of the first tile + self.output_types = dict() + self.output_shapes = dict() + one_sample = self.patches_reader.get_sample(index=0) + for src_key, np_arr in one_sample.items(): + self.output_shapes[src_key] = np_arr.shape + self.output_types[src_key] = tf.dtypes.as_dtype(np_arr.dtype) + + logging.info("output_types: {}".format(self.output_types)) + logging.info("output_shapes: {}".format(self.output_shapes)) + + # buffers + self.miner_buffer = Buffer(buffer_length) + self.mining_lock = multiprocessing.Lock() + self.consumer_buffer = Buffer(buffer_length) + self.consumer_buffer_pos = 0 + self.tot_wait = 0 + self.miner_thread = self._summon_miner_thread() + self.read_lock = multiprocessing.Lock() + self._dump() + + # Prepare tf dataset for one epoch + self.tf_dataset = tf.data.Dataset.from_generator(self._generator, + output_types=self.output_types, + output_shapes=self.output_shapes).repeat(1) + + def get_stats(self) -> dict: + """ + :return: the dataset statistics, computed by the patches reader + """ + with self.mining_lock: + return self.patches_reader.get_stats() + + def read_one_sample(self): + """ + Read one element of the consumer_buffer + The lock is used to prevent different threads to read and update the internal counter concurrently + """ + with self.read_lock: + output = None + if self.consumer_buffer_pos < self.consumer_buffer.max_length: + output = self.consumer_buffer.container[self.consumer_buffer_pos] + self.consumer_buffer_pos += 1 + if self.consumer_buffer_pos == self.consumer_buffer.max_length: + self._dump() + self.consumer_buffer_pos = 0 + return output + + def _dump(self): + """ + This function dumps the miner_buffer into the consumer_buffer, and restart the miner_thread + """ + # Wait for miner to finish his job + t = time.time() + self.miner_thread.join() + self.tot_wait += time.time() - t + + # Copy miner_buffer.container --> consumer_buffer.container + self.consumer_buffer.container = [elem for elem in self.miner_buffer.container] + + # Clear miner_buffer.container + self.miner_buffer.container.clear() + + # Restart miner_thread + self.miner_thread = self._summon_miner_thread() + + def _collect(self): + """ + This function collects samples. + It is threaded by the miner_thread. + """ + # Fill the miner_container until it's full + while not self.miner_buffer.is_complete(): + try: + index = next(self.iterator) + with self.mining_lock: + new_sample = self.patches_reader.get_sample(index=index) + self.miner_buffer.add(new_sample) + except Exception as e: + logging.warning("Error during collecting samples: {}".format(e)) + + def _summon_miner_thread(self): + """ + Create and starts the thread for the data collect + """ + t = threading.Thread(target=self._collect) + t.start() + return t + + def _generator(self): + """ + Generator function, used for the tf dataset + """ + for elem in range(self.size): + yield self.read_one_sample() + + def get_tf_dataset(self, batch_size, drop_remainder=True): + """ + Returns a TF dataset, ready to be used with the provided batch size + :param batch_size: the batch size + :param drop_remainder: drop incomplete batches + :return: The TF dataset + """ + if batch_size <= 2 * self.miner_buffer.max_length: + logging.warning("Batch size is {} but dataset buffer has {} elements. 
Consider using a larger dataset " + "buffer to avoid I/O bottleneck".format(batch_size, self.miner_buffer.max_length)) + return self.tf_dataset.batch(batch_size, drop_remainder=drop_remainder) + + def get_total_wait_in_seconds(self): + """ + Returns the number of seconds during which the data gathering was delayed because of I/O bottleneck + :return: duration in seconds + """ + return self.tot_wait + + +""" +------------------------------------------- DatasetFromPatchesImages class --------------------------------------------- +""" + + +class DatasetFromPatchesImages(Dataset): + """ + Handles the "mining" of a set of patches images. + + :see PatchesImagesReader + :see Dataset + """ + + def __init__(self, filenames_dict: dict, use_streaming: bool = False, buffer_length: int = 128, + Iterator: IteratorBase = RandomIterator): + """ + :param filenames_dict: A dict() structured as follow: + {src_name1: [src1_patches_image1, ..., src1_patches_imageN1], + src_name2: [src2_patches_image2, ..., src2_patches_imageN2], + ... + src_nameM: [srcM_patches_image1, ..., srcM_patches_imageNM]} + :param use_streaming: if True, the patches are read on the fly from the disc, nothing is kept in memory. + :param buffer_length: The number of samples that are stored in the buffer (used when "use_streaming" is True). + :param Iterator: The iterator class used to generate the sequence of patches indices. + """ + # patches reader + patches_reader = PatchesImagesReader(filenames_dict=filenames_dict, use_streaming=use_streaming) + + super().__init__(patches_reader=patches_reader, buffer_length=buffer_length, Iterator=Iterator) diff --git a/python/tricks.py b/python/tricks.py index 3c0983d5b8363439955a10bbef416511a55c45e8..fe4c5deaedb4095275f7161143a23ad968b10392 100644 --- a/python/tricks.py +++ b/python/tricks.py @@ -20,6 +20,7 @@ import gdal import numpy as np import tensorflow.compat.v1 as tf +from deprecated import deprecated tf.disable_v2_behavior() @@ -88,3 +89,34 @@ def ckpt_to_savedmodel(ckpt_path, inputs, outputs, savedmodel_path, clear_device # Create a SavedModel create_savedmodel(sess, inputs=inputs, outputs=outputs, directory=savedmodel_path) + +@deprecated +def read_samples(filename): + """ + Read a patches image. + @param filename: raster file name + """ + return read_image_as_np(filename, as_patches=True) + +@deprecated +def CreateSavedModel(sess, inputs, outputs, directory): + """ + Create a SavedModel + @param sess TF session + @param inputs List of inputs names (e.g. ["x_cnn_1:0", "x_cnn_2:0"]) + @param outputs List of outputs names (e.g. ["prediction:0", "features:0"]) + @param directory Path for the generated SavedModel + """ + create_savedmodel(sess, inputs, outputs, directory) + +@deprecated +def CheckpointToSavedModel(ckpt_path, inputs, outputs, savedmodel_path, clear_devices=False): + """ + Read a Checkpoint and build a SavedModel + @param ckpt_path Path to the checkpoint file (without the ".meta" extension) + @param inputs List of inputs names (e.g. ["x_cnn_1:0", "x_cnn_2:0"]) + @param outputs List of outputs names (e.g. 
["prediction:0", "features:0"]) + @param savedmodel_path Path for the generated SavedModel + @param clear_devices Clear TF devices positionning (True/False) + """ + ckpt_to_savedmodel(ckpt_path, inputs, outputs, savedmodel_path, clear_devices) diff --git a/test/data/apTvClTensorflowModelServeCNN8x8_32x32FC.tif b/test/data/apTvClTensorflowModelServeCNN8x8_32x32FC.tif index 0662360f8e63ba3ee6d547fb62df25f3300dbb3b..f6155aa6a9e1e878d754e4db67a56f8e211809b3 100644 Binary files a/test/data/apTvClTensorflowModelServeCNN8x8_32x32FC.tif and b/test/data/apTvClTensorflowModelServeCNN8x8_32x32FC.tif differ diff --git a/test/data/apTvClTensorflowModelServeCNN8x8_32x32PB.tif b/test/data/apTvClTensorflowModelServeCNN8x8_32x32PB.tif index 35cb2f460a1c0f879af2ba9f170eabb6da542e4b..ec9c9d9f984ff728a2116637ba56ecc398aadaeb 100644 Binary files a/test/data/apTvClTensorflowModelServeCNN8x8_32x32PB.tif and b/test/data/apTvClTensorflowModelServeCNN8x8_32x32PB.tif differ diff --git a/tools/docker/README.md b/tools/docker/README.md new file mode 100644 index 0000000000000000000000000000000000000000..8722b52e539303319b50a9de8ca3f11ff721597e --- /dev/null +++ b/tools/docker/README.md @@ -0,0 +1,155 @@ +# Build with Docker +Docker build has to be called from the root of the repository (i.e. `docker build .` or `bash tools/docker/multibuild.sh`). +You can build a custom image using `--build-arg` and several config files : +- Ubuntu : `BASE_IMG` should accept any version, for additional packages see [build-deps-cli.txt](build-deps-cli.txt) and [build-deps-gui.txt](build-deps-gui.txt). +- TensorFlow : `TF` arg for the git branch or tag + [build-env-tf.sh](build-env-tf.sh) and BZL_* arguments for the build configuration. `ZIP_TF_BIN` allows you to save compiled binaries if you want to install it elsewhere. +- OrfeoToolBox : `OTB` arg for the git branch or tag + [build-flags-otb.txt](build-flags-otb.txt) to edit cmake flags. Set `KEEP_SRC_OTB` in order to preserve OTB git directory. + +### Base images +```bash +UBUNTU=20.04 # or 16.04, 18.04 +CUDA=11.2.2 # or 10.1, 10.2, 11.0.3 +CUDNN=8 # or 7 +IMG=ubuntu:$UBUNTU +GPU_IMG=nvidia/cuda:$CUDA-cudnn$CUDNN-devel-ubuntu$UBUNTU +``` + +### Default arguments +```bash +BASE_IMG # mandatory +CPU_RATIO=1 +GUI=false +NUMPY_SPEC="==1.19.*" +TF=v2.5.0 +OTB=7.3.0 +BZL_TARGETS="//tensorflow:libtensorflow_cc.so //tensorflow/tools/pip_package:build_pip_package" +BZL_CONFIGS="--config=nogcp --config=noaws --config=nohdfs --config=opt" +BZL_OPTIONS="--verbose_failures --remote_cache=http://localhost:9090" +ZIP_TF_BIN=false +KEEP_SRC_OTB=false +SUDO=true + +# NumPy version requirement : +# TF < 2.4 : "numpy<1.19.0,>=1.16.0" +# TF >= 2.4 : "numpy==1.19.*" +``` + +### Bazel remote cache daemon +If you just need to rebuild with different GUI or KEEP_SRC arguments, or may be a different branch of OTB, bazel cache will help you to rebuild everything except TF, even if the docker cache was purged (after `docker [system|builder] prune`). +In order to recycle the cache, bazel config and TF git tag should be exactly the same, any change in [build-env-tf.sh](build-env-tf.sh) and `--build-arg` (if related to bazel env, cuda, mkl, xla...) may result in a fresh new build. 
+ +Start a cache daemon - here with a max of 20GB, but 10GB should be enough to save 2 TF builds (GPU and CPU): +```bash +mkdir -p $HOME/.cache/bazel-remote +docker run --detach -u 1000:1000 -v $HOME/.cache/bazel-remote:/data -p 9090:8080 buchgr/bazel-remote-cache --max_size=20 +``` +Then just add `--network='host'` to the docker build command, or connect bazel to a remote server - see 'BZL_OPTIONS'. +Otherwise Docker uses a virtual bridge network, and you'll need to edit the cache server IP address accordingly. + +## Images build examples +```bash +# Build for CPU using default Dockerfiles args (without AWS, HDFS or GCP support) +docker build --network='host' -t otbtf:cpu --build-arg BASE_IMG=ubuntu:20.04 . + +# Clear bazel config var (deactivate default optimizations and unset noaws/nogcp/nohdfs) +docker build --network='host' -t otbtf:cpu --build-arg BASE_IMG=ubuntu:20.04 --build-arg BZL_CONFIGS= . + +# Enable MKL +MKL_CONFIG="--config=nogcp --config=noaws --config=nohdfs --config=opt --config=mkl" +docker build --network='host' -t otbtf:cpu-mkl --build-arg BZL_CONFIGS="$MKL_CONFIG" --build-arg BASE_IMG=ubuntu:20.04 . + +# Build for GPU (if you're building for your system only, you should edit CUDA_COMPUTE_CAPABILITIES in build-env-tf.sh) +docker build --network='host' -t otbtf:gpu --build-arg BASE_IMG=nvidia/cuda:11.0.3-cudnn8-devel-ubuntu20.04 . + +# Build latest TF and OTB, set git branches/tags to clone +docker build --network='host' -t otbtf:gpu-dev --build-arg BASE_IMG=nvidia/cuda:11.2.2-cudnn8-devel-ubuntu20.04 \ + --build-arg KEEP_SRC_OTB=true --build-arg TF=nightly --build-arg OTB=develop . + +# Build an old release (TF-2.1) +docker build --network='host' -t otbtf:oldstable-gpu --build-arg BASE_IMG=nvidia/cuda:10.1-cudnn7-devel-ubuntu18.04 \ + --build-arg TF=r2.1 --build-arg NUMPY_SPEC="<1.19" \ + --build-arg BZL_OPTIONS="--noincompatible_do_not_split_linking_cmdline --verbose_failures --remote_cache=http://localhost:9090" . +# You could edit the Dockerfile in order to clone an old branch of the repo instead of copying files from the build context +``` + +### Build for another machine and save TF compiled files +```bash +# Use the same Ubuntu and CUDA versions as your target machine, and beware of CC optimization and CPU compatibility +# (set env variable CC_OPT_FLAGS and avoid "-march=native" if your Docker's CPU is optimized with AVX2/AVX512 but your target CPU isn't) +docker build --network='host' -t otbtf:custom --build-arg BASE_IMG=nvidia/cuda:11.2.2-cudnn8-devel-ubuntu20.04 \ + --build-arg TF=v2.5.0 --build-arg ZIP_TF_BIN=true . +# Retrieve the zip file +docker run -v $HOME:/home/otbuser/volume otbtf:custom cp /opt/otbtf/tf-v2.5.0.zip /home/otbuser/volume + +# Target machine shell +cd $HOME +unzip tf-v2.5.0.zip +sudo mkdir -p /opt/tensorflow/lib +sudo mv tf-v2.5.0/libtensorflow_cc* /opt/tensorflow/lib +# You may need to create a virtualenv; here TF and its dependencies are installed next to the user's pip packages +pip3 install -U pip wheel mock six future deprecated "numpy==1.19.*" +pip3 install --no-deps keras_applications keras_preprocessing +pip3 install tf-v2.5.0/tensorflow-2.5.0-cp38-cp38-linux_x86_64.whl + +TF_WHEEL_DIR="$HOME/.local/lib/python3.8/site-packages/tensorflow" +# This path applies if you installed the wheel as a regular user; with root pip it should be in /usr/local/lib/python3.*, or in your virtualenv lib/ directory +mv tf-v2.5.0/tag_constants.h $TF_WHEEL_DIR/include/tensorflow/cc/saved_model/ +# Then recompile OTB with OTBTF using the libraries in /opt/tensorflow/lib and the instructions in HOWTOBUILD.md.
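+# Here $OTB_GIT is assumed to be the path to your OTB source tree (i.e. a clone of https://gitlab.orfeo-toolbox.org/orfeotoolbox/otb.git)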
+cmake $OTB_GIT \ + -DOTB_USE_TENSORFLOW=ON -DModule_OTBTensorflow=ON \ + -DTENSORFLOW_CC_LIB=/opt/tensorflow/lib/libtensorflow_cc.so.2 \ + -Dtensorflow_include_dir=$TF_WHEEL_DIR/include \ + -DTENSORFLOW_FRAMEWORK_LIB=$TF_WHEEL_DIR/libtensorflow_framework.so.2 \ +&& make install -j +``` + +### Debug build +If you fail to build, you can log into the last layer and check CMake logs. Run `docker images`, find the latest layer ID and run a tmp container (`docker run -it d60496d9612e bash`). +You may also need to split some multi-command layers in the Dockerfile. +If you see OOM errors during SuperBuild you should decrease CPU_RATIO (e.g. 0.75). + +## Container examples +```bash +# Pull GPU image and create a new container with your home directory as volume (requires apt package nvidia-docker2 and CUDA>=11.0) +docker create --gpus=all --volume $HOME:/home/otbuser/volume -it --name otbtf-gpu mdl4eo/otbtf2.4:gpu + +# Run interactive +docker start -i otbtf-gpu + +# Run in background +docker start otbtf-gpu +docker exec otbtf-gpu python -c 'import tensorflow as tf; print(tf.test.is_gpu_available())' +``` + +### Rebuild OTB with more modules +```bash +docker create --gpus=all -it --name otbtf-gpu-dev mdl4eo/otbtf2.4:gpu-dev +docker start -i otbtf-gpu-dev +``` +```bash +# From the container shell: +sudo -i +cd /src/otb/otb/Modules/Remote +git clone https://gitlab.irstea.fr/raffaele.gaetano/otbSelectiveHaralickTextures.git +cd /src/otb/build/OTB/build +cmake -DModule_OTBAppSelectiveHaralickTextures=ON /src/otb/otb && make install -j +``` + +### Container with GUI +```bash +# GUI is disabled by default in order to save space, and because docker xvfb isn't working properly with OpenGL. +# => otbgui seems OK but monteverdi isn't working +docker build --network='host' -t otbtf:cpu-gui --build-arg BASE_IMG=ubuntu:20.04 --build-arg GUI=true . 
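+# Share the host X11 socket and DISPLAY so that GUI tools inside the container can reach your X server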
+docker create -v /tmp/.X11-unix:/tmp/.X11-unix -e DISPLAY=$DISPLAY -it --name otbtf-gui otbtf:cpu-gui +docker start -i otbtf-gui +$ mapla +``` + +## Common errors +Build: +`Error response from daemon: manifest for nvidia/cuda:11.0-cudnn8-devel-ubuntu20.04 not found: manifest unknown: manifest unknown` +=> The image is missing from Docker Hub + +Run: +`failed call to cuInit: UNKNOWN ERROR (303) / no NVIDIA GPU device is present: /dev/nvidia0 does not exist` +=> The NVIDIA driver is missing or disabled; make sure to add `--gpus=all` to your docker run or create command diff --git a/tools/docker/build-deps-cli.txt b/tools/docker/build-deps-cli.txt new file mode 100644 index 0000000000000000000000000000000000000000..5d699cb19db6cd4845acaa909f50e148a172e318 --- /dev/null +++ b/tools/docker/build-deps-cli.txt @@ -0,0 +1,53 @@ +apt-transport-https +ca-certificates +curl +cmake +file +g++ +gcc +git +libc6-dev +libtool +lsb-release +make +nano +patch +pkg-config +python3-dev +python3-pip +python3-setuptools +python3-venv +sudo +swig +unzip +vim +wget +zip + +bison +gdal-bin +python3-gdal +libboost-date-time-dev +libboost-filesystem-dev +libboost-graph-dev +libboost-program-options-dev +libboost-system-dev +libboost-thread-dev +libcurl4-gnutls-dev +libexpat1-dev +libfftw3-dev +libgdal-dev +libgeotiff-dev +libgsl-dev +libinsighttoolkit4-dev +libkml-dev +libmuparser-dev +libmuparserx-dev +libopencv-core-dev +libopencv-ml-dev +libopenthreads-dev +libossim-dev +libpng-dev +libsvm-dev +libtinyxml-dev +zlib1g-dev diff --git a/tools/docker/build-deps-gui.txt b/tools/docker/build-deps-gui.txt new file mode 100644 index 0000000000000000000000000000000000000000..af6f6b8f2403660fc2207ad7b6f43f523600658f --- /dev/null +++ b/tools/docker/build-deps-gui.txt @@ -0,0 +1,13 @@ +freeglut3-dev +libglew-dev +libglfw3-dev +libqt5opengl5-dev +libqwt-qt5-dev +libx11-dev +libgl-dev +libxmu-dev +libxi-dev +qtbase5-dev +qttools5-dev +qttools5-dev-tools +xvfb \ No newline at end of file diff --git a/tools/docker/build-env-tf.sh b/tools/docker/build-env-tf.sh new file mode 100644 index 0000000000000000000000000000000000000000..b29f6c1ad94a6ca97c8d98b3da56661453073a1a --- /dev/null +++ b/tools/docker/build-env-tf.sh @@ -0,0 +1,44 @@ +### TF - bazel build env variables + +# As in the official TF wheels, you'll need to remove "-march=native" to ensure portability (avoid AVX2 / AVX512 compatibility issues) +# You could also add CPU instructions one by one; in this example only AVX512 is avoided, while common optimizations like FMA, SSE4.2 and AVX2 are enabled +#export CC_OPT_FLAGS="-Wno-sign-compare --copt=-mavx --copt=-mavx2 --copt=-mfma --copt=-mfpmath=both --copt=-msse4.2" +export CC_OPT_FLAGS="-march=native -Wno-sign-compare" +export GCC_HOST_COMPILER_PATH=$(which gcc) +export PYTHON_BIN_PATH=$(which python) +export PYTHON_LIB_PATH="$($PYTHON_BIN_PATH -c 'import site; print(site.getsitepackages()[0])')" +export TF_DOWNLOAD_CLANG=0 +export TF_ENABLE_XLA=1 +export TF_NEED_COMPUTECPP=0 +export TF_NEED_GDR=0 +export TF_NEED_JEMALLOC=1 +export TF_NEED_KAFKA=0 +export TF_NEED_MPI=0 +export TF_NEED_OPENCL=0 +export TF_NEED_OPENCL_SYCL=0 +export TF_NEED_VERBS=0 +export TF_SET_ANDROID_WORKSPACE=0 +# For MKL support BZL_CONFIGS+=" --config=mkl" +#export TF_DOWNLOAD_MKL=1 +#export TF_NEED_MKL=0 +# Needed BZL_CONFIGS=" --config=nogcp --config=noaws --config=nohdfs" +#export TF_NEED_S3=0 +#export TF_NEED_AWS=0 +#export TF_NEED_GCP=0 +#export TF_NEED_HDFS=0 + +## GPU +export TF_NEED_ROCM=0 +export TF_NEED_CUDA=0 +export CUDA_TOOLKIT_PATH=$(find /usr/local
-maxdepth 1 -type d -name 'cuda-*') +if [ ! -z $CUDA_TOOLKIT_PATH ] ; then + export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:$CUDA_TOOLKIT_PATH/lib64:$CUDA_TOOLKIT_PATH/lib64/stubs" + export TF_CUDA_VERSION=$(echo $CUDA_TOOLKIT_PATH | sed -r 's/.*\/cuda-(.*)/\1/') + export TF_CUDA_COMPUTE_CAPABILITIES="5.2,6.1,7.0,7.5,8.6" + export TF_NEED_CUDA=1 + export TF_CUDA_CLANG=0 + export TF_NEED_TENSORRT=0 + export CUDNN_INSTALL_PATH="/usr/" + export TF_CUDNN_VERSION=$(sed -n 's/^#define CUDNN_MAJOR\s*\(.*\).*/\1/p' $CUDNN_INSTALL_PATH/include/cudnn.h) + export TF_NCCL_VERSION=2 +fi diff --git a/tools/docker/build-flags-otb.txt b/tools/docker/build-flags-otb.txt new file mode 100644 index 0000000000000000000000000000000000000000..2c3e0feac4e480cd9f8a0c9969b70c301761e8da --- /dev/null +++ b/tools/docker/build-flags-otb.txt @@ -0,0 +1,32 @@ +-DUSE_SYSTEM_BOOST=ON +-DUSE_SYSTEM_CURL=ON +-DUSE_SYSTEM_EXPAT=ON +-DUSE_SYSTEM_FFTW=ON +-DUSE_SYSTEM_FREETYPE=ON +-DUSE_SYSTEM_GDAL=ON +-DUSE_SYSTEM_GEOS=ON +-DUSE_SYSTEM_GEOTIFF=ON +-DUSE_SYSTEM_GLEW=ON +-DUSE_SYSTEM_GLFW=ON +-DUSE_SYSTEM_GLUT=ON +-DUSE_SYSTEM_GSL=ON +-DUSE_SYSTEM_ITK=ON +-DUSE_SYSTEM_LIBKML=ON +-DUSE_SYSTEM_LIBSVM=ON +-DUSE_SYSTEM_MUPARSER=ON +-DUSE_SYSTEM_MUPARSERX=ON +-DUSE_SYSTEM_OPENCV=ON +-DUSE_SYSTEM_OPENTHREADS=ON +-DUSE_SYSTEM_OSSIM=ON +-DUSE_SYSTEM_PNG=ON +-DUSE_SYSTEM_QT5=ON +-DUSE_SYSTEM_QWT=ON +-DUSE_SYSTEM_TINYXML=ON +-DUSE_SYSTEM_ZLIB=ON +-DUSE_SYSTEM_SWIG=ON + +-DOTB_USE_QT=OFF +-DOTB_USE_OPENGL=OFF +-DOTB_USE_GLUT=OFF +-DOTB_USE_GLEW=OFF +-DOTB_USE_GLFW=OFF diff --git a/tools/docker/multibuild.sh b/tools/docker/multibuild.sh new file mode 100644 index 0000000000000000000000000000000000000000..c88bb0ac45ee8256d47dc505abeb4b85689fcdb5 --- /dev/null +++ b/tools/docker/multibuild.sh @@ -0,0 +1,37 @@ +#!/bin/bash +### Docker multibuild and push, see default args and more examples in tools/docker/README.md +RELEASE=2.5 +UBUNTU=20.04 +CUDA=11.2.2 +CUDNN=8 +IMG=ubuntu:$UBUNTU +GPU_IMG=nvidia/cuda:$CUDA-cudnn$CUDNN-devel-ubuntu$UBUNTU + +## Bazel remote cache daemon +mkdir -p $HOME/.cache/bazel-remote +docker run -d -u 1000:1000 -v $HOME/.cache/bazel-remote:/data -p 9090:8080 buchgr/bazel-remote-cache --max_size=20 + +### CPU (no MKL) +docker build --network='host' -t mdl4eo/otbtf$RELEASE:cpu-dev --build-arg BASE_IMG=$IMG --build-arg KEEP_SRC_OTB=true . +docker build --network='host' -t mdl4eo/otbtf$RELEASE:cpu --build-arg BASE_IMG=$IMG . +#docker build --network='host' -t mdl4eo/otbtf$RELEASE:-cpu-gui --build-arg BASE_IMG=$IMG --build-arg GUI=true . + +### MKL is enabled with bazel config flag +#MKL_CONF="--config=nogcp --config=noaws --config=nohdfs --config=mkl --config=opt" +#docker build --network='host' -t mdl4eo/otbtf$RELEASE:-cpu-mkl --build-arg BASE_IMG=$IMG --build-arg BZL_CONFIGS="$MKL_CONF" . +#docker build --network='host' -t mdl4eo/otbtf$RELEASE:-cpu-mkl-dev --build-arg BASE_IMG=$IMG --build-arg BZL_CONFIGS="$MKL_CONF" --build-arg KEEP_SRC_OTB=true . + +### GPU support is enabled if CUDA is found in /usr/local +docker build --network='host' -t mdl4eo/otbtf$RELEASE:gpu-dev --build-arg BASE_IMG=$GPU_IMG --build-arg KEEP_SRC_OTB=true . +docker build --network='host' -t mdl4eo/otbtf$RELEASE:gpu --build-arg BASE_IMG=$GPU_IMG . +#docker build --network='host' -t mdl4eo/otbtf$RELEASE:-gpu-gui --build-arg BASE_IMG=$GPU_IMG --build-arg GUI=true . 
+ +#docker login +docker push mdl4eo/otbtf$RELEASE:-cpu-dev +docker push mdl4eo/otbtf$RELEASE:-cpu +#docker push mdl4eo/otbtf$RELEASE:-cpu-gui +#docker push mdl4eo/otbtf$RELEASE:-cpu-mkl + +docker push mdl4eo/otbtf$RELEASE:-gpu-dev +docker push mdl4eo/otbtf$RELEASE:-gpu +#docker push mdl4eo/otbtf$RELEASE:-gpu-gui diff --git a/tools/dockerfiles/r1.6/bionic.tf-r1.14.otb-develop/Dockerfile b/tools/dockerfiles/r1.6/bionic.tf-r1.14.otb-develop/Dockerfile deleted file mode 100644 index dac7c853af399afa487871b7e2436f6def3243d2..0000000000000000000000000000000000000000 --- a/tools/dockerfiles/r1.6/bionic.tf-r1.14.otb-develop/Dockerfile +++ /dev/null @@ -1,218 +0,0 @@ -FROM ubuntu:18.04 - -MAINTAINER Remi Cresson <remi.cresson[at]irstea[dot]fr> - -RUN apt-get update -y \ - && apt-get upgrade -y \ - && apt-get install -y --no-install-recommends \ - sudo \ - ca-certificates \ - curl \ - make \ - cmake \ - g++ \ - gcc \ - git \ - libtool \ - swig \ - xvfb \ - wget \ - autoconf \ - automake \ - pkg-config \ - zip \ - zlib1g-dev \ - unzip \ - && rm -rf /var/lib/apt/lists/* - -# ---------------------------------------------------------------------------- -# OTB and TensorFlow dependencies -# ---------------------------------------------------------------------------- -RUN apt-get update -y \ - && apt-get upgrade -y \ - && apt-get install -y --no-install-recommends \ - freeglut3-dev \ - libboost-date-time-dev \ - libboost-filesystem-dev \ - libboost-graph-dev \ - libboost-program-options-dev \ - libboost-system-dev \ - libboost-thread-dev \ - libcurl4-gnutls-dev \ - libexpat1-dev \ - libfftw3-dev \ - libgdal-dev \ - libgeotiff-dev \ - libglew-dev \ - libglfw3-dev \ - libgsl-dev \ - libinsighttoolkit4-dev \ - libkml-dev \ - libmuparser-dev \ - libmuparserx-dev \ - libopencv-core-dev \ - libopencv-ml-dev \ - libopenthreads-dev \ - libossim-dev \ - libpng-dev \ - libqt5opengl5-dev \ - libqwt-qt5-dev \ - libsvm-dev \ - libtinyxml-dev \ - qtbase5-dev \ - qttools5-dev \ - default-jdk \ - python3-pip \ - python3.6-dev \ - python3.6-gdal \ - python3-setuptools \ - libxmu-dev \ - libxi-dev \ - qttools5-dev-tools \ - bison \ - && rm -rf /var/lib/apt/lists/* - -# ---------------------------------------------------------------------------- -# Python packages -# ---------------------------------------------------------------------------- -RUN ln -s /usr/bin/python3 /usr/bin/python \ - && python3 -m pip install --upgrade pip \ - && python3 -m pip install pip six numpy wheel mock keras future - -# ---------------------------------------------------------------------------- -# Build TensorFlow -# ---------------------------------------------------------------------------- -RUN export TF_ROOT=/work/tf \ - && mkdir -p ${TF_ROOT}/bazel \ - && cd ${TF_ROOT}/bazel \ - && wget https://github.com/bazelbuild/bazel/releases/download/0.24.1/bazel-0.24.1-installer-linux-x86_64.sh \ - && chmod +x bazel-0.24.1-installer-linux-x86_64.sh \ - && ./bazel-0.24.1-installer-linux-x86_64.sh - -RUN export TF_ROOT=/work/tf \ - && export PATH="$PATH:$HOME/bin" \ - && cd $TF_ROOT \ - && git clone https://github.com/tensorflow/tensorflow.git \ - && cd tensorflow \ - && git checkout r1.14 \ - && echo "\n\n\n\n\n\n\n\n\n" | ./configure \ - && bazel build //tensorflow:libtensorflow_framework.so //tensorflow:libtensorflow_cc.so //tensorflow/tools/pip_package:build_pip_package - -RUN export TF_ROOT=/work/tf \ - && cd $TF_ROOT/tensorflow \ - && bazel-bin/tensorflow/tools/pip_package/build_pip_package /tmp/tensorflow_pkg \ - && pip3 install $(find 
/tmp/tensorflow_pkg/ -type f -iname "tensorflow*.whl") \ - && ./tensorflow/contrib/makefile/build_all_linux.sh \ - && mkdir -p /work/tf/installdir/lib \ - && mkdir -p /work/tf/installdir/include \ - && cp bazel-bin/tensorflow/libtensorflow_cc.so /work/tf/installdir/lib \ - && cp bazel-bin/tensorflow/libtensorflow_framework.so /work/tf/installdir/lib \ - && cp tensorflow/contrib/makefile/gen/protobuf/lib/libprotobuf.a /work/tf/installdir/lib \ - && cp tensorflow/contrib/makefile/downloads/nsync/builds/default.linux.c++11/*.a /work/tf/installdir/lib \ - && cp -r bazel-genfiles/* /work/tf/installdir/include/ \ - && cp -r tensorflow/cc /work/tf/installdir/include/tensorflow/ \ - && cp -r tensorflow/core /work/tf/installdir/include/tensorflow/ \ - && cp -r third_party /work/tf/installdir/include/ \ - && cp -r tensorflow/contrib/makefile/gen/protobuf/include/* /work/tf/installdir/include/ \ - && cp -r tensorflow/contrib/makefile/downloads/eigen/Eigen /work/tf/installdir/include/ \ - && cp -r tensorflow/contrib/makefile/downloads/eigen/unsupported /work/tf/installdir/include/ \ - && cp -r tensorflow/contrib/makefile/downloads/eigen/signature_of_eigen3_matrix_library /work/tf/installdir/include/ \ - && cd ${TF_ROOT}/tensorflow/tensorflow/contrib/makefile/downloads/absl \ - && find absl/ -name '*.h' -exec cp --parents \{\} /work/tf/installdir/include/ \; \ - && find absl/ -name '*.inc' -exec cp --parents \{\} /work/tf/installdir/include/ \; \ - && find /work/tf/installdir/ -name "*.cc" -type f -delete - -RUN echo "Create symlinks for tensorflow libs" \ - && ln -s /work/tf/installdir/lib/libtensorflow_cc.so /work/tf/installdir/lib/libtensorflow_cc.so.1 \ - && ln -s /work/tf/installdir/lib/libtensorflow_framework.so /work/tf/installdir/lib/libtensorflow_framework.so.1 - -# ---------------------------------------------------------------------------- -# Build OTB -# ---------------------------------------------------------------------------- -RUN mkdir -p /work/otb/build \ - && cd /work/otb \ - && git clone https://gitlab.orfeo-toolbox.org/orfeotoolbox/otb.git otb \ - && cd otb \ - && git checkout 0df44b312d64d6c3890b65d3790d4a17d0fd5f23 \ - && cd /work/otb/build \ - && cmake /work/otb/otb/SuperBuild \ - -DUSE_SYSTEM_BOOST=ON \ - -DUSE_SYSTEM_CURL=ON \ - -DUSE_SYSTEM_EXPAT=ON \ - -DUSE_SYSTEM_FFTW=ON \ - -DUSE_SYSTEM_FREETYPE=ON \ - -DUSE_SYSTEM_GDAL=ON \ - -DUSE_SYSTEM_GEOS=ON \ - -DUSE_SYSTEM_GEOTIFF=ON \ - -DUSE_SYSTEM_GLEW=ON \ - -DUSE_SYSTEM_GLFW=ON \ - -DUSE_SYSTEM_GLUT=ON \ - -DUSE_SYSTEM_GSL=ON \ - -DUSE_SYSTEM_ITK=ON \ - -DUSE_SYSTEM_LIBKML=ON \ - -DUSE_SYSTEM_LIBSVM=ON \ - -DUSE_SYSTEM_MUPARSER=ON \ - -DUSE_SYSTEM_MUPARSERX=ON \ - -DUSE_SYSTEM_OPENCV=ON \ - -DUSE_SYSTEM_OPENTHREADS=ON \ - -DUSE_SYSTEM_OSSIM=ON \ - -DUSE_SYSTEM_PNG=ON \ - -DUSE_SYSTEM_QT5=ON \ - -DUSE_SYSTEM_QWT=ON \ - -DUSE_SYSTEM_TINYXML=ON \ - -DUSE_SYSTEM_ZLIB=ON \ - -DUSE_SYSTEM_SWIG=OFF \ - && cd /work/otb/otb/Modules/Remote \ - && git clone https://github.com/remicres/otbtf.git \ - && cd /work/otb/build/OTB/build \ - && cmake /work/otb/otb \ - -DOTB_WRAP_PYTHON=ON \ - -DModule_Mosaic=ON \ - -DModule_OTBTensorflow=ON \ - -DOTB_USE_TENSORFLOW=ON \ - -Dopencv_INCLUDE_DIR=/usr/include \ - -DTENSORFLOW_CC_LIB=/work/tf/installdir/lib/libtensorflow_cc.so \ - -DTENSORFLOW_FRAMEWORK_LIB=/work/tf/installdir/lib/libtensorflow_framework.so \ - -Dtensorflow_include_dir=/work/tf/installdir/include/ \ - && cd /work/otb/build/ \ - && make -j $(grep -c ^processor /proc/cpuinfo) - -# 
---------------------------------------------------------------------------- -# Install GDAL -# ---------------------------------------------------------------------------- -RUN apt-get update -y \ - && apt -y install \ - software-properties-common \ - dirmngr \ - apt-transport-https \ - lsb-release \ - gdal-bin \ - && rm -rf /var/lib/apt/lists/* - -# ---------------------------------------------------------------------------- -# Add user otbuser # Replace 1001 with your user / group id -# ---------------------------------------------------------------------------- - -RUN export uid=1001 gid=1001 && \ - mkdir -p /home/otbuser && \ - echo "otbuser:x:${uid}:${gid}:otbuser,,,:/home/otbuser:/bin/bash" >> /etc/passwd && \ - echo "otbuser:x:${uid}:" >> /etc/group && \ - echo "otbuser ALL=(ALL) NOPASSWD: ALL" > /etc/sudoers.d/otbuser && \ - chmod 0440 /etc/sudoers.d/otbuser && \ - chown ${uid}:${gid} -R /home/otbuser - -USER /home/otbuser -ENV HOME /home/otbuser -ENV OTB_MAX_RAM_HINT=512 -ENV PATH "$PATH:/work/otb/superbuild_install/bin/" -ENV PYTHONPATH="/work/otb/superbuild_install/lib/otb/python:$PYTHONPATH" -ENV OTB_APPLICATION_PATH="/work/otb/superbuild_install/lib/otb/applications" -ENV LD_LIBRARY_PATH "$LD_LIBRARY_PATH:/work/otb/superbuild_install/lib/:/work/tf/installdir/lib/" -WORKDIR /home/otbuser - -# ---------------------------------------------------------------------------- -# Exemple : -# docker build --tag otbtf_image -# docker run -u otbuser -v $(pwd):/home/otbuser otbtf_image otbcli_ExtractROI -# docker run -u otbuser -ti -v /tmp/.X11-unix:/tmp/.X11-unix -e DISPLAY=$DISPLAY -v $(pwd):/home/otbuser otbtf_image otbgui_ExtractROI -# ---------------------------------------------------------------------------- diff --git a/tools/dockerfiles/r1.7/bionic.tf-r1.14.otb-7.0-GPU/Dockerfile b/tools/dockerfiles/r1.7/bionic.tf-r1.14.otb-7.0-GPU/Dockerfile deleted file mode 100644 index 618635ccd6d6d6be93d026773ae46dfaa869b09b..0000000000000000000000000000000000000000 --- a/tools/dockerfiles/r1.7/bionic.tf-r1.14.otb-7.0-GPU/Dockerfile +++ /dev/null @@ -1,242 +0,0 @@ -FROM nvidia/cuda:10.1-cudnn7-devel-ubuntu18.04 - -MAINTAINER Remi Cresson <remi.cresson[at]irstea[dot]fr> - -RUN apt-get update -y \ - && apt-get upgrade -y \ - && apt-get install -y --no-install-recommends \ - sudo \ - ca-certificates \ - curl \ - make \ - cmake \ - g++ \ - gcc \ - git \ - libtool \ - swig \ - xvfb \ - wget \ - autoconf \ - automake \ - pkg-config \ - zip \ - zlib1g-dev \ - unzip \ - && rm -rf /var/lib/apt/lists/* - -# ---------------------------------------------------------------------------- -# OTB and TensorFlow dependencies -# ---------------------------------------------------------------------------- -RUN apt-get update -y \ - && apt-get upgrade -y \ - && apt-get install -y --no-install-recommends \ - freeglut3-dev \ - libboost-date-time-dev \ - libboost-filesystem-dev \ - libboost-graph-dev \ - libboost-program-options-dev \ - libboost-system-dev \ - libboost-thread-dev \ - libcurl4-gnutls-dev \ - libexpat1-dev \ - libfftw3-dev \ - libgdal-dev \ - libgeotiff-dev \ - libglew-dev \ - libglfw3-dev \ - libgsl-dev \ - libinsighttoolkit4-dev \ - libkml-dev \ - libmuparser-dev \ - libmuparserx-dev \ - libopencv-core-dev \ - libopencv-ml-dev \ - libopenthreads-dev \ - libossim-dev \ - libpng-dev \ - libqt5opengl5-dev \ - libqwt-qt5-dev \ - libsvm-dev \ - libtinyxml-dev \ - qtbase5-dev \ - qttools5-dev \ - default-jdk \ - python3-pip \ - python3.6-dev \ - python3.6-gdal \ - python3-setuptools \ - libxmu-dev \ - 
libxi-dev \ - qttools5-dev-tools \ - bison \ - software-properties-common \ - dirmngr \ - apt-transport-https \ - lsb-release \ - gdal-bin \ - && rm -rf /var/lib/apt/lists/* - -# ---------------------------------------------------------------------------- -# Python packages -# ---------------------------------------------------------------------------- -RUN ln -s /usr/bin/python3 /usr/bin/python \ - && python3 -m pip install --upgrade pip \ - && python3 -m pip install pip six numpy wheel mock keras future - -# ---------------------------------------------------------------------------- -# Build TensorFlow -# ---------------------------------------------------------------------------- -RUN export TF_ROOT=/work/tf \ - && mkdir -p ${TF_ROOT}/bazel \ - && cd ${TF_ROOT}/bazel \ - && wget https://github.com/bazelbuild/bazel/releases/download/0.24.1/bazel-0.24.1-installer-linux-x86_64.sh \ - && chmod +x bazel-0.24.1-installer-linux-x86_64.sh \ - && ./bazel-0.24.1-installer-linux-x86_64.sh - -RUN export TF_ROOT=/work/tf \ - && export PATH="$PATH:$HOME/bin" \ - && cd $TF_ROOT \ - && git clone https://github.com/tensorflow/tensorflow.git \ - && cd tensorflow \ - && git checkout r1.14 \ - && export PYTHON_BIN_PATH=$(which python) \ - && export PYTHON_LIB_PATH="$($PYTHON_BIN_PATH -c 'import site; print(site.getsitepackages()[0])')" \ - && export PYTHONPATH=/usr/lib/ \ - && export PYTHON_ARG=/usr/lib/ \ - && export CUDA_TOOLKIT_PATH=/usr/local/cuda-10.1/ \ - && export CUDNN_INSTALL_PATH=/usr/ \ - && export TF_NEED_GCP=0 \ - && export TF_NEED_CUDA=1 \ - && export TF_CUDA_VERSION="$($CUDA_TOOLKIT_PATH/bin/nvcc --version | sed -n 's/^.*release \(.*\),.*/\1/p')" \ - && export TF_CUDA_COMPUTE_CAPABILITIES=6.1,5.2,3.5 \ - && export TF_NEED_HDFS=0 \ - && export TF_NEED_OPENCL=0 \ - && export TF_NEED_JEMALLOC=1 \ - && export TF_ENABLE_XLA=0 \ - && export TF_NEED_VERBS=0 \ - && export TF_CUDA_CLANG=0 \ - && export TF_CUDNN_VERSION="$(sed -n 's/^#define CUDNN_MAJOR\s*\(.*\).*/\1/p' $CUDNN_INSTALL_PATH/include/cudnn.h)" \ - && export TF_NEED_MKL=0 \ - && export TF_DOWNLOAD_MKL=0 \ - && export TF_NEED_AWS=0 \ - && export TF_NEED_MPI=0 \ - && export TF_NEED_GDR=0 \ - && export TF_NEED_S3=0 \ - && export TF_NEED_OPENCL_SYCL=0 \ - && export TF_SET_ANDROID_WORKSPACE=0 \ - && export TF_NEED_COMPUTECPP=0 \ - && export GCC_HOST_COMPILER_PATH=$(which gcc) \ - && export CC_OPT_FLAGS="-march=native" \ - && export TF_NEED_KAFKA=0 \ - && export TF_NEED_TENSORRT=0 \ - && export TF_NCCL_VERSION=2.4 \ - && export GCC_HOST_COMPILER_PATH=$(which gcc) \ - && export CC_OPT_FLAGS="-march=native" \ - && ./configure \ - && bazel build //tensorflow:libtensorflow_framework.so //tensorflow:libtensorflow_cc.so //tensorflow/tools/pip_package:build_pip_package - -RUN export TF_ROOT=/work/tf \ - && cd $TF_ROOT/tensorflow \ - && bazel-bin/tensorflow/tools/pip_package/build_pip_package /tmp/tensorflow_pkg \ - && pip3 install $(find /tmp/tensorflow_pkg/ -type f -iname "tensorflow*.whl") \ - && ./tensorflow/contrib/makefile/build_all_linux.sh \ - && mkdir -p /work/tf/installdir/lib \ - && mkdir -p /work/tf/installdir/include \ - && cp bazel-bin/tensorflow/libtensorflow_cc.so /work/tf/installdir/lib \ - && cp bazel-bin/tensorflow/libtensorflow_framework.so /work/tf/installdir/lib \ - && cp tensorflow/contrib/makefile/gen/protobuf/lib/libprotobuf.a /work/tf/installdir/lib \ - && cp tensorflow/contrib/makefile/downloads/nsync/builds/default.linux.c++11/*.a /work/tf/installdir/lib \ - && cp -r bazel-genfiles/* /work/tf/installdir/include/ \ - && cp 
-r tensorflow/cc /work/tf/installdir/include/tensorflow/ \ - && cp -r tensorflow/core /work/tf/installdir/include/tensorflow/ \ - && cp -r third_party /work/tf/installdir/include/ \ - && cp -r tensorflow/contrib/makefile/gen/protobuf/include/* /work/tf/installdir/include/ \ - && cp -r tensorflow/contrib/makefile/downloads/eigen/Eigen /work/tf/installdir/include/ \ - && cp -r tensorflow/contrib/makefile/downloads/eigen/unsupported /work/tf/installdir/include/ \ - && cp -r tensorflow/contrib/makefile/downloads/eigen/signature_of_eigen3_matrix_library /work/tf/installdir/include/ \ - && cd ${TF_ROOT}/tensorflow/tensorflow/contrib/makefile/downloads/absl \ - && find absl/ -name '*.h' -exec cp --parents \{\} /work/tf/installdir/include/ \; \ - && find absl/ -name '*.inc' -exec cp --parents \{\} /work/tf/installdir/include/ \; \ - && find /work/tf/installdir/ -name "*.cc" -type f -delete \ - && echo "Create symlinks for tensorflow libs" \ - && ln -s /work/tf/installdir/lib/libtensorflow_cc.so /work/tf/installdir/lib/libtensorflow_cc.so.1 \ - && ln -s /work/tf/installdir/lib/libtensorflow_framework.so /work/tf/installdir/lib/libtensorflow_framework.so.1 - -# ---------------------------------------------------------------------------- -# Build OTB: Stage 1 (clone) -# ---------------------------------------------------------------------------- -RUN mkdir -p /work/otb \ - && cd /work/otb \ - && git clone https://gitlab.orfeo-toolbox.org/orfeotoolbox/otb.git otb \ - && cd otb \ - && git checkout release-7.0 - -# ---------------------------------------------------------------------------- -# Build OTB: Stage 2 (superbuild) -# ---------------------------------------------------------------------------- -RUN mkdir -p /work/otb/build \ - && cd /work/otb/build \ - && cmake /work/otb/otb/SuperBuild \ - -DUSE_SYSTEM_BOOST=ON \ - -DUSE_SYSTEM_CURL=ON \ - -DUSE_SYSTEM_EXPAT=ON \ - -DUSE_SYSTEM_FFTW=ON \ - -DUSE_SYSTEM_FREETYPE=ON \ - -DUSE_SYSTEM_GDAL=ON \ - -DUSE_SYSTEM_GEOS=ON \ - -DUSE_SYSTEM_GEOTIFF=ON \ - -DUSE_SYSTEM_GLEW=ON \ - -DUSE_SYSTEM_GLFW=ON \ - -DUSE_SYSTEM_GLUT=ON \ - -DUSE_SYSTEM_GSL=ON \ - -DUSE_SYSTEM_ITK=ON \ - -DUSE_SYSTEM_LIBKML=ON \ - -DUSE_SYSTEM_LIBSVM=ON \ - -DUSE_SYSTEM_MUPARSER=ON \ - -DUSE_SYSTEM_MUPARSERX=ON \ - -DUSE_SYSTEM_OPENCV=ON \ - -DUSE_SYSTEM_OPENTHREADS=ON \ - -DUSE_SYSTEM_OSSIM=ON \ - -DUSE_SYSTEM_PNG=ON \ - -DUSE_SYSTEM_QT5=ON \ - -DUSE_SYSTEM_QWT=ON \ - -DUSE_SYSTEM_TINYXML=ON \ - -DUSE_SYSTEM_ZLIB=ON \ - -DUSE_SYSTEM_SWIG=OFF \ - -DOTB_WRAP_PYTHON=OFF \ - && make -j $(grep -c ^processor /proc/cpuinfo) - -# ---------------------------------------------------------------------------- -# Build OTB: Stage 3 (bindings) -# ---------------------------------------------------------------------------- -RUN cd /work/otb/otb/Modules/Remote \ - && git clone https://github.com/remicres/otbtf.git \ - && cd /work/otb/build/OTB/build \ - && cmake /work/otb/otb \ - -DOTB_WRAP_PYTHON=ON \ - -DPYTHON_EXECUTABLE=/usr/bin/python3.6 \ - -Dopencv_INCLUDE_DIR=/usr/include \ - -DModule_OTBTensorflow=ON \ - -DOTB_USE_TENSORFLOW=ON \ - -DTENSORFLOW_CC_LIB=/work/tf/installdir/lib/libtensorflow_cc.so \ - -DTENSORFLOW_FRAMEWORK_LIB=/work/tf/installdir/lib/libtensorflow_framework.so \ - -Dtensorflow_include_dir=/work/tf/installdir/include/ \ - && cd /work/otb/build/ \ - && make -j $(grep -c ^processor /proc/cpuinfo) - -# ---------------------------------------------------------------------------- -# Add important environment variables -# 
---------------------------------------------------------------------------- -ENV PATH="$PATH:/work/otb/superbuild_install/bin/" -ENV PYTHONPATH="/work/otb/superbuild_install/lib/otb/python:$PYTHONPATH" -ENV OTB_APPLICATION_PATH="/work/otb/superbuild_install/lib/otb/applications" -ENV LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/work/otb/superbuild_install/lib/:/work/tf/installdir/lib/" - -# ---------------------------------------------------------------------------- -# Exemple : -# docker build --tag otbtf_image -# docker run -v /path/to/host/:/path/to/mount/ otbtf_image otbcli_ExtractROI -# docker run -ti -v /tmp/.X11-unix:/tmp/.X11-unix -e DISPLAY=$DISPLAY otbtf_image otbgui_ExtractROI -# ---------------------------------------------------------------------------- - diff --git a/tools/dockerfiles/r1.7/bionic.tf-r1.14.otb-7.0/Dockerfile b/tools/dockerfiles/r1.7/bionic.tf-r1.14.otb-7.0/Dockerfile deleted file mode 100644 index 7995b0c9bf24a660577b425f2e0b7f961dc8e48e..0000000000000000000000000000000000000000 --- a/tools/dockerfiles/r1.7/bionic.tf-r1.14.otb-7.0/Dockerfile +++ /dev/null @@ -1,209 +0,0 @@ -FROM ubuntu:18.04 - -MAINTAINER Remi Cresson <remi.cresson[at]irstea[dot]fr> - -RUN apt-get update -y \ - && apt-get upgrade -y \ - && apt-get install -y --no-install-recommends \ - sudo \ - ca-certificates \ - curl \ - make \ - cmake \ - g++ \ - gcc \ - git \ - libtool \ - swig \ - xvfb \ - wget \ - autoconf \ - automake \ - pkg-config \ - zip \ - zlib1g-dev \ - unzip \ - && rm -rf /var/lib/apt/lists/* - -# ---------------------------------------------------------------------------- -# OTB and TensorFlow dependencies -# ---------------------------------------------------------------------------- -RUN apt-get update -y \ - && apt-get upgrade -y \ - && apt-get install -y --no-install-recommends \ - freeglut3-dev \ - libboost-date-time-dev \ - libboost-filesystem-dev \ - libboost-graph-dev \ - libboost-program-options-dev \ - libboost-system-dev \ - libboost-thread-dev \ - libcurl4-gnutls-dev \ - libexpat1-dev \ - libfftw3-dev \ - libgdal-dev \ - libgeotiff-dev \ - libglew-dev \ - libglfw3-dev \ - libgsl-dev \ - libinsighttoolkit4-dev \ - libkml-dev \ - libmuparser-dev \ - libmuparserx-dev \ - libopencv-core-dev \ - libopencv-ml-dev \ - libopenthreads-dev \ - libossim-dev \ - libpng-dev \ - libqt5opengl5-dev \ - libqwt-qt5-dev \ - libsvm-dev \ - libtinyxml-dev \ - qtbase5-dev \ - qttools5-dev \ - default-jdk \ - python3-pip \ - python3.6-dev \ - python3.6-gdal \ - python3-setuptools \ - libxmu-dev \ - libxi-dev \ - qttools5-dev-tools \ - bison \ - software-properties-common \ - dirmngr \ - apt-transport-https \ - lsb-release \ - gdal-bin \ - && rm -rf /var/lib/apt/lists/* - -# ---------------------------------------------------------------------------- -# Python packages -# ---------------------------------------------------------------------------- -RUN ln -s /usr/bin/python3 /usr/bin/python \ - && python3 -m pip install --upgrade pip \ - && python3 -m pip install pip six numpy wheel mock keras future - -# ---------------------------------------------------------------------------- -# Build TensorFlow -# ---------------------------------------------------------------------------- -RUN export TF_ROOT=/work/tf \ - && mkdir -p ${TF_ROOT}/bazel \ - && cd ${TF_ROOT}/bazel \ - && wget https://github.com/bazelbuild/bazel/releases/download/0.24.1/bazel-0.24.1-installer-linux-x86_64.sh \ - && chmod +x bazel-0.24.1-installer-linux-x86_64.sh \ - && ./bazel-0.24.1-installer-linux-x86_64.sh - -RUN export 
TF_ROOT=/work/tf \ - && export PATH="$PATH:$HOME/bin" \ - && cd $TF_ROOT \ - && git clone https://github.com/tensorflow/tensorflow.git \ - && cd tensorflow \ - && git checkout r1.14 \ - && echo "\n\n\n\n\n\n\n\n\n" | ./configure \ - && bazel build //tensorflow:libtensorflow_framework.so //tensorflow:libtensorflow_cc.so //tensorflow/tools/pip_package:build_pip_package - -RUN export TF_ROOT=/work/tf \ - && cd $TF_ROOT/tensorflow \ - && bazel-bin/tensorflow/tools/pip_package/build_pip_package /tmp/tensorflow_pkg \ - && pip3 install $(find /tmp/tensorflow_pkg/ -type f -iname "tensorflow*.whl") \ - && ./tensorflow/contrib/makefile/build_all_linux.sh \ - && mkdir -p /work/tf/installdir/lib \ - && mkdir -p /work/tf/installdir/include \ - && cp bazel-bin/tensorflow/libtensorflow_cc.so /work/tf/installdir/lib \ - && cp bazel-bin/tensorflow/libtensorflow_framework.so /work/tf/installdir/lib \ - && cp tensorflow/contrib/makefile/gen/protobuf/lib/libprotobuf.a /work/tf/installdir/lib \ - && cp tensorflow/contrib/makefile/downloads/nsync/builds/default.linux.c++11/*.a /work/tf/installdir/lib \ - && cp -r bazel-genfiles/* /work/tf/installdir/include/ \ - && cp -r tensorflow/cc /work/tf/installdir/include/tensorflow/ \ - && cp -r tensorflow/core /work/tf/installdir/include/tensorflow/ \ - && cp -r third_party /work/tf/installdir/include/ \ - && cp -r tensorflow/contrib/makefile/gen/protobuf/include/* /work/tf/installdir/include/ \ - && cp -r tensorflow/contrib/makefile/downloads/eigen/Eigen /work/tf/installdir/include/ \ - && cp -r tensorflow/contrib/makefile/downloads/eigen/unsupported /work/tf/installdir/include/ \ - && cp -r tensorflow/contrib/makefile/downloads/eigen/signature_of_eigen3_matrix_library /work/tf/installdir/include/ \ - && cd ${TF_ROOT}/tensorflow/tensorflow/contrib/makefile/downloads/absl \ - && find absl/ -name '*.h' -exec cp --parents \{\} /work/tf/installdir/include/ \; \ - && find absl/ -name '*.inc' -exec cp --parents \{\} /work/tf/installdir/include/ \; \ - && find /work/tf/installdir/ -name "*.cc" -type f -delete - -RUN echo "Create symlinks for tensorflow libs" \ - && ln -s /work/tf/installdir/lib/libtensorflow_cc.so /work/tf/installdir/lib/libtensorflow_cc.so.1 \ - && ln -s /work/tf/installdir/lib/libtensorflow_framework.so /work/tf/installdir/lib/libtensorflow_framework.so.1 - -# ---------------------------------------------------------------------------- -# Build OTB: Stage 1 (clone) -# ---------------------------------------------------------------------------- -RUN mkdir -p /work/otb \ - && cd /work/otb \ - && git clone https://gitlab.orfeo-toolbox.org/orfeotoolbox/otb.git otb \ - && cd otb \ - && git checkout release-7.0 - -# ---------------------------------------------------------------------------- -# Build OTB: Stage 2 (superbuild) -# ---------------------------------------------------------------------------- -RUN mkdir -p /work/otb/build \ - && cd /work/otb/build \ - && cmake /work/otb/otb/SuperBuild \ - -DUSE_SYSTEM_BOOST=ON \ - -DUSE_SYSTEM_CURL=ON \ - -DUSE_SYSTEM_EXPAT=ON \ - -DUSE_SYSTEM_FFTW=ON \ - -DUSE_SYSTEM_FREETYPE=ON \ - -DUSE_SYSTEM_GDAL=ON \ - -DUSE_SYSTEM_GEOS=ON \ - -DUSE_SYSTEM_GEOTIFF=ON \ - -DUSE_SYSTEM_GLEW=ON \ - -DUSE_SYSTEM_GLFW=ON \ - -DUSE_SYSTEM_GLUT=ON \ - -DUSE_SYSTEM_GSL=ON \ - -DUSE_SYSTEM_ITK=ON \ - -DUSE_SYSTEM_LIBKML=ON \ - -DUSE_SYSTEM_LIBSVM=ON \ - -DUSE_SYSTEM_MUPARSER=ON \ - -DUSE_SYSTEM_MUPARSERX=ON \ - -DUSE_SYSTEM_OPENCV=ON \ - -DUSE_SYSTEM_OPENTHREADS=ON \ - -DUSE_SYSTEM_OSSIM=ON \ - -DUSE_SYSTEM_PNG=ON \ - 
-DUSE_SYSTEM_QT5=ON \ - -DUSE_SYSTEM_QWT=ON \ - -DUSE_SYSTEM_TINYXML=ON \ - -DUSE_SYSTEM_ZLIB=ON \ - -DUSE_SYSTEM_SWIG=OFF \ - -DOTB_WRAP_PYTHON=OFF \ - && make -j $(grep -c ^processor /proc/cpuinfo) - -# ---------------------------------------------------------------------------- -# Build OTB: Stage 3 (bindings) -# ---------------------------------------------------------------------------- -RUN cd /work/otb/otb/Modules/Remote \ - && git clone https://github.com/remicres/otbtf.git \ - && cd /work/otb/build/OTB/build \ - && cmake /work/otb/otb \ - -DOTB_WRAP_PYTHON=ON \ - -DPYTHON_EXECUTABLE=/usr/bin/python3.6 \ - -Dopencv_INCLUDE_DIR=/usr/include \ - -DModule_OTBTensorflow=ON \ - -DOTB_USE_TENSORFLOW=ON \ - -DTENSORFLOW_CC_LIB=/work/tf/installdir/lib/libtensorflow_cc.so \ - -DTENSORFLOW_FRAMEWORK_LIB=/work/tf/installdir/lib/libtensorflow_framework.so \ - -Dtensorflow_include_dir=/work/tf/installdir/include/ \ - && cd /work/otb/build/ \ - && make -j $(grep -c ^processor /proc/cpuinfo) - -# ---------------------------------------------------------------------------- -# Add important environment variables -# ---------------------------------------------------------------------------- -ENV PATH="$PATH:/work/otb/superbuild_install/bin/" -ENV PYTHONPATH="/work/otb/superbuild_install/lib/otb/python:$PYTHONPATH" -ENV OTB_APPLICATION_PATH="/work/otb/superbuild_install/lib/otb/applications" -ENV LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/work/otb/superbuild_install/lib/:/work/tf/installdir/lib/" - -# ---------------------------------------------------------------------------- -# Exemple : -# docker build --tag otbtf_image -# docker run -v /path/to/host/:/path/to/mount/ otbtf_image otbcli_ExtractROI -# docker run -ti -v /tmp/.X11-unix:/tmp/.X11-unix -e DISPLAY=$DISPLAY otbtf_image otbgui_ExtractROI -# ---------------------------------------------------------------------------- diff --git a/tools/dockerfiles/r2.0/bionic.tf-r2.1.otb-7.1-GPU/Dockerfile b/tools/dockerfiles/r2.0/bionic.tf-r2.1.otb-7.1-GPU/Dockerfile deleted file mode 100644 index eced9a133c421ec3cf0208793dd3bda23659a93d..0000000000000000000000000000000000000000 --- a/tools/dockerfiles/r2.0/bionic.tf-r2.1.otb-7.1-GPU/Dockerfile +++ /dev/null @@ -1,260 +0,0 @@ -FROM nvidia/cuda:10.1-cudnn7-devel-ubuntu18.04 - -MAINTAINER Remi Cresson <remi.cresson[at]irstea[dot]fr> - -RUN apt-get update -y \ - && apt-get upgrade -y \ - && apt-get install -y --no-install-recommends \ - sudo \ - ca-certificates \ - curl \ - make \ - cmake \ - g++ \ - gcc \ - git \ - libtool \ - swig \ - xvfb \ - wget \ - autoconf \ - automake \ - pkg-config \ - zip \ - zlib1g-dev \ - unzip \ - && rm -rf /var/lib/apt/lists/* - -# ---------------------------------------------------------------------------- -# OTB and TensorFlow dependencies -# ---------------------------------------------------------------------------- -RUN apt-get update -y \ - && apt-get upgrade -y \ - && apt-get install -y --no-install-recommends \ - freeglut3-dev \ - libboost-date-time-dev \ - libboost-filesystem-dev \ - libboost-graph-dev \ - libboost-program-options-dev \ - libboost-system-dev \ - libboost-thread-dev \ - libcurl4-gnutls-dev \ - libexpat1-dev \ - libfftw3-dev \ - libgdal-dev \ - libgeotiff-dev \ - libglew-dev \ - libglfw3-dev \ - libgsl-dev \ - libinsighttoolkit4-dev \ - libkml-dev \ - libmuparser-dev \ - libmuparserx-dev \ - libopencv-core-dev \ - libopencv-ml-dev \ - libopenthreads-dev \ - libossim-dev \ - libpng-dev \ - libqt5opengl5-dev \ - libqwt-qt5-dev \ - libsvm-dev \ - libtinyxml-dev \ - 
qtbase5-dev \ - qttools5-dev \ - default-jdk \ - python3-pip \ - python3.6-dev \ - python3.6-gdal \ - python3-setuptools \ - libxmu-dev \ - libxi-dev \ - qttools5-dev-tools \ - bison \ - software-properties-common \ - dirmngr \ - apt-transport-https \ - lsb-release \ - gdal-bin \ - && rm -rf /var/lib/apt/lists/* - -# ---------------------------------------------------------------------------- -# Python packages -# ---------------------------------------------------------------------------- -RUN ln -s /usr/bin/python3 /usr/bin/python \ - && python3 -m pip install --upgrade pip \ - && python3 -m pip install pip six numpy wheel mock keras future setuptools - -# ---------------------------------------------------------------------------- -# Build TensorFlow -# ---------------------------------------------------------------------------- -RUN export TF_ROOT=/work/tf \ - && mkdir -p ${TF_ROOT}/bazel \ - && cd ${TF_ROOT}/bazel \ - && wget https://github.com/bazelbuild/bazel/releases/download/0.29.1/bazel-0.29.1-installer-linux-x86_64.sh \ - && chmod +x bazel-0.29.1-installer-linux-x86_64.sh \ - && ./bazel-0.29.1-installer-linux-x86_64.sh - -RUN export TF_ROOT=/work/tf \ - && export PATH="$PATH:$HOME/bin" \ - && cd $TF_ROOT \ - && git clone https://github.com/tensorflow/tensorflow.git \ - && cd tensorflow \ - && git checkout r2.1 \ - && export PYTHON_BIN_PATH=$(which python) \ - && export PYTHON_LIB_PATH="$($PYTHON_BIN_PATH -c 'import site; print(site.getsitepackages()[0])')" \ - && export PYTHONPATH=/usr/lib/ \ - && export PYTHON_ARG=/usr/lib/ \ - && export CUDA_TOOLKIT_PATH=/usr/local/cuda-10.1/ \ - && export CUDNN_INSTALL_PATH=/usr/ \ - && export TF_NEED_GCP=0 \ - && export TF_NEED_CUDA=1 \ - && export TF_CUDA_VERSION="$($CUDA_TOOLKIT_PATH/bin/nvcc --version | sed -n 's/^.*release \(.*\),.*/\1/p')" \ - && export TF_CUDA_COMPUTE_CAPABILITIES=6.1,5.2,3.5 \ - && export TF_NEED_HDFS=0 \ - && export TF_NEED_OPENCL=0 \ - && export TF_NEED_JEMALLOC=1 \ - && export TF_ENABLE_XLA=0 \ - && export TF_NEED_VERBS=0 \ - && export TF_CUDA_CLANG=0 \ - && export TF_CUDNN_VERSION="$(sed -n 's/^#define CUDNN_MAJOR\s*\(.*\).*/\1/p' $CUDNN_INSTALL_PATH/include/cudnn.h)" \ - && export TF_NEED_MKL=0 \ - && export TF_DOWNLOAD_MKL=0 \ - && export TF_NEED_AWS=0 \ - && export TF_NEED_MPI=0 \ - && export TF_NEED_GDR=0 \ - && export TF_NEED_S3=0 \ - && export TF_NEED_OPENCL_SYCL=0 \ - && export TF_SET_ANDROID_WORKSPACE=0 \ - && export TF_NEED_COMPUTECPP=0 \ - && export GCC_HOST_COMPILER_PATH=$(which gcc) \ - && export CC_OPT_FLAGS="-march=native" \ - && export TF_NEED_KAFKA=0 \ - && export TF_NEED_TENSORRT=0 \ - && export TF_NCCL_VERSION=2.4 \ - && export GCC_HOST_COMPILER_PATH=$(which gcc) \ - && export CC_OPT_FLAGS="-march=native" \ - && ./configure \ - && bazel build //tensorflow:libtensorflow_framework.so //tensorflow:libtensorflow_cc.so //tensorflow/tools/pip_package:build_pip_package --noincompatible_do_not_split_linking_cmdline - -# ---------------------------------------------------------------------------- -# Build protobuf -# ---------------------------------------------------------------------------- -RUN export TF_ROOT=/work/tf \ - && mkdir -p $TF_ROOT/installdir \ - && cd $TF_ROOT \ - && wget https://github.com/google/protobuf/releases/download/v3.8.0/protobuf-cpp-3.8.0.tar.gz \ - && tar -xvf protobuf-cpp-3.8.0.tar.gz \ - && cd protobuf-3.8.0 \ - && ./configure --prefix=/work/tf/installdir/ \ - && make install -j $(grep -c ^processor /proc/cpuinfo) - -# 
---------------------------------------------------------------------------- -# Prepare TF dependencies -# ---------------------------------------------------------------------------- -RUN export TF_ROOT=/work/tf \ - && cd $TF_ROOT/tensorflow \ - && bazel-bin/tensorflow/tools/pip_package/build_pip_package /tmp/tensorflow_pkg \ - && pip3 install $(find /tmp/tensorflow_pkg/ -type f -iname "tensorflow*.whl") \ - && ./tensorflow/lite/tools/make/download_dependencies.sh \ - && ./tensorflow/lite/tools/make/build_lib.sh \ - && mkdir -p /work/tf/installdir/lib \ - && mkdir -p /work/tf/installdir/include \ - && cp bazel-bin/tensorflow/libtensorflow_cc.so* /work/tf/installdir/lib \ - && cp bazel-bin/tensorflow/libtensorflow_framework.so* /work/tf/installdir/lib \ - && cp -r bazel-genfiles/* /work/tf/installdir/include \ - && cp -r tensorflow/cc /work/tf/installdir/include/tensorflow \ - && cp -r tensorflow/core /work/tf/installdir/include/tensorflow \ - && cp -r third_party /work/tf/installdir/include \ - && cp -r bazel-tensorflow/external/eigen_archive/unsupported /work/tf/installdir/include \ - && cp -r bazel-tensorflow/external/eigen_archive/Eigen /work/tf/installdir/include \ - && cp -r tensorflow/lite/tools/make/downloads/absl/absl /work/tf/installdir/include - -# ---------------------------------------------------------------------------- -# Build OTB: Stage 1 (clone) -# ---------------------------------------------------------------------------- -RUN mkdir -p /work/otb \ - && cd /work/otb \ - && git clone https://gitlab.orfeo-toolbox.org/orfeotoolbox/otb.git otb \ - && cd otb \ - && git checkout release-7.1 - -# ---------------------------------------------------------------------------- -# Build OTB: Stage 2 (superbuild) -# ---------------------------------------------------------------------------- -RUN mkdir -p /work/otb/build \ - && cd /work/otb/build \ - && cmake /work/otb/otb/SuperBuild \ - -DUSE_SYSTEM_BOOST=ON \ - -DUSE_SYSTEM_CURL=ON \ - -DUSE_SYSTEM_EXPAT=ON \ - -DUSE_SYSTEM_FFTW=ON \ - -DUSE_SYSTEM_FREETYPE=ON \ - -DUSE_SYSTEM_GDAL=ON \ - -DUSE_SYSTEM_GEOS=ON \ - -DUSE_SYSTEM_GEOTIFF=ON \ - -DUSE_SYSTEM_GLEW=ON \ - -DUSE_SYSTEM_GLFW=ON \ - -DUSE_SYSTEM_GLUT=ON \ - -DUSE_SYSTEM_GSL=ON \ - -DUSE_SYSTEM_ITK=ON \ - -DUSE_SYSTEM_LIBKML=ON \ - -DUSE_SYSTEM_LIBSVM=ON \ - -DUSE_SYSTEM_MUPARSER=ON \ - -DUSE_SYSTEM_MUPARSERX=ON \ - -DUSE_SYSTEM_OPENCV=ON \ - -DUSE_SYSTEM_OPENTHREADS=ON \ - -DUSE_SYSTEM_OSSIM=ON \ - -DUSE_SYSTEM_PNG=ON \ - -DUSE_SYSTEM_QT5=ON \ - -DUSE_SYSTEM_QWT=ON \ - -DUSE_SYSTEM_TINYXML=ON \ - -DUSE_SYSTEM_ZLIB=ON \ - -DUSE_SYSTEM_SWIG=OFF \ - -DOTB_WRAP_PYTHON=OFF \ - && make -j $(grep -c ^processor /proc/cpuinfo) - -# ---------------------------------------------------------------------------- -# Build OTB: Stage 3 (bindings) -# ---------------------------------------------------------------------------- -RUN cd /work/otb/otb/Modules/Remote \ - && git clone -b release-2.0-rc2 https://github.com/remicres/otbtf.git \ - && cd /work/otb/build/OTB/build \ - && cmake /work/otb/otb \ - -DOTB_WRAP_PYTHON=ON \ - -DPYTHON_EXECUTABLE=/usr/bin/python3.6 \ - -Dopencv_INCLUDE_DIR=/usr/include \ - -DModule_OTBTensorflow=ON \ - -DOTB_USE_TENSORFLOW=ON \ - -DTENSORFLOW_CC_LIB=/work/tf/installdir/lib/libtensorflow_cc.so \ - -DTENSORFLOW_FRAMEWORK_LIB=/work/tf/installdir/lib/libtensorflow_framework.so \ - -Dtensorflow_include_dir=/work/tf/installdir/include/ \ - && cd /work/otb/build/ \ - && make -j $(grep -c ^processor /proc/cpuinfo) - -# 
---------------------------------------------------------------------------- -# Add important environment variables -# ---------------------------------------------------------------------------- -ENV PATH="$PATH:/work/otb/superbuild_install/bin/" -ENV PYTHONPATH="/work/otb/superbuild_install/lib/otb/python:/work/otb/otb/Modules/Remote/otbtf/python:$PYTHONPATH" -ENV OTB_APPLICATION_PATH="/work/otb/superbuild_install/lib/otb/applications" -ENV LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/work/otb/superbuild_install/lib/:/work/tf/installdir/lib/" - -# ---------------------------------------------------------------------------- -# Clean -# ---------------------------------------------------------------------------- -RUN rm -rf /tmp/* /root/.cache && apt-get clean - -# ---------------------------------------------------------------------------- -# Add one user -# ---------------------------------------------------------------------------- -RUN useradd -s /bin/bash -m otbuser -USER otbuser -WORKDIR /home/otbuser - -# ---------------------------------------------------------------------------- -# Exemple : -# docker build --tag otbtf_image -# docker run -v /path/to/host/:/path/to/mount/ otbtf_image otbcli_ExtractROI -# docker run -ti -v /tmp/.X11-unix:/tmp/.X11-unix -e DISPLAY=$DISPLAY otbtf_image otbgui_ExtractROI -# ---------------------------------------------------------------------------- - diff --git a/tools/dockerfiles/r2.0/bionic.tf-r2.1.otb-7.1-GPU/bionic.tf-r2.1.otb-7.1-GPU-WSL2/Dockerfile b/tools/dockerfiles/r2.0/bionic.tf-r2.1.otb-7.1-GPU/bionic.tf-r2.1.otb-7.1-GPU-WSL2/Dockerfile deleted file mode 100644 index dc6b63c9e962925482608eef62b5ea773daa6864..0000000000000000000000000000000000000000 --- a/tools/dockerfiles/r2.0/bionic.tf-r2.1.otb-7.1-GPU/bionic.tf-r2.1.otb-7.1-GPU-WSL2/Dockerfile +++ /dev/null @@ -1,261 +0,0 @@ -FROM nvidia/cuda:10.1-cudnn7-devel-ubuntu18.04 - -MAINTAINER Remi Cresson <remi.cresson[at]irstea[dot]fr> - -RUN apt-get update -y \ - && apt-get upgrade -y \ - && apt-get install -y --no-install-recommends \ - sudo \ - ca-certificates \ - curl \ - make \ - cmake \ - g++ \ - gcc \ - git \ - libtool \ - swig \ - xvfb \ - wget \ - autoconf \ - automake \ - pkg-config \ - zip \ - zlib1g-dev \ - unzip \ - && rm -rf /var/lib/apt/lists/* - -# ---------------------------------------------------------------------------- -# OTB and TensorFlow dependencies -# ---------------------------------------------------------------------------- -RUN apt-get update -y \ - && apt-get upgrade -y \ - && apt-get install -y --no-install-recommends \ - freeglut3-dev \ - libboost-date-time-dev \ - libboost-filesystem-dev \ - libboost-graph-dev \ - libboost-program-options-dev \ - libboost-system-dev \ - libboost-thread-dev \ - libcurl4-gnutls-dev \ - libexpat1-dev \ - libfftw3-dev \ - libgdal-dev \ - libgeotiff-dev \ - libglew-dev \ - libglfw3-dev \ - libgsl-dev \ - libinsighttoolkit4-dev \ - libkml-dev \ - libmuparser-dev \ - libmuparserx-dev \ - libopencv-core-dev \ - libopencv-ml-dev \ - libopenthreads-dev \ - libossim-dev \ - libpng-dev \ - libqt5opengl5-dev \ - libqwt-qt5-dev \ - libsvm-dev \ - libtinyxml-dev \ - qtbase5-dev \ - qttools5-dev \ - default-jdk \ - python3-pip \ - python3.6-dev \ - python3.6-gdal \ - python3-setuptools \ - libxmu-dev \ - libxi-dev \ - qttools5-dev-tools \ - bison \ - software-properties-common \ - dirmngr \ - apt-transport-https \ - lsb-release \ - gdal-bin \ - && rm -rf /var/lib/apt/lists/* - -# 
---------------------------------------------------------------------------- -# Python packages -# ---------------------------------------------------------------------------- -RUN ln -s /usr/bin/python3 /usr/bin/python \ - && python3 -m pip install --upgrade pip \ - && python3 -m pip install pip six numpy wheel mock keras keras_applications keras_preprocessing future setuptools - -# ---------------------------------------------------------------------------- -# Build TensorFlow -# ---------------------------------------------------------------------------- -RUN export TF_ROOT=/work/tf \ - && mkdir -p ${TF_ROOT}/bazel \ - && cd ${TF_ROOT}/bazel \ - && wget https://github.com/bazelbuild/bazel/releases/download/0.29.1/bazel-0.29.1-installer-linux-x86_64.sh \ - && chmod +x bazel-0.29.1-installer-linux-x86_64.sh \ - && ./bazel-0.29.1-installer-linux-x86_64.sh - -RUN export TF_ROOT=/work/tf \ - && export PATH="$PATH:$HOME/bin" \ - && cd $TF_ROOT \ - && git clone https://github.com/tensorflow/tensorflow.git \ - && cd tensorflow \ - && git checkout r2.1 \ - && export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/usr/local/cuda/lib64/:/usr/local/cuda/lib64/stubs/" \ - && export PYTHON_BIN_PATH=$(which python) \ - && export PYTHON_LIB_PATH="$($PYTHON_BIN_PATH -c 'import site; print(site.getsitepackages()[0])')" \ - && export PYTHONPATH=/usr/lib/ \ - && export PYTHON_ARG=/usr/lib/ \ - && export CUDA_TOOLKIT_PATH=/usr/local/cuda-10.1/ \ - && export CUDNN_INSTALL_PATH=/usr/ \ - && export TF_NEED_GCP=0 \ - && export TF_NEED_CUDA=1 \ - && export TF_CUDA_VERSION="$($CUDA_TOOLKIT_PATH/bin/nvcc --version | sed -n 's/^.*release \(.*\),.*/\1/p')" \ - && export TF_CUDA_COMPUTE_CAPABILITIES=7.5,6.1,5.2 \ - && export TF_NEED_HDFS=0 \ - && export TF_NEED_OPENCL=0 \ - && export TF_NEED_JEMALLOC=1 \ - && export TF_ENABLE_XLA=1 \ - && export TF_NEED_VERBS=0 \ - && export TF_CUDA_CLANG=0 \ - && export TF_CUDNN_VERSION="$(sed -n 's/^#define CUDNN_MAJOR\s*\(.*\).*/\1/p' $CUDNN_INSTALL_PATH/include/cudnn.h)" \ - && export TF_NEED_MKL=0 \ - && export TF_DOWNLOAD_MKL=0 \ - && export TF_NEED_AWS=0 \ - && export TF_NEED_MPI=0 \ - && export TF_NEED_GDR=0 \ - && export TF_NEED_S3=0 \ - && export TF_NEED_OPENCL_SYCL=0 \ - && export TF_SET_ANDROID_WORKSPACE=0 \ - && export TF_NEED_COMPUTECPP=0 \ - && export GCC_HOST_COMPILER_PATH=$(which gcc) \ - && export CC_OPT_FLAGS="-march=native" \ - && export TF_NEED_KAFKA=0 \ - && export TF_NEED_TENSORRT=0 \ - && export TF_NCCL_VERSION=2.4 \ - && export GCC_HOST_COMPILER_PATH=$(which gcc) \ - && export CC_OPT_FLAGS="-march=native" \ - && ./configure \ - && bazel build //tensorflow:libtensorflow_framework.so //tensorflow:libtensorflow_cc.so //tensorflow/tools/pip_package:build_pip_package --noincompatible_do_not_split_linking_cmdline - -# ---------------------------------------------------------------------------- -# Build protobuf -# ---------------------------------------------------------------------------- -RUN export TF_ROOT=/work/tf \ - && mkdir -p $TF_ROOT/installdir \ - && cd $TF_ROOT \ - && wget https://github.com/google/protobuf/releases/download/v3.8.0/protobuf-cpp-3.8.0.tar.gz \ - && tar -xvf protobuf-cpp-3.8.0.tar.gz \ - && cd protobuf-3.8.0 \ - && ./configure --prefix=/work/tf/installdir/ \ - && make install -j $(grep -c ^processor /proc/cpuinfo) - -# ---------------------------------------------------------------------------- -# Prepare TF dependencies -# ---------------------------------------------------------------------------- -RUN export TF_ROOT=/work/tf \ - && cd 
$TF_ROOT/tensorflow \ - && bazel-bin/tensorflow/tools/pip_package/build_pip_package /tmp/tensorflow_pkg \ - && pip3 install $(find /tmp/tensorflow_pkg/ -type f -iname "tensorflow*.whl") \ - && ./tensorflow/lite/tools/make/download_dependencies.sh \ - && ./tensorflow/lite/tools/make/build_lib.sh \ - && mkdir -p /work/tf/installdir/lib \ - && mkdir -p /work/tf/installdir/include \ - && cp bazel-bin/tensorflow/libtensorflow_cc.so* /work/tf/installdir/lib \ - && cp bazel-bin/tensorflow/libtensorflow_framework.so* /work/tf/installdir/lib \ - && cp -r bazel-genfiles/* /work/tf/installdir/include \ - && cp -r tensorflow/cc /work/tf/installdir/include/tensorflow \ - && cp -r tensorflow/core /work/tf/installdir/include/tensorflow \ - && cp -r third_party /work/tf/installdir/include \ - && cp -r bazel-tensorflow/external/eigen_archive/unsupported /work/tf/installdir/include \ - && cp -r bazel-tensorflow/external/eigen_archive/Eigen /work/tf/installdir/include \ - && cp -r tensorflow/lite/tools/make/downloads/absl/absl /work/tf/installdir/include - -# ---------------------------------------------------------------------------- -# Build OTB: Stage 1 (clone) -# ---------------------------------------------------------------------------- -RUN mkdir -p /work/otb \ - && cd /work/otb \ - && git clone https://gitlab.orfeo-toolbox.org/orfeotoolbox/otb.git otb \ - && cd otb \ - && git checkout release-7.1 - -# ---------------------------------------------------------------------------- -# Build OTB: Stage 2 (superbuild) -# ---------------------------------------------------------------------------- -RUN mkdir -p /work/otb/build \ - && cd /work/otb/build \ - && cmake /work/otb/otb/SuperBuild \ - -DUSE_SYSTEM_BOOST=ON \ - -DUSE_SYSTEM_CURL=ON \ - -DUSE_SYSTEM_EXPAT=ON \ - -DUSE_SYSTEM_FFTW=ON \ - -DUSE_SYSTEM_FREETYPE=ON \ - -DUSE_SYSTEM_GDAL=ON \ - -DUSE_SYSTEM_GEOS=ON \ - -DUSE_SYSTEM_GEOTIFF=ON \ - -DUSE_SYSTEM_GLEW=ON \ - -DUSE_SYSTEM_GLFW=ON \ - -DUSE_SYSTEM_GLUT=ON \ - -DUSE_SYSTEM_GSL=ON \ - -DUSE_SYSTEM_ITK=ON \ - -DUSE_SYSTEM_LIBKML=ON \ - -DUSE_SYSTEM_LIBSVM=ON \ - -DUSE_SYSTEM_MUPARSER=ON \ - -DUSE_SYSTEM_MUPARSERX=ON \ - -DUSE_SYSTEM_OPENCV=ON \ - -DUSE_SYSTEM_OPENTHREADS=ON \ - -DUSE_SYSTEM_OSSIM=ON \ - -DUSE_SYSTEM_PNG=ON \ - -DUSE_SYSTEM_QT5=ON \ - -DUSE_SYSTEM_QWT=ON \ - -DUSE_SYSTEM_TINYXML=ON \ - -DUSE_SYSTEM_ZLIB=ON \ - -DUSE_SYSTEM_SWIG=OFF \ - -DOTB_WRAP_PYTHON=OFF \ - && make -j $(grep -c ^processor /proc/cpuinfo) - -# ---------------------------------------------------------------------------- -# Build OTB: Stage 3 (bindings) -# ---------------------------------------------------------------------------- -RUN cd /work/otb/otb/Modules/Remote \ - && git clone -b release-2.0-rc2 https://github.com/remicres/otbtf.git \ - && cd /work/otb/build/OTB/build \ - && cmake /work/otb/otb \ - -DOTB_WRAP_PYTHON=ON \ - -DPYTHON_EXECUTABLE=/usr/bin/python3.6 \ - -Dopencv_INCLUDE_DIR=/usr/include \ - -DModule_OTBTensorflow=ON \ - -DOTB_USE_TENSORFLOW=ON \ - -DTENSORFLOW_CC_LIB=/work/tf/installdir/lib/libtensorflow_cc.so \ - -DTENSORFLOW_FRAMEWORK_LIB=/work/tf/installdir/lib/libtensorflow_framework.so \ - -Dtensorflow_include_dir=/work/tf/installdir/include/ \ - && cd /work/otb/build/ \ - && make -j $(grep -c ^processor /proc/cpuinfo) - -# ---------------------------------------------------------------------------- -# Add important environment variables -# ---------------------------------------------------------------------------- -ENV PATH="$PATH:/work/otb/superbuild_install/bin/" -ENV 
PYTHONPATH="/work/otb/superbuild_install/lib/otb/python:/work/otb/otb/Modules/Remote/otbtf/python:$PYTHONPATH" -ENV OTB_APPLICATION_PATH="/work/otb/superbuild_install/lib/otb/applications" -ENV LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/work/otb/superbuild_install/lib/:/work/tf/installdir/lib/" - -# ---------------------------------------------------------------------------- -# Clean -# ---------------------------------------------------------------------------- -RUN rm -rf /tmp/* /root/.cache && apt-get clean - -# ---------------------------------------------------------------------------- -# Add one user -# ---------------------------------------------------------------------------- -RUN useradd -s /bin/bash -m otbuser -USER otbuser -WORKDIR /home/otbuser - -# ---------------------------------------------------------------------------- -# Exemple : -# docker build --tag otbtf_image -# docker run -v /path/to/host/:/path/to/mount/ otbtf_image otbcli_ExtractROI -# docker run -ti -v /tmp/.X11-unix:/tmp/.X11-unix -e DISPLAY=$DISPLAY otbtf_image otbgui_ExtractROI -# ---------------------------------------------------------------------------- - diff --git a/tools/dockerfiles/r2.0/bionic.tf-r2.1.otb-7.1/Dockerfile b/tools/dockerfiles/r2.0/bionic.tf-r2.1.otb-7.1/Dockerfile deleted file mode 100644 index 5501a3a509a777e63d153683e7e82b62ed2035f2..0000000000000000000000000000000000000000 --- a/tools/dockerfiles/r2.0/bionic.tf-r2.1.otb-7.1/Dockerfile +++ /dev/null @@ -1,254 +0,0 @@ -FROM ubuntu:18.04 - -MAINTAINER Remi Cresson <remi.cresson[at]irstea[dot]fr> - -RUN apt-get update -y \ - && apt-get upgrade -y \ - && apt-get install -y --no-install-recommends \ - sudo \ - ca-certificates \ - curl \ - make \ - cmake \ - g++ \ - gcc \ - git \ - libtool \ - swig \ - xvfb \ - wget \ - autoconf \ - automake \ - pkg-config \ - zip \ - zlib1g-dev \ - unzip \ - && rm -rf /var/lib/apt/lists/* - -# ---------------------------------------------------------------------------- -# OTB and TensorFlow dependencies -# ---------------------------------------------------------------------------- -RUN apt-get update -y \ - && apt-get upgrade -y \ - && apt-get install -y --no-install-recommends \ - freeglut3-dev \ - libboost-date-time-dev \ - libboost-filesystem-dev \ - libboost-graph-dev \ - libboost-program-options-dev \ - libboost-system-dev \ - libboost-thread-dev \ - libcurl4-gnutls-dev \ - libexpat1-dev \ - libfftw3-dev \ - libgdal-dev \ - libgeotiff-dev \ - libglew-dev \ - libglfw3-dev \ - libgsl-dev \ - libinsighttoolkit4-dev \ - libkml-dev \ - libmuparser-dev \ - libmuparserx-dev \ - libopencv-core-dev \ - libopencv-ml-dev \ - libopenthreads-dev \ - libossim-dev \ - libpng-dev \ - libqt5opengl5-dev \ - libqwt-qt5-dev \ - libsvm-dev \ - libtinyxml-dev \ - qtbase5-dev \ - qttools5-dev \ - default-jdk \ - python3-pip \ - python3.6-dev \ - python3.6-gdal \ - python3-setuptools \ - libxmu-dev \ - libxi-dev \ - qttools5-dev-tools \ - bison \ - software-properties-common \ - dirmngr \ - apt-transport-https \ - lsb-release \ - gdal-bin \ - && rm -rf /var/lib/apt/lists/* - -# ---------------------------------------------------------------------------- -# Python packages -# ---------------------------------------------------------------------------- -RUN ln -s /usr/bin/python3 /usr/bin/python \ - && python3 -m pip install --upgrade pip \ - && python3 -m pip install pip six numpy wheel mock keras keras_applications keras_preprocessing future setuptools - -# ---------------------------------------------------------------------------- 
-# Build TensorFlow -# ---------------------------------------------------------------------------- -RUN export TF_ROOT=/work/tf \ - && mkdir -p ${TF_ROOT}/bazel \ - && cd ${TF_ROOT}/bazel \ - && wget https://github.com/bazelbuild/bazel/releases/download/0.29.1/bazel-0.29.1-installer-linux-x86_64.sh \ - && chmod +x bazel-0.29.1-installer-linux-x86_64.sh \ - && ./bazel-0.29.1-installer-linux-x86_64.sh - -RUN export TF_ROOT=/work/tf \ - && export PATH="$PATH:$HOME/bin" \ - && cd $TF_ROOT \ - && git clone https://github.com/tensorflow/tensorflow.git \ - && cd tensorflow \ - && git checkout r2.1 \ - && export PYTHON_BIN_PATH=$(which python) \ - && export PYTHON_LIB_PATH="$($PYTHON_BIN_PATH -c 'import site; print(site.getsitepackages()[0])')" \ - && export PYTHONPATH=/usr/lib/ \ - && export PYTHON_ARG=/usr/lib/ \ - && export TF_NEED_GCP=0 \ - && export TF_NEED_CUDA=0 \ - && export TF_NEED_HDFS=0 \ - && export TF_NEED_OPENCL=0 \ - && export TF_NEED_JEMALLOC=1 \ - && export TF_ENABLE_XLA=0 \ - && export TF_NEED_VERBS=0 \ - && export TF_NEED_MKL=1 \ - && export TF_DOWNLOAD_MKL=1 \ - && export TF_NEED_AWS=0 \ - && export TF_NEED_MPI=0 \ - && export TF_NEED_GDR=0 \ - && export TF_NEED_S3=0 \ - && export TF_NEED_OPENCL_SYCL=0 \ - && export TF_SET_ANDROID_WORKSPACE=0 \ - && export TF_NEED_COMPUTECPP=0 \ - && export GCC_HOST_COMPILER_PATH=$(which gcc) \ - && export CC_OPT_FLAGS="-march=native" \ - && export TF_NEED_KAFKA=0 \ - && export TF_NEED_TENSORRT=0 \ - && export TF_NCCL_VERSION=2.4 \ - && export GCC_HOST_COMPILER_PATH=$(which gcc) \ - && export CC_OPT_FLAGS="-march=native" \ - && ./configure \ - && bazel build -c opt --copt=-march=native --copt=-mfpmath=both //tensorflow:libtensorflow_framework.so //tensorflow:libtensorflow_cc.so //tensorflow/tools/pip_package:build_pip_package --noincompatible_do_not_split_linking_cmdline - -# ---------------------------------------------------------------------------- -# Build protobuf -# ---------------------------------------------------------------------------- -RUN export TF_ROOT=/work/tf \ - && mkdir -p $TF_ROOT/installdir \ - && cd $TF_ROOT \ - && wget https://github.com/google/protobuf/releases/download/v3.8.0/protobuf-cpp-3.8.0.tar.gz \ - && tar -xvf protobuf-cpp-3.8.0.tar.gz \ - && cd protobuf-3.8.0 \ - && ./configure --prefix=/work/tf/installdir/ \ - && make install -j $(grep -c ^processor /proc/cpuinfo) - -# ---------------------------------------------------------------------------- -# Prepare TF dependencies -# ---------------------------------------------------------------------------- -RUN export TF_ROOT=/work/tf \ - && cd $TF_ROOT/tensorflow \ - && bazel-bin/tensorflow/tools/pip_package/build_pip_package /tmp/tensorflow_pkg \ - && pip3 install $(find /tmp/tensorflow_pkg/ -type f -iname "tensorflow*.whl") \ - && ./tensorflow/lite/tools/make/download_dependencies.sh \ - && ./tensorflow/lite/tools/make/build_lib.sh \ - && mkdir -p /work/tf/installdir/lib \ - && mkdir -p /work/tf/installdir/include \ - && cp bazel-bin/tensorflow/libtensorflow_cc.so* /work/tf/installdir/lib \ - && cp bazel-bin/tensorflow/libtensorflow_framework.so* /work/tf/installdir/lib \ - && cp -r bazel-genfiles/* /work/tf/installdir/include \ - && cp -r tensorflow/cc /work/tf/installdir/include/tensorflow \ - && cp -r tensorflow/core /work/tf/installdir/include/tensorflow \ - && cp -r third_party /work/tf/installdir/include \ - && cp -r bazel-tensorflow/external/eigen_archive/unsupported /work/tf/installdir/include \ - && cp -r bazel-tensorflow/external/eigen_archive/Eigen 
/work/tf/installdir/include \ - && cp -r tensorflow/lite/tools/make/downloads/absl/absl /work/tf/installdir/include - -# ---------------------------------------------------------------------------- -# Build OTB: Stage 1 (clone) -# ---------------------------------------------------------------------------- -RUN mkdir -p /work/otb \ - && cd /work/otb \ - && git clone https://gitlab.orfeo-toolbox.org/orfeotoolbox/otb.git otb \ - && cd otb \ - && git checkout release-7.1 - -# ---------------------------------------------------------------------------- -# Build OTB: Stage 2 (superbuild) -# ---------------------------------------------------------------------------- -RUN mkdir -p /work/otb/build \ - && cd /work/otb/build \ - && cmake /work/otb/otb/SuperBuild \ - -DUSE_SYSTEM_BOOST=ON \ - -DUSE_SYSTEM_CURL=ON \ - -DUSE_SYSTEM_EXPAT=ON \ - -DUSE_SYSTEM_FFTW=ON \ - -DUSE_SYSTEM_FREETYPE=ON \ - -DUSE_SYSTEM_GDAL=ON \ - -DUSE_SYSTEM_GEOS=ON \ - -DUSE_SYSTEM_GEOTIFF=ON \ - -DUSE_SYSTEM_GLEW=ON \ - -DUSE_SYSTEM_GLFW=ON \ - -DUSE_SYSTEM_GLUT=ON \ - -DUSE_SYSTEM_GSL=ON \ - -DUSE_SYSTEM_ITK=ON \ - -DUSE_SYSTEM_LIBKML=ON \ - -DUSE_SYSTEM_LIBSVM=ON \ - -DUSE_SYSTEM_MUPARSER=ON \ - -DUSE_SYSTEM_MUPARSERX=ON \ - -DUSE_SYSTEM_OPENCV=ON \ - -DUSE_SYSTEM_OPENTHREADS=ON \ - -DUSE_SYSTEM_OSSIM=ON \ - -DUSE_SYSTEM_PNG=ON \ - -DUSE_SYSTEM_QT5=ON \ - -DUSE_SYSTEM_QWT=ON \ - -DUSE_SYSTEM_TINYXML=ON \ - -DUSE_SYSTEM_ZLIB=ON \ - -DUSE_SYSTEM_SWIG=OFF \ - -DOTB_WRAP_PYTHON=OFF \ - && make -j $(grep -c ^processor /proc/cpuinfo) - -# ---------------------------------------------------------------------------- -# Build OTB: Stage 3 (bindings) -# ---------------------------------------------------------------------------- -RUN cd /work/otb/otb/Modules/Remote \ - && git clone -b release-2.0-rc2 https://github.com/remicres/otbtf.git \ - && cd /work/otb/build/OTB/build \ - && cmake /work/otb/otb \ - -DOTB_WRAP_PYTHON=ON \ - -DPYTHON_EXECUTABLE=/usr/bin/python3.6 \ - -Dopencv_INCLUDE_DIR=/usr/include \ - -DModule_OTBTensorflow=ON \ - -DOTB_USE_TENSORFLOW=ON \ - -DTENSORFLOW_CC_LIB=/work/tf/installdir/lib/libtensorflow_cc.so \ - -DTENSORFLOW_FRAMEWORK_LIB=/work/tf/installdir/lib/libtensorflow_framework.so \ - -Dtensorflow_include_dir=/work/tf/installdir/include/ \ - && cd /work/otb/build/ \ - && make -j $(grep -c ^processor /proc/cpuinfo) - -# ---------------------------------------------------------------------------- -# Add important environment variables -# ---------------------------------------------------------------------------- -ENV PATH="$PATH:/work/otb/superbuild_install/bin/" -ENV PYTHONPATH="/work/otb/superbuild_install/lib/otb/python:/work/otb/otb/Modules/Remote/otbtf/python:$PYTHONPATH" -ENV OTB_APPLICATION_PATH="/work/otb/superbuild_install/lib/otb/applications" -ENV LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/work/otb/superbuild_install/lib/:/work/tf/installdir/lib/" - -# ---------------------------------------------------------------------------- -# Clean -# ---------------------------------------------------------------------------- -RUN rm -rf /tmp/* /root/.cache && apt-get clean - -# ---------------------------------------------------------------------------- -# Add one user -# ---------------------------------------------------------------------------- -RUN useradd -s /bin/bash -m otbuser -USER otbuser -WORKDIR /home/otbuser - -# ---------------------------------------------------------------------------- -# Exemple : -# docker build --tag otbtf_image -# docker run -v /path/to/host/:/path/to/mount/ otbtf_image 
otbcli_ExtractROI
-# docker run -ti -v /tmp/.X11-unix:/tmp/.X11-unix -e DISPLAY=$DISPLAY otbtf_image otbgui_ExtractROI
-# ----------------------------------------------------------------------------
-