Unverified commit bd8b6e1a, authored by Rémi Cresson and committed by GitHub

Merge pull request #63 from LaTeleScop/develop

Docker: OTB and TF latest releases + new ZIP_TF_BIN build argument
Showing with 60 additions and 33 deletions
@@ -40,7 +40,7 @@ WORKDIR /src/tf
RUN git config --global advice.detachedHead false
### TF
-ARG TF=v2.4.1
+ARG TF=v2.5.0
# Install bazelisk (will read .bazelversion and download the right bazel binary - latest by default)
RUN wget -qO /opt/otbtf/bin/bazelisk https://github.com/bazelbuild/bazelisk/releases/latest/download/bazelisk-linux-amd64 \
 && chmod +x /opt/otbtf/bin/bazelisk \
@@ -53,7 +53,7 @@ ARG BZL_CONFIGS="--config=nogcp --config=noaws --config=nohdfs --config=opt"
ARG BZL_OPTIONS="--verbose_failures --remote_cache=http://localhost:9090"
# Build
-ARG KEEP_SRC_TF=false
+ARG ZIP_TF_BIN=false
COPY tools/docker/build-env-tf.sh ./
RUN git clone --single-branch -b $TF https://github.com/tensorflow/tensorflow.git \
 && cd tensorflow \
@@ -76,14 +76,14 @@ RUN git clone --single-branch -b $TF https://github.com/tensorflow/tensorflow.git \
 && cp tensorflow/cc/saved_model/tag_constants.h /opt/otbtf/include/tf/tensorflow/cc/saved_model/ \
 # Symlink external libs (required for MKL - libiomp5)
 && for f in $(find -L /opt/otbtf/include/tf -wholename "*/external/*/*.so"); do ln -s $f /opt/otbtf/lib/; done \
+ # Compress and save TF binaries
+ && ( ! $ZIP_TF_BIN || zip -9 -j --symlinks /opt/otbtf/tf-$TF.zip tensorflow/cc/saved_model/tag_constants.h bazel-bin/tensorflow/libtensorflow_cc.so* /tmp/tensorflow_pkg/tensorflow*.whl ) \
 # Cleaning
- && rm -rf bazel-* \
- && ( $KEEP_SRC_TF || rm -rf /src/tf ) \
- && rm -rf /root/.cache/ /tmp/*
+ && rm -rf bazel-* /src/tf /root/.cache/ /tmp/*
### OTB
ARG GUI=false
-ARG OTB=7.2.0
+ARG OTB=7.3.0
RUN mkdir /src/otb
WORKDIR /src/otb
@@ -97,7 +97,7 @@ RUN git clone --single-branch -b $OTB https://gitlab.orfeo-toolbox.org/orfeotoolbox/otb.git \
 && if $GUI; then \
    sed -i -r "s/-DOTB_USE_(QT|OPENGL|GL[UFE][WT])=OFF/-DOTB_USE_\1=ON/" ../build-flags-otb.txt; fi \
 # Possible ENH: superbuild-all-dependencies switch, with separated build-deps-minimal.txt and build-deps-otbcli.txt)
-#&& if $OTB_SUPERBUILD_ALL; then sed -i -r "s/-DUSE_SYSTEM_([A-Z0-9]*)=ON/-DUSE_SYSTEM_\1=OFF/"" ../build-flags-otb.txt; fi \
+#&& if $OTB_SUPERBUILD_ALL; then sed -i -r "s/-DUSE_SYSTEM_([A-Z0-9]*)=ON/-DUSE_SYSTEM_\1=OFF/ " ../build-flags-otb.txt; fi \
 && OTB_FLAGS=$(cat "../build-flags-otb.txt") \
 && cmake ../otb/SuperBuild -DCMAKE_INSTALL_PREFIX=/opt/otbtf $OTB_FLAGS \
 && make -j $(python -c "import os; print(round( os.cpu_count() * $CPU_RATIO ))")
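The `( ! $ZIP_TF_BIN || zip ... )` construct added above is a compact shell conditional: the zip step runs only when the build argument is `true`. A minimal sketch of the idiom, using a hypothetical `FLAG` variable:

```bash
# If FLAG is "false", `! $FLAG` succeeds and the command after || is skipped;
# if FLAG is "true", `! $FLAG` fails, so the command after || runs.
FLAG=true
( ! $FLAG || echo "runs only when FLAG=true" )
FLAG=false
( ! $FLAG || echo "never printed" )
```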
# Build with Docker
Docker build has to be called from the root of the repository (i.e. `docker build .` or `bash tools/docker/multibuild.sh`).
You can build a custom image using `--build-arg` and several config files :
-- Ubuntu : `BASE_IMG` should accept any version, for additional packages see [build-deps-cli.txt](build-deps-cli.txt) and [build-deps-gui.txt](build-deps-gui.txt)
+- Ubuntu : `BASE_IMG` should accept any version, for additional packages see [build-deps-cli.txt](build-deps-cli.txt) and [build-deps-gui.txt](build-deps-gui.txt).
-- TensorFlow : `TF` arg for the git branch or tag + [build-env-tf.sh](build-env-tf.sh) and BZL_* arguments for the build configuration
+- TensorFlow : `TF` arg for the git branch or tag + [build-env-tf.sh](build-env-tf.sh) and BZL_* arguments for the build configuration. `ZIP_TF_BIN` allows you to save the compiled binaries if you want to install them elsewhere.
-- OrfeoToolBox : `OTB` arg for the git branch or tag + [build-flags-otb.txt](build-flags-otb.txt) to edit cmake flags
+- OrfeoToolBox : `OTB` arg for the git branch or tag + [build-flags-otb.txt](build-flags-otb.txt) to edit cmake flags. Set `KEEP_SRC_OTB` in order to preserve the OTB git directory.
### Base images
```bash
UBUNTU=20.04 # or 16.04, 18.04
-CUDA=11.0.3 # or 10.1, 10.2
+CUDA=11.2.2 # or 10.1, 10.2, 11.0.3
CUDNN=8 # or 7
IMG=ubuntu:$UBUNTU
GPU_IMG=nvidia/cuda:$CUDA-cudnn$CUDNN-devel-ubuntu$UBUNTU
@@ -17,28 +17,28 @@ GPU_IMG=nvidia/cuda:$CUDA-cudnn$CUDNN-devel-ubuntu$UBUNTU
### Default arguments
```bash
BASE_IMG # mandatory
-CPU_RATIO=0.95
+CPU_RATIO=1
GUI=false
-NUMPY_SPEC="~=1.19"
+NUMPY_SPEC="==1.19.*"
-TF=r2.4.1
+TF=v2.5.0
-OTB=7.2.0
+OTB=7.3.0
BZL_TARGETS="//tensorflow:libtensorflow_cc.so //tensorflow/tools/pip_package:build_pip_package"
BZL_CONFIGS="--config=nogcp --config=noaws --config=nohdfs --config=opt"
BZL_OPTIONS="--verbose_failures --remote_cache=http://localhost:9090"
-KEEP_SRC_TF=false
+ZIP_TF_BIN=false
KEEP_SRC_OTB=false
SUDO=true
# NumPy version requirement :
# TF < 2.4 : "numpy<1.19.0,>=1.16.0"
-# TF >= 2.4 : "numpy~=1.19"
+# TF >= 2.4 : "numpy==1.19.*"
```
### Bazel remote cache daemon
If you just need to rebuild with different GUI or KEEP_SRC arguments, or maybe a different branch of OTB, the bazel cache will help you rebuild everything except TF, even if the docker cache was purged (after `docker [system|builder] prune`).
In order to recycle the cache, the bazel config and the TF git tag should be exactly the same; any change in [build-env-tf.sh](build-env-tf.sh) or in `--build-arg` (if related to the bazel env, cuda, mkl, xla...) may result in a fresh new build.
-Start a cache daemon - here with max 20GB but 12GB should be enough to save 2 TF builds (GPU and CPU):
+Start a cache daemon - here with max 20GB but 10GB should be enough to save 2 TF builds (GPU and CPU):
```bash
mkdir -p $HOME/.cache/bazel-remote
docker run --detach -u 1000:1000 -v $HOME/.cache/bazel-remote:/data -p 9090:8080 buchgr/bazel-remote-cache --max_size=20
@@ -46,7 +46,7 @@ docker run --detach -u 1000:1000 -v $HOME/.cache/bazel-remote:/data -p 9090:8080 buchgr/bazel-remote-cache --max_size=20
Then just add `--network='host'` to the docker build command, or connect bazel to a remote server - see 'BZL_OPTIONS'.
The other way with docker is a virtual bridge, but you'll need to edit the IP address.
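For instance, a possible invocation when relying on the bridge instead of host networking (the gateway address 172.17.0.1 is only the usual default; check yours with `docker network inspect bridge`):

```bash
# Hypothetical build reusing the cache daemon through the docker bridge gateway
docker build -t otbtf:cpu --build-arg BASE_IMG=ubuntu:20.04 \
    --build-arg BZL_OPTIONS="--verbose_failures --remote_cache=http://172.17.0.1:9090" .
```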
-## Build examples
+## Image build examples
```bash
# Build for CPU using default Dockerfiles args (without AWS, HDFS or GCP support)
docker build --network='host' -t otbtf:cpu --build-arg BASE_IMG=ubuntu:20.04 .
@@ -61,17 +61,48 @@ docker build --network='host' -t otbtf:cpu-mkl --build-arg BZL_CONFIGS="$MKL_CON
# Build for GPU (if you're building for your system only you should edit CUDA_COMPUTE_CAPABILITIES in build-env-tf.sh)
docker build --network='host' -t otbtf:gpu --build-arg BASE_IMG=nvidia/cuda:11.0.3-cudnn8-devel-ubuntu20.04 .
-# Build dev with TF and OTB sources (huge image) + set git branches/tags to clone
-docker build --network='host' -t otbtf:gpu-dev-full --build-arg BASE_IMG=nvidia/cuda:11.0.3-cudnn8-devel-ubuntu20.04 \
-    --build-arg KEEP_SRC_OTB=true --build-arg KEEP_SRC_TF=true --build-arg TF=nightly --build-arg OTB=develop .
+# Build latest TF and OTB, set git branches/tags to clone
+docker build --network='host' -t otbtf:gpu-dev --build-arg BASE_IMG=nvidia/cuda:11.2.2-cudnn8-devel-ubuntu20.04 \
+    --build-arg KEEP_SRC_OTB=true --build-arg TF=nightly --build-arg OTB=develop .
-# Build old release
+# Build old release (TF-2.1)
docker build --network='host' -t otbtf:oldstable-gpu --build-arg BASE_IMG=nvidia/cuda:10.1-cudnn7-devel-ubuntu18.04 \
    --build-arg TF=r2.1 --build-arg NUMPY_SPEC="<1.19" \
    --build-arg BAZEL_OPTIONS="--noincompatible_do_not_split_linking_cmdline --verbose_failures --remote_cache=http://localhost:9090" .
# You could edit the Dockerfile in order to clone an old branch of the repo instead of copying files from the build context
```
+### Build for another machine and save TF compiled files
+```bash
+# Use the same Ubuntu and CUDA versions as your target machine; beware of CC optimization and CPU compatibility
+# (set env variable CC_OPT_FLAGS and avoid "-march=native" if your Docker host's CPU is optimized with AVX2/AVX512 but your target CPU isn't)
+docker build --network='host' -t otbtf:custom --build-arg BASE_IMG=nvidia/cuda:11.2.2-cudnn8-devel-ubuntu20.04 \
+    --build-arg TF=v2.5.0 --build-arg ZIP_TF_BIN=true .
+# Retrieve the zip file
+docker run -v $HOME:/home/otbuser/volume otbtf:custom cp /opt/otbtf/tf-v2.5.0.zip /home/otbuser/volume
+# Target machine shell
+cd $HOME
+unzip tf-v2.5.0.zip
+sudo mkdir -p /opt/tensorflow/lib
+sudo mv tf-v2.5.0/libtensorflow_cc* /opt/tensorflow/lib
+# You may need to create a virtualenv; here TF and its dependencies are installed next to the user's pip packages
+pip3 install -U pip wheel mock six future deprecated "numpy==1.19.*"
+pip3 install --no-deps keras_applications keras_preprocessing
+pip3 install tf-v2.5.0/tensorflow-2.5.0-cp38-cp38-linux_x86_64.whl
+TF_WHEEL_DIR="$HOME/.local/lib/python3.8/site-packages/tensorflow"
+# (path if you installed the wheel as a regular user; with root pip it should be in /usr/local/lib/python3.*, or in your virtualenv lib/ directory)
+mv tf-v2.5.0/tag_constants.h $TF_WHEEL_DIR/include/tensorflow/cc/saved_model/
+# Then recompile OTB with OTBTF, using the libraries in /opt/tensorflow/lib and the instructions in HOWTOBUILD.md
+cmake $OTB_GIT \
+    -DOTB_USE_TENSORFLOW=ON -DModule_OTBTensorflow=ON \
+    -DTENSORFLOW_CC_LIB=/opt/tensorflow/lib/libtensorflow_cc.so.2 \
+    -Dtensorflow_include_dir=$TF_WHEEL_DIR/include \
+    -DTENSORFLOW_FRAMEWORK_LIB=$TF_WHEEL_DIR/libtensorflow_framework.so.2 \
+    && make install -j
+```
### Debug build
If you fail to build, you can log into the last layer and check CMake logs. Run `docker images`, find the latest layer ID and run a tmp container (`docker run -it d60496d9612e bash`).
You may also need to split some multi-command layers in the Dockerfile.
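For instance, a possible inspection session (the image ID is just an illustration; the `/src/otb` path comes from the Dockerfile above):

```bash
docker images                      # the most recent intermediate image is listed first
docker run -it d60496d9612e bash   # open a shell in that layer
# then look for the SuperBuild CMake logs, e.g.:
find /src/otb -name "CMakeError.log" -o -name "CMakeOutput.log"
```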
@@ -80,7 +111,7 @@ If you see OOM errors during SuperBuild you should decrease CPU_RATIO (e.g. 0.75)
## Container examples
```bash
# Pull GPU image and create a new container with your home directory as volume (requires apt package nvidia-docker2 and CUDA>=11.0)
-docker create --gpus=all --volume $HOME:/home/otbuser/volume -it --name otbtf-gpu mdl4eo/otbtf2.1:gpu
+docker create --gpus=all --volume $HOME:/home/otbuser/volume -it --name otbtf-gpu mdl4eo/otbtf2.4:gpu
# Run interactive
docker start -i otbtf-gpu
@@ -92,7 +123,7 @@ docker exec otbtf-gpu python -c 'import tensorflow as tf; print(tf.test.is_gpu_a
### Rebuild OTB with more modules
```bash
-docker create --gpus=all -it --name otbtf-gpu-dev mdl4eo/otbtf2.1:gpu-dev
+docker create --gpus=all -it --name otbtf-gpu-dev mdl4eo/otbtf2.4:gpu-dev
docker start -i otbtf-gpu-dev
```
```bash
@@ -114,7 +145,7 @@ docker start -i otbtf-gui
$ mapla
```
-### Common errors
+## Common errors
Build:
`Error response from daemon: manifest for nvidia/cuda:11.0-cudnn8-devel-ubuntu20.04 not found: manifest unknown: manifest unknown`
=> Image is missing from dockerhub
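One way to check that a base image tag actually exists before launching a long build, for example with a tag used earlier in this README:

```bash
# Pulls (or fails fast) before you spend hours on the TF build
docker pull nvidia/cuda:11.0.3-cudnn8-devel-ubuntu20.04
```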
@@ -34,7 +34,7 @@ export CUDA_TOOLKIT_PATH=$(find /usr/local -maxdepth 1 -type d -name 'cuda-*')
if [ ! -z $CUDA_TOOLKIT_PATH ] ; then
  export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:$CUDA_TOOLKIT_PATH/lib64:$CUDA_TOOLKIT_PATH/lib64/stubs"
  export TF_CUDA_VERSION=$(echo $CUDA_TOOLKIT_PATH | sed -r 's/.*\/cuda-(.*)/\1/')
- export TF_CUDA_COMPUTE_CAPABILITIES="5.2,6.1,7.0,7.5"
+ export TF_CUDA_COMPUTE_CAPABILITIES="5.2,6.1,7.0,7.5,8.6"
  export TF_NEED_CUDA=1
  export TF_CUDA_CLANG=0
  export TF_NEED_TENSORRT=0
#!/bin/bash
### Docker multibuild and push, see default args and more examples in tools/docker/README.md
-RELEASE=2.1
+RELEASE=2.5
UBUNTU=20.04
-CUDA=11.0.3
+CUDA=11.2.2
CUDNN=8
IMG=ubuntu:$UBUNTU
GPU_IMG=nvidia/cuda:$CUDA-cudnn$CUDNN-devel-ubuntu$UBUNTU
@@ -12,7 +12,6 @@ mkdir -p $HOME/.cache/bazel-remote
docker run -d -u 1000:1000 -v $HOME/.cache/bazel-remote:/data -p 9090:8080 buchgr/bazel-remote-cache --max_size=20
### CPU (no MKL)
-#docker build --network='host' -t mdl4eo/otbtf$RELEASE:-cpu-dev-all --build-arg BASE_IMG=$IMG --build-arg KEEP_SRC_OTB=true --build-arg KEEP_SRC_TF=true .
docker build --network='host' -t mdl4eo/otbtf$RELEASE:cpu-dev --build-arg BASE_IMG=$IMG --build-arg KEEP_SRC_OTB=true .
docker build --network='host' -t mdl4eo/otbtf$RELEASE:cpu --build-arg BASE_IMG=$IMG .
#docker build --network='host' -t mdl4eo/otbtf$RELEASE:-cpu-gui --build-arg BASE_IMG=$IMG --build-arg GUI=true .
@@ -23,19 +22,16 @@ docker build --network='host' -t mdl4eo/otbtf$RELEASE:cpu --build-arg BASE_IMG=$IMG .
#docker build --network='host' -t mdl4eo/otbtf$RELEASE:-cpu-mkl-dev --build-arg BASE_IMG=$IMG --build-arg BZL_CONFIGS="$MKL_CONF" --build-arg KEEP_SRC_OTB=true .
### GPU support is enabled if CUDA is found in /usr/local
-#docker build --network='host' -t mdl4eo/otbtf$RELEASE:-gpu-dev-all --build-arg BASE_IMG=$GPU_IMG --build-arg KEEP_SRC_OTB=true --build-arg KEEP_SRC_TF=true .
docker build --network='host' -t mdl4eo/otbtf$RELEASE:gpu-dev --build-arg BASE_IMG=$GPU_IMG --build-arg KEEP_SRC_OTB=true .
docker build --network='host' -t mdl4eo/otbtf$RELEASE:gpu --build-arg BASE_IMG=$GPU_IMG .
#docker build --network='host' -t mdl4eo/otbtf$RELEASE:-gpu-gui --build-arg BASE_IMG=$GPU_IMG --build-arg GUI=true .
#docker login
-#docker push mdl4eo/otbtf$RELEASE:-cpu-dev-all
docker push mdl4eo/otbtf$RELEASE:-cpu-dev
docker push mdl4eo/otbtf$RELEASE:-cpu
#docker push mdl4eo/otbtf$RELEASE:-cpu-gui
#docker push mdl4eo/otbtf$RELEASE:-cpu-mkl
-#docker push mdl4eo/otbtf$RELEASE:-gpu-dev-all
docker push mdl4eo/otbtf$RELEASE:-gpu-dev
docker push mdl4eo/otbtf$RELEASE:-gpu
#docker push mdl4eo/otbtf$RELEASE:-gpu-gui