diff --git a/README.md b/README.md
index 92272b59085bc394765d4675cfebebf6e0de4de8..aeeeaf4e6f1e0a0f755b6fc4bb3928b344dbcfa1 100644
--- a/README.md
+++ b/README.md
@@ -1,16 +1,30 @@
 # OTBTensorflow
 
-This remote module of the [Orfeo ToolBox](https://www.orfeo-toolbox.org) (OTB) aims to provide a deep learning framework targeting remote sensing images processing.
+This remote module of the [Orfeo ToolBox](https://www.orfeo-toolbox.org) provides a generic, multi-purpose deep learning framework, targeting remote sensing image processing.
 It contains a set of new process objects that internally invoke [Tensorflow](https://www.tensorflow.org/), and a bunch of user-oriented applications to perform deep learning with real-world remote sensing images.
+Applications can be used to build OTB pipelines from the Python or C++ APIs.
 
 *Main highlights*
  - Sampling,
  - Training, supporting save/restore/import operations (a model can be trained from scratch or fine-tuned),
- - Serving models with support of OTB streaming mechanism 
+ - Serving models with support of the OTB streaming mechanism. Meaning (1) not limited by image sizes, (2) can be used as a "lego" in any OTB pipeline while preserving streaming, (3) MPI support available (use multiple processing units to generate one single output image)
 
+*Portfolio*
+
+Below are some screen captures of deep learning applications performed at large scale with OTBTF.
+ - Image to image translation (Spot-7 image --> Wikimedia Map using CGAN)
+<img src ="doc/pix2pix.png" />
+
+ - Landcover mapping (Spot-7 images --> Building map using semantic segmentation)
+<img src ="doc/landcover.png" />
+
+ - Image enhancement (Enhancement of Sentinel-2 images at 1.5m using SRGAN)
+<img src ="doc/supresol.png" />
+
+You can read more details about these applications on [this blog](https://mdl4eo.irstea.fr/2019/).
 
 # How to install
-This remote module has been tested successfully on Ubuntu 18 and CentOs 7 with CUDA drivers.
+This remote module has been tested successfully on Ubuntu 18 and CentOs 7 with the latest CUDA drivers.
 
 ## Build OTB
 First, **build the latest *develop* branch of OTB from sources**. You can check the [OTB documentation](https://www.orfeo-toolbox.org/SoftwareGuide/SoftwareGuidech2.html) which details all the steps, if fact it is quite easy thank to the SuperBuild.
@@ -116,12 +130,7 @@ make install -j $(grep -c ^processor /proc/cpuinfo)
 ```
 Then, build NSync
 ```
-mkdir /tmp/proto
-cd /work/tf/tensorflow/tensorflow/contrib/makefile/downloads/protobuf/
-./autogen.sh
-./configure --prefix=/tmp/proto/
-make -j $(grep -c ^processor /proc/cpuinfo)
-make install
+/work/tf/tensorflow/tensorflow/contrib/makefile/compile_nsync.sh
 ```
 Then, build absl
 ```
@@ -155,7 +164,8 @@ cp -r /work/tf/tensorflow/third_party /work/tf/installdir/include
 cp -r /tmp/proto/include/* /work/tf/installdir/include
 cp -r /tmp/eigen/include/eigen3/* /work/tf/installdir/include
 cp /work/tf/tensorflow/tensorflow/contrib/makefile/downloads/nsync/public/* /work/tf/installdir/include/
-find /work/tf/tensorflow/tensorflow/contrib/makefile/downloads/absl/absl/ -name '*.h' -exec cp --parents \{\} /work/tf/installdir/include/ \;
+# The following command copies the absl headers into /work/tf/installdir/include/ while preserving the folder structure
+find /work/tf/tensorflow/tensorflow/contrib/makefile/downloads/absl/absl/ -name '*.h' -exec cp --parents \{\} /work/tf/installdir/include/ \; 
 
 # Cleaning
 find /work/tf/installdir/ -name "*.cc" -type f -delete
@@ -494,7 +504,5 @@ otbcli_TensorflowModelServe -source1.il spot7.tif -source1.placeholder x1 -sourc
 # Tutorial
 A complete tutorial is available at [MDL4EO's blog](https://mdl4eo.irstea.fr/2019/01/04/an-introduction-to-deep-learning-on-remote-sensing-images-tutorial/)
 # Contact
-You can contact Rémi Cresson if you have any issues with this remote module at remi [dot] cresson [at] irstea [dot] fr
-
-
+You can contact Remi Cresson if you have any issues with this remote module at remi [dot] cresson [at] irstea [dot] fr
 
diff --git a/doc/landcover.png b/doc/landcover.png
new file mode 100644
index 0000000000000000000000000000000000000000..913f3600614420a70adc1bfa8110cdc6e3c3f45a
Binary files /dev/null and b/doc/landcover.png differ
diff --git a/doc/pix2pix.png b/doc/pix2pix.png
new file mode 100644
index 0000000000000000000000000000000000000000..1c79652ff0bae389cbee1c7cde40b2d46f20f089
Binary files /dev/null and b/doc/pix2pix.png differ
diff --git a/doc/supresol.png b/doc/supresol.png
new file mode 100644
index 0000000000000000000000000000000000000000..efff2fd23176e0fab501ced42aa24c56a2a77a9b
Binary files /dev/null and b/doc/supresol.png differ
diff --git a/include/otbTensorflowSampler.hxx b/include/otbTensorflowSampler.hxx
index 54850e3930246abe0085abe80ff1e70f1b6762e3..b7d4dd82a0f4e96a69c5469aff863188c99d3f8f 100644
--- a/include/otbTensorflowSampler.hxx
+++ b/include/otbTensorflowSampler.hxx
@@ -145,6 +145,7 @@ TensorflowSampler<TInputImage, TVectorData>
   {
     ImagePointerType newImage;
     AllocateImage(newImage, m_PatchSizes[i], nTotal, GetInput(i)->GetNumberOfComponentsPerPixel());
+    newImage->SetSignedSpacing(this->GetInput(i)->GetSignedSpacing());
     m_OutputPatchImages.push_back(newImage);
   }
 
diff --git a/python/ckpt2savedmodel.py b/python/ckpt2savedmodel.py
index 462d133a6989e4f2eb5bd46b22c1c0179e636a79..caf5b2fe097996fd0f2b813f202f3d5fc59ca178 100644
--- a/python/ckpt2savedmodel.py
+++ b/python/ckpt2savedmodel.py
@@ -21,10 +21,7 @@ from __future__ import division
 from __future__ import print_function
 
 import argparse
-from tricks import *
-
-# Logging
-tf.logging.set_verbosity(tf.logging.INFO)
+from tricks import CheckpointToSavedModel
 
 # Parser
 parser = argparse.ArgumentParser()
@@ -32,10 +29,12 @@ parser.add_argument("--ckpt",    help="checkpoint file prefix",   required=True)
 parser.add_argument("--inputs",  help="input placeholder names",  required=True, nargs='+')
 parser.add_argument("--outputs", help="output placeholder names", required=True, nargs='+')
 parser.add_argument("--model",   help="output SavedModel",        required=True)
+parser.add_argument('--clear_devices', dest='clear_devices', action='store_true')
+parser.set_defaults(clear_devices=False)
 params = parser.parse_args()
 
 if __name__ == "__main__":
 
-  CheckpointToSavedModel(params.ckpt, params.inputs, params.outputs, params.model)
+  CheckpointToSavedModel(params.ckpt, params.inputs, params.outputs, params.model, params.clear_devices)
   
   quit()
diff --git a/python/tricks.py b/python/tricks.py
index 0b1ae614c7947fec9f99b08f78d8674c92ee35c9..45727f1a2edd814a538bebf822cfe0798ca21e81 100644
--- a/python/tricks.py
+++ b/python/tricks.py
@@ -135,7 +135,7 @@ def CreateSavedModel(sess, inputs, outputs, directory):
   builder.add_meta_graph([tf.saved_model.tag_constants.SERVING])
   builder.save()
 
-def CheckpointToSavedModel(ckpt_path, inputs, outputs, savedmodel_path):
+def CheckpointToSavedModel(ckpt_path, inputs, outputs, savedmodel_path, clear_devices=False):
   """
   Read a Checkpoint and build a SavedModel
   
@@ -149,8 +149,13 @@ def CheckpointToSavedModel(ckpt_path, inputs, outputs, savedmodel_path):
   with tf.Session() as sess:
     
     # Restore variables from disk.
-    model_saver = tf.train.import_meta_graph(ckpt_path+".meta")
+    model_saver = tf.train.import_meta_graph(ckpt_path+".meta", clear_devices=clear_devices)
     model_saver.restore(sess, ckpt_path)
     
     # Create a SavedModel
-    CreateSavedModel(sess, inputs, outputs, savedmodel_path)
\ No newline at end of file
+    #CreateSavedModel(sess, inputs, outputs, savedmodel_path)
+    graph = tf.get_default_graph()
+    tf.saved_model.simple_save(sess,
+            savedmodel_path,
+            inputs={ i : graph.get_tensor_by_name(i) for i in inputs },
+            outputs={ o : graph.get_tensor_by_name(o) for o in outputs })