diff --git a/otbtf_pres/illustrations/gif_2160.gif b/otbtf_pres/illustrations/gif_2160.gif
index 692aa15592d490c0c4b7984ef641f8cad3667d1d..08512907e038413395ee398cfa7ec3b903e80198 100644
Binary files a/otbtf_pres/illustrations/gif_2160.gif and b/otbtf_pres/illustrations/gif_2160.gif differ
diff --git a/otbtf_pres/illustrations/net_semseg_spot67.jpg b/otbtf_pres/illustrations/net_semseg_spot67.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..d14ee9ed2d8cfb803964c64c1a3fe47484d85826
Binary files /dev/null and b/otbtf_pres/illustrations/net_semseg_spot67.jpg differ
diff --git a/otbtf_pres/illustrations/pyotbadvert.gif b/otbtf_pres/illustrations/pyotbadvert.gif
new file mode 100644
index 0000000000000000000000000000000000000000..816b2980a1cb8441105ebaca2c3e4f1c9f1941c4
Binary files /dev/null and b/otbtf_pres/illustrations/pyotbadvert.gif differ
diff --git a/otbtf_pres/illustrations/pyotbadvert.png b/otbtf_pres/illustrations/pyotbadvert.png
index e457f102bf2875ab5f752d71b4673bb1f4d56fac..f77b214d8c9eaf4164887125600c2d571cfb49b9 100644
Binary files a/otbtf_pres/illustrations/pyotbadvert.png and b/otbtf_pres/illustrations/pyotbadvert.png differ
diff --git a/otbtf_pres/index.html b/otbtf_pres/index.html
index 01a1caec486afc9091c7326ecc9dce5b72853381..28d8d8277a0046995c6e4984750af2100f5e4ca9 100644
--- a/otbtf_pres/index.html
+++ b/otbtf_pres/index.html
@@ -271,7 +271,7 @@ tf_ds = TFRecords("/path/to/tfrecords_dir").read()
           <br>
           <ul>
             <li>Ease the <h>implementation of deep nets</h> in python</li>
-            <li>Provides all the necessary to work smoothly with TensorflowModelServe</li>
+            <li>Provides everything needed to work smoothly with TensorflowModelServe (see the sketch below)</li>
           </ul>
           <br>
           <img width="50%" data-src="illustrations/modelbase.png">
@@ -379,7 +379,11 @@ app.write("output_y.tif")
           <img width="50%" data-src="illustrations/fig12.10_new.png">
           <br>
           <p><h>Example</h>: simple U-Net like model for dense pixel classification</p>
-          <p><small>Cresson, R. (2020). Deep Learning for Remote Sensing Images with Open Source Software. CRC Press.</small></p>
+          <p><small>
+            Cresson, R. (2020). 
+            Deep Learning for Remote Sensing Images with Open Source Software. 
+            CRC Press.
+          </small></p>
         </section>
 
       </section>
@@ -396,10 +400,22 @@ app.write("output_y.tif")
 
         <section>
           <h2>Large scale land cover mapping</h2>
-          <h4>Semantic segmentation of buildings footprint over france mainland at 1.5m spacing</h4>
+          <h4>Buildings footprint over mainland France from Spot-6/7</h4>
           <img width="65%" data-src="illustrations/tosca.png">
-          <p><small>Product available at <a href="https://www.theia-land.fr/en/product/buildings-footprint"
-                target="_blank">https://www.theia-land.fr/en/product/buildings-footprint/</a></small></p>
+          <p><small>
+            Product available at 
+            <a href="https://www.theia-land.fr/en/product/buildings-footprint"
+               target="_blank">https://www.theia-land.fr/en/product/buildings-footprint/</a>
+          </small></p>
+        </section>
+
+        <section>
+          <h4>Model designed for Spot-6/7 products</h4>
+          <img width="40%" data-src="illustrations/net_semseg_spot67.jpg">
+          <p><small>
+            Semantic segmentation network that takes the <h>multispectral</h> and
+            <h>panchromatic</h> rasters of Spot-6/7 images as separate inputs (sketched below)
+          </small></p>
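+          <p><small>
+            A rough keras sketch of such a dual-input network (layer sizes, names and the
+            upsampling factor are illustrative, not the actual model):
+          </small></p>
+          <pre style="width:1000px"><code data-trim class="python">
+# hedged sketch: one branch per Spot-6/7 raster, merged on the panchromatic grid
+import tensorflow as tf
+
+pan = tf.keras.Input(shape=(None, None, 1), name="input_pan")  # 1.5m panchromatic
+xs = tf.keras.Input(shape=(None, None, 4), name="input_xs")    # 6m multispectral
+xs_up = tf.keras.layers.UpSampling2D(4)(xs)                    # XS onto the PAN grid
+net = tf.keras.layers.Concatenate()([pan, xs_up])
+net = tf.keras.layers.Conv2D(32, 3, padding="same", activation="relu")(net)
+out = tf.keras.layers.Conv2D(2, 1, activation="softmax", name="estimated")(net)
+model = tf.keras.Model(inputs=[pan, xs], outputs=out)
+          </code></pre>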
         </section>
 
         <section data-background-image='illustrations/gif_2160.gif'></section>
@@ -424,6 +440,7 @@ app.write("output_y.tif")
 
         <section>
           <h4>Easy to run</h4>
+          <img width="48px" data-src="illustrations/cli.png" style="float:left;padding-left:15%;margin:30px">
           <pre style="width:1000px"><code data-trim class="bash">
 # Download pre-trained model
 wget https://tinyurl.com/sr4rsmodelv2
@@ -490,7 +507,7 @@ if __name__ == "__main__":
     # if you need to write:
     infer.write("out", out_fn)
           </code></pre>
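+          <p><small>
+            A complementary sketch (hypothetical app names and paths; the write() call mirrors
+            the snippet above) of chaining pyotb apps in memory before serving a model:
+          </small></p>
+          <pre style="width:1000px"><code data-trim class="python">
+# hedged sketch: a pyotb app output feeds another app, no intermediate file
+import pyotb
+
+smoothed = pyotb.Smoothing("input_xs.tif", type="gaussian")  # hypothetical pre-processing
+infer = pyotb.TensorflowModelServe({
+    "source1.il": smoothed,              # in-memory connection
+    "model.dir": "/path/to/savedmodel",  # hypothetical SavedModel directory
+})
+infer.write("out", "output.tif")
+          </code></pre>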
-          <img width="20%" data-src="illustrations/pyotbadvert.png">
+          <img width="20%" data-src="illustrations/pyotbadvert.gif">
         </section>
 
         <section>
@@ -539,10 +556,11 @@ if __name__ == "__main__":
           <ul>
             <li><h>Blog</h>: https://mdl4eo.irstea.fr/2022/04/09/bye-bye-clouds/</li>
             <li><h>Paper</h>: https://doi.org/10.5194/isprs-archives-XLIII-B3-2022-1317-2022</li>
-            <li><h>Code</h> https://github.com/cnes/decloud</li>
           </ul>
           <img width="50%" data-src="illustrations/crga_os2_unet_slide3.png">
-          <p><small>Cresson, R., Narçon, N., Gaetano, R., Dupuis, A., Tanguy, Y., May, S., and Commandré, B.: COMPARISON OF CONVOLUTIONAL NEURAL NETWORKS FOR CLOUDY OPTICAL IMAGES RECONSTRUCTION FROM SINGLE OR MULTITEMPORAL JOINT SAR AND OPTICAL IMAGES, Int. Arch. Photogramm. Remote Sens. Spatial Inf. Sci., XLIII-B3-2022, 1317–1326</small></p>
+          <p><small>Cresson, R., Narçon, N., Gaetano, R., Dupuis, A., Tanguy, Y., May, S., and Commandré, B.: 
+            Comparison of CNNs for cloudy optical images reconstruction from single or multitemporal
+            joint SAR and optical images, Int. Arch. Photogramm. Remote Sens. Spatial Inf. Sci., XLIII-B3-2022, 1317–1326</small></p>
         </section>
 
         <section>