diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index edd9c36300444195670b68d431e10d4073ff522c..29bc7b9f346595dc0cd20ee8abec4eaeb5c622d2 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -1,5 +1,5 @@
 default:
-  image: "gitlab-registry.irstea.fr/remi.cresson/otbtf/3.2.1:cpu-basic-dev"
+  image: "gitlab-registry.irstea.fr/remi.cresson/otbtf:3.4.0-cpu-dev"
   cache:
     key: $CI_COMMIT_REF_SLUG
     paths:
@@ -48,12 +48,12 @@ make_env:
 flake8:
   extends: .static_analysis_base
   script:
-    - flake8 --max-line-length=120 $PWD/scenes
+    - flake8 $PWD/scenes
 
 pylint:
   extends: .static_analysis_base
   script:
-    pylint $PWD/scenes
+    - pylint --good-names=i,x,y --ignored-modules=pyotb --disable=too-many-arguments $PWD/scenes
 
 codespell:
   extends: .static_analysis_base
@@ -67,7 +67,6 @@ pip_install:
   before_script:
     - source venv/bin/activate
   script:
-    - sudo apt update && sudo apt install -y libgnutls28-dev
     - pip install .
 
 # ------------------------------------------------------- Doc ----------------------------------------------------------
@@ -114,6 +113,11 @@ spot-6/7 imagery:
   script:
     - pytest -o log_cli=true --log-cli-level=INFO --junitxml=report_imagery_test.xml test/spot67_imagery_test.py
 
+sentinel-2 theia imagery:
+  extends: .test_base
+  script:
+    - pytest -o log_cli=true --log-cli-level=INFO --junitxml=report_imagery_test.xml test/sentinel2_theia_test.py
+
 indexation:
   extends: .test_base
   script:
@@ -123,3 +127,15 @@ dates:
   extends: .test_base
   script:
     - pytest -o log_cli=true --log-cli-level=INFO --junitxml=report_dates_test.xml test/dates_test.py
+
+stac:
+  extends: .test_base
+  script:
+    - pip install planetary-computer
+    - pip install https://gitlab.irstea.fr/dinamis/dinamis-sdk/-/archive/main/dinamis-sdk-main.zip
+    - pytest -o log_cli=true --log-cli-level=INFO --junitxml=report_stac_test.xml test/stac_test.py
+
+serialization:
+  extends: .test_base
+  script:
+    - pytest -o log_cli=true --log-cli-level=INFO --junitxml=serialization_test.xml test/serialization_test.py
diff --git a/.pylintrc b/.pylintrc
deleted file mode 100644
index 944ac10683212779e8d49d4be73ebbf66071d87a..0000000000000000000000000000000000000000
--- a/.pylintrc
+++ /dev/null
@@ -1,521 +0,0 @@
-[MASTER]
-
-# A comma-separated list of package or module names from where C extensions may
-# be loaded. Extensions are loading into the active Python interpreter and may
-# run arbitrary code.
-extension-pkg-allow-list=
-
-# A comma-separated list of package or module names from where C extensions may
-# be loaded. Extensions are loading into the active Python interpreter and may
-# run arbitrary code. (This is an alternative name to extension-pkg-allow-list
-# for backward compatibility.)
-extension-pkg-whitelist=
-
-# Specify a score threshold to be exceeded before program exits with error.
-fail-under=10
-
-# Files or directories to be skipped. They should be base names, not paths.
-ignore=CVS
-
-# Files or directories matching the regex patterns are skipped. The regex
-# matches against base names, not paths.
-ignore-patterns=
-
-# Add files or directories matching the regex patterns to the ignore-list.
-# The regex matches against paths. NOTE: Because "\" represents the directory
-# delimiter on Windows systems it can't be used as the escape character
-# in these regular expressions.
-ignore-paths=
-
-# Python code to execute, usually for sys.path manipulation such as
-# pygtk.require().
-#init-hook=
-
-# Use multiple processes to speed up Pylint. Specifying 0 will auto-detect the
-# number of processors available to use.
-jobs=1
-
-# Control the amount of potential inferred values when inferring a single
-# object. This can help the performance when dealing with large functions or
-# complex, nested conditions.
-limit-inference-results=100
-
-# List of plugins (as comma separated values of python module names) to load,
-# usually to register additional checkers.
-load-plugins=
-
-# Pickle collected data for later comparisons.
-persistent=yes
-
-# When enabled, pylint would attempt to guess common misconfiguration and emit
-# user-friendly hints instead of false-positive error messages.
-suggestion-mode=yes
-
-# Allow loading of arbitrary C extensions. Extensions are imported into the
-# active Python interpreter and may run arbitrary code.
-unsafe-load-any-extension=no
-
-
-[MESSAGES CONTROL]
-
-# Only show warnings with the listed confidence levels. Leave empty to show
-# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED.
-confidence=
-
-# Disable the message, report, category or checker with the given id(s). You
-# can either give multiple identifiers separated by comma (,) or put this
-# option multiple times (only on the command line, not in the configuration
-# file where it should appear only once). You can also use "--disable=all" to
-# disable everything first and then re-enable specific checks. For example, if
-# you want to run only the similarities checker, you can use "--disable=all
-# --enable=similarities". If you want to run only the classes checker, but have
-# no Warning level messages displayed, use "--disable=all --enable=classes
-# --disable=W".
-disable=too-few-public-methods,
-        invalid-name,
-        too-many-instance-attributes,
-        too-many-locals,
-        too-many-branches,
-        too-many-statements,
-        too-many-arguments
-
-
-# Enable the message, report, category or checker with the given id(s). You can
-# either give multiple identifier separated by comma (,) or put this option
-# multiple time (only on the command line, not in the configuration file where
-# it should appear only once). See also the "--disable" option for examples.
-enable=c-extension-no-member
-
-
-[REPORTS]
-
-# Python expression which should return a score less than or equal to 10. You
-# have access to the variables 'fatal', 'error', 'warning', 'refactor', 'convention' and 'info'
-# which contain the number of messages in each category, as well as 'statement'
-# which is the total number of statements analyzed. This score is used by the
-# global evaluation report (RP0004).
-evaluation=max(0, 0 if fatal else 10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10))
-
-# Template used to display messages. This is a python new-style format string
-# used to format the message information. See doc for all details.
-#msg-template=
-
-# Set the output format. Available formats are text, parseable, colorized, json
-# and msvs (visual studio). You can also give a reporter class, e.g.
-# mypackage.mymodule.MyReporterClass.
-output-format=text
-
-# Tells whether to display a full report or only the messages.
-reports=no
-
-# Activate the evaluation score.
-score=yes
-
-
-[REFACTORING]
-
-# Maximum number of nested blocks for function / method body
-max-nested-blocks=5
-
-# Complete name of functions that never returns. When checking for
-# inconsistent-return-statements if a never returning function is called then
-# it will be considered as an explicit return statement and no message will be
-# printed.
-never-returning-functions=sys.exit
-
-
-[LOGGING]
-
-# Format style used to check logging format string. `old` means using %
-# formatting and `new` is for `{}` formatting.
-logging-format-style=old
-
-# Logging modules to check that the string format arguments are in logging
-# function parameter format.
-logging-modules=logging
-
-
-[SPELLING]
-
-# Limits count of emitted suggestions for spelling mistakes.
-max-spelling-suggestions=4
-
-# Spelling dictionary name. Available dictionaries: none. To make it work,
-# install the python-enchant package.
-spelling-dict=
-
-# List of comma separated words that should not be checked.
-spelling-ignore-words=
-
-# A path to a file that contains the private dictionary; one word per line.
-spelling-private-dict-file=
-
-# Tells whether to store unknown words to the private dictionary (see the
-# --spelling-private-dict-file option) instead of raising a message.
-spelling-store-unknown-words=no
-
-
-[MISCELLANEOUS]
-
-# List of note tags to take in consideration, separated by a comma.
-notes=FIXME,
-      XXX,
-      TODO
-
-# Regular expression of note tags to take in consideration.
-#notes-rgx=
-
-
-[TYPECHECK]
-
-# List of decorators that produce context managers, such as
-# contextlib.contextmanager. Add to this list to register other decorators that
-# produce valid context managers.
-contextmanager-decorators=contextlib.contextmanager
-
-# List of members which are set dynamically and missed by pylint inference
-# system, and so shouldn't trigger E1101 when accessed. Python regular
-# expressions are accepted.
-generated-members=
-
-# Tells whether to warn about missing members when the owner of the attribute
-# is inferred to be None.
-ignore-none=yes
-
-# This flag controls whether pylint should warn about no-member and similar
-# checks whenever an opaque object is returned when inferring. The inference
-# can return multiple potential results while evaluating a Python object, but
-# some branches might not be evaluated, which results in partial inference. In
-# that case, it might be useful to still emit no-member and other checks for
-# the rest of the inferred objects.
-ignore-on-opaque-inference=yes
-
-# List of class names for which member attributes should not be checked (useful
-# for classes with dynamically set attributes). This supports the use of
-# qualified names.
-ignored-classes=optparse.Values,thread._local,_thread._local
-
-# List of module names for which member attributes should not be checked
-# (useful for modules/projects where namespaces are manipulated during runtime
-# and thus existing member attributes cannot be deduced by static analysis). It
-# supports qualified module names, as well as Unix pattern matching.
-ignored-modules=tqdm,rtree,pyotb,pycurl
-
-# Show a hint with possible names when a member name was not found. The aspect
-# of finding the hint is based on edit distance.
-missing-member-hint=yes
-
-# The minimum edit distance a name should have in order to be considered a
-# similar match for a missing member name.
-missing-member-hint-distance=1
-
-# The total number of similar names that should be taken in consideration when
-# showing a hint for a missing member.
-missing-member-max-choices=1
-
-# List of decorators that change the signature of a decorated function.
-signature-mutators=
-
-
-[VARIABLES]
-
-# List of additional names supposed to be defined in builtins. Remember that
-# you should avoid defining new builtins when possible.
-additional-builtins=
-
-# Tells whether unused global variables should be treated as a violation.
-allow-global-unused-variables=yes
-
-# List of strings which can identify a callback function by name. A callback
-# name must start or end with one of those strings.
-callbacks=cb_,
-          _cb
-
-# A regular expression matching the name of dummy variables (i.e. expected to
-# not be used).
-dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_
-
-# Argument names that match this expression will be ignored. Default to name
-# with leading underscore.
-ignored-argument-names=_.*|^ignored_|^unused_
-
-# Tells whether we should check for unused import in __init__ files.
-init-import=no
-
-# List of qualified module names which can have objects that can redefine
-# builtins.
-redefining-builtins-modules=six.moves,past.builtins,future.builtins,builtins,io
-
-
-[FORMAT]
-
-# Expected format of line ending, e.g. empty (any line ending), LF or CRLF.
-expected-line-ending-format=
-
-# Regexp for a line that is allowed to be longer than the limit.
-ignore-long-lines=^\s*(# )?<?https?://\S+>?$
-
-# Number of spaces of indent required inside a hanging or continued line.
-indent-after-paren=4
-
-# String used as indentation unit. This is usually "    " (4 spaces) or "\t" (1
-# tab).
-indent-string='    '
-
-# Maximum number of characters on a single line.
-max-line-length=120
-
-# Maximum number of lines in a module.
-max-module-lines=1000
-
-# List of optional constructs for which whitespace checking is disabled. `dict-
-# separator` is used to allow tabulation in dicts, etc.: {1  : 1,\n222: 2}.
-# `trailing-comma` allows a space between comma and closing bracket: (a, ).
-# `empty-line` allows space-only lines.
-no-space-check=trailing-comma,
-               dict-separator
-
-# Allow the body of a class to be on the same line as the declaration if body
-# contains single statement.
-single-line-class-stmt=no
-
-# Allow the body of an if to be on the same line as the test if there is no
-# else.
-single-line-if-stmt=no
-
-
-[SIMILARITIES]
-
-# Ignore comments when computing similarities.
-ignore-comments=yes
-
-# Ignore docstrings when computing similarities.
-ignore-docstrings=yes
-
-# Ignore imports when computing similarities.
-ignore-imports=no
-
-# Minimum lines number of a similarity.
-min-similarity-lines=4
-
-
-[BASIC]
-
-# Naming style matching correct argument names.
-argument-naming-style=snake_case
-
-# Regular expression matching correct argument names. Overrides argument-
-# naming-style.
-#argument-rgx=
-
-# Naming style matching correct attribute names.
-attr-naming-style=snake_case
-
-# Regular expression matching correct attribute names. Overrides attr-naming-
-# style.
-#attr-rgx=
-
-# Bad variable names which should always be refused, separated by a comma.
-bad-names=foo,bar
-
-# Bad variable names regexes, separated by a comma. If names match any regex,
-# they will always be refused
-bad-names-rgxs=
-
-# Naming style matching correct class attribute names.
-class-attribute-naming-style=any
-
-# Regular expression matching correct class attribute names. Overrides class-
-# attribute-naming-style.
-#class-attribute-rgx=
-
-# Naming style matching correct class names.
-class-naming-style=PascalCase
-
-# Regular expression matching correct class names. Overrides class-naming-
-# style.
-#class-rgx=
-
-# Naming style matching correct constant names.
-const-naming-style=UPPER_CASE
-
-# Regular expression matching correct constant names. Overrides const-naming-
-# style.
-#const-rgx=
-
-# Minimum line length for functions/classes that require docstrings, shorter
-# ones are exempt.
-docstring-min-length=-1
-
-# Naming style matching correct function names.
-function-naming-style=snake_case
-
-# Regular expression matching correct function names. Overrides function-
-# naming-style.
-#function-rgx=
-
-# Good variable names which should always be accepted, separated by a comma.
-good-names=i,j,k,ex,_,f,x,y,z,xy,xs,dt,ds,df
-
-# Good variable names regexes, separated by a comma. If names match any regex,
-# they will always be accepted
-good-names-rgxs=
-
-# Include a hint for the correct naming format with invalid-name.
-include-naming-hint=no
-
-# Naming style matching correct inline iteration names.
-inlinevar-naming-style=any
-
-# Regular expression matching correct inline iteration names. Overrides
-# inlinevar-naming-style.
-#inlinevar-rgx=
-
-# Naming style matching correct method names.
-method-naming-style=snake_case
-
-# Regular expression matching correct method names. Overrides method-naming-
-# style.
-#method-rgx=
-
-# Naming style matching correct module names.
-module-naming-style=snake_case
-
-# Regular expression matching correct module names. Overrides module-naming-
-# style.
-#module-rgx=
-
-# Colon-delimited sets of names that determine each other's naming style when
-# the name regexes allow several styles.
-name-group=
-
-# Regular expression which should only match function or class names that do
-# not require a docstring.
-no-docstring-rgx=^_
-
-# List of decorators that produce properties, such as abc.abstractproperty. Add
-# to this list to register other decorators that produce valid properties.
-# These decorators are taken in consideration only for invalid-name.
-property-classes=abc.abstractproperty
-
-# Naming style matching correct variable names.
-variable-naming-style=snake_case
-
-# Regular expression matching correct variable names. Overrides variable-
-# naming-style.
-#variable-rgx=
-
-
-[STRING]
-
-# This flag controls whether inconsistent-quotes generates a warning when the
-# character used as a quote delimiter is used inconsistently within a module.
-check-quote-consistency=no
-
-# This flag controls whether the implicit-str-concat should generate a warning
-# on implicit string concatenation in sequences defined over several lines.
-check-str-concat-over-line-jumps=no
-
-
-[IMPORTS]
-
-# List of modules that can be imported at any level, not just the top level
-# one.
-allow-any-import-level=
-
-# Allow wildcard imports from modules that define __all__.
-allow-wildcard-with-all=no
-
-# Analyse import fallback blocks. This can be used to support both Python 2 and
-# 3 compatible code, which means that the block might have code that exists
-# only in one or another interpreter, leading to false positives when analysed.
-analyse-fallback-blocks=no
-
-# Deprecated modules which should not be used, separated by a comma.
-deprecated-modules=optparse,tkinter.tix
-
-# Create a graph of external dependencies in the given file (report RP0402 must
-# not be disabled).
-ext-import-graph=
-
-# Create a graph of every (i.e. internal and external) dependencies in the
-# given file (report RP0402 must not be disabled).
-import-graph=
-
-# Create a graph of internal dependencies in the given file (report RP0402 must
-# not be disabled).
-int-import-graph=
-
-# Force import order to recognize a module as part of the standard
-# compatibility libraries.
-known-standard-library=
-
-# Force import order to recognize a module as part of a third party library.
-known-third-party=enchant
-
-# Couples of modules and preferred modules, separated by a comma.
-preferred-modules=
-
-
-[CLASSES]
-
-# List of method names used to declare (i.e. assign) instance attributes.
-defining-attr-methods=__init__,
-                      __new__,
-                      setUp,
-                      __post_init__
-
-# List of member names, which should be excluded from the protected access
-# warning.
-exclude-protected=_asdict,
-                  _fields,
-                  _replace,
-                  _source,
-                  _make
-
-# List of valid names for the first argument in a class method.
-valid-classmethod-first-arg=cls
-
-# List of valid names for the first argument in a metaclass class method.
-valid-metaclass-classmethod-first-arg=cls
-
-
-[DESIGN]
-
-# Maximum number of arguments for function / method.
-max-args=5
-
-# Maximum number of attributes for a class (see R0902).
-max-attributes=7
-
-# Maximum number of boolean expressions in an if statement (see R0916).
-max-bool-expr=5
-
-# Maximum number of branch for function / method body.
-max-branches=12
-
-# Maximum number of locals for function / method body.
-max-locals=15
-
-# Maximum number of parents for a class (see R0901).
-max-parents=7
-
-# Maximum number of public methods for a class (see R0904).
-max-public-methods=20
-
-# Maximum number of return / yield for function / method body.
-max-returns=6
-
-# Maximum number of statements in function / method body.
-max-statements=50
-
-# Minimum number of public methods for a class (see R0903).
-min-public-methods=2
-
-
-[EXCEPTIONS]
-
-# Exceptions that will emit a warning when being caught. Defaults to
-# "BaseException, Exception".
-overgeneral-exceptions=BaseException,
-                       Exception
diff --git a/README.md b/README.md
index a8af54b67b865862b3afa7d15a46e93af2358c28..0012d2330e38088fdc7c411b7be11a83821f1a9b 100644
--- a/README.md
+++ b/README.md
@@ -13,10 +13,8 @@ Supported products:
 
 # Install
 
-The following libraries are required: OTB, GDAL (both with python bindings), and pyotb. The `libgnutls28-dev` package is required on ubuntu, to use the `scenes.download` module.
+The following libraries are required: OTB and GDAL (both with Python bindings).
 ```commandline
-pip install --upgrade pyotb
-sudo apt update && sudo apt install -y libgnutls28-dev
 pip install git+https://gitlab.irstea.fr/umr-tetis/scenes.git
 ```
 
diff --git a/apps/dinamis_stac_import.py b/apps/dinamis_stac_import.py
new file mode 100755
index 0000000000000000000000000000000000000000..dc80fd153e9570f8ec675f827b55daa73bcd408a
--- /dev/null
+++ b/apps/dinamis_stac_import.py
@@ -0,0 +1,35 @@
+#!/usr/bin/env python3
+"""
+This application searches for existing Spot-6/7 products in the Dinamis STAC catalog, and saves a pickle object
+containing all the `scenes.spot.Spot67Scene` instances.
+
+``` command-line
+dinamis_stac_import
+  --roi_vec roi.gpkg
+  --out_pickle collection
+```
+
+"""
+import sys
+import argparse
+import scenes
+
+def main(args):
+    # Arguments
+    parser = argparse.ArgumentParser(description="Import Spot-6/7 images from DRS into a list of scenes (pickle)",)
+    parser.add_argument("--roi_vec", help="Root directories containing MS and PAN folders", required=True)
+    parser.add_argument("--out_pickle", help="Output pickle file", required=True)
+    params = parser.parse_args(args)
+
+    # Get vector bbox
+    bbox_wgs84 = scenes.vector.get_bbox_wgs84(params.roi_vec)
+    dinamis_provider = scenes.stac.DinamisSpot67Provider()
+    scs = dinamis_provider.scenes_search(bbox_wgs84=bbox_wgs84)
+
+    # Save scenes in a pickle file
+    scenes.save_scenes(scs, params.out_pickle)
+
+
+if __name__ == "__main__":
+
+    main(sys.argv[1:])
\ No newline at end of file
diff --git a/apps/s2_import.py b/apps/s2_import.py
index 92ae46beda0b385791f6ab328e7dd1482a8645ae..c5ea6aa4342f748c393254e469e8ad462ed356c3 100755
--- a/apps/s2_import.py
+++ b/apps/s2_import.py
@@ -23,7 +23,9 @@ def main(args):
     params = parser.parse_args(args)
 
     # Search all Sentinel-2 scenes
-    s2_scenes = [get_local_scenes(root_dir=root_dir, tile=params.tile_name) for root_dir in params.root_dirs]
+    s2_scenes = []
+    for root_dir in params.root_dirs:
+        s2_scenes += get_local_scenes(root_dir=root_dir, tile=params.tile_name)
 
     # Save scenes in a pickle file
     save_scenes(s2_scenes, params.out_pickle)
diff --git a/examples/drs_spot67_stac_example.py b/examples/drs_spot67_stac_example.py
new file mode 100755
index 0000000000000000000000000000000000000000..b54f3f46fc493786189149f7f8b3e6f73672b524
--- /dev/null
+++ b/examples/drs_spot67_stac_example.py
@@ -0,0 +1,16 @@
+import scenes
+import pyotb
+
+
+def main():
+
+    bbox = scenes.BoundingBox(xmin=2, ymin=45.23, xmax=2.1, ymax=45.26)
+    provider = scenes.stac.DinamisSpot67Provider()
+    scs = provider.scenes_search(bbox_wgs84=bbox)
+    for i, sc in enumerate(scs):
+        print(sc)
+        pyotb.HaralickTextureExtraction({"in": sc.get_xs(), "out": f"/data/tex_{i}.img"})
+
+
+if __name__ == "__main__":
+    main()
diff --git a/examples/mpc_s2_stac_example.py b/examples/mpc_s2_stac_example.py
new file mode 100755
index 0000000000000000000000000000000000000000..96601edd1258015af4f42c4bcfa651a3a63af403
--- /dev/null
+++ b/examples/mpc_s2_stac_example.py
@@ -0,0 +1,18 @@
+import scenes
+import pyotb
+
+
+def main():
+
+    bbox = scenes.BoundingBox(xmin=2, ymin=45.23, xmax=2.1, ymax=45.26)
+    provider = scenes.stac.MPCProvider()
+    results = provider.stac_search(bbox_wgs84=bbox)
+    for i, result in enumerate(results):
+        print(result)
+        sc = provider.stac_item_to_scene(result)
+        dyn = pyotb.DynamicConvert(sc.get_10m_bands()[0:256, 0:256, :])
+        dyn.write(f"/tmp/{i}.png")
+
+
+if __name__ == "__main__":
+    main()
diff --git a/scenes/__init__.py b/scenes/__init__.py
index 24c44e1172774938b54a19b1c7dd7519586fb267..9fa8b30af7021702531b92424c1696be284802e7 100644
--- a/scenes/__init__.py
+++ b/scenes/__init__.py
@@ -2,16 +2,15 @@
 """
 This module helps to process local remote sensing products
 """
-__version__ = "1.1.0"
-
 from os.path import dirname, basename, isfile, join
 import glob
 import importlib
+import pkg_resources
 from .core import load_scenes, save_scenes  # noqa: 401
 from .indexation import Index  # noqa: 401
-from .download import TheiaDownloader  # noqa: 401
 from .spatial import BoundingBox  # noqa: 401
 modules = glob.glob(join(dirname(__file__), "*.py"))
 for f in modules:
     if isfile(f) and f.endswith('.py') and not f.endswith('__init__.py'):
         importlib.import_module(f".{basename(f)[:-3]}", __name__)
+__version__ = pkg_resources.require("scenes")[0].version
diff --git a/scenes/cache.py b/scenes/cache.py
index f3ad47ca65c7052bc157d282e71a1aa0d675bcac..77a944796c0e9cde9c9b62d01792541925ef3df3 100644
--- a/scenes/cache.py
+++ b/scenes/cache.py
@@ -1,27 +1,36 @@
 """
-This module provides mechanisms to enable pyotb raster caching on the local filesystem.
+This module provides mechanisms to enable pyotb raster caching on the local
+filesystem.
 """
 from __future__ import annotations
-import json
 import hashlib
-import tempfile
+import json
 import os
+import tempfile
 import pyotb
 
 
 class Cache(pyotb.Input):
     """
-    Enable to manage a given pipeline output, depending on if it's already in the cache.
+    Enable management of a given pipeline output, depending on whether it is
+    already in the cache.
     """
 
-    def __init__(self, pyotb_output, temporary_directory: str = None, output_parameter_key: str = None,
-                 extension: str = None, pixel_type: str = None):
+    def __init__(
+            self,
+            pyotb_output: pyotb.Output,
+            temporary_directory: str = None,
+            output_parameter_key: str = None,
+            extension: str = None,
+            pixel_type: str = None
+    ):
         """
         Initialize the cache.
 
         Args:
             pyotb_output: a pyotb.Output instance.
-            temporary_directory: a temporary directory for the cached files. Default is system temp directory.
+            temporary_directory: a temporary directory for the cached files.
+                Default is system temp directory.
             output_parameter_key: output parameter key (default is first key)
             extension: file extension (default: .tif)
             pixel_type: pixel type
@@ -40,13 +49,19 @@ class Cache(pyotb.Input):
         app_name = summary["name"]
 
         # Cache filename
-        output_parameters_key = pyotb_app.output_param if not output_parameter_key else output_parameter_key
-        extension = extension if extension else ".tif?&gdal:co:COMPRESS=DEFLATE&gdal:co:BIGTIFF=YES"
-        if not extension.startswith("."):
+        if not output_parameter_key:
+            output_parameter_key = pyotb_app.output_param
+        if not extension:
+            extension = ".tif?&gdal:co:COMPRESS=DEFLATE&gdal:co:BIGTIFF=YES"
+        elif not extension.startswith("."):
             extension = f".{extension}"
-        pixel_type = pixel_type if pixel_type else "float"
-        tmpdir = temporary_directory if temporary_directory else tempfile.gettempdir()
-        prefix = os.path.join(tmpdir, f"{app_name}_{output_parameters_key}_{md5sum}")
+        if not pixel_type:
+            pixel_type = "float"
+        if not temporary_directory:
+            temporary_directory = tempfile.gettempdir()
+        prefix = os.path.join(
+            temporary_directory, f"{app_name}_{output_parameter_key}_{md5sum}"
+        )
         cache_file = f"{prefix}{extension}"
         json_file = f"{prefix}.json"
 
@@ -55,7 +70,7 @@ class Cache(pyotb.Input):
             # pyotb write
             pyotb_output.write(cache_file, pixel_type=pixel_type)
             # json
-            with open(json_file, 'w', encoding='utf-8') as f:
-                json.dump(summary, f, ensure_ascii=False, indent=4)
+            with open(json_file, 'w', encoding='utf-8') as file:
+                json.dump(summary, file, ensure_ascii=False, indent=4)
 
-        super().__init__(filepath=cache_file.split("?")[0])
+        super().__init__(filepath=cache_file.split("?&")[0])
diff --git a/scenes/core.py b/scenes/core.py
index aa82ac1b38c14d5a3ed78e3d67be281d4372069e..4dbdfcab5f54ac145a0f7b57951be471aa10256b 100644
--- a/scenes/core.py
+++ b/scenes/core.py
@@ -1,18 +1,75 @@
 """
-This module provides the core classes of the library, which aims to handle the access to the rasters delivered by
-different EO products (Spot-6/7, Sentinel-2, ...)
+This module provides the core classes of the library, which aims to handle the
+access to the rasters delivered by different EO products (Spot-6/7, Sentinel-2,
+...)
+
+# Scene
+
+Carry all the metadata, assets paths, and sources, of one remote sensing
+product.
+
+``` mermaid
+classDiagram
+
+    ABC <|-- Scene
+
+    class Scene{
+        +datetime.datetime acquisition_date
+        +int epsg
+        +list extent_wgs84
+        +dict assets_paths
+        +dict sources
+        +dict additional_metadata
+        +__repr__()
+        +get_roi_footprint()
+        +__getattr__(name)
+    }
+
+```
+
+# Source
+
+Sources are members of one `Scene` instance, and deliver images ready to be
+used in otbApplication or pyotb pipelines.
+
+``` mermaid
+classDiagram
+    pyotb.Output <|-- Source
+    Source <|-- Spot67Source
+
+    class Source{
+        +__init__(root_scene, out, parent=None)
+        +root_scene
+        +parent
+        +_app_stack
+        +output_parameter_key
+        +Source new_source(*args)
+    }
+
+    class CommonImagerySource{
+        +drilled(msk_vec_file, nodata=0)
+        +cld_msk_drilled(nodata=0)
+        +resample_over(ref_img, interpolator="nn", nodata=0)
+        +clip_over_img(ref_img)
+        +clip_over_vec(ref_vec)
+    }
+
+```
 """
 from __future__ import annotations
+import datetime
 import pickle
 from abc import ABC
-import datetime
+from typing import Callable, List, Dict, Tuple, Union
 import pyotb
 
-from scenes.spatial import BoundingBox
+from scenes.cache import Cache
+from scenes.spatial import coord_list_to_bbox
+from scenes.utils import pprint
 from scenes.vector import ogr_open
 
 
-def save_scenes(scenes_list: list[Scene], pickle_file: str):
+def save_scenes(scenes_list: List[Scene], pickle_file: str):
     """
     Use pickle to save scenes
 
@@ -21,11 +78,11 @@ def save_scenes(scenes_list: list[Scene], pickle_file: str):
         pickle_file: pickle file
 
     """
-    with open(pickle_file, "wb") as w:
-        pickle.dump(scenes_list, w)
+    with open(pickle_file, "wb") as file:
+        pickle.dump(scenes_list, file)
 
 
-def load_scenes(pickle_file: str) -> list[Scene]:
+def load_scenes(pickle_file: str) -> List[Scene]:
     """
     Use pickle to load scenes
 
@@ -36,51 +93,64 @@ def load_scenes(pickle_file: str) -> list[Scene]:
         list of Scene instances
 
     """
-    with open(pickle_file, "rb") as r:
-        return pickle.load(r)
+    with open(pickle_file, "rb") as file:
+        return pickle.load(file)
 
 
 class Source(pyotb.Output):
     """
     Source class.
 
-    Holds common operations on image sources (e.g. drill, resample, extract an ROI, etc.)
+    Holds common operations on image sources (e.g. drill, resample, extract an
+    ROI, etc.)
     Inherits from pyotb.Output
 
     """
 
-    def __init__(self, root_scene: Scene, out: str | pyotb.core.otbObject | Source, parent: Source = None,
-                 output_parameter_key: str = 'out'):
+    def __init__(
+            self,
+            root_scene: Scene,
+            out: Union[str, pyotb.core.otbObject],
+            parent: Source = None,
+            output_parameter_key: str = 'out'
+    ):
         """
         Args:
-            root_scene: root Scene instance
-            out: image to deliver (can be an image filename (str), a pyotb.App, etc.)
+            root_scene: root scene
+            out: image to deliver (can be an image filename (str), a pyotb.App,
+                etc.)
             parent: parent Source instance
             output_parameter_key: output parameter key of the app of `out`
 
         """
-        assert isinstance(root_scene, Scene), f"root_scene type is {type(root_scene)}"
-        self.root_scene = root_scene  # root scene
+        assert isinstance(root_scene,
+                          Scene), f"root_scene type is {type(root_scene)}"
+        self.root_scene = root_scene
         # Here we call the pyotb.Output() constructor.
         # Since it can only be called with pyotb apps, we do the following:
-        # - if the output is a str, (e.g. the original dimap filename), we instantiate a pyotb.Input(),
+        # - if the output is a str, (e.g. the original dimap filename), we
+        #   instantiate a pyotb.Input(),
         # - else we use the original output (should be pyotb application)
         app = out  # Fine for all otbApplication, pyotb.App based classes
         if isinstance(out, str):
             app = pyotb.Input(out)
         elif isinstance(out, pyotb.Output):
             app = out.pyotb_app
+
         super().__init__(app=app, output_parameter_key=output_parameter_key)
-        assert parent is not self, "You cannot assign a new source to its parent instance"
+        assert parent is not self, "You cannot assign a new source to its " \
+                                   "parent instance"
         self.parent = parent  # parent source (is another Source instance)
-        self._app_stack = []  # list of otb applications or output to keep trace
+        self._app_stack = []  # apps/outputs kept to trace the pipeline
 
     def new_source(self, *args, **kwargs) -> Source:
         """
-        Return a new Source instance with new apps added at the end of the pipeline.
+        Return a new Source instance with new apps added at the end of the
+        pipeline.
 
         Args:
-            *args: list of pyotb.app instances to append to the existing pipeline
+            *args: list of pyotb.app instances to append to the existing
+                pipeline
             **kwargs: some keyword arguments for Source instantiation
 
         Returns:
@@ -89,9 +159,48 @@ class Source(pyotb.Output):
         """
         for new_app in args:
             self._app_stack.append(new_app)
-        return self.__class__(root_scene=self.root_scene, out=self._app_stack[-1], parent=self, **kwargs)
+        return self.__class__(root_scene=self.root_scene,
+                              out=self._app_stack[-1], parent=self, **kwargs)
+
+    def __getattr__(self, name: str):
+        """
+        This function is called when an attribute or a method has been called,
+        but not found in the object properties.
+        We override it to avoid falling back into the depths of
+        `pyotb.otbObject`.
+
+        Args:
+            name: name of the attribute or method to access
+
+        """
+        try:
+            return super().__getattr__(name)
+        except Exception as err:
+            raise AttributeError(
+                f"There is no such attribute as '{name}' here!") from err
+
+    def __str__(self) -> str:
+        """
+        Enable one instance to be used with print()
+
+        Returns:
+            a string summarizing the metadata
+
+        """
+        return pprint(self.summarize())
+
+
+class CommonImagerySource(Source):
+    """
+    Base Source capabilities
+    """
 
-    def drilled(self, msk_vec_file: str, inside: bool = True, nodata: float | int = 0) -> Source:
+    def drilled(
+            self,
+            msk_vec_file: str,
+            inside: bool = True,
+            nodata: Union[float, int] = 0
+    ) -> CommonImagerySource:
         """
         Return the source drilled from the input vector data.
 
@@ -100,7 +209,8 @@ class Source(pyotb.Output):
 
         Args:
             msk_vec_file: input vector data filename
-            inside: whether the drill is happening inside the polygon or outside (Default value = True)
+            inside: whether the drill is happening inside the polygon or
+                outside (Default value = True)
             nodata: nodata value inside holes (Default value = 0)
 
         Returns:
@@ -109,15 +219,24 @@ class Source(pyotb.Output):
         """
         if ogr_open(msk_vec_file):
             # Vector data not empty
-            rasterization = pyotb.Rasterization({"in": msk_vec_file,
-                                                 "im": self,
-                                                 "mode": "binary",
-                                                 "mode.binary.foreground": 0 if inside else 255,
-                                                 "background": 255 if inside else 0})
+            rasterization = pyotb.Rasterization({
+                "in": msk_vec_file,
+                "im": self,
+                "mode": "binary",
+                "mode.binary.foreground": 0 if inside else 255,
+                "background": 255 if inside else 0
+            })
             return self.masked(binary_mask=rasterization, nodata=nodata)
-        return self  # Nothing but a soft copy of the source
-
-    def masked(self, binary_mask: str | pyotb.core.otbObject, nodata: float | int = 0) -> Source:
+        if inside:
+            return self  # Nothing but a soft copy of the source
+        return self.new_source(
+            pyotb.BandMathX({"il": self, "exp": "0.0 * im1"}))
+
+    def masked(
+            self,
+            binary_mask: Union[str, pyotb.core.otbObject],
+            nodata: Union[float, int] = 0
+    ) -> CommonImagerySource:
         """
         Return the source masked from an uint8 binary raster (0 or 1..255).
 
@@ -131,14 +250,20 @@ class Source(pyotb.Output):
             masked source
 
         """
-        manage_nodata = pyotb.ManageNoData({"in": self,
-                                            "mode": "apply",
-                                            "mode.apply.mask": binary_mask,
-                                            "mode.apply.ndval": nodata})
+        manage_nodata = pyotb.ManageNoData({
+            "in": self,
+            "mode": "apply",
+            "mode.apply.mask": binary_mask,
+            "mode.apply.ndval": nodata
+        })
         return self.new_source(binary_mask, manage_nodata)
 
-    def resample_over(self, ref_img: str | pyotb.core.otbObject, interpolator: str = "bco",
-                      nodata: float | int = 0) -> Source:
+    def resample_over(
+            self,
+            ref_img: Union[str, pyotb.core.otbObject],
+            interpolator: str = "bco",
+            nodata: Union[float, int] = 0
+    ) -> CommonImagerySource:
         """
         Return the source superimposed over the input image
 
@@ -151,15 +276,45 @@ class Source(pyotb.Output):
             resampled image source
 
         """
-        superimpose = pyotb.Superimpose({"inm": self,
-                                         "inr": ref_img,
-                                         "interpolator": interpolator,
-                                         "fv": nodata})
+        superimpose = pyotb.Superimpose({
+            "inm": self,
+            "inr": ref_img,
+            "interpolator": interpolator,
+            "fv": nodata
+        })
         return self.new_source(ref_img, superimpose)
 
-    def clip_over_img(self, ref_img: str | pyotb.core.otbObject) -> Source:
+    def subset(
+            self,
+            startx: int,
+            starty: int,
+            sizex: int,
+            sizey: int
+    ) -> CommonImagerySource:
+        """
+        Return a subset
+
+        Args:
+            startx: start x
+            starty: start y
+            sizex: size x
+            sizey: size y
+
+        Returns:
+            subset
+
+        """
+        return self.new_source(
+            self[startx:startx+sizex, starty:starty+sizey, :]
+        )
+
+    def clip_over_img(
+            self,
+            ref_img: Union[str, pyotb.core.otbObject]
+    ) -> CommonImagerySource:
         """
-        Return the source clipped over the ROI specified by the input image extent
+        Return the source clipped over the ROI specified by the input image
+        extent.
 
         Args:
             ref_img: reference image
@@ -168,14 +323,20 @@ class Source(pyotb.Output):
             ROI clipped source
 
         """
-        extract_roi = pyotb.ExtractROI({"in": self,
-                                        "mode": "fit",
-                                        "mode.fit.im": ref_img})
+        extract_roi = pyotb.ExtractROI({
+            "in": self,
+            "mode": "fit",
+            "mode.fit.im": ref_img
+        })
         return self.new_source(ref_img, extract_roi)
 
-    def clip_over_vec(self, ref_vec: str) -> Source:
+    def clip_over_vec(
+            self,
+            ref_vec: str
+    ) -> CommonImagerySource:
         """
-        Return the source clipped over the ROI specified by the input vector extent
+        Return the source clipped over the ROI specified by the input vector
+        extent.
 
         Args:
             ref_vec: reference vector data
@@ -184,11 +345,17 @@ class Source(pyotb.Output):
             ROI clipped source
 
         """
-        return self.new_source(pyotb.ExtractROI({"in": self,
-                                                 "mode": "fit",
-                                                 "mode.fit.vect": ref_vec}))
-
-    def reproject(self, epsg: int, interpolator: str = "bco") -> Source:
+        return self.new_source(pyotb.ExtractROI({
+            "in": self,
+            "mode": "fit",
+            "mode.fit.vect": ref_vec
+        }))
+
+    def reproject(
+            self,
+            epsg: int,
+            interpolator: str = "bco"
+    ) -> CommonImagerySource:
         """
         Reproject the source into the specified EPSG
 
@@ -201,67 +368,165 @@ class Source(pyotb.Output):
 
         """
         if self.root_scene.epsg != epsg:
-            return self.new_source(pyotb.OrthoRectification({"io.in": self,
-                                                             "map": "epsg",
-                                                             "map.epsg.code": epsg,
-                                                             "interpolator": interpolator}),
-                                   output_parameter_key='io.out')
+            return self.new_source(
+                pyotb.OrthoRectification({
+                    "io.in": self,
+                    "map": "epsg",
+                    "map.epsg.code": epsg,
+                    "interpolator": interpolator
+                }),
+                output_parameter_key='io.out'
+            )
         return self  # Nothing but a soft copy of the source
 
+    def cached(
+            self,
+            temporary_directory: str = None,
+            extension: str = None,
+            pixel_type: str = None
+    ) -> CommonImagerySource:
+        """
+        Return the source cached.
+
+        Args:
+            temporary_directory: a temporary directory for the cached files.
+                Default is system temp directory.
+            extension: file extension (default: .tif)
+            pixel_type: pixel type
+
+        Returns:
+            Cached source
+
+        """
+        return self.new_source(
+            Cache(pyotb_output=self, temporary_directory=temporary_directory,
+                  extension=extension,
+                  pixel_type=pixel_type))
+
 
 class Scene(ABC):
     """
     Scene class.
 
-    The class carries all the metadata from the scene, and can be used to retrieve its imagery.
+    The class carries all the metadata and the imagery from the scene.
+    # TODO: add something to select some source to fetch epsg and extent
 
     """
 
-    def __init__(self, acquisition_date: datetime.datetime, bbox_wgs84: BoundingBox, epsg: int):
+    def __init__(
+            self,
+            acquisition_date: datetime.datetime,
+            epsg: int,
+            extent_wgs84: List[Tuple[float, float]],
+            assets_paths: Dict[str, str],
+            sources: Dict[str, Callable],
+            additional_metadata: Dict[str, str] = None,
+    ):
         """
-        Constructor
+        Initializer.
 
         Args:
             acquisition_date: Acquisition date
-            bbox_wgs84: Bounding box, in WGS84 coordinates reference system
             epsg: EPSG code
+            extent_wgs84: extent in WGS84 coordinates reference system
+            assets_paths: assets paths dict {source_name, source_path/url}
+            sources: assets sources decorators dict {source_name, callable}
+            additional_metadata: additional metadata
 
         """
-        assert isinstance(acquisition_date, datetime.datetime), "acquisition_date must be a datetime.datetime instance"
+        assert isinstance(acquisition_date, datetime.datetime), \
+            "acquisition_date must be a datetime.datetime instance"
         self.acquisition_date = acquisition_date
-        self.bbox_wgs84 = bbox_wgs84
         assert isinstance(epsg, int), "epsg must be an int"
         self.epsg = epsg
+        assert len(extent_wgs84) >= 4, "extent must have at least 4 " \
+                                       "coordinates"
+        self.extent_wgs84 = extent_wgs84
+        self.bbox_wgs84 = coord_list_to_bbox(extent_wgs84)
+        assert isinstance(assets_paths, dict)
+        self.assets_paths = assets_paths
+        assert isinstance(sources, dict)
+        for src_key, src in sources.items():
+            assert callable(src), \
+                f"source '{src_key}' must be a callable function!"
+        self.sources = sources
+        # Metadata only stores dict, lists, str, int, float, etc.
+        self.metadata = {
+            "acquisition_date": self.acquisition_date.isoformat(),
+            "epsg": self.epsg,
+            "extent_wgs84": self.extent_wgs84,
+            "bounding_box_wgs84": str(self.bbox_wgs84),
+            "rasterio_footprint_wgs84": self.get_rio_footprint()
+        }
+        self.metadata.update({"assets_paths": assets_paths.copy()})
+        if additional_metadata:
+            self.metadata.update(additional_metadata)
+
+    def __repr__(self) -> str:
+        """
+        Enable one instance to be used with print()
 
-    def get_metadata(self) -> dict[str, any]:
+        Returns:
+            a string summarizing the metadata
+
+        """
+        msg = "Metadata:\n"
+        msg += pprint(self.metadata)
+        msg += "\n"
+        msg += "Sources names: (type 'get_<name>()' to access one source)\n"
+        for src_name in self.sources:
+            msg += f"\t{src_name}\n"
+        return msg.replace("\t", "    ")
+
+    def get_rio_footprint(self):
         """
-        Metadata accessor
+        Footprint as RasterIO does.
 
         Returns:
-            metadata
+            a dict containing the footprint
 
         """
         return {
-            "Acquisition date": self.acquisition_date,
-            "Bounding box (WGS84)": self.bbox_wgs84,
-            "EPSG": self.epsg,
+            "type": "Polygon",
+            "coordinates": [self.extent_wgs84 + (self.extent_wgs84[0],)]
         }
 
-    def get_serializable_metadata(self) -> dict[str, str]:
+    def __getattr__(self, name: str) -> Source:
         """
-        Enable one instance to be used with print() to show the metadata items
+        Get a source
+        Args:
+            name: name of the source (e.g. to grab the "xs" source you have to
+                call "get_xs()")
 
-        Returns:
-            metadata items as str
         """
-        return {k: str(v) for k, v in self.get_metadata().items()}
-
-    def __repr__(self) -> str:
+        # Return None for all unknown attributes
+        # https://stackoverflow.com/questions/50888391/pickle-of-object-...
+        # ...with-getattr-method-in-python-returns-typeerror-object-no
+        if name.startswith('__') and name.endswith('__'):
+            raise AttributeError
+
+        assert name.startswith(
+            "get_"), f"{self.__class__} has no attribute '{name}'"
+
+        def get_(*args, **kwargs):
+            src_name = name[4:]  # remove "get_" to catch the source name
+            assert src_name in self.sources, \
+                f"Source {src_name} not registered. " \
+                f"Available sources: {self.sources.keys()}"
+            source_func = self.sources[src_name]
+            return source_func(*args, **kwargs)
+
+        return get_
+
+    def get_asset_path(self, key: str) -> str:
         """
-        Enable one instance to be used with print()
+        Get asset path.
+
+        Args:
+            key: asset key
 
         Returns:
-            a string summarizing the metadata
+            asset path
 
         """
-        return "\n".join([f"{key}: {value}" for key, value in self.get_metadata().items()])
+        return self.assets_paths.get(key)
diff --git a/scenes/dates.py b/scenes/dates.py
index c65972c2d3634fbe9b2bd94c65c08680e07bd676..4d4abeff18ccffa13cc1fbcd19bc2672894d12fd 100644
--- a/scenes/dates.py
+++ b/scenes/dates.py
@@ -5,18 +5,22 @@ This module aims to deal with dates.
 ---
 
 The `datetime.datetime` class is used as internal date type.
+
 ``` py
 dt = datetime.datetime(year=2020, month=12, day=2)
 ```
+
 Is equivalent to:
+
 ``` py
 dt = str2datetime("02-12-2020")
 dt = str2datetime("2020-12-02")
 dt = str2datetime("02/12/2020")
 ```
 
-The `any2datetime` method returns a `datetime.datetime` instance, whatever the input is (`str` or
-`datetime.datetime`).
+The `any2datetime` method returns a `datetime.datetime` instance, whatever the
+input is (`str` or `datetime.datetime`).
+
 ``` py
 dt1 = datetime.datetime(year=2020, month=12, day=2)
 dt2 = str2datetime("02-12-2020")
@@ -24,68 +28,80 @@ dt1_2 = any2datetime(dt1)  # same
 dt2_2 = any2datetime("02-12-2020")  # same
 ```
 
+The `get_timestamp` method converts a `datetime.datetime` instance into a
+number of seconds (float).
+
+
 ``` py
 ts = get_timestamp(dt)  # 1606780800.0
 ```
 """
 from __future__ import annotations
-import datetime
+from typing import Union
+from datetime import datetime, timezone
 
 
-MINDATE = datetime.datetime.strptime("1000-01-01", "%Y-%m-%d")
-MAXDATE = datetime.datetime.strptime("3000-01-01", "%Y-%m-%d")
+MINDATE = datetime.strptime("1000-01-01", "%Y-%m-%d")
+MAXDATE = datetime.strptime("3000-01-01", "%Y-%m-%d")
 
 
-def get_timestamp(dt: datetime.datetime) -> int:
+def get_timestamp(date: datetime) -> float:
     """
-    Converts datetime.datetime into a timestamp (in seconds)
+    Converts `datetime` into a timestamp (in seconds)
 
     Args:
-        dt: datetime.datetime instance
+        date: date
 
     Returns:
         timestamp (in seconds)
 
     """
-    return dt.replace(tzinfo=datetime.timezone.utc).timestamp()
+    return date.replace(tzinfo=timezone.utc).timestamp()
 
 
-def str2datetime(datestr: str) -> datetime.datetime:
+def str2datetime(datestr: str) -> datetime:
     """
     Converts an input date as string into a datetime instance.
 
     Args:
-        datestr: date (str) in the format "YYYY-MM-DD" or "DD/MM/YYYY" or "DD-MM-YYYY"
+        datestr: date (str) in any of those formats:
+            - "YYYY-MM-DD"
+            - "DD/MM/YYYY"
+            - "DD-MM-YYYY"
+            - "YYYYMMDD"
 
     Returns:
         A datetime.datetime instance
 
     """
     # source (with a few enhancements):
-    # https://stackoverflow.com/questions/23581128/how-to-format-date-string-via-multiple-formats-in-python
+    # https://stackoverflow.com/questions/23581128/how-to-format-date-...
+    # ...string-via-multiple-formats-in-python
     assert isinstance(datestr, str), "Input must be a str!"
-    formats = ('%Y-%m-%d', '%d/%m/%Y', '%d-%m-%Y')
+    formats = ('%Y-%m-%d', '%d/%m/%Y', '%d-%m-%Y', '%Y%m%d')
     for fmt in formats:
         try:
-            return datetime.datetime.strptime(datestr, fmt)
+            return datetime.strptime(datestr, fmt)
         except ValueError:
             pass
-    raise ValueError(f'No valid date format found. Accepted formats are {formats}. Input was: {datestr}')
+    raise ValueError(
+        f'No valid date format found. Accepted formats are {formats}. '
+        f'Input was: {datestr}')
 
 
-def any2datetime(str_or_datetime: str | datetime.datetime) -> datetime.datetime:
+def any2datetime(str_or_datetime: Union[str, datetime]) -> datetime:
     """
-    Normalizes the input such as the returned object is a datetime.datetime instance.
+    Normalizes the input such as the returned object is a `datetime` instance.
 
     Args:
-        str_or_datetime: a str (see `str2datetime()` for supported dates formats) or a datetime.datetime
+        str_or_datetime: a str (see `str2datetime()` for supported dates
+            formats) or a `datetime`
 
     Returns:
-        A datetime.datetime instance
+        A `datetime` instance
 
     """
-    if isinstance(str_or_datetime, datetime.datetime):
+    if isinstance(str_or_datetime, datetime):
         return str_or_datetime
-    assert isinstance(str_or_datetime, str), "Date must be a str, or a datetime.datetime instance!"
+    assert isinstance(str_or_datetime, str), \
+        "Date must be a str, or a datetime instance!"
     return str2datetime(str_or_datetime)
diff --git a/scenes/deepnets.py b/scenes/deepnets.py
index ee02abfec23e3fc83c60e91f36d8f8ebf40508ee..656cf1a2da3066cdb7e5fac030ecb4085191b281 100644
--- a/scenes/deepnets.py
+++ b/scenes/deepnets.py
@@ -9,9 +9,11 @@ OTBTF is needed to use this module.
 ## Super-resolution
 
 The SR4RS model can be applied over any `scenes.core.Source` instance.
-We recall that this model is intended to be used over Sentinel-2 optical images.
-For example, here is how we perform the super-resolution of a Theia S2 product.
-``` py
+We recall that this model is intended to be used over Sentinel-2 optical
+images. For example, here is how we perform the super-resolution of a Theia S2
+product:
+
+```python
 import scenes
 archive = "SENTINEL2B_..._T31TEJ_D_V1-8.zip"
 s2_scene = scenes.sentinel.Sentinel22AScene(archive)
@@ -21,16 +23,18 @@ sr.write("sr.tif")
 ```
 """
 from __future__ import annotations
+from typing import Dict, Union
 import os
 import zipfile
 import pyotb
-from scenes import download
+
+from scenes.utils import download
 
 SR4RS_MODEL_URL = "https://nextcloud.inrae.fr/s/boabW9yCjdpLPGX/download/" \
                   "sr4rs_sentinel2_bands4328_france2020_savedmodel.zip"
 
 
-def inference(dic: dict[str, any]):
+def inference(dic: Dict[str, any]):
     """
     Generic function to perform deep nets inference.
 
@@ -46,12 +50,16 @@ def inference(dic: dict[str, any]):
     try:
         output = pyotb.TensorflowModelServe(dic)
     except AttributeError:
-        print("OTBTF module has not been found in the system! It is mandatory to use deepnets. ")
+        print("OTBTF module has not been found in the system! It is mandatory "
+              "to use deepnets. ")
     return output
 
 
-def sr4rs(input_image: str | pyotb.core.otbObject, model_url: str = SR4RS_MODEL_URL,
-          tmp_dir: str = "/tmp") -> pyotb.core.App:
+def sr4rs(
+        input_image: Union[str, pyotb.core.otbObject],
+        model_url: str = SR4RS_MODEL_URL,
+        tmp_dir: str = "/tmp"
+) -> pyotb.core.App:
     """
     Applies the SR4RS model for super-resolution
 
@@ -59,7 +67,8 @@ def sr4rs(input_image: str | pyotb.core.otbObject, model_url: str = SR4RS_MODEL_
 
     Args:
         input_image: pyotb Input
-        model_url: SR4RS pre-trained model URL. Must point to a online .zip file.
+        model_url: SR4RS pre-trained model URL. Must point to a online .zip
+            file.
         tmp_dir: directory for temporary files.
 
     Returns:
@@ -75,19 +84,20 @@ def sr4rs(input_image: str | pyotb.core.otbObject, model_url: str = SR4RS_MODEL_
     tmp_zip = os.path.join(tmp_dir, os.path.basename(model_url))
     tmp_unzipped = os.path.splitext(tmp_zip)[0]
     if not os.path.exists(tmp_unzipped):
-        download.curl_url(model_url, postdata=None, out_file=tmp_zip)
+        download(model_url, out_filename=tmp_zip)
         with zipfile.ZipFile(tmp_zip, 'r') as zip_ref:
             print("Unzipping model...")
             zip_ref.extractall(tmp_dir)
 
-    return inference({"source1.il": input_image,
-                      "source1.rfieldx": rfield,
-                      "source1.rfieldy": rfield,
-                      "source1.placeholder": "lr_input",
-                      "model.dir": tmp_unzipped,
-                      "model.fullyconv": "on",
-                      "output.names": f"output_{gen_fcn}",
-                      "output.efieldx": efield,
-                      "output.efieldy": efield,
-                      "output.spcscale": ratio,
-                      })
+    return inference({
+        "source1.il": input_image,
+        "source1.rfieldx": rfield,
+        "source1.rfieldy": rfield,
+        "source1.placeholder": "lr_input",
+        "model.dir": tmp_unzipped,
+        "model.fullyconv": "on",
+        "output.names": f"output_{gen_fcn}",
+        "output.efieldx": efield,
+        "output.efieldy": efield,
+        "output.spcscale": ratio,
+    })
diff --git a/scenes/download.py b/scenes/download.py
deleted file mode 100644
index 5bf9f3f2a9341e50330633b02c74c4680cd30e6a..0000000000000000000000000000000000000000
--- a/scenes/download.py
+++ /dev/null
@@ -1,423 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-This module handles the download of Sentinel-2 images from Theia (L2A or L3A).
-
-The `scenes.download.TheiaDownloader` uses Theia credentials.
-Those must be stored in a file looking like this:
-
-```
-serveur = https://theia.cnes.fr/atdistrib
-resto = resto2
-token_type = text
-login_theia = remi.cresson@irstea.fr
-password_theia = thisisnotmyrealpassword
-```
-
-To instantiate the `scenes.download.TheiaDownloader`:
-``` py
-import scenes
-cfg_file = "config.txt"  # Theia config file
-theia = scenes.download.TheiaDownloader(cfg_file)
-```
-
-## Bounding box + temporal range
-
-The following example shows how to use the `scenes.download.TheiaDownloader` to search
-or download all Sentinel-2 images in a bounding box within a temporal range.
-
-### Search
-
-When the `download_dir` is not set, the download is not performed, and only the search results
-are returned from the function.
-``` py
-bbox = scenes.spatial.BoundingBox(43.706, 43.708, 4.317, 4.420)
-trange = ("01/01/2020", "O1/O1/2022")
-results = theia.download_in_range(bbox, trange, level="LEVEL2A")
-print(results)
-```
-
-### Download
-
-To download the files, the `download_dir` must be specified:
-``` py
-theia.download_in_range(bbox, trange, "/tmp/download/", "LEVEL2A")
-```
-When files already exist, the md5sum is computed and compared with the one in the catalog,
-in order to determine if it has to be downloaded again. If the file is already downloaded and
-is complete according to the md5sum, its download is skipped.
-
-## Bounding box + single date
-
-In the same manner, the downloader can search or download the closest images from a specific date.
-The returned dict from the function is updated with a "delta" key storing the value of the number of
-days from the specific date.
-``` py
-# For searching only
-results = theia.download_in_range(bbox, trange, "LEVEL2A")
-
-# For downloading
-theia.download_in_range(bbox, trange, "/tmp/download/", "LEVEL2A")
-```
-
-"""
-from __future__ import annotations
-import datetime
-import hashlib
-import io
-import json
-import os
-from urllib.parse import urlencode
-
-import pycurl
-from tqdm.autonotebook import tqdm
-
-from scenes import dates
-
-
-def compute_md5(fname: str) -> str:
-    """
-    Compute md5sum of a file
-
-    Args:
-        fname: file name
-
-    """
-    hash_md5 = hashlib.md5()
-    with open(fname, "rb") as f:
-        for chunk in iter(lambda: f.read(4096), b""):
-            hash_md5.update(chunk)
-    return hash_md5.hexdigest()
-
-
-def is_file_complete(filename: str, md5sum: str) -> bool:
-    """
-    Tell if a file is complete
-
-    Args:
-        filename: path of the file
-        md5sum: reference md5
-
-    """
-    # Does the file exist?
-    if not os.path.isfile(filename):
-        return False
-
-    # Does the file completed?
-    return md5sum == compute_md5(filename)
-
-
-def curl_url(url, postdata: dict[str, str], verbose: bool = False, out_file: str = None,
-             header: list[str] = None) -> str:
-    """
-    Use PyCurl to make some requests
-
-    Args:
-        url: url
-        postdata: POST data
-        verbose: boolean (Default value = False)
-        out_file: output file (Default value = None)
-        header: header. If None is kept, ['Accept:application/json'] is used (Default value = None)
-
-    Returns:
-        decoded contents
-
-    """
-    if not header:
-        header = ['Accept:application/json']
-
-    c = pycurl.Curl()
-    c.setopt(pycurl.URL, url)
-    c.setopt(pycurl.HTTPHEADER, header)
-    c.setopt(pycurl.SSL_VERIFYPEER, False)
-    c.setopt(pycurl.SSL_VERIFYHOST, False)
-    if postdata is not None:
-        c.setopt(pycurl.POST, 1)
-        postfields = urlencode(postdata)
-        c.setopt(pycurl.POSTFIELDS, postfields)
-    storage = io.BytesIO()
-    if verbose:
-        c.setopt(pycurl.VERBOSE, 1)
-    if out_file is not None:
-        with open(out_file, "wb") as fp:
-            progress_bar = None
-            last_download_d = 0
-            print("Downloading", flush=True)
-
-            def _status(download_t, download_d, *_):
-                """Callback function for c.XFERINFOFUNCTION
-                https://stackoverflow.com/questions/19724222/pycurl-attachments-and-progress-functions
-
-                Args:
-                    download_t: total
-                    download_d: already downloaded
-                    *_: any additional param (won't be used)
-
-                """
-                if download_d > 0:
-                    nonlocal progress_bar, last_download_d
-                    if not progress_bar:
-                        progress_bar = tqdm(total=download_t, unit='iB', unit_scale=True)
-                    progress_bar.update(download_d - last_download_d)
-                    last_download_d = download_d
-
-            c.setopt(c.NOPROGRESS, False)
-            c.setopt(c.XFERINFOFUNCTION, _status)
-            c.setopt(pycurl.WRITEDATA, fp)
-            c.perform()
-    else:
-        c.setopt(pycurl.WRITEFUNCTION, storage.write)
-        c.perform()
-    c.close()
-    content = storage.getvalue()
-    return content.decode(encoding="utf-8", errors="strict")
-
-
-class TheiaDownloader:
-    """The TheiaDownloader class enables to download L2A and L3A Theia products"""
-    def __init__(self, config_file: str, max_records: int = 500):
-        """
-        Args:
-            config_file: Theia configuration file
-                The file contents should look like this:
-
-                ```
-                serveur = https://theia.cnes.fr/atdistrib
-                resto = resto2
-                token_type = text
-                login_theia = remi.cresson@irstea.fr
-                password_theia = thisisnotmyrealpassword
-                ```
-
-            max_records: Maximum number of records
-
-        """
-        # Read the Theia config file
-        self.config = {}
-        with open(config_file, 'r', encoding="utf8") as r:
-            for line in r.readlines():
-                splits = line.split('=', 1)
-                if len(splits) == 2:
-                    self.config[splits[0].strip()] = splits[1].strip()
-
-        # Check keys
-        checking_keys = ["serveur", "resto", "login_theia", "password_theia", "token_type"]
-
-        # Proxy
-        if "proxy" in self.config:
-            checking_keys.extend(["login_proxy", "password_proxy"])
-
-        # Check keys
-        for key_name in checking_keys:
-            if key_name not in self.config:
-                raise ValueError(f"error with config file, missing key : {key_name}")
-
-        # Maximum number of records
-        self.max_records = max_records
-
-    @staticmethod
-    def _bbox2str(bbox):
-        """Return a str containing the bounding box
-
-        Args:
-            bbox: the bounding box (BoundingBox instance)
-
-        Returns:
-            a string
-
-        """
-        return f'{bbox.ymin},{bbox.xmin},{bbox.ymax},{bbox.xmax}'
-
-    def _get_token(self):
-        """Get the THEIA token"""
-        postdata_token = {"ident": self.config["login_theia"], "pass": self.config["password_theia"]}
-        url = f"{self.config['serveur']}/services/authenticate/"
-        token = curl_url(url, postdata_token)
-        if not token:
-            print("Empty token. Please check your credentials in config file.")
-        return token
-
-    def _query(self, dict_query):
-        """
-        Search products
-
-        Return a dict with the following structure
-            TILE_NAME
-               +---- DATE
-                +------ id
-                +------ url
-                +------ checksum
-                +------ product_name
-
-        Args:
-            dict_query: query
-
-        Returns:
-            tile dictionary
-
-        """
-        url = f"{self.config['serveur']}/{self.config['resto']}/api/collections/SENTINEL2/" \
-              f"search.json?{urlencode(dict_query)}"
-        print("Ask Theia catalog...")
-        search = json.loads(curl_url(url, None))
-
-        tiles_dict = {}
-        for record in search["features"]:
-            tile_name = record["properties"]["location"]  # T31TCJ
-            acq_date = record["properties"]["completionDate"][0:10]  # YYYY-MM-DD
-            new_acquisition = {
-                "id": record["id"],
-                "product_name": record["properties"]["productIdentifier"],  # SENTINEL2X_20191215-000000-000_L3A_T31...
-                "checksum": record["properties"]["services"]["download"]["checksum"],
-                "url": record["properties"]["services"]["download"]["url"]  # https://theia.cnes.fr/atdistr.../download
-            }
-            if tile_name not in tiles_dict:
-                tiles_dict[tile_name] = {acq_date: new_acquisition}
-            tiles_dict[tile_name].update({acq_date: new_acquisition})
-
-        return tiles_dict
-
-    def _download_tiles_and_dates(self, tiles_dict, download_dir):
-        """
-        Download a product.
-
-        Updates the "tiles_dict" with the filename of the downloaded files
-
-        Args:
-            tiles_dict: tiles dictionary. Must have the following structure:
-                       TILE_NAME
-                         +---- DATE
-                         +------ id
-                         +------ url
-                         +------ checksum
-                         +------ product_name
-          download_dir:
-
-        Returns:
-            tiles_dict updated with local_file:
-                         +------ local_file
-
-        """
-        print("Get token...")
-        token = self._get_token()
-        print(f"OK ({token})")
-        for tile_name, tile in tiles_dict.items():
-            print(f"Fetching products for tile {tile_name}...")
-            for acq_date, description in tile.items():
-                url = f"{description['url']}/?issuerId=theia"
-                header = [f'Authorization: Bearer {token}', 'Content-Type: application/json']
-                filename = f"{os.path.join(download_dir, description['product_name'])}.zip"
-
-                # Check if the destination file exist and is correct
-                if not is_file_complete(filename, description["checksum"]):
-                    print(f"Downloading {acq_date}")
-                    curl_url(url, postdata=None, out_file=filename, header=header)
-                else:
-                    print(f"{acq_date} already downloaded. Skipping.")
-                description["local_file"] = filename
-
-        return tiles_dict
-
-    def download_in_range(self, bbox_wgs84: list[float], dates_range: tuple(datetime.datetime, datetime.datetime),
-                          download_dir: str = None, level: str = "LEVEL3A") -> dict[str, dict[str, str]]:
-        """
-        Download all images within spatial and temporal ranges
-
-        Args:
-            bbox_wgs84: bounding box (WGS84)
-            dates_range: a tuple of datetime.datetime or str instances (start_date, end_date)
-            download_dir: download directory (Default value = None)
-            level: LEVEL2A, LEVEL3A, ... (Default value = "LEVEL3A")
-
-        Returns:
-            search results / downloaded files (dict)
-
-        """
-        start_date, end_date = dates_range
-
-        dict_query = {
-            "box": self._bbox2str(bbox_wgs84),  # lonmin, latmin, lonmax, latmax
-            "startDate": dates.any2datetime(start_date).strftime("%Y-%m-%d"),
-            "completionDate": dates.any2datetime(end_date).strftime("%Y-%m-%d"),
-            "maxRecords": self.max_records,
-            "processingLevel": level
-        }
-
-        # Search products
-        search_results = self._query(dict_query)
-
-        # Download products
-        if download_dir:
-            return self._download_tiles_and_dates(search_results, download_dir)
-
-        return search_results
-
-    def download_closest(self, bbox_wgs84: list[float], acq_date: datetime.datetime, download_dir: str = None,
-                         level: str = "LEVEL3A") -> dict[str, dict[str, str]]:
-        """
-        Query Theia catalog, download_closest the files
-
-        Args:
-            bbox_wgs84: bounding box (WGS84)
-            acq_date: acquisition date to look around (datetime.datetime or str)
-            download_dir: download directory (Default value = None)
-            level: LEVEL2A, LEVEL3A, ... (Default value = "LEVEL3A")
-
-        Returns:
-            search results / downloaded files (dict)
-
-        """
-
-        # Important parameters
-        ndays_seek = datetime.timedelta(days=17)  # temporal range to check for monthly synthesis
-
-        # Query products
-        actual_date = dates.any2datetime(acq_date)
-        dict_query = {'box': self._bbox2str(bbox_wgs84)}  # lonmin, latmin, lonmax, latmax
-        start_date = actual_date - ndays_seek
-        end_date = actual_date + ndays_seek
-
-        dict_query['startDate'] = start_date.strftime("%Y-%m-%d")
-        dict_query['completionDate'] = end_date.strftime("%Y-%m-%d")
-        dict_query['maxRecords'] = self.max_records
-        dict_query['processingLevel'] = level
-
-        # Search products
-        search_results = self._query(dict_query)
-
-        # Sort by closest date (add the "Delta" key/value)
-        for tile_name, tile in search_results.items():
-            print(tile_name)
-            for description_date in tile:
-                print("\t" + description_date)
-                dt = datetime.datetime.strptime(description_date, "%Y-%m-%d")
-                delta = actual_date - dt
-                delta = delta.days
-                search_results[tile_name][description_date]["delta"] = delta  # pylint: disable=R1733
-
-        # Rank delta
-        selected_tile = {}
-        for tile_name, x in search_results.items():
-            n_dates = 0
-            sorted_x = sorted(x.items(), key=lambda kv: abs(kv[1]["delta"]))
-            selected_tile[tile_name] = {}
-            for i in sorted_x:
-                description_date = i[0]
-                entry = i[1]
-                selected_tile[tile_name][description_date] = entry
-                n_dates += 1
-                if n_dates == 1:
-                    break
-
-        # Print summary
-        print("Best tiles/dates:")
-        for tile_name, tile in selected_tile.items():
-            print(f"Tile {tile_name}")
-            print("\tDate (delta)")
-            for description_date, description in tile.items():
-                print(f"\t{description_date} ({description['delta']})")
-
-        # Download products
-        if download_dir:
-            return self._download_tiles_and_dates(selected_tile, download_dir)
-
-        return selected_tile
diff --git a/scenes/indexation.py b/scenes/indexation.py
index ebfc76dace7a9dd360521883ac2af5eba5a750e1..287f4a9bde55c66b5daa4ea804e446414422307f 100644
--- a/scenes/indexation.py
+++ b/scenes/indexation.py
@@ -1,7 +1,8 @@
 """
 This module contains stuff for the spatio-temporal indexation of scenes.
 
-The `scenes.indexation.Index` uses an R-Tree to perform the indexation of objects in the (x, y, t) domain.
+The `scenes.indexation.Index` uses an R-Tree to perform the indexation of
+objects in the (x, y, t) domain.
 
 ---
 
@@ -17,18 +18,21 @@ index = scenes.indexation.Index(scenes_list)  # build the index
 
 ## Search from a `BoundingBox`
 
-The `find_indices()` method returns the indices of the indexed `scenes.core.Scene` instances matching
+The `find_indices()` method returns the indices of the indexed
+`scenes.core.Scene` instances matching the query.
+The `find()` method returns the indexed `scenes.core.Scene` instances matching
 the query.
-The `find()` method returns the indexed `scenes.core.Scene` instances matching the query.
+
 ``` py
 bbox = BoundingBox(43.706, 43.708, 4.317, 4.420)
-indices_results = index.find_indices(bbox)  # spatial query returning scenes indices
+indices_results = index.find_indices(bbox)  # spatial query returning indices
 scenes_results = index.find(bbox)  # spatial query returning scenes instances
 ```
 
 ## Spatio-temporal search
 
 A date min and/or a date max can be added in the query.
+
 ``` py
 scenes_results = index.find(bbox, "01/01/2020" "01/01/2022")
 ```
@@ -36,6 +40,7 @@ scenes_results = index.find(bbox, "01/01/2020" "01/01/2022")
 ## Search from a vector data file
 
 The search can also be performed with a vector data file.
+
 ``` py
 vec = "/path/to/vector.gpkg"
 scenes_results = index.find(vec, "01/01/2020" "01/01/2022")
@@ -43,29 +48,42 @@ scenes_results = index.find(vec, "01/01/2020" "01/01/2022")
 
 """
 from __future__ import annotations
-import datetime
+
+from datetime import datetime, timedelta
+from typing import List, Tuple, Union
 import rtree
+
 from scenes.core import Scene
 from scenes.dates import get_timestamp, any2datetime, MINDATE, MAXDATE
-from scenes.vector import reproject_ogr_layer, get_bbox_wgs84, ogr_open
 from scenes.spatial import poly_overlap, BoundingBox
+from scenes.vector import reproject_ogr_layer, get_bbox_wgs84, ogr_open
 
 
-def _bbox(bbox_wgs84: BoundingBox, date_min: datetime.datetime | str, date_max: datetime.datetime | str) -> tuple:
+def _bbox(
+        bbox_wgs84: BoundingBox,
+        date_min: Union[datetime, str],
+        date_max: Union[datetime, str]
+) -> Tuple[float, float, float, float, float, float]:
     """
     Return a bounding box in the domain (lat, lon, time)
 
     Args:
         bbox_wgs84: The bounding box in WGS84 (BoundingBox instance)
-        date_min: date min (datetime.datetime or str)
-        date_max: date max (datetime.datetime or str)
+        date_min: date min (datetime or str)
+        date_max: date max (datetime or str)
 
     Returns:
         item for rtree
 
     """
-    return (bbox_wgs84.xmin, bbox_wgs84.ymin, get_timestamp(any2datetime(date_min)),
-            bbox_wgs84.xmax, bbox_wgs84.ymax, get_timestamp(any2datetime(date_max)))
+    return (
+        bbox_wgs84.xmin,
+        bbox_wgs84.ymin,
+        get_timestamp(any2datetime(date_min)),
+        bbox_wgs84.xmax,
+        bbox_wgs84.ymax,
+        get_timestamp(any2datetime(date_max))
+    )
 
 
 class Index:
@@ -73,7 +91,7 @@ class Index:
     Stores an indexation structure for a list of Scenes
     """
 
-    def __init__(self, scenes_list: list[Scene]):
+    def __init__(self, scenes_list: List[Scene]):
         """
         Args:
             scenes_list: list of scenes
@@ -86,24 +104,32 @@ class Index:
         properties.dimension = 3
         self.index = rtree.index.Index(properties=properties)
         for scene_idx, scene in enumerate(scenes_list):
-            dt = scene.acquisition_date
-            dt_min = dt - datetime.timedelta(days=1)
-            dt_max = dt + datetime.timedelta(days=1)
-            new_bbox = _bbox(bbox_wgs84=scene.bbox_wgs84, date_min=dt_min, date_max=dt_max)
+            delta = scene.acquisition_date
+            dt_min = delta - timedelta(days=1)
+            dt_max = delta + timedelta(days=1)
+            new_bbox = _bbox(
+                bbox_wgs84=scene.bbox_wgs84, date_min=dt_min, date_max=dt_max
+            )
             self.index.insert(scene_idx, new_bbox)
 
-    def find_indices(self, vector_or_bbox: str | BoundingBox, date_min: datetime.datetime | str = None,
-                     date_max: datetime.datetime | str = None) -> list[int]:
+    def find_indices(
+            self,
+            vector_or_bbox: Union[str, BoundingBox],
+            date_min: Union[datetime, str] = None,
+            date_max: Union[datetime, str] = None
+    ) -> List[int]:
         """
         Search the intersecting scenes, and return their indices
 
-        The search is performed using the provided bounding box, or the provided input
-        vector data file. The date_min and date_max are both optional.
+        The search is performed using the provided bounding box, or the
+        provided input vector data file. The date_min and date_max are both
+        optional.
 
         Args:
-            vector_or_bbox: An input vector data file (str) or a bounding box in WGS84 (BoundingBox instance)
-            date_min: date min (datetime.datetime or str) (Default value = None)
-            date_max: date max (datetime.datetime or str) (Default value = None)
+            vector_or_bbox: An input vector data file (str) or a bounding box
+                in WGS84 (BoundingBox instance)
+            date_min: date min (datetime or str) (Default value = None)
+            date_max: date max (datetime or str) (Default value = None)
 
         Returns:
             list of indices
@@ -121,13 +147,17 @@ class Index:
             actual_bbox_wgs84 = get_bbox_wgs84(vector_or_bbox)
 
         # Search in R-Tree
-        bbox_search = _bbox(bbox_wgs84=actual_bbox_wgs84, date_min=date_min, date_max=date_max)
+        bbox_search = _bbox(
+            bbox_wgs84=actual_bbox_wgs84, date_min=date_min, date_max=date_max
+        )
         results_indices = list(self.index.intersection(bbox_search))
 
         # Filter results (if vector_file)
         if vector_file:
             poly_ds = ogr_open(vector_file=vector_or_bbox)
-            poly_layer_wgs84 = reproject_ogr_layer(poly_ds.GetLayer(), epsg=4326)
+            poly_layer_wgs84 = reproject_ogr_layer(
+                poly_ds.GetLayer(), epsg=4326
+            )
             filtered_indices = []
             for i in results_indices:
                 bbox_wgs84_geom = self.scenes_list[i].bbox_wgs84.to_ogrgeom()
@@ -136,19 +166,26 @@ class Index:
             return filtered_indices
         return results_indices
 
-    def find(self, vector_or_bbox: str | BoundingBox, date_min: datetime.datetime | str = None,
-             date_max: datetime.datetime | str = None) -> list[Scene]:
+    def find(
+            self,
+            vector_or_bbox: Union[str, BoundingBox],
+            date_min: Union[datetime, str] = None,
+            date_max: Union[datetime, str] = None
+    ) -> List[Scene]:
         """
         Search the intersecting scenes, and return them
 
         Args:
-            vector_or_bbox: An input vector data file (str) or a bounding box in WGS84 (BoundingBox instance)
-            date_min: date min (datetime.datetime or str) (Default value = None)
-            date_max: date max (datetime.datetime or str) (Default value = None)
+            vector_or_bbox: An input vector data file (str) or a bounding box
+                in WGS84 (BoundingBox instance)
+            date_min: date min (datetime or str) (Default value = None)
+            date_max: date max (datetime or str) (Default value = None)
 
         Returns:
-            list of indices
+            list of `Scene` instances
 
         """
-        indices = self.find_indices(vector_or_bbox=vector_or_bbox, date_min=date_min, date_max=date_max)
+        indices = self.find_indices(
+            vector_or_bbox=vector_or_bbox, date_min=date_min, date_max=date_max
+        )
         return [self.scenes_list[i] for i in indices]
diff --git a/scenes/raster.py b/scenes/raster.py
index 60998263dc979099d4529cd2cfada5ade30c92fe..af09efb2c26b2851d3b00dff9992a7ff0f08a0e9 100644
--- a/scenes/raster.py
+++ b/scenes/raster.py
@@ -2,10 +2,12 @@
 This module contains a set of functions to deal with GDAL datasets (rasters)
 """
 from __future__ import annotations
+from typing import Tuple, Union
 import pyotb
 from osgeo import osr, gdal
-from scenes.vector import reproject_coords
+
 from scenes.spatial import epsg2srs, coord_list_to_bbox, BoundingBox
+from scenes.vector import reproject_coords
 
 
 def get_epsg(gdal_ds: gdal.Dataset) -> int:
@@ -25,7 +27,9 @@ def get_epsg(gdal_ds: gdal.Dataset) -> int:
     return int(epsg)
 
 
-def get_extent(raster: gdal.Dataset | str | pyotb.core.otbObject) -> tuple[tuple]:
+def get_extent(
+        raster: Union[gdal.Dataset, str, pyotb.core.otbObject]
+) -> Tuple[Tuple[float]]:
     """
     Return list of corners coordinates from a raster
 
@@ -53,10 +57,12 @@ def get_extent(raster: gdal.Dataset | str | pyotb.core.otbObject) -> tuple[tuple
     xmax = xmin + szx * spcx
     ymin = ymax + szy * spcy
 
-    return (xmin, ymax), (xmax, ymax), (xmax, ymin), (xmin, ymin)
+    return tuple([(xmin, ymax), (xmax, ymax), (xmax, ymin), (xmin, ymin)])
 
 
-def get_projection(raster: gdal.Dataset | str | pyotb.core.otbObject) -> str:
+def get_projection(
+        raster: Union[gdal.Dataset, str, pyotb.core.otbObject]
+) -> str:
     """
     Returns the projection (as str) of a raster.
 
@@ -73,7 +79,9 @@ def get_projection(raster: gdal.Dataset | str | pyotb.core.otbObject) -> str:
     return gdal_ds.GetProjection()
 
 
-def get_extent_wgs84(raster: gdal.Dataset | str | pyotb.core.otbObject) -> tuple[tuple]:
+def get_extent_wgs84(
+        raster: Union[gdal.Dataset, str, pyotb.core.otbObject]
+) -> Tuple[Tuple[float]]:
     """
     Returns the extent in WGS84 CRS from a raster
 
@@ -93,7 +101,7 @@ def get_extent_wgs84(raster: gdal.Dataset | str | pyotb.core.otbObject) -> tuple
     return reproject_coords(coords, src_srs, tgt_srs)
 
 
-def get_epsg_extent_bbox(filename: str) -> tuple[int, tuple, BoundingBox]:
+def get_epsg_extent_wgs84(filename: str) -> Tuple[int, Tuple[Tuple[float]]]:
     """
     Returns (epsg, extent_wgs84) from a raster file that GDAL can open.
 
@@ -101,18 +109,20 @@ def get_epsg_extent_bbox(filename: str) -> tuple[int, tuple, BoundingBox]:
         filename: file name
 
     Returns:
-        (epsg, extent_wgs84, bbox_wgs84)
+        (epsg, extent_wgs84)
 
     """
     gdal_ds = gdal.Open(filename)
+    assert gdal_ds, f"File {filename} not accessible by GDAL"
     epsg = get_epsg(gdal_ds)
     extent_wgs84 = get_extent_wgs84(gdal_ds)
-    bbox_wgs84 = coord_list_to_bbox(extent_wgs84)
 
-    return epsg, extent_wgs84, bbox_wgs84
+    return epsg, extent_wgs84
 
 
-def get_bbox_wgs84(raster: gdal.Dataset | str | pyotb.core.otbObject) -> BoundingBox:
+def get_bbox_wgs84(
+        raster: Union[gdal.Dataset, str, pyotb.core.otbObject]
+) -> BoundingBox:
     """
     Returns the bounding box from a raster.
 
diff --git a/scenes/sentinel.py b/scenes/sentinel.py
index 2c1a4efbcf099a02cd372d70b5c9f7f8e9455259..a2776f6dd814e552b6b32b91805176c3c6bb298a 100644
--- a/scenes/sentinel.py
+++ b/scenes/sentinel.py
@@ -1,183 +1,213 @@
 """
 This module contains Sentinel-2 classes (sources and scenes).
 
-Right now everything is ready to use THEIA L2A and L3A products.
+# Overview
 
-## `Scene` based classes
+Right now, the following Sentinel products are implemented:
+- Level 2A from Theia (from local files)
+- Level 3A from Theia (from local files)
+- Level 2A from Microsoft Planetary Computer (from STAC)
 
-The `Sentinel22AScene` and `Sentinel23AScene` classes carry metadata and sources
-respectively for Sentinel-2 Level 2A and Level 3A products. They both inherit from
-the generic abstract `Sentinel2SceneBase` class, which itself inherits from `Scene`.
+`Scene` classes:
 
 ``` mermaid
 classDiagram
 
     Scene <|-- Sentinel2SceneBase
-    Sentinel2SceneBase <|-- Sentinel22AScene
-    Sentinel2SceneBase <|-- Sentinel23AScene
-
-    class Scene{
-        +datetime acquisition_date
-        +int epsg
-        +bbox_wgs84
-        +get_metadata()
-        +__repr__()
-    }
+    Sentinel2SceneBase <|-- Sentinel2MPCScene
+    Sentinel2SceneBase <|--  Sentinel2TheiaScene
+    Sentinel2TheiaScene <|-- Sentinel22AScene
+    Sentinel2TheiaScene <|-- Sentinel23AScene
 
-    class Sentinel2SceneBase{
-        +__init__(archive, tag)
-        +get_file()
-        +get_band()
-        +get_metadata()
-        +Sentinel2Source get_10m_bands()
-        +Sentinel2Source get_20m_bands()
+```
 
-    }
+`Source` classes:
 
-    class Sentinel22AScene{
-        +__init__(archive)
-        +str clm_r1_msk_file
-        +str clm_r2_msk_file
-        +str edg_r1_msk_file
-        +str edg_r2_msk_file
-        +get_metadata()
-    }
+``` mermaid
+classDiagram
 
-    class Sentinel23AScene{
-        +__init__(archive)
-        +str flg_r1_msk_file
-        +str flg_r2_msk_file
-        +get_metadata()
-    }
+    Source <|-- CommonImagerySource
+    CommonImagerySource <|--  Sentinel2TheiaGenericSource
+    Sentinel2TheiaGenericSource <|-- Sentinel2Theia2ASource
+    Sentinel2TheiaGenericSource <|-- Sentinel2Theia3ASource
+
+```
+
+The `Sentinel2MPCScene` class delivers sources which are of type
+`CommonImagerySource`.
+
+# Sources
+
+The `Sentinel2SceneBase` class contains generic methods to access all
+Sentinel-2 products `Sources` members.
+- Single band getters (e.g. b3, b4)
+- Concatenated bands getters (e.g. 10m bands)
+
+You can do the following to list available sources:
+
+```python
+print(sc)  # show everything, including the sources names
+print(sc.sources.keys())  # show only the sources keys
+```
+
+## Single image getters
+
+Any `Source` corresponding to a specific raster asset can be retrieved using
+the `get_<name>()` function call.
+For instance this enables to retrieve the band 4:
+
+```python
+b4 = sc.get_b4()
 ```
 
-### Instantiation
+## Concatenated bands
+
+The sources carry the following images:
+
+- The 10m spacing channels, in the following order: 4, 3, 2, 8
+- The 20m spacing channels, in the following order: 5, 6, 7, 8a, 11, 12
+
+```python
+bands_10m = sc.get_10m_bands()
+bands_20m = sc.get_20m_bands()
+```
+
+Subset of bands can also be requested:
+
+```python
+bands_10m_2a = sc_2a.get_10m_bands("b4")
+bands_10m_2a = sc_2a.get_10m_bands("b4", "b8")
+bands_10m_2a = sc_2a.get_10m_bands(["b4", "b8"])
+```
+
+# Theia
+
+The `Sentinel22AScene` and `Sentinel23AScene` classes carry metadata and
+sources respectively for Sentinel-2 Level 2A and Level 3A products. They both
+inherit from the generic abstract `Sentinel2TheiaScene` class.
+
+## Instantiation
 
-`Sentinel22AScene` and `Sentinel23AScene` are instantiated from the archive (.zip file)
-or the product folder.
-``` py
+`Sentinel22AScene` and `Sentinel23AScene` are instantiated from the archive
+(.zip file) or the product folder.
+
+```python
 import scenes
 sc_2a = scenes.sentinel.Sentinel22AScene("SENTINEL2B_..._T31TEJ_D_V1-8.zip")
 sc_3a = scenes.sentinel.Sentinel23AScene("SENTINEL2X_...L3A_T31TEJ_D_V1-0.zip")
 ```
 
-### Metadata
+## Metadata
 
-The scene metadata can be accessed with the `get_metadata()` method, like any
+The scene metadata can be accessed with the `metadata` attribute, like any
 `scenes.core.Scene` instance.
-``` py
-md_dict = sc_2a.get_metadata()
-for key, value in md_dict.items():
-    print(f"{key}: {value}")
+
+```python
+md_dict = sc_2a.metadata
+scenes.utils.pprint(md_dict)
 ```
 
-## `Source` based classes
+## Sources
 
-The `Sentinel22ASource` and `Sentinel23ASource` classes carry imagery sources
-respectively for Sentinel-2 Level 2A and Level 3A products. They both inherit from
-the generic `Sentinel2Source` class, which itself inherits from `Source`.
+The `Sentinel2Theia2ASource` and `Sentinel2Theia3ASource` classes carry imagery
+sources respectively for Sentinel-2 Level 2A and Level 3A products. They both
+inherit from the generic `Sentinel2TheiaGenericSource` class, which itself
+inherits from `CommonImagerySource`.
 
 ``` mermaid
 classDiagram
 
-    Source <|-- Sentinel2Source
-    Sentinel2Source <|-- Sentinel22ASource
-    Sentinel2Source <|-- Sentinel23ASource
-
-    class Source{
-        +__init__(root_scene, out, parent=None)
-        +Scene root_scene
-        +Source parent
-        +Source drilled(msk_vec_file, nodata=0)
-        +Source cld_msk_drilled(nodata=0)
-        +Source resample_over(ref_img, interpolator="nn", nodata=0)
-        +Source clip_over_img(ref_img)
-        +Source clip_over_vec(ref_vec)
-        +Source new_source(*args)
-    }
+    Sentinel2TheiaGenericSource <|-- Sentinel2Theia2ASource
+    Sentinel2TheiaGenericSource <|-- Sentinel2Theia3ASource
 
-    class Sentinel2Source{
+    class Sentinel2TheiaGenericSource{
         +R1_SIZE
         +R2_SIZE
-        +Sentinel2Source msk_drilled(msk_dict, exp, nodata=0)
+        +msk_drilled(msk_dict, exp, nodata=0)
     }
 
-    class Sentinel22ASource{
-        +Sentinel22ASource cld_msk_drilled(nodata=0)
+    class Sentinel2Theia2ASource{
+        +cld_msk_drilled(nodata=0)
     }
 
-    class Sentinel23ASource{
-        +Sentinel23ASource flg_msk_drilled(keep_flags_values=(3, 4), nodata=0)
+    class Sentinel2Theia3ASource{
+        +flg_msk_drilled(keep_flags_values=(3, 4), nodata=0)
     }
 ```
 
-The sources carry the following images for Level 2 and Level 3 products:
-
-- The 10m spacing channels, in the following order: 4, 3, 2, 8
-- The 20m spacing channels, in the following order: 5, 6, 7, 8a, 11, 12
-
-```
-bands_10m_2a = sc_2a.get_10m_bands()
-bands_20m_2a = sc_2a.get_20m_bands()
-bands_10m_3a = sc_3a.get_10m_bands()
-bands_20m_3a = sc_3a.get_20m_bands()
-```
-
-The `Sentinel22ASource` implements the `Sentinel22ASource.cld_msk_drilled` method, that
-enable to mask the cloud masks over the root source, with the specified
-no-data value (default is 0).
+The `Sentinel2Theia2ASource` implements the
+`Sentinel2Theia2ASource.cld_msk_drilled` method, which enables masking the
+cloudy pixels of the root source with the specified no-data value (default 0).
 The following example show how to derive a child source replacing the
 pixels that are in the clouds with pixels at -10000 (which is the no-data
 value in Theia products):
-``` py
+
+```python
 drilled = bands_10m_2a.cld_msk_drilled()
 ```
 
-The `Sentinel23ASource` implements the `Sentinel23ASource.flg_msk_drilled` method, that
-enable to mask the pixels on a selection of labels of the quality mask.
-The following example shows how to mask pixels of anything other that land with -10000:
-``` py
+The `Sentinel2Theia3ASource` implements the
+`Sentinel2Theia3ASource.flg_msk_drilled` method, which enables masking pixels
+based on a selection of labels of the quality mask. The following example
+shows how to mask pixels of anything other than land with -10000:
+
+```python
 drilled = bands_10m_3a.flg_msk_drilled(keep_flags_values=(4,))
 ```
 
-`Sentinel22ASource` and `Sentinel23ASource` inherit from `scenes.core.Source`,
-hence implemented sources transformations (e.g. `scenes.core.Source.masked`,
-`scenes.core.Source.clip_over_img`, `scenes.core.Source.resample_over`,
-`scenes.core.Source.reproject`, etc.
-``` py
+`Sentinel2Theia2ASource` and `Sentinel2Theia3ASource` inherit from
+`scenes.core.CommonImagerySource`, hence implement generic source transformations
+(e.g. `scenes.core.CommonImagerySource.masked`,
+`scenes.core.CommonImagerySource.clip_over_img`,
+`scenes.core.CommonImagerySource.resample_over`,
+`scenes.core.CommonImagerySource.reproject`, etc.)
+
+```python
 clipped = drilled.clip_over_img(roi)
 reprojected = clipped.reproject(epsg=4328)
 ```
-Note that the resulting transformed `Sentinel22ASource` and `Sentinel23ASource` are still
-instances of `Sentinel22ASource` and `Sentinel23ASource` even after generic operations
-implemented in `scenes.core.Source`.
+
+Note that the resulting transformed `Sentinel2Theia2ASource` and
+`Sentinel2Theia3ASource` are still instances of `Sentinel2Theia2ASource` and
+`Sentinel2Theia3ASource` after generic operations implemented in
+`scenes.core.CommonImagerySource`.
 
 ## Usage with pyotb
 
 As `scenes.core.Source`, it also can be used like any `pyotb.core.otbObject`.
-The following example show how to use an OTB application with a source at input.
-``` py
+The following example shows how to use an OTB application with a source as
+input.
+
+```python
 rgb_nice = pyotb.DynamicConvert(reprojected)
 rgb_nice.write("image.tif", pixel_type="uint8")
 ```
 
 """
 from __future__ import annotations
-import datetime
 from abc import abstractmethod
-from typing import Type
+from datetime import datetime
+from functools import partial
+from typing import Type, Dict, Tuple, List, Any, Union
+from tqdm.autonotebook import tqdm
 import pyotb
+
 from scenes import utils
-from scenes.core import Source, Scene
-from scenes.raster import get_epsg_extent_bbox
+from scenes.core import Scene, CommonImagerySource, Source
+from scenes.raster import get_epsg_extent_wgs84
 
 
-class Sentinel2Source(Source):
+class Sentinel2TheiaGenericSource(CommonImagerySource):
     """Class for generic Sentinel-2 sources"""
-    R1_SIZE = 10980
-    R2_SIZE = 5490
-
-    def msk_drilled(self, msk_dict: dict[int, str], exp: str, nodata: float | int = 0) -> Sentinel2Source:
+    R1_SPC = 10.0
+    R2_SPC = 20.0
+
+    def msk_drilled(
+            self,
+            msk_dict: Dict[float, str],
+            exp: str,
+            nodata: Union[float, int] = 0
+    ) -> Sentinel2TheiaGenericSource:
         """
         Args:
             msk_dict: dict of masks
@@ -188,15 +218,20 @@ class Sentinel2Source(Source):
             new masked source
 
         """
-        img_size = pyotb.ReadImageInfo(self).GetParameterInt('sizex')
-        bm = pyotb.BandMath({"il": msk_dict[img_size], "exp": exp})
-        return self.masked(binary_mask=bm, nodata=nodata)
+        img_spc = pyotb.ReadImageInfo(self).GetParameterInt('spacingx')
+        if img_spc not in msk_dict:
+            raise KeyError(f"No mask for image spacing {img_spc}")
+        binary_mask = pyotb.BandMath({"il": msk_dict[img_spc], "exp": exp})
+        return self.masked(binary_mask=binary_mask, nodata=nodata)
 
 
-class Sentinel22ASource(Sentinel2Source):
+class Sentinel2Theia2ASource(Sentinel2TheiaGenericSource):
     """Sentinel-2 level 2A source class"""
 
-    def cld_msk_drilled(self, nodata: float | int = -10000) -> Sentinel22ASource:
+    def cld_msk_drilled(
+            self,
+            nodata: Union[float, int] = -10000
+    ) -> Sentinel2Theia2ASource:
         """Return the source drilled from the cloud mask
 
         Args:
@@ -206,89 +241,225 @@ class Sentinel22ASource(Sentinel2Source):
             drilled source
 
         """
-        return self.msk_drilled(msk_dict={self.R1_SIZE: self.root_scene.cld_r1_msk_file,
-                                          self.R2_SIZE: self.root_scene.cld_r2_msk_file},
-                                exp="im1b1==0?255:0",
-                                nodata=nodata)
+        return self.msk_drilled(
+            msk_dict={
+                self.R1_SPC: self.root_scene.get_asset_path("clm_r1"),
+                self.R2_SPC: self.root_scene.get_asset_path("clm_r2")
+            },
+            exp="im1b1==0?255:0",
+            nodata=nodata
+        )
 
 
-class Sentinel23ASource(Sentinel2Source):
+class Sentinel2Theia3ASource(Sentinel2TheiaGenericSource):
     """Sentinel-2 level 3A source class"""
 
-    def flg_msk_drilled(self, keep_flags_values: tuple[int] = (3, 4),
-                        nodata: float | int = -10000) -> Sentinel23ASource:
+    def flg_msk_drilled(
+            self,
+            keep_flags_values: Tuple[int] = (3, 4),
+            nodata: Union[float, int] = -10000
+    ) -> Sentinel2Theia3ASource:
         """
         Return the source drilled from the FLG mask
 
         Args:
-            keep_flags_values: flags values to keep (Default value = (3, 4)). Can be:
+            keep_flags_values: flags values to keep (Default value = (3, 4)).
+                Can be:
 
                - 0 = No data
                - 1 = Cloud
                - 2 = Snow
                - 3 = Water
                - 4 = Land
-               (source: https://labo.obs-mip.fr/multitemp/theias-l3a-product-format/)
+
+                (source:
+                https://labo.obs-mip.fr/multitemp/theias-l3a-product-format/)
             nodata: nodata value inside holes (Default value = -10000)
 
         Returns:
             drilled source
 
         """
-        exp = "||".join([f"im1b1=={val}" for val in keep_flags_values]) + "?255:0"
-        return self.msk_drilled(msk_dict={self.R1_SIZE: self.root_scene.flg_r1_msk_file,
-                                          self.R2_SIZE: self.root_scene.flg_r2_msk_file},
-                                exp=exp,
-                                nodata=nodata)
+        cond = "||".join([f"im1b1=={val}" for val in keep_flags_values])
+        exp = cond + "?255:0"
+        return self.msk_drilled(
+            msk_dict={
+                self.R1_SPC: self.root_scene.get_asset_path("flg_r1"),
+                self.R2_SPC: self.root_scene.get_asset_path("flg_r2")
+            },
+            exp=exp,
+            nodata=nodata
+        )
+
+
+BANDS_1 = ["b4", "b3", "b2", "b8"]
+BANDS_2 = ["b5", "b6", "b7", "b8a", "b11", "b12"]
 
 
 class Sentinel2SceneBase(Scene):
     """Base class for Sentinel-2 images"""
 
+    bands_keys = BANDS_1 + BANDS_2
+    concatenated_bands_dict = {
+        "10m_bands": BANDS_1,
+        "20m_bands": BANDS_2
+    }
+
+    def __init__(
+            self,
+            acquisition_date: datetime,
+            epsg: int,
+            extent_wgs84: List[Tuple(float, float)],
+            assets_paths: List[str, str],
+            src_classes: Dict[str, Type[Source]] = None,
+            additional_metadata: Dict[str, Any] = None,
+    ):
+        """
+        Initialize the Sentinel-2 Scene
+
+        Args:
+            acquisition_date: Acquisition date
+            epsg: EPSG code
+            extent_wgs84: extent in WGS84 coordinates reference system
+            assets_paths: assets dictionary
+                {source_name, source_path/url}, e.g.:
+                {'b4': '/vsicurl/https://.../...B4.tif'}
+            src_classes: imagery sources class
+            additional_metadata: additional metadata
+
+        """
+
+        assert all(key in src_classes for key in self.bands_keys), \
+            "Some keys in concatenated_bands_dict are not in src_classes"
+
+        # Sources
+        sources = {
+            key: partial(src_class, root_scene=self, out=assets_paths[key])
+            for key, src_class in src_classes.items()
+        }
+
+        # Sources for concatenated bands: "10m_bands", "20m_bands"
+        for key, default_bands_names in self.concatenated_bands_dict.items():
+            sources.update({
+                key: partial(
+                    self._get_bands,
+                    src_classes[default_bands_names[0]],
+                    default_bands_names
+                )
+            })
+
+        super().__init__(
+            acquisition_date=acquisition_date,
+            epsg=epsg,
+            extent_wgs84=extent_wgs84,
+            assets_paths=assets_paths,
+            sources=sources,
+            additional_metadata=additional_metadata
+        )
+
+    def _get_bands(
+            self,
+            imagery_src_class,
+            default_bands_names: tuple(str),
+            *args
+    ) -> Source:
+        """
+        Returns a Source for the concatenated bands
+
+        Args:
+            imagery_src_class: source class
+            default_bands_names: names of available bands
+            *args: can be:
+             - names of the bands
+             - a name
+             - a list or a tuple of names
+
+        Returns:
+            Selected spectral bands
+
+        """
+        bands = default_bands_names
+        if args:
+            bands = []
+            for arg in args:
+                bands += arg if isinstance(arg, (list, tuple)) else [arg]
+        for band in bands:
+            assert isinstance(band, str), f"{band} is not a string"
+        # user can use upper or lower cases
+        bands = [band.lower() for band in bands]
+        assert all(band in default_bands_names for band in bands), \
+            f"Some bands in {bands} are not in the available bands " \
+            f"{default_bands_names}"
+        concat = pyotb.ConcatenateImages(
+            [self.get_asset_path(band_name) for band_name in bands]
+        )
+        return imagery_src_class(self, concat)
+
+
+class Sentinel2TheiaScene(Sentinel2SceneBase):
+    """Base class for Sentinel-2 images from Theia"""
+
     @abstractmethod
-    def __init__(self, archive: str, tag: str, source_class: Type[Source]):
+    def __init__(
+            self,
+            archive: str,
+            tag: str,
+            src_classes: Dict[str, Type[Source]]
+    ):
         """
         Args:
             archive: product .zip or directory
             tag: pattern to match in filenames, e.g. "FRE"
+            src_classes: imagery sources classes dict
+
         """
         self.archive = archive
 
         # Retrieve the list of .tif files
         is_zip = self.archive.lower().endswith(".zip")
         if is_zip:
-            print("Input type is a .zip archive")
+            # print("Input type is a .zip archive")
             files = utils.list_files_in_zip(self.archive)
             self.files = [utils.to_vsizip(self.archive, f) for f in files]
         else:
-            print("Input type is a directory")
-            self.files = utils.find_files_in_all_subdirs(self.archive, "*.tif", case_sensitive=False)
-
-        # Assign bands files
-        self.band2_file = self.get_band(tag, "B2")
-        self.band3_file = self.get_band(tag, "B3")
-        self.band4_file = self.get_band(tag, "B4")
-        self.band8_file = self.get_band(tag, "B8")
-        self.band5_file = self.get_band(tag, "B5")
-        self.band6_file = self.get_band(tag, "B6")
-        self.band7_file = self.get_band(tag, "B7")
-        self.band8a_file = self.get_band(tag, "B8A")
-        self.band11_file = self.get_band(tag, "B11")
-        self.band12_file = self.get_band(tag, "B12")
-
-        # EPSG, bbox
-        epsg, _, bbox_wgs84 = get_epsg_extent_bbox(self.band2_file)
-
-        # Date
-        onefile = utils.basename(self.band2_file)  # SENTINEL2A_20180630-105440-000_L2A_T31TEJ_D_V1-8
-        datestr = onefile.split("_")[1]  # 20180630-105440
-        acquisition_date = datetime.datetime.strptime(datestr, '%Y%m%d-%H%M%S-%f')
-
-        # Source class
-        self.source_class = source_class
+            # print("Input type is a directory")
+            self.files = utils.find_files_in_all_subdirs(
+                self.archive, "*.tif", case_sensitive=False
+            )
+
+        # Assets
+        assets_paths = {}
+
+        # In Theia products, the suffixes are always uppercase (B4, ..., B8A)
+        # so we need to retrieve the files from the uppercase key.
+
+        # 1. Spectral bands
+        for key in self.bands_keys:
+            suffix = key.upper()  # e.g. b4 --> B4
+            assets_paths[key] = self.get_file(endswith=f"_{tag}_{suffix}.tif")
+
+        # 2. Other assets (quality masks, etc)
+        for key in [key for key in src_classes if key not in assets_paths]:
+            suffix = key.upper()  # e.g. cld_r1 --> CLD_R1
+            assets_paths[key] = self.get_file(endswith=f"_{suffix}.tif")
+
+        # Date, extent
+        b2_path = assets_paths["b2"]
+        epsg, extent = get_epsg_extent_wgs84(b2_path)
+        # Basename, e.g. SENTINEL2A_20180630-105440-000_L2A_T31TEJ_D_V1-8
+        b2_basepath = utils.basename(b2_path)
+        datestr = b2_basepath.split("_")[1]  # e.g. 20180630-105440
+        acquisition_date = datetime.strptime(datestr, '%Y%m%d-%H%M%S-%f')
 
         # Call parent constructor
-        super().__init__(acquisition_date=acquisition_date, bbox_wgs84=bbox_wgs84, epsg=epsg)
+        super().__init__(
+            acquisition_date=acquisition_date,
+            epsg=epsg,
+            extent_wgs84=extent,
+            assets_paths=assets_paths,
+            src_classes=src_classes,
+            additional_metadata={"archive": archive}
+        )
 
     def get_file(self, endswith: str) -> str:
         """
@@ -307,159 +478,64 @@ class Sentinel2SceneBase(Scene):
         filtered_files_list = [f for f in self.files if f.endswith(endswith)]
         nb_matches = len(filtered_files_list)
         if nb_matches != 1:
-            raise ValueError(f"Cannot instantiate Sentinel-2 Theia product from {self.archive}:"
-                             f"{nb_matches} occurrence(s) of file with suffix \"{endswith}\"")
+            raise ValueError(
+                "Cannot instantiate Sentinel-2 Theia product from "
+                f"{self.archive}: found {nb_matches} occurrence(s) of file "
+                f"with suffix \"{endswith}\""
+            )
         return filtered_files_list[0]
 
-    def get_band(self, suffix1: str, suffix2: str) -> str:
-        """
-        Return the file path for the specified band.
-
-        Args:
-            suffix1: suffix 1, e.g. "FRE"
-            suffix2: suffix 2, e.g. "B3"
-
-        Returns:
-            file path for the band
-
-        Raises:
-            ValueError: If none or multiple candidates are found.
-
-        """
-        return self.get_file(endswith=f"_{suffix1}_{suffix2}.tif")
-
-    def get_10m_bands(self) -> Sentinel2Source:
-        """
-        Returns 10m spacing bands
-
-        Returns:
-            10m spectral bands (Red, Green, Blue, Near-infrared)
-
-        """
-        concatenate_10m_bands = pyotb.ConcatenateImages([self.band4_file,
-                                                         self.band3_file,
-                                                         self.band2_file,
-                                                         self.band8_file])
-        return self.source_class(self, concatenate_10m_bands)
-
-    def get_20m_bands(self) -> Sentinel2Source:
-        """
-        Returns 20m spacing bands
-
-        Returns:
-            20m spectral bands (B6, B7, B8a, B11, B12)
-
-        """
-        concatenate_20m_bands = pyotb.ConcatenateImages([self.band5_file,
-                                                         self.band6_file,
-                                                         self.band7_file,
-                                                         self.band8a_file,
-                                                         self.band11_file,
-                                                         self.band12_file])
-        return self.source_class(self, concatenate_20m_bands)
-
-    def get_metadata(self) -> dict[str, any]:
-        """
-        Get metadata
-
-        Returns:
-            metadata
 
-        """
-        metadata = super().get_metadata()
-        metadata.update({
-            "Archive": self.archive,
-            "Band 2": self.band2_file,
-            "Band 3": self.band3_file,
-            "Band 4": self.band4_file,
-            "Band 8": self.band8_file,
-            "Band 5": self.band5_file,
-            "Band 6": self.band6_file,
-            "Band 7": self.band7_file,
-            "Band 8a": self.band8a_file,
-            "Band 11": self.band11_file,
-            "Band 12": self.band12_file,
-        })
-        return metadata
-
-
-class Sentinel22AScene(Sentinel2SceneBase):
+class Sentinel22AScene(Sentinel2TheiaScene):
     """
     Sentinel-2 level 2A scene class.
 
-    The class carries all the metadata from the root_scene, and can be used to retrieve its sources.
+    The class carries all the metadata from the root_scene, and can be used to
+    retrieve its sources.
 
     """
 
-    def __init__(self, archive: str):
+    def __init__(self, archive: str, tag: str = "FRE"):
         """
         Args:
             archive: .zip file or folder. Must be a product from MAJA.
-        """
-        # Call parent constructor
-        super().__init__(archive=archive, tag="FRE", source_class=Sentinel22ASource)
-
-        # Additional rasters
-        self.clm_r1_msk_file = self.get_file("_CLM_R1.tif")
-        self.edg_r1_msk_file = self.get_file("_EDG_R1.tif")
-        self.clm_r2_msk_file = self.get_file("_CLM_R2.tif")
-        self.edg_r2_msk_file = self.get_file("_EDG_R2.tif")
+            tag: product tag (SRE/FRE)
 
-    def get_metadata(self) -> dict[any]:
         """
-        Get metadata
-
-        Returns:
-            metadata
-
-        """
-        metadata = super().get_metadata()
-        metadata.update({
-            "CLD R1": self.clm_r1_msk_file,
-            "EDG R1": self.edg_r1_msk_file,
-            "CLD R2": self.clm_r2_msk_file,
-            "EDG R2": self.edg_r2_msk_file,
-        })
-        return metadata
+        masks = ["clm_r1", "edg_r1", "clm_r2", "edg_r2"]
+        src_classes = {
+            **{key: Sentinel2Theia2ASource for key in self.bands_keys},
+            **{key: CommonImagerySource for key in masks}
+        }
+        super().__init__(archive=archive, tag=tag, src_classes=src_classes)
 
 
-class Sentinel23AScene(Sentinel2SceneBase):
+class Sentinel23AScene(Sentinel2TheiaScene):
     """Sentinel-2 level 3A scene class.
 
-    The class carries all the metadata from the root_scene, and can be used to retrieve its sources.
+    The class carries all the metadata from the root_scene, and can be used to
+    retrieve its sources.
 
     """
 
-    def __init__(self, archive: str):
+    def __init__(self, archive: str, tag: str = "FRC"):
         """
         Args:
             archive: .zip file or folder. Must be a product from WASP.
-        """
-        super().__init__(archive=archive, tag="FRC", source_class=Sentinel23ASource)
-
-        # Additional rasters
-        self.flg_r1_msk_file = self.get_file("_FLG_R1.tif")
-        self.flg_r2_msk_file = self.get_file("_FLG_R2.tif")
+            tag: product tag (FRC)
 
-    def get_metadata(self) -> dict[str, any]:
         """
-        Get metadata
+        masks = ["flg_r1", "flg_r2"]
+        src_classes = {
+            **{key: Sentinel2Theia3ASource for key in self.bands_keys},
+            **{key: CommonImagerySource for key in masks}
+        }
+        super().__init__(archive=archive, tag=tag, src_classes=src_classes)
 
-        Returns:
-            metadata
 
-        """
-        metadata = super().get_metadata()
-        metadata.update({
-            "FLG R1": self.flg_r1_msk_file,
-            "FLG R2": self.flg_r2_msk_file,
-        })
-        return metadata
-
-
-def get_scene(archive: str) -> Sentinel22AScene | Sentinel23AScene:
+def get_scene(archive: str) -> Union[Sentinel22AScene, Sentinel23AScene]:
     """
-    Return the right sentinel scene instance from the given archive (L2A or L3A)
+    Return the right `Scene` instance from the given archive (L2A or L3A)
 
     Args:
         archive: L3A or L3A archive
@@ -479,7 +555,10 @@ def get_scene(archive: str) -> Sentinel22AScene | Sentinel23AScene:
     return None
 
 
-def get_local_scenes(root_dir: str, tile: str = None) -> list[Sentinel22AScene | Sentinel23AScene]:
+def get_local_scenes(
+        root_dir: str,
+        tile: str = None
+) -> List[Union[Sentinel22AScene, Sentinel23AScene]]:
     """
     Retrieve the sentinel scenes in the directory
 
@@ -492,8 +571,10 @@ def get_local_scenes(root_dir: str, tile: str = None) -> list[Sentinel22AScene |
 
     """
     scenes_list = []
-    archives = utils.find_files_in_all_subdirs(pth=root_dir, pattern="*.zip", case_sensitive=False)
-    for archive in archives:
+    archives = utils.find_files_in_all_subdirs(
+        pth=root_dir, pattern="*.zip", case_sensitive=False
+    )
+    for archive in tqdm(archives):
         candidate = get_scene(archive)
         if candidate:
             tile_name = archive.split("_")[3]
@@ -502,20 +583,54 @@ def get_local_scenes(root_dir: str, tile: str = None) -> list[Sentinel22AScene |
     return scenes_list
 
 
-def get_downloaded_scenes(download_results: dict[str, dict[str, str]]) -> list[Sentinel22AScene | Sentinel23AScene]:
-    """
-    Retrieve the sentinel scenes from the download results from the TheiaDownloader
+class Sentinel2MPCScene(Sentinel2SceneBase):
+    """Class for Sentinel-2 images from Microsoft Planetary Computer"""
 
-    Args:
-        download_results: dict as generated from the TheiaDownloader
+    def __init__(
+            self,
+            assets_paths: Dict[str, str],
+    ):
+        """
+        Args:
+            assets_paths: assets paths
 
-    Returns:
-        a list of sentinel scenes instances
+        """
+        # Assets (spectral bands)
+        # We just use the same key for all Sentinel2SceneBase products
+        bands_names_mapping = {
+            "b4": "B04",
+            "b3": "B03",
+            "b2": "B02",
+            "b8": "B08",
+            "b5": "B05",
+            "b6": "B06",
+            "b7": "B07",
+            "b8a": "B8A",
+            "b11": "B11",
+            "b12": "B12"
+        }
+        updated_assets_paths = {
+            key: assets_paths[asset_key] for key, asset_key
+            in bands_names_mapping.items()
+        }
+
+        # Sources classes
+        src_classes = {
+            key: CommonImagerySource for key in updated_assets_paths
+        }
+
+        # Date, extent
+        b2_path = updated_assets_paths["b2"]
+        # Here, b2_path is something like ...T45WXU_20221019T062901_B02_10m.tif
+        epsg, extent = get_epsg_extent_wgs84(b2_path)
+        datestr = b2_path.split("_")[-3]  # 20180630T105440
+        acquisition_date = datetime.strptime(datestr, '%Y%m%dT%H%M%S')
 
-    """
-    scenes_list = []
-    for _, products in download_results.items():
-        for _, dic in products.items():
-            archive = dic['local_file']
-            scenes_list.append(get_scene(archive))
-    return scenes_list
+        # Call parent constructor
+        super().__init__(
+            acquisition_date=acquisition_date,
+            epsg=epsg,
+            extent_wgs84=extent,
+            assets_paths=updated_assets_paths,
+            src_classes=src_classes
+        )
diff --git a/scenes/spatial.py b/scenes/spatial.py
index a95498ba12846b61a26280212b8e63ef5cba2d5b..07fbee38ab20f380077259af37c80740bcdbbd62 100644
--- a/scenes/spatial.py
+++ b/scenes/spatial.py
@@ -1,48 +1,118 @@
 """
-This module provides classes and functions to help with light geospatial objects
-(projections, bounding boxes, etc).
+This module provides classes and functions to help with light geospatial
+objects (projections, bounding boxes, etc).
 """
 from __future__ import annotations
+from typing import List, Tuple
 import math
 from osgeo import osr, ogr
 
 
-def poly_overlap(poly: ogr.Geometry, other_poly: ogr.Geometry) -> float:
+def coords2poly(coords: Tuple[Tuple[float]]) -> ogr.Geometry:
     """
-    Returns the ratio of polygons overlap.
+    Converts a list of coordinates into a polygon
 
     Args:
-        poly: polygon
-        other_poly: other polygon
+        coords: tuple of (x, y) coordinates
 
     Returns:
-        overlap (in the [0, 1] range). 0 -> no overlap with other_poly, 1 -> poly is completely inside other_poly
+        a polygon
 
     """
-    inter = poly.Intersection(other_poly)
+    ring = ogr.Geometry(ogr.wkbLinearRing)
+    for coord in coords + (coords[0],):
+        x, y = coord
+        ring.AddPoint(x, y)
+    poly = ogr.Geometry(ogr.wkbPolygon)
+    poly.AddGeometry(ring)
 
-    return inter.GetArea() / poly.GetArea()
+    return poly
 
 
-def coords2poly(coords: list[tuple(float, float)]) -> ogr.Geometry:
+class BoundingBox:
     """
-    Converts a list of coordinates into a polygon
+    The bounding box class
+    """
+
+    def __init__(self, xmin: float, ymin: float, xmax: float, ymax: float):
+        """
+        Args:
+            xmin: Lower value on the x-axis
+            ymin: Lower value on the y-axis
+            xmax: Higher value on the x-axis
+            ymax: Higher value on the y-axis
+        """
+        self.xmin = xmin
+        self.ymin = ymin
+        self.xmax = xmax
+        self.ymax = ymax
+
+    def union(self, other: BoundingBox) -> BoundingBox:
+        """
+        Return a new bounding box resulting in the union of self and other
+
+        Args:
+            other: another bounding box
+
+        Returns:
+            a new bounding box
+
+        """
+        return BoundingBox(xmin=min(self.xmin, other.xmin),
+                           ymin=min(self.ymin, other.ymin),
+                           xmax=max(self.xmax, other.xmax),
+                           ymax=max(self.ymax, other.ymax))
+
+    def __str__(self) -> str:
+        """
+        Returns a string representing the object
+
+        Returns:
+            str(self.to_list())
+
+        """
+        return str(self.to_list())
+
+    def to_list(self) -> List[float]:
+        """
+        Converts the bbox into a list of coordinates, like rasterio does.
+
+        Returns:
+            [xmin, ymin, xmax, ymax]
+
+        """
+        return [self.xmin, self.ymin, self.xmax, self.ymax]
+
+    def to_ogrgeom(self) -> ogr.Geometry:
+        """
+        Converts the BoundingBox into an OGR geometry
+
+        Returns: an OGR Geometry from the bounding box
+
+        """
+        coords = ((self.xmin, self.ymin),
+                  (self.xmax, self.ymin),
+                  (self.xmax, self.ymax),
+                  (self.xmin, self.ymax))
+        return coords2poly(coords)
+
+
+def poly_overlap(poly: ogr.Geometry, other_poly: ogr.Geometry) -> float:
+    """
+    Returns the ratio of polygons overlap.
 
     Args:
-        coords: list of (x, y) coordinates
+        poly: polygon
+        other_poly: other polygon
 
     Returns:
-        a polygon
+        overlap (in the [0, 1] range). 0 -> no overlap with other_poly,
+            1 -> poly is completely inside other_poly
 
     """
-    ring = ogr.Geometry(ogr.wkbLinearRing)
-    for coord in coords + [coords[0]]:
-        x, y = coord
-        ring.AddPoint(x, y)
-    poly = ogr.Geometry(ogr.wkbPolygon)
-    poly.AddGeometry(ring)
+    inter = poly.Intersection(other_poly)
 
-    return poly
+    return inter.GetArea() / poly.GetArea()
 
 
 def epsg2srs(epsg: int) -> osr.SpatialReference:
@@ -61,8 +131,34 @@ def epsg2srs(epsg: int) -> osr.SpatialReference:
     return srs
 
 
-def reproject_coords(coords: list[tuple[float, float]], src_srs: osr.SpatialReference,
-                     tgt_srs: osr.SpatialReference) -> list[tuple[float, float]]:
+def coordinates_transform(
+        src_srs: osr.SpatialReference,
+        tgt_srs: osr.SpatialReference
+) -> osr.CoordinateTransformation:
+    """
+    Return a coordinates transform.
+
+    Args:
+        src_srs: source CRS
+        tgt_srs: target CRS
+
+    Returns:
+        osr.CoordinateTransformation
+
+    """
+    assert src_srs, "reproject_coords: src_srs is None!"
+    assert tgt_srs, "reproject_coords: tgt_srs is None!"
+    # Use x,y = lon, lat
+    src_srs.SetAxisMappingStrategy(osr.OAMS_TRADITIONAL_GIS_ORDER)
+    tgt_srs.SetAxisMappingStrategy(osr.OAMS_TRADITIONAL_GIS_ORDER)
+    return osr.CoordinateTransformation(src_srs, tgt_srs)
+
+
+def reproject_coords(
+        coords: List[Tuple[float]],
+        src_srs: osr.SpatialReference,
+        tgt_srs: osr.SpatialReference
+) -> Tuple[Tuple[float]]:
     """
     Reproject a list of x,y coordinates.
 
@@ -76,17 +172,15 @@ def reproject_coords(coords: list[tuple[float, float]], src_srs: osr.SpatialRefe
 
     """
     trans_coords = []
-    assert src_srs, "reproject_coords: src_srs is None!"
-    assert tgt_srs, "reproject_coords: tgt_srs is None!"
-    transform = osr.CoordinateTransformation(src_srs, tgt_srs)
+    transform = coordinates_transform(src_srs=src_srs, tgt_srs=tgt_srs)
     for x, y in coords:
         x, y, _ = transform.TransformPoint(x, y)
-        trans_coords.append([x, y])
+        trans_coords.append((x, y))
 
-    return trans_coords
+    return tuple(trans_coords)
 
 
-def coord_list_to_bbox(coords: list[tuple[float, float]]) -> BoundingBox:
+def coord_list_to_bbox(coords: Tuple[Tuple[float]]) -> BoundingBox:
     """
     Computes the bounding box of multiple coordinates
 
@@ -105,10 +199,13 @@ def coord_list_to_bbox(coords: list[tuple[float, float]]) -> BoundingBox:
         ymin = min(ymin, y)
         ymax = max(ymax, y)
 
-    return BoundingBox(xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax)
+    return BoundingBox(xmin=xmin, ymin=ymin, xmax=xmax, ymax=ymax)
 
 
-def extent_overlap(extent: list[tuple[float, float]], other_extent: list[tuple[float, float]]) -> float:
+def extent_overlap(
+        extent: Tuple[Tuple[float]],
+        other_extent: Tuple[Tuple[float]]
+) -> float:
     """
     Returns the ratio of extents overlaps.
 
@@ -117,60 +214,10 @@ def extent_overlap(extent: list[tuple[float, float]], other_extent: list[tuple[f
         other_extent: other extent
 
     Returns:
-        overlap (in the [0, 1] range). 0 -> no overlap with other_extent, 1 -> extent lies inside other_extent
+        overlap (in the [0, 1] range). 0 -> no overlap with other_extent,
+            1 -> extent lies inside other_extent
 
     """
     poly = coords2poly(extent)
     other_poly = coords2poly(other_extent)
     return poly_overlap(poly=poly, other_poly=other_poly)
-
-
-class BoundingBox:
-    """
-    The bounding box class
-    """
-
-    def __init__(self, xmin: float, xmax: float, ymin: float, ymax: float):
-        """
-        Args:
-            xmin: Lower value on the x-axis
-            xmax: Higher value on the x-axis
-            ymin: Lower value on the y-axis
-            ymax: Higher value on the y-axis
-        """
-        self.xmin = xmin
-        self.xmax = xmax
-        self.ymin = ymin
-        self.ymax = ymax
-
-    def union(self, other: BoundingBox) -> BoundingBox:
-        """
-        Return a new bounding box resulting in the union of self and other
-
-        Args:
-            other: another bounding box
-
-        Returns:
-            a new bounding box
-
-        """
-        return BoundingBox(xmin=min(self.xmin, other.xmin),
-                           xmax=max(self.xmax, other.xmax),
-                           ymin=min(self.ymin, other.ymin),
-                           ymax=max(self.ymax, other.ymax))
-
-    def __str__(self):
-        return f"[{self.xmin}, {self.ymax}, {self.xmax}, {self.ymin}]"
-
-    def to_ogrgeom(self) -> ogr.Geometry:
-        """
-        Converts the BoundingBox into an OGR geometry
-
-        Returns: an OGR Geometry from the bounding box
-
-        """
-        coords = [[self.xmin, self.ymin],
-                  [self.xmax, self.ymin],
-                  [self.xmax, self.ymax],
-                  [self.xmin, self.ymax]]
-        return coords2poly(coords)
diff --git a/scenes/spot.py b/scenes/spot.py
index bf964958f1eccea7d0ceb06d52b434c0f5003862..6d2792a819db273127d3c08588ca181c68e7095e 100644
--- a/scenes/spot.py
+++ b/scenes/spot.py
@@ -4,155 +4,163 @@ In particular, it specializes `scenes.core.Source` and `scenes.core.Scene`
 for Spot 6/7 products.
 
 ---
+# Overview
 
-# `Scene` based class
-
-The `Spot67Scene` class carries metadata and image sources for Spot-6/7 sensors.
+We distinguish 2 kinds of products:
+- DRS products
+- IGN products (without any metadata, cloud mask, etc)
 
 ``` mermaid
 classDiagram
 
     Scene <|-- Spot67Scene
+    Spot67Scene <|-- Spot67DRSScene
+    Spot67Scene <|-- Spot67IGNScene
 
-    class Scene{
-        +datetime acquisition_date
-        +int epsg
-        +bbox_wgs84
-        +get_metadata()
-        +__repr__()
-    }
-
-    class Spot67Scene{
-        +float azimuth_angle
-        +float azimuth_angle_across
-        +float azimuth_angle_along
-        +float incidence_angle
-        +float sun_azimuth_angle
-        +float sun_elev_angle
-        +get_metadata()
-        +Spot67Source get_xs()
-        +Spot67Source get_pan()
-        +Spot67Source get_pxs()
-    }
 ```
 
-## Instantiation
+# Instantiation
 
-A `Spot67Scene` is instantiated from the .XML DIMAP files of PAN and XS:
-``` py
-import scenes
-sc = scenes.spot.Spot67Scene(dimap_file_xs="DIM_SPOT6_MS..._1.XML",
-                             dimap_file_pan="DIM_SPOT6_P..._1.XML")
-```
+## Generic
 
-## Metadata
+`Spot67Scene` objects are instantiated from the assets_paths dictionary.
 
-The scene metadata can be accessed with the `get_metadata()` method, like any
-`scenes.core.Scene` instance.
-``` py
-md_dict = sc.get_metadata()
-for key, value in md_dict.items():
-    print(f"{key}: {value}")
+```python
+import scenes
+sc_drs = scenes.spot.Spot67DRSScene(assets_paths=...)
+sc_ign = scenes.spot.Spot67IGNScene(assets_paths=...)
 ```
 
-## Sources
+The `Spot67DRSScene` is instantiated in the same way as its parent class.
+However, to ease working with products stored on the local filesystem, the
+`get_local_spot67drs_scene` can help:
 
-The `Spot67Source` is the class for the different Spot-6/7 sources.
+```python
+sc = get_local_spot67drs_scene(
+    dimap_xs="/path/to/DIM_SPOT7_MS_..._1.XML",
+    dimap_pan="/path/to/DIM_SPOT7_P_..._1.XML"
+)
+```
 
-``` mermaid
-classDiagram
+## From STAC
+
+The following example shows how to instantiate on the fly `Spot67DRSScene` from
+a STAC catalog of Dinamis (prototype from CROCc).
+
+```python
+api = Client.open(
+    'https://stacapi-dinamis.apps.okd.crocc.meso.umontpellier.fr',
+    modifier=dinamis_sdk.sign_inplace,
+)
+res = api.search(
+    collections=["spot-6-7-drs"],
+    bbox=[4.09, 43.99, 5, 44.01],
+    datetime=['2020-01-01', '2021-01-01']
+)
+
+for item in res.items():
+    sc = from_stac(item)
+    assert isinstance(sc, Spot67DRSScene)
+    print(sc)
+```
 
-    Source <|-- Spot67Source
-
-        class Source{
-        +__init__(root_scene, out, parent=None)
-        +Scene root_scene
-        +Source parent
-        +Source drilled(msk_vec_file, nodata=0)
-        +Source cld_msk_drilled(nodata=0)
-        +Source resample_over(ref_img, interpolator="nn", nodata=0)
-        +Source clip_over_img(ref_img)
-        +Source clip_over_vec(ref_vec)
-        +Source new_source(*args)
-    }
+# Metadata
 
-    class Spot67Source{
-        +Spot67Source cld_msk_drilled(nodata=0)
-    }
+The scene metadata can be accessed with the `metadata` attribute, like any
+`scenes.core.Scene` instance.
 
+```python
+md = sc.metadata
+scenes.utils.pprint(md)
 ```
 
-### Different sources
+# Sources
 
-The `Spot67Scene` delivers three `Spot67Source` instances:
+The following sources are delivered:
+- xs
+- pan
+- pxs
 
-- The multispectral image (xs)
-- The Panchromatic image (pan)
-- The pansharpenend image (pxs)
-``` py
-xs = sc.get_xs()
-pan = sc.get_pan()
-pxs = sc.get_pxs(method="bayes")
-```
+Imagery sources delivered from `Spot67DRSScene` are instances of
+`Spot67DRSSource`. Imagery sources delivered from `Spot67IGNScene` are
+instances of `CommonImagerySource`.
+The `Spot67DRSSource` has a few more useful methods, including:
+- `reflectance()`: basically a wrapper of OTB OpticalCalibration. You can use
+  the same parameters, e.g. `reflectance(level="toa")`
+- `cld_msk_drilled()`: this drills down the image in clouds polygons. The
+  no-data value can be changed (default is `0`).
 
-### Radiometry
+``` mermaid
+classDiagram
 
-Sources radiometry level can be selected when requesting the source.
-Three level of radiometry are available for Spot-6/7 images:
+    Source <|-- CommonImagerySource
+    CommonImagerySource <|-- Spot67DRSSource
 
-- DN (Digital Number: raw sensor values)
-- TOA (Top Of Atmosphere)
-- TOC (Top Of Canopy)
+    class Spot67DRSSource{
+        +cld_msk_drilled(nodata=0)
+        +reflectance(**kwargs)
+    }
 
-``` py
-p_dn = sc.get_pan()
-xs_toa = sc.get_xs(reflectance="toa")
-pxs_toc = sc.get_pxs(reflectance="toc")
 ```
 
-The `Spot67Source` implements the `Spot67Source.cld_msk_drilled` method, that
-enable to mask the cloud masks over the root source, with the specified
-no-data value (default is 0).
 The following example show how to derive a child source replacing the
 pixels that are in the clouds with zero-valued pixels:
-``` py
+
+```python
 pxs_drilled = pxs.cld_msk_drilled()
 ```
 
-The `Spot67Source` inherits from `scenes.core.Source`, hence implemented
-sources transformations (e.g. `scenes.core.Source.masked()`, `scenes.core.Source.clip_over_img()`,
-`scenes.core.Source.resample_over()`, `scenes.core.Source.reproject()`, etc.
-``` py
+The `Spot67DRSSource` inherits from `scenes.core.CommonImagerySource`, hence
+implemented sources transformations (e.g.
+`scenes.core.CommonImagerySource.masked()`,
+`scenes.core.CommonImagerySource.clip_over_img()`,
+`scenes.core.CommonImagerySource.resample_over()`,
+`scenes.core.CommonImagerySource.reproject()`, etc.)
+
+```python
 clipped = pxs_drilled.clip_over_img(roi)
 reprojected = clipped.reproject(epsg=4328)
 ```
-Note that the resulting transformed `Spot67Source` is still an instance of
-`Spot67Source` even after generic operations implemented in `scenes.core.Source`.
+
+Note that the resulting transformed `Spot67DRSSource` is still an instance of
+`Spot67DRSSource` after generic operations implemented in
+`scenes.core.CommonImagerySource`.
 
 # Usage with pyotb
 
 As `scenes.core.Source`, it also can be used like any `pyotb.core.otbObject`.
-The following example show how to use an OTB application with a source at input.
-``` py
+The following example shows how to use an OTB application with a source at
+input.
+
+```python
 rgb_nice = pyotb.DynamicConvert(reprojected)
 rgb_nice.write("image.tif", pixel_type="uint8")
 ```
 
 """
 from __future__ import annotations
-import datetime
+
+import os
+import re
 import xml.etree.ElementTree as ET
+from datetime import datetime
+from functools import partial
+from typing import List, Dict, Type, Union
 from tqdm.autonotebook import tqdm
+import requests
 import pyotb
+
+from scenes import dates
 from scenes import utils
-from scenes.core import Source, Scene
-from scenes.raster import get_epsg_extent_bbox
+from scenes.core import Scene, Source, CommonImagerySource
+from scenes.raster import get_epsg_extent_wgs84
 from scenes.spatial import extent_overlap
 
 
-def find_all_dimaps(pth: str) -> list[str]:
+def find_all_dimaps(pth: str) -> List[str]:
     """
-    Return the list of DIMAPS XML files that are inside all subdirectories of the root directory.
+    Return the list of DIMAPS XML files that are inside all subdirectories of
+    the root directory.
 
     Args:
         pth: root directory
@@ -164,10 +172,10 @@ def find_all_dimaps(pth: str) -> list[str]:
     return utils.find_files_in_all_subdirs(pth=pth, pattern="DIM_*.XML")
 
 
-def get_spot67_scenes(root_dir: str) -> list[Spot67Scene]:
+def get_spot67_scenes(root_dir: str) -> List[Spot67Scene]:
     """
-    Return the list of scenes that can be instantiated from a root directory containing
-    a "PAN" and a "MS" subdirectories.
+    Return the list of scenes that can be instantiated from a root directory
+    containing a "PAN" and a "MS" subdirectories.
 
     Args:
         root_dir: directory containing "MS" and "PAN" subdirectories
@@ -194,10 +202,13 @@ def get_spot67_scenes(root_dir: str) -> list[Spot67Scene]:
             dimap_pan_files = find_all_dimaps(pan_path)
             nb_files = len(dimap_pan_files)
             if nb_files != 1:
-                raise ValueError(f"{nb_files} DIMAPS candidates found in {pan_path} ")
+                raise ValueError(
+                    f"{nb_files} DIMAPS candidates found in {pan_path} ")
             dimap_file_pan = dimap_pan_files[0]
             # Instantiate a new scene object
-            new_scene = Spot67Scene(dimap_file_pan=dimap_file_pan, dimap_file_xs=dimap_file_xs)
+            new_scene = get_local_spot67drs_scene(
+                dimap_xs=dimap_file_xs, dimap_pan=dimap_file_pan
+            )
             scenes.append(new_scene)
         except Exception as error:
             if dimap_file_xs not in errors:
@@ -217,12 +228,14 @@ def get_spot67_scenes(root_dir: str) -> list[Spot67Scene]:
     return scenes
 
 
-class Spot67Source(Source):
+class Spot67DRSSource(CommonImagerySource):
     """
-    Spot 6/7 source class
+    Source capabilities for Spot-6/7 ADS-DRS products
     """
 
-    def cld_msk_drilled(self, nodata: float | int = 0) -> Spot67Source:
+    def cld_msk_drilled(
+            self, nodata: Union[float, int] = 0
+    ) -> Spot67DRSSource:
         """
         Return the source drilled from the cloud mask
 
@@ -233,247 +246,356 @@ class Spot67Source(Source):
             drilled source
 
         """
-        return self.drilled(self.root_scene.cld_msk_file, nodata=nodata)
+        return self.drilled(
+            self.root_scene.assets_paths["cld_msk_vec"], nodata=nodata
+        )
 
+    def reflectance(self, factor: float = None, **kwargs) -> Spot67DRSSource:
+        """
+        This function is used internally by `get_xs()`, `get_pxs()`, and
+        `get_pan()` to compute the reflectance (or not!) of the optical image.
 
-class Spot67Scene(Scene):
-    """
-    Spot 6/7 root_scene class.
+        Warning: there is a bug in OTB<=8.0.2
+        see https://gitlab.orfeo-toolbox.org/orfeotoolbox/otb/-/issues/2314
 
-    The Spot67Scene class carries all metadata and images sources from the scene.
-    A Spot67Scene object can be instantiated from the XS and PAN DIMAPS (.XML) file.
+        Args:
+            factor: factor to scale pixel values (e.g. 10000)
+            **kwargs: same options as BundleToPerfectSensor in OTB
 
-    """
-    PXS_OVERLAP_THRESH = 0.995
+        Returns:
+            calibrated, or original image source
 
-    def __init__(self, dimap_file_xs: str, dimap_file_pan: str):
         """
-        Args:
-            dimap_file_xs: XML DIMAP file for the XS product
-            dimap_file_pan: XML DIMAP file for the PAN product
 
-        """
+        # Radiometry correction
+        params = {"in": self}
+        params.update(kwargs)
+        out = pyotb.OpticalCalibration(params)
+
+        if factor:
+            out = out * factor
 
-        # DIMAP files
-        def _check_is_dimap(dimap_file):
-            if not dimap_file.lower().endswith(".xml"):
-                raise FileNotFoundError(f"An input DIMAP file is needed. This file is not a .xml: {dimap_file}")
-            return dimap_file
+        return self.new_source(out)
+
+
+class Spot67Scene(Scene):
+    """
+    Generic Spot 6/7 Scene class.
 
-        self.dimap_file_xs = _check_is_dimap(dimap_file_xs)
-        self.dimap_file_pan = _check_is_dimap(dimap_file_pan)
+    The Spot67Scene class carries PAN, XS, and PXS images sources.
 
-        # Cloud masks
-        def _get_mask(dimap_file, pattern):
-            """
-            Retrieve GML file.
+    """
+    PXS_OVERLAP_THRESH = 0.995
 
-            Args:
-                dimap_file: DIMAP file
-                pattern: vector data file pattern
+    def _check_assets(self, assets_paths: Dict[str, str]):
+        """
+        Checks that all assets are here
 
-            Returns:
-                path of the file
+        """
+        for src_key in ["xs", "pan"]:
+            for req_prefix, req_description in self.req_assets.items():
+                key = f"{req_prefix}_{src_key}"
+                assert key in assets_paths, \
+                    f"key '{key}' for {req_description} " \
+                    "is missing from assets paths"
+
+    @property
+    def req_assets(self) -> Dict[str, str]:
+        """
+        Required assets
 
-            Raises:
-                FileNotFoundError: when multiple files are found.
+        Returns: required assets
+        """
+        return {
+            "src": "imagery source",
+        }
+
+    def __init__(
+            self,
+            acquisition_date: datetime,
+            assets_paths: Dict[str, str],
+            src_class: Type[Source] = CommonImagerySource,
+            additional_metadata: Dict[str, str] = None,
+    ):
+        """
+        Args:
+            acquisition_date: Acquisition date
+            assets_paths: assets paths
+            additional_metadata: additional metadata
 
-            """
-            cld_path = utils.get_parent_directory(dimap_file) + "/MASKS"
-            plist = utils.find_file_in_dir(cld_path, pattern=pattern)
-            if len(plist) != 1:
-                raise FileNotFoundError(
-                    f"ERROR: unable to find a unique file in {cld_path} with pattern {pattern}")
-            return plist[0]
-
-        # Get metadata
-        tree = ET.parse(dimap_file_xs)
-        root = tree.getroot()
-
-        # Acquisition angles
-        c_nodes = root.find("Geometric_Data/Use_Area/Located_Geometric_Values/Acquisition_Angles")
-        for node in c_nodes:
-            if node.tag == "AZIMUTH_ANGLE":
-                self.azimuth_angle = float(node.text)
-            elif node.tag == "VIEWING_ANGLE_ACROSS_TRACK":
-                self.viewing_angle_across = float(node.text)
-            elif node.tag == "VIEWING_ANGLE_ALONG_TRACK":
-                self.viewing_angle_along = float(node.text)
-            elif node.tag == "VIEWING_ANGLE":
-                self.viewing_angle = float(node.text)
-            elif node.tag == "INCIDENCE_ANGLE":
-                self.incidence_angle = float(node.text)
-
-        # Acquisition date
-        c_nodes = root.find("Geometric_Data/Use_Area/Located_Geometric_Values")
-        for node in c_nodes:
-            if node.tag == "TIME":
-                acquisition_date = datetime.datetime.strptime(node.text[0:10], "%Y-%m-%d")
-                break
-
-        # Sun angles
-        c_nodes = root.find("Geometric_Data/Use_Area/Located_Geometric_Values/Solar_Incidences")
-        for node in c_nodes:
-            if node.tag == "SUN_AZIMUTH":
-                self.sun_azimuth = float(node.text)
-            elif node.tag == "SUN_ELEVATION":
-                self.sun_elevation = float(node.text)
+        """
 
         # Get EPSG and bounding box
-        epsg_xs, extent_wgs84_xs, self.bbox_wgs84_xs = get_epsg_extent_bbox(dimap_file_xs)
-        epsg_pan, extent_wgs84_pan, self.bbox_wgs84_pan = get_epsg_extent_bbox(dimap_file_pan)
+        epsg_xs, extent_wgs84_xs = get_epsg_extent_wgs84(
+            assets_paths["src_xs"]
+        )
+        epsg_pan, extent_wgs84_pan = get_epsg_extent_wgs84(
+            assets_paths["src_pan"]
+        )
 
         # Check that EPSG for PAN and XS are the same
         if epsg_pan != epsg_xs:
-            raise ValueError(f"EPSG of XS and PAN sources are different:  XS EPSG is {epsg_xs}, PAN EPSG is {epsg_pan}")
-
-        # Here we compute bounding boxes overlap, to choose the most appropriated
-        # CLD and ROI masks for the scene. Indeed, sometimes products are not
-        # generated as they should (i.e. bundle) and XS and PAN have different extents
-        # so CLD and ROI masks are not the same for XS and PAN.
-        # We keep the ROI+CLD masks of the PAN or XS lying completely inside
-        # the other one.
-        self.xs_overlap = extent_overlap(extent_wgs84_xs, extent_wgs84_pan)
-        self.pan_overlap = extent_overlap(extent_wgs84_pan, extent_wgs84_xs)
-
-        # Get ROI+CLD filenames in XS and PAN products
-        self.cld_msk_file_xs = _get_mask(dimap_file_xs, "CLD*.GML")
-        self.cld_msk_file_pan = _get_mask(dimap_file_pan, "CLD*.GML")
-        self.roi_msk_file_xs = _get_mask(dimap_file_xs, "ROI*.GML")
-        self.roi_msk_file_pan = _get_mask(dimap_file_pan, "ROI*.GML")
-
-        # Choice based on the pxs overlap
-        bbox_wgs84 = self.bbox_wgs84_xs
-        self.cld_msk_file = self.cld_msk_file_xs
-        self.roi_msk_file = self.roi_msk_file_xs
-        self.pxs_overlap = self.xs_overlap
-        if self.pan_overlap > self.xs_overlap:
-            bbox_wgs84 = self.bbox_wgs84_pan
-            self.cld_msk_file = self.cld_msk_file_pan
-            self.roi_msk_file = self.roi_msk_file_pan
-            self.pxs_overlap = self.pan_overlap
+            raise ValueError(
+                f"EPSG of XS and PAN sources are different:  XS EPSG is "
+                f"{epsg_xs}, PAN EPSG is {epsg_pan}")
+
+        # Here we compute bounding boxes overlap, to choose the most
+        # appropriated CLD and ROI masks for the scene. Indeed, sometimes
+        # products are not generated as they should (i.e. bundle) and XS and
+        # PAN have different extents so CLD and ROI masks are not the same for
+        # XS and PAN. We keep the ROI+CLD masks of the PAN or XS lying
+        # completely inside the other one.
+        xs_overlap = extent_overlap(extent_wgs84_xs, extent_wgs84_pan)
+        pan_overlap = extent_overlap(extent_wgs84_pan, extent_wgs84_xs)
 
         # Throw some warning or error, depending on the pxs overlap value
-        msg = f"Bounding boxes of XS and PAN sources have {100 * self.pxs_overlap:.2f}% overlap. " \
-              + f"\n\tXS bbox_wgs84: {self.bbox_wgs84_xs} \n\tPAN bbox_wgs84: {self.bbox_wgs84_pan}"
-        if self.pxs_overlap == 0:
-            raise ValueError(msg)
-        if self.has_partial_pxs_overlap():
-            raise Warning(msg)
-
-        # Call parent constructor
-        super().__init__(acquisition_date=acquisition_date, bbox_wgs84=bbox_wgs84, epsg=epsg_xs)
+        pxs_overlap = max(xs_overlap, pan_overlap)
+        if pxs_overlap == 0:
+            raise ValueError("No overlap between PAN and XS images")
+        if max(pan_overlap, xs_overlap) < self.PXS_OVERLAP_THRESH:
+            print(f"Warning: partial overlap of {100 * pxs_overlap:.2f}%")
+
+        # Final extent
+        extent = extent_wgs84_pan if pan_overlap > xs_overlap else \
+            extent_wgs84_xs
+
+        sources = {
+            "xs": partial(
+                src_class, root_scene=self, out=assets_paths["src_xs"]
+            ),
+            "pan": partial(
+                src_class, root_scene=self, out=assets_paths["src_pan"]
+            ),
+            "pxs": partial(self._get_pxs, src_class=src_class)
+        }
+
+        additional_md = additional_metadata.copy() if additional_metadata \
+            else {}
+        additional_md.update({
+            "xs_extent_wgs84": extent_wgs84_xs,
+            "pan_extent_wgs84": extent_wgs84_pan,
+            "xs_overlap": xs_overlap,
+            "pan_overlap": pan_overlap
+        })
 
-    def has_partial_pxs_overlap(self) -> bool:
+        # Call parent constructor, before accessing assets dicts
+        super().__init__(
+            acquisition_date=acquisition_date,
+            epsg=epsg_xs,
+            extent_wgs84=extent,
+            assets_paths=assets_paths,
+            sources=sources,
+            additional_metadata=additional_md,
+        )
+
+    def _get_pxs(
+            self,
+            src_class: Type[CommonImagerySource],
+            **kwargs
+    ) -> CommonImagerySource:
         """
+        Create source for PXS (computed using OTB)
+
+        Args:
+            src_class: source class
+            **kwargs: OTB BundleToPerfectSensor options, e.g. method="bayes"
+
         Returns:
-            True if at least PAN or XS lies completely inside the other one. False else.
+            Source for BundleToPerfectSensor output
 
         """
-        return self.pxs_overlap < self.PXS_OVERLAP_THRESH
+        pan = self.get_pan()
+        params = {
+            "inxs": self.get_xs(),
+            "inp": pan
+        }
+        params.update(kwargs)
+        pansharp = src_class(self, pyotb.BundleToPerfectSensor(params))
+        binary_mask = pyotb.Superimpose({
+            "inr": pansharp,
+            "inm": pan,
+            "interpolator": "nn"
+        })
+        return pansharp.masked(binary_mask=binary_mask)
 
-    def reflectance(self, inp: str | pyotb.core.otbObject, reflectance: str = "dn", clamp: bool = False,
-                    factor: float = None):
-        """
-        This function is used internally by `get_xs()`, `get_pxs()`, and `get_pan()` to compute the
-        reflectance (or not!) of the optical image.
 
-        Args:
-            inp: input
-            reflectance: optional level of reflectance (can be "dn", "toa", "toc")
-            clamp: normalize reflectance values
-            factor: factor to scale pixel values (e.g. 10000)
+def spot67_metadata_parser(xml_path: str) -> Dict[str, str]:
+    """
+    Parse DIMAP XML file
 
-        Returns:
-            calibrated, or original image source
+    Args:
+        xml_path: XML file
 
-        """
-        reflectance = reflectance.lower()
-        assert reflectance in ("dn", "toa", "toc"), "reflectance can be 'dn', 'toa' or 'toc'"
+    Returns:
+        a dict of metadata
 
-        # Base
-        out = inp
+    """
+    # Detect if document is remote or local
+    if any(prefix in xml_path for prefix in ("http://", "https://")):
+        root = ET.fromstring(requests.get(xml_path, timeout=10).text)
+    else:
+        root = ET.parse(xml_path).getroot()
+
+    scalars_mappings = {
+        "Geometric_Data/Use_Area/Located_Geometric_Values/Acquisition_Angles":
+            {
+                "AZIMUTH_ANGLE": "azimuth_angle",
+                "VIEWING_ANGLE_ACROSS_TRACK": "viewing_angle_across",
+                "VIEWING_ANGLE_ALONG_TRACK": "viewing_angle_along",
+                "VIEWING_ANGLE": "viewing_angle",
+                "INCIDENCE_ANGLE": "incidence_angle",
+            },
+        "Geometric_Data/Use_Area/Located_Geometric_Values":
+            {
+                "TIME": "acquisition_date"
+            },
+        "Geometric_Data/Use_Area/Located_Geometric_Values/Solar_Incidences":
+            {
+                "SUN_AZIMUTH": "sun_azimuth",
+                "SUN_ELEVATION": "sun_elevation"
+            }
+    }
 
-        # Radiometry correction
-        if reflectance in ("toa", "toc"):
-            out = pyotb.OpticalCalibration({"in": inp, "level": reflectance, "clamp": clamp, "milli": False})
+    metadata = {}
+    for section, scalars_mapping in scalars_mappings.items():
+        for node in root.find(section):
+            key = node.tag
+            if key in scalars_mapping:
+                new_key = scalars_mapping[key]
+                text = node.text
+                metadata[new_key] = float(text) if text.isdigit() else text
 
-        if factor:
-            out = out * factor
+    return metadata
 
-        return Spot67Source(self, out)
 
-    def get_xs(self, **kwargs) -> Spot67Source:
-        """
-        Returns the MS source
+def get_local_spot67drs_scene(dimap_xs: str, dimap_pan: str) -> Spot67DRSScene:
+    """
+    Retrieve all required assets from the dimaps of the product.
 
-        Args:
-            kwargs: same as the `reflectance` method
+    Args:
+        dimap_xs: XML document for the XS image
+        dimap_pan: XML document for the PAN image
 
-        Returns:
-            A `Spot67Source` instance for the MS image
+    Returns:
+        Instantiated scene
 
-        """
-        return self.reflectance(self.dimap_file_xs, **kwargs)
+    """
 
-    def get_pan(self, **kwargs) -> Spot67Source:
-        """
-        Returns PAN source
+    # Get assets paths
+    def _check(dimap_file):
+        if not dimap_file.lower().endswith(".xml"):
+            raise FileNotFoundError(
+                f"An input XML file is needed (provided: {dimap_file})"
+            )
+
+    _check(dimap_xs)
+    _check(dimap_pan)
+    assets_paths = {
+        "dimap_xs": dimap_xs,
+        "dimap_pan": dimap_pan,
+        "src_xs": dimap_xs,
+        "src_pan": dimap_pan,
+    }
 
-        Args:
-            kwargs: same as the `reflectance` method
+    # Get ROI+CLD filenames in XS and PAN products, then set the final ROI+CLD
+    # filenames based on PAN/XS overlap
+    for key, pat in {"cld": "CLD", "roi": "ROI"}.items():
+        for src in ["xs", "pan"]:
+            pattern = f"{pat}*.GML"
+            cld_path = os.path.join(
+                utils.get_parent_directory(assets_paths[f"dimap_{src}"]),
+                "MASKS")
+            plist = utils.find_file_in_dir(cld_path, pattern=pattern)
+            if len(plist) != 1:
+                raise FileNotFoundError(
+                    f"ERROR: unable to find a unique file in {cld_path} with "
+                    f"pattern {pattern}"
+                )
+            assets_paths[f"{key}_msk_vec_{src}"] = plist[0]
 
-        Returns:
-            A `Spot67Source` instance for the PAN image
+    return Spot67DRSScene(assets_paths=assets_paths)
 
-        """
-        return self.reflectance(self.dimap_file_pan, **kwargs)
 
-    def get_pxs(self, method: str = "bayes", **kwargs) -> Spot67Source:
-        """
-        Returns the pansharpened source
+class Spot67DRSScene(Spot67Scene):
+    """
+    Spot 6/7 class for ADS-DRS products.
 
-        Args:
-            method: one of rcs, lmvm, bayes (Default value = "bayes")
-            kwargs: same as the `reflectance` method
+    The Spot67Scene class carries all metadata and images sources from the
+    scene. A Spot67Scene object can be instantiated from the XS and PAN DIMAPS
+    (.XML) file.
 
-        Returns:
-            A `Spot67Source` instance for the pansharpened image
+    """
+
+    @property
+    def req_assets(self) -> Dict[str, str]:
+        """
+        Required assets (overrides Spot67Scene property)
 
+        Returns: required assets
         """
-        xs = self.get_xs(**kwargs)
-        pan = self.get_pan(**kwargs)
-        # new source with pansharpening
-        pansharp = Spot67Source(self, pyotb.BundleToPerfectSensor({"inp": pan, "inxs": xs, "method": method}))
-        # new source masked outside the original ROI
-        return Spot67Source(self, pansharp.drilled(msk_vec_file=self.roi_msk_file, inside=False))
-
-    def get_metadata(self) -> dict[str, any]:
+        return {
+            "dimap": "XML DIMAP document",
+            "src": "imagery source",
+            "roi_msk_vec": "region of interest mask (vector data)",
+            "cld_msk_vec": "clouds mask (vector data)",
+        }
+
+    def __init__(self, assets_paths: Dict[str, str]):
         """
-        Get metadata.
+        Args:
+            assets_paths: assets paths
 
-        Returns:
-            metadata
+        """
+
+        # Parse dimap
+        assert "dimap_xs" in assets_paths, "XS DIMAP XML document is missing"
+        additional_md = spot67_metadata_parser(assets_paths["dimap_xs"])
+        acquisition_date = datetime.strptime(
+            additional_md["acquisition_date"], "%Y-%m-%dT%H:%M:%S.%fZ"
+        )
+
+        # Call parent constructor, before accessing to self.assets_paths
+        super().__init__(
+            assets_paths=assets_paths,
+            acquisition_date=acquisition_date,
+            src_class=Spot67DRSSource,
+            additional_metadata=additional_md,
+        )
+
+        # Choice of the CLD and ROI masks based on the pxs overlap
+        dic = self.metadata
+        ref = "pan" if dic["pan_overlap"] > dic["xs_overlap"] else "xs"
+        pths = self.assets_paths
+        for key in ["cld", "roi"]:
+            pths[f"{key}_msk_vec"] = pths[f"{key}_msk_vec_{ref}"]
+
+
+class Spot67IGNScene(Spot67Scene):
+    """
+    Spot 6/7 class for IGN products.
+
+    A Spot67IGNScene object can be instantiated from the XS and PAN rasters
+    paths.
+
+    """
 
+    def __init__(self, assets_paths: Dict[str, str]):
         """
-        metadata = super().get_metadata()
-        metadata.update({
-            "DIMAP XS": self.dimap_file_xs,
-            "DIMAP PAN": self.dimap_file_pan,
-            "Cloud mask": self.cld_msk_file,
-            "Cloud mask XS": self.cld_msk_file_xs,
-            "Cloud mask PAN": self.cld_msk_file_pan,
-            "ROI mask": self.roi_msk_file,
-            "ROI mask XS": self.roi_msk_file_xs,
-            "ROI mask PAN": self.roi_msk_file_pan,
-            "Azimuth angle": self.azimuth_angle,
-            "Viewing angle across track": self.viewing_angle_across,
-            "Viewing angle along track": self.viewing_angle_along,
-            "Viewing angle": self.viewing_angle,
-            "Incidence angle": self.incidence_angle,
-            "Sun elevation": self.sun_elevation,
-            "Sun azimuth": self.sun_azimuth,
-            "Bounding box XS (WGS84)": self.bbox_wgs84_xs,
-            "Bounding box PAN (WGS84)": self.bbox_wgs84_pan
-        })
-        return metadata
+        Args:
+            assets_paths: assets paths
+
+        """
+        # Retrieve date
+        assert "src_xs" in assets_paths, "XS image is missing"
+        assert "src_pan" in assets_paths, "PAN image is missing"
+        xs_basepath = utils.basename(assets_paths["src_xs"])
+        # 8 first consecutive digits in the XS filename
+        result = re.search(r"\d{8}", xs_basepath)
+        datestr = result.group(0) if result else result
+        acquisition_date = dates.str2datetime(datestr)
+
+        # Call parent constructor
+        super().__init__(
+            assets_paths=assets_paths,
+            acquisition_date=acquisition_date
+        )
diff --git a/scenes/stac.py b/scenes/stac.py
new file mode 100644
index 0000000000000000000000000000000000000000..380141ba101398dc436bf2389d00055cbe60fd85
--- /dev/null
+++ b/scenes/stac.py
@@ -0,0 +1,146 @@
+"""
+This module enables various operations from STAC catalogs.
+
+# Spot-6/7 DRS from Dinamis (experimental)
+
+```python
+api = Client.open(
+    'https://stacapi-dinamis.apps.okd.crocc.meso.umontpellier.fr',
+    modifier=dinamis_sdk.sign_inplace,
+)
+res = api.search(
+    collections=["spot-6-7-drs"],
+    bbox=[4.09, 43.99, 5, 44.01],
+    datetime=['2020-01-01', '2021-01-01']
+)
+
+for item in res.items():
+    sc = from_stac(item)
+    assert isinstance(sc, Spot67DRSScene)
+    print(sc)
+```
+
+# Sentinel-2 images from Microsoft planetary computer
+
+```python
+api = Client.open(
+    'https://planetarycomputer.microsoft.com/api/stac/v1',
+    modifier=planetary_computer.sign_inplace,
+)
+res = api.search(
+    collections=["sentinel-2-l2a"],
+    bbox=[4.99, 43.99, 5, 44.01],
+    datetime=['2020-01-01', '2020-01-08']
+)
+
+for item in res.items():
+    sc = from_stac(item)
+    assert isinstance(sc, Sentinel2MPCScene)
+    print(sc)
+```
+
+"""
+from __future__ import annotations
+from typing import Dict, List
+from urllib.parse import urlparse
+import pystac
+
+from scenes.core import Scene
+from scenes.sentinel import Sentinel2MPCScene
+from scenes.spot import Spot67DRSScene
+
+vsicurl_media_types = [
+    pystac.MediaType.GEOPACKAGE,
+    pystac.MediaType.GEOJSON,
+    pystac.MediaType.COG,
+    pystac.MediaType.GEOTIFF,
+    pystac.MediaType.TIFF,
+    pystac.MediaType.JPEG2000
+]
+
+
+def _get_asset_path(asset: pystac.Asset) -> str:
+    """
+    Return the URI suited for GDAL if the asset is some geospatial data.
+    If the asset.href starts with "http://", "https://", etc. then the
+    returned URI is prefixed with "/vsicurl".
+
+    Args:
+        asset: STAC asset
+
+    Returns:
+        URI, with or without "/vsicurl/" prefix
+
+    """
+    protocols = ["http://", "https://"]
+    url = asset.href
+    if asset.media_type in vsicurl_media_types:
+        if any(url.lower().startswith(prefix) for prefix in protocols):
+            return f"/vsicurl/{url}"
+    return url
+
+
+def _get_assets_paths(
+        item: pystac.Item,
+        domains: List[str],
+        ids_prefixes: List[str]
+) -> Dict[str, str] | None:
+    """
+    Return the dict of assets paths.
+
+    Args:
+        item: STAC item
+        domains: authorized domains
+        ids_prefixes: authorized STAC item ID prefixes
+
+    Returns:
+        assets paths
+
+    """
+    ids_cond = any(item.id.startswith(v) for v in ids_prefixes)
+    url_cond = all(
+        any(
+            urlparse(asset.href).netloc.endswith(domain)
+            for domain in domains
+        )
+        for asset in item.assets.values()
+    )
+    if ids_cond and url_cond:
+        assets_paths = {
+            key: _get_asset_path(asset)
+            for key, asset in item.assets.items()
+        }
+        return assets_paths
+    return None
+
+
+def from_stac(item: pystac.Item) -> Scene:
+    """
+    Return a `Scene` object from a STAC item.
+
+    Args:
+        item: STAC item
+
+    Returns:
+        a `Scene` instance
+
+    """
+    # S2 images from Microsoft Planetary
+    assets_paths = _get_assets_paths(
+        item=item,
+        domains=["microsoft.com", "windows.net"],
+        ids_prefixes=["S2A_MSIL2A", "S2B_MSIL2A"]
+    )
+    if assets_paths:
+        return Sentinel2MPCScene(assets_paths=assets_paths)
+
+    # Spot 6/7 images from Dinamis
+    assets_paths = _get_assets_paths(
+        item=item,
+        domains=["dinamis.apps.okd.crocc.meso.umontpellier.fr"],
+        ids_prefixes=["SPOT"]
+    )
+    if assets_paths:
+        return Spot67DRSScene(assets_paths=assets_paths)
+
+    raise ValueError("No Scene class for this stac item")
diff --git a/scenes/utils.py b/scenes/utils.py
index 8472225be225dd5d841ce121ad9ef93f2385a1aa..19d587519909315e8d7fddcd12012889bd73c78b 100644
--- a/scenes/utils.py
+++ b/scenes/utils.py
@@ -2,15 +2,23 @@
 This module contains a set of generic purpose helpers
 """
 from __future__ import annotations
+from typing import List
 import os
 import glob
 import pathlib
 import zipfile
 import re
 import fnmatch
+import json
+import requests
+from tqdm.autonotebook import tqdm
 
 
-def find_files_in_all_subdirs(pth: str, pattern: str, case_sensitive: bool = True) -> list[str]:
+def find_files_in_all_subdirs(
+        pth: str,
+        pattern: str,
+        case_sensitive: bool = True
+) -> List[str]:
     """
     Returns the list of files matching the pattern in all subdirectories of pth
 
@@ -23,16 +31,18 @@ def find_files_in_all_subdirs(pth: str, pattern: str, case_sensitive: bool = Tru
         list of str
 
     """
-    result = []
-    reg_expr = re.compile(fnmatch.translate(pattern), 0 if case_sensitive else re.IGNORECASE)
+    reg_expr = re.compile(
+        fnmatch.translate(pattern), 0 if case_sensitive else re.IGNORECASE
+    )
+    res = []
     for root, _, files in os.walk(pth, topdown=True):
-        result += [os.path.join(root, j) for j in files if re.match(reg_expr, j)]
-    return result
+        res += [os.path.join(root, j) for j in files if re.match(reg_expr, j)]
+    return res
 
 
-def find_file_in_dir(pth: str, pattern: str) -> list[str]:
+def find_file_in_dir(pth: str, pattern: str) -> List[str]:
     """
-    Returns the list of files matching the pattern in the input directory
+    Returns the list of files matching the pattern in the input directory.
 
     Args:
         pth: path
@@ -47,7 +57,7 @@ def find_file_in_dir(pth: str, pattern: str) -> list[str]:
 
 def get_parent_directory(pth: str) -> str:
     """
-    Return the parent directory of the input directory or file
+    Return the parent directory of the input directory or file.
 
     Args:
         pth: input directory or file
@@ -62,13 +72,14 @@ def get_parent_directory(pth: str) -> str:
     return str(path.parent)
 
 
-def list_files_in_zip(filename: str, endswith: str = None) -> list[str]:
+def list_files_in_zip(filename: str, endswith: str = None) -> List[str]:
     """
-    List files in zip archive
+    List files in zip archive.
 
     Args:
         filename: path of the zip
-        endswith: optional, end of filename to be matched (Default value = None)
+        endswith: optional, end of filename to be matched (Default value is
+         None)
 
     Returns:
         list of filepaths
@@ -84,7 +95,7 @@ def list_files_in_zip(filename: str, endswith: str = None) -> list[str]:
 
 def to_vsizip(zipfn: str, relpth: str) -> str:
     """
-    Create path from zip file
+    Create path from zip file.
 
     Args:
         zipfn: zip archive
@@ -99,7 +110,7 @@ def to_vsizip(zipfn: str, relpth: str) -> str:
 
 def basename(pth: str) -> str:
     """
-    Returns the basename. Works with files and paths
+    Returns the basename. Works with files and paths.
 
     Args:
         pth: path
@@ -109,3 +120,45 @@ def basename(pth: str) -> str:
 
     """
     return str(pathlib.Path(pth).name)
+
+
+def pprint(dic: dict) -> str:
+    """
+    Returns a str pretty-printing the input dict.
+
+    Args:
+        dic: dictionary
+
+    Returns:
+        string
+
+    """
+    return json.dumps(dic, sort_keys=True, indent=4)
+
+
+def download(url: str, out_filename: str, nb_retries=5):
+    """
+    Download a remote file.
+
+    Args:
+        url: url of the file to download
+        out_filename: local file name for the downloaded file
+        nb_retries: number of retries
+
+    """
+    for attempt in range(nb_retries):
+        try:
+            resp = requests.get(url, stream=True, timeout=5)
+            tot_size_in_bytes = int(resp.headers.get('content-length', 0))
+            pbar = tqdm(total=tot_size_in_bytes, unit='iB', unit_scale=True)
+            with open(out_filename, 'wb') as file:
+                for data in resp.iter_content(32 * 1024):  # 32 Kb blocks
+                    pbar.update(len(data))
+                    file.write(data)
+            pbar.close()
+            return  # success: do not retry
+        except requests.exceptions.RequestException as err:
+            print(f"Warning: download has failed ({err}), "
+                  f"(attempt {attempt + 1} / {nb_retries})")
+            if attempt == nb_retries - 1:
+                raise
diff --git a/scenes/vector.py b/scenes/vector.py
index 7d729ea74bd1c0a179512b0a0ea9394eec92d53e..e4a6fffca0866b4cdd7a99a1dc0021a01cb2df79 100644
--- a/scenes/vector.py
+++ b/scenes/vector.py
@@ -2,13 +2,16 @@
 This module contains a set of functions to deal with OGR geometries
 """
 from __future__ import annotations
-from osgeo import osr, ogr
-from scenes.spatial import reproject_coords, epsg2srs, BoundingBox
+from osgeo import ogr
+
+from scenes.spatial import reproject_coords, epsg2srs, BoundingBox, \
+    coordinates_transform
 
 
 def ogr_open(vector_file: str) -> ogr.DataSource:
     """
-    Return the vector dataset from a vector file. If the vector is empty, None is returned.
+    Return the vector dataset from a vector file. If the vector is empty, None
+    is returned.
 
     Args:
         vector_file: input vector file
@@ -19,7 +22,7 @@ def ogr_open(vector_file: str) -> ogr.DataSource:
     """
     poly_ds = ogr.Open(vector_file)
     if poly_ds is None:
-        raise ValueError(f"ERROR: vector layer {vector_file} is None!")
+        return None
     return poly_ds
 
 
@@ -37,12 +40,14 @@ def get_bbox_wgs84(vector_file: str) -> BoundingBox:
     poly_ds = ogr_open(vector_file=vector_file)
     poly_layer = poly_ds.GetLayer()
     extent = poly_layer.GetExtent()
-    coords = [(extent[0], extent[2]), (extent[1], extent[3])]
+    coords = ((extent[0], extent[2]), (extent[1], extent[3]))
     src_srs = poly_layer.GetSpatialRef()
     tgt_srs = epsg2srs(4326)
-    [(xmin, ymin), (xmax, ymax)] = reproject_coords(coords=coords,  # pylint: disable=unbalanced-tuple-unpacking
-                                                    src_srs=src_srs,
-                                                    tgt_srs=tgt_srs)
+    ((xmin, ymin), (xmax, ymax)) = reproject_coords(  # pylint: disable=W0632
+        coords=coords,
+        src_srs=src_srs,
+        tgt_srs=tgt_srs
+    )
     return BoundingBox(xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax)
 
 
@@ -84,7 +89,7 @@ def reproject_ogr_layer(layer: ogr.Layer, epsg: int = 4326) -> ogr.Geometry:
     # set spatial reference and transformation
     sourceprj = layer.GetSpatialRef()
     targetprj = epsg2srs(epsg)
-    transform = osr.CoordinateTransformation(sourceprj, targetprj)
+    transform = coordinates_transform(sourceprj, targetprj)
 
     # apply transformation
     poly = poly_union(layer)
diff --git a/setup.py b/setup.py
index 62f7233fc6eab6613e6c239444319ab20dd68fbc..d60ac3061b9ef4c74aa7cf73d51f07de7987fa1d 100644
--- a/setup.py
+++ b/setup.py
@@ -6,29 +6,28 @@ with open("README.md", "r", encoding="utf-8") as fh:
 
 setuptools.setup(
     name="scenes",
-    version="1.1.0",
+    version="1.6.2",
     author="Rémi Cresson",
     author_email="remi.cresson@inrae.fr",
-    description="Library to ease the processing of local remote sensing products",
+    description="Library to ease the processing of remote sensing products",
     long_description=long_description,
     long_description_content_type="text/markdown",
     url="https://gitlab.irstea.fr/remi.cresson/scenes",
-    classifiers=[
-        "Programming Language :: Python :: 3",
-        "Programming Language :: Python :: 3.6",
-        "Programming Language :: Python :: 3.7",
-        "Programming Language :: Python :: 3.8",
-        "Programming Language :: Python :: 3.9",
-        "Topic :: Scientific/Engineering :: GIS",
-        "Topic :: Scientific/Engineering :: Image Processing",
-        "License :: OSI Approved :: Apache Software License",
-        "Operating System :: OS Independent",
-    ],
     packages=setuptools.find_packages(),
     python_requires=">=3.6",
-    install_requires=["rtree", "pyotb", "pycurl", "tqdm"],
-    keywords="remote sensing, otb, orfeotoolbox, orfeo toolbox, pyotb",
-    scripts=["apps/drs_spot67_import.py", "apps/s2_download.py", "apps/s2_import.py", "apps/search.py"]
+    install_requires=[
+        "rtree",
+        "pyotb==1.5.4",
+        "requests",
+        "tqdm",
+        "pystac-client",
+        "theia-picker",
+    ],
+    keywords=["remote sensing", "otb", "orfeo toolbox", "pyotb", "stac"],
+    scripts=[
+        "apps/drs_spot67_import.py",
+        "apps/s2_download.py",
+        "apps/s2_import.py",
+        "apps/search.py"
+    ]
 )
-#package_dir={"": "src"},
-
diff --git a/test/indexation_test.py b/test/indexation_test.py
index 7e8f78fad06904adbbfcc1b42a2ab99f2b127fdd..7cbe20a398d7d98aa6511c6968db0c6c65241b8f 100644
--- a/test/indexation_test.py
+++ b/test/indexation_test.py
@@ -1,22 +1,25 @@
 # -*- coding: utf-8 -*-
 from scenes_test_base import ScenesTestBase
-from scenes import Index, vector, BoundingBox
+import scenes
 import tests_data
 
+sc = scenes.spot.get_local_spot67drs_scene(dimap_xs=tests_data.DIMAP1_XS, dimap_pan=tests_data.DIMAP1_P)
+
+
 class ImageryTest(ScenesTestBase):
 
     def test_scene1_indexation(self):
-        index = Index(scenes_list=[tests_data.SCENE1])
+        index = scenes.Index(scenes_list=[sc])
         for find in (index.find, index.find_indices):
-            self.assertTrue(find(BoundingBox(43.706, 43.708, 4.317, 4.420)))
-            self.assertFalse(find(BoundingBox(43.000, 43.001, 3.000, 3.001)))
-            self.assertTrue(find(vector.get_bbox_wgs84(tests_data.ROI_MTP_4326)))
-            self.assertTrue(find(vector.get_bbox_wgs84(tests_data.ROI_MTP_2154)))
+            self.assertTrue(find(scenes.BoundingBox(xmin=4.317, xmax=4.420, ymin=43.706, ymax=43.708)))
+            self.assertFalse(find(scenes.BoundingBox(xmin=3.000, xmax=3.001, ymin=43.000, ymax=43.001)))
+            self.assertTrue(find(scenes.vector.get_bbox_wgs84(tests_data.ROI_MTP_4326)))
+            self.assertTrue(find(scenes.vector.get_bbox_wgs84(tests_data.ROI_MTP_2154)))
             self.assertTrue(find(tests_data.ROI_MTP_4326))
             self.assertTrue(find(tests_data.ROI_MTP_2154))
 
     def test_epsg(self):
-        self.assertTrue(tests_data.SCENE1.epsg == 2154)
+        self.assertTrue(sc.epsg == 2154)
 
 
 if __name__ == '__main__':
diff --git a/test/scenes_test_base.py b/test/scenes_test_base.py
index 1a46f3383e90bf4f81a4ca9aa52cdf669c90f7a6..1e0be36527c2ae8191ff8e80843d54898a06ba30 100644
--- a/test/scenes_test_base.py
+++ b/test/scenes_test_base.py
@@ -1,14 +1,17 @@
 # -*- coding: utf-8 -*-
-import unittest
 import filecmp
-from abc import ABC
 import pyotb
+import unittest
+from abc import ABC
+
+from scenes import utils
 
 
 class ScenesTestBase(ABC, unittest.TestCase):
     """
     Base class for tests
     """
+
     def compare_images(self, image, reference, roi=None, mae_threshold=0.01):
         """
         Compare one image (typically: an output image) with a reference
@@ -18,24 +21,30 @@ class ScenesTestBase(ABC, unittest.TestCase):
         :param mae_threshold: mean absolute error threshold
         :return: boolean
         """
-
-        nbchannels_reconstruct = pyotb.Input(image).shape[-1]
-        nbchannels_baseline = pyotb.Input(reference).shape[-1]
-
-        self.assertTrue(nbchannels_reconstruct == nbchannels_baseline)
-
-        for i in range(1, nbchannels_baseline + 1):
-            dic = {'ref.in': reference,
-                   'ref.channel': i,
-                   'meas.in': image,
-                   'meas.channel': i}
+        nbands_in = pyotb.Input(image).shape[-1]
+        nbands_ref = pyotb.Input(reference).shape[-1]
+
+        assert nbands_in == nbands_ref, f"Image has {nbands_in} bands but baseline has {nbands_ref}"
+
+        for i in range(1, nbands_ref + 1):
+            dic = {
+                'ref.in': reference,
+                'ref.channel': i,
+                'meas.in': image,
+                'meas.channel': i
+            }
             if roi:
-                dic.update({"roi.startx": roi[0],
-                            "roi.starty": roi[1],
-                            "roi.sizex": roi[2],
-                            "roi.sizey": roi[3]})
+                dic.update({
+                    "roi.startx": roi[0],
+                    "roi.starty": roi[1],
+                    "roi.sizex": roi[2],
+                    "roi.sizey": roi[3]
+                })
             comp = pyotb.CompareImages(dic)
-            self.assertTrue(comp.GetParameterFloat('mae') < mae_threshold)
+            mae = comp.GetParameterFloat('mae')
+            assert mae < mae_threshold, f"MAE is {mae} > {mae_threshold}\n" \
+                                        f"\nTarget summary:\n{utils.pprint(image.summarize())}\n" \
+                                        f"\nReference summary:\n{utils.pprint(reference.summarize())}"
 
     def compare_file(self, file, reference):
         """
@@ -48,4 +57,5 @@ class ScenesTestBase(ABC, unittest.TestCase):
         Return:
              a boolean
         """
-        self.assertTrue(filecmp.cmp(file, reference))
+        assert filecmp.cmp(file, reference), \
+            f"File {file} differs from baseline ({reference})"
diff --git a/test/sentinel2_theia_test.py b/test/sentinel2_theia_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..c334a56a11ab5ef53b6665e9ea59edcbda80616e
--- /dev/null
+++ b/test/sentinel2_theia_test.py
@@ -0,0 +1,89 @@
+# -*- coding: utf-8 -*-
+import tempfile
+from scenes_test_base import ScenesTestBase
+import tests_data
+import scenes
+import pyotb
+
+
+def temp_file():
+    return tempfile.NamedTemporaryFile(suffix=".tif").name
+
+
+def concat(input_list):
+    return pyotb.ConcatenateImages({"il": input_list})
+
+
+sc_2a = scenes.sentinel.Sentinel22AScene(archive=tests_data.ARCHIVE1)
+sc_3a = scenes.sentinel.Sentinel23AScene(archive=tests_data.ARCHIVE2)
+scs = [sc_2a, sc_3a]
+all_10m_bands = ["b4", "b3", "b2", "b8"]
+all_20m_bands = ["b5", "b6", "b7", "b8a", "b11", "b12"]
+
+
+class Sentinel2TheiaImageryTest(ScenesTestBase):
+
+    def test_instantiate_theia_2a(self):
+        assert sc_2a
+
+    def test_instantiate_theia_3a(self):
+        assert sc_3a
+
+    def test_bands(self):
+        for sc in scs:
+            for getter in [sc.get_10m_bands, sc.get_20m_bands]:
+                assert getter()
+
+    def test_write_bands_all(self):
+        tmp = temp_file()
+        for sc in scs:
+            sc.get_10m_bands().write(tmp)
+            self.compare_images(tmp, concat([sc.assets_paths[key] for key in all_10m_bands]))
+            sc.get_20m_bands().write(tmp)
+            self.compare_images(tmp, concat([sc.assets_paths[key] for key in all_20m_bands]))
+
+    def test_write_bands_selection(self):
+        tmp = temp_file()
+        for sc in scs:
+            sc.get_10m_bands("b4").write(tmp)
+            self.compare_images(tmp, sc.assets_paths["b4"])
+            sc.get_10m_bands("B4").write(tmp)
+            self.compare_images(tmp, sc.assets_paths["b4"])
+            sc.get_10m_bands(["b4"]).write(tmp)
+            self.compare_images(tmp, sc.assets_paths["b4"])
+            sc.get_20m_bands("b5").write(tmp)
+            self.compare_images(tmp, sc.assets_paths["b5"])
+            sc.get_20m_bands(["b5", "b7"]).write(tmp)
+            self.compare_images(tmp, concat([sc.assets_paths["b5"], sc.assets_paths["b7"]]))
+            sc.get_20m_bands("b5", "b7").write(tmp)
+            self.compare_images(tmp, concat([sc.assets_paths["b5"], sc.assets_paths["b7"]]))
+
+    def test_write_single_band(self):
+        tmp = temp_file()
+        for sc in scs:
+            for band in all_10m_bands + all_20m_bands:
+                get_band = getattr(sc, f"get_{band}")
+                get_band().write(tmp)
+                self.compare_images(tmp, sc.assets_paths[band])
+
+    def test_cld_write(self):
+        # TODO: compare with baseline
+        tmp = temp_file()
+        sc_2a.get_10m_bands().cld_msk_drilled().write(tmp)
+        sc_2a.get_20m_bands().cld_msk_drilled().write(tmp)
+
+    def test_flg_write(self):
+        # TODO: compare with baseline
+        tmp = temp_file()
+        sc_3a.get_10m_bands().flg_msk_drilled().write(tmp)
+        sc_3a.get_20m_bands().flg_msk_drilled().write(tmp)
+
+    def test_print(self):
+        for sc in scs:
+            print(sc)
+            for getter in [sc.get_10m_bands, sc.get_20m_bands]:
+                print(getter())
+
+
+if __name__ == '__main__':
+    import unittest; unittest.main()
diff --git a/test/serialization_test.py b/test/serialization_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..5ee733b226e0bb79038e250b8082d3f90329cbc3
--- /dev/null
+++ b/test/serialization_test.py
@@ -0,0 +1,42 @@
+# -*- coding: utf-8 -*-
+import os
+from scenes_test_base import ScenesTestBase
+import scenes
+import tests_data
+import tempfile
+
+sc_spot67 = scenes.spot.get_local_spot67drs_scene(
+    dimap_xs=tests_data.DIMAP1_XS,
+    dimap_pan=tests_data.DIMAP1_P
+)
+sc_s2a = scenes.sentinel.Sentinel22AScene(archive=tests_data.ARCHIVE1)
+sc_s3a = scenes.sentinel.Sentinel23AScene(archive=tests_data.ARCHIVE2)
+scs = [sc_spot67, sc_s2a, sc_s3a]
+
+class SerializationTest(ScenesTestBase):
+
+    def compare_scenes(self, sc1, sc2):
+        assert sc1.metadata == sc2.metadata
+
+    def test_serialize(self):
+        for sc in scs:
+            with tempfile.NamedTemporaryFile() as tmp:
+                pickle_file = tmp.name
+                scenes.save_scenes(scenes_list=[sc], pickle_file=pickle_file)
+                assert os.path.isfile(pickle_file), \
+                    f"File {pickle_file} not found!"
+
+    def test_deserialize(self):
+        for sc in scs:
+            with tempfile.NamedTemporaryFile() as tmp:
+                pickle_file = tmp.name
+                scenes.save_scenes(scenes_list=[sc], pickle_file=pickle_file)
+                loaded_scs = scenes.load_scenes(pickle_file=pickle_file)
+                assert len(loaded_scs) == 1, \
+                    f"There is {len(loaded_scs)} scenes " \
+                    f"in {pickle_file}! (expected 1)"
+                self.compare_scenes(sc1=loaded_scs[0], sc2=sc)
+
+
+if __name__ == '__main__':
+    import unittest; unittest.main()
diff --git a/test/spatial_test.py b/test/spatial_test.py
index a05bc5fba078f53b4a10db8066895a0ec6252117..17d55ca7a2753570640bd1b4193d82d3c9d1509e 100644
--- a/test/spatial_test.py
+++ b/test/spatial_test.py
@@ -3,15 +3,18 @@ from scenes_test_base import ScenesTestBase
 import tests_data
 from scenes import raster, vector
 from scenes.spatial import BoundingBox
+import scenes
 import pyotb
-import gdal
+from osgeo import gdal
+
+sc = scenes.spot.get_local_spot67drs_scene(dimap_xs=tests_data.DIMAP1_XS, dimap_pan=tests_data.DIMAP1_P)
 
-class SpatialTest(ScenesTestBase):
 
-    BBOX_REF = BoundingBox(xmin=43.6279867026818,
-                           xmax=43.70827724313909,
-                           ymin=4.3153933040851165,
-                           ymax=4.421982273366675)
+class SpatialTest(ScenesTestBase):
+    BBOX_REF = BoundingBox(xmin=4.3153933040851165,
+                           xmax=4.421982273366675,
+                           ymin=43.6279867026818,
+                           ymax=43.70827724313909)
 
     def compare_bboxes(self, bbox1, bbox2, eps=1e-7):
         coords = [(bbox1.xmin, bbox2.xmin),
@@ -22,27 +25,28 @@ class SpatialTest(ScenesTestBase):
             self.assertTrue(abs(v1 - v2) < eps)
 
     def test_bbox(self):
-        assert isinstance(raster.get_bbox_wgs84(tests_data.SCENE1.dimap_file_xs), BoundingBox)
+        assert isinstance(raster.get_bbox_wgs84(sc.get_xs()), BoundingBox)
         assert isinstance(vector.get_bbox_wgs84(tests_data.ROI_MTP_4326), BoundingBox)
 
     def test_otbinput_bbox(self):
-        img = pyotb.Input(tests_data.SCENE1.dimap_file_xs)
+        img = pyotb.Input(tests_data.DIMAP1_XS)
         bbox_otb = raster.get_bbox_wgs84(img)
         self.compare_bboxes(bbox_otb, self.BBOX_REF)
 
     def test_otbapp_bbox(self):
-        img = pyotb.DynamicConvert(tests_data.SCENE1.dimap_file_xs)
+        img = pyotb.DynamicConvert(sc.get_xs())
         bbox_otb = raster.get_bbox_wgs84(img)
         self.compare_bboxes(bbox_otb, self.BBOX_REF)
 
     def test_gdal_ds_bbox(self):
-        gdal_ds = gdal.Open(tests_data.SCENE1.dimap_file_xs)
+        gdal_ds = gdal.Open(tests_data.DIMAP1_XS)
         bbox_gdal = raster.get_bbox_wgs84(gdal_ds)
         self.compare_bboxes(bbox_gdal, self.BBOX_REF)
 
     def test_filename_bbox(self):
-        bbox_file = raster.get_bbox_wgs84(tests_data.SCENE1.dimap_file_xs)
+        bbox_file = raster.get_bbox_wgs84(tests_data.DIMAP1_XS)
         self.compare_bboxes(bbox_file, self.BBOX_REF)
 
+
 if __name__ == '__main__':
     unittest.main()
diff --git a/test/spot67_imagery_test.py b/test/spot67_imagery_test.py
index 94d967067b59fd0ff6940a5fd3d606cd3ca32f58..6d1afbf7c5181f5b20be53de901a3fac9dfe0213 100644
--- a/test/spot67_imagery_test.py
+++ b/test/spot67_imagery_test.py
@@ -1,63 +1,136 @@
 # -*- coding: utf-8 -*-
-from scenes_test_base import ScenesTestBase
+from functools import singledispatch
+from typing import Any
 import pyotb
-import tests_data
 
+import scenes
+import tests_data
+from scenes_test_base import ScenesTestBase
 
-class Spot67ImageryTest(ScenesTestBase):
+# Baseline
+xs_dn_ref = tests_data.DIMAP1_XS
+p_dn_ref = tests_data.DIMAP1_P
+xs_toa_ref = pyotb.OpticalCalibration(tests_data.DIMAP1_XS)
+p_toa_ref = pyotb.OpticalCalibration(tests_data.DIMAP1_P)
+roi = [2500, 2500, 500, 500]  # For pxs we test the central area only, because
+# bayes method messes with nodata outside image boundaries
+pyotb_pxs = pyotb.BundleToPerfectSensor
+pxs_dn_ref = pyotb_pxs({"inxs": xs_dn_ref, "inp": p_dn_ref})
+pxs_toa_ref = pyotb_pxs({"inxs": xs_toa_ref, "inp": p_toa_ref})
+
+# Inputs
+sc = scenes.spot.get_local_spot67drs_scene(
+    dimap_xs=tests_data.DIMAP1_XS,
+    dimap_pan=tests_data.DIMAP1_P
+)
+xs = sc.get_xs()
+pan = sc.get_pan()
+pxs = sc.get_pxs()
+
+
+@singledispatch
+def subset(inp: Any) -> Any:
+    raise TypeError(
+        "Invalid type, must be one of: pyotb.otbObject, Source"
+    )
+
+
+@subset.register
+def subset_source(inp: scenes.core.Source):
+    return inp.subset(
+        startx=roi[0],
+        sizex=roi[0] + roi[2],  # NOTE(review): start+size looks like an end coordinate, not a size — confirm subset() semantics
+        starty=roi[1],
+        sizey=roi[1] + roi[3]
+    )
+
+
+@subset.register
+def subset_pyotb(inp: pyotb.otbObject):
+    return inp[roi[0]:roi[0] + roi[2], roi[1]:roi[1] + roi[3], :]
 
-    def compare(self, inp, ref, reflectance):
-        actual_ref = pyotb.OpticalCalibration(ref) if reflectance=="toa" else ref
-        self.compare_images(inp, actual_ref)
 
-    def xs_imagery(self, reflectance):
-        xs = tests_data.SCENE1.get_xs(reflectance=reflectance)
-        self.compare(xs, tests_data.DIMAP1_XS, reflectance)
+class Spot67ImageryTest(ScenesTestBase):
 
-    def pan_imagery(self, reflectance):
-        pan = tests_data.SCENE1.get_pan(reflectance=reflectance)
-        self.compare(pan, tests_data.DIMAP1_P, reflectance)
+    def test_instanciate_sc(self):
+        assert sc
 
     def test_xs_dn(self):
-        self.xs_imagery("dn")
-
-    def test_pan_dn(self):
-        self.pan_imagery("dn")
+        self.compare_images(image=xs, reference=xs_dn_ref)
 
     def test_xs_toa(self):
-        self.xs_imagery("toa")
+        self.compare_images(image=xs.reflectance(), reference=xs_toa_ref)
 
-    def test_pan_toa(self):
-        self.pan_imagery("toa")
-
-    def pxs_compare(self, pxs, reflectance):
-        pxs_baseline = pyotb.BundleToPerfectSensor({"inxs": pyotb.OpticalCalibration(tests_data.DIMAP1_XS) if reflectance=="toa" else tests_data.DIMAP1_XS,
-                                                    "inp": pyotb.OpticalCalibration(tests_data.DIMAP1_P) if reflectance=="toa" else tests_data.DIMAP1_P,
-                                                    "method": "bayes"})
-        # We test the central area only, because bayes method messes with nodata outside image
-        self.compare_images(pxs, pxs_baseline, roi=[2500, 2500, 500, 500])
-
-    def pxs_imagery(self, reflectance):
-        pxs = tests_data.SCENE1.get_pxs(reflectance=reflectance)
-        self.pxs_compare(pxs, reflectance)
+    def test_pan_dn(self):
+        self.compare_images(image=pan, reference=p_dn_ref)
 
-    def pxs_imagery_cld_msk_drilled(self, reflectance):
-        pxs = tests_data.SCENE1.get_pxs(reflectance=reflectance)
-        pxs_drilled = pxs.cld_msk_drilled()
-        self.pxs_compare(pxs_drilled, reflectance)
+    def test_pan_toa(self):
+        self.compare_images(image=pan.reflectance(), reference=p_toa_ref)
 
     def test_pxs_dn(self):
-        self.pxs_imagery("dn")
+        self.compare_images(image=pxs, reference=pxs_dn_ref, roi=roi)
 
     def test_pxs_toa(self):
-        self.pxs_imagery("toa")
-
-    def test_pxs_drilled_dn(self):
-        self.pxs_imagery_cld_msk_drilled("dn")
-
-    def test_pxs_drilled_toa(self):
-        self.pxs_imagery_cld_msk_drilled("toa")
+        self.compare_images(
+            image=pxs.reflectance(), reference=pxs_toa_ref, roi=roi
+        )
+
+    def test_pxs_dn_msk_drilled(self):
+        """
+        Dummy test since no cloud in the input scene.
+        This test just checks that the code runs, but it does not guarantee
+        that the results are good.
+        """
+        self.compare_images(
+            image=pxs.cld_msk_drilled(), reference=pxs_dn_ref, roi=roi
+        )
+
+    def test_pxs_toa_msk_drilled(self):
+        """
+        Dummy test since no cloud in the input scene.
+        This test just checks that the code runs, but it does not guarantee
+        that the results are good.
+        """
+        self.compare_images(
+            image=pxs.reflectance().cld_msk_drilled(),
+            reference=pxs_toa_ref,
+            roi=roi
+        )
+
+    def test_pxs_dn_msk_drilled_cached(self):
+        """
+        Dummy test since no cloud in the input scene.
+        This test just checks that the code runs, but it does not guarantee
+        that the results are good.
+        """
+        for _ in range(2):
+            self.compare_images(
+                image=subset(pxs).cld_msk_drilled().cached(),
+                reference=subset(pxs_dn_ref)
+            )
+
+    def test_pxs_toa_msk_drilled_cached(self):
+        """
+        Dummy test since no cloud in the input scene.
+        This test just checks that the code runs, but it does not guarantee
+        that the results are good.
+        """
+        for _ in range(2):
+            self.compare_images(
+                image=subset(pxs).reflectance().cld_msk_drilled().cached(),
+                reference=subset(pxs_toa_ref),
+            )
+
+    def test_print(self):
+        print(sc)
+        print(xs)
+        print(pan)
+        print(pxs)
 
 
 if __name__ == '__main__':
-    unittest.main()
+    # Run the full suite; the explicit per-test calls previously here were
+    # leftover debug invocations that silently skipped the other tests.
+    import unittest
+    unittest.main()
+
diff --git a/test/stac_test.py b/test/stac_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..ff21ff258cb1bcc41d49c0e9d207fbb981455d8b
--- /dev/null
+++ b/test/stac_test.py
@@ -0,0 +1,46 @@
+# -*- coding: utf-8 -*-
+import planetary_computer
+import dinamis_sdk
+from pystac_client import Client
+
+from scenes.stac import from_stac
+from scenes_test_base import ScenesTestBase
+from scenes.sentinel import Sentinel2MPCScene
+from scenes.spot import Spot67DRSScene
+
+
+class StacTest(ScenesTestBase):
+
+    def test_planetary(self):
+        api = Client.open(
+            'https://planetarycomputer.microsoft.com/api/stac/v1',
+            modifier=planetary_computer.sign_inplace,
+        )
+        res = api.search(
+            collections=["sentinel-2-l2a"],
+            bbox=[4.99, 43.99, 5, 44.01],
+            datetime=['2020-01-01', '2020-01-08']
+        )
+
+        for item in res.items():
+            sc = from_stac(item)
+            assert isinstance(sc, Sentinel2MPCScene)
+
+    # def test_dinamis(self):
+    #     api = Client.open(
+    #         'https://stacapi-dinamis.apps.okd.crocc.meso.umontpellier.fr',
+    #         modifier=dinamis_sdk.sign_inplace,
+    #     )
+    #     res = api.search(
+    #         collections=["spot-6-7-drs"],
+    #         bbox=[4.09, 43.99, 5, 44.01],
+    #         datetime=['2020-01-01', '2021-01-01']
+    #     )
+    #
+    #     for item in res.items():
+    #         sc = from_stac(item)
+    #         assert isinstance(sc, Spot67DRSScene)
+
+
+if __name__ == '__main__':
+    StacTest().test_planetary()
diff --git a/test/tests_data.py b/test/tests_data.py
index 90ffd2c0a35a41c9e3956b7cf63c9e0296806645..3fe6d2d395e506a32fa82241af0da24f4de8e0da 100644
--- a/test/tests_data.py
+++ b/test/tests_data.py
@@ -1,18 +1,11 @@
 # -*- coding: utf-8 -*-
 import os
-from scenes import spot
 
 
-# TEST_DATA_DIR environment variable
 TEST_DATA_DIR = os.environ["TEST_DATA_DIR"]
-
-# Filenames
-DIMAP1_XS = TEST_DATA_DIR + "/input/ROI_1_Bundle_Ortho_GSD2015/PROD_SPOT6_001/VOL_SPOT6_001_A/IMG_SPOT6_MS_001_A/" \
-                            "DIM_SPOT6_MS_201503261014386_ORT_SPOT6_20170524_1422391k0ha487979cy_1.XML"
-DIMAP1_P = TEST_DATA_DIR + "/input/ROI_1_Bundle_Ortho_GSD2015/PROD_SPOT6_001/VOL_SPOT6_001_A/IMG_SPOT6_P_001_A/" \
-                           "DIM_SPOT6_P_201503261014386_ORT_SPOT6_20170524_1422391k0ha487979cy_1.XML"
-ROI_MTP_2154 = TEST_DATA_DIR + "/input/roi_mtp_2154.gpkg"
-ROI_MTP_4326 = TEST_DATA_DIR + "/input/roi_mtp_4326.gpkg"
-
-# Instances
-SCENE1 = spot.Spot67Scene(dimap_file_xs=DIMAP1_XS, dimap_file_pan=DIMAP1_P)
\ No newline at end of file
+DIMAP1_XS = os.path.join(TEST_DATA_DIR, "input/ROI_1_Bundle_Ortho_GSD2015/PROD_SPOT6_001/VOL_SPOT6_001_A/IMG_SPOT6_MS_001_A/DIM_SPOT6_MS_201503261014386_ORT_SPOT6_20170524_1422391k0ha487979cy_1.XML")
+DIMAP1_P = os.path.join(TEST_DATA_DIR, "input/ROI_1_Bundle_Ortho_GSD2015/PROD_SPOT6_001/VOL_SPOT6_001_A/IMG_SPOT6_P_001_A/DIM_SPOT6_P_201503261014386_ORT_SPOT6_20170524_1422391k0ha487979cy_1.XML")
+ARCHIVE1 = os.path.join(TEST_DATA_DIR, "input/SENTINEL2B_20201012-105848-497_L2A_T31TEJ_C_V2-2")
+ARCHIVE2 = os.path.join(TEST_DATA_DIR, "input/SENTINEL2X_20191215-000000-000_L3A_T31TEJ_C_V2-2")
+ROI_MTP_2154 = os.path.join(TEST_DATA_DIR, "input/roi_mtp_2154.gpkg")
+ROI_MTP_4326 = os.path.join(TEST_DATA_DIR, "input/roi_mtp_4326.gpkg")