From a29992b74e165cab855adcb5dc4af57904ef3223 Mon Sep 17 00:00:00 2001
From: Le Roux Erwan <erwan.le-roux@irstea.fr>
Date: Mon, 2 Mar 2020 17:13:08 +0100
Subject: [PATCH] [paper 1] modify the data source (use the latest version
 from AERIS) and shift the years used (the study now goes from 1959 to 2019)

---
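Note on the shifted year indexing, added here as a patch note (a minimal
sketch, not part of the diff below; it assumes the reanalysis .nc filenames
still encode the first calendar year of each winter):

    # Hypothetical helper illustrating the new convention: a study "year"
    # now refers to the calendar year in which the winter ends, hence the
    # "+1" added in AbstractStudy.ordered_years_and_path_files. With
    # year_min=1959 and year_max=2020 the study covers the 1958/1959 to
    # 2018/2019 winters, i.e. the years 1959 to 2019 from the subject line.
    def filename_to_study_year(nc_filename: str) -> int:
        starting_year = int(nc_filename.split('_')[-2][:4])
        return starting_year + 1
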
 .../eurocode_data/main_eurocode_drawing.py    | 154 ------------------
 experiment/eurocode_data/utils.py             |   2 +-
 .../adamont_data/ensemble_simulation.py       |   1 +
 .../scm_models_data/abstract_study.py         |  16 +-
 .../scm_models_data/crocus/crocus.py          |   4 +-
 .../scm_models_data/safran/safran.py          |  36 +---
 .../main_files/main_full_hypercube.py         |   4 +-
 .../abstract_gev_trend_test.py                |  43 +++--
 .../main_bayesian_mcmc.py                     |   4 +-
 .../main_result_trends_and_return_levels.py   |   7 +-
 .../plot_uncertainty_curves.py                |   5 +-
 ...dy_visualizer_for_non_stationary_trends.py |   5 +-
 .../test_experiment/test_SCM_oriented_data.py |   4 +-
 test/test_experiment/test_SCM_study.py        |  10 +-
 14 files changed, 61 insertions(+), 234 deletions(-)
 delete mode 100644 experiment/eurocode_data/main_eurocode_drawing.py

diff --git a/experiment/eurocode_data/main_eurocode_drawing.py b/experiment/eurocode_data/main_eurocode_drawing.py
deleted file mode 100644
index e1c373f9..00000000
--- a/experiment/eurocode_data/main_eurocode_drawing.py
+++ /dev/null
@@ -1,154 +0,0 @@
-# import time
-# import os.path as op
-# import matplotlib.pyplot as plt
-# from collections import OrderedDict
-#
-# from experiment.meteo_france_data.scm_models_data.visualization.study_visualization.study_visualizer import \
-#     StudyVisualizer
-# from extreme_fit.model.result_from_model_fit.result_from_extremes.eurocode_return_level_uncertainties import \
-#     ConfidenceIntervalMethodFromExtremes
-# from papers.exceeding_snow_loads.result_trends_and_return_levels.plot_uncertainty_curves import \
-#     plot_uncertainty_massifs
-# from experiment.eurocode_data.massif_name_to_departement import MASSIF_NAMES_ALPS
-# from experiment.eurocode_data.utils import EUROCODE_ALTITUDES
-# from experiment.meteo_france_data.scm_models_data.crocus.crocus import CrocusSwe3Days, CrocusSweTotal
-# from experiment.meteo_france_data.scm_models_data.visualization.hypercube_visualization.altitude_hypercube_visualizer import \
-#     AltitudeHypercubeVisualizer
-# from experiment.meteo_france_data.scm_models_data.visualization.hypercube_visualization.utils_hypercube import \
-#     load_altitude_visualizer
-# from extreme_fit.model.margin_model.linear_margin_model.temporal_linear_margin_models import StationaryTemporalModel, \
-#     NonStationaryLocationAndScaleTemporalModel
-#
-# # Model class
-# import matplotlib as mpl
-#
-# from root_utils import VERSION_TIME
-#
-# mpl.rcParams['text.usetex'] = True
-# mpl.rcParams['text.latex.preamble'] = [r'\usepackage{amsmath}']
-#
-#
-# def massif_name_to_ordered_return_level_uncertainties(model_class, last_year_for_the_data, altitudes, massif_names,
-#                                                       uncertainty_methods, temporal_covariate):
-#     # Load model name
-#     model_name = get_model_name(model_class)
-#     # Load altitude visualizer
-#     altitude_visualizer = load_altitude_visualizer(AltitudeHypercubeVisualizer, altitudes=altitudes,
-#                                                    last_starting_year=None, nb_data_reduced_for_speed=False,
-#                                                    only_first_one=False, save_to_file=False,
-#                                                    exact_starting_year=1958,
-#                                                    first_starting_year=None,
-#                                                    study_classes=[CrocusSwe3Days, CrocusSweTotal][1:],
-#                                                    trend_test_class=None)  # type: AltitudeHypercubeVisualizer
-#     # Loop on the data
-#     assert isinstance(altitude_visualizer.tuple_to_study_visualizer, OrderedDict)
-#     massif_name_to_ordered_eurocode_level_uncertainty = {
-#         massif_name: {ci_method: [] for ci_method in uncertainty_methods} for massif_name in massif_names}
-#     for altitude, visualizer in altitude_visualizer.tuple_to_study_visualizer.items():
-#         print('{} processing altitude = {} '.format(model_name, altitude))
-#         for ci_method in uncertainty_methods:
-#             d = visualizer.massif_name_to_altitude_and_eurocode_level_uncertainty(model_class, last_year_for_the_data,
-#                                                                                   massif_names, ci_method,
-#                                                                                   temporal_covariate)
-#             # Append the altitude one by one
-#             for massif_name, return_level_uncertainty in d.items():
-#                 massif_name_to_ordered_eurocode_level_uncertainty[massif_name][ci_method].append(
-#                     return_level_uncertainty)
-#     return {model_name: massif_name_to_ordered_eurocode_level_uncertainty}
-#
-#
-# def plot_ci_graphs(altitudes, massif_names, model_class_and_last_year, show, temporal_covariate, uncertainty_methods):
-#     model_name_to_massif_name_to_ordered_return_level = {}
-#     for model_class, last_year_for_the_data in model_class_and_last_year:
-#         start = time.time()
-#         model_name_to_massif_name_to_ordered_return_level.update(
-#             massif_name_to_ordered_return_level_uncertainties(model_class, last_year_for_the_data, altitudes,
-#                                                               massif_names, uncertainty_methods, temporal_covariate))
-#         duration = time.time() - start
-#         print('Duration:', model_class, duration)
-#     # Transform the dictionary into the desired format
-#     massif_name_to_model_name_to_ordered_return_level_uncertainties = {}
-#     for massif_name in massif_names:
-#         d2 = {model_name: model_name_to_massif_name_to_ordered_return_level[model_name][massif_name] for model_name in
-#               model_name_to_massif_name_to_ordered_return_level.keys()}
-#         massif_name_to_model_name_to_ordered_return_level_uncertainties[massif_name] = d2
-#     # Plot graph
-#     plot_uncertainty_massifs(
-#         massif_name_to_model_name_to_ordered_return_level_uncertainties, nb_massif_names=len(massif_names),
-#         nb_model_names=len(model_class_and_last_year))
-#     if show:
-#         plt.show()
-#     else:
-#         massif_names_str = '_'.join(massif_names)
-#         model_names_str = '_'.join(
-#             [model_name for model_name in model_name_to_massif_name_to_ordered_return_level.keys()])
-#         filename = op.join(VERSION_TIME, model_names_str + '_' + massif_names_str)
-#         StudyVisualizer.savefig_in_results(filename)
-#
-#
-# def main_drawing():
-#     fast_plot = [True, False][0]
-#     temporal_covariate = 2017
-#     # Select parameters
-#     massif_names = MASSIF_NAMES_ALPS[:]
-#     model_class_and_last_year = [
-#                                     (StationaryTemporalModel, 2017),
-#                                     (NonStationaryLocationAndScaleTemporalModel, 2017),
-#                                     # Add the temperature here
-#                                 ][:]
-#     altitudes = EUROCODE_ALTITUDES[:]
-#     uncertainty_methods = [ConfidenceIntervalMethodFromExtremes.my_bayes,
-#                            ConfidenceIntervalMethodFromExtremes.ci_mle]
-#     show = False
-#
-#     if fast_plot:
-#         show = True
-#         model_class_and_last_year = model_class_and_last_year[:]
-#         altitudes = altitudes[-2:]
-#         # altitudes = altitudes[:]
-#         massif_names = massif_names[:1]
-#         uncertainty_methods = uncertainty_methods[:]
-#
-#     plot_ci_graphs(altitudes, massif_names, model_class_and_last_year, show, temporal_covariate, uncertainty_methods)
-#
-#
-# # Create 5 main plots
-# def main_5_drawings():
-#     model_class_and_last_year = [
-#                                     (StationaryTemporalModel, 2017),
-#                                     (NonStationaryLocationAndScaleTemporalModel, 2017),
-#                                     # Add the temperature here
-#                                 ][:1]
-#     altitudes = EUROCODE_ALTITUDES[:]
-#     uncertainty_methods = [ConfidenceIntervalMethodFromExtremes.my_bayes,
-#                            ConfidenceIntervalMethodFromExtremes.ci_mle]
-#     show = False
-#     massif_names = MASSIF_NAMES_ALPS[:]
-#     temporal_covariate = 2017
-#     m = 4
-#     n = (23 // m) + 1
-#     for i in list(range(n))[:]:
-#         massif_names = MASSIF_NAMES_ALPS[m * i: m * (i+1)]
-#         print(massif_names)
-#         plot_ci_graphs(altitudes, massif_names, model_class_and_last_year, show, temporal_covariate, uncertainty_methods)
-#
-#
-# def main_3_massif_of_interest():
-#     massif_names = ['Parpaillon', 'Chartreuse', 'Maurienne'][1:]
-#     model_class_and_last_year = [
-#                                     (StationaryTemporalModel, 2017),
-#                                     (NonStationaryLocationAndScaleTemporalModel, 2017),
-#                                     # Add the temperature here
-#                                 ][:]
-#     altitudes = EUROCODE_ALTITUDES[1:]
-#     uncertainty_methods = [ConfidenceIntervalMethodFromExtremes.my_bayes,
-#                            ConfidenceIntervalMethodFromExtremes.ci_mle][:]
-#     temporal_covariate = 2017
-#     show = False
-#     plot_ci_graphs(altitudes, massif_names, model_class_and_last_year, show, temporal_covariate, uncertainty_methods)
-#
-#
-# if __name__ == '__main__':
-#     # main_drawing()
-#     # main_5_drawings()
-#     main_3_massif_of_interest()
\ No newline at end of file
diff --git a/experiment/eurocode_data/utils.py b/experiment/eurocode_data/utils.py
index 7bc0287b..3e5312ca 100644
--- a/experiment/eurocode_data/utils.py
+++ b/experiment/eurocode_data/utils.py
@@ -10,4 +10,4 @@ EUROCODE_ALTITUDES = [300, 600, 900, 1200, 1500, 1800]
 # Therefore, the winter 2012/2013 was the last one. Thus, 2012 is the last year for the Eurocode
 LAST_YEAR_FOR_EUROCODE = 2012
 # Year of interest for the EUROCODE
-YEAR_OF_INTEREST_FOR_RETURN_LEVEL = 2017
\ No newline at end of file
+YEAR_OF_INTEREST_FOR_RETURN_LEVEL = 2019
\ No newline at end of file
diff --git a/experiment/meteo_france_data/adamont_data/ensemble_simulation.py b/experiment/meteo_france_data/adamont_data/ensemble_simulation.py
index 336df9ea..d669c35a 100644
--- a/experiment/meteo_france_data/adamont_data/ensemble_simulation.py
+++ b/experiment/meteo_france_data/adamont_data/ensemble_simulation.py
@@ -14,6 +14,7 @@ class EnsembleSimulation(object):
 
     def __init__(self, scenario='HISTO', parameter='SNOWSWE',
                  first_winter_required_for_histo=1958, last_winter_for_histo=2004):
+        assert False, 'Re-read the code to take the new dates into account'
         self.scenario = scenario
         self.parameter = parameter
         self.first_winter_required_for_histo = first_winter_required_for_histo
diff --git a/experiment/meteo_france_data/scm_models_data/abstract_study.py b/experiment/meteo_france_data/scm_models_data/abstract_study.py
index 1d20283e..d32aa542 100644
--- a/experiment/meteo_france_data/scm_models_data/abstract_study.py
+++ b/experiment/meteo_france_data/scm_models_data/abstract_study.py
@@ -41,6 +41,7 @@ with redirect_stdout(f):
 
 filled_marker_legend_list = ['Filled marker =', 'Selected model is significant', 'w.r.t $\mathcal{M}_0$']
 
+
 class AbstractStudy(object):
     """
     A Study is defined by:
@@ -52,12 +53,13 @@ class AbstractStudy(object):
 
     The year 2017 represents the nc file that correspond to the winter between the year 2017 and 2018.
     """
-    REANALYSIS_FLAT_FOLDER = 'SAFRAN_montagne-CROCUS_2019/alp_flat/reanalysis'
+    # REANALYSIS_FLAT_FOLDER = 'SAFRAN_montagne-CROCUS_2019/alp_flat/reanalysis'
+    REANALYSIS_FLAT_FOLDER = 'S2M_AERIS_MARS_2020/'
     REANALYSIS_ALLSLOPES_FOLDER = 'SAFRAN_montagne-CROCUS_2019/alp_allslopes/reanalysis'
 
     # REANALYSIS_FOLDER = 'SAFRAN_montagne-CROCUS_2019/postes/reanalysis'
 
-    def __init__(self, variable_class: type, altitude: int = 1800, year_min=1000, year_max=3000,
+    def __init__(self, variable_class: type, altitude: int = 1800, year_min=1959, year_max=2020,
                  multiprocessing=True, orientation=None, slope=20.0):
         assert isinstance(altitude, int), type(altitude)
         assert altitude in ALTITUDES, altitude
@@ -128,7 +130,8 @@ class AbstractStudy(object):
 
     @property
     def observations_winter_annual_maxima(self) -> AnnualMaxima:
-        return AnnualMaxima(df_maxima_gev=pd.DataFrame(self.year_to_winter_annual_maxima, index=self.study_massif_names))
+        return AnnualMaxima(
+            df_maxima_gev=pd.DataFrame(self.year_to_winter_annual_maxima, index=self.study_massif_names))
 
     @cached_property
     def year_to_summer_annual_maxima(self) -> OrderedDict:
@@ -140,10 +143,10 @@ class AbstractStudy(object):
             year_to_annual_maxima[year] = annual_maxima
         return year_to_annual_maxima
 
-
     @property
     def observations_summer_annual_maxima(self) -> AnnualMaxima:
-        return AnnualMaxima(df_maxima_gev=pd.DataFrame(self.year_to_summer_annual_maxima, index=self.study_massif_names))
+        return AnnualMaxima(
+            df_maxima_gev=pd.DataFrame(self.year_to_summer_annual_maxima, index=self.study_massif_names))
 
     @cached_property
     def year_to_annual_maxima_index(self) -> OrderedDict:
@@ -235,7 +238,7 @@ class AbstractStudy(object):
 
     @cached_property
     def ordered_years_and_path_files(self):
-        nc_files = [(int(f.split('_')[-2][:4]), f) for f in os.listdir(self.study_full_path) if f.endswith('.nc')]
+        nc_files = [(int(f.split('_')[-2][:4])+1, f) for f in os.listdir(self.study_full_path) if f.endswith('.nc')]
         ordered_years, path_files = zip(*[(year, op.join(self.study_full_path, nc_file))
                                           for year, nc_file in sorted(nc_files, key=lambda t: t[0])
                                           if self.year_min <= year < self.year_max])
@@ -402,7 +405,6 @@ class AbstractStudy(object):
                 #             if is_hatch:
                 #                 ax.add_patch(Polygon(xy=a, fill=False, hatch=hatch))
 
-
         if show_label:
             # Improve some explanation on the X axis and on the Y axis
             ax.set_xlabel('Longitude (km)')
diff --git a/experiment/meteo_france_data/scm_models_data/crocus/crocus.py b/experiment/meteo_france_data/scm_models_data/crocus/crocus.py
index aafab19e..0ed8aea7 100644
--- a/experiment/meteo_france_data/scm_models_data/crocus/crocus.py
+++ b/experiment/meteo_france_data/scm_models_data/crocus/crocus.py
@@ -115,14 +115,14 @@ class CrocusDaysWithSnowOnGround(Crocus):
 
 if __name__ == '__main__':
     for study in [CrocusSwe3Days(altitude=900, orientation=90.0)]:
-        d = study.year_to_dataset_ordered_dict[1958]
+        d = study.year_to_dataset_ordered_dict[1959]
         print(d)
         print(study.reanalysis_path)
         for v in ['aspect', 'slope', 'ZS', 'massif_num']:
             a = np.array(d[v])
             print(list(a))
             print(sorted(list(set(a))))
-        print(study.year_to_daily_time_serie_array[1958])
+        print(study.year_to_daily_time_serie_array[1959])
     study = CrocusSnowLoadTotal(altitude=900)
     print(study.year_to_annual_maxima_index)
     print(study.year_to_daily_time_serie_array)
diff --git a/experiment/meteo_france_data/scm_models_data/safran/safran.py b/experiment/meteo_france_data/scm_models_data/safran/safran.py
index 885ad453..a19b8207 100644
--- a/experiment/meteo_france_data/scm_models_data/safran/safran.py
+++ b/experiment/meteo_france_data/scm_models_data/safran/safran.py
@@ -137,37 +137,5 @@ class SafranTemperature(Safran):
 
 if __name__ == '__main__':
     study = SafranRainfall1Day()
-    print(study.year_to_annual_maxima[1958])
-    # d = study.year_to_dataset_ordered_dict[1958]
-    # print(d.variables)
-    # print(study.study_massif_names)
-    # d = {
-    #     name: '' for name in study.study_massif_names
-    # }
-    # print(d)
-    # for i in range(1958, 1959):
-    #     d = study.year_to_dataset_ordered_dict[i]
-    #     # variable = 'station'
-    #     # print(np.array(d.variables[variable]))
-    #     variable = 'Tair'
-    #     a = np.mean(np.array(d.variables[variable]), axis=1)
-    #     d = study.year_to_dataset_ordered_dict[i + 1]
-    #     b = np.mean(np.array(d.variables[variable]), axis=1)
-    # print(a[-1] - b[0])
-    # print(d.variables['time'])
-    # print(study.all_massif_names)
-    # print(study.massif_name_to_altitudes)
-    # print(study.year_to_daily_time_serie_array[1958].shape)
-    # print(study.missing_massif_name)
-
-    # print(len(d.variables['time']))
-    # print(study.year_to_annual_total)
-    # print(study.df_annual_total.columns)
-
-    # for i in range(1958, 2016):
-    #     d = study.year_to_dataset_ordered_dict[i]
-    #     variable = 'Tair'
-    #     a = np.mean(np.array(d.variables[variable]), axis=1)
-    #     d = study.year_to_dataset_ordered_dict[i+1]
-    #     b = np.mean(np.array(d.variables[variable]), axis=1)
-    #     print(a[-1] - b[0])
+    print(study.year_to_annual_maxima[1959])
+    print(study.ordered_years)
diff --git a/experiment/meteo_france_data/scm_models_data/visualization/hypercube_visualization/main_files/main_full_hypercube.py b/experiment/meteo_france_data/scm_models_data/visualization/hypercube_visualization/main_files/main_full_hypercube.py
index 4fb4fbdd..679e5d75 100644
--- a/experiment/meteo_france_data/scm_models_data/visualization/hypercube_visualization/main_files/main_full_hypercube.py
+++ b/experiment/meteo_france_data/scm_models_data/visualization/hypercube_visualization/main_files/main_full_hypercube.py
@@ -17,8 +17,8 @@ def get_full_parameters(altitude=None, offset_starting_year=10):
         altitudes = [altitude]
     else:
         altitudes = ALL_ALTITUDES[3:-6]
-    first_starting_year = 1958
-    last_starting_year = 2017 - offset_starting_year
+    first_starting_year = 1959
+    last_starting_year = 2019 - offset_starting_year
     trend_test_class = GevLocationTrendTest
     return altitudes, first_starting_year, last_starting_year, nb_data_reduced_for_speed, only_first_one, save_to_file, trend_test_class
 
diff --git a/experiment/trend_analysis/univariate_test/extreme_trend_test/abstract_gev_trend_test.py b/experiment/trend_analysis/univariate_test/extreme_trend_test/abstract_gev_trend_test.py
index 295336f5..2782f461 100644
--- a/experiment/trend_analysis/univariate_test/extreme_trend_test/abstract_gev_trend_test.py
+++ b/experiment/trend_analysis/univariate_test/extreme_trend_test/abstract_gev_trend_test.py
@@ -5,7 +5,7 @@ import numpy as np
 from cached_property import cached_property
 from scipy.stats import chi2
 
-from experiment.eurocode_data.utils import EUROCODE_QUANTILE
+from experiment.eurocode_data.utils import EUROCODE_QUANTILE, YEAR_OF_INTEREST_FOR_RETURN_LEVEL
 from experiment.meteo_france_data.scm_models_data.crocus.crocus_variables import AbstractSnowLoadVariable
 from experiment.trend_analysis.univariate_test.abstract_univariate_test import AbstractUnivariateTest
 from experiment.trend_analysis.univariate_test.utils import load_temporal_coordinates_and_dataset, \
@@ -219,7 +219,7 @@ class AbstractGevTrendTest(AbstractUnivariateTest):
         # Plot for the empirical
         standard_gumbel_quantiles = self.get_standard_gumbel_quantiles()
         ax.plot(standard_gumbel_quantiles, sorted_maxima, linestyle='None',
-                label='Empirical model', marker='o')
+                label='Empirical model', marker='o', color='black')
 
 
 
@@ -238,8 +238,13 @@ class AbstractGevTrendTest(AbstractUnivariateTest):
         # Plot tor the selected model for different year
 
         end_real_proba = 1 - (0.02 / psnow)
-        self.plot_model(ax, 1958, end_proba=end_real_proba, label='Selected model, which is ${}$'.format(self.label))
-        self.plot_model(ax, 2017, end_proba=end_real_proba, label='Selected model, which is ${}$'.format(self.label))
+        stationary = True
+        if stationary:
+            self.plot_model(ax, None, end_proba=end_real_proba, label='Selected model\nwhich is ${}$'.format(self.label),
+                            color='grey')
+        else:
+            self.plot_model(ax, 1959, end_proba=end_real_proba, label='Selected model, which is ${}$'.format(self.label))
+            self.plot_model(ax, 2019, end_proba=end_real_proba, label='Selected model, which is ${}$'.format(self.label))
 
         # Plot for the discarded model
         # if 'Verdon' in massif_name and altitude == 300:
@@ -260,32 +265,36 @@ class AbstractGevTrendTest(AbstractUnivariateTest):
 
 
 
-        all_quantiles = standard_gumbel_quantiles + unconstrained_empirical_quantiles + quantiles
-        epsilon = 0.5
-        ax_lim = [min(all_quantiles) - epsilon, max(all_quantiles) + epsilon]
+        ax_lim = [-1.5, 4]
         ax.set_xlim(ax_lim)
         ax_twiny.set_xlim(ax_lim)
+        ax.set_xticks([-1 + i for i in range(6)])
+        epsilon = 0.005
+        ax.set_ylim(bottom=-epsilon)
+        tick_labelsize = 13
+        ax.tick_params(axis='both', which='major', labelsize=tick_labelsize)
+        ax_twiny.tick_params(axis='both', which='major', labelsize=tick_labelsize)
 
-        ax.legend()
         ax.yaxis.grid()
 
         ax.set_xlabel("Standard Gumbel quantile", fontsize=size)
         ax.set_ylabel("Non-zero annual maxima of GSL ({})".format(AbstractSnowLoadVariable.UNIT), fontsize=size)
-        ax.legend(loc='upper left', prop={'size': 10})
+        ax.legend(prop={'size': 17})
 
-    def plot_model(self, ax, year, start_proba=0.02, end_proba=0.98, label=''):
+    def plot_model(self, ax, year, start_proba=0.02, end_proba=0.98, label='', color=None):
         standard_gumbel = GevParams(0, 1, 0)
         start_quantile = standard_gumbel.quantile(start_proba)
         end_quantile = standard_gumbel.quantile(end_proba)
         extended_quantiles = np.linspace(start_quantile, end_quantile, 500)
+        label = 'Y({})'.format(year) if year is not None else label
         if year is None:
-            year = 2017
+            year = 2019
         gev_params_year = self.unconstrained_estimator.margin_function_from_fit.get_gev_params(
                 coordinate=np.array([year]),
                 is_transformed=False)
         extended_maxima = [gev_params_year.gumbel_inverse_standardization(q) for q in extended_quantiles]
-        label = 'Y({})'.format(year) if year is not None else label
-        ax.plot(extended_quantiles, extended_maxima, linestyle='-', label=label)
+
+        ax.plot(extended_quantiles, extended_maxima, linestyle='-', label=label, color=color, linewidth=5)
 
     def linear_extension(self, ax, q, quantiles, sorted_maxima):
         # Extend the curve linear a bit if the return period 50 is not in the quantiles
@@ -383,9 +392,9 @@ class AbstractGevTrendTest(AbstractUnivariateTest):
     def return_level_plot_comparison(self, ax, label, color=None):
         # ax = plt.gca()
         size = 15
-        # Load Gev parameter in 2017 for the unconstrained estimator
+        # Load the GEV parameters for the year of interest for the unconstrained estimator
         gev_params, gev_params_with_corrected_shape = self.get_gev_params_with_big_shape_and_correct_shape()
-        suffix = 'in 2017'
+        suffix = 'in {}'.format(YEAR_OF_INTEREST_FOR_RETURN_LEVEL)
         gev_params.return_level_plot_against_return_period(ax, color, linestyle='-', label=label,
                                                            suffix_return_level_label=suffix)
         gev_params_with_corrected_shape.return_level_plot_against_return_period(ax, color=color, linestyle='--',
@@ -403,7 +412,7 @@ class AbstractGevTrendTest(AbstractUnivariateTest):
         ax.vlines(50, 0, np.max(difference_quantile))
         ax.plot(return_periods, difference_quantile, color=color, linestyle='-', label=label)
         ax.legend(loc='upper left')
-        difference_ylabel = 'difference return level in 2017'
+        difference_ylabel = 'difference return level in 2019'
         ax.set_ylabel(difference_ylabel + ' (kPa)')
 
         ax2.vlines(50, 0, np.max(relative_difference))
@@ -417,7 +426,7 @@ class AbstractGevTrendTest(AbstractUnivariateTest):
         plt.gca().set_ylim(bottom=0)
 
     def get_gev_params_with_big_shape_and_correct_shape(self):
-        gev_params = self.unconstrained_estimator.margin_function_from_fit.get_gev_params(coordinate=np.array([2017]),
+        gev_params = self.unconstrained_estimator.margin_function_from_fit.get_gev_params(coordinate=np.array([YEAR_OF_INTEREST_FOR_RETURN_LEVEL]),
                                                                                           is_transformed=False)  # type: GevParams
         gev_params_with_corrected_shape = GevParams(loc=gev_params.location,
                                                     scale=gev_params.scale,
diff --git a/papers/exceeding_snow_loads/check_mcmc_convergence_for_return_levels/main_bayesian_mcmc.py b/papers/exceeding_snow_loads/check_mcmc_convergence_for_return_levels/main_bayesian_mcmc.py
index cc0a2b3e..a3e0a051 100644
--- a/papers/exceeding_snow_loads/check_mcmc_convergence_for_return_levels/main_bayesian_mcmc.py
+++ b/papers/exceeding_snow_loads/check_mcmc_convergence_for_return_levels/main_bayesian_mcmc.py
@@ -102,12 +102,12 @@ def get_return_level_bayesian_example(nb_iterations_for_bayesian_fit):
     # plt.show()
     model_class = StationaryTemporalModel
     coordinates, dataset = load_temporal_coordinates_and_dataset(maxima, years)
-    fitted_estimator = fitted_linear_margin_estimator(model_class, coordinates, dataset, starting_year=1958,
+    fitted_estimator = fitted_linear_margin_estimator(model_class, coordinates, dataset, starting_year=1959,
                                                       fit_method=TemporalMarginFitMethod.extremes_fevd_bayesian,
                                                       nb_iterations_for_bayesian_fit=nb_iterations_for_bayesian_fit)
     return_level_bayesian = ExtractEurocodeReturnLevelFromMyBayesianExtremes(estimator=fitted_estimator,
                                                                              ci_method=ConfidenceIntervalMethodFromExtremes.my_bayes,
-                                                                             temporal_covariate=2017)
+                                                                             temporal_covariate=2019)
     return return_level_bayesian
 
 
diff --git a/papers/exceeding_snow_loads/result_trends_and_return_levels/main_result_trends_and_return_levels.py b/papers/exceeding_snow_loads/result_trends_and_return_levels/main_result_trends_and_return_levels.py
index 7f20be5a..a3015cab 100644
--- a/papers/exceeding_snow_loads/result_trends_and_return_levels/main_result_trends_and_return_levels.py
+++ b/papers/exceeding_snow_loads/result_trends_and_return_levels/main_result_trends_and_return_levels.py
@@ -66,11 +66,10 @@ def intermediate_result(altitudes, massif_names=None,
             _ = compute_minimized_aic(visualizer)
 
     # Plots
-    # plot_trend_map(altitude_to_visualizer)
-    # plot_diagnosis_risk(altitude_to_visualizer)
-    # plot_trend_curves(altitude_to_visualizer={a: v for a, v in altitude_to_visualizer.items() if a >= 900})
+    plot_trend_map(altitude_to_visualizer)
+    plot_trend_curves(altitude_to_visualizer={a: v for a, v in altitude_to_visualizer.items() if a >= 900})
     plot_uncertainty_massifs(altitude_to_visualizer)
-    # plot_uncertainty_histogram(altitude_to_visualizer)
+    plot_uncertainty_histogram(altitude_to_visualizer)
     plot_selection_curves(altitude_to_visualizer)
 
 
diff --git a/papers/exceeding_snow_loads/result_trends_and_return_levels/plot_uncertainty_curves.py b/papers/exceeding_snow_loads/result_trends_and_return_levels/plot_uncertainty_curves.py
index 51a2646f..83400351 100644
--- a/papers/exceeding_snow_loads/result_trends_and_return_levels/plot_uncertainty_curves.py
+++ b/papers/exceeding_snow_loads/result_trends_and_return_levels/plot_uncertainty_curves.py
@@ -3,7 +3,8 @@ import matplotlib.pyplot as plt
 
 import numpy as np
 
-from experiment.eurocode_data.utils import EUROCODE_RETURN_LEVEL_STR, EUROCODE_ALTITUDES
+from experiment.eurocode_data.utils import EUROCODE_RETURN_LEVEL_STR, EUROCODE_ALTITUDES, \
+    YEAR_OF_INTEREST_FOR_RETURN_LEVEL
 from experiment.meteo_france_data.scm_models_data.abstract_study import AbstractStudy, filled_marker_legend_list
 from experiment.meteo_france_data.scm_models_data.visualization.study_visualization.main_study_visualizer import \
     SCM_STUDY_CLASS_TO_ABBREVIATION
@@ -68,7 +69,7 @@ def plot_single_uncertainty_massif(altitude_to_visualizer: Dict[int, StudyVisual
 
 def get_label_name(model_subset_for_uncertainty, ci_method_name, add_method_suffix):
     model_symbol = 'N' if model_subset_for_uncertainty is not ModelSubsetForUncertainty.stationary_gumbel else '0'
-    parameter = ', 2017' if model_subset_for_uncertainty not in [ModelSubsetForUncertainty.stationary_gumbel,
+    parameter = ', {}'.format(YEAR_OF_INTEREST_FOR_RETURN_LEVEL) if model_subset_for_uncertainty not in [ModelSubsetForUncertainty.stationary_gumbel,
                                                                  ModelSubsetForUncertainty.stationary_gumbel_and_gev] \
         else ''
     model_name = ' $ z_p(\\boldsymbol{\\widehat{\\theta}_{\\mathcal{M}'
diff --git a/papers/exceeding_snow_loads/study_visualizer_for_non_stationary_trends.py b/papers/exceeding_snow_loads/study_visualizer_for_non_stationary_trends.py
index 42b87600..4bd5b006 100644
--- a/papers/exceeding_snow_loads/study_visualizer_for_non_stationary_trends.py
+++ b/papers/exceeding_snow_loads/study_visualizer_for_non_stationary_trends.py
@@ -8,7 +8,8 @@ from cached_property import cached_property
 
 from experiment.eurocode_data.eurocode_region import C2, C1, E
 from experiment.eurocode_data.massif_name_to_departement import massif_name_to_eurocode_region
-from experiment.eurocode_data.utils import EUROCODE_QUANTILE, EUROCODE_RETURN_LEVEL_STR
+from experiment.eurocode_data.utils import EUROCODE_QUANTILE, EUROCODE_RETURN_LEVEL_STR, \
+    YEAR_OF_INTEREST_FOR_RETURN_LEVEL
 from experiment.meteo_france_data.plot.create_shifted_cmap import get_shifted_map, get_colors
 from experiment.meteo_france_data.scm_models_data.abstract_extended_study import AbstractExtendedStudy
 from experiment.meteo_france_data.scm_models_data.abstract_study import AbstractStudy
@@ -47,7 +48,7 @@ class StudyVisualizerForNonStationaryTrends(StudyVisualizer):
                  uncertainty_methods=None,
                  model_subsets_for_uncertainty=None,
                  uncertainty_massif_names=None,
-                 effective_temporal_covariate=2017,
+                 effective_temporal_covariate=YEAR_OF_INTEREST_FOR_RETURN_LEVEL,
                  relative_change_trend_plot=True,
                  non_stationary_trend_test_to_marker=None,
                  fit_method=TemporalMarginFitMethod.extremes_fevd_mle,
diff --git a/test/test_experiment/test_SCM_oriented_data.py b/test/test_experiment/test_SCM_oriented_data.py
index 3d731376..7b35d188 100644
--- a/test/test_experiment/test_SCM_oriented_data.py
+++ b/test/test_experiment/test_SCM_oriented_data.py
@@ -16,8 +16,8 @@ class TestSCMOrientedData(unittest.TestCase):
             for slope in SLOPES:
                 for orientation in [None, 45.0, 180.0][:2]:
                     for study_class in [SafranSnowfall, CrocusSwe3Days][:]:
-                        study = study_class(altitude=altitude, orientation=orientation, slope=slope, year_max=1959, multiprocessing=False)
-                        assert study.year_to_daily_time_serie_array[1958].shape == (365, 23)
+                        study = study_class(altitude=altitude, orientation=orientation, slope=slope, year_max=1960, multiprocessing=False)
+                        assert study.year_to_daily_time_serie_array[1959].shape == (365, 23)
 
 
 if __name__ == '__main__':
diff --git a/test/test_experiment/test_SCM_study.py b/test/test_experiment/test_SCM_study.py
index 9ed69a54..e7c14277 100644
--- a/test/test_experiment/test_SCM_study.py
+++ b/test/test_experiment/test_SCM_study.py
@@ -23,7 +23,7 @@ class TestSCMAllStudy(unittest.TestCase):
         for study_class in [ExtendedSafranSnowfall]:
             for study in study_iterator(study_class, only_first_one=True, verbose=False):
                 study_visualizer = StudyVisualizer(study, show=False, save_to_file=False, multiprocessing=True)
-                study_visualizer.df_trend_spatio_temporal(GevLocationTrendTest, [1958, 1959, 1960],
+                study_visualizer.df_trend_spatio_temporal(GevLocationTrendTest, [1959, 1960, 1961],
                                                           nb_massif_for_change_point_test=3,
                                                           sample_one_massif_from_each_region=False)
         self.assertTrue(True)
@@ -72,15 +72,15 @@ class TestSCMPrecipitation(TestSCMStudy):
 
     def setUp(self) -> None:
         super().setUp()
-        self.study = SafranPrecipitation(altitude=1800, year_min=1958, year_max=2002, nb_consecutive_days=1)
+        self.study = SafranPrecipitation(altitude=1800, year_min=1959, year_max=2003, nb_consecutive_days=1)
 
     def test_durand(self):
         # Test based on Durand paper
         # (some small differences probably due to the fact that SAFRAN model has evolved since then)
         # Test for the mean total precipitation (rainfall + snowfall) between 1958 and 2002
         self.check({
-            "Mercantour": 1281,
-            'Chablais': 1922,
+            "Mercantour": 1300,
+            'Chablais': 1947,
         })
 
     def round(self, f):
@@ -91,7 +91,7 @@ class TestSafranTemperature(TestSCMStudy):
 
     def setUp(self):
         super().setUp()
-        self.study = SafranTemperature(altitude=1800, year_min=1958, year_max=2002)
+        self.study = SafranTemperature(altitude=1800, year_min=1959, year_max=2003)
 
     def test_durand(self):
         # Test based on Durand paper
-- 
GitLab