Commit dadce483 authored by Le Roux Erwan

[exceeding] fix code to create a qqplot (sort only once the empirical quantiles have been computed). Create ALTITUDE_TO_GREY_MASSIF in utils to ensure that the exceedance plots are always done with the same massifs everywhere (these massifs are only the ones that are not grey for the non-stationary GEV & Gumbel).
parent 19a423d2
Showing 59 additions and 29 deletions
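The core of the qqplot fix below is to standardize each annual maximum with the parameters fitted for its own year, and to sort only once, after all standardized values have been computed. A minimal self-contained sketch of that idea, using scipy's Gumbel distribution and hypothetical stationary parameters in place of the project's year-specific non-stationary fit:

import numpy as np
from scipy.stats import gumbel_r

# Hypothetical stationary parameters stand in for the year-specific GEV/Gumbel
# parameters that the real code obtains from estimator.function_from_fit.get_params.
LOC, SCALE = 2.0, 0.8

years = np.arange(1959, 2019)
maxima = gumbel_r.rvs(loc=LOC, scale=SCALE, size=years.size, random_state=0)

def empirical_standard_gumbel_quantiles(years, maxima):
    # Standardize each annual maximum with the parameters of its own year,
    # then sort once at the very end (the fix): with year-dependent parameters,
    # standardizing maxima that were pre-sorted by raw value does not
    # guarantee that the resulting quantiles come out in increasing order.
    standardized = [(maximum - LOC) / SCALE for year, maximum in zip(years, maxima)]
    return sorted(standardized)

# Theoretical standard Gumbel quantiles at the plotting positions i / (n + 1).
n = len(maxima)
theoretical_quantiles = gumbel_r.ppf(np.arange(1, n + 1) / (n + 1))
empirical_quantiles = empirical_standard_gumbel_quantiles(years, maxima)
# The qqplot then scatters theoretical_quantiles (x) against empirical_quantiles (y).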
@@ -369,7 +369,6 @@ class AbstractGevTrendTest(object):
def qqplot_wrt_standard_gumbel(self, massif_name, altitude):
ax = plt.gca()
size = 15
standard_gumbel_quantiles = self.get_standard_gumbel_quantiles()
unconstrained_empirical_quantiles = self.compute_empirical_quantiles(self.unconstrained_estimator)
# constrained_empirical_quantiles = self.compute_empirical_quantiles(self.constrained_estimator)
@@ -385,9 +384,11 @@ class AbstractGevTrendTest(object):
ax.plot(standard_gumbel_quantiles, unconstrained_empirical_quantiles, linestyle='None',
label=label_generic + '(selected model is ${}$)'.format(self.label), marker='o')
ax.set_xlabel("Standard Gumbel quantile", fontsize=size)
ax.set_ylabel("Standard Empirical quantile", fontsize=size)
ax.legend(loc='lower right', prop={'size': 10})
size_label = 20
ax.set_xlabel("Theoretical quantile", fontsize=size_label)
ax.set_ylabel("Empirical quantile", fontsize=size_label)
# size_legend = 14
# ax.legend(loc='lower right', prop={'size': size_legend})
ax.set_xlim(ax_lim)
ax.set_ylim(ax_lim)
@@ -396,7 +397,7 @@ class AbstractGevTrendTest(object):
ax.set_xticks(ticks)
ax.set_yticks(ticks)
# ax.grid()
ax.tick_params(labelsize=size)
ax.tick_params(labelsize=15)
def get_standard_gumbel_quantiles(self):
# Standard Gumbel quantiles
@@ -413,12 +414,14 @@ class AbstractGevTrendTest(object):
def compute_empirical_quantiles(self, estimator):
empirical_quantiles = []
for year, maximum in sorted(zip(self.years, self.maxima), key=lambda t: t[1]):
# for year, maximum in sorted(zip(self.years, self.maxima), key=lambda t: t[1]):
for year, maximum in zip(self.years, self.maxima):
gev_param = estimator.function_from_fit.get_params(
coordinate=np.array([year]),
is_transformed=False)
maximum_standardized = gev_param.gumbel_standardization(maximum)
empirical_quantiles.append(maximum_standardized)
empirical_quantiles = sorted(empirical_quantiles)
return empirical_quantiles
# For some visualizations
......
@@ -17,7 +17,7 @@ from extreme_data.meteo_france_data.scm_models_data.abstract_extended_study impo
from extreme_data.meteo_france_data.scm_models_data.abstract_study import AbstractStudy
from extreme_data.meteo_france_data.scm_models_data.visualization.study_visualizer import \
StudyVisualizer
from projects.exceeding_snow_loads.utils import NON_STATIONARY_TREND_TEST_PAPER_1
from projects.exceeding_snow_loads.utils import NON_STATIONARY_TREND_TEST_PAPER_1, ALTITUDE_TO_GREY_MASSIF
from extreme_trend.abstract_gev_trend_test import AbstractGevTrendTest
from extreme_trend.trend_test_one_parameter.gumbel_trend_test_one_parameter import \
GumbelLocationTrendTest, GevStationaryVersusGumbel, GumbelScaleTrendTest, GumbelVersusGumbel
@@ -82,6 +82,7 @@ class StudyVisualizerForNonStationaryTrends(StudyVisualizer):
# or only the stationary Gumbel model, which corresponds to the building standards
self.model_subsets_for_uncertainty = [ModelSubsetForUncertainty.stationary_gumbel,
ModelSubsetForUncertainty.non_stationary_gumbel_and_gev][:]
assert isinstance(self.model_subsets_for_uncertainty, list)
if self.uncertainty_methods is None:
self.uncertainty_methods = [ConfidenceIntervalMethodFromExtremes.my_bayes,
ConfidenceIntervalMethodFromExtremes.ci_mle][1:]
@@ -127,8 +128,19 @@ class StudyVisualizerForNonStationaryTrends(StudyVisualizer):
return {m: EUROCODE_QUANTILE for m in self.massif_name_to_psnow.keys()}
@property
def massif_names_fitted(self):
return list(self.massif_name_to_years_and_maxima_for_model_fitting.keys())
def intersection_of_massif_names_fitted(self) -> List[str]:
all_massif = set(self.study.all_massif_names())
missing_massifs_given = set(ALTITUDE_TO_GREY_MASSIF[self.altitude])
if ModelSubsetForUncertainty.non_stationary_gumbel_and_gev in self.model_subsets_for_uncertainty:
# Check the assertion for the missing massifs
# For paper 1, this branch should only be entered for the ground snow load
# (because this ModelSubsetForUncertainty should not be used for the Eurocode)
massifs_found = set(self.massif_name_to_trend_test_that_minimized_aic.keys())
missing_massifs_found = all_massif - massifs_found
assert missing_massifs_found == missing_massifs_given
# Return this result by default
intersection_massif_names = all_massif - missing_massifs_given
return list(intersection_massif_names)
@cached_property
def massif_name_to_years_and_maxima_for_model_fitting(self):
@@ -160,14 +172,20 @@ class StudyVisualizerForNonStationaryTrends(StudyVisualizer):
return massif_name_to_trend_test_that_minimized_aic
def get_sorted_trend_test(self, massif_name):
def get_trend_trend_test(self, massif_name, trend_test_classes):
x, y = self.massif_name_to_years_and_maxima_for_model_fitting[massif_name]
starting_year = None
quantile_level = self.massif_name_to_eurocode_quantile_level_in_practice[massif_name]
all_trend_test = [
t(years=x, maxima=y, starting_year=starting_year, quantile_level=quantile_level,
t(years=x, maxima=y, starting_year=None, quantile_level=quantile_level,
fit_method=self.fit_method)
for t in self.non_stationary_trend_test] # type: List[AbstractGevTrendTest]
for t in trend_test_classes] # type: List[AbstractGevTrendTest]
return all_trend_test
def get_sorted_trend_test(self, massif_name):
# Compute trend test
all_trend_test = self.get_trend_trend_test(massif_name, self.non_stationary_trend_test)
# Exclude GEV models whose shape parameter is not in the support of the prior distribution for GMLE
if self.select_only_acceptable_shape_parameter:
acceptable_shape_parameter = lambda s: -0.5 <= s <= 0.5 # physically acceptable prior
@@ -332,12 +350,7 @@ class StudyVisualizerForNonStationaryTrends(StudyVisualizer):
model_subset_for_uncertainty=ModelSubsetForUncertainty.non_stationary_gumbel_and_gev) \
-> Dict[str, EurocodeConfidenceIntervalFromExtremes]:
# Compute for the uncertainty massif names
massifs_names = set(self.massif_name_to_years_and_maxima_for_model_fitting.keys()). \
intersection(self.uncertainty_massif_names)
# Update the name of the massif (because some massifs might have been removed by anderson test)
if model_subset_for_uncertainty is ModelSubsetForUncertainty.non_stationary_gumbel_and_gev\
and self.select_only_model_that_pass_anderson_test:
massifs_names = massifs_names.intersection(set(self.massif_name_to_trend_test_that_minimized_aic.keys()))
massifs_names = self.intersection_of_massif_names_fitted
arguments = [
[self.massif_name_to_years_and_maxima_for_model_fitting[m],
self.massif_name_and_model_subset_to_model_class(m, model_subset_for_uncertainty),
@@ -384,7 +397,7 @@ class StudyVisualizerForNonStationaryTrends(StudyVisualizer):
triplet = [(massif_name_to_eurocode_region[massif_name],
self.massif_name_to_eurocode_values[massif_name],
self.triplet_to_eurocode_uncertainty[(ci_method, model_subset_for_uncertainty, massif_name)])
for massif_name in self.massif_names_fitted]
for massif_name in self.intersection_of_massif_names_fitted]
# First array for histogram
a = 100 * np.array([(uncertainty.confidence_interval[0] > eurocode,
uncertainty.mean_estimate > eurocode,
@@ -422,7 +435,7 @@ class StudyVisualizerForNonStationaryTrends(StudyVisualizer):
trend_test.qqplot_wrt_standard_gumbel(massif_name, self.altitude)
self.plot_name = 'qpplot_plot_{}_{}_{}'.format(self.altitude, massif_name,
trend_test.unconstrained_estimator_gev_params_last_year.shape)
self.show_or_save_to_file(add_classic_title=False, no_title=True)
self.show_or_save_to_file(add_classic_title=False, no_title=True, tight_layout=True)
plt.close()
def return_level_plot(self, ax, ax2, massif_name, color=None):
......
@@ -11,11 +11,11 @@ from projects.exceeding_snow_loads.section_results.plot_uncertainty_histogram im
mpl.rcParams['text.usetex'] = True
mpl.rcParams['text.latex.preamble'] = [r'\usepackage{amsmath}']
from extreme_data.meteo_france_data.scm_models_data.crocus.crocus import CrocusSnowLoadTotal
from extreme_data.meteo_france_data.scm_models_data.crocus.crocus import CrocusSnowLoadTotal, CrocusSnowLoadEurocode
from extreme_fit.model.result_from_model_fit.result_from_extremes.confidence_interval_method import \
ConfidenceIntervalMethodFromExtremes
from extreme_trend.visualizers.study_visualizer_for_non_stationary_trends import \
StudyVisualizerForNonStationaryTrends
StudyVisualizerForNonStationaryTrends, ModelSubsetForUncertainty
from extreme_trend.visualizers.utils import load_altitude_to_visualizer
from projects.exceeding_snow_loads.section_results.plot_uncertainty_curves import plot_uncertainty_massifs
from projects.exceeding_snow_loads.utils import paper_study_classes, paper_altitudes
@@ -87,12 +87,16 @@ def major_result():
# massif_names = ['Beaufortain', 'Vercors']
massif_names = None
study_classes = paper_study_classes[:1]
model_subsets_for_uncertainty = None
# study_classes = [CrocusSnowLoad3Days, CrocusSnowLoad5Days, CrocusSnowLoad7Days][::-1]
# altitudes = [300, 600, 900, 1800, 2700][:2]
altitudes = [300, 600, 900, 1800, 2700][:2]
altitudes = [300, 600, 900, 1200, 1500, 1800]
altitudes = paper_altitudes
# altitudes = [900, 1800, 2700][:1]
for study_class in study_classes:
if study_class == CrocusSnowLoadEurocode:
model_subsets_for_uncertainty = [ModelSubsetForUncertainty.stationary_gumbel]
else:
model_subsets_for_uncertainty = None
intermediate_result(altitudes, massif_names, model_subsets_for_uncertainty,
uncertainty_methods, study_class,
multiprocessing=True)
......
@@ -154,7 +154,7 @@ def add_title(ax, eurocode_region, massif_name, non_stationary_context):
def plot_tdrl_bars(altitude_to_visualizer, ax, massif_name, valid_altitudes, legend_size, fontsize):
visualizers = [v for a, v in altitude_to_visualizer.items()
if a in valid_altitudes and massif_name in v.massif_names_fitted]
if a in valid_altitudes and massif_name in v.intersection_of_massif_names_fitted]
if len(visualizers) > 0:
tdrl_values = [v.massif_name_to_tdrl_value[massif_name] for v in visualizers]
# Plot bars
......
@@ -63,7 +63,8 @@ def plot_histogram(altitude_to_visualizer, model_subset_for_uncertainty):
ax_twiny.tick_params(labelsize=fontsize_label)
ax_twiny.set_xlim(ax.get_xlim())
ax_twiny.set_xticks(altitudes)
nb_massif_names = [len(v.massif_names_fitted) for v in altitude_to_visualizer.values()]
nb_massif_names = [len(v.intersection_of_massif_names_fitted) for v in altitude_to_visualizer.values()]
ax_twiny.set_xticklabels(nb_massif_names)
ax_twiny.set_xlabel('Total number of massifs at each altitude (for the percentage)', fontsize=fontsize_label)
......
@@ -29,6 +29,15 @@ NON_STATIONARY_TREND_TEST_PAPER_1 = [GumbelVersusGumbel,
GevLocationAgainstGumbel, GevScaleAgainstGumbel,
GevLocationAndScaleTrendTestAgainstGumbel]
ALTITUDE_TO_GREY_MASSIF = {
300: ['Devoluy', 'Queyras', 'Champsaur', 'Grandes-Rousses', 'Ubaye', 'Pelvoux', 'Haute-Maurienne', 'Maurienne', 'Vanoise', 'Thabor', 'Mercantour', 'Haut_Var-Haut_Verdon', 'Haute-Tarentaise', 'Parpaillon', 'Belledonne', 'Oisans'],
600: ['Queyras', 'Pelvoux', 'Haute-Maurienne', 'Mercantour', 'Haut_Var-Haut_Verdon', 'Thabor'],
900: [],
1200: [],
1500: [],
1800: [],
}
NON_STATIONARY_TREND_TEST_PAPER_2 = [
# Gumbel models
GumbelVersusGumbel,
......
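For reference, a minimal sketch (with a hypothetical non_grey_massifs helper, not the project's API) of how a mapping like ALTITUDE_TO_GREY_MASSIF keeps the set of plotted massifs consistent across figures, mirroring the set difference computed in intersection_of_massif_names_fitted:

from typing import Dict, List, Set

# Illustrative excerpt of the mapping above; the real constant lives in
# projects.exceeding_snow_loads.utils and covers all paper altitudes.
ALTITUDE_TO_GREY_MASSIF: Dict[int, List[str]] = {
    600: ['Queyras', 'Pelvoux', 'Haute-Maurienne', 'Mercantour',
          'Haut_Var-Haut_Verdon', 'Thabor'],
    900: [],
}

def non_grey_massifs(all_massif_names: List[str], altitude: int) -> List[str]:
    # Exceedance plots should always use the same massifs: every massif except
    # the ones shown in grey for the non-stationary GEV & Gumbel maps.
    grey: Set[str] = set(ALTITUDE_TO_GREY_MASSIF.get(altitude, []))
    return sorted(set(all_massif_names) - grey)

print(non_grey_massifs(['Chartreuse', 'Thabor', 'Vercors'], 600))  # ['Chartreuse', 'Vercors']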
@@ -14,7 +14,7 @@ class TestResults(unittest.TestCase):
def test_run_intermediate_results(self):
# Load data
altitude_to_visualizer = load_altitude_to_visualizer(altitudes=[300, 600], massif_names=['Chartreuse'],
altitude_to_visualizer = load_altitude_to_visualizer(altitudes=[300, 600], massif_names=None,
study_class=CrocusSnowLoadTotal,
model_subsets_for_uncertainty=None,
uncertainty_methods=[
......