From a6417e5d532e9becdeb04cffe34396deee2dc6ba Mon Sep 17 00:00:00 2001 From: Le Roux Erwan <erwan.le-roux@irstea.fr> Date: Mon, 1 Feb 2021 18:44:22 +0100 Subject: [PATCH] [contrasting] accelerate bootstrapping to with batch multiprocessing. improve optimisation. --- .../scm_models_data/abstract_study.py | 13 +- .../scm_models_data/utils_function.py | 73 ++++++--- .../abstract_extract_eurocode_return_level.py | 2 +- .../altitudes_fit/main_altitudes_studies.py | 22 ++- ...es_visualizer_for_non_stationary_models.py | 19 ++- .../one_fold_analysis/one_fold_fit.py | 154 ++++++++++++++---- .../one_fold_analysis/plot_total_aic.py | 2 +- .../utils_altitude_studies_visualizer.py | 3 +- root_utils.py | 6 + 9 files changed, 220 insertions(+), 74 deletions(-) diff --git a/extreme_data/meteo_france_data/scm_models_data/abstract_study.py b/extreme_data/meteo_france_data/scm_models_data/abstract_study.py index d3fe96f6..7a0a0f7e 100644 --- a/extreme_data/meteo_france_data/scm_models_data/abstract_study.py +++ b/extreme_data/meteo_france_data/scm_models_data/abstract_study.py @@ -1,4 +1,5 @@ import datetime +import time from matplotlib.lines import Line2D import io @@ -879,7 +880,7 @@ class AbstractStudy(object): return d def massif_name_to_stationary_gev_params_and_confidence(self, quantile_level, - confidence_interval_based_on_delta_method) -> Tuple[Dict]: + confidence_interval_based_on_delta_method): t = (quantile_level, confidence_interval_based_on_delta_method) if t in self._cache_for_pointwise_fit: return self._cache_for_pointwise_fit[t] @@ -890,18 +891,24 @@ class AbstractStudy(object): def _massif_name_to_stationary_gev_params_and_confidence(self, quantile_level=None, confidence_interval_based_on_delta_method=True): """ at least 90% of values must be above zero""" + print('study computation') massif_name_to_stationary_gev_params = {} massif_name_to_confidence = {} for massif_name, annual_maxima in self.massif_name_to_annual_maxima.items(): percentage_of_non_zeros = 100 * np.count_nonzero(annual_maxima) / len(annual_maxima) if percentage_of_non_zeros > 90: - gev_params, confidence = fitted_stationary_gev_with_uncertainty_interval(annual_maxima, + start = time.time() + gev_params, mean_estimate, confidence = fitted_stationary_gev_with_uncertainty_interval(annual_maxima, fit_method=MarginFitMethod.extremes_fevd_mle, quantile_level=quantile_level, confidence_interval_based_on_delta_method=confidence_interval_based_on_delta_method) + end = time.time() + duration = end - start + print('Multiprocessing for study duration', duration) + if -0.5 <= gev_params.shape <= 0.5: massif_name_to_stationary_gev_params[massif_name] = gev_params - massif_name_to_confidence[massif_name] = confidence + massif_name_to_confidence[massif_name] = EurocodeConfidenceIntervalFromExtremes(mean_estimate, confidence) return massif_name_to_stationary_gev_params, massif_name_to_confidence def massif_name_to_gev_param_list(self, year_min_and_max_list): diff --git a/extreme_data/meteo_france_data/scm_models_data/utils_function.py b/extreme_data/meteo_france_data/scm_models_data/utils_function.py index a4d98068..5ee9281a 100644 --- a/extreme_data/meteo_france_data/scm_models_data/utils_function.py +++ b/extreme_data/meteo_france_data/scm_models_data/utils_function.py @@ -1,3 +1,8 @@ +import datetime +import math +import time +import warnings +from itertools import chain from multiprocessing import Pool from typing import Tuple, Dict @@ -14,14 +19,14 @@ from extreme_fit.model.result_from_model_fit.result_from_extremes.confidence_int 
ConfidenceIntervalMethodFromExtremes from extreme_fit.model.result_from_model_fit.result_from_extremes.eurocode_return_level_uncertainties import \ EurocodeConfidenceIntervalFromExtremes -from root_utils import NB_CORES +from root_utils import NB_CORES, batch def fitted_stationary_gev_with_uncertainty_interval(x_gev, fit_method=MarginFitMethod.is_mev_gev_fit, model_class=StationaryTemporalModel, starting_year=None, quantile_level=0.98, - confidence_interval_based_on_delta_method=True) -> Tuple[Dict[str, GevParams], Dict[str, EurocodeConfidenceIntervalFromExtremes]]: + confidence_interval_based_on_delta_method=True): estimator, gev_param = _fitted_stationary_gev(fit_method, model_class, starting_year, x_gev) if quantile_level is not None: EurocodeConfidenceIntervalFromExtremes.quantile_level = quantile_level @@ -30,35 +35,65 @@ def fitted_stationary_gev_with_uncertainty_interval(x_gev, fit_method=MarginFitM confidence_interval = EurocodeConfidenceIntervalFromExtremes.from_estimator_extremes(estimator, ConfidenceIntervalMethodFromExtremes.ci_mle, coordinate) + mean_estimate = confidence_interval.mean_estimate + confidence_interval = confidence_interval.confidence_interval else: # Bootstrap method - nb_bootstrap = AbstractExtractEurocodeReturnLevel.NB_BOOTSTRAP - x_gev_list = [resample(x_gev) for _ in range(nb_bootstrap)] - arguments = [(fit_method, model_class, starting_year, x, quantile_level) for x in x_gev_list] - multiprocess = True - if multiprocess: - with Pool(NB_CORES) as p: - return_level_list = p.map(_compute_return_level, arguments) - else: - return_level_list = [_compute_return_level(argument) for argument in arguments] + return_level_list = ReturnLevelBootstrap(fit_method, model_class, starting_year, x_gev, + quantile_level).compute_all_return_level() # Remove infinite return levels and return level len_before_remove = len(return_level_list) - return_level_list = [r for r in return_level_list if not np.isinf(r)] threshold = 2000 - return_level_list = [r for r in return_level_list if r < threshold] + return_level_list = [r for r in return_level_list if (not np.isinf(r)) and (r < threshold)] len_after_remove = len(return_level_list) if len_after_remove < len_before_remove: print('Nb of fit removed (inf or > {}:'.format(threshold), len_before_remove - len_after_remove) confidence_interval = tuple([np.quantile(return_level_list, q) for q in AbstractExtractEurocodeReturnLevel.bottom_and_upper_quantile]) mean_estimate = gev_param.quantile(quantile_level) - confidence_interval = EurocodeConfidenceIntervalFromExtremes(mean_estimate, confidence_interval) else: confidence_interval = None - return gev_param, confidence_interval + mean_estimate = None + return gev_param, mean_estimate, confidence_interval + +class ReturnLevelBootstrap(object): + + def __init__(self, fit_method, model_class, starting_year, x_gev, quantile_level): + self.nb_bootstrap = AbstractExtractEurocodeReturnLevel.NB_BOOTSTRAP + self.quantile_level = quantile_level + self.x_gev = x_gev + self.starting_year = starting_year + self.model_class = model_class + self.fit_method = fit_method + + def compute_all_return_level(self): + multiprocess = None + idxs = list(range(self.nb_bootstrap)) + + if multiprocess is None: + print('multiprocessing batch') + + with Pool(NB_CORES) as p: + batchsize = math.ceil(AbstractExtractEurocodeReturnLevel.NB_BOOTSTRAP / NB_CORES) + list_return_level = p.map(self.compute_return_level_batch, batch(idxs, batchsize=batchsize)) + return_level_list = list(chain.from_iterable(list_return_level)) + + 
elif multiprocess: + with Pool(NB_CORES) as p: + return_level_list = p.map(self.compute_return_level, idxs) + else: + return_level_list = [self.compute_return_level(idx) for idx in idxs] + + return return_level_list + + def compute_return_level_batch(self, idxs): + return [self.compute_return_level(idx) for idx in idxs] + + def compute_return_level(self, idx): + x = resample(self.x_gev) + with warnings.catch_warnings(): + gev_params = _fitted_stationary_gev(self.fit_method, self.model_class, self.starting_year, x)[1] + return gev_params.quantile(self.quantile_level) + -def _compute_return_level(t): - fit_method, model_class, starting_year, x, quantile_level = t - gev_params = _fitted_stationary_gev(fit_method, model_class, starting_year, x)[1] - return gev_params.quantile(quantile_level) diff --git a/extreme_fit/model/result_from_model_fit/result_from_extremes/abstract_extract_eurocode_return_level.py b/extreme_fit/model/result_from_model_fit/result_from_extremes/abstract_extract_eurocode_return_level.py index db45b26a..ac2c6d9d 100644 --- a/extreme_fit/model/result_from_model_fit/result_from_extremes/abstract_extract_eurocode_return_level.py +++ b/extreme_fit/model/result_from_model_fit/result_from_extremes/abstract_extract_eurocode_return_level.py @@ -16,7 +16,7 @@ from root_utils import classproperty class AbstractExtractEurocodeReturnLevel(object): ALPHA_CONFIDENCE_INTERVAL_UNCERTAINTY = 0.05 - NB_BOOTSTRAP = 100 + NB_BOOTSTRAP = 1000 @classproperty def bottom_and_upper_quantile(cls): diff --git a/projects/altitude_spatial_model/altitudes_fit/main_altitudes_studies.py b/projects/altitude_spatial_model/altitudes_fit/main_altitudes_studies.py index ddb6cc3a..2d6d27af 100644 --- a/projects/altitude_spatial_model/altitudes_fit/main_altitudes_studies.py +++ b/projects/altitude_spatial_model/altitudes_fit/main_altitudes_studies.py @@ -1,3 +1,4 @@ +import datetime import time from typing import List @@ -30,18 +31,22 @@ def main(): set_seed_for_test() - fast = False + fast = True if fast is None: - massif_names = ['Vanoise', 'Haute-Maurienne', 'Vercors'][:1] - altitudes_list = altitudes_for_groups[:] + massif_names = ['Vanoise', 'Haute-Maurienne', 'Vercors'] + altitudes_list = altitudes_for_groups[1:2] elif fast: - massif_names = ['Vanoise', 'Haute-Maurienne', 'Vercors'][:] - altitudes_list = altitudes_for_groups[:2] + massif_names = ['Vanoise', 'Haute-Maurienne', 'Vercors'][:1] + altitudes_list = altitudes_for_groups[3:] else: massif_names = None altitudes_list = altitudes_for_groups[:] + start = time.time() main_loop(altitudes_list, massif_names, seasons, study_classes) + end = time.time() + duration = str(datetime.timedelta(seconds=end - start)) + print('Total duration', duration) def main_loop(altitudes_list, massif_names, seasons, study_classes): @@ -61,12 +66,11 @@ def main_loop(altitudes_list, massif_names, seasons, study_classes): def plot_visualizers(massif_names, visualizer_list): # plot_histogram_all_trends_against_altitudes(massif_names, visualizer_list) - plot_shoe_plot_ratio_interval_size_against_altitude(massif_names, visualizer_list) + # plot_shoe_plot_ratio_interval_size_against_altitude(massif_names, visualizer_list) # for relative in [True, False]: # plot_shoe_plot_changes_against_altitude(massif_names, visualizer_list, relative=relative) - # plot_shoe_plot_changes_against_altitude_for_maxima_and_total(massif_names, visualizer_list, relative=relative) - plot_coherence_curves(massif_names, visualizer_list) - # plot_coherence_curves(['Vanoise'], visualizer_list) + # 
plot_coherence_curves(massif_names, visualizer_list) + plot_coherence_curves(['Vanoise'], visualizer_list) def plot_visualizer(massif_names, visualizer): diff --git a/projects/altitude_spatial_model/altitudes_fit/one_fold_analysis/altitudes_studies_visualizer_for_non_stationary_models.py b/projects/altitude_spatial_model/altitudes_fit/one_fold_analysis/altitudes_studies_visualizer_for_non_stationary_models.py index d5ba71cc..d6fa176c 100644 --- a/projects/altitude_spatial_model/altitudes_fit/one_fold_analysis/altitudes_studies_visualizer_for_non_stationary_models.py +++ b/projects/altitude_spatial_model/altitudes_fit/one_fold_analysis/altitudes_studies_visualizer_for_non_stationary_models.py @@ -506,22 +506,29 @@ class AltitudesStudiesVisualizerForNonStationaryModels(StudyVisualizer): def plot_qqplots(self): for massif_name, one_fold_fit in self.massif_name_to_one_fold_fit.items(): ax = plt.gca() - standard_gumbel_quantiles = one_fold_fit.standard_gumbel_quantiles() - unconstrained_empirical_quantiles = one_fold_fit.best_estimator.sorted_empirical_standard_gumbel_quantiles() - all_quantiles = standard_gumbel_quantiles + unconstrained_empirical_quantiles - epsilon = 0.1 - ax_lim = [min(all_quantiles) - epsilon, max(all_quantiles) + epsilon] - model_name = self.massif_name_to_best_name[massif_name] altitudes = self.massif_name_to_massif_altitudes[massif_name] massif_name_corrected = massif_name.replace('_', ' ') label = '{} for altitudes {}'.format(massif_name_corrected, ' & '.join([str(a) + 'm' for a in altitudes])) + + + + all_quantiles = [] + + standard_gumbel_quantiles = one_fold_fit.standard_gumbel_quantiles() + + + unconstrained_empirical_quantiles = one_fold_fit.best_estimator.sorted_empirical_standard_gumbel_quantiles() + all_quantiles = standard_gumbel_quantiles + unconstrained_empirical_quantiles ax.plot(standard_gumbel_quantiles, unconstrained_empirical_quantiles, linestyle='None', label=label + '\n(selected model is ${}$)'.format(model_name), marker='o') size_label = 20 ax.set_xlabel("Theoretical quantile", fontsize=size_label) ax.set_ylabel("Empirical quantile", fontsize=size_label) + + epsilon = 0.1 + ax_lim = [min(all_quantiles) - epsilon, max(all_quantiles) + epsilon] ax.set_xlim(ax_lim) ax.set_ylim(ax_lim) diff --git a/projects/altitude_spatial_model/altitudes_fit/one_fold_analysis/one_fold_fit.py b/projects/altitude_spatial_model/altitudes_fit/one_fold_analysis/one_fold_fit.py index 82a1d738..95c0dec7 100644 --- a/projects/altitude_spatial_model/altitudes_fit/one_fold_analysis/one_fold_fit.py +++ b/projects/altitude_spatial_model/altitudes_fit/one_fold_analysis/one_fold_fit.py @@ -1,3 +1,7 @@ +import datetime +import math +import time +from itertools import chain from multiprocessing import Pool import numpy.testing as npt @@ -28,8 +32,8 @@ from extreme_fit.model.result_from_model_fit.result_from_extremes.confidence_int from extreme_fit.model.result_from_model_fit.result_from_extremes.eurocode_return_level_uncertainties import \ EurocodeConfidenceIntervalFromExtremes from projects.altitude_spatial_model.altitudes_fit.one_fold_analysis.altitude_group import AbstractAltitudeGroup, \ - DefaultAltitudeGroup -from root_utils import classproperty, NB_CORES + DefaultAltitudeGroup, altitudes_for_groups +from root_utils import classproperty, NB_CORES, batch from spatio_temporal_dataset.dataset.abstract_dataset import AbstractDataset from spatio_temporal_dataset.slicer.split import Split from spatio_temporal_dataset.spatio_temporal_observations.annual_maxima_observations import 
AnnualMaxima @@ -251,9 +255,10 @@ class OneFoldFit(object): def degree_freedom_chi2(self): return self.best_estimator.margin_model.nb_params - self.stationary_estimator.margin_model.nb_params - @property + @cached_property def is_significant(self) -> bool: if self.confidence_interval_based_on_delta_method: + print('old significance is used') stationary_model_classes = [StationaryAltitudinal, StationaryGumbelAltitudinal, AltitudinalShapeLinearTimeStationary] if any([isinstance(self.best_estimator.margin_model, c) @@ -264,11 +269,7 @@ class OneFoldFit(object): else: # Bootstrap based significance print('new significance is used') - sign_of_changes = [self.sign_of_change(f) for f in self.bootstrap_fitted_functions_from_fit] - if self.sign_of_change(self.best_function_from_fit) > 0: - return np.quantile(sign_of_changes, self.SIGNIFICANCE_LEVEL) > 0 - else: - return np.quantile(sign_of_changes, 1 - self.SIGNIFICANCE_LEVEL) < 0 + return self.cached_results_from_bootstrap[0] # @property # def goodness_of_fit_anderson_test(self): @@ -294,7 +295,8 @@ class OneFoldFit(object): fit_method=self.fit_method, temporal_covariate_for_fit=self.temporal_covariate_for_fit) # Compare sign of change - has_not_opposite_sign = self.sign_of_change(estimator.function_from_fit) * self.sign_of_change(new_estimator.function_from_fit) >= 0 + has_not_opposite_sign = self.sign_of_change(estimator.function_from_fit) * self.sign_of_change( + new_estimator.function_from_fit) >= 0 return has_not_opposite_sign def sensitivity_of_fit_test_last_years(self, estimator: LinearMarginEstimator): @@ -309,7 +311,8 @@ class OneFoldFit(object): fit_method=self.fit_method, temporal_covariate_for_fit=self.temporal_covariate_for_fit) # Compare sign of change - has_not_opposite_sign = self.sign_of_change(estimator.function_from_fit) * self.sign_of_change(new_estimator.function_from_fit) >= 0 + has_not_opposite_sign = self.sign_of_change(estimator.function_from_fit) * self.sign_of_change( + new_estimator.function_from_fit) >= 0 # if not has_not_opposite_sign: # print('Last years', self.massif_name, model_class, self.sign_of_change(estimator), self.sign_of_change(new_estimator)) return has_not_opposite_sign @@ -344,46 +347,129 @@ class OneFoldFit(object): ci_method=ConfidenceIntervalMethodFromExtremes.ci_mle, coordinate=coordinate) else: - print('nb of bootstrap for confidence interval=', AbstractExtractEurocodeReturnLevel.NB_BOOTSTRAP) - mean_estimate = self.get_return_level(self.best_function_from_fit, coordinate) - bootstrap_return_levels = [self.get_return_level(f, coordinate) for f in self.bootstrap_fitted_functions_from_fit] - print(mean_estimate, bootstrap_return_levels) - confidence_interval = tuple([np.quantile(bootstrap_return_levels, q) - for q in AbstractExtractEurocodeReturnLevel.bottom_and_upper_quantile]) - print(mean_estimate, confidence_interval) + key = (altitude, year) + mean_estimate = self.cached_results_from_bootstrap[1][key] + confidence_interval = self.cached_results_from_bootstrap[2][key] return EurocodeConfidenceIntervalFromExtremes(mean_estimate, confidence_interval) def get_return_level(self, function_from_fit, coordinate): return function_from_fit.get_params(coordinate).return_level(self.return_period) - @property - def bootstrap_data(self): - bootstrap = [] - for _ in range(AbstractExtractEurocodeReturnLevel.NB_BOOTSTRAP): - residuals = self.best_estimator.sorted_empirical_standard_gumbel_quantiles(split=Split.all) - resample_residuals = resample(residuals) - coordinate_values_to_maxima = self.best_estimator. 
\ - coordinate_values_to_maxima_from_standard_gumbel_quantiles(standard_gumbel_quantiles=resample_residuals) - bootstrap.append(coordinate_values_to_maxima) + @cached_property + def best_residuals(self): + return self.best_estimator.sorted_empirical_standard_gumbel_quantiles(split=Split.all) - return bootstrap + # @property + # def bootstrap_data(self): + # start = time.time() + # bootstrap = [] + # for _ in range(AbstractExtractEurocodeReturnLevel.NB_BOOTSTRAP): + # residuals = self.best_estimator.sorted_empirical_standard_gumbel_quantiles(split=Split.all) + # + # # yield coordinate_values_to_maxima + # bootstrap.append(coordinate_values_to_maxima) + # end1 = time.time() + # duration = str(datetime.timedelta(seconds=end1 - start)) + # print('bootstrap loader duration', duration) + # return bootstrap + + # def bootstrap_batch_data(self, batchsize=20): + # bootstrap_batch_data = [] + # batch = [] + # len_batch = 0 + # for bootstrap in self.bootstrap_data: + # batch.append(bootstrap) + # len_batch += 1 + # if len_batch == batchsize: + # yield batch + # batch = [] + # len_batch = 0 + # return bootstrap_batch_data @cached_property + def cached_results_from_bootstrap(self): + start = time.time() + bootstrap_fitted_functions = self.bootstrap_fitted_functions_from_fit + end1 = time.time() + duration = str(datetime.timedelta(seconds=end1 - start)) + print('Fit duration', duration) + + # First result - Compute the significance + sign_of_changes = [self.sign_of_change(f) for f in bootstrap_fitted_functions] + if self.sign_of_change(self.best_function_from_fit) > 0: + is_significant = np.quantile(sign_of_changes, self.SIGNIFICANCE_LEVEL) > 0 + else: + is_significant = np.quantile(sign_of_changes, 1 - self.SIGNIFICANCE_LEVEL) < 0 + + # Second result - Compute some dictionary for the return level + altitude_and_year_to_return_level_mean_estimate = {} + altitude_and_year_to_return_level_confidence_interval = {} + altitudes = altitudes_for_groups[self.altitude_group.group_id - 1] + years = [1959, 2019] + for year in years: + for altitude in altitudes: + key = (altitude, year) + coordinate = np.array([altitude, year]) + mean_estimate = self.get_return_level(self.best_function_from_fit, coordinate) + bootstrap_return_levels = [self.get_return_level(f, coordinate) for f in + bootstrap_fitted_functions] + confidence_interval = tuple([np.quantile(bootstrap_return_levels, q) + for q in AbstractExtractEurocodeReturnLevel.bottom_and_upper_quantile]) + altitude_and_year_to_return_level_mean_estimate[key] = mean_estimate + altitude_and_year_to_return_level_confidence_interval[key] = confidence_interval + + return is_significant, altitude_and_year_to_return_level_mean_estimate, altitude_and_year_to_return_level_confidence_interval + + + @property def bootstrap_fitted_functions_from_fit(self): - multiprocess = True - if multiprocess: + print('nb of bootstrap for confidence interval=', AbstractExtractEurocodeReturnLevel.NB_BOOTSTRAP) + multiprocess = None + idxs = list(range(AbstractExtractEurocodeReturnLevel.NB_BOOTSTRAP)) + + if multiprocess is None: + print('multiprocessing batch') + start = time.time() + with Pool(NB_CORES) as p: + batchsize = math.ceil(AbstractExtractEurocodeReturnLevel.NB_BOOTSTRAP / NB_CORES) + list_functions_from_fit = p.map(self.fit_batch_bootstrap_estimator, batch(idxs, batchsize=batchsize)) + functions_from_fit = list(chain.from_iterable(list_functions_from_fit)) + + elif multiprocess: + print('multiprocessing') + start = time.time() with Pool(NB_CORES) as p: - functions_from_fit = 
p.map(self.fit_one_bootstrap_estimator, self.bootstrap_data) + functions_from_fit = p.map(self.fit_one_bootstrap_estimator, idxs) + else: + start = time.time() + functions_from_fit = [] - for coordinate_values_to_maxima in self.bootstrap_data: - estimator = self.fit_one_bootstrap_estimator(coordinate_values_to_maxima) + for idx in idxs: + estimator = self.fit_one_bootstrap_estimator(idx) functions_from_fit.append(estimator) + + end1 = time.time() + duration = str(datetime.timedelta(seconds=end1 - start)) + print('Multiprocessing duration', duration) return functions_from_fit - def fit_one_bootstrap_estimator(self, coordinate_values_to_maxima): + def fit_batch_bootstrap_estimator(self, idxs): + list_function_from_fit = [] + for idx in idxs: + list_function_from_fit.append(self.fit_one_bootstrap_estimator(idx)) + return list_function_from_fit + + def fit_one_bootstrap_estimator(self, idx): + resample_residuals = resample(self.best_residuals) + coordinate_values_to_maxima = self.best_estimator. \ + coordinate_values_to_maxima_from_standard_gumbel_quantiles(standard_gumbel_quantiles=resample_residuals) + coordinates = self.dataset.coordinates observations = AnnualMaxima.from_coordinates(coordinates, coordinate_values_to_maxima) dataset = AbstractDataset(observations=observations, coordinates=coordinates) model_class = type(self.best_margin_model) - return self.fitted_linear_margin_estimator(model_class, dataset).function_from_fit + + function_from_fit = self.fitted_linear_margin_estimator(model_class, dataset).function_from_fit + + return function_from_fit diff --git a/projects/altitude_spatial_model/altitudes_fit/one_fold_analysis/plot_total_aic.py b/projects/altitude_spatial_model/altitudes_fit/one_fold_analysis/plot_total_aic.py index 45120d24..2a50df32 100644 --- a/projects/altitude_spatial_model/altitudes_fit/one_fold_analysis/plot_total_aic.py +++ b/projects/altitude_spatial_model/altitudes_fit/one_fold_analysis/plot_total_aic.py @@ -16,7 +16,7 @@ from projects.exceeding_snow_loads.utils import dpi_paper1_figure def plots(visualizer: AltitudesStudiesVisualizerForNonStationaryModels): visualizer.plot_shape_map() visualizer.plot_moments() - # visualizer.plot_qqplots() + visualizer.plot_qqplots() # for std in [True, False]: # visualizer.studies.plot_mean_maxima_against_altitude(std=std) diff --git a/projects/altitude_spatial_model/altitudes_fit/utils_altitude_studies_visualizer.py b/projects/altitude_spatial_model/altitudes_fit/utils_altitude_studies_visualizer.py index 9ce6d382..ccfdc024 100644 --- a/projects/altitude_spatial_model/altitudes_fit/utils_altitude_studies_visualizer.py +++ b/projects/altitude_spatial_model/altitudes_fit/utils_altitude_studies_visualizer.py @@ -16,7 +16,8 @@ def load_visualizer_list(season, study_class, altitudes_list, massif_names, **kw massif_names=massif_names, show=False, temporal_covariate_for_fit=None, - confidence_interval_based_on_delta_method=False + confidence_interval_based_on_delta_method=False, + display_only_model_that_pass_anderson_test=False ) visualizer_list.append(visualizer) compute_and_assign_max_abs(visualizer_list) diff --git a/root_utils.py b/root_utils.py index 8f112131..0b362aa3 100644 --- a/root_utils.py +++ b/root_utils.py @@ -12,6 +12,12 @@ for c in [' ', ':', '-']: NB_CORES = 7 +def batch(iterable, batchsize=1): + l = len(iterable) + for ndx in range(0, l, batchsize): + yield iterable[ndx:min(ndx + batchsize, l)] + + def terminal_command(command_str): return subprocess.check_output(command_str.split()).decode("utf-8").split('\n') -- 
GitLab
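
Note on the batching pattern used above: the patch replaces the earlier one-task-per-bootstrap Pool.map calls with one task per worker, where each worker fits a whole batch of bootstrap indices (see ReturnLevelBootstrap.compute_return_level_batch and OneFoldFit.fit_batch_bootstrap_estimator, built on the new batch() helper in root_utils.py). This cuts pickling and inter-process overhead, and passing indices instead of resampled arrays means each worker draws its own resample and refits locally. The sketch below is a minimal, self-contained illustration of that pattern; batch(), NB_CORES and NB_BOOTSTRAP are taken from the patch, while compute_return_level / compute_return_level_batch are placeholder stand-ins for the project's actual GEV refit, not its real code.

import math
from itertools import chain
from multiprocessing import Pool

NB_CORES = 7
NB_BOOTSTRAP = 1000


def batch(iterable, batchsize=1):
    # Split an indexable iterable into consecutive chunks (as in root_utils.batch).
    l = len(iterable)
    for ndx in range(0, l, batchsize):
        yield iterable[ndx:min(ndx + batchsize, l)]


def compute_return_level(idx):
    # Placeholder for one bootstrap resample + fit; illustrative only.
    return float(idx) ** 0.5


def compute_return_level_batch(idxs):
    # One worker call handles a whole batch, so each process is dispatched roughly once.
    return [compute_return_level(idx) for idx in idxs]


if __name__ == '__main__':
    idxs = list(range(NB_BOOTSTRAP))
    # One batch per core, mirroring compute_all_return_level in utils_function.py.
    batchsize = math.ceil(NB_BOOTSTRAP / NB_CORES)
    with Pool(NB_CORES) as p:
        per_batch_results = p.map(compute_return_level_batch, batch(idxs, batchsize=batchsize))
    return_level_list = list(chain.from_iterable(per_batch_results))
    print(len(return_level_list))

In the patch itself, the per-index worker resamples the stored maxima (or the best estimator's standardized residuals) and refits the model before returning a quantile, so the same skeleton applies with the dummy bodies swapped for the real fitting calls.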