Commit dddb4182 authored by Dumoulin Nicolas

Fully reallocated scenarios are no longer evaluated twice, and the simulation stops once all scenarios are fully reallocated
parent 97523014
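In outline, the change works as follows: sampling now draws only from scenarios that still have patches to move, reallocation returns None when nothing can be moved, and the run loop halts once every scenario is finished. A minimal sketch of that control flow (the names `sample`, `reallocate`, `append` and `allFullReallocated` come from the diff below; the surrounding scaffolding is illustrative only, not the project's actual code):

```python
# Sketch of the intent of the patched MCMC.run()/MCMC.step() loop.
def run(mcmc, scenarios, max_iterations, nb_particles):
    for i in range(max_iterations):
        nb_iteration = i + 1
        # sample() only draws scenarios that are not fully reallocated,
        # so a finished scenario is never evaluated twice
        for index, _ in scenarios.sample(nb_particles, mcmc.rng).iterrows():
            candidate = scenarios.reallocate(
                index, mcmc.rng, mcmc.patches, mcmc.targetPAT,
                mcmc.mcmc_config['ratio_patches_to_modify'])
            if candidate is not None:  # None: nothing left to reallocate
                scenarios.append(nb_iteration, candidate)
        if scenarios.allFullReallocated():
            print('All scenarios are fully reallocated. Simulation finished.')
            break
    return scenarios
```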
@@ -20,7 +20,6 @@ class MCMC:
print('Please copy the template file "MCMC_config.sample.yml", adjust it to your settings, and run this program again')
sys.exit(1)
self.mcmc_config = yaml.load(open(mcmc_config_filename,'r'))
self.write_data = self.mcmc_config.get('write_data', False)
self.patches_md5sum = md5sum(self.mcmc_config['patches'])
if 'rng_seed' in self.mcmc_config:
self.rng = np.random.RandomState(self.mcmc_config['rng_seed'])
@@ -89,7 +88,7 @@ class MCMC:
def scenario_scores(patches, indicators):
return indicators.compute_indicators(patches)
def step(self, iter_nb, nb_particles, scenarios, write_data=False):
def step(self, iter_nb, nb_particles, scenarios):
'''
Sample new scenarios, evaluate their scores, and retain only the Pareto front.
@@ -100,7 +99,8 @@ class MCMC:
# Loop of sampling and scoring
for index, scenario in tqdm(scenarios.sample(nb_particles, self.rng).iterrows(), total=nb_particles):
scenario = scenarios.reallocate(index, self.rng, self.patches, self.targetPAT, self.mcmc_config['ratio_patches_to_modify'])
scenarios.append(iter_nb, scenario)
if scenario is not None:
scenarios.append(iter_nb, scenario)
# Computing pareto front
start_time = time.time()
pareto_mask = MCMC.is_pareto_efficient(scenarios.scores[['Resilience','Proximity','Productivity','Biodiversity','Social','TargetDelta']].values)
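The body of `MCMC.is_pareto_efficient` lies outside this hunk; for context, a common NumPy implementation of such a mask looks like the sketch below. This is an assumption about the approach, and it presumes every score column is oriented so that smaller is better:

```python
import numpy as np

def is_pareto_efficient(costs):
    # Boolean mask of rows not dominated by any other row
    # (all objectives assumed to be minimized).
    is_efficient = np.ones(costs.shape[0], dtype=bool)
    for i, c in enumerate(costs):
        if is_efficient[i]:
            # Keep only points strictly better than c on at least one axis
            is_efficient[is_efficient] = np.any(costs[is_efficient] < c, axis=1)
            is_efficient[i] = True  # keep the reference point itself
    return is_efficient
```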
@@ -126,34 +126,42 @@
print()
# Retaining only optimal particles
scenarios.retain(pareto_mask)
if write_data:
print(' Writing modified patches of scenarios')
# Writing shapefiles
start_time = time.time()
shp_dir = self.outputdir + '/patches_iter_{0:03d}'.format(iter_nb)
os.makedirs(shp_dir)
for index,scenario in tqdm(scenarios.cultgeopat.iterrows(), total=len(scenarios.cultgeopat)):
concat = pd.concat([self.patches, scenario.to_frame("newcult")], axis=1)
concat = concat[concat['init_cult']!=concat['newcult']]
if len(concat)>0:
for c in ['newcult','cultgeopat','init_cult']:
patutils.decode_cultgeopat(concat, c)
concat.to_file(shp_dir+'/{:03d}_{}'.format(iter_nb, index), encoding='utf-8')
# scenario[scenario['cultmodified']].drop('cultmodified', axis=1).to_file(shp_dir+'/{0:03d}_{}'.format(iter_nb,index), encoding='utf-8')
elapsed_time = time.time() - start_time
print(' Shape files written in {}'.format(time.strftime("%M:%S", time.gmtime(elapsed_time))))
return scenarios
def write_data(self, iter_nb, scenarios):
print(' Writing modified patches of scenarios')
# Writing shapefiles
start_time = time.time()
shp_dir = self.outputdir + '/patches_iter_{0:03d}'.format(iter_nb)
os.makedirs(shp_dir)
for index,scenario in tqdm(scenarios.cultgeopat.iterrows(), total=len(scenarios.cultgeopat)):
concat = pd.concat([self.patches, scenario.to_frame("newcult")], axis=1)
concat = concat[concat['init_cult']!=concat['newcult']]
if len(concat)>0:
for c in ['newcult','cultgeopat','init_cult']:
patutils.decode_cultgeopat(concat, c)
concat.to_file(shp_dir+'/{:03d}_{}'.format(iter_nb, index), encoding='utf-8')
elapsed_time = time.time() - start_time
print(' Shape files written in {}'.format(time.strftime("%M:%S", time.gmtime(elapsed_time))))
def run(self):
# Initial sampling and evaluation
nb_particles = self.mcmc_config['initial_nb_particles']
# Start with initial scenario
scenarios = ScenariosStack(self.indicators, self.patches)
# Iteration
nb_iteration=0
for i in range(self.mcmc_config['max_iterations']):
print('Iteration #{}'.format(i+1))
nb_iteration=i+1
print('Iteration #{}'.format(nb_iteration))
# Write SHP only for the last iteration
scenarios = self.step(i, nb_particles, scenarios, self.write_data and i == self.mcmc_config['max_iterations']-1)
scenarios = self.step(nb_iteration, nb_particles, scenarios)
if scenarios.allFullReallocated():
print('All scenarios are fully reallocated. Simulation finished.')
break
if self.mcmc_config.get('write_data', False):
self.write_data(nb_iteration, scenarios)
return scenarios
if __name__ == '__main__':
...
@@ -5,6 +5,30 @@ import pandas as pd
import itertools
from patutils import load_pat_patches
def reallocate(patches, rng, targetPAT, ratioNbPatches):
nbPatches = int(len(patches)*ratioNbPatches)
surfDelta = targetPAT - patches.groupby('cultgeopat')['SURF_PARC'].sum()
cult_to_decrease = surfDelta[surfDelta<0].sort_values(ascending=True).keys().tolist()
cult_to_increase = surfDelta[surfDelta>0].sort_values(ascending=False).keys().tolist()
targetReached = False
# Sampling the patches to reallocate
patches_to_decrease = patches[patches['cultgeopat'].isin(cult_to_decrease)]
samples = patches_to_decrease.sample(n=min(nbPatches,len(patches_to_decrease)), random_state=rng)#.reset_index(drop=True)
# Building the new culture reallocated
if len(cult_to_increase) > 0:
factors = surfDelta[cult_to_increase]
factors = (factors*len(samples)/factors.sum()).map(round) # normalize on nb samples
newCult = pd.Series(cult_to_increase).repeat(factors)
else:
# All cultural types have reached their target, so leave the patches unchanged
return None
if len(newCult) < len(samples): # may be due to factors rounding
newCult = newCult.append(newCult.sample(n=len(samples)-len(newCult), random_state=rng), ignore_index=True)
newCult = newCult.sample(frac=1, random_state=rng)[:len(samples)].reset_index(drop=True) # shuffle and cut extra elements
# Doing the reallocation
patches.loc[samples.index.values,'cultgeopat'] = newCult.values
return patches
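The subtle part of the new module-level `reallocate` is how surface deficits become counts of reassigned patches: `factors` distributes the sampled patches across the cultures to increase, proportionally to how far each is from its target, and rounding shortfalls are compensated by resampling. A toy walk-through with invented numbers (pandas < 2 assumed, since `Series.append` was removed in pandas 2.0):

```python
import pandas as pd
import numpy as np

rng = np.random.RandomState(0)
# Hypothetical surface still missing per cultural type (hectares)
surfDelta = pd.Series({'Fruits': 300.0, 'Legumes': 100.0})
cult_to_increase = ['Fruits', 'Legumes']
n_samples = 8  # number of patches sampled for reallocation

factors = surfDelta[cult_to_increase]
factors = (factors * n_samples / factors.sum()).map(round)  # Fruits: 6, Legumes: 2
newCult = pd.Series(cult_to_increase).repeat(factors)       # 6x 'Fruits', 2x 'Legumes'
if len(newCult) < n_samples:  # rounding may leave a shortfall
    newCult = newCult.append(newCult.sample(n=n_samples - len(newCult),
                                            random_state=rng), ignore_index=True)
# Shuffle, then trim to exactly one new culture per sampled patch
newCult = newCult.sample(frac=1, random_state=rng)[:n_samples].reset_index(drop=True)
print(newCult.value_counts())  # Fruits 6, Legumes 2
```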
class ScenariosStack:
def __init__(self, indicators, initial_patches):
@@ -15,54 +39,40 @@ class ScenariosStack:
self.cultgeopat = self.cultgeopat.T
# Now we have the id_parcel as columns, and each row will represent a scenario
self.cultgeopat.rename(index={'cultgeopat':scenario_id}, inplace=True)
# self.cultmodified = pd.DataFrame([[False]*len(initial_patches)], columns=initial_patches.index.values, dtype=bool)
self.scores = pd.DataFrame([self.indicators.compute_indicators(initial_patches) + [0, False]], columns=self.indicators.indicators_names+['iteration','pareto'], dtype='float64')
self.scores = pd.DataFrame([self.indicators.compute_indicators(initial_patches) + [0, False, False]], columns=self.indicators.indicators_names+['iteration','pareto','full_reallocated'], dtype='float64')
for c in ['pareto','full_reallocated']:
self.scores[c] = self.scores[c].astype('bool')
self.scores['iteration'] = self.scores['iteration'].astype('int')
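Building the row as float64 and then re-casting the bookkeeping columns works; a hypothetical equivalent that sets the per-column dtypes in a single pass (toy values, not the project's attributes):

```python
import pandas as pd

indicator_values = [0.1, 0.2]  # invented indicator scores
indicators_names = ['Resilience', 'Proximity']
scores = pd.DataFrame(
    [indicator_values + [0, False, False]],
    columns=indicators_names + ['iteration', 'pareto', 'full_reallocated'],
).astype({'iteration': 'int64', 'pareto': 'bool', 'full_reallocated': 'bool'})
```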
def append(self, nb_iter, patches_cult):
id = next(self._counter)
self.cultgeopat.loc[id] = patches_cult['cultgeopat'].values
# self.cultmodified[id] = False
self.scores.at[id] = self.indicators.compute_indicators(patches_cult) + [nb_iter, False]
self.scores.at[id] = self.indicators.compute_indicators(patches_cult) + [nb_iter, False, False]
self.scores['iteration'] = self.scores['iteration'].astype('int')
def retain(self, mask):
self.cultgeopat = self.cultgeopat[mask]
# self.cultmodified = self.cultmodified[mask]
self.scores = self.scores[mask]
def drop_scenario(self, id_scenario):
pass
def setFullReallocated(self, id_scen, full_reallocated):
self.scores.loc[id_scen,'full_reallocated'] = full_reallocated
def isFullReallocated(self, id_scen):
return self.scores.loc[id_scen,'full_reallocated']
def allFullReallocated(self):
return self.scores[~self.scores['full_reallocated']].empty
def sample(self, nb, rng):
return self.cultgeopat.sample(nb, replace=True, random_state=rng)
return self.cultgeopat[~self.scores['full_reallocated']].sample(nb, replace=True, random_state=rng)
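This filter in `sample` is what actually prevents a finished scenario from being drawn and re-evaluated. Schematically, with toy frames (invented data, aligned indexes assumed):

```python
import pandas as pd
import numpy as np

rng = np.random.RandomState(42)
cultgeopat = pd.DataFrame({'patch_1': [1, 2, 3], 'patch_2': [4, 5, 6]})
scores = pd.DataFrame({'full_reallocated': [False, True, False]})

# Row 1 is fully reallocated, so only rows 0 and 2 can ever be drawn
candidates = cultgeopat[~scores['full_reallocated']]
picked = candidates.sample(4, replace=True, random_state=rng)
print(picked.index.tolist())  # indices drawn only from {0, 2}
```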
def consolidate_scenario(self, id, patches, columns):
selected_data = patches[columns]
return pd.concat([self.cultgeopat.loc[id].to_frame("cultgeopat"), selected_data],axis=1)
def reallocate(self, scen_id, rng, initial_patches, targetPAT, ratioNbPatches):
patches = self.consolidate_scenario(scen_id, initial_patches, ['SURF_PARC', 'Bdv','init_cult', 'VALEUR_CAD'])
nbPatches = int(len(patches)*ratioNbPatches)
surfDelta = targetPAT - patches.groupby('cultgeopat')['SURF_PARC'].sum()
cult_to_decrease = surfDelta[surfDelta<0].sort_values(ascending=True).keys().tolist()
cult_to_increase = surfDelta[surfDelta>0].sort_values(ascending=False).keys().tolist()
targetReached = False
# Sampling the patches to reallocate
patches_to_decrease = patches[patches['cultgeopat'].isin(cult_to_decrease)]
samples = patches_to_decrease.sample(n=min(nbPatches,len(patches_to_decrease)), random_state=rng)#.reset_index(drop=True)
# Building the new culture reallocated
if len(cult_to_increase) > 0:
factors = surfDelta[cult_to_increase]
factors = (factors*len(samples)/factors.sum()).map(round) # normalize on nb samples
newCult = pd.Series(cult_to_increase).repeat(factors)
else:
# All cultural types have increased to their objective,
# so we used all cultures, with more weight for the highest delta
raise RuntimeError("Reallocation impossible: All cultural types have reached the target.")
if len(newCult) < len(samples): # may be due to factors rounding
newCult = newCult.append(newCult.sample(n=len(samples)-len(newCult), random_state=rng), ignore_index=True)
newCult = newCult.sample(frac=1, random_state=rng)[:len(samples)].reset_index(drop=True) # shuffle and cut extra elements
# Doing the reallocation
patches.loc[samples.index.values,'cultgeopat'] = newCult.values
return patches
if self.isFullReallocated(scen_id):
return None
patches = self.consolidate_scenario(scen_id, initial_patches, ['SURF_PARC', 'Bdv','init_cult', 'VALEUR_CAD'])
return reallocate(patches, rng, targetPAT, ratioNbPatches)