Commit a13e8f7f authored by Dumoulin Nicolas's avatar Dumoulin Nicolas
Browse files

fix in indicators: they are now inverted for minimisation inside their own methods instead of in the MCMC loop

indicator on delta with cultural surfaces target
various fixes
parent 8b387f86
......@@ -15,6 +15,7 @@ from social import Social
from tqdm import tqdm
from patutils import md5sum, load_pat_patches
import seaborn as sns
import matplotlib.pyplot as plt
import multiprocessing as mp
from functools import partial
import itertools
......@@ -70,6 +71,19 @@ class CulturalIndicator(Indicator):
def compute_indicator(self, patches):
    """Return the total parcel surface ('SURF_PARC') of the patches whose
    culture ('cultgeopat') matches this indicator's culture."""
    is_culture = patches['cultgeopat'] == self.cultgeopat
    return patches.loc[is_culture, 'SURF_PARC'].sum()
class TargetDeltaIndicator(Indicator):
    '''
    This indicator computes the delta between the cultural surfaces of a
    scenario and the given target surfaces (one target value per culture),
    summed into a single scalar suitable for minimisation.
    '''
    @overrides
    def __init__(self, config=None, initial_patches=None, patches_md5sum=None, targetPAT=None):
        # Only the target is stored. config/initial_patches/patches_md5sum are
        # accepted to keep the signature compatible with the Indicator base
        # class but are unused here.
        self.targetPAT = targetPAT

    @overrides
    def compute_indicator(self, patches):
        # Per-culture gap: target surface minus the scenario's actual total
        # 'SURF_PARC' grouped by culture ('cultgeopat').
        surfDelta = self.targetPAT - patches.groupby('cultgeopat')['SURF_PARC'].sum()
        # Sum of absolute gaps: 0 means the target is exactly reached;
        # larger is worse (indicator to minimise).
        return surfDelta.abs().sum()
class Indicators:
def __init__(self, config, initial_patches, patches_md5sum, targetPAT):
self.indicators_names = ['Proximity', 'Resilience', 'Productivity', 'Biodiversity', 'Social']
......@@ -80,9 +94,11 @@ class Indicators:
for cultgeopat in targetPAT.index.tolist():
self._indicators[cultgeopat] = CulturalIndicator(cultgeopat)
self.indicators_names.append(cultgeopat)
self._indicators['TargetDelta'] = TargetDeltaIndicator(targetPAT=targetPAT)
self.indicators_names.append('TargetDelta')
def compute_indicators(self, patches):
    """Evaluate every registered indicator on *patches*.

    Returns the list of indicator values in the order of
    ``self.indicators_names``.  Each indicator is already oriented for
    minimisation inside its own ``compute_indicator`` method, so no
    ``1/value`` inversion is applied here (the stripped-diff artefact left
    both the obsolete inverted return and this one; only this one is live).
    """
    return [self._indicators[ind].compute_indicator(patches) for ind in self.indicators_names]
def compute_indicators_pool(self, scenarios):
rows=[]
......@@ -202,14 +218,16 @@ class MCMC:
scores = scores.append(new_scores, sort=False)
scenarios = scenarios.append(pd.DataFrame(new_scenarios, index=new_scenarios_id), sort=False)
# Computing pareto front
pareto_mask = MCMC.is_pareto_efficient(scores[['Resilience','Proximity','Productivity','Biodiversity','Social']].values)
pareto_mask = MCMC.is_pareto_efficient(scores[['Resilience','Proximity','Productivity','Biodiversity','Social','TargetDelta']].values)
scores['pareto'] = pareto_mask
# Writing output data
scores.to_csv(self.outputdir+'/mcmc_iter_{}.csv'.format(iter_nb), index=True)
scores.to_csv(self.outputdir+'/mcmc_iter_{0:03d}.csv'.format(iter_nb), index=True)
try:
sns.pairplot(scores, vars=['Resilience','Proximity','Productivity','Biodiversity','Social'],
pairplot = sns.pairplot(scores, vars=['Resilience','Proximity','Productivity','Biodiversity','Social','TargetDelta'],
diag_kind="kde",hue="pareto"
).savefig(self.outputdir+'/mcmc_iter_{}.png'.format(iter_nb))
)
pairplot.savefig(self.outputdir+'/mcmc_iter_{0:03d}.png'.format(iter_nb))
plt.close(pairplot.fig)
except (np.linalg.linalg.LinAlgError, ValueError) as e:
# when data doesn't vary enough on a dimension, it is impossible to generate the pareto density
# and an error is triggered. Here, we ignore this error.
......@@ -218,11 +236,11 @@ class MCMC:
scores = scores[pareto_mask]
# Writing shapefiles
if write_shp:
shp_dir = self.outputdir + '/patches_iter_{}'.format(iter_nb)
shp_dir = self.outputdir + '/patches_iter_{0:03d}'.format(iter_nb)
os.makedirs(shp_dir)
for index,scenario in scenarios.iterrows():
scenario = scenario[0]
scenario[scenario['cultmodified']].drop('cultmodified', axis=1).to_file(shp_dir+'/{}_{}'.format(iter_nb,index), encoding='utf-8')
scenario[scenario['cultmodified']].drop('cultmodified', axis=1).to_file(shp_dir+'/{0:03d}_{}'.format(iter_nb,index), encoding='utf-8')
return [scenarios, scores]
def run(self, nb_processes=mp.cpu_count()):
......@@ -234,9 +252,9 @@ class MCMC:
scenarios, scores = self.step(0, nb_particles, scenarios_init)
# Iteration
for i in range(self.mcmc_config['max_iterations']):
print('Iteration #{}'.format(i))
print('Iteration #{}'.format(i+1))
# Write SHP only for the last iteration
scenarios, scores = self.step(i+1, nb_particles, scenarios, scores, i == self.mcmc_config['max_iterations']-1)
scenarios, scores = self.step(i+1, nb_particles, scenarios, scores, self.write_shp and i == self.mcmc_config['max_iterations']-1)
# TODO
# Storing variation of indicators
init_var = scores.std()
......
......@@ -89,7 +89,7 @@ class Biodiversity(Indicator):
meadowmask = patches["cultgeopat"]=='Prairies'
biodiversity = ((self.probabilities*meadowmask).T*meadowmask).sum()
# We multiply by 100000 to get an indices with a value easy to understand for the users
return biodiversity/self.final_result_ratio
return self.final_result_ratio/biodiversity
def save_probabilities(self, filename):
'''
......
......@@ -20,7 +20,7 @@ class Productivity(Indicator):
def compute_indicator(self, layer_scenario):
    """Productivity indicator, oriented for minimisation.

    Keeps only the 'Fruits et légumes' parcels, computes each parcel's
    cadastral value per surface unit (VALEUR_CAD / SURF_PARC), and returns
    the inverse of their mean (count / sum) so that a higher productivity
    yields a lower — i.e. better — score.  The stripped diff left both the
    old ``sum()/len()`` return and this inverted one; only the inverted
    form (the committed fix) is kept, as the dead first return made it
    unreachable.
    """
    valeurs_cad = layer_scenario[layer_scenario['cultgeopat'] == 'Fruits et légumes']
    valeurs_cad = valeurs_cad['VALEUR_CAD'] / valeurs_cad['SURF_PARC']
    # len/sum == 1/mean of the per-parcel value densities.
    return len(valeurs_cad) / valeurs_cad.sum()
if __name__ == '__main__':
import geopandas as gpd
......
......@@ -37,7 +37,7 @@ class Proximite(Indicator):
# put 0 where target is reached and computing mean
result = result.where(result<1,0).sum() / result.where(result<1).count()
result = result.values[0]
return result
return 1/result
if __name__ == '__main__':
import pandas as pd
......
......@@ -46,7 +46,7 @@ class Resilience(Indicator):
# building pivot table on grid cell and cultgeopat
pivot = pd.pivot_table(intersection2,values=['SURF_PARC_x','surf_cell'],index=['id','cultgeopat'],aggfunc={'SURF_PARC_x':np.sum,'surf_cell':'first'})
pivot['res'] = pivot['SURF_PARC_x']/pivot['surf_cell']
return - (pivot['res'] * pivot['res'].apply(math.log2)).sum() / self.size
return - self.size / (pivot['res'] * pivot['res'].apply(math.log2)).sum()
if __name__ == '__main__':
from patutils import load_pat_patches
......
......@@ -87,7 +87,7 @@ class Social(Indicator):
cost = 0
patches_altered = scenario_patches[scenario_patches['cultgeopat'] != scenario_patches['init_cult']]
cost = pd.merge(patches_altered, self.patches_transition_cost,on=['ID_PARCEL','cultgeopat'], how='left')['cost'].sum()
return 1/(1+cost)
return cost
if __name__ == '__main__':
import time
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.