Commit 2925c8ff authored by Dumoulin Nicolas

NEW: new data structure for storing scenarios

parent 76a0c21b
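
The new scenarios.py module introduced by this commit is not shown in this excerpt. Judging only from the call sites in MCMC.step() and MCMC.run() below, the stack keeps one row per scenario (the culture code of every patch) plus an aligned row of indicator scores. A minimal interface sketch, with hypothetical bodies wherever the diff shows only the call, not the committed implementation:

import pandas as pd

class ScenariosStack:
    def __init__(self, indicators, initial_patches):
        # one row per scenario holding the culture code of every patch
        self.cultgeopat = pd.DataFrame([initial_patches['cultgeopat']])
        # one score row per scenario, aligned on the same index
        self.scores = pd.DataFrame([indicators.compute_indicators(initial_patches)],
                                   columns=indicators.indicators_names)

    def sample(self, nb, rng):
        # draw base scenarios (with replacement) for the next generation
        return self.cultgeopat.sample(n=nb, replace=True, random_state=rng)

    def reallocate(self, index, rng, patches, targetPAT, ratio):
        ...  # body not shown in this commit excerpt

    def append(self, iter_nb, scenario):
        ...  # stack the new scenario row and its indicator scores

    def retain(self, pareto_mask):
        # keep only the Pareto-optimal scenarios
        self.cultgeopat = self.cultgeopat[pareto_mask]
        self.scores = self.scores[pareto_mask]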
@@ -5,106 +5,13 @@ import geopandas as gpd
 import os, sys, time, shutil
 import yaml
 import numpy as np
-from Indicator import Indicator
+from indicators import Indicators
-from overrides import overrides
-from proximite import Proximite as Proximity
-from resilience_list import Resilience
-from productivite import Productivity
-from indice_biodiversite_2 import Biodiversity
-from social import Social
 from tqdm import tqdm
+import patutils
 from patutils import md5sum, load_pat_patches
 import seaborn as sns
 import matplotlib.pyplot as plt
-import multiprocessing as mp
+from scenarios import ScenariosStack
-from functools import partial
-import itertools
-class Scenario:
-    _counter = itertools.count()
-
-    def __init__(self, patches):
-        self.patches = patches
-        self.id = next(Scenario._counter)
-
-    def load_shp(shpfilename):
-        '''
-        Return an instance of class Patches by loading a shapefile of initial patches.
-        '''
-        scenario = Scenario(load_pat_patches(shpfilename))
-        return scenario
-
-    def reallocate(self, rng, targetPAT, ratioNbPatches):
-        nbPatches = int(len(self.patches)*ratioNbPatches)
-        surfDelta = targetPAT - self.patches.groupby('cultgeopat')['SURF_PARC'].sum()
-        cult_to_decrease = surfDelta[surfDelta<0].sort_values(ascending=True).keys().tolist()
-        cult_to_increase = surfDelta[surfDelta>0].sort_values(ascending=False).keys().tolist()
-        # Sampling the patches to reallocate
-        if len(self.patches[self.patches['cultgeopat'].isin(cult_to_decrease)]) >= nbPatches:
-            samples = self.patches[self.patches['cultgeopat'].isin(cult_to_decrease)].sample(n=nbPatches, random_state=rng)#.reset_index(drop=True)
-        else:
-            # All cultural types have decreased to objective
-            samples = self.patches.sample(n=nbPatches, random_state=rng)
-        # Building the new culture reallocated
-        if len(cult_to_increase) > 0:
-            factors = surfDelta[cult_to_increase]
-            factors = (factors*len(samples)/factors.sum()).map(round) # normalize on nb samples
-            newCult = pd.Series(cult_to_increase).repeat(factors)
-        else:
-            # All cultural types have increased to objective
-            # So we used all cultures with more weight for highest delta
-            factors = -1/surfDelta
-            factors = (factors*len(samples)/factors.sum()).map(round) # normalize on nb samples
-            newCult = pd.Series(surfDelta.keys().tolist()).repeat(factors)
-        if len(newCult) < len(samples): # may be due to factors rounding
-            newCult = newCult.append(newCult.sample(n=len(samples)-len(newCult), random_state=rng), ignore_index=True)
-        newCult = newCult.sample(frac=1, random_state=rng)[:len(samples)].reset_index(drop=True) # shuffle and cut extra elements
-        # Doing the reallocation
-        self.patches.loc[samples.index.values,'cultgeopat'] = newCult.values
-        self.patches.loc[samples.index.values,'cultmodified'] = True
-
-class CulturalIndicator(Indicator):
-    @overrides
-    def __init__(self, config, initial_patches=None, patches_md5sum=None, targetPAT=None):
-        self.cultgeopat = config
-
-    @overrides
-    def compute_indicator(self, patches):
-        return patches[patches['cultgeopat']==self.cultgeopat]['SURF_PARC'].sum()
-
-class TargetDeltaIndicator(Indicator):
-    '''
-    This indicator computes the delta between the cultural surfaces and the given target
-    '''
-    @overrides
-    def __init__(self, config=None, initial_patches=None, patches_md5sum=None, targetPAT=None):
-        self.targetPAT = targetPAT
-
-    @overrides
-    def compute_indicator(self, patches):
-        surfDelta = self.targetPAT - patches.groupby('cultgeopat')['SURF_PARC'].sum()
-        return surfDelta.abs().sum()
-
-class Indicators:
-    def __init__(self, config, initial_patches, patches_md5sum, targetPAT):
-        self.indicators_names = ['Proximity', 'Resilience', 'Productivity', 'Biodiversity', 'Social']
-        self._indicators = {
-            indicator: eval(indicator)(config.get(indicator.lower()), initial_patches, patches_md5sum, targetPAT)
-            for indicator in self.indicators_names
-        }
-        for cultgeopat in targetPAT.index.tolist():
-            self._indicators[cultgeopat] = CulturalIndicator(cultgeopat)
-            self.indicators_names.append(cultgeopat)
-        self._indicators['TargetDelta'] = TargetDeltaIndicator(targetPAT=targetPAT)
-        self.indicators_names.append('TargetDelta')
-
-    def compute_indicators(self, patches):
-        return [self._indicators[ind].compute_indicator(patches) for ind in self.indicators_names]
-
-    def compute_indicators_pool(self, scenarios):
-        rows=[]
-        for patches in scenarios:
-            rows.append(self.compute_indicators(patches))
-        return pd.DataFrame(rows, columns=self.indicators_names)
 class MCMC:
     def __init__(self, mcmc_config_filename):
@@ -113,7 +20,6 @@ class MCMC:
             print('Please copy the template file "MCMC_config.sample.yml" and adjust to your settings and run again this program')
             sys.exit(1)
         self.mcmc_config = yaml.load(open(mcmc_config_filename,'r'))
-        self.multiprocessing = self.mcmc_config.get('multiprocessing', False)
         self.write_data = self.mcmc_config.get('write_data', False)
         self.patches_md5sum = md5sum(self.mcmc_config['patches'])
         if 'rng_seed' in self.mcmc_config:
@@ -146,8 +52,8 @@ class MCMC:
             yaml.dump(config_data, outfile, default_flow_style=False)
         # finishing init
         self.patches = load_pat_patches(self.mcmc_config['patches'])
-        self.patches['cultmodified'] = False
+        self.surfaces = self.patches['SURF_PARC']
-        self.target = pd.read_csv(self.mcmc_config['target'], sep=';',index_col=0)
+        self.target = pd.read_csv(self.mcmc_config['target'], sep=';',index_col=0).rename(index=patutils.code_cultgeopat)
         targetRatio = (self.target['2050']-self.target['2016'])/self.target['2016']
         self.targetPAT = self.patches.groupby('cultgeopat')['SURF_PARC'].sum()*(1+targetRatio)
         self.indicators = Indicators(self.mcmc_config['indicators_config'], self.patches, self.patches_md5sum, self.targetPAT)
@@ -183,55 +89,30 @@ class MCMC:
     def scenario_scores(patches, indicators):
         return indicators.compute_indicators(patches)

-    def step(self, iter_nb, nb_particles, scenarios, scores=None, write_data=False):
+    def step(self, iter_nb, nb_particles, scenarios, write_data=False):
         '''
         Sample new scenarios, evaluate their scores and retain only pareto front.
-        :param iter_nb: (int) number of this iteration, 0 for initial step, 1 for first iteration and so on.
+        :param iter_nb: (int) number of this iteration.
         :param nb_particles: number of new scenarios to sample
-        :param scenarios: (DataFrame) list of the scenarios used as base for sampling the new scenarios.
+        :param scenarios: (ScenariosStack) list of the scenarios used as base for sampling the new scenarios.
-        :param scores: (DataFrame) list of scores for each scenario. May be None
-            if given scenario are only used for sampling and will not be considered for pareto front.
         '''
-        new_scenarios = []
-        new_scenarios_id = []
-        new_scores = []
         # Loop of sampling and scoring
-        for index, patches in tqdm(scenarios.sample(nb_particles, replace=True, random_state=self.rng).iterrows(), total=nb_particles):
+        for index, scenario in tqdm(scenarios.sample(nb_particles, self.rng).iterrows(), total=nb_particles):
-            scenario = Scenario(patches[0].copy())
+            scenario = scenarios.reallocate(index, self.rng, self.patches, self.targetPAT, self.mcmc_config['ratio_patches_to_modify'])
-            scenario.reallocate(self.rng, self.targetPAT, self.mcmc_config['ratio_patches_to_modify']) # 3.8 ms
+            scenarios.append(iter_nb, scenario)
-            new_scenarios.append(scenario.patches)
-            new_scenarios_id.append(scenario.id)
-            if not self.multiprocessing:
-                new_scores.append(self.indicators.compute_indicators(scenario.patches))
-        if self.multiprocessing:
-            new_scores = list(tqdm(self.pool.imap(partial(MCMC.scenario_scores, indicators=self.indicators), new_scenarios, chunksize=50), total=len(new_scenarios)))
-        # merging with precedent data
-        start_time = time.time()
-        new_scores = pd.DataFrame(new_scores, index=new_scenarios_id, columns=self.indicators.indicators_names)
-        new_scores['iteration'] = iter_nb
-        if scores is None:
-            scores = new_scores
-            scenarios = pd.DataFrame(new_scenarios, columns=['patches'], index=new_scenarios_id)
-            scenarios['iteration'] = iter_nb
-        else:
-            scores = scores.append(new_scores, sort=False)
-            new_scenarios = pd.DataFrame(new_scenarios, columns=['patches'], index=new_scenarios_id)
-            new_scenarios['iteration'] = iter_nb
-            scenarios = scenarios.append(new_scenarios, sort=False)
-        elapsed_time = time.time() - start_time
-        print('Data merged in {} - '.format(time.strftime("%M:%S", time.gmtime(elapsed_time))), end="", flush=True)
         # Computing pareto front
         start_time = time.time()
-        pareto_mask = MCMC.is_pareto_efficient(scores[['Resilience','Proximity','Productivity','Biodiversity','Social','TargetDelta']].values)
+        pareto_mask = MCMC.is_pareto_efficient(scenarios.scores[['Resilience','Proximity','Productivity','Biodiversity','Social','TargetDelta']].values)
-        scores['pareto'] = pareto_mask
+        scenarios.scores['pareto'] = pareto_mask
         elapsed_time = time.time() - start_time
         print('Pareto front computed in {} - '.format(time.strftime("%M:%S", time.gmtime(elapsed_time))), end="", flush=True)
         # Writing output data
         start_time = time.time()
-        scores.to_csv(self.outputdir+'/mcmc_iter_{0:03d}.csv'.format(iter_nb), index=True)
+        scenarios.scores.to_csv(self.outputdir+'/scores_iter_{0:03d}.csv'.format(iter_nb), index=True)
+        scenarios.cultgeopat.to_csv(self.outputdir+'/cult_iter_{0:03d}.csv'.format(iter_nb), index=True)
         try:
-            pairplot = sns.pairplot(scores, vars=['Resilience','Proximity','Productivity','Biodiversity','Social','TargetDelta'],
+            pairplot = sns.pairplot(scenarios.scores, vars=['Resilience','Proximity','Productivity','Biodiversity','Social','TargetDelta'],
                 diag_kind="kde",hue="pareto"
                 )
             pairplot.savefig(self.outputdir+'/mcmc_iter_{0:03d}.png'.format(iter_nb))
@@ -241,47 +122,39 @@ class MCMC:
             # and an error is triggered. Here, we ignore this error.
             pass
         elapsed_time = time.time() - start_time
-        print('Scores written in {} - '.format(time.strftime("%M:%S", time.gmtime(elapsed_time))), end="", flush=True)
+        print('Scores written in {} '.format(time.strftime("%M:%S", time.gmtime(elapsed_time))), end="", flush=True)
         # Retaining only optimal particules
-        scenarios = scenarios[pareto_mask]
+        scenarios.retain(pareto_mask)
-        scores = scores[pareto_mask]
         if write_data:
             # Writing shapefiles
             start_time = time.time()
             shp_dir = self.outputdir + '/patches_iter_{0:03d}'.format(iter_nb)
             os.makedirs(shp_dir)
-            for index,scenario in scenarios.iterrows():
+            for index,scenario in scenarios.cultgeopat.iterrows():
-                scenario = scenario[0]
+                concat = pd.concat([self.patches, scenario.to_frame("newcult")], axis=1)
-                scenario[scenario['cultmodified']].drop('cultmodified', axis=1).to_file(shp_dir+'/{0:03d}_{}'.format(iter_nb,index), encoding='utf-8')
+                concat = concat[concat['init_cult']!=concat['newcult']]
+                if len(concat)>0:
+                    for c in ['newcult','cultgeopat','init_cult']:
+                        patutils.decode_cultgeopat(concat, c)
+                    concat.to_file(shp_dir+'/{:03d}_{}'.format(iter_nb, index), encoding='utf-8')
+                # scenario[scenario['cultmodified']].drop('cultmodified', axis=1).to_file(shp_dir+'/{0:03d}_{}'.format(iter_nb,index), encoding='utf-8')
             elapsed_time = time.time() - start_time
             print(' - Shape files written in {}'.format(time.strftime("%M:%S", time.gmtime(elapsed_time))), end="", flush=True)
         print()
-        return [scenarios, scores]
+        return scenarios
-    def run(self, nb_processes=mp.cpu_count()):
+    def run(self):
-        if self.multiprocessing:
-            self.pool = mp.Pool(processes=nb_processes)
         # Initial sampling and evaluation
         nb_particles = self.mcmc_config['initial_nb_particles']
-        scenarios_init = pd.DataFrame([self.patches])
+        # Start with initial scenario
-        scenarios, scores = self.step(0, nb_particles, scenarios_init)
+        scenarios = ScenariosStack(self.indicators, self.patches)
         # Iteration
         for i in range(self.mcmc_config['max_iterations']):
             print('Iteration #{}'.format(i+1))
             # Write SHP only for the last iteration
-            scenarios, scores = self.step(i+1, nb_particles, scenarios, scores, self.write_data and i == self.mcmc_config['max_iterations']-1)
+            scenarios = self.step(i, nb_particles, scenarios, self.write_data and i == self.mcmc_config['max_iterations']-1)
-            # TODO
+        return scenarios
-        # Storing variation of indicators
-        init_var = scores.std()
-        # sequential optimization loop
-        if self.multiprocessing:
-            self.pool.close()
-        return [scenarios,scores]

 if __name__ == '__main__':
     mcmc = MCMC('MCMC_config.yml')
-    # scenario = Scenario(mcmc.patches.copy())
+    scenario = mcmc.run()
-    # scenario.reallocate(mcmc.rng, mcmc.targetPAT, mcmc.mcmc_config['ratio_patches_to_modify'])
-    scenario,scores = mcmc.run()
-    # print(mcmc.indicators.biodiversity(mcmc.patches))
-    # print(mcmc.indicators.proximity(mcmc.patches))
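
MCMC.is_pareto_efficient is called in step() above but defined outside the excerpted hunks. For reference, a common mask-based formulation is sketched below, assuming all six scores are oriented so that lower is better; the committed implementation may differ:

import numpy as np

def is_pareto_efficient(costs):
    # costs: (n_scenarios, n_objectives) array; returns a boolean mask of the
    # rows that are not dominated by any other row
    is_efficient = np.ones(costs.shape[0], dtype=bool)
    for i, c in enumerate(costs):
        if is_efficient[i]:
            # keep only still-efficient points that beat c on some objective
            is_efficient[is_efficient] = np.any(costs[is_efficient] < c, axis=1)
            is_efficient[i] = True  # keep c itself
    return is_efficient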
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+from overrides import overrides
+from Indicator import Indicator
+from proximite import Proximite as Proximity
+from resilience_list import Resilience
+from productivite import Productivity
+from indice_biodiversite_2 import Biodiversity
+from social import Social
+import patutils
+
+class CulturalIndicator(Indicator):
+    @overrides
+    def __init__(self, config, initial_patches=None, patches_md5sum=None, targetPAT=None):
+        self.cultgeopat = patutils.code_cultgeopat[config]
+
+    @overrides
+    def compute_indicator(self, patches):
+        return patches[patches['cultgeopat']==self.cultgeopat]['SURF_PARC'].sum()
+
+class TargetDeltaIndicator(Indicator):
+    '''
+    This indicator computes the delta between the cultural surfaces and the given target
+    '''
+    @overrides
+    def __init__(self, config=None, initial_patches=None, patches_md5sum=None, targetPAT=None):
+        self.targetPAT = targetPAT
+
+    @overrides
+    def compute_indicator(self, patches):
+        surfDelta = self.targetPAT - patches.groupby('cultgeopat')['SURF_PARC'].sum()
+        return surfDelta.abs().sum()
+
+class Indicators:
+    def __init__(self, config, initial_patches, patches_md5sum, targetPAT):
+        self.indicators_names = ['Proximity', 'Resilience', 'Productivity', 'Biodiversity', 'Social']
+        self._indicators = {
+            indicator: eval(indicator)(config.get(indicator.lower()), initial_patches, patches_md5sum, targetPAT)
+            for indicator in self.indicators_names
+        }
+        for cultgeopat in sorted(patutils.code_cultgeopat.keys()):
+            self._indicators[cultgeopat] = CulturalIndicator(cultgeopat)
+            self.indicators_names.append(cultgeopat)
+        self._indicators['TargetDelta'] = TargetDeltaIndicator(targetPAT=targetPAT)
+        self.indicators_names.append('TargetDelta')
+
+    def compute_indicators(self, patches):
+        return [self._indicators[ind].compute_indicator(patches) for ind in self.indicators_names]
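
A quick toy check of the aggregation CulturalIndicator performs, on hypothetical data (real patches come from patutils.load_pat_patches and use the integer codes listed there, e.g. 3 for 'Fruits et légumes'):

import pandas as pd

patches = pd.DataFrame({'cultgeopat': [5, 3, 3, 1],
                        'SURF_PARC': [2.0, 1.5, 0.5, 4.0]})
# sum of SURF_PARC over patches whose culture code is 3
print(patches[patches['cultgeopat'] == 3]['SURF_PARC'].sum())  # 2.0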
@@ -8,6 +8,7 @@ import os
 from tqdm import tqdm
 from Indicator import Indicator
 from overrides import overrides
+import patutils

 class Biodiversity(Indicator):
     '''
@@ -86,7 +87,7 @@ class Biodiversity(Indicator):
         :param patches: List of PAT's patches and their data
         '''
-        meadowmask = patches["cultgeopat"]=='Prairies'
+        meadowmask = patches["cultgeopat"]==patutils.code_cultgeopat['Prairies']
         biodiversity = ((self.probabilities*meadowmask).T*meadowmask).sum()
         # We multiply by 100000 to get an indices with a value easy to understand for the users
         return self.final_result_ratio/biodiversity
@@ -122,6 +123,6 @@ if __name__ == '__main__':
     from patutils import load_pat_patches, md5sum
     patches_filename = "../output/PAT_patches/PAT_patches.shp"
     patches = load_pat_patches(patches_filename)
-    matrix_filename='../output/matrix_biodiversity_test.npz'
+    matrix_filename='../output/matrix_biodiversity.npz'
     biodiv = Biodiversity({'dmax': 1000, 'epsilon': 0.001, 'matrixfilename': matrix_filename}, patches, md5sum(patches_filename))
     print(biodiv.compute_indicator(patches))
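
The masked product in compute_indicator zeroes out every pair involving a non-meadow patch, then sums the remaining pairwise probabilities. A toy illustration with a hypothetical 3x3 probability matrix (the real one is cached in the .npz file above):

import numpy as np

probabilities = np.array([[0.0, 0.2, 0.1],
                          [0.2, 0.0, 0.4],
                          [0.1, 0.4, 0.0]])
meadowmask = np.array([True, False, True])  # patches coded as 'Prairies'
# keep entries whose row and column both belong to meadow patches
print(((probabilities * meadowmask).T * meadowmask).sum())  # 0.2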
@@ -4,15 +4,36 @@
 import hashlib
 import geopandas as gpd

-def load_pat_patches(filename):
+def load_pat_patches(filename, encode_codecult=True):
     '''
     Load shapefile containing cultural patches and applying initial filters.
     '''
     patches = gpd.GeoDataFrame.from_file(filename, encoding='utf-8')
+    for k in ['ID_PARCEL', 'INSEE_COM', 'CODE_EPCI', 'POPULATION','id_ilot','id_expl']:
+        patches[k] = patches[k].astype('int32')
+    patches.set_index('ID_PARCEL', inplace=True)
     patches = patches[patches['cultgeopat']!='Non Considérée']
-    patches['init_cult'] = patches['cultgeopat']
+    if encode_codecult:
+        encode_cultgeopat(patches)
     return patches

+code_cultgeopat = {'Cultures industrielles': 0,
+                   'Céréales': 1,
+                   'Fourrages': 2,
+                   'Fruits et légumes': 3,
+                   'Oléagineux': 4,
+                   'Prairies': 5,
+                   'Protéagineux': 6}
+reverse_code_cultgeopat = {v: k for k, v in code_cultgeopat.items()}
+
+def encode_cultgeopat(patches):
+    patches['cultgeopat'].replace(code_cultgeopat, inplace=True)
+    patches['cultgeopat'] = patches['cultgeopat'].astype('int8')
+    patches['init_cult'] = patches['cultgeopat']
+
+def decode_cultgeopat(patches, column):
+    patches[column].replace(reverse_code_cultgeopat, inplace=True)
+
 def md5sum(filename):
     hash_md5 = hashlib.md5()
     with open(filename, "rb") as f:
...
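
A round-trip sketch of the new encoding helpers on a toy frame, assuming patutils is on the import path and that 'cultgeopat' holds label strings as in the shapefile:

import pandas as pd
from patutils import encode_cultgeopat, decode_cultgeopat

df = pd.DataFrame({'cultgeopat': ['Prairies', 'Céréales']})
encode_cultgeopat(df)                 # labels -> int8 codes, copied to 'init_cult'
print(df['cultgeopat'].tolist())      # [5, 1]
decode_cultgeopat(df, 'cultgeopat')   # codes -> labels again
print(df['cultgeopat'].tolist())      # ['Prairies', 'Céréales']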
@@ -4,6 +4,7 @@ import geopandas as gpd
 import pandas as pd
 from Indicator import Indicator
 from overrides import overrides
+import patutils

 class Productivity(Indicator):
     '''
@@ -18,7 +19,7 @@ class Productivity(Indicator):
     @overrides
     def compute_indicator(self, layer_scenario):
-        valeurs_cad = layer_scenario[layer_scenario['cultgeopat']=='Fruits et légumes']
+        valeurs_cad = layer_scenario[layer_scenario['cultgeopat']==patutils.code_cultgeopat['Fruits et légumes']]
         valeurs_cad = valeurs_cad['VALEUR_CAD'] / valeurs_cad['SURF_PARC']
         return len(valeurs_cad) / valeurs_cad.sum()
...
@@ -2,6 +2,7 @@
 # -*- coding: utf-8 -*-
 from Indicator import Indicator
 from overrides import overrides
+import patutils

 class Proximite(Indicator):
     '''
@@ -18,7 +19,7 @@ class Proximite(Indicator):
         popByBdv = initial_patches.groupby('Bdv').apply(lambda x:x.groupby('INSEE_COM')['POPULATION'].first().sum())
         # OK but slower
         # popByBdv = patches.groupby(['Bdv','INSEE_COM']).first().groupby('Bdv')['POPULATION'].sum()
-        self.targetSurfByBdv = targetPAT['Fruits et légumes'] * popByBdv/Population_PAT
+        self.targetSurfByBdv = targetPAT[patutils.code_cultgeopat['Fruits et légumes']] * popByBdv/Population_PAT

     @overrides
     def compute_indicator(self, patches):
@@ -28,7 +29,7 @@ class Proximite(Indicator):
         :param affichage: True if we want the display in the console, False is the other case
         '''
         # get area of "fruits et légumes" in the scenario
-        flSurfByBdv = patches[patches['cultgeopat']=='Fruits et légumes'].groupby('Bdv').agg({'SURF_PARC':sum})
+        flSurfByBdv = patches[patches['cultgeopat']==patutils.code_cultgeopat['Fruits et légumes']].groupby('Bdv').agg({'SURF_PARC':sum})
         result = flSurfByBdv.div(self.targetSurfByBdv,axis=0).fillna(0)
         if result.where(result<1).count().sum() == 0:
             # All Bdv are fullfilled
@@ -44,7 +45,7 @@ if __name__ == '__main__':
     from patutils import load_pat_patches
     patches_filename = "../output/PAT_patches/PAT_patches.shp"
     patches = load_pat_patches(patches_filename)
-    target = pd.read_csv('../resources/targetPAT.csv', sep=';',index_col=0)
+    target = pd.read_csv('../resources/targetPAT.csv', sep=';',index_col=0).rename(index=patutils.code_cultgeopat)
     targetRatio = (target['2050']-target['2016'])/target['2016']
     targetPAT = patches.groupby('cultgeopat')['SURF_PARC'].sum()*(1+targetRatio)
     prox = Proximite(None, patches, None, targetPAT)
...
@@ -19,8 +19,9 @@ class Resilience(Indicator):
         # caching intersection between patches and grid
         self.intersection = gpd.sjoin(
             self.grid[['id','geometry']],
-            initial_patches[['geometry','SURF_PARC','ID_PARCEL']],
+            initial_patches[['geometry','SURF_PARC']].copy(),
             how='inner', op='intersects')
+        self.intersection.rename(columns={'index_right':'ID_PARCEL'}, inplace=True)
         # caching surface of patches in each cell
         self.intersection['surf_cell'] = self.intersection[['id','SURF_PARC']].groupby('id').transform(sum)
         self.size = len(self.intersection.groupby('id'))
@@ -42,14 +43,14 @@ class Resilience(Indicator):
         :param patches: The scenario to analyse as a list
         '''
         # merging given patches with table of intersection patches/grid
-        intersection2 = pd.merge(self.intersection, patches[['ID_PARCEL','cultgeopat','SURF_PARC']],on='ID_PARCEL',how='left')
+        intersection2 = pd.merge(self.intersection, patches[['cultgeopat','SURF_PARC']],left_on='ID_PARCEL', right_index=True,how='left')
         # building pivot table on grid cell and cultgeopat
         pivot = pd.pivot_table(intersection2,values=['SURF_PARC_x','surf_cell'],index=['id','cultgeopat'],aggfunc={'SURF_PARC_x':np.sum,'surf_cell':'first'})
         pivot['res'] = pivot['SURF_PARC_x']/pivot['surf_cell']
         return - self.size / (pivot['res'] * pivot['res'].apply(math.log2)).sum()

 if __name__ == '__main__':
-    from patutils import load_pat_patches
+    from patutils import load_pat_patches, code_cultgeopat
     patches_filename = "../output/PAT_patches/PAT_patches.shp"
     patches = load_pat_patches(patches_filename)
     import time
@@ -58,3 +59,7 @@ if __name__ == '__main__':
     print(res.compute_indicator(patches))
     elapsed = time.time()-start
     print(elapsed)
+    cult_col_index = patches.columns.get_loc('cultgeopat')
+    for i in range(5000):
+        patches.iat[i,cult_col_index]=code_cultgeopat['Céréales']
+    print(res.compute_indicator(patches))
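
The return expression in compute_indicator is a Shannon-diversity style score: 'res' is each culture's share of its grid cell's surface, and the indicator is -size divided by the summed p*log2(p) terms. A toy check for a single hypothetical cell (size = 1) split 50/50 between two cultures:

import math

res = [0.5, 0.5]
size = 1
# sum(p * log2(p)) = -1, so the indicator is -1 / -1 = 1.0
print(-size / sum(p * math.log2(p) for p in res))  # 1.0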
+#!/usr/bin/python
+# -*- coding: utf-8 -*-