diff --git a/src/Checker/Adists.py b/src/Checker/Adists.py
index f47b441b71072379ba51c74bb397951f659fd645..a4315db4c8dc44e1ff4a90ab299a61234d57d6b1 100644
--- a/src/Checker/Adists.py
+++ b/src/Checker/Adists.py
@@ -29,6 +29,7 @@ _translate = QCoreApplication.translate
 
 logger = logging.getLogger()
 
+
 class AdistsOutputRKChecker(AbstractModelChecker):
     def __init__(self):
         super(AdistsOutputRKChecker, self).__init__()
diff --git a/src/Model/River.py b/src/Model/River.py
index 0b1d024812fe9945d6a5332c7947f3b58ec3d1ca..db15b913af496425d91b7504fe97b0ef9c5f688b 100644
--- a/src/Model/River.py
+++ b/src/Model/River.py
@@ -521,7 +521,7 @@ Last export at: @date."""
         return self._Pollutants
 
     @property
-    def initial_conditions_adists(self):
+    def ic_adists(self):
         return self._InitialConditionsAdisTS
 
     @property
diff --git a/src/Solver/AdisTS.py b/src/Solver/AdisTS.py
index 3d4ad61c3a81774e10fe7c210bc49b4046d2d312..a468a1f373a4f8384982877a40b1353862c7b2bc 100644
--- a/src/Solver/AdisTS.py
+++ b/src/Solver/AdisTS.py
@@ -16,9 +16,9 @@
 
 # -*- coding: utf-8 -*-
 
-import os, glob
+import os
+import glob
 import logging
-from http.cookiejar import reach
 
 import numpy as np
 
@@ -44,6 +44,7 @@ from itertools import chain
 
 logger = logging.getLogger()
 
+
 def adists_file_open(filepath, mode):
     f = open(filepath, mode)
 
@@ -60,6 +61,7 @@ def adists_file_open(filepath, mode):
 
     return f
 
+
 class AdisTS(CommandLineSolver):
     _type = "adists"
 
@@ -137,7 +139,9 @@ class AdisTS(CommandLineSolver):
 
         self._export_REP_additional_lines(study, f)
 
-        path_mage_net = os.path.join(os.path.abspath(os.path.join(repertory, os.pardir)), f"{mage_rep}/net")
+        path_mage_net = os.path.join(os.path.abspath(
+            os.path.join(repertory, os.pardir)
+        ), f"{mage_rep}/net")
         path_adists_net = os.path.join(repertory, "net")
 
         if os.path.exists(path_mage_net):
@@ -194,7 +198,7 @@ class AdisTS(CommandLineSolver):
     _nodes_views = set()
 
     def get_reach_name(self, reach):
-        return f"Reach_{reach.id:>3}".replace(" ", "0")
+        return f"Reach_{reach.id:>3}".replace(" ", "0")
 
     def get_node_name(self, node):
         """Generate a 3 char name for node
@@ -207,10 +211,10 @@ class AdisTS(CommandLineSolver):
         """
         n = node.id
 
-        ##print("node name id: ", n)
+        # print("node name id: ", n)
 
-        ##if n in self._nodes_names:
-            ##return self._nodes_names[n]
+        # if n in self._nodes_names:
+        #     return self._nodes_names[n]
 
         name = ""
@@ -279,33 +283,49 @@ class AdisTSwc(AdisTS):
 
         for pollutant in pollutants:
             name = pollutant.name
-            with adists_file_open(os.path.join(repertory, f"{name}.POL"), "w+") as f:
+            with adists_file_open(
+                os.path.join(repertory, f"{name}.POL"), "w+"
+            ) as f:
                 files.append(f"{name}.POL")
                 f.write(f"name = {name}\n")
-                self._export_POL_Characteristics(study, pollutant._data, f, qlog)
+                self._export_POL_Characteristics(
+                    study, pollutant._data, f, qlog
+                )
 
-                POL_ICs = next(filter(lambda ic: ic.pollutant == pollutant.id,\
-                    study.river.initial_conditions_adists.Initial_Conditions_List))
+                POL_ICs = next(filter(
+                    lambda ic: ic.pollutant == pollutant.id,
+                    study.river.ic_adists.Initial_Conditions_List
+                ))
 
-                if POL_ICs.concentration != None:
+                if POL_ICs.concentration is not None:
                     f.write(f"file_ini = {name}.INI\n")
-                    self._export_ICs_AdisTS(study, repertory, POL_ICs, qlog, name)
+                    self._export_ICs_AdisTS(
+                        study, repertory, POL_ICs, qlog, name
+                    )
 
-                POL_BCs = list(filter(lambda bc: bc.pollutant == pollutant.id,\
-                    study.river.boundary_conditions_adists.BCs_AdisTS_List))
+                POL_BCs = list(filter(
+                    lambda bc: bc.pollutant == pollutant.id,
+                    study.river.boundary_conditions_adists.BCs_AdisTS_List
+                ))
 
                 if len(POL_BCs) != 0:
                     f.write(f"file_cl = {name}.CDT\n")
-                    self._export_BCs_AdisTS(study, repertory, POL_BCs, qlog, name)
+                    self._export_BCs_AdisTS(
+                        study, repertory, POL_BCs, qlog, name
+                    )
 
-                POL_LAT_Cont = list(filter(lambda lc: lc.pollutant == pollutant.id,\
-                    study.river.lateral_contributions_adists.Lat_Cont_List))
+                POL_LAT_Cont = list(filter(
+                    lambda lc: lc.pollutant == pollutant.id,
+                    study.river.lateral_contributions_adists.Lat_Cont_List
+                ))
 
                 if len(POL_LAT_Cont) != 0:
                     f.write(f"file_ald = {name}.ALD\n")
                     f.write(f"*\n")
-                    self._export_Lat_AdisTS(study, repertory, POL_LAT_Cont, qlog, name)
+                    self._export_Lat_AdisTS(
+                        study, repertory, POL_LAT_Cont, qlog, name
+                    )
 
         return files
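Note (illustrative sketch, not part of the patch): in the hunk above,
next(filter(...)) raises StopIteration when a pollutant has no matching
initial condition, which would abort the whole export. A guarded variant
using the same names and next()'s default argument could look like this:

    POL_ICs = next(
        (ic for ic in study.river.ic_adists.Initial_Conditions_List
         if ic.pollutant == pollutant.id),
        None,
    )
    if POL_ICs is not None and POL_ICs.concentration is not None:
        f.write(f"file_ini = {name}.INI\n")
        self._export_ICs_AdisTS(study, repertory, POL_ICs, qlog, name)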
@@ -314,16 +334,22 @@ class AdisTSwc(AdisTS):
         if qlog is not None:
             qlog.put("Export POL LCs files")
 
-        with adists_file_open(os.path.join(repertory, f"{POL_name}.ALD"), "w+") as f:
+        with adists_file_open(
+            os.path.join(repertory, f"{POL_name}.ALD"), "w+"
+        ) as f:
             for LC in POL_LC:
-                reach = next(filter(lambda edge: edge.id == LC.edge, study.river.edges()))  #.name
+                reach = next(filter(
+                    lambda edge: edge.id == LC.edge, study.river.edges()
+                ))  # .name
                 reach_name = self.get_reach_name(self, reach)
 
                 f.write(f"${reach_name} {LC.begin_rk} {LC.end_rk}\n")
                 f.write(f"*temps |débit massique (kg/s)\n")
                 f.write(f"*---------++++++++++\n")
                 for LC_data in LC._data:
-                    f.write(f"{timestamp_to_old_pamhyr_date_adists(int(LC_data[0]))} {LC_data[1]}\n")
+                    tmp = timestamp_to_old_pamhyr_date_adists(int(LC_data[0]))
+                    f.write(" ".join((f"{tmp}",
+                                      f"{LC_data[1]}\n")))
                 f.write(f"*\n")
 
         return True
@@ -333,11 +359,17 @@ class AdisTSwc(AdisTS):
         if qlog is not None:
             qlog.put("Export POL BCs files")
 
-        with adists_file_open(os.path.join(repertory, f"{POL_name}.CDT"), "w+") as f:
+        with adists_file_open(os.path.join(
+            repertory, f"{POL_name}.CDT"), "w+"
+        ) as f:
             for BC in POL_BC:
-                node = next(filter(lambda x: x.id == BC.node, study.river._nodes))  #.name
-                print("node name in BC:", node, node.name, self.get_node_name(node))
-                node_name = node.name  #self.get_node_name(node)
+                node = next(filter(
+                    lambda x: x.id == BC.node, study.river._nodes
+                ))  # .name
+                print("node name in BC:",
+                      node, node.name,
+                      self.get_node_name(node))
+                node_name = node.name  # self.get_node_name(node)
 
                 f.write(f"${node_name}\n")
                 if BC.type == "Concentration":
@@ -350,39 +382,59 @@ class AdisTSwc(AdisTS):
                 f.write(f"*---------++++++++++\n")
 
                 for BC_data in BC._data:
-                    f.write(f"{timestamp_to_old_pamhyr_date_adists(int(BC_data[0]))} {BC_data[1]}\n")
+                    tmp = timestamp_to_old_pamhyr_date_adists(int(BC_data[0]))
+                    f.write(" ".join((f"{tmp}",
+                                      f"{BC_data[1]}\n")))
                 f.write(f"*\n")
 
         return True
 
-    def _export_ICs_AdisTS(self, study, repertory, POL_IC_default, qlog, POL_name):
+    def _export_ICs_AdisTS(self, study, repertory,
+                           POL_IC_default, qlog, POL_name):
         if qlog is not None:
             qlog.put("Export POL ICs files")
 
-        with adists_file_open(os.path.join(repertory, f"{POL_name}.INI"), "w+") as f:
+        with adists_file_open(os.path.join(
+            repertory, f"{POL_name}.INI"
+        ), "w+") as f:
             f.write(f"*État initial pour le polluant {POL_name}\n")
-            f.write(f"DEFAULT = {POL_IC_default.concentration} {POL_IC_default.eg} "+
-                    f"{POL_IC_default.em} {POL_IC_default.ed}\n")
+            f.write(" ".join(("DEFAULT =",
+                              f"{POL_IC_default.concentration}",
+                              f"{POL_IC_default.eg}",
+                              f"{POL_IC_default.em}",
+                              f"{POL_IC_default.ed}\n")))
 
             if len(POL_IC_default._data) != 0:
-                self._export_ICs_AdisTS_Spec(study, POL_IC_default._data, f, qlog)
+                self._export_ICs_AdisTS_Spec(
+                    study, POL_IC_default._data, f, qlog
+                )
 
-    def _export_ICs_AdisTS_Spec(self, study, pol_ics_spec_data, f, qlog, name="0"):
+    def _export_ICs_AdisTS_Spec(self, study, pol_ics_spec_data,
+                                f, qlog, name="0"):
         for ic_spec in pol_ics_spec_data:
-            f.write(f"{ic_spec.name} = {ic_spec.reach} {ic_spec.start_rk} {ic_spec.end_rk} " +
-                    f"{ic_spec.concentration} {ic_spec.eg} {ic_spec.em} {ic_spec.ed} {ic_spec.rate}")
+            f.write(" ".join((f"{ic_spec.name}",
+                              "=",
+                              f"{ic_spec.reach}",
+                              f"{ic_spec.start_rk}",
+                              f"{ic_spec.end_rk}",
+                              f"{ic_spec.concentration}",
+                              f"{ic_spec.eg}",
+                              f"{ic_spec.em}",
+                              f"{ic_spec.ed}",
+                              f"{ic_spec.rate}\n")))
 
         return True
 
     def _export_POL_Characteristics(self, study, pol_data, f, qlog, name="0"):
-        list_characteristics = ["type", "diametre", "rho", "porosity", "cdc_riv", "cdc_cas", "apd", "ac", "bc"]
+        list_characteristics = ["type", "diametre", "rho", "porosity",
+                                "cdc_riv", "cdc_cas", "apd", "ac", "bc"]
 
         if len(list_characteristics) == (len(pol_data[0])-1):
-            for l in range(len(list_characteristics)):
-                f.write(f"{list_characteristics[l]} = {pol_data[0][l]}\n")
+            for i in range(len(list_characteristics)):
+                f.write(f"{list_characteristics[i]} = {pol_data[0][i]}\n")
 
     def _export_D90(self, study, repertory, qlog=None, name="0"):
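Note (illustrative check, not part of the patch): the " ".join((...)) rewrites
above are layout changes only; they produce the same record text as the former
single f-strings, e.g. for the DEFAULT initial-condition line (values made up):

    parts = ("DEFAULT =", "1.5", "0.1", "0.2", "0.3\n")
    assert " ".join(parts) == "DEFAULT = 1.5 0.1 0.2 0.3\n"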
@@ -391,7 +443,9 @@ class AdisTSwc(AdisTS):
         if qlog is not None:
             qlog.put("Export D90 file")
 
-        with adists_file_open(os.path.join(repertory, f"{name}.D90"), "w+") as f:
+        with adists_file_open(
+            os.path.join(repertory, f"{name}.D90"), "w+"
+        ) as f:
             files.append(f"{name}.D90")
 
             f.write(f"*Diamètres caractéristiques du fond stable\n")
@@ -407,8 +461,11 @@ class AdisTSwc(AdisTS):
     def _export_d90_spec(self, study, d90_spec_data, f, qlog, name="0"):
         for d90_spec in d90_spec_data:
-            if (d90_spec.name is None) or (d90_spec.reach is None) or (d90_spec.start_rk is None) or \
-               (d90_spec.end_rk is None) or (d90_spec.d90 is None):
+            if (d90_spec.name is None
+                    or d90_spec.reach is None
+                    or d90_spec.start_rk is None
+                    or d90_spec.end_rk is None
+                    or d90_spec.d90 is None):
                 return
 
             edges = study.river.enable_edges()
@@ -420,7 +477,12 @@ class AdisTSwc(AdisTS):
             if id_reach not in id_edges:
                 return
 
-            f.write(f"{d90_spec.name} = {id_reach} {d90_spec.start_rk} {d90_spec.end_rk} {d90_spec.d90}\n")
+            f.write(" ".join((f"{d90_spec.name}",
+                              "=",
+                              f"{id_reach}",
+                              f"{d90_spec.start_rk}",
+                              f"{d90_spec.end_rk}",
+                              f"{d90_spec.d90}\n")))
 
     def _export_DIF(self, study, repertory, qlog=None, name="0"):
 
@@ -429,7 +491,9 @@ class AdisTSwc(AdisTS):
         files = []
 
         if qlog is not None:
             qlog.put("Export DIF file")
 
-        with adists_file_open(os.path.join(repertory, f"{name}.DIF"), "w+") as f:
+        with adists_file_open(
+            os.path.join(repertory, f"{name}.DIF"), "w+"
+        ) as f:
             files.append(f"{name}.DIF")
 
             f.write(f"*Définition des paramètres des fonctions de calcul du\n")
@@ -438,9 +502,15 @@ class AdisTSwc(AdisTS):
             f.write(f"*coefficient de diffusion\n")
 
             difAdisTS = study.river.dif_adists.DIF_AdisTS_List
 
             if difAdisTS[0].method != "generique":
-                f.write(f"defaut = {difAdisTS[0].method} {difAdisTS[0].dif }\n")
+                f.write(" ".join(("defaut =",
+                                  f"{difAdisTS[0].method}",
+                                  f"{difAdisTS[0].dif}\n")))
             else:
-                f.write(f"defaut = {difAdisTS[0].method} {difAdisTS[0].dif} {difAdisTS[0].b} {difAdisTS[0].c}\n")
+                f.write(" ".join(("defaut =",
+                                  f"{difAdisTS[0].method}",
+                                  f"{difAdisTS[0].dif}",
+                                  f"{difAdisTS[0].b}",
+                                  f"{difAdisTS[0].c}\n")))
 
             self._export_dif_spec(study, difAdisTS[0]._data, f, qlog)
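Note (illustrative, not part of the patch): inside these join tuples a missing
comma makes Python concatenate the two adjacent f-string literals, silently
gluing two fields together without a separator:

    method = "generique"
    assert " ".join((f"defaut =" f"{method}", "0.5")) == "defaut =generique 0.5"
    assert " ".join(("defaut =", f"{method}", "0.5")) == "defaut = generique 0.5"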
@@ -449,8 +519,12 @@ class AdisTSwc(AdisTS):
     def _export_dif_spec(self, study, dif_spec_data, f, qlog, name="0"):
         for dif_spec in dif_spec_data:
-            if (dif_spec.reach is None) or (dif_spec.start_rk is None) or \
-                (dif_spec.end_rk is None) or (dif_spec.dif is None) or (dif_spec.b is None) or (dif_spec.c is None):
+            if (dif_spec.reach is None
+                    or dif_spec.start_rk is None
+                    or dif_spec.end_rk is None
+                    or dif_spec.dif is None
+                    or dif_spec.b is None
+                    or dif_spec.c is None):
                 return
 
             edges = study.river.enable_edges()
@@ -463,28 +537,40 @@ class AdisTSwc(AdisTS):
                 return
 
             if dif_spec.method != "generique":
-                f.write(f"{dif_spec.method} = {id_reach} {dif_spec.start_rk} {dif_spec.end_rk} {dif_spec.dif}\n")
+                f.write(" ".join((f"{dif_spec.method}",
+                                  "=",
+                                  f"{id_reach}",
+                                  f"{dif_spec.start_rk}",
+                                  f"{dif_spec.end_rk}",
+                                  f"{dif_spec.dif}\n")))
             else:
-                f.write(f"{dif_spec.method} = {id_reach} {dif_spec.start_rk} {dif_spec.end_rk} {dif_spec.dif}" +
-                        f"{dif_spec.b} {dif_spec.c}\n")
+                f.write(" ".join((f"{dif_spec.method}",
+                                  "=",
+                                  f"{id_reach}",
+                                  f"{dif_spec.start_rk}",
+                                  f"{dif_spec.end_rk}",
+                                  f"{dif_spec.dif}",
+                                  f"{dif_spec.b}",
+                                  f"{dif_spec.c}\n")))
 
     def _export_NUM(self, study, repertory, qlog=None, name="0"):
 
-        dict_names = {"init_time":"start_date",
-                      "final_time":"end_date",
-                      "timestep":"dt0",
-                      "implicitation_parameter":"theta",
-                      "timestep_screen":"dtscr",
-                      "timestep_bin":"dtbin",
-                      "timestep_csv":"dtcsv",
-                      "timestep_mage":"dtMage",
-                      "initial_concentration":"c_initiale"}
+        dict_names = {"init_time": "start_date",
+                      "final_time": "end_date",
+                      "timestep": "dt0",
+                      "implicitation_parameter": "theta",
+                      "timestep_screen": "dtscr",
+                      "timestep_bin": "dtbin",
+                      "timestep_csv": "dtcsv",
+                      "timestep_mage": "dtMage",
+                      "initial_concentration": "c_initiale"}
 
         files = []
 
         if qlog is not None:
             qlog.put("Export NUM file")
 
-        with adists_file_open(os.path.join(repertory, f"{name}.NUM"), "w+") as f:
+        with adists_file_open(
+            os.path.join(repertory, f"{name}.NUM"), "w+"
+        ) as f:
             files.append(f"{name}.NUM")
 
             params = study.river.get_params(self.type).parameters
@@ -509,7 +595,9 @@ class AdisTSwc(AdisTS):
         return files
 
     def _export_outputrk(self, study, outputrk, f, qlog, name="0"):
-        if (outputrk.reach is None) or (outputrk.rk is None) or (outputrk.title is None):
+        if (outputrk.reach is None
+                or outputrk.rk is None
+                or outputrk.title is None):
             return
 
         edges = study.river.enable_edges()
@@ -529,7 +617,8 @@ class AdisTSwc(AdisTS):
     def read_bin(self, study, repertory, results, qlog=None, name="0"):
         repertory_results = os.path.join(repertory, "resultats")
 
-        files_bin_names = [el.split("/")[-1] for el in glob.glob(repertory_results+"/*.bin")]
+        files_bin_names = [el.split("/")[-1]
+                           for el in glob.glob(repertory_results+"/*.bin")]
         print("files names resultats: ", files_bin_names)
 
         ifilename = os.path.join(repertory_results, files_bin_names[0])
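Note (sketch, not part of the patch): the .bin result files read below appear to
be Fortran "unformatted sequential" output, where every record is framed by
4-byte length markers; the paired np.fromfile(f, dtype=np.int32, count=1) calls
tagged "(start)" and "(end)" only skip those markers. A hypothetical helper
showing that framing, assuming gfortran-style 4-byte markers:

    import numpy as np

    def read_record(f, dtype, count):
        """Read one Fortran unformatted sequential record (sketch only)."""
        head = np.fromfile(f, dtype=np.int32, count=1)   # leading byte count
        if head.size == 0:
            return None                                  # end of file
        payload = np.fromfile(f, dtype=dtype, count=count)
        np.fromfile(f, dtype=np.int32, count=1)          # trailing byte count
        return payload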
@@ -540,18 +629,18 @@
         with open(ifilename, 'rb') as f:
             # header
             # first line
-            data = np.fromfile(f, dtype=np.int32, count=1)  # line length (bytes) (start)
+            data = np.fromfile(f, dtype=np.int32, count=1)  # (start)
             data = np.fromfile(f, dtype=np.int32, count=3)
             ibmax = data[0]  # number of reaches
             ismax = data[1]  # total number of cross sections
             kbl = data[2] * -1  # block size for .BIN header
-            data = np.fromfile(f, dtype=np.int32, count=1)  # line length (bytes) (end)
+            data = np.fromfile(f, dtype=np.int32, count=1)  # (end)
             # second line
-            data = np.fromfile(f, dtype=np.int32, count=1)  # line length (bytes) (start)
+            data = np.fromfile(f, dtype=np.int32, count=1)  # (start)
             ibu = np.fromfile(f, dtype=np.int32, count=ibmax)
-            data = np.fromfile(f, dtype=np.int32, count=1)  # line length (bytes) (end)
+            data = np.fromfile(f, dtype=np.int32, count=1)  # (end)
             # third line
-            data = np.fromfile(f, dtype=np.int32, count=1)  # line length (bytes) (start)
+            data = np.fromfile(f, dtype=np.int32, count=1)  # (start)
             data = np.fromfile(f, dtype=np.int32, count=2 * ibmax)
             is1 = np.zeros(ibmax, dtype=np.int32)
             is2 = np.zeros(ibmax, dtype=np.int32)
@@ -572,7 +661,7 @@
                 r = results.river.add(i)
                 reachs.append(r)
 
-                is1[i] = data[2 * i] - 1  # first section of reach i
+                is1[i] = data[2 * i] - 1      # first section of reach i
                 is2[i] = data[2 * i + 1] - 1  # last section of reach i
 
                 key = (is1[i], is2[i])
@@ -582,33 +671,40 @@
 
             logger.debug(f"read_bin: iprofiles = {iprofiles}")
 
-            data = np.fromfile(f, dtype=np.int32, count=1)  # line length (bytes) (end)
+            data = np.fromfile(f, dtype=np.int32, count=1)  # (end)
             # fourth line
             pk = np.zeros(ismax, dtype=np.float32)
             for k in range(0, ismax, kbl):
-                data = np.fromfile(f, dtype=np.int32, count=1)  # line length (bytes) (start)
-                pk[k:min(k + kbl, ismax)] = np.fromfile(f, dtype=np.float32, count=min(k + kbl, ismax) - k)
-                data = np.fromfile(f, dtype=np.int32, count=1)  # line length (bytes) (end)
+                data = np.fromfile(f, dtype=np.int32, count=1)  # (start)
+                pk[k:min(k + kbl, ismax)] = np.fromfile(f,
+                                                        dtype=np.float32,
+                                                        count=min(
+                                                            k + kbl, ismax
+                                                        ) - k)
+                data = np.fromfile(f, dtype=np.int32, count=1)  # (end)
             # fifth line (useless)
-            data = np.fromfile(f, dtype=np.int32, count=1)  # line length (bytes) (start)
+            data = np.fromfile(f, dtype=np.int32, count=1)  # (start)
             zmin_OLD = np.fromfile(f, dtype=np.float32, count=1)[0]
-            data = np.fromfile(f, dtype=np.int32, count=1)  # line length (bytes) (end)
+            data = np.fromfile(f, dtype=np.int32, count=1)  # (end)
             # sixth line
             zf = np.zeros(ismax, dtype=np.float32)
             z = np.zeros(ismax * 3, dtype=np.float32)
             for k in range(0, ismax, kbl):
-                data = np.fromfile(f, dtype=np.int32, count=1)  # line length (bytes) (start)
-                z[3 * k:3 * min(k + kbl, ismax)] = np.fromfile(f, dtype=np.float32,
-                                                               count=3 * (min(k + kbl, ismax) - k))
+                data = np.fromfile(f, dtype=np.int32, count=1)  # (start)
+                z[3 * k:3 * min(k + kbl, ismax)] = \
+                    np.fromfile(f,
+                                dtype=np.float32,
+                                count=3 * (min(k + kbl, ismax) - k)
+                                )
                 # z[i*3+1] and z[i*3+2] are useless
-                data = np.fromfile(f, dtype=np.int32, count=1)  # line length (bytes) (end)
+                data = np.fromfile(f, dtype=np.int32, count=1)  # (end)
             zf = [z[i * 3] for i in range(ismax)]
             # seventh line (useless)
             for k in range(0, ismax, kbl):
-                data = np.fromfile(f, dtype=np.int32, count=1)  # line length (bytes) (start)
+                data = np.fromfile(f, dtype=np.int32, count=1)  # (start)
                 zero = np.fromfile(f, dtype=np.int32, count=ismax)
-                data = np.fromfile(f, dtype=np.int32, count=1)  # line length (bytes) (end)
+                data = np.fromfile(f, dtype=np.int32, count=1)  # (end)
             # end header
 
         def ip_to_r(i):
@@ -623,7 +719,8 @@
         def ip_to_ri(r, i):
             return i - reach_offset[r]
 
-        path_files = map(lambda file: os.path.join(repertory_results, file), files_bin_names)
+        path_files = map(lambda file: os.path.join(
+            repertory_results, file), files_bin_names)
 
         data_tmp = {}
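Note (illustrative values, not part of the patch): iprofiles maps the inclusive
cross-section index range (is1, is2) of each reach to that reach, and
reach_offset stores the first index of the reach. Assuming ip_to_r (whose body
lies outside the hunks) looks up the range containing a global profile index,
the two helpers behave like this:

    iprofiles = {(0, 9): "reach0", (10, 24): "reach1"}
    reach_offset = {"reach0": 0, "reach1": 10}

    def ip_to_r(i):
        return next(r for (i1, i2), r in iprofiles.items() if i1 <= i <= i2)

    assert ip_to_r(12) == "reach1"
    assert 12 - reach_offset["reach1"] == 2  # ip_to_ri: index local to the reach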
@@ -633,57 +730,63 @@
             with open(file_bin, 'rb') as f:
                 # header
                 # first line
-                data = np.fromfile(f, dtype=np.int32, count=1)  # line length (bytes) (start)
+                data = np.fromfile(f, dtype=np.int32, count=1)  # (start)
                 data = np.fromfile(f, dtype=np.int32, count=3)
                 ibmax = data[0]  # number of reaches
                 ismax = data[1]  # total number of cross sections
                 kbl = data[2] * -1  # block size for .BIN header
-                data = np.fromfile(f, dtype=np.int32, count=1)  # line length (bytes) (end)
+                data = np.fromfile(f, dtype=np.int32, count=1)  # (end)
                 # second line
-                data = np.fromfile(f, dtype=np.int32, count=1)  # line length (bytes) (start)
+                data = np.fromfile(f, dtype=np.int32, count=1)  # (start)
                 ibu = np.fromfile(f, dtype=np.int32, count=ibmax)
-                data = np.fromfile(f, dtype=np.int32, count=1)  # line length (bytes) (end)
+                data = np.fromfile(f, dtype=np.int32, count=1)  # (end)
                 # third line
-                data = np.fromfile(f, dtype=np.int32, count=1)  # line length (bytes) (start)
+                data = np.fromfile(f, dtype=np.int32, count=1)  # (start)
                 data = np.fromfile(f, dtype=np.int32, count=2 * ibmax)
                 is1 = np.zeros(ibmax, dtype=np.int32)
                 is2 = np.zeros(ibmax, dtype=np.int32)
                 for i in range(ibmax):
-                    is1[i] = data[2 * i]  # first section of reach i (FORTRAN numbering)
-                    is2[i] = data[2 * i + 1]  # last section of reach i (FORTRAN numbering)
-                data = np.fromfile(f, dtype=np.int32, count=1)  # line length (bytes) (end)
+                    # first section of reach i (FORTRAN numbering)
+                    is1[i] = data[2 * i]
+                    # last section of reach i (FORTRAN numbering)
+                    is2[i] = data[2 * i + 1]
+                data = np.fromfile(f, dtype=np.int32, count=1)  # (end)
                 # fourth line
                 pk = np.zeros(ismax, dtype=np.float32)
                 for k in range(0, ismax, kbl):
-                    data = np.fromfile(f, dtype=np.int32, count=1)  # line length (bytes) (start)
-                    pk[k:min(k + kbl, ismax)] = np.fromfile(f, dtype=np.float32, count=min(k + kbl, ismax) - k)
-                    data = np.fromfile(f, dtype=np.int32, count=1)  # line length (bytes) (end)
+                    data = np.fromfile(f, dtype=np.int32, count=1)  # (start)
+                    pk[k:min(k + kbl, ismax)] = np.fromfile(
+                        f, dtype=np.float32, count=min(k + kbl, ismax) - k
+                    )
+                    data = np.fromfile(f, dtype=np.int32, count=1)  # (end)
                 # fifth line (useless)
-                data = np.fromfile(f, dtype=np.int32, count=1)  # line length (bytes) (start)
+                data = np.fromfile(f, dtype=np.int32, count=1)  # (start)
                 zmin_OLD = np.fromfile(f, dtype=np.float32, count=1)[0]
-                data = np.fromfile(f, dtype=np.int32, count=1)  # line length (bytes) (end)
+                data = np.fromfile(f, dtype=np.int32, count=1)  # (end)
                 # sixth line
                 zf = np.zeros(ismax, dtype=np.float32)
                 z = np.zeros(ismax * 3, dtype=np.float32)
                 for k in range(0, ismax, kbl):
-                    data = np.fromfile(f, dtype=np.int32, count=1)  # line length (bytes) (start)
-                    z[3 * k:3 * min(k + kbl, ismax)] = np.fromfile(f, dtype=np.float32,
-                                                                   count=3 * (min(k + kbl, ismax) - k))
+                    data = np.fromfile(f, dtype=np.int32, count=1)  # (start)
+                    z[3 * k:3 * min(k + kbl, ismax)] = np.fromfile(
+                        f, dtype=np.float32,
+                        count=3 * (min(k + kbl, ismax) - k)
+                    )
                     # z[i*3+1] and z[i*3+2] are useless
-                    data = np.fromfile(f, dtype=np.int32, count=1)  # line length (bytes) (end)
+                    data = np.fromfile(f, dtype=np.int32, count=1)  # (end)
                 zf = [z[i * 3] for i in range(ismax)]
                 # seventh line (useless)
                 for k in range(0, ismax, kbl):
-                    data = np.fromfile(f, dtype=np.int32, count=1)  # line length (bytes) (start)
+                    data = np.fromfile(f, dtype=np.int32, count=1)  # (start)
                     zero = np.fromfile(f, dtype=np.int32, count=ismax)
-                    data = np.fromfile(f, dtype=np.int32, count=1)  # line length (bytes) (end)
+                    data = np.fromfile(f, dtype=np.int32, count=1)  # (end)
                 # end header
 
                 # data
-                data = np.fromfile(f, dtype=np.int32, count=1)  # line length (bytes) (start)
+                data = np.fromfile(f, dtype=np.int32, count=1)  # (start)
                 while data.size > 0:
                     ismax = np.fromfile(f, dtype=np.int32, count=1)[0]
                     t = np.fromfile(f, dtype=np.float64, count=1)[0]
-                    if not t in data_tmp[key_pol]:
+                    if t not in data_tmp[key_pol]:
                         data_tmp[key_pol][t] = {}
                     c = np.fromfile(f, dtype=np.byte, count=1)
                     # possible values :
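Note (sketch with placeholder names, not part of the patch): the loop being
modified here fills data_tmp nested as pollutant file name -> output time ->
physical-variable code -> per-section array, so a single profile value is
addressed as data_tmp[pol][t]["C"][p_i]:

    import numpy as np

    data_tmp = {
        "pollutant_A": {
            0.0: {"C": np.zeros(5, dtype=np.float32)},
            600.0: {"C": np.zeros(5, dtype=np.float32)},
        },
    }
    assert data_tmp["pollutant_A"][600.0]["C"].shape == (5,)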
@@ -692,61 +795,63 @@
                     phys_var = bytearray(c).decode()
                     data_tmp[key_pol][t][phys_var] = {}
                     real_data = np.fromfile(f, dtype=np.float32, count=ismax)
-                    data = np.fromfile(f, dtype=np.int32, count=1)  # line length (bytes) (end)
+                    data = np.fromfile(f, dtype=np.int32, count=1)  # (end)
                     data_tmp[key_pol][t][phys_var] = real_data
-                    data = np.fromfile(f, dtype=np.int32, count=1)  # line length (bytes) (start)
+                    data = np.fromfile(f, dtype=np.int32, count=1)  # (start)
                 # end data
 
-        ###print("dta tmp AAA")
-        ###print("-----------")
-        ###print(data_tmp["AAA-silt"])
-
         pollutants_keys = list(data_tmp.keys())
         timestamps_keys = list(data_tmp[pollutants_keys[0]].keys())
-        phys_data_names = list(data_tmp[pollutants_keys[0]][timestamps_keys[0]].keys())
+        phys_data_names = list(data_tmp[pollutants_keys[0]]
+                               [timestamps_keys[0]].keys())
         type_pol_index = len(phys_data_names)
 
-        ###print("pol keys: ", pollutants_keys)
-        ###print("t keys: ", timestamps_keys)
-        ###print("phys var: ", phys_data_names)
-        ###print("phys data names mass zero:", list(data_tmp[pollutants_keys[1]][timestamps_keys[0]].keys()))
-        #print("set timestamps keys: ", set(timestamps_keys))
-        #print("isma")
-        ###print("iprofiles: ", iprofiles)
+        # ##print("pol keys: ", pollutants_keys)
+        # ##print("t keys: ", timestamps_keys)
+        # ##print("phys var: ", phys_data_names)
+        # ##print("phys data names mass zero:",
+        #         list(data_tmp[pollutants_keys[1]][timestamps_keys[0]].keys()))
+        # print("set timestamps keys: ", set(timestamps_keys))
+        # print("isma")
+        # ##print("iprofiles: ", iprofiles)
 
         print("reading bin files is ok =======")
 
         for i in range(ismax):
-            #print("first i: ", i)
+            # print("first i: ", i)
             reach = ip_to_r(i)
-            #print("reach i:", reach)
-            #print("second i: ", i)
+            # print("reach i:", reach)
+            # print("second i: ", i)
             p_i = ip_to_ri(reach, i)
 
             for t_data in timestamps_keys:
                 pol_view = []
                 for pol in pollutants_keys:
-                    #print("pol results: ", type(list(data_tmp[pol][t_data].values())))
-                    pol_view.append(tuple( list(map(lambda data_el: data_el[p_i], list(data_tmp[pol][t_data].values()))) ))
+                    # print("pol results: ",
+                    #       type(list(data_tmp[pol][t_data].values())))
+                    pol_view.append(tuple(list(map(
+                        lambda data_el: data_el[p_i],
+                        list(data_tmp[pol][t_data].values())
+                    ))))
 
                 reach.set(p_i, t_data, "pols", pol_view)
 
-                ###print("pi_tmp: ", pi_tmp)
-                ###print("pol view: ", pol_view)
-                ###print("reach from i: ", reach_tmp)
-            #print("pol view: ", pol_view)
-            #print("results: ", results)
+                # ##print("pi_tmp: ", pi_tmp)
+                # ##print("pol view: ", pol_view)
+                # ##print("reach from i: ", reach_tmp)
+            # print("pol view: ", pol_view)
+            # print("results: ", results)
 
         print("'''''''reading bin files is ok =======")
 
         results.set("timestamps", set(timestamps_keys))
-        #print("------------------------set timestamps results meta data: ", set(timestamps_keys))
+        # print("set timestamps results meta data: ", set(timestamps_keys))
 
-        ###print("debug profiles for draw:")
-        ###print("------------------------")
-        ###print(data_tmp["AAA-silt"][0.0]["C"][0])
-        ###print(data_tmp["AAA-silt"][600.0]["C"][0])
-        ###print(data_tmp["AAA-silt"][1205.7208829578194]["C"][0])
-        ###print("========================")
+        # ##print("debug profiles for draw:")
+        # ##print("------------------------")
+        # ##print(data_tmp["AAA-silt"][0.0]["C"][0])
+        # ##print(data_tmp["AAA-silt"][600.0]["C"][0])
+        # ##print(data_tmp["AAA-silt"][1205.7208829578194]["C"][0])
##print("========================") print("****reading bin files is ok =======") @@ -787,16 +892,11 @@ class AdisTSwc(AdisTS): files = files + func(study, repertory, qlog, name=name) self.export_additional_files(study, repertory, qlog, name=name) - self._export_REP(study, repertory, mage_rep, files, qlog, name=name) + self._export_REP(study, repertory, mage_rep, + files, qlog, name=name) return True except Exception as e: logger.error(f"Failed to export study to {self._type}") logger_exception(e) return False - - - - - - diff --git a/src/View/InitialConditionsAdisTS/Window.py b/src/View/InitialConditionsAdisTS/Window.py index b5105a6eb92928652d274e808077966adf2d774a..f02ee6d74337dae5dc585211c5bae65d0f2cc936 100644 --- a/src/View/InitialConditionsAdisTS/Window.py +++ b/src/View/InitialConditionsAdisTS/Window.py @@ -89,7 +89,7 @@ class InitialConditionsAdisTSWindow(PamhyrWindow): self._hash_data.append(data) - self._ics_adists_lst = study.river.initial_conditions_adists + self._ics_adists_lst = study.river.ic_adists self.setup_table() diff --git a/src/View/Pollutants/Table.py b/src/View/Pollutants/Table.py index c3bbcd4dcd7916642c45545b65ffed93ad59c7fc..827ea3ae4d1ec80675452b5bb0b92e0a62e47706 100644 --- a/src/View/Pollutants/Table.py +++ b/src/View/Pollutants/Table.py @@ -90,7 +90,7 @@ class TableModel(PamhyrTableModel): self._undo.push( AddCommand( - self._lst, row, self._data.initial_conditions_adists + self._lst, row, self._data.ic_adists ) ) diff --git a/src/View/Pollutants/Window.py b/src/View/Pollutants/Window.py index a87c791fb1ac5f8a56c1173488ffe99f7a3c9558..840c9fd300814d7697bad7e53e4ec512d6c5a629 100644 --- a/src/View/Pollutants/Window.py +++ b/src/View/Pollutants/Window.py @@ -200,7 +200,7 @@ class PollutantsWindow(PamhyrWindow): pollutant_id = self._pollutants_lst.get(row).id ics_adists = next(filter(lambda x: x.pollutant == pollutant_id, - self._study.river.initial_conditions_adists.lst)) + self._study.river.ic_adists.lst)) if self.sub_window_exists( InitialConditionsAdisTSWindow, diff --git a/src/config.py b/src/config.py index 71b85a979221e47c7125f38d89776432b2298cf6..1209cc1686291210dc09661df1758809396446bc 100644 --- a/src/config.py +++ b/src/config.py @@ -41,7 +41,6 @@ class Config(SQL): self.filename = Config.filename() self.set_default_value() - logging.info(f"Configuration file : {self.filename}") super(Config, self).__init__(filename=self.filename) diff --git a/src/tools.py b/src/tools.py index 12308a7b23afc1c27f0c4c48f7a9a21f84a8558b..8c14e267985a53f04e53e83291bed41c16fb6cdd 100644 --- a/src/tools.py +++ b/src/tools.py @@ -299,6 +299,7 @@ def timestamp_to_old_pamhyr_date(time: int): return s + def timestamp_to_old_pamhyr_date_adists(time: int): t0 = datetime.fromtimestamp(0) @@ -318,6 +319,7 @@ def timestamp_to_old_pamhyr_date_adists(time: int): return s + def get_user_name(): if with_pwd: return pwd.getpwuid(os.getuid()).pw_gecos