Commit 596cd24c authored by Pierre-Antoine Rouby

Sediment: Add GRA file reading.
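The 0.GRA output is read record by record: each record starts and ends with a 4-byte length marker, which is what the newline()/endline() helpers in read_gra() below consume, with the payload read in between. This looks like Fortran unformatted sequential output, though that is an inference from the reader, not something stated in the commit. A minimal sketch of the framing for the first (metadata) record, with a placeholder path:

import numpy as np

# Illustration only: the path is a placeholder; the layout (4-byte marker,
# payload, 4-byte marker) mirrors what newline(), read_int(3) and endline()
# consume for the first record in read_gra() below.
with open("path/to/0.GRA", "rb") as f:
    head = np.fromfile(f, dtype=np.int32, count=1)   # leading record marker
    meta = np.fromfile(f, dtype=np.int32, count=3)   # nb_reach, nb_profile, mage_version
    tail = np.fromfile(f, dtype=np.int32, count=1)   # trailing record marker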

Showing with 163 additions and 2 deletions
@@ -98,6 +98,9 @@ class River(object):
    def reach(self, id):
        return self._reachs[id]

    def has_reach(self, id):
        return 0 <= id < len(self._reachs)

    def add(self, reach_id):
        reachs = self._study.river.enable_edges()
@@ -101,7 +101,6 @@ class Mage(AbstractSolver):
        l = super(Mage, self).cmd_args(study)

        l.append("-r")
        l.append("-fp=1")
        return l
@@ -475,7 +474,6 @@ class Mage8(Mage):
            f.write(f"{name} {value}\n")

        return files

    @timer
@@ -555,6 +553,7 @@ class Mage8(Mage):
    # RESULTS #
    ###########

    @timer
    def read_bin(self, study, repertory, results, qlog = None):
        logger.info(f"read_bin: Start ...")
@@ -675,3 +674,162 @@ class Mage8(Mage):
        logger.debug(reachs[0].profiles[0]._data)
        results.set("timestamps", ts)
        logger.info(f"read_bin: ... end with {len(ts)} timestamp read")
    @timer
    def read_gra(self, study, repertory, results, qlog = None):
        # if not study.river.has_sediment():
        #     return

        logger.info(f"read_gra: Start ...")

        with mage_file_open(os.path.join(repertory, f"0.GRA"), "r") as f:
            newline = lambda: np.fromfile(f, dtype=np.int32, count=1)
            endline = lambda: np.fromfile(f, dtype=np.int32, count=1)
            read_int = lambda size: np.fromfile(f, dtype=np.int32, count=size)
            read_float = lambda size: np.fromfile(f, dtype=np.float32, count=size)
            read_float64 = lambda size: np.fromfile(f, dtype=np.float64, count=size)

            # Meta data (1st line)
            newline()
            data = read_int(3)
            nb_reach = data[0]
            nb_profile = data[1]
            mage_version = data[2]
            logger.debug(f"read_gra: nb_reach = {nb_reach}")
            logger.debug(f"read_gra: nb_profile = {nb_profile}")
            logger.debug(f"read_gra: mage_version = {mage_version}")

            if mage_version <= 80:
                msg = (
                    "Read GRA files: " +
                    f"Possible incompatible mage version '{mage_version}', " +
                    "please check your solver configuration..."
                )
                logger.warning(msg)
                if qlog is not None:
                    qlog.put("[WARNING] " + msg)

            results.set("gra_solver_version", f"Mage8 ({mage_version})")
            results.set("gra_nb_reach", f"{nb_reach}")
            results.set("gra_nb_profile", f"{nb_profile}")
            endline()

            # Reach information (2nd line)
            newline()
            reachs = []
            iprofiles = {}
            reach_offset = {}

            data = read_int(2*nb_reach)
            for i in range(nb_reach):
                # Get results reach to reach list
                r = results.river.reach(i)
                reachs.append(r)

                # ID of first and last reach profiles
                i1 = data[2*i] - 1
                i2 = data[2*i+1] - 1

                # Add profile id correspondance to reach
                key = (i1, i2)
                iprofiles[key] = r

                # Profile ID offset
                reach_offset[r] = i1

            logger.debug(f"read_gra: iprofiles = {iprofiles}")
            endline()

            # X (3rd line)
            newline()
            _ = read_float(nb_profile)
            endline()

            # npts (4th line)
            newline()
            _ = read_int(nb_profile)
            endline()

            # Data
            ip_to_r = lambda i: iprofiles[
                next(
                    filter(
                        lambda k: k[0] <= i <= k[1],
                        iprofiles
                    )
                )
            ]
            ip_to_ri = lambda r, i: i - reach_offset[r]

            ts = set()
            ind = 0
            end = False

            newline()
            while not end:
                n = read_int(1)[0]
                timestamp = read_float64(1)[0]
                logger.debug(f"read_gra: timestamp = {timestamp} sec")
                ts.add(timestamp)
                endline()

                for i in range(n):
                    newline()
                    nsl = read_int(1)[0]
                    endline()

                    # Get current profile id
                    reach = ip_to_r(i)
                    ri = ip_to_ri(reach, i)

                    if nsl > 1:
                        logger.warning(
                            "read_gra: " +
                            "Multiple sediment layers for one profile " +
                            "is not implemented yet..."
                        )

                    for j in range(nsl):
                        newline()
                        nl = read_int(1)[0]
                        endline()

                        sl = []
                        for k in range(nl):
                            newline()
                            data = read_float64(3)
                            endline()

                            h = data[0]
                            d50 = data[1]
                            sigma = data[2]
                            sl.append((h, d50, sigma))

                        reach.set(ri, timestamp, "sl", sl)

                ind += 1
                end = newline().size <= 0

        logger.debug(reachs[0].profiles[0]._data)
        results.set("timestamps", ts)
        logger.info(f"read_gra: ... end with {len(ts)} timestamp read")
    @timer
    def results(self, study, repertory, qlog = None):
        results = super(Mage8, self).results(study, repertory, qlog)
        self.read_gra(study, repertory, results, qlog)

        return results
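One detail worth calling out for review: read_gra() maps a global profile index back to its reach through the (first, last) profile-index keys collected from the second GRA record. That mapping can be exercised in isolation; the dictionary contents below are made-up toy values, only the two lambdas are copied from the diff.

# Toy data, not read from a GRA file.
iprofiles = {(0, 2): "reach A", (3, 5): "reach B"}
reach_offset = {"reach A": 0, "reach B": 3}

# Copied from read_gra(): find the reach whose (first, last) range contains i,
# then convert the global profile index to a reach-local index.
ip_to_r = lambda i: iprofiles[next(filter(lambda k: k[0] <= i <= k[1], iprofiles))]
ip_to_ri = lambda r, i: i - reach_offset[r]

r = ip_to_r(4)            # -> "reach B"
print(r, ip_to_ri(r, 4))  # -> reach B 1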