Commit bafc00a5 authored by Thibault Hallouin's avatar Thibault Hallouin
Browse files

move expected outputs into CSV files

this way, all 51 members can be used without clogging the scripts
1 merge request: !1 release v0.1.0.0
Pipeline #44298 passed with stage
in 5 minutes and 35 seconds
Showing with 50 additions and 91 deletions
+50 -91
0.6077170418006,0.0000000000000,0.0000000000000,0.0000000000000,0.0000000000000,0.0032154340836,0.0000000000000,0.0000000000000,0.0000000000000,0.0000000000000,0.0000000000000,0.0000000000000,0.0032154340836,0.0000000000000,0.0032154340836,0.0000000000000,0.0000000000000,0.0000000000000,0.0000000000000,0.0000000000000,0.0000000000000,0.0000000000000,0.0000000000000,0.0000000000000,0.0000000000000,0.0000000000000,0.0000000000000,0.0000000000000,0.0000000000000,0.0032154340836,0.0000000000000,0.0000000000000,0.0000000000000,0.0000000000000,0.0000000000000,0.0064308681672,0.0000000000000,0.0000000000000,0.0032154340836,0.0064308681672,0.0000000000000,0.0000000000000,0.0000000000000,0.0032154340836,0.0000000000000,0.0000000000000,0.0032154340836,0.0032154340836,0.0032154340836,0.0000000000000,0.0064308681672,0.3440514469453
0.7108499247114
0.8017176997760
0.7130661114003
nan
764.4471750114835,2578.1382636655953
0.6621887740287,0.4360388849930
......@@ -5,37 +5,25 @@ import evalhyd
# load some predicted and observed streamflow
_prd = numpy.genfromtxt("./data/q_prd.csv", delimiter=',')[:5, :]
_prd = numpy.genfromtxt("./data/q_prd.csv", delimiter=',')[:, :]
_obs = numpy.genfromtxt("./data/q_obs.csv", delimiter=',')[numpy.newaxis, :]
# list all available deterministic metrics
_all_metrics = (
# errors-based
'RMSE',
# efficiencies-based
'NSE', 'KGE', 'KGEPRIME'
)
class TestMetrics(unittest.TestCase):
expected = {
'RMSE':
[[[777.03427238]],
[[776.87847854]],
[[777.80021654]],
[[778.15108180]],
[[778.61486998]]],
'NSE':
[[[0.71891219]],
[[0.71902490]],
[[0.71835777]],
[[0.71810361]],
[[0.71776748]]],
'KGE':
[[[0.74808767]],
[[0.74610620]],
[[0.74411103]],
[[0.74301085]],
[[0.74176777]]],
'KGEPRIME':
[[[0.81314075]],
[[0.81277485]],
[[0.81203242]],
[[0.81178671]],
[[0.81138658]]]
metric: (
numpy.genfromtxt(f"./expected/evald/{metric}.csv", delimiter=',')
[:, numpy.newaxis, numpy.newaxis]
) for metric in _all_metrics
}
def test_metrics_2d(self):
......@@ -103,11 +91,7 @@ class TestMasking(unittest.TestCase):
def test_conditions(self):
with self.subTest(conditions="observed streamflow values"):
cdt = numpy.array([["q_obs{<2000,>3000}"],
["q_obs{<2000,>3000}"],
["q_obs{<2000,>3000}"],
["q_obs{<2000,>3000}"],
["q_obs{<2000,>3000}"]],
cdt = numpy.array([["q_obs{<2000,>3000}"]] * _prd.shape[0],
dtype='|S32')
msk = (_obs[0] < 2000) | (_obs[0] > 3000)
......@@ -122,11 +106,7 @@ class TestMasking(unittest.TestCase):
)
with self.subTest(conditions="observed streamflow statistics"):
cdt = numpy.array([["q_obs{>=median}"],
["q_obs{>=median}"],
["q_obs{>=median}"],
["q_obs{>=median}"],
["q_obs{>=median}"]],
cdt = numpy.array([["q_obs{>=median}"]] * _prd.shape[0],
dtype='|S32')
msk = _obs[0] >= numpy.median(_obs)
......@@ -141,8 +121,8 @@ class TestMasking(unittest.TestCase):
)
with self.subTest(conditions="time indices"):
cdt = numpy.array([["t{20:311}"],
["t{20:100,100:311}"],
cdt = numpy.array([["t{20:311}"]] * (_prd.shape[0] - 4) +
[["t{20:100,100:311}"],
["t{20,21,22,23,24:311}"],
["t{20,21,22,23:309,309,310}"],
["t{20:80,80,81,82,83:311}"]],
......
......@@ -7,7 +7,7 @@ import evalhyd
# load some predicted and observed streamflow
_prd = (
numpy.genfromtxt("./data/q_prd.csv", delimiter=',')
[:5, :][numpy.newaxis, numpy.newaxis, ...]
[numpy.newaxis, numpy.newaxis, ...]
)
_obs = numpy.genfromtxt("./data/q_obs.csv", delimiter=',')[numpy.newaxis, :]
......@@ -29,71 +29,38 @@ _all_metrics = (
class TestMetrics(unittest.TestCase):
expected_thr = {
'BS':
[[[[[0.1081672, 0.073954980, 0.08681672, numpy.nan]]]]],
'BSS':
[[[[[0.56240422, 0.66612211, 0.56288391, numpy.nan]]]]],
'BS_CRD':
[[[[[[0.01335634, 0.15237434, 0.24718520],
[0.00550861, 0.15305671, 0.22150309],
[0.00753750, 0.11933328, 0.19861250],
[numpy.nan, numpy.nan, numpy.nan]]]]]],
'BS_LBD':
[[[[[[0.01244569, 0.14933386, 0.24505537],
[0.00801337, 0.14745568, 0.21339730],
[0.01719462, 0.10479711, 0.17441921],
[numpy.nan, numpy.nan, numpy.nan]]]]]]
metric: (
numpy.genfromtxt(f"./expected/evalp/{metric}.csv", delimiter=',')
[numpy.newaxis, numpy.newaxis, numpy.newaxis, numpy.newaxis, ...]
) for metric in ('BS', 'BSS', 'BS_CRD', 'BS_LBD')
}
expected_qtl = {
'QS':
[[[[[321.1607717, 294.3494105, 265.70418006,
236.15648446, 206.03965702]]]]],
'CRPS':
[[[[176.63504823]]]]
metric: (
numpy.genfromtxt(f"./expected/evalp/{metric}.csv", delimiter=',')
[numpy.newaxis, numpy.newaxis, numpy.newaxis, numpy.newaxis, ...]
) for metric in ('QS', 'CRPS')
}
expected_ct = {
'POD': [[[[[[1.00000000, 1.00000000, 1.00000000, numpy.nan],
[0.86330935, 0.87378641, 0.75294118, numpy.nan],
[0.86330935, 0.87378641, 0.75294118, numpy.nan],
[0.86330935, 0.87378641, 0.75294118, numpy.nan],
[0.86330935, 0.87378641, 0.75294118, numpy.nan],
[0.86330935, 0.87378641, 0.75294118, numpy.nan]]]]]],
'POFD': [[[[[[1.00000000, 1.00000000, 1.00000000, numpy.nan],
[0.08720930, 0.03846154, 0.02654867, numpy.nan],
[0.08720930, 0.03846154, 0.02654867, numpy.nan],
[0.08720930, 0.03846154, 0.02654867, numpy.nan],
[0.08720930, 0.03846154, 0.02654867, numpy.nan],
[0.08139535, 0.03846154, 0.02654867, numpy.nan]]]]]],
'FAR': [[[[[[0.55305466, 0.66881029, 0.72668810, numpy.nan],
[0.11111111, 0.08163265, 0.08571429, numpy.nan],
[0.11111111, 0.08163265, 0.08571429, numpy.nan],
[0.11111111, 0.08163265, 0.08571429, numpy.nan],
[0.11111111, 0.08163265, 0.08571429, numpy.nan],
[0.10447761, 0.08163265, 0.08571429, numpy.nan]]]]]],
'CSI': [[[[[[0.44694534, 0.33118971, 0.27331190, numpy.nan],
[0.77922078, 0.81081081, 0.70329670, numpy.nan],
[0.77922078, 0.81081081, 0.70329670, numpy.nan],
[0.77922078, 0.81081081, 0.70329670, numpy.nan],
[0.77922078, 0.81081081, 0.70329670, numpy.nan],
[0.78431373, 0.81081081, 0.70329670, numpy.nan]]]]]],
'ROCSS': [[[[[0.71084992, 0.80171770, 0.70640292, numpy.nan]]]]]
metric: (
numpy.genfromtxt(f"./expected/evalp/{metric}.csv", delimiter=',')
[numpy.newaxis, numpy.newaxis, numpy.newaxis, numpy.newaxis, ...]
) for metric in ('POD', 'POFD', 'FAR', 'CSI', 'ROCSS')
}
expected_rk = {
'RANK_HIST': [[[[[0.607717, 0., 0., 0., 0., 0.392283]]]]],
'DS': [[[[133.1621622]]]],
'AS': [[[[0.4783321]]]]
metric: (
numpy.genfromtxt(f"./expected/evalp/{metric}.csv", delimiter=',')
[numpy.newaxis, numpy.newaxis, numpy.newaxis, numpy.newaxis, ...]
) for metric in ('RANK_HIST', 'DS', 'AS')
}
expected_itv = {
'CR': [[[[[0.00321543, 0.00321543]]]]],
'AW': [[[[[1.58392283, 4.50160772]]]]],
'AWN': [[[[[0.00126077, 0.00358319]]]]],
'AWI': [[[[[0.99694518, 0.99828901]]]]],
'WS': [[[[[758.45668351, 2637.85209003]]]]],
'WSS': [[[[[0.66483599, 0.42297664]]]]]
metric: (
numpy.genfromtxt(f"./expected/evalp/{metric}.csv", delimiter=',')
[numpy.newaxis, numpy.newaxis, numpy.newaxis, numpy.newaxis, ...]
) for metric in ('CR', 'AW', 'AWN', 'AWI', 'WS', 'WSS')
}
def test_thresholds_metrics(self):
......@@ -133,6 +100,11 @@ class TestMetrics(unittest.TestCase):
def test_intervals_metrics(self):
lvl = numpy.array([30., 80.])
for metric in self.expected_itv.keys():
numpy.set_printoptions(precision=13)
m = evalhyd.evalp(_obs, _prd, [metric], c_lvl=lvl)[0][0, 0, 0]
numpy.savetxt(f"./expected/evalp/{metric}.csv", m, delimiter=',', fmt="%.13f")
with self.subTest(metric=metric):
numpy.testing.assert_almost_equal(
evalhyd.evalp(_obs, _prd, [metric], c_lvl=lvl)[0],
......
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment