Commit 80cf8e5d authored by Thibault Hallouin

add unit tests for Python bindings

Showing with 260 additions and 0 deletions
import unittest

import evalhyd


if __name__ == '__main__':
    test_loader = unittest.TestLoader()
    test_suite = unittest.TestSuite()

    all_tests = test_loader.discover('.', pattern='test_*.py')
    test_suite.addTests(all_tests)

    runner = unittest.TextTestRunner(verbosity=2)
    runner.run(test_suite)
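# note: assuming this runner sits alongside the test modules, the same suite
# can also be run with the standard unittest CLI, e.g.
# `python -m unittest discover -p "test_*.py" -v`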
import unittest
import numpy
import evalhyd
# load some predicted and observed streamflow
_prd = numpy.genfromtxt("./data/q_prd.csv", delimiter=',')[:5, :]
_obs = numpy.genfromtxt("./data/q_obs.csv", delimiter=',')[numpy.newaxis, :]
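# note (an assumption read off the indexing above): evalhyd.evald() is
# exercised with 2D inputs shaped (series, time steps), i.e. five predicted
# series against a single observed series given a leading axis, and with the
# matching 1D slices in the 1D test below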
class TestMetrics(unittest.TestCase):

    expected = {
        'RMSE':
            [[777.03427238],
             [776.87847854],
             [777.80021654],
             [778.15108180],
             [778.61486998]],
        'NSE':
            [[0.71891219],
             [0.71902490],
             [0.71835777],
             [0.71810361],
             [0.71776748]],
        'KGE':
            [[0.74808767],
             [0.74610620],
             [0.74411103],
             [0.74301085],
             [0.74176777]],
        'KGEPRIME':
            [[0.81314075],
             [0.81277485],
             [0.81203242],
             [0.81178671],
             [0.81138658]]
    }

    def test_metrics_2d(self):
        for metric in self.expected.keys():
            with self.subTest(metric=metric):
                numpy.testing.assert_almost_equal(
                    evalhyd.evald(_obs, _prd, [metric])[0],
                    self.expected[metric]
                )

    def test_metrics_1d(self):
        for metric in self.expected.keys():
            # TODO: fix failure for KGE and KGEPRIME with 1D pytensors
            if metric not in ('KGE', 'KGEPRIME'):
                with self.subTest(metric=metric):
                    numpy.testing.assert_almost_equal(
                        evalhyd.evald(_obs[0], _prd[0], [metric])[0],
                        self.expected[metric][0]
                    )
class TestTransform(unittest.TestCase):

    def test_transform_sqrt(self):
        numpy.testing.assert_almost_equal(
            evalhyd.evald(_obs, _prd, ["NSE"], "sqrt")[0],
            evalhyd.evald(_obs ** 0.5, _prd ** 0.5, ["NSE"])[0]
        )

    def test_transform_inv(self):
        eps = 0.01 * numpy.mean(_obs)
        numpy.testing.assert_almost_equal(
            evalhyd.evald(_obs, _prd, ["NSE"], "inv")[0],
            evalhyd.evald(1 / (_obs + eps), 1 / (_prd + eps), ["NSE"])[0]
        )

    def test_transform_log(self):
        eps = 0.01 * numpy.mean(_obs)
        numpy.testing.assert_almost_equal(
            evalhyd.evald(_obs, _prd, ["NSE"], "log")[0],
            evalhyd.evald(numpy.log(_obs + eps), numpy.log(_prd + eps),
                          ["NSE"])[0]
        )

    def test_transform_pow(self):
        numpy.testing.assert_almost_equal(
            evalhyd.evald(_obs, _prd, ["NSE"], "pow", exponent=0.3)[0],
            evalhyd.evald(_obs ** 0.3, _prd ** 0.3, ["NSE"])[0]
        )
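# note: the 'inv' and 'log' tests above rely on the assumption that evalhyd
# adds an offset equal to 1% of the mean observed flow before applying the
# transform, which is why manually transforming the inputs with the same eps
# reproduces the built-in behaviour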
if __name__ == '__main__':
    test_loader = unittest.TestLoader()
    test_suite = unittest.TestSuite()

    test_suite.addTests(
        test_loader.loadTestsFromTestCase(TestMetrics))
    test_suite.addTests(
        test_loader.loadTestsFromTestCase(TestTransform))

    runner = unittest.TextTestRunner(verbosity=2)
    runner.run(test_suite)
import unittest
import numpy
import evalhyd
# load some predicted and observed streamflow
_prd = (
    numpy.genfromtxt("./data/q_prd.csv", delimiter=',')
    [:5, :][numpy.newaxis, numpy.newaxis, ...]
)
_obs = numpy.genfromtxt("./data/q_obs.csv", delimiter=',')[numpy.newaxis, :]
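# note (an assumption read off the reshaping above): evalhyd.evalp() is
# exercised with 4D predictions, the two numpy.newaxis insertions giving a
# layout of (sites, lead times, members, time steps) with a single site and
# a single lead time, and with 2D observations shaped (sites, time steps)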
class TestMetrics(unittest.TestCase):

    expected_thr = {
        'BS':
            [[[[0.1081672, 0.073954980, 0.08681672]]]],
        'BSS':
            [[[[0.56240422, 0.66612211, 0.56288391]]]],
        'BS_CRD':
            [[[[[0.01335634, 0.15237434, 0.24718520],
                [0.00550861, 0.15305671, 0.22150309],
                [0.00753750, 0.11933328, 0.19861250]]]]],
        'BS_LBD':
            [[[[[0.01244569, 0.14933386, 0.24505537],
                [0.00801337, 0.14745568, 0.21339730],
                [0.01719462, 0.10479711, 0.17441921]]]]]
    }

    expected_qtl = {
        'QS':
            [[[[321.1607717, 294.3494105, 265.70418006,
                236.15648446, 206.03965702]]]],
        'CRPS':
            [[[207.8059391]]]
    }

    def test_threshold_metrics(self):
        thr = [690, 534, 445]
        for metric in self.expected_thr.keys():
            with self.subTest(metric=metric):
                numpy.testing.assert_almost_equal(
                    evalhyd.evalp(_obs, _prd, [metric], thr)[0],
                    self.expected_thr[metric]
                )

    def test_quantile_metrics(self):
        for metric in self.expected_qtl.keys():
            with self.subTest(metric=metric):
                numpy.testing.assert_almost_equal(
                    evalhyd.evalp(_obs, _prd, [metric])[0],
                    self.expected_qtl[metric]
                )
class TestDecomposition(unittest.TestCase):

    def test_brier_calibration_refinement(self):
        thr = [690, 534, 445]
        bs, = evalhyd.evalp(_obs, _prd, ["BS"], thr)
        bs_crd, = evalhyd.evalp(_obs, _prd, ["BS_CRD"], thr)
        numpy.testing.assert_almost_equal(
            bs, bs_crd[..., 0] - bs_crd[..., 1] + bs_crd[..., 2]
        )

    def test_brier_likelihood_base_rate(self):
        thr = [690, 534, 445]
        bs, = evalhyd.evalp(_obs, _prd, ["BS"], thr)
        bs_lbd, = evalhyd.evalp(_obs, _prd, ["BS_LBD"], thr)
        numpy.testing.assert_almost_equal(
            bs, bs_lbd[..., 0] - bs_lbd[..., 1] + bs_lbd[..., 2]
        )
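# note: the index arithmetic above rebuilds the Brier score from its
# components, presumably reliability - resolution + uncertainty for the
# calibration-refinement decomposition (BS_CRD), and type 2 bias -
# discrimination + sharpness for the likelihood-base rate decomposition
# (BS_LBD)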
class TestMasking(unittest.TestCase):

    def test_masks(self):
        msk = numpy.ones(_obs.shape, dtype=bool)
        msk[:, :99] = False

        # masking the first 99 time steps should be equivalent to
        # dropping them from the time series altogether
        numpy.testing.assert_almost_equal(
            evalhyd.evalp(_obs, _prd, ["QS"], t_msk=msk)[0],
            evalhyd.evalp(_obs[..., 99:], _prd[..., 99:], ["QS"])[0]
        )
if __name__ == '__main__':
    test_loader = unittest.TestLoader()
    test_suite = unittest.TestSuite()

    test_suite.addTests(
        test_loader.loadTestsFromTestCase(TestMetrics))
    test_suite.addTests(
        test_loader.loadTestsFromTestCase(TestDecomposition))
    test_suite.addTests(
        test_loader.loadTestsFromTestCase(TestMasking))

    runner = unittest.TextTestRunner(verbosity=2)
    runner.run(test_suite)