Commit f397da5f authored by Thibault Hallouin's avatar Thibault Hallouin

add new probabilistic metrics CRPS_FROM_BS and CRPS_FROM_ECDF

the existing CRPS metric is also renamed to CRPS_FROM_QS
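
For context, the three suffixes presumably map to the standard, equivalent formulations of the continuous ranked probability score for a predictive CDF F and an observation y (the usual identities, not stated in the commit itself):

CRPS(F, y) = \int_{-\infty}^{+\infty} \big( F(x) - \mathbf{1}\{ y \le x \} \big)^2 \, dx        -> CRPS_FROM_BS (Brier score integrated over all thresholds x)
           = \mathbb{E}_F |X - y| - \tfrac{1}{2} \, \mathbb{E}_F |X - X'|                       -> CRPS_FROM_ECDF (kernel form, computable from the empirical CDF of the ensemble)
           = 2 \int_0^1 \mathrm{QS}_\tau \big( F^{-1}(\tau), y \big) \, d\tau                   -> CRPS_FROM_QS (quantile score integrated over all levels \tau)

with the quantile (pinball) score \mathrm{QS}_\tau(q, y) = \big( \mathbf{1}\{ y < q \} - \tau \big) (q - y).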
1 merge request: !1 release v0.1.0.0
Pipeline #45617 passed in 4 minutes and 58 seconds
Showing with 26 additions and 7 deletions
Subproject commit cbe1588dfbdfbfb3a2c66d49b5187955b57bc59f
Subproject commit c8d07b82ab8676eb1d898cfefe58356ba09097dc
Subproject commit 545dd7aa78104052bb8e45c3175efff313d477dc
Subproject commit e534928cc30eb3a4a05539747d98e1d6868c2d62
226.5713674310274
271.9578705197483
252.9569186533230
252.9569186533230
@@ -14,9 +14,11 @@ _obs = numpy.genfromtxt("./data/q_obs.csv", delimiter=',')[numpy.newaxis, :]
 # list all available probabilistic metrics
 _all_metrics = (
     # threshold-based
-    'BS', 'BSS', 'BS_CRD', 'BS_LBD', 'REL_DIAG',
+    'BS', 'BSS', 'BS_CRD', 'BS_LBD', 'REL_DIAG', 'CRPS_FROM_BS',
+    # CDF-based
+    'CRPS_FROM_ECDF',
     # quantile-based
-    'QS', 'CRPS',
+    'QS', 'CRPS_FROM_QS',
     # contingency table-based
     'POD', 'POFD', 'FAR', 'CSI', 'ROCSS',
     # ranks-based
@@ -37,7 +39,7 @@ class TestMetrics(unittest.TestCase):
         metric: (
             numpy.genfromtxt(f"./expected/evalp/{metric}.csv", delimiter=',')
             [numpy.newaxis, numpy.newaxis, numpy.newaxis, numpy.newaxis, ...]
-        ) for metric in ('BS', 'BSS', 'BS_CRD', 'BS_LBD', 'REL_DIAG')
+        ) for metric in ('BS', 'BSS', 'BS_CRD', 'BS_LBD', 'REL_DIAG', 'CRPS_FROM_BS')
     }
     # /!\ stacked-up thresholds in CSV file for REL_DIAG
     # because 7D metric so need to reshape array
@@ -46,11 +48,18 @@ class TestMetrics(unittest.TestCase):
             + (_prd.shape[2] + 1, 3))
     )
+    expected_cdf = {
+        metric: (
+            numpy.genfromtxt(f"./expected/evalp/{metric}.csv", delimiter=',')
+            [numpy.newaxis, numpy.newaxis, numpy.newaxis, numpy.newaxis, ...]
+        ) for metric in ('CRPS_FROM_ECDF',)
+    }
     expected_qtl = {
         metric: (
             numpy.genfromtxt(f"./expected/evalp/{metric}.csv", delimiter=',')
             [numpy.newaxis, numpy.newaxis, numpy.newaxis, numpy.newaxis, ...]
-        ) for metric in ('QS', 'CRPS')
+        ) for metric in ('QS', 'CRPS_FROM_QS')
     }
     expected_ct = {
@@ -83,6 +92,14 @@ class TestMetrics(unittest.TestCase):
                     self.expected_thr[metric]
                 )

+    def test_cdf_metrics(self):
+        for metric in self.expected_cdf.keys():
+            with self.subTest(metric=metric):
+                numpy.testing.assert_almost_equal(
+                    evalhyd.evalp(_obs, _prd, [metric])[0],
+                    self.expected_cdf[metric]
+                )
+
     def test_quantiles_metrics(self):
         for metric in self.expected_qtl.keys():
             with self.subTest(metric=metric):
...
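
For reference, a minimal usage sketch of the new CDF-based metric through evalhyd.evalp, following the call pattern visible in test_cdf_metrics above. The construction of the observation array is taken from the test file; the prediction file path and its (sites, lead times, members, time) layout are assumptions, as the construction of _prd is not shown in this excerpt. CRPS_FROM_BS and CRPS_FROM_QS belong to the threshold-based and quantile-based groups, which the test file exercises in separate methods not fully shown here, so they are omitted from this sketch.

import numpy
import evalhyd

# observations: shape (sites, time) -- construction taken from the test file above
obs = numpy.genfromtxt("./data/q_obs.csv", delimiter=',')[numpy.newaxis, :]

# ensemble predictions: ASSUMED layout (sites, lead times, members, time);
# the path "./data/q_prd.csv" and this reshaping are illustrative only
prd = numpy.genfromtxt("./data/q_prd.csv", delimiter=',')[numpy.newaxis, numpy.newaxis, :, :]

# request the new CDF-based estimator of the CRPS; evalp appears to return one
# array per requested metric, hence the trailing [0] (as in test_cdf_metrics)
crps_from_ecdf = evalhyd.evalp(obs, prd, ["CRPS_FROM_ECDF"])[0]
print(crps_from_ecdf)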