From f397da5f393a7a91142b6c730eb5386947ca4148 Mon Sep 17 00:00:00 2001
From: Thibault Hallouin <thibault.hallouin@inrae.fr>
Date: Fri, 24 Mar 2023 14:18:35 +0100
Subject: [PATCH] add new probabilistic metrics CRPS_FROM_BS and CRPS_FROM_ECDF

The existing CRPS metric is also renamed to CRPS_FROM_QS.
---
 deps/evalhyd                                  |  2 +-
 deps/xtensor                                  |  2 +-
 tests/expected/evalp/CRPS_FROM_BS.csv         |  1 +
 tests/expected/evalp/CRPS_FROM_ECDF.csv       |  1 +
 .../evalp/{CRPS.csv => CRPS_FROM_QS.csv}      |  2 +-
 tests/test_probabilist.py                     | 25 ++++++++++++++++---
 6 files changed, 26 insertions(+), 7 deletions(-)
 create mode 100644 tests/expected/evalp/CRPS_FROM_BS.csv
 create mode 100644 tests/expected/evalp/CRPS_FROM_ECDF.csv
 rename tests/expected/evalp/{CRPS.csv => CRPS_FROM_QS.csv} (94%)

diff --git a/deps/evalhyd b/deps/evalhyd
index cbe1588..c8d07b8 160000
--- a/deps/evalhyd
+++ b/deps/evalhyd
@@ -1 +1 @@
-Subproject commit cbe1588dfbdfbfb3a2c66d49b5187955b57bc59f
+Subproject commit c8d07b82ab8676eb1d898cfefe58356ba09097dc
diff --git a/deps/xtensor b/deps/xtensor
index 545dd7a..e534928 160000
--- a/deps/xtensor
+++ b/deps/xtensor
@@ -1 +1 @@
-Subproject commit 545dd7aa78104052bb8e45c3175efff313d477dc
+Subproject commit e534928cc30eb3a4a05539747d98e1d6868c2d62
diff --git a/tests/expected/evalp/CRPS_FROM_BS.csv b/tests/expected/evalp/CRPS_FROM_BS.csv
new file mode 100644
index 0000000..7a15540
--- /dev/null
+++ b/tests/expected/evalp/CRPS_FROM_BS.csv
@@ -0,0 +1 @@
+226.5713674310274
diff --git a/tests/expected/evalp/CRPS_FROM_ECDF.csv b/tests/expected/evalp/CRPS_FROM_ECDF.csv
new file mode 100644
index 0000000..13832cc
--- /dev/null
+++ b/tests/expected/evalp/CRPS_FROM_ECDF.csv
@@ -0,0 +1 @@
+271.9578705197483
diff --git a/tests/expected/evalp/CRPS.csv b/tests/expected/evalp/CRPS_FROM_QS.csv
similarity index 94%
rename from tests/expected/evalp/CRPS.csv
rename to tests/expected/evalp/CRPS_FROM_QS.csv
index c449874..87b0d67 100644
--- a/tests/expected/evalp/CRPS.csv
+++ b/tests/expected/evalp/CRPS_FROM_QS.csv
@@ -1 +1 @@
-252.9569186533230
+252.9569186533230
diff --git a/tests/test_probabilist.py b/tests/test_probabilist.py
index 04d26c6..72502eb 100644
--- a/tests/test_probabilist.py
+++ b/tests/test_probabilist.py
@@ -14,9 +14,11 @@ _obs = numpy.genfromtxt("./data/q_obs.csv", delimiter=',')[numpy.newaxis, :]
 # list all available probabilistic metrics
 _all_metrics = (
     # threshold-based
-    'BS', 'BSS', 'BS_CRD', 'BS_LBD', 'REL_DIAG',
+    'BS', 'BSS', 'BS_CRD', 'BS_LBD', 'REL_DIAG', 'CRPS_FROM_BS',
+    # CDF-based
+    'CRPS_FROM_ECDF',
     # quantile-based
-    'QS', 'CRPS',
+    'QS', 'CRPS_FROM_QS',
     # contingency table-based
     'POD', 'POFD', 'FAR', 'CSI', 'ROCSS',
     # ranks-based
@@ -37,7 +39,7 @@ class TestMetrics(unittest.TestCase):
         metric: (
             numpy.genfromtxt(f"./expected/evalp/{metric}.csv", delimiter=',')
             [numpy.newaxis, numpy.newaxis, numpy.newaxis, numpy.newaxis, ...]
-        ) for metric in ('BS', 'BSS', 'BS_CRD', 'BS_LBD', 'REL_DIAG')
+        ) for metric in ('BS', 'BSS', 'BS_CRD', 'BS_LBD', 'REL_DIAG', 'CRPS_FROM_BS')
     }
     # /!\ stacked-up thresholds in CSV file for REL_DIAG
     #     because 7D metric so need to reshape array
@@ -46,11 +48,18 @@ class TestMetrics(unittest.TestCase):
                                          + (_prd.shape[2] + 1, 3))
     )
 
+    expected_cdf = {
+        metric: (
+            numpy.genfromtxt(f"./expected/evalp/{metric}.csv", delimiter=',')
+            [numpy.newaxis, numpy.newaxis, numpy.newaxis, numpy.newaxis, ...]
+        ) for metric in ('CRPS_FROM_ECDF',)
+    }
+
     expected_qtl = {
         metric: (
             numpy.genfromtxt(f"./expected/evalp/{metric}.csv", delimiter=',')
             [numpy.newaxis, numpy.newaxis, numpy.newaxis, numpy.newaxis, ...]
-        ) for metric in ('QS', 'CRPS')
+        ) for metric in ('QS', 'CRPS_FROM_QS')
     }
 
     expected_ct = {
@@ -83,6 +92,14 @@ class TestMetrics(unittest.TestCase):
                     self.expected_thr[metric]
                 )
 
+    def test_cdf_metrics(self):
+        for metric in self.expected_cdf.keys():
+            with self.subTest(metric=metric):
+                numpy.testing.assert_almost_equal(
+                    evalhyd.evalp(_obs, _prd, [metric])[0],
+                    self.expected_cdf[metric]
+                )
+
     def test_quantiles_metrics(self):
         for metric in self.expected_qtl.keys():
             with self.subTest(metric=metric):
-- 
GitLab