diff --git a/deps/evalhyd b/deps/evalhyd
index cbe1588dfbdfbfb3a2c66d49b5187955b57bc59f..c8d07b82ab8676eb1d898cfefe58356ba09097dc 160000
--- a/deps/evalhyd
+++ b/deps/evalhyd
@@ -1 +1 @@
-Subproject commit cbe1588dfbdfbfb3a2c66d49b5187955b57bc59f
+Subproject commit c8d07b82ab8676eb1d898cfefe58356ba09097dc
diff --git a/deps/xtensor b/deps/xtensor
index 545dd7aa78104052bb8e45c3175efff313d477dc..e534928cc30eb3a4a05539747d98e1d6868c2d62 160000
--- a/deps/xtensor
+++ b/deps/xtensor
@@ -1 +1 @@
-Subproject commit 545dd7aa78104052bb8e45c3175efff313d477dc
+Subproject commit e534928cc30eb3a4a05539747d98e1d6868c2d62
diff --git a/tests/expected/evalp/CRPS_FROM_BS.csv b/tests/expected/evalp/CRPS_FROM_BS.csv
new file mode 100644
index 0000000000000000000000000000000000000000..7a155403cfd93a41adafa598edbc99227973ad0a
--- /dev/null
+++ b/tests/expected/evalp/CRPS_FROM_BS.csv
@@ -0,0 +1 @@
+226.5713674310274
diff --git a/tests/expected/evalp/CRPS_FROM_ECDF.csv b/tests/expected/evalp/CRPS_FROM_ECDF.csv
new file mode 100644
index 0000000000000000000000000000000000000000..13832cc05b6185a94d67fc615af231bc718fe8e1
--- /dev/null
+++ b/tests/expected/evalp/CRPS_FROM_ECDF.csv
@@ -0,0 +1 @@
+271.9578705197483
diff --git a/tests/expected/evalp/CRPS.csv b/tests/expected/evalp/CRPS_FROM_QS.csv
similarity index 94%
rename from tests/expected/evalp/CRPS.csv
rename to tests/expected/evalp/CRPS_FROM_QS.csv
index c449874f5f2b510060ab8d33d4f09642f0f2221a..87b0d67fdfd3eb30ded9c9a1408f6bd762a9e349 100644
--- a/tests/expected/evalp/CRPS.csv
+++ b/tests/expected/evalp/CRPS_FROM_QS.csv
@@ -1 +1 @@
-252.9569186533230
+252.9569186533230
diff --git a/tests/test_probabilist.py b/tests/test_probabilist.py
index 04d26c682dd2a0c466fb791b99bb35cae7b9f168..72502eb5b6a6b463ca15f087a346c516df0c3676 100644
--- a/tests/test_probabilist.py
+++ b/tests/test_probabilist.py
@@ -14,9 +14,11 @@ _obs = numpy.genfromtxt("./data/q_obs.csv", delimiter=',')[numpy.newaxis, :]
 # list all available probabilistic metrics
 _all_metrics = (
     # threshold-based
-    'BS', 'BSS', 'BS_CRD', 'BS_LBD', 'REL_DIAG',
+    'BS', 'BSS', 'BS_CRD', 'BS_LBD', 'REL_DIAG', 'CRPS_FROM_BS',
+    # CDF-based
+    'CRPS_FROM_ECDF',
     # quantile-based
-    'QS', 'CRPS',
+    'QS', 'CRPS_FROM_QS',
     # contingency table-based
     'POD', 'POFD', 'FAR', 'CSI', 'ROCSS',
     # ranks-based
@@ -37,7 +39,7 @@ class TestMetrics(unittest.TestCase):
         metric: (
             numpy.genfromtxt(f"./expected/evalp/{metric}.csv", delimiter=',')
             [numpy.newaxis, numpy.newaxis, numpy.newaxis, numpy.newaxis, ...]
-        ) for metric in ('BS', 'BSS', 'BS_CRD', 'BS_LBD', 'REL_DIAG')
+        ) for metric in ('BS', 'BSS', 'BS_CRD', 'BS_LBD', 'REL_DIAG', 'CRPS_FROM_BS')
     }
     # /!\ stacked-up thresholds in CSV file for REL_DIAG
     #     because 7D metric so need to reshape array
@@ -46,11 +48,18 @@ class TestMetrics(unittest.TestCase):
                                          + (_prd.shape[2] + 1, 3))
     )
 
+    expected_cdf = {
+        metric: (
+            numpy.genfromtxt(f"./expected/evalp/{metric}.csv", delimiter=',')
+            [numpy.newaxis, numpy.newaxis, numpy.newaxis, numpy.newaxis, ...]
+        ) for metric in ('CRPS_FROM_ECDF',)
+    }
+
     expected_qtl = {
         metric: (
             numpy.genfromtxt(f"./expected/evalp/{metric}.csv", delimiter=',')
             [numpy.newaxis, numpy.newaxis, numpy.newaxis, numpy.newaxis, ...]
-        ) for metric in ('QS', 'CRPS')
+        ) for metric in ('QS', 'CRPS_FROM_QS')
     }
 
     expected_ct = {
@@ -83,6 +92,14 @@ class TestMetrics(unittest.TestCase):
                     self.expected_thr[metric]
                 )
 
+    def test_cdf_metrics(self):
+        for metric in self.expected_cdf.keys():
+            with self.subTest(metric=metric):
+                numpy.testing.assert_almost_equal(
+                    evalhyd.evalp(_obs, _prd, [metric])[0],
+                    self.expected_cdf[metric]
+                )
+
     def test_quantiles_metrics(self):
         for metric in self.expected_qtl.keys():
             with self.subTest(metric=metric):
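
For reference, a minimal usage sketch of the API exercised by the updated tests: the renamed quantile-based CRPS (`CRPS_FROM_QS`) and the new ECDF-based CRPS (`CRPS_FROM_ECDF`) can be requested from `evalhyd.evalp` by name, exactly as `test_cdf_metrics` and `test_quantiles_metrics` do. The random data and the array shape convention below are illustrative assumptions, not taken from the test fixtures.

    import numpy
    import evalhyd

    # illustrative random data; shapes follow the convention assumed here of
    # (sites, time) for observations and (sites, lead times, members, time)
    # for ensemble predictions
    obs = numpy.random.uniform(50, 500, (1, 300))
    prd = numpy.random.uniform(50, 500, (1, 1, 30, 300))

    # evalp returns one array per requested metric, in the order requested,
    # so both CRPS variants can be computed in a single call
    crps_from_qs, crps_from_ecdf = evalhyd.evalp(
        obs, prd, ["CRPS_FROM_QS", "CRPS_FROM_ECDF"]
    )

The threshold-based `CRPS_FROM_BS` added to `expected_thr` is checked by the existing threshold test (not fully shown in this hunk) and may require additional arguments such as thresholds, so it is left out of this sketch.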