diff --git a/evalhyd/evald.py b/evalhyd/evald.py
index 0afbc776659385635f7553ce5d073e70e0ef7355..6abd841cbd335bf587ae18e3d8207bbb0f78118f 100644
--- a/evalhyd/evald.py
+++ b/evalhyd/evald.py
@@ -17,7 +17,8 @@ def evald(q_obs: NDArray[dtype('float64')],
           t_msk: NDArray[dtype('bool')] = None,
           m_cdt: ArrayLike = None,
           bootstrap: Dict[str, int] = None,
-          dts: ArrayLike = None) -> List[NDArray[dtype('float64')]]:
+          dts: ArrayLike = None,
+          seed: int = None) -> List[NDArray[dtype('float64')]]:
     """Function to evaluate deterministic streamflow predictions"""
 
     # required arguments
@@ -43,5 +44,7 @@ def evald(q_obs: NDArray[dtype('float64')],
         kwargs['bootstrap'] = bootstrap
     if dts is not None:
         kwargs['dts'] = dts
+    if seed is not None:
+        kwargs['seed'] = seed
 
     return _evald(**kwargs)
diff --git a/evalhyd/evalp.py b/evalhyd/evalp.py
index 30c34ee823d635ab875cf8a2a04d0ca649376d5f..a50b365fbc637c0e2f488cb0241ef8187fee52be 100644
--- a/evalhyd/evalp.py
+++ b/evalhyd/evalp.py
@@ -12,10 +12,13 @@ def evalp(q_obs: NDArray[dtype('float64')],
           q_prd: NDArray[dtype('float64')],
           metrics: List[str],
           q_thr: NDArray[dtype('float64')] = None,
+          events: str = None,
+          c_lvl: NDArray[dtype('float64')] = None,
           t_msk: NDArray[dtype('bool')] = None,
           m_cdt: ArrayLike = None,
           bootstrap: Dict[str, int] = None,
-          dts: ArrayLike = None) -> List[NDArray[dtype('float64')]]:
+          dts: ArrayLike = None,
+          seed: int = None) -> List[NDArray[dtype('float64')]]:
     """Function to evaluate probabilistic streamflow predictions"""
 
     # required arguments
@@ -28,6 +31,10 @@ def evalp(q_obs: NDArray[dtype('float64')],
     # optional arguments
     if q_thr is not None:
         kwargs['q_thr'] = q_thr
+    if events is not None:
+        kwargs['events'] = events
+    if c_lvl is not None:
+        kwargs['c_lvl'] = c_lvl
     if t_msk is not None:
         kwargs['t_msk'] = t_msk
     if m_cdt is not None:
@@ -36,5 +43,7 @@ def evalp(q_obs: NDArray[dtype('float64')],
         kwargs['bootstrap'] = bootstrap
     if dts is not None:
         kwargs['dts'] = dts
+    if seed is not None:
+        kwargs['seed'] = seed
 
     return _evalp(**kwargs)
diff --git a/evalhyd/src/evalhyd.cpp b/evalhyd/src/evalhyd.cpp
index 868fd7f2c4e534a23ffe1c707b894da2fc3de824..720c682b3bf7a5f0c37ca30a7b1d73b40799651f 100644
--- a/evalhyd/src/evalhyd.cpp
+++ b/evalhyd/src/evalhyd.cpp
@@ -24,10 +24,11 @@ auto evald(
         std::optional<std::string> transform,
         std::optional<double> exponent,
         std::optional<double> epsilon,
-        const xt::pytensor<bool, 2>& t_msk,
-        const xt::pytensor<std::array<char, 32>, 1>& m_cdt,
+        const xt::pytensor<bool, 3>& t_msk,
+        const xt::pytensor<std::array<char, 32>, 2>& m_cdt,
         std::optional<std::unordered_map<std::string, int>> bootstrap,
-        const std::vector<std::string>& dts
+        const std::vector<std::string>& dts,
+        std::optional<int> seed
 )
 {
     return evalhyd::evald(
@@ -42,7 +43,8 @@ auto evald(
             (bootstrap.has_value())
             ? bootstrap.value()
             : xtl::missing<std::unordered_map<std::string, int>>(),
-            dts
+            dts,
+            (seed.has_value()) ? seed.value() : xtl::missing<int>()
     );
 }
 
@@ -51,10 +53,13 @@ auto evalp(
         const xt::pytensor<double, 4>& q_prd,
         const std::vector<std::string>& metrics,
         const xt::pytensor<double, 2>& q_thr,
+        std::optional<std::string> events,
+        const std::vector<double>& c_lvl,
         const xt::pytensor<bool, 4>& t_msk,
         const xt::pytensor<std::array<char, 32>, 2>& m_cdt,
         std::optional<std::unordered_map<std::string, int>> bootstrap,
-        const std::vector<std::string>& dts
+        const std::vector<std::string>& dts,
+        std::optional<int> seed
 )
 {
     return evalhyd::evalp(
@@ -62,12 +67,15 @@ auto evalp(
             q_prd,
             metrics,
             q_thr,
+            (events.has_value()) ? events.value() : xtl::missing<std::string>(),
+            c_lvl,
             t_msk,
             m_cdt,
             (bootstrap.has_value())
             ? bootstrap.value()
             : xtl::missing<std::unordered_map<std::string, int>>(),
-            dts
+            dts,
+            (seed.has_value()) ? seed.value() : xtl::missing<int>()
     );
 }
 
@@ -89,10 +97,11 @@ PYBIND11_MODULE(_evalhyd, m)
         py::arg("transform") = py::none(),
         py::arg("exponent") = py::none(),
         py::arg("epsilon") = py::none(),
-        py::arg("t_msk") = xt::pytensor<bool, 2>({0}),
-        py::arg("m_cdt") = xt::pytensor<std::array<char, 32>, 1>({}),
+        py::arg("t_msk") = xt::pytensor<bool, 3>({0}),
+        py::arg("m_cdt") = xt::pytensor<std::array<char, 32>, 2>({0}),
         py::arg("bootstrap") = py::none(),
-        py::arg("dts") = py::list()
+        py::arg("dts") = py::list(),
+        py::arg("seed") = py::none()
     );
 
     // probabilistic evaluation
@@ -104,10 +113,13 @@ PYBIND11_MODULE(_evalhyd, m)
         py::arg("q_prd"),
         py::arg("metrics"),
         py::arg("q_thr") = xt::pytensor<double, 2>({0}),
+        py::arg("events") = py::none(),
+        py::arg("c_lvl") = py::list(),
         py::arg("t_msk") = xt::pytensor<bool, 4>({0}),
         py::arg("m_cdt") = xt::pytensor<std::array<char, 32>, 2>({0}),
         py::arg("bootstrap") = py::none(),
-        py::arg("dts") = py::list()
+        py::arg("dts") = py::list(),
+        py::arg("seed") = py::none()
     );
 
 #ifdef VERSION_INFO
diff --git a/tests/test_determinist.py b/tests/test_determinist.py
index 0c271a9c05b17acacbd78a714a7ee422251e10bb..0b4118fcaae1f70711f8d0f7989f63fc6efe9590 100644
--- a/tests/test_determinist.py
+++ b/tests/test_determinist.py
@@ -88,7 +88,8 @@ class TestTransform(unittest.TestCase):
 class TestMasking(unittest.TestCase):
 
     def test_masks(self):
-        msk = numpy.ones(_obs.shape, dtype=bool)
+        msk = numpy.ones(_prd.shape, dtype=bool)
+        msk = msk[:, numpy.newaxis, :]
         msk[..., :99] = False
 
         # TODO: figure out why passing views would not work
@@ -102,7 +103,12 @@ class TestMasking(unittest.TestCase):
 
     def test_conditions(self):
         with self.subTest(conditions="observed streamflow values"):
-            cdt = numpy.array(["q_obs{<2000,>3000}"], dtype='|S32')
+            cdt = numpy.array([["q_obs{<2000,>3000}"],
+                               ["q_obs{<2000,>3000}"],
+                               ["q_obs{<2000,>3000}"],
+                               ["q_obs{<2000,>3000}"],
+                               ["q_obs{<2000,>3000}"]],
+                              dtype='|S32')
 
             msk = (_obs[0] < 2000) | (_obs[0] > 3000)
 
@@ -116,7 +122,12 @@ class TestMasking(unittest.TestCase):
             )
 
         with self.subTest(conditions="observed streamflow statistics"):
-            cdt = numpy.array(["q_obs{>=median}"], dtype='|S32')
+            cdt = numpy.array([["q_obs{>=median}"],
+                               ["q_obs{>=median}"],
+                               ["q_obs{>=median}"],
+                               ["q_obs{>=median}"],
+                               ["q_obs{>=median}"]],
+                              dtype='|S32')
 
             msk = _obs[0] >= numpy.median(_obs)
 
diff --git a/tests/test_probabilist.py b/tests/test_probabilist.py
index 092ea546f0faec3799faa0426a243574f89c68f2..306666c61962dd18dfa7aac37bc4011c481b661c 100644
--- a/tests/test_probabilist.py
+++ b/tests/test_probabilist.py
@@ -44,7 +44,7 @@ class TestMetrics(unittest.TestCase):
         for metric in self.expected_thr.keys():
             with self.subTest(metric=metric):
                 numpy.testing.assert_almost_equal(
-                    evalhyd.evalp(_obs, _prd, [metric], thr)[0],
+                    evalhyd.evalp(_obs, _prd, [metric], thr, "high")[0],
                     self.expected_thr[metric]
                 )
 
@@ -61,16 +61,16 @@ class TestDecomposition(unittest.TestCase):
 
     def test_brier_calibration_refinement(self):
         thr = numpy.array([[690, 534, 445]])
-        bs, = evalhyd.evalp(_obs, _prd, ["BS"], thr)
-        bs_crd, = evalhyd.evalp(_obs, _prd, ["BS_CRD"], thr)
+        bs, = evalhyd.evalp(_obs, _prd, ["BS"], thr, "high")
+        bs_crd, = evalhyd.evalp(_obs, _prd, ["BS_CRD"], thr, "high")
         numpy.testing.assert_almost_equal(
             bs, bs_crd[..., 0] - bs_crd[..., 1] + bs_crd[..., 2]
         )
 
     def test_brier_likelihood_base_rate(self):
         thr = numpy.array([[690, 534, 445]])
-        bs, = evalhyd.evalp(_obs, _prd, ["BS"], thr)
-        bs_lbd, = evalhyd.evalp(_obs, _prd, ["BS_LBD"], thr)
+        bs, = evalhyd.evalp(_obs, _prd, ["BS"], thr, "high")
+        bs_lbd, = evalhyd.evalp(_obs, _prd, ["BS_LBD"], thr, "high")
         numpy.testing.assert_almost_equal(
             bs, bs_lbd[..., 0] - bs_lbd[..., 1] + bs_lbd[..., 2]
         )
 
@@ -79,7 +79,8 @@ class TestDecomposition(unittest.TestCase):
 class TestMasking(unittest.TestCase):
 
     def test_masks(self):
-        msk = numpy.ones((_prd.shape[0], _prd.shape[1], 1, _prd.shape[3]), dtype=bool)
+        msk = numpy.ones((_prd.shape[0], _prd.shape[1], 1, _prd.shape[3]),
+                         dtype=bool)
         msk[..., :99] = False
 
         # TODO: figure out why passing views would not work
@@ -136,7 +137,8 @@ class TestMissingData(unittest.TestCase):
                      [4.3, 4.2, 4.7, 4.3, numpy.nan],
                      [5.3, 5.2, 5.7, 2.3, numpy.nan]]]],
                 [metric],
-                thr
+                thr,
+                "high"
             )[0],
             # missing data pairwise deleted from series
             evalhyd.evalp(
@@ -145,7 +147,8 @@
                 [[[[4.3, 4.7, 4.3],
                    [5.3, 5.7, 2.3]]]],
                 [metric],
-                thr
+                thr,
+                "high"
             )[0]
         )
 
@@ -178,6 +181,7 @@ class TestUncertainty(unittest.TestCase):
                 prd_1yr[numpy.newaxis, numpy.newaxis],
                 [metric],
                 q_thr=thr,
+                events="high",
                 bootstrap={
                     "n_samples": 10, "len_sample": 3, "summary": 0
                 },
@@ -189,7 +193,8 @@
                 obs_3yrs[numpy.newaxis],
                 prd_3yrs[numpy.newaxis, numpy.newaxis],
                 [metric],
-                q_thr=thr
+                q_thr=thr,
+                events="high"
             )[0]
         )
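
A minimal usage sketch of the new keyword arguments follows. It is an
illustration only: the array shapes mirror the test suite, the values are
made up, and the interpretation of c_lvl (confidence levels for
interval-based metrics) and seed (fixing the random state of the bootstrap
sampling) is inferred from this patch rather than from documentation.

    import numpy
    import evalhyd

    # shapes follow the tests: (sites, time) for observations,
    # (sites, lead times, members, time) for ensemble predictions
    obs = numpy.random.uniform(5, 15, (1, 100))
    prd = numpy.random.uniform(5, 15, (1, 1, 30, 100))
    thr = numpy.array([[10.0]])

    # 'events' qualifies the thresholds in 'q_thr'; the updated tests
    # pass "high", i.e. threshold-exceedance (high-flow) events
    bs, = evalhyd.evalp(obs, prd, ["BS"], q_thr=thr, events="high")

    # assumption: 'c_lvl' carries confidence levels (in %) for
    # interval-based metrics, and 'seed' makes any random sampling
    # (e.g. the bootstrap) reproducible across calls
    bs_seeded, = evalhyd.evalp(
        obs, prd, ["BS"], q_thr=thr, events="high",
        c_lvl=numpy.array([30.0, 80.0]), seed=7
    )

If seed does what the name suggests, calling evalp (or evald) twice with the
same seed and the same bootstrap settings should return identical samples.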