Commit 448a8d8d authored by Thibault Hallouin

simplify conditions in unittests

replace clumsy xt::sum(...) == xt::xscalar(...) comparisons with xt::all(...)
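For illustration, a minimal sketch of the before/after pattern, assuming the xtensor and GoogleTest headers already used by these tests; the tensors computed/expected and the test name are hypothetical, not taken from the commit:

#include <cmath>                    // NAN
#include <gtest/gtest.h>
#include <xtensor/xtensor.hpp>
#include <xtensor/xmath.hpp>        // xt::isclose, xt::sum
#include <xtensor/xoperation.hpp>   // xt::all

TEST(ExampleTests, SumVersusAll)
{
    // hypothetical tensors standing in for a computed metric and its expected values
    xt::xtensor<double, 1> computed = {0.106151, 0.073956, NAN};
    xt::xtensor<double, 1> expected = {0.106151, 0.073956, NAN};

    // before: count the element-wise matches and compare that count
    // against the number of elements
    EXPECT_TRUE(
            xt::sum(xt::isclose(computed, expected, 1e-05, 1e-08, true))
            == xt::xscalar<double>(computed.size())
    );

    // after: reduce the element-wise comparison directly to a single boolean
    EXPECT_TRUE(
            xt::all(xt::isclose(computed, expected, 1e-05, 1e-08, true))
    );
}

The trailing true in xt::isclose is its equal_nan flag, so NaNs compare equal to NaNs; xt::allclose offers no such flag, which appears to be why the NaN-bearing expectations use xt::all(xt::isclose(...)) rather than xt::allclose.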
1 merge request: !3 release v0.1.0
Pipeline #43648 passed with stage in 2 minutes and 38 seconds
Showing with 37 additions and 50 deletions
@@ -6,6 +6,7 @@
 #include <vector>
 #include <tuple>
 #include <array>
 #include <gtest/gtest.h>
 #include <xtensor/xtensor.hpp>
@@ -107,7 +108,7 @@ TEST(DeterministTests, TestTransform)
             {{0.883019}},
             {{0.883029}},
             {{0.882972}}};
-    EXPECT_TRUE(xt::allclose(metrics[0], nse_sqrt));
+    EXPECT_TRUE(xt::all(xt::isclose(metrics[0], nse_sqrt)));

     // compute and check results on inverted streamflow series
     metrics = evalhyd::evald(observed, predicted, {"NSE"}, "inv");
@@ -118,7 +119,7 @@ TEST(DeterministTests, TestTransform)
             {{0.737429}},
             {{0.737546}},
             {{0.737595}}};
-    EXPECT_TRUE(xt::allclose(metrics[0], nse_inv));
+    EXPECT_TRUE(xt::all(xt::isclose(metrics[0], nse_inv)));

     // compute and check results on square-rooted streamflow series
     metrics = evalhyd::evald(observed, predicted, {"NSE"}, "log");
@@ -129,7 +130,7 @@ TEST(DeterministTests, TestTransform)
             {{0.893585}},
             {{0.893758}},
             {{0.893793}}};
-    EXPECT_TRUE(xt::allclose(metrics[0], nse_log));
+    EXPECT_TRUE(xt::all(xt::isclose(metrics[0], nse_log)));

     // compute and check results on power-transformed streamflow series
     metrics = evalhyd::evald(observed, predicted, {"NSE"}, "pow", 0.2);
@@ -140,7 +141,7 @@ TEST(DeterministTests, TestTransform)
             {{0.899451}},
             {{0.899578}},
             {{0.899588}}};
-    EXPECT_TRUE(xt::allclose(metrics[0], nse_pow));
+    EXPECT_TRUE(xt::all(xt::isclose(metrics[0], nse_pow)));
 }
@@ -172,7 +173,7 @@ TEST(DeterministTests, TestMasks)
     // check results are identical
     for (std::size_t m = 0; m < all_metrics_d.size(); m++)
     {
-        EXPECT_TRUE(xt::allclose(metrics_masked[m], metrics_subset[m]))
+        EXPECT_TRUE(xt::all(xt::isclose(metrics_masked[m], metrics_subset[m])))
                 << "Failure for (" << all_metrics_d[m] << ")";
     }
 }
@@ -212,9 +213,9 @@ TEST(DeterministTests, TestMaskingConditions)
     for (std::size_t m = 0; m < all_metrics_d.size(); m++)
     {
         EXPECT_TRUE(
-                xt::allclose(
+                xt::all(xt::isclose(
                         metrics_q_conditioned[m], metrics_q_preconditioned[m]
-                )
+                ))
         ) << "Failure for (" << all_metrics_d[m] << ")";
     }
@@ -245,9 +246,9 @@ TEST(DeterministTests, TestMaskingConditions)
     for (std::size_t m = 0; m < all_metrics_d.size(); m++)
     {
         EXPECT_TRUE(
-                xt::allclose(
+                xt::all(xt::isclose(
                         metrics_q_conditioned_[m], metrics_q_preconditioned_[m]
-                )
+                ))
         ) << "Failure for (" << all_metrics_d[m] << ")";
     }
@@ -276,9 +277,9 @@ TEST(DeterministTests, TestMaskingConditions)
     for (std::size_t m = 0; m < all_metrics_d.size(); m++)
     {
         EXPECT_TRUE(
-                xt::allclose(
+                xt::all(xt::isclose(
                         metrics_t_conditioned[m], metrics_t_subset[m]
-                )
+                ))
         ) << "Failure for (" << all_metrics_d[m] << ")";
     }
 }
@@ -323,10 +324,10 @@ TEST(DeterministTests, TestMissingData)
             // compare to check results are the same
             EXPECT_TRUE(
-                    xt::allclose(
+                    xt::all(xt::isclose(
                             xt::view(metrics_nan[m], p),
                             metrics_sbs[0]
-                    )
+                    ))
             ) << "Failure for (" << all_metrics_d[m] << ")";
         }
     }
@@ -390,9 +391,9 @@ TEST(DeterministTests, TestBootstrap)
     for (std::size_t m = 0; m < all_metrics_d.size(); m++)
     {
         EXPECT_TRUE(
-                xt::allclose(
+                xt::all(xt::isclose(
                         metrics_bts[m], metrics_rep[m]
-                )
+                ))
         ) << "Failure for (" << all_metrics_d[m] << ")";
     }
 }
@@ -73,16 +73,14 @@ TEST(ProbabilistTests, TestBrier)
     xt::xtensor<double, 5> bs =
             {{{{{0.10615136, 0.07395622, 0.08669186, NAN}}}}};
     EXPECT_TRUE(
-            xt::sum(xt::isclose(metrics[0], bs, 1e-05, 1e-08, true))
-            == xt::xscalar<double>(4)
+            xt::all(xt::isclose(metrics[0], bs, 1e-05, 1e-08, true))
     );

     // Brier skill scores
     xt::xtensor<double, 5> bss =
             {{{{{0.5705594, 0.6661165, 0.5635126, NAN}}}}};
     EXPECT_TRUE(
-            xt::sum(xt::isclose(metrics[1], bss, 1e-05, 1e-08, true))
-            == xt::xscalar<double>(4)
+            xt::all(xt::isclose(metrics[1], bss, 1e-05, 1e-08, true))
     );

     // Brier calibration-refinement decompositions
@@ -92,8 +90,7 @@ TEST(ProbabilistTests, TestBrier)
             {0.010139431, 0.1220601, 0.1986125},
             {NAN, NAN, NAN}}}}}};
     EXPECT_TRUE(
-            xt::sum(xt::isclose(metrics[2], bs_crd, 1e-05, 1e-08, true))
-            == xt::xscalar<double>(12)
+            xt::all(xt::isclose(metrics[2], bs_crd, 1e-05, 1e-08, true))
     );

     // Brier likelihood-base rate decompositions
@@ -103,8 +100,7 @@ TEST(ProbabilistTests, TestBrier)
             {0.017191279, 0.1048221, 0.1743227},
             {NAN, NAN, NAN}}}}}};
     EXPECT_TRUE(
-            xt::sum(xt::isclose(metrics[3], bs_lbd, 1e-05, 1e-08, true))
-            == xt::xscalar<double>(12)
+            xt::all(xt::isclose(metrics[3], bs_lbd, 1e-05, 1e-08, true))
     );
 }
@@ -139,12 +135,12 @@ TEST(ProbabilistTests, TestQuantiles)
              205.189587, 200.395746, 195.2372, 190.080139, 185.384244,
              180.617858, 174.58323, 169.154093, 163.110932, 156.274796,
              147.575315}}}}};
-    EXPECT_TRUE(xt::allclose(metrics[0], qs));
+    EXPECT_TRUE(xt::all(xt::isclose(metrics[0], qs)));

     // Continuous ranked probability scores
     xt::xtensor<double, 4> crps =
             {{{{252.956919}}}};
-    EXPECT_TRUE(xt::allclose(metrics[1], crps));
+    EXPECT_TRUE(xt::all(xt::isclose(metrics[1], crps)));
 }

 TEST(ProbabilistTests, TestContingency)
@@ -224,8 +220,7 @@ TEST(ProbabilistTests, TestContingency)
             { 0.848921, 0.854369, 0.752941, NAN},
             { 0.848921, 0.84466 , 0.752941, NAN}}}}}};
     EXPECT_TRUE(
-            xt::sum(xt::isclose(metrics[0], pod, 1e-05, 1e-08, true))
-            == xt::xscalar<double>(208)
+            xt::all(xt::isclose(metrics[0], pod, 1e-05, 1e-08, true))
     );

     // POFD
@@ -283,8 +278,7 @@ TEST(ProbabilistTests, TestContingency)
             { 0.081395, 0.038462, 0.026549, NAN},
             { 0.081395, 0.038462, 0.022124, NAN}}}}}};
     EXPECT_TRUE(
-            xt::sum(xt::isclose(metrics[1], pofd, 1e-04, 1e-07, true))
-            == xt::xscalar<double>(208)
+            xt::all(xt::isclose(metrics[1], pofd, 1e-04, 1e-07, true))
     );

     // FAR
@@ -342,8 +336,7 @@ TEST(ProbabilistTests, TestContingency)
             { 0.106061, 0.083333, 0.085714, NAN},
             { 0.106061, 0.084211, 0.072464, NAN}}}}}};
     EXPECT_TRUE(
-            xt::sum(xt::isclose(metrics[2], far, 1e-05, 1e-08, true))
-            == xt::xscalar<double>(208)
+            xt::all(xt::isclose(metrics[2], far, 1e-05, 1e-08, true))
     );

     // CSI
@@ -402,16 +395,14 @@ TEST(ProbabilistTests, TestContingency)
             { 0.771242, 0.783784, 0.711111, NAN}}}}}}
     ;
     EXPECT_TRUE(
-            xt::sum(xt::isclose(metrics[3], csi, 1e-05, 1e-08, true))
-            == xt::xscalar<double>(208)
+            xt::all(xt::isclose(metrics[3], csi, 1e-05, 1e-08, true))
     );

     // ROC skill scores
     xt::xtensor<double, 5> rocss =
             {{{{{ 0.71085 , 0.783047, 0.713066, NAN}}}}};
     EXPECT_TRUE(
-            xt::sum(xt::isclose(metrics[4], rocss, 1e-05, 1e-08, true))
-            == xt::xscalar<double>(4)
+            xt::all(xt::isclose(metrics[4], rocss, 1e-05, 1e-08, true))
     );
 }
@@ -616,10 +607,9 @@ TEST(ProbabilistTests, TestMaskingConditions)
     for (std::size_t m = 0; m < all_metrics_p.size(); m++)
     {
         EXPECT_TRUE(
-                xt::sum(xt::isclose(metrics_q_conditioned[m],
+                xt::all(xt::isclose(metrics_q_conditioned[m],
                                     metrics_q_preconditioned[m],
-                                    1e-05, 1e-08, true))
-                == xt::xscalar<double>(metrics_q_conditioned[m].size())
+                                    1e-05, 1e-06, true))
         ) << "Failure for (" << all_metrics_p[m] << ")";
     }
@@ -658,10 +648,9 @@ TEST(ProbabilistTests, TestMaskingConditions)
     for (std::size_t m = 0; m < all_metrics_p.size(); m++)
     {
         EXPECT_TRUE(
-                xt::sum(xt::isclose(metrics_q_conditioned_[m],
+                xt::all(xt::isclose(metrics_q_conditioned_[m],
                                     metrics_q_preconditioned_[m],
-                                    1e-05, 1e-08, true))
-                == xt::xscalar<double>(metrics_q_conditioned_[m].size())
+                                    1e-05, 1e-06, true))
         ) << "Failure for (" << all_metrics_p[m] << ")";
     }
@@ -697,10 +686,9 @@ TEST(ProbabilistTests, TestMaskingConditions)
     for (std::size_t m = 0; m < all_metrics_p.size(); m++)
    {
         EXPECT_TRUE(
-                xt::sum(xt::isclose(metrics_t_conditioned[m],
+                xt::all(xt::isclose(metrics_t_conditioned[m],
                                     metrics_t_subset[m],
-                                    1e-05, 1e-08, true))
-                == xt::xscalar<double>(metrics_t_conditioned[m].size())
+                                    1e-05, 1e-06, true))
         ) << "Failure for (" << all_metrics_p[m] << ")";
     }
 }
@@ -778,18 +766,16 @@ TEST(ProbabilistTests, TestMissingData)
         {
             // for leadtime 1
             EXPECT_TRUE(
-                    xt::sum(xt::isclose(xt::view(metrics_nan[m], xt::all(), 0),
+                    xt::all(xt::isclose(xt::view(metrics_nan[m], xt::all(), 0),
                                         xt::view(metrics_pp1[m], xt::all(), 0),
                                         1e-05, 1e-08, true))
-                    == xt::xscalar<double>(metrics_pp1[m].size())
             ) << "Failure for (" << all_metrics_p[m] << ", " << "leadtime 1)";

             // for leadtime 2
             EXPECT_TRUE(
-                    xt::sum(xt::isclose(xt::view(metrics_nan[m], xt::all(), 1),
+                    xt::all(xt::isclose(xt::view(metrics_nan[m], xt::all(), 1),
                                         xt::view(metrics_pp2[m], xt::all(), 0),
                                         1e-05, 1e-08, true))
-                    == xt::xscalar<double>(metrics_pp2[m].size())
             ) << "Failure for (" << all_metrics_p[m] << ", " << "leadtime 2)";
         }
     }
@@ -870,9 +856,9 @@ TEST(ProbabilistTests, TestBootstrap)
         // ---------------------------------------------------------------------
         EXPECT_TRUE(
-                xt::allclose(
+                xt::all(xt::isclose(
                         metrics_bts[m], metrics_rep[m]
-                )
+                ))
         ) << "Failure for (" << all_metrics_p[m] << ")";
     }
 }
\ No newline at end of file