diff --git a/tests/test_determinist.cpp b/tests/test_determinist.cpp
index b7ee38e3364101e29d78b27393ac4632089ce7f4..be2820410e29b35935a511d5b37f305337ffa6d8 100644
--- a/tests/test_determinist.cpp
+++ b/tests/test_determinist.cpp
@@ -6,6 +6,7 @@
 #include <vector>
 #include <tuple>
 #include <array>
+
 #include <gtest/gtest.h>
 
 #include <xtensor/xtensor.hpp>
@@ -107,7 +108,7 @@ TEST(DeterministTests, TestTransform)
              {{0.883019}},
              {{0.883029}},
              {{0.882972}}};
-    EXPECT_TRUE(xt::allclose(metrics[0], nse_sqrt));
+    EXPECT_TRUE(xt::all(xt::isclose(metrics[0], nse_sqrt)));
 
     // compute and check results on inverted streamflow series
     metrics = evalhyd::evald(observed, predicted, {"NSE"}, "inv");
@@ -118,7 +119,7 @@ TEST(DeterministTests, TestTransform)
              {{0.737429}},
              {{0.737546}},
              {{0.737595}}};
-    EXPECT_TRUE(xt::allclose(metrics[0], nse_inv));
+    EXPECT_TRUE(xt::all(xt::isclose(metrics[0], nse_inv)));
 
-    // compute and check results on square-rooted streamflow series
+    // compute and check results on log-transformed streamflow series
     metrics = evalhyd::evald(observed, predicted, {"NSE"}, "log");
@@ -129,7 +130,7 @@ TEST(DeterministTests, TestTransform)
              {{0.893585}},
              {{0.893758}},
              {{0.893793}}};
-    EXPECT_TRUE(xt::allclose(metrics[0], nse_log));
+    EXPECT_TRUE(xt::all(xt::isclose(metrics[0], nse_log)));
 
     // compute and check results on power-transformed streamflow series
     metrics = evalhyd::evald(observed, predicted, {"NSE"}, "pow", 0.2);
@@ -140,7 +141,7 @@ TEST(DeterministTests, TestTransform)
              {{0.899451}},
              {{0.899578}},
              {{0.899588}}};
-    EXPECT_TRUE(xt::allclose(metrics[0], nse_pow));
+    EXPECT_TRUE(xt::all(xt::isclose(metrics[0], nse_pow)));
 
 }
 
@@ -172,7 +173,7 @@ TEST(DeterministTests, TestMasks)
     // check results are identical
     for (std::size_t m = 0; m < all_metrics_d.size(); m++)
     {
-        EXPECT_TRUE(xt::allclose(metrics_masked[m], metrics_subset[m]))
+        EXPECT_TRUE(xt::all(xt::isclose(metrics_masked[m], metrics_subset[m])))
         << "Failure for (" << all_metrics_d[m] << ")";
     }
 }
@@ -212,9 +213,9 @@ TEST(DeterministTests, TestMaskingConditions)
     for (std::size_t m = 0; m < all_metrics_d.size(); m++)
     {
         EXPECT_TRUE(
-                xt::allclose(
+                xt::all(xt::isclose(
                         metrics_q_conditioned[m], metrics_q_preconditioned[m]
-                )
+                ))
         ) << "Failure for (" << all_metrics_d[m] << ")";
     }
 
@@ -245,9 +246,9 @@ TEST(DeterministTests, TestMaskingConditions)
     for (std::size_t m = 0; m < all_metrics_d.size(); m++)
     {
         EXPECT_TRUE(
-                xt::allclose(
+                xt::all(xt::isclose(
                         metrics_q_conditioned_[m], metrics_q_preconditioned_[m]
-                )
+                ))
         ) << "Failure for (" << all_metrics_d[m] << ")";
     }
 
@@ -276,9 +277,9 @@ TEST(DeterministTests, TestMaskingConditions)
     for (std::size_t m = 0; m < all_metrics_d.size(); m++)
     {
         EXPECT_TRUE(
-                xt::allclose(
+                xt::all(xt::isclose(
                         metrics_t_conditioned[m], metrics_t_subset[m]
-                )
+                ))
         ) << "Failure for (" << all_metrics_d[m] << ")";
     }
 }
@@ -323,10 +324,10 @@ TEST(DeterministTests, TestMissingData)
 
             // compare to check results are the same
             EXPECT_TRUE(
-                    xt::allclose(
+                    xt::all(xt::isclose(
                             xt::view(metrics_nan[m], p),
                             metrics_sbs[0]
-                    )
+                    ))
             ) << "Failure for (" << all_metrics_d[m] << ")";
         }
     }
@@ -390,9 +391,9 @@ TEST(DeterministTests, TestBootstrap)
     for (std::size_t m = 0; m < all_metrics_d.size(); m++)
     {
         EXPECT_TRUE(
-                xt::allclose(
+                xt::all(xt::isclose(
                         metrics_bts[m], metrics_rep[m]
-                )
+                ))
         ) << "Failure for (" << all_metrics_d[m] << ")";
     }
 }
diff --git a/tests/test_probabilist.cpp b/tests/test_probabilist.cpp
index f8a1100c263dc7b7d817d023fc18e7b04e384691..816f1bd0a34fea8d630c7f1cb67cf11627dbe7f5 100644
--- a/tests/test_probabilist.cpp
+++ b/tests/test_probabilist.cpp
@@ -73,16 +73,14 @@ TEST(ProbabilistTests, TestBrier)
     xt::xtensor<double, 5> bs =
             {{{{{0.10615136, 0.07395622, 0.08669186, NAN}}}}};
     EXPECT_TRUE(
-            xt::sum(xt::isclose(metrics[0], bs, 1e-05, 1e-08, true))
-            == xt::xscalar<double>(4)
+            xt::all(xt::isclose(metrics[0], bs, 1e-05, 1e-08, true))
     );
 
     // Brier skill scores
     xt::xtensor<double, 5> bss =
             {{{{{0.5705594, 0.6661165, 0.5635126, NAN}}}}};
     EXPECT_TRUE(
-            xt::sum(xt::isclose(metrics[1], bss, 1e-05, 1e-08, true))
-            == xt::xscalar<double>(4)
+            xt::all(xt::isclose(metrics[1], bss, 1e-05, 1e-08, true))
     );
 
     // Brier calibration-refinement decompositions
@@ -92,8 +90,7 @@ TEST(ProbabilistTests, TestBrier)
                  {0.010139431, 0.1220601, 0.1986125},
                  {NAN, NAN, NAN}}}}}};
     EXPECT_TRUE(
-            xt::sum(xt::isclose(metrics[2], bs_crd, 1e-05, 1e-08, true))
-            == xt::xscalar<double>(12)
+            xt::all(xt::isclose(metrics[2], bs_crd, 1e-05, 1e-08, true))
     );
 
     // Brier likelihood-base rate decompositions
@@ -103,8 +100,7 @@ TEST(ProbabilistTests, TestBrier)
                  {0.017191279, 0.1048221, 0.1743227},
                  {NAN, NAN, NAN}}}}}};
     EXPECT_TRUE(
-            xt::sum(xt::isclose(metrics[3], bs_lbd, 1e-05, 1e-08, true))
-            == xt::xscalar<double>(12)
+            xt::all(xt::isclose(metrics[3], bs_lbd, 1e-05, 1e-08, true))
     );
 }
 
@@ -139,12 +135,12 @@ TEST(ProbabilistTests, TestQuantiles)
                  205.189587, 200.395746, 195.2372, 190.080139, 185.384244,
                  180.617858, 174.58323, 169.154093, 163.110932, 156.274796,
                  147.575315}}}}};
-    EXPECT_TRUE(xt::allclose(metrics[0], qs));
+    EXPECT_TRUE(xt::all(xt::isclose(metrics[0], qs)));
 
     // Continuous ranked probability scores
     xt::xtensor<double, 4> crps =
             {{{{252.956919}}}};
-    EXPECT_TRUE(xt::allclose(metrics[1], crps));
+    EXPECT_TRUE(xt::all(xt::isclose(metrics[1], crps)));
 }
 
 TEST(ProbabilistTests, TestContingency)
@@ -224,8 +220,7 @@ TEST(ProbabilistTests, TestContingency)
                  { 0.848921,  0.854369,  0.752941, NAN},
                  { 0.848921,  0.84466 ,  0.752941, NAN}}}}}};
     EXPECT_TRUE(
-            xt::sum(xt::isclose(metrics[0], pod, 1e-05, 1e-08, true))
-            == xt::xscalar<double>(208)
+            xt::all(xt::isclose(metrics[0], pod, 1e-05, 1e-08, true))
     );
 
     // POFD
@@ -283,8 +278,7 @@ TEST(ProbabilistTests, TestContingency)
                  { 0.081395,  0.038462,  0.026549, NAN},
                  { 0.081395,  0.038462,  0.022124, NAN}}}}}};
     EXPECT_TRUE(
-            xt::sum(xt::isclose(metrics[1], pofd, 1e-04, 1e-07, true))
-            == xt::xscalar<double>(208)
+            xt::all(xt::isclose(metrics[1], pofd, 1e-04, 1e-07, true))
     );
 
     // FAR
@@ -342,8 +336,7 @@ TEST(ProbabilistTests, TestContingency)
                  { 0.106061,  0.083333,  0.085714, NAN},
                  { 0.106061,  0.084211,  0.072464, NAN}}}}}};
     EXPECT_TRUE(
-            xt::sum(xt::isclose(metrics[2], far, 1e-05, 1e-08, true))
-            == xt::xscalar<double>(208)
+            xt::all(xt::isclose(metrics[2], far, 1e-05, 1e-08, true))
     );
 
     // CSI
@@ -402,16 +395,14 @@ TEST(ProbabilistTests, TestContingency)
                  { 0.771242,  0.783784,  0.711111, NAN}}}}}}
     ;
     EXPECT_TRUE(
-            xt::sum(xt::isclose(metrics[3], csi, 1e-05, 1e-08, true))
-            == xt::xscalar<double>(208)
+            xt::all(xt::isclose(metrics[3], csi, 1e-05, 1e-08, true))
     );
 
     // ROC skill scores
     xt::xtensor<double, 5> rocss =
             {{{{{ 0.71085 ,  0.783047,  0.713066, NAN}}}}};
     EXPECT_TRUE(
-            xt::sum(xt::isclose(metrics[4], rocss, 1e-05, 1e-08, true))
-            == xt::xscalar<double>(4)
+            xt::all(xt::isclose(metrics[4], rocss, 1e-05, 1e-08, true))
     );
 }
 
@@ -616,10 +607,9 @@ TEST(ProbabilistTests, TestMaskingConditions)
     for (std::size_t m = 0; m < all_metrics_p.size(); m++)
     {
         EXPECT_TRUE(
-                xt::sum(xt::isclose(metrics_q_conditioned[m],
+                xt::all(xt::isclose(metrics_q_conditioned[m],
                                     metrics_q_preconditioned[m],
-                                    1e-05, 1e-08, true))
-                == xt::xscalar<double>(metrics_q_conditioned[m].size())
+                                    1e-05, 1e-06, true))
         ) << "Failure for (" << all_metrics_p[m] << ")";
     }
 
@@ -658,10 +648,9 @@ TEST(ProbabilistTests, TestMaskingConditions)
     for (std::size_t m = 0; m < all_metrics_p.size(); m++)
     {
         EXPECT_TRUE(
-                xt::sum(xt::isclose(metrics_q_conditioned_[m],
+                xt::all(xt::isclose(metrics_q_conditioned_[m],
                                     metrics_q_preconditioned_[m],
-                                    1e-05, 1e-08, true))
-                == xt::xscalar<double>(metrics_q_conditioned_[m].size())
+                                    1e-05, 1e-06, true))
         ) << "Failure for (" << all_metrics_p[m] << ")";
     }
 
@@ -697,10 +686,9 @@ TEST(ProbabilistTests, TestMaskingConditions)
     for (std::size_t m = 0; m < all_metrics_p.size(); m++)
     {
         EXPECT_TRUE(
-                xt::sum(xt::isclose(metrics_t_conditioned[m],
+                xt::all(xt::isclose(metrics_t_conditioned[m],
                                     metrics_t_subset[m],
-                                    1e-05, 1e-08, true))
-                == xt::xscalar<double>(metrics_t_conditioned[m].size())
+                                    1e-05, 1e-06, true))
         ) << "Failure for (" << all_metrics_p[m] << ")";
     }
 }
@@ -778,18 +766,16 @@ TEST(ProbabilistTests, TestMissingData)
     {
         // for leadtime 1
         EXPECT_TRUE(
-                xt::sum(xt::isclose(xt::view(metrics_nan[m], xt::all(), 0),
+                xt::all(xt::isclose(xt::view(metrics_nan[m], xt::all(), 0),
                                     xt::view(metrics_pp1[m], xt::all(), 0),
                                     1e-05, 1e-08, true))
-                == xt::xscalar<double>(metrics_pp1[m].size())
         ) << "Failure for (" << all_metrics_p[m] << ", " << "leadtime 1)";
         
         // for leadtime 2
         EXPECT_TRUE(
-                xt::sum(xt::isclose(xt::view(metrics_nan[m], xt::all(), 1),
+                xt::all(xt::isclose(xt::view(metrics_nan[m], xt::all(), 1),
                                     xt::view(metrics_pp2[m], xt::all(), 0),
                                     1e-05, 1e-08, true))
-                == xt::xscalar<double>(metrics_pp2[m].size())
         ) << "Failure for (" << all_metrics_p[m] << ", " << "leadtime 2)";
     }
 }
@@ -870,9 +856,9 @@ TEST(ProbabilistTests, TestBootstrap)
         // ---------------------------------------------------------------------
 
         EXPECT_TRUE(
-                xt::allclose(
+                xt::all(xt::isclose(
                         metrics_bts[m], metrics_rep[m]
-                )
+                ))
         ) << "Failure for (" << all_metrics_p[m] << ")";
     }
 }
\ No newline at end of file