diff --git a/evalhyd/evald.py b/evalhyd/evald.py
index 6abd841cbd335bf587ae18e3d8207bbb0f78118f..05ba76ec8f6582b735dcd811b045765b6138e10e 100644
--- a/evalhyd/evald.py
+++ b/evalhyd/evald.py
@@ -1,6 +1,6 @@
 from typing import List, Dict
 from numpy import dtype
-from numpy.typing import NDArray, ArrayLike
+from numpy.typing import NDArray
 
 try:
     from ._evalhyd import _evald
@@ -15,9 +15,9 @@ def evald(q_obs: NDArray[dtype('float64')],
           exponent: float = None,
           epsilon: float = None,
           t_msk: NDArray[dtype('bool')] = None,
-          m_cdt: ArrayLike = None,
+          m_cdt: NDArray[dtype('|S32')] = None,
           bootstrap: Dict[str, int] = None,
-          dts: ArrayLike = None,
+          dts: NDArray[dtype('|S32')] = None,
           seed: int = None) -> List[NDArray[dtype('float64')]]:
     """Function to evaluate deterministic streamflow predictions"""
 
@@ -47,4 +47,19 @@ def evald(q_obs: NDArray[dtype('float64')],
     if seed is not None:
         kwargs['seed'] = seed
 
+    # check array ranks
+    _expected = {
+        'q_obs': 2,
+        'q_prd': 2,
+        't_msk': 3,
+        'm_cdt': 2,
+        'dts': 1
+    }
+
+    for arg, ndim in _expected.items():
+        if arg in kwargs and kwargs[arg].ndim != ndim:
+            raise RuntimeError(
+                f"'{arg}' must feature {ndim} {'axis' if ndim == 1 else 'axes'}"
+            )
+
     return _evald(**kwargs)
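
The check above turns a shape mistake into an immediate, explicit `RuntimeError` instead of an opaque failure inside the compiled `_evald` extension. A minimal sketch of the new behaviour (the metric name `"NSE"` and the positional order `q_obs, q_prd, metrics` are assumptions inferred from the test calls further down, not verified against the full signature):

```python
import numpy as np
import evalhyd

obs = np.array([[4.7, 4.3, 5.5, 2.7, 4.1]])  # shape (1, 5): the required 2 axes
prd = np.array([[5.3, 4.2, 5.7, 2.3, 3.1]])  # shape (1, 5): the required 2 axes

try:
    # obs[0] has a single axis, so the rank check fires before _evald runs
    evalhyd.evald(obs[0], prd, ["NSE"])
except RuntimeError as err:
    print(err)  # 'q_obs' must feature 2 axes
```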
diff --git a/evalhyd/evalp.py b/evalhyd/evalp.py
index a50b365fbc637c0e2f488cb0241ef8187fee52be..17e2c5955deafa75e4a85409db7a95655e688145 100644
--- a/evalhyd/evalp.py
+++ b/evalhyd/evalp.py
@@ -1,6 +1,6 @@
 from typing import List, Dict
 from numpy import dtype
-from numpy.typing import NDArray, ArrayLike
+from numpy.typing import NDArray
 
 try:
     from ._evalhyd import _evalp
@@ -15,9 +15,9 @@ def evalp(q_obs: NDArray[dtype('float64')],
           events: str = None,
           c_lvl: NDArray[dtype('float64')] = None,
           t_msk: NDArray[dtype('bool')] = None,
-          m_cdt: ArrayLike = None,
+          m_cdt: NDArray[dtype('|S32')] = None,
           bootstrap: Dict[str, int] = None,
-          dts: ArrayLike = None,
+          dts: NDArray[dtype('|S32')] = None,
           seed: int = None) -> List[NDArray[dtype('float64')]]:
     """Function to evaluate probabilistic streamflow predictions"""
 
@@ -46,4 +46,21 @@ def evalp(q_obs: NDArray[dtype('float64')],
     if seed is not None:
         kwargs['seed'] = seed
 
+    # check array ranks
+    _expected = {
+        'q_obs': 2,
+        'q_prd': 4,
+        'q_thr': 2,
+        'c_lvl': 1,
+        't_msk': 4,
+        'm_cdt': 2,
+        'dts': 1
+    }
+
+    for arg, ndim in _expected.items():
+        if arg in kwargs and kwargs[arg].ndim != ndim:
+            raise RuntimeError(
+                f"'{arg}' must feature {ndim} {'axis' if ndim == 1 else 'axes'}"
+            )
+
     return _evalp(**kwargs)
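
One consequence of these checks, which the test updates below accommodate: `kwargs[arg].ndim` presumes a NumPy array, so a plain Python list (previously admissible under the removed `ArrayLike` hints) now fails with an `AttributeError` rather than the intended rank message. Hence `c_lvl=[30., 80.]` becomes `c_lvl=numpy.array([30., 80.])` throughout `tests/test_probabilist.py`. A sketch of a conforming `evalp` call, assuming the `q_prd` axis order is (sites, lead times, members, time steps) as the test fixtures suggest, with the metric name `"CR"` purely illustrative:

```python
import numpy as np
import evalhyd

obs = np.array([[4.7, 4.3, 5.5, 2.7, 4.1]])      # (sites, time steps): 2 axes
prd = np.array([[[[5.3, 4.2, 5.7, 2.3, 3.1],
                  [4.3, 4.2, 4.7, 4.3, 3.3],
                  [5.3, 5.2, 5.7, 2.3, 3.9]]]])  # shape (1, 1, 3, 5): 4 axes
lvl = np.array([30., 80.])                       # (levels,): 1 axis, not a list

res = evalhyd.evalp(obs, prd, ["CR"], c_lvl=lvl)
```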
diff --git a/tests/test_probabilist.py b/tests/test_probabilist.py
index 7a2c35ee82ee34d279df50a7820ff64bcd5c82c0..7f74f1f550172441ac6001382c75387afc55b3e8 100644
--- a/tests/test_probabilist.py
+++ b/tests/test_probabilist.py
@@ -131,10 +131,11 @@ class TestMetrics(unittest.TestCase):
                 )
 
     def test_intervals_metrics(self):
+        lvl = numpy.array([30., 80.])
         for metric in self.expected_itv.keys():
             with self.subTest(metric=metric):
                 numpy.testing.assert_almost_equal(
-                    evalhyd.evalp(_obs, _prd, [metric], c_lvl=[30., 80.])[0],
+                    evalhyd.evalp(_obs, _prd, [metric], c_lvl=lvl)[0],
                     self.expected_itv[metric]
                 )
 
@@ -176,7 +177,7 @@ class TestMasking(unittest.TestCase):
 
     def test_conditions(self):
         with self.subTest(conditions="observed streamflow values"):
-            cdt = numpy.array([["q_obs{<2000,>3000}"]], dtype='|S32')
+            cdt = numpy.array([["q_obs{<2000,>3000}"]])
 
             msk = (_obs[0] < 2000) | (_obs[0] > 3000)
 
@@ -215,28 +216,29 @@ class TestMissingData(unittest.TestCase):
                 continue
 
             with self.subTest(metric=metric):
+                lvl = numpy.array([30., 80.])
                 numpy.testing.assert_almost_equal(
                     # missing data flagged as NaN
                     evalhyd.evalp(
-                        [[4.7, numpy.nan, 5.5, 2.7, 4.1]],
-                        [[[[5.3, 4.2, 5.7, 2.3, numpy.nan],
-                           [4.3, 4.2, 4.7, 4.3, numpy.nan],
-                           [5.3, 5.2, 5.7, 2.3, numpy.nan]]]],
+                        numpy.array([[4.7, numpy.nan, 5.5, 2.7, 4.1]]),
+                        numpy.array([[[[5.3, 4.2, 5.7, 2.3, numpy.nan],
+                                       [4.3, 4.2, 4.7, 4.3, numpy.nan],
+                                       [5.3, 5.2, 5.7, 2.3, numpy.nan]]]]),
                         [metric],
                         thr,
                         "high",
-                        [30., 80.]
+                        lvl
                     )[0],
                     # missing data pairwise deleted from series
                     evalhyd.evalp(
-                        [[4.7, 5.5, 2.7]],
-                        [[[[5.3, 5.7, 2.3],
-                           [4.3, 4.7, 4.3],
-                           [5.3, 5.7, 2.3]]]],
+                        numpy.array([[4.7, 5.5, 2.7]]),
+                        numpy.array([[[[5.3, 5.7, 2.3],
+                                       [4.3, 4.7, 4.3],
+                                       [5.3, 5.7, 2.3]]]]),
                         [metric],
                         thr,
                         "high",
-                        [30., 80.]
+                        lvl
                     )[0]
                 )
 
@@ -265,6 +267,7 @@ class TestUncertainty(unittest.TestCase):
                 continue
 
             with self.subTest(metric=metric):
+                lvl = numpy.array([30., 80.])
                 numpy.testing.assert_almost_equal(
                     # bootstrap with only one year of data
                     # (compare last sample only to have matching dimensions)
@@ -278,7 +281,7 @@ class TestUncertainty(unittest.TestCase):
                             "n_samples": 10, "len_sample": 3, "summary": 0
                         },
                         dts=dts_1yr,
-                        c_lvl=[30., 80.]
+                        c_lvl=lvl
                     )[0][:, :, :, [0]],
                     # repeat year of data three times to correspond to a
                     # bootstrap sample of length 3
@@ -288,7 +291,7 @@ class TestUncertainty(unittest.TestCase):
                         [metric],
                         q_thr=thr,
                         events="high",
-                        c_lvl=[30., 80.]
+                        c_lvl=lvl
                     )[0]
                 )
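
Finally, note the `test_conditions` change: dropping the explicit `dtype='|S32'` leaves NumPy to infer a unicode dtype for the condition array, which the new `NDArray[dtype('|S32')]` hints suggest the bindings coerce internally (an assumption; Python type hints are advisory and not enforced at runtime). A sketch of building such a condition array:

```python
import numpy as np

# one condition string per site; numpy infers a '<U19' unicode dtype here,
# where the previous test forced '|S32' bytes explicitly
cdt = np.array([["q_obs{<2000,>3000}"]])
assert cdt.ndim == 2  # satisfies the new rank check for 'm_cdt'
print(cdt.dtype)      # e.g. <U19
```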