Commit 99d76aab authored by Thibault Hallouin

release v0.1.0.0-rc.1

Pipeline #46404 passed in 4 minutes and 52 seconds
Showing 1103 additions and 389 deletions (+1103 -389)
.gitlab-ci.yml
-image: python:3.9.13
+image: mambaorg/micromamba
-test-evalhyd-python:
-  stage: test
+stages:
+  - build_and_test
+build-and-test:
+  stage: build_and_test
   variables:
     GIT_SUBMODULE_STRATEGY: recursive
   script:
+    # install dependencies
+    - micromamba install --yes --file environment.yml
+    - export EVALHYD_PYTHON_VENDOR_XTL=FALSE
+    - export EVALHYD_PYTHON_VENDOR_XTENSOR=FALSE
+    - export EVALHYD_PYTHON_VENDOR_XTENSOR_PYTHON=FALSE
+    # vendor evalhyd (while waiting for evalhyd to be uploaded to conda-forge)
+    - micromamba install --yes -c conda-forge git
+    - export EVALHYD_PYTHON_VENDOR_EVALHYD=TRUE
     # print Python version
     - python --version
     # compile and install package
-    - python -m pip install --user .[tests]
+    - python -m pip install --user .[tests] -v
     # make sure package can be imported and check version
     - python -c "import evalhyd;print(evalhyd.__version__)"
     # run test suite
...
.gitmodules
-[submodule "deps/evalhyd"]
-    path = deps/evalhyd
-    url = https://gitlab.irstea.fr/HYCAR-Hydro/evalhyd/evalhyd.git
+[submodule "deps/xtl"]
+    path = deps/xtl
+    url = https://github.com/xtensor-stack/xtl.git
 [submodule "deps/xtensor-python"]
     path = deps/xtensor-python
     url = https://github.com/xtensor-stack/xtensor-python.git
+[submodule "deps/xtensor"]
+    path = deps/xtensor
+    url = https://github.com/xtensor-stack/xtensor.git
+[submodule "deps/evalhyd-cpp"]
+    path = deps/evalhyd-cpp
+    url = https://gitlab.irstea.fr/HYCAR-Hydro/evalhyd/evalhyd-cpp.git
CMakeLists.txt
cmake_minimum_required(VERSION 3.15)

project(
    EvalHyd-Python
    LANGUAGES CXX C
    VERSION 0.1.0.0
    DESCRIPTION "Python bindings for evalhyd utility"
)

add_library(
    evalhyd-python MODULE
    ${CMAKE_CURRENT_SOURCE_DIR}/src/evalhyd-python.cpp
)

# ------------------------------------------------------------------------------
# dependencies and build
# ------------------------------------------------------------------------------

if(SKBUILD)
    find_package(PythonExtensions REQUIRED)
    find_package(NumPy REQUIRED)
    python_extension_module(evalhyd-python)
else()
    find_package(Python COMPONENTS Interpreter Development NumPy REQUIRED)
    target_link_libraries(evalhyd-python Python::NumPy)
    # use only header if numpy target links to libpython
    #target_include_directories(evalhyd-python SYSTEM PRIVATE "${Python_NumPy_INCLUDE_DIRS}")
endif()

find_package(xtl 0.7.5 REQUIRED)
message(STATUS "Found xtl: ${xtl_INCLUDE_DIRS}/xtl")

find_package(xtensor 0.24.6 REQUIRED)
message(STATUS "Found xtensor: ${xtensor_INCLUDE_DIRS}/xtensor")

find_package(xtensor-python 0.26.1 REQUIRED)
message(STATUS "Found xtensor-python: ${xtensor-python_INCLUDE_DIRS}/xtensor-python")

find_package(pybind11 REQUIRED)
message(STATUS "Found pybind11: ${pybind11_INCLUDE_DIRS}/pybind11")

if(DEFINED EVALHYD_SRC)
    set(EVALHYD_BUILD_TEST OFF CACHE BOOL "configure and compile tests")
    add_subdirectory(${EVALHYD_SRC} deps/evalhyd)
else()
    find_package(EvalHyd 0.1.0 REQUIRED)
endif()

target_link_libraries(
    evalhyd-python
    EvalHyd::evalhyd
    xtensor-python
    pybind11::module
    pybind11::lto
    pybind11::windows_extras
)

set_target_properties(
    evalhyd-python PROPERTIES
    OUTPUT_NAME evalhyd
)

pybind11_extension(evalhyd-python)

if(NOT MSVC AND NOT ${CMAKE_BUILD_TYPE} MATCHES Debug|RelWithDebInfo)
    # Strip unnecessary sections of the binary on Linux/macOS
    pybind11_strip(evalhyd-python)
endif()

set_target_properties(
    evalhyd-python PROPERTIES
    CXX_VISIBILITY_PRESET "hidden"
    CUDA_VISIBILITY_PRESET "hidden"
)

# add include directories
target_include_directories(
    evalhyd-python
    PUBLIC
    ${NUMPY_INCLUDE_DIRS}
)

# ------------------------------------------------------------------------------
# installation
# ------------------------------------------------------------------------------

if(SKBUILD)
    install(
        TARGETS evalhyd-python
        DESTINATION evalhyd-python
    )
endif()
LICENCE.rst 0 → 100644
This diff is collapsed.
@@ -2,4 +2,4 @@
 Python bindings for `evalhyd` utility
-Documentation: https://hycar-hydro.gitlab.irstea.page/evalhyd/evalhyd-docs/python
+Documentation: https://hydrogr.github.io/evalhyd/python
changelog.rst 0 → 100644
.. default-role:: obj

..

latest
------

Yet to be versioned and released. Only available from *dev* branch until then.

v0.1.0.0
--------

Released on 2023-04-14.

* first release
Subproject commit 592caff2eafc89a3cad9da4d47bb1a2eb777b2eb
Subproject commit d86043f5a28b697a902cc52842ed439583f5f42d
Subproject commit e534928cc30eb3a4a05539747d98e1d6868c2d62
-Subproject commit 6544a559ae98953394a1c51d8a637b71882af8da
+Subproject commit 6a286681c48c35d3df342f291938f4825b20c0a3
Subproject commit fea39142693fbbc2ef19d75012bc6b46ef0a5f8c
environment.yml
channels:
  - conda-forge

dependencies:
  # Build dependencies
  - cxx-compiler
  - c-compiler
  - cmake
  - make
  - scikit-build
  # Host dependencies
  - python
  - numpy
  - pybind11
  - xtl==0.7.5
  - xtensor==0.24.6
  - xtensor-python==0.26.1
  # - evalhyd-cpp==0.1.0
  # Test dependencies
  - numpy
"""An evaluator for determinist and probabilist streamflow predictions."""
from .version import __version__
from .evald import evald
from .evalp import evalp
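Not part of the diff: the CI job's sanity check exercises exactly this public surface, e.g.

import evalhyd

print(evalhyd.__version__)   # '0.1.0.0', sourced from evalhyd/version.py
print(evalhyd.evald)         # deterministic evaluation entry point
print(evalhyd.evalp)         # probabilistic evaluation entry point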
evalhyd/evald.py
from typing import List, Dict

from numpy import dtype
from numpy.typing import NDArray

try:
    from ._evalhyd import _evald
except ImportError:
    pass


def evald(q_obs: NDArray[dtype('float64')],
          q_prd: NDArray[dtype('float64')],
          metrics: List[str],
          q_thr: NDArray[dtype('float64')] = None,
          events: str = None,
          transform: str = None,
          exponent: float = None,
          epsilon: float = None,
          t_msk: NDArray[dtype('bool')] = None,
          m_cdt: NDArray[dtype('|S32')] = None,
          bootstrap: Dict[str, int] = None,
          dts: NDArray[dtype('|S32')] = None,
          seed: int = None,
          diagnostics: List[str] = None) -> List[NDArray[dtype('float64')]]:
    """Function to evaluate deterministic streamflow predictions"""

    # required arguments
    kwargs = {
        # convert 1D array into 2D array view
        'q_obs': q_obs.reshape(1, q_obs.size) if q_obs.ndim == 1 else q_obs,
        'q_prd': q_prd.reshape(1, q_prd.size) if q_prd.ndim == 1 else q_prd,
        'metrics': metrics
    }

    # optional arguments
    if q_thr is not None:
        kwargs['q_thr'] = (
            q_thr.reshape(1, q_thr.size) if q_thr.ndim == 1 else q_thr
        )
    if events is not None:
        kwargs['events'] = events
    if transform is not None:
        kwargs['transform'] = transform
    if exponent is not None:
        kwargs['exponent'] = exponent
    if epsilon is not None:
        kwargs['epsilon'] = epsilon
    if t_msk is not None:
        kwargs['t_msk'] = t_msk
    if m_cdt is not None:
        kwargs['m_cdt'] = m_cdt
    if bootstrap is not None:
        kwargs['bootstrap'] = bootstrap
    if dts is not None:
        kwargs['dts'] = dts
    if seed is not None:
        kwargs['seed'] = seed
    if diagnostics is not None:
        kwargs['diagnostics'] = diagnostics

    # check array ranks
    _expected = {
        'q_obs': 2,
        'q_prd': 2,
        'q_thr': 2,
        't_msk': 3,
        'm_cdt': 2,
        'dts': 1
    }
    for arg, val in _expected.items():
        try:
            if kwargs[arg].ndim != val:
                raise RuntimeError(
                    f"'{arg}' must feature {val} {'axis' if val == 1 else 'axes'}"
                )
        except KeyError:
            pass

    return _evald(**kwargs)
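Not part of the diff: a minimal usage sketch of this wrapper, assuming 'NSE' is a valid deterministic metric name in the underlying evalhyd library (the array values are illustrative).

import numpy as np
import evalhyd

# synthetic observations and predictions; 1D inputs are turned into
# (1, time) views by the wrapper before being passed to _evald
q_obs = np.array([4.7, 4.3, 5.5, 2.7, 4.1])
q_prd = np.array([5.3, 4.2, 5.7, 2.3, 3.1])

# one array is returned per requested metric
nse, = evalhyd.evald(q_obs, q_prd, metrics=['NSE'])
print(nse.shape)   # (series, subsets, samples)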
evalhyd/evalp.py
from typing import List, Dict

from numpy import dtype
from numpy.typing import NDArray

try:
    from ._evalhyd import _evalp
except ImportError:
    pass


def evalp(q_obs: NDArray[dtype('float64')],
          q_prd: NDArray[dtype('float64')],
          metrics: List[str],
          q_thr: NDArray[dtype('float64')] = None,
          events: str = None,
          c_lvl: NDArray[dtype('float64')] = None,
          t_msk: NDArray[dtype('bool')] = None,
          m_cdt: NDArray[dtype('|S32')] = None,
          bootstrap: Dict[str, int] = None,
          dts: NDArray[dtype('|S32')] = None,
          seed: int = None,
          diagnostics: List[str] = None) -> List[NDArray[dtype('float64')]]:
    """Function to evaluate probabilistic streamflow predictions"""

    # required arguments
    kwargs = {
        'q_obs': q_obs,
        'q_prd': q_prd,
        'metrics': metrics
    }

    # optional arguments
    if q_thr is not None:
        kwargs['q_thr'] = q_thr
    if events is not None:
        kwargs['events'] = events
    if c_lvl is not None:
        kwargs['c_lvl'] = c_lvl
    if t_msk is not None:
        kwargs['t_msk'] = t_msk
    if m_cdt is not None:
        kwargs['m_cdt'] = m_cdt
    if bootstrap is not None:
        kwargs['bootstrap'] = bootstrap
    if dts is not None:
        kwargs['dts'] = dts
    if seed is not None:
        kwargs['seed'] = seed
    if diagnostics is not None:
        kwargs['diagnostics'] = diagnostics

    # check array ranks
    _expected = {
        'q_obs': 2,
        'q_prd': 4,
        'q_thr': 2,
        'c_lvl': 1,
        't_msk': 4,
        'm_cdt': 2,
        'dts': 1
    }
    for arg, val in _expected.items():
        try:
            if kwargs[arg].ndim != val:
                raise RuntimeError(
                    f"'{arg}' must feature {val} {'axis' if val == 1 else 'axes'}"
                )
        except KeyError:
            pass

    return _evalp(**kwargs)
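Not part of the diff: a corresponding sketch for the probabilistic wrapper, assuming 'BS' (Brier score) is a valid metric name and 'high' a valid events value in the underlying evalhyd library.

import numpy as np
import evalhyd

# one site, one lead time, three ensemble members, five time steps
q_obs = np.array([[4.7, 4.3, 5.5, 2.7, 4.1]])                # (sites, time)
q_prd = np.array([[[[5.3, 4.2, 5.7, 2.3, 3.1],
                    [4.3, 4.2, 4.7, 4.3, 3.3],
                    [5.3, 5.2, 5.7, 2.3, 3.9]]]])            # (sites, lead times, members, time)
q_thr = np.array([[4.0, 5.0]])                               # (sites, thresholds)

bs, = evalhyd.evalp(q_obs, q_prd, metrics=['BS'], q_thr=q_thr, events='high')
print(bs.shape)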
#include <pybind11/pybind11.h>
#include <pybind11/stl.h>

#include <array>
#include <optional>

#define STRINGIFY(x) #x
#define MACRO_STRINGIFY(x) STRINGIFY(x)

#define FORCE_IMPORT_ARRAY
#include <xtl/xoptional.hpp>
#include <xtensor/xview.hpp>
#include <xtensor-python/pytensor.hpp>

#include "evalhyd/evald.hpp"
#include "evalhyd/evalp.hpp"

namespace py = pybind11;
using namespace py::literals;

auto evald(
    const xt::pytensor<double, 2>& q_obs,
    const xt::pytensor<double, 2>& q_prd,
    const std::vector<std::string>& metrics,
    const xt::pytensor<double, 2>& q_thr,
    std::optional<std::string> events,
    std::optional<std::string> transform,
    std::optional<double> exponent,
    std::optional<double> epsilon,
    const xt::pytensor<bool, 3>& t_msk,
    const xt::pytensor<std::array<char, 32>, 2>& m_cdt,
    std::optional<std::unordered_map<std::string, int>> bootstrap,
    const std::vector<std::string>& dts,
    std::optional<int> seed,
    std::optional<std::vector<std::string>> diagnostics
)
{
    return evalhyd::evald(
        q_obs,
        q_prd,
        metrics,
        q_thr,
        (events.has_value()) ? events.value() : xtl::missing<std::string>(),
        (transform.has_value()) ? transform.value() : xtl::missing<std::string>(),
        (exponent.has_value()) ? exponent.value() : xtl::missing<double>(),
        (epsilon.has_value()) ? epsilon.value() : xtl::missing<double>(),
        t_msk,
        m_cdt,
        (bootstrap.has_value())
            ? bootstrap.value()
            : xtl::missing<std::unordered_map<std::string, int>>(),
        dts,
        (seed.has_value()) ? seed.value() : xtl::missing<int>(),
        (diagnostics.has_value())
            ? diagnostics.value()
            : xtl::missing<std::vector<std::string>>()
    );
}

auto evalp(
    const xt::pytensor<double, 2>& q_obs,
    const xt::pytensor<double, 4>& q_prd,
    const std::vector<std::string>& metrics,
    const xt::pytensor<double, 2>& q_thr,
    std::optional<std::string> events,
    const std::vector<double>& c_lvl,
    const xt::pytensor<bool, 4>& t_msk,
    const xt::pytensor<std::array<char, 32>, 2>& m_cdt,
    std::optional<std::unordered_map<std::string, int>> bootstrap,
    const std::vector<std::string>& dts,
    std::optional<int> seed,
    std::optional<std::vector<std::string>> diagnostics
)
{
    return evalhyd::evalp(
        q_obs,
        q_prd,
        metrics,
        q_thr,
        (events.has_value()) ? events.value() : xtl::missing<std::string>(),
        c_lvl,
        t_msk,
        m_cdt,
        (bootstrap.has_value())
            ? bootstrap.value()
            : xtl::missing<std::unordered_map<std::string, int>>(),
        dts,
        (seed.has_value()) ? seed.value() : xtl::missing<int>(),
        (diagnostics.has_value())
            ? diagnostics.value()
            : xtl::missing<std::vector<std::string>>()
    );
}

// Python Module and Docstrings
PYBIND11_MODULE(_evalhyd, m)
{
    xt::import_numpy();

    m.doc() = "Python bindings for the C++ core of evalhyd";

    // deterministic evaluation
    m.def(
        "_evald",
        &evald,
        "Function to evaluate deterministic streamflow predictions (2D)",
        py::arg("q_obs"),
        py::arg("q_prd"),
        py::arg("metrics"),
        py::arg("q_thr") = xt::pytensor<double, 2>({0}),
        py::arg("events") = py::none(),
        py::arg("transform") = py::none(),
        py::arg("exponent") = py::none(),
        py::arg("epsilon") = py::none(),
        py::arg("t_msk") = xt::pytensor<bool, 3>({0}),
        py::arg("m_cdt") = xt::pytensor<std::array<char, 32>, 2>({0}),
        py::arg("bootstrap") = py::none(),
        py::arg("dts") = py::list(),
        py::arg("seed") = py::none(),
        py::arg("diagnostics") = py::none()
    );

    // probabilistic evaluation
    m.def(
        "_evalp",
        &evalp,
        "Function to evaluate probabilistic streamflow predictions",
        py::arg("q_obs"),
        py::arg("q_prd"),
        py::arg("metrics"),
        py::arg("q_thr") = xt::pytensor<double, 2>({0}),
        py::arg("events") = py::none(),
        py::arg("c_lvl") = py::list(),
        py::arg("t_msk") = xt::pytensor<bool, 4>({0}),
        py::arg("m_cdt") = xt::pytensor<std::array<char, 32>, 2>({0}),
        py::arg("bootstrap") = py::none(),
        py::arg("dts") = py::list(),
        py::arg("seed") = py::none(),
        py::arg("diagnostics") = py::none()
    );

#ifdef VERSION_INFO
    m.attr("__version__") = MACRO_STRINGIFY(VERSION_INFO);
#else
    m.attr("__version__") = "dev";
#endif
}
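Not part of the diff: for comparison with the public wrappers above, the private extension declared here can also be called directly; it expects 2D arrays and applies the py::none()/empty-tensor defaults. A sketch, assuming the extension is built and importable and 'RMSE' is a valid metric name in evalhyd:

import numpy as np
from evalhyd._evalhyd import _evald

q_obs = np.array([[4.7, 4.3, 5.5, 2.7, 4.1]])   # (series, time)
q_prd = np.array([[5.3, 4.2, 5.7, 2.3, 3.1]])   # (series, time)

rmse, = _evald(q_obs, q_prd, ['RMSE'])
print(rmse.shape)   # (series, subsets, samples)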
evalhyd/version.py
__version__ = '0.1.0.0'
pyproject.toml
@@ -2,7 +2,10 @@
 requires = [
     "setuptools>=42",
     "wheel",
+    "scikit-build",
+    "cmake",
     "pybind11>=2.8.0",
+    "ninja",
     "numpy>1.16",
 ]
...
setup.py
 import sys
 import os
-from pybind11 import get_cmake_dir
 from pybind11.setup_helpers import Pybind11Extension, build_ext
 from setuptools import setup
 import numpy
-__version__ = '0.0.1'
+# collect centrally sourced package version
+with open("evalhyd/version.py", 'r') as fv:
+    exec(fv.read())
+# vendor dependencies (unless told otherwise via environment variable)
+deps = ['xtl', 'xtensor', 'xtensor-python', 'evalhyd-cpp']
+deps_blank_path = os.path.join(os.getcwd(), 'deps', '{}', 'include')
+deps_include_dirs = []
+for dep in deps:
+    if not os.getenv(f"EVALHYD_PYTHON_VENDOR_{dep.upper().replace('-', '_')}") == 'FALSE':
+        # register dependency headers
+        deps_include_dirs.append(deps_blank_path.format(dep))
+        print(f"vendoring {dep}")
+# configure Python extension
 ext_modules = [
     Pybind11Extension(
-        "evalhyd",
-        ['src/evalhyd-python.cpp',
-         'deps/evalhyd/src/probabilist/evaluator_brier.cpp',
-         'deps/evalhyd/src/probabilist/evaluator_elements.cpp',
-         'deps/evalhyd/src/probabilist/evaluator_quantiles.cpp'],
+        "evalhyd._evalhyd",
+        ['evalhyd/src/evalhyd.cpp'],
         include_dirs=[
             numpy.get_include(),
-            os.path.join(os.getcwd(), 'deps', 'evalhyd', 'deps', 'xtl',
-                         'include'),
-            os.path.join(os.getcwd(), 'deps', 'evalhyd', 'deps', 'xtensor',
-                         'include'),
-            os.path.join(os.getcwd(), 'deps', 'xtensor-python', 'include'),
-            os.path.join(os.getcwd(), 'deps', 'evalhyd', 'include'),
-            os.path.join(os.getcwd(), 'deps', 'evalhyd', 'src'),
             os.path.join(sys.prefix, 'include'),
-            os.path.join(sys.prefix, 'Library', 'include')
+            os.path.join(sys.prefix, 'Library', 'include'),
+            *deps_include_dirs
         ],
         language='c++',
-        define_macros=[('VERSION_INFO', __version__)],
+        define_macros=[('VERSION_INFO', __version__)]
     ),
 ]
+# build Python extension and install Python package
 setup(
     name='evalhyd-python',
     version=__version__,
...
@@ -42,8 +46,9 @@ setup(
     url='https://gitlab.irstea.fr/hycar-hydro/evalhyd/evalhyd-python',
     description='Python bindings for EvalHyd',
     long_description='An evaluator for streamflow predictions.',
+    packages=["evalhyd"],
     ext_modules=ext_modules,
     cmdclass={'build_ext': build_ext},
-    extras_require={"tests": "numpy>=1.16"},
+    extras_require={'tests': 'numpy>=1.16'},
     zip_safe=False,
 )
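Not part of the diff: the vendoring switches above are plain environment variables, so the behaviour exported in the CI job can be reproduced locally. A sketch using only variable names and the pip command that appear in the CI and setup.py:

import os
import subprocess

# disable vendoring of xtl only; all other dependencies keep the
# default behaviour (vendored from the submodules under deps/)
env = dict(os.environ, EVALHYD_PYTHON_VENDOR_XTL='FALSE')
subprocess.run(['python', '-m', 'pip', 'install', '--user', '.[tests]', '-v'],
               env=env, check=True)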
#include <pybind11/pybind11.h>
#include <pybind11/stl.h>
#include <array>
#define STRINGIFY(x) #x
#define MACRO_STRINGIFY(x) STRINGIFY(x)
#define FORCE_IMPORT_ARRAY
#include <xtensor/xview.hpp>
#include <xtensor-python/pytensor.hpp>
#include "evalhyd/evald.hpp"
#include "evalhyd/evalp.hpp"
namespace py = pybind11;
using namespace py::literals;
// reshape 1D tensors to 2D tensors
auto evald_1d(
const xt::xtensor<double, 1>& q_obs,
const xt::xtensor<double, 1>& q_prd,
const std::vector<std::string>& metrics,
const std::string& transform = "none",
const double exponent = 1,
double epsilon = -9,
const xt::xtensor<bool, 2>& t_msk = {},
const xt::xtensor<std::array<char, 32>, 1>& m_cdt = {},
const std::unordered_map<std::string, int>& bootstrap =
{{"n_samples", -9}, {"len_sample", -9}, {"summary", 0}},
const std::vector<std::string>& dts = {}
)
{
return evalhyd::evald(
xt::view(q_obs, xt::newaxis(), xt::all()),
xt::view(q_prd, xt::newaxis(), xt::all()),
metrics,
transform,
exponent,
epsilon,
t_msk,
m_cdt,
bootstrap,
dts
);
}
// Python Module and Docstrings
PYBIND11_MODULE(evalhyd, m)
{
xt::import_numpy();
m.doc() = R"pbdoc(
Utility for evaluation of streamflow predictions.
)pbdoc";
// deterministic evaluation
m.def(
"evald", evald_1d,
R"pbdoc(
Function to evaluate deterministic streamflow predictions.
:Parameters:
q_obs: `numpy.ndarray`
1D array of streamflow observations. Time steps with
missing observations must be assigned `numpy.nan`
values. Those time steps will be ignored both in
the observations and in the predictions before the
*metrics* are computed.
shape: (time,)
q_prd: `numpy.ndarray`
1D array of streamflow predictions. Time steps with
missing predictions must be assigned `numpy.nan`
values. Those time steps will be ignored both in
the observations and in the predictions before the
*metrics* are computed.
shape: (time,)
metrics: `List[str]`
The sequence of evaluation metrics to be computed.
transform: `str`, optional
The transformation to apply to both streamflow observations
and predictions prior to the calculation of the *metrics*.
exponent: `float`, optional
The value of the exponent n to use when the *transform* is
the power function. If not provided (or set to default value
1), the streamflow observations and predictions remain
untransformed.
epsilon: `float`, optional
The value of the small constant ε to add to both the
streamflow observations and predictions prior to the
calculation of the *metrics* when the *transform* is the
reciprocal function, the natural logarithm, or the power
function with a negative exponent (since none are defined
for 0). If not provided (or set to default value -9),
one hundredth of the mean of the streamflow observations
is used as value for epsilon.
t_msk: `numpy.ndarray`, optional
1D array of mask(s) used to generate temporal subsets of
the whole streamflow time series (where True/False is used for
the time steps to include/discard in a given subset). If not
provided and neither is *m_cdt*, no subset is performed. If
provided, masks must feature the same number of dimensions as
observations and predictions, and they must be broadcastable with
both of them.
shape: (subsets, time)
m_cdt: `numpy.ndarray`, optional
1D array of masking condition(s) to use to generate
temporal subsets. Each condition consists of a string and
can be specified on observed streamflow values/statistics
(mean, median, quantile), or on time indices. If provided
in combination with *t_msk*, the latter takes precedence.
If not provided and neither is *t_msk*, no subset is
performed. If provided, only one condition per time series
of observations can be provided.
shape: (subsets,)
bootstrap: `dict`, optional
Parameters for the bootstrapping method used to estimate the
sampling uncertainty in the evaluation of the predictions.
Three parameters are mandatory ('n_samples' the number of
random samples, 'len_sample' the length of one sample in
number of years, and 'summary' the statistics to return to
characterise the sampling distribution), and one parameter
is optional ('seed'). If not provided, no bootstrapping is
performed. If provided, *dts* must also be provided.
dts: `List[str]`, optional
Datetimes. The corresponding date and time for the temporal
dimension of the streamflow observations and predictions.
The date and time must be specified in a string following
the ISO 8601-1:2019 standard, i.e. "YYYY-MM-DD hh:mm:ss"
(e.g. the 21st of May 2007 at 4 in the afternoon is
"2007-05-21 16:00:00"). If provided, it is only used if
*bootstrap* is also provided.
shape: (time,)
:Returns:
`List[numpy.ndarray]`
The sequence of evaluation metrics computed
in the same order as given in *metrics*.
shape: [(1, subsets, samples), ...]
)pbdoc",
py::arg("q_obs"), py::arg("q_prd"), py::arg("metrics"),
py::arg("transform") = "none",
py::arg("exponent") = 1,
py::arg("epsilon") = -9,
py::arg("t_msk") = xt::pytensor<bool, 2>({0}),
py::arg("m_cdt") = xt::pytensor<std::array<char, 32>, 1>({}),
py::arg("bootstrap") =
py::dict("n_samples"_a=-9, "len_sample"_a=-9, "summary"_a=0),
py::arg("dts") = py::list()
);
m.def(
"evald", evalhyd::evald,
R"pbdoc(
Function to evaluate deterministic streamflow predictions.
:Parameters:
q_obs: `numpy.ndarray`
2D array of streamflow observations. Time steps with
missing observations must be assigned `numpy.nan`
values. Those time steps will be ignored both in
the observations and in the predictions before the
*metrics* are computed.
shape: (1, time)
q_prd: `numpy.ndarray`
2D array of streamflow predictions. Time steps with
missing predictions must be assigned `numpy.nan`
values. Those time steps will be ignored both in
the observations and in the predictions before the
*metrics* are computed.
shape: (series, time)
metrics: `List[str]`
The sequence of evaluation metrics to be computed.
transform: `str`, optional
The transformation to apply to both streamflow observations
and predictions prior to the calculation of the *metrics*.
exponent: `float`, optional
The value of the exponent n to use when the *transform* is
the power function. If not provided (or set to default value
1), the streamflow observations and predictions remain
untransformed.
epsilon: `float`, optional
The value of the small constant ε to add to both the
streamflow observations and predictions prior to the
calculation of the *metrics* when the *transform* is the
reciprocal function, the natural logarithm, or the power
function with a negative exponent (since none are defined
for 0). If not provided (or set to default value -9),
one hundredth of the mean of the streamflow observations
is used as value for epsilon.
t_msk: `numpy.ndarray`, optional
2D array of mask(s) used to generate temporal subsets of
the whole streamflow time series (where True/False is used for
the time steps to include/discard in a given subset). If not
provided and neither is *m_cdt*, no subset is performed. If
provided, masks must feature the same number of dimensions as
observations and predictions, and they must be broadcastable with
both of them.
shape: (subsets, time)
m_cdt: `numpy.ndarray`, optional
1D array of masking condition(s) to use to generate
temporal subsets. Each condition consists of a string and
can be specified on observed streamflow values/statistics
(mean, median, quantile), or on time indices. If provided
in combination with *t_msk*, the latter takes precedence.
If not provided and neither is *t_msk*, no subset is
performed. If provided, only one condition per time series
of observations can be provided.
shape: (subsets,)
bootstrap: `dict`, optional
Parameters for the bootstrapping method used to estimate the
sampling uncertainty in the evaluation of the predictions.
Three parameters are mandatory ('n_samples' the number of
random samples, 'len_sample' the length of one sample in
number of years, and 'summary' the statistics to return to
characterise the sampling distribution), and one parameter
is optional ('seed'). If not provided, no bootstrapping is
performed. If provided, *dts* must also be provided.
dts: `List[str]`, optional
Datetimes. The corresponding date and time for the temporal
dimension of the streamflow observations and predictions.
The date and time must be specified in a string following
the ISO 8601-1:2019 standard, i.e. "YYYY-MM-DD hh:mm:ss"
(e.g. the 21st of May 2007 at 4 in the afternoon is
"2007-05-21 16:00:00"). If provided, it is only used if
*bootstrap* is also provided.
shape: (time,)
:Returns:
`List[numpy.ndarray]`
The sequence of evaluation metrics computed
in the same order as given in *metrics*.
shape: [(series, subsets, samples), ...]
)pbdoc",
py::arg("q_obs"), py::arg("q_prd"), py::arg("metrics"),
py::arg("transform") = "none",
py::arg("exponent") = 1,
py::arg("epsilon") = -9,
py::arg("t_msk") = xt::pytensor<bool, 2>({0}),
py::arg("m_cdt") = xt::pytensor<std::array<char, 32>, 1>({}),
py::arg("bootstrap") =
py::dict("n_samples"_a=-9, "len_sample"_a=-9, "summary"_a=0),
py::arg("dts") = py::list()
);
// probabilistic evaluation
m.def(
"evalp", evalhyd::evalp,
R"pbdoc(
Function to evaluate probabilistic streamflow predictions.
:Parameters:
q_obs: `numpy.ndarray`
2D array of streamflow observations. Time steps with
missing observations must be assigned `numpy.nan`
values. Those time steps will be ignored both in
the observations and in the predictions before the
*metrics* are computed.
shape: (sites, time)
q_prd: `numpy.ndarray`
4D array of streamflow predictions. Time steps with
missing predictions must be assigned `numpy.nan`
values. Those time steps will be ignored both in
the observations and in the predictions before the
*metrics* are computed.
shape: (sites, lead times, members, time)
metrics: `List[str]`
The sequence of evaluation metrics to be computed.
q_thr: `List[float]`, optional
The streamflow threshold(s) to consider for the *metrics*
assessing the prediction of exceedance events. If not
provided, set to default value as an empty `list`.
shape: (thresholds,)
t_msk: `numpy.ndarray`, optional
4D array of masks to generate temporal subsets of the whole
streamflow time series (where True/False is used for the
time steps to include/discard in a given subset). If not
provided, no subset is performed and only one set of metrics
is returned corresponding to the whole time series. If
provided, as many sets of metrics are returned as there are
masks provided.
shape: (sites, lead times, subsets, time)
m_cdt: `numpy.ndarray`, optional
2D array of conditions to generate temporal subsets. Each
condition consists of a string and can be specified on
observed/predicted streamflow values/statistics (mean,
median, quantile), or on time indices. If provided in
combination with t_msk, the latter takes precedence. If not
provided and neither is t_msk, no subset is performed and
only one set of metrics is returned corresponding to the
whole time series. If provided, as many sets of metrics are
returned as there are conditions provided.
shape: (sites, subsets)
bootstrap: `dict`, optional
Parameters for the bootstrapping method used to estimate the
sampling uncertainty in the evaluation of the predictions.
Three parameters are mandatory ('n_samples' the number of
random samples, 'len_sample' the length of one sample in
number of years, and 'summary' the statistics to return to
characterise the sampling distribution), and one parameter
is optional ('seed'). If not provided, no bootstrapping is
performed. If provided, *dts* must also be provided.
dts: `List[str]`, optional
Datetimes. The corresponding date and time for the temporal
dimension of the streamflow observations and predictions.
The date and time must be specified in a string following
the ISO 8601-1:2019 standard, i.e. "YYYY-MM-DD hh:mm:ss"
(e.g. the 21st of May 2007 at 4 in the afternoon is
"2007-05-21 16:00:00"). If provided, it is only used if
*bootstrap* is also provided.
shape: (time,)
:Returns:
`List[numpy.ndarray]`
The sequence of evaluation metrics computed
in the same order as given in *metrics*.
shape: [(sites, lead times, subsets, samples, {quantiles,} {thresholds,} {components}), ...]
)pbdoc",
py::arg("q_obs"), py::arg("q_prd"), py::arg("metrics"),
py::arg("q_thr") = xt::pytensor<double, 2>({0}),
py::arg("t_msk") = xt::pytensor<bool, 4>({0}),
py::arg("m_cdt") = xt::pytensor<std::array<char, 32>, 2>({0}),
py::arg("bootstrap") =
py::dict("n_samples"_a=-9, "len_sample"_a=-9, "summary"_a=0),
py::arg("dts") = py::list()
);
#ifdef VERSION_INFO
m.attr("__version__") = MACRO_STRINGIFY(VERSION_INFO);
#else
m.attr("__version__") = "dev";
#endif
}