Commit 7f038f88 authored by Delaigue Olivier's avatar Delaigue Olivier
Browse files

Merge branch '137b-fix-extractoutputsmodel-function' into 'dev'

Resolve "Fix .ExtractOutputsModel function to manage the new elements of OutputsModel" (2nd try)

See merge request HYCAR-Hydro/airgr!59
parents 84d860ae 3398e7d4
Pipeline #28830 passed with stages
in 130 minutes and 1 second
......@@ -27,14 +27,6 @@ default:
stage: check
script:
- R -e 'rcmdcheck::rcmdcheck(args = ifelse(as.logical(Sys.getenv("NOT_CRAN")), "", "--as-cran"), error_on = "warning")'
- R -e 'install.packages(c("microbenchmark", "airGR"), repos = "http://cran.r-project.org")'
- Rscript tests/scheduled_tests/benchmarkRunModel.R
- R CMD INSTALL .
- Rscript tests/scheduled_tests/benchmarkRunModel.R
artifacts:
paths:
- tests/tmp/benchmark.tsv
- tests/tmp/mean_execution_time.tsv
.check_not_cran:
variables:
......@@ -46,6 +38,22 @@ default:
NOT_CRAN: "false"
extends: .check
benchmark_patched:
stage: check
variables:
R_VERSION: "patched"
allow_failure: true
script:
- R -e 'remotes::update_packages("microbenchmark", repos = "http://cran.r-project.org")'
- R -e 'install.packages("airGR", repos = "http://cran.r-project.org")'
- Rscript tests/scheduled_tests/benchmarkRunModel.R
- R CMD INSTALL .
- Rscript tests/scheduled_tests/benchmarkRunModel.R
artifacts:
paths:
- tests/tmp/benchmark.tsv
- tests/tmp/mean_execution_time.tsv
scheduled_tests_patched:
# only:
# refs:
......
......@@ -11,8 +11,7 @@
RunModel_CemaNeige RunOptions$Outputs_Cal
Param_Sets_GR4J RunOptions_Cal
Param_Sets_GR4J RunOptions_Val
* OutputsModel$Param
* OutputsModel$WarmUpQsim
* OutputsModel$RunOptions
* OutputsModel$StateEnd
Param_Sets_GR4J OutputsModel_Val
RunModel_Lag InputsModel
......
Package: airGR
Type: Package
Title: Suite of GR Hydrological Models for Precipitation-Runoff Modelling
Version: 1.6.12.9000
Date: 2021-04-27
Version: 1.6.12.9001
Date: 2021-10-26
Authors@R: c(
person("Laurent", "Coron", role = c("aut", "trl"), comment = c(ORCID = "0000-0002-1503-6204")),
person("Olivier", "Delaigue", role = c("aut", "cre"), comment = c(ORCID = "0000-0002-7668-8468"), email = "airGR@inrae.fr"),
......
......@@ -22,16 +22,19 @@ RunModel_Lag <- function(InputsModel, RunOptions, Param, QcontribDown) {
stop("'QcontribDown' should contain a key 'Qsim' containing the output of the runoff of the downstream subcatchment")
}
if (length(QcontribDown$Qsim) != length(RunOptions$IndPeriod_Run)) {
stop("Time series Qsim in 'QcontribDown' should have the same lenght as 'RunOptions$IndPeriod_Run'")
stop("Time series Qsim in 'QcontribDown' should have the same length as 'RunOptions$IndPeriod_Run'")
}
if (!is.null(QcontribDown$WarmUpQsim) &&
length(QcontribDown$WarmUpQsim) != length(RunOptions$IndPeriod_WarmUp) &&
RunOptions$IndPeriod_WarmUp != 0L) {
stop("Time series WarmUpQsim in 'QcontribDown' should have the same lenght as 'RunOptions$IndPeriod_WarmUp'")
if (!identical(RunOptions$IndPeriod_WarmUp, 0L) && !identical(RunOptions$Outputs_Sim, RunOptions$Outputs_Cal)) {
# This test is not necessary during calibration but useful in other cases because
# WarmUpQsim is then used for downstream sub-basins because of the delay in Qupstream
if (is.null(QcontribDown$RunOptions$WarmUpQsim) ||
length(QcontribDown$RunOptions$WarmUpQsim) != length(RunOptions$IndPeriod_WarmUp)) {
stop("Time series WarmUpQsim in 'QcontribDown' should have the same length as 'RunOptions$IndPeriod_WarmUp'")
}
}
} else if (is.vector(QcontribDown) && is.numeric(QcontribDown)) {
if (length(QcontribDown) != length(RunOptions$IndPeriod_Run)) {
stop("'QcontribDown' should have the same lenght as 'RunOptions$IndPeriod_Run'")
stop("'QcontribDown' should have the same length as 'RunOptions$IndPeriod_Run'")
}
} else {
stop("'QcontribDown' must be a numeric vector or a 'OutputsModel' object")
......
......@@ -37,8 +37,7 @@
if (!is.null(CemaNeigeLayers)) {
OutputsModel$CemaNeigeLayers <- CemaNeigeLayers
}
if ("WarmUpQsim" %in% RunOptions$Outputs_Sim) {
if ("WarmUpQsim" %in% RunOptions$Outputs_Sim && !identical(RunOptions$IndPeriod_WarmUp, 0L)) {
OutputsModel$RunOptions$WarmUpQsim <- RESULTS$Outputs[seq_len(length(RunOptions$IndPeriod_WarmUp)),
which(FortranOutputs == "Qsim")]
# class(OutputsModel$RunOptions$WarmUpQsim) <- c("WarmUpOutputsModelItem", class(OutputsModel$RunOptions$WarmUpQsim))
......
library(airGR)
source("tests/testthat/helper_scheduled_Calibration.R")
BenchmarkRunModel <- function(model) {
e <- PrepareCalibration(model)
for(n in ls(e, all.names=TRUE)) assign(n, get(n, e))
for (n in ls(e, all.names = TRUE)) {
assign(n, value = get(n, e))
}
# RunOptions calibration configuration
RunOptions$Outputs_Sim <- RunOptions$Outputs_Cal
mbm <- microbenchmark::microbenchmark(RunModel =
RunModel(InputsModel = InputsModel, RunOptions = RunOptions,
Param = as.numeric(strsplit(model$ParamFinalR, ";")[[1]]), FUN_MOD = sModel)
mbm <- microbenchmark::microbenchmark(
RunModel = RunModel(InputsModel = InputsModel, RunOptions = RunOptions,
Param = as.numeric(strsplit(model$ParamFinalR, ";")[[1]]), FUN_MOD = sModel)
)
mbm$time
return(mbm$time)
}
Args <- commandArgs(trailingOnly = TRUE)
sModelNames <- paste0(dfModels$name,
ifelse(as.logical(dfModels$IsHyst), "_Hyst", ""))
ifelse(as.logical(dfModels$IsHyst), "_Hyst", ""))
dfBM <- as.data.frame(apply(dfModels, 1, BenchmarkRunModel))
dfBM <- as.data.frame(apply(dfModels, MARGIN = 1, FUN = BenchmarkRunModel))
colnames(dfBM) <- sModelNames
dfBM <- cbind(version = as.character(packageVersion('airGR')), dfBM)
......@@ -31,15 +35,15 @@ write.table(dfBM, file = file,
sep = "\t", append = file.exists(file))
df <- read.table(file = file, sep = "\t", header = TRUE)
if(length(unique(df$version)) > 1) {
if (length(unique(df$version)) > 1) {
lV <- lapply(unique(df$version), function(version) {
apply(df[df$version == version, -1] / 1E6, 2, mean)
apply(df[df$version == version, -1] / 1e6, MARGIN = 2, FUN = mean)
})
names(lV) <- unique(df$version)
dfMean <- cbind(model = sModelNames, as.data.frame(t(do.call(rbind, lV))))
dfMean$evolution <- (dfMean[,3] - dfMean[,2]) / dfMean[,2]
write.table(dfMean, "tests/tmp/mean_execution_time.tsv", row.names = FALSE, quote = F, sep = "\t")
dfMean$evolution <- (dfMean[, 3] - dfMean[, 2]) / dfMean[, 2]
write.table(dfMean, "tests/tmp/mean_execution_time.tsv", row.names = FALSE, quote = FALSE, sep = "\t")
res <- testthat::test_file("tests/testthat/benchmarkRunModel.R")
dRes <- as.data.frame(res)
if (any(dRes[, "failed"] > 0) | any(dRes[, "error"])) {
......
......@@ -3,7 +3,10 @@ df <- read.table("../tmp/mean_execution_time.tsv", sep = "\t", header = T)
lapply(df$model, function(model) {
test_that(paste(model, ": RunModel should be as fast as CRAN version"), {
sel <- df$model == model
threshold <- max(0.2, -0.15 * df[sel, 2] + 1) # decrease from 1 at 0.1ms to 0.2 at 5.5ms
# Limit threshold for evolution of execution time (in %) between the 2 versions
# Negative values of evolution are expected but we apply an error margin depending on execution time
# decrease from 2 at 0 ms down to the 0.5 floor reached at 15 ms (per the formula below)
threshold <- max(0.5, 2 - 0.1 * df[sel, 2])
expect_lt(df$evolution[sel], threshold)
})
})
......
......@@ -72,12 +72,12 @@ test_that("QcontribDown should contain a Qsim key", {
)
})
test_that("'QcontribDown$Qim' should have the same lenght as 'RunOptions$IndPeriod_Run'", {
test_that("'QcontribDown$Qim' should have the same length as 'RunOptions$IndPeriod_Run'", {
QcontribDown <- OutputsGR4JOnly
QcontribDown$Qsim <- c(QcontribDown$Qsim, 0)
expect_error(
RunModel_Lag(InputsModel = InputsModel, RunOptions = RunOptions, Param = 1, QcontribDown = QcontribDown),
regexp = "should have the same lenght as"
regexp = "should have the same length as"
)
})
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment