diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index a95b86c279722366e067b6e45849272357685d65..afe329dc9e14a879dbdc111dd59c6807a87196f2 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -27,14 +27,6 @@ default:
   stage: check
   script:
     - R -e 'rcmdcheck::rcmdcheck(args = ifelse(as.logical(Sys.getenv("NOT_CRAN")), "", "--as-cran"), error_on = "warning")'
-    - R -e 'install.packages(c("microbenchmark", "airGR"), repos = "http://cran.r-project.org")'
-    - Rscript tests/scheduled_tests/benchmarkRunModel.R
-    - R CMD INSTALL .
-    - Rscript tests/scheduled_tests/benchmarkRunModel.R
-  artifacts:
-    paths:
-      - tests/tmp/benchmark.tsv
-      - tests/tmp/mean_execution_time.tsv
 
 .check_not_cran:
   variables:
@@ -46,6 +38,22 @@ default:
     NOT_CRAN: "false"
   extends: .check
 
+benchmark_patched:
+  stage: check
+  variables:
+    R_VERSION: "patched"
+  allow_failure: true
+  script:
+    - R -e 'remotes::update_packages("microbenchmark", repos = "http://cran.r-project.org")'
+    - R -e 'install.packages("airGR", repos = "http://cran.r-project.org")'
+    - Rscript tests/scheduled_tests/benchmarkRunModel.R
+    - R CMD INSTALL .
+    - Rscript tests/scheduled_tests/benchmarkRunModel.R
+  artifacts:
+    paths:
+      - tests/tmp/benchmark.tsv
+      - tests/tmp/mean_execution_time.tsv
+
 scheduled_tests_patched:
 #  only:
 #    refs:
diff --git a/.regressionignore b/.regressionignore
index 44aced51723929d2b68509f61dd2e192ef30f2d9..56129af6f27be57107c6aebf7ec07f5a5d5eb5d9 100644
--- a/.regressionignore
+++ b/.regressionignore
@@ -11,8 +11,7 @@
 RunModel_CemaNeige RunOptions$Outputs_Cal
 Param_Sets_GR4J RunOptions_Cal
 Param_Sets_GR4J RunOptions_Val
-* OutputsModel$Param
-* OutputsModel$WarmUpQsim
+* OutputsModel$RunOptions
 * OutputsModel$StateEnd
 Param_Sets_GR4J OutputsModel_Val
 RunModel_Lag InputsModel
diff --git a/DESCRIPTION b/DESCRIPTION
index 2d31e79c54e9669fbb9557f1fbaa7cc25b679f06..4212eba9b9112bac090e25ef9d0cc8ee94e36167 100644
--- a/DESCRIPTION
+++ b/DESCRIPTION
@@ -1,8 +1,8 @@
 Package: airGR
 Type: Package
 Title: Suite of GR Hydrological Models for Precipitation-Runoff Modelling
-Version: 1.6.12.9000
-Date: 2021-04-27
+Version: 1.6.12.9001
+Date: 2021-10-26
 Authors@R: c(
   person("Laurent", "Coron", role = c("aut", "trl"), comment = c(ORCID = "0000-0002-1503-6204")),
   person("Olivier", "Delaigue", role = c("aut", "cre"), comment = c(ORCID = "0000-0002-7668-8468"), email = "airGR@inrae.fr"),
diff --git a/R/RunModel_Lag.R b/R/RunModel_Lag.R
index 34ea35b623e42e79aa61d91f299672762d10a943..5b9bec1092004d50eabd392fc9bb177e8d7f9596 100644
--- a/R/RunModel_Lag.R
+++ b/R/RunModel_Lag.R
@@ -22,16 +22,19 @@ RunModel_Lag <- function(InputsModel, RunOptions, Param, QcontribDown) {
       stop("'QcontribDown' should contain a key 'Qsim' containing the output of the runoff of the downstream subcatchment")
     }
     if (length(QcontribDown$Qsim) != length(RunOptions$IndPeriod_Run)) {
-      stop("Time series Qsim in 'QcontribDown' should have the same lenght as 'RunOptions$IndPeriod_Run'")
+      stop("Time series Qsim in 'QcontribDown' should have the same length as 'RunOptions$IndPeriod_Run'")
     }
-    if (!is.null(QcontribDown$WarmUpQsim) &&
-        length(QcontribDown$WarmUpQsim) != length(RunOptions$IndPeriod_WarmUp) &&
-        RunOptions$IndPeriod_WarmUp != 0L) {
-      stop("Time series WarmUpQsim in 'QcontribDown' should have the same lenght as 'RunOptions$IndPeriod_WarmUp'")
+    if (!identical(RunOptions$IndPeriod_WarmUp, 0L) && !identical(RunOptions$Outputs_Sim, RunOptions$Outputs_Cal)) {
+      # This test is not necessary during calibration but is useful in other cases,
+      # because WarmUpQsim is then used for downstream sub-basins due to the delay in Qupstream
+      if (is.null(QcontribDown$RunOptions$WarmUpQsim) ||
+          length(QcontribDown$RunOptions$WarmUpQsim) != length(RunOptions$IndPeriod_WarmUp)) {
+        stop("Time series WarmUpQsim in 'QcontribDown' should have the same length as 'RunOptions$IndPeriod_WarmUp'")
+      }
     }
   } else if (is.vector(QcontribDown) && is.numeric(QcontribDown)) {
     if (length(QcontribDown) != length(RunOptions$IndPeriod_Run)) {
-      stop("'QcontribDown' should have the same lenght as 'RunOptions$IndPeriod_Run'")
+      stop("'QcontribDown' should have the same length as 'RunOptions$IndPeriod_Run'")
     }
   } else {
     stop("'QcontribDown' must be a numeric vector or a 'OutputsModel' object")
diff --git a/R/UtilsRunModel.R b/R/UtilsRunModel.R
index 17476edeb1060f36c19d33ccb48b1bfea2c14f13..1a384a333b25d79a021e63fb6009ad0ce8d96ff9 100644
--- a/R/UtilsRunModel.R
+++ b/R/UtilsRunModel.R
@@ -37,8 +37,7 @@
   if (!is.null(CemaNeigeLayers)) {
     OutputsModel$CemaNeigeLayers <- CemaNeigeLayers
   }
-
-  if ("WarmUpQsim" %in% RunOptions$Outputs_Sim) {
+  if ("WarmUpQsim" %in% RunOptions$Outputs_Sim && !identical(RunOptions$IndPeriod_WarmUp, 0L)) {
     OutputsModel$RunOptions$WarmUpQsim <- RESULTS$Outputs[seq_len(length(RunOptions$IndPeriod_WarmUp)),
                                                           which(FortranOutputs == "Qsim")]
     # class(OutputsModel$RunOptions$WarmUpQsim) <- c("WarmUpOutputsModelItem", class(OutputsModel$RunOptions$WarmUpQsim))
diff --git a/tests/scheduled_tests/benchmarkRunModel.R b/tests/scheduled_tests/benchmarkRunModel.R
index a983b2f697b841cfafac9aea11affe9bee6c4376..709d52da309535f370b64e2387ac1b4987851f13 100644
--- a/tests/scheduled_tests/benchmarkRunModel.R
+++ b/tests/scheduled_tests/benchmarkRunModel.R
@@ -1,25 +1,29 @@
 library(airGR)
 source("tests/testthat/helper_scheduled_Calibration.R")
+
 BenchmarkRunModel <- function(model) {
   e <- PrepareCalibration(model)
 
-  for(n in ls(e, all.names=TRUE)) assign(n, get(n, e))
+  for (n in ls(e, all.names = TRUE)) {
+    assign(n, value = get(n, e))
+  }
 
   # RunOptions calibration configuration
   RunOptions$Outputs_Sim <- RunOptions$Outputs_Cal
 
-  mbm <- microbenchmark::microbenchmark(RunModel =
-    RunModel(InputsModel = InputsModel, RunOptions = RunOptions,
-             Param = as.numeric(strsplit(model$ParamFinalR, ";")[[1]]), FUN_MOD = sModel)
+  mbm <- microbenchmark::microbenchmark(
+    RunModel = RunModel(InputsModel = InputsModel, RunOptions = RunOptions,
+                        Param = as.numeric(strsplit(model$ParamFinalR, ";")[[1]]), FUN_MOD = sModel)
   )
-  mbm$time
+  return(mbm$time)
+
 }
 
-Args <- commandArgs(trailingOnly = TRUE)
+
 sModelNames <- paste0(dfModels$name,
-  ifelse(as.logical(dfModels$IsHyst), "_Hyst", ""))
+                      ifelse(as.logical(dfModels$IsHyst), "_Hyst", ""))
 
-dfBM <- as.data.frame(apply(dfModels, 1, BenchmarkRunModel))
+dfBM <- as.data.frame(apply(dfModels, MARGIN = 1, FUN = BenchmarkRunModel))
 colnames(dfBM) <- sModelNames
 
 dfBM <- cbind(version = as.character(packageVersion('airGR')), dfBM)
@@ -31,15 +35,15 @@
 write.table(dfBM, file = file, sep = "\t", append = file.exists(file))
 
 df <- read.table(file = file, sep = "\t", header = TRUE)
 
-if(length(unique(df$version)) > 1) {
+if (length(unique(df$version)) > 1) {
   lV <- lapply(unique(df$version), function(version) {
-    apply(df[df$version == version, -1] / 1E6, 2, mean)
+    apply(df[df$version == version, -1] / 1e6, MARGIN = 2, FUN = mean)
   })
   names(lV) <- unique(df$version)
   dfMean <- cbind(model = sModelNames, as.data.frame(t(do.call(rbind, lV))))
-  dfMean$evolution <- (dfMean[,3] - dfMean[,2]) / dfMean[,2]
-  write.table(dfMean, "tests/tmp/mean_execution_time.tsv", row.names = FALSE, quote = F, sep = "\t")
+  dfMean$evolution <- (dfMean[, 3] - dfMean[, 2]) / dfMean[, 2]
+  write.table(dfMean, "tests/tmp/mean_execution_time.tsv", row.names = FALSE, quote = FALSE, sep = "\t")
   res <- testthat::test_file("tests/testthat/benchmarkRunModel.R")
   dRes <- as.data.frame(res)
   if (any(dRes[, "failed"] > 0) | any(dRes[, "error"])) {
diff --git a/tests/testthat/benchmarkRunModel.R b/tests/testthat/benchmarkRunModel.R
index 67997d97cca6aa44d237d3acbcc322d505ca4306..bc539a324ab641415c5299d0a7766eb7cdcb5b14 100644
--- a/tests/testthat/benchmarkRunModel.R
+++ b/tests/testthat/benchmarkRunModel.R
@@ -3,7 +3,10 @@ df <- read.table("../tmp/mean_execution_time.tsv", sep = "\t", header = T)
 lapply(df$model, function(model) {
   test_that(paste(model, ": RunModel should be as fast as CRAN version"), {
     sel <- df$model == model
-    threshold <- max(0.2, -0.15 * df[sel, 2] + 1) # decrease from 1 at 0.1ms to 0.2 at 5.5ms
+    # Upper limit on the relative evolution of execution time between the 2 versions
+    # Negative values of evolution are expected, but an error margin is allowed depending on the execution time:
+    # the threshold decreases from 2 at 0 ms down to a floor of 0.5, reached at 15 ms
+    threshold <- max(0.5, 2 - 0.1 * df[sel, 2])
     expect_lt(df$evolution[sel], threshold)
   })
 })
diff --git a/tests/testthat/test-RunModel_Lag.R b/tests/testthat/test-RunModel_Lag.R
index 23889967f71f1fef5983d51c86c92931481842fd..09f1d338d5864baa7c9e207ca158ceb7b19716ec 100644
--- a/tests/testthat/test-RunModel_Lag.R
+++ b/tests/testthat/test-RunModel_Lag.R
@@ -72,12 +72,12 @@ test_that("QcontribDown should contain a Qsim key", {
   )
 })
 
-test_that("'QcontribDown$Qim' should have the same lenght as 'RunOptions$IndPeriod_Run'", {
+test_that("'QcontribDown$Qsim' should have the same length as 'RunOptions$IndPeriod_Run'", {
   QcontribDown <- OutputsGR4JOnly
   QcontribDown$Qsim <- c(QcontribDown$Qsim, 0)
   expect_error(
     RunModel_Lag(InputsModel = InputsModel, RunOptions = RunOptions,
                  Param = 1, QcontribDown = QcontribDown),
-    regexp = "should have the same lenght as"
+    regexp = "should have the same length as"
   )
 })
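Note on the new benchmark tolerance in tests/testthat/benchmarkRunModel.R: the test compares df$evolution, the relative change in mean execution time between the CRAN and patched versions, against max(0.5, 2 - 0.1 * t), where t is the mean execution time read from the second column of mean_execution_time.tsv (microbenchmark nanoseconds divided by 1e6, i.e. milliseconds). A minimal sketch of that curve, assuming those units; the helper name tolerance_curve is purely illustrative and not part of the package:

# Illustrative only: the slowdown tolerance applied to the relative evolution
# of execution time, evaluated for a few mean execution times (in ms).
tolerance_curve <- function(t_ms) {
  pmax(0.5, 2 - 0.1 * t_ms)
}
tolerance_curve(c(0, 5, 10, 15, 20))
#> [1] 2.0 1.5 1.0 0.5 0.5

In other words, very fast models may be up to 200 % slower before the test fails, and the allowance shrinks linearly to a 50 % floor once the mean execution time reaches 15 ms.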