Commit 3453acee authored by Heraut Louis's avatar Heraut Louis
Browse files

Formatting for group and QMNA

parent c90cff65
No related merge requests found
Showing with 117 additions and 59 deletions
+117 -59
# Load dplyr for tibble/join/grouping verbs used throughout this script
library(dplyr)
#' Join Banque Hydro (BH) and Nivale (NV) station data and metadata.
#'
#' Stations present in both sources keep their BH records; only NV
#' stations absent from BH are appended to the BH tables.
#'
#' @param df_data_BH daily flow data (Date, Qm3s, code) from BH, or NULL
#' @param df_data_NV daily flow data from NV, or NULL
#' @param df_info_BH station metadata from BH, or NULL
#' @param df_info_NV station metadata from NV, or NULL
#' @return list(data=..., info=...) holding the merged tables
join = function (df_data_BH, df_data_NV, df_info_BH, df_info_NV) {
    if (!is.null(df_data_NV) && !is.null(df_data_BH)) {
        # Get the NV stations that BH does not already cover
        NVadd = levels(factor(df_info_NV[!(df_info_NV$code %in% df_info_BH$code),]$code))
        # Select only the NV info to add
        df_info_NVadd = df_info_NV[df_info_NV$code %in% NVadd,]
        # Join NV info to BH info
        df_info = full_join(df_info_BH, df_info_NVadd,
                            by=c("code", "nom", "L93X", "L93Y",
                                 "surface_km2", "file_path"))
        # Select only the NV data to add
        df_data_NVadd = df_data_NV[df_data_NV$code %in% NVadd,]
        # Join NV data to BH data
        df_data = full_join(df_data_BH, df_data_NVadd,
                            by=c("Date", "Qm3s", "code"))
    } else if (is.null(df_data_NV) && !is.null(df_data_BH)) {
        # Only BH data available
        df_info = df_info_BH
        df_data = df_data_BH
    } else if (!is.null(df_data_NV) && is.null(df_data_BH)) {
        # Only NV data available
        df_info = df_info_NV
        df_data = df_data_NV
    } else {
        stop('No data')
    }
    return (list(data=df_data, info=df_info))
}
#' Group flow data by one or more columns and build the compact
#' (data, info) pair expected by the trend-analysis routines.
#'
#' @param df_data tibble with at least Date, Qm3s and the grouping column(s)
#' @param colnamegroup character vector of column names to group by
#' @return list(data=tibble(Date, group, Qm3s),
#'              info=tibble(group, <grouping key columns>))
prepare = function(df_data, colnamegroup) {
    colnamegroup = c(colnamegroup)
    # %in% (not ==) so that several grouping columns work at once
    colindgroup = which(colnames(df_data) %in% colnamegroup)
    df_data = group_by_at(df_data, colindgroup)
    # Data table: one integer group id per grouping key
    data = tibble(Date=df_data$Date,
                  group=group_indices(df_data),
                  Qm3s=df_data$Qm3s)
    # Info table: map each group id back to its grouping key values
    Gkey = group_keys(df_data)
    info = bind_cols(group=seq_len(nrow(Gkey)),
                     Gkey)
    return (list(data=data, info=info))
}
#' Attach station metadata to a trend-analysis result table.
#'
#' @param df_xTrend trend results; first column (group1) is the group id
#' @param df_list list(data=..., info=...) as returned by prepare()
#' @return df_xTrend with the info columns bound on and the id column
#'   renamed to 'group'
clean = function (df_xTrend, df_list) {
    # Look up each result row's group id in the info table and bind
    # every info column except the id itself (column 1)
    df_xTrend = bind_cols(df_xTrend,
                          df_list$info[df_xTrend$group1,
                                       2:ncol(df_list$info)])
    colnames(df_xTrend)[1] = 'group'
    return (df_xTrend)
}
### BANQUE HYDRO ###
# Path to the directory where BH data is stored
BHfiledir = 
    # ""
    "BanqueHydro_Export2021"

## Manual selection ##
# Name of the file that will be analysed from the BH directory
BHfilename = 
    # ""
    c("H5920011_HYDRO_QJM.txt")#, "K4470010_HYDRO_QJM.txt")
    # "all"

## Or list selection ##

### NIVALE ###
# Path to the directory where NV data is stored
NVfiledir = 
    ""
    # "France207"

# Name of the file that will be analysed from the NV directory
NVfilename = 
    ""
    # "all"

# Path to the list file of information about station that will be analysed
NVlistdir = 
    ""
NVlistname = 
    ""
    # "liste_bv_principaux_global.txt"

### TREND ANALYSIS ###
# Time period to analyse
period = c("1960-01-01","2019-12-31")
# period = c("1960-01-01","2020-01-01")
# Extract data about selected stations
df_data_NV = extractNV_data(computer_data_path, NVfiledir, NVfilename)

# JOIN #
# Merge Banque Hydro and Nivale data/metadata (see join())
df_join = join(df_data_BH, df_data_NV, df_info_BH, df_info_NV)
df_data = df_join$data
df_info = df_join$info
# TIME PANEL #
### /!\ Removed 185 row(s) containing missing values (geom_path) -> remove NA ###
# ANALYSE #

# Compute gap parameters for stations
# df_lac = get_lacune(df_data, df_info)

# AVERAGE ANNUAL FLOW : QA #
# Group daily flows by station code
df_QAlist = prepare(df_data, colnamegroup=c('code'))
# Yearly mean flow per station over the study period
df_QAEx = extract.Var(data.station=df_QAlist,
                      funct=mean,
                      period=period,
                      pos.datetime=1,
                      na.rm=TRUE)
df_QATrend = Estimate.stats(data.extract=df_QAEx)
### /!\ verify order conservation ###
# Re-attach station metadata to the trend results
df_QATrend = clean(df_QATrend, df_QAlist)
# MONTHLY MINIMUM FLOW IN THE YEAR : QMNA #

#' Minimum of the monthly mean flows over one year of daily data.
#'
#' The input vector is split into 12 equal-length slices (days per
#' month approximated as length(X)/12, slice bounds rounded), the mean
#' of each slice is taken ignoring NA, and the smallest mean is
#' returned.
#'
#' @param X numeric vector of daily flows covering one year
#' @return the minimum monthly mean flow (QMNA); Inf with a warning
#'   when every month is entirely NA (min on empty set)
fMNA = function (X) {
    # Approximate number of days per month
    dpm = length(X)/12
    # Mean flow of each of the 12 slices (preallocated via vapply
    # instead of growing a vector with append in a loop)
    monthmean = vapply(1:12, function (i) {
        id = round(dpm*(i-1)+1, 0)
        iu = round(i*dpm, 0)
        mean(X[id:iu], na.rm=TRUE)
    }, numeric(1))
    return (min(monthmean, na.rm=TRUE))
}
# Group daily flow data by station code
df_QMNAlist = prepare(df_data, colnamegroup=c('code'))
# Extract one QMNA value per station and year using fMNA
df_QMNAEx = extract.Var(data.station=df_QMNAlist,
funct=fMNA,
period=period,
pos.datetime=1)#,
# na.rm=TRUE) ### /!\ NOTE(review): effect of passing na.rm here was not understood; fMNA already handles NA internally -- confirm ###
df_QMNATrend = Estimate.stats(data.extract=df_QMNAEx)
# Re-attach station metadata to the trend results
df_QMNATrend = clean(df_QMNATrend, df_QMNAlist)
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment