Commit c3100cf5 authored by Heraut Louis

Take the hydrological sector into account

parent 49325a64
Showing with 133 additions and 105 deletions
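This commit switches the matrix plots from grouping stations by the first letter of their code to grouping them by hydrological region, and refines the 'O' sector (Garonne basin) into Garonne, Tarn-Aveyron and Lot using the first two characters of the station code. The snippet below is a minimal, self-contained sketch of the lookup logic added to `extract_meta`; the station codes and the `iRegHydro` excerpt are illustrative, and the vectorised `ifelse` stands in for the `for` loop used in the actual diff.

```r
# Excerpt of the sector -> region mapping (illustrative subset of iRegHydro)
iRegHydro = c('N'='Fleuves côtiers au sud de la Loire',
              'O0'='Garonne', 'O3'='Tarn-Aveyron', 'O7'='Lot',
              'P'='Dordogne')

# Sectors that are detailed with two characters in iRegHydro
infoSecteur = unique(substr(names(iRegHydro)[nchar(names(iRegHydro)) == 2], 1, 1))

# Hypothetical station codes
code = c('O3035210', 'P0885010')

oneL = substr(code, 1, 1)
twoL = substr(code, 1, 2)

# Two-character lookup for detailed sectors, one-character lookup otherwise
# (the commit does this with a for loop; ifelse gives the same result here)
region_hydro = ifelse(oneL %in% infoSecteur, iRegHydro[twoL], iRegHydro[oneL])
# region_hydro -> "Tarn-Aveyron" "Dordogne"
```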
@@ -471,55 +471,58 @@ matrix_panel = function (list_df2plot, df_meta, trend_period, mean_period, slice
     for (itype in 1:nbType) {
         # Gets the type
         type = Type[itype]
-        # Extracts each possibilities of first letter of station code
-        firstLetter = levels(factor(substr(Code, 1, 1)))
+        # Extracts each possibilities of hydrological region
+        RH = rle(sort(df_meta$region_hydro))$values
+        twoL = names(df_meta$region_hydro)
         # Number of different first letters
-        nfL = length(firstLetter)
+        nRH = length(RH)
         # For all the available first letter
-        for (ifL in 1:nfL) {
+        for (iR in 1:nRH) {
             # Gets the first letter
-            fL = firstLetter[ifL]
+            rh = RH[iR]
+            okL = rle(sort(twoL[df_meta$region_hydro == rh]))$values
+            nL = nchar(okL[1])
             # Get only station code with the same first letter
-            subCodefL = Code[substr(Code, 1, 1) == fL]
+            subCodeRh = Code[substr(Code, 1, nL) %in% okL]
             # Counts the number of station in it
-            nsubCodefL = length(subCodefL)
-            # Computes the number of pages needed to plot all stations
-            nMat = as.integer(nsubCodefL/slice) + 1
+            nsubCodeRh = length(subCodeRh)
+            # Computes the number of pages needed to plot
+            # all stations
+            nMat = as.integer(nsubCodeRh/slice) + 1
             # Counts the number of pages
             N_loop = N_loop + nMat
         }
     }
     # For all the type of plots
     for (itype in 1:nbType) {
         # Gets the type
         type = Type[itype]
-        # Extracts each possibilities of first letter of station code
-        firstLetter = levels(factor(substr(Code, 1, 1)))
+        # Extracts each possibilities of hydrological region
+        RH = rle(sort(df_meta$region_hydro))$values
+        twoL = names(df_meta$region_hydro)
         # Number of different first letters
-        nfL = length(firstLetter)
+        nRH = length(RH)
         # For all the available first letter
-        for (ifL in 1:nfL) {
+        for (iR in 1:nRH) {
             # Gets the first letter
-            fL = firstLetter[ifL]
+            rh = RH[iR]
+            okL = rle(sort(twoL[df_meta$region_hydro == rh]))$values
+            nL = nchar(okL[1])
             # Get only station code with the same first letter
-            subCodefL = Code[substr(Code, 1, 1) == fL]
+            subCodeRh = Code[substr(Code, 1, nL) %in% okL]
             # Counts the number of station in it
-            nsubCodefL = length(subCodefL)
-            # Computes the number of pages needed to plot all stations
-            nMat = as.integer(nsubCodefL/slice) + 1
+            nsubCodeRh = length(subCodeRh)
+            # Computes the number of pages needed to
+            # plot all stations
+            nMat = as.integer(nsubCodeRh/slice) + 1
             # For all the pages
             for (iMat in 1:nMat) {
-                n_loop = ifL + nfL*(itype-1) + (iMat-1)
-                # N_loop = nfL*nbType
+                n_loop = iR + nRH*(itype-1) + (iMat-1)
                 # Print the matrix name
                 print(paste('Matrix ', iMat, '/', nMat,
                             ' of ', type,
-                            ' for region : ', fL,
+                            ' for region : ', rh,
                             " (",
                             round(n_loop / N_loop * 100,
                                   0),
@@ -527,7 +530,7 @@ matrix_panel = function (list_df2plot, df_meta, trend_period, mean_period, slice
                             sep=''))
                 # Extracts the station for the current page
-                subCode = subCodefL[(slice*(iMat-1)+1):(slice*iMat)]
+                subCode = subCodeRh[(slice*(iMat-1)+1):(slice*iMat)]
                 # Removes NA stations
                 subCode = subCode[!is.na(subCode)]
                 # Reverses verticale order of stations
@@ -537,33 +540,33 @@ matrix_panel = function (list_df2plot, df_meta, trend_period, mean_period, slice
                 # Creates logical vector to select only info about
                 # stations that will be plot on the page
-                CodefL_trend =
+                CodeRh_trend =
                     Code_trend %in% subCode & Type_trend == type
                 # Extracts those info
-                subPeriods_trend = Periods_trend[CodefL_trend]
-                subNPeriod_trend = NPeriod_trend[CodefL_trend]
-                subVar_trend = Var_trend[CodefL_trend]
-                subType_trend = Type_trend[CodefL_trend]
-                subCode_trend = Code_trend[CodefL_trend]
-                subAlpha_trend = Alpha_trend[CodefL_trend]
-                subTrendValue_trend = TrendValue_trend[CodefL_trend]
-                subDataMean_trend = DataMean_trend[CodefL_trend]
-                subFill_trend = Fill_trend[CodefL_trend]
-                subColor_trend = Color_trend[CodefL_trend]
+                subPeriods_trend = Periods_trend[CodeRh_trend]
+                subNPeriod_trend = NPeriod_trend[CodeRh_trend]
+                subVar_trend = Var_trend[CodeRh_trend]
+                subType_trend = Type_trend[CodeRh_trend]
+                subCode_trend = Code_trend[CodeRh_trend]
+                subAlpha_trend = Alpha_trend[CodeRh_trend]
+                subTrendValue_trend = TrendValue_trend[CodeRh_trend]
+                subDataMean_trend = DataMean_trend[CodeRh_trend]
+                subFill_trend = Fill_trend[CodeRh_trend]
+                subColor_trend = Color_trend[CodeRh_trend]
                 # Same for breaking analysis
-                CodefL_mean =
+                CodeRh_mean =
                     Code_mean %in% subCode & Type_mean == type
                 # Extracts right info
-                subPeriods_mean = Periods_mean[CodefL_mean]
-                subNPeriod_mean = NPeriod_mean[CodefL_mean]
-                subVar_mean = Var_mean[CodefL_mean]
-                subType_mean = Type_mean[CodefL_mean]
-                subCode_mean = Code_mean[CodefL_mean]
-                subDataMean_mean = DataMean_mean[CodefL_mean]
-                subbreakValue_mean = breakValue_mean[CodefL_mean]
-                subFill_mean = Fill_mean[CodefL_mean]
-                subColor_mean = Color_mean[CodefL_mean]
+                subPeriods_mean = Periods_mean[CodeRh_mean]
+                subNPeriod_mean = NPeriod_mean[CodeRh_mean]
+                subVar_mean = Var_mean[CodeRh_mean]
+                subType_mean = Type_mean[CodeRh_mean]
+                subCode_mean = Code_mean[CodeRh_mean]
+                subDataMean_mean = DataMean_mean[CodeRh_mean]
+                subbreakValue_mean = breakValue_mean[CodeRh_mean]
+                subFill_mean = Fill_mean[CodeRh_mean]
+                subColor_mean = Color_mean[CodeRh_mean]
                 # Gets the number of variable to plot in
                 # function of the current type
@@ -1219,7 +1222,7 @@ matrix_panel = function (list_df2plot, df_meta, trend_period, mean_period, slice
                     path=outdirTmp,
                     filename=paste(outnameTmp,
                                    '_', type,
-                                   '_', fL,
+                                   '_', rh,
                                    iMat, sep=''),
                     device='pdf',
                     width=width, height=height,
...
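For context, here is a toy run of the per-region pagination used in `matrix_panel` above: `slice` stations per matrix page, with the NA padding of the last incomplete page removed. The station codes and the `slice` value are made up; note that the formula as written yields one extra, empty page whenever the station count is an exact multiple of `slice`.

```r
# Hypothetical inputs
slice = 5
subCodeRh = paste0('O', sprintf('%07d', 1:12))    # 12 fake station codes

# Number of matrix pages needed for this region
nMat = as.integer(length(subCodeRh)/slice) + 1    # 12/5 -> 3 pages

for (iMat in 1:nMat) {
    # Station codes for the current page
    subCode = subCodeRh[(slice*(iMat-1)+1):(slice*iMat)]
    # Drops the NA produced by out-of-range indices on the last page
    subCode = subCode[!is.na(subCode)]
    print(length(subCode))                        # 5, 5, 2
}
```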
@@ -105,7 +105,16 @@ iRegHydro = c('D'='Affluents du Rhin',
               'L'='Loire',
               'M'='Loire',
               'N'='Fleuves côtiers au sud de la Loire',
-              'O'='Garonne',
+              'O0'='Garonne',
+              'O1'='Garonne',
+              'O2'='Garonne',
+              'O3'='Tarn-Aveyron',
+              'O4'='Tarn-Aveyron',
+              'O5'='Tarn-Aveyron',
+              'O6'='Tarn-Aveyron',
+              'O7'='Lot',
+              'O8'='Lot',
+              'O9'='Lot',
               'P'='Dordogne',
               'Q'='Adour',
               'R'='Charente',
@@ -369,9 +378,25 @@ extract_meta = function (computer_data_path, filedir, filename,
                                 # The path to the data file of BH
                                 file_path=file_path)
+
+        Ltmp = names(iRegHydro)[nchar(names(iRegHydro)) == 2]
+        Ltmp = substr(Ltmp, 1, 1)
+        infoSecteur = rle(sort(Ltmp))$values
+
+        oneL = substr(df_meta$code, 1, 1)
+        twoL = substr(df_meta$code, 1, 2)
+
+        RH = c()
+        for (i in 1:length(oneL)) {
+            if (oneL[i] %in% infoSecteur) {
+                RHtmp = iRegHydro[twoL[i]]
+            } else {
+                RHtmp = iRegHydro[oneL[i]]
+            }
+            RH = c(RH, RHtmp)
+        }
+
         # Adding of the hydrological region
-        df_meta$region_hydro = iRegHydro[substr(df_meta$code, 1, 1)]
+        df_meta$region_hydro = RH
         return (df_meta)
     } else {
...
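A note on how the two files fit together: because `iRegHydro` is a named vector, the lookup in `extract_meta` leaves the one- or two-character prefix as the name of each `region_hydro` entry, which is exactly what `matrix_panel` reads back through `names(df_meta$region_hydro)`. A minimal illustration, using an excerpt of `iRegHydro` and hypothetical data:

```r
iRegHydro = c('O3'='Tarn-Aveyron', 'P'='Dordogne')    # excerpt

# Subsetting a named vector keeps the key used for the lookup as the name
region_hydro = c(iRegHydro['O3'], iRegHydro['P'])
names(region_hydro)                                   # "O3" "P"

# matrix_panel then recovers the code prefixes belonging to one region
rh  = 'Tarn-Aveyron'
okL = rle(sort(names(region_hydro)[region_hydro == rh]))$values   # "O3"
nL  = nchar(okL[1])                                               # 2
```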
@@ -55,21 +55,21 @@ filedir =
 # Name of the file that will be analysed from the BH directory
 # (if 'all', all the file of the directory will be chosen)
 filename =
-    ""
-    "all"
-    # c(
-    # "S2235610_HYDRO_QJM.txt",
-    # "P1712910_HYDRO_QJM.txt",
-    # "P0885010_HYDRO_QJM.txt",
-    # "O5055010_HYDRO_QJM.txt",
-    # "O0384010_HYDRO_QJM.txt",
-    # "S4214010_HYDRO_QJM.txt",
-    # "Q7002910_HYDRO_QJM.txt"
-    # "Q0214010_HYDRO_QJM.txt"
-    # "O3035210_HYDRO_QJM.txt",
-    # "O0554010_HYDRO_QJM.txt",
-    # "O1584610_HYDRO_QJM.txt"
-    # )
+    # ""
+    # "all"
+    c(
+        "S2235610_HYDRO_QJM.txt",
+        "P1712910_HYDRO_QJM.txt",
+        "P0885010_HYDRO_QJM.txt",
+        "O5055010_HYDRO_QJM.txt",
+        "O0384010_HYDRO_QJM.txt",
+        "S4214010_HYDRO_QJM.txt",
+        "Q7002910_HYDRO_QJM.txt",
+        "Q0214010_HYDRO_QJM.txt",
+        "O3035210_HYDRO_QJM.txt",
+        "O0554010_HYDRO_QJM.txt",
+        "O1584610_HYDRO_QJM.txt"
+    )

 ## AGENCE EAU ADOUR GARONNE SELECTION
@@ -79,8 +79,8 @@ AEAGlistdir =
     ""

 AEAGlistname =
-    # ""
-    "Liste-station_RRSE.docx"
+    ""
+    # "Liste-station_RRSE.docx"

 ## NIVALE SELECTION
@@ -249,42 +249,42 @@ df_meta = get_lacune(df_data, df_meta)
 df_meta = get_hydrograph(df_data, df_meta, period=mean_period[[1]])$meta

 ### 3.2. Trend analysis
-# QA trend
-res_QAtrend = get_QAtrend(df_data, df_meta,
-                          period=trend_period,
-                          alpha=alpha,
-                          yearLac_day=yearLac_day)
+# # QA trend
+# res_QAtrend = get_QAtrend(df_data, df_meta,
+#                           period=trend_period,
+#                           alpha=alpha,
+#                           yearLac_day=yearLac_day)

-# QMNA tend
-res_QMNAtrend = get_QMNAtrend(df_data, df_meta,
-                              period=trend_period,
-                              alpha=alpha,
-                              sampleSpan=sampleSpan,
-                              yearLac_day=yearLac_day)
+# # QMNA tend
+# res_QMNAtrend = get_QMNAtrend(df_data, df_meta,
+#                               period=trend_period,
+#                               alpha=alpha,
+#                               sampleSpan=sampleSpan,
+#                               yearLac_day=yearLac_day)

-# VCN10 trend
-res_VCN10trend = get_VCN10trend(df_data, df_meta,
-                                period=trend_period,
-                                alpha=alpha,
-                                sampleSpan=sampleSpan,
-                                yearLac_day=yearLac_day)
+# # VCN10 trend
+# res_VCN10trend = get_VCN10trend(df_data, df_meta,
+#                                 period=trend_period,
+#                                 alpha=alpha,
+#                                 sampleSpan=sampleSpan,
+#                                 yearLac_day=yearLac_day)

-# Start date for low water trend
-res_tDEBtrend = get_tDEBtrend(df_data, df_meta,
-                              period=trend_period,
-                              alpha=alpha,
-                              sampleSpan=sampleSpan,
-                              thresold_type='VCN10',
-                              select_longest=TRUE,
-                              yearLac_day=yearLac_day)
-# res_tDEBtrend = read_listofdf(resdir, 'res_tDEBtrend')
+# # Start date for low water trend
+# res_tDEBtrend = get_tDEBtrend(df_data, df_meta,
+#                               period=trend_period,
+#                               alpha=alpha,
+#                               sampleSpan=sampleSpan,
+#                               thresold_type='VCN10',
+#                               select_longest=TRUE,
+#                               yearLac_day=yearLac_day)
+# # res_tDEBtrend = read_listofdf(resdir, 'res_tDEBtrend')

-# Center date for low water trend
-res_tCENtrend = get_tCENtrend(df_data, df_meta,
-                              period=trend_period,
-                              alpha=alpha,
-                              sampleSpan=sampleSpan,
-                              yearLac_day=yearLac_day)
+# # Center date for low water trend
+# res_tCENtrend = get_tCENtrend(df_data, df_meta,
+#                               period=trend_period,
+#                               alpha=alpha,
+#                               sampleSpan=sampleSpan,
+#                               yearLac_day=yearLac_day)

 ### 3.3. Break analysis
 # df_break = get_break(res_QAtrend$data, df_meta)
@@ -361,7 +361,7 @@ datasheet_layout(toplot=c(
                  glose=list(
                      "Moyenne annuelle du débit journalier",
-                     "Minimum annuel de la moyenne menusuelle du débit journalier",
+                     "Minimum annuel de la moyenne mensuelle du débit journalier",
                      "Minimum annuel de la moyenne sur 10 jours du débit journalier",
                      "Début d'étiage (jour de l'année de la première moyenne sur 10 jours sous le maximum des VCN10)",
                      "Centre d'étiage (jour de l'année du VCN10)"
...