diff --git a/plotting/datasheet.R b/plotting/datasheet.R
index 2c7c284cfb0f2b3da38e81b700961ae77530759b..46e42223a8315c3d9ef3ecd06bc79c9b117f33ec 100644
--- a/plotting/datasheet.R
+++ b/plotting/datasheet.R
@@ -30,7 +30,7 @@
 source('processing/analyse.R', encoding='UTF-8')
 
 
-## 1. DATASHEET PANEL
+## 1. DATASHEET PANEL ________________________________________________
 # Manages datasheets creations for all stations. Makes the call to
 # the different headers, trend analysis graphs and realises arranging
 # every plots.
@@ -538,8 +538,8 @@ datasheet_panel = function (list_df2plot, df_meta, trend_period, info_header, ti
 }
 
 
-## 2. OTHER PANEL FOR THE DATASHEET
-### 2.1. Time panel
+## 2. OTHER PANEL FOR THE DATASHEET __________________________________
+### 2.1. Time panel __________________________________________________
 time_panel = function (df_data_code, df_trend_code, var, type, alpha=0.1, missRect=FALSE, unit2day=365.25, trend_period=NULL, mean_period=NULL, axis_xlim=NULL, grid=TRUE, ymin_lim=NULL, color=NULL, NspaceMax=NULL, first=FALSE, last=FALSE, lim_pct=10) {
 
     # If 'type' is square root apply it to data
@@ -1410,7 +1410,7 @@ time_panel = function (df_data_code, df_trend_code, var, type, alpha=0.1, missRe
 }
 
  
-### 2.2. Info panel
+### 2.2. Info panel __________________________________________________
 # Plots the header that regroups all the info on the station
 info_panel = function(list_df2plot, df_meta, period, df_shapefile, codeLight, df_data_code=NULL) {
 
@@ -1530,7 +1530,7 @@ info_panel = function(list_df2plot, df_meta, period, df_shapefile, codeLight, df
     return(plot)
 } 
 
-### 2.3. Hydrograph panel
+### 2.3. Hydrograph panel ____________________________________________
 # Creates a hydrograph for a station with the data serie of flow
 hydrograph_panel = function (df_data_code, period, margin=NULL) {
 
diff --git a/plotting/layout.R b/plotting/layout.R
index 0ebbee1ab4fcd06819e0ba5029512d4e0a3089b7..2cc723dd44ea99ab1089a50db3cf308b08976789 100644
--- a/plotting/layout.R
+++ b/plotting/layout.R
@@ -51,8 +51,8 @@ source('plotting/matrix.R', encoding='UTF-8')
 source('plotting/break.R', encoding='UTF-8')
 
 
-## 1. PERSONALISATION
-### 1.1. Personal theme
+## 1. PERSONALISATION ________________________________________________
+### 1.1. Personal theme ______________________________________________
 theme_ash =
     theme(
         # White background
@@ -87,7 +87,7 @@ theme_ash =
         axis.line.y=element_blank(),
         )
 
-### 1.2. Color palette
+### 1.2. Color palette _______________________________________________
 palette_perso = c('#0f3b57', # cold
                   '#1d7881',
                   '#80c4a9',
@@ -97,8 +97,8 @@ palette_perso = c('#0f3b57', # cold
                   '#7e392f') # hot
 
 
-## 2. USEFUL GENERICAL PLOT
-### 2.1. Void plot
+## 2. USEFUL GENERICAL PLOT __________________________________________
+### 2.1. Void plot ___________________________________________________
 # A plot completly blank
 void = ggplot() + geom_blank(aes(1,1)) +
     theme(
@@ -120,7 +120,7 @@ contour = void +
     theme(plot.background=element_rect(fill=NA, color="#EC4899"),
           plot.margin=margin(t=0, r=0, b=0, l=0, unit="mm"))
 
-### 2.2. Circle
+### 2.2. Circle ______________________________________________________
 # Allow to draw circle in ggplot2 with a radius and a center position
 gg_circle = function(r, xc, yc, color="black", fill=NA, ...) {
     x = xc + r*cos(seq(0, pi, length.out=100))
@@ -131,7 +131,7 @@ gg_circle = function(r, xc, yc, color="black", fill=NA, ...) {
 }
 
 
-## 3. LAYOUT
+## 3. LAYOUT _________________________________________________________
 # Generates a PDF that gather datasheets, map and summarize matrix about the trend analyses realised on selected stations
 datasheet_layout = function (df_data, df_meta, layout_matrix,
                              toplot=c('datasheet', 'matrix', 'map'),
@@ -336,170 +336,8 @@ datasheet_layout = function (df_data, df_meta, layout_matrix,
 } 
 
 
-## 4. COLOR MANAGEMENT
-### 4.1. Color on colorbar
-# Returns a color of a palette corresponding to a value included
-# between the min and the max of the variable
-get_color = function (value, min, max, ncolor=256, palette_name='perso', reverse=FALSE) {
-
-    # If the value is a NA return NA color
-    if (is.na(value)) {
-        return (NA)
-    }
-    
-    # If the palette chosen is the personal ones
-    if (palette_name == 'perso') {
-        colorList = palette_perso
-    # Else takes the palette corresponding to the name given
-    } else {
-        colorList = brewer.pal(11, palette_name)
-    }
-    
-    # Gets the number of discrete colors in the palette
-    nSample = length(colorList)
-    # Recreates a continuous color palette
-    palette = colorRampPalette(colorList)(ncolor)
-    # Separates it in the middle to have a cold and a hot palette
-    Sample_hot = 1:(as.integer(nSample/2)+1)
-    Sample_cold = (as.integer(nSample/2)+1):nSample
-    palette_hot = colorRampPalette(colorList[Sample_hot])(ncolor)
-    palette_cold = colorRampPalette(colorList[Sample_cold])(ncolor)
-
-    # Reverses the palette if it needs to be
-    if (reverse) {
-        palette = rev(palette)
-        palette_hot = rev(palette_hot)
-        palette_cold = rev(palette_cold)
-    }
-
-    # Computes the absolute max
-    maxAbs = max(abs(max), abs(min))
-
-    # If the value is negative
-    if (value < 0) {
-        # Gets the relative position of the value in respect
-        # to its span
-        idNorm = (value + maxAbs) / maxAbs
-        # The index corresponding
-        id = round(idNorm*(ncolor - 1) + 1, 0)
-        # The associated color
-        color = palette_cold[id]
-    # Same if it is a positive value
-    } else {
-        idNorm = value / maxAbs
-        id = round(idNorm*(ncolor - 1) + 1, 0)
-        color = palette_hot[id]
-    }
-    return(color)
-}
-
-### 4.2. Colorbar
-# Returns the colorbar but also positions, labels and colors of some
-# ticks along it 
-get_palette = function (min, max, ncolor=256, palette_name='perso', reverse=FALSE, nbTick=10) {
-    
-    # If the palette chosen is the personal ones
-    if (palette_name == 'perso') {
-        colorList = palette_perso
-    # Else takes the palette corresponding to the name given
-    } else {
-        colorList = brewer.pal(11, palette_name)
-    }
-    
-    # Gets the number of discrete colors in the palette
-    nSample = length(colorList)
-    # Recreates a continuous color palette
-    palette = colorRampPalette(colorList)(ncolor)
-    # Separates it in the middle to have a cold and a hot palette
-    Sample_hot = 1:(as.integer(nSample/2)+1)
-    Sample_cold = (as.integer(nSample/2)+1):nSample
-    palette_hot = colorRampPalette(colorList[Sample_hot])(ncolor)
-    palette_cold = colorRampPalette(colorList[Sample_cold])(ncolor)
-
-    # Reverses the palette if it needs to be
-    if (reverse) {
-        palette = rev(palette)
-        palette_hot = rev(palette_hot)
-        palette_cold = rev(palette_cold)
-    }
-
-    # If the min and the max are below zero
-    if (min < 0 & max < 0) {
-        # The palette show is only the cold one
-        paletteShow = palette_cold
-    # If the min and the max are above zero
-    } else if (min > 0 & max > 0) {
-        # The palette show is only the hot one
-        paletteShow = palette_hot
-    # Else it is the entire palette that is shown
-    } else {
-        paletteShow = palette
-    }
-
-    # The position of ticks is between 0 and 1
-    posTick = seq(0, 1, length.out=nbTick)
-    # Blank vector to store corresponding labels and colors
-    labTick = c()
-    colTick = c()
-    # For each tick
-    for (i in 1:nbTick) {
-        # Computes the graduation between the min and max
-        lab = (i-1)/(nbTick-1) * (max - min) + min
-        # Gets the associated color
-        col = get_color(lab, min=min, max=max,
-                        ncolor=ncolor,
-                        palette_name=palette_name,
-                        reverse=reverse)
-        # Stores them
-        labTick = c(labTick, lab)
-        colTick = c(colTick, col)
-    }
-    # List of results
-    res = list(palette=paletteShow, posTick=posTick,
-               labTick=labTick, colTick=colTick)
-    return(res)
-}
-
-### 4.3. Palette tester
-# Allows to display the current personal palette
-palette_tester = function (n=256) {
-
-    # An arbitrary x vector
-    X = 1:n
-    # All the same arbitrary y position to create a colorbar
-    Y = rep(0, times=n)
-
-    # Recreates a continuous color palette
-    palette = colorRampPalette(palette_perso)(n)
-
-    # Open a plot
-    p = ggplot() + 
-        # Make the theme blank
-        theme(
-            plot.background = element_blank(), 
-            panel.grid.major = element_blank(),
-            panel.grid.minor = element_blank(), 
-            panel.border = element_blank(),
-            panel.background = element_blank(),
-            axis.title.x = element_blank(),
-            axis.title.y = element_blank(),
-            axis.text.x = element_blank(), 
-            axis.text.y = element_blank(),
-            axis.ticks = element_blank(),
-            axis.line = element_blank()
-        ) +
-        # Plot the palette
-        geom_line(aes(x=X, y=Y), color=palette[X], size=60) +
-        scale_y_continuous(expand=c(0, 0))
-
-    # Saves the plot
-    ggsave(plot=p,
-           filename=paste('palette_test', '.pdf', sep=''),
-           width=10, height=10, units='cm', dpi=100)
-}
-
-
-### Summary panel
+## 4. PDF ORGANISATION PANEL _________________________________________
+### 4.1. Summary _____________________________________________________
 summary_panel = function (df_page, foot_note, foot_height, resources_path, logo_dir, AEAGlogo_file, INRAElogo_file, FRlogo_file, outdirTmp) {
     
     text_title = paste(
@@ -690,8 +528,7 @@ summary_panel = function (df_page, foot_note, foot_height, resources_path, logo_
            width=width, height=height, units='cm', dpi=100)
 }
 
-
-### Foot note panel
+### 4.2. Foot note panel _____________________________________________
 foot_panel = function (name, n_page, resources_path, logo_dir, AEAGlogo_file, INRAElogo_file, FRlogo_file, foot_height) {
     
     text_page = paste(
@@ -762,8 +599,170 @@ foot_panel = function (name, n_page, resources_path, logo_dir, AEAGlogo_file, IN
 }
 
 
-## 5. OTHER TOOLS
-### 5.1. Number formatting
+## 5. COLOR MANAGEMENT _______________________________________________
+### 5.1. Color on colorbar ___________________________________________
+# Returns a color of a palette corresponding to a value included
+# between the min and the max of the variable
+get_color = function (value, min, max, ncolor=256, palette_name='perso', reverse=FALSE) {
+
+    # If the value is a NA return NA color
+    if (is.na(value)) {
+        return (NA)
+    }
+    
+    # If the palette chosen is the personal ones
+    if (palette_name == 'perso') {
+        colorList = palette_perso
+    # Else takes the palette corresponding to the name given
+    } else {
+        colorList = brewer.pal(11, palette_name)
+    }
+    
+    # Gets the number of discrete colors in the palette
+    nSample = length(colorList)
+    # Recreates a continuous color palette
+    palette = colorRampPalette(colorList)(ncolor)
+    # Separates it in the middle to have a cold and a hot palette
+    Sample_hot = 1:(as.integer(nSample/2)+1)
+    Sample_cold = (as.integer(nSample/2)+1):nSample
+    palette_hot = colorRampPalette(colorList[Sample_hot])(ncolor)
+    palette_cold = colorRampPalette(colorList[Sample_cold])(ncolor)
+
+    # Reverses the palette if it needs to be
+    if (reverse) {
+        palette = rev(palette)
+        palette_hot = rev(palette_hot)
+        palette_cold = rev(palette_cold)
+    }
+
+    # Computes the absolute max
+    maxAbs = max(abs(max), abs(min))
+
+    # If the value is negative
+    if (value < 0) {
+        # Gets the relative position of the value in respect
+        # to its span
+        idNorm = (value + maxAbs) / maxAbs
+        # The index corresponding
+        id = round(idNorm*(ncolor - 1) + 1, 0)
+        # The associated color
+        color = palette_cold[id]
+    # Same if it is a positive value
+    } else {
+        idNorm = value / maxAbs
+        id = round(idNorm*(ncolor - 1) + 1, 0)
+        color = palette_hot[id]
+    }
+    return(color)
+}
+
+### 5.2. Colorbar ____________________________________________________
+# Returns the colorbar but also positions, labels and colors of some
+# ticks along it 
+get_palette = function (min, max, ncolor=256, palette_name='perso', reverse=FALSE, nbTick=10) {
+    
+    # If the palette chosen is the personal ones
+    if (palette_name == 'perso') {
+        colorList = palette_perso
+    # Else takes the palette corresponding to the name given
+    } else {
+        colorList = brewer.pal(11, palette_name)
+    }
+    
+    # Gets the number of discrete colors in the palette
+    nSample = length(colorList)
+    # Recreates a continuous color palette
+    palette = colorRampPalette(colorList)(ncolor)
+    # Separates it in the middle to have a cold and a hot palette
+    Sample_hot = 1:(as.integer(nSample/2)+1)
+    Sample_cold = (as.integer(nSample/2)+1):nSample
+    palette_hot = colorRampPalette(colorList[Sample_hot])(ncolor)
+    palette_cold = colorRampPalette(colorList[Sample_cold])(ncolor)
+
+    # Reverses the palette if it needs to be
+    if (reverse) {
+        palette = rev(palette)
+        palette_hot = rev(palette_hot)
+        palette_cold = rev(palette_cold)
+    }
+
+    # If the min and the max are below zero
+    if (min < 0 & max < 0) {
+        # The palette show is only the cold one
+        paletteShow = palette_cold
+    # If the min and the max are above zero
+    } else if (min > 0 & max > 0) {
+        # The palette show is only the hot one
+        paletteShow = palette_hot
+    # Else it is the entire palette that is shown
+    } else {
+        paletteShow = palette
+    }
+
+    # The position of ticks is between 0 and 1
+    posTick = seq(0, 1, length.out=nbTick)
+    # Blank vector to store corresponding labels and colors
+    labTick = c()
+    colTick = c()
+    # For each tick
+    for (i in 1:nbTick) {
+        # Computes the graduation between the min and max
+        lab = (i-1)/(nbTick-1) * (max - min) + min
+        # Gets the associated color
+        col = get_color(lab, min=min, max=max,
+                        ncolor=ncolor,
+                        palette_name=palette_name,
+                        reverse=reverse)
+        # Stores them
+        labTick = c(labTick, lab)
+        colTick = c(colTick, col)
+    }
+    # List of results
+    res = list(palette=paletteShow, posTick=posTick,
+               labTick=labTick, colTick=colTick)
+    return(res)
+}
+
+### 5.3. Palette tester ______________________________________________
+# Allows to display the current personal palette
+palette_tester = function (n=256) {
+
+    # An arbitrary x vector
+    X = 1:n
+    # All the same arbitrary y position to create a colorbar
+    Y = rep(0, times=n)
+
+    # Recreates a continuous color palette
+    palette = colorRampPalette(palette_perso)(n)
+
+    # Open a plot
+    p = ggplot() + 
+        # Make the theme blank
+        theme(
+            plot.background = element_blank(), 
+            panel.grid.major = element_blank(),
+            panel.grid.minor = element_blank(), 
+            panel.border = element_blank(),
+            panel.background = element_blank(),
+            axis.title.x = element_blank(),
+            axis.title.y = element_blank(),
+            axis.text.x = element_blank(), 
+            axis.text.y = element_blank(),
+            axis.ticks = element_blank(),
+            axis.line = element_blank()
+        ) +
+        # Plot the palette
+        geom_line(aes(x=X, y=Y), color=palette[X], size=60) +
+        scale_y_continuous(expand=c(0, 0))
+
+    # Saves the plot
+    ggsave(plot=p,
+           filename=paste('palette_test', '.pdf', sep=''),
+           width=10, height=10, units='cm', dpi=100)
+}
+
+## 6. OTHER TOOLS ____________________________________________________
+### 6.1. Number formatting ___________________________________________
 # Returns the power of ten of the scientific expression of a value
 get_power = function (value) {
 
@@ -793,7 +792,7 @@ get_power = function (value) {
     return(power)
 }
 
-### 5.2. Pourcentage of variable
+### 6.2. Percentage of variable ______________________________________
 # Returns the value corresponding of a certain percentage of a
 # data serie
 gpct = function (pct, L, min_lim=NULL, shift=FALSE) {
@@ -822,7 +821,7 @@ gpct = function (pct, L, min_lim=NULL, shift=FALSE) {
     return (xL)
 }
 
-### 5.3. Add months
+### 6.3. Add months __________________________________________________
 add_months = function (date, n) {
     new_date = seq(date, by = paste (n, "months"), length = 2)[2]
     return (new_date)
diff --git a/plotting/map.R b/plotting/map.R
index ae7628f3fce02df57ff3b97973788f888f4a90d0..ed432ace52c2381daa556150d57095cf83d8f6f4 100644
--- a/plotting/map.R
+++ b/plotting/map.R
@@ -26,7 +26,7 @@
 # Deals with the creation of a map for presenting the trend analysis of hydrological variables
 
 
-## 1. MAP PANEL
+## 1. MAP PANEL ______________________________________________________
 # Generates a map plot of the tendancy of a hydrological variable
 map_panel = function (list_df2plot, df_meta, df_shapefile, idPer_trend=1,
                       trend_period=NULL,
diff --git a/plotting/matrix.R b/plotting/matrix.R
index e14b1054438bdc02e57f2e80d1c8dfcbe1bcb3f5..4816c12d9c86891fe8c6babb1ed7df6bad37da20 100644
--- a/plotting/matrix.R
+++ b/plotting/matrix.R
@@ -26,7 +26,7 @@
 # Allows the creation of a summarizing matrix of trend and break analyses
 
 
-## 1. MATRIX PANEL
+## 1. MATRIX PANEL ___________________________________________________
 # Generates a summarizing matrix of the trend analyses of all station for different hydrological variables and periods. Also shows difference of means between specific periods.
 matrix_panel = function (list_df2plot, df_meta, trend_period, mean_period, slice=NULL, outdirTmp='', outnameTmp='matrix', title=NULL, A3=FALSE,
                          foot_note=FALSE,
diff --git a/processing/analyse.R b/processing/analyse.R
index 267b891d953ac72eb36d511ca33d7028a95623ca..b39e8b5e7eb8a8852897eca41f68aa76a87ed0ab 100644
--- a/processing/analyse.R
+++ b/processing/analyse.R
@@ -31,7 +31,7 @@
 
 # Usefull library
 library(dplyr)
-library(zoo)
+library(zoo)                 # rollmean
 library(StatsAnalysisTrend)
 library(lubridate)
 library(trend)
@@ -40,8 +40,8 @@ library(trend)
 source('processing/format.R', encoding='latin1')
 
 
-## 1. TREND ANALYSIS
-### 1.0. Intercept of trend
+## 1. TREND ANALYSIS _________________________________________________
+### 1.0. Intercept of trend __________________________________________
 # Compute intercept values of linear trends with first order values
 # of trends and the data on which analysis is performed.
 get_intercept = function (df_Xtrend, df_Xlist, unit2day=365.25) {
@@ -94,15 +94,17 @@ get_intercept = function (df_Xtrend, df_Xlist, unit2day=365.25) {
     return (df_Xtrend)
 }
 
-### 1.1. QA
+### 1.1. QA __________________________________________________________
 # Realise the trend analysis of the average annual flow (QA)
 # hydrological variable
-get_QAtrend = function (df_data, df_meta, period, alpha, yearLac_day) {
+get_QAtrend = function (df_data, df_meta, period, alpha, yearLac_day, df_mod=tibble()) {
 
     # Removes incomplete data from time series
-    df_data = missing_data(df_data, df_meta,
-                           yearLac_day=yearLac_day,
-                           yearStart='01-01')
+    res = missing_data(df_data, df_meta,
+                       yearLac_day=yearLac_day,
+                       df_mod=df_mod)
+    df_data = res$data
+    df_mod = res$mod
     
     # Make sure to convert the period to a list
     period = as.list(period)
@@ -145,20 +147,29 @@ get_QAtrend = function (df_data, df_meta, period, alpha, yearLac_day) {
     } 
     # Clean results of trend analyse
     res_QAtrend = clean(df_QAtrendB, df_QAExB, df_QAlistB)
-    return (res_QAtrend)
+
+    res = list(data=df_data, mod=df_mod, analyse=res_QAtrend)
+    return (res)
 }
 
-### 1.2. QMNA
+### 1.2. QMNA ________________________________________________________
 # Realise the trend analysis of the monthly minimum flow in the
 # year (QMNA) hydrological variable
-get_QMNAtrend = function (df_data, df_meta, period, alpha, sampleSpan, yearLac_day) {
+get_QMNAtrend = function (df_data, df_meta, period, alpha, sampleSpan, yearLac_day, df_mod=tibble()) {
 
     # Removes incomplete data from time series
-    df_data = missing_data(df_data, df_meta,
-                           yearLac_day=yearLac_day, yearStart='01-01')
+    res = missing_data(df_data, df_meta,
+                       yearLac_day=yearLac_day,
+                       df_mod=df_mod)
+    df_data = res$data
+    df_mod = res$mod
+    
     # Samples the data
-    df_data = sampling_data(df_data, df_meta,
-                            sampleSpan=sampleSpan)
+    res = sampling_data(df_data, df_meta,
+                        sampleSpan=sampleSpan,
+                        df_mod=df_mod)
+    df_data = res$data
+    df_mod = res$mod
     
     # Make sure to convert the period to a list
     period = as.list(period)
@@ -214,27 +225,15 @@ get_QMNAtrend = function (df_data, df_meta, period, alpha, sampleSpan, yearLac_d
     }
     # Clean results of trend analyse
     res_QMNAtrend = clean(df_QMNAtrendB, df_QMNAExB, df_QMNAlistB)
-    return (res_QMNAtrend)
+        
+    res = list(data=df_data, mod=df_mod, analyse=res_QMNAtrend)
+    return (res)
 }
 
-### 1.3. VCN10
-# Realises the trend analysis of the minimum 10 day average flow
-# over the year (VCN10) hydrological variable
-get_VCN10trend = function (df_data, df_meta, period, alpha, sampleSpan, yearLac_day) {
-
-    # Removes incomplete data from time series
-    df_data = missing_data(df_data, df_meta,
-                           yearLac_day=yearLac_day, yearStart='01-01')
-
-    # Samples the data
-    df_data = sampling_data(df_data, df_meta,
-                            sampleSpan=sampleSpan)
-    
-    # Get all different stations code
-    Code = levels(factor(df_meta$code))
+### 1.3. VCN10 _______________________________________________________
+rollmean_code = function (df_data, Code, nroll=10, df_mod=NULL) {
     # Blank tibble to store the data averaged
     df_data_roll = tibble()
-
     # For all the code
     for (code in Code) {
         # Get the data associated to the code
@@ -247,7 +246,47 @@ get_VCN10trend = function (df_data, df_meta, period, alpha, sampleSpan, yearLac_
                                    code=code)
         # Store the results
         df_data_roll = bind_rows(df_data_roll, df_data_roll_code)
+
+        if (!is.null(df_mod)) {
+            df_mod = add_mod(df_mod, code,
+                             type='Rolling average',
+                             fun_name='rollmean',
+                             comment='Rolling average of 10 day over all the data')
+        }
     }
+    if (!is.null(df_mod)) {
+        res = list(data=df_data_roll, mod=df_mod)  # return the rolled data, not the raw input
+        return (res)
+    } else {
+        return (df_data_roll)
+    }
+}
+
+# Realises the trend analysis of the minimum 10 day average flow
+# over the year (VCN10) hydrological variable
+get_VCN10trend = function (df_data, df_meta, period, alpha, sampleSpan, yearLac_day, df_mod=tibble()) {
+    
+    # Get all different stations code
+    Code = levels(factor(df_meta$code))
+
+    # Computes the rolling average by 10 days over the data
+    res = rollmean_code(df_data, Code, 10, df_mod=df_mod)
+    df_data_roll = res$data
+    df_mod = res$mod
+
+    # Removes incomplete data from time series
+    res = missing_data(df_data_roll, df_meta,
+                       yearLac_day=yearLac_day,
+                       df_mod=df_mod)
+    df_data_roll = res$data
+    df_mod = res$mod
+
+    # Samples the data
+    res = sampling_data(df_data_roll, df_meta,
+                        sampleSpan=sampleSpan,
+                        df_mod=df_mod)
+    df_data_roll = res$data
+    df_mod = res$mod
 
     # Make sure to convert the period to a list
     period = as.list(period)
@@ -289,10 +328,13 @@ get_VCN10trend = function (df_data, df_meta, period, alpha, sampleSpan, yearLac_
     }
     # Clean results of trend analyse
     res_VCN10trend = clean(df_VCN10trendB, df_VCN10ExB, df_VCN10listB)
-    return (res_VCN10trend)
+    
+    res = list(data=df_data_roll, mod=df_mod,
+               analyse=res_VCN10trend)
+    return (res)
 }
 
-### 1.4. tDEB date
+### 1.4. tDEB date ___________________________________________________
 which_underfirst = function (L, UpLim, select_longest=TRUE) {
     
     ID = which(L <= UpLim)
@@ -328,47 +370,43 @@ which_underfirst = function (L, UpLim, select_longest=TRUE) {
     return (id)
 }
 
-get_tDEBtrend = function (df_data, df_meta, period, alpha, sampleSpan, yearLac_day, thresold_type='VCN10', select_longest=TRUE) {
+get_tDEBtrend = function (df_data, df_meta, period, alpha, sampleSpan, yearLac_day, thresold_type='VCN10', select_longest=TRUE, df_mod=tibble()) {
 
     # Get all different stations code
     Code = levels(factor(df_meta$code))
     # Gets the number of station
     nCode = length(Code)
     
-    # Blank tibble to store the data averaged
-    df_data_roll = tibble() 
-
-    # For all the code
-    for (code in Code) {
-        # Get the data associated to the code
-        df_data_code = df_data[df_data$code == code,]
-        # Perform the roll mean of the flow over 10 days
-        df_data_roll_code = tibble(Date=df_data_code$Date,
-                                   Value=rollmean(df_data_code$Value, 
-                                                  10,
-                                                  fill=NA),
-                                   code=code)
-        # Store the results
-        df_data_roll = bind_rows(df_data_roll, df_data_roll_code)
-    }
+    # Computes the rolling average by 10 days over the data
+    res = rollmean_code(df_data, Code, 10, df_mod=df_mod)
+    df_data_roll = res$data
+    df_mod = res$mod
 
     # Removes incomplete data from time series
     df_data = missing_data(df_data,
                            df_meta=df_meta,
                            yearLac_day=yearLac_day)
+    
     # Samples the data
     df_data = sampling_data(df_data,
                             df_meta=df_meta,
                             sampleSpan=sampleSpan)
     
     # Removes incomplete data from the averaged time series
-    df_data_roll = missing_data(df_data_roll,
-                                df_meta=df_meta,
-                                yearLac_day=yearLac_day)
+    res = missing_data(df_data_roll,
+                       df_meta=df_meta,
+                       yearLac_day=yearLac_day,
+                       df_mod=df_mod)
+    df_data_roll = res$data
+    df_mod = res$mod
+    
     # Samples the data
-    df_data_roll = sampling_data(df_data_roll,
-                                 df_meta=df_meta,
-                                 sampleSpan=sampleSpan)
+    res = sampling_data(df_data_roll,
+                        df_meta=df_meta,
+                        sampleSpan=sampleSpan,
+                        df_mod=df_mod)
+    df_data_roll = res$data
+    df_mod = res$mod
 
     # Make sure to convert the period to a list
     period = as.list(period)
@@ -475,41 +513,41 @@ get_tDEBtrend = function (df_data, df_meta, period, alpha, sampleSpan, yearLac_d
         df_tDEBtrendB = bind_rows(df_tDEBtrendB, df_tDEBtrend)
     }
     # Clean results of trend analyse
-    res_tDEBtrend = clean(df_tDEBtrendB, df_tDEBExB, df_tDEBlistB)    
-    return (res_tDEBtrend)
+    res_tDEBtrend = clean(df_tDEBtrendB, df_tDEBExB, df_tDEBlistB)
+    
+    res = list(data=df_data_roll, mod=df_mod,
+               analyse=res_tDEBtrend)
+    return (res)
 }
 
-### 1.5. tCEN date
+### 1.5. tCEN date ___________________________________________________
 # Realises the trend analysis of the date of the minimum 10 day
 # average flow over the year (VCN10) hydrological variable
-get_tCENtrend = function (df_data, df_meta, period, alpha, sampleSpan, yearLac_day) {
+get_tCENtrend = function (df_data, df_meta, period, alpha, sampleSpan, yearLac_day, df_mod=tibble()) {
     
     # Get all different stations code
     Code = levels(factor(df_meta$code))
     # Blank tibble to store the data averaged
     df_data_roll = tibble() 
 
-    # For all the code
-    for (code in Code) {
-        # Get the data associated to the code
-        df_data_code = df_data[df_data$code == code,]
-        # Perform the roll mean of the flow over 10 days
-        df_data_roll_code = tibble(Date=df_data_code$Date,
-                                   Value=rollmean(df_data_code$Value, 
-                                                  10,
-                                                  fill=NA),
-                                   code=code)
-        # Store the results
-        df_data_roll = bind_rows(df_data_roll, df_data_roll_code)
-    }
+    # Computes the rolling average by 10 days over the data
+    res = rollmean_code(df_data, Code, 10, df_mod=df_mod)
+    df_data_roll = res$data
+    df_mod = res$mod
 
     # Removes incomplete data from time series
-    df_data_roll = missing_data(df_data_roll, df_meta,
-                                yearLac_day=yearLac_day,
-                                yearStart='01-01')
+    res = missing_data(df_data_roll, df_meta,
+                       yearLac_day=yearLac_day,
+                       df_mod=df_mod)
+    df_data_roll = res$data
+    df_mod = res$mod
+    
     # Samples the data
-    df_data_roll = sampling_data(df_data_roll, df_meta,
-                                 sampleSpan=sampleSpan)
+    res = sampling_data(df_data_roll, df_meta,
+                                 sampleSpan=sampleSpan,
+                                 df_mod=df_mod)
+    df_data_roll = res$data
+    df_mod = res$mod
 
     # Make sure to convert the period to a list
     period = as.list(period)
@@ -554,12 +592,15 @@ get_tCENtrend = function (df_data, df_meta, period, alpha, sampleSpan, yearLac_d
     }
     # Clean results of trend analyse
     res_tCENtrend = clean(df_tCENtrendB, df_tCENExB, df_tCENlistB)
-    return (res_tCENtrend)
+
+    res = list(data=df_data_roll, mod=df_mod,
+               analyse=res_tCENtrend)
+    return (res)
 }
 
 
-## 2. OTHER ANALYSES
-### 2.1. Hydrograph
+## 2. OTHER ANALYSES _________________________________________________
+### 2.1. Hydrograph __________________________________________________
 xref = matrix(
     c(0.099, 0.100, 0.101, 0.099, 0.088, 0.078, 0.072,
       0.064, 0.064, 0.069, 0.076, 0.089,
@@ -689,7 +730,7 @@ get_hydrograph = function (df_data, period=NULL, df_meta=NULL) {
     return (list(QM=df_QM, meta=df_meta))
 }
     
-### 2.2. Break date
+### 2.2. Break date __________________________________________________
 # Compute the break date of the flow data by station 
 get_break = function (df_data, df_meta, alpha=0.05) {
     
@@ -736,7 +777,7 @@ get_break = function (df_data, df_meta, alpha=0.05) {
     return (df_break)
 }
 
-### 2.3. Time gap
+### 2.3. Time gap ____________________________________________________
 # Compute the time gap by station
 get_lacune = function (df_data, df_meta) {
     
@@ -789,4 +830,3 @@ get_lacune = function (df_data, df_meta) {
     df_meta = full_join(df_meta, df_lac)
     return (df_meta)
 }
-
diff --git a/processing/extract.R b/processing/extract.R
index fc99c92dec05b426a412085e019baa553aef222b..ba0ac8e51d8a666976b7b3e53da7bb8c3067834f 100644
--- a/processing/extract.R
+++ b/processing/extract.R
@@ -37,7 +37,7 @@ library(dplyr)
 library(officer)
 
 
-### 1. GENERAL METADATA ON STATION
+### 1. GENERAL METADATA ON STATION ___________________________________
 # Status of the station
 iStatut = c('0'='inconnu', 
             '1'='station avec signification hydrologique', 
@@ -135,8 +135,8 @@ iRegHydro = c('D'='Affluents du Rhin',
               '4'='Réunion')
 
 
-## 2. SELECTION
-### 2.1. Creation of selection
+## 2. SELECTION ______________________________________________________
+### 2.1. Creation of selection _______________________________________
 # Create a txt file that resume all the station data files present
 # in a filedir
 create_selection = function (computer_data_path, filedir, outname, optname='_HYDRO_QJM') {
@@ -180,7 +180,7 @@ create_selection = function (computer_data_path, filedir, outname, optname='_HYD
 #     "France207",
 #     "INRAE_selection.txt")
 
-### 2.2. Agence de l'eau Adour-Garonne selection
+### 2.2. Agence de l'eau Adour-Garonne selection _____________________
 # Gets the selection of station from the 'Liste-station_RRSE.docx' file
 get_selection_AEAG = function (computer_data_path, listdir, listname,
                              cnames=c('code','station', 'BV_km2',
@@ -240,7 +240,7 @@ get_selection_AEAG = function (computer_data_path, listdir, listname,
     # c_num=c('BV_km2',
             # 'longueur_serie'))
 
-### 2.3. INRAE selection
+### 2.3. INRAE selection _____________________________________________
 # Gets the selection of station from the selection txt file generated
 # by the 'create_selection' function
 get_selection_INRAE = function (computer_data_path, listdir, listname) {
@@ -265,7 +265,7 @@ get_selection_INRAE = function (computer_data_path, listdir, listname) {
     # "INRAE_selection.txt")
 
 
-## 3. EXTRACTION
+## 3. EXTRACTION _____________________________________________________
 ### 3.1. Extraction of metadata
 # Extraction of metadata of stations
 extract_meta = function (computer_data_path, filedir, filename,
@@ -410,7 +410,7 @@ extract_meta = function (computer_data_path, filedir, filename,
 #     "BanqueHydro_Export2021",
 #     c('H5920011_HYDRO_QJM.txt', 'K4470010_HYDRO_QJM.txt'))
 
-### 3.2. Extraction of data
+### 3.2. Extraction of data __________________________________________
 # Extraction of data
 extract_data = function (computer_data_path, filedir, filename,
                          verbose=TRUE) {
@@ -505,7 +505,7 @@ extract_data = function (computer_data_path, filedir, filename,
 #     c('H5920011_HYDRO_QJM.txt', 'K4470010_HYDRO_QJM.txt'))
 
 
-## 4. SHAPEFILE MANAGEMENT
+## 4. SHAPEFILE MANAGEMENT ___________________________________________
 # Generates a list of shapefiles to draw a hydrological map of
 # the France
 ini_shapefile = function (resources_path, fr_shpdir, fr_shpname, bs_shpdir, bs_shpname, sbs_shpdir, sbs_shpname, rv_shpdir, rv_shpname, is_river=TRUE) {
diff --git a/processing/format.R b/processing/format.R
index 175109e3532b00ff25ab5eb3c046d535c1cca660..b994dce4e14866b72afb14f1c4b23f905cc1c03b 100644
--- a/processing/format.R
+++ b/processing/format.R
@@ -34,16 +34,16 @@ library(dplyr)
 library(Hmisc)
 
 
-## 1. BEFORE TREND ANALYSE
-### 1.1. Joining selection
+## 1. BEFORE TREND ANALYSE ___________________________________________
+### 1.1. Joining selection ___________________________________________
 # Joins tibbles of different selection of station as a unique one
-join = function (df_data_AEAG, df_data_INRAE, df_meta_AEAG, df_meta_INRAE) {
+join_selection = function (df_data_AEAG, df_data_INRAE, df_meta_AEAG, df_meta_INRAE) {
 
     # If there is an INRAE and an Agence de l'eau Adour-Garonne selection
     if (!is.null(df_data_INRAE) & !is.null(df_data_AEAG)) {
 
         # Gets the station in common
-        common = levels(factor(df_meta_INRAE[df_meta_INRAE$code %in% df_meta_AEAG$code,]$code)) 
+        common = levels(factor(df_meta_INRAE[df_meta_INRAE$code %in% df_meta_AEAG$code,]$code))
         # Gets the Nv station to add
         INRAEadd = levels(factor(df_meta_INRAE[!(df_meta_INRAE$code %in% df_meta_AEAG$code),]$code))
 
@@ -81,8 +81,8 @@ join = function (df_data_AEAG, df_data_INRAE, df_meta_AEAG, df_meta_INRAE) {
     return (list(data=df_data, meta=df_meta))
 }
 
-### 1.2. Manages missing data
-missing_data = function (df_data, df_meta, yearLac_day=3, yearStart='01-01', Code=NULL) {
+### 1.2. Manages missing data ________________________________________
+missing_data = function (df_data, df_meta, yearLac_day=3, yearStart='01-01', Code=NULL, df_mod=NULL) {
 
     if (is.null(Code)) {
         # Get all different stations code
@@ -133,6 +133,14 @@ missing_data = function (df_data, df_meta, yearLac_day=3, yearStart='01-01', Cod
             if (nbNA > yearLac_day) {
                 df_data_code_year$Value = NA
                 df_data_code[OkYear,] = df_data_code_year
+
+                if (!is.null(df_mod)) {
+                    df_mod = add_mod(df_mod, code,
+                                     type='Missing data management',
+                                     fun_name='NA assignment',
+                                     comment=paste('From ', Start,
+                                                   ' to ', End, sep=''))
+                }
                 
             } else if (nbNA <= yearLac_day & nbNA > 1) {
                 DateJ = as.numeric(df_data_code_year$Date)
@@ -144,16 +152,29 @@ missing_data = function (df_data, df_meta, yearLac_day=3, yearStart='01-01', Cod
                                      method="linear",
                                      na.rm=TRUE)$y                
                 df_data_code$Value[OkYear] = Value
+
+                if (!is.null(df_mod)) {
+                    df_mod = add_mod(df_mod, code,
+                                     type='Missing data management',
+                                     fun_name='approxExtrap',
+                                     comment=paste(
+                                         'Linear extrapolation of NA from ',
+                                         Start, ' to ', End, sep=''))
+                }
             }
         }
         df_data[df_data$code == code,] = df_data_code        
     }
-    
-    return (df_data)
+    if (!is.null(df_mod)) {
+        res = list(data=df_data, mod=df_mod)
+        return (res)
+    } else {
+        return (df_data)
+    }
 }
 
-### 1.3. Sampling of the data
-sampling_data = function (df_data, df_meta, sampleSpan=c('05-01', '11-30'), Code=NULL) {
+### 1.3. Sampling of the data ________________________________________
+sampling_data = function (df_data, df_meta, sampleSpan=c('05-01', '11-30'), Code=NULL, df_mod=NULL) {
 
     if (is.null(Code)) {
         # Get all different stations code
@@ -176,18 +197,33 @@ sampling_data = function (df_data, df_meta, sampleSpan=c('05-01', '11-30'), Code
         
         df_data_code$Value[Date < sampleStart | Date > sampleEnd] = NA
 
+
+        if (!is.null(df_mod)) {
+            df_mod = add_mod(df_mod, code,
+                             type='Seasonal sampling',
+                             fun_name='NA assignment',
+                             comment=paste('Before ', sampleStart,
+                                           ' and after ', sampleEnd,
+                                           sep=''))
+        }
+        
         # Leap year verification
         # print(df_data_code[df_data_code$Date > as.Date("1992-02-25"),])
 
         df_data[df_data$code == code,] = df_data_code
     }
-    
-    return (df_data)
+
+    if (!is.null(df_mod)) {
+        res = list(data=df_data, mod=df_mod)
+        return (res)
+    } else {
+        return (df_data)
+    }
 }
 
 
-## 2. DURING TREND ANALYSE
-### 2.1. Preparation
+## 2. DURING TREND ANALYSE ___________________________________________
+### 2.1. Preparation _________________________________________________
 # Prepares the data in order to have a list of a data tibble with
 # date, group and flow column and a info tibble with the station code
 # and group column to fit the entry of the 'extract.Var' function in
@@ -218,7 +254,7 @@ prepare = function(df_data, colnamegroup=NULL) {
     return (res)
 }
 
-### 2.2. Re-preparation
+### 2.2. Re-preparation ______________________________________________
 # Re-prepares the data in outing of the 'extract.Var' function in
 # the 'StatsAnalysisTrend' package in order to fit again to the
 # entry of the same function
@@ -259,7 +295,7 @@ reprepare = function(df_XEx, df_Xlist, colnamegroup=NULL) {
     return (df_XlistEx)
 }
 
-### 2.3. Prepare date 
+### 2.3. Prepare date ________________________________________________
 prepare_date = function(df_XEx, df_Xlist, per.start="01-01") {
 
     df_dateStart = summarise(group_by(df_Xlist$data, group),
@@ -325,8 +361,8 @@ prepare_date = function(df_XEx, df_Xlist, per.start="01-01") {
 }
 
 
-## 3. AFTER TREND ANALYSE
-### 3.1. Period of trend
+## 3. AFTER TREND ANALYSE ____________________________________________
+### 3.1. Period of trend _____________________________________________
 # Compute the start and the end of the period for a trend analysis
 # according to the accessible data 
 get_period = function (per, df_Xtrend, df_XEx, df_Xlist) {
@@ -374,7 +410,7 @@ get_period = function (per, df_Xtrend, df_XEx, df_Xlist) {
     return (df_Xtrend)
 }
 
-### 3.2. Cleaning
+### 3.2. Cleaning ____________________________________________________
 # Cleans the trend results of the function 'Estimate.stats' in the
 # 'StatsAnalysisTrend' package. It adds the station code and the
 # intercept of the trend to the trend results. Also makes the data
@@ -408,3 +444,20 @@ clean = function (df_Xtrend, df_XEx, df_Xlist) {
     return (res)
 }
 
+## 4. OTHER __________________________________________________________
+add_mod = function (df_mod, Code, type, fun_name, comment, df_meta=NULL) {
+    if (Code == 'all' & is.null(df_meta)) {
+        Code = NA # error: 'all' needs df_meta to resolve station codes
+    } else if (Code == 'all' & !is.null(df_meta)) {
+        # Get all different stations code
+        Code = levels(factor(df_meta$code))
+    }
+    
+    for (code in Code) {
+        df_modtmp = tibble(code=code, type=type,
+                           fun_name=fun_name,
+                           comment=comment)
+        df_mod = bind_rows(df_mod, df_modtmp)
+    }
+    return (df_mod)
+}
diff --git a/processing/results_manager.R b/processing/results_manager.R
index fdfe9977ea76fa14e0426a0bdc64524cdd4a2710..d564cd58e4c5cfd28f74a95fd44e2efbe16e849f 100644
--- a/processing/results_manager.R
+++ b/processing/results_manager.R
@@ -25,7 +25,8 @@
 #
 # Manages the writing and reading of results data of the trend analysis.
 
-
+## 1. WRITING ________________________________________________________
+### 1.1. List of dataframe ___________________________________________
 write_listofdf = function (Ldf, resdir, filedir, optdir='') {
     
     outdir = file.path(resdir, optdir, filedir)
@@ -33,8 +34,8 @@ write_listofdf = function (Ldf, resdir, filedir, optdir='') {
         dir.create(outdir, recursive=TRUE)
     }
 
-    print(outdir)
-
+    print(paste('Writing of list of dataframe in : ', outdir, sep=''))
+    
     Lname = names(Ldf)
     Nname = length(Lname)
     for (i in 1:Nname) {
@@ -48,12 +49,50 @@ write_listofdf = function (Ldf, resdir, filedir, optdir='') {
     }
 }
 
+### 1.2. Dataframe of modified data __________________________________
+write_dfdata = function (df_data, df_mod, resdir, filedir, optdir='') {
+
+    Code = rle(sort(df_mod$code))$values
+    # print(Code) # NOTE(review): leftover debug print, disabled
+    
+    outdir = file.path(resdir, optdir, filedir)
+    if (!(file.exists(outdir))) {
+        dir.create(outdir, recursive=TRUE)
+    }
+
+    print(paste('Writing of modified data in : ', outdir, sep=''))
+
+    for (code in Code) {
+        df_data_code = df_data[df_data$code == code,]
+        df_mod_code = df_mod[df_mod$code == code,]
+    
+        outfile1 = paste(code, '.txt', sep='')
+        write.table(df_data_code,
+                    file=file.path(outdir, outfile1),
+                    sep=";",
+                    quote=TRUE,
+                    row.names=FALSE)
+        
+        outfile2 = paste(code, '_modification', '.txt', sep='')
+        write.table(df_mod_code,
+                    file=file.path(outdir, outfile2),
+                    sep=";",
+                    quote=TRUE,
+                    row.names=FALSE)
+    }
+}
+    
+
+## 2. READING ________________________________________________________
+### 2.1. List of dataframe ___________________________________________
 read_listofdf = function (resdir, filedir) {
 
     outdir = file.path(resdir, filedir)
     files = list.files(outdir)
     Nfile = length(files)
 
+    print(paste('Reading of list of dataframe in : ', outdir, sep=''))
+
     Ldf = list()
     for (i in 1:Nfile) {
         name = splitext(files[i])$name
@@ -82,6 +121,7 @@ read_listofdf = function (resdir, filedir) {
     return (Ldf)
 }
 
+## 3. OTHER __________________________________________________________
 splitext = function(file) { 
     ex = strsplit(basename(file), split="\\.")[[1]]
     res = list(name=ex[1], extension=ex[2])
diff --git a/script.R b/script.R
index a4a4e1f28653fe1416e65c68be7f61b45b51cf2f..3fe43893c024c8b17a2e4e4e6bfed46f7fd87050 100644
--- a/script.R
+++ b/script.R
@@ -124,7 +124,7 @@ is_river = FALSE
 ############### END OF REGION TO MODIFY (without risk) ###############
 
 
-## 1. FILE STRUCTURE
+## 1. FILE STRUCTURE _________________________________________________
 # Set working directory
 setwd(computer_work_path)
 
@@ -179,15 +179,15 @@ rv_shpdir = 'map/river'
 rv_shpname = 'CoursEau_FXX.shp'
 
 
-## 2. SELECTION OF STATION
+## 2. SELECTION OF STATION ___________________________________________
 # Initialization of null data frame if there is no data selected
 df_data_AEAG = NULL
 df_data_INRAE = NULL
 df_meta_AEAG = NULL
 df_meta_INRAE = NULL
 
-### 2.1. Selection of the Agence de l'eau Adour-Garonne 
-if (AEAGlistname != ""){
+### 2.1. Selection of the Agence de l'eau Adour-Garonne ______________
+if (AEAGlistname != "") {
     
     # Get only the selected station from a list station file
     df_selec_AEAG = get_selection_AEAG(computer_data_path, 
@@ -211,7 +211,7 @@ if (AEAGlistname != ""){
     df_data_AEAG = extract_data(computer_data_path, filedir, filename)
 }
 
-### 2.2. INRAE selection 
+### 2.2. INRAE selection _____________________________________________
 if (INRAElistname != ""){
     
     # Get only the selected station from a list station file
@@ -227,7 +227,7 @@ if (INRAElistname != ""){
     df_data_INRAE = extract_data(computer_data_path, filedir, filename)
 } 
 
-### 2.3. Manual selection 
+### 2.3. Manual selection ____________________________________________
 if (AEAGlistname == "" & INRAElistname == "") {
     # Extract metadata about selected stations
     df_meta_AEAG = extract_meta(computer_data_path, filedir, filename)
@@ -235,57 +235,80 @@ if (AEAGlistname == "" & INRAElistname == "") {
     df_data_AEAG = extract_data(computer_data_path, filedir, filename)
 }
 
-### 2.4. Data join
-df_join = join(df_data_AEAG, df_data_INRAE, df_meta_AEAG, df_meta_INRAE)
+### 2.4. Data join ___________________________________________________
+df_join = join_selection(df_data_AEAG, df_data_INRAE,
+                         df_meta_AEAG, df_meta_INRAE)
 df_data = df_join$data
 df_meta = df_join$meta
 
 
-## 3. ANALYSE
-### 3.1. Compute other parameters for stations
+## 3. ANALYSE ________________________________________________________
+var = list(
+    'QA',
+    'QMNA',
+    'VCN10',
+    'tDEB',
+    'tCEN'
+)
+### 3.1. Compute other parameters for stations _______________________
 # Time gap
 df_meta = get_lacune(df_data, df_meta)
 # Hydrograph
 df_meta = get_hydrograph(df_data, df_meta, period=mean_period[[1]])$meta
 
-### 3.2. Trend analysis
+### 3.2. Trend analysis ______________________________________________
 # QA trend
-res_QAtrend = get_QAtrend(df_data, df_meta,
-                          period=trend_period,
-                          alpha=alpha,
-                          yearLac_day=yearLac_day)
+res = get_QAtrend(df_data, df_meta,
+                  period=trend_period,
+                  alpha=alpha,
+                  yearLac_day=yearLac_day)
+df_QAdata = res$data
+df_QAmod = res$mod
+res_QAtrend = res$analyse
 
 # QMNA tend
-res_QMNAtrend = get_QMNAtrend(df_data, df_meta,
-                              period=trend_period,
-                              alpha=alpha,
-                              sampleSpan=sampleSpan,
-                              yearLac_day=yearLac_day)
+res = get_QMNAtrend(df_data, df_meta,
+                    period=trend_period,
+                    alpha=alpha,
+                    sampleSpan=sampleSpan,
+                    yearLac_day=yearLac_day)
+df_QMNAdata = res$data
+df_QMNAmod = res$mod
+res_QMNAtrend = res$analyse
 
 # VCN10 trend
-res_VCN10trend = get_VCN10trend(df_data, df_meta,
-                                period=trend_period,
-                                alpha=alpha,
-                                sampleSpan=sampleSpan,
-                                yearLac_day=yearLac_day)
+res = get_VCN10trend(df_data, df_meta,
+                     period=trend_period,
+                     alpha=alpha,
+                     sampleSpan=sampleSpan,
+                     yearLac_day=yearLac_day)
+df_VCN10data = res$data
+df_VCN10mod = res$mod
+res_VCN10trend = res$analyse
 
 # Start date for low water trend
-res_tDEBtrend = get_tDEBtrend(df_data, df_meta, 
-                            period=trend_period,
-                            alpha=alpha,
-                            sampleSpan=sampleSpan,
-                            thresold_type='VCN10',
-                            select_longest=TRUE,
-                            yearLac_day=yearLac_day)
+res = get_tDEBtrend(df_data, df_meta, 
+                    period=trend_period,
+                    alpha=alpha,
+                    sampleSpan=sampleSpan,
+                    thresold_type='VCN10',
+                    select_longest=TRUE,
+                    yearLac_day=yearLac_day)
+df_tDEBdata = res$data
+df_tDEBmod = res$mod
+res_tDEBtrend = res$analyse
 
 # Center date for low water trend
-res_tCENtrend = get_tCENtrend(df_data, df_meta, 
-                            period=trend_period,
-                            alpha=alpha,
-                            sampleSpan=sampleSpan,
-                            yearLac_day=yearLac_day)
-
-### 3.3. Break analysis
+res = get_tCENtrend(df_data, df_meta, 
+                    period=trend_period,
+                    alpha=alpha,
+                    sampleSpan=sampleSpan,
+                    yearLac_day=yearLac_day)
+df_tCENdata = res$data
+df_tCENmod = res$mod
+res_tCENtrend = res$analyse
+
+### 3.3. Break analysis ______________________________________________
 # df_break = get_break(res_QAtrend$data, df_meta)
 # df_break = get_break(res_QMNAtrend$data, df_meta)
 # df_break = get_break(res_VCN10trend$data, df_meta)
@@ -297,23 +320,22 @@ res_tCENtrend = get_tCENtrend(df_data, df_meta,
 #           figdir=figdir)
 
 
-
-## 4. SAVING
-### 4.1. Modified data saving
-
-
-### 4.2. Trend analysis saving
-trenddir = 'trend_analyse'
-write_listofdf(res_QAtrend, resdir, optdir=trenddir, filedir='QA')
-write_listofdf(res_QMNAtrend, resdir, optdir=trenddir, filedir='QMNA')
-write_listofdf(res_VCN10trend, resdir, optdir=trenddir, filedir='VCN10')
-write_listofdf(res_tDEBtrend, resdir, optdir=trenddir, filedir='tDEB')
-write_listofdf(res_tCENtrend, resdir, optdir=trenddir, filedir='tCEN')
-
+## 4. SAVING _________________________________________________________
+for (v in var) {
+    df_datatmp = get(paste('df_', v, 'data', sep=''))
+    df_modtmp = get(paste('df_', v, 'mod', sep=''))
+    res_trendtmp = get(paste('res_', v, 'trend', sep=''))
+    # Modified data saving
+    write_dfdata(df_datatmp, df_modtmp, resdir, optdir='modified_data',
+                 filedir=v)
+    # Trend analysis saving
+    write_listofdf(res_trendtmp, resdir, optdir='trend_analyse',
+                   filedir=v)
+}
 # res_tDEBtrend = read_listofdf(resdir, 'res_tDEBtrend')
 
 
-## 5. PLOTTING
+## 5. PLOTTING _______________________________________________________
 # Shapefile importation in order to it only once time
 df_shapefile = ini_shapefile(resources_path,
                              fr_shpdir, fr_shpname,
@@ -321,7 +343,7 @@ df_shapefile = ini_shapefile(resources_path,
                              sbs_shpdir, sbs_shpname,
                              rv_shpdir, rv_shpname, is_river=is_river)
 
-### 5.1. Simple time panel to criticize station data
+### 5.1. Simple time panel to criticize station data _________________
 # Plot time panel of debit by stations
 # datasheet_layout(list(df_data, df_data),
                  # layout_matrix=c(1, 2),
@@ -334,7 +356,7 @@ df_shapefile = ini_shapefile(resources_path,
                  # figdir=figdir,
                  # filename_opt='time')
 
-### 5.2. Analysis layout 
+### 5.2. Analysis layout _____________________________________________
 datasheet_layout(toplot=c(
                      'datasheet'
                      # 'matrix'
@@ -358,13 +380,7 @@ datasheet_layout(toplot=c(
                      res_tCENtrend$trend
                  ),
                  
-                 var=list(
-                     'QA',
-                     'QMNA',
-                     'VCN10',
-                     'tDEB',
-                     'tCEN'
-                 ),
+                 var=var,
                  
                  type=list(
                      'sévérité',