diff --git a/HydrologicalSignatures.py b/HydrologicalSignatures.py
index a6a3f46c7e38312ae38d01a08e7d3783f03d0baa..570d9c2f7c10b4f37262eb56404ea715d60fcb1b 100644
--- a/HydrologicalSignatures.py
+++ b/HydrologicalSignatures.py
@@ -308,8 +308,9 @@ class HydrologicalSignatures(object):
         d = np.abs(np.diff(q))
         prominence = 5 * np.min(d[d > 0])
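+        # the prominence (5x the smallest non-zero day-to-day change in q) is passed to
+        # IDRecession below, presumably as a peak-detection threshold for splitting events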
 
-        [idmax, pmax, idmin, pmin] = self.IDRecession(q, t, prominence, min_recession_time, t_overland)
+        idmax, pmax, idmin, pmin = self.IDRecession(q, t, prominence, min_recession_time, t_overland)
 
+        aH, bH, aL, bL, ts, d_all, d_L = self.SRanalysis(q, idmin, idmax)
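+        # per-event recession coefficients of -dQ/dt = a * Q^b for early-time (aH, bH)
+        # and late-time (aL, bL) flow, characteristic time scales ts, and the recession
+        # durations d_all (full event) and d_L (late-time only)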
 
         # import matlab.engine
         # eng = matlab.engine.start_matlab()
@@ -329,9 +330,9 @@ class HydrologicalSignatures(object):
         # aL = np.asarray(aL)
         # bL = np.asarray(bL)
         # ts = np.asarray(ts)
-        aL = np.nan
-        bL = np.nan
-        ts = np.nan
+        # aL = np.nan
+        # bL = np.nan
+        # ts = np.nan
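+        # catchment-scale signatures: medians of the per-event late-time estimates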
         self.tau_roques = np.nanmedian(ts)
         self.a_q = np.nanmedian(aL)
         self.b_q = np.nanmedian(bL)
@@ -460,8 +461,105 @@ class HydrologicalSignatures(object):
         L1 = 0.5
         L2 = 0.05
 
+        from scipy.optimize import curve_fit
+        from sklearn.linear_model import LinearRegression
+
+        # Loop over the individual recession events delimited by the peak (idmax)
+        # and trough (idmin) indices
+        for zz in np.arange(0, len(idmax)):
+            time_event = np.arange(1, (idmin[zz] - idmax[zz]) + 1)
+            d_all[zz] = len(time_event)
+            Qevent = q[int(idmax[zz]):int(idmin[zz])]
+
+            # Fit an exponential model a*exp(-b*t) + c to the normalised event hydrograph
+            # (MATLAB original: prepareCurveData(time_event', Qevent./max(Qevent)))
+            xDataexp = time_event
+            yDataexp = Qevent / max(Qevent)
+            f = lambda x, a, b, c: a * np.exp(-b * x) + c
+            popt, pcov = curve_fit(f, xDataexp, yDataexp, p0=[0.1, 0.1, 0.1], method='lm')
+
+            # Only proceed if the exponential fit describes a decaying signal
+            if popt[0] > 0 and popt[1] > 0:
+                # Window width cc grows along the recession, following the fitted decay
+                # rate (wider windows late in the event, when the signal varies slowly)
+                step_max = 0.25 * len(time_event)
+                cc = np.ceil(step_max * np.exp(-1 / (popt[1] * time_event))) + 1
+                Lderiv = int(len(time_event) - cc[-1])
+                dQ_dt = np.zeros((Lderiv,))
+                Q_deriv = np.zeros((Lderiv,))
+                Rsq = np.zeros((Lderiv,))
+                t_deriv = np.zeros((Lderiv,))
+
+                # Estimate dQ/dt with a moving-window linear regression
+                for ee in np.arange(0, Lderiv):
+                    X = time_event[ee:int(ee + cc[ee])].reshape((-1, 1))
+                    Y = Qevent[ee:int(ee + cc[ee])]
+                    model = LinearRegression(fit_intercept=True)
+                    model.fit(X, Y)
+                    # slope of the local regression of Q against t is the dQ/dt estimate
+                    slope = model.coef_[0]
+                    Rsq[ee] = np.max([0, model.score(X, Y)])
+                    # Reject windows where flow is not receding, the fit is poor,
+                    # or the slope is negligibly small
+                    if slope > 0 or Rsq[ee] < limrsq or np.log10(-slope) < -8:
+                        slope = np.nan
+
+                    dQ_dt[ee] = -1 * slope
+                    Q_deriv[ee] = np.nanmean(Y)
+                    t_deriv[ee] = np.nanmean(X)
+            else:
+                # no usable exponential fit: flag the event so the NaN mask below
+                # drops it (the original used a -999 sentinel here)
+                dQ_dt = np.array([np.nan])
+                Q_deriv = np.array([np.nan])
+                Rsq = np.array([np.nan])
+
+            # Fit the power law -dQ/dt = a * Q^b as a linear fit in log space:
+            # log(-dQ/dt) = b * log(Q) + log(a)
+            # keep only windows where both Q and dQ/dt estimates are valid
+            nonan_bool = (~np.isnan(Q_deriv)) & (~np.isnan(dQ_dt))
+            Q_deriv = Q_deriv[nonan_bool]
+            dQ_dt = dQ_dt[nonan_bool]
+            Rsq = Rsq[nonan_bool]
+
+            # fit early-time flow (high-flow quantile band H2 < Q < H1)
+            H = (Q_deriv < np.quantile(q, H1)) & (Q_deriv > np.quantile(q, H2))
+
+            if np.sum(H) >= lr:
+                f2 = lambda x, a, b: a * x + b
+                # note: curve_fit treats sigma as uncertainties (larger sigma = lower
+                # weight); to weight points by their window R^2 this may need
+                # sigma=1/np.sqrt(Rsq[H]) instead
+                popt, pcov = curve_fit(f2, np.log(Q_deriv[H]), np.log(dQ_dt[H]), sigma=Rsq[H])
+                bH[zz] = popt[0]
+                aH[zz] = np.exp(popt[1])
+                # TODO: return the goodness of fit as well?
+                if bH[zz] < 0:
+                    bH[zz] = np.nan
+                    aH[zz] = np.nan
+            else:
+                bH[zz] = np.nan
+                aH[zz] = np.nan
+
+            # fit late-time flow (low-flow quantile band L2 < Q < L1)
+            L = (Q_deriv < np.quantile(q, L1)) & (Q_deriv > np.quantile(q, L2))
+            if np.sum(L) >= lr:
+                f2 = lambda x, a, b: a * x + b
+                pL, pcov = curve_fit(f2, np.log(Q_deriv[L]), np.log(dQ_dt[L]), sigma=Rsq[L])
+
+                bL[zz] = pL[0]
+                aL[zz] = np.exp(pL[1])
+                if bL[zz] < 0:
+                    bL[zz] = np.nan
+                    aL[zz] = np.nan
+            else:
+                bL[zz] = np.nan
+                aL[zz] = np.nan
+
+            # duration of the recession in late-time (number of late-time windows)
+            d_L[zz] = np.sum(L)
+            if np.sum(L) < lr:
+                d_L[zz] = np.nan
+
+            # Fit with the slope fixed at b = 1 to get the characteristic time scale
+            if np.sum(L) >= lr:
+                f3 = lambda x, a: x + a
+                popt, pcov = curve_fit(f3, np.log(Q_deriv[L]), np.log(dQ_dt[L]), p0=0.1)
+                ts[zz] = 1 / np.exp(popt[0])
+                if bL[zz] < 0:  # ?? should this test ts[zz] instead?
+                    ts[zz] = np.nan
+            else:
+                ts[zz] = np.nan
 
-        return aH, bH, aL, bL, ts, d_all, d_L, gofH, gofL, gofT
+        return aH, bH, aL, bL, ts, d_all, d_L
 
     def compute_fdc_slope(self, df_obs, column_name='Q', portion_to_compute=(0.33333, 0.66666)):
         fdc_quantile = self.compute_fdc_quantiles(df_obs, column_name, tuple([100*x for x in portion_to_compute]))