diff --git a/Classes/DepthData.py b/Classes/DepthData.py
index 946908873307ac7467ed20d1f11eff5c0355daa0..b0e39aee4c9c38a803c39d2475ab6c6405cac76a 100644
--- a/Classes/DepthData.py
+++ b/Classes/DepthData.py
@@ -61,7 +61,7 @@ class DepthData(object):
         valid_beams: np.array
             Logical array, 1 row for each beam identifying valid data.
     """
-    
+
     def __init__(self):
         """Initialize attributes.
         """
@@ -88,7 +88,7 @@ class DepthData(object):
         self.valid_data_method = None  # QRev or TRDI
         self.valid_data = None  # Logical array of valid mean depth for each ensemble
         self.valid_beams = None  # Logical array, 1 row for each beam identifying valid data
-        
+
     def populate_data(self, depth_in, source_in, freq_in, draft_in, cell_depth_in, cell_size_in):
         """Stores data in DepthData.
 
@@ -120,7 +120,7 @@ class DepthData(object):
         self.filter_type = 'None'
         self.interp_type = 'None'
         self.valid_data_method = 'QRev'
-        
+
         # For BT data set method to average multiple beam depths
         if source_in == 'BT':
             self.avg_method = 'IDW'
@@ -223,19 +223,19 @@ class DepthData(object):
 
     def change_draft(self, draft):
         """Changes the draft for object
-        
+
         draft: new draft for object
         """
         # Compute draft change
         draft_change = draft - self.draft_use_m
         self.draft_use_m = draft
-        
+
         # Apply draft to ensemble depths if BT or VB
         if self.depth_source != 'DS':
             self.depth_beams_m = self.depth_beams_m + draft_change
-            self.depth_processed_m = self.depth_processed_m + draft_change 
-            
-        # Apply draft to depth cell locations
+            self.depth_processed_m = self.depth_processed_m + draft_change
+
+        # Apply draft to depth cell locations
         if len(self.depth_cell_depth_m) > 0:
             self.depth_cell_depth_m = self.depth_cell_depth_m + draft_change
 
@@ -249,7 +249,7 @@ class DepthData(object):
         bt_depths: DepthData
             Object of DepthData with bottom track depths
         """
-        
+
         self.depth_cell_depth_orig_m = bt_depths.depth_cell_depth_orig_m
         self.depth_cell_size_m = bt_depths.depth_cell_size_m
         self.depth_cell_depth_m = bt_depths.depth_cell_depth_m
@@ -301,7 +301,7 @@ class DepthData(object):
             # TRDI filter for multiple returns
             self.filter_trdi()
             self.filter_type = 'TRDI'
-            
+
         self.valid_mean_data()
 
         # Update processed depth with filtered results
@@ -312,7 +312,7 @@ class DepthData(object):
             # Single beam (VB or DS) save to 1-D array
             self.depth_processed_m = np.array(self.depth_beams_m[0, :])
             self.depth_processed_m[np.squeeze(np.equal(self.valid_data, 0))] = np.nan
-            
+
     def apply_interpolation(self, transect, method=None):
         """Coordinates application of interpolations
 
@@ -323,11 +323,10 @@ class DepthData(object):
         method: str
             Type of interpolation to apply (None, HoldLast, Smooth, Linear)
         """
-        
         # Determine interpolation to apply
         if method is None:
             method = self.interp_type
-            
+
         # Apply selected interpolation
         self.interp_type = method
         # No filtering
@@ -345,7 +344,7 @@ class DepthData(object):
         # Linear interpolation
         else:
             self.interpolate_linear(transect=transect)
-            
+
         # Identify ensembles with interpolated depths
         idx = np.where(np.logical_not(self.valid_data[:]))
         if len(idx[0]) > 0:
@@ -354,7 +353,7 @@ class DepthData(object):
             if len(idx2) > 0:
                 idx2 = idx2[0]
                 self.depth_source_ens[idx[idx2]] = 'IN'
-        
+
     def apply_composite(self, comp_depth, comp_source):
         """Applies the data from CompDepth computed in DepthStructure
         to DepthData object
@@ -366,17 +365,17 @@ class DepthData(object):
         comp_source: str
             Source of composite depth (BT, VB, DS)
         """
-        
+
         # Assign composite depth to property
         self.depth_processed_m = comp_depth
-        
+
         # Assign appropriate composite source for each ensemble
         self.depth_source_ens[comp_source == 1] = 'BT'
         self.depth_source_ens[comp_source == 2] = 'VB'
         self.depth_source_ens[comp_source == 3] = 'DS'
         self.depth_source_ens[comp_source == 4] = 'IN'
         self.depth_source_ens[comp_source == 0] = 'NA'
-        
+
     def sos_correction(self, ratio):
         """Correct depth for new speed of sound setting
 
@@ -385,48 +384,48 @@ class DepthData(object):
         ratio: float
             Ratio of new to old speed of sound value
         """
-        
+
         # Correct unprocessed depths
-        self.depth_beams_m = self.draft_use_m+np.multiply(self.depth_beams_m-self.draft_use_m, ratio)
-        
+        self.depth_beams_m = self.draft_use_m + np.multiply(self.depth_beams_m - self.draft_use_m, ratio)
+
         # Correct processed depths
-        self.depth_processed_m = self.draft_use_m+np.multiply(self.depth_processed_m-self.draft_use_m, ratio)
-        
+        self.depth_processed_m = self.draft_use_m + np.multiply(self.depth_processed_m - self.draft_use_m, ratio)
+
         # Correct cell size and location
         self.depth_cell_size_m = np.multiply(self.depth_cell_size_m, ratio)
         self.depth_cell_depth_m = self.draft_use_m + np.multiply(self.depth_cell_depth_m - self.draft_use_m, ratio)
-        
+
     def valid_mean_data(self):
         """Determines if raw data are sufficient to compute a valid depth without interpolation.
         """
-        
+
         if self.depth_source == 'BT':
             self.valid_data = np.tile(True, self.valid_beams.shape[1])
             nvalid = np.sum(self.valid_beams, axis=0)
-            
+
             if self.valid_data_method == 'TRDI':
                 self.valid_data[nvalid < 3] = False
             else:
                 self.valid_data[nvalid < 2] = False
         else:
             self.valid_data = self.valid_beams[0, :]
-            
+
     def filter_none(self):
         """Applies no filter to depth data. Removes filter if one was applied.
         """
-        
+
         # Set all ensembles to have valid data
         if len(self.depth_beams_m.shape) > 1:
             self.valid_beams = np.tile(True, self.depth_beams_m.shape)
         else:
             self.valid_beams = np.tile(True, (1, self.depth_beams_m.shape[0]))
-        
+
         # Set ensembles with no depth data to invalid
         self.valid_beams[self.depth_beams_m == 0] = False
         self.valid_beams[np.isnan(self.depth_beams_m)] = False
-        
+
         self.filter_type = 'None'
-        
+
     def filter_smooth(self, transect):
         """This filter uses a moving InterQuartile Range filter on residuals from a
         robust Loess smooth of the depths in each beam to identify unnatural spikes in the depth
@@ -444,18 +443,18 @@ class DepthData(object):
             This is the raw number of points actual points used may be less if some are bad.
 
         multiplier - number multiplied times the IQR to determine the filter criteria
-        
+
         """
 
         # If the smoothed depth has not been computed
         if self.smooth_depth is None or len(self.smooth_depth) == 0:
-            
+
             # Set filter characteristics
             self.filter_type = 'Smooth'
             # cycles = 3
             # half_width = 10
             # multiplier = 15
-            
+
             # Determine number of beams
             if len(self.depth_orig_m.shape) > 1:
                 n_beams, n_ensembles = self.depth_orig_m.shape[0], self.depth_orig_m.shape[1]
@@ -487,7 +486,7 @@ class DepthData(object):
 
             idx = np.where(np.isnan(track_x))
             if len(idx[0]) < 2:
-                x = np.nancumsum(np.sqrt(track_x**2+track_y**2))
+                x = np.nancumsum(np.sqrt(track_x ** 2 + track_y ** 2))
             else:
                 x = np.nancumsum(transect.date_time.ens_duration_sec)
 
@@ -515,10 +514,10 @@ class DepthData(object):
 
         # Reset valid data
         self.filter_none()
-        
+
         # Set filter type
         self.filter_type = 'Smooth'
-        
+
         # Determine number of beams
         if len(self.depth_orig_m.shape) > 1:
             n_beams, n_ensembles = self.depth_orig_m.shape[0], self.depth_orig_m.shape[1]
@@ -533,7 +532,7 @@ class DepthData(object):
         # Set bad depths to nan
         depth = repmat(np.nan, depth_raw.shape[0], depth_raw.shape[1])
         depth[nan_greater(depth_raw, 0)] = depth_raw[nan_greater(depth_raw, 0)]
-        
+
         # Apply filter
         for j in range(n_beams):
             if np.nansum(self.smooth_upper_limit[j, :]) > 0:
@@ -746,7 +745,7 @@ class DepthData(object):
     def interpolate_none(self):
         """Applies no interpolation.
         """
-        
+
         # Compute processed depth without interpolation
         if self.depth_source == 'BT':
             # Bottom track methods
@@ -754,25 +753,25 @@ class DepthData(object):
         else:
             # Vertical beam or depth sounder depths
             self.depth_processed_m = self.depth_beams_m[0, :]
-            
+
         self.depth_processed_m[np.squeeze(np.equal(self.valid_data, False))] = np.nan
-        
+
         # Set interpolation type
         self.interp_type = 'None'
-        
+
     def interpolate_hold_last(self):
         """This function holds the last valid value until the next valid data point.
         """
-        
+
         # Get number of ensembles
         n_ensembles = len(self.depth_processed_m)
-        
+
         # Process data by ensemble
         for n in range(1, n_ensembles):
-            
+
             # If current ensemble's depth is invalid assign depth from previous example
             if not self.valid_data[n]:
-                self.depth_processed_m[n] = self.depth_processed_m[n-1]
+                self.depth_processed_m[n] = self.depth_processed_m[n - 1]
 
     def interpolate_next(self):
         """This function back fills with the next valid value.
@@ -782,7 +781,7 @@ class DepthData(object):
         n_ens = len(self.depth_processed_m)
 
         # Process data by ensemble
-        for n in np.arange(0, n_ens-1)[::-1]:
+        for n in np.arange(0, n_ens - 1)[::-1]:
 
             # If current ensemble's depth is invalid assign depth from previous example
             if not self.valid_data[n]:
@@ -791,15 +790,15 @@ class DepthData(object):
     def interpolate_smooth(self):
         """Apply interpolation based on the robust loess smooth
         """
-        
+
         self.interp_type = 'Smooth'
-        
+
         # Get depth data from object
         depth_new = self.depth_beams_m
-        
+
         # Update depth data with interpolated depths
         depth_new[not self.valid_beams] = self.smooth_depth[not self.valid_beams]
-        
+
         # Compute processed depths with interpolated values
         if self.depth_source == 'BT':
             # Temporarily change self.depth_beams_m to compute average
@@ -812,11 +811,10 @@ class DepthData(object):
         else:
             # Assignment for VB or DS
             self.depth_processed_m = depth_new[0, :]
-            
+
     def interpolate_linear(self, transect):
         """Apply linear interpolation
         """
-        
         # Set interpolation type
         self.interp_type = 'Linear'
 
@@ -831,31 +829,31 @@ class DepthData(object):
             select = getattr(transect.boat_vel, 'bt_vel')
             track_x = np.tile(np.nan, select.u_processed_mps.shape)
             track_y = np.tile(np.nan, select.v_processed_mps.shape)
-              
+
         idx = np.where(np.isnan(track_x[1:]))
-        
+
         # If the navigation reference has no gaps use it for interpolation, if not use time
         if len(idx[0]) < 1:
-            x = np.nancumsum(np.sqrt(track_x**2 + track_y**2))
+            x = np.nancumsum(np.sqrt(track_x ** 2 + track_y ** 2))
         else:
             # Compute accumulated time
             x = np.nancumsum(transect.date_time.ens_duration_sec)
-            
+
         # Determine number of beams
         n_beams = self.depth_beams_m.shape[0]
         depth_mono = copy.deepcopy(self.depth_beams_m)
         depth_new = copy.deepcopy(self.depth_beams_m)
-        
-#       Create strict monotonic arrays for depth and track by identifying duplicate
-#       track values.  The first track value is used and the remaining duplicates
-#       are set to nan.  The depth assigned to that first track value is the average
-#       of all duplicates.  The depths for the duplicates are then set to nan.  Only
-#       valid strictly monotonic track and depth data are used for the input in to linear
-#       interpolation.   Only the interpolated data for invalid depths are added
-#       to the valid depth data to create depth_new
+
+        # Create strictly monotonic arrays for depth and track by identifying duplicate
+        # track values. The first track value is kept and the remaining duplicates are
+        # set to nan. The depth assigned to that first track value is the average of
+        # all duplicates, and the depths of the duplicates are then set to nan. Only
+        # valid, strictly monotonic track and depth data are used as input to the
+        # linear interpolation. Only the interpolated values for invalid depths are
+        # added to the valid depth data to create depth_new.
 
         x_mono = x
-        
+
         idx0 = np.where(np.diff(x) == 0)[0]
         if len(idx0) > 0:
             if len(idx0) > 1:
@@ -876,7 +874,7 @@ class DepthData(object):
                 depth_mono[:, indices[0]] = depth_avg
                 depth_mono[:, indices[1:]] = np.nan
                 x[indices[1:]] = np.nan
-                
+
         # Interpolate each beam
 
         for n in range(n_beams):
@@ -912,12 +910,12 @@ class DepthData(object):
             Draft of ADCP
         method: str
             Averaging method (Simple, IDW)
-        
+
         Returns
         -------
         avg_depth: np.array(float)
             Average depth for each ensemble
-        
+
         """
         if method == 'Simple':
             avg_depth = np.nanmean(depth, 0)
@@ -925,7 +923,8 @@ class DepthData(object):
             # Compute inverse weighted mean depth
             rng = depth - draft
             w = 1 - np.divide(rng, np.nansum(rng, 0))
-            avg_depth = draft+np.nansum(np.divide((rng * w), np.nansum(w, 0), where=np.nansum(w, 0) != 0), 0)
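+            # Supplying out= gives np.divide a defined value (zero) wherever the where=
+            # condition is False; without it those elements would be left uninitialized.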
+            avg_depth = draft + np.nansum(np.divide((rng * w), np.nansum(w, 0), out=np.zeros_like(rng),
+                                                    where=np.nansum(w, 0) != 0), 0)
             avg_depth[avg_depth == draft] = np.nan
 
         return avg_depth
diff --git a/open_functions.py b/open_functions.py
index 239f2f2b37dbe9e71d8d362afdaa1c092bf1665a..52131dba96a01e17cb6656ffdf1879c3768b59d7 100644
--- a/open_functions.py
+++ b/open_functions.py
@@ -46,18 +46,21 @@ def select_directory():
     if path_folder:
         # ADCP folders path
         path_folder = '\\'.join(path_folder.split('/'))
-        path_folders = np.array(glob.glob(path_folder + "/*"))
+        # path_folders = np.array(glob.glob(path_folder + "/*"))
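+        # Keep only sub-directories of the selected folder; plain files are excluded
+        # directly, so the name-based filtering below is no longer needed.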
+        path_folders = [f.path for f in os.scandir(path_folder) if f.is_dir()]
+
         # Load their name
         name_folders = np.array([os.path.basename((x)) for x in path_folders])
         # Exclude files
-        excluded_folders = [s.find('.') == -1 for s in name_folders]
-        path_folders = path_folders[excluded_folders]
-        name_folders = name_folders[excluded_folders]
+        # excluded_folders = [s.find('.') == -1 for s in name_folders]
+        # path_folders = path_folders[excluded_folders]
+        # name_folders = name_folders[excluded_folders]
 
         # Open measurement
         type_meas = list()
         path_meas = list()
         name_meas = list()
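+        # Folders in which no ADCP measurement files were found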
+        no_adcp = list()
         for id_meas in range(len(path_folders)):
             list_files = os.listdir(path_folders[id_meas])
             exte_files = list([i.split('.', 1)[-1] for i in list_files])
@@ -66,17 +69,30 @@ def select_directory():
                     loca_meas = [i for i, s in enumerate(exte_files) if "mat" in s]  # transects index
                     fileNameRaw = [(list_files[x]) for x in loca_meas]
                     qrev_data = False
+                    path_qrev_meas = []
                     for name in fileNameRaw:
                         path = os.path.join(path_folders[id_meas], name)
                         mat_data = sio.loadmat(path, struct_as_record=False, squeeze_me=True)
                         if 'version' in mat_data:
-                            type_meas.append('QRev')
-                            name_meas.append(name_folders[id_meas])
-                            path_meas.append(mat_data)
+                            path_qrev_meas.append(os.path.join(path_folders[id_meas], name))
                             qrev_data = True
-                            print('QRev detected')
-                            break
-                    if not qrev_data:
+                    if qrev_data:
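+                        # Several QRev .mat files may exist; keep the most recently modified one.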
+                        recent_time = None
+                        recent_id = 0
+                        for i in range(len(path_qrev_meas)):
+                            qrev_time = os.path.getmtime(path_qrev_meas[i])
+                            if recent_time is None or qrev_time > recent_time:
+                                recent_time = qrev_time
+                                recent_id = i
+                        path = path_qrev_meas[recent_id]
+                        mat_data = sio.loadmat(path, struct_as_record=False, squeeze_me=True)
+                        print('QRev file')
+                        type_meas.append('QRev')
+                        name_meas.append(name_folders[id_meas])
+                        path_meas.append(mat_data)
+
+                    else:
+                        print('SonTek file')
                         type_meas.append('SonTek')
                         fileName = [s for i, s in enumerate(fileNameRaw)
                                     if "QRev.mat" not in s]
@@ -85,16 +101,20 @@ def select_directory():
                             [os.path.join(path_folders[id_meas], fileName[x]) for x in
                              range(len(fileName))])
 
-                elif 'mmt' in exte_files:
+                else:
+                    print('TRDI file')
                     type_meas.append('TRDI')
                     loca_meas = exte_files.index("mmt")  # index of the measurement file
                     fileName = list_files[loca_meas]
                     path_meas.append(os.path.join(path_folders[id_meas], fileName))
                     name_meas.append(name_folders[id_meas])
-        return path_folder, path_meas, type_meas, name_meas
+            else:
+                no_adcp.append(name_folders[id_meas])
+                print(f"No ADCP: {name_folders[id_meas]}")
+        return path_folder, path_meas, type_meas, name_meas, no_adcp
     else:
         warnings.warn('No folder selected - end')
-        return None, None, None, None
+        return None, None, None, None, None
 
 
 def open_measurement(path_meas, type_meas, apply_settings=False, navigation_reference=None,