From 17b3f12008b5f5a387a2d877d80877cc96a28033 Mon Sep 17 00:00:00 2001
From: bcalmel <blaise.calmel@inrae.fr>
Date: Mon, 25 Sep 2023 15:42:17 +0200
Subject: [PATCH] Update QRevInt Version

---
 Classes/BoatData.py                   |  945 ++--
 Classes/BoatStructure.py              |  433 +-
 Classes/CompassCal.py                 |   14 +-
 Classes/ComputeExtrap.py              |  202 +-
 Classes/CoordError.py                 |    3 +-
 Classes/CrossSectionComp.py           |  675 +++
 Classes/DateTime.py                   |   43 +-
 Classes/DepthData.py                  |  636 +--
 Classes/DepthStructure.py             |  305 +-
 Classes/EdgeData.py                   |   43 +-
 Classes/Edges.py                      |   22 +-
 Classes/ExtrapData.py                 |   29 +-
 Classes/ExtrapQSensitivity.py         |  214 +-
 Classes/FitData.py                    |  144 +-
 Classes/GPSData.py                    |  382 +-
 Classes/HeadingData.py                |   98 +-
 Classes/InstrumentData.py             |  254 +-
 Classes/LoadMeasurements.py           |    7 +-
 Classes/MMT_TRDI.py                   |  549 ++-
 Classes/MatSonTek.py                  |   54 +-
 Classes/Measurement.py                | 3836 +++++++++++------
 Classes/MovingBedTests.py             |  639 +--
 Classes/MultiThread.py                |    5 +-
 Classes/NormData.py                   |  238 +-
 Classes/Oursin.py                     | 2407 +++++++----
 Classes/Pd0TRDI.py                    | 1972 ++++++---
 Classes/Pd0TRDI_2.py                  | 2390 +++++-----
 Classes/PreMeasurement.py             |  614 ++-
 Classes/Python2Matlab.py              |  816 ++--
 Classes/QAData.py                     | 3508 ++++++++++-----
 Classes/QComp.py                      | 1747 +++++---
 Classes/SelectFit.py                  |  265 +-
 Classes/SensorData.py                 |   14 +-
 Classes/SensorStructure.py            |   12 +-
 Classes/Sensors.py                    |   87 +-
 Classes/TransectData.py               | 2165 ++++++----
 Classes/TransformationMatrix.py       |  126 +-
 Classes/Uncertainty.py                |  176 +-
 Classes/WaterData.py                  | 1918 +++++----
 Classes/stickysettings.py             |   45 +-
 Classes/test_stickysettings.py        |   38 +-
 .../bottom_discharge_extrapolation.py |  127 +-
 .../top_discharge_extrapolation.py    |  119 +-
 MiscLibs/abba_2d_interpolation.py     |  190 +-
 MiscLibs/bayes_cov_compiled.py        |  110 +-
 MiscLibs/common_functions.py          |  264 +-
 MiscLibs/non_uniform_savgol.py        |   11 +-
 MiscLibs/robust_loess.py              |   70 +-
 MiscLibs/robust_loess_compiled.py     |   86 +-
 MiscLibs/run_iqr.py                   |   33 +-
 UI/main.py                            |    7 +-
 51 files changed, 18733 insertions(+), 10354 deletions(-)
 create mode 100644 Classes/CrossSectionComp.py

diff --git a/Classes/BoatData.py b/Classes/BoatData.py
index b748f17..a0ecabc 100644
--- a/Classes/BoatData.py
+++ b/Classes/BoatData.py
@@ -1,8 +1,17 @@
 import copy
 import numpy as np
 from numpy.matlib import repmat
-from MiscLibs.common_functions import cosd, sind, cart2pol, iqr, pol2cart, nan_less_equal, \
-    nan_greater_equal, nan_greater, nan_less
+from MiscLibs.common_functions import (
+    cosd,
+    sind,
+    cart2pol,
+    iqr,
+    pol2cart,
+    nan_less_equal,
+    nan_greater_equal,
+    nan_greater,
+    nan_less,
+)
 from MiscLibs.robust_loess import rloess


@@ -15,13 +24,22 @@ class BoatData(object):
     Original data provided to the class
     raw_vel_mps: np.array
         Contains the raw unfiltered velocity data in m/s.
-        First index is 1-4 are beams 1,2,3,3 if if beam or u,v,w,d if otherwise.
+        First index 1-4 corresponds to beams 1,2,3,4 if beam coordinates,
+        or u,v,w,d otherwise.
     frequency_khz: np.array or float
         Defines ADCP frequency used for velocity Measurement.
     orig_coord_sys: str
-        Defines the original raw data velocity Coordinate, "Beam", "Inst", "Ship", "Earth".
+        Defines the original raw data velocity coordinate system, "Beam",
+        "Inst", "Ship", "Earth".
     nav_ref: str
-        Defines the original raw data navigation reference, "None", "BT", "GGA" "VTG".
+        Defines the original raw data navigation reference, "None", "BT",
+        "GGA", "VTG".
+    corr: np.array
+        Correlation values for bottom track
+    rssi: np.array
+        Returned signal strength for bottom track

     Coordinate transformed data
     coord_sys: str
-        Defines the current coordinate system "Beam", "Inst", "Ship", "Earth" used to compute u, v, w, and d.
+        Defines the current coordinate system "Beam", "Inst", "Ship",
+        "Earth" used to compute u, v, w, and d.
     u_mps: np.array(float)
         Horizontal velocity in x-direction, in m/s.
     v_mps: np.array(float)
         Horizontal velocity in y-direction, in m/s.
     w_mps: np.array(float)
         Vertical velocity (+ up), m/s.
     d_mps: np.array(float)
-        Difference in vertical velocities compute from opposing beam pairs in m/s.
+        Difference in vertical velocities computed from opposing beam
+        pairs in m/s.
     num_invalid: float
         Number of ensembles with invalid velocity data.
     bottom_mode: str
         BT mode for TRDI, 'Variable' for SonTek.

     Processed data
     u_processed_mps: np.array(float)
         Horizontal velocity in x-direction filtered and interpolated
     v_processed_mps: np.array(float)
         Horizontal velocity in y-direction filtered and interpolated
     processed_source: str
         Source of data, BT, GGA, VTG, INT

     Settings variables
     d_filter: str
         Difference velocity filter "Manual", "Off", "Auto".
-    d_filter_thresholds: float
+    d_filter_thresholds: dict
         Threshold for difference velocity filter.
     w_filter: str
         Vertical velocity filter "Manual", "Off", "Auto".
-    w_filter_threshold: float
+    w_filter_thresholds: dict
         Threshold for vertical velocity filter.
     gps_diff_qual_filter: integer
         Differential correction quality (1,2,4).
     gps_altitude_filter: str
         Change in altitude filter "Auto", "Manual", "Off".
     gps_altitude_filter_change: float
         Threshold from mean for altitude filter.
     gps_HDOP_filter: str
         HDOP filter "Auto", "Manual", "Off".
     gps_HDOP_filter_max: float
         Max acceptable value for HDOP.
     gps_HDOP_filter_change: float
         Maximum change allowed from mean.
     smooth_filter: str
         Filter based on smoothing function.
     smooth_speed: np.array(float)
         Smoothed boat speed.
     smooth_upper_limit: np.array(float)
         Smooth function upper limit of window.
     smooth_lower_limit: np.array(float)
         Smooth function lower limit of window.
     interpolate: str
         Type of interpolation: "None", "Linear", "Smooth" etc.
     beam_filter: integer
-        Minimum number of beams for valid data, 3 for 3-beam solutions, 4 for 4-beam.
+        Minimum number of beams for valid data, 3 for 3-beam solutions,
+        4 for 4-beam.
     valid_data: np.array(bool)
-        Logical array of identifying valid and invalid data for each filter applied.
+        Logical array identifying valid and invalid data for each
+        filter applied.
         Row 1 [0] - composite
         Row 2 [1] - original
         Row 3 [2] - d_filter of diff_qual
         Row 4 [3] - w_filter or altitude
         Row 5 [4] - smooth_filter
         Row 6 [5] - beam_filter or HDOP
-
+    ping_type: np.array(int)
+        Indicates type of ping for each cell: 0-incoherent, 1-coherent,
+        2-surface
     d_meas_thresholds: dict
-        Dictionary of difference velocity thresholds computed using the whole measurement
+        Dictionary of difference velocity thresholds computed using the
+        whole measurement
     w_meas_thresholds: dict
-        Dictionary of vertical velocity thresholds computed using the whole measurement
+        Dictionary of vertical velocity thresholds computed using the
+        whole measurement
     use_measurement_thresholds: bool
         Indicates if the measurement based thresholds should be used
     """

     def __init__(self):
         """Initialize instance variables."""

         # Variables passed to the constructor
-        self.raw_vel_mps = None  # contains the raw unfiltered velocity data in m/s.
-        self.frequency_khz = None  # Defines ADCP frequency used for velocity Measurement
-        self.orig_coord_sys = None  # Defines the original raw data velocity Coordinate
-        self.nav_ref = None  # Defines the original raw data navigation reference
+        self.raw_vel_mps = None
+        self.frequency_khz = None
+        self.orig_coord_sys = None
+        self.nav_ref = None
         self.corr = np.array([])
         self.rssi = np.array([])
-
-        # Coordinate transformed data
-        self.coord_sys = None  # Defines the current coordinate system "Beam", "Inst", "Ship", "Earth"
-        self.u_mps = None  # Horizontal velocity in x-direction, in m/s
-        self.v_mps = None  # Horizontal velocity in y-direction, in m/s
-        self.w_mps = None  # Vertical velocity (+ up), m/s
-        self.d_mps = None  # Difference in vertical velocities compute from opposing beam pairs in m/s
-        self.num_invalid = None  # Number of ensembles with invalid velocity data
-        self.bottom_mode = None  # BT mode for TRDI, 'Variable' for SonTek
+        self.coord_sys = None
+        self.u_mps = None
+        self.v_mps = None
+        self.w_mps = None
+        self.d_mps = None
+        self.num_invalid = None
+        self.bottom_mode = None

         # Processed data
-        self.u_processed_mps = None  # Horizontal velocity in x-direction filtered and interpolated
-        self.v_processed_mps = None  # Horizontal velocity in y-direction filtered and interpolated
-        self.processed_source = None  # Source of data, BT, GGA, VTG, INT
+        self.u_processed_mps = None
+        self.v_processed_mps = None
+        self.processed_source = None

         # Filter and interpolation properties
-        self.d_filter = None  # Difference velocity filter "Manual", "Off", "Auto"
-        self.d_filter_thresholds = {}  # Threshold for difference velocity filter
-        self.w_filter = None  # Vertical velocity filter "On", "Off"
-        self.w_filter_thresholds = {}  # Threshold for vertical velocity filter
-        self.gps_diff_qual_filter = None  # Differential correction quality (1,2,4)
-        self.gps_altitude_filter = None  # Change in altitude filter "Auto", "Manual", "Off"
-        self.gps_altitude_filter_change = None  # Threshold from mean for altitude filter
-        self.gps_HDOP_filter = None  # HDOP filter "Auto", "Manual", "Off"
-        self.gps_HDOP_filter_max = None  # Max acceptable value for HDOP
-        self.gps_HDOP_filter_change = None  # Maximum change allowed from mean
-        self.smooth_filter = None  # Filter based on smoothing function
-        self.smooth_speed = None  # Smoothed boat speed
-        self.smooth_upper_limit = None  # Smooth function upper limit of window
-        self.smooth_lower_limit = None  # Smooth function lower limit of window
-        self.interpolate = None  # Type of interpolation: "None", "Linear", "Smooth" etc.
- self.beam_filter = None # 3 for 3-beam solutions, 4 for 4-beam SolutionStackDescription - self.valid_data = None # Logical array of identifying valid and invalid data for each filter applied + self.d_filter = None + self.d_filter_thresholds = {} + self.w_filter = None + self.w_filter_thresholds = {} + self.gps_diff_qual_filter = None + self.gps_altitude_filter = None + self.gps_altitude_filter_change = None + self.gps_HDOP_filter = None + self.gps_HDOP_filter_max = None + self.gps_HDOP_filter_change = None + self.smooth_filter = None + self.smooth_speed = None + self.smooth_upper_limit = None + self.smooth_lower_limit = None + self.interpolate = None + self.beam_filter = None + self.valid_data = None + self.ping_type = np.array([]) # Filter settings populated from Measurement.create_filter_composites self.d_meas_thresholds = {} @@ -151,8 +178,19 @@ class BoatData(object): self.use_measurement_thresholds = False - def populate_data(self, source, vel_in, freq_in, coord_sys_in, nav_ref_in, beam_filter_in=3, - bottom_mode_in='Variable', corr_in=None, rssi_in=None): + def populate_data( + self, + source, + vel_in, + freq_in, + coord_sys_in, + nav_ref_in, + beam_filter_in=3, + bottom_mode_in="Variable", + corr_in=None, + rssi_in=None, + ping_type=None, + ): """Assigns data to instance variables. Parameters @@ -171,19 +209,22 @@ class BoatData(object): Minimum number of valid beams for valid data. bottom_mode_in: str Bottom mode for TRDI ADCP - corr: np.array + corr_in: np.array Correlation values for bottom track - rssi: np.array + rssi_in: np.array Returned signal strength for bottom track + ping_type: np.array(str) + Indicates type of ping used for bottom tracking """ # Identify invalid ensembles for SonTek data. - if source == 'SonTek': + if source == "SonTek": vel_in = BoatData.filter_sontek(vel_in) # Store input data self.raw_vel_mps = vel_in self.frequency_khz = freq_in + self.ping_type = ping_type self.coord_sys = coord_sys_in self.orig_coord_sys = coord_sys_in self.nav_ref = nav_ref_in @@ -194,21 +235,22 @@ class BoatData(object): if rssi_in is not None: self.rssi = rssi_in - if nav_ref_in == 'BT': + if nav_ref_in == "BT": - # Boat velocities are referenced to ADCP not the streambed and thus must be reversed + # Boat velocities are referenced to ADCP not the streambed and + # thus must be reversed self.u_mps = np.copy(-1 * vel_in[0, :]) self.v_mps = np.copy(-1 * vel_in[1, :]) self.w_mps = np.copy(vel_in[2, :]) self.d_mps = np.copy(vel_in[3, :]) # Default filtering applied during initial construction of object - self.d_filter = 'Off' + self.d_filter = "Off" self.d_filter_thresholds = {} - self.w_filter = 'Off' + self.w_filter = "Off" self.w_filter_thresholds = {} - self.smooth_filter = 'Off' - self.interpolate = 'None' + self.smooth_filter = "Off" + self.interpolate = "None" else: @@ -220,13 +262,13 @@ class BoatData(object): # Default filtering self.gps_diff_qual_filter = 2 - self.gps_altitude_filter = 'Off' + self.gps_altitude_filter = "Off" self.gps_altitude_filter_change = 3 - self.gps_HDOP_filter = 'Off' + self.gps_HDOP_filter = "Off" self.gps_HDOP_filter_max = 2.5 self.gps_HDOP_filter_change = 1 - self.smooth_filter = 'Off' - self.interpolate = 'None' + self.smooth_filter = "Off" + self.interpolate = "None" # Assign data to processed property self.u_processed_mps = np.copy(self.u_mps) @@ -246,20 +288,21 @@ class BoatData(object): valid_vel[np.isnan(self.raw_vel_mps)] = False # Identify invalid ensembles - if nav_ref_in == 'BT': + if nav_ref_in == "BT": self.valid_data[1, 
np.sum(valid_vel, 0) < 3] = False else: self.valid_data[1, np.sum(valid_vel, 0) < 2] = False # Combine all filter data to composite valid data self.valid_data[0, :] = np.all(self.valid_data[1:, :], 0) - self.num_invalid = np.sum(self.valid_data[0, :] == False) - self.processed_source = np.array([''] * self.u_mps.shape[0], dtype=object) - self.processed_source[np.where(self.valid_data[0, :] == True)] = nav_ref_in - self.processed_source[np.where(self.valid_data[0, :] == False)] = "INT" + self.num_invalid = np.sum(np.logical_not(self.valid_data[0, :])) + self.processed_source = np.array([""] * self.u_mps.shape[0], dtype=object) + self.processed_source[np.where(self.valid_data[0, :])] = nav_ref_in + self.processed_source[np.where(np.logical_not(self.valid_data[0, :]))] = "INT" def populate_from_qrev_mat(self, mat_data): - """Populates the object using data from previously saved QRev Matlab file. + """Populates the object using data from previously saved QRev Matlab + file. Parameters ---------- @@ -275,30 +318,41 @@ class BoatData(object): self.frequency_khz = None else: self.frequency_khz = np.array([mat_data.frequency_hz]) + + if hasattr(mat_data, "ping_type"): + if type(mat_data.ping_type) is np.ndarray: + if np.size(mat_data.ping_type): + self.ping_type = mat_data.ping_type + self.ping_type = np.char.strip(self.ping_type) + else: + self.ping_type = None + else: + self.ping_type = None + self.orig_coord_sys = mat_data.origCoordSys self.nav_ref = mat_data.navRef # Data requiring manipulation if only 1 ensemble if type(mat_data.u_mps) is float: - self.raw_vel_mps = mat_data.rawVel_mps.reshape(mat_data.rawVel_mps.shape[0], 1) + self.raw_vel_mps = mat_data.rawVel_mps.reshape( + mat_data.rawVel_mps.shape[0], 1 + ) # Coordinate transformed data self.coord_sys = np.array([mat_data.coordSys]) self.u_mps = np.array([mat_data.u_mps]) self.v_mps = np.array([mat_data.v_mps]) self.w_mps = np.array([mat_data.w_mps]) self.d_mps = np.array([mat_data.d_mps]) - if hasattr(mat_data, 'corr'): + if hasattr(mat_data, "corr"): self.corr = mat_data.corr.reshape(mat_data.corr.shape[0], 1) - if hasattr(mat_data, 'rssi'): + if hasattr(mat_data, "rssi"): self.rssi = mat_data.rssi.reshape(mat_data.rssi.shape[0], 1) - # self.bottom_mode = np.array([mat_data.bottomMode]) - # Processed data self.u_processed_mps = np.array([mat_data.uProcessed_mps]) self.v_processed_mps = np.array([mat_data.vProcessed_mps]) self.processed_source = np.array([mat_data.processedSource]) - self.valid_data = np.array([ mat_data.validData]).astype(bool) + self.valid_data = np.array([mat_data.validData]).astype(bool) if self.valid_data.shape[1] > 1: self.valid_data = self.valid_data.reshape(-1, 1) self.smooth_speed = np.array([mat_data.smoothSpeed]) @@ -308,18 +362,22 @@ class BoatData(object): self.raw_vel_mps = mat_data.rawVel_mps # Coordinate transformed data self.coord_sys = mat_data.coordSys - self.u_mps = mat_data.u_mps - self.v_mps = mat_data.v_mps - self.w_mps = mat_data.w_mps - self.d_mps = mat_data.d_mps - - if hasattr(mat_data, 'corr'): + self.u_mps = mat_data.u_mps.astype("float64") + self.v_mps = mat_data.v_mps.astype("float64") + try: + self.w_mps = mat_data.w_mps.astype("float64") + except AttributeError: + self.w_mps = np.array(mat_data.w_mps) + try: + self.d_mps = mat_data.d_mps.astype("float64") + except AttributeError: + self.d_mps = np.array(mat_data.d_mps) + + if hasattr(mat_data, "corr"): self.corr = mat_data.corr - if hasattr(mat_data, 'rssi'): + if hasattr(mat_data, "rssi"): self.rssi = mat_data.rssi - # self.bottom_mode 
= mat_data.bottomMode - # Processed data self.u_processed_mps = mat_data.uProcessed_mps self.v_processed_mps = mat_data.vProcessed_mps @@ -397,7 +455,7 @@ class BoatData(object): self.beam_filter = mat_data.beamFilter # Use measurement for filter - if hasattr(mat_data, 'use_measurement_thresholds'): + if hasattr(mat_data, "use_measurement_thresholds"): self.use_measurement_thresholds = mat_data.use_measurement_thresholds self.d_meas_thresholds = self.struct_to_dict(mat_data.d_meas_thresholds) self.w_meas_thresholds = self.struct_to_dict(mat_data.w_meas_thresholds) @@ -433,7 +491,8 @@ class BoatData(object): def change_coord_sys(self, new_coord_sys, sensors, adcp): """This function allows the coordinate system to be changed. - Current implementation is only to allow change to a higher order coordinate system Beam - Inst - Ship - Earth + Current implementation is only to allow change to a higher order + coordinate system Beam - Inst - Ship - Earth Parameters ---------- @@ -464,36 +523,35 @@ class BoatData(object): r = getattr(sensors.roll_deg, sensors.roll_deg.selected).data h = getattr(sensors.heading_deg, sensors.heading_deg.selected).data - # Modify the transformation matrix and heading, pitch, and roll values base on - # the original coordinate system so that only the needed values are used in - # computing the new coordinate system - if o_coord_sys == 'Beam': + # Modify the transformation matrix and heading, pitch, and roll + # values base on the original coordinate system so that only + # the needed values are used in computing the new coordinate system + if o_coord_sys == "Beam": orig_sys = 1 - elif o_coord_sys == 'Inst': + elif o_coord_sys == "Inst": orig_sys = 2 - t_matrix[:] = np.eye(t_matrix.shape[0]) - elif o_coord_sys == 'Ship': + elif o_coord_sys == "Ship": orig_sys = 3 p = np.zeros(h.shape) r = np.zeros(h.shape) - t_matrix[:] = np.eye(t_matrix.shape[0]) - elif o_coord_sys == 'Earth': + elif o_coord_sys == "Earth": orig_sys = 4 # Assign a value to the new coordinate system - if new_coord_sys == 'Beam': + if new_coord_sys == "Beam": new_sys = 1 - elif new_coord_sys == 'Inst': + elif new_coord_sys == "Inst": new_sys = 2 - elif new_coord_sys == 'Ship': + elif new_coord_sys == "Ship": new_sys = 3 - elif new_coord_sys == 'Earth': + elif new_coord_sys == "Earth": new_sys = 4 - # Check to ensure the new coordinate system is a higher order than the original system + # Check to ensure the new coordinate system is a higher order + # than the original system if new_sys - orig_sys > 0: - # Compute trig function for heaing, pitch and roll + # Compute trig function for heading, pitch and roll ch = cosd(h) sh = sind(h) cp = cosd(p) @@ -507,23 +565,27 @@ class BoatData(object): for ii in range(n_ens): # Compute matrix for heading, pitch, and roll - hpr_matrix = [[((ch[ii] * cr[ii]) + (sh[ii]*sp[ii]*sr[ii])), - (sh[ii] * cp[ii]), - ((ch[ii] * sr[ii]) - sh[ii]*sp[ii]*cr[ii])], - [(-1 * sh[ii] * cr[ii])+(ch[ii] * sp[ii] * sr[ii]), - ch[ii] * cp[ii], - (-1 * sh[ii] * sr[ii])-(ch[ii] * sp[ii] * cr[ii])], - [(-1.*cp[ii] * sr[ii]), - sp[ii], - cp[ii] * cr[ii]]] + hpr_matrix = [ + [ + ((ch[ii] * cr[ii]) + (sh[ii] * sp[ii] * sr[ii])), + (sh[ii] * cp[ii]), + ((ch[ii] * sr[ii]) - sh[ii] * sp[ii] * cr[ii]), + ], + [ + (-1 * sh[ii] * cr[ii]) + (ch[ii] * sp[ii] * sr[ii]), + ch[ii] * cp[ii], + (-1 * sh[ii] * sr[ii]) - (ch[ii] * sp[ii] * cr[ii]), + ], + [(-1.0 * cp[ii] * sr[ii]), sp[ii], cp[ii] * cr[ii]], + ] # Transform beam coordinates - if o_coord_sys == 'Beam': + if o_coord_sys == "Beam": # Determine 
frequency index for transformation matrix if len(t_matrix.shape) > 2: idx_freq = np.where(t_matrix_freq == self.frequency_khz[ii]) - t_mult = np.copy(t_matrix[idx_freq]) + t_mult = np.copy(t_matrix[:, :, idx_freq[0][0]]) else: t_mult = np.copy(t_matrix) @@ -537,7 +599,7 @@ class BoatData(object): if len(idx_3_beam[0]) == 1: # Special processing for RiverRay - if adcp.model == 'RiverRay': + if adcp.model == "RiverRay": # Set beam pairing beam_pair_1a = 0 @@ -545,16 +607,22 @@ class BoatData(object): beam_pair_2a = 2 beam_pair_2b = 3 - # Set speed of sound correction variables Note: Currently (2013-09-06) - # WinRiver II does not use a variable correction and assumes the speed - # of sound and the reference speed of sound are the same. - # sos = sensors.speed_ofs_sound_mps.selected.data[ii] + # Set speed of sound correction variables + # Note: Currently (2013-09-06) + # WinRiver II does not use a variable + # correction and assumes the speed + # of sound and the reference speed of sound + # are the same. + # sos = + # sensors.speed_ofs_sound_mps.selected.data[ii] # sos_reference = 1536 - # sos_correction = np.sqrt(((2 * sos_reference) / sos) **2 -1) + # sos_correction = np.sqrt(((2 * + # sos_reference) / sos) **2 -1) sos_correction = np.sqrt(3) - # Reconfigure transformation matrix based on which beam is invalid + # Reconfigure transformation matrix based on + # which beam is invalid # Beam 1 invalid if idx_3_beam[0][0] == beam_pair_1a: @@ -562,19 +630,32 @@ class BoatData(object): # Double valid beam in invalid pair t_mult[0:2, beam_pair_1b] *= 2 - # Eliminate invalid pair from vertical velocity computations - t_mult[2, :] = [0, 0, 1/sos_correction, 1/sos_correction] - - # Reconstruct beam velocity matrix to use only valid beams - t_mult = t_mult[0:3, [beam_pair_1b, beam_pair_2a, beam_pair_2b]] - - # Reconstruct beam velocity matrix to use only valid beams - vel = vel[[beam_pair_1b, beam_pair_2a, beam_pair_2b]] + # Eliminate invalid pair from vertical + # velocity computations + t_mult[2, :] = [ + 0, + 0, + 1 / sos_correction, + 1 / sos_correction, + ] + + # Reconstruct beam velocity matrix to use + # only valid beams + t_mult = t_mult[ + 0:3, [beam_pair_1b, beam_pair_2a, beam_pair_2b] + ] + + # Reconstruct beam velocity matrix to + # use only valid beams + vel = vel[ + [beam_pair_1b, beam_pair_2a, beam_pair_2b] + ] # Apply transformation matrix temp_t = t_mult.dot(vel) - # Correct horizontal velocity for invalid pair with the vertical velocity + # Correct horizontal velocity for invalid + # pair with the vertical velocity # and speed of sound correction temp_t[0] = temp_t[0] + temp_t[2] * sos_correction @@ -582,21 +663,36 @@ class BoatData(object): if idx_3_beam[0][0] == beam_pair_1b: # Double valid beam in invalid pair - t_mult[0:2, beam_pair_1a] = t_mult[0:2, beam_pair_1a] * 2 - - # Eliminate invalid pair from vertical velocity computations - t_mult[2, :] = [0, 0, 1/sos_correction, 1/sos_correction] - - # Reconstruct transformation matrix as a 3x3 matrix - t_mult = t_mult[0:3, [beam_pair_1a, beam_pair_2a, beam_pair_2b]] - - # Reconstruct beam velocity matrix to use only valid beams - vel = vel[[beam_pair_1a, beam_pair_2a, beam_pair_2b]] + t_mult[0:2, beam_pair_1a] = ( + t_mult[0:2, beam_pair_1a] * 2 + ) + + # Eliminate invalid pair from vertical + # velocity computations + t_mult[2, :] = [ + 0, + 0, + 1 / sos_correction, + 1 / sos_correction, + ] + + # Reconstruct transformation matrix as a + # 3x3 matrix + t_mult = t_mult[ + 0:3, [beam_pair_1a, beam_pair_2a, beam_pair_2b] + ] + + # 
Reconstruct beam velocity matrix to use
+                                # only valid beams
+                                vel = vel[
+                                    [beam_pair_1a, beam_pair_2a, beam_pair_2b]
+                                ]

                                 # Apply transformation matrix
                                 temp_t = t_mult.dot(vel)

-                                # Correct horizontal velocity for invalid pair with the vertical
+                                # Correct horizontal velocity for invalid
+                                # pair with the vertical
                                 # velocity and speed of sound correction
                                 temp_t[0] = temp_t[0] - temp_t[2] * sos_correction

                             # Beam 3 invalid
                             if idx_3_beam[0][0] == beam_pair_2a:

                                 # Double valid beam in invalid pair
-                                t_mult[0:2, beam_pair_2b] = t_mult[:2, beam_pair_2b] * 2
-
-                                # Eliminate invalid pair from vertical velocity computations
-                                t_mult[2, :] = [1/sos_correction, 1/sos_correction, 0, 0]
-
-                                # Reconstruct transformation matrix as a 3x3 matrid
-                                t_mult = t_mult[:3, [beam_pair_1a, beam_pair_1b, beam_pair_2b]]
-
-                                # Reconstruct beam velocity matrix to use only valid beams
-                                vel = vel[[beam_pair_1a, beam_pair_1b, beam_pair_2b]]
+                                t_mult[0:2, beam_pair_2b] = (
+                                    t_mult[:2, beam_pair_2b] * 2
+                                )
+
+                                # Eliminate invalid pair from vertical
+                                # velocity computations
+                                t_mult[2, :] = [
+                                    1 / sos_correction,
+                                    1 / sos_correction,
+                                    0,
+                                    0,
+                                ]
+
+                                # Reconstruct transformation matrix as a
+                                # 3x3 matrix
+                                t_mult = t_mult[
+                                    :3, [beam_pair_1a, beam_pair_1b, beam_pair_2b]
+                                ]
+
+                                # Reconstruct beam velocity matrix to use
+                                # only valid beams
+                                vel = vel[
+                                    [beam_pair_1a, beam_pair_1b, beam_pair_2b]
+                                ]

                                 # Apply transformation matrix
                                 temp_t = t_mult.dot(vel)

-                                # Correct horizontal velocity for invalid pair with the vertical
+                                # Correct horizontal velocity for invalid
+                                # pair with the vertical
                                 # velocity and speed of sound correction
                                 temp_t[1] = temp_t[1] - temp_t[2] * sos_correction

                             # Beam 4 invalid
                             if idx_3_beam[0][0] == beam_pair_2b:

                                 # Double valid beam in invalid pair
                                 t_mult[:2, beam_pair_2a] *= 2

-                                # Eliminate invalid pair from vertical velocity computations
-                                t_mult[2, :] = [1/sos_correction, 1/sos_correction, 0, 0]
-
-                                # Reconstruct transformations matrix as a 3x3 matrix
-                                t_mult = t_mult[:3, [beam_pair_1a, beam_pair_1b, beam_pair_2a]]
-
-                                # Reconstruct beam velocity matrix to use only valid beams
-                                vel = vel[[beam_pair_1a, beam_pair_1b, beam_pair_2a]]
+                                # Eliminate invalid pair from vertical
+                                # velocity computations
+                                t_mult[2, :] = [
+                                    1 / sos_correction,
+                                    1 / sos_correction,
+                                    0,
+                                    0,
+                                ]
+
+                                # Reconstruct transformation matrix as a
+                                # 3x3 matrix
+                                t_mult = t_mult[
+                                    :3, [beam_pair_1a, beam_pair_1b, beam_pair_2a]
+                                ]
+
+                                # Reconstruct beam velocity matrix to use
+                                # only valid beams
+                                vel = vel[
+                                    [beam_pair_1a, beam_pair_1b, beam_pair_2a]
+                                ]

                                 # Apply transformation matrix
                                 temp_t = t_mult.dot(vel)

-                                # Correct horizontal velocity for invalid pair with the vertical
+                                # Correct horizontal velocity for invalid
+                                # pair with the vertical
                                 # velocity and speed of sound correction
                                 temp_t[1] = temp_t[1] + temp_t[2] * sos_correction

@@ -650,7 +774,9 @@
                             vel_3_beam_zero = vel
                             vel_3_beam_zero[np.isnan(vel)] = 0
                             vel_error = np.matmul(t_mult[3, :], vel_3_beam_zero)
-                            vel[idx_3_beam] = -1 * vel_error / np.squeeze(t_mult[3, idx_3_beam])
+                            vel[idx_3_beam] = (
+                                -1 * vel_error / np.squeeze(t_mult[3, idx_3_beam])
+                            )
                             temp_t = t_mult.dot(vel)

                         # Apply transformation matrix for 3 beam solutions
@@ -671,7 +797,8 @@
                 # Get velocity data
                 vel = np.copy(np.squeeze(self.raw_vel_mps[:, ii]))

-                # Apply heading pitch roll for inst and ship coordinate data
+                # Apply heading pitch roll for inst and ship
+                # coordinate data
                 temp_thpr = np.array(hpr_matrix).dot(vel[:3])
temp_thpr = np.hstack([temp_thpr, vel[3]]) @@ -687,8 +814,8 @@ class BoatData(object): self.v_processed_mps = np.copy(self.v_mps) def change_heading(self, heading_change): - """Rotates the boat velocities for a change in heading due to a change in - magnetic variation, heading offset, or heading source. + """Rotates the boat velocities for a change in heading due to a change + in magnetic variation, heading offset, or heading source. Parameters ---------- @@ -698,7 +825,9 @@ class BoatData(object): # Apply change to processed data direction, mag = cart2pol(self.u_processed_mps, self.v_processed_mps) - self.u_processed_mps, self.v_processed_mps = pol2cart(direction - np.deg2rad(heading_change), mag) + self.u_processed_mps, self.v_processed_mps = pol2cart( + direction - np.deg2rad(heading_change), mag + ) # Apply change to unprocessed data direction, mag = cart2pol(self.u_mps, self.v_mps) @@ -719,8 +848,8 @@ class BoatData(object): if self.u_mps is not None: self.u_processed_mps = np.copy(self.u_mps) self.v_processed_mps = np.copy(self.v_mps) - self.u_processed_mps[self.valid_data[0, :] == False] = np.nan - self.v_processed_mps[self.valid_data[0, :] == False] = np.nan + self.u_processed_mps[np.logical_not(self.valid_data[0, :])] = np.nan + self.v_processed_mps[np.logical_not(self.valid_data[0, :])] = np.nan # Determine interpolation methods to apply if interpolation_method is None: @@ -730,33 +859,36 @@ class BoatData(object): # Apply specified interpolation method - if interpolation_method == 'None': + if interpolation_method == "None": # Sets invalid data to nan with no interpolation self.interpolate_none() - elif interpolation_method == 'ExpandedT': - # Set interpolate to none as the interpolation done is in the QComp + elif interpolation_method == "ExpandedT": + # Set interpolate to none as the interpolation done is in + # the QComp self.interpolate_next() - elif interpolation_method == 'Hold9': - # Interpolates using SonTek method of holding last valid for up to 9 samples + elif interpolation_method == "Hold9": + # Interpolates using SonTek method of holding last valid + # for up to 9 samples self.interpolate_hold_9() - elif interpolation_method == 'HoldLast': + elif interpolation_method == "HoldLast": # Interpolates by holding last valid indefinitely self.interpolate_hold_last() - elif interpolation_method == 'Linear': + elif interpolation_method == "Linear": # Interpolates using linear interpolation self.interpolate_linear(transect) - elif interpolation_method == 'Smooth': + elif interpolation_method == "Smooth": # Interpolates using smooth interpolation self.interpolate_smooth(transect) - elif interpolation_method == 'TRDI': + elif interpolation_method == "TRDI": # TRDI interpolation is done in discharge. - # For TRDI the interpolation is done on discharge not on velocities + # For TRDI the interpolation is done on discharge not on + # velocities self.interpolate_none() def apply_composite(self, u_composite, v_composite, composite_source): @@ -768,17 +900,17 @@ class BoatData(object): Composite u-velocity component, in m/s v_composite: np.array(float) Composite v-velocity component, in m/s - composite_source: str + composite_source: np.array() Reference used for each ensemble velocity. 
""" self.u_processed_mps = u_composite self.v_processed_mps = v_composite - self.processed_source[composite_source == 1] = 'BT' - self.processed_source[composite_source == 2] = 'GGA' - self.processed_source[composite_source == 3] = 'VTG' - self.processed_source[composite_source == 0] = 'INT' - self.processed_source[composite_source == -1] = 'INV' + self.processed_source[composite_source == 1] = "BT" + self.processed_source[composite_source == 2] = "GGA" + self.processed_source[composite_source == 3] = "VTG" + self.processed_source[composite_source == 0] = "INT" + self.processed_source[composite_source == -1] = "INV" def sos_correction(self, ratio): """Correct boat velocity for a change in speed of sound. @@ -795,7 +927,8 @@ class BoatData(object): self.w_mps = self.w_mps * ratio def interpolate_hold_9(self): - """This function applies Sontek's approach to maintaining the last valid boat speed for up to 9 invalid samples. + """This function applies Sontek's approach to maintaining the last + valid boat speed for up to 9 invalid samples. """ # Initialize variables @@ -804,14 +937,15 @@ class BoatData(object): # Get data from object self.u_processed_mps = np.copy(self.u_mps) self.v_processed_mps = np.copy(self.v_mps) - self.u_processed_mps[self.valid_data[0, :] == False] = np.nan - self.v_processed_mps[self.valid_data[0, :] == False] = np.nan + self.u_processed_mps[np.logical_not(self.valid_data[0, :])] = np.nan + self.v_processed_mps[np.logical_not(self.valid_data[0, :])] = np.nan n_invalid = 0 # Process data by ensembles for n in range(n_ensembles): - # Check if ensemble is invalid and number of consecutive invalids is less than 9 - if self.valid_data[0, n] == False and n_invalid < 9: + # Check if ensemble is invalid and number of consecutive + # invalids is less than 9 + if self.valid_data[0, n] is False and n_invalid < 9: self.u_processed_mps[n] = self.u_processed_mps[n - 1] self.v_processed_mps[n] = self.v_processed_mps[n - 1] n_invalid += 1 @@ -819,16 +953,18 @@ class BoatData(object): n_invalid = 0 def interpolate_none(self): - """This function removes any interpolation from the data and sets filtered data to nan.""" + """This function removes any interpolation from the data and sets + filtered data to nan.""" # Reset processed data self.u_processed_mps = np.copy(self.u_mps) self.v_processed_mps = np.copy(self.v_mps) - self.u_processed_mps[self.valid_data[0, :] == False] = np.nan - self.v_processed_mps[self.valid_data[0, :] == False] = np.nan + self.u_processed_mps[np.logical_not(self.valid_data[0, :])] = np.nan + self.v_processed_mps[np.logical_not(self.valid_data[0, :])] = np.nan def interpolate_hold_last(self): - """This function holds the last valid value until the next valid data point.""" + """This function holds the last valid value until the next valid data + point.""" if self.u_mps is not None: # Initialize variables @@ -837,14 +973,15 @@ class BoatData(object): # Get data from object self.u_processed_mps = np.copy(self.u_mps) self.v_processed_mps = np.copy(self.v_mps) - self.u_processed_mps[self.valid_data[0, :] == False] = np.nan - self.v_processed_mps[self.valid_data[0, :] == False] = np.nan + self.u_processed_mps[np.logical_not(self.valid_data[0, :])] = np.nan + self.v_processed_mps[np.logical_not(self.valid_data[0, :])] = np.nan n_invalid = 0 # Process data by ensembles for n in range(1, n_ensembles): - # Check if ensemble is invalid and number of consecutive invalids is less than 9 - if (self.valid_data[0, n] == False) and (n_invalid < 9): + # Check if ensemble is 
invalid and number of consecutive + # invalids is less than 9 + if (self.valid_data[0, n] is False) and (n_invalid < 9): self.u_processed_mps[n] = self.u_processed_mps[n - 1] self.v_processed_mps[n] = self.v_processed_mps[n - 1] @@ -857,13 +994,14 @@ class BoatData(object): # Process ensembles n_ens = len(valid_ens) - for n in np.arange(0, n_ens-1)[::-1]: + for n in np.arange(0, n_ens - 1)[::-1]: if not valid_ens[n]: - self.u_processed_mps[n] = self.u_processed_mps[n+1] - self.v_processed_mps[n] = self.v_processed_mps[n+1] + self.u_processed_mps[n] = self.u_processed_mps[n + 1] + self.v_processed_mps[n] = self.v_processed_mps[n + 1] def interpolate_smooth(self, transect): - """This function interpolates data flagged invalid using the smooth function. + """This function interpolates data flagged invalid using the smooth + function. Parameters ---------- @@ -872,11 +1010,10 @@ class BoatData(object): """ # Get data from object - u = np.copy(self.u_mps) v = np.copy(self.v_mps) - u[self.valid_data[0, :] == False] = np.nan - v[self.valid_data[0, :] == False] = np.nan + u[np.logical_not(self.valid_data[0, :])] = np.nan + v[np.logical_not(self.valid_data[0, :])] = np.nan # Compute ens_time ens_time = np.nancumsum(transect.date_time.ens_duration_sec) @@ -892,7 +1029,8 @@ class BoatData(object): self.v_processed_mps[np.isnan(v)] = v_smooth[np.isnan(v)] def interpolate_linear(self, transect): - """This function interpolates data flagged invalid using linear interpolation. + """This function interpolates data flagged invalid using linear + interpolation. Parameters ---------- @@ -903,7 +1041,7 @@ class BoatData(object): u = np.copy(self.u_mps) v = np.copy(self.v_mps) - valid = np.isnan(u) == False + valid = np.logical_not(np.isnan(u)) # Check for valid data if sum(valid) > 1 and sum(self.valid_data[0, :]) > 1: @@ -912,20 +1050,25 @@ class BoatData(object): ens_time = np.nancumsum(transect.date_time.ens_duration_sec) # Apply linear interpolation - self.u_processed_mps = np.interp(x=ens_time, - xp=ens_time[self.valid_data[0, :]], - fp=u[self.valid_data[0, :]], - left=np.nan, - right=np.nan) + self.u_processed_mps = np.interp( + x=ens_time, + xp=ens_time[self.valid_data[0, :]], + fp=u[self.valid_data[0, :]], + left=np.nan, + right=np.nan, + ) # Apply linear interpolation - self.v_processed_mps = np.interp(x=ens_time, - xp=ens_time[self.valid_data[0, :]], - fp=v[self.valid_data[0, :]], - left=np.nan, - right=np.nan) + self.v_processed_mps = np.interp( + x=ens_time, + xp=ens_time[self.valid_data[0, :]], + fp=v[self.valid_data[0, :]], + left=np.nan, + right=np.nan, + ) def interpolate_composite(self, transect): - """This function interpolates processed data flagged invalid using linear interpolation. + """This function interpolates processed data flagged invalid using + linear interpolation. 
Parameters ---------- @@ -936,7 +1079,7 @@ class BoatData(object): u = np.copy(self.u_processed_mps) v = np.copy(self.v_processed_mps) - valid = np.isnan(u) == False + valid = np.logical_not(np.isnan(u)) # Check for valid data if np.sum(valid) > 1: @@ -948,23 +1091,32 @@ class BoatData(object): diff_time = np.diff(ens_time[valid]) idx = np.where(diff_time == 0)[0] mono_array = np.vstack([ens_time[valid], u[valid], v[valid]]) + # Replace non-monotonic times with average values for i in idx[::-1]: - mono_array[1, i] = np.nanmean(mono_array[1, i:i+2]) - mono_array[2, i] = np.nanmean(mono_array[2, i:i + 2]) - mono_array = np.delete(mono_array, i+1, 1) - # Apply linear interpolation + mono_array[1, i] = np.nanmean(mono_array[1, i : i + 2]) + mono_array[2, i] = np.nanmean(mono_array[2, i : i + 2]) + mono_array = np.delete(mono_array, i + 1, 1) + # Apply linear interpolation - self.u_processed_mps = np.interp(ens_time, - mono_array[0, :], - mono_array[1, :]) + self.u_processed_mps = np.interp( + ens_time, mono_array[0, :], mono_array[1, :] + ) # Apply linear interpolation - self.v_processed_mps = np.interp(ens_time, - mono_array[0, :], - mono_array[2, :]) - - def apply_filter(self, transect, beam=None, difference=None, difference_threshold=None, vertical=None, - vertical_threshold=None, other=None): + self.v_processed_mps = np.interp( + ens_time, mono_array[0, :], mono_array[2, :] + ) + + def apply_filter( + self, + transect, + beam=None, + difference=None, + difference_threshold=None, + vertical=None, + vertical_threshold=None, + other=None, + ): """Function to apply filters to navigation data. More than one filter can be applied during a single call. @@ -987,7 +1139,19 @@ class BoatData(object): Setting to other filter """ - if len({beam, difference, difference_threshold, vertical, vertical_threshold, other}) > 1: + if ( + len( + { + beam, + difference, + difference_threshold, + vertical, + vertical_threshold, + other, + } + ) + > 1 + ): # Filter based on number of valid beams if beam is not None: @@ -995,14 +1159,16 @@ class BoatData(object): # Filter based on difference velocity if difference is not None: - if difference == 'Manual': - self.filter_diff_vel(setting=difference, threshold=difference_threshold) + if difference == "Manual": + self.filter_diff_vel( + setting=difference, threshold=difference_threshold + ) else: self.filter_diff_vel(setting=difference) # Filter based on vertical velocity if vertical is not None: - if vertical == 'Manual': + if vertical == "Manual": self.filter_vert_vel(setting=vertical, threshold=vertical_threshold) else: self.filter_vert_vel(setting=vertical) @@ -1013,8 +1179,12 @@ class BoatData(object): else: self.filter_beam(setting=self.beam_filter) - self.filter_diff_vel(setting=self.d_filter, threshold=self.d_filter_thresholds) - self.filter_vert_vel(setting=self.w_filter, threshold=self.w_filter_thresholds) + self.filter_diff_vel( + setting=self.d_filter, threshold=self.d_filter_thresholds + ) + self.filter_vert_vel( + setting=self.w_filter, threshold=self.w_filter_thresholds + ) self.filter_smooth(setting=self.smooth_filter, transect=transect) # Apply previously specified interpolation method @@ -1045,7 +1215,8 @@ class BoatData(object): self.beam_filter = setting - # In manual mode determine number of raw invalid and number of 3 beam solutions + # In manual mode determine number of raw invalid and number of 3 + # beam solutions # 3 beam solutions if selected if self.beam_filter > 0: @@ -1071,9 +1242,11 @@ class BoatData(object): self.filter_beam(3) 
beam_3_valid_data = copy.deepcopy(self.valid_data) self.filter_beam(4) - valid_3_beams = np.logical_xor(beam_3_valid_data[5, :], self.valid_data[5, :]) + valid_3_beams = np.logical_xor( + beam_3_valid_data[5, :], self.valid_data[5, :] + ) n_ens = len(self.valid_data[5, :]) - idx = np.where(valid_3_beams == True)[0] + idx = np.where(valid_3_beams)[0] # If 3 beam solutions exist evaluate there validity if len(idx) > 0: @@ -1090,28 +1263,33 @@ class BoatData(object): # Find nearest 4 beam solutions before and after # 3 beam solution - ref_idx_before = np.where(self.valid_data[5, :idx[m]] == True)[0] + ref_idx_before = np.where(self.valid_data[5, : idx[m]])[0] if len(ref_idx_before) > 0: ref_idx_before = ref_idx_before[-1] else: ref_idx_before = None - ref_idx_after = np.where(self.valid_data[5, idx[m]:] == True)[0] + ref_idx_after = np.where(self.valid_data[5, idx[m] :])[0] if len(ref_idx_after) > 0: ref_idx_after = idx[m] + ref_idx_after[0] else: ref_idx_after = None if (ref_idx_after is not None) and (ref_idx_before is not None): - u_ratio = (self.u_mps[idx[m]]) / ((self.u_mps[ref_idx_before] - + self.u_mps[ref_idx_after]) / 2.) - 1 - v_ratio = (self.v_mps[idx[m]]) / ((self.v_mps[ref_idx_before] - + self.v_mps[ref_idx_after]) / 2.) - 1 + u_ratio = (self.u_mps[idx[m]]) / ( + (self.u_mps[ref_idx_before] + self.u_mps[ref_idx_after]) + / 2.0 + ) - 1 + v_ratio = (self.v_mps[idx[m]]) / ( + (self.v_mps[ref_idx_before] + self.v_mps[ref_idx_after]) + / 2.0 + ) - 1 else: u_ratio = 1 v_ratio = 1 - # If 3-beam differs from 4-beam by more than 50% mark it invalid + # If 3-beam differs from 4-beam by more than 50% mark + # it invalid if (np.abs(u_ratio) > 0.5) and (np.abs(v_ratio) > 0.5): self.valid_data[5, idx[m]] = False else: @@ -1121,7 +1299,7 @@ class BoatData(object): # Combine all filter data to composite valid data self.valid_data[0, :] = np.all(self.valid_data[1:, :], 0) - self.num_invalid = np.sum(self.valid_data[0, :] == False) + self.num_invalid = np.sum(np.logical_not(self.valid_data[0, :])) def filter_diff_vel(self, setting, threshold=None): """Applies either manual or automatic filtering of the difference @@ -1132,47 +1310,62 @@ class BoatData(object): should follow a gaussian distribution. Therefore, 5 iqr should encompass all of the valid data. The standard deviation and limits (multiplier*standard deviation) are computed in an iterative - process until filtering out additional data does not change the computed - standard deviation. + process until filtering out additional data does not change the + computed standard deviation. 
Parameters ---------- setting: str Difference velocity setting (Off, Manual, Auto) - threshold: float + threshold: float, dict If manual, the user specified threshold """ self.d_filter = setting - if setting == 'Manual': + if setting == "Manual": self.d_filter_thresholds = threshold # Apply selected method - if self.d_filter == 'Manual': + if self.d_filter == "Manual": d_vel_max_ref = np.abs(self.d_filter_thresholds) d_vel_min_ref = -1 * d_vel_max_ref - invalid_idx = np.where(np.logical_or(nan_greater(self.d_mps, d_vel_max_ref), - nan_less(self.d_mps, d_vel_min_ref)))[0] - elif self.d_filter == 'Off': + invalid_idx = np.where( + np.logical_or( + nan_greater(self.d_mps, d_vel_max_ref), + nan_less(self.d_mps, d_vel_min_ref), + ) + )[0] + elif self.d_filter == "Off": invalid_idx = np.array([]) - elif self.d_filter == 'Auto': + elif self.d_filter == "Auto": if self.use_measurement_thresholds: - freq_ensembles = self.frequency_khz.astype(int).astype(str) + if self.ping_type is None: + freq_ensembles = self.frequency_khz.astype(int).astype(str) + else: + freq_ensembles = self.ping_type invalid_idx = np.array([]) for freq in self.d_meas_thresholds.keys(): filter_data = np.copy(self.d_mps) filter_data[freq_ensembles != freq] = np.nan - idx = np.where(np.logical_or(np.greater(filter_data, self.d_meas_thresholds[freq][0]), - np.less(filter_data, self.d_meas_thresholds[freq][1])))[0] + idx = np.where( + np.logical_or( + np.greater(filter_data, self.d_meas_thresholds[freq][0]), + np.less(filter_data, self.d_meas_thresholds[freq][1]), + ) + )[0] if idx.size > 0: if invalid_idx.size > 0: invalid_idx = np.hstack((invalid_idx, idx)) else: invalid_idx = idx else: - freq_used = np.unique(self.frequency_khz).astype(int).astype(str) - freq_ensembles = self.frequency_khz.astype(int).astype(str) + if self.ping_type is None: + freq_used = np.unique(self.frequency_khz).astype(int).astype(str) + freq_ensembles = self.frequency_khz.astype(int).astype(str) + else: + freq_used = np.unique(self.ping_type) + freq_ensembles = self.ping_type self.d_filter_thresholds = {} invalid_idx = np.array([]) for freq in freq_used: @@ -1180,8 +1373,12 @@ class BoatData(object): filter_data[freq_ensembles != freq] = np.nan d_vel_max_ref, d_vel_min_ref = self.iqr_filter(filter_data) self.d_filter_thresholds[freq] = [d_vel_max_ref, d_vel_min_ref] - idx = np.where(np.logical_or(nan_greater(filter_data, d_vel_max_ref), - nan_less(filter_data, d_vel_min_ref)))[0] + idx = np.where( + np.logical_or( + nan_greater(filter_data, d_vel_max_ref), + nan_less(filter_data, d_vel_min_ref), + ) + )[0] if idx.size > 0: if invalid_idx.size > 0: invalid_idx = np.hstack((invalid_idx, idx)) @@ -1197,7 +1394,7 @@ class BoatData(object): # Combine all filter data to composite filter data self.valid_data[0, :] = np.all(self.valid_data[1:, :], 0) - self.num_invalid = np.sum(self.valid_data[0, :] == False) + self.num_invalid = np.sum(np.logical_not(self.valid_data[0, :])) def filter_vert_vel(self, setting, threshold=None): """Applies either manual or automatic filtering of the vertical @@ -1207,42 +1404,57 @@ class BoatData(object): ---------- setting: str Filter setting (Off, Manual, Auto) - threshold: float + threshold: float, dict If setting is manual, the user specified threshold """ # Set vertical velocity filter properties self.w_filter = setting - if setting == 'Manual': + if setting == "Manual": self.w_filter_thresholds = threshold # Apply selected method - if self.w_filter == 'Manual': + if self.w_filter == "Manual": w_vel_max_ref = 
np.abs(self.w_filter_thresholds) w_vel_min_ref = -1 * w_vel_max_ref - invalid_idx = np.where(np.logical_or(nan_greater(self.w_mps, w_vel_max_ref), - nan_less(self.w_mps, w_vel_min_ref)))[0] - - elif self.w_filter == 'Off': + invalid_idx = np.where( + np.logical_or( + nan_greater(self.w_mps, w_vel_max_ref), + nan_less(self.w_mps, w_vel_min_ref), + ) + )[0] + + elif self.w_filter == "Off": invalid_idx = np.array([]) - elif self.w_filter == 'Auto': + elif self.w_filter == "Auto": if self.use_measurement_thresholds: - freq_ensembles = self.frequency_khz.astype(int).astype(str) + if self.ping_type is None: + freq_ensembles = self.frequency_khz.astype(int).astype(str) + else: + freq_ensembles = self.ping_type invalid_idx = np.array([]) for freq in self.w_meas_thresholds.keys(): filter_data = np.copy(self.w_mps.astype(float)) filter_data[freq_ensembles != freq] = np.nan - idx = np.where(np.logical_or(np.greater(filter_data, self.w_meas_thresholds[freq][0]), - np.less(filter_data, self.w_meas_thresholds[freq][1])))[0] + idx = np.where( + np.logical_or( + np.greater(filter_data, self.w_meas_thresholds[freq][0]), + np.less(filter_data, self.w_meas_thresholds[freq][1]), + ) + )[0] if idx.size > 0: if invalid_idx.size > 0: invalid_idx = np.hstack((invalid_idx, idx)) else: invalid_idx = idx else: - freq_used = np.unique(self.frequency_khz).astype(int).astype(str) - freq_ensembles = self.frequency_khz.astype(int).astype(str) + if self.ping_type is None: + freq_used = np.unique(self.frequency_khz).astype(int).astype(str) + freq_ensembles = self.frequency_khz.astype(int).astype(str) + else: + freq_used = np.unique(self.ping_type) + freq_ensembles = self.ping_type self.w_filter_thresholds = {} invalid_idx = np.array([]) for freq in freq_used: @@ -1250,8 +1462,12 @@ class BoatData(object): filter_data[freq_ensembles != freq] = np.nan w_vel_max_ref, w_vel_min_ref = self.iqr_filter(filter_data) self.w_filter_thresholds[freq] = [w_vel_max_ref, w_vel_min_ref] - idx = np.where(np.logical_or(nan_greater(filter_data, w_vel_max_ref), - nan_less(filter_data, w_vel_min_ref)))[0] + idx = np.where( + np.logical_or( + nan_greater(filter_data, w_vel_max_ref), + nan_less(filter_data, w_vel_min_ref), + ) + )[0] if idx.size > 0: if invalid_idx.size > 0: invalid_idx = np.hstack((invalid_idx, idx)) @@ -1267,10 +1483,10 @@ class BoatData(object): # Combine all filter data to composite valid data self.valid_data[0, :] = np.all(self.valid_data[1:, :], 0) - self.num_invalid = np.sum(self.valid_data[0, :] == False) + self.num_invalid = np.sum(np.logical_not(self.valid_data[0, :])) @staticmethod - def iqr_filter(data, multiplier=5, minimum_window = 0.01): + def iqr_filter(data, multiplier=5, minimum_window=0.01): """Apply the iqr filter to bt data. Parameters @@ -1298,7 +1514,6 @@ class BoatData(object): k = 0 iqr_diff = 1 - # Loop until no additional data are removed while iqr_diff != 0 and k < 1000: k += 1 @@ -1336,9 +1551,10 @@ class BoatData(object): First a robust Loess smooth is fitted to the boat speed time series and residuals between the raw data and the smoothed line are computed. The - trimmed standard deviation is computed by selecting the number of residuals - specified by "halfwidth" before the target point and after the target point, - but not including the target point. These values are then sorted, and the points + trimmed standard deviation is computed by selecting the number of + residuals specified by "halfwidth" before the target point and + after the target point, but not including the target point. 
+ These values are then sorted, and the points with the highest and lowest values are removed from the subset, and the standard deviation of the trimmed subset is computed. The filter criteria are determined by multiplying the standard deviation by a user @@ -1367,8 +1583,9 @@ class BoatData(object): # Compute ens_time ens_time = np.nancumsum(transect.date_time.ens_duration_sec) n_ensembles = len(ens_time) + # Determine if smooth filter should be applied - if self.smooth_filter == 'On': + if self.smooth_filter == "On": # Initialize arrays self.smooth_speed = repmat([np.nan], 1, n_ensembles) self.smooth_upper_limit = repmat([np.nan], 1, n_ensembles) @@ -1405,13 +1622,17 @@ class BoatData(object): lower_limit = speed_smooth - multiplier * filter_array # Apply filter to residuals - bt_bad_idx = np.where(np.logical_or(np.greater(speed, upper_limit), np.less(speed, lower_limit)))[0] + bt_bad_idx = np.where( + np.logical_or( + np.greater(speed, upper_limit), np.less(speed, lower_limit) + ) + )[0] speed_res[bt_bad_idx] = np.nan # Update valid_data property self.valid_data[4, :] = True self.valid_data[4, bt_bad_idx] = False - self.valid_data[4, self.valid_data[1, :] == False] = True + self.valid_data[4, np.logical_not(self.valid_data[1, :])] = True self.smooth_upper_limit = upper_limit self.smooth_lower_limit = lower_limit self.smooth_speed = speed_smooth @@ -1425,11 +1646,25 @@ class BoatData(object): self.smooth_speed = np.nan # Combine all filter data to composite valid data - self.valid_data[0, :] = np.all(self.valid_data[1:, ], 0) - self.num_invalid = np.sum(self.valid_data[0, :] == False, 0) - - def apply_gps_filter(self, transect, differential=None, altitude=None, altitude_threshold=None, - hdop=None, hdop_max_threshold=None, hdop_change_threshold=None, other=None): + self.valid_data[0, :] = np.all( + self.valid_data[ + 1:, + ], + 0, + ) + self.num_invalid = np.sum(np.logical_not(self.valid_data[0, :]), 0) + + def apply_gps_filter( + self, + transect, + differential=None, + altitude=None, + altitude_threshold=None, + hdop=None, + hdop_max_threshold=None, + hdop_change_threshold=None, + other=None, + ): """Applies filters to GPS referenced boat velocity data. Parameters @@ -1452,26 +1687,49 @@ class BoatData(object): Other filter typically a smooth. 
""" - if len({differential, altitude, altitude_threshold, hdop, - hdop_max_threshold, hdop_change_threshold, other}) > 0: - # Differential filter only applies to GGA data, defaults to 1 for VTG + if ( + len( + { + differential, + altitude, + altitude_threshold, + hdop, + hdop_max_threshold, + hdop_change_threshold, + other, + } + ) + > 0 + ): + # Differential filter only applies to GGA data, defaults to 1 + # for VTG if differential is not None: - if self.nav_ref == 'GGA': - self.filter_diff_qual(gps_data=transect.gps, setting=int(differential)) + if self.nav_ref == "GGA": + self.filter_diff_qual( + gps_data=transect.gps, setting=int(differential) + ) else: self.filter_diff_qual(gps_data=transect.gps, setting=1) # Altitude filter only applies to GGA data if altitude is not None: - if (altitude == 'Manual') and (self.nav_ref == 'GGA'): - self.filter_altitude(gps_data=transect.gps, setting=altitude, threshold=altitude_threshold) - elif self.nav_ref == 'GGA': + if (altitude == "Manual") and (self.nav_ref == "GGA"): + self.filter_altitude( + gps_data=transect.gps, + setting=altitude, + threshold=altitude_threshold, + ) + elif self.nav_ref == "GGA": self.filter_altitude(gps_data=transect.gps, setting=altitude) if hdop is not None: - if hdop == 'Manual': - self.filter_hdop(gps_data=transect.gps, setting=hdop, max_threshold=hdop_max_threshold, - change_threshold=hdop_change_threshold) + if hdop == "Manual": + self.filter_hdop( + gps_data=transect.gps, + setting=hdop, + max_threshold=hdop_max_threshold, + change_threshold=hdop_change_threshold, + ) else: self.filter_hdop(gps_data=transect.gps, setting=hdop) @@ -1487,7 +1745,8 @@ class BoatData(object): self.apply_interpolation(transect=transect) def filter_diff_qual(self, gps_data, setting=None): - """Filters GPS data based on the minimum acceptable differential correction quality. + """Filters GPS data based on the minimum acceptable differential + correction quality. Parameters ---------- @@ -1520,14 +1779,14 @@ class BoatData(object): self.valid_data[2, gps_data.diff_qual_ens < 4] = False # If there is no indication of the quality assume 1 fot vtg - if self.nav_ref == 'VTG': + if self.nav_ref == "VTG": self.valid_data[2, np.isnan(gps_data.diff_qual_ens)] = True else: self.valid_data[2, :] = False # Combine all filter data to composite valid data self.valid_data[0, :] = np.all(self.valid_data[1:, :], 0) - self.num_invalid = np.sum(self.valid_data[0, :] == False) + self.num_invalid = np.sum(np.logical_not(self.valid_data[0, :])) def filter_altitude(self, gps_data, setting=None, threshold=None): """Filter GPS data based on a change in altitude. 
@@ -1551,19 +1810,18 @@ class BoatData(object): # New filter settings if provided if setting is not None: self.gps_altitude_filter = setting - if setting == 'Manual': + if setting == "Manual": self.gps_altitude_filter_change = threshold # Set threshold for Auto - if self.gps_altitude_filter == 'Auto': + if self.gps_altitude_filter == "Auto": self.gps_altitude_filter_change = 3 # Set all data to valid self.valid_data[3, :] = True - # self.valid_data[5, :] = True # Manual or Auto is selected, apply filter - if not self.gps_altitude_filter == 'Off': + if not self.gps_altitude_filter == "Off": # Initialize variables num_valid_old = np.sum(self.valid_data[3, :]) k = 0 @@ -1572,17 +1830,20 @@ class BoatData(object): while k < 100 and change > 0.1: # Compute mean using valid ensembles if self.valid_data.shape[1] == 1: - if self.valid_data[1,0]: + if self.valid_data[1, 0]: alt_mean = gps_data.altitude_ens_m else: alt_mean = np.nan else: - alt_mean = np.nanmean(gps_data.altitude_ens_m[self.valid_data[1, :]]) + alt_mean = np.nanmean( + gps_data.altitude_ens_m[self.valid_data[1, :]] + ) # Compute difference for each ensemble diff = np.abs(gps_data.altitude_ens_m - alt_mean) - # Mark invalid those ensembles with differences greater than the change threshold + # Mark invalid those ensembles with differences greater than + # the change threshold self.valid_data[3, diff > self.gps_altitude_filter_change] = False k += 1 num_valid = np.sum(self.valid_data[3, :]) @@ -1590,9 +1851,11 @@ class BoatData(object): # Combine all filter data to composite valid data self.valid_data[0, :] = np.all(self.valid_data[1:, :], 0) - self.num_invalid = np.sum(self.valid_data[0, :] == False) + self.num_invalid = np.sum(np.logical_not(self.valid_data[0, :])) - def filter_hdop(self, gps_data, setting=None, max_threshold=None, change_threshold=None): + def filter_hdop( + self, gps_data, setting=None, max_threshold=None, change_threshold=None + ): """Filter GPS data based on both a maximum HDOP and a change in HDOP over the transect. 
@@ -1609,17 +1872,17 @@ class BoatData(object): """ if gps_data.hdop_ens is None or gps_data.hdop_ens.size == 0: - self.valid_data[5, :self.valid_data.shape[1]] = True + self.valid_data[5, : self.valid_data.shape[1]] = True else: # New settings if provided if setting is not None: self.gps_HDOP_filter = setting - if self.gps_HDOP_filter == 'Manual': + if self.gps_HDOP_filter == "Manual": self.gps_HDOP_filter_max = max_threshold self.gps_HDOP_filter_change = change_threshold # Settings for auto mode - if self.gps_HDOP_filter == 'Auto': + if self.gps_HDOP_filter == "Auto": self.gps_HDOP_filter_change = 3 self.gps_HDOP_filter_max = 4 @@ -1627,7 +1890,7 @@ class BoatData(object): self.valid_data[5, :] = True # Apply filter for manual or auto - if not self.gps_HDOP_filter == 'Off': + if not self.gps_HDOP_filter == "Off": # Initialize variables num_valid_old = np.sum(self.valid_data[5, :]) @@ -1635,7 +1898,9 @@ class BoatData(object): change = 1 # Apply max filter - self.valid_data[5, np.greater(gps_data.hdop_ens, self.gps_HDOP_filter_max)] = False + self.valid_data[ + 5, np.greater(gps_data.hdop_ens, self.gps_HDOP_filter_max) + ] = False # Loop until the number of valid ensembles does not change while k < 100 and change > 0.1: @@ -1649,12 +1914,15 @@ class BoatData(object): else: hdop_mean = np.nanmean(gps_data.hdop_ens[self.valid_data[5, :]]) - # Compute the difference in HDOP and the mean for all ensembles + # Compute the difference in HDOP and the mean for + # all ensembles diff = np.abs(gps_data.hdop_ens - hdop_mean) # If the change is HDOP or the value of HDOP is greater # than the threshold setting mark the data invalid - self.valid_data[5, np.greater(diff, self.gps_HDOP_filter_change)] = False + self.valid_data[ + 5, np.greater(diff, self.gps_HDOP_filter_change) + ] = False k += 1 num_valid = np.sum(self.valid_data[5, :]) @@ -1663,13 +1931,14 @@ class BoatData(object): # Combine all filter data to composite data self.valid_data[0, :] = np.all(self.valid_data[1:, :], 0) - self.num_invalid = np.sum(self.valid_data[0, :] == False) + self.num_invalid = np.sum(np.logical_not(self.valid_data[0, :])) @staticmethod def filter_sontek(vel_in): """Determines invalid raw bottom track samples for SonTek data. - Invalid data are those that are zero or where the velocity doesn't change between ensembles. + Invalid data are those that are zero or where the velocity doesn't + change between ensembles. Parameters ---------- @@ -1679,7 +1948,8 @@ class BoatData(object): Returns ------- vel_out: np.array(float) - Filtered bottom track velocity data with all invalid data set to np.nan. + Filtered bottom track velocity data with all invalid data set to + np.nan. """ # Identify all samples where the velocity did not change @@ -1687,7 +1957,9 @@ class BoatData(object): # Identify all samples with all zero values test2 = np.nansum(np.abs(vel_in), 0) < 0.00001 - test2 = test2[1:] * 4 # using 1: makes the array dimension consistent with test1 as diff results in 1 less. + # using 1: makes the array dimension + # consistent with test1 as diff results in 1 less. 
+ test2 = test2[1:] * 4 # Combine criteria test_sum = np.sum(test1, 0) + test2 @@ -1695,6 +1967,7 @@ class BoatData(object): # Develop logical vector of invalid ensembles invalid_bool = np.full(test_sum.size, False) invalid_bool[test_sum > 3] = True + # Handle first ensemble invalid_bool = np.concatenate((np.array([False]), invalid_bool), 0) if np.nansum(vel_in[:, 0]) == 0: @@ -1710,14 +1983,16 @@ class BoatData(object): def run_std_trim(half_width, my_data): """Computes a standard deviation over +/- halfwidth of points. - The routine accepts a column vector as input. "halfWidth" number of data + The routine accepts a column vector as input. + "halfWidth" number of data points for computing the standard deviation are selected before and after the target data point, but not including the target data point. Near the ends of the series the number of points before or after are reduced. nan in the data are counted as points. The selected subset of points are sorted and the points with the highest and lowest values are removed from the subset and the standard deviation computed on the - remaining points in the subset. The process occurs for each point in the + remaining points in the subset. + The process occurs for each point in the provided column vector. A column vector with the computed standard deviation at each point is returned. @@ -1738,7 +2013,7 @@ class BoatData(object): # Determine number of points to process n_pts = my_data.shape[0] if n_pts < 20: - half_width = np.floor(n_pts / 2.) + half_width = np.floor(n_pts / 2.0) filter_array = [] # Compute standard deviation for each point @@ -1746,22 +2021,26 @@ class BoatData(object): # Sample selection for 1st point if n == 0: - sample = my_data[1:1 + half_width] + sample = my_data[1 : 1 + half_width] # Sample selection at end of data set elif n + half_width > n_pts: - sample = np.hstack((my_data[n - half_width - 1:n - 1], my_data[n:n_pts])) + sample = np.hstack( + (my_data[n - half_width - 1 : n - 1], my_data[n:n_pts]) + ) # Sample selection at beginning of data set elif half_width >= n + 1: - sample = np.hstack((my_data[0:n], my_data[n + 1:n + half_width + 1])) + sample = np.hstack((my_data[0:n], my_data[n + 1 : n + half_width + 1])) # Samples selection in body of data set else: - sample = np.hstack((my_data[n - half_width:n], my_data[n + 1:n + half_width + 1])) + sample = np.hstack( + (my_data[n - half_width : n], my_data[n + 1 : n + half_width + 1]) + ) # Sort and compute trummed standard deviation sample = np.sort(sample) - filter_array.append(np.nanstd(sample[1:sample.shape[0] - 1], ddof=1)) + filter_array.append(np.nanstd(sample[1 : sample.shape[0] - 1], ddof=1)) return np.array(filter_array) diff --git a/Classes/BoatStructure.py b/Classes/BoatStructure.py index f009c7b..d160221 100644 --- a/Classes/BoatStructure.py +++ b/Classes/BoatStructure.py @@ -23,17 +23,29 @@ class BoatStructure(object): def __init__(self): - self.selected = None # Name of BoatData object to be used for discharge computations - self.bt_vel = None # BoatData object for bottom track velocity - self.gga_vel = None # BoatData object for gga velocity - self.vtg_vel = None # BoatData object for vtg velocity + self.selected = None + self.bt_vel = None + self.gga_vel = None + self.vtg_vel = None - # Composite track information is not currently provided by the manufacturers. + # Composite track information is not currently provided by the + # manufacturers. 
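To illustrate the trimming in run_std_trim above: each window is sorted and the single lowest and highest points are dropped before taking the sample standard deviation, so an isolated spike cannot inflate the estimate (the values here are made up):

    import numpy as np

    window = np.array([0.20, 0.25, 0.28, 0.30, 9.90])  # one spike
    sample = np.sort(window)
    trimmed_std = np.nanstd(sample[1:-1], ddof=1)  # ~0.025, spike excluded
    full_std = np.nanstd(window, ddof=1)           # ~4.3, spike dominates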
# Future versions may try to determine this setting from SonTek data - self.composite = 'Off' # Setting for compositir tracks - - def add_boat_object(self, source, vel_in, freq_in=None, coord_sys_in=None, nav_ref_in=None, - min_beams=3, bottom_mode='Variable', corr_in=None, rssi_in=None): + self.composite = "Off" + + def add_boat_object( + self, + source, + vel_in, + freq_in=None, + coord_sys_in=None, + nav_ref_in=None, + min_beams=3, + bottom_mode="Variable", + corr_in=None, + rssi_in=None, + ping_type=None, + ): """Adds a BoatData object to the appropriate property Parameters @@ -49,24 +61,46 @@ class BoatStructure(object): nav_ref_in: str Source of boat velocity data min_beams: int - Setting to allow 3 beam solutions or require 4 beam solutions or set to Auto (-1) + Setting to allow 3 beam solutions or require 4 beam solutions or + set to Auto (-1) bottom_mode: str Bottom mode used + corr_in: np.array + Correlation values for bottom track + rssi_in: np.array + Returned signal strength for bottom track + ping_type: np.array + Indicates type of ping used for bottom tracking """ - if nav_ref_in == 'BT': + if nav_ref_in == "BT": self.bt_vel = BoatData() - self.bt_vel.populate_data(source, vel_in, freq_in, coord_sys_in, nav_ref_in, min_beams, bottom_mode, - corr_in, rssi_in) - if nav_ref_in == 'GGA': + self.bt_vel.populate_data( + source, + vel_in, + freq_in, + coord_sys_in, + nav_ref_in, + min_beams, + bottom_mode, + corr_in, + rssi_in, + ping_type=ping_type, + ) + if nav_ref_in == "GGA": self.gga_vel = BoatData() - self.gga_vel.populate_data(source, vel_in, freq_in, coord_sys_in, nav_ref_in) - if nav_ref_in == 'VTG': + self.gga_vel.populate_data( + source, vel_in, freq_in, coord_sys_in, nav_ref_in, ping_type=ping_type + ) + if nav_ref_in == "VTG": self.vtg_vel = BoatData() - self.vtg_vel.populate_data(source, vel_in, freq_in, coord_sys_in, nav_ref_in) + self.vtg_vel.populate_data( + source, vel_in, freq_in, coord_sys_in, nav_ref_in, ping_type=ping_type + ) def set_nav_reference(self, reference): - """This function will set the navigation reference property to the specified object reference. + """This function will set the navigation reference property to the + specified object reference. Parameters ---------- @@ -74,16 +108,16 @@ class BoatStructure(object): Navigation reference, BT, GGA, or VTG """ - if reference == 'BT': - self.selected = 'bt_vel' - elif reference == 'GGA': - self.selected = 'gga_vel' - elif reference == 'VTG': - self.selected = 'vtg_vel' + if reference == "BT": + self.selected = "bt_vel" + elif reference == "GGA": + self.selected = "gga_vel" + elif reference == "VTG": + self.selected = "vtg_vel" def change_nav_reference(self, reference, transect): - """This function changes the navigation reference to the specified object reference and recomputes - the composite tracks, if necessary. + """This function changes the navigation reference to the specified + object reference and recomputes the composite tracks, if necessary. Parameters ---------- @@ -93,24 +127,25 @@ class BoatStructure(object): Object of TransectData. 
""" - if reference == 'BT': - self.selected = 'bt_vel' - elif reference == 'GGA': - self.selected = 'gga_vel' - elif reference == 'VTG': - self.selected = 'vtg_vel' - elif reference == 'bt_vel': - self.selected = 'bt_vel' - elif reference == 'gga_vel': - self.selected = 'gga_vel' - elif reference == 'vtg_vel': - self.selected = 'vtg_vel' + if reference == "BT": + self.selected = "bt_vel" + elif reference == "GGA": + self.selected = "gga_vel" + elif reference == "VTG": + self.selected = "vtg_vel" + elif reference == "bt_vel": + self.selected = "bt_vel" + elif reference == "gga_vel": + self.selected = "gga_vel" + elif reference == "vtg_vel": + self.selected = "vtg_vel" self.composite_tracks(transect) def change_coord_sys(self, new_coord_sys, sensors, adcp): - """This function will change the coordinate system of the boat velocity reference. - + """This function will change the coordinate system of the boat + velocity reference. + Parameters ---------- new_coord_sys: str @@ -130,8 +165,9 @@ class BoatStructure(object): self.vtg_vel.change_coord_sys(new_coord_sys, sensors, adcp) def composite_tracks(self, transect, setting=None): - """If new composite setting is provided it is used, if not the setting saved in the object is used - + """If new composite setting is provided it is used, if not the + setting saved in the object is used + Parameters ---------- transect: TransectData @@ -147,7 +183,7 @@ class BoatStructure(object): self.composite = setting # Composite depths turned on - if setting == 'On': + if setting == "On": # Initialize variables u_bt = np.array([]) v_bt = np.array([]) @@ -162,8 +198,8 @@ class BoatStructure(object): v_bt = self.bt_vel.v_processed_mps # Set to invalid all interpolated velocities valid_bt = self.bt_vel.valid_data[0, :] - u_bt[valid_bt == False] = np.nan - v_bt[valid_bt == False] = np.nan + u_bt[np.logical_not(valid_bt)] = np.nan + v_bt[np.logical_not(valid_bt)] = np.nan # Prepare gga data if self.gga_vel is not None: @@ -172,8 +208,8 @@ class BoatStructure(object): v_gga = self.gga_vel.v_processed_mps # Set to invalid all interpolated velocities valid_gga = self.gga_vel.valid_data[0, :] - u_gga[valid_gga == False] = np.nan - v_gga[valid_gga == False] = np.nan + u_gga[np.logical_not(valid_gga)] = np.nan + v_gga[np.logical_not(valid_gga)] = np.nan elif self.bt_vel is not None: u_gga = np.tile([np.nan], u_bt.shape) v_gga = np.tile([np.nan], v_bt.shape) @@ -185,128 +221,179 @@ class BoatStructure(object): v_vtg = self.vtg_vel.v_processed_mps # Set to invalid all interpolated velocities valid_vtg = self.vtg_vel.valid_data[0, :] - u_vtg[valid_vtg == False] = np.nan - v_vtg[valid_vtg == False] = np.nan + u_vtg[np.logical_not(valid_vtg)] = np.nan + v_vtg[np.logical_not(valid_vtg)] = np.nan elif self.bt_vel is not None: u_vtg = np.tile([np.nan], u_bt.shape) v_vtg = np.tile([np.nan], v_bt.shape) # Process bt as primary - if self.selected == 'bt_vel': + if self.selected == "bt_vel": # Initialize composite source comp_source = np.tile(np.nan, u_bt.shape) # Process u velocity component u_comp = u_bt - comp_source[np.isnan(u_comp) == False] = 1 + comp_source[np.logical_not(np.isnan(u_comp))] = 1 - # If BT data are not valid try VTG and set composite source (BUG HERE DSM) + # If BT data are not valid try VTG and set composite source u_comp[np.isnan(u_comp)] = u_vtg[np.isnan(u_comp)] - comp_source[np.logical_and(np.isnan(u_comp) == False, np.isnan(comp_source))] = 3 - - # If there are still invalid boat velocities, try GGA and set composite source + comp_source[ + 
np.logical_and( + np.logical_not(np.isnan(u_comp)), np.isnan(comp_source) + ) + ] = 3 + + # If there are still invalid boat velocities, try GGA and + # set composite source u_comp[np.isnan(u_comp)] = u_gga[np.isnan(u_comp)] - comp_source[np.logical_and(np.isnan(u_comp) == False, np.isnan(comp_source))] = 2 + comp_source[ + np.logical_and( + np.logical_not(np.isnan(u_comp)), np.isnan(comp_source) + ) + ] = 2 # If there are still invalid boat velocities, use interpolated # values if present and set composite source - comp_source[np.logical_and(np.isnan(u_comp) == False, np.isnan(comp_source))] = 0 - - # Set composite source to invalid for all remaining invalid boat velocity data + comp_source[ + np.logical_and( + np.logical_not(np.isnan(u_comp)), np.isnan(comp_source) + ) + ] = 0 + + # Set composite source to invalid for all remaining invalid + # boat velocity data comp_source[np.isnan(comp_source)] = -1 - # Process v velocity component. Assume that the composite source is the same - # as the u component + # Process v velocity component. Assume that the composite + # source is the same as the u component v_comp = v_bt v_comp[np.isnan(v_comp)] = v_vtg[np.isnan(v_comp)] v_comp[np.isnan(v_comp)] = v_gga[np.isnan(v_comp)] - v_comp[np.isnan(v_comp)] = self.bt_vel.v_processed_mps[np.isnan(v_comp)] - # Apply the composite settings to the bottom track Boatdata objects + # Apply the composite settings to the bottom track Boatdata + # objects self.bt_vel.apply_composite(u_comp, v_comp, comp_source) self.bt_vel.interpolate_composite(transect) # Process gga as primary - elif self.selected == 'gga_vel': + elif self.selected == "gga_vel": # Initialize the composite source comp_source = np.tile([np.nan], u_bt.shape) # Process the u velocity component u_comp = u_gga - comp_source[np.isnan(u_comp) == False] = 2 + comp_source[np.logical_not(np.isnan(u_comp))] = 2 # If GGA data are not valid try VTG and set composite source u_comp[np.isnan(u_comp)] = u_vtg[np.isnan(u_comp)] - comp_source[np.logical_and(np.isnan(u_comp) == False, np.isnan(comp_source))] = 3 - - # If there are still invalid boar velocities, try BT and set composite source + comp_source[ + np.logical_and( + np.logical_not(np.isnan(u_comp)), np.isnan(comp_source) + ) + ] = 3 + + # If there are still invalid boat velocities, try BT and set + # composite source u_comp[np.isnan(u_comp)] = u_bt[np.isnan(u_comp)] - comp_source[np.logical_and(np.isnan(u_comp) == False, np.isnan(comp_source))] = 1 - - # If there are still invalid boat velocities, use interpolated values, - # if present and set composite source - comp_source[np.logical_and(np.isnan(u_comp) == False, np.isnan(comp_source))] = 0 - - # Set composite source to invalid for all remaining invalid boat velocity data + comp_source[ + np.logical_and( + np.logical_not(np.isnan(u_comp)), np.isnan(comp_source) + ) + ] = 1 + + # If there are still invalid boat velocities, + # use interpolated values, if present and set composite source + comp_source[ + np.logical_and( + np.logical_not(np.isnan(u_comp)), np.isnan(comp_source) + ) + ] = 0 + + # Set composite source to invalid for all remaining invalid + # boat velocity data comp_source[np.isnan(comp_source)] = -1 - # Process v velocity component. Assume that the composite source is the - # same as the u component + # Process v velocity component. 
Assume that the composite + # source is the same as the u component v_comp = v_gga v_comp[np.isnan(v_comp)] = v_vtg[np.isnan(v_comp)] v_comp[np.isnan(v_comp)] = v_bt[np.isnan(v_comp)] - # v_comp[np.isnan(v_comp)] = self.gga_vel.v_processed_mps[np.isnan(v_comp)] # Apply the composite settings to the gga BoatData object - # For the situation where the transect has no GGA data but other transects do and composite tracks - # has been turned on, create the gga_vel object and populate only the u and v processed, comp_source, + # For the situation where the transect has no GGA data but + # other transects do and composite tracks + # has been turned on, create the gga_vel object and populate + # only the u and v processed, comp_source, # and valid_data attributes. if self.gga_vel is None: self.gga_vel = BoatData() - self.gga_vel.processed_source = np.array([''] * comp_source.shape[0], dtype=object) + self.gga_vel.processed_source = np.array( + [""] * comp_source.shape[0], dtype=object + ) self.gga_vel.valid_data = np.full((6, comp_source.shape[0]), False) self.gga_vel.apply_composite(u_comp, v_comp, comp_source) self.gga_vel.interpolate_composite(transect) # Process vtg as primary - elif self.selected == 'vtg_vel': + elif self.selected == "vtg_vel": # Initialize the composite source comp_source = np.tile([np.nan], u_bt.shape) # Process the u velocity component u_comp = u_vtg - comp_source[np.isnan(u_comp) == False] = 3 + comp_source[np.logical_not(np.isnan(u_comp))] = 3 # If VTG data are not valid try GGA and set composite source u_comp[np.isnan(u_comp)] = u_gga[np.isnan(u_comp)] - comp_source[np.logical_and(np.isnan(u_comp) == False, np.isnan(comp_source))] = 2 - - # If there are still invalid boat velocities, try BT and set composite source + comp_source[ + np.logical_and( + np.logical_not(np.isnan(u_comp)), np.isnan(comp_source) + ) + ] = 2 + + # If there are still invalid boat velocities, try BT and set + # composite source u_comp[np.isnan(u_comp)] = u_bt[np.isnan(u_comp)] - comp_source[np.logical_and(np.isnan(u_comp) == False, np.isnan(comp_source))] = 1 - - # If there are still invalid boat velocities, use interpolated values, + comp_source[ + np.logical_and( + np.logical_not(np.isnan(u_comp)), np.isnan(comp_source) + ) + ] = 1 + + # If there are still invalid boat velocities, + # use interpolated values, # if present and set composite source - comp_source[np.logical_and(np.isnan(u_comp) == False, np.isnan(comp_source))] = 0 - - # Set composite source to invalid for all remaining invalid boat velocity data + comp_source[ + np.logical_and( + np.logical_not(np.isnan(u_comp)), np.isnan(comp_source) + ) + ] = 0 + + # Set composite source to invalid for all remaining invalid + # boat velocity data comp_source[np.isnan(comp_source)] = -1 - # Process v velocity component. Assume that the composite source is the + # Process v velocity component. 
Assume that the composite + # source is the # same as the u component v_comp = v_vtg - # DSM wrong in Matlab version 1/29/2018 v_comp[np.isnan(v_comp)] = v_vtg[np.isnan(v_comp)] + # DSM wrong in Matlab version 1/29/2018 v_comp[np.isnan( + # v_comp)] = v_vtg[np.isnan(v_comp)] v_comp[np.isnan(v_comp)] = v_gga[np.isnan(v_comp)] v_comp[np.isnan(v_comp)] = v_bt[np.isnan(v_comp)] - # v_comp[np.isnan(v_comp)] = self.vtg_vel.v_processed_mps[np.isnan(v_comp)] # Apply the composite settings to the gga BoatData object - # For the situation where the transect has no GGA data but other transects do and composite tracks - # has been turned on, create the gga_vel object and populate only the u and v processed, comp_source, + # For the situation where the transect has no GGA data but + # other transects do and composite tracks + # has been turned on, create the gga_vel object and populate + # only the u and v processed, comp_source, # and valid_data attributes. if self.vtg_vel is None: self.vtg_vel = BoatData() - self.vtg_vel.processed_source = np.array([''] * comp_source.shape[0], dtype=object) + self.vtg_vel.processed_source = np.array( + [""] * comp_source.shape[0], dtype=object + ) self.vtg_vel.valid_data = np.full((6, comp_source.shape[0]), False) self.vtg_vel.apply_composite(u_comp, v_comp, comp_source) self.vtg_vel.interpolate_composite(transect) @@ -315,74 +402,107 @@ class BoatStructure(object): # Use only interpolations for bt if self.bt_vel is not None: - self.bt_vel.apply_interpolation(transect=transect, - interpolation_method=transect.boat_vel.bt_vel.interpolate) + self.bt_vel.apply_interpolation( + transect=transect, + interpolation_method=transect.boat_vel.bt_vel.interpolate, + ) comp_source = np.tile(np.nan, self.bt_vel.u_processed_mps.shape) comp_source[self.bt_vel.valid_data[0, :]] = 1 - comp_source[np.logical_and(np.isnan(comp_source), - (np.isnan(self.bt_vel.u_processed_mps) == False))] = 0 + comp_source[ + np.logical_and( + np.isnan(comp_source), + (np.logical_not(np.isnan(self.bt_vel.u_processed_mps))), + ) + ] = 0 comp_source[np.isnan(comp_source)] = -1 - self.bt_vel.apply_composite(u_composite=self.bt_vel.u_processed_mps, - v_composite=self.bt_vel.v_processed_mps, - composite_source=comp_source) + self.bt_vel.apply_composite( + u_composite=self.bt_vel.u_processed_mps, + v_composite=self.bt_vel.v_processed_mps, + composite_source=comp_source, + ) # Use only interpolations for gga if self.gga_vel is not None: - # This if statement handles the situation where there is no GPS data for a transect but there is GPS - # data for other transects and the user has turned on / off composite tracks. + # This if statement handles the situation where there is no + # GPS data for a transect but there is GPS data for other + # transects and the user has turned on / off composite tracks. 
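All three branches above apply the same priority cascade and differ only in ordering. A generic sketch of the bookkeeping (function and variable names are illustrative, not part of the patch), using the source codes 1 = BT, 2 = GGA, 3 = VTG, 0 = interpolated, -1 = invalid:

    import numpy as np

    def composite_fill(u_primary, primary_code, alternates):
        # alternates: list of (velocity_array, source_code) in fallback order
        u = np.copy(u_primary)
        source = np.where(np.isnan(u), np.nan, float(primary_code))
        for u_alt, code in alternates:
            gaps = np.isnan(u)
            u[gaps] = u_alt[gaps]
            source[np.logical_and(np.logical_not(np.isnan(u)),
                                  np.isnan(source))] = code
        # values present only through prior interpolation get 0, the rest -1
        source[np.logical_and(np.logical_not(np.isnan(u)),
                              np.isnan(source))] = 0
        source[np.isnan(source)] = -1
        return u, source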
if self.gga_vel.u_mps is not None: - self.gga_vel.apply_interpolation(transect=transect, - interpolation_method=transect.boat_vel.gga_vel.interpolate) + self.gga_vel.apply_interpolation( + transect=transect, + interpolation_method=transect.boat_vel.gga_vel.interpolate, + ) comp_source = np.tile(np.nan, self.gga_vel.u_processed_mps.shape) comp_source[self.gga_vel.valid_data[0, :]] = 2 - comp_source[np.logical_and(np.isnan(comp_source), - (np.isnan(self.gga_vel.u_processed_mps) == False))] = 0 + comp_source[ + np.logical_and( + np.isnan(comp_source), + (np.logical_not(np.isnan(self.gga_vel.u_processed_mps))), + ) + ] = 0 comp_source[np.isnan(comp_source)] = -1 - self.gga_vel.apply_composite(u_composite=self.gga_vel.u_processed_mps, - v_composite=self.gga_vel.v_processed_mps, - composite_source=comp_source) + self.gga_vel.apply_composite( + u_composite=self.gga_vel.u_processed_mps, + v_composite=self.gga_vel.v_processed_mps, + composite_source=comp_source, + ) else: self.gga_vel = None # Use only interpolations for vtg if self.vtg_vel is not None: - # This if statement handles the situation where there is no GPS data for a transect but there is GPS - # data for other transects and the user has turned on / off composite tracks. + # This if statement handles the situation where there is no + # GPS data for a transect but there is GPS data for other + # transects and the user has turned on / off composite tracks. if self.vtg_vel.u_mps is not None: - self.vtg_vel.apply_interpolation(transect=transect, - interpolation_method=transect.boat_vel.vtg_vel.interpolate) + self.vtg_vel.apply_interpolation( + transect=transect, + interpolation_method=transect.boat_vel.vtg_vel.interpolate, + ) comp_source = np.tile(np.nan, self.vtg_vel.u_processed_mps.shape) comp_source[self.vtg_vel.valid_data[0, :]] = 3 - comp_source[np.logical_and(np.isnan(comp_source), - (np.isnan(self.vtg_vel.u_processed_mps) == False))] = 0 + comp_source[ + np.logical_and( + np.isnan(comp_source), + (np.logical_not(np.isnan(self.vtg_vel.u_processed_mps))), + ) + ] = 0 comp_source[np.isnan(comp_source)] = -1 - self.vtg_vel.apply_composite(u_composite=self.vtg_vel.u_processed_mps, - v_composite=self.vtg_vel.v_processed_mps, - composite_source=comp_source) + self.vtg_vel.apply_composite( + u_composite=self.vtg_vel.u_processed_mps, + v_composite=self.vtg_vel.v_processed_mps, + composite_source=comp_source, + ) else: self.vtg_vel = None @staticmethod def compute_boat_track(transect, ref=None): - """Computes the shiptrack coordinates, along track distance, and distance made - good for the selected boat reference. + """Computes the shiptrack coordinates, along track distance, + and distance made good for the selected boat reference. Parameters ---------- transect: TransectData Object of TransectData ref: str - Setting to determine what navigation reference should be used. In None use selected. + Setting to determine what navigation reference should be used. + If None use selected. 
Returns ------- boat_track: dict - Dictionary containing shiptrack coordinates (track_x_m, track_y_m), along track distance (distance_m), + Dictionary containing shiptrack coordinates (track_x_m, + track_y_m), along track distance (distance_m), and distance made good (dmg_m) """ # Initialize dictionary - boat_track = {'track_x_m': np.nan, 'track_y_m': np.nan, 'distance_m': np.nan, 'dmg_m': np.nan} + boat_track = { + "track_x_m": np.nan, + "track_y_m": np.nan, + "distance_m": np.nan, + "dmg_m": np.nan, + } # Compute incremental track coordinates if ref is None: @@ -391,25 +511,34 @@ class BoatStructure(object): boat_vel_selected = getattr(transect.boat_vel, ref) if boat_vel_selected is None: - boat_vel_selected = getattr(transect.boat_vel, 'bt_vel') - track_x = boat_vel_selected.u_processed_mps[transect.in_transect_idx] * \ - transect.date_time.ens_duration_sec[transect.in_transect_idx] - track_y = boat_vel_selected.v_processed_mps[transect.in_transect_idx] * \ - transect.date_time.ens_duration_sec[transect.in_transect_idx] + boat_vel_selected = getattr(transect.boat_vel, "bt_vel") + track_x = ( + boat_vel_selected.u_processed_mps[transect.in_transect_idx] + * transect.date_time.ens_duration_sec[transect.in_transect_idx] + ) + track_y = ( + boat_vel_selected.v_processed_mps[transect.in_transect_idx] + * transect.date_time.ens_duration_sec[transect.in_transect_idx] + ) # Check for any valid data idx = np.where(np.logical_not(np.isnan(track_x))) if idx[0].size > 1: # Compute variables - boat_track['distance_m'] = np.nancumsum(np.sqrt(track_x ** 2 + track_y ** 2)) - boat_track['track_x_m'] = np.nancumsum(track_x) - boat_track['track_y_m'] = np.nancumsum(track_y) - boat_track['dmg_m'] = np.sqrt(boat_track['track_x_m'] ** 2 + boat_track['track_y_m'] ** 2) + boat_track["distance_m"] = np.nancumsum( + np.sqrt(track_x**2 + track_y**2) + ) + boat_track["track_x_m"] = np.nancumsum(track_x) + boat_track["track_y_m"] = np.nancumsum(track_y) + boat_track["dmg_m"] = np.sqrt( + boat_track["track_x_m"] ** 2 + boat_track["track_y_m"] ** 2 + ) return boat_track def populate_from_qrev_mat(self, transect): - """Populates the object using data from previously saved QRev Matlab file. + """Populates the object using data from previously saved QRev Matlab + file. 
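A worked example of the computation in compute_boat_track above: with constant u = 1.0 m/s, v = 0.5 m/s, and 1-second ensembles, ten ensembles give track_x = 10 m and track_y = 5 m; because this track is a straight line, the along-track distance equals the distance made good, sqrt(10^2 + 5^2) ≈ 11.18 m.

    import numpy as np

    u = np.full(10, 1.0)   # m/s
    v = np.full(10, 0.5)   # m/s
    dt = np.ones(10)       # ensemble durations, s
    track_x = np.nancumsum(u * dt)
    track_y = np.nancumsum(v * dt)
    distance = np.nancumsum(np.sqrt((u * dt) ** 2 + (v * dt) ** 2))
    dmg = np.sqrt(track_x ** 2 + track_y ** 2)
    # distance[-1] == dmg[-1] ~ 11.18 for this straight-line track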
Parameters ---------- @@ -417,21 +546,27 @@ class BoatStructure(object): Matlab data structure obtained from sio.loadmat """ - if hasattr(transect, 'boatVel'): - if hasattr(transect.boatVel, 'btVel'): - if hasattr(transect.boatVel.btVel, 'u_mps'): + if hasattr(transect, "boatVel"): + if hasattr(transect.boatVel, "btVel"): + if hasattr(transect.boatVel.btVel, "u_mps"): self.bt_vel = BoatData() self.bt_vel.populate_from_qrev_mat(transect.boatVel.btVel) - if hasattr(transect.boatVel, 'ggaVel'): - if hasattr(transect.boatVel.ggaVel, 'u_mps'): + if hasattr(transect.boatVel, "ggaVel"): + if hasattr(transect.boatVel.ggaVel, "u_mps"): self.gga_vel = BoatData() self.gga_vel.populate_from_qrev_mat(transect.boatVel.ggaVel) - if hasattr(transect.boatVel, 'vtgVel'): - if hasattr(transect.boatVel.vtgVel, 'u_mps'): + if hasattr(transect.boatVel, "vtgVel"): + if hasattr(transect.boatVel.vtgVel, "u_mps"): self.vtg_vel = BoatData() self.vtg_vel.populate_from_qrev_mat(transect.boatVel.vtgVel) - nav_dict = {'btVel':'bt_vel', 'bt_vel':'bt_vel', - 'ggaVel':'gga_vel', 'gga_vel':'gga_vel', - 'vtgVel':'vtg_vel', 'vtg_vel':'vtg_vel'} + if hasattr(transect.boatVel, "composite"): + self.composite = transect.boatVel.composite + nav_dict = { + "btVel": "bt_vel", + "bt_vel": "bt_vel", + "ggaVel": "gga_vel", + "gga_vel": "gga_vel", + "vtgVel": "vtg_vel", + "vtg_vel": "vtg_vel", + } self.selected = nav_dict[transect.boatVel.selected] - diff --git a/Classes/CompassCal.py b/Classes/CompassCal.py index 2b279e9..de93d0b 100644 --- a/Classes/CompassCal.py +++ b/Classes/CompassCal.py @@ -2,7 +2,8 @@ import re class CompassCal(object): - """Class stores compass calibration or evaluation data and parses the compass error from the raw data. + """Class stores compass calibration or evaluation data and parses the + compass error from the raw data. Attributes ---------- @@ -11,7 +12,8 @@ class CompassCal(object): data: str All calibration or evaluation data provided by the manufacturer. error: float - Remaining compass error after calibration or from evaluation, in degrees. + Remaining compass error after calibration or from evaluation, in + degrees. """ def __init__(self): @@ -34,8 +36,10 @@ class CompassCal(object): self.time_stamp = time_stamp self.data = data_in - splits = re.split('(Total error:|Double Cycle Errors:|Error from calibration:)', data_in) + splits = re.split( + "(Total error:|Double Cycle Errors:|Error from calibration:)", data_in + ) if len(splits) > 1: - self.error = re.search('\d+\.*\d*', splits[2])[0] + self.error = re.search("\d+\.*\d*", splits[2])[0] else: - self.error = 'N/A' + self.error = "N/A" diff --git a/Classes/ComputeExtrap.py b/Classes/ComputeExtrap.py index fd272d1..a728b44 100644 --- a/Classes/ComputeExtrap.py +++ b/Classes/ComputeExtrap.py @@ -5,7 +5,8 @@ from Classes.NormData import NormData class ComputeExtrap(object): - """Class to compute the optimized or manually specified extrapolation methods + """Class to compute the optimized or manually specified extrapolation + methods Attributes ---------- @@ -26,27 +27,35 @@ class ComputeExtrap(object): use_weighted: bool Specifies if discharge weighted medians are used in extrapolations sub_from_left: bool - Specifies if when subsectioning the subsection should start from left to right. + Specifies if when subsectioning the subsection should start from + left to right. 
use_q: bool Specifies to use the discharge rather than the xprod when subsectioning """ - + def __init__(self): """Initialize instance variables.""" - self.threshold = None # Threshold as a percent for determining if a median is valid - self.subsection = None # Percent of discharge, does not account for transect direction - self.fit_method = None # Method used to determine fit. Automatic or manual - self.norm_data = [] # Object of class norm data - self.sel_fit = [] # Object of class SelectFit - self.q_sensitivity = None # Object of class ExtrapQSensitivity - self.messages = [] # Variable for messages to UserWarning + self.threshold = None + self.subsection = None + self.fit_method = None + self.norm_data = [] + self.sel_fit = [] + self.q_sensitivity = None + self.messages = [] self.use_weighted = False self.use_q = False self.sub_from_left = False - - def populate_data(self, transects, compute_sensitivity=True, use_weighted=False, use_q=True, sub_from_left=True): + + def populate_data( + self, + transects, + compute_sensitivity=True, + use_weighted=False, + use_q=True, + sub_from_left=True, + ): """Store data in instance variables. Parameters @@ -56,24 +65,35 @@ class ComputeExtrap(object): compute_sensitivity: bool Determines is sensitivity should be computed. use_weighted: bool - Specifies if discharge weighted medians are used in extrapolations + Specifies if discharge weighted medians are used in extrapolations + use_q: bool + Specifies to use the discharge rather than the xprod when subsectioning + sub_from_left: bool + Specifies if when subsectioning the subsection should start from + left to right. """ self.threshold = 20 self.subsection = [0, 100] - self.fit_method = 'Automatic' + self.fit_method = "Automatic" self.use_weighted = use_weighted self.use_q = use_q self.sub_from_left = sub_from_left - self.process_profiles(transects=transects, data_type='q', use_weighted=use_weighted) + self.process_profiles( + transects=transects, data_type="q", use_weighted=use_weighted + ) - # Compute the sensitivity of the final discharge to changes in extrapolation methods + # Compute the sensitivity of the final discharge to changes in + # extrapolation methods if compute_sensitivity: self.q_sensitivity = ExtrapQSensitivity() - self.q_sensitivity.populate_data(transects=transects, extrap_fits=self.sel_fit) + self.q_sensitivity.populate_data( + transects=transects, extrap_fits=self.sel_fit + ) def populate_from_qrev_mat(self, meas_struct): - """Populates the object using data from previously saved QRev Matlab file. + """Populates the object using data from previously saved QRev Matlab + file. Parameters ---------- @@ -81,17 +101,18 @@ class ComputeExtrap(object): Matlab data structure obtained from sio.loadmat """ - if hasattr(meas_struct, 'extrapFit'): + if hasattr(meas_struct, "extrapFit"): self.threshold = meas_struct.extrapFit.threshold self.subsection = meas_struct.extrapFit.subsection self.fit_method = meas_struct.extrapFit.fitMethod - # Check for consistency between transects and norm_data. If only checked transects were saved, the - # normData and selfit will also include unchecked transects which must be removed prior to + # Check for consistency between transects and norm_data. If only + # checked transects were saved, the normData and selfit will also + # include unchecked transects which must be removed prior to # continuing to process. - # If only a single transect the meas_struct.transects will be structure not an array, so the len method - # won't work. 
+ # If only a single transect the meas_struct.transects will be + # structure not an array, so the len method won't work. try: n_transects = len(meas_struct.transects) except TypeError: @@ -113,7 +134,8 @@ class ComputeExtrap(object): else: for transect in meas_struct.transects: file_names.append(transect.fileName) - # Create a list of norm_data and sel_fit objects that match the filenames in transects + # Create a list of norm_data and sel_fit objects that match + # the filenames in transects for n in range(len(meas_struct.extrapFit.normData) - 1): if meas_struct.extrapFit.normData[n].fileName in file_names: valid_norm_data.append(meas_struct.extrapFit.normData[n]) @@ -129,17 +151,17 @@ class ComputeExtrap(object): self.sel_fit = SelectFit.qrev_mat_in(meas_struct.extrapFit) self.q_sensitivity = ExtrapQSensitivity() self.q_sensitivity.populate_from_qrev_mat(meas_struct.extrapFit) - if hasattr(meas_struct.extrapFit, 'use_weighted'): + if hasattr(meas_struct.extrapFit, "use_weighted"): self.use_weighted = meas_struct.extrapFit.use_weighted else: self.use_weighted = False - if hasattr(meas_struct.extrapFit, 'use_q'): + if hasattr(meas_struct.extrapFit, "use_q"): self.use_q = meas_struct.extrapFit.use_q else: self.use_q = False - if hasattr(meas_struct.extrapFit, 'sub_from_left'): + if hasattr(meas_struct.extrapFit, "sub_from_left"): self.sub_from_left = meas_struct.extrapFit.sub_from_left else: self.sub_from_left = False @@ -149,7 +171,9 @@ class ComputeExtrap(object): elif type(meas_struct.extrapFit.messages) is np.ndarray: self.messages = meas_struct.extrapFit.messages.tolist() - def process_profiles(self, transects, data_type, use_weighted=None, use_q=True, sub_from_left=True): + def process_profiles( + self, transects, data_type, use_weighted=None, use_q=True, sub_from_left=True + ): """Function that coordinates the fitting process. Parameters @@ -158,10 +182,14 @@ class ComputeExtrap(object): Object of TransectData data_type: str Type of data processing (q or v) + use_weighted: bool + Specifies if discharge weighted medians are used in extrapolations sub_from_left: bool - Specifies if when subsectioning the subsection should start from left to right. + Specifies if when subsectioning the subsection should start from + left to right. 
use_q: bool - Specifies to use the discharge rather than the xprod when subsectioning + Specifies to use the discharge rather than the xprod when + subsectioning """ if use_weighted is not None: self.use_weighted = use_weighted @@ -175,30 +203,36 @@ class ComputeExtrap(object): self.norm_data = [] for transect in transects: norm_data = NormData() - norm_data.populate_data(transect=transect, - data_type=data_type, - threshold=self.threshold, - data_extent=self.subsection, - use_weighted=self.use_weighted, - use_q=self.use_q, - sub_from_left=self.sub_from_left) + norm_data.populate_data( + transect=transect, + data_type=data_type, + threshold=self.threshold, + data_extent=self.subsection, + use_weighted=self.use_weighted, + use_q=self.use_q, + sub_from_left=self.sub_from_left, + ) self.norm_data.append(norm_data) # Compute composite normalized data comp_data = NormData() comp_data.use_q = self.norm_data[-1].use_q comp_data.sub_from_left = self.norm_data[-1].sub_from_left - comp_data.create_composite(transects=transects, norm_data=self.norm_data, threshold=self.threshold) + comp_data.create_composite( + transects=transects, norm_data=self.norm_data, threshold=self.threshold + ) self.norm_data.append(comp_data) # Compute the fit for the selected method - if self.fit_method == 'Manual': + if self.fit_method == "Manual": for n in range(len(transects)): - self.sel_fit[n].populate_data(normalized=self.norm_data[n], - fit_method=self.fit_method, - top=transects[n].extrap.top_method, - bot=transects[n].extrap.bot_method, - exponent=transects[n].extrap.exponent) + self.sel_fit[n].populate_data( + normalized=self.norm_data[n], + fit_method=self.fit_method, + top=transects[n].extrap.top_method, + bot=transects[n].extrap.bot_method, + exponent=transects[n].extrap.exponent, + ) else: self.sel_fit = [] for n in range(len(self.norm_data)): @@ -207,11 +241,15 @@ class ComputeExtrap(object): self.sel_fit.append(sel_fit) if self.sel_fit[-1].top_fit_r2 is not None: - # Evaluate if there is a potential that a 3-point top method may be appropriate - if (self.sel_fit[-1].top_fit_r2 > 0.9 or self.sel_fit[-1].top_r2 > 0.9) \ - and np.abs(self.sel_fit[-1].top_max_diff) > 0.2: - self.messages.append('The measurement profile may warrant a 3-point fit at the top') - + # Evaluate if there is a potential that a 3-point top method may + # be appropriate + if ( + self.sel_fit[-1].top_fit_r2 > 0.9 or self.sel_fit[-1].top_r2 > 0.9 + ) and np.abs(self.sel_fit[-1].top_max_diff) > 0.2: + self.messages.append( + "The measurement profile may warrant a 3-point fit at " "the top" + ) + def update_q_sensitivity(self, transects): """Updates the discharge sensitivity values. @@ -222,8 +260,17 @@ class ComputeExtrap(object): """ self.q_sensitivity = ExtrapQSensitivity() self.q_sensitivity.populate_data(transects, self.sel_fit) - - def change_fit_method(self, transects, new_fit_method, idx, top=None, bot=None, exponent=None, compute_qsens=True): + + def change_fit_method( + self, + transects, + new_fit_method, + idx, + top=None, + bot=None, + exponent=None, + compute_qsens=True, + ): """Function to change the extrapolation method. 

         Parameters
@@ -245,14 +292,17 @@ class ComputeExtrap(object):
         """

         self.fit_method = new_fit_method
-        self.sel_fit[idx].populate_data(self.norm_data[idx], new_fit_method, top=top, bot=bot, exponent=exponent)
-        if compute_qsens & idx == len(self.norm_data)-1:
+        self.sel_fit[idx].populate_data(
+            self.norm_data[idx], new_fit_method, top=top, bot=bot, exponent=exponent
+        )
+        if compute_qsens and idx == len(self.norm_data) - 1:
             self.q_sensitivity = ExtrapQSensitivity()
             self.q_sensitivity.populate_data(transects, self.sel_fit)
-
+
     def change_threshold(self, transects, data_type, threshold):
-        """Function to change the threshold for accepting the increment median as valid. The threshold
-        is in percent of the median number of points in all increments.
+        """Function to change the threshold for accepting the increment
+        median as valid. The threshold is in percent of the median number
+        of points in all increments.

         Parameters
         ----------
@@ -261,17 +311,19 @@ class ComputeExtrap(object):
         data_type: str
             Specifies the data type (discharge or velocity)
         threshold: float
-            Percent of data that must be in a median to include the median in the fit algorithm
+            Percent of data that must be in a median to include the median
+            in the fit algorithm
         """
-
+
         self.threshold = threshold
         self.process_profiles(transects=transects, data_type=data_type)
         self.q_sensitivity = ExtrapQSensitivity()
         self.q_sensitivity.populate_data(transects=transects, extrap_fits=self.sel_fit)
-
+
     def change_extents(self, transects, data_type, extents, use_q, sub_from_left):
-        """Function allows the data to be subsection by specifying the percent cumulative discharge
-        for the start and end points. Currently this function does not consider transect direction.
+        """Function allows the data to be subsectioned by specifying the
+        percent cumulative discharge for the start and end points.
+        Currently this function does not consider transect direction.

         Parameters
         ----------
@@ -280,20 +332,23 @@ class ComputeExtrap(object):
         data_type: str
             Specifies the data type (discharge or velocity)
         extents: list
-            List containing two values, the minimum and maximum discharge percentages to subsectioning
-        sub_from_left: bool
-            Specifies if when subsectioning the subsection should start from left to right.
-        use_q: bool
-            Specifies to use the discharge rather than the xprod when subsectioning
+            List containing two values, the minimum and maximum discharge
+            percentages to subsectioning
+        sub_from_left: bool
+            Specifies if when subsectioning the subsection should start from
+            left to right.
+        use_q: bool
+            Specifies to use the discharge rather than the xprod when
+            subsectioning
         """
-
+
         self.subsection = extents
         self.use_q = use_q
         self.sub_from_left = sub_from_left
-        self.process_profiles(transects=transects, data_type=data_type )
+        self.process_profiles(transects=transects, data_type=data_type)
         self.q_sensitivity = ExtrapQSensitivity()
         self.q_sensitivity.populate_data(transects=transects, extrap_fits=self.sel_fit)
-
+
     def change_data_type(self, transects, data_type):
         """Changes the data type to be processed in extrap.
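A hypothetical driver showing how the ComputeExtrap methods above chain together; `transects` is assumed to be a list of TransectData, and the manual Power/Power fit with exponent 0.1667 shown here is the conventional default profile:

    extrap = ComputeExtrap()
    extrap.populate_data(transects, use_weighted=True)  # auto fit + sensitivity
    # Restrict the fit to the 10-90% cumulative-discharge core of the section
    extrap.change_extents(transects, data_type="q", extents=[10, 90],
                          use_q=True, sub_from_left=True)
    # Force a manual fit on the composite (last) profile
    extrap.change_fit_method(transects, "Manual", idx=len(extrap.norm_data) - 1,
                             top="Power", bot="Power", exponent=0.1667)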
@@ -304,12 +363,14 @@ class ComputeExtrap(object):
         data_type: str
             Specifies the data type (discharge or velocity)
         """
-        if data_type.lower() == 'q':
+        if data_type.lower() == "q":
             use_weighted = self.use_weighted
         else:
             use_weighted = False

-        self.process_profiles(transects=transects, data_type=data_type, use_weighted=use_weighted)
+        self.process_profiles(
+            transects=transects, data_type=data_type, use_weighted=use_weighted
+        )
         self.q_sensitivity = ExtrapQSensitivity()
         self.q_sensitivity.populate_data(transects=transects, extrap_fits=self.sel_fit)

@@ -323,8 +384,11 @@ class ComputeExtrap(object):
         """
         self.threshold = 20
         self.subsection = [0, 100]
-        self.process_profiles(transects=transects, data_type='q', use_weighted=self.use_weighted)
+        self.process_profiles(
+            transects=transects, data_type="q", use_weighted=self.use_weighted
+        )

-        # Compute the sensitivity of the final discharge to changes in extrapolation methods
+        # Compute the sensitivity of the final discharge to changes in
+        # extrapolation methods
         self.q_sensitivity = ExtrapQSensitivity()
         self.q_sensitivity.populate_data(transects=transects, extrap_fits=self.sel_fit)
diff --git a/Classes/CoordError.py b/Classes/CoordError.py
index 1411a30..95800f9 100644
--- a/Classes/CoordError.py
+++ b/Classes/CoordError.py
@@ -1,4 +1,3 @@
 class CoordError(Exception):
-
     def __init__(self, text):
-        self.text = text
\ No newline at end of file
+        self.text = text
diff --git a/Classes/CrossSectionComp.py b/Classes/CrossSectionComp.py
new file mode 100644
index 0000000..9cc94c4
--- /dev/null
+++ b/Classes/CrossSectionComp.py
@@ -0,0 +1,673 @@
+import math
+import os
+
+import matplotlib.pyplot as plt
+import numpy as np
+import utm
+
+from Classes.BoatStructure import BoatStructure
+
+# ToDo: Add stats to show variability between transects, i.e. area and
+# mean depth variance.
+
+
+class CrossSectionComp(object):
+    """Creates average cross-section.
+
+    Attributes
+    ----------
+    cross_section: list
+        list of transect cross-sections stored as np.arrays.
+    checked_idx: np.array
+        Array of checked transect indices
+    gps: bool
+        True if GPS data available
+    rec_spacing: float
+        Recommended spacing for horizontal cross-section
+    unproj_xs: list of np.array
+        List of unprojected cross-sections
+
+    """
+
+    def __init__(self, transects, path=None):
+        """Initialize attributes"""
+
+        self.cross_section = []
+        self.checked_idx = []
+        self.gps = True
+        self.rec_spacing = 0
+        self.unproj_xs = []
+        self.zone_number = None
+        self.zone_letter = None
+
+        self.create_cross_sections(transects)
+
+        # Export CSV and PDF plots for testing
+        if path is not None:
+            self.export_csv(path)
+            self.export_plots(path)
+
+    def create_cross_sections(self, transects):
+        """Create projected cross-sections for each checked transect.
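Hypothetical use of the class above (`meas` and the file name are placeholders; a path ending in `_QRev.mat` matches the `file_name[:-8]` handling in the export methods):

    xs_comp = CrossSectionComp(meas.transects, path="09404900_QRev.mat")
    mean_xs = xs_comp.cross_section[-1]  # last entry is the averaged section
    # columns: lon, lat, x, y, station, depth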
+
+        Parameters
+        ----------
+        transects: TransectData
+            Object of TransectData containing boat speeds
+
+        """
+
+        # Initialize variables
+        self.cross_section = []
+        self.checked_idx = []
+        left_edge = []
+        start_edge = []
+        lon_list = []
+        lat_list = []
+        x_list = []
+        y_list = []
+        depth_list = []
+        self.gps = True
+        station_list = []
+
+        # Process each transect
+        for n, transect in enumerate(transects):
+
+            if transect.checked:
+
+                self.checked_idx.append(n)
+
+                # Compute boat track properties
+                boat_track = BoatStructure.compute_boat_track(transect)
+
+                if np.logical_not(np.all(np.isnan(boat_track["track_x_m"]))):
+
+                    # get x/y boat track data
+                    unit_x = boat_track["track_x_m"]
+                    unit_y = boat_track["track_y_m"]
+
+                    # if the start bank is right then flip the x/y coords.
+                    if transect.start_edge == "Right":
+                        unit_x = (np.amax(unit_x) - unit_x - (0 - np.amin(unit_x))) * -1
+                        unit_y = (np.amax(unit_y) - unit_y - (0 - np.amin(unit_y))) * -1
+                        edge = [transect.start_edge, transect.edges.left.distance_m]
+                    else:
+                        edge = [transect.start_edge, transect.edges.left.distance_m]
+
+                    start_edge.append(edge)
+                    x_list.append(unit_x)
+                    y_list.append(unit_y)
+                    left_edge.append(transect.edges.left.distance_m)
+
+                    # Get selected depth object
+                    depth = getattr(transect.depths, transect.depths.selected)
+                    depth_a = np.copy(depth.depth_processed_m)
+                    depth_a[np.isnan(depth_a)] = 0
+                    depth_list.append(depth_a)
+
+                    # pull GPS coords if available. If not fill with NaNs
+                    if hasattr(transect.gps, "gga_lon_ens_deg"):
+                        if transect.boat_vel.selected in ("vtg_vel", "gga_vel"):
+                            try:
+                                lon = transect.gps.gga_lon_ens_deg
+                                lat = transect.gps.gga_lat_ens_deg
+
+                                # replace nan values with 0 to avoid utm crash
+                                lon_nan = np.argwhere(np.isnan(lon))
+                                lat_nan = np.argwhere(np.isnan(lat))
+
+                                if len(lon_nan) > 0:
+                                    lon = np.nan_to_num(lon, nan=0)
+
+                                if len(lat_nan) > 0:
+                                    lat = np.nan_to_num(lat, nan=0)
+
+                                # convert lat/lon to UTM coords.
+ lat_lon = utm.from_latlon(lat, lon) + lat = lat_lon[0] + lon = lat_lon[1] + self.zone_number = lat_lon[2] + self.zone_letter = lat_lon[3] + + # replace 0 values with nan + if len(lat_nan) > 0: + for idx in lat_nan: + lat[idx] = np.nan + if len(lon_nan) > 0: + for idx in lon_nan: + lon[idx] = np.nan + + self.gps = True + lon_list.append(lon) + lat_list.append(lat) + + except (ValueError, TypeError): + lat, lon = self.create_empty_gps(unit_x) + self.gps = False + lon_list.append(lon) + lat_list.append(lat) + else: + lat, lon = self.create_empty_gps(unit_x) + self.gps = False + lon_list.append(lon) + lat_list.append(lat) + + else: + lat, lon = self.create_empty_gps(unit_x) + self.gps = False + lon_list.append(lon) + lat_list.append(lat) + + unprojected_xs = np.array([lon, lat, unit_x, unit_y, depth_a]).T + self.unproj_xs.append(unprojected_xs) + + if self.gps is True: + ( + lon_list, + lat_list, + gps_slope, + gps_intercept, + ) = self.create_projected_cross_section(lon_list, lat_list) + + x_list, y_list, xy_slope, xy_intercept = self.create_projected_cross_section( + x_list, y_list + ) + + x_list, y_list = self.adjust_xy_distances(x_list, y_list, xy_slope, start_edge) + + for xs in range(len(x_list)): + + station = np.sqrt(x_list[xs] ** 2 + y_list[xs] ** 2) + station_list.append(station) + + xs = np.array( + [ + lon_list[xs], + lat_list[xs], + x_list[xs], + y_list[xs], + station_list[xs], + depth_list[xs], + ] + ).T + + self.cross_section.append(xs) + + # set recommended spacing + self.compute_auto_spacing(station_list) + + # compute average cross section and append + avg_cs = self.average_cross_section(self.cross_section) + self.cross_section.append(avg_cs) + + @staticmethod + def create_empty_gps(unit_x): + + array_size = unit_x.shape + lon = np.empty(array_size) + lon[:] = np.nan + lat = np.empty(array_size) + lat[:] = np.nan + + return lat, lon + + @staticmethod + def create_projected_cross_section(x_list, y_list): + """Computes projected ship track to convert transects to. + + Parameters + ---------- + x_list: lst of np.arrays + arrays are either long or x data. + y_list: lst of np.arrays + arrays are either lat or y data. + + :return projected_x_list: lst of np.arrays + arrays are either long or x data. + projected_y_list: lst of np.arrays + arrays are either lat or y data. 
+
+        """
+
+        x_array = np.concatenate(x_list)
+        y_array = np.concatenate(y_list)
+
+        # remove nan values to avoid convergence crash
+        idx = np.argwhere(np.isnan(x_array))
+        x_array = np.delete(x_array, idx)
+        y_array = np.delete(y_array, idx)
+
+        idx = np.argwhere(np.isnan(y_array))
+        x_array = np.delete(x_array, idx)
+        y_array = np.delete(y_array, idx)
+
+        # Find ranges and extremes
+        x_w = np.amin(x_array)
+        x_e = np.amax(x_array)
+        y_s = np.amin(y_array)
+        y_n = np.amax(y_array)
+
+        x_rng = x_e - x_w
+        y_rng = y_n - y_s
+
+        # use least squares polynomial fit
+        if x_rng >= y_rng:
+            model = np.polyfit(x_array, y_array, 1)
+            # predict_function = np.poly1d(model)
+
+            slope = model[0]
+            intercept = model[1]
+
+        else:
+            model = np.polyfit(y_array, x_array, 1)
+            # predict_function = np.poly1d(model)
+
+            # slope and intercept in terms of x
+            slope = 1 / model[0]
+            intercept = -model[1] / model[0]
+
+        # map the ensembles to the mean cross-section line
+        projected_x_list = []
+        projected_y_list = []
+        for transect in range(len(x_list)):
+
+            projected_x = (
+                x_list[transect] - (slope * intercept) + (slope * y_list[transect])
+            ) / (slope**2 + 1)
+
+            projected_y = (
+                intercept + (slope * x_list[transect]) + (slope**2 * y_list[transect])
+            ) / (slope**2 + 1)
+
+            projected_x_list.append(projected_x)
+            projected_y_list.append(projected_y)
+
+        return projected_x_list, projected_y_list, slope, intercept
+
+    @staticmethod
+    def adjust_xy_distances(x_list, y_list, slope, start_edge):
+        """Adjusts projected x/y based on start edge distance.
+
+        Parameters
+        ----------
+        x_list: lst of np.arrays
+            arrays are either long or x data.
+        y_list: lst of np.arrays
+            arrays are either lat or y data.
+        slope: float
+            slope of projection
+        start_edge: list
+            list of start edges and distances
+
+        :return projected_x_list: lst of np.arrays
+            arrays are either long or x data.
+        projected_y_list: lst of np.arrays
+            arrays are either lat or y data.
+        """
+
+        theta = math.atan(slope)
+
+        projected_x_list = []
+        projected_y_list = []
+        for transect in range(len(x_list)):
+
+            # adjust the x/y lists using the left edge distance
+            if start_edge[transect][0] == "Left":
+                dist_x = start_edge[transect][1] * (math.cos(theta))
+                dist_y = start_edge[transect][1] * (math.sin(theta))
+                projected_x = x_list[transect] - dist_x
+                projected_y = y_list[transect] - dist_y
+            else:
+                dist_x = start_edge[transect][1] * (math.cos(theta))
+                dist_y = start_edge[transect][1] * (math.sin(theta))
+
+                projected_x = x_list[transect] - dist_x
+                projected_y = y_list[transect] - dist_y
+
+            projected_x_list.append(projected_x)
+            projected_y_list.append(projected_y)
+
+        return projected_x_list, projected_y_list
+
+    @staticmethod
+    def create_gps_xs(stations, x_start, y_start, slope):
+        """Creates x/y coordinates for the mean cross-section from its
+        stationing, start point, and slope.
+
+        Parameters
+        ----------
+        stations: np.arrays
+            mean cross-section.
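The projection in create_projected_cross_section above is the standard foot-of-perpendicular onto the fitted line y = slope*x + intercept. A quick numeric check (made-up point) that the projected point lies on the line and that the residual is orthogonal to the line direction (1, slope):

    x0, y0, slope, intercept = 3.0, 4.0, 0.5, 1.0
    xp = (x0 - slope * intercept + slope * y0) / (slope ** 2 + 1)
    yp = (intercept + slope * x0 + slope ** 2 * y0) / (slope ** 2 + 1)
    assert abs(yp - (slope * xp + intercept)) < 1e-12   # on the line
    assert abs((x0 - xp) + slope * (y0 - yp)) < 1e-12   # residual is normal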
+        x_start: float
+            starting point of transect on x axis
+        y_start: float
+            starting point of transect on y axis
+        slope: float
+            slope of projection
+
+        :return x_array: np.arrays
+            mean cross-section x values
+        y_array: np.arrays
+            mean cross-section y values
+        """
+
+        # compute the geographic angle of the mean cross-section
+        theta = math.atan(slope)
+
+        # create x and y arrays
+        x_array = (stations * (math.cos(theta))) + x_start
+        y_array = (stations * (math.sin(theta))) + y_start
+
+        return x_array, y_array
+
+    def average_cross_section(self, cross_section, hor_spacing="Auto"):
+        """Compute average cross-section.
+
+        Parameters
+        ----------
+        cross_section: list of np.arrays
+            list of cross-section
+        hor_spacing: 'Auto' or float
+            spacing for stationing. Defaults to 'Auto'
+
+        :return average_cs
+
+        """
+
+        x_list = []
+        y_list = []
+        depth_list = []
+        length_list = []
+        start_list = []
+        end_list = []
+        lon_end_lst = []
+        lat_end_lst = []
+        lon_start_lst = []
+        lat_start_lst = []
+
+        # find max and min for all the transects to create mean xs
+        for transect in cross_section:
+            trans_end = np.max(np.array(transect[:, 4], dtype="f"), axis=0)
+            trans_end_idx = np.argmax(np.array(transect[:, 4], dtype="f"), axis=0)
+
+            trans_start = np.min(np.array(transect[:, 4], dtype="f"), axis=0)
+            trans_start_idx = np.argmin(np.array(transect[:, 4], dtype="f"), axis=0)
+
+            lat_start_lst.append(transect[:, 1][trans_start_idx])
+            lon_start_lst.append(transect[:, 0][trans_start_idx])
+            lat_end_lst.append(transect[:, 1][trans_end_idx])
+            lon_end_lst.append(transect[:, 0][trans_end_idx])
+
+            rows = transect.shape[0]
+
+            start_list.append(trans_start)
+            end_list.append(trans_end)
+            length_list.append(rows)
+
+        max_trans_start = np.min(start_list)
+        idx = np.argmin(start_list)
+        lat_start = lat_start_lst[idx]
+        lon_start = lon_start_lst[idx]
+        min_trans_end = np.max(end_list)
+        idx = np.argmax(end_list)
+        lat_end = lat_end_lst[idx]
+        lon_end = lon_end_lst[idx]
+
+        # set horizontal spacing, defaulting to the auto-computed
+        # recommended spacing
+        if hor_spacing == "Auto":
+            spacing = self.rec_spacing
+        else:
+            spacing = hor_spacing
+
+        # create mean cross-section stationing line
+        station_line = np.arange(max_trans_start, min_trans_end, spacing)
+
+        # create nan array
+        num_pnts = station_line.shape[0]
+        nan_array = np.empty(num_pnts)
+        nan_array[:] = np.nan
+
+        # create GPS array for mcs
+        if not self.gps:
+            lon_array = nan_array.copy()
+            lat_array = nan_array.copy()
+        else:
+            lon_array = np.linspace(lon_start, lon_end, num_pnts)
+            lat_array = np.linspace(lat_start, lat_end, num_pnts)
+
+        for transect in cross_section:
+
+            # sort and separate individual arrays and set d-type.
+            sort_array = transect[transect[:, 4].argsort()].copy()
+
+            station_array = np.array(sort_array[:, 4], dtype="f")
+            depth_array = np.array(sort_array[:, 5], dtype="f")
+            x_array = np.array(sort_array[:, 2], dtype="f")
+            y_array = np.array(sort_array[:, 3], dtype="f")
+
+            # get index for stations outside current transect
+            station_max = station_array.max()
+            station_min = station_array.min()
+            greater = np.argwhere(station_line >= station_max)
+            less = np.argwhere(station_line <= station_min)
+
+            # Create stationing for transect
+            one_m_station = np.interp(station_line, station_array, station_array)
+
+            # create array of intervals and interpolate x/y and depth
+            new_x = np.interp(one_m_station, station_array, x_array)
+            new_y = np.interp(one_m_station, station_array, y_array)
+            new_depth = np.interp(one_m_station, station_array, depth_array)
+
+            # fill extents of transect with NaNs to avoid repeating values
+            if len(greater) > 0:
+                for idx in greater:
+                    new_depth[idx] = np.nan
+                    new_x[idx] = np.nan
+                    new_y[idx] = np.nan
+            if len(less) > 0:
+                for idx in less:
+                    new_depth[idx] = np.nan
+                    new_x[idx] = np.nan
+                    new_y[idx] = np.nan
+
+            x_list.append(new_x)
+            y_list.append(new_y)
+            depth_list.append(new_depth)
+
+        # create new arrays from list of arrays for each element
+        x_array = np.array(x_list, dtype="f")
+        y_array = np.array(y_list, dtype="f")
+        depth_array = np.array(depth_list, dtype="f")
+
+        # create mean array of each element
+        depth_avg = np.nanmean(depth_array, axis=0)
+        x_avg = np.nanmean(x_array, axis=0)
+        y_avg = np.nanmean(y_array, axis=0)
+
+        # create new array
+        average_cs = np.array(
+            [lon_array, lat_array, x_avg, y_avg, station_line, depth_avg]
+        ).T
+
+        return average_cs
+
+    def compute_auto_spacing(self, station_list):
+        """Compute recommended horizontal spacing.
+
+        Parameters
+        ----------
+        station_list: list of np.arrays
+            list of stations
+
+        """
+
+        stations = np.concatenate(station_list)
+
+        # remove nan values to avoid convergence crash
+        stations = stations[~np.isnan(stations)]
+
+        station_dif = np.diff(stations)
+        diff_med = np.median(station_dif)
+        diff_std = np.std(station_dif)
+
+        self.rec_spacing = diff_med + diff_std
+
+    def export_csv(self, file_name):
+        """Exports CSV file for each checked transect.
+
+        Parameters
+        ----------
+        file_name: str
+            path to save files
+        """
+        # Todo add comment lines at the top of the file for metadata.
+        for n in range(len(self.cross_section)):
+
+            if n == (len(self.cross_section) - 1):
+                f_name = "cross_section_mean"
+            else:
+                f_name = "transect_" + str(self.checked_idx[n])
+
+            path = file_name[:-8] + f_name + "_QRev" + ".csv"
+            path = os.path.join(os.getcwd(), path)
+
+            np.savetxt(path, self.cross_section[n], delimiter=",", fmt="%s")
+
+    def export_plots(self, file_name):
+        """Exports PDF file of cross-section plots. This method is used for
+        troubleshooting.
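The averaging in average_cross_section above reduces to: interpolate each transect onto the shared stationing, blank stations at or beyond a transect's own extent, then take the element-wise nanmean. A compact sketch with hypothetical per-transect arrays (each station array sorted ascending):

    import numpy as np

    station_arrays = [np.array([0.0, 5.0, 10.0]), np.array([1.0, 6.0, 11.0])]
    depth_arrays = [np.array([0.5, 2.0, 0.4]), np.array([0.6, 2.2, 0.5])]
    station_line = np.arange(0.0, 11.0, 0.5)

    interpolated = []
    for st, d in zip(station_arrays, depth_arrays):
        di = np.interp(station_line, st, d)
        di[(station_line <= st.min()) | (station_line >= st.max())] = np.nan
        interpolated.append(di)
    depth_avg = np.nanmean(np.array(interpolated), axis=0)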
+ + Parameters + ---------- + file_name: str + path to save files + """ + + mean_survey = self.cross_section[-1].T + + try: + # XY plot + fig = plt.figure() + ax_1 = fig.add_subplot(1, 1, 1) + + for index, xs in enumerate(self.cross_section[:-1]): + survey = xs.T + + ax_1.plot(survey[2], survey[3], ".", label=index) + + ax_1.plot( + mean_survey[2], mean_survey[3], ".", label="Projected Cross-Section" + ) + + # set axis labels and legend + ax_1.set_xlabel("X") + ax_1.set_ylabel("Y") + ax_1.legend() + + # save plot to PDF + path = file_name[:-8] + "plots" + "_QRev" + ".pdf" + path = os.path.join(os.getcwd(), path) + fig.savefig(path) + + except BaseException: + pass + + try: + # mean depth plot + fig_2 = plt.figure() + ax_2 = fig_2.add_subplot(1, 1, 1) + + for index, xs in enumerate(self.cross_section[:-1]): + survey = xs.T + + ax_2.plot(survey[4], survey[5], "-", label=index) + + ax_2.plot( + mean_survey[4], mean_survey[5], "-", label="Average Cross-Section" + ) + + ax_2.set_xlabel("Station") + ax_2.set_ylabel("Depth") + ax_2.invert_yaxis() + ax_2.legend() + + path_2 = file_name[:-8] + "plots_2" + "_QRev" + ".pdf" + path_2 = os.path.join(os.getcwd(), path_2) + fig_2.savefig(path_2) + + except BaseException: + pass + + try: + # mean GPS plot + fig_3 = plt.figure() + ax_3 = fig_3.add_subplot(1, 1, 1) + + for index, xs in enumerate(self.cross_section[:-1]): + survey = xs.T + + ax_3.plot(survey[0], survey[1], ".", label=index) + + ax_3.plot( + mean_survey[0], mean_survey[1], ".", label="Projected Cross-Section" + ) + + ax_3.set_xlabel("Long UTM") + ax_3.set_ylabel("Lat UTM") + ax_3.legend() + + path_3 = file_name[:-8] + "plots_3" + "_QRev" + ".pdf" + path_3 = os.path.join(os.getcwd(), path_3) + fig_3.savefig(path_3) + + except BaseException: + pass + + try: + # unprojected XY with mean xs + fig_4 = plt.figure() + ax_4 = fig_4.add_subplot(1, 1, 1) + for index, xs in enumerate(self.unproj_xs): + survey = xs.T + + ax_4.plot(survey[2], survey[3], "-", label=index) + + ax_4.plot( + mean_survey[2], mean_survey[3], "-", label="Projected Cross-Section" + ) + + ax_4.set_xlabel("X") + ax_4.set_ylabel("Y") + ax_4.legend() + + path_4 = file_name[:-8] + "plots_4" + "_QRev" + ".pdf" + path_4 = os.path.join(os.getcwd(), path_4) + fig_4.savefig(path_4) + + except BaseException: + pass + + try: + # unprojected GPS with mean xs + fig_5 = plt.figure() + ax_5 = fig_5.add_subplot(1, 1, 1) + for index, xs in enumerate(self.unproj_xs): + survey = xs.T + + ax_5.plot(survey[0], survey[1], "-", label=index) + + ax_5.plot( + mean_survey[0], mean_survey[1], "-", label="Projected Cross-Section" + ) + + ax_5.set_xlabel("Long UTM") + ax_5.set_ylabel("Lat UTM") + ax_5.legend() + + path_5 = file_name[:-8] + "plots_5" + "_QRev" + ".pdf" + path_5 = os.path.join(os.getcwd(), path_5) + fig_5.savefig(path_5) + + except BaseException: + pass diff --git a/Classes/DateTime.py b/Classes/DateTime.py index 9f7e3d3..6d811f2 100644 --- a/Classes/DateTime.py +++ b/Classes/DateTime.py @@ -1,5 +1,6 @@ import numpy as np + class DateTime(object): """This stores the date and time data in Python compatible format. 
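For context on the conversion in populate_from_qrev_mat below: Matlab datenums count days from year 0, and datenum('1970-01-01') is 719529, so subtracting that offset and multiplying by 86400 yields a Unix-epoch timestamp in seconds (the tiny fractional tail in the constant presumably absorbs floating-point round-off in the stored serial times):

    seconds_day = 86400
    time_correction = 719529.0000000003
    # datenum 719530.0 is 1970-01-02 00:00, i.e. one day past the epoch:
    unix_time = (719530.0 - time_correction) * seconds_day  # ~86400.0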
@@ -8,24 +9,26 @@ class DateTime(object): date: str Measurement date as mm/dd/yyyy start_serial_time: float - Python serial time for start of transect (seconds since 1/1/1970), timestamp + Python serial time for start of transect (seconds since 1/1/1970), + timestamp end_serial_time: float - Python serial time for end of transect (seconds since 1/1/1970), timestamp + Python serial time for end of transect (seconds since 1/1/1970), + timestamp transect_duration_sec: float Duration of transect, in seconds. ens_duration_sec: np.array(float) Duration of each ensemble, in seconds. """ - + def __init__(self): """Initialize class and instance variables.""" - self.date = None # Measurement date mm/dd/yyyy - self.start_serial_time = None # Python serial time for start of transect, timestamp - self.end_serial_time = None # Python serial time for end of transect, timestamp - self.transect_duration_sec = None # Duration of transect in seconds - self.ens_duration_sec = None # Duration of each ensemble in seconds - + self.date = None + self.start_serial_time = None + self.end_serial_time = None + self.transect_duration_sec = None + self.ens_duration_sec = None + def populate_data(self, date_in, start_in, end_in, ens_dur_in): """Populate data in object. @@ -40,7 +43,7 @@ class DateTime(object): ens_dur_in: np.array(float) Duration of each ensemble, in seconds. """ - + self.date = date_in self.start_serial_time = start_in self.end_serial_time = end_in @@ -48,7 +51,8 @@ class DateTime(object): self.ens_duration_sec = ens_dur_in.astype(float) def populate_from_qrev_mat(self, transect): - """Populates the object using data from previously saved QRev Matlab file. + """Populates the object using data from previously saved QRev Matlab + file. Parameters ---------- @@ -56,22 +60,19 @@ class DateTime(object): Matlab data structure obtained from sio.loadmat """ - if hasattr(transect, 'dateTime'): + if hasattr(transect, "dateTime"): seconds_day = 86400 time_correction = 719529.0000000003 self.date = transect.dateTime.date - self.start_serial_time = (transect.dateTime.startSerialTime - time_correction) * seconds_day - self.end_serial_time = (transect.dateTime.endSerialTime - time_correction) * seconds_day + self.start_serial_time = ( + transect.dateTime.startSerialTime - time_correction + ) * seconds_day + self.end_serial_time = ( + transect.dateTime.endSerialTime - time_correction + ) * seconds_day self.transect_duration_sec = float(transect.dateTime.transectDuration_sec) try: self.ens_duration_sec = transect.dateTime.ensDuration_sec.astype(float) except AttributeError: self.ens_duration_sec = np.array([np.nan]) - - # - # self.date = transect.dateTime.date - # self.start_serial_time = transect.dateTime.startSerialTime - # self.end_serial_time = transect.dateTime.endSerialTime - # self.transect_duration_sec = float(transect.dateTime.transectDuration_sec) - # self.ens_duration_sec = transect.dateTime.ensDuration_sec.astype(float) diff --git a/Classes/DepthData.py b/Classes/DepthData.py index 73b5e87..a891f7a 100644 --- a/Classes/DepthData.py +++ b/Classes/DepthData.py @@ -1,7 +1,5 @@ import copy -import concurrent.futures import numpy as np -import itertools as it from numpy.matlib import repmat from MiscLibs.common_functions import iqr, nan_less, nan_greater from MiscLibs.robust_loess_compiled import rloess @@ -10,16 +8,17 @@ from MiscLibs.run_iqr import run_iqr, compute_quantile class DepthData(object): - """Process and store depth data. - Supported sources include bottom track + """Process and store depth data. 
Supported sources include bottom track vertical beam, and external depth sounder. Attributes ---------- depth_orig_m: np.array - Original multi-beam depth data from transect file (includes draft_orig) in meters. + Original multi-beam depth data from transect file (includes + draft_orig) in meters. depth_beams_m: np.array - Depth data from transect file adjusted for any draft changes, in meters. + Depth data from transect file adjusted for any draft changes, + in meters. depth_processed_m: np.array Depth data filtered and interpolated. depth_freq_kHz: float @@ -37,11 +36,13 @@ class DepthData(object): depth_cell_depth_orig_m: np.array Depth to centerline of depth cells in raw data, in meters. depth_cell_depth_m: np.array - Depth to centerline of depth cells adjusted for draft or speed of sound changes, in meters. + Depth to centerline of depth cells adjusted for draft or speed + of sound changes, in meters. depth_cell_size_orig_m: np.array Size of depth cells in meters from raw data, in meters. depth_cell_size_m: - Size of depth cells adjusted for draft or speed of sound changes, in meters. + Size of depth cells adjusted for draft or speed of sound + changes, in meters. smooth_depth: np.array Smoothed beam depth, in meters. smooth_upper_limit: np.array @@ -49,7 +50,8 @@ class DepthData(object): smooth_lower_limit: np.array Smooth function lower limit or window, in meters. avg_method:str - Defines averaging method: "Simple", "IDW", only applicable to bottom track. + Defines averaging method: "Simple", "IDW", only applicable to + bottom track. filter_type: str Type of filter: "None", "TRDI", "Smooth". interp_type: str @@ -61,35 +63,36 @@ class DepthData(object): valid_beams: np.array Logical array, 1 row for each beam identifying valid data. """ - - def __init__(self): - """Initialize attributes. 
- """ - self.depth_orig_m = None # Original multi-beam depth data from transect file (includes draft_orig) in meters - self.depth_beams_m = None # Depth data from transect file adjusted for any draft changes, in meters - self.depth_processed_m = None # Depth data filtered and interpolated - self.depth_freq_kHz = None # Defines ADCP frequency used of each raw data point - self.depth_invalid_index = None # Index of depths marked invalid - self.depth_source = None # Source of depth data ("BT", "VB", "DS") - self.depth_source_ens = None # Source of each depth value ("BT", "VB", "DS", "IN") - self.draft_orig_m = None # Original draft from data files, in meters - self.draft_use_m = None # Draft used in computation of depth_beams_m and depth_cell_depths_m - self.depth_cell_depth_orig_m = None # Depth cell range from the transducer, in meters - self.depth_cell_depth_m = None # Depth to centerline of depth cells, in meters - self.depth_cell_size_orig_m = None # Size of depth cells in meters from raw data - self.depth_cell_size_m = None # Size of depth cells in meters - self.smooth_depth = None # Smoothed beam depth - self.smooth_upper_limit = None # Smooth function upper limit of window - self.smooth_lower_limit = None # Smooth function lower limit or window - self.avg_method = None # Defines averaging method: "Simple", "IDW" - self.filter_type = None # Type of filter: "None", "TRDI", "Smooth" - self.interp_type = None # Type of interpolation: "None", "Linear", "Smooth" - self.valid_data_method = None # QRev or TRDI - self.valid_data = None # Logical array of valid mean depth for each ensemble - self.valid_beams = None # Logical array, 1 row for each beam identifying valid data - - def populate_data(self, depth_in, source_in, freq_in, draft_in, cell_depth_in, cell_size_in): + def __init__(self): + """Initialize attributes.""" + + self.depth_orig_m = None + self.depth_beams_m = None + self.depth_processed_m = None + self.depth_freq_kHz = None + self.depth_invalid_index = None + self.depth_source = None + self.depth_source_ens = None + self.draft_orig_m = None + self.draft_use_m = None + self.depth_cell_depth_orig_m = None + self.depth_cell_depth_m = None + self.depth_cell_size_orig_m = None + self.depth_cell_size_m = None + self.smooth_depth = None + self.smooth_upper_limit = None + self.smooth_lower_limit = None + self.avg_method = None + self.filter_type = None + self.interp_type = None + self.valid_data_method = None + self.valid_data = None + self.valid_beams = None + + def populate_data( + self, depth_in, source_in, freq_in, draft_in, cell_depth_in, cell_size_in + ): """Stores data in DepthData. Parameters @@ -103,11 +106,13 @@ class DepthData(object): draft_in: float Draft of transducer used to measure depths, in meters. cell_depth_in: np.array - Depth to centerline of each depth cell, in meters. If source does not have depth cells the depth cell depth + Depth to centerline of each depth cell, in meters. If source + does not have depth cells the depth cell depth from bottom track should be used. cell_size_in: np.array - Size of each depth cell, in meters. If source does not have depth cells the depth cell size - from bottom track should be used. + Size of each depth cell, in meters. If source does not have + depth cells the depth cell size from bottom track should be used. 
+ """ self.depth_orig_m = depth_in @@ -117,15 +122,15 @@ class DepthData(object): self.depth_freq_kHz = freq_in self.draft_orig_m = draft_in self.draft_use_m = draft_in - self.filter_type = 'None' - self.interp_type = 'None' - self.valid_data_method = 'QRev' - + self.filter_type = "None" + self.interp_type = "None" + self.valid_data_method = "QRev" + # For BT data set method to average multiple beam depths - if source_in == 'BT': - self.avg_method = 'IDW' + if source_in == "BT": + self.avg_method = "IDW" else: - self.avg_method = 'None' + self.avg_method = "None" # Store cell data self.depth_cell_depth_orig_m = cell_depth_in @@ -134,10 +139,11 @@ class DepthData(object): self.depth_cell_depth_m = cell_depth_in # Remove all filters to initialize data - self.apply_filter('dummy', filter_type='Off') + self.apply_filter("dummy", filter_type="Off") def populate_from_qrev_mat(self, mat_data): - """Populates the object using data from previously saved QRev Matlab file. + """Populates the object using data from previously saved QRev Matlab + file. Parameters ---------- @@ -167,7 +173,7 @@ class DepthData(object): self.depth_cell_size_m = mat_data.depthCellSize_m # Configure arrays properly for VB and DS - if mat_data.depthSource == 'BT': + if mat_data.depthSource == "BT": self.depth_beams_m = mat_data.depthBeams_m self.depth_orig_m = mat_data.depthOrig_m self.smooth_depth = mat_data.smoothDepth @@ -197,44 +203,69 @@ class DepthData(object): self.valid_beams = self.valid_beams.astype(bool) - # Handle data with one ensemble and multiple cells or one cell and multiple ensembles + # Handle data with one ensemble and multiple cells or one cell and + # multiple ensembles if len(self.depth_beams_m.shape) == 1: # One ensemble multiple cells - self.depth_beams_m = self.depth_beams_m.reshape(self.depth_beams_m.shape[0], 1) - self.depth_cell_depth_m = self.depth_cell_depth_m.reshape(self.depth_cell_depth_m.shape[0], 1) + self.depth_beams_m = self.depth_beams_m.reshape( + self.depth_beams_m.shape[0], 1 + ) + self.depth_cell_depth_m = self.depth_cell_depth_m.reshape( + self.depth_cell_depth_m.shape[0], 1 + ) self.depth_cell_depth_orig_m = self.depth_cell_depth_orig_m.reshape( - self.depth_cell_depth_orig_m.shape[0], 1) - self.depth_cell_size_m = self.depth_cell_size_m.reshape(self.depth_cell_size_m.shape[0], 1) - self.depth_cell_size_orig_m = self.depth_cell_size_orig_m.reshape(self.depth_cell_size_orig_m.shape[0], 1) + self.depth_cell_depth_orig_m.shape[0], 1 + ) + self.depth_cell_size_m = self.depth_cell_size_m.reshape( + self.depth_cell_size_m.shape[0], 1 + ) + self.depth_cell_size_orig_m = self.depth_cell_size_orig_m.reshape( + self.depth_cell_size_orig_m.shape[0], 1 + ) self.depth_orig_m = self.depth_orig_m.reshape(self.depth_orig_m.shape[0], 1) self.depth_processed_m = np.array([self.depth_processed_m]) self.smooth_depth = self.smooth_depth.reshape(self.smooth_depth.shape[0], 1) - self.smooth_lower_limit = self.smooth_lower_limit.reshape(self.smooth_lower_limit.shape[0], 1) - self.smooth_upper_limit = self.smooth_upper_limit.reshape(self.smooth_upper_limit.shape[0], 1) + self.smooth_lower_limit = self.smooth_lower_limit.reshape( + self.smooth_lower_limit.shape[0], 1 + ) + self.smooth_upper_limit = self.smooth_upper_limit.reshape( + self.smooth_upper_limit.shape[0], 1 + ) self.valid_data = np.array([self.valid_data]) self.depth_source_ens = np.array([mat_data.depthSourceEns]) elif len(self.depth_cell_depth_m.shape) == 1: # One cell, multiple ensembles - self.depth_cell_depth_m = 
self.depth_cell_depth_m.reshape(1, self.depth_cell_depth_m.shape[0]) - self.depth_cell_depth_orig_m = self.depth_cell_depth_orig_m.reshape(1, - self.depth_cell_depth_orig_m.shape[0]) - self.depth_cell_size_m = self.depth_cell_size_m.reshape(1, self.depth_cell_size_m.shape[0]) - self.depth_cell_size_orig_m = self.depth_cell_size_orig_m.reshape(1, self.depth_cell_size_orig_m.shape[0]) + self.depth_cell_depth_m = self.depth_cell_depth_m.reshape( + 1, self.depth_cell_depth_m.shape[0] + ) + self.depth_cell_depth_orig_m = self.depth_cell_depth_orig_m.reshape( + 1, self.depth_cell_depth_orig_m.shape[0] + ) + self.depth_cell_size_m = self.depth_cell_size_m.reshape( + 1, self.depth_cell_size_m.shape[0] + ) + self.depth_cell_size_orig_m = self.depth_cell_size_orig_m.reshape( + 1, self.depth_cell_size_orig_m.shape[0] + ) def change_draft(self, draft): """Changes the draft for object - - draft: new draft for object + + Parameters + ---------- + draft: float + New draft for object """ + # Compute draft change draft_change = draft - self.draft_use_m self.draft_use_m = draft - + # Apply draft to ensemble depths if BT or VB - if self.depth_source != 'DS': + if self.depth_source != "DS": self.depth_beams_m = self.depth_beams_m + draft_change - self.depth_processed_m = self.depth_processed_m + draft_change - + self.depth_processed_m = self.depth_processed_m + draft_change + # Apply draft to depth cell locations if len(self.depth_cell_depth_m) > 0: self.depth_cell_depth_m = self.depth_cell_depth_m + draft_change @@ -249,7 +280,7 @@ class DepthData(object): bt_depths: DepthData Object of DepthData with bottom track depths """ - + self.depth_cell_depth_orig_m = bt_depths.depth_cell_depth_orig_m self.depth_cell_size_m = bt_depths.depth_cell_size_m self.depth_cell_depth_m = bt_depths.depth_cell_depth_m @@ -271,7 +302,9 @@ class DepthData(object): depth[np.logical_not(self.valid_beams)] = np.nan # Compute average depths - self.depth_processed_m = DepthData.average_depth(depth, self.draft_use_m, self.avg_method) + self.depth_processed_m = DepthData.average_depth( + depth, self.draft_use_m, self.avg_method + ) # Set depths to nan if depth are not valid beam depths self.depth_processed_m[np.equal(self.valid_data, False)] = np.nan @@ -288,31 +321,31 @@ class DepthData(object): """ # Compute selected filter - if filter_type == 'Off' or filter_type is None: + if filter_type == "Off" or filter_type is None: # No filter self.filter_none() # Savitzky-Golay - elif filter_type == 'SavGol': + elif filter_type == "SavGol": self.filter_savgol(transect) - elif filter_type == 'Smooth': + elif filter_type == "Smooth": # Smooth filter self.filter_smooth(transect) - elif filter_type == 'TRDI' and self.depth_source == 'BT': + elif filter_type == "TRDI" and self.depth_source == "BT": # TRDI filter for multiple returns self.filter_trdi() - self.filter_type = 'TRDI' - + self.filter_type = "TRDI" + self.valid_mean_data() # Update processed depth with filtered results - if self.depth_source == 'BT': + if self.depth_source == "BT": # Multiple beams require averaging to obtain 1-D array self.compute_avg_bt_depth() else: # Single beam (VB or DS) save to 1-D array self.depth_processed_m = np.array(self.depth_beams_m[0, :]) self.depth_processed_m[np.squeeze(np.equal(self.valid_data, 0))] = np.nan - + def apply_interpolation(self, transect, method=None): """Coordinates application of interpolations @@ -323,29 +356,29 @@ class DepthData(object): method: str Type of interpolation to apply (None, HoldLast, Smooth, Linear) """ - + # Determine 
interpolation to apply
         if method is None:
             method = self.interp_type
-        
+
         # Apply selected interpolation
         self.interp_type = method
 
         # No interpolation
-        if method == 'None':
+        if method == "None":
             self.interpolate_none()
 
         # Hold last valid depth indefinitely
-        elif method == 'HoldLast':
+        elif method == "HoldLast":
             self.interpolate_hold_last()
 
         # Use values from a Loess smooth
-        elif method == 'Smooth':
+        elif method == "Smooth":
             self.interpolate_smooth()
 
         # Linear interpolation
         else:
             self.interpolate_linear(transect=transect)
-        
+
         # Identify ensembles with interpolated depths
         idx = np.where(np.logical_not(self.valid_data[:]))
         if len(idx[0]) > 0:
             idx = idx[0]
             idx2 = np.where(np.logical_not(np.isnan(self.depth_processed_m[idx])))
             if len(idx2) > 0:
                 idx2 = idx2[0]
-                self.depth_source_ens[idx[idx2]] = 'IN'
-        
+                self.depth_source_ens[idx[idx2]] = "IN"
+
     def apply_composite(self, comp_depth, comp_source):
         """Applies the data from CompDepth computed in DepthStructure
         to DepthData object
@@ -366,17 +399,17 @@ class DepthData(object):
         comp_source: str
             Source of composite depth (BT, VB, DS)
         """
-        
+
         # Assign composite depth to property
         self.depth_processed_m = comp_depth
-        
+
         # Assign appropriate composite source for each ensemble
-        self.depth_source_ens[comp_source == 1] = 'BT'
-        self.depth_source_ens[comp_source == 2] = 'VB'
-        self.depth_source_ens[comp_source == 3] = 'DS'
-        self.depth_source_ens[comp_source == 4] = 'IN'
-        self.depth_source_ens[comp_source == 0] = 'NA'
-        
+        self.depth_source_ens[comp_source == 1] = "BT"
+        self.depth_source_ens[comp_source == 2] = "VB"
+        self.depth_source_ens[comp_source == 3] = "DS"
+        self.depth_source_ens[comp_source == 4] = "IN"
+        self.depth_source_ens[comp_source == 0] = "NA"
+
     def sos_correction(self, ratio):
         """Correct depth for new speed of sound setting
@@ -385,53 +418,60 @@ class DepthData(object):
         ratio: float
             Ratio of new to old speed of sound value
         """
-        
+
         # Correct unprocessed depths
-        self.depth_beams_m = self.draft_use_m+np.multiply(self.depth_beams_m-self.draft_use_m, ratio)
-        
+        self.depth_beams_m = self.draft_use_m + np.multiply(
+            self.depth_beams_m - self.draft_use_m, ratio
+        )
+
         # Correct processed depths
-        self.depth_processed_m = self.draft_use_m+np.multiply(self.depth_processed_m-self.draft_use_m, ratio)
-        
+        self.depth_processed_m = self.draft_use_m + np.multiply(
+            self.depth_processed_m - self.draft_use_m, ratio
+        )
+
         # Correct cell size and location
         self.depth_cell_size_m = np.multiply(self.depth_cell_size_m, ratio)
-        self.depth_cell_depth_m = self.draft_use_m + np.multiply(self.depth_cell_depth_m - self.draft_use_m, ratio)
-        
+        self.depth_cell_depth_m = self.draft_use_m + np.multiply(
+            self.depth_cell_depth_m - self.draft_use_m, ratio
+        )
+
     def valid_mean_data(self):
-        """Determines if raw data are sufficient to compute a valid depth without interpolation.
+        """Determines if raw data are sufficient to compute a valid depth
+        without interpolation.
         """
-        
-        if self.depth_source == 'BT':
+
+        if self.depth_source == "BT":
             self.valid_data = np.tile(True, self.valid_beams.shape[1])
             nvalid = np.sum(self.valid_beams, axis=0)
-            
-            if self.valid_data_method == 'TRDI':
+
+            if self.valid_data_method == "TRDI":
                 self.valid_data[nvalid < 3] = False
             else:
                 self.valid_data[nvalid < 2] = False
         else:
             self.valid_data = self.valid_beams[0, :]
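Note that sos_correction above rescales only the water column: the draft is a fixed mechanical offset of the transducer, so just the sound-travel portion (depth - draft) is multiplied by the new-to-old speed-of-sound ratio. A quick numeric check of that relationship (the values are made up):

import numpy as np

draft = 0.3                       # transducer depth, m (illustrative)
depth_old = np.array([2.3, 4.3])  # depths measured with the old sos, m
ratio = 1500.0 / 1480.0           # new / old speed of sound

depth_new = draft + (depth_old - draft) * ratio
print(depth_new)  # approximately [2.327 4.354]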
-
+
     def filter_none(self):
-        """Applies no filter to depth data. Removes filter if one was applied.
-        """
-
+        """Applies no filter to depth data. Removes filter if one was
+        applied."""
+
         # Set all ensembles to have valid data
         if len(self.depth_beams_m.shape) > 1:
             self.valid_beams = np.tile(True, self.depth_beams_m.shape)
         else:
             self.valid_beams = np.tile(True, (1, self.depth_beams_m.shape[0]))
-        
+
         # Set ensembles with no depth data to invalid
         self.valid_beams[self.depth_beams_m == 0] = False
         self.valid_beams[np.isnan(self.depth_beams_m)] = False
-        
-        self.filter_type = 'None'
-        
+
+        self.filter_type = "None"
+
     def filter_smooth(self, transect):
-        """This filter uses a moving InterQuartile Range filter on residuals from a
-        robust Loess smooth of the depths in each beam to identify unnatural spikes in the depth
-        measurements from each beam. Each beam is filtered independently. The filter
-        criteria are set to be the maximum of the IQR filter, 5% of the measured depth, or 0.1 meter
+        """This filter uses a moving interquartile range (IQR) filter on
+        residuals from a robust Loess smooth of the depths in each beam
+        to identify unnatural spikes in the depth measurements from each beam.
+        Each beam is filtered independently. The filter criteria are set
+        to be the maximum of the IQR filter, 5% of the measured depth,
+        or 0.1 meter.
 
         Parameters
         ----------
         transect: TransectData
 
         Notes
         -----
-        half_width - number of points to each side of target point used in computing IQR.
-        This is the raw number of points actual points used may be less if some are bad.
-
-        multiplier - number multiplied times the IQR to determine the filter criteria
-
+        half_width - number of points to each side of target point used in
+        computing IQR. This is the raw number of points; the actual number
+        used may be less if some are bad.
+
+        multiplier - number multiplied by the IQR to determine the filter
+        criteria
         """
 
         # If the smoothed depth has not been computed
         if self.smooth_depth is None or len(self.smooth_depth) == 0:
-            
+
             # Set filter characteristics
-            self.filter_type = 'Smooth'
-            # cycles = 3
-            # half_width = 10
-            # multiplier = 15
-            
+            self.filter_type = "Smooth"
+
             # Determine number of beams
             if len(self.depth_orig_m.shape) > 1:
-                n_beams, n_ensembles = self.depth_orig_m.shape[0], self.depth_orig_m.shape[1]
+                n_beams, n_ensembles = (
+                    self.depth_orig_m.shape[0],
+                    self.depth_orig_m.shape[1],
+                )
                 depth_raw = np.copy(self.depth_orig_m)
             else:
                 n_beams = 1
@@ -470,7 +511,6 @@ class DepthData(object):
 
             # Arrays initialized
             depth_smooth = repmat([np.nan], n_beams, n_ensembles)
-            # depth_res = repmat([np.nan], n_beams, n_ensembles)
             upper_limit = repmat([np.nan], n_beams, n_ensembles)
             lower_limit = repmat([np.nan], n_beams, n_ensembles)
             depth_filtered = depth
@@ -479,34 +519,29 @@ class DepthData(object):
 
             # Create position array
             boat_vel_selected = getattr(transect.boat_vel, transect.boat_vel.selected)
             if boat_vel_selected is not None:
-                track_x = boat_vel_selected.u_processed_mps * transect.date_time.ens_duration_sec
-                track_y = boat_vel_selected.v_processed_mps * transect.date_time.ens_duration_sec
+                track_x = (
+                    boat_vel_selected.u_processed_mps
+                    * transect.date_time.ens_duration_sec
+                )
+                track_y = (
+                    boat_vel_selected.v_processed_mps
+                    * transect.date_time.ens_duration_sec
+                )
             else:
                 track_x = np.nan
                 track_y = np.nan
 
             idx = np.where(np.isnan(track_x))
             if len(idx[0]) < 2:
-                x = np.nancumsum(np.sqrt(track_x**2+track_y**2))
+                x = np.nancumsum(np.sqrt(track_x**2 + track_y**2))
             else:
                 x = np.nancumsum(transect.date_time.ens_duration_sec)
 
-            multi_processing = False
-            # start = time.perf_counter()
-            if multi_processing:
-                with 
concurrent.futures.ProcessPoolExecutor() as executor: - results = executor.map(self.compute_smooth, depth, depth_filtered, it.repeat(x)) - - for j, result in enumerate(results): - depth_smooth[j] = result[0] - upper_limit[j] = result[1] - lower_limit[j] = result[2] - else: - # Loop for each beam, smooth is applied to each beam - for j in range(n_beams): - depth_smooth[j], upper_limit[j], lower_limit[j] = self.compute_smooth(depth[j], - depth_filtered[j], - x) + # Loop for each beam, smooth is applied to each beam + for j in range(n_beams): + depth_smooth[j], upper_limit[j], lower_limit[j] = self.compute_smooth( + depth[j], depth_filtered[j], x + ) # Save smooth results to avoid recomputing them if needed later self.smooth_depth = depth_smooth @@ -515,13 +550,16 @@ class DepthData(object): # Reset valid data self.filter_none() - + # Set filter type - self.filter_type = 'Smooth' - + self.filter_type = "Smooth" + # Determine number of beams if len(self.depth_orig_m.shape) > 1: - n_beams, n_ensembles = self.depth_orig_m.shape[0], self.depth_orig_m.shape[1] + n_beams, n_ensembles = ( + self.depth_orig_m.shape[0], + self.depth_orig_m.shape[1], + ) depth_raw = np.copy(self.depth_orig_m) else: n_beams = 1 @@ -533,13 +571,16 @@ class DepthData(object): # Set bad depths to nan depth = repmat(np.nan, depth_raw.shape[0], depth_raw.shape[1]) depth[nan_greater(depth_raw, 0)] = depth_raw[nan_greater(depth_raw, 0)] - + # Apply filter for j in range(n_beams): if np.nansum(self.smooth_upper_limit[j, :]) > 0: bad_idx = np.where( - np.logical_or(nan_greater(depth[j], self.smooth_upper_limit[j]), - nan_less(depth[j], self.smooth_lower_limit[j])))[0] + np.logical_or( + nan_greater(depth[j], self.smooth_upper_limit[j]), + nan_less(depth[j], self.smooth_lower_limit[j]), + ) + )[0] # Update depth matrix depth_res[j, bad_idx] = np.nan @@ -559,7 +600,7 @@ class DepthData(object): lower_limit = np.nan # At least 50% of the data in a beam must be valid to apply the smooth - # if np.nansum((np.isnan(depth_filtered) == False) / len(depth_filtered)) > .5: + # if np.nansum((np.isnan(depth_filtered) == False) / len( depth_filtered)) > .5: # Compute residuals based on robust loess smooth if len(x) > 1: # Fit smooth @@ -585,14 +626,24 @@ class DepthData(object): # Compute filter criteria and apply appropriate criteria = multiplier * fill_array - idx = np.where(nan_less(criteria, np.max(np.vstack((depth * .05, - np.ones(depth.shape) / 10)), 0)))[0] + idx = np.where( + nan_less( + criteria, + np.max(np.vstack((depth * 0.05, np.ones(depth.shape) / 10)), 0), + ) + )[0] if len(idx) > 0: - criteria[idx] = np.max(np.vstack((depth[idx] * .05, np.ones(idx.shape) / 10)), 0) + criteria[idx] = np.max( + np.vstack((depth[idx] * 0.05, np.ones(idx.shape) / 10)), 0 + ) # Compute limits upper_limit = depth_smooth + criteria - idx = np.where(np.logical_or(np.greater(upper_limit, max_upper_limit), np.isnan(upper_limit)))[0] + idx = np.where( + np.logical_or( + np.greater(upper_limit, max_upper_limit), np.isnan(upper_limit) + ) + )[0] if len(idx) > 0: upper_limit[idx] = max_upper_limit lower_limit = depth_smooth - criteria @@ -600,9 +651,11 @@ class DepthData(object): lower_limit[idx] = 0 bad_idx = np.where( - np.logical_or(nan_greater(depth, upper_limit), nan_less(depth, lower_limit)))[0] + np.logical_or( + nan_greater(depth, upper_limit), nan_less(depth, lower_limit) + ) + )[0] # Update depth matrix - # depth_res[bad_idx] = np.nan if len(bad_idx) == 0: break else: @@ -619,11 +672,12 @@ class DepthData(object): return depth_smooth, 
upper_limit, lower_limit
 
     def filter_savgol(self, transect):
-        """This filter uses a moving InterQuartile Range filter on residuals from a
-        a Savitzky-Golay filter on y with non-uniform spaced x
+        """This filter uses a moving interquartile range (IQR) filter on
+        residuals from a Savitzky-Golay filter on y with non-uniformly spaced x
         of the depths in each beam to identify unnatural spikes in the depth
-        measurements from each beam. Each beam is filtered independently. The filter
-        criteria are set to be the maximum of the IQR filter, 5% of the measured depth, or 0.1 meter
+        measurements from each beam. Each beam is filtered independently.
+        The filter criteria are set to be the maximum of the IQR filter,
+        5% of the measured depth, or 0.1 meter.
 
         Parameters
         ----------
         transect: TransectData
 
         Notes
         -----
-        half_width - number of points to each side of target point used in computing IQR.
-        This is the raw number of points actual points used may be less if some are bad.
-
-        multiplier - number multiplied times the IQR to determine the filter criteria
+        half_width - number of points to each side of target point used in
+        computing IQR. This is the raw number of points; the actual number
+        used may be less if some are bad.
+        multiplier - number multiplied by the IQR to determine the filter
+        criteria
         """
 
         # Determine number of beams
         if len(self.depth_orig_m.shape) > 1:
             # For slant beams
-            n_beams, n_ensembles = self.depth_orig_m.shape[0], self.depth_orig_m.shape[1]
+            n_beams, n_ensembles = (
+                self.depth_orig_m.shape[0],
+                self.depth_orig_m.shape[1],
+            )
             depth_raw = np.copy(self.depth_orig_m)
         else:
             # For vertical beam or depth sounder
             n_beams = 1
@@ -658,7 +716,7 @@ class DepthData(object):
 
         if self.smooth_depth is None:
 
             # Set filter characteristics
-            self.filter_type = 'SavGol'
+            self.filter_type = "SavGol"
             cycles = 3
             half_width = 10
             multiplier = 15
@@ -669,20 +727,27 @@ class DepthData(object):
             upper_limit = repmat([np.nan], n_beams, n_ensembles)
             lower_limit = repmat([np.nan], n_beams, n_ensembles)
 
-            # Create position array. If there are insufficient track data use elapsed time
+            # Create position array. 
If there are insufficient track data
+            # use elapsed time
             boat_vel_selected = getattr(transect.boat_vel, transect.boat_vel.selected)
-            if boat_vel_selected is not None and \
-                    np.nansum(np.isnan(boat_vel_selected.u_processed_mps)) < 2:
-                track_x = boat_vel_selected.u_processed_mps * transect.date_time.ens_duration_sec
-                track_y = boat_vel_selected.v_processed_mps * transect.date_time.ens_duration_sec
-                x = np.nancumsum(np.sqrt(track_x ** 2 + track_y ** 2))
+            if (
+                boat_vel_selected is not None
+                and np.nansum(np.isnan(boat_vel_selected.u_processed_mps)) < 2
+            ):
+                track_x = (
+                    boat_vel_selected.u_processed_mps
+                    * transect.date_time.ens_duration_sec
+                )
+                track_y = (
+                    boat_vel_selected.v_processed_mps
+                    * transect.date_time.ens_duration_sec
+                )
+                x = np.nancumsum(np.sqrt(track_x**2 + track_y**2))
             else:
                 x = np.nancumsum(transect.date_time.ens_duration_sec)
 
             # Loop for each beam, smooth is applied to each beam
             for j in range(n_beams):
-                # At least 50% of the data in a beam must be valid to apply the smooth
-                # if np.nansum((np.isnan(depth[j, :]) == False) / depth.shape[0]) > .5:
 
                 # Compute residuals based on non-uniform Savitzky-Golay
                 try:
@@ -700,23 +765,36 @@ class DepthData(object):
 
                 for n in range(cycles - 1):
                     # Compute inner quartile range
-                    # fill_array = DepthData.run_iqr(half_width, depth_res[j, :])
+
                     fill_array = run_iqr(half_width, depth_res[j, :])
 
                     # Compute filter criteria
                     criteria = multiplier * fill_array
 
-                    # Adjust criteria so that it is never less than 5% of depth or 0.1 m which ever is greater
-                    idx = np.where(criteria < np.max(np.vstack((depth[j, :] * .05,
-                                                                np.ones(depth.shape) / 10)), 0))[0]
+                    # Adjust criteria so that it is never less than 5% of
+                    # depth or 0.1 m, whichever is greater
+                    idx = np.where(
+                        criteria
+                        < np.max(
+                            np.vstack((depth[j, :] * 0.05, np.ones(depth.shape) / 10)),
+                            0,
+                        )
+                    )[0]
                     if len(idx) > 0:
-                        criteria[idx] = np.max(np.vstack((depth[j, idx] * .05, np.ones(idx.shape) / 10)), 0)
+                        criteria[idx] = np.max(
+                            np.vstack((depth[j, idx] * 0.05, np.ones(idx.shape) / 10)),
+                            0,
+                        )
 
                     # Compute limits
                     upper_limit[j] = depth_smooth[j, :] + criteria
                     lower_limit[j] = depth_smooth[j, :] - criteria
 
-                    bad_idx = np.where(np.logical_or(np.greater(depth[j], upper_limit[j]),
-                                                     np.less(depth[j], lower_limit[j])))[0]
+                    bad_idx = np.where(
+                        np.logical_or(
+                            np.greater(depth[j], upper_limit[j]),
+                            np.less(depth[j], lower_limit[j]),
+                        )
+                    )[0]
 
                     # Update residual matrix
                     depth_res[j, bad_idx] = np.nan
@@ -729,14 +807,17 @@ class DepthData(object):
             self.filter_none()
 
             # Set filter type
-            self.filter_type = 'SavGol'
+            self.filter_type = "SavGol"
 
             # Apply filter
             for j in range(n_beams):
                 if np.nansum(self.smooth_upper_limit[j]) > 0:
                     bad_idx = np.where(
-                        np.logical_or(np.greater(depth[j], self.smooth_upper_limit[j]),
-                                      np.less(depth[j], self.smooth_lower_limit[j])))[0]
+                        np.logical_or(
+                            np.greater(depth[j], self.smooth_upper_limit[j]),
+                            np.less(depth[j], self.smooth_lower_limit[j]),
+                        )
+                    )[0]
                 else:
                     bad_idx = np.isnan(depth[j])
 
@@ -744,64 +825,64 @@ class DepthData(object):
             self.valid_beams[j, bad_idx] = False
 
     def interpolate_none(self):
-        """Applies no interpolation. 
- """ - + """Applies no interpolation.""" + # Compute processed depth without interpolation - if self.depth_source == 'BT': + if self.depth_source == "BT": # Bottom track methods self.compute_avg_bt_depth() else: # Vertical beam or depth sounder depths self.depth_processed_m = self.depth_beams_m[0, :] - + self.depth_processed_m[np.squeeze(np.equal(self.valid_data, False))] = np.nan - + # Set interpolation type - self.interp_type = 'None' - + self.interp_type = "None" + def interpolate_hold_last(self): - """This function holds the last valid value until the next valid data point. + """This function holds the last valid value until the next valid + data point. """ - + # Get number of ensembles n_ensembles = len(self.depth_processed_m) - + # Process data by ensemble for n in range(1, n_ensembles): - - # If current ensemble's depth is invalid assign depth from previous example + + # If current ensemble's depth is invalid assign depth from + # previous example if not self.valid_data[n]: - self.depth_processed_m[n] = self.depth_processed_m[n-1] + self.depth_processed_m[n] = self.depth_processed_m[n - 1] def interpolate_next(self): - """This function back fills with the next valid value. - """ + """This function back fills with the next valid value.""" # Get number of ensembles n_ens = len(self.depth_processed_m) # Process data by ensemble - for n in np.arange(0, n_ens-1)[::-1]: + for n in np.arange(0, n_ens - 1)[::-1]: - # If current ensemble's depth is invalid assign depth from previous example + # If current ensemble's depth is invalid assign depth from + # previous example if not self.valid_data[n]: self.depth_processed_m[n] = self.depth_processed_m[n + 1] def interpolate_smooth(self): - """Apply interpolation based on the robust loess smooth - """ - - self.interp_type = 'Smooth' - + """Apply interpolation based on the robust loess smooth""" + + self.interp_type = "Smooth" + # Get depth data from object depth_new = self.depth_beams_m - + # Update depth data with interpolated depths depth_new[not self.valid_beams] = self.smooth_depth[not self.valid_beams] - + # Compute processed depths with interpolated values - if self.depth_source == 'BT': + if self.depth_source == "BT": # Temporarily change self.depth_beams_m to compute average # for bottom track based depths temp_save = copy.deepcopy(self.depth_beams_m) @@ -812,13 +893,12 @@ class DepthData(object): else: # Assignment for VB or DS self.depth_processed_m = depth_new[0, :] - + def interpolate_linear(self, transect): - """Apply linear interpolation - """ - + """Apply linear interpolation""" + # Set interpolation type - self.interp_type = 'Linear' + self.interp_type = "Linear" # Create position array select = getattr(transect.boat_vel, transect.boat_vel.selected) @@ -828,38 +908,43 @@ class DepthData(object): track_x = boat_vel_x * transect.date_time.ens_duration_sec track_y = boat_vel_y * transect.date_time.ens_duration_sec else: - select = getattr(transect.boat_vel, 'bt_vel') + select = getattr(transect.boat_vel, "bt_vel") track_x = np.tile(np.nan, select.u_processed_mps.shape) track_y = np.tile(np.nan, select.v_processed_mps.shape) - + idx = np.where(np.isnan(track_x[1:])) - - # If the navigation reference has no gaps use it for interpolation, if not use time + + # If the navigation reference has no gaps use it for interpolation, + # if not use time if len(idx[0]) < 1: x = np.nancumsum(np.sqrt(track_x**2 + track_y**2)) else: # Compute accumulated time x = np.nancumsum(transect.date_time.ens_duration_sec) - + # Determine number of beams 
n_beams = self.depth_beams_m.shape[0] depth_mono = copy.deepcopy(self.depth_beams_m) depth_new = copy.deepcopy(self.depth_beams_m) - -# Create strict monotonic arrays for depth and track by identifying duplicate -# track values. The first track value is used and the remaining duplicates -# are set to nan. The depth assigned to that first track value is the average -# of all duplicates. The depths for the duplicates are then set to nan. Only -# valid strictly monotonic track and depth data are used for the input in to linear -# interpolation. Only the interpolated data for invalid depths are added -# to the valid depth data to create depth_new + + # Create strict monotonic arrays for depth and track by + # identifying duplicate track values. + # The first track value is used and the remaining duplicates + # are set to nan. The depth assigned to that first track + # value is the average of all duplicates. + # The depths for the duplicates are then set to nan. Only + # valid strictly monotonic track and depth data are used for + # the input in to linear interpolation. + # Only the interpolated data for invalid depths are added + # to the valid depth data to create depth_new x_mono = x - + idx0 = np.where(np.diff(x) == 0)[0] if len(idx0) > 0: if len(idx0) > 1: - # Split array into subarrays in proper sequence e.g [[2,3,4],[7,8,9]] etc. + # Split array into subarrays in proper sequence e.g [[2,3, + # 4],[7,8,9]] etc. idx1 = np.add(np.where(np.diff(idx0) != 1)[0], 1) group = np.split(idx0, idx1) @@ -876,7 +961,7 @@ class DepthData(object): depth_mono[:, indices[0]] = depth_avg depth_mono[:, indices[1:]] = np.nan x[indices[1:]] = np.nan - + # Interpolate each beam for n in range(n_beams): @@ -889,13 +974,23 @@ class DepthData(object): if np.sum(valid) > 1: # Compute interpolation function from all valid data - depth_int = np.interp(x_mono, x_mono[valid], depth_mono[n, valid], left=np.nan, right=np.nan) + depth_int = np.interp( + x_mono, + x_mono[valid], + depth_mono[n, valid], + left=np.nan, + right=np.nan, + ) # Fill in invalid data with interpolated data - depth_new[n, np.logical_not(self.valid_beams[n])] = depth_int[np.logical_not(self.valid_beams[n])] + depth_new[n, np.logical_not(self.valid_beams[n])] = depth_int[ + np.logical_not(self.valid_beams[n]) + ] - if self.depth_source == 'BT': + if self.depth_source == "BT": # Bottom track depths - self.depth_processed_m = self.average_depth(depth_new, self.draft_use_m, self.avg_method) + self.depth_processed_m = self.average_depth( + depth_new, self.draft_use_m, self.avg_method + ) else: # Vertical beam or depth sounder depths self.depth_processed_m = np.copy(depth_new[0, :]) @@ -907,36 +1002,35 @@ class DepthData(object): Parameters ---------- depth: np.array(float) - Individual beam depths for each beam in each ensemble including the draft + Individual beam depths for each beam in each ensemble including + the draft draft: float Draft of ADCP method: str Averaging method (Simple, IDW) - + Returns ------- avg_depth: np.array(float) Average depth for each ensemble - + """ - if method == 'Simple': + if method == "Simple": avg_depth = np.nanmean(depth, 0) else: # Compute inverse weighted mean depth rng = depth - draft w = 1 - np.divide(rng, np.nansum(rng, 0)) avg_depth = draft + np.nansum( - np.divide((rng * w), - np.nansum(w, 0), - out=np.zeros_like(rng), - where=np.nansum(w, 0) != 0) - , 0) + np.divide((rng * w), np.nansum(w, 0), where=np.nansum(w, 0) != 0), 0 + ) avg_depth[avg_depth == draft] = np.nan return avg_depth def filter_trdi(self): - 
"""Filter used by TRDI to filter out multiple reflections that get digitized as depth. + """Filter used by TRDI to filter out multiple reflections that get + digitized as depth. """ # Assign raw depth data to local variable @@ -949,7 +1043,7 @@ class DepthData(object): self.filter_none() # Set filter type to TRDI - self.filter_type = 'TRDI' + self.filter_type = "TRDI" for n in range(n_beams): depth_ratio = depth_raw[n, :] / depth_raw @@ -957,28 +1051,32 @@ class DepthData(object): exceeded_ens = np.nansum(exceeded, 0) self.valid_beams[n, exceeded_ens > 0] = False - # ============================================================================================ + # ======================================================================= # The methods below are not being used. - # The methods have been moved to separate files and compiled using Numba AOT. + # The methods have been moved to separate files and compiled using Numba + # AOT. # The methods below are included here for historical purposes - # and may provide an easier approach to adding new features/algorithms prior to recoding - # them in a manner that can be compiled using Numba AOT. - # ============================================================================================= + # and may provide an easier approach to adding new features/algorithms + # prior to recoding them in a manner that can be compiled using Numba AOT. + # ====================================================================== @staticmethod def run_iqr(half_width, data): """Computes a running Innerquartile Range - The routine accepts a column vector as input. "halfWidth" number of data + The routine accepts a column vector as input. + "halfWidth" number of data points for computing the Innerquartile Range are selected before and after the target data point, but no including the target data point. - Near the ends of the series the number of points before or after are reduced. - Nan in the data are counted as points. The IQR is computed on the slected - subset of points. The process occurs for each point in the provided column vector. + Near the ends of the series the number of points before or after are + reduced. Nan in the data are counted as points. + The IQR is computed on the slected subset of points. + The process occurs for each point in the provided column vector. A column vector with the computed IQR at each point is returned. 
diff --git a/Classes/DepthStructure.py b/Classes/DepthStructure.py
index 4dcb2c6..fe615a0 100644
--- a/Classes/DepthStructure.py
+++ b/Classes/DepthStructure.py
@@ -3,7 +3,8 @@ from Classes.DepthData import DepthData
 
 
 class DepthStructure(object):
-    """This class creates the data structure used store depths from different sources
+    """This class creates the data structure used to store depths from
+    different sources
 
     Attributes
     ----------
@@ -18,62 +19,72 @@ class DepthStructure(object):
     composite: str
         Indicates use of composite depths ("On" or "Off").
     """
 
     def __init__(self):
         """Creates object and initializes variables to None"""
 
-        self.selected = None  # name of object DepthData that contains the depth data for q computation
-        self.bt_depths = None  # object of DepthData for by depth data
-        self.vb_depths = None  # object of DepthData for vertical beam depth data
-        self.ds_depths = None  # object of DepthData for depth sounder depth data
-        self.composite = "On"  # Turn composite depths "on" or "off"
+        self.selected = None
+        self.bt_depths = None
+        self.vb_depths = None
+        self.ds_depths = None
+        self.composite = "On"
 
-    def add_depth_object(self, depth_in, source_in, freq_in, draft_in, cell_depth_in, cell_size_in):
-        """Adds a DepthData object to the depth structure for the specified type of depths.
+    def add_depth_object(
+        self, depth_in, source_in, freq_in, draft_in, cell_depth_in, cell_size_in
+    ):
+        """Adds a DepthData object to the depth structure for the specified
+        type of depths.
 
         Parameters
         ----------
         depth_in: np.array
             Depth data in meters.
         source_in: str
-            Specifies source of depth data: bottom track (BT), vertical beam (VB), or depth sounder (DS)
+            Specifies source of depth data: bottom track (BT), vertical beam
+            (VB), or depth sounder (DS)
         freq_in: np.array
             Acoustic frequency in kHz of beams used to determine depth.
         draft_in:
             Draft of transducer (in meters) used to measure depths.
         cell_depth_in
-            Depth of each cell in the profile. If the referenced depth does not have depth cells the depth cell
-            values from the bottom track (BT) depths should be used.
+            Depth of each cell in the profile. If the referenced depth does
+            not have depth cells the depth cell values from the bottom track
+            (BT) depths should be used.
        cell_size_in
-            Size of each depth cell. If the referenced depth does not have depth cells the cell size from
-            the bottom track (BT) depths should be used.
+            Size of each depth cell. 
If the referenced depth does not have + depth cells the cell size from the bottom track (BT) + depths should be used. """ - if source_in == 'BT': + if source_in == "BT": self.bt_depths = DepthData() - self.bt_depths.populate_data(depth_in, source_in, freq_in, draft_in, cell_depth_in, cell_size_in) - elif source_in == 'VB': + self.bt_depths.populate_data( + depth_in, source_in, freq_in, draft_in, cell_depth_in, cell_size_in + ) + elif source_in == "VB": self.vb_depths = DepthData() - self.vb_depths.populate_data(depth_in, source_in, freq_in, draft_in, cell_depth_in, cell_size_in) - elif source_in == 'DS': + self.vb_depths.populate_data( + depth_in, source_in, freq_in, draft_in, cell_depth_in, cell_size_in + ) + elif source_in == "DS": self.ds_depths = DepthData() - self.ds_depths.populate_data(depth_in, source_in, freq_in, draft_in, cell_depth_in, cell_size_in) + self.ds_depths.populate_data( + depth_in, source_in, freq_in, draft_in, cell_depth_in, cell_size_in + ) def populate_from_qrev_mat(self, transect): - """Populates the object using data from previously saved QRev Matlab file. + """Populates the object using data from previously saved QRev Matlab + file. Parameters ---------- transect: mat_struct Matlab data structure obtained from sio.loadmat """ - if hasattr(transect, 'depths'): + if hasattr(transect, "depths"): - # try: self.bt_depths = DepthData() self.bt_depths.populate_from_qrev_mat(transect.depths.btDepths) - # except AttributeError: - # self.bt_depths = None try: self.vb_depths = DepthData() @@ -87,29 +98,40 @@ class DepthStructure(object): except AttributeError: self.ds_depths = None - if transect.depths.selected == 'btDepths': - self.selected = 'bt_depths' - elif transect.depths.selected == 'vbDepths': - self.selected = 'vb_depths' - elif transect.depths.selected == 'dsDepths': - self.selected = 'ds_depths' + if ( + transect.depths.selected == "btDepths" + or transect.depths.selected == "bt_depths" + ): + self.selected = "bt_depths" + elif ( + transect.depths.selected == "vbDepths" + or transect.depths.selected == "vb_depths" + ): + self.selected = "vb_depths" + elif ( + transect.depths.selected == "dsDepths" + or transect.depths.selected == "ds_depths" + ): + self.selected = "ds_depths" + else: + self.selected = "bt_depths" self.composite = transect.depths.composite if self.vb_depths is None and self.ds_depths is None: - self.composite = 'Off' + self.composite = "Off" def composite_depths(self, transect, setting="Off"): """Depth composite is based on the following assumptions - - 1. If a depth sounder is available the user must have assumed the ADCP beams - (BT or vertical) might have problems and it will be the second alternative if - not selected as the preferred source - - 2. For 4-beam BT depths, if 3 beams are valid the average is considered valid. - It may be based on interpolation of the invalid beam. However, if only 2 beams - are valid even though the other two beams may be interpolated and included in the average the - average will be replaced by an alternative if available. If no alternative is - available the multi-beam average based on available beams and interpolation will - be used. + + 1. If a depth sounder is available the user must have assumed the + ADCP beams (BT or vertical) might have problems + and it will be the second alternative if not selected as the preferred source + + 2. For 4-beam BT depths, if 3 beams are valid the average is + considered valid. It may be based on interpolation of the invalid beam. 
+ However, if only 2 beams are valid even though the other two beams may be + interpolated and included in the average the average will be replaced by an + alternative if available. If no alternative is available the multi-beam average + based on available beams and interpolation will be used. Parameters ---------- @@ -118,87 +140,145 @@ class DepthStructure(object): setting: str Setting to use ("On") or not use ("Off") composite depths. """ - + if setting is None: setting = self.composite else: self.composite = setting - + # The primary depth reference is the selected reference ref = self.selected comp_depth = np.array([]) - if setting == 'On': - # Prepare vector of valid BT averages, which are defined as having at least 2 valid beams + if setting == "On": + # Prepare vector of valid BT averages, which are defined as + # having at least 2 valid beams bt_valid = self.bt_depths.valid_data n_ensembles = bt_valid.shape[-1] bt_filtered = np.copy(self.bt_depths.depth_processed_m) bt_filtered[np.logical_not(bt_valid)] = np.nan - - # Prepare vertical beam data, using only data prior to interpolation + + # Prepare vertical beam data, using only data prior to + # interpolation if self.vb_depths is not None: vb_filtered = np.copy(self.vb_depths.depth_processed_m) - vb_filtered[np.squeeze(np.equal(self.vb_depths.valid_data, False))] = np.nan + vb_filtered[ + np.squeeze(np.equal(self.vb_depths.valid_data, False)) + ] = np.nan else: vb_filtered = np.tile(np.nan, n_ensembles) - - # Prepare depth sounder data, using only data prior to interpolation + + # Prepare depth sounder data, using only data prior to + # interpolation if self.ds_depths is not None: ds_filtered = np.copy(self.ds_depths.depth_processed_m) - ds_filtered[np.squeeze(np.equal(self.ds_depths.valid_data, False))] = np.nan + ds_filtered[ + np.squeeze(np.equal(self.ds_depths.valid_data, False)) + ] = np.nan else: ds_filtered = np.tile(np.nan, n_ensembles) comp_source = np.tile(np.nan, bt_filtered.shape) # Apply composite depths - if ref == 'bt_depths': + if ref == "bt_depths": comp_depth = np.copy(bt_filtered) comp_source[np.isnan(comp_depth) == False] = 1 - comp_depth[np.isnan(comp_depth)] = np.squeeze(ds_filtered[np.isnan(comp_depth)]) - comp_source[np.logical_and((np.isnan(comp_depth) == False), (np.isnan(comp_source) == True))] = 3 + comp_depth[np.isnan(comp_depth)] = np.squeeze( + ds_filtered[np.isnan(comp_depth)] + ) + comp_source[ + np.logical_and( + (np.isnan(comp_depth) == False), (np.isnan(comp_source) == True) + ) + ] = 3 comp_depth[np.isnan(comp_depth)] = vb_filtered[np.isnan(comp_depth)] - comp_source[np.logical_and((np.isnan(comp_depth) == False), (np.isnan(comp_source) == True))] = 2 - comp_depth = self.interpolate_composite(transect=transect, composite_depth=comp_depth) - # comp_depth[np.isnan(comp_depth)] = np.squeeze(self.bt_depths.depth_processed_m[np.isnan(comp_depth)]) - comp_source[np.logical_and((np.isnan(comp_depth) == False), (np.isnan(comp_source) == True))] = 4 - - elif ref == 'vb_depths': + comp_source[ + np.logical_and( + (np.isnan(comp_depth) == False), (np.isnan(comp_source) == True) + ) + ] = 2 + comp_depth = self.interpolate_composite( + transect=transect, composite_depth=comp_depth + ) + + comp_source[ + np.logical_and( + (np.isnan(comp_depth) == False), (np.isnan(comp_source) == True) + ) + ] = 4 + + elif ref == "vb_depths": comp_depth = np.copy(vb_filtered) comp_source[np.isnan(comp_depth) == False] = 2 - comp_depth[np.isnan(comp_depth)] = np.squeeze(ds_filtered[np.isnan(comp_depth)]) - 
comp_source[np.logical_and((np.isnan(comp_depth) == False), (np.isnan(comp_source) == True))] = 3 - comp_depth[np.isnan(comp_depth)] = np.squeeze(bt_filtered[np.isnan(comp_depth)]) - comp_source[np.logical_and((np.isnan(comp_depth) == False), (np.isnan(comp_source) == True))] = 1 - comp_depth = self.interpolate_composite(transect=transect, composite_depth=comp_depth) - # comp_depth[np.isnan(comp_depth)] = np.squeeze(self.vb_depths.depth_processed_m[np.isnan(comp_depth)]) - comp_source[np.logical_and((np.isnan(comp_depth) == False), (np.isnan(comp_source) == True))] = 4 - - elif ref == 'ds_depths': + comp_depth[np.isnan(comp_depth)] = np.squeeze( + ds_filtered[np.isnan(comp_depth)] + ) + comp_source[ + np.logical_and( + (np.isnan(comp_depth) == False), (np.isnan(comp_source) == True) + ) + ] = 3 + comp_depth[np.isnan(comp_depth)] = np.squeeze( + bt_filtered[np.isnan(comp_depth)] + ) + comp_source[ + np.logical_and( + (np.isnan(comp_depth) == False), (np.isnan(comp_source) == True) + ) + ] = 1 + comp_depth = self.interpolate_composite( + transect=transect, composite_depth=comp_depth + ) + comp_source[ + np.logical_and( + (np.isnan(comp_depth) == False), (np.isnan(comp_source) == True) + ) + ] = 4 + + elif ref == "ds_depths": comp_depth = np.copy(ds_filtered) comp_source[np.isnan(comp_depth) == False] = 3 - comp_depth[np.isnan(comp_depth)] = np.squeeze(vb_filtered[np.isnan(comp_depth)]) - comp_source[np.logical_and((np.isnan(comp_depth) == False), (np.isnan(comp_source) == True))] = 2 - comp_depth[np.isnan(comp_depth)] = np.squeeze(bt_filtered[np.isnan(comp_depth)]) - comp_source[np.logical_and((np.isnan(comp_depth) == False), (np.isnan(comp_source) == True))] = 1 - comp_depth = self.interpolate_composite(transect=transect, composite_depth=comp_depth) - # comp_depth[np.isnan(comp_depth)] = np.squeeze(self.ds_depths.depth_processed_m[np.isnan(comp_depth)]) - comp_source[np.logical_and((np.isnan(comp_depth) == False), (np.isnan(comp_source) == True))] = 4 - - # Save composite depth to depth_processed of selected primary reference + comp_depth[np.isnan(comp_depth)] = np.squeeze( + vb_filtered[np.isnan(comp_depth)] + ) + comp_source[ + np.logical_and( + (np.isnan(comp_depth) == False), (np.isnan(comp_source) == True) + ) + ] = 2 + comp_depth[np.isnan(comp_depth)] = np.squeeze( + bt_filtered[np.isnan(comp_depth)] + ) + comp_source[ + np.logical_and( + (np.isnan(comp_depth) == False), (np.isnan(comp_source) == True) + ) + ] = 1 + comp_depth = self.interpolate_composite( + transect=transect, composite_depth=comp_depth + ) + comp_source[ + np.logical_and( + (np.isnan(comp_depth) == False), (np.isnan(comp_source) == True) + ) + ] = 4 + + # Save composite depth to depth_processed of selected primary + # reference selected_data = getattr(self, ref) selected_data.apply_composite(comp_depth, comp_source.astype(int)) - + else: selected_data = getattr(self, ref) comp_source = np.zeros(selected_data.depth_processed_m.shape) - - if ref == 'bt_depths': + + if ref == "bt_depths": selected_data.valid_data[np.isnan(selected_data.valid_data)] = False comp_source[np.squeeze(selected_data.valid_data)] = 1 - elif ref == 'vb_depths': + elif ref == "vb_depths": comp_source[np.squeeze(selected_data.valid_data)] = 2 - elif ref == 'ds_depths': + elif ref == "ds_depths": comp_source[np.squeeze(selected_data.valid_data)] = 3 selected_data.apply_interpolation(transect) @@ -215,13 +295,13 @@ class DepthStructure(object): draft: float New draft. 
""" - - if target == 'ADCP': + + if target == "ADCP": self.bt_depths.change_draft(draft) self.vb_depths.change_draft(draft) else: - self.ds_depths.change_draft(draft) - + self.ds_depths.change_draft(draft) + def depth_filter(self, transect, filter_method): """Method to apply filter to all available depth sources, so that all sources have the same filter applied. @@ -233,14 +313,14 @@ class DepthStructure(object): filter_method: str Method to use to filter data (Smooth, TRDI, None). """ - + if self.bt_depths is not None: self.bt_depths.apply_filter(transect, filter_method) if self.vb_depths is not None: self.vb_depths.apply_filter(transect, filter_method) if self.ds_depths is not None: self.ds_depths.apply_filter(transect, filter_method) - + def depth_interpolation(self, transect, method=None): """Method to apply interpolation to all available depth sources, so that all sources have the same filter applied. @@ -251,15 +331,15 @@ class DepthStructure(object): Object of TransectData method: str Interpolation method (None, HoldLast, Smooth, Linear) - """ - + """ + if self.bt_depths is not None: self.bt_depths.apply_interpolation(transect, method) if self.vb_depths is not None: self.vb_depths.apply_interpolation(transect, method) if self.ds_depths is not None: self.ds_depths.apply_interpolation(transect, method) - + def sos_correction(self, ratio): """Correct depths for change in speed of sound. @@ -268,11 +348,11 @@ class DepthStructure(object): ratio: float Ratio of new to old speed of sound. """ - + # Bottom Track Depths if self.bt_depths is not None: self.bt_depths.sos_correction(ratio) - + # Vertical beam depths if self.vb_depths is not None: self.vb_depths.sos_correction(ratio) @@ -303,15 +383,16 @@ class DepthStructure(object): track_x = boat_vel_x * transect.date_time.ens_duration_sec track_y = boat_vel_y * transect.date_time.ens_duration_sec else: - select = getattr(transect.boat_vel, 'bt_vel') + select = getattr(transect.boat_vel, "bt_vel") track_x = np.tile(np.nan, select.u_processed_mps.shape) track_y = np.tile(np.nan, select.v_processed_mps.shape) idx = np.where(np.isnan(track_x[1:])) - # If the navigation reference has no gaps use it for interpolation, if not use time + # If the navigation reference has no gaps use it for interpolation, + # if not use time if len(idx[0]) < 1: - x = np.nancumsum(np.sqrt(track_x ** 2 + track_y ** 2)) + x = np.nancumsum(np.sqrt(track_x**2 + track_y**2)) else: # Compute accumulated time x = np.nancumsum(transect.date_time.ens_duration_sec) @@ -319,20 +400,21 @@ class DepthStructure(object): depth_mono = np.copy(composite_depth) depth_new = np.copy(composite_depth) - # Create strict monotonic arrays for depth and track by identifying duplicate - # track values. The first track value is used and the remaining duplicates - # are set to nan. The depth assigned to that first track value is the average - # of all duplicates. The depths for the duplicates are then set to nan. Only - # valid strictly monotonic track and depth data are used for the input in to linear - # interpolation. Only the interpolated data for invalid depths are added - # to the valid depth data to create depth_new + # Create strict monotonic arrays for depth and track by identifying duplicate + # track values. The first track value is used and the remaining duplicates + # are set to nan. The depth assigned to that first track value is the average + # of all duplicates. The depths for the duplicates are then set to nan. 
Only + # valid strictly monotonic track and depth data are used for the input in to + # linear interpolation. Only the interpolated data for invalid + # depths are added to the valid depth data to create depth_new x_mono = x idx0 = np.where(np.diff(x) == 0)[0] if len(idx0) > 0: if len(idx0) > 1: - # Split array into subarrays in proper sequence e.g [[2,3,4],[7,8,9]] etc. + # Split array into subarrays in proper sequence e.g [[2,3, + # 4],[7,8,9]] etc. idx1 = np.add(np.where(np.diff(idx0) != 1)[0], 1) group = np.split(idx0, idx1) @@ -352,7 +434,6 @@ class DepthStructure(object): # Interpolate - # Determine ensembles with valid depth data valid_depth_mono = np.logical_not(np.isnan(depth_mono)) valid_x_mono = np.logical_not(np.isnan(x_mono)) @@ -361,8 +442,12 @@ class DepthStructure(object): if np.sum(valid) > 1: # Compute interpolation function from all valid data - depth_int = np.interp(x_mono, x_mono[valid], depth_mono[valid], left=np.nan, right=np.nan) + depth_int = np.interp( + x_mono, x_mono[valid], depth_mono[valid], left=np.nan, right=np.nan + ) # Fill in invalid data with interpolated data - depth_new[np.logical_not(valid_depth_mono)] = depth_int[np.logical_not(valid_depth_mono)] + depth_new[np.logical_not(valid_depth_mono)] = depth_int[ + np.logical_not(valid_depth_mono) + ] return depth_new diff --git a/Classes/EdgeData.py b/Classes/EdgeData.py index a4fddc5..f927a65 100644 --- a/Classes/EdgeData.py +++ b/Classes/EdgeData.py @@ -27,27 +27,31 @@ class EdgeData(object): orig_user_discharge_cms: float Original user supplied discharge for edge, in cms. """ - + def __init__(self): - """Initialize EdgeData. - """ - - self.type = None # Shape of edge: 'Triangular', 'Rectangular', 'Custom, 'User Q' - self.distance_m = None # Distance to shore - self.cust_coef = None # Custom coefficient provided by user - self.number_ensembles = None # Number of ensembles to average for depth and velocities - self.user_discharge_cms = None # User supplied edge discharge. + """Initialize EdgeData.""" - self.orig_type = None # Shape of edge: 'Triangular', 'Rectangular', 'Custom, 'User Q' - self.orig_distance_m = None # Distance to shore - self.orig_cust_coef = None # Custom coefficient provided by user - self.orig_number_ensembles = None # Number of ensembles to average for depth and velocities - self.orig_user_discharge_cms = None # User supplied edge discharge. + self.type = None + self.distance_m = None + self.cust_coef = None + self.number_ensembles = None + self.user_discharge_cms = None + self.orig_type = None + self.orig_distance_m = None + self.orig_cust_coef = None + self.orig_number_ensembles = None + self.orig_user_discharge_cms = None - - def populate_data(self, edge_type, distance=None, number_ensembles=10, coefficient=None, user_discharge=None): + def populate_data( + self, + edge_type, + distance=None, + number_ensembles=10, + coefficient=None, + user_discharge=None, + ): """Construct left or right edge object from provided inputs - + Parameters ---------- edge_type: str @@ -77,7 +81,8 @@ class EdgeData(object): self.orig_cust_coef = coefficient def populate_from_qrev_mat(self, mat_data): - """Populates the object using data from previously saved QRev Matlab file. + """Populates the object using data from previously saved QRev Matlab + file. 
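The interpolate_composite hunk above does three things worth restating: it interpolates along cumulative track distance when the navigation reference has no gaps (falling back to cumulative ensemble time otherwise), it collapses duplicated track values into a strictly monotonic abscissa (averaging the depths of each duplicate run), and it keeps interpolated values only where the original depth was invalid. A condensed sketch under those assumptions, handling a single duplicate pair rather than the general grouped case:

    import numpy as np

    track_x = np.array([0.5, 0.5, 0.0, 0.5, 0.5])   # per-ensemble displacement, m
    ens_duration_sec = np.ones(track_x.size)
    depth = np.array([1.0, 1.3, 1.5, np.nan, 2.0])

    # Distance made good if the track is gap-free, otherwise accumulated time
    if not np.any(np.isnan(track_x[1:])):
        x = np.nancumsum(np.abs(track_x))
    else:
        x = np.nancumsum(ens_duration_sec)

    x_mono, depth_mono = x.astype(float).copy(), depth.copy()
    for i in np.where(np.diff(x) == 0)[0]:          # duplicate track values
        depth_mono[i] = np.nanmean(depth[i:i + 2])  # first slot keeps the mean
        depth_mono[i + 1] = np.nan                  # remaining duplicate dropped
        x_mono[i + 1] = np.nan

    valid = ~np.isnan(depth_mono) & ~np.isnan(x_mono)
    depth_int = np.interp(x_mono, x_mono[valid], depth_mono[valid],
                          left=np.nan, right=np.nan)

    depth_new = depth.copy()
    depth_new[np.isnan(depth_mono)] = depth_int[np.isnan(depth_mono)]
    print(depth_new)   # NaN at index 3 replaced; the duplicate slot stays NaN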
Parameters ---------- @@ -93,7 +98,7 @@ class EdgeData(object): self.user_discharge_cms = mat_data.userQ_cms if type(mat_data.custCoef) is float: self.cust_coef = mat_data.custCoef - if hasattr(mat_data, 'orig_type'): + if hasattr(mat_data, "orig_type"): self.orig_type = mat_data.orig_type self.orig_distance_m = mat_data.orig_distance_m self.orig_number_ensembles = mat_data.orig_number_ensembles diff --git a/Classes/Edges.py b/Classes/Edges.py index 436cec2..594f937 100644 --- a/Classes/Edges.py +++ b/Classes/Edges.py @@ -15,16 +15,15 @@ class Edges(object): right: EdgeData Object of EdgeData for right edge. """ - + def __init__(self): - """Initialize Edges. - """ + """Initialize Edges.""" self.rec_edge_method = None self.vel_method = None self.left = EdgeData() self.right = EdgeData() - + def populate_data(self, rec_edge_method, vel_method): """Store the general methods used for edge data. @@ -39,19 +38,20 @@ class Edges(object): self.vel_method = vel_method def populate_from_qrev_mat(self, transect): - """Populates the object using data from previously saved QRev Matlab file. + """Populates the object using data from previously saved QRev Matlab + file. Parameters ---------- transect: mat_struct Matlab data structure obtained from sio.loadmat - """ + """ - if hasattr(transect, 'edges'): - if hasattr(transect.edges, 'left'): + if hasattr(transect, "edges"): + if hasattr(transect.edges, "left"): self.left = EdgeData() self.left.populate_from_qrev_mat(transect.edges.left) - if hasattr(transect.edges, 'right'): + if hasattr(transect.edges, "right"): self.right = EdgeData() self.right.populate_from_qrev_mat(transect.edges.right) self.rec_edge_method = transect.edges.recEdgeMethod @@ -59,7 +59,7 @@ class Edges(object): def change_property(self, prop, setting, edge=None): """Change edge property - + Parameters ---------- prop: str @@ -69,7 +69,7 @@ class Edges(object): edge: str Edge to change (left, right) """ - + if edge is None: setattr(self, prop, setting) else: diff --git a/Classes/ExtrapData.py b/Classes/ExtrapData.py index 2a6e1d4..4f29abb 100644 --- a/Classes/ExtrapData.py +++ b/Classes/ExtrapData.py @@ -4,28 +4,30 @@ class ExtrapData(object): Attributes ---------- top_method_orig: str - Original extrapolation method for top of profile: Power, Constant, 3-Point. + Original extrapolation method for top of profile: Power, Constant, + 3-Point. bot_method_orig: str Original extrapolation method for bottom of profile: Power, No Slip. exponent_orig: float Original exponent for power of no slip methods. top_method: str - Applied extrapolation method for top of profile: Power, Constant, 3-Point. + Applied extrapolation method for top of profile: Power, Constant, + 3-Point. 
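populate_from_qrev_mat here, as in most classes touched by this patch, probes optional fields with hasattr so that .mat files written by older QRev versions still load. A generic sketch of that compatibility pattern; the field names below are hypothetical, not the actual QRev struct members:

    from types import SimpleNamespace

    def load_edge(mat_struct):
        """Copy required fields; tolerate structs from older file versions."""
        edge = {"type": mat_struct.edge_type, "distance_m": mat_struct.dist}
        # Fields added in later versions exist only in newer files
        if hasattr(mat_struct, "orig_type"):
            edge["orig_type"] = mat_struct.orig_type
        else:
            edge["orig_type"] = edge["type"]   # assume current value is original
        return edge

    old_style = SimpleNamespace(edge_type="Triangular", dist=5.0)
    print(load_edge(old_style))   # orig_type falls back to "Triangular"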
bot_method: str Applied extrapolation method for bottom of profile: Power, No Slip exponent: float Applied exponent for power of no slip methods """ - + def __init__(self): """Initialize class and set defaults.""" - self.top_method_orig = None # Extrapolation method for top of profile: Power, Constant, 3-Point - self.bot_method_orig = None # Extrapolation method for bottom of profile: Power, No Slip - self.exponent_orig = None # Exponent for power of no slip methods - self.top_method = None # Extrapolation method for top of profile: Power, Constant, 3-Point - self.bot_method = None # Extrapolation method for bottom of profile: Power, No Slip - self.exponent = None # Exponent for power of no slip methods - + self.top_method_orig = None + self.bot_method_orig = None + self.exponent_orig = None + self.top_method = None + self.bot_method = None + self.exponent = None + def populate_data(self, top, bot, exp): """Store data in class variables. @@ -46,7 +48,8 @@ class ExtrapData(object): self.exponent = float(exp) def populate_from_qrev_mat(self, transect): - """Populates the object using data from previously saved QRev Matlab file. + """Populates the object using data from previously saved QRev Matlab + file. Parameters ---------- @@ -54,7 +57,7 @@ class ExtrapData(object): Matlab data structure obtained from sio.loadmat """ - if hasattr(transect, 'extrap'): + if hasattr(transect, "extrap"): self.top_method_orig = transect.extrap.topMethodOrig self.bot_method_orig = transect.extrap.botMethodOrig self.exponent_orig = transect.extrap.exponentOrig @@ -77,7 +80,7 @@ class ExtrapData(object): self.top_method = top self.bot_method = bot self.exponent = exp - + def set_property(self, prop, setting): """Allows setting any property. diff --git a/Classes/ExtrapQSensitivity.py b/Classes/ExtrapQSensitivity.py index 75be866..886890e 100644 --- a/Classes/ExtrapQSensitivity.py +++ b/Classes/ExtrapQSensitivity.py @@ -3,7 +3,8 @@ from Classes.QComp import QComp class ExtrapQSensitivity(object): - """Class to compute the sensitivity of the discharge to various extrapolation methods. + """Class to compute the sensitivity of the discharge to various + extrapolation methods. 
Attributes ---------- @@ -50,80 +51,90 @@ class ExtrapQSensitivity(object): q_pp_opt_list: list List of single transect discharges base on optimized power-power law q_cns_list: list - List of single transect discharges base on default 1/6 constant no slip law + List of single transect discharges base on default 1/6 constant no + slip law q_cns_opt_list: list - List of single transect discharges base on optimized constant no slip law + List of single transect discharges base on optimized constant no + slip law q_3p_ns_list: list List of single transect discharges base on default 3pt no slip q_3p_ns_opt_list: list List of single transect discharges base on optimized 3pt no slip q_top_pp_list: list - List of single transect top discharges base on default 1/6 power-power law + List of single transect top discharges base on default 1/6 + power-power law q_top_pp_opt_list: list - List of single transect top discharges base on optimized power-power law + List of single transect top discharges base on optimized power-power + law q_top_cns_list: list - List of single transect top discharges base on default 1/6 constant no slip law + List of single transect top discharges base on default 1/6 constant + no slip law q_top_cns_opt_list: list - List of single transect top discharges base on optimized constant no slip law + List of single transect top discharges base on optimized constant no + slip law q_top_3p_ns_list: list List of single transect top discharges base on default 3pt no slip q_top_3p_ns_opt_list: list List of single transect top discharges base on optimized 3pt no slip q_bot_pp_list: list - List of single transect bottom discharges base on default 1/6 power-power law + List of single transect bottom discharges base on default 1/6 + power-power law q_bot_pp_opt_list: list - List of single transect bottom discharges base on optimized power-power law + List of single transect bottom discharges base on optimized + power-power law q_bot_cns_list: list - List of single transect bottom discharges base on default 1/6 constant no slip law + List of single transect bottom discharges base on default 1/6 + constant no slip law q_bot_cns_opt_list: list - List of single transect bottom discharges base on optimized constant no slip law + List of single transect bottom discharges base on optimized constant + no slip law q_bot_3p_ns_list: list List of single transect bottom discharges base on default 3pt no slip q_bot_3p_ns_opt_list: list List of single transect bottom discharges base on optimized 3pt no slip """ - + def __init__(self): """Initialize object and instance variables.""" - self.q_pp_mean = None # Discharge power power 1/6 - self.q_pp_opt_mean = None # discharge power power optimized - self.q_cns_mean = None # Discharge constant no RoutingSlipDelivery - self.q_cns_opt_mean = None # Discharge constant optimized no slip - self.q_3p_ns_mean = None # Discharge 3-pt no slip - self.q_3p_ns_opt_mean = None # Discharge 3-pt optimized no slip - self.q_pp_per_diff = None # Power power 1/6 difference from reference - self.q_pp_opt_per_diff = None # Power power optimized percent difference from reference - self.q_cns_per_diff = None # Constant no slip percent difference from reference - self.q_cns_opt_per_diff = None # Constant optimized no slip percent difference from reference - self.q_3p_ns_per_diff = None # 3-point no skip percent difference from reference - self.q_3p_ns_opt_per_diff = None # 3-point optimized no slip percent difference from reference - self.pp_exp = None # Optimized power power 
exponent - self.ns_exp = None # Optimized no slip Exponent - self.man_top = None # Manually specified top method - self.man_bot = None # Manually specified bottom method - self.man_exp = None # Manually specified exponent - self.q_man_mean = None # Mean discharge for manually specified extrapolations - self.q_man_per_diff = None # Manually specified extrapolations percent difference from reference - self.q_pp_list = [] # List of single transect discharges base on default 1/6 power-power law - self.q_pp_opt_list = [] # List of single transect discharges base on optimized power-power law - self.q_cns_list = [] # List of single transect discharges base on default 1/6 constant no slip law - self.q_cns_opt_list = [] # List of single transect discharges base on optimized constant no slip law - self.q_3p_ns_list = [] # List of single transect discharges base on default 3pt no slip - self.q_3p_ns_opt_list = [] # List of single transect discharges base on optimized 3pt no slip - self.q_top_pp_list = [] # List of single transect top discharges base on default 1/6 power-power law - self.q_top_pp_opt_list = [] # List of single transect top discharges base on optimized power-power law - self.q_top_cns_list = [] # List of single transect top discharges base on default 1/6 constant no slip law - self.q_top_cns_opt_list = [] # List of single transect top discharges base on optimized constant no slip law - self.q_top_3p_ns_list = [] # List of single transect top discharges base on default 3pt no slip - self.q_top_3p_ns_opt_list = [] # List of single transect top discharges base on optimized 3pt no slip - self.q_bot_pp_list = [] # List of single transect bottom discharges base on default 1/6 power-power law - self.q_bot_pp_opt_list = [] # List of single transect bottom discharges base on optimized power-power law - self.q_bot_cns_list = [] # List of single transect bottom discharges base on default 1/6 constant no slip law - self.q_bot_cns_opt_list = [] # List of single transect bottom discharges base on optimized constant no slip law - self.q_bot_3p_ns_list = [] # List of single transect bottom discharges base on default 3pt no slip - self.q_bot_3p_ns_opt_list = [] # List of single transect bottom discharges base on optimized 3pt no slip - + self.q_pp_mean = None + self.q_pp_opt_mean = None + self.q_cns_mean = None + self.q_cns_opt_mean = None + self.q_3p_ns_mean = None + self.q_3p_ns_opt_mean = None + self.q_pp_per_diff = None + self.q_pp_opt_per_diff = None + self.q_cns_per_diff = None + self.q_cns_opt_per_diff = None + self.q_3p_ns_per_diff = None + self.q_3p_ns_opt_per_diff = None + self.pp_exp = None + self.ns_exp = None + self.man_top = None + self.man_bot = None + self.man_exp = None + self.q_man_mean = None + self.q_man_per_diff = None + self.q_pp_list = [] + self.q_pp_opt_list = [] + self.q_cns_list = [] + self.q_cns_opt_list = [] + self.q_3p_ns_list = [] + self.q_3p_ns_opt_list = [] + self.q_top_pp_list = [] + self.q_top_pp_opt_list = [] + self.q_top_cns_list = [] + self.q_top_cns_opt_list = [] + self.q_top_3p_ns_list = [] + self.q_top_3p_ns_opt_list = [] + self.q_bot_pp_list = [] + self.q_bot_pp_opt_list = [] + self.q_bot_cns_list = [] + self.q_bot_cns_opt_list = [] + self.q_bot_3p_ns_list = [] + self.q_bot_3p_ns_opt_list = [] + def populate_data(self, transects, extrap_fits): """Compute means and percent differences. 
@@ -159,37 +170,68 @@ class ExtrapQSensitivity(object): q_3p_ns_bot = [] q_3p_ns_opt_bot = [] - # Compute discharges for each transect for possible extrapolation combinations + # Compute discharges for each transect for possible extrapolation + # combinations for transect in transects: if transect.checked: q = QComp() - q.populate_data(data_in=transect, top_method='Power', bot_method='Power', exponent=0.1667) + q.populate_data( + data_in=transect, + top_method="Power", + bot_method="Power", + exponent=0.1667, + ) q_pp.append(q.total) q_pp_top.append(q.top) q_pp_bot.append(q.bottom) - q.populate_data(data_in=transect, top_method='Power', bot_method='Power', exponent=self.pp_exp) + q.populate_data( + data_in=transect, + top_method="Power", + bot_method="Power", + exponent=self.pp_exp, + ) q_pp_opt.append(q.total) q_pp_opt_top.append(q.top) q_pp_opt_bot.append(q.bottom) - q.populate_data(data_in=transect, top_method='Constant', bot_method='No Slip', exponent=0.1667) + q.populate_data( + data_in=transect, + top_method="Constant", + bot_method="No Slip", + exponent=0.1667, + ) q_cns.append(q.total) q_cns_top.append(q.top) q_cns_bot.append(q.bottom) - q.populate_data(data_in=transect, top_method='Constant', bot_method='No Slip', exponent=self.ns_exp) + q.populate_data( + data_in=transect, + top_method="Constant", + bot_method="No Slip", + exponent=self.ns_exp, + ) q_cns_opt.append(q.total) q_cns_opt_top.append(q.top) q_cns_opt_bot.append(q.bottom) - q.populate_data(data_in=transect, top_method='3-Point', bot_method='No Slip', exponent=0.1667) + q.populate_data( + data_in=transect, + top_method="3-Point", + bot_method="No Slip", + exponent=0.1667, + ) q_3p_ns.append(q.total) q_3p_ns_top.append(q.top) q_3p_ns_bot.append(q.bottom) - q.populate_data(data_in=transect, top_method='3-Point', bot_method='No Slip', exponent=self.ns_exp) + q.populate_data( + data_in=transect, + top_method="3-Point", + bot_method="No Slip", + exponent=self.ns_exp, + ) q_3p_ns_opt.append(q.total) q_3p_ns_opt_top.append(q.top) q_3p_ns_opt_bot.append(q.bottom) @@ -229,7 +271,8 @@ class ExtrapQSensitivity(object): self.compute_percent_diff(extrap_fits=extrap_fits, transects=transects) def populate_from_qrev_mat(self, mat_data): - """Populates the object using data from previously saved QRev Matlab file. + """Populates the object using data from previously saved QRev Matlab + file. 
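The reformatted loop above calls QComp.populate_data six times per transect, once per extrapolation combination. The same structure can be read as a table of (top, bottom, exponent) tuples; the sketch below uses a hypothetical discharge() stand-in rather than the real QComp, just to show the shape of the computation:

    def discharge(top, bot, exponent):
        """Hypothetical stand-in for QComp.populate_data followed by q.total."""
        base = {"Power": 100.0, "Constant": 99.0, "3-Point": 98.5}[top]
        return base * (1.0 + (exponent - 0.1667))

    pp_exp, ns_exp = 0.21, 0.05   # optimized exponents from the extrapolation fits
    combos = {
        "q_pp":        ("Power",    "Power",   0.1667),
        "q_pp_opt":    ("Power",    "Power",   pp_exp),
        "q_cns":       ("Constant", "No Slip", 0.1667),
        "q_cns_opt":   ("Constant", "No Slip", ns_exp),
        "q_3p_ns":     ("3-Point",  "No Slip", 0.1667),
        "q_3p_ns_opt": ("3-Point",  "No Slip", ns_exp),
    }
    totals = {name: discharge(*args) for name, args in combos.items()}
    print(totals)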
Parameters ---------- @@ -237,7 +280,7 @@ class ExtrapQSensitivity(object): Matlab data structure obtained from sio.loadmat """ - if hasattr(mat_data, 'qSensitivity'): + if hasattr(mat_data, "qSensitivity"): self.q_pp_mean = mat_data.qSensitivity.qPPmean self.q_pp_opt_mean = mat_data.qSensitivity.qPPoptmean self.q_cns_mean = mat_data.qSensitivity.qCNSmean @@ -246,7 +289,7 @@ class ExtrapQSensitivity(object): self.q_3p_ns_opt_mean = mat_data.qSensitivity.q3pNSoptmean # For compatibility with older QRev.mat files - if hasattr(mat_data.qSensitivity, 'qPPperdiff'): + if hasattr(mat_data.qSensitivity, "qPPperdiff"): self.q_pp_per_diff = mat_data.qSensitivity.qPPperdiff else: self.q_pp_per_diff = np.nan @@ -268,7 +311,7 @@ class ExtrapQSensitivity(object): self.q_man_per_diff = mat_data.qSensitivity.qManperdiff # Add compatibility for Oursin uncertainty model - if hasattr(mat_data.qSensitivity, 'q_pp_list'): + if hasattr(mat_data.qSensitivity, "q_pp_list"): self.q_pp_list = mat_data.qSensitivity.q_pp_list self.q_pp_opt_list = mat_data.qSensitivity.q_pp_opt_list self.q_cns_list = mat_data.qSensitivity.q_cns_list @@ -305,10 +348,11 @@ class ExtrapQSensitivity(object): self.q_bot_cns_list = [] self.q_bot_cns_opt_list = [] self.q_bot_3p_ns_list = [] - self.q_bot_3p_ns_opt_list =[] + self.q_bot_3p_ns_opt_list = [] def compute_percent_diff(self, extrap_fits, transects=None): - """Computes the percent difference for each of the extrapolation options as compared to selected method. + """Computes the percent difference for each of the extrapolation + options as compared to selected method. Parameters ---------- @@ -318,7 +362,7 @@ class ExtrapQSensitivity(object): List of TransectData objects """ # Determine which mean is the reference - if extrap_fits[-1].fit_method == 'Manual': + if extrap_fits[-1].fit_method == "Manual": self.man_top = extrap_fits[-1].top_method self.man_bot = extrap_fits[-1].bot_method self.man_exp = extrap_fits[-1].exponent @@ -331,10 +375,12 @@ class ExtrapQSensitivity(object): q = QComp() checked.append(transect.checked) - q.populate_data(data_in=transect, - top_method=self.man_top, - bot_method=self.man_bot, - exponent=self.man_exp) + q.populate_data( + data_in=transect, + top_method=self.man_top, + bot_method=self.man_bot, + exponent=self.man_exp, + ) q_man.append(q) container = [] for index, item in enumerate(q_man): @@ -344,12 +390,12 @@ class ExtrapQSensitivity(object): reference_mean = self.q_man_mean else: - if extrap_fits[-1].top_method_auto == 'Power': + if extrap_fits[-1].top_method_auto == "Power": if np.abs(extrap_fits[-1].exponent_auto - 0.1667) < 0.0001: reference_mean = self.q_pp_mean else: reference_mean = self.q_pp_opt_mean - elif extrap_fits[-1].top_method_auto == 'Constant': + elif extrap_fits[-1].top_method_auto == "Constant": if np.abs(extrap_fits[-1].exponent_auto - 0.1667) < 0.0001: reference_mean = self.q_cns_mean else: @@ -362,11 +408,23 @@ class ExtrapQSensitivity(object): # Compute percent difference from reference self.q_pp_per_diff = ((self.q_pp_mean - reference_mean) / reference_mean) * 100 - self.q_pp_opt_per_diff = ((self.q_pp_opt_mean - reference_mean) / reference_mean) * 100 - self.q_cns_per_diff = ((self.q_cns_mean - reference_mean) / reference_mean) * 100 - self.q_cns_opt_per_diff = ((self.q_cns_opt_mean - reference_mean) / reference_mean) * 100 - self.q_3p_ns_per_diff = ((self.q_3p_ns_mean - reference_mean) / reference_mean) * 100 - self.q_3p_ns_opt_per_diff = ((self.q_3p_ns_opt_mean - reference_mean) / reference_mean) * 100 - - if 
extrap_fits[-1].fit_method == 'Manual': - self.q_man_per_diff = ((self.q_man_mean - reference_mean) / reference_mean) * 100 + self.q_pp_opt_per_diff = ( + (self.q_pp_opt_mean - reference_mean) / reference_mean + ) * 100 + self.q_cns_per_diff = ( + (self.q_cns_mean - reference_mean) / reference_mean + ) * 100 + self.q_cns_opt_per_diff = ( + (self.q_cns_opt_mean - reference_mean) / reference_mean + ) * 100 + self.q_3p_ns_per_diff = ( + (self.q_3p_ns_mean - reference_mean) / reference_mean + ) * 100 + self.q_3p_ns_opt_per_diff = ( + (self.q_3p_ns_opt_mean - reference_mean) / reference_mean + ) * 100 + + if extrap_fits[-1].fit_method == "Manual": + self.q_man_per_diff = ( + (self.q_man_mean - reference_mean) / reference_mean + ) * 100 diff --git a/Classes/FitData.py b/Classes/FitData.py index da61b86..41dfb06 100644 --- a/Classes/FitData.py +++ b/Classes/FitData.py @@ -4,7 +4,8 @@ from scipy.stats import t class FitData(object): - """Class to compute top and bottom extrapolation methods and associated statistics. + """Class to compute top and bottom extrapolation methods and associated + statistics. Data required for the constructor method include data of class NormData, threshold for the minimum number of points for a valid @@ -46,20 +47,20 @@ class FitData(object): def __init__(self): """Initialize object and instance variables.""" - self.file_name = None # Name of transect file - self.top_method = 'Power' # Top extrapolation method - self.bot_method = 'Power' # Bottom extrapolation method - self.coef = 0 # Power fit coefficient - self.exponent = 0.1667 # Power fit exponent - self.u = None # Fit values of the variable - self.u_auto = None # Fit values from automatic fit - self.z_auto = None # z values for automtic fit - self.z = None # Distance from the streambed for fit variable - self.exp_method = 'Power' # Method to determine exponent (default, optimize, or manual) - self.data_type = None # Type of data (velocity or unit discharge) - self.exponent_95_ci = 0 # 95% confidence intervals for optimized exponent - self.residuals = np.array([]) # Residuals from fit - self.r_squared = 0 # R squared of model + self.file_name = None + self.top_method = "Power" + self.bot_method = "Power" + self.coef = 0 + self.exponent = 0.1667 + self.u = None + self.u_auto = None + self.z_auto = None + self.z = None + self.exp_method = "Power" + self.data_type = None + self.exponent_95_ci = 0 + self.residuals = np.array([]) + self.r_squared = 0 def populate_data(self, norm_data, top, bot, method, exponent=None): """Computes fit and stores associated data. @@ -73,7 +74,8 @@ class FitData(object): bot: str Bottom extrapolation method method: - Method used to define the exponent (default, optimize, or manual), default is 1/6. + Method used to define the exponent (default, optimize, or manual), + default is 1/6. exponent: Exponent for power or no slip fit methods. """ @@ -93,26 +95,26 @@ class FitData(object): if len(idxz) > 0: idx_power = idxz - # Create arrays for data fitting - # Select median values to use in extrapolation methods selected and create - # methods selected and create fir output data arrays + # Create arrays for data fitting. 
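Stepping back to the percent-difference block at the top of this chunk: every sensitivity number reduces to the same formula, (candidate mean - reference mean) / reference mean * 100, where the reference is whichever mean matches the selected fit. A two-line worked example:

    def per_diff(candidate_mean, reference_mean):
        """Percent difference of a candidate discharge mean from the reference."""
        return ((candidate_mean - reference_mean) / reference_mean) * 100.0

    print(per_diff(102.0, 100.0))   # 2.0  -> candidate reads 2 % high
    print(per_diff(97.5, 100.0))    # -2.5 -> candidate reads 2.5 % low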
+ # Select median values to use in extrapolation methods selected + # and create methods selected and create fir output data arrays # If bottom is No Slip, Power at top is not allowed - if bot == 'No Slip': - if top == 'Power': - top = 'Constant' + if bot == "No Slip": + if top == "Power": + top = "Constant" - fit_combo = ''.join([top, bot]) - if fit_combo == 'PowerPower': + fit_combo = "".join([top, bot]) + if fit_combo == "PowerPower": self.z = np.arange(0, 1.01, 0.01) zc = np.nan uc = np.nan - elif fit_combo == 'ConstantPower': + elif fit_combo == "ConstantPower": self.z = np.arange(0, np.max(avg_z[idxz]), 0.01) self.z = np.hstack([self.z, np.nan]) zc = np.arange(np.max(avg_z[idxz]) + 0.01, 1.0, 0.01) uc = np.tile(y[idxz[0]], zc.shape) - elif fit_combo == '3-PointPower': + elif fit_combo == "3-PointPower": self.z = np.arange(0, np.max(avg_z[idxz]), 0.01) self.z = np.hstack([self.z, np.nan]) # If less than 6 bins use constant at the top @@ -125,17 +127,19 @@ class FitData(object): # zc = zc.T uc = zc * p[0] + p[1] - elif fit_combo == 'ConstantNo Slip': + elif fit_combo == "ConstantNo Slip": # Optimize constant / no slip if sufficient cells are available - if method.lower() == 'optimize': - idx = idxz[int(1+len(idxz) - np.floor(len(avg_z[idxz]) / 3) - 1)::] + if method.lower() == "optimize": + idx = idxz[ + int(1 + len(idxz) - np.floor(len(avg_z[idxz]) / 3) - 1) : : + ] if len(idx) < 4: - method = 'default' + method = "default" # Compute Constant / No Slip using WinRiver II and # RiverSurveyor Live default cells else: - idx = np.where(avg_z[idxz] <= .2)[0] + idx = np.where(avg_z[idxz] <= 0.2)[0] if len(idx) < 1: idx = idxz[-1] else: @@ -149,17 +153,19 @@ class FitData(object): zc = np.arange(np.max(avg_z[idxz]) + 0.01, 1.00, 0.01) uc = np.tile(y[idxz[0]], zc.shape) - elif fit_combo == '3-PointNo Slip': + elif fit_combo == "3-PointNo Slip": # Optimize 3-Point / no slip if sufficient cells are available - if method.lower() == 'optimize': - idx = idxz[int(1 + len(idxz) - np.floor(len(avg_z[idxz])) / 3) - 1::] + if method.lower() == "optimize": + idx = idxz[ + int(1 + len(idxz) - np.floor(len(avg_z[idxz])) / 3) - 1 : : + ] if len(idx) < 4: - method = 'default' + method = "default" # Compute 3-Point / No Slip using WinRiver II and # RiverSurveyor Live default cells else: - idx = np.where(avg_z[idxz] <= .2)[0] + idx = np.where(avg_z[idxz] <= 0.2)[0] if len(idx) < 1: idx = idxz[-1] else: @@ -189,77 +195,87 @@ class FitData(object): self.exponent = np.nan self.exponent_95_ci = np.nan self.r_squared = np.nan - fit_func = 'linear' + fit_func = "linear" lower_method = method.lower() - if lower_method == 'manual': - fit_func = 'linear' + if lower_method == "manual": + fit_func = "linear" self.exponent = exponent bounds = None p0 = None - elif lower_method == 'default': - fit_func = 'linear' - self.exponent = 1./6. 
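For the ConstantNo Slip and 3-PointNo Slip branches above, optimization is only attempted when the near-bed portion of the profile holds enough cells: roughly the bottom third of the valid cells, with a fall-back to the WinRiver II / RiverSurveyor Live default of cells at normalized height z <= 0.2 when fewer than four are available. A condensed reading of that selection, assuming avg_z is height above the streambed ordered surface to bed:

    import numpy as np

    avg_z = np.array([0.95, 0.85, 0.75, 0.65, 0.55, 0.45, 0.35, 0.25, 0.15, 0.05])
    idxz = np.arange(avg_z.size)              # indices of valid cells

    method = "optimize"
    idx = idxz[idxz.size - idxz.size // 3:]   # bottom third of the profile
    if idx.size < 4:
        method = "default"                    # too few cells to optimize safely
    if method == "default":
        idx = np.where(avg_z[idxz] <= 0.2)[0]  # default near-bed cells
        if idx.size < 1:
            idx = np.array([idxz[-1]])         # last resort: deepest cell only
    print(method, idx)                         # default [8 9]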
+ elif lower_method == "default": + fit_func = "linear" + self.exponent = 1.0 / 6.0 bounds = None p0 = None - elif lower_method == 'optimize': - fit_func = 'power' + elif lower_method == "optimize": + fit_func = "power" bounds = [lower_bound, upper_bound] strt = yfit[ok_] - p0 = [strt[-1], 1./6] + p0 = [strt[-1], 1.0 / 6] fit_funcs = { - 'linear': lambda x, a: a * x**self.exponent, - 'power': lambda x, a, b: a * x**b + "linear": lambda x, a: a * x**self.exponent, + "power": lambda x, a, b: a * x**b, } if ok_.size > 1: if bounds is not None: - popt, pcov = curve_fit(fit_funcs[fit_func], - zfit, yfit, p0=p0, bounds=bounds) + popt, pcov = curve_fit( + fit_funcs[fit_func], zfit, yfit, p0=p0, bounds=bounds + ) else: - popt, pcov = curve_fit(fit_funcs[fit_func], - zfit, yfit, p0=p0) + popt, pcov = curve_fit(fit_funcs[fit_func], zfit, yfit, p0=p0) # Extract exponent and confidence intervals from fit - if lower_method == 'optimize': + if lower_method == "optimize": self.exponent = popt[1] if self.exponent is None or self.exponent < 0.05: self.exponent = 0.05 if len(zfit[ok_]) > 2: - n = len(zfit) # number of data points + n = len(zfit) # number of data points - t_val = t.ppf(.975, n-2) + t_val = t.ppf(0.975, n - 2) # Get 95% confidence intervals - lower = (popt[-1] - t_val * np.sqrt(np.diag(pcov)[-1])) - upper = (popt[-1] + t_val * np.sqrt(np.diag(pcov)[-1])) + lower = popt[-1] - t_val * np.sqrt(np.diag(pcov)[-1]) + upper = popt[-1] + t_val * np.sqrt(np.diag(pcov)[-1]) self.exponent_95_ci = np.hstack([lower, upper]) # Get the rsquared for the model - ss_tot = np.sum((y[idx_power] - np.mean(yfit))**2) - ss_res = np.sum((y[idx_power] - fit_funcs[fit_func](zfit, *popt))**2) - self.r_squared = 1 - (ss_res/ss_tot) + ss_tot = np.sum((y[idx_power] - np.mean(yfit)) ** 2) + ss_res = np.sum( + (y[idx_power] - fit_funcs[fit_func](zfit, *popt)) ** 2 + ) + self.r_squared = 1 - (ss_res / ss_tot) else: self.exponent_95_ci = [np.nan, np.nan] self.r_squared = np.nan # Fit power curve to appropriate data - self.coef = ((self.exponent + 1) * 0.05 * np.nansum(y[idx_power])) / \ - np.nansum(((avg_z[idx_power] + (0.5 * 0.05))**(self.exponent + 1) - - ((avg_z[idx_power] - (0.5 * 0.05))**(self.exponent + 1)))) + self.coef = ( + (self.exponent + 1) * 0.05 * np.nansum(y[idx_power]) + ) / np.nansum( + ( + (avg_z[idx_power] + (0.5 * 0.05)) ** (self.exponent + 1) + - ((avg_z[idx_power] - (0.5 * 0.05)) ** (self.exponent + 1)) + ) + ) # Compute residuals - self.residuals = y[idx_power] - self.coef * avg_z[idx_power]**self.exponent + self.residuals = ( + y[idx_power] - self.coef * avg_z[idx_power] ** self.exponent + ) if self.residuals is None: self.residuals = np.array([]) - # Compute values (velocity or discharge) based on exponent and compute coefficient + # Compute values (velocity or discharge) based on exponent and + # compute coefficient self.u = self.coef * self.z**self.exponent if type(zc) == np.ndarray: self.u = np.append(self.u, uc) diff --git a/Classes/GPSData.py b/Classes/GPSData.py index 1ca09b5..a48a98c 100644 --- a/Classes/GPSData.py +++ b/Classes/GPSData.py @@ -4,7 +4,7 @@ from MiscLibs.common_functions import azdeg2rad, pol2cart, nans, nan_less class GPSData(object): - """Class containing the raw GPS data and algorithms to convert + """Class containing the raw GPS data and algorithms to convert that raw data to boat velocity. 
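Backing up to the optimize branch of FitData above: the fit is u = a * z**b via scipy.optimize.curve_fit, a 95 % confidence interval on the exponent is built from the covariance matrix with a Student-t multiplier, and R-squared is computed against the fitted values. A self-contained sketch on synthetic data; the bounds and starting values here are illustrative, not the ones computed upstream:

    import numpy as np
    from scipy.optimize import curve_fit
    from scipy.stats import t

    rng = np.random.default_rng(1)
    z = np.linspace(0.05, 0.95, 20)                        # normalized depths
    u = 1.2 * z ** 0.1667 + rng.normal(0.0, 0.01, z.size)  # synthetic profile

    def power(x, a, b):
        return a * x ** b

    popt, pcov = curve_fit(power, z, u, p0=[u[-1], 1.0 / 6.0],
                           bounds=([0.0, 0.05], [np.inf, 1.0]))

    t_val = t.ppf(0.975, z.size - 2)           # two-sided 95 % multiplier
    half_width = t_val * np.sqrt(np.diag(pcov)[-1])
    exponent_95_ci = (popt[1] - half_width, popt[1] + half_width)

    ss_res = np.sum((u - power(z, *popt)) ** 2)
    ss_tot = np.sum((u - np.mean(u)) ** 2)
    r_squared = 1.0 - ss_res / ss_tot
    print(popt[1], exponent_95_ci, r_squared)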
Attributes @@ -51,7 +51,8 @@ class GPSData(object): ext_gga_utc: np.array(float) UTC time, hhmmss.ss for each ensemble [ensemble] ext_gga_serial_time: np.array(float) - UTC time of gga data in seconds past midnight for each ensemble [ensemble] + UTC time of gga data in seconds past midnight for each ensemble + [ensemble] ext_gga_num_sats: np.array(float) Number of satellites for each ensemble [ensemble] ext_vtg_course_deg: np.array(float) @@ -61,9 +62,11 @@ class GPSData(object): # User specifications: gga_position_method: str - Method used to process gga data for position ('End', 'Average' 'External') + Method used to process gga data for position ('End', 'Average' + 'External') gga_velocity_method: str - Method used to process gga data for velocity ('End','Average' 'External') + Method used to process gga data for velocity ('End','Average' + 'External') vtg_velocity_method: str Method used to process vtg data for velocity ('Average' 'External) @@ -75,77 +78,103 @@ class GPSData(object): utm_ens_m: np.array(float) UTM position from processed gga data, in m [2,ensemble] gga_velocity_ens_mps: np.array(float) - Boat velocity components computed from gga data, in m/s [2,ensemble] + Boat velocity components computed from gga data, in m/s [2, + ensemble] gga_serial_time_ens: np.array(float) UTC time of gga data, in seconds past midnight [ensemble] vtg_velocity_ens_mps: np.array(float) - Boat velocity components computed from vtg data, in m/s [2,ensemble] + Boat velocity components computed from vtg data, in m/s [2, + ensemble] per_good_ens: np.array(float) - Percentage of available data used to compute ensemble value [ensemble] + Percentage of available data used to compute ensemble value [ + ensemble] hdop_ens: np.array(float) HDOP for each ensemble using velocity method [ensemble] num_sats_ens: np.array(float) - Number of satellites for each ensemble, using velocity method [ensemble] + Number of satellites for each ensemble, using velocity method [ + ensemble] altitude_ens_m: np.array(float) Altitude for each ensemble, using velocity method [ensemble] diff_qual_ens: np.array(float) - Differential quality for each ensemble, using velocity method [ensemble] + Differential quality for each ensemble, using velocity method [ + ensemble] """ - + def __init__(self): - """Initialize instance variables. 
- """ - + """Initialize instance variables.""" + # Raw properties - self.raw_gga_lat_deg = None # self.raw_ latitude, in degress [ensemble,n] - self.raw_gga_lon_deg = None # self.raw_ longitude, in degrees [ensemble,n] - self.raw_gga_altitude_m = None # self.raw_ altitude in meters, [ensemble,n] - self.raw_gga_differential = None # Differential correction indicator [ensemble,n] - self.raw_gga_hdop = None # Horizontal dilution of precision [ensemble,n] - self.raw_gga_utc = None # UTC time, in hhmmss.ss [ensemble,n] - self.raw_gga_serial_time = None # UTC time of gga data, in seconds past midnight [ensemble,n] - self.raw_gga_num_sats = None # Number of satellites reported in gga sentence [ensemble,n] - self.raw_vtg_course_deg = None # Course, in degress [ensemble,n] - self.raw_vtg_speed_mps = None # Speed, in m/s [ensemble,n] - self.raw_vtg_delta_time = None # vtg delta time, in sec [ensemble,n] - self.raw_vtg_mode_indicator = None # vtg mode indicator [ensemble,n] - self.raw_gga_delta_time = None # gga delta time, in sec [ensemble,n] - + self.raw_gga_lat_deg = None + self.raw_gga_lon_deg = None + self.raw_gga_altitude_m = None + self.raw_gga_differential = None + self.raw_gga_hdop = None + self.raw_gga_utc = None + self.raw_gga_serial_time = None + self.raw_gga_num_sats = None + self.raw_vtg_course_deg = None + self.raw_vtg_speed_mps = None + self.raw_vtg_delta_time = None + self.raw_vtg_mode_indicator = None + self.raw_gga_delta_time = None + # Manufacturer assigned ensemble values - self.ext_gga_lat_deg = None # Raw latitude, in degrees [1,ensemble] - self.ext_gga_lon_deg = None # Raw longitude, in degrees [1,ensemble] - self.ext_gga_altitude_m = None # Raw altitude, in meters [1,ensemble] - self.ext_gga_differential = None # Differential correction indicator [1,ensemble] - self.ext_gga_hdop = None # Horizontal dilution of precision [1,ensemble] - self.ext_gga_utc = None # UTC time, in hhmmss.ss [1, ensemble] - self.ext_gga_serial_time = None # UTC time of gga data, in seconds past midnight [1,ensemble] - self.ext_gga_num_sats = None # Number of satellites reported by software [1,ensemble] - self.ext_vtg_course_deg = None # Course, in degress [1, ensemble] - self.ext_vtg_speed_mps = None # Speed, in m/s [1, ensemble] - + self.ext_gga_lat_deg = None + self.ext_gga_lon_deg = None + self.ext_gga_altitude_m = None + self.ext_gga_differential = None + self.ext_gga_hdop = None + self.ext_gga_utc = None + self.ext_gga_serial_time = None + self.ext_gga_num_sats = None + self.ext_vtg_course_deg = None + self.ext_vtg_speed_mps = None + # User specification - self.gga_position_method = None # Method used to process gga data for position ('End', 'Average' 'External') - self.gga_velocity_method = None # Method used to process gga data for velocity ('End','Average' 'External') - self.vtg_velocity_method = None # Method used to process vtg data for velocity ('Average' 'External) - + self.gga_position_method = None + self.gga_velocity_method = None + self.vtg_velocity_method = None + # Computed properties for ensembles - self.gga_lat_ens_deg = None # Processed latitude in degrees, [ensemble] - self.gga_lon_ens_deg = None # Processed longitude in degrees, [ensemble] - self.utm_ens_m = None # UTM position from processed gga data, [2,ensemble] - self.gga_velocity_ens_mps = None # Boat velocity computed from gga data [2,ensemble] - self.gga_serial_time_ens = None # UTC time of gga data in seconds past midnight, [ensemble] - self.vtg_velocity_ens_mps = None # Boat velocity computed from vtg data [2,ensemble] 
- self.per_good_ens = None # Percentage of available data used to compute ensemble value [ensemble] - self.hdop_ens = None # HDOP for each ensemble using velocity method [ensemble] - self.num_sats_ens = None # Number of satellites for each ensemble, using velocity method [ensemble] - self.altitude_ens_m = None # Altitude for each ensemble, using velocity method [ensemble] - self.diff_qual_ens = None # Differential quality for each ensemble, using velocity method [ensemble] - - def populate_data(self, raw_gga_utc, raw_gga_lat, raw_gga_lon, raw_gga_alt, raw_gga_diff, - raw_gga_hdop, raw_gga_num_sats, raw_gga_delta_time, raw_vtg_course, raw_vtg_speed, - raw_vtg_delta_time, raw_vtg_mode_indicator, ext_gga_utc, ext_gga_lat, ext_gga_lon, ext_gga_alt, - ext_gga_diff, ext_gga_hdop, ext_gga_num_sats, ext_vtg_course, ext_vtg_speed, - gga_p_method, gga_v_method, vtg_method): + self.gga_lat_ens_deg = None + self.gga_lon_ens_deg = None + self.utm_ens_m = None + self.gga_velocity_ens_mps = None + self.gga_serial_time_ens = None + self.vtg_velocity_ens_mps = None + self.per_good_ens = None + self.hdop_ens = None + self.num_sats_ens = None + self.altitude_ens_m = None + self.diff_qual_ens = None + + def populate_data( + self, + raw_gga_utc, + raw_gga_lat, + raw_gga_lon, + raw_gga_alt, + raw_gga_diff, + raw_gga_hdop, + raw_gga_num_sats, + raw_gga_delta_time, + raw_vtg_course, + raw_vtg_speed, + raw_vtg_delta_time, + raw_vtg_mode_indicator, + ext_gga_utc, + ext_gga_lat, + ext_gga_lon, + ext_gga_alt, + ext_gga_diff, + ext_gga_hdop, + ext_gga_num_sats, + ext_vtg_course, + ext_vtg_speed, + gga_p_method, + gga_v_method, + vtg_method, + ): """Store and process provided data in GPSData class. Parameters @@ -193,9 +222,11 @@ class GPSData(object): ext_vtg_speed: np.array(float) Speed for each ensemble, in m/s [ensemble] gga_p_method: str - Method used to process gga data for position ('End', 'Average' 'External') + Method used to process gga data for position ('End', 'Average' + 'External') gga_v_method: str - Method used to process gga data for velocity ('End','Average' 'External') + Method used to process gga data for velocity ('End','Average' + 'External') vtg_method: str Method used to process vtg data for velocity ('Average' 'External) """ @@ -206,23 +237,31 @@ class GPSData(object): self.raw_gga_serial_time = np.tile([np.nan], raw_gga_lat.shape) else: self.raw_gga_utc = raw_gga_utc - self.raw_gga_serial_time = np.floor(raw_gga_utc / 10000) * 3600 \ - + np.floor(np.mod(raw_gga_utc, 10000, where=~np.isnan(raw_gga_utc)) / 100) \ - * 60 + np.mod(raw_gga_utc, 100, where=~np.isnan(raw_gga_utc)) + self.raw_gga_serial_time = ( + np.floor(raw_gga_utc / 10000) * 3600 + + np.floor( + np.mod(raw_gga_utc, 10000, where=~np.isnan(raw_gga_utc)) / 100 + ) + * 60 + + np.mod(raw_gga_utc, 100, where=~np.isnan(raw_gga_utc)) + ) self.raw_gga_lat_deg = raw_gga_lat self.raw_gga_lon_deg = raw_gga_lon - self.raw_gga_lat_deg[np.where(np.logical_and((self.raw_gga_lat_deg == 0), - (self.raw_gga_lon_deg == 0)))] = np.nan + self.raw_gga_lat_deg[ + np.where( + np.logical_and((self.raw_gga_lat_deg == 0), (self.raw_gga_lon_deg == 0)) + ) + ] = np.nan self.raw_gga_lat_deg[nan_less(raw_gga_diff, 1)] = np.nan self.raw_gga_lon_deg[np.isnan(self.raw_gga_lat_deg)] = np.nan self.raw_gga_altitude_m = raw_gga_alt self.raw_gga_altitude_m[np.isnan(self.raw_gga_lat_deg)] = np.nan - self.raw_gga_differential = raw_gga_diff.astype('float') + self.raw_gga_differential = raw_gga_diff.astype("float") self.raw_gga_differential[np.isnan(self.raw_gga_lat_deg)] 
= np.nan - self.raw_gga_hdop = raw_gga_hdop.astype('float') + self.raw_gga_hdop = raw_gga_hdop.astype("float") self.raw_gga_hdop[np.isnan(self.raw_gga_lat_deg)] = np.nan - self.raw_gga_num_sats = raw_gga_num_sats.astype('float') + self.raw_gga_num_sats = raw_gga_num_sats.astype("float") self.raw_gga_num_sats[np.isnan(self.raw_gga_lat_deg)] = np.nan self.raw_gga_serial_time[np.isnan(self.raw_gga_lat_deg)] = np.nan @@ -234,8 +273,13 @@ class GPSData(object): self.raw_vtg_course_deg = raw_vtg_course self.raw_vtg_speed_mps = raw_vtg_speed - self.raw_vtg_course_deg[np.where(np.logical_and((self.raw_vtg_course_deg == 0), - (self.raw_vtg_speed_mps == 0)))] = np.nan + self.raw_vtg_course_deg[ + np.where( + np.logical_and( + (self.raw_vtg_course_deg == 0), (self.raw_vtg_speed_mps == 0) + ) + ) + ] = np.nan self.raw_vtg_speed_mps[np.isnan(self.raw_vtg_course_deg)] = np.nan # Delta time is a TRDI only variable @@ -245,7 +289,7 @@ class GPSData(object): self.raw_vtg_delta_time = raw_vtg_delta_time self.raw_vtg_mode_indicator = np.array(raw_vtg_mode_indicator) - + # Assign input data to ensemble values computed by other software self.ext_gga_utc = ext_gga_utc self.ext_gga_lat_deg = ext_gga_lat @@ -254,26 +298,30 @@ class GPSData(object): self.ext_gga_differential = ext_gga_diff self.ext_gga_hdop = ext_gga_hdop self.ext_gga_num_sats = ext_gga_num_sats - self.ext_gga_serial_time = np.floor(np.array(ext_gga_utc) / 10000) * 3600 + \ - np.floor(np.mod(ext_gga_utc, 10000) / 100) * 60 + np.mod(ext_gga_utc, 100) + self.ext_gga_serial_time = ( + np.floor(np.array(ext_gga_utc) / 10000) * 3600 + + np.floor(np.mod(ext_gga_utc, 10000) / 100) * 60 + + np.mod(ext_gga_utc, 100) + ) self.ext_vtg_course_deg = ext_vtg_course self.ext_vtg_speed_mps = ext_vtg_speed - + # Assign input data to method properties self.gga_position_method = gga_p_method self.gga_velocity_method = gga_v_method self.vtg_velocity_method = vtg_method - + # If gga data exist compute position and velocity - if np.sum(np.sum(np.isnan(raw_gga_lat) == False)) > 0: + if np.sum(np.sum(np.logical_not(np.isnan(raw_gga_lat)))) > 0: self.process_gga() - + # If vtg data exist compute velocity - if np.sum(np.sum(np.isnan(raw_vtg_speed) == False)) > 0: + if np.sum(np.sum(np.logical_not(np.isnan(raw_vtg_speed)))) > 0: self.process_vtg() def populate_from_qrev_mat(self, transect): - """Populates the object using data from previously saved QRev Matlab file. + """Populates the object using data from previously saved QRev Matlab + file. Parameters ---------- @@ -281,8 +329,8 @@ class GPSData(object): Matlab data structure obtained from sio.loadmat """ - if hasattr(transect, 'gps'): - if hasattr(transect.gps, 'diffQualEns'): + if hasattr(transect, "gps"): + if hasattr(transect.gps, "diffQualEns"): # Raw properties self.raw_gga_lat_deg = transect.gps.rawGGALat_deg @@ -297,10 +345,13 @@ class GPSData(object): self.raw_vtg_speed_mps = transect.gps.rawVTGSpeed_mps self.raw_vtg_delta_time = transect.gps.rawVTGDeltaTime - # Older versions of QRev Matlab files represented the VTG mode differently. + # Older versions of QRev Matlab files represented the VTG + # mode differently. 
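populate_data above folds NMEA GGA timestamps (packed hhmmss.ss floats) into seconds past midnight before any velocity arithmetic. Stripped of the NaN guards used in the patch, the conversion is plain digit arithmetic:

    import numpy as np

    def gga_to_serial(utc_hhmmss):
        """hhmmss.ss -> seconds past midnight, e.g. 132457.50 -> 48297.5."""
        utc = np.asarray(utc_hhmmss, dtype=float)
        hours = np.floor(utc / 10000)
        minutes = np.floor(np.mod(utc, 10000) / 100)
        seconds = np.mod(utc, 100)
        return hours * 3600 + minutes * 60 + seconds

    print(gga_to_serial([132457.50, 235959.99]))   # [48297.5  86399.99]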
try: - if transect.gps.rawVTGModeIndicator.ndim == 2 and \ - type(transect.gps.rawVTGModeIndicator[0][0]) is np.float64: + if ( + transect.gps.rawVTGModeIndicator.ndim == 2 + and type(transect.gps.rawVTGModeIndicator[0][0]) is np.float64 + ): indicator = [] for row in transect.gps.rawVTGModeIndicator.astype(int): row_indicator = [] @@ -308,11 +359,13 @@ class GPSData(object): if 127 > value > 0: row_indicator.append(chr(value)) else: - row_indicator. append('') + row_indicator.append("") indicator.append(row_indicator) self.raw_vtg_mode_indicator = np.array(indicator) else: - raw_vtg_mode_indicator = transect.gps.rawVTGModeIndicator.tolist() + raw_vtg_mode_indicator = ( + transect.gps.rawVTGModeIndicator.tolist() + ) new_list = [] for row in raw_vtg_mode_indicator: new_list.append(list(row)) @@ -364,9 +417,11 @@ class GPSData(object): Parameters ---------- p_setting: str - Specifies method to use for computing positions from gga data (External, End, First, Average, Mindt). + Specifies method to use for computing positions from gga data ( + External, End, First, Average, Mindt). v_setting: str - Specifies method to use for computing velocity from gga data (External, End, First, Average, Mindt). + Specifies method to use for computing velocity from gga data ( + External, End, First, Average, Mindt). """ if p_setting is None: @@ -374,39 +429,39 @@ class GPSData(object): if v_setting is None: v_setting = self.gga_velocity_method - + # Use only valid gga data valid = np.copy(self.raw_gga_lat_deg) valid[np.logical_not(np.isnan(valid))] = 1 valid[np.isnan(valid)] = 0 # valid[valid > 0] = 1 gga_lat_deg = np.copy(self.raw_gga_lat_deg) - gga_lat_deg[valid == False] = np.nan + gga_lat_deg[np.logical_not(valid)] = np.nan gga_lon_deg = np.copy(self.raw_gga_lon_deg) - gga_lon_deg[valid == False] = np.nan + gga_lon_deg[np.logical_not(valid)] = np.nan gga_serial_time = np.copy(self.raw_gga_serial_time) - gga_serial_time[valid == False] = np.nan + gga_serial_time[np.logical_not(valid)] = np.nan gga_delta_time = np.copy(self.raw_gga_delta_time) - gga_delta_time[valid == False] = np.nan + gga_delta_time[np.logical_not(valid)] = np.nan gga_hdop = np.copy(self.raw_gga_hdop) - gga_hdop[valid == False] = np.nan + gga_hdop[np.logical_not(valid)] = np.nan gga_num_sats = np.copy(self.raw_gga_num_sats) - gga_num_sats[valid == False] = np.nan + gga_num_sats[np.logical_not(valid)] = np.nan gga_altitude_m = np.copy(self.raw_gga_altitude_m) - gga_altitude_m[valid == False] = np.nan + gga_altitude_m[np.logical_not(valid)] = np.nan gga_differential = np.copy(self.raw_gga_differential) - gga_differential[valid == False] = np.nan + gga_differential[np.logical_not(valid)] = np.nan n_ensembles = gga_lat_deg.shape[0] # Apply method for computing position of ensemble # Use ensemble data from other software - if p_setting == 'External': + if p_setting == "External": self.gga_lat_ens_deg = self.ext_gga_lat_deg self.gga_lon_ens_deg = self.ext_gga_lon_deg # Uses last valid data for each ensemble - elif p_setting == 'End': + elif p_setting == "End": self.gga_lat_ens_deg = np.tile(np.nan, gga_lat_deg.shape[0]) self.gga_lon_ens_deg = np.tile(np.nan, gga_lon_deg.shape[0]) for n in range(n_ensembles): @@ -419,7 +474,7 @@ class GPSData(object): self.gga_lon_ens_deg[n] = gga_lon_deg[n, idx] # Use first valid data for each ensemble - elif p_setting == 'First': + elif p_setting == "First": self.gga_lat_ens_deg = np.tile(np.nan, gga_lat_deg.shape[0]) self.gga_lon_ens_deg = np.tile(np.nan, gga_lon_deg.shape[0]) for n in range(n_ensembles): @@ 
-428,26 +483,26 @@ class GPSData(object): self.gga_lon_ens_deg[n] = gga_lon_deg[n, idx] # Use minimum delta time - elif p_setting == 'Mindt': + elif p_setting == "Mindt": self.gga_lat_ens_deg = np.tile(np.nan, gga_lat_deg.shape[0]) self.gga_lon_ens_deg = np.tile(np.nan, gga_lon_deg.shape[0]) d_time = np.abs(gga_delta_time) d_time_min = np.nanmin(d_time.T, 0).T - + use = [] for n in range(len(d_time_min)): use.append(np.abs(d_time[n, :]) == d_time_min[n]) - + use = np.array(use) - self.gga_lat_ens_deg = np.tile([np.nan], (len(d_time_min))) + self.gga_lat_ens_deg = np.tile([np.nan], (len(d_time_min))) self.gga_lon_ens_deg = np.tile([np.nan], (len(d_time_min))) for n in range(len(d_time_min)): - idx = np.where(use[n, :] == True)[0] + idx = np.where(use[n, :])[0] if len(idx) > 0: idx = idx[0] self.gga_lat_ens_deg[n] = gga_lat_deg[n, idx] self.gga_lon_ens_deg[n] = gga_lon_deg[n, idx] - + y_utm, x_utm = self.compute_utm(self.gga_lat_ens_deg, self.gga_lon_ens_deg) self.utm_ens_m = (x_utm, y_utm) @@ -459,9 +514,9 @@ class GPSData(object): self.diff_qual_ens = np.tile([np.nan], n_ensembles) self.hdop_ens = np.tile([np.nan], n_ensembles) self.num_sats_ens = np.tile([np.nan], n_ensembles) - + # Apply method for computing velocity of ensemble - if v_setting == 'External': + if v_setting == "External": lat = self.ext_gga_lat_deg lon = self.ext_gga_lon_deg self.gga_serial_time_ens = self.ext_gga_serial_time @@ -469,9 +524,9 @@ class GPSData(object): self.num_sats_ens = self.ext_gga_num_sats self.altitude_ens_m = self.ext_gga_altitude_m self.diff_qual_ens = self.ext_gga_differential - + # Average all position during an ensemble - elif v_setting == 'Average': + elif v_setting == "Average": lat = np.nanmean(gga_lat_deg, 1) lon = np.nanmean(gga_lon_deg, 1) self.gga_serial_time_ens = np.nanmean(gga_serial_time, 1) @@ -479,12 +534,12 @@ class GPSData(object): self.num_sats_ens = np.floor(np.nanmean(gga_num_sats, 1)) self.altitude_ens_m = np.nanmean(self.raw_gga_altitude_m, 1) self.diff_qual_ens = np.floor(np.nanmean(self.raw_gga_differential, 1)) - + # Use the last valid data in an ensemble - elif v_setting == 'End': + elif v_setting == "End": for n in range(n_ensembles): - idx = np.where(np.isnan(gga_lat_deg[n, :]) == False)[0] + idx = np.where(np.logical_not(np.isnan(gga_lat_deg[n, :])))[0] if len(idx) > 0: idx = idx[-1] lat[n] = gga_lat_deg[n, idx] @@ -495,12 +550,12 @@ class GPSData(object): if idx <= len(self.raw_gga_hdop): self.hdop_ens[n] = gga_hdop[n, idx] - + if idx <= len(gga_num_sats[n]): self.num_sats_ens[n] = gga_num_sats[n, idx] # Use the first valid data in an ensemble - elif v_setting == 'First': + elif v_setting == "First": for n in range(n_ensembles): idx = 0 lat[n] = gga_lat_deg[n, idx] @@ -508,23 +563,23 @@ class GPSData(object): self.gga_serial_time_ens[n] = gga_serial_time[n, idx] self.altitude_ens_m[n] = gga_altitude_m[n, idx] self.diff_qual_ens[n] = gga_differential[n, idx] - + if idx <= len(self.raw_gga_hdop): self.hdop_ens[n] = gga_hdop[n, idx] - + if idx <= len(gga_num_sats[n]): self.num_sats_ens[n] = gga_num_sats[n, idx] # Use the minimum delta time to assign data to an ensemble - elif v_setting == 'Mindt': + elif v_setting == "Mindt": d_time = np.abs(gga_delta_time) d_time_min = np.nanmin(d_time, 1) use = [] for n in range(len(d_time_min)): use.append(np.abs(d_time[n, :]) == d_time_min[n]) - use = np.array(use) + use = np.array(use) for n in range(len(d_time_min)): - idx = np.where(use[n, :] == True)[0] + idx = np.where(use[n, :])[0] if len(idx) > 0: idx = idx[0] lat[n] = 
gga_lat_deg[n, idx] @@ -532,15 +587,15 @@ class GPSData(object): self.gga_serial_time_ens[n] = gga_serial_time[n, idx] self.altitude_ens_m[n] = gga_altitude_m[n, idx] self.diff_qual_ens[n] = gga_differential[n, idx] - + if idx <= len(gga_hdop[n]): self.hdop_ens[n] = gga_hdop[n, idx] - + if idx <= len(gga_num_sats[n]): self.num_sats_ens[n] = gga_num_sats[n, idx] - + # Identify valid values - idx_values = np.where(np.isnan(x_utm) == False)[0] + idx_values = np.where(np.logical_not(np.isnan(x_utm)))[0] if len(idx_values) > 1: u, v = self.gga2_vel_trdi(lat, lon, self.gga_serial_time_ens, idx_values) self.gga_velocity_ens_mps = np.tile([np.nan], (2, len(lat))) @@ -550,14 +605,15 @@ class GPSData(object): self.gga_velocity_ens_mps = np.tile([np.nan], (2, len(lat))) def process_vtg(self, v_setting=None): - """Processes raw vtg data to achieve a velocity for each ensemble containing data. + """Processes raw vtg data to achieve a velocity for each ensemble + containing data. Parameters ---------- v_setting: str Method to used to compute ensemble velocity. """ - + # Determine method used to compute ensemble velocity if v_setting is None: v_setting = self.vtg_velocity_method @@ -568,13 +624,13 @@ class GPSData(object): vtg_delta_time = np.copy(self.raw_vtg_delta_time) # Use mode indicator to identify invalid original data - idx = np.where(self.raw_vtg_mode_indicator == 'N') + idx = np.where(self.raw_vtg_mode_indicator == "N") vtg_speed_mps[idx] = np.nan vtg_course_deg[idx] = np.nan vtg_delta_time[idx] = np.nan # Use average velocity for ensemble velocity - if v_setting == 'Average': + if v_setting == "Average": # Compute vtg velocity in x y coordinates from speed and course direction = azdeg2rad(vtg_course_deg) vx, vy = pol2cart(direction, vtg_speed_mps) @@ -585,11 +641,11 @@ class GPSData(object): self.vtg_velocity_ens_mps = np.vstack([vx_mean.T, vy_mean.T]) # Use last velocity for ensemble velocity - elif v_setting == 'End': + elif v_setting == "End": n_ensembles = vtg_speed_mps.shape[0] vtg_vel = nans(n_ensembles) vtg_dir = nans(n_ensembles) - + for n in range(n_ensembles): idx = np.where(~np.isnan(vtg_speed_mps[n, :]))[0] if len(idx) > 0: @@ -598,7 +654,7 @@ class GPSData(object): idx = 0 vtg_vel[n] = vtg_speed_mps[n, idx] vtg_dir[n] = vtg_course_deg[n, idx] - + direction = azdeg2rad(vtg_dir) vx, vy = pol2cart(direction, vtg_vel) vx[np.logical_and(vx == 0, vy == 0)] = np.nan @@ -606,11 +662,11 @@ class GPSData(object): self.vtg_velocity_ens_mps = np.vstack([vx, vy]) # Use first velocity for ensemble velocity - elif v_setting == 'First': + elif v_setting == "First": n_ensembles = vtg_speed_mps.shape[0] vtg_vel = nans(n_ensembles) vtg_dir = nans(n_ensembles) - + for n in range(n_ensembles): idx = 0 vtg_vel[n] = vtg_speed_mps[n, idx] @@ -621,22 +677,23 @@ class GPSData(object): vy[np.isnan(vx)] = np.nan self.vtg_velocity_ens_mps = np.vstack([vx, vy]) - # Use the velocity with the minimum delta time for the ensemble velocity - elif v_setting == 'Mindt': + # Use the velocity with the minimum delta time for the ensemble + # velocity + elif v_setting == "Mindt": d_time = np.abs(vtg_delta_time) # d_time[d_time==0] = np.nan d_time_min = np.nanmin(d_time.T, 0).T - + use = [] vtg_speed = [] vtg_dir = [] - + for n in range(len(d_time_min)): use.append(np.abs(d_time[n, :]) == d_time_min[n]) - + use = np.array(use) for n in range(len(d_time_min)): - idx = np.where(use[n, :] == True)[0] + idx = np.where(use[n, :])[0] if len(idx) > 0: idx = idx[0] vtg_speed.append(vtg_speed_mps[n, idx]) @@ -644,13 +701,13 @@ 
class GPSData(object): else: vtg_speed.append(np.nan) vtg_dir.append(np.nan) - + direction = azdeg2rad(np.array(vtg_dir)) vx, vy = pol2cart(direction, np.array(vtg_speed)) self.vtg_velocity_ens_mps = np.vstack([vx, vy]) # Use velocity selected by external algorithm for ensemble velocity - elif v_setting == 'External': + elif v_setting == "External": direction = azdeg2rad(self.ext_vtg_course_deg) vx, vy = pol2cart(direction, self.ext_vtg_speed_mps) self.vtg_velocity_ens_mps = np.vstack([vx.T, vy.T]) @@ -673,50 +730,60 @@ class GPSData(object): lat2 = np.deg2rad(lat_in) lon2 = np.deg2rad(lon_in) - + y = np.tile([np.nan], lat_in.shape) x = np.tile([np.nan], lon_in.shape) - idx = np.where(np.logical_and((np.isnan(lat2) == False), (np.isnan(lon2) == False))) + idx = np.where( + np.logical_and( + (np.logical_not(np.isnan(lat2))), (np.logical_not(np.isnan(lon2))) + ) + ) for ind in idx[0]: y[ind], x[ind], _, _ = utm.from_latlon(lat2[ind], lon2[ind]) x_utm = x.reshape(lon_in.shape) y_utm = y.reshape(lat_in.shape) - + return y_utm, x_utm @staticmethod def gga2_vel_trdi(lat, lon, t, idx_values): - """Computes velocity from gga data using approach from TRDI WinRiver II. + """Computes velocity from gga data using approach from TRDI WinRiver + II. Parameters ---------- lat: np.array(float) - Latitude for each ensemble used for velocity computations, in degrees. + Latitude for each ensemble used for velocity computations, + in degrees. lon: np.array(float) - Longitude for each ensemble used for velocity computations, in degrees. + Longitude for each ensemble used for velocity computations, + in degrees. t: np.array(float) - GGA time associated with the latitude and longitude selected for velocity computations. + GGA time associated with the latitude and longitude selected for + velocity computations. idx_values: np.array(bool) Index of valid lat-lon data. """ - + u = np.zeros(lat.shape) v = np.zeros(lat.shape) - + for n in range(1, len(idx_values)): - lat1 = lat[idx_values[n-1]] + lat1 = lat[idx_values[n - 1]] lat2 = lat[idx_values[n]] - lon1 = lon[idx_values[n-1]] + lon1 = lon[idx_values[n - 1]] lon2 = lon[idx_values[n]] - t1 = t[idx_values[n-1]] + t1 = t[idx_values[n - 1]] t2 = t[idx_values[n]] lat_avg_rad = ((lat1 + lat2) / 2) * np.pi / 180 sin_lat_avg_rad = np.sin(lat_avg_rad) coefficient = 6378137 * np.pi / 180 ellipticity = 1 / 298.257223563 - re = coefficient * (1 + ellipticity * sin_lat_avg_rad ** 2) - rn = coefficient * (1 - 2 * ellipticity + 3 * ellipticity * sin_lat_avg_rad ** 2) + re = coefficient * (1 + ellipticity * sin_lat_avg_rad**2) + rn = coefficient * ( + 1 - 2 * ellipticity + 3 * ellipticity * sin_lat_avg_rad**2 + ) delta_x = re * (lon2 - lon1) * np.cos(lat_avg_rad) delta_y = rn * (lat2 - lat1) delta_time = t2 - t1 @@ -726,6 +793,5 @@ class GPSData(object): else: u[idx_values[n]] = np.nan v[idx_values[n]] = np.nan - - return u, v + return u, v diff --git a/Classes/HeadingData.py b/Classes/HeadingData.py index aa660d5..d9d0849 100644 --- a/Classes/HeadingData.py +++ b/Classes/HeadingData.py @@ -14,13 +14,16 @@ class HeadingData(object): source: str Source of heading data (internal, external). mag_var_deg: float - Magnetic variation applied to get corrected data, in degrees (East +, West -). + Magnetic variation applied to get corrected data, in degrees (East + +, West -). mag_var_orig_deg: float Original magnetic variation, in degrees (East +, West -). align_correction_deg: float - Alignment correction to align compass with instrument (used for external heading), in degrees CW. 
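gga2_vel_trdi above turns consecutive valid GGA fixes into boat velocity on a locally linearized ellipsoid: the east-west and north-south metre-per-degree scale factors are evaluated at the mean latitude of each pair, following the WinRiver II approach the docstring cites. A single-pair sketch using the same constants:

    import numpy as np

    def gga_velocity(lat1, lon1, t1, lat2, lon2, t2):
        """East (u) and north (v) velocity in m/s from two GGA fixes."""
        lat_avg_rad = np.deg2rad((lat1 + lat2) / 2.0)
        sin_lat = np.sin(lat_avg_rad)
        coefficient = 6378137 * np.pi / 180     # metres per degree at the equator
        ellipticity = 1 / 298.257223563         # WGS84 flattening
        re = coefficient * (1 + ellipticity * sin_lat ** 2)
        rn = coefficient * (1 - 2 * ellipticity + 3 * ellipticity * sin_lat ** 2)
        delta_x = re * (lon2 - lon1) * np.cos(lat_avg_rad)
        delta_y = rn * (lat2 - lat1)
        delta_time = t2 - t1
        return delta_x / delta_time, delta_y / delta_time

    print(gga_velocity(45.0, -93.0, 0.0, 45.00001, -92.99999, 1.0))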
+ Alignment correction to align compass with instrument (used for + external heading), in degrees CW. align_correction_orig_deg: float - Alignment correction to align compass with instrument (used for external heading), in degrees CW. + Alignment correction to align compass with instrument (used for + external heading), in degrees CW. mag_error: np.array(float) Percent change in mean magnetic field from calibration (SonTek only).` pitch_limit: np.array(float) @@ -28,22 +31,31 @@ class HeadingData(object): roll_limit: np.array(float) Roll limit of compass calibration (SonTek only), in degrees. """ - + def __init__(self): """Initialize class and set variables to None.""" - self.data = None # Corrected self.data data - self.original_data = None # original uncorrected self.data data - self.source = None # Source of self.data data (internal, external) - self.mag_var_deg = None # Magnetic variation for these self.data data - self.mag_var_orig_deg = None # Original magnetic variation - self.align_correction_deg = None # Alignment correction to align compass with instrument + self.data = None + self.original_data = None + self.source = None + self.mag_var_deg = None + self.mag_var_orig_deg = None + self.align_correction_deg = None self.align_correction_orig_deg = None - self.mag_error = None # Percent change in mean magnetic field from calibration` - self.pitch_limit = None # Pitch limit of compass calibration (SonTek only), in degrees. - self.roll_limit = None # Roll limit of compass calibration (SonTek only), in degrees. - - def populate_data(self, data_in, source_in, magvar=0, align=0, mag_error=None, pitch_limit=None, roll_limit=None): + self.mag_error = None + self.pitch_limit = None + self.roll_limit = None + + def populate_data( + self, + data_in, + source_in, + magvar=0, + align=0, + mag_error=None, + pitch_limit=None, + roll_limit=None, + ): """Assigns values to instance variables. Parameters @@ -83,7 +95,7 @@ class HeadingData(object): self.roll_limit = roll_limit # Correct the original data for the magvar and alignment - if source_in == 'internal': + if source_in == "internal": self.data = self.original_data + self.mag_var_deg else: self.data = self.original_data + self.align_correction_deg @@ -91,7 +103,8 @@ class HeadingData(object): self.interp_heading() def populate_from_qrev_mat(self, mat_data): - """Populates the object using data from previously saved QRev Matlab file. + """Populates the object using data from previously saved QRev Matlab + file. Parameters ---------- @@ -105,7 +118,7 @@ class HeadingData(object): self.mag_var_deg = float(mat_data.magVar_deg) self.mag_var_orig_deg = float(mat_data.magVarOrig_deg) self.align_correction_deg = mat_data.alignCorrection_deg - if hasattr(mat_data, 'align_correction_orig_deg'): + if hasattr(mat_data, "align_correction_orig_deg"): self.align_correction_orig_deg = mat_data.align_correction_orig_deg else: self.align_correction_orig_deg = mat_data.alignCorrection_deg @@ -145,10 +158,10 @@ class HeadingData(object): """ self.mag_var_deg = mag_var - if h_source == 'internal': + if h_source == "internal": self.data = self.original_data + self.mag_var_deg self.fix_upper_limit() - + def set_align_correction(self, align_correction, h_source): """Applies a new alignment correction to the object data. 
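Both set_mag_var and set_align_correction follow the same pattern: add the correction to the original headings, then renormalize anything pushed past 360 degrees (the fix_upper_limit hunk below). A minimal standalone sketch of that pattern, using illustrative names that are not part of this patch:

    import numpy as np

    def apply_heading_correction(original_deg, correction_deg):
        # Sketch only: HeadingData adds mag_var_deg (internal source) or
        # align_correction_deg (external source) to the original headings.
        heading = np.asarray(original_deg, dtype=float) + correction_deg
        # np.mod wraps both >360 and <0 values into [0, 360); fix_upper_limit
        # below handles only the >360 case that a positive correction produces.
        return np.mod(heading, 360.0)

    # A 358 deg heading with a +5 deg magvar wraps to 3 deg.
    print(apply_heading_correction([358.0, 10.0], 5.0))  # [ 3. 15.]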
@@ -161,38 +174,41 @@ class HeadingData(object):
         """
 
         self.align_correction_deg = align_correction
-        if h_source == 'external':
+        if h_source == "external":
             self.data = self.original_data + self.align_correction_deg
             self.fix_upper_limit()
 
     def fix_upper_limit(self):
-        """Fixes heading when magvar and or alignment are applied resulting in heading greater than 360 degrees.
+        """Fixes heading when magvar and/or alignment are applied, resulting
+        in a heading greater than 360 degrees.
         """
 
         idx = np.where(self.data > 360)[0]
         if len(idx) > 0:
-            self.data[idx] = self.data[idx] - 360
-
+            self.data[idx] = self.data[idx] - 360
+
     def interp_heading(self):
         """Interpolate invalid headings. Use linear interpolation if there are
-        valid values on either side of the invalid heading. If the invalid heading
-        occurs at the beginning of the time series, back fill using the 1st valid.
-        If the invalid heading occurs at the end of the time series, forward fill
-        with the last valid self.data.
+        valid values on either side of the invalid heading. If the invalid
+        heading occurs at the beginning of the time series, back fill using
+        the first valid heading. If the invalid heading occurs at the end of
+        the time series, forward fill with the last valid heading.
         """
-
+
         idx_invalid = np.where(np.isnan(self.data))[0]
-
+
         if len(idx_invalid) > 0:
-
+
             first_valid_idx = np.where(np.isnan(self.data) == False)[0][0]
             last_valid_idx = np.where(np.isnan(self.data) == False)[0][-1]
-
+
             # Process each invalid self.data
             for n in range(len(idx_invalid)):
-                before_idx = np.where(np.isnan(self.data[0:idx_invalid[n] + 1]) == False)[0]
-                after_idx = np.where(np.isnan(self.data[idx_invalid[n]:]) == False)[0]
-
+                before_idx = np.where(
+                    np.isnan(self.data[0 : idx_invalid[n] + 1]) == False
+                )[0]
+                after_idx = np.where(np.isnan(self.data[idx_invalid[n] :]) == False)[0]
+
                 # If invalid self.data is beginning back fill
                 if len(before_idx) < 1:
                     self.data[idx_invalid[n]] = self.data[first_valid_idx]
@@ -205,7 +221,7 @@ class HeadingData(object):
             else:
                 before_idx = before_idx[-1]
                 after_idx = after_idx[0] + idx_invalid[n]
-
+
                 test1 = self.data[before_idx] > 180
                 test2 = self.data[after_idx] > 180
                 c = None
@@ -215,9 +231,13 @@ class HeadingData(object):
                     c = 360
                 elif test2:
                     c = -360
-                self.data[idx_invalid[n]] = (((self.data[after_idx] - self.data[before_idx] + c) /
-                                              (before_idx - after_idx)) *
-                                              (before_idx - idx_invalid[n])) + self.data[before_idx]
+                self.data[idx_invalid[n]] = (
+                    (
+                        (self.data[after_idx] - self.data[before_idx] + c)
+                        / (before_idx - after_idx)
+                    )
+                    * (before_idx - idx_invalid[n])
+                ) + self.data[before_idx]
                 if self.data[idx_invalid[n]] > 360:
                     self.data[idx_invalid[n]] = self.data[idx_invalid[n]] - 360
                 elif self.data[idx_invalid[n]] < 0:
diff --git a/Classes/InstrumentData.py b/Classes/InstrumentData.py
index cffa41b..51769fe 100644
--- a/Classes/InstrumentData.py
+++ b/Classes/InstrumentData.py
@@ -26,21 +26,20 @@ class InstrumentData(object):
     configuration_commands:
         Commands used to configure the instrument.
     """
-
+
     def __init__(self):
-        """Constructor initializes the variables to None.
- """ + """Constructor initializes the variables to None.""" + + self.serial_num = None + self.manufacturer = None + self.model = None + self.firmware = None + self.frequency_khz = None + self.beam_angle_deg = None + self.beam_pattern = None + self.t_matrix = None + self.configuration_commands = np.array([]) - self.serial_num = None # Serial number of ADCP - self.manufacturer = None # manufacturer of ADCP (SonTek, TRDI) - self.model = None # model of ADCP (Rio Grande, StreamPro, RiverRay, M9, S5) - self.firmware = None # firmware version - self.frequency_khz = None # frquency of ADCP (could be "Multi") - self.beam_angle_deg = None # angle of beam from vertical - self.beam_pattern = None # pattern of beams - self.t_matrix = None # object of TransformationMatrix - self.configuration_commands = np.array([]) # configuration commands sent to ADCP - def populate_data(self, manufacturer, raw_data, mmt_transect=None, mmt=None): """Manages method calls for different manufacturers. @@ -57,13 +56,13 @@ class InstrumentData(object): """ # Process based on manufacturer - if manufacturer == 'TRDI': + if manufacturer == "TRDI": self.manufacturer = manufacturer self.trdi(pd0=raw_data, mmt_transect=mmt_transect, mmt=mmt) - elif manufacturer == 'SonTek': + elif manufacturer == "SonTek": self.manufacturer = manufacturer self.sontek(rs=raw_data) - elif manufacturer == 'Nortek': + elif manufacturer == "Nortek": self.manufacturer = manufacturer self.nortek(rs=raw_data) @@ -91,106 +90,148 @@ class InstrumentData(object): self.beam_pattern = pd0.Inst.pat[0] # Instrument characteristics - mmt_site = getattr(mmt, 'site_info') - mmt_config = getattr(mmt_transect, 'active_config') + mmt_site = getattr(mmt, "site_info") + mmt_config = getattr(mmt_transect, "active_config") - self.serial_num = mmt_site['ADCPSerialNmb'] + self.serial_num = mmt_site["ADCPSerialNmb"] # Determine TRDI model num = float(self.firmware) model_switch = np.floor(num) if model_switch == 10: - self.model = 'Rio Grande' - if 'Fixed_Commands' in mmt_config.keys(): - self.configuration_commands = np.append(self.configuration_commands, 'Fixed') - self.configuration_commands = np.append(self.configuration_commands, mmt_config['Fixed_Commands']) + self.model = "Rio Grande" + if "Fixed_Commands" in mmt_config.keys(): + self.configuration_commands = np.append( + self.configuration_commands, "Fixed" + ) + self.configuration_commands = np.append( + self.configuration_commands, mmt_config["Fixed_Commands"] + ) elif model_switch == 31: - self.model = 'StreamPro' + self.model = "StreamPro" self.frequency_khz = 2000 - if 'Fixed_Commands_StreamPro' in mmt_config.keys(): - self.configuration_commands = np.append(self.configuration_commands, 'Fixed') - self.configuration_commands = np.append(self.configuration_commands, - mmt_config['Fixed_Commands_StreamPro']) + if "Fixed_Commands_StreamPro" in mmt_config.keys(): + self.configuration_commands = np.append( + self.configuration_commands, "Fixed" + ) + self.configuration_commands = np.append( + self.configuration_commands, mmt_config["Fixed_Commands_StreamPro"] + ) elif model_switch == 44: - self.model = 'RiverRay' - if 'Fixed_Commands_RiverRay' in mmt_config.keys(): - self.configuration_commands = np.append(self.configuration_commands, 'Fixed') - self.configuration_commands = np.append(self.configuration_commands, - mmt_config['Fixed_Commands_RiverRay']) + self.model = "RiverRay" + if "Fixed_Commands_RiverRay" in mmt_config.keys(): + self.configuration_commands = np.append( + self.configuration_commands, "Fixed" + 
) + self.configuration_commands = np.append( + self.configuration_commands, mmt_config["Fixed_Commands_RiverRay"] + ) elif model_switch == 56: - self.model = 'RiverPro' + self.model = "RiverPro" if pd0.Cfg.n_beams[0] < 5: - if 'RG_Test' in mmt.qaqc.keys(): - idx = mmt.qaqc['RG_Test'][0].find('RioPro') + if "RG_Test" in mmt.qaqc.keys(): + idx = mmt.qaqc["RG_Test"][0].find("RioPro") if idx != -1: - self.model = 'RioPro' - - if 'Fixed_Commands_RiverPro' in mmt_config.keys(): - self.configuration_commands = np.append(self.configuration_commands, 'Fixed') - self.configuration_commands = np.append(self.configuration_commands, - mmt_config['Fixed_Commands_RiverPro']) + self.model = "RioPro" + else: + self.model = "RiverPro" + else: + # Assume RioPro based on number of beams + self.model = "RioPro" + + if "Fixed_Commands_RiverPro" in mmt_config.keys(): + self.configuration_commands = np.append( + self.configuration_commands, "Fixed" + ) + self.configuration_commands = np.append( + self.configuration_commands, mmt_config["Fixed_Commands_RiverPro"] + ) else: - self.configuration_commands = np.append(self.configuration_commands, 'Fixed') - self.configuration_commands = np.append(self.configuration_commands, ' ') + self.configuration_commands = np.append( + self.configuration_commands, "Fixed" + ) + self.configuration_commands = np.append( + self.configuration_commands, " " + ) else: - self.model = 'Unknown' - if 'Fixed_Commands' in mmt_config.keys(): - self.configuration_commands = np.append(self.configuration_commands, 'Fixed') - self.configuration_commands = np.append(self.configuration_commands, mmt_config['Fixed_Commands']) - - if 'Wizard_Commands' in mmt_config.keys(): - self.configuration_commands = np.append(self.configuration_commands, ['Wizard']) - self.configuration_commands = np.append(self.configuration_commands, - mmt_config['Wizard_Commands']) - - if 'User_Commands' in mmt_config.keys(): - self.configuration_commands = np.append(self.configuration_commands, ['User']) - self.configuration_commands = np.append(self.configuration_commands, - mmt_config['User_Commands']) + self.model = "Unknown" + if "Fixed_Commands" in mmt_config.keys(): + self.configuration_commands = np.append( + self.configuration_commands, "Fixed" + ) + self.configuration_commands = np.append( + self.configuration_commands, mmt_config["Fixed_Commands"] + ) + + if "Wizard_Commands" in mmt_config.keys(): + self.configuration_commands = np.append( + self.configuration_commands, ["Wizard"] + ) + self.configuration_commands = np.append( + self.configuration_commands, mmt_config["Wizard_Commands"] + ) + + if "User_Commands" in mmt_config.keys(): + self.configuration_commands = np.append( + self.configuration_commands, ["User"] + ) + self.configuration_commands = np.append( + self.configuration_commands, mmt_config["User_Commands"] + ) # Obtain transformation matrix from one of the available sources if not np.isnan(pd0.Inst.t_matrix[0, 0]): self.t_matrix = TransformationMatrix() - self.t_matrix.populate_data(manufacturer='TRDI', model='pd0', data_in=pd0) - elif self.model == 'RiverRay': - self.t_matrix = TransformationMatrix() - self.t_matrix.populate_data(manufacturer='TRDI', model=self.model, data_in='Nominal') + self.t_matrix.populate_data(manufacturer="TRDI", model="pd0", data_in=pd0) + # elif self.model == "RiverRay": + # self.t_matrix = TransformationMatrix() + # self.t_matrix.populate_data( + # manufacturer="TRDI", model=self.model, data_in="Nominal" + # ) else: if isinstance(mmt.qaqc, dict) and len(mmt.qaqc) > 0: - if 
'RG_Test' in mmt.qaqc.keys(): + if "RG_Test" in mmt.qaqc.keys(): self.t_matrix = TransformationMatrix() - self.t_matrix.populate_data(manufacturer='TRDI', model=self.model, data_in=mmt.qaqc['RG_Test'][0]) + self.t_matrix.populate_data( + manufacturer="TRDI", + model=self.model, + data_in=mmt.qaqc["RG_Test"][0], + ) - elif 'Compass_Calibration' in mmt.qaqc.keys(): + elif "Compass_Calibration" in mmt.qaqc.keys(): self.t_matrix = TransformationMatrix() - self.t_matrix.populate_data(manufacturer='TRDI', - model=self.model, - data_in=mmt.qaqc['Compass_Calibration'][0]) + self.t_matrix.populate_data( + manufacturer="TRDI", + model=self.model, + data_in=mmt.qaqc["Compass_Calibration"][0], + ) - elif 'Compass_Eval_Timestamp' in mmt.qaqc.keys(): + elif "Compass_Eval_Timestamp" in mmt.qaqc.keys(): self.t_matrix = TransformationMatrix() - self.t_matrix.populate_data(manufacturer='TRDI', - model=self.model, - data_in=mmt.qaqc['Compass_Evaluation'][0]) + self.t_matrix.populate_data( + manufacturer="TRDI", + model=self.model, + data_in=mmt.qaqc["Compass_Evaluation"][0], + ) else: self.t_matrix = TransformationMatrix() - self.t_matrix.populate_data(manufacturer='TRDI', - model=self.model, - data_in='Nominal') + self.t_matrix.populate_data( + manufacturer="TRDI", model=self.model, data_in="Nominal" + ) else: self.t_matrix = TransformationMatrix() - self.t_matrix.populate_data(manufacturer='TRDI', - model=self.model, - data_in='Nominal') + self.t_matrix.populate_data( + manufacturer="TRDI", model=self.model, data_in="Nominal" + ) def sontek(self, rs): """Populates the variables with data from SonTek ADCPs. @@ -203,49 +244,62 @@ class InstrumentData(object): self.serial_num = rs.System.SerialNumber self.frequency_khz = rs.Transformation_Matrices.Frequency if self.frequency_khz[2] > 0: - self.model = 'M9' - elif hasattr(rs.WaterTrack, 'Vel_Expected_StdDev'): - self.model = 'RS5' + self.model = "M9" + elif hasattr(rs.WaterTrack, "Vel_Expected_StdDev"): + self.model = "RS5" else: - self.model = 'S5' - if hasattr(rs, 'SystemHW'): + self.model = "S5" + if hasattr(rs, "SystemHW"): revision = str(rs.SystemHW.FirmwareRevision) if len(revision) < 2: - revision = '0' + revision - self.firmware = str(rs.SystemHW.FirmwareVersion) + '.' + revision + revision = "0" + revision + self.firmware = str(rs.SystemHW.FirmwareVersion) + "." + revision + elif self.model == "RS5": + try: + self.firmware = str(rs.System.FirmwareVersion) + except BaseException: + self.firmware = "" else: - self.firmware = '' + self.firmware = "" self.beam_angle_deg = 25 - self.beam_pattern = 'Convex' + self.beam_pattern = "Convex" self.t_matrix = TransformationMatrix() - self.t_matrix.populate_data('SonTek', data_in=rs.Transformation_Matrices.Matrix) + self.t_matrix.populate_data("SonTek", data_in=rs.Transformation_Matrices.Matrix) self.configuration_commands = None def nortek(self, rs): + """Populates the variables with data from Nortek ADCPs. + + Parameters + ---------- + rs: MatSonTek + """ + self.serial_num = rs.System.SerialNumber self.frequency_khz = rs.Transformation_Matrices.Frequency self.model = rs.System.InstrumentModel - if hasattr(rs, 'SystemHW'): + if hasattr(rs, "SystemHW"): revision = str(rs.SystemHW.FirmwareRevision) if len(revision) < 2: - revision = '0' + revision - self.firmware = str(rs.SystemHW.FirmwareVersion) + '.' + revision + revision = "0" + revision + self.firmware = str(rs.SystemHW.FirmwareVersion) + "." 
+ revision else: - self.firmware = '' + self.firmware = "" self.beam_angle_deg = 25 - self.beam_pattern = 'Convex' + self.beam_pattern = "Convex" self.t_matrix = TransformationMatrix() - self.t_matrix.populate_data('SonTek', data_in=rs.Transformation_Matrices.Matrix) + self.t_matrix.populate_data("SonTek", data_in=rs.Transformation_Matrices.Matrix) self.configuration_commands = None def populate_from_qrev_mat(self, transect): - """Populates the object using data from previously saved QRev Matlab file. + """Populates the object using data from previously saved QRev Matlab + file. - Parameters - ---------- - transect: mat_struct - Matlab data structure obtained from sio.loadmat - """ + Parameters + ---------- + transect: mat_struct + Matlab data structure obtained from sio.loadmat + """ self.serial_num = str(transect.adcp.serialNum) self.manufacturer = transect.adcp.manufacturer diff --git a/Classes/LoadMeasurements.py b/Classes/LoadMeasurements.py index f122722..c6cfaab 100644 --- a/Classes/LoadMeasurements.py +++ b/Classes/LoadMeasurements.py @@ -191,14 +191,14 @@ class LoadMeasurements(QThread): if 'start_time' in self.settings.keys() or 'end_time' in self.settings.keys(): if 'start_time' in self.settings.keys(): id_transect = meas.checked_transect_idx[0] - detla_t = self.settings['start_time'] - meas.transects[id_transect].date_time.start_serial_time + delta_t = self.settings['start_time'] - meas.transects[id_transect].date_time.start_serial_time else: id_transect = meas.checked_transect_idx[-1] delta_t = self.settings['end_time'] - meas.transects[id_transect].date_time.end_serial_time for id_transect in meas.checked_transect_idx: - meas.transects[id_transect].date_time.start_serial_time += detla_t - meas.transects[id_transect].date_time.end_serial_time += detla_t + meas.transects[id_transect].date_time.start_serial_time += delta_t + meas.transects[id_transect].date_time.end_serial_time += delta_t if 'change_nav_ref' in self.settings.keys(): # Change navigation reference @@ -296,6 +296,7 @@ class LoadMeasurements(QThread): i = 0 for key in measurements_dict: + print(key) meas = measurements_dict[key] oursin = meas.oursin n_transects = len(meas.transects) diff --git a/Classes/MMT_TRDI.py b/Classes/MMT_TRDI.py index 10b9676..23adaf1 100644 --- a/Classes/MMT_TRDI.py +++ b/Classes/MMT_TRDI.py @@ -14,13 +14,16 @@ class MMTtrdi(object): site_info: dict Dictionary of site information transects: list - List of Transect objects containing information for each discharge transect + List of Transect objects containing information for each discharge + transect summary: dict - Dictionary of measurement summary for each available boat velocity reference + Dictionary of measurement summary for each available boat velocity + reference qaqc: dict Dictionary of premeasurement tests, calibrations, and evaluations mbt_transects: list - List of Transect objects containing information for each moving-bed test transect + List of Transect objects containing information for each moving-bed + test transect path: str Path for mmt file and associated files """ @@ -56,87 +59,96 @@ class MMTtrdi(object): """ # Open the file and convert to an ordered dictionary tree - with open(mmt_file, 'r', encoding='utf-8') as fd: + with open(mmt_file, "r", encoding="utf-8") as fd: xml_data = fd.read() - clean_xml_data = '' - remove_re = re.compile(u'[\x00-\x08\x0B-\x0C\x0E-\x1F\x7F%]') + clean_xml_data = "" + remove_re = re.compile("[\x00-\x08\x0B-\x0C\x0E-\x1F\x7F%]") for line in xml_data: - new_line, count = remove_re.subn('', 
line) + new_line, count = remove_re.subn("", line) clean_xml_data = clean_xml_data + new_line win_river = xmltodict.parse(clean_xml_data) # UnicodeDecodeError - win_river = win_river['WinRiver'] + win_river = win_river["WinRiver"] self.path = os.path.split(mmt_file)[0] # Process project settings - self.project['Name'] = win_river['Project']['@Name'] - self.project['Version'] = win_river['Project']['@Version'] - if 'Locked' in win_river['Project'].keys(): - self.project['Locked'] = win_river['Project']['Locked'] + self.project["Name"] = win_river["Project"]["@Name"] + self.project["Version"] = win_river["Project"]["@Version"] + if "Locked" in win_river["Project"].keys(): + self.project["Locked"] = win_river["Project"]["Locked"] else: - self.project['Locked'] = None + self.project["Locked"] = None # Process site information - siteinfo_keys = win_river['Project']['Site_Information'].keys() + siteinfo_keys = win_river["Project"]["Site_Information"].keys() # Iterate through all of the keys and values of site info for x in siteinfo_keys: - site_data = win_river['Project']['Site_Information'][x] + site_data = win_river["Project"]["Site_Information"][x] if site_data is not None: # Remove @ symbol from properties - if '@' in x: + if "@" in x: x = x[1:] - if x == 'Water_Temperature': + if x == "Water_Temperature": self.site_info[x] = float(site_data) # -32768 used to denote no data if self.site_info[x] < -100: - self.site_info[x] = '' + self.site_info[x] = "" else: self.site_info[x] = site_data else: - self.site_info[x] = '' - if 'Transect' in win_river['Project']['Site_Discharge'].keys(): - trans = win_river['Project']['Site_Discharge']['Transect'] + self.site_info[x] = "" + if "Transect" in win_river["Project"]["Site_Discharge"].keys(): + trans = win_river["Project"]["Site_Discharge"]["Transect"] - # Create a Transect class for each transect found under Site_Discharge + # Create a Transect class for each transect found under + # Site_Discharge if type(trans) == list: for i in range(len(trans)): - if 'File' in trans[i]: + if "File" in trans[i]: self.transects.append(MMTtransect(trans[i])) else: self.transects = [MMTtransect(trans)] # Discharge Summary - if 'Discharge_Summary' in win_river['Project']['Site_Discharge'].keys(): - discharge_summary = win_river['Project']['Site_Discharge']['Discharge_Summary'] + if "Discharge_Summary" in win_river["Project"]["Site_Discharge"].keys(): + discharge_summary = win_river["Project"]["Site_Discharge"][ + "Discharge_Summary" + ] - self.summary['NONE'] = self.mmtqsum(discharge_summary['None']) - self.summary['BT'] = self.mmtqsum(discharge_summary['BottomTrack']) - self.summary['GGA'] = self.mmtqsum(discharge_summary['GGA']) - self.summary['VTG'] = self.mmtqsum(discharge_summary['VTG']) + self.summary["NONE"] = self.mmtqsum(discharge_summary["None"]) + self.summary["BT"] = self.mmtqsum(discharge_summary["BottomTrack"]) + self.summary["GGA"] = self.mmtqsum(discharge_summary["GGA"]) + self.summary["VTG"] = self.mmtqsum(discharge_summary["VTG"]) # QA_QC - if 'QA_QC' in win_river['Project'].keys(): - qaqc = win_river['Project']['QA_QC'] + if "QA_QC" in win_river["Project"].keys(): + qaqc = win_river["Project"]["QA_QC"] for qaqc_type, data in qaqc.items(): - # Parse qaqc data from dictionary if the type is a test, cal, or eval - if qaqc_type in ['RG_Test', 'Compass_Calibration', 'Compass_Evaluation']: - # There could be multiple tests of the same type so they are stored in a list - time_stamp = qaqc_type + '_TimeStamp' - if not isinstance(data['TestResult'], list): - 
self.qaqc[qaqc_type] = [data['TestResult']['Text']] - self.qaqc[time_stamp] = [data['TestResult']['TimeStamp']] + # Parse qaqc data from dictionary if the type is a test, + # cal, or eval + if qaqc_type in [ + "RG_Test", + "Compass_Calibration", + "Compass_Evaluation", + ]: + # There could be multiple tests of the same type so they + # are stored in a list + time_stamp = qaqc_type + "_TimeStamp" + if not isinstance(data["TestResult"], list): + self.qaqc[qaqc_type] = [data["TestResult"]["Text"]] + self.qaqc[time_stamp] = [data["TestResult"]["TimeStamp"]] else: self.qaqc[qaqc_type] = [] self.qaqc[time_stamp] = [] - for result in data['TestResult']: - self.qaqc[qaqc_type].append(result['Text']) - self.qaqc[time_stamp].append(result['TimeStamp']) + for result in data["TestResult"]: + self.qaqc[qaqc_type].append(result["Text"]) + self.qaqc[time_stamp].append(result["TimeStamp"]) - if qaqc_type == 'Moving_Bed_Test': - if 'Transect' in data.keys(): + if qaqc_type == "Moving_Bed_Test": + if "Transect" in data.keys(): self.moving_bed_test(data) def moving_bed_test(self, mb_data): @@ -148,7 +160,7 @@ class MMTtrdi(object): Dictionary containing moving-bed test information """ - transects = mb_data['Transect'] + transects = mb_data["Transect"] # If only one transect make it a list if not isinstance(transects, list): @@ -159,22 +171,22 @@ class MMTtrdi(object): transect = MMTtransect(tsect) # Determine type of moving-bed test - if '@MBTType' in tsect: - if tsect['@MBTType'] == '0': - transect.moving_bed_type = 'Loop' - elif tsect['@MBTType'] == '1': - transect.moving_bed_type = 'Stationary' + if "@MBTType" in tsect: + if tsect["@MBTType"] == "0": + transect.moving_bed_type = "Loop" + elif tsect["@MBTType"] == "1": + transect.moving_bed_type = "Stationary" else: # Use the file name to determine the moving-bed test type file_name = transect.Files[0] - fidx = file_name.rfind('.') - if file_name[fidx-3:fidx] == 'SBT': - transect.moving_bed_type = 'Stationary' - elif file_name[fidx-3:fidx] == 'LBT': - transect.moving_bed_type = 'Loop' + fidx = file_name.rfind(".") + if file_name[fidx - 3 : fidx] == "SBT": + transect.moving_bed_type = "Stationary" + elif file_name[fidx - 3 : fidx] == "LBT": + transect.moving_bed_type = "Loop" else: # If type can't be determined process as stationary - transect.moving_bed_type = 'Stationary' + transect.moving_bed_type = "Stationary" self.mbt_transects.append(transect) @@ -192,32 +204,33 @@ class MMTtrdi(object): sum_dict: dict Dictionary of summary with a couple of key names changed. 
""" - + sum_dict = { - 'Use': [], - 'Begin_Left': [], - 'FileName': [], - 'LeftEdgeSlopeCoeff': [], - 'RightEdgeSlopeCoeff': [] - } + "Use": [], + "Begin_Left": [], + "FileName": [], + "LeftEdgeSlopeCoeff": [], + "RightEdgeSlopeCoeff": [], + } # Iterate through each transect for transect in data.values(): # Iterate through each key and val in the transect summary for key2, val2 in transect.items(): # Append value from transect to appropriate key - if key2 == 'UseInSummary': - sum_dict['Use'].append(float(val2)) + if key2 == "UseInSummary": + sum_dict["Use"].append(float(val2)) elif key2 == "BeginLeft": - sum_dict['Begin_Left'].append(float(val2)) - elif key2 == 'FileName': - sum_dict['FileName'].append(val2) - elif key2 == 'LeftEdgeSlopeCoeff': - sum_dict['LeftEdgeSlopeCoeff'].append(float(val2)) - elif key2 == 'RightEdgeSlopeCoeff': - sum_dict['RightEdgeSlopeCoeff'].append(float(val2)) + sum_dict["Begin_Left"].append(float(val2)) + elif key2 == "FileName": + sum_dict["FileName"].append(val2) + elif key2 == "LeftEdgeSlopeCoeff": + sum_dict["LeftEdgeSlopeCoeff"].append(float(val2)) + elif key2 == "RightEdgeSlopeCoeff": + sum_dict["RightEdgeSlopeCoeff"].append(float(val2)) else: - # If the key has not been specified use key from transect summary + # If the key has not been specified use key from + # transect summary if key2 not in sum_dict: sum_dict[key2] = [] try: @@ -240,49 +253,57 @@ class MMTtransect(object): def __init__(self, trans): """Constructor immediately begins extraction of data""" - self.Checked = int(trans['@Checked']) + self.Checked = int(trans["@Checked"]) self.Files = [] self.Notes = [] self.field_config = None self.active_config = None self.moving_bed_type = None - files = trans['File'] + files = trans["File"] # Create File classes for each file associated with transect if type(files) is list: for file in files: - self.Files.append(file['#text']) + self.Files.append(file["#text"]) else: - self.Files.append(files['#text']) + self.Files.append(files["#text"]) # Create Note classes for each file associated with transect - if 'Note' in trans.keys(): - note = trans['Note'] + if "Note" in trans.keys(): + note = trans["Note"] if type(note) is list: for n in note: - if type(trans['File']) is list: - self.Notes.append(self.note_dict(n, trans['File'][0]['@TransectNmb'])) + if type(trans["File"]) is list: + self.Notes.append( + self.note_dict(n, trans["File"][0]["@TransectNmb"]) + ) else: - self.Notes.append(self.note_dict(n, trans['File']['@TransectNmb'])) + self.Notes.append( + self.note_dict(n, trans["File"]["@TransectNmb"]) + ) else: - if type(trans['File']) is list: - self.Notes.append(self.note_dict(note, trans['File'][0]['@TransectNmb'])) + if type(trans["File"]) is list: + self.Notes.append( + self.note_dict(note, trans["File"][0]["@TransectNmb"]) + ) else: - self.Notes.append(self.note_dict(note, trans['File']['@TransectNmb'])) + self.Notes.append( + self.note_dict(note, trans["File"]["@TransectNmb"]) + ) # Create configuration dictionaries for each config attribute - if type(trans['Configuration']) is list: - for config in trans['Configuration']: - if int(config['@Checked']) == 0: + if type(trans["Configuration"]) is list: + for config in trans["Configuration"]: + if int(config["@Checked"]) == 0: self.field_config = self.parse_config(config) - if int(config['@Checked']) == 1: + if int(config["@Checked"]) == 1: self.active_config = self.parse_config(config) else: - if int(trans['Configuration']['@Checked']) == 0: - self.field_config = 
self.parse_config(trans['Configuration']) - if int(trans['Configuration']['@Checked']) == 1: - self.active_config = self.parse_config(trans['Configuration']) + if int(trans["Configuration"]["@Checked"]) == 0: + self.field_config = self.parse_config(trans["Configuration"]) + if int(trans["Configuration"]["@Checked"]) == 1: + self.active_config = self.parse_config(trans["Configuration"]) # Assign active config to field config if there is no field config if self.field_config is None: @@ -318,173 +339,265 @@ class MMTtransect(object): config_dict = {} # Store all instrument commands - command_groups = config['Commands'] + command_groups = config["Commands"] for group in command_groups.keys(): config_dict[group] = [] for key, command in command_groups[group].items(): - if key != '@Status': + if key != "@Status": config_dict[group].append(command) # Depth sounder configuration - if 'Use_Depth_Sounder_In_Processing' in config['Depth_Sounder'].keys(): - if config['Depth_Sounder']['Use_Depth_Sounder_In_Processing']['#text'] == "YES": - config_dict['DS_Use_Process'] = 1 + if "Use_Depth_Sounder_In_Processing" in config["Depth_Sounder"].keys(): + if ( + config["Depth_Sounder"]["Use_Depth_Sounder_In_Processing"]["#text"] + == "YES" + ): + config_dict["DS_Use_Process"] = 1 else: - config_dict['DS_Use_Process'] = 0 + config_dict["DS_Use_Process"] = 0 else: - config_dict['DS_Use_Process'] = -1 - - config_dict['DS_Transducer_Depth'] = float(config['Depth_Sounder']['Depth_Sounder_Transducer_Depth']['#text']) - config_dict['DS_Transducer_Offset'] = float(config['Depth_Sounder']['Depth_Sounder_Transducer_Offset']['#text']) - - if config['Depth_Sounder']['Depth_Sounder_Correct_Speed_of_Sound']['#text'] == 'YES': - config_dict['DS_Cor_Spd_Sound'] = 1 + config_dict["DS_Use_Process"] = -1 + + config_dict["DS_Transducer_Depth"] = float( + config["Depth_Sounder"]["Depth_Sounder_Transducer_Depth"]["#text"] + ) + config_dict["DS_Transducer_Offset"] = float( + config["Depth_Sounder"]["Depth_Sounder_Transducer_Offset"]["#text"] + ) + + if ( + config["Depth_Sounder"]["Depth_Sounder_Correct_Speed_of_Sound"]["#text"] + == "YES" + ): + config_dict["DS_Cor_Spd_Sound"] = 1 else: - config_dict['DS_Cor_Spd_Sound'] = 0 + config_dict["DS_Cor_Spd_Sound"] = 0 - config_dict['DS_Scale_Factor'] = float(config['Depth_Sounder']['Depth_Sounder_Scale_Factor']['#text']) + config_dict["DS_Scale_Factor"] = float( + config["Depth_Sounder"]["Depth_Sounder_Scale_Factor"]["#text"] + ) # External heading configuration - config_dict['Ext_Heading_Offset'] = float(config['Ext_Heading']['Offset']['#text']) + config_dict["Ext_Heading_Offset"] = float( + config["Ext_Heading"]["Offset"]["#text"] + ) - if 'Use_Ext_Heading' in config['Ext_Heading'].keys(): - if config['Ext_Heading']['Use_Ext_Heading']['#text'] == 'NO': - config_dict['Ext_Heading_Use'] = False + if "Use_Ext_Heading" in config["Ext_Heading"].keys(): + if config["Ext_Heading"]["Use_Ext_Heading"]["#text"] == "NO": + config_dict["Ext_Heading_Use"] = False else: - config_dict['Ext_Heading_Use'] = True + config_dict["Ext_Heading_Use"] = True else: - config_dict['Ext_Heading_Use'] = False + config_dict["Ext_Heading_Use"] = False # GPS configuration - if 'GPS' in config.keys(): - config_dict['GPS_Time_Delay'] = config['GPS']['Time_Delay']['#text'] + if "GPS" in config.keys(): + config_dict["GPS_Time_Delay"] = config["GPS"]["Time_Delay"]["#text"] # Discharge settings - config_dict['Q_Top_Method'] = float(config['Discharge']['Top_Discharge_Estimate']['#text']) - config_dict['Q_Bottom_Method'] = 
float(config['Discharge']['Bottom_Discharge_Estimate']['#text']) - config_dict['Q_Power_Curve_Coeff'] = float(config['Discharge']['Power_Curve_Coef']['#text']) - config_dict['Q_Cut_Top_Bins'] = float(config['Discharge']['Cut_Top_Bins']['#text']) - config_dict['Q_Bins_Above_Sidelobe'] = float(config['Discharge']['Cut_Bins_Above_Sidelobe']['#text']) - config_dict['Q_Left_Edge_Type'] = float(config['Discharge']['River_Left_Edge_Type']['#text']) - config_dict['Q_Left_Edge_Coeff'] = float(config['Discharge']['Left_Edge_Slope_Coeff']['#text']) - config_dict['Q_Right_Edge_Type'] = float(config['Discharge']['River_Right_Edge_Type']['#text']) - config_dict['Q_Right_Edge_Coeff'] = float(config['Discharge']['Right_Edge_Slope_Coeff']['#text']) - config_dict['Q_Shore_Pings_Avg'] = float(config['Discharge']['Shore_Pings_Avg']['#text']) + config_dict["Q_Top_Method"] = float( + config["Discharge"]["Top_Discharge_Estimate"]["#text"] + ) + config_dict["Q_Bottom_Method"] = float( + config["Discharge"]["Bottom_Discharge_Estimate"]["#text"] + ) + config_dict["Q_Power_Curve_Coeff"] = float( + config["Discharge"]["Power_Curve_Coef"]["#text"] + ) + config_dict["Q_Cut_Top_Bins"] = float( + config["Discharge"]["Cut_Top_Bins"]["#text"] + ) + config_dict["Q_Bins_Above_Sidelobe"] = float( + config["Discharge"]["Cut_Bins_Above_Sidelobe"]["#text"] + ) + config_dict["Q_Left_Edge_Type"] = float( + config["Discharge"]["River_Left_Edge_Type"]["#text"] + ) + config_dict["Q_Left_Edge_Coeff"] = float( + config["Discharge"]["Left_Edge_Slope_Coeff"]["#text"] + ) + config_dict["Q_Right_Edge_Type"] = float( + config["Discharge"]["River_Right_Edge_Type"]["#text"] + ) + config_dict["Q_Right_Edge_Coeff"] = float( + config["Discharge"]["Right_Edge_Slope_Coeff"]["#text"] + ) + config_dict["Q_Shore_Pings_Avg"] = float( + config["Discharge"]["Shore_Pings_Avg"]["#text"] + ) # Edge estimate settings - config_dict['Edge_Begin_Shore_Distance'] = config['Edge_Estimates']['Begin_Shore_Distance']['#text'] - config_dict['Edge_End_Shore_Distance'] = float(config['Edge_Estimates']['End_Shore_Distance']['#text']) - if config['Edge_Estimates']['Begin_Left_Bank']['#text'] == 'YES': - config_dict['Edge_Begin_Left_Bank'] = 1 + config_dict["Edge_Begin_Shore_Distance"] = config["Edge_Estimates"][ + "Begin_Shore_Distance" + ]["#text"] + config_dict["Edge_End_Shore_Distance"] = float( + config["Edge_Estimates"]["End_Shore_Distance"]["#text"] + ) + if config["Edge_Estimates"]["Begin_Left_Bank"]["#text"] == "YES": + config_dict["Edge_Begin_Left_Bank"] = 1 else: - config_dict['Edge_Begin_Left_Bank'] = 0 + config_dict["Edge_Begin_Left_Bank"] = 0 # Check for user discharge feature in mmt file - if 'Begin_Manual_Discharge' in config['Edge_Estimates']: - config_dict['Edge_Begin_Manual_Discharge'] = float(config['Edge_Estimates']['Begin_Manual_Discharge']['#text']) - config_dict['Edge_Begin_Method_Distance'] = \ - config['Edge_Estimates']['Begin_Edge_Discharge_Method_Distance']['#text'] - config_dict['Edge_End_Manual_Discharge'] = float(config['Edge_Estimates']['End_Manual_Discharge']['#text']) - config_dict['Edge_End_Method_Distance'] = \ - config['Edge_Estimates']['End_Edge_Discharge_Method_Distance']['#text'] + if "Begin_Manual_Discharge" in config["Edge_Estimates"]: + config_dict["Edge_Begin_Manual_Discharge"] = float( + config["Edge_Estimates"]["Begin_Manual_Discharge"]["#text"] + ) + config_dict["Edge_Begin_Method_Distance"] = config["Edge_Estimates"][ + "Begin_Edge_Discharge_Method_Distance" + ]["#text"] + config_dict["Edge_End_Manual_Discharge"] = float( + 
config["Edge_Estimates"]["End_Manual_Discharge"]["#text"] + ) + config_dict["Edge_End_Method_Distance"] = config["Edge_Estimates"][ + "End_Edge_Discharge_Method_Distance" + ]["#text"] # Offsets - for key in config['Offsets'].keys(): - if key == 'ADCP_Transducer_Depth': + for key in config["Offsets"].keys(): + if key == "ADCP_Transducer_Depth": child = "Offsets_Transducer_Depth" else: child = "Offsets_" + key - config_dict[child] = float(config['Offsets'][key]['#text']) + config_dict[child] = float(config["Offsets"][key]["#text"]) # Processing settings - for key in config['Processing'].keys(): - if key == 'Use_3_Beam_Solution_For_BT': - child = 'Proc_Use_3_Beam_BT' - elif key == 'Use_3_Beam_Solution_For_WT': - child = 'Proc_Use_3_Beam_WT' - elif key == 'BT_Error_Velocity_Threshold': - child = 'Proc_BT_Error_Vel_Threshold' - elif key == 'WT_Error_Velocity_Threshold': - child = 'Proc_WT_Error_Velocity_Threshold' - elif key == 'BT_Up_Velocity_Threshold': - child = 'Proc_BT_Up_Vel_Threshold' - elif key == 'WT_Up_Velocity_Threshold': - child = 'Proc_WT_Up_Vel_Threshold' - elif key == 'Fixed_Speed_Of_Sound': - child = 'Proc_Fixed_Speed_Of_Sound' - elif key == 'Mark_Below_Bottom_Bad': - child = 'Proc_Mark_Below_Bottom_Bad' - elif key == 'Use_Weighted_Mean': - child = 'Proc_Use_Weighted_Mean' - elif key == 'Absorption': - child = 'Proc_Absorption' + for key in config["Processing"].keys(): + if key == "Use_3_Beam_Solution_For_BT": + child = "Proc_Use_3_Beam_BT" + elif key == "Use_3_Beam_Solution_For_WT": + child = "Proc_Use_3_Beam_WT" + elif key == "BT_Error_Velocity_Threshold": + child = "Proc_BT_Error_Vel_Threshold" + elif key == "WT_Error_Velocity_Threshold": + child = "Proc_WT_Error_Velocity_Threshold" + elif key == "BT_Up_Velocity_Threshold": + child = "Proc_BT_Up_Vel_Threshold" + elif key == "WT_Up_Velocity_Threshold": + child = "Proc_WT_Up_Vel_Threshold" + elif key == "Fixed_Speed_Of_Sound": + child = "Proc_Fixed_Speed_Of_Sound" + elif key == "Mark_Below_Bottom_Bad": + child = "Proc_Mark_Below_Bottom_Bad" + elif key == "Use_Weighted_Mean": + child = "Proc_Use_Weighted_Mean" + elif key == "Absorption": + child = "Proc_Absorption" else: - child = 'Proc_' + key + child = "Proc_" + key - # Try to cast to float otherwise assign 1 or 0 based on string value + # Try to cast to float otherwise assign 1 or 0 based on string + # value try: - config_dict[child] = float(config['Processing'][key]['#text']) + config_dict[child] = float(config["Processing"][key]["#text"]) except ValueError: - if config['Processing'][key]['#text'] == 'YES': + if config["Processing"][key]["#text"] == "YES": config_dict[child] = 1 else: config_dict[child] = 0 # Recording - config_dict['Rec_Filename_Prefix'] = config['Recording']['Filename_Prefix']['#text'] - config_dict['Rec_Output_Directory'] = config['Recording']['Output_Directory']['#text'] - - if 'Root_Directory' in config['Recording'].keys(): - if '#text' in config['Recording']['Root_Directory']: - config_dict['Rec_Root_Directory'] = config['Recording']['Root_Directory']['#text'] + config_dict["Rec_Filename_Prefix"] = config["Recording"]["Filename_Prefix"][ + "#text" + ] + config_dict["Rec_Output_Directory"] = config["Recording"][ + "Output_Directory" + ]["#text"] + + if "Root_Directory" in config["Recording"].keys(): + if "#text" in config["Recording"]["Root_Directory"]: + config_dict["Rec_Root_Directory"] = config["Recording"][ + "Root_Directory" + ]["#text"] else: - config_dict['Rec_Root_Directory'] = None + config_dict["Rec_Root_Directory"] = None else: - 
config_dict['Rec_Root_Directory'] = None + config_dict["Rec_Root_Directory"] = None - if config['Recording']['MeasurmentNmb'] is None: - config_dict['Rec_MeasNmb'] = config['Recording']['MeasurmentNmb'] + if config["Recording"]["MeasurmentNmb"] is None: + config_dict["Rec_MeasNmb"] = config["Recording"]["MeasurmentNmb"] else: - config_dict['Rec_MeasNmb'] = config['Recording']['MeasurmentNmb'] - config_dict['Rec_GPS'] = config['Recording']['GPS_Recording']['#text'] - config_dict['Rec_DS'] = config['Recording']['DS_Recording']['#text'] - config_dict['Rec_EH'] = config['Recording']['EH_Recording']['#text'] - config_dict['Rec_ASCII_Output'] = config['Recording']['ASCII_Output_Recording']['#text'] - config_dict['Rec_Max_File_Size'] = float(config['Recording']['Maximum_File_Size']['#text']) - config_dict['Rec_Next_Transect_Number'] = float(config['Recording']['Next_Transect_Number']['#text']) - config_dict['Rec_Add_Date_Time'] = float(config['Recording']['Add_Date_Time']['#text']) - config_dict['Rec_Use_Delimiter'] = config['Recording']['Use_Delimiter']['#text'] - config_dict['Rec_Delimiter'] = config['Recording']['Custom_Delimiter']['#text'] - config_dict['Rec_Prefix'] = config['Recording']['Use_Prefix']['#text'] - config_dict['Rec_Use_MeasNmb'] = config['Recording']['Use_MeasurementNmb']['#text'] - config_dict['Rec_Use_TransectNmb'] = config['Recording']['Use_TransectNmb']['#text'] - config_dict['Rec_Use_SequenceNmb'] = config['Recording']['Use_SequenceNmb']['#text'] + config_dict["Rec_MeasNmb"] = config["Recording"]["MeasurmentNmb"] + config_dict["Rec_GPS"] = config["Recording"]["GPS_Recording"]["#text"] + config_dict["Rec_DS"] = config["Recording"]["DS_Recording"]["#text"] + config_dict["Rec_EH"] = config["Recording"]["EH_Recording"]["#text"] + config_dict["Rec_ASCII_Output"] = config["Recording"][ + "ASCII_Output_Recording" + ]["#text"] + config_dict["Rec_Max_File_Size"] = float( + config["Recording"]["Maximum_File_Size"]["#text"] + ) + config_dict["Rec_Next_Transect_Number"] = float( + config["Recording"]["Next_Transect_Number"]["#text"] + ) + config_dict["Rec_Add_Date_Time"] = float( + config["Recording"]["Add_Date_Time"]["#text"] + ) + config_dict["Rec_Use_Delimiter"] = config["Recording"]["Use_Delimiter"][ + "#text" + ] + config_dict["Rec_Delimiter"] = config["Recording"]["Custom_Delimiter"][ + "#text" + ] + config_dict["Rec_Prefix"] = config["Recording"]["Use_Prefix"]["#text"] + config_dict["Rec_Use_MeasNmb"] = config["Recording"]["Use_MeasurementNmb"][ + "#text" + ] + config_dict["Rec_Use_TransectNmb"] = config["Recording"]["Use_TransectNmb"][ + "#text" + ] + config_dict["Rec_Use_SequenceNmb"] = config["Recording"]["Use_SequenceNmb"][ + "#text" + ] # Wizard settings - config_dict['Wiz_ADCP_Type'] = float(config['Wizard_Info']['ADCP_Type']) - config_dict['Wiz_Firmware'] = float(config['Wizard_Info']['ADCP_FW_Version']) - config_dict['Wiz_Use_Ext_Heading'] = config['Wizard_Info']['Use_Ext_Heading'] - config_dict['Wiz_Use_GPS'] = config['Wizard_Info']['Use_GPS'] - config_dict['Wiz_Use_DS'] = config['Wizard_Info']['Use_Depth_Sounder'] - config_dict['Wiz_Max_Water_Depth'] = float(config['Wizard_Info']['Max_Water_Depth']) - config_dict['Wiz_Max_Water_Speed'] = float(config['Wizard_Info']['Max_Water_Speed']) - config_dict['Wiz_Max_Boat_Space'] = float(config['Wizard_Info']['Max_Boat_Speed']) - config_dict['Wiz_Material'] = float(config['Wizard_Info']['Material']) - config_dict['Wiz_Water_Mode'] = float(config['Wizard_Info']['Water_Mode']) - config_dict['Wiz_Bottom_Mode'] = 
float(config['Wizard_Info']['Bottom_Mode']) - config_dict['Wiz_Beam_Angle'] = float(config['Wizard_Info']['Beam_Angle']) - config_dict['Wiz_Pressure_Sensor'] = config['Wizard_Info']['Pressure_Sensor'] - config_dict['Wiz_Water_Mode_13'] = float(config['Wizard_Info']['Water_Mode_13_Avail']) - config_dict['Wiz_StreamPro_Default'] = float(config['Wizard_Info']['Use_StreamPro_Def_Cfg']) - config_dict['Wiz_StreamPro_Bin_Size'] = float(config['Wizard_Info']['StreamPro_Bin_Size']) - config_dict['Wiz_StreamPro_Bin_Number'] = float(config['Wizard_Info']['StreamPro_Bin_Num']) - - if 'Use_GPS_Internal' in config['Wizard_Info'].keys(): - config_dict['Wiz_Use_GPS_Internal'] = config['Wizard_Info']['Use_GPS_Internal'] - if 'Internal_GPS_Baud_Rate_Index' in config['Wizard_Info'].keys(): - config_dict['Wiz_Internal_GPS_Baud_Rate_Index'] = float(config['Wizard_Info'] - ['Internal_GPS_Baud_Rate_Index']) + config_dict["Wiz_ADCP_Type"] = float(config["Wizard_Info"]["ADCP_Type"]) + config_dict["Wiz_Firmware"] = float( + config["Wizard_Info"]["ADCP_FW_Version"] + ) + config_dict["Wiz_Use_Ext_Heading"] = config["Wizard_Info"][ + "Use_Ext_Heading" + ] + config_dict["Wiz_Use_GPS"] = config["Wizard_Info"]["Use_GPS"] + config_dict["Wiz_Use_DS"] = config["Wizard_Info"]["Use_Depth_Sounder"] + config_dict["Wiz_Max_Water_Depth"] = float( + config["Wizard_Info"]["Max_Water_Depth"] + ) + config_dict["Wiz_Max_Water_Speed"] = float( + config["Wizard_Info"]["Max_Water_Speed"] + ) + config_dict["Wiz_Max_Boat_Space"] = float( + config["Wizard_Info"]["Max_Boat_Speed"] + ) + config_dict["Wiz_Material"] = float(config["Wizard_Info"]["Material"]) + config_dict["Wiz_Water_Mode"] = float(config["Wizard_Info"]["Water_Mode"]) + config_dict["Wiz_Bottom_Mode"] = float(config["Wizard_Info"]["Bottom_Mode"]) + config_dict["Wiz_Beam_Angle"] = float(config["Wizard_Info"]["Beam_Angle"]) + config_dict["Wiz_Pressure_Sensor"] = config["Wizard_Info"][ + "Pressure_Sensor" + ] + config_dict["Wiz_Water_Mode_13"] = float( + config["Wizard_Info"]["Water_Mode_13_Avail"] + ) + config_dict["Wiz_StreamPro_Default"] = float( + config["Wizard_Info"]["Use_StreamPro_Def_Cfg"] + ) + config_dict["Wiz_StreamPro_Bin_Size"] = float( + config["Wizard_Info"]["StreamPro_Bin_Size"] + ) + config_dict["Wiz_StreamPro_Bin_Number"] = float( + config["Wizard_Info"]["StreamPro_Bin_Num"] + ) + + if "Use_GPS_Internal" in config["Wizard_Info"].keys(): + config_dict["Wiz_Use_GPS_Internal"] = config["Wizard_Info"][ + "Use_GPS_Internal" + ] + if "Internal_GPS_Baud_Rate_Index" in config["Wizard_Info"].keys(): + config_dict["Wiz_Internal_GPS_Baud_Rate_Index"] = float( + config["Wizard_Info"]["Internal_GPS_Baud_Rate_Index"] + ) return config_dict @@ -509,7 +622,11 @@ class MMTtransect(object): Transect number assigned in WinRiver 2 """ - transect_file = {'Path': file['@PathName'], 'File': file['#text'], 'Number': file['@TransectNmb']} + transect_file = { + "Path": file["@PathName"], + "File": file["#text"], + "Number": file["@TransectNmb"], + } return transect_file @staticmethod @@ -535,5 +652,9 @@ class MMTtransect(object): Text of note """ - note_dict_out = {'NoteFileNo': number, 'NoteDate': note['@TimeStamp'], 'NoteText': note['@Text']} + note_dict_out = { + "NoteFileNo": number, + "NoteDate": note["@TimeStamp"], + "NoteText": note["@Text"], + } return note_dict_out diff --git a/Classes/MatSonTek.py b/Classes/MatSonTek.py index e78c346..83d1b13 100644 --- a/Classes/MatSonTek.py +++ b/Classes/MatSonTek.py @@ -1,13 +1,15 @@ import scipy.io as sio import numpy as np + class 
MatSonTek(object): """Read SonTek Matlab files and returns a dictionary of mat_struct. - Any data in English units are converted to SI units. + Any data in English units are converted to SI units. """ def __init__(self, fullname): - """Initializes the object, reads the Matlab file, and converts all English units to metric. + """Initializes the object, reads the Matlab file, and converts all + English units to metric. Parameters ---------- @@ -18,15 +20,18 @@ class MatSonTek(object): # Read Matlab file mat_data = sio.loadmat(fullname, struct_as_record=False, squeeze_me=True) - if 'BottomTrack' in mat_data: + if "BottomTrack" in mat_data: # Convert data to SI units if in English units - if mat_data['BottomTrack'].Units.BT_Depth == 'ft': + if mat_data["BottomTrack"].Units.BT_Depth == "ft": self.convert2metric(mat_data) - if hasattr(mat_data['RawGPSData'], 'VtgMode'): - mat_data['RawGPSData'].VtgMode[np.isnan(mat_data['RawGPSData'].VtgMode)] = 0 - mat_data['RawGPSData'].VtgMode = \ - np.array([chr(x) for x in range(127)])[mat_data['RawGPSData'].VtgMode.astype(int)] + if hasattr(mat_data["RawGPSData"], "VtgMode"): + mat_data["RawGPSData"].VtgMode[ + np.isnan(mat_data["RawGPSData"].VtgMode) + ] = 0 + mat_data["RawGPSData"].VtgMode = np.array([chr(x) for x in range(127)])[ + mat_data["RawGPSData"].VtgMode.astype(int) + ] # Create structure from dictionary vars(self).update(mat_data) @@ -41,24 +46,31 @@ class MatSonTek(object): Dictionary of data from Matlab file """ - data2correct = ['BottomTrack', 'GPS', 'Setup', 'Summary', 'System', 'WaterTrack'] + data2correct = [ + "BottomTrack", + "GPS", + "Setup", + "Summary", + "System", + "WaterTrack", + ] for item in data2correct: data = mat_data[item] units = data.Units names = units._fieldnames for name in names: - if getattr(units, name) == 'ft': + if getattr(units, name) == "ft": setattr(data, name, getattr(data, name) * 0.3048) - setattr(units, name, 'm') - elif getattr(units, name) == 'ft/s': + setattr(units, name, "m") + elif getattr(units, name) == "ft/s": setattr(data, name, getattr(data, name) * 0.3048) - setattr(units, name, 'm/s') - elif getattr(units, name) == 'degF': - setattr(data, name, (getattr(data, name)-32) * (5.0/9.0)) - setattr(units, name, 'degC') - elif getattr(units, name) == 'cfs': + setattr(units, name, "m/s") + elif getattr(units, name) == "degF": + setattr(data, name, (getattr(data, name) - 32) * (5.0 / 9.0)) + setattr(units, name, "degC") + elif getattr(units, name) == "cfs": setattr(data, name, getattr(data, name) * (0.3048**3)) - setattr(units, name, 'm3/s') - elif getattr(units, name) == 'ft2': - setattr(data, name, getattr(data, name) * (0.3048 ** 2)) - setattr(units, name, 'm2') + setattr(units, name, "m3/s") + elif getattr(units, name) == "ft2": + setattr(data, name, getattr(data, name) * (0.3048**2)) + setattr(units, name, "m2") diff --git a/Classes/Measurement.py b/Classes/Measurement.py index ebcba8f..8330055 100644 --- a/Classes/Measurement.py +++ b/Classes/Measurement.py @@ -1,8 +1,10 @@ +import copy import os import datetime import numpy as np import xml.etree.ElementTree as ETree from xml.dom.minidom import parseString +import utm from Classes.MMT_TRDI import MMTtrdi from Classes.TransectData import TransectData from Classes.PreMeasurement import PreMeasurement @@ -10,6 +12,7 @@ from Classes.MovingBedTests import MovingBedTests from Classes.QComp import QComp from Classes.MatSonTek import MatSonTek from Classes.ComputeExtrap import ComputeExtrap +from Classes.CrossSectionComp import CrossSectionComp from 
Classes.ExtrapQSensitivity import ExtrapQSensitivity
 from Classes.Uncertainty import Uncertainty
 from Classes.QAData import QAData
@@ -19,7 +22,7 @@ from Classes.WaterData import WaterData
 from Classes.Oursin import Oursin
 from Classes.Pd0TRDI_2 import Pd0TRDI
 from MiscLibs.common_functions import cart2pol, pol2cart, rad2azdeg, nans, azdeg2rad
-# from profilehooks import profile, timecall
+# from profilehooks import profile
 
 
 class Measurement(object):
@@ -75,11 +93,31 @@ class Measurement(object):
     use_measurement_thresholds: bool
         Indicates if the entire measurement should be used to set filter thresholds
     stage_start_m: float
         Stage at start of measurement
     stage_end_m: float
         Stage at end of measurement
     stage_meas_m: float
         Stage assigned to measurement
+    gps_quality_threshold: int
+        Sets the threshold that the GPS quality must equal or exceed
+    export_xs: bool
+        Specifies if average cross-section should be computed and exported
+    snr_3beam_comp: bool
+        Indicates if 3-beam velocity computations are used when invalid SNR
+        is found
    """

    # @profile
-    def __init__(self, in_file, source, proc_type='QRev', checked=False, run_oursin=False, use_weighted=False,
-                 use_measurement_thresholds=False, use_ping_type=True, min_transects=2, min_duration=720):
+    def __init__(
+        self,
+        in_file,
+        source,
+        proc_type="QRev",
+        checked=False,
+        run_oursin=False,
+        use_weighted=False,
+        use_measurement_thresholds=False,
+        use_ping_type=True,
+        min_transects=2,
+        min_duration=720,
+        export_xs=True,
+        gps_quality_threshold=2,
+        snr_3beam_comp=False,
+    ):
         """Initialize instance variables and initiate processing of measurement data.
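The widened __init__ signature above is easiest to read from the caller's side. A hypothetical construction call showing the new keyword arguments (the file name and settings are illustrative, not taken from this patch):

    from Classes.Measurement import Measurement

    # Hypothetical usage sketch of the new keyword arguments.
    meas = Measurement(
        in_file="site_0123.mmt",
        source="TRDI",
        proc_type="QRev",
        run_oursin=True,
        export_xs=True,            # compute and export an average cross-section
        gps_quality_threshold=2,   # minimum GGA quality indicator to accept
        snr_3beam_comp=False,      # SonTek/Nortek loads only: use 3-beam
    )                              # solutions when SNR is invalid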
@@ -107,6 +145,10 @@ class Measurement(object): Minimum number of transects required to pass QA min_duration: float Minimum duration in seconds of all transects to pass QA + export_xs: bool + Specifies if average cross-section should be computed and exported + gps_quality_threshold: int + Sets the threshold for which the GPS quality must equal to or greater than """ self.use_ping_type = use_ping_type @@ -116,8 +158,8 @@ class Measurement(object): self.min_duration = min_duration self.station_name = None self.station_number = None - self.persons = '' - self.meas_number = '' + self.persons = "" + self.meas_number = "" self.transects = [] self.mb_tests = [] self.system_tst = [] @@ -129,91 +171,109 @@ class Measurement(object): self.uncertainty = None self.initial_settings = None self.qa = None - self.user_rating = 'Not Rated' + self.user_rating = "Not Rated" self.comments = [] - self.ext_temp_chk = {'user': np.nan, 'units': 'C', 'adcp': np.nan, 'user_orig': np.nan, 'adcp_orig': np.nan} + self.ext_temp_chk = { + "user": np.nan, + "units": "C", + "adcp": np.nan, + "user_orig": np.nan, + "adcp_orig": np.nan, + } self.checked_transect_idx = [] self.oursin = None + self.map = None self.use_weighted = use_weighted self.observed_no_moving_bed = False self.stage_meas_m = 0 self.stage_end_m = 0 self.stage_start_m = 0 + self.export_xs = export_xs + self.gps_quality_threshold = gps_quality_threshold # Load data from selected source - if source == 'QRev': + if source == "QRev": self.load_qrev_mat(mat_data=in_file) - if proc_type == 'QRev': + if proc_type == "QRev": # Apply QRev default settings self.run_oursin = run_oursin self.use_weighted = use_weighted self.use_measurement_thresholds = use_measurement_thresholds settings = self.current_settings() - settings['WTEnsInterpolation'] = 'abba' - settings['WTCellInterpolation'] = 'abba' - settings['Processing'] = 'QRev' - settings['UseMeasurementThresholds'] = use_measurement_thresholds + settings["WTEnsInterpolation"] = "abba" + settings["WTCellInterpolation"] = "abba" + settings["Processing"] = "QRev" + settings["UseMeasurementThresholds"] = use_measurement_thresholds self.apply_settings(settings) else: - if source == 'TRDI': + + if source == "TRDI": self.load_trdi(in_file, checked=checked) - elif source == 'SonTek': - self.load_sontek(in_file) + elif source == "SonTek": + self.load_sontek(in_file, snr_3beam_comp=snr_3beam_comp) - elif source == 'Nortek': - self.load_sontek(in_file) + elif source == "Nortek": + self.load_sontek(in_file, snr_3beam_comp=snr_3beam_comp) # Process data if len(self.transects) > 0: - # Save initial settings self.initial_settings = self.current_settings() - # Process moving-bed tests - if len(self.mb_tests) > 0: - # Get navigation reference - select = self.initial_settings['NavRef'] - ref = None - if select == 'bt_vel': - ref = 'BT' - elif select == 'gga_vel': - ref = 'GGA' - elif select == 'vtg_vel': - ref = 'VTG' - self.mb_tests = MovingBedTests.auto_use_2_correct( - moving_bed_tests=self.mb_tests, boat_ref=ref) - # Set processing type - if proc_type == 'QRev': + if proc_type == "QRev": + # Apply QRev default settings - settings = self.qrev_default_settings(check_user_excluded_dist=True, use_weighted=use_weighted) - settings['Processing'] = 'QRev' - settings['UseMeasurementThresholds'] = use_measurement_thresholds - settings['UsePingType'] = self.use_ping_type + settings = self.qrev_default_settings( + check_user_excluded_dist=True, use_weighted=use_weighted + ) + + settings["Processing"] = "QRev" + 
settings["UseMeasurementThresholds"] = use_measurement_thresholds + + settings["UsePingType"] = self.use_ping_type self.apply_settings(settings) - elif proc_type == 'None': + elif proc_type == "None": # Processing with no filters and interpolation settings = self.no_filter_interp_settings(self) - settings['Processing'] = 'None' + settings["Processing"] = "None" self.apply_settings(settings) - elif proc_type == 'Original': + elif proc_type == "Original": # Processing for original settings # from manufacturer software for transect in self.transects: q = QComp() - q.populate_data(data_in=transect, - moving_bed_data=self.mb_tests) + q.populate_data(data_in=transect, moving_bed_data=self.mb_tests) + self.discharge.append(q) + + # Process moving-bed tests + if len(self.mb_tests) > 0: + # Get navigation reference + select = self.initial_settings["NavRef"] + ref = None + + if select == "bt_vel": + ref = "BT" + + elif select == "gga_vel": + ref = "GGA" + + elif select == "vtg_vel": + ref = "VTG" + self.mb_tests = MovingBedTests.auto_use_2_correct( + moving_bed_tests=self.mb_tests, boat_ref=ref + ) + self.uncertainty = Uncertainty() self.uncertainty.compute_uncertainty(self) - self.qa = QAData(self) - def load_trdi(self, mmt_file, transect_type='Q', checked=False): + def load_trdi(self, mmt_file, transect_type="Q", checked=False): """Method to load TRDI data. Parameters @@ -230,30 +290,30 @@ class Measurement(object): mmt = MMTtrdi(mmt_file) # Get properties if they exist, otherwise set them as blank strings - self.station_name = str(mmt.site_info['Name']) - self.station_number = str(mmt.site_info['Number']) - self.persons = str(mmt.site_info['Party']) - self.meas_number = str(mmt.site_info['MeasurementNmb']) + self.station_name = str(mmt.site_info["Name"]) + self.station_number = str(mmt.site_info["Number"]) + self.persons = str(mmt.site_info["Party"]) + self.meas_number = str(mmt.site_info["MeasurementNmb"]) # Get stage readings, if available. Note: mmt stage is always in m. - if mmt.site_info['Use_Inside_Gage_Height'] == '1': - stage = float(mmt.site_info['Inside_Gage_Height']) + if mmt.site_info["Use_Inside_Gage_Height"] == "1": + stage = float(mmt.site_info["Inside_Gage_Height"]) else: - stage = float(mmt.site_info['Outside_Gage_Height']) + stage = float(mmt.site_info["Outside_Gage_Height"]) self.stage_start_m = stage - change = float(mmt.site_info['Gage_Height_Change']) + change = float(mmt.site_info["Gage_Height_Change"]) self.stage_end_m = stage + change - self.stage_meas_m = (self.stage_start_m + self.stage_end_m) / 2. 
+ self.stage_meas_m = (self.stage_start_m + self.stage_end_m) / 2.0 # Initialize processing variable - self.processing = 'WR2' + self.processing = "WR2" if len(mmt.transects) > 0: # Create transect objects for TRDI data - self.transects = self.allocate_transects(mmt=mmt, - transect_type=transect_type, - checked=checked) + self.transects = self.allocate_transects( + mmt=mmt, transect_type=transect_type, checked=checked + ) self.checked_transect_idx = self.checked_transects(self) @@ -262,81 +322,96 @@ class Measurement(object): self.qaqc_trdi(mmt) # Save comments from mmt file in comments - self.comments.append('MMT Remarks: ' + mmt.site_info['Remarks']) + self.comments.append("MMT Remarks: " + mmt.site_info["Remarks"]) for t in range(len(self.transects)): - notes = getattr(mmt.transects[t], 'Notes') + notes = getattr(mmt.transects[t], "Notes") for note in notes: - note_text = ' File: ' + note['NoteFileNo'] + ' ' \ - + note['NoteDate'] + ': ' + note['NoteText'] + note_text = ( + " File: " + + note["NoteFileNo"] + + " " + + note["NoteDate"] + + ": " + + note["NoteText"] + ) self.comments.append(note_text) # Get external temperature - if type(mmt.site_info['Water_Temperature']) is float: - self.ext_temp_chk['user'] = mmt.site_info['Water_Temperature'] - self.ext_temp_chk['units'] = 'C' - self.ext_temp_chk['user_orig'] = mmt.site_info['Water_Temperature'] + if type(mmt.site_info["Water_Temperature"]) is float: + self.ext_temp_chk["user"] = mmt.site_info["Water_Temperature"] + self.ext_temp_chk["units"] = "C" + self.ext_temp_chk["user_orig"] = mmt.site_info["Water_Temperature"] # Initialize thresholds settings dictionary threshold_settings = dict() - threshold_settings['wt_settings'] = {} - threshold_settings['bt_settings'] = {} - threshold_settings['depth_settings'] = {} + threshold_settings["wt_settings"] = {} + threshold_settings["bt_settings"] = {} + threshold_settings["depth_settings"] = {} - # Select reference transect use first checked or if none then first transect + # Select reference transect use first checked or if none then + # first transect if len(self.checked_transect_idx) > 0: ref_transect = self.checked_transect_idx[0] else: ref_transect = 0 # Water track filter threshold settings - threshold_settings['wt_settings']['beam'] = \ - self.set_num_beam_wt_threshold_trdi(mmt.transects[ref_transect]) - threshold_settings['wt_settings']['difference'] = 'Manual' - threshold_settings['wt_settings']['difference_threshold'] = \ - mmt.transects[ref_transect].active_config['Proc_WT_Error_Velocity_Threshold'] - threshold_settings['wt_settings']['vertical'] = 'Manual' - threshold_settings['wt_settings']['vertical_threshold'] = \ - mmt.transects[ref_transect].active_config['Proc_WT_Up_Vel_Threshold'] + threshold_settings["wt_settings"][ + "beam" + ] = self.set_num_beam_wt_threshold_trdi(mmt.transects[ref_transect]) + threshold_settings["wt_settings"]["difference"] = "Manual" + threshold_settings["wt_settings"]["difference_threshold"] = mmt.transects[ + ref_transect + ].active_config["Proc_WT_Error_Velocity_Threshold"] + threshold_settings["wt_settings"]["vertical"] = "Manual" + threshold_settings["wt_settings"]["vertical_threshold"] = mmt.transects[ + ref_transect + ].active_config["Proc_WT_Up_Vel_Threshold"] # Bottom track filter threshold settings - threshold_settings['bt_settings']['beam'] = \ - self.set_num_beam_bt_threshold_trdi(mmt.transects[ref_transect]) - threshold_settings['bt_settings']['difference'] = 'Manual' - threshold_settings['bt_settings']['difference_threshold'] = \ - 
mmt.transects[ref_transect].active_config['Proc_BT_Error_Vel_Threshold'] - threshold_settings['bt_settings']['vertical'] = 'Manual' - threshold_settings['bt_settings']['vertical_threshold'] = \ - mmt.transects[ref_transect].active_config['Proc_BT_Up_Vel_Threshold'] + threshold_settings["bt_settings"][ + "beam" + ] = self.set_num_beam_bt_threshold_trdi(mmt.transects[ref_transect]) + threshold_settings["bt_settings"]["difference"] = "Manual" + threshold_settings["bt_settings"]["difference_threshold"] = mmt.transects[ + ref_transect + ].active_config["Proc_BT_Error_Vel_Threshold"] + threshold_settings["bt_settings"]["vertical"] = "Manual" + threshold_settings["bt_settings"]["vertical_threshold"] = mmt.transects[ + ref_transect + ].active_config["Proc_BT_Up_Vel_Threshold"] # Depth filter and averaging settings - threshold_settings['depth_settings']['depth_weighting'] = \ - self.set_depth_weighting_trdi(mmt.transects[ref_transect]) - threshold_settings['depth_settings']['depth_valid_method'] = 'TRDI' - threshold_settings['depth_settings']['depth_screening'] = \ - self.set_depth_screening_trdi(mmt.transects[ref_transect]) + threshold_settings["depth_settings"][ + "depth_weighting" + ] = self.set_depth_weighting_trdi(mmt.transects[ref_transect]) + threshold_settings["depth_settings"]["depth_valid_method"] = "TRDI" + threshold_settings["depth_settings"][ + "depth_screening" + ] = self.set_depth_screening_trdi(mmt.transects[ref_transect]) # Determine reference used in WR2 if available - reference = 'BT' - if 'Reference' in mmt.site_info.keys(): - reference = mmt.site_info['Reference'] - if reference == 'BT': - target = 'bt_vel' - elif reference == 'GGA': - target = 'gga_vel' - elif reference == 'VTG': - target = 'vtg_vel' + reference = "BT" + if "Reference" in mmt.site_info.keys(): + reference = mmt.site_info["Reference"] + if reference == "BT": + target = "bt_vel" + elif reference == "GGA": + target = "gga_vel" + elif reference == "VTG": + target = "vtg_vel" else: - target = 'bt_vel' + target = "bt_vel" for transect in self.transects: if getattr(transect.boat_vel, target) is None: - reference = 'BT' + reference = "BT" # Convert to earth coordinates for transect_idx, transect in enumerate(self.transects): # Convert to earth coordinates - transect.change_coord_sys(new_coord_sys='Earth') + transect.change_coord_sys(new_coord_sys="Earth") # Set navigation reference transect.change_nav_reference(update=False, new_nav_ref=reference) @@ -345,13 +420,11 @@ class Measurement(object): self.thresholds_trdi(transect, threshold_settings) # Apply boat interpolations - transect.boat_interpolations(update=False, - target='BT', - method='None') + transect.boat_interpolations(update=False, target="BT", method="None") if transect.gps is not None: - transect.boat_interpolations(update=False, - target='GPS', - method='HoldLast') + transect.boat_interpolations( + update=False, target="GPS", method="HoldLast" + ) # Update water data for changes in boat velocity transect.update_water() @@ -360,29 +433,35 @@ class Measurement(object): transect.w_vel.apply_filter(transect=transect, wt_depth=True) # Interpolate water data - transect.w_vel.apply_interpolation(transect=transect, - ens_interp='None', - cells_interp='None') + transect.w_vel.apply_interpolation( + transect=transect, ens_interp="None", cells_interp="None" + ) # Apply speed of sound computations as required mmt_sos_method = mmt.transects[transect_idx].active_config[ - 'Proc_Speed_of_Sound_Correction'] + "Proc_Speed_of_Sound_Correction" + ] # Speed of sound 
computed based on user supplied values if mmt_sos_method == 1: - salinity = mmt.transects[transect_idx].active_config['Proc_Salinity'] - transect.change_sos(parameter='salinity', selected='user', salinity=salinity) + salinity = mmt.transects[transect_idx].active_config[ + "Proc_Salinity" + ] + transect.change_sos( + parameter="salinity", selected="user", salinity=salinity + ) elif mmt_sos_method == 2: # Speed of sound set by user speed = mmt.transects[transect_idx].active_config[ - 'Proc_Fixed_Speed_Of_Sound'] - transect.change_sos(parameter='sosSrc', - selected='user', - speed=speed) + "Proc_Fixed_Speed_Of_Sound" + ] + transect.change_sos( + parameter="sosSrc", selected="user", speed=speed + ) def qaqc_trdi(self, mmt): """Processes qaqc test, calibrations, and evaluations - + Parameters ---------- mmt: MMTtrdi @@ -390,34 +469,41 @@ class Measurement(object): """ # ADCP Test - if 'RG_Test' in mmt.qaqc: - for n in range(len(mmt.qaqc['RG_Test'])): + if "RG_Test" in mmt.qaqc: + for n in range(len(mmt.qaqc["RG_Test"])): p_m = PreMeasurement() - p_m.populate_data(mmt.qaqc['RG_Test_TimeStamp'][n], - mmt.qaqc['RG_Test'][n], 'TST') + p_m.populate_data( + mmt.qaqc["RG_Test_TimeStamp"][n], mmt.qaqc["RG_Test"][n], "TST" + ) self.system_tst.append(p_m) # Compass calibration - if 'Compass_Calibration' in mmt.qaqc: - for n in range(len(mmt.qaqc['Compass_Calibration'])): + if "Compass_Calibration" in mmt.qaqc: + for n in range(len(mmt.qaqc["Compass_Calibration"])): cc = PreMeasurement() - cc.populate_data(mmt.qaqc['Compass_Calibration_TimeStamp'][n], - mmt.qaqc['Compass_Calibration'][n], 'TCC') + cc.populate_data( + mmt.qaqc["Compass_Calibration_TimeStamp"][n], + mmt.qaqc["Compass_Calibration"][n], + "TCC", + ) self.compass_cal.append(cc) - + # Compass evaluation - if 'Compass_Evaluation' in mmt.qaqc: - for n in range(len(mmt.qaqc['Compass_Evaluation'])): + if "Compass_Evaluation" in mmt.qaqc: + for n in range(len(mmt.qaqc["Compass_Evaluation"])): ce = PreMeasurement() - ce.populate_data(mmt.qaqc['Compass_Evaluation_TimeStamp'][n], - mmt.qaqc['Compass_Evaluation'][n], 'TCC') + ce.populate_data( + mmt.qaqc["Compass_Evaluation_TimeStamp"][n], + mmt.qaqc["Compass_Evaluation"][n], + "TCC", + ) self.compass_eval.append(ce) # Check for moving-bed tests if len(mmt.mbt_transects) > 0: - + # Create transect objects - transects = self.allocate_transects(mmt, transect_type='MB') + transects = self.allocate_transects(mmt, transect_type="MB") # Process moving-bed tests if len(transects) > 0: @@ -426,14 +512,23 @@ class Measurement(object): # Create moving-bed test object mb_test = MovingBedTests() - mb_test.populate_data('TRDI', transects[n], - mmt.mbt_transects[n].moving_bed_type) - + mb_test.populate_data( + source="TRDI", + file=transects[n], + test_type=mmt.mbt_transects[n].moving_bed_type, + ) + # Save notes from mmt files in comments - notes = getattr(mmt.mbt_transects[n], 'Notes') + notes = getattr(mmt.mbt_transects[n], "Notes") for note in notes: - note_text = ' File: ' + note['NoteFileNo'] + ' ' \ - + note['NoteDate'] + ': ' + note['NoteText'] + note_text = ( + " File: " + + note["NoteFileNo"] + + " " + + note["NoteDate"] + + ": " + + note["NoteText"] + ) self.comments.append(note_text) self.mb_tests.append(mb_test) @@ -451,25 +546,28 @@ class Measurement(object): """ # Apply WT settings - transect.w_vel.apply_filter(transect, **settings['wt_settings']) + transect.w_vel.apply_filter(transect, **settings["wt_settings"]) # Apply BT settings - transect.boat_vel.bt_vel.apply_filter(transect, **settings[ - 
'bt_settings']) + transect.boat_vel.bt_vel.apply_filter(transect, **settings["bt_settings"]) # Apply depth settings - transect.depths.bt_depths.valid_data_method = settings[ - 'depth_settings']['depth_valid_method'] - transect.depths.depth_filter(transect=transect, filter_method=settings[ - 'depth_settings']['depth_screening']) - transect.depths.bt_depths.compute_avg_bt_depth(method=settings[ - 'depth_settings']['depth_weighting']) + transect.depths.bt_depths.valid_data_method = settings["depth_settings"][ + "depth_valid_method" + ] + transect.depths.depth_filter( + transect=transect, + filter_method=settings["depth_settings"]["depth_screening"], + ) + transect.depths.bt_depths.compute_avg_bt_depth( + method=settings["depth_settings"]["depth_weighting"] + ) # Apply composite depths as per setting stored in transect # from TransectData transect.depths.composite_depths(transect) - def load_sontek(self, fullnames): + def load_sontek(self, fullnames, snr_3beam_comp): """Coordinates reading of all SonTek data files. Parameters @@ -477,104 +575,116 @@ class Measurement(object): fullnames: list File names including path for all discharge transects converted to Matlab files. + snr_3beam_comp: bool + Indicates the use of 3-beam velocity computations when invalid SNR is found """ # Initialize variables rsdata = None pathname = None + fullnames.sort() for file in fullnames: # Read data file rsdata = MatSonTek(file) pathname, file_name = os.path.split(file) - if hasattr(rsdata, 'BottomTrack'): + if hasattr(rsdata, "BottomTrack"): # Create transect objects for each discharge transect self.transects.append(TransectData()) - self.transects[-1].sontek(rsdata, file_name) + self.transects[-1].sontek( + rsdata, file_name, snr_3beam_comp=snr_3beam_comp + ) else: - self.comments.append(file + ' is incomplete and is not included in measurement processing') + self.comments.append( + file + " is incomplete and is not included in " + "measurement processing" + ) # Identify checked transects self.checked_transect_idx = self.checked_transects(self) # Site information pulled from last file - if hasattr(rsdata, 'SiteInfo'): - if hasattr(rsdata.SiteInfo, 'Site_Name'): + if hasattr(rsdata, "SiteInfo"): + if hasattr(rsdata.SiteInfo, "Site_Name"): if len(rsdata.SiteInfo.Site_Name) > 0: self.station_name = rsdata.SiteInfo.Site_Name else: - self.station_name = '' - if hasattr(rsdata.SiteInfo, 'Station_Number'): + self.station_name = "" + if hasattr(rsdata.SiteInfo, "Station_Number"): if len(rsdata.SiteInfo.Station_Number) > 0: self.station_number = rsdata.SiteInfo.Station_Number else: - self.station_number = '' - if hasattr(rsdata.SiteInfo, 'Meas_Number'): + self.station_number = "" + if hasattr(rsdata.SiteInfo, "Meas_Number"): if len(rsdata.SiteInfo.Meas_Number) > 0: self.meas_number = rsdata.SiteInfo.Meas_Number - if hasattr(rsdata.SiteInfo, 'Party'): + if hasattr(rsdata.SiteInfo, "Party"): if len(rsdata.SiteInfo.Party) > 0: self.persons = rsdata.SiteInfo.Party - if hasattr(rsdata.SiteInfo, 'Comments'): + if hasattr(rsdata.SiteInfo, "Comments"): if len(rsdata.SiteInfo.Comments) > 0: - self.comments.append('RS Comments: ' + rsdata.SiteInfo.Comments) - - # Although units imply meters the data are actually stored as m / 10,000 - if hasattr(rsdata.Setup, 'startGaugeHeight'): - self.stage_start_m = rsdata.Setup.startGaugeHeight / 10000. + self.comments.append("RS Comments: " + rsdata.SiteInfo.Comments) - if hasattr(rsdata.Setup, 'endGaugeHeight'): - self.stage_end_m = rsdata.Setup.endGaugeHeight / 10000. 
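As the comment above notes, the RiverSurveyor Setup gauge heights arrive as meters divided by 10,000 even though the field metadata implies meters, hence the division. With an illustrative raw value:

raw = 124000                       # as read from rsdata.Setup.startGaugeHeight
stage_start_m = raw / 10000.0      # 12.4 m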
+ # Although units imply meters the data are actually stored as m + # / 10,000 + if hasattr(rsdata.Setup, "startGaugeHeight"): + self.stage_start_m = rsdata.Setup.startGaugeHeight / 10000.0 - self.stage_meas_m = (self.stage_start_m + self.stage_end_m) / 2. + if hasattr(rsdata.Setup, "endGaugeHeight"): + self.stage_end_m = rsdata.Setup.endGaugeHeight / 10000.0 + self.stage_meas_m = (self.stage_start_m + self.stage_end_m) / 2.0 - self.qaqc_sontek(pathname) + self.qaqc_sontek(pathname, snr_3beam_comp=snr_3beam_comp) for transect in self.transects: - transect.change_coord_sys(new_coord_sys='Earth') + transect.change_coord_sys(new_coord_sys="Earth") transect.change_nav_reference( update=False, - new_nav_ref=self.transects[self.checked_transect_idx[0]].boat_vel.selected) - transect.boat_interpolations(update=False, - target='BT', - method='Hold9') - transect.boat_interpolations(update=False, - target='GPS', - method='None') - transect.apply_averaging_method(setting='Simple') - transect.process_depths(update=False, - interpolation_method='HoldLast') + new_nav_ref=self.transects[ + self.checked_transect_idx[0] + ].boat_vel.selected, + ) + transect.boat_interpolations(update=False, target="BT", method="Hold9") + transect.boat_interpolations(update=False, target="GPS", method="None") + transect.apply_averaging_method(setting="Simple") + transect.process_depths(update=False, interpolation_method="HoldLast") transect.update_water() # Filter water data transect.w_vel.apply_filter(transect=transect, wt_depth=True) # Interpolate water data - transect.w_vel.apply_interpolation(transect=transect, - ens_interp='None', - cells_interp='None') - transect.w_vel.apply_interpolation(transect=transect, - ens_interp='None', - cells_interp='TRDI') - - if transect.sensors.speed_of_sound_mps.selected == 'user': - transect.sensors.speed_of_sound_mps.selected = 'internal' - transect.change_sos(parameter='sosSrc', - selected='user', - speed=transect.sensors.speed_of_sound_mps.user.data) - elif transect.sensors.salinity_ppt.selected == 'user': - transect.change_sos(parameter='salinity', - selected='user', - salinity=transect.sensors.salinity_ppt.user.data) - elif transect.sensors.temperature_deg_c.selected == 'user': - transect.change_sos(parameter='temperature', - selected='user', - temperature=transect.sensors.temperature_deg_c.user.data) - - def qaqc_sontek(self, pathname): + transect.w_vel.apply_interpolation( + transect=transect, ens_interp="None", cells_interp="None" + ) + transect.w_vel.apply_interpolation( + transect=transect, ens_interp="None", cells_interp="TRDI" + ) + + if transect.sensors.speed_of_sound_mps.selected == "user": + transect.sensors.speed_of_sound_mps.selected = "internal" + transect.change_sos( + parameter="sosSrc", + selected="user", + speed=transect.sensors.speed_of_sound_mps.user.data, + ) + elif transect.sensors.salinity_ppt.selected == "user": + transect.change_sos( + parameter="salinity", + selected="user", + salinity=transect.sensors.salinity_ppt.user.data, + ) + elif transect.sensors.temperature_deg_c.selected == "user": + transect.change_sos( + parameter="temperature", + selected="user", + temperature=transect.sensors.temperature_deg_c.user.data, + ) + + def qaqc_sontek(self, pathname, snr_3beam_comp): """Reads and stores system tests, compass calibrations, and moving-bed tests. @@ -582,53 +692,55 @@ class Measurement(object): ---------- pathname: str Path to discharge transect files. 
+ snr_3beam_comp: bool + Indicates the use of 3-beam velocity computations when invalid SNR is found """ # Compass Calibration - compass_cal_folder = os.path.join(pathname, 'CompassCal') + compass_cal_folder = os.path.join(pathname, "CompassCal") time_stamp = None if os.path.isdir(compass_cal_folder): for file in os.listdir(compass_cal_folder): valid_file = False # G3 compasses - if file.endswith('.ccal'): - time_stamp = file.split('_') - time_stamp = time_stamp[0] + '_' + time_stamp[1] + if file.endswith(".ccal"): + time_stamp = file.split("_") + time_stamp = time_stamp[0] + "_" + time_stamp[1] valid_file = True # G2 compasses - elif file.endswith('.txt'): + elif file.endswith(".txt"): prefix, _ = os.path.splitext(file) - time_stamp = prefix.split('l')[1] + time_stamp = prefix.split("l")[1] valid_file = True if valid_file: with open(os.path.join(compass_cal_folder, file)) as f: cal_data = f.read() cal = PreMeasurement() - cal.populate_data(time_stamp, cal_data, 'SCC') + cal.populate_data(time_stamp, cal_data, "SCC") self.compass_cal.append(cal) # System Test - system_test_folder = os.path.join(pathname, 'SystemTest') + system_test_folder = os.path.join(pathname, "SystemTest") if os.path.isdir(system_test_folder): for file in os.listdir(system_test_folder): # Find system test files. - if file.startswith('SystemTest'): + if file.startswith("SystemTest"): with open(os.path.join(system_test_folder, file)) as f: test_data = f.read() - test_data = test_data.replace('\x00', '') + test_data = test_data.replace("\x00", "") time_stamp = file[10:24] sys_test = PreMeasurement() - sys_test.populate_data(time_stamp=time_stamp, - data_in=test_data, - data_type='SST') + sys_test.populate_data( + time_stamp=time_stamp, data_in=test_data, data_type="SST" + ) self.system_tst.append(sys_test) # Moving-bed tests - self.sontek_moving_bed_tests(pathname) + self.sontek_moving_bed_tests(pathname, snr_3beam_comp=snr_3beam_comp) - def sontek_moving_bed_tests(self, pathname): + def sontek_moving_bed_tests(self, pathname, snr_3beam_comp): """Locates and processes SonTek moving-bed tests. Searches the pathname for Matlab files that start with Loop or SMBA. @@ -638,22 +750,30 @@ class Measurement(object): ---------- pathname: str Path to discharge transect files. + snr_3beam_comp: bool + Indicates the use of 3-beam velocity computations when invalid SNR is found """ for file in os.listdir(pathname): # Find moving-bed test files. - if file.endswith('.mat'): + if file.endswith(".mat"): # Process Loop test - if file.lower().startswith('loop'): + if file.lower().startswith("loop"): self.mb_tests.append(MovingBedTests()) - self.mb_tests[-1].populate_data(source='SonTek', - file=os.path.join(pathname, file), - test_type='Loop') + self.mb_tests[-1].populate_data( + source="SonTek", + file=os.path.join(pathname, file), + test_type="Loop", + snr_3beam_comp=snr_3beam_comp, + ) # Process Stationary test - elif file.lower().startswith('smba'): + elif file.lower().startswith("smba"): self.mb_tests.append(MovingBedTests()) - self.mb_tests[-1].populate_data(source='SonTek', - file=os.path.join(pathname, file), - test_type='Stationary') + self.mb_tests[-1].populate_data( + source="SonTek", + file=os.path.join(pathname, file), + test_type="Stationary", + snr_3beam_comp=snr_3beam_comp, + ) def load_qrev_mat(self, mat_data): """Loads and coordinates the mapping of existing QRev Matlab files @@ -665,7 +785,7 @@ class Measurement(object): Dictionary containing Matlab data. 
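The snr_3beam_comp flag threaded through load_sontek, qaqc_sontek, and the moving-bed test loading above enables 3-beam velocity solutions when one beam fails the SNR screen. Why three beams suffice, shown generically (an illustration, not the QRevInt implementation): each beam of a Janus ADCP measures the projection of the water velocity onto its beam direction, so any three valid beams still determine u, v, and w; the fourth normally adds only the redundant error velocity.

import numpy as np

theta = np.deg2rad(25.0)                      # assumed beam angle
# Assumed beam unit vectors (fore, aft, starboard, port; z up)
E = np.array([[ np.sin(theta), 0.0, np.cos(theta)],
              [-np.sin(theta), 0.0, np.cos(theta)],
              [0.0,  np.sin(theta), np.cos(theta)],
              [0.0, -np.sin(theta), np.cos(theta)]])

v_true = np.array([0.8, 0.1, -0.02])          # illustrative water velocity, m/s
beam_vel = E @ v_true                         # radial velocity seen by each beam
valid = np.array([True, True, False, True])   # beam 3 rejected by the SNR screen
v_est, *_ = np.linalg.lstsq(E[valid], beam_vel[valid], rcond=None)
# v_est reproduces v_true from the three remaining beams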
""" - meas_struct = mat_data['meas_struct'] + meas_struct = mat_data["meas_struct"] # Assign data from meas_struct to associated instance variables # in Measurement and associated objects. @@ -673,21 +793,21 @@ class Measurement(object): self.station_name = meas_struct.stationName if len(meas_struct.stationNumber) > 0: self.station_number = meas_struct.stationNumber - if hasattr(meas_struct, 'meas_number'): + if hasattr(meas_struct, "meas_number"): if len(meas_struct.meas_number) == 0: - self.meas_number = '' + self.meas_number = "" else: self.meas_number = meas_struct.meas_number - if hasattr(meas_struct, 'persons'): + if hasattr(meas_struct, "persons"): if len(meas_struct.persons) == 0: - self.persons = '' + self.persons = "" else: self.persons = meas_struct.persons - if hasattr(meas_struct, 'stage_start_m'): + if hasattr(meas_struct, "stage_start_m"): self.stage_start_m = meas_struct.stage_start_m - if hasattr(meas_struct, 'stage_end_m'): + if hasattr(meas_struct, "stage_end_m"): self.stage_end_m = meas_struct.stage_end_m - if hasattr(meas_struct, 'stage_meas_m'): + if hasattr(meas_struct, "stage_meas_m"): self.stage_meas_m = meas_struct.stage_meas_m self.processing = meas_struct.processing if type(meas_struct.comments) == np.ndarray: @@ -696,12 +816,12 @@ class Measurement(object): # Needed to handle comments with blank lines for n, comment in enumerate(self.comments): if type(comment) is not str: - new_comment = '' + new_comment = "" for item in comment: if len(item.strip()) > 0: new_comment = new_comment + item else: - new_comment = new_comment + '\n' + new_comment = new_comment + "\n" self.comments[n] = new_comment else: self.comments = [meas_struct.comments] @@ -712,58 +832,76 @@ class Measurement(object): # Using comment =... didn't work but self.comments[n] does self.comments[2] = np.array2string(comment) - if hasattr(meas_struct, 'userRating'): + if hasattr(meas_struct, "userRating"): self.user_rating = meas_struct.userRating else: - self.user_rating = '' + self.user_rating = "" self.initial_settings = vars(meas_struct.initialSettings) # Update initial settings to agree with Python definitions - nav_dict = {'btVel': 'bt_vel', 'ggaVel': 'gga_vel', 'vtgVel': 'vtg_vel', - 'bt_vel': 'bt_vel', 'gga_vel': 'gga_vel', 'vtg_vel': 'vtg_vel'} - self.initial_settings['NavRef'] = nav_dict[self.initial_settings['NavRef']] - - on_off_dict = {'Off': False, 'On': True, 0: False, 1: True} - self.initial_settings['WTwtDepthFilter'] = on_off_dict[self.initial_settings['WTwtDepthFilter']] - - if type(self.initial_settings['WTsnrFilter']) is np.ndarray: - self.initial_settings['WTsnrFilter'] = 'Off' - - nav_dict = {'btDepths': 'bt_depths', 'vbDepths': 'vb_depths', 'dsDepths': 'ds_depths', - 'bt_depths': 'bt_depths', 'vb_depths': 'vb_depths', 'ds_depths': 'ds_depths'} - self.initial_settings['depthReference'] = nav_dict[self.initial_settings['depthReference']] - - self.ext_temp_chk = {'user': meas_struct.extTempChk.user, - 'units': meas_struct.extTempChk.units, - 'adcp': meas_struct.extTempChk.adcp} - - if hasattr(meas_struct.extTempChk, 'user_orig'): - self.ext_temp_chk['user_orig'] = meas_struct.extTempChk.user_orig + nav_dict = { + "btVel": "bt_vel", + "ggaVel": "gga_vel", + "vtgVel": "vtg_vel", + "bt_vel": "bt_vel", + "gga_vel": "gga_vel", + "vtg_vel": "vtg_vel", + } + self.initial_settings["NavRef"] = nav_dict[self.initial_settings["NavRef"]] + + on_off_dict = {"Off": False, "On": True, 0: False, 1: True} + self.initial_settings["WTwtDepthFilter"] = on_off_dict[ + 
self.initial_settings["WTwtDepthFilter"] + ] + + if type(self.initial_settings["WTsnrFilter"]) is np.ndarray: + self.initial_settings["WTsnrFilter"] = "Off" + + nav_dict = { + "btDepths": "bt_depths", + "vbDepths": "vb_depths", + "dsDepths": "ds_depths", + "bt_depths": "bt_depths", + "vb_depths": "vb_depths", + "ds_depths": "ds_depths", + } + self.initial_settings["depthReference"] = nav_dict[ + self.initial_settings["depthReference"] + ] + + self.ext_temp_chk = { + "user": meas_struct.extTempChk.user, + "units": meas_struct.extTempChk.units, + "adcp": meas_struct.extTempChk.adcp, + } + + if hasattr(meas_struct.extTempChk, "user_orig"): + self.ext_temp_chk["user_orig"] = meas_struct.extTempChk.user_orig else: - self.ext_temp_chk['user_orig'] = meas_struct.extTempChk.user + self.ext_temp_chk["user_orig"] = meas_struct.extTempChk.user - if hasattr(meas_struct.extTempChk, 'adcp_orig'): - self.ext_temp_chk['adcp_orig'] = meas_struct.extTempChk.adcp_orig + if hasattr(meas_struct.extTempChk, "adcp_orig"): + self.ext_temp_chk["adcp_orig"] = meas_struct.extTempChk.adcp_orig else: - self.ext_temp_chk['adcp_orig'] = meas_struct.extTempChk.adcp - - if type(self.ext_temp_chk['user']) is str: - self.ext_temp_chk['user'] = np.nan - if type(self.ext_temp_chk['adcp']) is str: - self.ext_temp_chk['adcp'] = np.nan - if type(self.ext_temp_chk['user']) is np.ndarray: - self.ext_temp_chk['user'] = np.nan - if type(self.ext_temp_chk['adcp']) is np.ndarray: - self.ext_temp_chk['adcp'] = np.nan - if type(self.ext_temp_chk['user_orig']) is str: - self.ext_temp_chk['user_orig'] = np.nan - if type(self.ext_temp_chk['adcp_orig']) is str: - self.ext_temp_chk['adcp_orig'] = np.nan - if type(self.ext_temp_chk['user_orig']) is np.ndarray: - self.ext_temp_chk['user_orig'] = np.nan - if type(self.ext_temp_chk['adcp_orig']) is np.ndarray: - self.ext_temp_chk['adcp_orig'] = np.nan + self.ext_temp_chk["adcp_orig"] = meas_struct.extTempChk.adcp + + if type(self.ext_temp_chk["user"]) is str: + self.ext_temp_chk["user"] = np.nan + if type(self.ext_temp_chk["adcp"]) is str: + self.ext_temp_chk["adcp"] = np.nan + if type(self.ext_temp_chk["user"]) is np.ndarray: + self.ext_temp_chk["user"] = np.nan + if type(self.ext_temp_chk["adcp"]) is np.ndarray: + self.ext_temp_chk["adcp"] = np.nan + if type(self.ext_temp_chk["user_orig"]) is str: + self.ext_temp_chk["user_orig"] = np.nan + if type(self.ext_temp_chk["adcp_orig"]) is str: + self.ext_temp_chk["adcp_orig"] = np.nan + if type(self.ext_temp_chk["user_orig"]) is np.ndarray: + self.ext_temp_chk["user_orig"] = np.nan + if type(self.ext_temp_chk["adcp_orig"]) is np.ndarray: + self.ext_temp_chk["adcp_orig"] = np.nan self.system_tst = PreMeasurement.sys_test_qrev_mat_in(meas_struct) @@ -785,23 +923,30 @@ class Measurement(object): self.discharge = QComp.qrev_mat_in(meas_struct) - # For compatibility with older QRev.mat files that didn't have this feature + # For compatibility with older QRev.mat files that didn't have this + # feature for n in range(len(self.transects)): if len(self.discharge[n].left_idx) == 0: - self.discharge[n].left_idx = self.discharge[n].edge_ensembles(edge_loc='left', - transect=self.transects[n]) + self.discharge[n].left_idx = self.discharge[n].edge_ensembles( + edge_loc="left", transect=self.transects[n] + ) if len(self.discharge[n].right_idx) == 0: - self.discharge[n].right_idx = self.discharge[n].edge_ensembles(edge_loc='right', - transect=self.transects[n]) + self.discharge[n].right_idx = self.discharge[n].edge_ensembles( + edge_loc="right", 
transect=self.transects[n] + ) if type(self.discharge[n].correction_factor) is list: - self.discharge[n].correction_factor = self.discharge[n].total / self.discharge[n].total_uncorrected + self.discharge[n].correction_factor = ( + self.discharge[n].total / self.discharge[n].total_uncorrected + ) + self.discharge[n].compute_topbot_speed(self.transects[n]) + self.discharge[n].compute_edge_speed(self.transects[n]) # Identify checked transects self.checked_transect_idx = self.checked_transects(self) - if hasattr(meas_struct, 'observed_no_moving_bed'): + if hasattr(meas_struct, "observed_no_moving_bed"): self.observed_no_moving_bed = meas_struct.observed_no_moving_bed else: self.observed_no_moving_bed = False @@ -809,23 +954,25 @@ class Measurement(object): self.uncertainty = Uncertainty() self.uncertainty.populate_from_qrev_mat(meas_struct) self.qa = QAData(self, mat_struct=meas_struct, compute=False) - if hasattr(meas_struct, 'run_oursin'): + if hasattr(meas_struct, "run_oursin"): self.run_oursin = meas_struct.run_oursin else: self.run_oursin = False - if hasattr(meas_struct, 'oursin'): + if hasattr(meas_struct, "oursin"): self.oursin = Oursin() self.oursin.populate_from_qrev_mat(meas_struct=meas_struct) else: self.oursin = None self.use_weighted = self.extrap_fit.use_weighted - self.use_measurement_thresholds = \ - self.transects[self.checked_transect_idx[0]].boat_vel.bt_vel.use_measurement_thresholds + self.use_measurement_thresholds = self.transects[ + self.checked_transect_idx[0] + ].boat_vel.bt_vel.use_measurement_thresholds + def create_filter_composites(self): - """Create composite for water and bottom track difference and vertical velocities and compute the thresholds - using these composites. + """Create composite for water and bottom track difference and + vertical velocities and compute the thresholds using these composites. 
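create_filter_composites pools the difference (d_mps) and vertical (w_mps) velocities of all checked transects, keyed by ping type (RS5) or bottom-track frequency, and derives one threshold per key via WaterData.meas_iqr_filter(..., multiplier=5). A minimal sketch of an IQR-style threshold of that kind; the exact return structure of meas_iqr_filter is assumed here, not taken from the source:

import numpy as np

def iqr_thresholds(data, multiplier=5):
    # Robust bounds: median +/- multiplier * interquartile range.
    q1, q3 = np.nanpercentile(data, [25, 75])
    med = np.nanmedian(data)
    half_width = multiplier * (q3 - q1)
    return {"lower": med - half_width, "upper": med + half_width}

d_composite = np.array([-0.02, 0.01, 0.02, 0.03, 5.0])  # pooled, illustrative
bounds = iqr_thresholds(d_composite)  # lower=-0.08, upper=0.12, so 5.0 is flagged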
""" # Initialize dictionaries @@ -837,15 +984,36 @@ class Measurement(object): # Create composite arrays for all checked transects for transect in self.transects: if transect.checked: - bt_freq = transect.boat_vel.bt_vel.frequency_khz.astype(int).astype(str) - freq = np.unique(bt_freq) - for f in freq: - if f in bt_d: - bt_d[f] = np.hstack((bt_d[f], transect.boat_vel.bt_vel.d_mps[bt_freq == f])) - bt_w[f] = np.hstack((bt_w[f], transect.boat_vel.bt_vel.w_mps[bt_freq == f])) - else: - bt_d[f] = transect.boat_vel.bt_vel.d_mps[bt_freq == f] - bt_w[f] = transect.boat_vel.bt_vel.w_mps[bt_freq == f] + if transect.adcp.model == "RS5": + bt_pt = transect.boat_vel.bt_vel.ping_type + pt = np.unique(bt_pt) + for p in pt: + if p in bt_d: + bt_d[p] = np.hstack( + (bt_d[p], transect.boat_vel.bt_vel.d_mps[bt_pt == p]) + ) + bt_w[p] = np.hstack( + (bt_w[p], transect.boat_vel.bt_vel.w_mps[bt_pt == p]) + ) + else: + bt_d[p] = transect.boat_vel.bt_vel.d_mps[bt_pt == p] + bt_w[p] = transect.boat_vel.bt_vel.w_mps[bt_pt == p] + else: + bt_freq = transect.boat_vel.bt_vel.frequency_khz.astype(int).astype( + str + ) + freq = np.unique(bt_freq) + for f in freq: + if f in bt_d: + bt_d[f] = np.hstack( + (bt_d[f], transect.boat_vel.bt_vel.d_mps[bt_freq == f]) + ) + bt_w[f] = np.hstack( + (bt_w[f], transect.boat_vel.bt_vel.w_mps[bt_freq == f]) + ) + else: + bt_d[f] = transect.boat_vel.bt_vel.d_mps[bt_freq == f] + bt_w[f] = transect.boat_vel.bt_vel.w_mps[bt_freq == f] if transect.w_vel.ping_type.size > 0: # Identify the ping types used in the transect @@ -854,27 +1022,63 @@ class Measurement(object): for p_type in p_types: if p_type in wt_d: wt_d[p_type] = np.hstack( - (wt_d[p_type], transect.w_vel.d_mps[np.logical_and(transect.w_vel.ping_type == p_type, - transect.w_vel.cells_above_sl)])) + ( + wt_d[p_type], + transect.w_vel.d_mps[ + np.logical_and( + transect.w_vel.ping_type == p_type, + transect.w_vel.cells_above_sl, + ) + ], + ) + ) wt_w[p_type] = np.hstack( - (wt_d[p_type], transect.w_vel.w_mps[np.logical_and(transect.w_vel.ping_type == p_type, - transect.w_vel.cells_above_sl)])) + ( + wt_d[p_type], + transect.w_vel.w_mps[ + np.logical_and( + transect.w_vel.ping_type == p_type, + transect.w_vel.cells_above_sl, + ) + ], + ) + ) else: - wt_d[p_type] = transect.w_vel.d_mps[np.logical_and(transect.w_vel.ping_type == p_type, - transect.w_vel.cells_above_sl)] - wt_w[p_type] = transect.w_vel.w_mps[np.logical_and(transect.w_vel.ping_type == p_type, - transect.w_vel.cells_above_sl)] + wt_d[p_type] = transect.w_vel.d_mps[ + np.logical_and( + transect.w_vel.ping_type == p_type, + transect.w_vel.cells_above_sl, + ) + ] + wt_w[p_type] = transect.w_vel.w_mps[ + np.logical_and( + transect.w_vel.ping_type == p_type, + transect.w_vel.cells_above_sl, + ) + ] else: - p_types = np.array(['U']) + p_types = np.array(["U"]) for p_type in p_types: if p_type in wt_d: - wt_d[p_type] = np.hstack((wt_d[p_type], - transect.w_vel.d_mps[transect.w_vel.cells_above_sl])) - wt_w[p_type] = np.hstack((wt_d[p_type], - transect.w_vel.w_mps[transect.w_vel.cells_above_sl])) + wt_d[p_type] = np.hstack( + ( + wt_d[p_type], + transect.w_vel.d_mps[transect.w_vel.cells_above_sl], + ) + ) + wt_w[p_type] = np.hstack( + ( + wt_d[p_type], + transect.w_vel.w_mps[transect.w_vel.cells_above_sl], + ) + ) else: - wt_d[p_type] = transect.w_vel.d_mps[transect.w_vel.cells_above_sl] - wt_w[p_type] = transect.w_vel.w_mps[transect.w_vel.cells_above_sl] + wt_d[p_type] = transect.w_vel.d_mps[ + transect.w_vel.cells_above_sl + ] + wt_w[p_type] = transect.w_vel.w_mps[ + 
transect.w_vel.cells_above_sl + ] # Compute thresholds based on composite arrays @@ -882,8 +1086,12 @@ class Measurement(object): wt_d_meas_thresholds = {} wt_w_meas_thresholds = {} for p_type in wt_d.keys(): - wt_d_meas_thresholds[p_type] = WaterData.meas_iqr_filter(wt_d[p_type], multiplier=5) - wt_w_meas_thresholds[p_type] = WaterData.meas_iqr_filter(wt_w[p_type], multiplier=5) + wt_d_meas_thresholds[p_type] = WaterData.meas_iqr_filter( + wt_d[p_type], multiplier=5 + ) + wt_w_meas_thresholds[p_type] = WaterData.meas_iqr_filter( + wt_w[p_type], multiplier=5 + ) # Bottom track bt_d_meas_thresholds = {} @@ -910,23 +1118,23 @@ class Measurement(object): @staticmethod def set_num_beam_wt_threshold_trdi(mmt_transect): """Get number of beams to use in processing for WT from mmt file - + Parameters ---------- mmt_transect: MMT_Transect Object of MMT_Transect - + Returns ------- num_3_beam_wt_Out: int """ - use_3_beam_wt = mmt_transect.active_config['Proc_Use_3_Beam_WT'] + use_3_beam_wt = mmt_transect.active_config["Proc_Use_3_Beam_WT"] if use_3_beam_wt == 0: num_beam_wt_out = 4 else: num_beam_wt_out = 3 - + return num_beam_wt_out @staticmethod @@ -943,7 +1151,7 @@ class Measurement(object): num_3_beam_WT_Out: int """ - use_3_beam_bt = mmt_transect.active_config['Proc_Use_3_Beam_BT'] + use_3_beam_bt = mmt_transect.active_config["Proc_Use_3_Beam_BT"] if use_3_beam_bt == 0: num_beam_bt_out = 4 else: @@ -954,86 +1162,100 @@ class Measurement(object): @staticmethod def set_depth_weighting_trdi(mmt_transect): """Get the average depth method from mmt - + Parameters ---------- mmt_transect: MMT_Transect Object of MMT_Transect - + Returns ------- depth_weighting_setting: str Method to compute mean depth """ - depth_weighting = mmt_transect.active_config['Proc_Use_Weighted_Mean_Depth'] - + depth_weighting = mmt_transect.active_config["Proc_Use_Weighted_Mean_Depth"] + if depth_weighting == 0: - depth_weighting_setting = 'Simple' + depth_weighting_setting = "Simple" else: - depth_weighting_setting = 'IDW' + depth_weighting_setting = "IDW" return depth_weighting_setting @staticmethod def set_depth_screening_trdi(mmt_transect): """Get the depth screening setting from mmt - + Parameters ---------- mmt_transect: MMT_Transect Object of MMT_Transect - + Returns ------- depth_screening_setting: str Type of depth screening to use """ - depth_screen = mmt_transect.active_config['Proc_Screen_Depth'] + depth_screen = mmt_transect.active_config["Proc_Screen_Depth"] if depth_screen == 0: - depth_screening_setting = 'None' + depth_screening_setting = "None" else: - depth_screening_setting = 'TRDI' - + depth_screening_setting = "TRDI" + return depth_screening_setting - - def change_sos(self, transect_idx=None, parameter=None, salinity=None, temperature=None, selected=None, speed=None): + + def change_sos( + self, + transect_idx=None, + parameter=None, + salinity=None, + temperature=None, + selected=None, + speed=None, + ): """Applies a change in speed of sound to one or all transects and update the discharge and uncertainty computations - + Parameters ---------- transect_idx: int Index of transect to change parameter: str - Speed of sound parameter to be changed ('temperatureSrc', 'temperature', 'salinity', 'sosSrc') + Speed of sound parameter to be changed ('temperatureSrc', + 'temperature', 'salinity', 'sosSrc') salinity: float Salinity in ppt temperature: float Temperature in deg C selected: str - Selected speed of sound ('internal', 'computed', 'user') or temperature ('internal', 'user') + Selected speed of sound 
('internal', 'computed', 'user') or + temperature ('internal', 'user') speed: float Manually supplied speed of sound for 'user' source """ - + s = self.current_settings() if transect_idx is None: # Apply to all transects for transect in self.transects: - transect.change_sos(parameter=parameter, - salinity=salinity, - temperature=temperature, - selected=selected, - speed=speed) + transect.change_sos( + parameter=parameter, + salinity=salinity, + temperature=temperature, + selected=selected, + speed=speed, + ) else: # Apply to a single transect - self.transects[transect_idx].change_sos(parameter=parameter, - salinity=salinity, - temperature=temperature, - selected=selected, - speed=speed) + self.transects[transect_idx].change_sos( + parameter=parameter, + salinity=salinity, + temperature=temperature, + selected=selected, + speed=speed, + ) # Reapply settings to newly adjusted data self.apply_settings(s) @@ -1045,7 +1267,8 @@ class Measurement(object): magvar: float Magnetic variation transect_idx: int - Index of transect to which the change is applied. None is all transects. + Index of transect to which the change is applied. None is all + transects. """ # Get current settings @@ -1058,7 +1281,7 @@ class Measurement(object): # If the internal compass is used the recompute is necessary while n < n_transects and recompute is False: - if self.transects[n].sensors.heading_deg.selected == 'internal': + if self.transects[n].sensors.heading_deg.selected == "internal": recompute = True n += 1 @@ -1092,7 +1315,8 @@ class Measurement(object): h_offset: float Heading offset transect_idx: int - Index of transect to which the change is applied. None is all transects. + Index of transect to which the change is applied. None is all + transects. """ # Get current settings @@ -1105,7 +1329,7 @@ class Measurement(object): # If external compass is used then a recompute is necessary while n < n_transects and recompute is False: - if self.transects[n].sensors.heading_deg.selected == 'external': + if self.transects[n].sensors.heading_deg.selected == "external": recompute = True n += 1 @@ -1117,7 +1341,9 @@ class Measurement(object): # Apply change to moving-bed tests if len(self.mb_tests) > 0: for test in self.mb_tests: - old_h_offset = test.transect.sensors.heading_deg.external.align_correction_deg + old_h_offset = ( + test.transect.sensors.heading_deg.external.align_correction_deg + ) test.transect.change_offset(h_offset) test.h_offset_change(h_offset, old_h_offset) else: @@ -1138,7 +1364,8 @@ class Measurement(object): h_source: str Heading source (internal or external) transect_idx: int - Index of transect to which the change is applied. None is all transects. + Index of transect to which the change is applied. None is all + transects. 
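change_mag_var and change_h_offset above force a recompute only when the affected heading source is the one in use, because the correction amounts to rotating the horizontal velocities about the vertical axis. A generic illustration of that rotation (sign convention assumed; not the QRevInt code path):

import numpy as np

def rotate_uv(u, v, delta_deg):
    # Re-express horizontal velocity after a heading change of delta_deg.
    d = np.deg2rad(delta_deg)
    return (u * np.cos(d) + v * np.sin(d),
            -u * np.sin(d) + v * np.cos(d))

u2, v2 = rotate_uv(1.0, 0.0, 90.0)   # approximately (0.0, -1.0) under this convention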
""" # Get current settings @@ -1155,16 +1382,17 @@ class Measurement(object): test.transect.change_heading_source(h_source) test.process_mb_test(source=test.transect.adcp.manufacturer) settings = self.current_settings() - select = settings['NavRef'] + select = settings["NavRef"] ref = None - if select == 'bt_vel': - ref = 'BT' - elif select == 'gga_vel': - ref = 'GGA' - elif select == 'vtg_vel': - ref = 'VTG' + if select == "bt_vel": + ref = "BT" + elif select == "gga_vel": + ref = "GGA" + elif select == "vtg_vel": + ref = "VTG" self.mb_tests = MovingBedTests.auto_use_2_correct( - moving_bed_tests=self.mb_tests, boat_ref=ref) + moving_bed_tests=self.mb_tests, boat_ref=ref + ) else: self.transects[transect_idx].change_heading_source(h_source) @@ -1179,7 +1407,8 @@ class Measurement(object): draft: float Draft of ADCP in m transect_idx: int - Index of transect to which the change is applied. None is all transects. + Index of transect to which the change is applied. None is all + transects. """ # Get current settings @@ -1196,7 +1425,8 @@ class Measurement(object): @staticmethod def h_external_valid(meas): - """Determine if valid external heading data is included in the measurement. + """Determine if valid external heading data is included in the + measurement. Parameters ---------- @@ -1211,25 +1441,33 @@ class Measurement(object): break return external - # @profile def apply_settings(self, settings, force_abba=True): """Applies reference, filter, and interpolation settings. - + Parameters ---------- settings: dict Dictionary of reference, filter, and interpolation settings force_abba: bool - Allows the above, below, before, after interpolation to be applied even when the data use another approach. + Allows the above, below, before, after interpolation to be + applied even when the data use another approach. 
""" - self.use_ping_type = settings['UsePingType'] + self.use_ping_type = settings["UsePingType"] - # If SonTek data does not have ping type identified, determine ping types - if self.transects[0].w_vel.ping_type.size == 1 and self.transects[0].adcp.manufacturer == 'SonTek': + # If SonTek data does not have ping type identified, determine ping + # types + if ( + self.transects[0].w_vel.ping_type.size == 1 + and self.transects[0].adcp.manufacturer == "SonTek" + ): for transect in self.transects: - ping_type = TransectData.sontek_ping_type(transect.w_vel.corr, transect.w_vel.frequency) - transect.w_vel.ping_type = np.tile(np.array([ping_type]), (transect.w_vel.corr.shape[1], 1)) + ping_type = TransectData.sontek_ping_type( + transect.w_vel.corr, transect.w_vel.frequency + ) + transect.w_vel.ping_type = np.tile( + np.array([ping_type]), (transect.w_vel.corr.shape[1], 1) + ) # If the measurement thresholds have not been computed, compute them if not self.transects[0].w_vel.d_meas_thresholds: @@ -1242,220 +1480,271 @@ class Measurement(object): # Apply settings to discharge transects for transect in self.transects: - if not settings['UsePingType']: - transect.w_vel.ping_type = np.tile('U', transect.w_vel.ping_type.shape) - transect.boat_vel.bt_vel.frequency_khz = np.tile(0, transect.boat_vel.bt_vel.frequency_khz.shape) + if not settings["UsePingType"]: + transect.w_vel.ping_type = np.tile("U", transect.w_vel.ping_type.shape) + transect.boat_vel.bt_vel.frequency_khz = np.tile( + 0, transect.boat_vel.bt_vel.frequency_khz.shape + ) # Moving-boat ensembles - if 'Processing' in settings.keys(): - transect.change_q_ensembles(proc_method=settings['Processing']) - self.processing = settings['Processing'] + if "Processing" in settings.keys(): + transect.change_q_ensembles(proc_method=settings["Processing"]) + self.processing = settings["Processing"] # Navigation reference - if transect.boat_vel.selected != settings['NavRef']: - transect.change_nav_reference(update=False, new_nav_ref=settings['NavRef']) + if transect.boat_vel.selected != settings["NavRef"]: + transect.change_nav_reference( + update=False, new_nav_ref=settings["NavRef"] + ) if len(self.mb_tests) > 0: self.mb_tests = MovingBedTests.auto_use_2_correct( - moving_bed_tests=self.mb_tests, - boat_ref=settings['NavRef']) + moving_bed_tests=self.mb_tests, boat_ref=settings["NavRef"] + ) # Changing the nav reference applies the current setting for # Composite tracks, check to see if a change is needed - if transect.boat_vel.composite != settings['CompTracks']: - transect.composite_tracks(update=False, setting=settings['CompTracks']) + if transect.boat_vel.composite != settings["CompTracks"]: + transect.composite_tracks(update=False, setting=settings["CompTracks"]) # Set difference velocity BT filter bt_kwargs = {} - if settings['BTdFilter'] == 'Manual': - bt_kwargs['difference'] = settings['BTdFilter'] - bt_kwargs['difference_threshold'] = settings['BTdFilterThreshold'] + if settings["BTdFilter"] == "Manual": + bt_kwargs["difference"] = settings["BTdFilter"] + bt_kwargs["difference_threshold"] = settings["BTdFilterThreshold"] else: - bt_kwargs['difference'] = settings['BTdFilter'] + bt_kwargs["difference"] = settings["BTdFilter"] # Set vertical velocity BT filter - if settings['BTwFilter'] == 'Manual': - bt_kwargs['vertical'] = settings['BTwFilter'] - bt_kwargs['vertical_threshold'] = settings['BTwFilterThreshold'] + if settings["BTwFilter"] == "Manual": + bt_kwargs["vertical"] = settings["BTwFilter"] + bt_kwargs["vertical_threshold"] = 
settings["BTwFilterThreshold"] else: - bt_kwargs['vertical'] = settings['BTwFilter'] + bt_kwargs["vertical"] = settings["BTwFilter"] - # Apply beam filter - bt_kwargs['beam'] = settings['BTbeamFilter'] + # Apply beam filter + bt_kwargs["beam"] = settings["BTbeamFilter"] - # Apply smooth filter - bt_kwargs['other'] = settings['BTsmoothFilter'] + # Apply smooth filter + bt_kwargs["other"] = settings["BTsmoothFilter"] - transect.boat_vel.bt_vel.use_measurement_thresholds = settings['UseMeasurementThresholds'] + transect.boat_vel.bt_vel.use_measurement_thresholds = settings[ + "UseMeasurementThresholds" + ] # Apply BT settings transect.boat_filters(update=False, **bt_kwargs) # BT Interpolation - transect.boat_interpolations(update=False, - target='BT', - method=settings['BTInterpolation']) + transect.boat_interpolations( + update=False, target="BT", method=settings["BTInterpolation"] + ) # GPS filter settings if transect.gps is not None: gga_kwargs = {} if transect.boat_vel.gga_vel is not None: # GGA - gga_kwargs['differential'] = settings['ggaDiffQualFilter'] - if settings['ggaAltitudeFilter'] == 'Manual': - gga_kwargs['altitude'] = settings['ggaAltitudeFilter'] - gga_kwargs['altitude_threshold'] = settings['ggaAltitudeFilterChange'] + gga_kwargs["differential"] = settings["ggaDiffQualFilter"] + if settings["ggaAltitudeFilter"] == "Manual": + gga_kwargs["altitude"] = settings["ggaAltitudeFilter"] + gga_kwargs["altitude_threshold"] = settings[ + "ggaAltitudeFilterChange" + ] else: - gga_kwargs['altitude'] = settings['ggaAltitudeFilter'] + gga_kwargs["altitude"] = settings["ggaAltitudeFilter"] # Set GGA HDOP Filter - if settings['GPSHDOPFilter'] == 'Manual': - gga_kwargs['hdop'] = settings['GPSHDOPFilter'] - gga_kwargs['hdop_max_threshold'] = settings['GPSHDOPFilterMax'] - gga_kwargs['hdop_change_threshold'] = settings['GPSHDOPFilterChange'] + if settings["GPSHDOPFilter"] == "Manual": + gga_kwargs["hdop"] = settings["GPSHDOPFilter"] + gga_kwargs["hdop_max_threshold"] = settings["GPSHDOPFilterMax"] + gga_kwargs["hdop_change_threshold"] = settings[ + "GPSHDOPFilterChange" + ] else: - gga_kwargs['hdop'] = settings['GPSHDOPFilter'] + gga_kwargs["hdop"] = settings["GPSHDOPFilter"] - gga_kwargs['other'] = settings['GPSSmoothFilter'] + gga_kwargs["other"] = settings["GPSSmoothFilter"] # Apply GGA filters transect.gps_filters(update=False, **gga_kwargs) if transect.boat_vel.vtg_vel is not None: vtg_kwargs = {} - if settings['GPSHDOPFilter'] == 'Manual': - vtg_kwargs['hdop'] = settings['GPSHDOPFilter'] - vtg_kwargs['hdop_max_threshold'] = settings['GPSHDOPFilterMax'] - vtg_kwargs['hdop_change_threshold'] = settings['GPSHDOPFilterChange'] - vtg_kwargs['other'] = settings['GPSSmoothFilter'] + if settings["GPSHDOPFilter"] == "Manual": + vtg_kwargs["hdop"] = settings["GPSHDOPFilter"] + vtg_kwargs["hdop_max_threshold"] = settings["GPSHDOPFilterMax"] + vtg_kwargs["hdop_change_threshold"] = settings[ + "GPSHDOPFilterChange" + ] + vtg_kwargs["other"] = settings["GPSSmoothFilter"] else: - vtg_kwargs['hdop'] = settings['GPSHDOPFilter'] - vtg_kwargs['other'] = settings['GPSSmoothFilter'] + vtg_kwargs["hdop"] = settings["GPSHDOPFilter"] + vtg_kwargs["other"] = settings["GPSSmoothFilter"] # Apply VTG filters transect.gps_filters(update=False, **vtg_kwargs) - transect.boat_interpolations(update=False, - target='GPS', - method=settings['GPSInterpolation']) + transect.boat_interpolations( + update=False, target="GPS", method=settings["GPSInterpolation"] + ) # Set depth reference - 
transect.set_depth_reference(update=False, setting=settings['depthReference']) - - transect.process_depths(update=True, - filter_method=settings['depthFilterType'], - interpolation_method=settings['depthInterpolation'], - composite_setting=settings['depthComposite'], - avg_method=settings['depthAvgMethod'], - valid_method=settings['depthValidMethod']) + transect.set_depth_reference( + update=False, setting=settings["depthReference"] + ) + + transect.process_depths( + update=True, + filter_method=settings["depthFilterType"], + interpolation_method=settings["depthInterpolation"], + composite_setting=settings["depthComposite"], + avg_method=settings["depthAvgMethod"], + valid_method=settings["depthValidMethod"], + ) # Set WT difference velocity filter wt_kwargs = {} - if settings['WTdFilter'] == 'Manual': - wt_kwargs['difference'] = settings['WTdFilter'] - wt_kwargs['difference_threshold'] = settings['WTdFilterThreshold'] + if settings["WTdFilter"] == "Manual": + wt_kwargs["difference"] = settings["WTdFilter"] + wt_kwargs["difference_threshold"] = settings["WTdFilterThreshold"] else: - wt_kwargs['difference'] = settings['WTdFilter'] + wt_kwargs["difference"] = settings["WTdFilter"] # Set WT vertical velocity filter - if settings['WTwFilter'] == 'Manual': - wt_kwargs['vertical'] = settings['WTwFilter'] - wt_kwargs['vertical_threshold'] = settings['WTwFilterThreshold'] + if settings["WTwFilter"] == "Manual": + wt_kwargs["vertical"] = settings["WTwFilter"] + wt_kwargs["vertical_threshold"] = settings["WTwFilterThreshold"] else: - wt_kwargs['vertical'] = settings['WTwFilter'] + wt_kwargs["vertical"] = settings["WTwFilter"] - wt_kwargs['beam'] = settings['WTbeamFilter'] - wt_kwargs['other'] = settings['WTsmoothFilter'] - wt_kwargs['snr'] = settings['WTsnrFilter'] - wt_kwargs['wt_depth'] = settings['WTwtDepthFilter'] - wt_kwargs['excluded'] = settings['WTExcludedDistance'] + wt_kwargs["beam"] = settings["WTbeamFilter"] + wt_kwargs["other"] = settings["WTsmoothFilter"] + wt_kwargs["snr"] = settings["WTsnrFilter"] + wt_kwargs["wt_depth"] = settings["WTwtDepthFilter"] + wt_kwargs["excluded"] = settings["WTExcludedDistance"] - # Data loaded from old QRev.mat files will be set to use this new interpolation method. When reprocessing + # Data loaded from old QRev.mat files will be set to use this + # new interpolation method. 
When reprocessing # any data the interpolation method should be 'abba' if force_abba: - transect.w_vel.interpolate_cells = 'abba' - transect.w_vel.interpolate_ens = 'abba' - settings['WTEnsInterpolation'] = 'abba' - settings['WTCellInterpolation'] = 'abba' - - transect.w_vel.use_measurement_thresholds = settings['UseMeasurementThresholds'] - if transect.w_vel.ping_type.size == 0 and transect.adcp.manufacturer == 'SonTek': + transect.w_vel.interpolate_cells = "abba" + transect.w_vel.interpolate_ens = "abba" + settings["WTEnsInterpolation"] = "abba" + settings["WTCellInterpolation"] = "abba" + + transect.w_vel.use_measurement_thresholds = settings[ + "UseMeasurementThresholds" + ] + if ( + transect.w_vel.ping_type.size == 0 + and transect.adcp.manufacturer == "SonTek" + ): # Correlation and frequency can be used to determine ping type - transect.w_vel.ping_type = TransectData.sontek_ping_type(corr=transect.w_vel.corr, - freq=transect.w_vel.frequency) + transect.w_vel.ping_type = TransectData.sontek_ping_type( + corr=transect.w_vel.corr, freq=transect.w_vel.frequency + ) transect.w_vel.apply_filter(transect=transect, **wt_kwargs) # Edge methods - transect.edges.rec_edge_method = settings['edgeRecEdgeMethod'] - transect.edges.vel_method = settings['edgeVelMethod'] + transect.edges.rec_edge_method = settings["edgeRecEdgeMethod"] + transect.edges.vel_method = settings["edgeVelMethod"] - if settings['UseWeighted'] and not self.use_weighted: + if settings["UseWeighted"] and not self.use_weighted: if self.extrap_fit.norm_data[-1].weights is None: - # Compute normalized data for each transect to obtain the weights - self.extrap_fit.process_profiles(self.transects, self.extrap_fit.norm_data[-1].data_type, - use_weighted=settings['UseWeighted']) + # Compute normalized data for each transect to obtain the + # weights + self.extrap_fit.process_profiles( + self.transects, + self.extrap_fit.norm_data[-1].data_type, + use_weighted=settings["UseWeighted"], + ) - self.use_weighted = settings['UseWeighted'] + self.use_weighted = settings["UseWeighted"] if len(self.checked_transect_idx) > 0: ref_transect = self.checked_transect_idx[0] else: ref_transect = 0 - if self.transects[ref_transect].w_vel.interpolate_cells == 'TRDI': + if self.transects[ref_transect].w_vel.interpolate_cells == "TRDI": if self.extrap_fit is None: self.extrap_fit = ComputeExtrap() - self.extrap_fit.populate_data(transects=self.transects, compute_sensitivity=False, - use_weighted=settings['UseWeighted']) - self.change_extrapolation(self.extrap_fit.fit_method, compute_q=False, - use_weighted=settings['UseWeighted']) - elif self.extrap_fit.fit_method == 'Automatic': - self.change_extrapolation(self.extrap_fit.fit_method, compute_q=False, - use_weighted=settings['UseWeighted']) + self.extrap_fit.populate_data( + transects=self.transects, + compute_sensitivity=False, + use_weighted=settings["UseWeighted"], + ) + self.change_extrapolation( + self.extrap_fit.fit_method, + compute_q=False, + use_weighted=settings["UseWeighted"], + ) + elif self.extrap_fit.fit_method == "Automatic": + self.change_extrapolation( + self.extrap_fit.fit_method, + compute_q=False, + use_weighted=settings["UseWeighted"], + ) else: - if 'extrapTop' not in settings.keys(): - settings['extrapTop'] = self.extrap_fit.sel_fit[-1].top_method - settings['extrapBot'] = self.extrap_fit.sel_fit[-1].bot_method - settings['extrapExp'] = self.extrap_fit.sel_fit[-1].exponent - - self.change_extrapolation(self.extrap_fit.fit_method, - top=settings['extrapTop'], - 
bot=settings['extrapBot'], - exp=settings['extrapExp'], - compute_q=False, - use_weighted=settings['UseWeighted']) + if "extrapTop" not in settings.keys(): + settings["extrapTop"] = self.extrap_fit.sel_fit[-1].top_method + settings["extrapBot"] = self.extrap_fit.sel_fit[-1].bot_method + settings["extrapExp"] = self.extrap_fit.sel_fit[-1].exponent + + self.change_extrapolation( + self.extrap_fit.fit_method, + top=settings["extrapTop"], + bot=settings["extrapBot"], + exp=settings["extrapExp"], + compute_q=False, + use_weighted=settings["UseWeighted"], + ) for transect in self.transects: - # Water track interpolations - transect.w_vel.apply_interpolation(transect=transect, - ens_interp=settings['WTEnsInterpolation'], - cells_interp=settings['WTCellInterpolation']) + transect.w_vel.apply_interpolation( + transect=transect, + ens_interp=settings["WTEnsInterpolation"], + cells_interp=settings["WTCellInterpolation"], + ) if self.extrap_fit is None: self.extrap_fit = ComputeExtrap() - self.extrap_fit.populate_data(transects=self.transects, compute_sensitivity=False, - use_weighted=settings['UseWeighted']) - self.change_extrapolation(self.extrap_fit.fit_method, compute_q=False, - use_weighted=settings['UseWeighted']) - elif self.extrap_fit.fit_method == 'Automatic': - self.change_extrapolation(self.extrap_fit.fit_method, compute_q=False, - use_weighted=settings['UseWeighted']) + self.extrap_fit.populate_data( + transects=self.transects, + compute_sensitivity=False, + use_weighted=settings["UseWeighted"], + ) + self.change_extrapolation( + self.extrap_fit.fit_method, + compute_q=False, + use_weighted=settings["UseWeighted"], + ) + elif self.extrap_fit.fit_method == "Automatic": + self.change_extrapolation( + self.extrap_fit.fit_method, + compute_q=False, + use_weighted=settings["UseWeighted"], + ) else: - if 'extrapTop' not in settings.keys(): - settings['extrapTop'] = self.extrap_fit.sel_fit[-1].top_method - settings['extrapBot'] = self.extrap_fit.sel_fit[-1].bot_method - settings['extrapExp'] = self.extrap_fit.sel_fit[-1].exponent - - self.change_extrapolation(self.extrap_fit.fit_method, - top=settings['extrapTop'], - bot=settings['extrapBot'], - exp=settings['extrapExp'], - compute_q=False, - use_weighted=settings['UseWeighted']) + if "extrapTop" not in settings.keys(): + settings["extrapTop"] = self.extrap_fit.sel_fit[-1].top_method + settings["extrapBot"] = self.extrap_fit.sel_fit[-1].bot_method + settings["extrapExp"] = self.extrap_fit.sel_fit[-1].exponent + + self.change_extrapolation( + self.extrap_fit.fit_method, + top=settings["extrapTop"], + bot=settings["extrapBot"], + exp=settings["extrapExp"], + compute_q=False, + use_weighted=settings["UseWeighted"], + ) self.extrap_fit.q_sensitivity = ExtrapQSensitivity() - self.extrap_fit.q_sensitivity.populate_data(transects=self.transects, - extrap_fits=self.extrap_fit.sel_fit) + self.extrap_fit.q_sensitivity.populate_data( + transects=self.transects, extrap_fits=self.extrap_fit.sel_fit + ) self.compute_discharge() @@ -1469,155 +1758,187 @@ class Measurement(object): settings: dict Dictionary of reference, filter, and interpolation settings force_abba: bool - Allows the above, below, before, after interpolation to be applied even when the data use another approach. + Allows the above, below, before, after interpolation to be applied + even when the data use another approach. 
""" - self.use_ping_type = settings['UsePingType'] - # If SonTek data does not have ping type identified, determine ping types - if self.mb_tests[0].transect.w_vel.ping_type.size == 1 and self.transects[0].adcp.manufacturer == 'SonTek': + self.use_ping_type = settings["UsePingType"] + # If SonTek data does not have ping type identified, determine ping + # types + if ( + self.mb_tests[0].transect.w_vel.ping_type.size == 1 + and self.transects[0].adcp.manufacturer == "SonTek" + ): for test in self.mb_tests: transect = test.transect - ping_type = TransectData.sontek_ping_type(transect.w_vel.corr, transect.w_vel.frequency) - transect.w_vel.ping_type = np.tile(np.array([ping_type]), (transect.w_vel.corr.shape[1], 1)) + ping_type = TransectData.sontek_ping_type( + transect.w_vel.corr, transect.w_vel.frequency + ) + transect.w_vel.ping_type = np.tile( + np.array([ping_type]), (transect.w_vel.corr.shape[1], 1) + ) for test in self.mb_tests: transect = test.transect - if not settings['UsePingType']: - transect.w_vel.ping_type = np.tile('U', transect.w_vel.ping_type.shape) - transect.boat_vel.bt_vel.frequency_khz = np.tile(0, transect.boat_vel.bt_vel.frequency_khz.shape) + if not settings["UsePingType"]: + transect.w_vel.ping_type = np.tile("U", transect.w_vel.ping_type.shape) + transect.boat_vel.bt_vel.frequency_khz = np.tile( + 0, transect.boat_vel.bt_vel.frequency_khz.shape + ) # Moving-boat ensembles - if 'Processing' in settings.keys(): - transect.change_q_ensembles(proc_method=settings['Processing']) - self.processing = settings['Processing'] + if "Processing" in settings.keys(): + transect.change_q_ensembles(proc_method=settings["Processing"]) + self.processing = settings["Processing"] # Set difference velocity BT filter bt_kwargs = {} - if settings['BTdFilter'] == 'Manual': - bt_kwargs['difference'] = settings['BTdFilter'] - bt_kwargs['difference_threshold'] = settings['BTdFilterThreshold'] + if settings["BTdFilter"] == "Manual": + bt_kwargs["difference"] = settings["BTdFilter"] + bt_kwargs["difference_threshold"] = settings["BTdFilterThreshold"] else: - bt_kwargs['difference'] = settings['BTdFilter'] + bt_kwargs["difference"] = settings["BTdFilter"] # Set vertical velocity BT filter - if settings['BTwFilter'] == 'Manual': - bt_kwargs['vertical'] = settings['BTwFilter'] - bt_kwargs['vertical_threshold'] = settings['BTwFilterThreshold'] + if settings["BTwFilter"] == "Manual": + bt_kwargs["vertical"] = settings["BTwFilter"] + bt_kwargs["vertical_threshold"] = settings["BTwFilterThreshold"] else: - bt_kwargs['vertical'] = settings['BTwFilter'] + bt_kwargs["vertical"] = settings["BTwFilter"] # Apply beam filter - bt_kwargs['beam'] = settings['BTbeamFilter'] + bt_kwargs["beam"] = settings["BTbeamFilter"] # Apply smooth filter - bt_kwargs['other'] = settings['BTsmoothFilter'] + bt_kwargs["other"] = settings["BTsmoothFilter"] - transect.boat_vel.bt_vel.use_measurement_thresholds = settings['UseMeasurementThresholds'] + transect.boat_vel.bt_vel.use_measurement_thresholds = settings[ + "UseMeasurementThresholds" + ] # Apply BT settings transect.boat_filters(update=False, **bt_kwargs) # Don't interpolate for stationary tests - if test.type == 'Loop': + if test.type == "Loop": # BT Interpolation - transect.boat_interpolations(update=False, - target='BT', - method=settings['BTInterpolation']) + transect.boat_interpolations( + update=False, target="BT", method=settings["BTInterpolation"] + ) # GPS filter settings if transect.gps is not None: gga_kwargs = {} if transect.boat_vel.gga_vel is not None: # GGA 
- gga_kwargs['differential'] = settings['ggaDiffQualFilter'] - if settings['ggaAltitudeFilter'] == 'Manual': - gga_kwargs['altitude'] = settings['ggaAltitudeFilter'] - gga_kwargs['altitude_threshold'] = settings['ggaAltitudeFilterChange'] + gga_kwargs["differential"] = settings["ggaDiffQualFilter"] + if settings["ggaAltitudeFilter"] == "Manual": + gga_kwargs["altitude"] = settings["ggaAltitudeFilter"] + gga_kwargs["altitude_threshold"] = settings[ + "ggaAltitudeFilterChange" + ] else: - gga_kwargs['altitude'] = settings['ggaAltitudeFilter'] + gga_kwargs["altitude"] = settings["ggaAltitudeFilter"] # Set GGA HDOP Filter - if settings['GPSHDOPFilter'] == 'Manual': - gga_kwargs['hdop'] = settings['GPSHDOPFilter'] - gga_kwargs['hdop_max_threshold'] = settings['GPSHDOPFilterMax'] - gga_kwargs['hdop_change_threshold'] = settings['GPSHDOPFilterChange'] + if settings["GPSHDOPFilter"] == "Manual": + gga_kwargs["hdop"] = settings["GPSHDOPFilter"] + gga_kwargs["hdop_max_threshold"] = settings["GPSHDOPFilterMax"] + gga_kwargs["hdop_change_threshold"] = settings[ + "GPSHDOPFilterChange" + ] else: - gga_kwargs['hdop'] = settings['GPSHDOPFilter'] + gga_kwargs["hdop"] = settings["GPSHDOPFilter"] - gga_kwargs['other'] = settings['GPSSmoothFilter'] + gga_kwargs["other"] = settings["GPSSmoothFilter"] # Apply GGA filters transect.gps_filters(update=False, **gga_kwargs) if transect.boat_vel.vtg_vel is not None: vtg_kwargs = {} - if settings['GPSHDOPFilter'] == 'Manual': - vtg_kwargs['hdop'] = settings['GPSHDOPFilter'] - vtg_kwargs['hdop_max_threshold'] = settings['GPSHDOPFilterMax'] - vtg_kwargs['hdop_change_threshold'] = settings['GPSHDOPFilterChange'] - vtg_kwargs['other'] = settings['GPSSmoothFilter'] + if settings["GPSHDOPFilter"] == "Manual": + vtg_kwargs["hdop"] = settings["GPSHDOPFilter"] + vtg_kwargs["hdop_max_threshold"] = settings["GPSHDOPFilterMax"] + vtg_kwargs["hdop_change_threshold"] = settings[ + "GPSHDOPFilterChange" + ] + vtg_kwargs["other"] = settings["GPSSmoothFilter"] else: - vtg_kwargs['hdop'] = settings['GPSHDOPFilter'] - vtg_kwargs['other'] = settings['GPSSmoothFilter'] + vtg_kwargs["hdop"] = settings["GPSHDOPFilter"] + vtg_kwargs["other"] = settings["GPSSmoothFilter"] # Apply VTG filters transect.gps_filters(update=False, **vtg_kwargs) # Don't interpolate for stationary tests - if test.type == 'Loop': - transect.boat_interpolations(update=False, - target='GPS', - method=settings['GPSInterpolation']) + if test.type == "Loop": + transect.boat_interpolations( + update=False, target="GPS", method=settings["GPSInterpolation"] + ) # Set depth reference - transect.set_depth_reference(update=False, setting=settings['depthReference']) - transect.process_depths(update=False, - filter_method=settings['depthFilterType'], - interpolation_method=settings['depthInterpolation'], - composite_setting=settings['depthComposite'], - avg_method=settings['depthAvgMethod'], - valid_method=settings['depthValidMethod']) + transect.set_depth_reference( + update=False, setting=settings["depthReference"] + ) + transect.process_depths( + update=False, + filter_method=settings["depthFilterType"], + interpolation_method=settings["depthInterpolation"], + composite_setting=settings["depthComposite"], + avg_method=settings["depthAvgMethod"], + valid_method=settings["depthValidMethod"], + ) # Set WT difference velocity filter wt_kwargs = {} - if settings['WTdFilter'] == 'Manual': - wt_kwargs['difference'] = settings['WTdFilter'] - wt_kwargs['difference_threshold'] = settings['WTdFilterThreshold'] + if settings["WTdFilter"] 
== "Manual": + wt_kwargs["difference"] = settings["WTdFilter"] + wt_kwargs["difference_threshold"] = settings["WTdFilterThreshold"] else: - wt_kwargs['difference'] = settings['WTdFilter'] + wt_kwargs["difference"] = settings["WTdFilter"] # Set WT vertical velocity filter - if settings['WTwFilter'] == 'Manual': - wt_kwargs['vertical'] = settings['WTwFilter'] - wt_kwargs['vertical_threshold'] = settings['WTwFilterThreshold'] + if settings["WTwFilter"] == "Manual": + wt_kwargs["vertical"] = settings["WTwFilter"] + wt_kwargs["vertical_threshold"] = settings["WTwFilterThreshold"] else: - wt_kwargs['vertical'] = settings['WTwFilter'] + wt_kwargs["vertical"] = settings["WTwFilter"] - wt_kwargs['beam'] = settings['WTbeamFilter'] - wt_kwargs['other'] = settings['WTsmoothFilter'] - wt_kwargs['snr'] = settings['WTsnrFilter'] - wt_kwargs['wt_depth'] = settings['WTwtDepthFilter'] - wt_kwargs['excluded'] = settings['WTExcludedDistance'] + wt_kwargs["beam"] = settings["WTbeamFilter"] + wt_kwargs["other"] = settings["WTsmoothFilter"] + wt_kwargs["snr"] = settings["WTsnrFilter"] + wt_kwargs["wt_depth"] = settings["WTwtDepthFilter"] + wt_kwargs["excluded"] = settings["WTExcludedDistance"] - # Data loaded from old QRev.mat files will be set to use this new interpolation method. When reprocessing - # any data the interpolation method should be 'abba' + # Data loaded from old QRev.mat files will be set to use this new + # interpolation method. When reprocessing any data the interpolation method + # should be 'abba' if force_abba: - transect.w_vel.interpolate_cells = 'abba' - transect.w_vel.interpolate_ens = 'abba' - settings['WTEnsInterpolation'] = 'abba' - settings['WTCellInterpolation'] = 'abba' - - transect.w_vel.use_measurement_thresholds = settings['UseMeasurementThresholds'] - if transect.w_vel.ping_type.size == 0 and transect.adcp.manufacturer == 'SonTek': + transect.w_vel.interpolate_cells = "abba" + transect.w_vel.interpolate_ens = "abba" + settings["WTEnsInterpolation"] = "abba" + settings["WTCellInterpolation"] = "abba" + + transect.w_vel.use_measurement_thresholds = settings[ + "UseMeasurementThresholds" + ] + if ( + transect.w_vel.ping_type.size == 0 + and transect.adcp.manufacturer == "SonTek" + ): # Correlation and frequency can be used to determine ping type - transect.w_vel.ping_type = TransectData.sontek_ping_type(corr=transect.w_vel.corr, - freq=transect.w_vel.frequency) + transect.w_vel.ping_type = TransectData.sontek_ping_type( + corr=transect.w_vel.corr, freq=transect.w_vel.frequency + ) transect.w_vel.apply_filter(transect=transect, **wt_kwargs) - transect.w_vel.apply_interpolation(transect=transect, - ens_interp=settings['WTEnsInterpolation'], - cells_interp=settings['WTCellInterpolation']) + transect.w_vel.apply_interpolation( + transect=transect, + ens_interp=settings["WTEnsInterpolation"], + cells_interp=settings["WTCellInterpolation"], + ) test.process_mb_test(source=self.transects[0].adcp.manufacturer) @@ -1634,38 +1955,36 @@ class Measurement(object): else: ref_transect = 0 transect = self.transects[ref_transect] - + # Navigation reference - settings['NavRef'] = transect.boat_vel.selected - + settings["NavRef"] = transect.boat_vel.selected + # Composite tracks - settings['CompTracks'] = transect.boat_vel.composite - + settings["CompTracks"] = transect.boat_vel.composite + # Water track settings - settings['WTbeamFilter'] = transect.w_vel.beam_filter - settings['WTdFilter'] = transect.w_vel.d_filter - settings['WTdFilterThreshold'] = transect.w_vel.d_filter_thresholds - 
settings['WTwFilter'] = transect.w_vel.w_filter - settings['WTwFilterThreshold'] = transect.w_vel.w_filter_thresholds - settings['WTsmoothFilter'] = transect.w_vel.smooth_filter - settings['WTsnrFilter'] = transect.w_vel.snr_filter - settings['WTwtDepthFilter'] = transect.w_vel.wt_depth_filter - settings['WTEnsInterpolation'] = transect.w_vel.interpolate_ens - settings['WTCellInterpolation'] = transect.w_vel.interpolate_cells - settings['WTExcludedDistance'] = transect.w_vel.excluded_dist_m - + settings["WTbeamFilter"] = transect.w_vel.beam_filter + settings["WTdFilter"] = transect.w_vel.d_filter + settings["WTdFilterThreshold"] = transect.w_vel.d_filter_thresholds + settings["WTwFilter"] = transect.w_vel.w_filter + settings["WTwFilterThreshold"] = transect.w_vel.w_filter_thresholds + settings["WTsmoothFilter"] = transect.w_vel.smooth_filter + settings["WTsnrFilter"] = transect.w_vel.snr_filter + settings["WTwtDepthFilter"] = transect.w_vel.wt_depth_filter + settings["WTEnsInterpolation"] = transect.w_vel.interpolate_ens + settings["WTCellInterpolation"] = transect.w_vel.interpolate_cells + settings["WTExcludedDistance"] = transect.w_vel.excluded_dist_m + # Bottom track settings - settings['BTbeamFilter'] = transect.boat_vel.bt_vel.beam_filter - settings['BTdFilter'] = transect.boat_vel.bt_vel.d_filter - settings['BTdFilterThreshold'] = transect.boat_vel.bt_vel.d_filter_thresholds - settings['BTwFilter'] = transect.boat_vel.bt_vel.w_filter - settings['BTwFilterThreshold'] = transect.boat_vel.bt_vel.w_filter_thresholds - settings['BTsmoothFilter'] = transect.boat_vel.bt_vel.smooth_filter - settings['BTInterpolation'] = transect.boat_vel.bt_vel.interpolate - - # Gps Settings - # if transect.gps is not None: + settings["BTbeamFilter"] = transect.boat_vel.bt_vel.beam_filter + settings["BTdFilter"] = transect.boat_vel.bt_vel.d_filter + settings["BTdFilterThreshold"] = transect.boat_vel.bt_vel.d_filter_thresholds + settings["BTwFilter"] = transect.boat_vel.bt_vel.w_filter + settings["BTwFilterThreshold"] = transect.boat_vel.bt_vel.w_filter_thresholds + settings["BTsmoothFilter"] = transect.boat_vel.bt_vel.smooth_filter + settings["BTInterpolation"] = transect.boat_vel.bt_vel.interpolate + # Gps Settings gga_present = False for idx in self.checked_transect_idx: if self.transects[idx].boat_vel.gga_vel is not None: @@ -1675,29 +1994,36 @@ class Measurement(object): # GGA settings if gga_present: - settings['ggaDiffQualFilter'] = transect.boat_vel.gga_vel.gps_diff_qual_filter - settings['ggaAltitudeFilter'] = transect.boat_vel.gga_vel.gps_altitude_filter - settings['ggaAltitudeFilterChange'] = \ - transect.boat_vel.gga_vel.gps_altitude_filter_change - settings['GPSHDOPFilter'] = transect.boat_vel.gga_vel.gps_HDOP_filter - settings['GPSHDOPFilterMax'] = transect.boat_vel.gga_vel.gps_HDOP_filter_max - settings['GPSHDOPFilterChange'] = transect.boat_vel.gga_vel.gps_HDOP_filter_change - settings['GPSSmoothFilter'] = transect.boat_vel.gga_vel.smooth_filter - settings['GPSInterpolation'] = transect.boat_vel.gga_vel.interpolate + settings[ + "ggaDiffQualFilter" + ] = transect.boat_vel.gga_vel.gps_diff_qual_filter + settings[ + "ggaAltitudeFilter" + ] = transect.boat_vel.gga_vel.gps_altitude_filter + settings[ + "ggaAltitudeFilterChange" + ] = transect.boat_vel.gga_vel.gps_altitude_filter_change + settings["GPSHDOPFilter"] = transect.boat_vel.gga_vel.gps_HDOP_filter + settings["GPSHDOPFilterMax"] = transect.boat_vel.gga_vel.gps_HDOP_filter_max + settings[ + "GPSHDOPFilterChange" + ] = 
transect.boat_vel.gga_vel.gps_HDOP_filter_change + settings["GPSSmoothFilter"] = transect.boat_vel.gga_vel.smooth_filter + settings["GPSInterpolation"] = transect.boat_vel.gga_vel.interpolate else: - settings['ggaDiffQualFilter'] = 1 - settings['ggaAltitudeFilter'] = 'Off' - settings['ggaAltitudeFilterChange'] = [] - - settings['ggaSmoothFilter'] = 'Off' - if 'GPSInterpolation' not in settings.keys(): - settings['GPSInterpolation'] = 'None' - if 'GPSHDOPFilter' not in settings.keys(): - settings['GPSHDOPFilter'] = 'Off' - settings['GPSHDOPFilterMax'] = [] - settings['GPSHDOPFilterChange'] = [] - if 'GPSSmoothFilter' not in settings.keys(): - settings['GPSSmoothFilter'] = 'Off' + settings["ggaDiffQualFilter"] = 1 + settings["ggaAltitudeFilter"] = "Off" + settings["ggaAltitudeFilterChange"] = [] + + settings["ggaSmoothFilter"] = "Off" + if "GPSInterpolation" not in settings.keys(): + settings["GPSInterpolation"] = "None" + if "GPSHDOPFilter" not in settings.keys(): + settings["GPSHDOPFilter"] = "Off" + settings["GPSHDOPFilterMax"] = [] + settings["GPSHDOPFilterChange"] = [] + if "GPSSmoothFilter" not in settings.keys(): + settings["GPSSmoothFilter"] = "Off" # VTG settings vtg_present = False @@ -1708,50 +2034,54 @@ class Measurement(object): break if vtg_present: - settings['GPSHDOPFilter'] = transect.boat_vel.vtg_vel.gps_HDOP_filter - settings['GPSHDOPFilterMax'] = transect.boat_vel.vtg_vel.gps_HDOP_filter_max - settings['GPSHDOPFilterChange'] = transect.boat_vel.vtg_vel.gps_HDOP_filter_change - settings['GPSSmoothFilter'] = transect.boat_vel.vtg_vel.smooth_filter - settings['GPSInterpolation'] = transect.boat_vel.vtg_vel.interpolate + settings["GPSHDOPFilter"] = transect.boat_vel.vtg_vel.gps_HDOP_filter + settings["GPSHDOPFilterMax"] = transect.boat_vel.vtg_vel.gps_HDOP_filter_max + settings[ + "GPSHDOPFilterChange" + ] = transect.boat_vel.vtg_vel.gps_HDOP_filter_change + settings["GPSSmoothFilter"] = transect.boat_vel.vtg_vel.smooth_filter + settings["GPSInterpolation"] = transect.boat_vel.vtg_vel.interpolate # Depth Settings - settings['depthAvgMethod'] = transect.depths.bt_depths.avg_method - settings['depthValidMethod'] = transect.depths.bt_depths.valid_data_method - + settings["depthAvgMethod"] = transect.depths.bt_depths.avg_method + settings["depthValidMethod"] = transect.depths.bt_depths.valid_data_method + # Depth settings are always applied to all available depth sources. 
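# A minimal sketch of that fan-out; the loop is illustrative (the actual
# propagation happens inside process_depths), and vb_depths or ds_depths
# may be None when that depth source is absent:
for source in ("bt_depths", "vb_depths", "ds_depths"):
    depth_data = getattr(transect.depths, source)
    if depth_data is not None:
        # The same filter choice is pushed to every available depth source
        depth_data.filter_type = settings["depthFilterType"]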
-        # Only those saved in the bt_depths are used here but are applied to all sources
-        settings['depthFilterType'] = transect.depths.bt_depths.filter_type
-        settings['depthReference'] = transect.depths.selected
-        settings['depthComposite'] = transect.depths.composite
+        # Only those saved in the bt_depths are used here but are applied to
+        # all sources
+        settings["depthFilterType"] = transect.depths.bt_depths.filter_type
+        settings["depthReference"] = transect.depths.selected
+        settings["depthComposite"] = transect.depths.composite
         select = getattr(transect.depths, transect.depths.selected)
-        settings['depthInterpolation'] = select.interp_type
-
+        settings["depthInterpolation"] = select.interp_type
+
         # Extrap Settings
         if self.extrap_fit is None:
-            settings['extrapTop'] = transect.extrap.top_method
-            settings['extrapBot'] = transect.extrap.bot_method
-            settings['extrapExp'] = transect.extrap.exponent
+            settings["extrapTop"] = transect.extrap.top_method
+            settings["extrapBot"] = transect.extrap.bot_method
+            settings["extrapExp"] = transect.extrap.exponent
         else:
-            settings['extrapTop'] = self.extrap_fit.sel_fit[-1].top_method
-            settings['extrapBot'] = self.extrap_fit.sel_fit[-1].bot_method
-            settings['extrapExp'] = self.extrap_fit.sel_fit[-1].exponent
+            settings["extrapTop"] = self.extrap_fit.sel_fit[-1].top_method
+            settings["extrapBot"] = self.extrap_fit.sel_fit[-1].bot_method
+            settings["extrapExp"] = self.extrap_fit.sel_fit[-1].exponent

-        # Use of self.use_weighted allows a QRev mat file to be loaded and initially processed with the settings from
-        # the QRev file but upon reprocessing the self.use_weights will be set to the options setting for use_weights
-        settings['UseWeighted'] = self.use_weighted
+        # self.use_weighted allows a QRev mat file to be loaded and initially
+        # processed with the settings saved in the QRev file; upon
+        # reprocessing, self.use_weighted is reset to the use_weights option
+        # setting
+        settings["UseWeighted"] = self.use_weighted

         # Edge Settings
-        settings['edgeVelMethod'] = transect.edges.vel_method
-        settings['edgeRecEdgeMethod'] = transect.edges.rec_edge_method
+        settings["edgeVelMethod"] = transect.edges.vel_method
+        settings["edgeRecEdgeMethod"] = transect.edges.rec_edge_method

-        settings['UseMeasurementThresholds'] = transect.w_vel.use_measurement_thresholds
-        settings['UsePingType'] = self.use_ping_type
+        settings["UseMeasurementThresholds"] = transect.w_vel.use_measurement_thresholds
+        settings["UsePingType"] = self.use_ping_type

         return settings

     def qrev_default_settings(self, check_user_excluded_dist=False, use_weighted=False):
-        """QRev default and filter settings for a measurement.
- """ + """QRev default and filter settings for a measurement.""" settings = dict() @@ -1761,94 +2091,100 @@ class Measurement(object): ref_transect = 0 # Navigation reference - settings['NavRef'] = self.transects[ref_transect].boat_vel.selected + settings["NavRef"] = self.transects[ref_transect].boat_vel.selected # Composite tracks - settings['CompTracks'] = 'Off' + settings["CompTracks"] = "Off" # Water track filter settings - settings['WTbeamFilter'] = -1 - settings['WTdFilter'] = 'Auto' - settings['WTdFilterThreshold'] = np.nan - settings['WTwFilter'] = 'Auto' - settings['WTwFilterThreshold'] = np.nan - settings['WTsmoothFilter'] = 'Off' - - if self.transects[ref_transect].adcp.manufacturer == 'TRDI': - settings['WTsnrFilter'] = 'Off' + settings["WTbeamFilter"] = -1 + settings["WTdFilter"] = "Auto" + settings["WTdFilterThreshold"] = np.nan + settings["WTwFilter"] = "Auto" + settings["WTwFilterThreshold"] = np.nan + settings["WTsmoothFilter"] = "Off" + + if self.transects[ref_transect].adcp.manufacturer == "TRDI": + settings["WTsnrFilter"] = "Off" else: - settings['WTsnrFilter'] = 'Auto' + settings["WTsnrFilter"] = "Auto" if check_user_excluded_dist: temp = [x.w_vel for x in self.transects] excluded_dist = np.nanmin([x.excluded_dist_m for x in temp]) else: excluded_dist = 0 - if excluded_dist < 0.158 and self.transects[ref_transect].adcp.model == 'M9': - settings['WTExcludedDistance'] = 0.16 - elif excluded_dist < 0.248 and self.transects[ref_transect].adcp.model == 'RioPro': - settings['WTExcludedDistance'] = 0.25 + if excluded_dist < 0.158 and self.transects[ref_transect].adcp.model == "M9": + settings["WTExcludedDistance"] = 0.16 + elif ( + excluded_dist < 0.248 + and self.transects[ref_transect].adcp.model == "RioPro" + ): + settings["WTExcludedDistance"] = 0.25 else: - settings['WTExcludedDistance'] = excluded_dist + settings["WTExcludedDistance"] = excluded_dist # Bottom track filter settings - settings['BTbeamFilter'] = -1 - settings['BTdFilter'] = 'Auto' - settings['BTdFilterThreshold'] = np.nan - settings['BTwFilter'] = 'Auto' - settings['BTwFilterThreshold'] = np.nan - settings['BTsmoothFilter'] = 'Off' + settings["BTbeamFilter"] = -1 + settings["BTdFilter"] = "Auto" + settings["BTdFilterThreshold"] = np.nan + settings["BTwFilter"] = "Auto" + settings["BTwFilterThreshold"] = np.nan + settings["BTsmoothFilter"] = "Off" # GGA Filter settings - settings['ggaDiffQualFilter'] = 2 - settings['ggaAltitudeFilter'] = 'Auto' - settings['ggaAltitudeFilterChange'] = np.nan + settings["ggaDiffQualFilter"] = self.gps_quality_threshold + settings["ggaAltitudeFilter"] = "Auto" + settings["ggaAltitudeFilterChange"] = np.nan # VTG filter settings - settings['vtgsmoothFilter'] = 'Off' + settings["vtgsmoothFilter"] = "Off" # GGA and VTG filter settings - settings['GPSHDOPFilter'] = 'Auto' - settings['GPSHDOPFilterMax'] = np.nan - settings['GPSHDOPFilterChange'] = np.nan - settings['GPSSmoothFilter'] = 'Off' + settings["GPSHDOPFilter"] = "Auto" + settings["GPSHDOPFilterMax"] = np.nan + settings["GPSHDOPFilterChange"] = np.nan + settings["GPSSmoothFilter"] = "Off" # Depth Averaging - settings['depthAvgMethod'] = 'IDW' - settings['depthValidMethod'] = 'QRev' + settings["depthAvgMethod"] = "IDW" + settings["depthValidMethod"] = "QRev" # Depth Reference # Default to 4 beam depth average - settings['depthReference'] = 'bt_depths' + settings["depthReference"] = "bt_depths" # Depth settings - settings['depthFilterType'] = 'Smooth' - settings['depthComposite'] = 'Off' + settings["depthFilterType"] = 
"Smooth" + settings["depthComposite"] = "Off" for transect in self.transects: if transect.checked: - if transect.depths.vb_depths is not None or transect.depths.ds_depths is not None: - settings['depthComposite'] = 'On' + if ( + transect.depths.vb_depths is not None + or transect.depths.ds_depths is not None + ): + settings["depthComposite"] = "On" break else: - settings['depthComposite'] = 'Off' + settings["depthComposite"] = "Off" break # Interpolation settings settings = self.qrev_default_interpolation_methods(settings) # Edge settings - settings['edgeVelMethod'] = 'MeasMag' - settings['edgeRecEdgeMethod'] = 'Fixed' + settings["edgeVelMethod"] = "MeasMag" + settings["edgeRecEdgeMethod"] = "Fixed" # Extrapolation Settings - settings['extrapTop'] = 'Power' - settings['extrapBot'] = 'Power' - settings['extrapExp'] = 0.1667 - settings['UseWeighted'] = use_weighted + settings["extrapTop"] = "Power" + settings["extrapBot"] = "Power" + settings["extrapExp"] = 0.1667 + settings["UseWeighted"] = use_weighted - settings['UseMeasurementThresholds'] = False - settings['UsePingType'] = True + settings["UseMeasurementThresholds"] = False + settings["UsePingType"] = True return settings @@ -1871,71 +2207,71 @@ class Measurement(object): else: ref_transect = 0 - settings['NavRef'] = self.transects[ref_transect].boatVel.selected + settings["NavRef"] = self.transects[ref_transect].boatVel.selected # Composite tracks - settings['CompTracks'] = 'Off' + settings["CompTracks"] = "Off" # Water track filter settings - settings['WTbeamFilter'] = 3 - settings['WTdFilter'] = 'Off' - settings['WTdFilterThreshold'] = np.nan - settings['WTwFilter'] = 'Off' - settings['WTwFilterThreshold'] = np.nan - settings['WTsmoothFilter'] = 'Off' - settings['WTsnrFilter'] = 'Off' + settings["WTbeamFilter"] = 3 + settings["WTdFilter"] = "Off" + settings["WTdFilterThreshold"] = np.nan + settings["WTwFilter"] = "Off" + settings["WTwFilterThreshold"] = np.nan + settings["WTsmoothFilter"] = "Off" + settings["WTsnrFilter"] = "Off" temp = [x.w_vel for x in self.transects] excluded_dist = np.nanmin([x.excluded_dist_m for x in temp]) - settings['WTExcludedDistance'] = excluded_dist + settings["WTExcludedDistance"] = excluded_dist # Bottom track filter settings - settings['BTbeamFilter'] = 3 - settings['BTdFilter'] = 'Off' - settings['BTdFilterThreshold'] = np.nan - settings['BTwFilter'] = 'Off' - settings['BTwFilterThreshold'] = np.nan - settings['BTsmoothFilter'] = 'Off' + settings["BTbeamFilter"] = 3 + settings["BTdFilter"] = "Off" + settings["BTdFilterThreshold"] = np.nan + settings["BTwFilter"] = "Off" + settings["BTwFilterThreshold"] = np.nan + settings["BTsmoothFilter"] = "Off" # GGA filter settings - settings['ggaDiffQualFilter'] = 1 - settings['ggaAltitudeFilter'] = 'Off' - settings['ggaAltitudeFilterChange'] = np.nan + settings["ggaDiffQualFilter"] = 1 + settings["ggaAltitudeFilter"] = "Off" + settings["ggaAltitudeFilterChange"] = np.nan # VTG filter settings - settings['vtgsmoothFilter'] = 'Off' + settings["vtgsmoothFilter"] = "Off" # GGA and VTG filter settings - settings['GPSHDOPFilter'] = 'Off' - settings['GPSHDOPFilterMax'] = np.nan - settings['GPSHDOPFilterChange'] = np.nan - settings['GPSSmoothFilter'] = 'Off' + settings["GPSHDOPFilter"] = "Off" + settings["GPSHDOPFilterMax"] = np.nan + settings["GPSHDOPFilterChange"] = np.nan + settings["GPSSmoothFilter"] = "Off" # Depth Averaging - settings['depthAvgMethod'] = 'IDW' - settings['depthValidMethod'] = 'QRev' + settings["depthAvgMethod"] = "IDW" + 
settings["depthValidMethod"] = "QRev" # Depth Reference # Default to 4 beam depth average - settings['depthReference'] = 'btDepths' + settings["depthReference"] = "btDepths" # Depth settings - settings['depthFilterType'] = 'None' - settings['depthComposite'] = 'Off' + settings["depthFilterType"] = "None" + settings["depthComposite"] = "Off" # Interpolation settings - settings['BTInterpolation'] = 'None' - settings['WTEnsInterpolation'] = 'None' - settings['WTCellInterpolation'] = 'None' - settings['GPSInterpolation'] = 'None' - settings['depthInterpolation'] = 'None' - settings['WTwtDepthFilter'] = 'Off' + settings["BTInterpolation"] = "None" + settings["WTEnsInterpolation"] = "None" + settings["WTCellInterpolation"] = "None" + settings["GPSInterpolation"] = "None" + settings["depthInterpolation"] = "None" + settings["WTwtDepthFilter"] = "Off" # Edge Settings - settings['edgeVelMethod'] = 'MeasMag' + settings["edgeVelMethod"] = "MeasMag" # settings['edgeVelMethod'] = 'Profile' - settings['edgeRecEdgeMethod'] = 'Fixed' + settings["edgeRecEdgeMethod"] = "Fixed" return settings @@ -1963,8 +2299,7 @@ class Measurement(object): self.apply_settings(settings=settings) def compute_discharge(self): - """Computes the discharge for all transects in the measurement. - """ + """Computes the discharge for all transects in the measurement.""" self.discharge = [] for transect in self.transects: @@ -1973,8 +2308,7 @@ class Measurement(object): self.discharge.append(q) def compute_uncertainty(self): - """Computes uncertainty using QRev model and Oursin model if selected. - """ + """Computes uncertainty using QRev model and Oursin model if selected.""" self.uncertainty = Uncertainty() self.uncertainty.compute_uncertainty(self) @@ -1989,14 +2323,62 @@ class Measurement(object): user_advanced_settings = self.oursin.user_advanced_settings u_measurement_user = self.oursin.u_measurement_user self.oursin = Oursin() - self.oursin.compute_oursin(self, - user_advanced_settings=user_advanced_settings, - u_measurement_user=u_measurement_user) + self.oursin.compute_oursin( + self, + user_advanced_settings=user_advanced_settings, + u_measurement_user=u_measurement_user, + ) + + def compute_map( + self, + node_horizontal_user=None, + node_vertical_user=None, + extrap_option=True, + edges_option=True, + interp_option=False, + ): + """Computes Multi-transect Average Profile + + Parameters + ---------- + node_horizontal_user: float + Width of MAP cell (in m) + node_vertical_user: float + Height of MAP cell (in m) + extrap_option: bool + Boolean indicating if top/bottom extrapolation should be apply + edges_option: bool + Boolean indicating if edges extrapolation should be apply + interp_option: bool + Boolean indicating if interpolated data should be used + """ + + # Check for heading data + if all( + deg == 0 + for deg in self.transects[ + self.checked_transect_idx[0] + ].sensors.heading_deg.internal.data + ): + self.map = None + else: + settings = self.current_settings() + self.map = MAP() + self.map.populate_data( + transects=self.transects, + settings=settings, + node_horizontal_user=node_horizontal_user, + node_vertical_user=node_vertical_user, + extrap_option=extrap_option, + edges_option=edges_option, + interp_option=interp_option, + n_burn=None, + ) @staticmethod def compute_edi(meas, selected_idx, percents): - """Computes the locations and vertical properties for the user selected transect and - flow percentages. 
+ """Computes the locations and vertical properties for the user selected + transect and flow percentages. Parameters ---------- @@ -2016,13 +2398,15 @@ class Measurement(object): percents.sort() # Compute cumulative discharge - q_cum = np.nancumsum(discharge.middle_ens + discharge.top_ens + discharge.bottom_ens) + q_cum = np.nancumsum( + discharge.middle_ens + discharge.top_ens + discharge.bottom_ens + ) # Adjust for moving-bed conditions q_cum = q_cum * discharge.correction_factor # Adjust q for starting edge - if transect.start_edge == 'Left': + if transect.start_edge == "Left": q_cum = q_cum + discharge.left q_cum[-1] = q_cum[-1] + discharge.right start_dist = transect.edges.left.distance_m @@ -2043,12 +2427,16 @@ class Measurement(object): # Compute distance from start bank boat_vel_selected = getattr(transect.boat_vel, transect.boat_vel.selected) - track_x = np.nancumsum(boat_vel_selected.u_processed_mps[transect.in_transect_idx] * - transect.date_time.ens_duration_sec[transect.in_transect_idx]) - track_y = np.nancumsum(boat_vel_selected.v_processed_mps[transect.in_transect_idx] * - transect.date_time.ens_duration_sec[transect.in_transect_idx]) + track_x = np.nancumsum( + boat_vel_selected.u_processed_mps[transect.in_transect_idx] + * transect.date_time.ens_duration_sec[transect.in_transect_idx] + ) + track_y = np.nancumsum( + boat_vel_selected.v_processed_mps[transect.in_transect_idx] + * transect.date_time.ens_duration_sec[transect.in_transect_idx] + ) - dist = np.sqrt(track_x ** 2 + track_y ** 2) + start_dist + dist = np.sqrt(track_x**2 + track_y**2) + start_dist # Initialize variables for computing vertical data n_pts_in_avg = int(len(q_cum) * 0.01) @@ -2069,19 +2457,37 @@ class Measurement(object): lat.append(transect.gps.gga_lat_ens_deg[ensemble]) lon.append(transect.gps.gga_lon_ens_deg[ensemble]) except (ValueError, AttributeError, TypeError): - lat.append('') - lon.append('') + lat.append("") + lon.append("") depth.append(depth_selected.depth_processed_m[ensemble]) - # The velocity is an average velocity for ensembles +/- 1% of the total ensembles - # about the selected ensemble - u = np.nanmean(transect.w_vel.u_processed_mps[:, ensemble - n_pts_in_avg: ensemble + n_pts_in_avg + 1], 1) - v = np.nanmean(transect.w_vel.v_processed_mps[:, ensemble - n_pts_in_avg: ensemble + n_pts_in_avg + 1], 1) - velocity.append(np.sqrt(np.nanmean(u)**2 + np.nanmean(v)**2)) + # The velocity is an average velocity for ensembles +/- 1% of the + # total ensembles about the selected ensemble + u = np.nanmean( + transect.w_vel.u_processed_mps[ + :, ensemble - n_pts_in_avg : ensemble + n_pts_in_avg + 1 + ], + 1, + ) + v = np.nanmean( + transect.w_vel.v_processed_mps[ + :, ensemble - n_pts_in_avg : ensemble + n_pts_in_avg + 1 + ], + 1, + ) + velocity.append(np.sqrt(np.nanmean(u) ** 2 + np.nanmean(v) ** 2)) # Save computed results in a dictionary - edi_results = {'percent': percents, 'target_q': q_target, 'actual_q': q_actual, 'distance': distance, - 'depth': depth, 'velocity': velocity, 'lat': lat, 'lon': lon} + edi_results = { + "percent": percents, + "target_q": q_target, + "actual_q": q_actual, + "distance": distance, + "depth": depth, + "velocity": velocity, + "lat": lat, + "lon": lon, + } return edi_results @staticmethod @@ -2099,17 +2505,26 @@ class Measurement(object): Dictionary with reference, filter, and interpolation settings """ - settings['BTInterpolation'] = 'Linear' - settings['WTEnsInterpolation'] = 'abba' - settings['WTCellInterpolation'] = 'abba' - settings['GPSInterpolation'] = 
'Linear' - settings['depthInterpolation'] = 'Linear' - settings['WTwtDepthFilter'] = 'On' + settings["BTInterpolation"] = "Linear" + settings["WTEnsInterpolation"] = "abba" + settings["WTCellInterpolation"] = "abba" + settings["GPSInterpolation"] = "Linear" + settings["depthInterpolation"] = "Linear" + settings["WTwtDepthFilter"] = "On" return settings - def change_extrapolation(self, method, top=None, bot=None, exp=None, extents=None, threshold=None, compute_q=True, - use_weighted=False): + def change_extrapolation( + self, + method, + top=None, + bot=None, + exp=None, + extents=None, + threshold=None, + compute_q=True, + use_weighted=False, + ): """Applies the selected extrapolation method to each transect. Parameters @@ -2145,32 +2560,38 @@ class Measurement(object): data_type = self.extrap_fit.norm_data[-1].data_type if data_type is None: - data_type = 'q' + data_type = "q" - if method == 'Manual': - self.extrap_fit.fit_method = 'Manual' + if method == "Manual": + self.extrap_fit.fit_method = "Manual" for transect in self.transects: transect.extrap.set_extrap_data(top=top, bot=bot, exp=exp) - self.extrap_fit.process_profiles(transects=self.transects, data_type=data_type, use_weighted=use_weighted) + self.extrap_fit.process_profiles( + transects=self.transects, data_type=data_type, use_weighted=use_weighted + ) else: - self.extrap_fit.fit_method = 'Automatic' - self.extrap_fit.process_profiles(transects=self.transects, data_type=data_type, use_weighted=use_weighted) + self.extrap_fit.fit_method = "Automatic" + self.extrap_fit.process_profiles( + transects=self.transects, data_type=data_type, use_weighted=use_weighted + ) for transect in self.transects: - transect.extrap.set_extrap_data(top=self.extrap_fit.sel_fit[-1].top_method, - bot=self.extrap_fit.sel_fit[-1].bot_method, - exp=self.extrap_fit.sel_fit[-1].exponent) + transect.extrap.set_extrap_data( + top=self.extrap_fit.sel_fit[-1].top_method, + bot=self.extrap_fit.sel_fit[-1].bot_method, + exp=self.extrap_fit.sel_fit[-1].exponent, + ) if compute_q: self.extrap_fit.q_sensitivity = ExtrapQSensitivity() - self.extrap_fit.q_sensitivity.populate_data(transects=self.transects, - extrap_fits=self.extrap_fit.sel_fit) + self.extrap_fit.q_sensitivity.populate_data( + transects=self.transects, extrap_fits=self.extrap_fit.sel_fit + ) self.compute_discharge() @staticmethod def measurement_duration(self): - """Computes the duration of the measurement. - """ + """Computes the duration of the measurement.""" duration = 0 for transect in self.transects: @@ -2180,8 +2601,7 @@ class Measurement(object): @staticmethod def mean_discharges(self): - """Computes the mean discharge for the measurement. 
- """ + """Computes the mean discharge for the measurement.""" # Initialize lists total_q = [] @@ -2206,22 +2626,24 @@ class Measurement(object): int_cells_q.append(self.discharge[n].int_cells) int_ensembles_q.append(self.discharge[n].int_ens) - discharge = {'total_mean': np.nanmean(total_q), - 'uncorrected_mean': np.nanmean(uncorrected_q), - 'top_mean': np.nanmean(top_q), - 'mid_mean': np.nanmean(mid_q), - 'bot_mean': np.nanmean(bot_q), - 'left_mean': np.nanmean(left_q), - 'right_mean': np.nanmean(right_q), - 'int_cells_mean': np.nanmean(int_cells_q), - 'int_ensembles_mean': np.nanmean(int_ensembles_q)} + discharge = { + "total_mean": np.nanmean(total_q), + "uncorrected_mean": np.nanmean(uncorrected_q), + "top_mean": np.nanmean(top_q), + "mid_mean": np.nanmean(mid_q), + "bot_mean": np.nanmean(bot_q), + "left_mean": np.nanmean(left_q), + "right_mean": np.nanmean(right_q), + "int_cells_mean": np.nanmean(int_cells_q), + "int_ensembles_mean": np.nanmean(int_ensembles_q), + } return discharge @staticmethod def compute_measurement_properties(self): - """Computes characteristics of the transects and measurement that assist in evaluating the consistency - of the transects. + """Computes characteristics of the transects and measurement that + assist in evaluating the consistency of the transects. Returns ------- @@ -2254,17 +2676,19 @@ class Measurement(object): # Initialize variables checked_idx = np.array([], dtype=int) n_transects = len(self.transects) - trans_prop = {'width': np.array([np.nan] * (n_transects + 1)), - 'width_cov': np.array([np.nan] * (n_transects + 1)), - 'area': np.array([np.nan] * (n_transects + 1)), - 'area_cov': np.array([np.nan] * (n_transects + 1)), - 'avg_boat_speed': np.array([np.nan] * (n_transects + 1)), - 'avg_boat_course': np.array([np.nan] * n_transects), - 'avg_water_speed': np.array([np.nan] * (n_transects + 1)), - 'avg_water_dir': np.array([np.nan] * (n_transects + 1)), - 'avg_depth': np.array([np.nan] * (n_transects + 1)), - 'max_depth': np.array([np.nan] * (n_transects + 1)), - 'max_water_speed': np.array([np.nan] * (n_transects + 1))} + trans_prop = { + "width": np.array([np.nan] * (n_transects + 1)), + "width_cov": np.array([np.nan] * (n_transects + 1)), + "area": np.array([np.nan] * (n_transects + 1)), + "area_cov": np.array([np.nan] * (n_transects + 1)), + "avg_boat_speed": np.array([np.nan] * (n_transects + 1)), + "avg_boat_course": np.array([np.nan] * n_transects), + "avg_water_speed": np.array([np.nan] * (n_transects + 1)), + "avg_water_dir": np.array([np.nan] * (n_transects + 1)), + "avg_depth": np.array([np.nan] * (n_transects + 1)), + "max_depth": np.array([np.nan] * (n_transects + 1)), + "max_water_speed": np.array([np.nan] * (n_transects + 1)), + } # Process each transect for n, transect in enumerate(self.transects): @@ -2279,23 +2703,37 @@ class Measurement(object): u_boat = boat_selected.u_processed_mps[in_transect_idx] v_boat = boat_selected.v_processed_mps[in_transect_idx] else: - u_boat = nans(transect.boat_vel.bt_vel.u_processed_mps[in_transect_idx].shape) - v_boat = nans(transect.boat_vel.bt_vel.v_processed_mps[in_transect_idx].shape) + u_boat = nans( + transect.boat_vel.bt_vel.u_processed_mps[in_transect_idx].shape + ) + v_boat = nans( + transect.boat_vel.bt_vel.v_processed_mps[in_transect_idx].shape + ) - if np.logical_not(np.all(np.isnan(boat_track['track_x_m']))): + if np.logical_not(np.all(np.isnan(boat_track["track_x_m"]))): # Compute boat course and mean speed - [course_radians, dmg] = cart2pol(boat_track['track_x_m'][-1], 
boat_track['track_y_m'][-1]) - trans_prop['avg_boat_course'][n] = rad2azdeg(course_radians) - trans_prop['avg_boat_speed'][n] = np.nanmean(np.sqrt(u_boat**2 + v_boat**2)) + [course_radians, dmg] = cart2pol( + boat_track["track_x_m"][-1], boat_track["track_y_m"][-1] + ) + trans_prop["avg_boat_course"][n] = rad2azdeg(course_radians) + trans_prop["avg_boat_speed"][n] = np.nanmean( + np.sqrt(u_boat**2 + v_boat**2) + ) # Compute width - trans_prop['width'][n] = np.nansum([dmg, transect.edges.left.distance_m, - transect.edges.right.distance_m]) - - # Project the shiptrack onto a line from the beginning to end of the transect + trans_prop["width"][n] = np.nansum( + [ + dmg, + transect.edges.left.distance_m, + transect.edges.right.distance_m, + ] + ) + + # Project the shiptrack onto a line from the beginning to end + # of the transect unit_x, unit_y = pol2cart(course_radians, 1) - bt = np.array([boat_track['track_x_m'], boat_track['track_y_m']]).T + bt = np.array([boat_track["track_x_m"], boat_track["track_y_m"]]).T dot_prod = bt @ np.array([unit_x, unit_y]) projected_x = dot_prod * unit_x projected_y = dot_prod * unit_y @@ -2305,99 +2743,131 @@ class Measurement(object): depth = getattr(transect.depths, transect.depths.selected) depth_a = np.copy(depth.depth_processed_m) depth_a[np.isnan(depth_a)] = 0 - # Compute area of the moving-boat portion of the cross section using trapezoidal integration. - # This method is consistent with AreaComp but is different from QRev in Matlab - area_moving_boat = np.abs(np.trapz(depth_a[in_transect_idx], station[in_transect_idx])) + # Compute area of the moving-boat portion of the cross section + # using trapezoidal integration. This method is consistent with + # AreaComp but is different from QRev in Matlab + area_moving_boat = np.abs( + np.trapz(depth_a[in_transect_idx], station[in_transect_idx]) + ) # Compute area of left edge edge_type = transect.edges.left.type coef = 1 - if edge_type == 'Triangular': + if edge_type == "Triangular": coef = 0.5 - elif edge_type == 'Rectangular': + elif edge_type == "Rectangular": coef = 1.0 - elif edge_type == 'Custom': + elif edge_type == "Custom": coef = 0.5 + (transect.edges.left.cust_coef - 0.3535) - elif edge_type == 'User Q': + elif edge_type == "User Q": coef = 0.5 - edge_idx = QComp.edge_ensembles('left', transect) + edge_idx = QComp.edge_ensembles("left", transect) edge_depth = np.nanmean(depth.depth_processed_m[edge_idx]) area_left = edge_depth * transect.edges.left.distance_m * coef # Compute area of right edge edge_type = transect.edges.right.type - if edge_type == 'Triangular': + if edge_type == "Triangular": coef = 0.5 - elif edge_type == 'Rectangular': + elif edge_type == "Rectangular": coef = 1.0 - elif edge_type == 'Custom': + elif edge_type == "Custom": coef = 0.5 + (transect.edges.right.cust_coef - 0.3535) - elif edge_type == 'User Q': + elif edge_type == "User Q": coef = 0.5 - edge_idx = QComp.edge_ensembles('right', transect) + edge_idx = QComp.edge_ensembles("right", transect) edge_depth = np.nanmean(depth.depth_processed_m[edge_idx]) area_right = edge_depth * transect.edges.right.distance_m * coef # Compute total cross sectional area - trans_prop['area'][n] = np.nansum([area_left, area_moving_boat, area_right]) + trans_prop["area"][n] = np.nansum( + [area_left, area_moving_boat, area_right] + ) # Compute average water speed - trans_prop['avg_water_speed'][n] = self.discharge[n].total / trans_prop['area'][n] + trans_prop["avg_water_speed"][n] = ( + self.discharge[n].total / trans_prop["area"][n] + ) # 
Compute flow direction using discharge weighting u_water = transect.w_vel.u_processed_mps[:, in_transect_idx] v_water = transect.w_vel.v_processed_mps[:, in_transect_idx] weight = np.abs(self.discharge[n].middle_cells) - u = np.nansum(np.nansum(u_water * weight)) / np.nansum(np.nansum(weight)) - v = np.nansum(np.nansum(v_water * weight)) / np.nansum(np.nansum(weight)) - trans_prop['avg_water_dir'][n] = np.arctan2(u, v) * 180 / np.pi - if trans_prop['avg_water_dir'][n] < 0: - trans_prop['avg_water_dir'][n] = trans_prop['avg_water_dir'][n] + 360 + u = np.nansum(np.nansum(u_water * weight)) / np.nansum( + np.nansum(weight) + ) + v = np.nansum(np.nansum(v_water * weight)) / np.nansum( + np.nansum(weight) + ) + trans_prop["avg_water_dir"][n] = np.arctan2(u, v) * 180 / np.pi + if trans_prop["avg_water_dir"][n] < 0: + trans_prop["avg_water_dir"][n] = ( + trans_prop["avg_water_dir"][n] + 360 + ) # Compute average and max depth - # This is a deviation from QRev in Matlab which simply averaged all the depths - trans_prop['avg_depth'][n] = trans_prop['area'][n] / trans_prop['width'][n] - trans_prop['max_depth'][n] = np.nanmax(depth.depth_processed_m[in_transect_idx]) + # This is a deviation from QRev in Matlab which simply + # averaged all the depths + trans_prop["avg_depth"][n] = ( + trans_prop["area"][n] / trans_prop["width"][n] + ) + trans_prop["max_depth"][n] = np.nanmax( + depth.depth_processed_m[in_transect_idx] + ) # Compute max water speed using the 99th percentile water_speed = np.sqrt(u_water**2 + v_water**2) - trans_prop['max_water_speed'][n] = np.nanpercentile(water_speed, 99) + trans_prop["max_water_speed"][n] = np.nanpercentile(water_speed, 99) if transect.checked: checked_idx = np.append(checked_idx, n) - # Only transects used for discharge are included in measurement properties + # Only transects used for discharge are included in measurement + # properties if len(checked_idx) > 0: n = n_transects - trans_prop['width'][n] = np.nanmean(trans_prop['width'][checked_idx]) - trans_prop['width_cov'][n] = (np.nanstd(trans_prop['width'][checked_idx], ddof=1) / - trans_prop['width'][n]) * 100 - trans_prop['area'][n] = np.nanmean(trans_prop['area'][checked_idx]) - trans_prop['area_cov'][n] = (np.nanstd(trans_prop['area'][checked_idx], ddof=1) / - trans_prop['area'][n]) * 100 - trans_prop['avg_boat_speed'][n] = np.nanmean(trans_prop['avg_boat_speed'][checked_idx]) - trans_prop['avg_water_speed'][n] = np.nanmean(trans_prop['avg_water_speed'][checked_idx]) - trans_prop['avg_depth'][n] = np.nanmean(trans_prop['avg_depth'][checked_idx]) - trans_prop['max_depth'][n] = np.nanmax(trans_prop['max_depth'][checked_idx]) - trans_prop['max_water_speed'][n] = np.nanmax(trans_prop['max_water_speed'][checked_idx]) - - # Compute average water direction using vector coordinates to avoid the problem of averaging - # fluctuations that cross zero degrees + trans_prop["width"][n] = np.nanmean(trans_prop["width"][checked_idx]) + trans_prop["width_cov"][n] = ( + np.nanstd(trans_prop["width"][checked_idx], ddof=1) + / trans_prop["width"][n] + ) * 100 + trans_prop["area"][n] = np.nanmean(trans_prop["area"][checked_idx]) + trans_prop["area_cov"][n] = ( + np.nanstd(trans_prop["area"][checked_idx], ddof=1) + / trans_prop["area"][n] + ) * 100 + trans_prop["avg_boat_speed"][n] = np.nanmean( + trans_prop["avg_boat_speed"][checked_idx] + ) + trans_prop["avg_water_speed"][n] = np.nanmean( + trans_prop["avg_water_speed"][checked_idx] + ) + trans_prop["avg_depth"][n] = np.nanmean( + trans_prop["avg_depth"][checked_idx] + ) + 
trans_prop["max_depth"][n] = np.nanmax( + trans_prop["max_depth"][checked_idx] + ) + trans_prop["max_water_speed"][n] = np.nanmax( + trans_prop["max_water_speed"][checked_idx] + ) + + # Compute average water direction using vector coordinates to + # avoid the problem of averaging fluctuations that cross zero degrees x_coord = [] y_coord = [] for idx in checked_idx: - water_dir_rad = azdeg2rad(trans_prop['avg_water_dir'][idx]) + water_dir_rad = azdeg2rad(trans_prop["avg_water_dir"][idx]) x, y = pol2cart(water_dir_rad, 1) x_coord.append(x) y_coord.append(y) avg_water_dir_rad, _ = cart2pol(np.mean(x_coord), np.mean(y_coord)) - trans_prop['avg_water_dir'][n] = rad2azdeg(avg_water_dir_rad) + trans_prop["avg_water_dir"][n] = rad2azdeg(avg_water_dir_rad) return trans_prop @staticmethod def checked_transects(meas): - """Create a list of indices of the checked transects. - """ + """Create a list of indices of the checked transects.""" checked_transect_idx = [] for n in range(len(meas.transects)): @@ -2424,8 +2894,10 @@ class Measurement(object): # Process transects for idx in idx_transects: - if variable == 'Temperature': - data = np.append(data, meas.transects[idx].sensors.temperature_deg_c.internal.data) + if variable == "Temperature": + data = np.append( + data, meas.transects[idx].sensors.temperature_deg_c.internal.data + ) ens_cum_time = np.nancumsum(meas.transects[idx].date_time.ens_duration_sec) ens_time = meas.transects[idx].date_time.start_serial_time + ens_cum_time serial_time = np.append(serial_time, ens_time) @@ -2433,130 +2905,167 @@ class Measurement(object): return data, serial_time def xml_output(self, version, file_name): - channel = ETree.Element('Channel', QRevFilename=os.path.basename(file_name[:-4]), QRevVersion=version) + channel = ETree.Element( + "Channel", + QRevFilename=os.path.basename(file_name[:-4]), + QRevVersion=version, + ) # (2) SiteInformation Node if self.station_name or self.station_number: - site_info = ETree.SubElement(channel, 'SiteInformation') + site_info = ETree.SubElement(channel, "SiteInformation") # (3) StationName Node if self.station_name: - ETree.SubElement(site_info, 'StationName', type='char').text = self.station_name + ETree.SubElement( + site_info, "StationName", type="char" + ).text = self.station_name # (3) SiteID Node if type(self.station_number) is str: - ETree.SubElement(site_info, 'SiteID', type='char').text = self.station_number + ETree.SubElement( + site_info, "SiteID", type="char" + ).text = self.station_number else: - ETree.SubElement(site_info, 'SiteID', type='char').text = str(self.station_number) + ETree.SubElement(site_info, "SiteID", type="char").text = str( + self.station_number + ) # (3) Persons - ETree.SubElement(site_info, 'Persons', type='char').text = self.persons + ETree.SubElement(site_info, "Persons", type="char").text = self.persons # (3) Measurement Number - ETree.SubElement(site_info, 'MeasurementNumber', type='char').text = self.meas_number + ETree.SubElement( + site_info, "MeasurementNumber", type="char" + ).text = self.meas_number # (3) Stage start temp = self.stage_start_m - ETree.SubElement(site_info, 'StageStart', type='double', unitsCode='m').text = '{:.5f}'.format(temp) + ETree.SubElement( + site_info, "StageStart", type="double", unitsCode="m" + ).text = "{:.5f}".format(temp) # (4) Stage start temp = self.stage_end_m - ETree.SubElement(site_info, 'StageEnd', type='double', unitsCode='m').text = '{:.5f}'.format(temp) + ETree.SubElement( + site_info, "StageEnd", type="double", unitsCode="m" + ).text = 
"{:.5f}".format(temp) # (3) Stage start temp = self.stage_meas_m - ETree.SubElement(site_info, 'StageMeasurement', type='double', unitsCode='m').text = '{:.5f}'.format(temp) + ETree.SubElement( + site_info, "StageMeasurement", type="double", unitsCode="m" + ).text = "{:.5f}".format(temp) # (2) QA Node - qa = ETree.SubElement(channel, 'QA') + qa = ETree.SubElement(channel, "QA") # (3) DiagnosticTestResult Node if len(self.system_tst) > 0: last_test = self.system_tst[-1].data - failed_idx = last_test.count('FAIL') + failed_idx = last_test.count("FAIL") if failed_idx == 0: - test_result = 'Pass' + test_result = "Pass" else: - test_result = str(failed_idx) + ' Failed' + test_result = str(failed_idx) + " Failed" else: - test_result = 'None' - ETree.SubElement(qa, 'DiagnosticTestResult', type='char').text = test_result + test_result = "None" + ETree.SubElement(qa, "DiagnosticTestResult", type="char").text = test_result # (3) CompassCalibrationResult Node try: last_eval = self.compass_eval[-1] # StreamPro, RR - idx = last_eval.data.find('Typical Heading Error: <') + idx = last_eval.data.find("Typical Heading Error: <") if idx == (-1): # Rio Grande - idx = last_eval.data.find('>>> Total error:') + idx = last_eval.data.find(">>> Total error:") if idx != (-1): idx_start = idx + 17 idx_end = idx_start + 10 comp_error = last_eval.data[idx_start:idx_end] - comp_error = ''.join([n for n in comp_error if n.isdigit() or n == '.']) + comp_error = "".join( + [n for n in comp_error if n.isdigit() or n == "."] + ) else: - comp_error = '' + comp_error = "" else: # StreamPro, RR idx_start = idx + 24 idx_end = idx_start + 10 comp_error = last_eval.data[idx_start:idx_end] - comp_error = ''.join([n for n in comp_error if n.isdigit() or n == '.']) + comp_error = "".join([n for n in comp_error if n.isdigit() or n == "."]) # Evaluation could not be determined if not comp_error: - ETree.SubElement(qa, 'CompassCalibrationResult', type='char').text = 'Yes' - elif comp_error == '': - ETree.SubElement(qa, 'CompassCalibrationResult', type='char').text = 'No' + ETree.SubElement( + qa, "CompassCalibrationResult", type="char" + ).text = "Yes" + elif comp_error == "": + ETree.SubElement( + qa, "CompassCalibrationResult", type="char" + ).text = "No" else: - ETree.SubElement(qa, 'CompassCalibrationResult', type='char').text = 'Max ' + comp_error + ETree.SubElement(qa, "CompassCalibrationResult", type="char").text = ( + "Max " + comp_error + ) except (IndexError, TypeError, AttributeError): try: if len(self.compass_cal) > 0: - ETree.SubElement(qa, 'CompassCalibrationResult', type='char').text = 'Yes' + ETree.SubElement( + qa, "CompassCalibrationResult", type="char" + ).text = "Yes" else: - ETree.SubElement(qa, 'CompassCalibrationResult', type='char').text = 'No' + ETree.SubElement( + qa, "CompassCalibrationResult", type="char" + ).text = "No" except (IndexError, TypeError): - ETree.SubElement(qa, 'CompassCalibrationResult', type='char').text = 'No' + ETree.SubElement( + qa, "CompassCalibrationResult", type="char" + ).text = "No" # (3) MovingBedTestType Node if not self.mb_tests: - ETree.SubElement(qa, 'MovingBedTestType', type='char').text = 'None' + ETree.SubElement(qa, "MovingBedTestType", type="char").text = "None" else: - selected_idx = [i for (i, val) in enumerate(self.mb_tests) if val.selected is True] + selected_idx = [ + i for (i, val) in enumerate(self.mb_tests) if val.selected is True + ] if len(selected_idx) >= 1: temp = self.mb_tests[selected_idx[0]].type else: temp = self.mb_tests[-1].type - ETree.SubElement(qa, 
'MovingBedTestType', type='char').text = str(temp) + ETree.SubElement(qa, "MovingBedTestType", type="char").text = str(temp) # MovingBedTestResult Node - temp = 'Unknown' + temp = "Unknown" for idx in selected_idx: - if self.mb_tests[idx].moving_bed == 'Yes': - temp = 'Yes' + if self.mb_tests[idx].moving_bed == "Yes": + temp = "Yes" break - elif self.mb_tests[idx].moving_bed == 'No': - temp = 'No' + elif self.mb_tests[idx].moving_bed == "No": + temp = "No" - ETree.SubElement(qa, 'MovingBedTestResult', type='char').text = temp + ETree.SubElement(qa, "MovingBedTestResult", type="char").text = temp # (3) DiagnosticTest and Text Node if self.system_tst: - test_text = '' + test_text = "" for test in self.system_tst: test_text += test.data - diag_test = ETree.SubElement(qa, 'DiagnosticTest') - ETree.SubElement(diag_test, 'Text', type='char').text = test_text + diag_test = ETree.SubElement(qa, "DiagnosticTest") + ETree.SubElement(diag_test, "Text", type="char").text = test_text # (3) CompassCalibration and Text Node - compass_text = '' + compass_text = "" try: for each in self.compass_cal: - if self.transects[self.checked_transect_idx[0]].adcp.manufacturer == 'SonTek': - idx = each.data.find('CAL_TIME') + if ( + self.transects[self.checked_transect_idx[0]].adcp.manufacturer + == "SonTek" + ): + idx = each.data.find("CAL_TIME") compass_text += each.data[idx:] else: compass_text += each.data @@ -2564,8 +3073,11 @@ class Measurement(object): pass try: for each in self.compass_eval: - if self.transects[self.checked_transect_idx[0]].adcp.manufacturer == 'SonTek': - idx = each.data.find('CAL_TIME') + if ( + self.transects[self.checked_transect_idx[0]].adcp.manufacturer + == "SonTek" + ): + idx = each.data.find("CAL_TIME") compass_text += each.data[idx:] else: compass_text += each.data @@ -2573,131 +3085,172 @@ class Measurement(object): pass if len(compass_text) > 0: - comp_cal = ETree.SubElement(qa, 'CompassCalibration') - ETree.SubElement(comp_cal, 'Text', type='char').text = compass_text + comp_cal = ETree.SubElement(qa, "CompassCalibration") + ETree.SubElement(comp_cal, "Text", type="char").text = compass_text # (3) MovingBedTest Node if self.mb_tests: for each in self.mb_tests: - mbt = ETree.SubElement(qa, 'MovingBedTest') + mbt = ETree.SubElement(qa, "MovingBedTest") # (4) Filename Node - ETree.SubElement(mbt, 'Filename', type='char').text = each.transect.file_name + ETree.SubElement( + mbt, "Filename", type="char" + ).text = each.transect.file_name # (4) TestType Node - ETree.SubElement(mbt, 'TestType', type='char').text = each.type + ETree.SubElement(mbt, "TestType", type="char").text = each.type # (4) Duration Node - ETree.SubElement(mbt, 'Duration', type='double', - unitsCode='sec').text = '{:.2f}'.format(each.duration_sec) + ETree.SubElement( + mbt, "Duration", type="double", unitsCode="sec" + ).text = "{:.2f}".format(each.duration_sec) # (4) PercentInvalidBT Node - ETree.SubElement(mbt, 'PercentInvalidBT', type='double').text = '{:.4f}'.format(each.percent_invalid_bt) + ETree.SubElement( + mbt, "PercentInvalidBT", type="double" + ).text = "{:.4f}".format(each.percent_invalid_bt) # (4) HeadingDifference Node if each.compass_diff_deg: - temp = '{:.2f}'.format(each.compass_diff_deg) + temp = "{:.2f}".format(each.compass_diff_deg) else: - temp = '' - ETree.SubElement(mbt, 'HeadingDifference', type='double', unitsCode='deg').text = temp + temp = "" + ETree.SubElement( + mbt, "HeadingDifference", type="double", unitsCode="deg" + ).text = temp # (4) MeanFlowDirection Node if each.flow_dir: - temp 
= '{:.2f}'.format(each.flow_dir) + temp = "{:.2f}".format(each.flow_dir) else: - temp = '' - ETree.SubElement(mbt, 'MeanFlowDirection', type='double', unitsCode='deg').text = temp + temp = "" + ETree.SubElement( + mbt, "MeanFlowDirection", type="double", unitsCode="deg" + ).text = temp # (4) MovingBedDirection Node if each.mb_dir: - temp = '{:.2f}'.format(each.mb_dir) + temp = "{:.2f}".format(each.mb_dir) else: - temp = '' - ETree.SubElement(mbt, 'MovingBedDirection', type='double', unitsCode='deg').text = temp + temp = "" + ETree.SubElement( + mbt, "MovingBedDirection", type="double", unitsCode="deg" + ).text = temp # (4) DistanceUpstream Node - ETree.SubElement(mbt, 'DistanceUpstream', type='double', unitsCode='m').text = \ - '{:.4f}'.format(each.dist_us_m) + ETree.SubElement( + mbt, "DistanceUpstream", type="double", unitsCode="m" + ).text = "{:.4f}".format(each.dist_us_m) # (4) MeanFlowSpeed Node - ETree.SubElement(mbt, 'MeanFlowSpeed', type='double', unitsCode='mps').text = \ - '{:.4f}'.format(each.flow_spd_mps) + ETree.SubElement( + mbt, "MeanFlowSpeed", type="double", unitsCode="mps" + ).text = "{:.4f}".format(each.flow_spd_mps) # (4) MovingBedSpeed Node - ETree.SubElement(mbt, 'MovingBedSpeed', type='double', unitsCode='mps').text = \ - '{:.4f}'.format(each.mb_spd_mps) + ETree.SubElement( + mbt, "MovingBedSpeed", type="double", unitsCode="mps" + ).text = "{:.4f}".format(each.mb_spd_mps) # (4) PercentMovingBed Node - ETree.SubElement(mbt, 'PercentMovingBed', type='double').text = '{:.2f}'.format(each.percent_mb) + ETree.SubElement( + mbt, "PercentMovingBed", type="double" + ).text = "{:.2f}".format(each.percent_mb) # (4) TestQuality Node - ETree.SubElement(mbt, 'TestQuality', type='char').text = each.test_quality + ETree.SubElement( + mbt, "TestQuality", type="char" + ).text = each.test_quality # (4) MovingBedPresent Node - ETree.SubElement(mbt, 'MovingBedPresent', type='char').text = each.moving_bed + ETree.SubElement( + mbt, "MovingBedPresent", type="char" + ).text = each.moving_bed # (4) UseToCorrect Node if each.use_2_correct: - ETree.SubElement(mbt, 'UseToCorrect', type='char').text = 'Yes' + ETree.SubElement(mbt, "UseToCorrect", type="char").text = "Yes" else: - ETree.SubElement(mbt, 'UseToCorrect', type='char').text = 'No' + ETree.SubElement(mbt, "UseToCorrect", type="char").text = "No" # (4) UserValid Node if each.user_valid: - ETree.SubElement(mbt, 'UserValid', type='char').text = 'Yes' + ETree.SubElement(mbt, "UserValid", type="char").text = "Yes" else: - ETree.SubElement(mbt, 'UserValid', type='char').text = 'No' + ETree.SubElement(mbt, "UserValid", type="char").text = "No" # (4) Message Node if len(each.messages) > 0: - str_out = '' + str_out = "" for message in each.messages: - str_out = str_out + message + '; ' - ETree.SubElement(mbt, 'Message', type='char').text = str_out + str_out = str_out + message + "; " + ETree.SubElement(mbt, "Message", type="char").text = str_out # (3) TemperatureCheck Node - temp_check = ETree.SubElement(qa, 'TemperatureCheck') + temp_check = ETree.SubElement(qa, "TemperatureCheck") # (4) VerificationTemperature Node - if not np.isnan(self.ext_temp_chk['user']): - ETree.SubElement(temp_check, 'VerificationTemperature', type='double', unitsCode='degC').text = \ - '{:.2f}'.format(self.ext_temp_chk['user']) + if not np.isnan(self.ext_temp_chk["user"]): + ETree.SubElement( + temp_check, "VerificationTemperature", type="double", unitsCode="degC" + ).text = "{:.2f}".format(self.ext_temp_chk["user"]) # (4) InstrumentTemperature Node - if not 
np.isnan(self.ext_temp_chk['adcp']): - ETree.SubElement(temp_check, 'InstrumentTemperature', type='double', - unitsCode='degC').text = '{:.2f}'.format(self.ext_temp_chk['adcp']) + if not np.isnan(self.ext_temp_chk["adcp"]): + ETree.SubElement( + temp_check, "InstrumentTemperature", type="double", unitsCode="degC" + ).text = "{:.2f}".format(self.ext_temp_chk["adcp"]) # (4) TemperatureChange Node: temp_all = np.array([np.nan]) for each in self.transects: # Check for situation where user has entered a constant temperature - temperature_selected = getattr(each.sensors.temperature_deg_c, each.sensors.temperature_deg_c.selected) + temperature_selected = getattr( + each.sensors.temperature_deg_c, each.sensors.temperature_deg_c.selected + ) temperature = temperature_selected.data - if each.sensors.temperature_deg_c.selected != 'user': + if each.sensors.temperature_deg_c.selected != "user": # Temperatures for ADCP. temp_all = np.concatenate((temp_all, temperature)) else: # User specified constant temperature. - # Concatenate a matrix of size of internal data with repeated user values. - user_arr = np.tile(each.sensors.temperature_deg_c.user.data, - (np.size(each.sensors.temperature_deg_c.internal.data))) + # Concatenate a matrix of size of internal data with repeated + # user values. + user_arr = np.tile( + each.sensors.temperature_deg_c.user.data, + (np.size(each.sensors.temperature_deg_c.internal.data)), + ) temp_all = np.concatenate((temp_all, user_arr)) t_range = np.nanmax(temp_all) - np.nanmin(temp_all) - ETree.SubElement(temp_check, 'TemperatureChange', type='double', - unitsCode='degC').text = '{:.2f}'.format(t_range) + ETree.SubElement( + temp_check, "TemperatureChange", type="double", unitsCode="degC" + ).text = "{:.2f}".format(t_range) # (3) QRev_Message Node - qa_check_keys = ['bt_vel', 'compass', 'depths', 'edges', 'extrapolation', 'gga_vel', 'movingbed', 'system_tst', - 'temperature', 'transects', 'user', 'vtg_vel', 'w_vel'] + qa_check_keys = [ + "bt_vel", + "compass", + "depths", + "edges", + "extrapolation", + "gga_vel", + "movingbed", + "system_tst", + "temperature", + "transects", + "user", + "vtg_vel", + "w_vel", + ] # For each qa check retrieve messages messages = [] for key in qa_check_keys: qa_type = getattr(self.qa, key) - if qa_type['messages']: - for message in qa_type['messages']: + if qa_type["messages"]: + for message in qa_type["messages"]: if type(message) is str: if message[:3].isupper(): messages.append([message, 1]) @@ -2710,39 +3263,45 @@ class Measurement(object): messages.sort(key=lambda x: x[1]) if len(messages) > 0: - temp = '' + temp = "" for message in messages: temp = temp + message[0] - ETree.SubElement(qa, 'QRev_Message', type='char').text = temp + ETree.SubElement(qa, "QRev_Message", type="char").text = temp # (2) Instrument Node - instrument = ETree.SubElement(channel, 'Instrument') + instrument = ETree.SubElement(channel, "Instrument") # (3) Manufacturer Node - ETree.SubElement(instrument, 'Manufacturer', type='char').text = \ - self.transects[self.checked_transect_idx[0]].adcp.manufacturer + ETree.SubElement(instrument, "Manufacturer", type="char").text = self.transects[ + self.checked_transect_idx[0] + ].adcp.manufacturer # (3) Model Node - ETree.SubElement(instrument, 'Model', type='char').text = \ - self.transects[self.checked_transect_idx[0]].adcp.model + ETree.SubElement(instrument, "Model", type="char").text = self.transects[ + self.checked_transect_idx[0] + ].adcp.model # (3) SerialNumber Node sn = 
self.transects[self.checked_transect_idx[0]].adcp.serial_num - ETree.SubElement(instrument, 'SerialNumber', type='char').text = str(sn) + ETree.SubElement(instrument, "SerialNumber", type="char").text = str(sn) # (3) FirmwareVersion Node ver = self.transects[self.checked_transect_idx[0]].adcp.firmware - ETree.SubElement(instrument, 'FirmwareVersion', type='char').text = str(ver) + ETree.SubElement(instrument, "FirmwareVersion", type="char").text = str(ver) # (3) Frequency Node freq = self.transects[self.checked_transect_idx[0]].adcp.frequency_khz if type(freq) == np.ndarray: freq = "Multi" - ETree.SubElement(instrument, 'Frequency', type='char', unitsCode='kHz').text = str(freq) + ETree.SubElement( + instrument, "Frequency", type="char", unitsCode="kHz" + ).text = str(freq) # (3) BeamAngle Node ang = self.transects[self.checked_transect_idx[0]].adcp.beam_angle_deg - ETree.SubElement(instrument, 'BeamAngle', type='double', unitsCode='deg').text = '{:.1f}'.format(ang) + ETree.SubElement( + instrument, "BeamAngle", type="double", unitsCode="deg" + ).text = "{:.1f}".format(ang) # (3) BlankingDistance Node w_vel = [] @@ -2753,135 +3312,187 @@ class Measurement(object): blank.append(each.blanking_distance_m) if isinstance(blank[0], float): temp = np.mean(blank) - if self.transects[self.checked_transect_idx[0]].w_vel.excluded_dist_m > temp: - temp = self.transects[self.checked_transect_idx[0]].w_vel.excluded_dist_m + if ( + self.transects[self.checked_transect_idx[0]].w_vel.excluded_dist_m + > temp + ): + temp = self.transects[ + self.checked_transect_idx[0] + ].w_vel.excluded_dist_m else: temp = self.transects[self.checked_transect_idx[0]].w_vel.excluded_dist_m - ETree.SubElement(instrument, 'BlankingDistance', type='double', unitsCode='m').text = '{:.4f}'.format(temp) + ETree.SubElement( + instrument, "BlankingDistance", type="double", unitsCode="m" + ).text = "{:.4f}".format(temp) # (3) InstrumentConfiguration Node - commands = '' - if self.transects[self.checked_transect_idx[0]].adcp.configuration_commands is not None: - for each in self.transects[self.checked_transect_idx[0]].adcp.configuration_commands: + commands = "" + if ( + self.transects[self.checked_transect_idx[0]].adcp.configuration_commands + is not None + ): + for each in self.transects[ + self.checked_transect_idx[0] + ].adcp.configuration_commands: if type(each) is str: - commands += each + ' ' - ETree.SubElement(instrument, 'InstrumentConfiguration', type='char').text = commands + commands += each + " " + ETree.SubElement( + instrument, "InstrumentConfiguration", type="char" + ).text = commands # (2) Processing Node - processing = ETree.SubElement(channel, 'Processing') + processing = ETree.SubElement(channel, "Processing") # (3) SoftwareVersion Node - ETree.SubElement(processing, 'SoftwareVersion', type='char').text = version + ETree.SubElement(processing, "SoftwareVersion", type="char").text = version # (3) Type Node - ETree.SubElement(processing, 'Type', type='char').text = self.processing + ETree.SubElement(processing, "Type", type="char").text = self.processing # (3) AreaComputationMethod Node - ETree.SubElement(processing, 'AreaComputationMethod', type='char').text = 'Parallel' + ETree.SubElement( + processing, "AreaComputationMethod", type="char" + ).text = "Parallel" # (3) Navigation Node - navigation = ETree.SubElement(processing, 'Navigation') + navigation = ETree.SubElement(processing, "Navigation") # (4) Reference Node - ETree.SubElement(navigation, 'Reference', type='char').text = \ - 
self.transects[self.checked_transect_idx[0]].w_vel.nav_ref + ETree.SubElement(navigation, "Reference", type="char").text = self.transects[ + self.checked_transect_idx[0] + ].w_vel.nav_ref # (4) CompositeTrack - ETree.SubElement(navigation, 'CompositeTrack', type='char').text = \ - self.transects[self.checked_transect_idx[0]].boat_vel.composite + ETree.SubElement( + navigation, "CompositeTrack", type="char" + ).text = self.transects[self.checked_transect_idx[0]].boat_vel.composite # (4) MagneticVariation Node - mag_var = self.transects[self.checked_transect_idx[0]].sensors.heading_deg.internal.mag_var_deg - ETree.SubElement(navigation, 'MagneticVariation', type='double', - unitsCode='deg').text = '{:.2f}'.format(mag_var) + mag_var = self.transects[ + self.checked_transect_idx[0] + ].sensors.heading_deg.internal.mag_var_deg + ETree.SubElement( + navigation, "MagneticVariation", type="double", unitsCode="deg" + ).text = "{:.2f}".format(mag_var) # (4) BeamFilter - nav_data = getattr(self.transects[self.checked_transect_idx[0]].boat_vel, - self.transects[self.checked_transect_idx[0]].boat_vel.selected) + nav_data = getattr( + self.transects[self.checked_transect_idx[0]].boat_vel, + self.transects[self.checked_transect_idx[0]].boat_vel.selected, + ) temp = nav_data.beam_filter if temp < 0: - temp = 'Auto' + temp = "Auto" else: temp = str(temp) - ETree.SubElement(navigation, 'BeamFilter', type='char').text = temp + ETree.SubElement(navigation, "BeamFilter", type="char").text = temp # (4) ErrorVelocityFilter Node evf = nav_data.d_filter - if evf == 'Manual': - evf = '{:.4f}'.format(nav_data.d_filter_thresholds) - ETree.SubElement(navigation, 'ErrorVelocityFilter', type='char', unitsCode='mps').text = evf + if evf == "Manual": + evf = "{:.4f}".format(nav_data.d_filter_thresholds) + ETree.SubElement( + navigation, "ErrorVelocityFilter", type="char", unitsCode="mps" + ).text = evf # (4) VerticalVelocityFilter Node vvf = nav_data.w_filter - if vvf == 'Manual': - vvf = '{:.4f}'.format(nav_data.w_filter_thresholds) - ETree.SubElement(navigation, 'VerticalVelocityFilter', type='char', unitsCode='mps').text = vvf + if vvf == "Manual": + vvf = "{:.4f}".format(nav_data.w_filter_thresholds) + ETree.SubElement( + navigation, "VerticalVelocityFilter", type="char", unitsCode="mps" + ).text = vvf # (4) Use measurement thresholds temp = nav_data.use_measurement_thresholds if temp: - temp = 'Yes' + temp = "Yes" else: - temp = 'No' - ETree.SubElement(navigation, 'UseMeasurementThresholds', type='char').text = temp + temp = "No" + ETree.SubElement( + navigation, "UseMeasurementThresholds", type="char" + ).text = temp # (4) OtherFilter Node o_f = nav_data.smooth_filter - ETree.SubElement(navigation, 'OtherFilter', type='char').text = o_f + ETree.SubElement(navigation, "OtherFilter", type="char").text = o_f # (4) GPSDifferentialQualityFilter Node temp = nav_data.gps_diff_qual_filter if temp: if isinstance(temp, int) or isinstance(temp, float): temp = str(temp) - ETree.SubElement(navigation, 'GPSDifferentialQualityFilter', type='char').text = temp + ETree.SubElement( + navigation, "GPSDifferentialQualityFilter", type="char" + ).text = temp # (4) GPSAltitudeFilter Node temp = nav_data.gps_altitude_filter if temp: - if temp == 'Manual': - temp = self.transects[self.checked_transect_idx[0]].boat_vel.gps_altitude_filter_change - ETree.SubElement(navigation, 'GPSAltitudeFilter', type='char', unitsCode='m').text = str(temp) + if temp == "Manual": + temp = self.transects[ + self.checked_transect_idx[0] + 
].boat_vel.gps_altitude_filter_change + ETree.SubElement( + navigation, "GPSAltitudeFilter", type="char", unitsCode="m" + ).text = str(temp) # (4) HDOPChangeFilter temp = nav_data.gps_HDOP_filter if temp: - if temp == 'Manual': - temp = '{:.2f}'.format(self.transects[self.checked_transect_idx[0]].boat_vel.gps_hdop_filter_change) - ETree.SubElement(navigation, 'HDOPChangeFilter', type='char').text = temp + if temp == "Manual": + temp = "{:.2f}".format( + self.transects[ + self.checked_transect_idx[0] + ].boat_vel.gps_hdop_filter_change + ) + ETree.SubElement(navigation, "HDOPChangeFilter", type="char").text = temp # (4) HDOPThresholdFilter temp = nav_data.gps_HDOP_filter if temp: - if temp == 'Manual': - temp = '{:.2f}'.format(self.transects[self.checked_transect_idx[0]].boat_vel.gps_HDOP_filter_max) - ETree.SubElement(navigation, 'HDOPThresholdFilter', type='char').text = temp + if temp == "Manual": + temp = "{:.2f}".format( + self.transects[ + self.checked_transect_idx[0] + ].boat_vel.gps_HDOP_filter_max + ) + ETree.SubElement(navigation, "HDOPThresholdFilter", type="char").text = temp # (4) InterpolationType Node temp = nav_data.interpolate - ETree.SubElement(navigation, 'InterpolationType', type='char').text = temp + ETree.SubElement(navigation, "InterpolationType", type="char").text = temp # (3) Depth Node - depth = ETree.SubElement(processing, 'Depth') + depth = ETree.SubElement(processing, "Depth") # (4) Reference Node - if self.transects[self.checked_transect_idx[0]].depths.selected == 'bt_depths': - temp = 'BT' - elif self.transects[self.checked_transect_idx[0]].depths.selected == 'vb_depths': - temp = 'VB' - elif self.transects[self.checked_transect_idx[0]].depths.selected == 'ds_depths': - temp = 'DS' - ETree.SubElement(depth, 'Reference', type='char').text = temp + if self.transects[self.checked_transect_idx[0]].depths.selected == "bt_depths": + temp = "BT" + elif ( + self.transects[self.checked_transect_idx[0]].depths.selected == "vb_depths" + ): + temp = "VB" + elif ( + self.transects[self.checked_transect_idx[0]].depths.selected == "ds_depths" + ): + temp = "DS" + ETree.SubElement(depth, "Reference", type="char").text = temp # (4) CompositeDepth Node - ETree.SubElement(depth, 'CompositeDepth', type='char').text = \ - self.transects[self.checked_transect_idx[0]].depths.composite + ETree.SubElement(depth, "CompositeDepth", type="char").text = self.transects[ + self.checked_transect_idx[0] + ].depths.composite # (4) ADCPDepth Node - depth_data = getattr(self.transects[self.checked_transect_idx[0]].depths, - self.transects[self.checked_transect_idx[0]].depths.selected) + depth_data = getattr( + self.transects[self.checked_transect_idx[0]].depths, + self.transects[self.checked_transect_idx[0]].depths.selected, + ) temp = depth_data.draft_use_m - ETree.SubElement(depth, 'ADCPDepth', type='double', unitsCode='m').text = '{:.4f}'.format(temp) + ETree.SubElement( + depth, "ADCPDepth", type="double", unitsCode="m" + ).text = "{:.4f}".format(temp) # (4) ADCPDepthConsistent Node drafts = [] @@ -2892,88 +3503,102 @@ class Measurement(object): unique_drafts = set(drafts) num_drafts = len(unique_drafts) if num_drafts > 1: - temp = 'No' + temp = "No" else: - temp = 'Yes' - ETree.SubElement(depth, 'ADCPDepthConsistent', type='boolean').text = temp + temp = "Yes" + ETree.SubElement(depth, "ADCPDepthConsistent", type="boolean").text = temp # (4) FilterType Node temp = depth_data.filter_type - ETree.SubElement(depth, 'FilterType', type='char').text = temp + ETree.SubElement(depth, "FilterType", 
type="char").text = temp # (4) InterpolationType Node temp = depth_data.interp_type - ETree.SubElement(depth, 'InterpolationType', type='char').text = temp + ETree.SubElement(depth, "InterpolationType", type="char").text = temp # (4) AveragingMethod Node temp = depth_data.avg_method - ETree.SubElement(depth, 'AveragingMethod', type='char').text = temp + ETree.SubElement(depth, "AveragingMethod", type="char").text = temp # (4) ValidDataMethod Node temp = depth_data.valid_data_method - ETree.SubElement(depth, 'ValidDataMethod', type='char').text = temp + ETree.SubElement(depth, "ValidDataMethod", type="char").text = temp # (3) WaterTrack Node - water_track = ETree.SubElement(processing, 'WaterTrack') + water_track = ETree.SubElement(processing, "WaterTrack") # (4) ExcludedDistance Node temp = self.transects[self.checked_transect_idx[0]].w_vel.excluded_dist_m - ETree.SubElement(water_track, 'ExcludedDistance', type='double', unitsCode='m').text = '{:.4f}'.format(temp) + ETree.SubElement( + water_track, "ExcludedDistance", type="double", unitsCode="m" + ).text = "{:.4f}".format(temp) # (4) BeamFilter Node temp = self.transects[self.checked_transect_idx[0]].w_vel.beam_filter if temp < 0: - temp = 'Auto' + temp = "Auto" else: temp = str(temp) - ETree.SubElement(water_track, 'BeamFilter', type='char').text = temp + ETree.SubElement(water_track, "BeamFilter", type="char").text = temp # (4) ErrorVelocityFilter Node temp = self.transects[self.checked_transect_idx[0]].w_vel.d_filter - if temp == 'Manual': - temp = '{:.4f}'.format(self.transects[self.checked_transect_idx[0]].w_vel.d_filter_thresholds) - ETree.SubElement(water_track, 'ErrorVelocityFilter', type='char', unitsCode='mps').text = temp + if temp == "Manual": + temp = "{:.4f}".format( + self.transects[self.checked_transect_idx[0]].w_vel.d_filter_thresholds + ) + ETree.SubElement( + water_track, "ErrorVelocityFilter", type="char", unitsCode="mps" + ).text = temp # (4) VerticalVelocityFilter Node temp = self.transects[self.checked_transect_idx[0]].w_vel.w_filter - if temp == 'Manual': - temp = '{:.4f}'.format(self.transects[self.checked_transect_idx[0]].w_vel.w_filter_thresholds) - ETree.SubElement(water_track, 'VerticalVelocityFilter', type='char', unitsCode='mps').text = temp + if temp == "Manual": + temp = "{:.4f}".format( + self.transects[self.checked_transect_idx[0]].w_vel.w_filter_thresholds + ) + ETree.SubElement( + water_track, "VerticalVelocityFilter", type="char", unitsCode="mps" + ).text = temp # (4) Use measurement thresholds - temp = self.transects[self.checked_transect_idx[0]].w_vel.use_measurement_thresholds + temp = self.transects[ + self.checked_transect_idx[0] + ].w_vel.use_measurement_thresholds if temp: - temp = 'Yes' + temp = "Yes" else: - temp = 'No' - ETree.SubElement(water_track, 'UseMeasurementThresholds', type='char').text = temp + temp = "No" + ETree.SubElement( + water_track, "UseMeasurementThresholds", type="char" + ).text = temp # (4) OtherFilter Node temp = self.transects[self.checked_transect_idx[0]].w_vel.smooth_filter - ETree.SubElement(water_track, 'OtherFilter', type='char').text = temp + ETree.SubElement(water_track, "OtherFilter", type="char").text = temp # (4) SNRFilter Node temp = self.transects[self.checked_transect_idx[0]].w_vel.snr_filter - ETree.SubElement(water_track, 'SNRFilter', type='char').text = temp + ETree.SubElement(water_track, "SNRFilter", type="char").text = temp # (4) CellInterpolation Node temp = self.transects[self.checked_transect_idx[0]].w_vel.interpolate_cells - 
ETree.SubElement(water_track, 'CellInterpolation', type='char').text = temp + ETree.SubElement(water_track, "CellInterpolation", type="char").text = temp # (4) EnsembleInterpolation Node temp = self.transects[self.checked_transect_idx[0]].w_vel.interpolate_ens - ETree.SubElement(water_track, 'EnsembleInterpolation', type='char').text = temp + ETree.SubElement(water_track, "EnsembleInterpolation", type="char").text = temp # (3) Edge Node - edge = ETree.SubElement(processing, 'Edge') + edge = ETree.SubElement(processing, "Edge") # (4) RectangularEdgeMethod Node temp = self.transects[self.checked_transect_idx[0]].edges.rec_edge_method - ETree.SubElement(edge, 'RectangularEdgeMethod', type='char').text = temp + ETree.SubElement(edge, "RectangularEdgeMethod", type="char").text = temp # (4) VelocityMethod Node temp = self.transects[self.checked_transect_idx[0]].edges.vel_method - ETree.SubElement(edge, 'VelocityMethod', type='char').text = temp + ETree.SubElement(edge, "VelocityMethod", type="char").text = temp # (4) LeftType Node typ = [] @@ -2983,27 +3608,27 @@ class Measurement(object): unique_type = set(typ) num_types = len(unique_type) if num_types > 1: - temp = 'Varies' + temp = "Varies" else: temp = typ[0] - ETree.SubElement(edge, 'LeftType', type='char').text = temp + ETree.SubElement(edge, "LeftType", type="char").text = temp # LeftEdgeCoefficient - if temp == 'User Q': - temp = 'N/A' - elif temp == 'Varies': - temp = 'N/A' + if temp == "User Q": + temp = "N/A" + elif temp == "Varies": + temp = "N/A" else: coef = [] for transect in self.transects: if transect.checked: - coef.append(QComp.edge_coef('left', transect)) + coef.append(QComp.edge_coef("left", transect)) num_coef = len(set(coef)) if num_coef > 1: - temp = 'Varies' + temp = "Varies" else: - temp = '{:.4f}'.format(coef[0]) - ETree.SubElement(edge, 'LeftEdgeCoefficient', type='char').text = temp + temp = "{:.4f}".format(coef[0]) + ETree.SubElement(edge, "LeftEdgeCoefficient", type="char").text = temp # (4) RightType Node typ = [] @@ -3013,53 +3638,53 @@ class Measurement(object): unique_type = set(typ) num_types = len(unique_type) if num_types > 1: - temp = 'Varies' + temp = "Varies" else: temp = typ[0] - ETree.SubElement(edge, 'RightType', type='char').text = temp + ETree.SubElement(edge, "RightType", type="char").text = temp # RightEdgeCoefficient - if temp == 'User Q': - temp = 'N/A' - elif temp == 'Varies': - temp = 'N/A' + if temp == "User Q": + temp = "N/A" + elif temp == "Varies": + temp = "N/A" else: coef = [] for transect in self.transects: if transect.checked: - coef.append(QComp.edge_coef('right', transect)) + coef.append(QComp.edge_coef("right", transect)) num_coef = len(set(coef)) if num_coef > 1: - temp = 'Varies' + temp = "Varies" else: - temp = '{:.4f}'.format(coef[0]) - ETree.SubElement(edge, 'RightEdgeCoefficient', type='char').text = temp + temp = "{:.4f}".format(coef[0]) + ETree.SubElement(edge, "RightEdgeCoefficient", type="char").text = temp # (3) Extrapolation Node - extrap = ETree.SubElement(processing, 'Extrapolation') + extrap = ETree.SubElement(processing, "Extrapolation") # (4) TopMethod Node temp = self.transects[self.checked_transect_idx[0]].extrap.top_method - ETree.SubElement(extrap, 'TopMethod', type='char').text = temp + ETree.SubElement(extrap, "TopMethod", type="char").text = temp # (4) BottomMethod Node temp = self.transects[self.checked_transect_idx[0]].extrap.bot_method - ETree.SubElement(extrap, 'BottomMethod', type='char').text = temp + ETree.SubElement(extrap, "BottomMethod", 
type="char").text = temp # (4) Exponent Node temp = self.transects[self.checked_transect_idx[0]].extrap.exponent - ETree.SubElement(extrap, 'Exponent', type='double').text = '{:.4f}'.format(temp) + ETree.SubElement(extrap, "Exponent", type="double").text = "{:.4f}".format(temp) # (4) Discharge weighted medians temp = self.extrap_fit.use_weighted if temp: - temp = 'Yes' + temp = "Yes" else: - temp = 'No' - ETree.SubElement(extrap, 'UseWeighted', type='char').text = temp + temp = "No" + ETree.SubElement(extrap, "UseWeighted", type="char").text = temp # (3) Sensor Node - sensor = ETree.SubElement(processing, 'Sensor') + sensor = ETree.SubElement(processing, "Sensor") # (4) TemperatureSource Node temp = [] @@ -3069,23 +3694,26 @@ class Measurement(object): temp.append(n.sensors.temperature_deg_c.selected) sources = len(set(temp)) if sources > 1: - temp = 'Varies' + temp = "Varies" else: temp = temp[0] - ETree.SubElement(sensor, 'TemperatureSource', type='char').text = temp + ETree.SubElement(sensor, "TemperatureSource", type="char").text = temp # (4) Salinity temp = np.array([]) for transect in self.transects: if transect.checked: - sal_selected = getattr(transect.sensors.salinity_ppt, transect.sensors.salinity_ppt.selected) + sal_selected = getattr( + transect.sensors.salinity_ppt, + transect.sensors.salinity_ppt.selected, + ) temp = np.append(temp, sal_selected.data) values = np.unique(temp) if len(values) > 1: - temp = 'Varies' + temp = "Varies" else: - temp = '{:2.1f}'.format(values[0]) - ETree.SubElement(sensor, 'Salinity', type='char', unitsCode='ppt').text = temp + temp = "{:2.1f}".format(values[0]) + ETree.SubElement(sensor, "Salinity", type="char", unitsCode="ppt").text = temp # (4) SpeedofSound Node temp = [] @@ -3094,344 +3722,489 @@ class Measurement(object): temp.append(n.sensors.speed_of_sound_mps.selected) sources = len(set(temp)) if sources > 1: - temp = 'Varies' + temp = "Varies" else: temp = temp[0] - if temp == 'internal': - temp = 'ADCP' - ETree.SubElement(sensor, 'SpeedofSound', type='char', unitsCode='mps').text = temp + if temp == "internal": + temp = "ADCP" + ETree.SubElement( + sensor, "SpeedofSound", type="char", unitsCode="mps" + ).text = temp # (2) Transect Node other_prop = self.compute_measurement_properties(self) for n in range(len(self.transects)): if self.transects[n].checked: - transect = ETree.SubElement(channel, 'Transect') + transect = ETree.SubElement(channel, "Transect") # (3) Filename Node temp = self.transects[n].file_name - ETree.SubElement(transect, 'Filename', type='char').text = temp + ETree.SubElement(transect, "Filename", type="char").text = temp # (3) StartDateTime Node temp = int(self.transects[n].date_time.start_serial_time) - temp = datetime.datetime.utcfromtimestamp(temp).strftime('%m/%d/%Y %H:%M:%S') - ETree.SubElement(transect, 'StartDateTime', type='char').text = temp + temp = datetime.datetime.utcfromtimestamp(temp).strftime( + "%m/%d/%Y %H:%M:%S" + ) + ETree.SubElement(transect, "StartDateTime", type="char").text = temp # (3) EndDateTime Node temp = int(self.transects[n].date_time.end_serial_time) - temp = datetime.datetime.utcfromtimestamp(temp).strftime('%m/%d/%Y %H:%M:%S') - ETree.SubElement(transect, 'EndDateTime', type='char').text = temp + temp = datetime.datetime.utcfromtimestamp(temp).strftime( + "%m/%d/%Y %H:%M:%S" + ) + ETree.SubElement(transect, "EndDateTime", type="char").text = temp # (3) Discharge Node - t_q = ETree.SubElement(transect, 'Discharge') + t_q = ETree.SubElement(transect, "Discharge") # (4) Top Node temp = 
self.discharge[n].top - ETree.SubElement(t_q, 'Top', type='double', unitsCode='cms').text = '{:.5f}'.format(temp) + ETree.SubElement( + t_q, "Top", type="double", unitsCode="cms" + ).text = "{:.5f}".format(temp) # (4) Middle Node temp = self.discharge[n].middle - ETree.SubElement(t_q, 'Middle', type='double', unitsCode='cms').text = '{:.5f}'.format(temp) + ETree.SubElement( + t_q, "Middle", type="double", unitsCode="cms" + ).text = "{:.5f}".format(temp) # (4) Bottom Node temp = self.discharge[n].bottom - ETree.SubElement(t_q, 'Bottom', type='double', unitsCode='cms').text = '{:.5f}'.format(temp) + ETree.SubElement( + t_q, "Bottom", type="double", unitsCode="cms" + ).text = "{:.5f}".format(temp) # (4) Left Node temp = self.discharge[n].left - ETree.SubElement(t_q, 'Left', type='double', unitsCode='cms').text = '{:.5f}'.format(temp) + ETree.SubElement( + t_q, "Left", type="double", unitsCode="cms" + ).text = "{:.5f}".format(temp) # (4) Right Node temp = self.discharge[n].right - ETree.SubElement(t_q, 'Right', type='double', unitsCode='cms').text = '{:.5f}'.format(temp) + ETree.SubElement( + t_q, "Right", type="double", unitsCode="cms" + ).text = "{:.5f}".format(temp) # (4) Total Node temp = self.discharge[n].total - ETree.SubElement(t_q, 'Total', type='double', unitsCode='cms').text = '{:.5f}'.format(temp) + ETree.SubElement( + t_q, "Total", type="double", unitsCode="cms" + ).text = "{:.5f}".format(temp) # (4) MovingBedPercentCorrection Node - temp = ((self.discharge[n].total / self.discharge[n].total_uncorrected) - 1) * 100 - ETree.SubElement(t_q, 'MovingBedPercentCorrection', type='double').text = '{:.2f}'.format(temp) + temp = ( + (self.discharge[n].total / self.discharge[n].total_uncorrected) - 1 + ) * 100 + ETree.SubElement( + t_q, "MovingBedPercentCorrection", type="double" + ).text = "{:.2f}".format(temp) # (3) Edge Node - t_edge = ETree.SubElement(transect, 'Edge') + t_edge = ETree.SubElement(transect, "Edge") # (4) StartEdge Node temp = self.transects[n].start_edge - ETree.SubElement(t_edge, 'StartEdge', type='char').text = temp + ETree.SubElement(t_edge, "StartEdge", type="char").text = temp # (4) RectangularEdgeMethod Node temp = self.transects[n].edges.rec_edge_method - ETree.SubElement(t_edge, 'RectangularEdgeMethod', type='char').text = temp + ETree.SubElement( + t_edge, "RectangularEdgeMethod", type="char" + ).text = temp # (4) VelocityMethod Node temp = self.transects[n].edges.vel_method - ETree.SubElement(t_edge, 'VelocityMethod', type='char').text = temp + ETree.SubElement(t_edge, "VelocityMethod", type="char").text = temp # (4) LeftType Node temp = self.transects[n].edges.left.type - ETree.SubElement(t_edge, 'LeftType', type='char').text = temp + ETree.SubElement(t_edge, "LeftType", type="char").text = temp # (4) LeftEdgeCoefficient Node - if temp == 'User Q': - temp = '' + if temp == "User Q": + temp = "" else: - temp = '{:.4f}'.format(QComp.edge_coef('left', self.transects[n])) - ETree.SubElement(t_edge, 'LeftEdgeCoefficient', type='double').text = temp + temp = "{:.4f}".format(QComp.edge_coef("left", self.transects[n])) + ETree.SubElement( + t_edge, "LeftEdgeCoefficient", type="double" + ).text = temp # (4) LeftDistance Node - temp = '{:.4f}'.format(self.transects[n].edges.left.distance_m) - ETree.SubElement(t_edge, 'LeftDistance', type='double', unitsCode='m').text = temp + temp = "{:.4f}".format(self.transects[n].edges.left.distance_m) + ETree.SubElement( + t_edge, "LeftDistance", type="double", unitsCode="m" + ).text = temp # (4) LeftNumberEnsembles - temp = 
'{:.0f}'.format(self.transects[n].edges.left.number_ensembles) - ETree.SubElement(t_edge, 'LeftNumberEnsembles', type='double').text = temp + temp = "{:.0f}".format(self.transects[n].edges.left.number_ensembles) + ETree.SubElement( + t_edge, "LeftNumberEnsembles", type="double" + ).text = temp # (4) RightType Node temp = self.transects[n].edges.right.type - ETree.SubElement(t_edge, 'RightType', type='char').text = temp + ETree.SubElement(t_edge, "RightType", type="char").text = temp # (4) RightEdgeCoefficient Node - if temp == 'User Q': - temp = '' + if temp == "User Q": + temp = "" else: - temp = '{:.4f}'.format(QComp.edge_coef('right', self.transects[n])) - ETree.SubElement(t_edge, 'RightEdgeCoefficient', type='double').text = temp + temp = "{:.4f}".format(QComp.edge_coef("right", self.transects[n])) + ETree.SubElement( + t_edge, "RightEdgeCoefficient", type="double" + ).text = temp # (4) RightDistance Node - temp = '{:.4f}'.format(self.transects[n].edges.right.distance_m) - ETree.SubElement(t_edge, 'RightDistance', type='double', unitsCode='m').text = temp + temp = "{:.4f}".format(self.transects[n].edges.right.distance_m) + ETree.SubElement( + t_edge, "RightDistance", type="double", unitsCode="m" + ).text = temp # (4) RightNumberEnsembles Node - temp = '{:.0f}'.format(self.transects[n].edges.right.number_ensembles) - ETree.SubElement(t_edge, 'RightNumberEnsembles', type='double').text = temp + temp = "{:.0f}".format(self.transects[n].edges.right.number_ensembles) + ETree.SubElement( + t_edge, "RightNumberEnsembles", type="double" + ).text = temp # (3) Sensor Node - t_sensor = ETree.SubElement(transect, 'Sensor') + t_sensor = ETree.SubElement(transect, "Sensor") # (4) TemperatureSource Node temp = self.transects[n].sensors.temperature_deg_c.selected - ETree.SubElement(t_sensor, 'TemperatureSource', type='char').text = temp + ETree.SubElement(t_sensor, "TemperatureSource", type="char").text = temp # (4) MeanTemperature Node - dat = getattr(self.transects[n].sensors.temperature_deg_c, - self.transects[n].sensors.temperature_deg_c.selected) + dat = getattr( + self.transects[n].sensors.temperature_deg_c, + self.transects[n].sensors.temperature_deg_c.selected, + ) temp = np.nanmean(dat.data) - temp = '{:.2f}'.format(temp) - ETree.SubElement(t_sensor, 'MeanTemperature', type='double', unitsCode='degC').text = temp + temp = "{:.2f}".format(temp) + ETree.SubElement( + t_sensor, "MeanTemperature", type="double", unitsCode="degC" + ).text = temp # (4) MeanSalinity - sal_data = getattr(self.transects[n].sensors.salinity_ppt, - self.transects[n].sensors.salinity_ppt.selected) - temp = '{:.0f}'.format(np.nanmean(sal_data.data)) - ETree.SubElement(t_sensor, 'MeanSalinity', type='double', unitsCode='ppt').text = temp + sal_data = getattr( + self.transects[n].sensors.salinity_ppt, + self.transects[n].sensors.salinity_ppt.selected, + ) + temp = "{:.0f}".format(np.nanmean(sal_data.data)) + ETree.SubElement( + t_sensor, "MeanSalinity", type="double", unitsCode="ppt" + ).text = temp # (4) SpeedofSoundSource Node - sos_selected = getattr(self.transects[n].sensors.speed_of_sound_mps, - self.transects[n].sensors.speed_of_sound_mps.selected) + sos_selected = getattr( + self.transects[n].sensors.speed_of_sound_mps, + self.transects[n].sensors.speed_of_sound_mps.selected, + ) temp = sos_selected.source - ETree.SubElement(t_sensor, 'SpeedofSoundSource', type='char').text = temp + ETree.SubElement( + t_sensor, "SpeedofSoundSource", type="char" + ).text = temp # (4) SpeedofSound - sos_data = 
getattr(self.transects[n].sensors.speed_of_sound_mps, - self.transects[n].sensors.speed_of_sound_mps.selected) - temp = '{:.4f}'.format(np.nanmean(sos_data.data)) - ETree.SubElement(t_sensor, 'SpeedofSound', type='double', unitsCode='mps').text = temp + sos_data = getattr( + self.transects[n].sensors.speed_of_sound_mps, + self.transects[n].sensors.speed_of_sound_mps.selected, + ) + temp = "{:.4f}".format(np.nanmean(sos_data.data)) + ETree.SubElement( + t_sensor, "SpeedofSound", type="double", unitsCode="mps" + ).text = temp # (3) Other Node - t_other = ETree.SubElement(transect, 'Other') + t_other = ETree.SubElement(transect, "Other") # (4) Duration Node - temp = '{:.2f}'.format(self.transects[n].date_time.transect_duration_sec) - ETree.SubElement(t_other, 'Duration', type='double', unitsCode='sec').text = temp + temp = "{:.2f}".format( + self.transects[n].date_time.transect_duration_sec + ) + ETree.SubElement( + t_other, "Duration", type="double", unitsCode="sec" + ).text = temp # (4) Width - temp = other_prop['width'][n] - ETree.SubElement(t_other, 'Width', type='double', unitsCode='m').text = '{:.4f}'.format(temp) + temp = other_prop["width"][n] + ETree.SubElement( + t_other, "Width", type="double", unitsCode="m" + ).text = "{:.4f}".format(temp) # (4) Area - temp = other_prop['area'][n] - ETree.SubElement(t_other, 'Area', type='double', unitsCode='sqm').text = '{:.4f}'.format(temp) + temp = other_prop["area"][n] + ETree.SubElement( + t_other, "Area", type="double", unitsCode="sqm" + ).text = "{:.4f}".format(temp) # (4) MeanBoatSpeed - temp = other_prop['avg_boat_speed'][n] - ETree.SubElement(t_other, 'MeanBoatSpeed', type='double', unitsCode='mps').text = '{:.4f}'.format(temp) + temp = other_prop["avg_boat_speed"][n] + ETree.SubElement( + t_other, "MeanBoatSpeed", type="double", unitsCode="mps" + ).text = "{:.4f}".format(temp) # (4) QoverA - temp = other_prop['avg_water_speed'][n] - ETree.SubElement(t_other, 'QoverA', type='double', unitsCode='mps').text = '{:.4f}'.format(temp) + temp = other_prop["avg_water_speed"][n] + ETree.SubElement( + t_other, "QoverA", type="double", unitsCode="mps" + ).text = "{:.4f}".format(temp) # (4) CourseMadeGood - temp = other_prop['avg_boat_course'][n] - ETree.SubElement(t_other, 'CourseMadeGood', type='double', unitsCode='deg').text = '{:.2f}'.format(temp) + temp = other_prop["avg_boat_course"][n] + ETree.SubElement( + t_other, "CourseMadeGood", type="double", unitsCode="deg" + ).text = "{:.2f}".format(temp) # (4) MeanFlowDirection - temp = other_prop['avg_water_dir'][n] - ETree.SubElement(t_other, 'MeanFlowDirection', type='double', - unitsCode='deg').text = '{:.2f}'.format(temp) + temp = other_prop["avg_water_dir"][n] + ETree.SubElement( + t_other, "MeanFlowDirection", type="double", unitsCode="deg" + ).text = "{:.2f}".format(temp) # (4) NumberofEnsembles temp = len(self.transects[n].boat_vel.bt_vel.u_processed_mps) - ETree.SubElement(t_other, 'NumberofEnsembles', type='integer').text = str(temp) + ETree.SubElement( + t_other, "NumberofEnsembles", type="integer" + ).text = str(temp) # (4) PercentInvalidBins valid_ens, valid_cells = TransectData.raw_valid_data(self.transects[n]) - temp = (1 - (np.nansum(np.nansum(valid_cells)) - / np.nansum(np.nansum(self.transects[n].w_vel.cells_above_sl)))) * 100 - ETree.SubElement(t_other, 'PercentInvalidBins', type='double').text = '{:.2f}'.format(temp) + temp = ( + 1 + - ( + np.nansum(np.nansum(valid_cells)) + / np.nansum(np.nansum(self.transects[n].w_vel.cells_above_sl)) + ) + ) * 100 + ETree.SubElement( + 
t_other, "PercentInvalidBins", type="double" + ).text = "{:.2f}".format(temp) # (4) PercentInvalidEnsembles - temp = (1 - (np.nansum(valid_ens) / len(self.transects[n].boat_vel.bt_vel.u_processed_mps))) * 100 - ETree.SubElement(t_other, 'PercentInvalidEns', type='double').text = '{:.2f}'.format(temp) - - pitch_source_selected = getattr(self.transects[n].sensors.pitch_deg, - self.transects[n].sensors.pitch_deg.selected) - roll_source_selected = getattr(self.transects[n].sensors.roll_deg, - self.transects[n].sensors.roll_deg.selected) + temp = ( + 1 + - ( + np.nansum(valid_ens) + / len(self.transects[n].boat_vel.bt_vel.u_processed_mps) + ) + ) * 100 + ETree.SubElement( + t_other, "PercentInvalidEns", type="double" + ).text = "{:.2f}".format(temp) + + pitch_source_selected = getattr( + self.transects[n].sensors.pitch_deg, + self.transects[n].sensors.pitch_deg.selected, + ) + roll_source_selected = getattr( + self.transects[n].sensors.roll_deg, + self.transects[n].sensors.roll_deg.selected, + ) # (4) MeanPitch temp = np.nanmean(pitch_source_selected.data) - ETree.SubElement(t_other, 'MeanPitch', type='double', unitsCode='deg').text = '{:.2f}'.format(temp) + ETree.SubElement( + t_other, "MeanPitch", type="double", unitsCode="deg" + ).text = "{:.2f}".format(temp) # (4) MeanRoll temp = np.nanmean(roll_source_selected.data) - ETree.SubElement(t_other, 'MeanRoll', type='double', unitsCode='deg').text = '{:.2f}'.format(temp) + ETree.SubElement( + t_other, "MeanRoll", type="double", unitsCode="deg" + ).text = "{:.2f}".format(temp) # (4) PitchStdDev temp = np.nanstd(pitch_source_selected.data, ddof=1) - ETree.SubElement(t_other, 'PitchStdDev', type='double', unitsCode='deg').text = '{:.2f}'.format(temp) + ETree.SubElement( + t_other, "PitchStdDev", type="double", unitsCode="deg" + ).text = "{:.2f}".format(temp) # (4) RollStdDev temp = np.nanstd(roll_source_selected.data, ddof=1) - ETree.SubElement(t_other, 'RollStdDev', type='double', unitsCode='deg').text = '{:.2f}'.format(temp) + ETree.SubElement( + t_other, "RollStdDev", type="double", unitsCode="deg" + ).text = "{:.2f}".format(temp) # (4) ADCPDepth - depth_source_selected = getattr(self.transects[n].depths, - self.transects[n].depths.selected) + depth_source_selected = getattr( + self.transects[n].depths, self.transects[n].depths.selected + ) temp = depth_source_selected.draft_use_m - ETree.SubElement(t_other, 'ADCPDepth', type='double', unitsCode='m').text = '{:.4f}'.format(temp) + ETree.SubElement( + t_other, "ADCPDepth", type="double", unitsCode="m" + ).text = "{:.4f}".format(temp) # (2) ChannelSummary Node - summary = ETree.SubElement(channel, 'ChannelSummary') + summary = ETree.SubElement(channel, "ChannelSummary") # (3) Discharge Node - s_q = ETree.SubElement(summary, 'Discharge') + s_q = ETree.SubElement(summary, "Discharge") discharge = self.mean_discharges(self) # (4) Top - temp = discharge['top_mean'] - ETree.SubElement(s_q, 'Top', type='double', unitsCode='cms').text = '{:.5f}'.format(temp) + temp = discharge["top_mean"] + ETree.SubElement( + s_q, "Top", type="double", unitsCode="cms" + ).text = "{:.5f}".format(temp) # (4) Middle - temp = discharge['mid_mean'] - ETree.SubElement(s_q, 'Middle', type='double', unitsCode='cms').text = '{:.5f}'.format(temp) + temp = discharge["mid_mean"] + ETree.SubElement( + s_q, "Middle", type="double", unitsCode="cms" + ).text = "{:.5f}".format(temp) # (4) Bottom - temp = discharge['bot_mean'] - ETree.SubElement(s_q, 'Bottom', type='double', unitsCode='cms').text = '{:.5f}'.format(temp) + temp = 
discharge["bot_mean"] + ETree.SubElement( + s_q, "Bottom", type="double", unitsCode="cms" + ).text = "{:.5f}".format(temp) # (4) Left - temp = discharge['left_mean'] - ETree.SubElement(s_q, 'Left', type='double', unitsCode='cms').text = '{:.5f}'.format(temp) + temp = discharge["left_mean"] + ETree.SubElement( + s_q, "Left", type="double", unitsCode="cms" + ).text = "{:.5f}".format(temp) # (4) Right - temp = discharge['right_mean'] - ETree.SubElement(s_q, 'Right', type='double', unitsCode='cms').text = '{:.5f}'.format(temp) + temp = discharge["right_mean"] + ETree.SubElement( + s_q, "Right", type="double", unitsCode="cms" + ).text = "{:.5f}".format(temp) # (4) Total - temp = discharge['total_mean'] - ETree.SubElement(s_q, 'Total', type='double', unitsCode='cms').text = '{:.5f}'.format(temp) + temp = discharge["total_mean"] + ETree.SubElement( + s_q, "Total", type="double", unitsCode="cms" + ).text = "{:.5f}".format(temp) # (4) MovingBedPercentCorrection - temp = ((discharge['total_mean'] / discharge['uncorrected_mean']) - 1) * 100 - ETree.SubElement(s_q, 'MovingBedPercentCorrection', type='double').text = '{:.2f}'.format(temp) + temp = ((discharge["total_mean"] / discharge["uncorrected_mean"]) - 1) * 100 + ETree.SubElement( + s_q, "MovingBedPercentCorrection", type="double" + ).text = "{:.2f}".format(temp) # (3) Uncertainty Node - s_u = ETree.SubElement(summary, 'Uncertainty') + s_u = ETree.SubElement(summary, "Uncertainty") if self.run_oursin: - u_total = self.oursin.u_measurement_user['total_95'][0] - u_model = 'OURSIN' + u_total = self.oursin.u_measurement_user["total_95"][0] + u_model = "OURSIN" else: u_total = self.uncertainty.total_95_user - u_model = 'QRevUA' + u_model = "QRevUA" if not np.isnan(temp): - ETree.SubElement(s_u, 'Total', type='double').text = '{:.1f}'.format(u_total) - ETree.SubElement(s_u, 'Model', type='char').text = u_model + ETree.SubElement(s_u, "Total", type="double").text = "{:.1f}".format( + u_total + ) + ETree.SubElement(s_u, "Model", type="char").text = u_model # (3) QRev_UA Uncertainty Node if self.uncertainty is not None: - s_qu = ETree.SubElement(summary, 'QRevUAUncertainty') + s_qu = ETree.SubElement(summary, "QRevUAUncertainty") uncertainty = self.uncertainty # (4) COV Node temp = uncertainty.cov if not np.isnan(temp): - ETree.SubElement(s_qu, 'COV', type='double').text = '{:.1f}'.format(temp) + ETree.SubElement(s_qu, "COV", type="double").text = "{:.1f}".format( + temp + ) # (4) AutoRandom Node temp = uncertainty.cov_95 if not np.isnan(temp): - ETree.SubElement(s_qu, 'AutoRandom', type='double').text = '{:.1f}'.format(temp) + ETree.SubElement( + s_qu, "AutoRandom", type="double" + ).text = "{:.1f}".format(temp) # (4) AutoInvalidData Node temp = uncertainty.invalid_95 - ETree.SubElement(s_qu, 'AutoInvalidData', type='double').text = '{:.1f}'.format(temp) + ETree.SubElement( + s_qu, "AutoInvalidData", type="double" + ).text = "{:.1f}".format(temp) # (4) AutoEdge Node temp = uncertainty.edges_95 - ETree.SubElement(s_qu, 'AutoEdge', type='double').text = '{:.1f}'.format(temp) + ETree.SubElement(s_qu, "AutoEdge", type="double").text = "{:.1f}".format( + temp + ) # (4) AutoExtrapolation Node temp = uncertainty.extrapolation_95 - ETree.SubElement(s_qu, 'AutoExtrapolation', type='double').text = '{:.1f}'.format(temp) + ETree.SubElement( + s_qu, "AutoExtrapolation", type="double" + ).text = "{:.1f}".format(temp) # (4) AutoMovingBed temp = uncertainty.moving_bed_95 - ETree.SubElement(s_qu, 'AutoMovingBed', type='double').text = '{:.1f}'.format(temp) + 
ETree.SubElement( + s_qu, "AutoMovingBed", type="double" + ).text = "{:.1f}".format(temp) # (4) AutoSystematic temp = uncertainty.systematic - ETree.SubElement(s_qu, 'AutoSystematic', type='double').text = '{:.1f}'.format(temp) + ETree.SubElement( + s_qu, "AutoSystematic", type="double" + ).text = "{:.1f}".format(temp) # (4) AutoTotal temp = uncertainty.total_95 if not np.isnan(temp): - ETree.SubElement(s_qu, 'AutoTotal', type='double').text = '{:.1f}'.format(temp) + ETree.SubElement( + s_qu, "AutoTotal", type="double" + ).text = "{:.1f}".format(temp) # (4) UserRandom Node user_random = uncertainty.cov_95_user if user_random: - ETree.SubElement(s_qu, 'UserRandom', type='double').text = '{:.1f}'.format(user_random) + ETree.SubElement( + s_qu, "UserRandom", type="double" + ).text = "{:.1f}".format(user_random) # (4) UserInvalidData Node user_invalid = uncertainty.invalid_95_user if user_invalid: - ETree.SubElement(s_qu, 'UserInvalidData', type='double').text = '{:.1f}'.format(user_invalid) + ETree.SubElement( + s_qu, "UserInvalidData", type="double" + ).text = "{:.1f}".format(user_invalid) # (4) UserEdge user_edge = uncertainty.edges_95_user if user_edge: - ETree.SubElement(s_qu, 'UserEdge', type='double').text = '{:.1f}'.format(user_edge) + ETree.SubElement( + s_qu, "UserEdge", type="double" + ).text = "{:.1f}".format(user_edge) # (4) UserExtrapolation user_extrap = uncertainty.extrapolation_95_user if user_extrap: - ETree.SubElement(s_qu, 'UserExtrapolation', type='double').text = '{:.1f}'.format(user_extrap) + ETree.SubElement( + s_qu, "UserExtrapolation", type="double" + ).text = "{:.1f}".format(user_extrap) # (4) UserMovingBed user_mb = uncertainty.moving_bed_95_user if user_mb: - ETree.SubElement(s_qu, 'UserMovingBed', type='double').text = '{:.1f}'.format(user_mb) + ETree.SubElement( + s_qu, "UserMovingBed", type="double" + ).text = "{:.1f}".format(user_mb) # (4) UserSystematic user_systematic = uncertainty.systematic_user if user_systematic: - ETree.SubElement(s_qu, 'UserSystematic', type='double').text = '{:.1f}'.format(user_systematic) + ETree.SubElement( + s_qu, "UserSystematic", type="double" + ).text = "{:.1f}".format(user_systematic) # (4) UserTotal Node temp = uncertainty.total_95_user if not np.isnan(temp): - ETree.SubElement(s_qu, 'UserTotal', type='double').text = '{:.1f}'.format(temp) + ETree.SubElement( + s_qu, "UserTotal", type="double" + ).text = "{:.1f}".format(temp) # (4) Random if user_random: @@ -3439,434 +4212,678 @@ class Measurement(object): else: temp = uncertainty.cov_95 if not np.isnan(temp): - ETree.SubElement(s_qu, 'Random', type='double').text = '{:.1f}'.format(temp) + ETree.SubElement(s_qu, "Random", type="double").text = "{:.1f}".format( + temp + ) # (4) InvalidData if user_invalid: temp = user_invalid else: temp = uncertainty.invalid_95 - ETree.SubElement(s_qu, 'InvalidData', type='double').text = '{:.1f}'.format(temp) + ETree.SubElement(s_qu, "InvalidData", type="double").text = "{:.1f}".format( + temp + ) # (4) Edge if user_edge: temp = user_edge else: temp = uncertainty.edges_95 - ETree.SubElement(s_qu, 'Edge', type='double').text = '{:.1f}'.format(temp) + ETree.SubElement(s_qu, "Edge", type="double").text = "{:.1f}".format(temp) # (4) Extrapolation if user_extrap: temp = user_extrap else: temp = uncertainty.extrapolation_95 - ETree.SubElement(s_qu, 'Extrapolation', type='double').text = '{:.1f}'.format(temp) + ETree.SubElement( + s_qu, "Extrapolation", type="double" + ).text = "{:.1f}".format(temp) # (4) MovingBed if user_mb: temp = user_mb else: 
temp = uncertainty.moving_bed_95
- ETree.SubElement(s_qu, 'MovingBed', type='double').text = '{:.1f}'.format(temp)
+ ETree.SubElement(s_qu, "MovingBed", type="double").text = "{:.1f}".format(
+ temp
+ )

 # (4) Systematic
 if user_systematic:
 temp = user_systematic
 else:
 temp = uncertainty.systematic
- ETree.SubElement(s_qu, 'Systematic', type='double').text = '{:.1f}'.format(temp)
+ ETree.SubElement(s_qu, "Systematic", type="double").text = "{:.1f}".format(
+ temp
+ )

 # (4) Total Node
 temp = uncertainty.total_95_user
 if not np.isnan(temp):
- ETree.SubElement(s_qu, 'Total', type='double').text = '{:.1f}'.format(temp)
+ ETree.SubElement(s_qu, "Total", type="double").text = "{:.1f}".format(
+ temp
+ )

 # Oursin Uncertainty Node
 if self.oursin is not None:
 # (3) Uncertainty Node
- s_ou = ETree.SubElement(summary, 'OursinUncertainty')
+ s_ou = ETree.SubElement(summary, "OursinUncertainty")
 oursin = self.oursin

 # (4) System Node
- temp = oursin.u_measurement['u_syst'][0]
+ temp = oursin.u_measurement["u_syst"][0]
 if not np.isnan(temp):
- ETree.SubElement(s_ou, 'System', type='double').text = '{:.2f}'.format(temp)
+ ETree.SubElement(s_ou, "System", type="double").text = "{:.2f}".format(
+ temp
+ )

 # (4) Compass Node
- temp = oursin.u_measurement['u_compass'][0]
+ temp = oursin.u_measurement["u_compass"][0]
 if not np.isnan(temp):
- ETree.SubElement(s_ou, 'Compass', type='double').text = '{:.2f}'.format(temp)
+ ETree.SubElement(s_ou, "Compass", type="double").text = "{:.2f}".format(
+ temp
+ )

 # (4) Moving-bed Node
- temp = oursin.u_measurement['u_movbed'][0]
+ temp = oursin.u_measurement["u_movbed"][0]
 if not np.isnan(temp):
- ETree.SubElement(s_ou, 'MovingBed', type='double').text = '{:.2f}'.format(temp)
+ ETree.SubElement(
+ s_ou, "MovingBed", type="double"
+ ).text = "{:.2f}".format(temp)

 # (4) Ensembles Node
- temp = oursin.u_measurement['u_ens'][0]
+ temp = oursin.u_measurement["u_ens"][0]
 if not np.isnan(temp):
- ETree.SubElement(s_ou, 'Ensembles', type='double').text = '{:.2f}'.format(temp)
+ ETree.SubElement(
+ s_ou, "Ensembles", type="double"
+ ).text = "{:.2f}".format(temp)

 # (4) Measured Node
- temp = oursin.u_measurement['u_meas'][0]
+ temp = oursin.u_measurement["u_meas"][0]
 if not np.isnan(temp):
- ETree.SubElement(s_ou, 'Measured', type='double').text = '{:.2f}'.format(temp)
+ ETree.SubElement(
+ s_ou, "Measured", type="double"
+ ).text = "{:.2f}".format(temp)

 # (4) Top Node
- temp = oursin.u_measurement['u_top'][0]
+ temp = oursin.u_measurement["u_top"][0]
 if not np.isnan(temp):
- ETree.SubElement(s_ou, 'Top', type='double').text = '{:.2f}'.format(temp)
+ ETree.SubElement(s_ou, "Top", type="double").text = "{:.2f}".format(
+ temp
+ )

 # (4) Bottom Node
- temp = oursin.u_measurement['u_bot'][0]
+ temp = oursin.u_measurement["u_bot"][0]
 if not np.isnan(temp):
- ETree.SubElement(s_ou, 'Bottom', type='double').text = '{:.2f}'.format(temp)
+ ETree.SubElement(s_ou, "Bottom", type="double").text = "{:.2f}".format(
+ temp
+ )

 # (4) Left Node
- temp = oursin.u_measurement['u_left'][0]
+ temp = oursin.u_measurement["u_left"][0]
 if not np.isnan(temp):
- ETree.SubElement(s_ou, 'Left', type='double').text = '{:.2f}'.format(temp)
+ ETree.SubElement(s_ou, "Left", type="double").text = "{:.2f}".format(
+ temp
+ )

 # (4) Right Node
- temp = oursin.u_measurement['u_right'][0]
+ temp = oursin.u_measurement["u_right"][0]
 if not np.isnan(temp):
- ETree.SubElement(s_ou, 'Right', type='double').text = '{:.2f}'.format(temp)
+ ETree.SubElement(s_ou, "Right", type="double").text = "{:.2f}".format(
+ temp
+ 
) # (4) Invalid Boat Node - temp = oursin.u_measurement['u_boat'][0] + temp = oursin.u_measurement["u_boat"][0] if not np.isnan(temp): - ETree.SubElement(s_ou, 'InvalidBoat', type='double').text = '{:.2f}'.format(temp) + ETree.SubElement( + s_ou, "InvalidBoat", type="double" + ).text = "{:.2f}".format(temp) # (4) Invalid Depth Node - temp = oursin.u_measurement['u_depth'][0] + temp = oursin.u_measurement["u_depth"][0] if not np.isnan(temp): - ETree.SubElement(s_ou, 'InvalidDepth', type='double').text = '{:.2f}'.format(temp) + ETree.SubElement( + s_ou, "InvalidDepth", type="double" + ).text = "{:.2f}".format(temp) # (4) Invalid Water Node - temp = oursin.u_measurement['u_water'][0] + temp = oursin.u_measurement["u_water"][0] if not np.isnan(temp): - ETree.SubElement(s_ou, 'InvalidWater', type='double').text = '{:.2f}'.format(temp) + ETree.SubElement( + s_ou, "InvalidWater", type="double" + ).text = "{:.2f}".format(temp) # (4) COV Node - temp = oursin.u_measurement['u_cov'][0] + temp = oursin.u_measurement["u_cov"][0] if not np.isnan(temp): - ETree.SubElement(s_ou, 'COV', type='double').text = '{:.2f}'.format(temp) + ETree.SubElement(s_ou, "COV", type="double").text = "{:.2f}".format( + temp + ) # (4) Auto Total 95% Node - temp = oursin.u_measurement['total_95'][0] + temp = oursin.u_measurement["total_95"][0] if not np.isnan(temp): - ETree.SubElement(s_ou, 'AutoTotal95', type='double').text = '{:.2f}'.format(temp) + ETree.SubElement( + s_ou, "AutoTotal95", type="double" + ).text = "{:.2f}".format(temp) # (4) Extrapolation Power/Power Minimum - temp = oursin.default_advanced_settings['exp_pp_min'] + temp = oursin.default_advanced_settings["exp_pp_min"] if type(temp) is float: - ETree.SubElement(s_ou, 'ExtrapPPMin', type='double').text = '{:.2f}'.format(temp) + ETree.SubElement( + s_ou, "ExtrapPPMin", type="double" + ).text = "{:.2f}".format(temp) else: - ETree.SubElement(s_ou, 'ExtrapPPMin', type='char').text = temp + ETree.SubElement(s_ou, "ExtrapPPMin", type="char").text = temp # (4) Extrapolation Power/Power Maximum - temp = oursin.default_advanced_settings['exp_pp_max'] + temp = oursin.default_advanced_settings["exp_pp_max"] if type(temp) is float: - ETree.SubElement(s_ou, 'ExtrapPPMax', type='double').text = '{:.2f}'.format(temp) + ETree.SubElement( + s_ou, "ExtrapPPMax", type="double" + ).text = "{:.2f}".format(temp) else: - ETree.SubElement(s_ou, 'ExtrapPPMax', type='char').text = temp + ETree.SubElement(s_ou, "ExtrapPPMax", type="char").text = temp # (4) Extrapolation No Slip Minimum - temp = oursin.default_advanced_settings['exp_ns_min'] + temp = oursin.default_advanced_settings["exp_ns_min"] if type(temp) is float: - ETree.SubElement(s_ou, 'ExtrapNSMin', type='double').text = '{:.2f}'.format(temp) + ETree.SubElement( + s_ou, "ExtrapNSMin", type="double" + ).text = "{:.2f}".format(temp) else: - ETree.SubElement(s_ou, 'ExtrapNSMin', type='char').text = temp + ETree.SubElement(s_ou, "ExtrapNSMin", type="char").text = temp # (4) Extrapolation No Slip Maximum - temp = oursin.default_advanced_settings['exp_ns_max'] + temp = oursin.default_advanced_settings["exp_ns_max"] if type(temp) is float: - ETree.SubElement(s_ou, 'ExtrapNSMax', type='double').text = '{:.2f}'.format(temp) + ETree.SubElement( + s_ou, "ExtrapNSMax", type="double" + ).text = "{:.2f}".format(temp) else: - ETree.SubElement(s_ou, 'ExtrapNSMax', type='char').text = temp + ETree.SubElement(s_ou, "ExtrapNSMax", type="char").text = temp # (4) Draft error in m - temp = oursin.default_advanced_settings['draft_error_m'] + temp = 
oursin.default_advanced_settings["draft_error_m"] if type(temp) is float: - ETree.SubElement(s_ou, 'DraftErrorm', type='double').text = '{:.2f}'.format(temp) + ETree.SubElement( + s_ou, "DraftErrorm", type="double" + ).text = "{:.2f}".format(temp) else: - ETree.SubElement(s_ou, 'DraftErrorm', type='char').text = temp + ETree.SubElement(s_ou, "DraftErrorm", type="char").text = temp # (4) Bin size error in percent - temp = oursin.default_advanced_settings['dzi_prct'] + temp = oursin.default_advanced_settings["dzi_prct"] if not np.isnan(temp): - ETree.SubElement(s_ou, 'BinErrorPer', type='double').text = '{:.2f}'.format(temp) + ETree.SubElement( + s_ou, "BinErrorPer", type="double" + ).text = "{:.2f}".format(temp) # (4) Right edge distance error in percent - temp = oursin.default_advanced_settings['right_edge_dist_prct'] + temp = oursin.default_advanced_settings["right_edge_dist_prct"] if not np.isnan(temp): - ETree.SubElement(s_ou, 'REdgeDistErrorPer', type='double').text = '{:.2f}'.format(temp) + ETree.SubElement( + s_ou, "REdgeDistErrorPer", type="double" + ).text = "{:.2f}".format(temp) # (4) Left edge distance error in percent - temp = oursin.default_advanced_settings['left_edge_dist_prct'] + temp = oursin.default_advanced_settings["left_edge_dist_prct"] if not np.isnan(temp): - ETree.SubElement(s_ou, 'LEdgeDistErrorPer', type='double').text = '{:.2f}'.format(temp) + ETree.SubElement( + s_ou, "LEdgeDistErrorPer", type="double" + ).text = "{:.2f}".format(temp) # (4) GGA Boat Velocity Error in mps - temp = oursin.default_advanced_settings['gga_boat_mps'] + temp = oursin.default_advanced_settings["gga_boat_mps"] if type(temp) is float: - ETree.SubElement(s_ou, 'GGABoatVelErrormps', type='double').text = '{:.2f}'.format(temp) + ETree.SubElement( + s_ou, "GGABoatVelErrormps", type="double" + ).text = "{:.2f}".format(temp) else: - ETree.SubElement(s_ou, 'GGABoatVelErrormps', type='char').text = temp + ETree.SubElement(s_ou, "GGABoatVelErrormps", type="char").text = temp # (4) VTG Boat Velocity Error in mps - temp = oursin.default_advanced_settings['vtg_boat_mps'] + temp = oursin.default_advanced_settings["vtg_boat_mps"] if not np.isnan(temp): - ETree.SubElement(s_ou, 'VTGBoatVelErrormps', type='double').text = '{:.2f}'.format(temp) + ETree.SubElement( + s_ou, "VTGBoatVelErrormps", type="double" + ).text = "{:.2f}".format(temp) # (4) Compass Error in deg - temp = oursin.default_advanced_settings['compass_error_deg'] + temp = oursin.default_advanced_settings["compass_error_deg"] if not np.isnan(temp): - ETree.SubElement(s_ou, 'CompassErrordeg', type='double').text = '{:.2f}'.format(temp) + ETree.SubElement( + s_ou, "CompassErrordeg", type="double" + ).text = "{:.2f}".format(temp) # (4) Bayesian COV prior in percent - temp = oursin.default_advanced_settings['cov_prior'] + temp = oursin.default_advanced_settings["cov_prior"] if not np.isnan(temp): - ETree.SubElement(s_ou, 'BayesCOVPriorper', type='double').text = '{:.2f}'.format(temp) + ETree.SubElement( + s_ou, "BayesCOVPriorper", type="double" + ).text = "{:.2f}".format(temp) # (4) Bayesian COV prior uncertaint in percent - temp = oursin.default_advanced_settings['cov_prior_u'] + temp = oursin.default_advanced_settings["cov_prior_u"] if not np.isnan(temp): - ETree.SubElement(s_ou, 'BayesCOVPriorUncertaintyper', type='double').text = '{:.2f}'.format(temp) + ETree.SubElement( + s_ou, "BayesCOVPriorUncertaintyper", type="double" + ).text = "{:.2f}".format(temp) # User # (4) System Node - temp = oursin.u_measurement_user['u_syst'][0] + temp = 
oursin.u_measurement_user["u_syst"][0] if not np.isnan(temp): - ETree.SubElement(s_ou, 'SystemUser', type='double').text = '{:.2f}'.format(temp) + ETree.SubElement( + s_ou, "SystemUser", type="double" + ).text = "{:.2f}".format(temp) # (4) Compass Node - temp = oursin.u_measurement_user['u_compass'][0] + temp = oursin.u_measurement_user["u_compass"][0] if not np.isnan(temp): - ETree.SubElement(s_ou, 'CompassUser', type='double').text = '{:.2f}'.format(temp) + ETree.SubElement( + s_ou, "CompassUser", type="double" + ).text = "{:.2f}".format(temp) # (4) Moving-bed Node - temp = oursin.u_measurement_user['u_movbed'][0] + temp = oursin.u_measurement_user["u_movbed"][0] if not np.isnan(temp): - ETree.SubElement(s_ou, 'MovingBedUser', type='double').text = '{:.2f}'.format(temp) + ETree.SubElement( + s_ou, "MovingBedUser", type="double" + ).text = "{:.2f}".format(temp) # (4) Ensembles Node - temp = oursin.u_measurement_user['u_ens'][0] + temp = oursin.u_measurement_user["u_ens"][0] if not np.isnan(temp): - ETree.SubElement(s_ou, 'EnsemblesUser', type='double').text = '{:.2f}'.format(temp) + ETree.SubElement( + s_ou, "EnsemblesUser", type="double" + ).text = "{:.2f}".format(temp) # (4) Measured Node - temp = oursin.u_measurement_user['u_meas'][0] + temp = oursin.u_measurement_user["u_meas"][0] if not np.isnan(temp): - ETree.SubElement(s_ou, 'MeasuredUser', type='double').text = '{:.2f}'.format(temp) + ETree.SubElement( + s_ou, "MeasuredUser", type="double" + ).text = "{:.2f}".format(temp) # (4) Top Node - temp = oursin.u_measurement_user['u_top'][0] + temp = oursin.u_measurement_user["u_top"][0] if not np.isnan(temp): - ETree.SubElement(s_ou, 'TopUser', type='double').text = '{:.2f}'.format(temp) + ETree.SubElement(s_ou, "TopUser", type="double").text = "{:.2f}".format( + temp + ) # (4) Bottom Node - temp = oursin.u_measurement_user['u_bot'][0] + temp = oursin.u_measurement_user["u_bot"][0] if not np.isnan(temp): - ETree.SubElement(s_ou, 'BottomUser', type='double').text = '{:.2f}'.format(temp) + ETree.SubElement( + s_ou, "BottomUser", type="double" + ).text = "{:.2f}".format(temp) # (4) Left Node - temp = oursin.u_measurement_user['u_left'][0] + temp = oursin.u_measurement_user["u_left"][0] if not np.isnan(temp): - ETree.SubElement(s_ou, 'LeftUser', type='double').text = '{:.2f}'.format(temp) + ETree.SubElement( + s_ou, "LeftUser", type="double" + ).text = "{:.2f}".format(temp) # (4) Bottom Node - temp = oursin.u_measurement_user['u_right'][0] + temp = oursin.u_measurement_user["u_right"][0] if not np.isnan(temp): - ETree.SubElement(s_ou, 'RightUser', type='double').text = '{:.2f}'.format(temp) + ETree.SubElement( + s_ou, "RightUser", type="double" + ).text = "{:.2f}".format(temp) # (4) Invalid Boat Node - temp = oursin.u_measurement_user['u_boat'][0] + temp = oursin.u_measurement_user["u_boat"][0] if not np.isnan(temp): - ETree.SubElement(s_ou, 'InvalidBoatUser', type='double').text = '{:.2f}'.format(temp) + ETree.SubElement( + s_ou, "InvalidBoatUser", type="double" + ).text = "{:.2f}".format(temp) # (4) Invalid Depth Node - temp = oursin.u_measurement_user['u_depth'][0] + temp = oursin.u_measurement_user["u_depth"][0] if not np.isnan(temp): - ETree.SubElement(s_ou, 'InvalidDepthUser', type='double').text = '{:.2f}'.format(temp) + ETree.SubElement( + s_ou, "InvalidDepthUser", type="double" + ).text = "{:.2f}".format(temp) # (4) Invalid Water Node - temp = oursin.u_measurement_user['u_water'][0] + temp = oursin.u_measurement_user["u_water"][0] if not np.isnan(temp): - ETree.SubElement(s_ou, 
'InvalidWaterUser', type='double').text = '{:.2f}'.format(temp) + ETree.SubElement( + s_ou, "InvalidWaterUser", type="double" + ).text = "{:.2f}".format(temp) # (4) Auto Total 95% Node - temp = oursin.u_measurement_user['total_95'][0] + temp = oursin.u_measurement_user["total_95"][0] if not np.isnan(temp): - ETree.SubElement(s_ou, 'AutoTotal95User', type='double').text = '{:.2f}'.format(temp) + ETree.SubElement( + s_ou, "AutoTotal95User", type="double" + ).text = "{:.2f}".format(temp) # (4) Extrapolation Power/Power Minimum - temp = oursin.user_advanced_settings['exp_pp_min_user'] + temp = oursin.user_advanced_settings["exp_pp_min_user"] if not np.isnan(temp): - ETree.SubElement(s_ou, 'ExtrapPPMinUser', type='double').text = '{:.2f}'.format(temp) + ETree.SubElement( + s_ou, "ExtrapPPMinUser", type="double" + ).text = "{:.2f}".format(temp) # (4) Extrapolation Power/Power Maximum - temp = oursin.user_advanced_settings['exp_pp_max_user'] + temp = oursin.user_advanced_settings["exp_pp_max_user"] if not np.isnan(temp): - ETree.SubElement(s_ou, 'ExtrapPPMaxUser', type='double').text = '{:.2f}'.format(temp) + ETree.SubElement( + s_ou, "ExtrapPPMaxUser", type="double" + ).text = "{:.2f}".format(temp) # (4) Extrapolation No Slip Minimum - temp = oursin.user_advanced_settings['exp_ns_min_user'] + temp = oursin.user_advanced_settings["exp_ns_min_user"] if not np.isnan(temp): - ETree.SubElement(s_ou, 'ExtrapNSMinUser', type='double').text = '{:.2f}'.format(temp) + ETree.SubElement( + s_ou, "ExtrapNSMinUser", type="double" + ).text = "{:.2f}".format(temp) # (4) Extrapolation No Slip Maximum - temp = oursin.user_advanced_settings['exp_ns_max_user'] + temp = oursin.user_advanced_settings["exp_ns_max_user"] if not np.isnan(temp): - ETree.SubElement(s_ou, 'ExtrapNSMaxUser', type='double').text = '{:.2f}'.format(temp) + ETree.SubElement( + s_ou, "ExtrapNSMaxUser", type="double" + ).text = "{:.2f}".format(temp) # (4) Draft error in m - temp = oursin.user_advanced_settings['draft_error_m_user'] + temp = oursin.user_advanced_settings["draft_error_m_user"] if not np.isnan(temp): - ETree.SubElement(s_ou, 'DraftErrormUser', type='double').text = '{:.2f}'.format(temp) + ETree.SubElement( + s_ou, "DraftErrormUser", type="double" + ).text = "{:.2f}".format(temp) # (4) Bin size error in percent - temp = oursin.user_advanced_settings['dzi_prct_user'] + temp = oursin.user_advanced_settings["dzi_prct_user"] if not np.isnan(temp): - ETree.SubElement(s_ou, 'BinErrorperUser', type='double').text = '{:.2f}'.format(temp) + ETree.SubElement( + s_ou, "BinErrorperUser", type="double" + ).text = "{:.2f}".format(temp) # (4) Right edge distance error in percent - temp = oursin.user_advanced_settings['right_edge_dist_prct_user'] + temp = oursin.user_advanced_settings["right_edge_dist_prct_user"] if not np.isnan(temp): - ETree.SubElement(s_ou, 'REdgeDistErrorperUser', type='double').text = '{:.2f}'.format(temp) + ETree.SubElement( + s_ou, "REdgeDistErrorperUser", type="double" + ).text = "{:.2f}".format(temp) # (4) Left edge distance error in percent - temp = oursin.user_advanced_settings['left_edge_dist_prct_user'] + temp = oursin.user_advanced_settings["left_edge_dist_prct_user"] if not np.isnan(temp): - ETree.SubElement(s_ou, 'LEdgeDistErrorperUser', type='double').text = '{:.2f}'.format(temp) + ETree.SubElement( + s_ou, "LEdgeDistErrorperUser", type="double" + ).text = "{:.2f}".format(temp) # (4) GGA Boat Velocity Error in mps - temp = oursin.user_advanced_settings['gga_boat_mps_user'] + temp = 
oursin.user_advanced_settings["gga_boat_mps_user"] if not np.isnan(temp): - ETree.SubElement(s_ou, 'GGABoatVelErrormpsUser', type='double').text = '{:.2f}'.format(temp) + ETree.SubElement( + s_ou, "GGABoatVelErrormpsUser", type="double" + ).text = "{:.2f}".format(temp) # (4) VTG Boat Velocity Error in mps - temp = oursin.user_advanced_settings['vtg_boat_mps_user'] + temp = oursin.user_advanced_settings["vtg_boat_mps_user"] if not np.isnan(temp): - ETree.SubElement(s_ou, 'VTGBoatVelErrormpsUser', type='double').text = '{:.2f}'.format(temp) + ETree.SubElement( + s_ou, "VTGBoatVelErrormpsUser", type="double" + ).text = "{:.2f}".format(temp) # (4) Compass Error in deg - temp = oursin.user_advanced_settings['compass_error_deg_user'] + temp = oursin.user_advanced_settings["compass_error_deg_user"] if not np.isnan(temp): - ETree.SubElement(s_ou, 'CompassErrordegUser', type='double').text = '{:.2f}'.format(temp) + ETree.SubElement( + s_ou, "CompassErrordegUser", type="double" + ).text = "{:.2f}".format(temp) # (4) Bayesian COV prior in percent - temp = oursin.user_advanced_settings['cov_prior_user'] + temp = oursin.user_advanced_settings["cov_prior_user"] if not np.isnan(temp): - ETree.SubElement(s_ou, 'BayesCOVPriorperUser', type='double').text = '{:.2f}'.format(temp) + ETree.SubElement( + s_ou, "BayesCOVPriorperUser", type="double" + ).text = "{:.2f}".format(temp) # (4) Bayesian COV prior uncertaint in percent - temp = oursin.user_advanced_settings['cov_prior_u_user'] + temp = oursin.user_advanced_settings["cov_prior_u_user"] if not np.isnan(temp): - ETree.SubElement(s_ou, 'BayesCOVPriorUncertaintyperUser', type='double').text = '{:.2f}'.format(temp) + ETree.SubElement( + s_ou, "BayesCOVPriorUncertaintyperUser", type="double" + ).text = "{:.2f}".format(temp) # (3) Other Node - s_o = ETree.SubElement(summary, 'Other') + s_o = ETree.SubElement(summary, "Other") # (4) MeanWidth - temp = other_prop['width'][-1] - ETree.SubElement(s_o, 'MeanWidth', type='double', unitsCode='m').text = '{:.4f}'.format(temp) + temp = other_prop["width"][-1] + ETree.SubElement( + s_o, "MeanWidth", type="double", unitsCode="m" + ).text = "{:.4f}".format(temp) # (4) WidthCOV - temp = other_prop['width_cov'][-1] + temp = other_prop["width_cov"][-1] if not np.isnan(temp): - ETree.SubElement(s_o, 'WidthCOV', type='double').text = '{:.4f}'.format(temp) + ETree.SubElement(s_o, "WidthCOV", type="double").text = "{:.4f}".format( + temp + ) # (4) MeanArea - temp = other_prop['area'][-1] - ETree.SubElement(s_o, 'MeanArea', type='double', unitsCode='sqm').text = '{:.4f}'.format(temp) + temp = other_prop["area"][-1] + ETree.SubElement( + s_o, "MeanArea", type="double", unitsCode="sqm" + ).text = "{:.4f}".format(temp) # (4) AreaCOV - temp = other_prop['area_cov'][-1] + temp = other_prop["area_cov"][-1] if not np.isnan(temp): - ETree.SubElement(s_o, 'AreaCOV', type='double').text = '{:.2f}'.format(temp) + ETree.SubElement(s_o, "AreaCOV", type="double").text = "{:.2f}".format(temp) # (4) MeanBoatSpeed - temp = other_prop['avg_boat_speed'][-1] - ETree.SubElement(s_o, 'MeanBoatSpeed', type='double', unitsCode='mps').text = '{:.4f}'.format(temp) + temp = other_prop["avg_boat_speed"][-1] + ETree.SubElement( + s_o, "MeanBoatSpeed", type="double", unitsCode="mps" + ).text = "{:.4f}".format(temp) # (4) MeanQoverA - temp = other_prop['avg_water_speed'][-1] - ETree.SubElement(s_o, 'MeanQoverA', type='double', unitsCode='mps').text = '{:.4f}'.format(temp) + temp = other_prop["avg_water_speed"][-1] + ETree.SubElement( + s_o, "MeanQoverA", 
type="double", unitsCode="mps" + ).text = "{:.4f}".format(temp) # (4) MeanCourseMadeGood - temp = other_prop['avg_boat_course'][-1] - ETree.SubElement(s_o, 'MeanCourseMadeGood', type='double', unitsCode='deg').text = '{:.2f}'.format(temp) + temp = other_prop["avg_boat_course"][-1] + ETree.SubElement( + s_o, "MeanCourseMadeGood", type="double", unitsCode="deg" + ).text = "{:.2f}".format(temp) # (4) MeanFlowDirection - temp = other_prop['avg_water_dir'][-1] - ETree.SubElement(s_o, 'MeanFlowDirection', type='double', unitsCode='deg').text = '{:.2f}'.format(temp) + temp = other_prop["avg_water_dir"][-1] + ETree.SubElement( + s_o, "MeanFlowDirection", type="double", unitsCode="deg" + ).text = "{:.2f}".format(temp) # (4) MeanDepth - temp = other_prop['avg_depth'][-1] - ETree.SubElement(s_o, 'MeanDepth', type='double', unitsCode='m').text = '{:.4f}'.format(temp) + temp = other_prop["avg_depth"][-1] + ETree.SubElement( + s_o, "MeanDepth", type="double", unitsCode="m" + ).text = "{:.4f}".format(temp) # (4) MaximumDepth - temp = other_prop['max_depth'][-1] - ETree.SubElement(s_o, 'MaximumDepth', type='double', unitsCode='m').text = '{:.4f}'.format(temp) + temp = other_prop["max_depth"][-1] + ETree.SubElement( + s_o, "MaximumDepth", type="double", unitsCode="m" + ).text = "{:.4f}".format(temp) # (4) MaximumWaterSpeed - temp = other_prop['max_water_speed'][-1] - ETree.SubElement(s_o, 'MaximumWaterSpeed', type='double', unitsCode='mps').text = '{:.4f}'.format(temp) + temp = other_prop["max_water_speed"][-1] + ETree.SubElement( + s_o, "MaximumWaterSpeed", type="double", unitsCode="mps" + ).text = "{:.4f}".format(temp) # (4) NumberofTransects temp = len(self.checked_transects(self)) - ETree.SubElement(s_o, 'NumberofTransects', type='integer').text = str(temp) + ETree.SubElement(s_o, "NumberofTransects", type="integer").text = str(temp) # (4) Duration temp = self.measurement_duration(self) - ETree.SubElement(s_o, 'Duration', type='double', unitsCode='sec').text = '{:.2f}'.format(temp) + ETree.SubElement( + s_o, "Duration", type="double", unitsCode="sec" + ).text = "{:.2f}".format(temp) # (4) LeftQPer - temp = 100 * discharge['left_mean'] / discharge['total_mean'] - ETree.SubElement(s_o, 'LeftQPer', type='double').text = '{:.2f}'.format(temp) + temp = 100 * discharge["left_mean"] / discharge["total_mean"] + ETree.SubElement(s_o, "LeftQPer", type="double").text = "{:.2f}".format(temp) # (4) RightQPer - temp = 100 * discharge['right_mean'] / discharge['total_mean'] - ETree.SubElement(s_o, 'RightQPer', type='double').text = '{:.2f}'.format(temp) + temp = 100 * discharge["right_mean"] / discharge["total_mean"] + ETree.SubElement(s_o, "RightQPer", type="double").text = "{:.2f}".format(temp) # (4) InvalidCellsQPer - temp = 100 * discharge['int_cells_mean'] / discharge['total_mean'] - ETree.SubElement(s_o, 'InvalidCellsQPer', type='double').text = '{:.2f}'.format(temp) + temp = 100 * discharge["int_cells_mean"] / discharge["total_mean"] + ETree.SubElement(s_o, "InvalidCellsQPer", type="double").text = "{:.2f}".format( + temp + ) # (4) InvalidEnsQPer - temp = 100 * discharge['int_ensembles_mean'] / discharge['total_mean'] - ETree.SubElement(s_o, 'InvalidEnsQPer', type='double').text = '{:.2f}'.format(temp) + temp = 100 * discharge["int_ensembles_mean"] / discharge["total_mean"] + ETree.SubElement(s_o, "InvalidEnsQPer", type="double").text = "{:.2f}".format( + temp + ) # (4) UserRating if self.user_rating: temp = self.user_rating else: - temp = 'Not Rated' - ETree.SubElement(s_o, 'UserRating', type='char').text = 
temp + temp = "Not Rated" + ETree.SubElement(s_o, "UserRating", type="char").text = temp # (4) DischargePPDefault temp = self.extrap_fit.q_sensitivity.q_pp_mean - ETree.SubElement(s_o, 'DischargePPDefault', type='double').text = '{:.2f}'.format(temp) + ETree.SubElement( + s_o, "DischargePPDefault", type="double" + ).text = "{:.2f}".format(temp) # (2) UserComment if len(self.comments) > 1: - temp = '' + temp = "" for comment in self.comments: - temp = temp + comment.replace('\n', ' |||') + ' |||' - ETree.SubElement(channel, 'UserComment', type='char').text = temp + temp = temp + comment.replace("\n", " |||") + " |||" + ETree.SubElement(channel, "UserComment", type="char").text = temp + + # Average cross-section + if self.export_xs: + # xs = CrossSectionComp(self.transects, file_name) + xs = CrossSectionComp(self.transects) + + cross_section = xs.cross_section[(len(xs.cross_section) - 1)] + rows = cross_section.shape[0] + + survey = ETree.SubElement(channel, "CrossSectionSurvey") + + for row in range(rows): + lon = cross_section[row, 0] + lat = cross_section[row, 1] + dist_x = cross_section[row, 2] + dist_y = cross_section[row, 3] + station = cross_section[row, 4] + depth = cross_section[row, 5] + + if not np.isnan(lon): + try: + lat, lon = utm.to_latlon( + lat, + lon, + zone_number=xs.zone_number, + zone_letter=xs.zone_letter, + ) + except BaseException: + lat = np.nan + lon = np.nan + + meas_pts = ETree.SubElement(survey, "MeasurementPoints") + ETree.SubElement(meas_pts, "TableRow", type="integer").text = str(row) + + # latitude + measurements = ETree.SubElement(meas_pts, "Measurements") + sensor = ETree.SubElement(measurements, "Sensor") + s_type = ETree.SubElement(sensor, "SensorType") + parm = ETree.SubElement(s_type, "Parameter") + ETree.SubElement(parm, "Name").text = "Latitude" + ETree.SubElement(parm, "Units").text = "Degrees" + ETree.SubElement(measurements, "Value", type="double").text = str(lat) + + # Longitude + measurements = ETree.SubElement(meas_pts, "Measurements") + sensor = ETree.SubElement(measurements, "Sensor") + s_type = ETree.SubElement(sensor, "SensorType") + parm = ETree.SubElement(s_type, "Parameter") + ETree.SubElement(parm, "Name").text = "Longitude" + ETree.SubElement(parm, "Units").text = "Degrees" + ETree.SubElement(measurements, "Value", type="double").text = str(lon) + + # station + measurements = ETree.SubElement(meas_pts, "Measurements") + sensor = ETree.SubElement(measurements, "Sensor") + s_type = ETree.SubElement(sensor, "SensorType") + parm = ETree.SubElement(s_type, "Parameter") + ETree.SubElement(parm, "Name").text = "Distance" + ETree.SubElement(parm, "Units").text = "Meters" + ETree.SubElement( + measurements, "Value", type="double", unitsCode="m" + ).text = "{:.3f}".format(station) + + # distance x + measurements = ETree.SubElement(meas_pts, "Measurements") + sensor = ETree.SubElement(measurements, "Sensor") + s_type = ETree.SubElement(sensor, "SensorType") + parm = ETree.SubElement(s_type, "Parameter") + ETree.SubElement(parm, "Name").text = "Distance X" + ETree.SubElement(parm, "Units").text = "Meters" + ETree.SubElement( + measurements, "Value", type="double", unitsCode="m" + ).text = "{:.3f}".format(dist_x) + + # distance y + measurements = ETree.SubElement(meas_pts, "Measurements") + sensor = ETree.SubElement(measurements, "Sensor") + s_type = ETree.SubElement(sensor, "SensorType") + parm = ETree.SubElement(s_type, "Parameter") + ETree.SubElement(parm, "Name").text = "Distance Y" + ETree.SubElement(parm, "Units").text = "Meters" + 
ETree.SubElement( + measurements, "Value", type="double", unitsCode="m" + ).text = "{:.3f}".format(dist_y) + + # depth + measurements = ETree.SubElement(meas_pts, "Measurements") + sensor = ETree.SubElement(measurements, "Sensor") + s_type = ETree.SubElement(sensor, "SensorType") + parm = ETree.SubElement(s_type, "Parameter") + ETree.SubElement(parm, "Name").text = "Depth" + ETree.SubElement(parm, "Units").text = "Meters" + ETree.SubElement( + measurements, "Value", type="double", unitsCode="m" + ).text = "{:.3f}".format(depth) # Create xml output file - with open(file_name, 'wb') as xml_file: + with open(file_name, "wb") as xml_file: # Create binary coded output file et = ETree.ElementTree(channel) root = et.getroot() xml_out = ETree.tostring(root) # Add stylesheet instructions - xml_out = b'<?xml-stylesheet type= "text/xsl" href="QRevStylesheet.xsl"?>' + xml_out + xml_out = ( + b'<?xml-stylesheet type= "text/xsl" ' + b'href="QRevStylesheet.xsl"?>' + xml_out + ) # Add tabs to make output more readable and apply utf-8 encoding - xml_out = parseString(xml_out).toprettyxml(encoding='utf-8') + xml_out = parseString(xml_out).toprettyxml(encoding="utf-8") # Write file xml_file.write(xml_out) @@ -3892,20 +4909,18 @@ class Measurement(object): """ pd0_data = Pd0TRDI(filename) - if transect_type == 'MB': + if transect_type == "MB": mmt_transect = mmt.mbt_transects[index] else: mmt_transect = mmt.transects[index] transect = TransectData() - transect.trdi(mmt=mmt, - mmt_transect=mmt_transect, - pd0_data=pd0_data) + transect.trdi(mmt=mmt, mmt_transect=mmt_transect, pd0_data=pd0_data) return transect - def allocate_transects(self, mmt, transect_type='Q', checked=False): - """Method to load transect data. Changed from Matlab approach by Greg to allow possibility - of multi-thread approach. + def allocate_transects(self, mmt, transect_type="Q", checked=False): + """Method to load transect data. Changed from Matlab approach by Greg + to allow possibility of multi-thread approach. 
Parameters ---------- @@ -3914,14 +4929,15 @@ class Measurement(object): transect_type: str Type of transect (Q: discharge or MB: moving-bed test) checked: bool - Determines if all files are loaded (False) or only checked files (True) + Determines if all files are loaded (False) or only checked files + (True) """ file_names = [] file_idx = [] # Setup processing for discharge or moving-bed transects - if transect_type == 'Q': + if transect_type == "Q": # Identify discharge transect files to load if checked: for idx, transect in enumerate(mmt.transects): @@ -3933,7 +4949,7 @@ class Measurement(object): file_names = [transect.Files[0] for transect in mmt.transects] file_idx = list(range(0, len(file_names))) - elif transect_type == 'MB': + elif transect_type == "MB": file_names = [transect.Files[0] for transect in mmt.mbt_transects] file_idx = list(range(0, len(file_names))) @@ -3950,12 +4966,14 @@ class Measurement(object): num = len(valid_indices) for k in range(num): - temp = self.add_transect(mmt, valid_files[k], valid_indices[k], transect_type) + temp = self.add_transect( + mmt, valid_files[k], valid_indices[k], transect_type + ) if temp.w_vel is not None: transects.append(temp) return transects -if __name__ == '__main__': +if __name__ == "__main__": pass diff --git a/Classes/MovingBedTests.py b/Classes/MovingBedTests.py index 117ab21..9e46fc1 100644 --- a/Classes/MovingBedTests.py +++ b/Classes/MovingBedTests.py @@ -4,7 +4,14 @@ from Classes.TransectData import adjusted_ensemble_duration from Classes.TransectData import TransectData from Classes.QComp import QComp from Classes.MatSonTek import MatSonTek -from MiscLibs.common_functions import cart2pol, sind, pol2cart, rad2azdeg, nan_less, nan_greater +from MiscLibs.common_functions import ( + cart2pol, + sind, + pol2cart, + rad2azdeg, + nan_less, + nan_greater, +) class MovingBedTests(object): @@ -37,13 +44,15 @@ class MovingBedTests(object): moving_bed: str Moving-bed determined ("Yes" or "No") user_valid: bool - Boolean to allow user to determine if test should be considered a valid test (True or False) + Boolean to allow user to determine if test should be considered a + valid test (True or False) test_quality: str Quality of test, 'Valid' 'Warnings' 'Errors' use_2_correct: bool Use this test to correct discharge (True or False) selected: bool - Selected as valid moving-bed test to use for correction or determine moving-bed condition + Selected as valid moving-bed test to use for correction or determine + moving-bed condition messages: list List of strings for warning and error messages based on data processing near_bed_speed_mps: float @@ -77,32 +86,32 @@ class MovingBedTests(object): gps_flow_spd_mps: float Corrected flow speed using BT and GPS """ - + def __init__(self): """Initialize class and instance variables.""" - self.type = None # Loop or Stationary - self.transect = None # Object of TransectData - self.duration_sec = np.nan # Duration of test in secs - self.percent_invalid_bt = np.nan # Percent of invalid bottom track - self.compass_diff_deg = np.nan # Difference in heading for out and back of loop - self.flow_dir = np.nan # Mean flow direction from loop test - self.mb_dir = np.nan # Moving bed or closure error direction - self.dist_us_m = np.nan # Distance moved upstream in m - self.flow_spd_mps = np.nan # Magnitude of water velocity in mps - self.mb_spd_mps = np.nan # Magnitude of moving=bed velocity in mps - self.percent_mb = np.nan # Potential error due to moving bed in percent - self.moving_bed = np.nan # Moving-bed 
determined 'Yes' 'No' - self.user_valid = True # Logical to allow user to determine if test should be considered a valid test - self.test_quality = None # Quality of test 'Valid' 'Warnings' 'Errors' - self.use_2_correct = None # Use this test to correct discharge - self.selected = None # Selected valid moving-bed test to use for correction or determine moving-bed condition - self.messages = None # Cell array of warning and error messages based on data processing - self.near_bed_speed_mps = np.nan # Mean near-bed water speed for test in mps - self.stationary_us_track = np.array([]) # Upstream component of the bottom track referenced ship track - self.stationary_cs_track = np.array([]) # Cross=stream component of the bottom track referenced ship track - self.stationary_mb_vel = np.array([]) # Moving-bed velocity by ensemble - self.ref = 'BT' + self.type = None + self.transect = None + self.duration_sec = np.nan + self.percent_invalid_bt = np.nan + self.compass_diff_deg = np.nan + self.flow_dir = np.nan + self.mb_dir = np.nan + self.dist_us_m = np.nan + self.flow_spd_mps = np.nan + self.mb_spd_mps = np.nan + self.percent_mb = np.nan + self.moving_bed = np.nan + self.user_valid = True + self.test_quality = None + self.use_2_correct = None + self.selected = None + self.messages = None + self.near_bed_speed_mps = np.nan + self.stationary_us_track = np.array([]) + self.stationary_cs_track = np.array([]) + self.stationary_mb_vel = np.array([]) + self.ref = "BT" self.bt_percent_mb = np.nan self.bt_dist_us_m = np.nan self.bt_mb_dir = np.nan @@ -113,8 +122,8 @@ class MovingBedTests(object): self.gps_mb_dir = np.nan self.gps_mb_spd_mps = np.nan self.gps_flow_spd_mps = np.nan - - def populate_data(self, source, file=None, test_type=None): + + def populate_data(self, source, snr_3beam_comp=False, file=None, test_type=None): """Process and store moving-bed test data. 
Parameters ---------- @@ -125,56 +134,59 @@ class MovingBedTests(object): Object of TransectData for TRDI and str of filename for SonTek test_type: str Type of moving-bed test (Loop or Stationary) + snr_3beam_comp: bool + Indicates if 3-beam solutions should be used for invalid SNR data """ - if source == 'TRDI': + if source == "TRDI": self.mb_trdi(file, test_type) else: - self.mb_sontek(file, test_type) + self.mb_sontek(file, test_type, snr_3beam_comp) self.process_mb_test(source) def process_mb_test(self, source): - + # Convert to earth coordinates and set the navigation reference to BT # for both boat and water data - # self.transect.boat_vel.bt_vel.apply_interpolation(transect=self.transect, interpolation_method='Linear') - self.transect.change_coord_sys(new_coord_sys='Earth') - self.transect.change_nav_reference(update=True, new_nav_ref='BT') - - # Adjust data for default manufacturer specific handling of invalid data - delta_t = adjusted_ensemble_duration(self.transect, 'mbt') - - if self.type == 'Loop': - if source == 'TRDI': + self.transect.change_coord_sys(new_coord_sys="Earth") + self.transect.change_nav_reference(update=True, new_nav_ref="BT") + + # Adjust data for default manufacturer specific handling of invalid + # data + delta_t = adjusted_ensemble_duration(self.transect, "mbt") + + if self.type == "Loop": + if source == "TRDI": self.loop_test(delta_t) else: self.loop_test() - elif self.type == 'Stationary': + elif self.type == "Stationary": self.stationary_test() else: - raise ValueError('Invalid moving-bed test identifier specified.') + raise ValueError("Invalid moving-bed test identifier specified.") @staticmethod def qrev_mat_in(meas_struct): - """Processes the Matlab data structure to obtain a list of TransectData objects containing transect - data from the Matlab data structure. + """Processes the Matlab data structure to obtain a list of + MovingBedTests objects from the Matlab data structure. - Parameters - ---------- - meas_struct: mat_struct - Matlab data structure obtained from sio.loadmat + Parameters + ---------- + meas_struct: mat_struct - Matlab data structure obtained from sio.loadmat + Matlab data structure obtained from sio.loadmat - Returns - ------- - mb_tests: list - List of MovingBedTests objects - """ + Returns + ------- + mb_tests: list + List of MovingBedTests objects + """ mb_tests = [] - if hasattr(meas_struct, 'mbTests'): + if hasattr(meas_struct, "mbTests"): try: - # If there are multiple test the Matlab structure will be an array + # If there are multiple tests the Matlab structure will be an + # array if type(meas_struct.mbTests) == np.ndarray: for test in meas_struct.mbTests: temp = MovingBedTests() @@ -190,7 +202,8 @@ class MovingBedTests(object): return mb_tests def populate_from_qrev_mat(self, mat_data): - """Populates the object using data from previously saved QRev Matlab file. + """Populates the object using data from previously saved QRev Matlab + file. Parameters ---------- @@ -231,7 +244,7 @@ class MovingBedTests(object): self.stationary_mb_vel = mat_data.stationaryMBVel # Feature that can use GPS for moving-bed tests - if hasattr(mat_data, 'bt_percent_mb'): + if hasattr(mat_data, "bt_percent_mb"): self.bt_percent_mb = self.return_float(mat_data.bt_percent_mb) self.bt_dist_us_m = self.return_float(mat_data.bt_dist_us_m) self.bt_mb_dir = self.return_float(mat_data.bt_mb_dir) @@ -270,11 +283,12 @@ class MovingBedTests(object): @staticmethod def make_list(array_in): - """Method to make list from several special cases that can occur in the Matlab data.
+ """Method to make list from several special cases that can occur in + the Matlab data. Parameters ---------- - array_in: np.ndarray + array_in: np.array Input that needs to be converted to a list """ @@ -302,12 +316,12 @@ class MovingBedTests(object): Object of TransectData test_type: str Type of moving-bed test.""" - + self.transect = transect self.user_valid = True self.type = test_type - def mb_sontek(self, file_name, test_type): + def mb_sontek(self, file_name, test_type, snr_3beam_comp): """Function to create object properties for SonTek moving-bed tests Parameters @@ -315,7 +329,10 @@ class MovingBedTests(object): file_name: str Name of moving-bed test data file test_type: str - Type of moving-bed test.""" + Type of moving-bed test. + snr_3beam_comp: bool + Indicates if 3-beam solutions should be used for invalid SNR data + """ self.type = test_type # Read Matlab file for moving-bed test @@ -323,9 +340,9 @@ class MovingBedTests(object): # Create transect objects for each discharge transect self.transect = TransectData() - self.transect.sontek(rsdata, file_name) - - def loop_test(self, ens_duration=None, ref='BT'): + self.transect.sontek(rsdata, file_name, snr_3beam_comp=snr_3beam_comp) + + def loop_test(self, ens_duration=None, ref="BT"): """Process loop moving bed test. Parameters @@ -337,8 +354,10 @@ class MovingBedTests(object): """ # Assign data from transect to local variables - # self.transect.boat_interpolations(update=False, target='BT', method='Linear') - # self.transect.boat_interpolations(update=False, target='GPS', method='Linear') + # self.transect.boat_interpolations(update=False, target='BT', + # method='Linear') + # self.transect.boat_interpolations(update=False, target='GPS', + # method='Linear') trans_data = copy.deepcopy(self.transect) in_transect_idx = trans_data.in_transect_idx n_ensembles = len(in_transect_idx) @@ -382,14 +401,22 @@ class MovingBedTests(object): # Compute flow speed and direction self.flow_dir = rad2azdeg(direct) - + # Compute the area weighted mean velocity components for the - purposed of computing the mean flow speed. Area weighting is used for flow speed instead of - discharge so that the flow speed is not included in the weighting used to compute the mean flow speed. - wght_area = np.multiply(np.multiply(np.sqrt(bt_u ** 2 + bt_v ** 2), bin_size), ens_duration) - idx = np.where(np.isnan(wt_u) == False) - se = np.nansum(np.nansum(wt_u[idx] * wght_area[idx])) / np.nansum(np.nansum(wght_area[idx])) - sn = np.nansum(np.nansum(wt_v[idx] * wght_area[idx])) / np.nansum(np.nansum(wght_area[idx])) + # purpose of computing the mean flow speed. Area weighting is + # used for flow speed instead of + # discharge so that the flow speed is not included in the + # weighting used to compute the mean flow speed.
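# Illustration (not part of the patch): the area weighting described in the
# comment above, reduced to synthetic arrays. Names mirror the code that
# follows; all numbers here are made up.
import numpy as np

wt_u = np.array([[0.50, 0.62, np.nan], [0.48, 0.55, 0.60]])  # water u (m/s)
wt_v = np.array([[0.10, 0.05, np.nan], [0.12, 0.08, 0.07]])  # water v (m/s)
bt_u = np.array([0.9, 1.1, 1.0])  # boat u per ensemble (m/s)
bt_v = np.array([0.1, 0.0, 0.2])  # boat v per ensemble (m/s)
bin_size = np.full((2, 3), 0.25)  # depth cell size (m)
ens_duration = np.array([1.0, 1.0, 1.2])  # ensemble duration (s)

# Weight each cell by boat distance travelled times cell height, i.e. the
# area the cell represents; water speed is deliberately left out of the
# weight so the mean flow speed is not weighted by itself.
wght_area = np.sqrt(bt_u**2 + bt_v**2) * bin_size * ens_duration
idx = np.where(np.logical_not(np.isnan(wt_u)))
se = np.nansum(wt_u[idx] * wght_area[idx]) / np.nansum(wght_area[idx])
sn = np.nansum(wt_v[idx] * wght_area[idx]) / np.nansum(wght_area[idx])
print(np.sqrt(se**2 + sn**2))  # area-weighted mean flow speed (m/s)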
+ wght_area = np.multiply( + np.multiply(np.sqrt(bt_u**2 + bt_v**2), bin_size), ens_duration + ) + idx = np.where(np.logical_not(np.isnan(wt_u))) + se = np.nansum(np.nansum(wt_u[idx] * wght_area[idx])) / np.nansum( + np.nansum(wght_area[idx]) + ) + sn = np.nansum(np.nansum(wt_v[idx] * wght_area[idx])) / np.nansum( + np.nansum(wght_area[idx]) + ) dir_a, self.bt_flow_spd_mps = cart2pol(se, sn) self.bt_flow_spd_mps = self.bt_flow_spd_mps + self.bt_mb_spd_mps @@ -400,7 +427,7 @@ class MovingBedTests(object): self.compute_mb_gps() # Store selected test characteristics - if ref == 'BT': + if ref == "BT": self.mb_spd_mps = self.bt_mb_spd_mps self.dist_us_m = self.bt_dist_us_m self.percent_mb = self.bt_percent_mb @@ -415,9 +442,12 @@ class MovingBedTests(object): # Assess invalid bottom track # Compute percent invalid bottom track - self.percent_invalid_bt = (np.nansum(bt_valid == False) / len(bt_valid)) * 100 + self.percent_invalid_bt = ( + np.nansum(np.logical_not(bt_valid)) / len(bt_valid) + ) * 100 - # Determine if more than 9 consecutive seconds of invalid BT occurred + # Determine if more than 9 consecutive seconds of invalid BT + # occurred consect_bt_time = np.zeros(n_ensembles) for n in range(1, n_ensembles): if bt_valid[n]: @@ -430,22 +460,24 @@ class MovingBedTests(object): # Evaluate compass calibration based on flow direction # Find apex of loop adapted from - # http://www.mathworks.de/matlabcentral/newsreader/view_thread/164048 + # http://www.mathworks.de/matlabcentral/newsreader/view_thread + # /164048 loop_out = np.array([bt_x[0], bt_y[0], 0]) loop_return = np.array([bt_x[-1], bt_y[-1], 0]) distance = np.zeros(n_ensembles) for n in range(n_ensembles): p = np.array([bt_x[n], bt_y[n], 0]) - distance[n] = np.linalg.norm(np.cross(loop_return - loop_out, p - loop_out)) \ - / np.linalg.norm(loop_return - loop_out) + distance[n] = np.linalg.norm( + np.cross(loop_return - loop_out, p - loop_out) + ) / np.linalg.norm(loop_return - loop_out) dmg_idx = np.where(distance == np.nanmax(distance))[0][0] # Compute flow direction on outgoing part of loop - u_out = wt_u[:, :dmg_idx + 1] - v_out = wt_v[:, :dmg_idx + 1] - wght = np.abs(q[:, :dmg_idx+1]) + u_out = wt_u[:, : dmg_idx + 1] + v_out = wt_v[:, : dmg_idx + 1] + wght = np.abs(q[:, : dmg_idx + 1]) se = np.nansum(u_out * wght) / np.nansum(wght) sn = np.nansum(v_out * wght) / np.nansum(wght) direct, _ = cart2pol(se, sn) @@ -458,22 +490,28 @@ class MovingBedTests(object): # Compute difference from mean and correct to +/- 180 v_dir_corr = flow_dir_cell - flow_dir1 v_dir_idx = nan_greater(v_dir_corr, 180) - v_dir_corr[v_dir_idx] = 360-v_dir_corr[v_dir_idx] + v_dir_corr[v_dir_idx] = 360 - v_dir_corr[v_dir_idx] v_dir_idx = nan_less(v_dir_corr, -180) v_dir_corr[v_dir_idx] = 360 + v_dir_corr[v_dir_idx] # Number of invalid weights - idx2 = np.where(np.isnan(wght) == False) + idx2 = np.where(np.logical_not(np.isnan(wght))) nwght = len(idx2[0]) # Compute 95% uncertainty using weighted standard deviation - uncert1 = 2. 
* np.sqrt(np.nansum(np.nansum(wght * v_dir_corr**2)) - / (((nwght - 1) * np.nansum(np.nansum(wght))) / nwght)) / np.sqrt(nwght) + uncert1 = ( + 2.0 + * np.sqrt( + np.nansum(np.nansum(wght * v_dir_corr**2)) + / (((nwght - 1) * np.nansum(np.nansum(wght))) / nwght) + ) + / np.sqrt(nwght) + ) # Compute flow direction on returning part of loop - u_ret = wt_u[:, dmg_idx + 1:] - v_ret = wt_v[:, dmg_idx + 1:] - wght = np.abs(q[:, dmg_idx+1:]) + u_ret = wt_u[:, dmg_idx + 1 :] + v_ret = wt_v[:, dmg_idx + 1 :] + wght = np.abs(q[:, dmg_idx + 1 :]) se = np.nansum(u_ret * wght) / np.nansum(wght) sn = np.nansum(v_ret * wght) / np.nansum(wght) direct, _ = cart2pol(se, sn) @@ -491,12 +529,18 @@ class MovingBedTests(object): v_dir_corr[v_dir_idx] = 360 + v_dir_corr[v_dir_idx] # Number of valid weights - idx2 = np.where(np.isnan(wght) == False) + idx2 = np.where(np.logical_not(np.isnan(wght))) nwght = len(idx2[0]) # Compute 95% uncertainty using weighted standard deviation - uncert2 = 2.*np.sqrt(np.nansum(np.nansum(wght * v_dir_corr**2)) - / (((nwght-1)*np.nansum(np.nansum(wght))) / nwght)) / np.sqrt(nwght) + uncert2 = ( + 2.0 + * np.sqrt( + np.nansum(np.nansum(wght * v_dir_corr**2)) + / (((nwght - 1) * np.nansum(np.nansum(wght))) / nwght) + ) + / np.sqrt(nwght) + ) # Compute and report difference in flow direction diff_dir = np.abs(flow_dir1 - flow_dir2) @@ -506,93 +550,140 @@ class MovingBedTests(object): uncert = uncert1 + uncert2 # Compute potential compass error - idx = np.where(np.isnan(bt_x) == False) + idx = np.where(np.logical_not(np.isnan(bt_x))) if len(idx[0]) > 0: idx = idx[0][-1] - width = np.sqrt((bt_x[dmg_idx] - bt_x[idx] / 2) ** 2 + (bt_y[dmg_idx] - bt_y[idx] / 2) ** 2) - compass_error = (2 * width * sind(diff_dir / 2) * 100) / (self.duration_sec * self.flow_spd_mps) + width = np.sqrt( + (bt_x[dmg_idx] - bt_x[idx] / 2) ** 2 + + (bt_y[dmg_idx] - bt_y[idx] / 2) ** 2 + ) + compass_error = (2 * width * sind(diff_dir / 2) * 100) / ( + self.duration_sec * self.flow_spd_mps + ) # Initialize message counter - self.test_quality = 'Good' + self.test_quality = "Good" # Low water velocity if self.flow_spd_mps < 0.25: - self.messages.append('WARNING: The water velocity is less than recommended minimum for ' - + 'this test and could cause the loop method to be inaccurate. ' - + 'CONSIDER USING A STATIONARY TEST TO CHECK MOVING-BED CONDITIONS') - self.test_quality = 'Warnings' + self.messages.append( + "WARNING: The water velocity is less than recommended " + "minimum for " + "this test and could cause the loop method to be " + "inaccurate. " + + "CONSIDER USING A STATIONARY TEST TO CHECK MOVING-BED " + "CONDITIONS" + ) + self.test_quality = "Warnings" # Percent invalid bottom track if self.percent_invalid_bt > 20: - self.messages.append('ERROR: Percent invalid bottom track exceeds 20 percent. ' - + 'THE LOOP IS NOT ACCURATE. TRY A STATIONARY MOVING-BED TEST.') - self.test_quality = 'Errors' + self.messages.append( + "ERROR: Percent invalid bottom track exceeds 20 percent. " + + "THE LOOP IS NOT ACCURATE. TRY A STATIONARY MOVING-BED " + "TEST." + ) + self.test_quality = "Errors" elif self.percent_invalid_bt > 5: - self.messages.append('WARNING: Percent invalid bottom track exceeds 5 percent. ' - + 'Loop may not be accurate. PLEASE REVIEW DATA.') - self.test_quality = 'Warnings' + self.messages.append( + "WARNING: Percent invalid bottom track exceeds 5 percent. " + + "Loop may not be accurate. PLEASE REVIEW DATA." 
+ ) + self.test_quality = "Warnings" # More than 9 consecutive seconds of invalid BT if max_consect_bt_time > 9: - self.messages.append('ERROR: Bottom track is invalid for more than 9 consecutive seconds.' - + 'THE LOOP IS NOT ACCURATE. TRY A STATIONARY MOVING-BED TEST.') - self.test_quality = 'Errors' - - if np.abs(compass_error) > 5 and np.abs(diff_dir) > 3 and np.abs(diff_dir) > uncert: - self.messages.append('ERROR: Difference in flow direction between out and back sections of ' - + 'loop could result in a 5 percent or greater error in final discharge. ' - + 'REPEAT LOOP AFTER COMPASS CAL. OR USE A STATIONARY MOVING-BED TEST.') - self.test_quality = 'Errors' + self.messages.append( + "ERROR: Bottom track is invalid for more than 9 " + "consecutive seconds." + + "THE LOOP IS NOT ACCURATE. TRY A STATIONARY MOVING-BED " + "TEST." + ) + self.test_quality = "Errors" + + if ( + np.abs(compass_error) > 5 + and np.abs(diff_dir) > 3 + and np.abs(diff_dir) > uncert + ): + self.messages.append( + "ERROR: Difference in flow direction between out and " + "back sections of " + + "loop could result in a 5 percent or greater error in " + "final discharge. " + + "REPEAT LOOP AFTER COMPASS CAL. OR USE A STATIONARY " + "MOVING-BED TEST." + ) + self.test_quality = "Errors" else: - self.messages.append('ERROR: Loop has no valid bottom track data. ' - + 'REPEAT OR USE A STATIONARY MOVING-BED TEST.') - self.test_quality = 'Errors' + self.messages.append( + "ERROR: Loop has no valid bottom track data. " + + "REPEAT OR USE A STATIONARY MOVING-BED " + "TEST." + ) + self.test_quality = "Errors" # If loop is valid then evaluate moving-bed condition - if self.test_quality != 'Errors': + if self.test_quality != "Errors": # Check minimum moving-bed velocity criteria if self.mb_spd_mps > vel_criteria: # Check that closure error is in upstream direction if 135 < np.abs(self.flow_dir - self.mb_dir) < 225: - # Check if moving-bed is greater than 1% of the mean flow speed + # Check if moving-bed is greater than 1% of the mean + # flow speed if self.percent_mb > 1: - self.messages.append('Loop Indicates a Moving Bed -- Use GPS as reference. If GPS is ' - + 'unavailable or invalid use the loop method to correct the ' - + 'final discharge.') - self.moving_bed = 'Yes' + self.messages.append( + "Loop Indicates a Moving Bed -- Use GPS as " + "reference. If GPS is " + + "unavailable or invalid use the loop method to " + "correct the " + "final discharge." + ) + self.moving_bed = "Yes" else: - self.messages.append('Moving Bed Velocity < 1% of Mean Velocity -- No Correction Recommended') - self.moving_bed = 'No' + self.messages.append( + "Moving Bed Velocity < 1% of Mean Velocity -- No " + "Correction Recommended" + ) + self.moving_bed = "No" else: - self.messages.append('ERROR: Loop closure error not in upstream direction. ' - + 'REPEAT LOOP or USE STATIONARY TEST') - self.test_quality = 'Errors' - self.moving_bed = 'Unknown' + self.messages.append( + "ERROR: Loop closure error not in upstream direction. 
" + + "REPEAT LOOP or USE STATIONARY TEST" + ) + self.test_quality = "Errors" + self.moving_bed = "Unknown" else: - self.messages.append('Moving-bed velocity < Minimum moving-bed velocity criteria ' - + '-- No correction recommended') - self.moving_bed = 'No' + self.messages.append( + "Moving-bed velocity < Minimum moving-bed velocity " + "criteria " + "-- No correction recommended" + ) + self.moving_bed = "No" # Notify of differences in results of test between BT and GPS if not np.isnan(self.gps_percent_mb): if np.abs(self.bt_percent_mb - self.gps_percent_mb) > 2: - self.messages.append('WARNING - Bottom track and GPS results differ by more than 2%.') - self.test_quality = 'Warnings' - - if np.logical_xor(self.bt_percent_mb >= 1, self.gps_percent_mb >= 1): - self.messages.append('WARNING - Bottom track and GPS results do not agree.') - self.test_quality = 'Warnings' + self.messages.append( + "WARNING - Bottom track and GPS results differ by " + "more than 2%." + ) + self.test_quality = "Warnings" + + if np.logical_xor(self.bt_percent_mb >= 1, self.gps_percent_mb >= 1): + self.messages.append( + "WARNING - Bottom track and GPS results do not agree." + ) + self.test_quality = "Warnings" else: - self.messages.append('ERROR: Due to ERRORS noted above this loop is NOT VALID. ' - + 'Please consider suggestions.') - self.moving_bed = 'Unknown' + self.messages.append( + "ERROR: Due to ERRORS noted above this loop is NOT VALID. " + + "Please consider suggestions." + ) + self.moving_bed = "Unknown" - def stationary_test(self, ref='BT'): - """Processed the stationary moving-bed tests. - """ + def stationary_test(self, ref="BT"): + """Processed the stationary moving-bed tests.""" # Assign data from transect to local variables trans_data = copy.deepcopy(self.transect) @@ -611,10 +702,10 @@ class MovingBedTests(object): # Use only data with valid bottom track valid_bt = trans_data.boat_vel.bt_vel.valid_data[0, in_transect_idx] - wt_u[:, valid_bt == False] = np.nan - wt_v[:, valid_bt == False] = np.nan - bt_u[valid_bt == False] = np.nan - bt_v[valid_bt == False] = np.nan + wt_u[:, np.logical_not(valid_bt)] = np.nan + wt_v[:, np.logical_not(valid_bt)] = np.nan + bt_u[np.logical_not(valid_bt)] = np.nan + bt_v[np.logical_not(valid_bt)] = np.nan u_water = np.nanmean(wt_u) v_water = np.nanmean(wt_v) @@ -622,12 +713,16 @@ class MovingBedTests(object): if self.flow_dir < 0: self.flow_dir = self.flow_dir + 360 - bin_depth = trans_data.depths.bt_depths.depth_cell_depth_m[:, in_transect_idx] + bin_depth = trans_data.depths.bt_depths.depth_cell_depth_m[ + :, in_transect_idx + ] trans_select = getattr(trans_data.depths, trans_data.depths.selected) depth_ens = trans_select.depth_processed_m[in_transect_idx] - nb_u, nb_v, unit_nbu, unit_nbv = self.near_bed_velocity(wt_u, wt_v, depth_ens, bin_depth) - + nb_u, nb_v, unit_nbu, unit_nbv = self.near_bed_velocity( + wt_u, wt_v, depth_ens, bin_depth + ) + # Compute bottom track parallel to water velocity unit_nb_vel = np.vstack([unit_nbu, unit_nbv]) bt_vel = np.vstack([bt_u, bt_v]) @@ -643,9 +738,9 @@ class MovingBedTests(object): bt_vel_cs = np.sum(bt_vel * nb_vel_unit_cs, 0) bt_cs_strm_dist = bt_vel_cs * ens_duration bt_cs_strm_dist_cum = np.nancumsum(bt_cs_strm_dist) - + # Compute cumulative mean moving bed velocity - valid_bt_vel_up_strm = np.isnan(bt_vel_up_strm) == False + valid_bt_vel_up_strm = np.logical_not(np.isnan(bt_vel_up_strm)) mb_vel = np.nancumsum(bt_vel_up_strm) / np.nancumsum(valid_bt_vel_up_strm) @@ -656,17 +751,22 @@ class MovingBedTests(object): else: 
u_corrected = wt_u v_corrected = wt_v - + # Compute the mean of the ensemble magnitudes - # Mean is computed using magnitudes because if a Streampro with no compass is the data source the change - # in direction could be either real change in water direction or an uncompensated turn of the floating - # platform. This approach is the best compromise when there is no compass or the compass is unreliable, - # which is often why the stationary method is used. A weighted average is used to account for the possible - # change in cell size within and ensemble for the RiverRay and RiverPro. + # Mean is computed using magnitudes because if a Streampro with + # no compass is the data source the change in direction could be + # either real change in water direction or an uncompensated turn of + # the floating platform. This approach is the best compromise when + # there is no compass or the compass is unreliable, + # which is often why the stationary method is used. A weighted + # average is used to account for the possible change in cell size + # within an ensemble for the RiverRay and RiverPro. mag = np.sqrt(u_corrected**2 + v_corrected**2) - depth_cell_size = trans_data.depths.bt_depths.depth_cell_size_m[:, in_transect_idx] + depth_cell_size = trans_data.depths.bt_depths.depth_cell_size_m[ + :, in_transect_idx + ] depth_cell_size[np.isnan(mag)] = np.nan mag_w = mag * depth_cell_size self.bt_flow_spd_mps = np.nansum(mag_w) / np.nansum(depth_cell_size) @@ -676,14 +776,16 @@ class MovingBedTests(object): self.bt_percent_mb = 0 # Compute percent invalid bottom track - self.percent_invalid_bt = (np.nansum(bt_valid == False) / len(bt_valid)) * 100 + self.percent_invalid_bt = ( + np.nansum(np.logical_not(bt_valid)) / len(bt_valid) + ) * 100 self.duration_sec = np.nansum(ens_duration) # Compute test using GPS self.compute_mb_gps() # Store selected test characteristics - if ref == 'BT': + if ref == "BT": self.mb_spd_mps = self.bt_mb_spd_mps self.dist_us_m = self.bt_dist_us_m self.percent_mb = self.bt_percent_mb @@ -696,67 +798,91 @@ class MovingBedTests(object): self.mb_dir = self.gps_mb_dir self.flow_spd_mps = self.bt_flow_spd_mps - self.near_bed_speed_mps = np.sqrt(np.nanmean(nb_u)**2 + np.nanmean(nb_v)**2) + self.near_bed_speed_mps = np.sqrt( + np.nanmean(nb_u) ** 2 + np.nanmean(nb_v) ** 2 + ) self.stationary_us_track = bt_up_strm_dist_cum self.stationary_cs_track = bt_cs_strm_dist_cum self.stationary_mb_vel = mb_vel # Quality check - self.test_quality = 'Good' + self.test_quality = "Good" # Check duration if self.duration_sec < 299: - self.messages.append('WARNING - Duration of stationary test is less than 5 minutes') - self.test_quality = 'Warnings' - + self.messages.append( + "WARNING - Duration of stationary test is less than 5 " "minutes" + ) + self.test_quality = "Warnings" + + # Check validity of mean moving-bed velocity if self.duration_sec > 60: mb_vel_std = np.nanstd(mb_vel[-30:], ddof=1) cov = mb_vel_std / mb_vel[-1] if cov > 0.25 and mb_vel_std > 0.03: - self.messages.append('WARNING - Moving-bed velocity may not be consistent. ' - + 'Average maybe inaccurate.') - self.test_quality = 'Warnings' - + self.messages.append( + "WARNING - Moving-bed velocity may not be consistent. " + + "Average may be inaccurate."
+ ) + self.test_quality = "Warnings" + # Check percentage of invalid BT data if np.nansum(ens_duration[valid_bt_vel_up_strm]) <= 120: - - self.messages.append('ERROR - Total duration of valid BT data is insufficient for a valid test.') - self.test_quality = 'Errors' - self.moving_bed = 'Unknown' + + self.messages.append( + "ERROR - Total duration of valid BT data is insufficient " + "for a valid test." + ) + self.test_quality = "Errors" + self.moving_bed = "Unknown" elif self.percent_invalid_bt > 10: - self.messages.append('WARNING - Number of ensembles with invalid bottom track exceeds 10%') - self.test_quality = 'Warnings' - + self.messages.append( + "WARNING - Number of ensembles with invalid bottom track " + "exceeds 10%" + ) + self.test_quality = "Warnings" + # Determine if the test indicates a moving bed - if self.test_quality != 'Errors': + if self.test_quality != "Errors": if self.percent_mb >= 1: - self.moving_bed = 'Yes' + self.moving_bed = "Yes" else: - self.moving_bed = 'No' + self.moving_bed = "No" # Notify of differences in results of test between BT and GPS if not np.isnan(self.gps_percent_mb): if np.abs(self.bt_percent_mb - self.gps_percent_mb) > 2: - self.messages.append('WARNING - Bottom track and GPS results differ by more than 2%.') - self.test_quality = 'Warnings' - - if np.logical_xor(self.bt_percent_mb >= 1, self.gps_percent_mb >= 1): - self.messages.append('WARNING - Bottom track and GPS results do not agree.') - self.test_quality = 'Warnings' + self.messages.append( + "WARNING - Bottom track and GPS results differ by " + "more than 2%." + ) + self.test_quality = "Warnings" + + if np.logical_xor(self.bt_percent_mb >= 1, self.gps_percent_mb >= 1): + self.messages.append( + "WARNING - Bottom track and GPS results do not agree." + ) + self.test_quality = "Warnings" else: - self.messages.append('ERROR - Stationary moving-bed test has no valid bottom track data.') - self.test_quality = 'Errors' - self.moving_bed = 'Unknown' - self.duration_sec = np.nansum(trans_data.date_time.ens_duration_sec[in_transect_idx]) + self.messages.append( + "ERROR - Stationary moving-bed test has no valid bottom " "track data." + ) + self.test_quality = "Errors" + self.moving_bed = "Unknown" + self.duration_sec = np.nansum( + trans_data.date_time.ens_duration_sec[in_transect_idx] + ) self.percent_invalid_bt = 100 def compute_mb_gps(self): - """Computes moving-bed data using GPS. 
- """ + """Computes moving-bed data using GPS.""" if np.isnan(self.flow_dir): - u_water = np.nanmean(self.transect.w_vel.u_processed_mps[:, self.transect.in_transect_idx]) - v_water = np.nanmean(self.transect.w_vel.v_processed_mps[:, self.transect.in_transect_idx]) + u_water = np.nanmean( + self.transect.w_vel.u_processed_mps[:, self.transect.in_transect_idx] + ) + v_water = np.nanmean( + self.transect.w_vel.v_processed_mps[:, self.transect.in_transect_idx] + ) self.flow_dir = np.arctan2(u_water, v_water) * 180 / np.pi if self.flow_dir < 0: self.flow_dir = self.flow_dir + 360 @@ -764,14 +890,16 @@ class MovingBedTests(object): gps_bt = None # Use GGA data if available and VTG is GGA is not available if self.transect.boat_vel.gga_vel is not None: - gps_bt = TransectData.compute_gps_bt(self.transect, gps_ref='gga_vel') + gps_bt = TransectData.compute_gps_bt(self.transect, gps_ref="gga_vel") elif self.transect.boat_vel.vtg_vel is not None: - gps_bt = TransectData.compute_gps_bt(self.transect, gps_ref='vtg_vel') + gps_bt = TransectData.compute_gps_bt(self.transect, gps_ref="vtg_vel") if gps_bt is not None and len(gps_bt) > 0: - self.gps_dist_us_m = gps_bt['mag'] - self.gps_mb_dir = gps_bt['dir'] + self.gps_dist_us_m = gps_bt["mag"] + self.gps_mb_dir = gps_bt["dir"] self.gps_mb_spd_mps = self.gps_dist_us_m / self.duration_sec - self.gps_flow_spd_mps = self.bt_flow_spd_mps - self.bt_mb_spd_mps + self.gps_mb_spd_mps + self.gps_flow_spd_mps = ( + self.bt_flow_spd_mps - self.bt_mb_spd_mps + self.gps_mb_spd_mps + ) self.gps_percent_mb = (self.gps_mb_spd_mps / self.gps_flow_spd_mps) * 100 def magvar_change(self, magvar, old_magvar): @@ -785,12 +913,13 @@ class MovingBedTests(object): Existing magvar """ - if self.transect.sensors.heading_deg.selected == 'internal': + if self.transect.sensors.heading_deg.selected == "internal": magvar_change = magvar - old_magvar self.bt_mb_dir = self.bt_mb_dir + magvar_change self.flow_dir = self.flow_dir + magvar_change - # Recompute moving-bed tests with GPS and set results using existing reference + # Recompute moving-bed tests with GPS and set results using + # existing reference self.compute_mb_gps() self.change_ref(self.ref) @@ -805,12 +934,13 @@ class MovingBedTests(object): Existing h_offset """ - if self.transect.sensors.heading_deg.selected == 'external': + if self.transect.sensors.heading_deg.selected == "external": h_offset_change = h_offset - old_h_offset self.bt_mb_dir = self.bt_mb_dir + h_offset_change self.flow_dir = self.flow_dir + h_offset_change - # Recompute moving-bed tests with GPS and set results using existing reference + # Recompute moving-bed tests with GPS and set results using + # existing reference self.compute_mb_gps() self.change_ref(self.ref) @@ -823,60 +953,63 @@ class MovingBedTests(object): Defines specified reference (BT or GPS) """ - if ref == 'BT': + if ref == "BT": self.mb_spd_mps = self.bt_mb_spd_mps self.dist_us_m = self.bt_dist_us_m self.percent_mb = self.bt_percent_mb self.mb_dir = self.bt_mb_dir self.flow_spd_mps = self.bt_flow_spd_mps - self.ref = 'BT' + self.ref = "BT" check_mb = True - if self.test_quality != 'Errors': - if self.type == 'Loop': + if self.test_quality != "Errors": + if self.type == "Loop": if self.mb_spd_mps <= 0.012: check_mb = False - self.moving_bed = 'No' + self.moving_bed = "No" else: if 135 < np.abs(self.flow_dir - self.mb_dir) < 225: check_mb = True else: check_mb = False - self.moving_bed = 'Unknown' + self.moving_bed = "Unknown" if check_mb: if self.percent_mb > 1: - self.moving_bed = 'Yes' + 
self.moving_bed = "Yes" else: - self.moving_bed = 'No' + self.moving_bed = "No" else: - self.moving_bed = 'Unknown' - elif ref == 'GPS': + self.moving_bed = "Unknown" + elif ref == "GPS": self.mb_spd_mps = self.gps_mb_spd_mps self.dist_us_m = self.gps_dist_us_m self.percent_mb = self.gps_percent_mb self.mb_dir = self.gps_mb_dir self.flow_spd_mps = self.gps_flow_spd_mps - self.ref = 'GPS' + self.ref = "GPS" check_mb = True - if self.test_quality != 'Errors': - if self.type == 'Loop': + if self.test_quality != "Errors": + if self.type == "Loop": if self.mb_spd_mps <= 0.012: check_mb = False - self.moving_bed = 'No' + self.moving_bed = "No" else: if 135 < np.abs(self.flow_dir - self.mb_dir) < 225: check_mb = True else: check_mb = False - self.messages.append('ERROR: GPS Loop closure error not in upstream direction. ' - + 'REPEAT LOOP or USE STATIONARY TEST') - self.moving_bed = 'Unknown' + self.messages.append( + "ERROR: GPS Loop closure error not in " + "upstream direction. " + + "REPEAT LOOP or USE STATIONARY TEST" + ) + self.moving_bed = "Unknown" if check_mb: if self.percent_mb > 1: - self.moving_bed = 'Yes' + self.moving_bed = "Yes" else: - self.moving_bed = 'No' + self.moving_bed = "No" else: - self.moving_bed = 'Unknown' + self.moving_bed = "Unknown" @staticmethod def near_bed_velocity(u, v, depth, bin_depth): @@ -921,7 +1054,7 @@ class MovingBedTests(object): # Compute near bed velocity for each ensemble for n in range(n_ensembles): - idx = np.where(np.isnan(u[:, n]) == False) + idx = np.where(np.logical_not(np.isnan(u[:, n]))) if len(idx[-1]) > 0: if len(idx[-1]) > 0: idx = idx[-1][-2::] @@ -931,8 +1064,12 @@ class MovingBedTests(object): z_depth[n] = depth[n] - np.nanmean(bin_depth[idx, n], 0) u_mean[n] = np.nanmean(u[idx, n], 0) v_mean[n] = np.nanmean(v[idx, n], 0) - nb_u[n] = (u_mean[n] / z_depth[n] ** (1. / 6.)) * (z_near_bed[n] ** (1. / 6.)) - nb_v[n] = (v_mean[n] / z_depth[n] ** (1. / 6.)) * (z_near_bed[n] ** (1. / 6.)) + nb_u[n] = (u_mean[n] / z_depth[n] ** (1.0 / 6.0)) * ( + z_near_bed[n] ** (1.0 / 6.0) + ) + nb_v[n] = (v_mean[n] / z_depth[n] ** (1.0 / 6.0)) * ( + z_near_bed[n] ** (1.0 / 6.0) + ) speed_near_bed[n] = np.sqrt(nb_u[n] ** 2 + nb_v[n] ** 2) unit_nbu[n] = nb_u[n] / speed_near_bed[n] unit_nbv[n] = nb_v[n] / speed_near_bed[n] @@ -942,7 +1079,8 @@ class MovingBedTests(object): @staticmethod def auto_use_2_correct(moving_bed_tests, boat_ref=None): """Apply logic to determine which moving-bed tests should be used - for correcting bottom track referenced discharges with moving-bed conditions. + for correcting bottom track referenced discharges with moving-bed + conditions. 
Parameters ---------- @@ -969,33 +1107,37 @@ class MovingBedTests(object): test.use_2_correct = False test.selected = False # Valid test according to user - lidx_user.append(test.user_valid == True) + lidx_user.append(test.user_valid) # Valid test according to quality assessment - lidx_no_errors.append(test.test_quality != 'Errors') + lidx_no_errors.append(test.test_quality != "Errors") # Identify type of test test_type.append(test.type) - lidx_stationary.append(test.type == 'Stationary') - lidx_loop.append(test.type == 'Loop') + lidx_stationary.append(test.type == "Stationary") + lidx_loop.append(test.type == "Loop") flow_speed.append(test.flow_spd_mps) # Combine - lidx_valid_loop = np.all(np.vstack((lidx_user, lidx_no_errors, lidx_loop)), 0) - lidx_valid_stationary = np.all(np.vstack((lidx_user, lidx_no_errors, lidx_stationary)), 0) + lidx_valid_loop = np.all( + np.vstack((lidx_user, lidx_no_errors, lidx_loop)), 0 + ) + lidx_valid_stationary = np.all( + np.vstack((lidx_user, lidx_no_errors, lidx_stationary)), 0 + ) # Check flow speed lidx_flow_speed = np.array(flow_speed) > 0.25 # Determine if there are valid loop tests - # This is the code in matlab but I don't think it is correct. I the valid loop should also have a valid - # flow speed, if not then a stationary test, if available could be used. - lidx_loops_2_select = np.all(np.vstack((lidx_flow_speed, lidx_valid_loop)), 0) + lidx_loops_2_select = np.all( + np.vstack((lidx_flow_speed, lidx_valid_loop)), 0 + ) if np.any(lidx_loops_2_select): # Select last loop idx_select = np.where(lidx_loops_2_select)[0][-1] test_select = moving_bed_tests[idx_select] test_select.selected = True - if test_select.moving_bed == 'Yes': + if test_select.moving_bed == "Yes": test_select.use_2_correct = True # If there are no valid loop look for valid stationary tests @@ -1004,23 +1146,26 @@ class MovingBedTests(object): for n, lidx in enumerate(lidx_valid_stationary): if lidx: moving_bed_tests[n].selected = True - # Determine if any stationary test resulted in a moving bed - if moving_bed_tests[n].moving_bed == 'Yes': + # Determine if any stationary test resulted in a + # moving bed + if moving_bed_tests[n].moving_bed == "Yes": moving_bed.append(True) else: moving_bed.append(False) - # If any stationary test shows a moving-bed use all valid stationary test to correct BT discharge + # If any stationary test shows a moving-bed use all valid + # stationary test to correct BT discharge if any(moving_bed) > 0: for n, test in enumerate(moving_bed_tests): if lidx_valid_stationary[n]: test.use_2_correct = True - # If the flow speed is too low but there are not valid stationary tests use the last loop test. + # If the flow speed is too low but there are not valid stationary + # tests use the last loop test. elif np.any(lidx_valid_loop): # Select last loop idx_select = np.where(lidx_valid_loop)[0][-1] moving_bed_tests[idx_select].selected = True - if moving_bed_tests[idx_select].moving_bed == 'Yes': + if moving_bed_tests[idx_select].moving_bed == "Yes": moving_bed_tests[idx_select].use_2_correct = True # If the navigation reference for discharge computations is set @@ -1028,11 +1173,11 @@ class MovingBedTests(object): # selected test should be used to determine if there is a valid # moving-bed and a moving-bed condition. 
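Note: the selection above reduces to stacking per-test boolean masks. A condensed sketch of the loop-selection rule; the sample arrays are illustrative stand-ins for the flags gathered from the MovingBedTests objects, and the full fallback chain to stationary tests is omitted:

    import numpy as np

    # One entry per moving-bed test, as gathered in the loop above.
    user_valid = np.array([True, True, True])
    no_errors = np.array([True, False, True])   # test_quality != "Errors"
    is_loop = np.array([True, True, False])
    flow_spd_mps = np.array([0.30, 0.40, 0.20])

    # A loop is selectable when the user accepted it, QA found no errors,
    # and the mean flow speed exceeds 0.25 m/s.
    valid_loop = np.all(np.vstack((user_valid, no_errors, is_loop)), 0)
    loops_2_select = np.logical_and(valid_loop, flow_spd_mps > 0.25)

    if np.any(loops_2_select):
        idx_select = np.where(loops_2_select)[0][-1]  # last qualifying loop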
if boat_ref is None: - ref = 'BT' + ref = "BT" else: ref = boat_ref - if ref != 'BT': + if ref != "BT": for test in moving_bed_tests: test.use_2_correct = False return moving_bed_tests diff --git a/Classes/MultiThread.py b/Classes/MultiThread.py index 51d4580..c1543ac 100644 --- a/Classes/MultiThread.py +++ b/Classes/MultiThread.py @@ -7,15 +7,14 @@ import threading class MultiThread(threading.Thread): - def __init__(self, thread_id, function, args=None): threading.Thread.__init__(self) self.thread_id = thread_id self.function = function self.args = args - + def run(self): - + if self.args is not None: self.function(**self.args) else: diff --git a/Classes/NormData.py b/Classes/NormData.py index 9a35a84..2ce28e8 100644 --- a/Classes/NormData.py +++ b/Classes/NormData.py @@ -1,8 +1,14 @@ import warnings import numpy as np import scipy.stats as sp -from MiscLibs.common_functions import cart2pol, pol2cart, nan_less, nan_less_equal, nan_greater -from Classes.QComp import QComp +from MiscLibs.common_functions import ( + cart2pol, + pol2cart, + nan_less, + nan_less_equal, + nan_greater, +) + class NormData(object): """Class creates normalized depth and unit discharge or velocity. @@ -39,33 +45,44 @@ class NormData(object): Index of median values with point count greater than threshold cutoff weights: np.array(float) Discharge based weights for computing a weighted median - use_weights: bool - Specifies if discharge weighted medians are to be used in the extrapolation fit + use_weighted: bool + Specifies if discharge weighted medians are to be used in the + extrapolation fit sub_from_left: bool - Specifies if when subsectioning the subsection should start from left to right. + Specifies if when subsectioning the subsection should start from + left to right. 
use_q: bool Specifies to use the discharge rather than the xprod when subsectioning """ - + def __init__(self): """Creates object and initializes instance variables.""" - self.file_name = None # Name of transect file - self.cell_depth_normalized = None # Normalized depth of cell - self.unit_normalized = None # Normalized discharge or velocity for all depth cells - self.unit_normalized_med = None # Median of normalized data within 5% partitions - self.unit_normalized_no = None # Number of data points in each median - self.unit_normalized_z = None # Relative depth for each median (5% increments) - self.unit_normalized_25 = None # Value for which 25% of normalized values are smaller - self.unit_normalized_75 = None # Value for which 75% or normalized values are larger - self.data_type = 'q' # Type of data (v, q, V, or Q) - self.data_extent = None # Defines percent of data from start of transect to use, default [0, 100] - self.valid_data = np.array([]) # Index of median values with point count greater than threshold cutoff + self.file_name = None + self.cell_depth_normalized = None + self.unit_normalized = None + self.unit_normalized_med = None + self.unit_normalized_no = None + self.unit_normalized_z = None + self.unit_normalized_25 = None + self.unit_normalized_75 = None + self.data_type = "q" + self.data_extent = None + self.valid_data = np.array([]) self.weights = np.array([]) self.use_weighted = True self.sub_from_left = False self.use_q = False - - def populate_data(self, transect, data_type, threshold, data_extent=None, use_weighted=True, sub_from_left=True, use_q=True): + + def populate_data( + self, + transect, + data_type, + threshold, + data_extent=None, + use_weighted=True, + sub_from_left=True, + use_q=True, + ): """Computes the normalized values for a single transect. Parameters @@ -75,9 +92,20 @@ class NormData(object): data_type: str Type of data (v, q, V, or Q) threshold: int - Number of data points in an increment for the increment to be valid. + Number of data points in an increment for the increment to be + valid. data_extent: list - Defines percent of data from start of transect to use, default [0, 100] + Defines percent of data from start of transect to use, default [ + 0, 100] + use_weighted: bool + Specifies if discharge weighted medians are to be used in the + extrapolation fit + sub_from_left: bool + Specifies if when subsectioning the subsection should start from + left to right. 
+ use_q: bool + Specifies to use the discharge rather than the xprod when + subsectioning use_weighted: bool Specifies if discharge weighted medians are to be used in the extrapolation fit sub_from_left: bool @@ -86,13 +114,14 @@ class NormData(object): Specifies to use the discharge rather than the xprod when subsectioning """ - # If the data extent is not defined set data_extent to zero to trigger all data to be used + # If the data extent is not defined set data_extent to zero to + # trigger all data to be used if data_extent is None: data_extent = [0, 100] self.sub_from_left = sub_from_left self.use_q = use_q - + # Get data copies to avoid changing original data filename = transect.file_name in_transect_idx = transect.in_transect_idx @@ -100,13 +129,15 @@ class NormData(object): depths_selected = getattr(transect.depths, transect.depths.selected) cell_depth = np.copy(depths_selected.depth_cell_depth_m[:, in_transect_idx]) cells_above_sl = transect.w_vel.cells_above_sl[:, in_transect_idx] - cell_depth[cells_above_sl == False] = np.nan + cell_depth[np.logical_not(cells_above_sl)] = np.nan depth_ens = np.copy(depths_selected.depth_processed_m[in_transect_idx]) w_vel_x = np.copy(transect.w_vel.u_processed_mps[:, in_transect_idx]) w_vel_y = np.copy(transect.w_vel.v_processed_mps[:, in_transect_idx]) - invalid_data = np.logical_not(transect.w_vel.valid_data[0, :, in_transect_idx]).T + invalid_data = np.logical_not( + transect.w_vel.valid_data[0, :, in_transect_idx] + ).T w_vel_x[invalid_data] = np.nan w_vel_y[invalid_data] = np.nan @@ -115,17 +146,26 @@ class NormData(object): bt_vel_x = np.copy(boat_select.u_processed_mps[in_transect_idx]) bt_vel_y = np.copy(boat_select.v_processed_mps[in_transect_idx]) else: - bt_vel_x = np.tile([np.nan], transect.boat_vel.bt_vel.u_processed_mps[in_transect_idx].shape) - bt_vel_y = np.tile([np.nan], transect.boat_vel.bt_vel.u_processed_mps[in_transect_idx].shape) + bt_vel_x = np.tile( + [np.nan], + transect.boat_vel.bt_vel.u_processed_mps[in_transect_idx].shape, + ) + bt_vel_y = np.tile( + [np.nan], + transect.boat_vel.bt_vel.u_processed_mps[in_transect_idx].shape, + ) # Compute discharges xprod = np.multiply(w_vel_x, bt_vel_y) - np.multiply(w_vel_y, bt_vel_x) cell_size = depths_selected.depth_cell_size_m delta_t = transect.date_time.ens_duration_sec[in_transect_idx] - q = np.multiply(xprod[:, in_transect_idx] * cell_size[:, in_transect_idx], delta_t) + q = np.multiply( + xprod[:, in_transect_idx] * cell_size[:, in_transect_idx], delta_t + ) q_ens = np.nansum(q, 0) - # Ensure all elements of xprod can be used to compute q (have a delta_t), first ensemble has no delta_t + # Ensure all elements of xprod can be used to compute q (have a + # delta_t), first ensemble has no delta_t idx_invalid = np.where(np.isnan(delta_t))[0] xprod[:, idx_invalid] = np.nan @@ -143,14 +183,16 @@ class NormData(object): norm_cell_depth[nan_less(norm_cell_depth, 0)] = np.nan # If data type is discharge compute unit discharge for each cell - if data_type.lower() == 'q': + if data_type.lower() == "q": # Compute the cross product for each cell unit = xprod else: w_vel_x = np.copy(transect.w_vel.u_processed_mps[:, in_transect_idx]) w_vel_y = np.copy(transect.w_vel.v_processed_mps[:, in_transect_idx]) - invalid_data = np.logical_not(transect.w_vel.valid_data[0, :, in_transect_idx]).T + invalid_data = np.logical_not( + transect.w_vel.valid_data[0, :, in_transect_idx] + ).T w_vel_x[invalid_data] = np.nan w_vel_y[invalid_data] = np.nan @@ -162,12 +204,14 @@ class NormData(object): 
direction, _ = cart2pol(w_vel_mean_1, w_vel_mean_2) unit_vec_1, unit_vec_2 = pol2cart(direction, 1) unit_vec = np.vstack([unit_vec_1, unit_vec_2]) - - # Compute the velocity magnitude in the direction of the mean velocity of each - # ensemble using the dot product and unit vector + + # Compute the velocity magnitude in the direction of the mean + # velocity of each ensemble using the dot product and unit vector unit = np.tile([np.nan], w_vel_x.shape) for i in range(w_vel_x.shape[0]): - unit[i, :] = np.sum(np.vstack([w_vel_x[i, :], w_vel_y[i, :]]) * unit_vec, 0) + unit[i, :] = np.sum( + np.vstack([w_vel_x[i, :], w_vel_y[i, :]]) * unit_vec, 0 + ) # Discharge weighting of velocity data is not permitted use_weighted = False @@ -176,7 +220,7 @@ class NormData(object): unit_total = np.nansum(np.nansum(unit), 0) if unit_total < 0: unit *= -1 - + # Compute normalize unit values with warnings.catch_warnings(): warnings.simplefilter("ignore", category=RuntimeWarning) @@ -185,8 +229,9 @@ class NormData(object): # Apply extents if they have been specified if data_extent[0] != 0 or data_extent[1] != 100: if use_q: - # Adjust cumulative sum direction based on start bank so that cumsum is always from left to right - if transect.start_edge == 'Right' and sub_from_left: + # Adjust cumulative sum direction based on start bank so + # that cumsum is always from left to right + if transect.start_edge == "Right" and sub_from_left: q_ens_flipped = np.flip(q_ens) q_cum = np.nancumsum(q_ens_flipped) q_max = q_cum[-1] @@ -206,33 +251,20 @@ class NormData(object): # Apply extents unit_left = q_max * data_extent[0] / 100 unit_right = q_max * data_extent[1] / 100 - idx_extent = np.where(np.logical_and(np.greater(q_cum, unit_left), - np.less(q_cum, unit_right)))[0] - # if data_type.lower() == 'v': - # # Unit discharge is computed here because the unit norm could be based on velocity - # unit = np.multiply(w_vel_x, bt_vel_y) - np.multiply(w_vel_y, bt_vel_x) - # unit_ens = np.nansum(unit, 0) - # unit_total = np.nancumsum(unit_ens) - # - # # Adjust so total discharge is positive - # if unit_total[-1] < 0: - # unit_total *= -1 - - # # Apply extents - # unit_lower = unit_total[-1] * data_extent[0] / 100 - # unit_upper = unit_total[-1] * data_extent[1] / 100 - # idx_extent = np.where(np.logical_and(np.greater(unit_total, unit_lower), - # np.less(unit_total, unit_upper)))[0] + idx_extent = np.where( + np.logical_and(np.greater(q_cum, unit_left), np.less(q_cum, unit_right)) + )[0] + unit_norm = unit_norm[:, idx_extent] norm_cell_depth = norm_cell_depth[:, idx_extent] weights = weights[:, idx_extent] - + # If whole profile is negative make positive idx_neg1 = np.tile([np.nan], [unit_norm.shape[1], 1]) idx_neg2 = np.tile([np.nan], [unit_norm.shape[1], 1]) for c in range(unit_norm.shape[1]): idx_neg1[c] = len(np.where(nan_less(unit_norm[:, c], 0))[0]) - idx_neg2[c] = len(np.where(np.isnan(unit_norm[:, c]) == False)[0]) + idx_neg2[c] = len(np.where(np.logical_not(np.isnan(unit_norm[:, c])))[0]) idx_neg = np.squeeze(idx_neg1) == np.squeeze(idx_neg2) unit_norm[:, idx_neg] = unit_norm[:, idx_neg] * -1 @@ -248,21 +280,21 @@ class NormData(object): @staticmethod def qrev_mat_in(mat_data): - """Processes the Matlab data structure to obtain a list of NormData objects containing transect - data from the Matlab data structure. + """Processes the Matlab data structure to obtain a list of NormData + objects containing transect data from the Matlab data structure. 
- Parameters - ---------- - mat_data: mat_struct - Matlab data structure obtained from sio.loadmat + Parameters + ---------- + mat_data: mat_struct + Matlab data structure obtained from sio.loadmat - Returns - ------- - norm_data: list - List of NormData objects - """ + Returns + ------- + norm_data: list + List of NormData objects + """ norm_data = [] - if hasattr(mat_data, 'normData'): + if hasattr(mat_data, "normData"): for data in mat_data.normData: temp = NormData() temp.populate_from_qrev_mat(data) @@ -270,7 +302,8 @@ class NormData(object): return norm_data def populate_from_qrev_mat(self, mat_data): - """Populates the object using data from previously saved QRev Matlab file. + """Populates the object using data from previously saved QRev Matlab + file. Parameters ---------- @@ -289,15 +322,15 @@ class NormData(object): self.data_type = mat_data.dataType self.data_extent = mat_data.dataExtent self.valid_data = mat_data.validData - 1 - if hasattr(mat_data, 'use_weighted'): + if hasattr(mat_data, "use_weighted"): self.use_weighted = mat_data.use_weighted self.weights = mat_data.weights else: self.use_weighted = False self.weights = None - if hasattr(mat_data, 'use_q'): + if hasattr(mat_data, "use_q"): self.use_q = mat_data.use_q - if hasattr(mat_data, 'sub_from_left'): + if hasattr(mat_data, "sub_from_left"): self.sub_from_left = mat_data.sub_from_left def compute_stats(self, threshold): @@ -306,11 +339,12 @@ class NormData(object): Parameters ---------- threshold: int - Number of data points in an increment for the increment to be valid. + Number of data points in an increment for the increment to be + valid. """ # Set averaging interval - avg_interval = np.arange(0, 1.05, .05) + avg_interval = np.arange(0, 1.05, 0.05) # Initialize variables to nan unit_norm_med = np.tile([np.nan], len(avg_interval) - 1) @@ -322,26 +356,37 @@ class NormData(object): # Process each normalized increment for i in range(len(avg_interval) - 1): condition_1 = nan_greater(self.cell_depth_normalized, avg_interval[i]) - condition_2 = nan_less_equal(self.cell_depth_normalized, avg_interval[i + 1]) + condition_2 = nan_less_equal( + self.cell_depth_normalized, avg_interval[i + 1] + ) condition_3 = np.logical_not(np.isnan(self.unit_normalized)) - condition_all = np.logical_and(np.logical_and(condition_1, condition_2), condition_3) + condition_all = np.logical_and( + np.logical_and(condition_1, condition_2), condition_3 + ) if np.any(condition_all): - if self.data_type.lower() == 'q' and self.use_weighted: - results = self.weighted_quantile(self.unit_normalized[condition_all], - quantiles=[0.25, 0.5, 0.75], - sample_weight=self.weights[condition_all]) + if self.data_type.lower() == "q" and self.use_weighted: + results = self.weighted_quantile( + self.unit_normalized[condition_all], + quantiles=[0.25, 0.5, 0.75], + sample_weight=self.weights[condition_all], + ) unit_25[i] = results[0] unit_norm_med[i] = results[1] unit_75[i] = results[2] else: - unit_25[i], unit_norm_med[i], unit_75[i] = sp.mstats.mquantiles(self.unit_normalized[condition_all], - alphap=0.5, betap=0.5) + unit_25[i], unit_norm_med[i], unit_75[i] = sp.mstats.mquantiles( + self.unit_normalized[condition_all], alphap=0.5, betap=0.5 + ) - unit_norm_med_no[i] = np.sum(np.isnan(self.unit_normalized[condition_all]) == False) + unit_norm_med_no[i] = np.sum( + np.logical_not(np.isnan(self.unit_normalized[condition_all])) + ) avgz[i] = 1 - np.nanmean(self.cell_depth_normalized[condition_all]) # Mark increments invalid if they do not have sufficient data - 
cutoff = np.nanmedian(unit_norm_med_no[nan_greater(unit_norm_med_no, 0)]) * (threshold / 100)
+        cutoff = np.nanmedian(unit_norm_med_no[nan_greater(unit_norm_med_no, 0)]) * (
+            threshold / 100
+        )
         self.valid_data = np.where(nan_greater(unit_norm_med_no, cutoff))[0]

         self.unit_normalized_med = unit_norm_med
@@ -352,7 +397,7 @@

     @staticmethod
     def weighted_quantile(values, quantiles, sample_weight):
-        """ Very close to numpy.percentile, but supports weights.
+        """Very close to numpy.percentile, but supports weights.
         NOTE: quantiles should be in [0, 1]!

         Parameters
@@ -394,7 +439,8 @@
         norm_data: list
             List of objects of NormData
         threshold: int
-            Number of data points in an increment for the increment to be valid.
+            Number of data points in an increment for the increment to be
+            valid.
         """

         # Initialize lists
@@ -415,21 +461,25 @@
         self.unit_normalized = np.tile([np.nan], (max_cells, sum_ens[-1]))
         self.cell_depth_normalized = np.tile([np.nan], (max_cells, sum_ens[-1]))
         self.weights = np.tile([np.nan], (max_cells, sum_ens[-1]))

         # Process each transect using data from only the checked transects
         for n in range(len(transects)):
             if transects[n].checked:
-                self.unit_normalized[:n_cells[n], np.arange(sum_ens[n], sum_ens[n + 1])] \
-                    = norm_data[n].unit_normalized
-                self.weights[:n_cells[n], np.arange(sum_ens[n], sum_ens[n + 1])] \
-                    = norm_data[n].weights
-                self.cell_depth_normalized[:n_cells[n], np.arange(sum_ens[n], sum_ens[n + 1])] \
-                    = norm_data[n].cell_depth_normalized
+                self.unit_normalized[
+                    : n_cells[n], np.arange(sum_ens[n], sum_ens[n + 1])
+                ] = norm_data[n].unit_normalized
+                self.weights[
+                    : n_cells[n], np.arange(sum_ens[n], sum_ens[n + 1])
+                ] = norm_data[n].weights
+                self.cell_depth_normalized[
+                    : n_cells[n], np.arange(sum_ens[n], sum_ens[n + 1])
+                ] = norm_data[n].cell_depth_normalized
                 # if self.data_extent is None:
                 self.data_extent = norm_data[n].data_extent
                 self.data_type = norm_data[n].data_type
                 self.use_weighted = norm_data[n].use_weighted

         # Store data
-        self.file_name = 'Measurement'
+        self.file_name = "Measurement"
         self.compute_stats(threshold)
diff --git a/Classes/Oursin.py b/Classes/Oursin.py
index 263ed78..e98f193 100644
--- a/Classes/Oursin.py
+++ b/Classes/Oursin.py
@@ -1,5 +1,3 @@
-import time
-
 import pandas as pd
 import copy
 from Classes.QComp import QComp
@@ -7,7 +5,8 @@ from scipy.stats import t
 import numpy as np
 import math
 import scipy.stats
-from profilehooks import profile
+
+# from profilehooks import profile
 from MiscLibs.common_functions import cosd, sind
 from MiscLibs.bayes_cov_compiled import bayes_cov

@@ -20,13 +19,17 @@ class Oursin(object):
     bot_meth: list
         List that contains the method proposed by Extrap for each transect
     exp_95ic_min: list
-        List that contains the min range of 95% interval if power-power method is used for transect
+        List that contains the min range of 95% interval if power-power
+        method is used for transect
     exp_95ic_max: list
-        List that contains the max range of 95% interval if power-power method is used for transect
+        List that contains the max range of 95% interval if power-power
+        method is used for transect
     pp_exp: list
-        List that contains the power-power exponent computed by Extrap for Power-Power transect only
+        List that contains the power-power exponent computed by Extrap for
+        Power-Power transect only
     ns_exp: list
-        List that contains the no-slip exponent computed by Extrap for No-Slip method transect only
+        List that contains the no-slip exponent computed by Extrap for
+        No-Slip method transect only
     exp_pp_min: list
         Minimum power-power exponent used for simulating possible discharge
     exp_pp_max: list
@@ -36,83 +39,120 @@
     exp_ns_max: list
         Maximum no-slip exponent used for simulating possible discharge
     d_right_error_min: list
-        List that contains the minimum right distance (in m) used for simulating the discharge for each transect
+        List that contains the minimum right distance (in m) used for
+        simulating the discharge for each transect
     d_left_error_min: list
-        List that contains the minimum left distance (in m) used for simulating the discharge for each transect
+        List that contains the minimum left distance (in m) used for
+        simulating the discharge for each transect
     d_right_error_max: list
-        List that contains the maximum right distance (in m) used for simulating the discharge for each transect
+        List that contains the maximum right distance (in m) used for
+        simulating the discharge for each transect
     d_left_error_max: list
-        List that contains the maximum left distance (in m) used for simulating the discharge for each transect
+        List that contains the maximum left distance (in m) used for
+        simulating the discharge for each transect
     draft_error_list: list
-        List that contains the draft (in cm) used for simulating the discharge for each transect
+        List that contains the draft (in cm) used for simulating the
+        discharge for each transect
     u_syst_list: list
-        List that contains the computed systematic uncertainty (68%) for each transect
+        List that contains the computed systematic uncertainty (68%) for
+        each transect
     u_compass_list: list
-        List that contains the computed uncertainty (68%) due to compass error for each transect
+        List that contains the computed uncertainty (68%) due to compass
+        error for each transect
     u_meas_list: list
-        List that contains the computed measured area uncertainty (68%) for each transect
+        List that contains the computed measured area uncertainty (68%) for
+        each transect
     u_ens_list: list
-        List that contains the computed uncertainty (68%) due to limited number of ensemble for each transect
+        List that contains the computed uncertainty (68%) due to limited
+        number of ensembles for each transect
     u_movbed_list: list
-        List that contains the estimated uncertainty (68%) due to moving bed for each transect
+        List that contains the estimated uncertainty (68%) due to moving bed
+        for each transect
     u_invalid_water_list: list
-        List that contains the computed uncertainty (68%) due to invalid water velocities for each transect
+        List that contains the computed uncertainty (68%) due to invalid
+        water velocities for each transect
     u_invalid_boat_list: list
-        List that contains the computed uncertainty (68%) due to invalid boat velocities for each transect
+        List that contains the computed uncertainty (68%) due to invalid
+        boat velocities for each transect
     u_invalid_depth_list: list
-        List that contains the computed uncertainty (68%) due to invalid depths for each transect
+        List that contains the computed uncertainty (68%) due to invalid
+        depths for each transect
     u_top_list: list
-        List that contains the computed uncertainty (68%) due to top discharge extrapolation for each transect
+        List that contains the computed uncertainty (68%) due to top
+        discharge extrapolation for each transect
     u_bot_list: list
-        List that contains the computed uncertainty (68%) due to bottom discharge extrapolation for each transect
+        List that contains the computed uncertainty (68%) due to bottom
+        discharge extrapolation for each transect
     u_left_list: list
-        List that contains the computed uncertainty (68%) due to left discharge extrapolation for each transect
+        List that contains the computed uncertainty (68%) due to left
+        discharge extrapolation for each transect
     u_right_list: list
-        List that contains the computed uncertainty (68%) due to right discharge extrapolation for each transect
+        List that contains the computed uncertainty (68%) due to right
+        discharge extrapolation for each transect
     u_syst_mean_user_list: list
-        List that contains the user specified systematic uncertainty (68%) for each transect
+        List that contains the user specified systematic uncertainty (68%)
+        for each transect
     u_compass_user_list: list
-        List that contains user specified uncertainty (68%) due to compass error for each transect
+        List that contains user specified uncertainty (68%) due to compass
+        error for each transect
     u_meas_mean_user_list: list
-        List that contains the user specified measured area uncertainty (68%) for each transect
+        List that contains the user specified measured area uncertainty
+        (68%) for each transect
     u_ens_user_list: list
-        List that contains the user specified uncertainty (68%) due to limited number of ensemble for each transect
+        List that contains the user specified uncertainty (68%) due to
+        limited number of ensembles for each transect
     u_movbed_user_list: list
-        List that contains the user specified uncertainty (68%) due to moving bed for each transect
+        List that contains the user specified uncertainty (68%) due to
+        moving bed for each transect
     u_invalid_water_user_list: list
-        List that contains the user specified uncertainty (68%) due to invalid water velocities for each transect
+        List that contains the user specified uncertainty (68%) due to
+        invalid water velocities for each transect
     u_invalid_boat_user_list: list
-        List that contains the user specified uncertainty (68%) due to invalid boat velocities for each transect
+        List that contains the user specified uncertainty (68%) due to
+        invalid boat velocities for each transect
     u_invalid_depth_user_list: list
-        List that contains the user specified uncertainty (68%) due to invalid depths for each transect
+        List that contains the user specified uncertainty (68%) due to
+        invalid depths for each transect
     u_top_mean_user_list: list
-        List that contains the user specified uncertainty (68%) due to top discharge extrapolation for each transect
+        List that contains the user specified uncertainty (68%) due to top
+        discharge extrapolation for each transect
     u_bot_mean_user_list: list
-        List that contains the user specified uncertainty (68%) due to bottom discharge extrapolation for each transect
+        List that contains the user specified uncertainty (68%) due to bottom
+        discharge extrapolation for each transect
     u_left_mean_user_list: list
-        List that contains the user specified uncertainty (68%) due to left discharge extrapolation for each transect
+        List that contains the user specified uncertainty (68%) due to left
+        discharge extrapolation for each transect
     u_right_mean_user_list: list
-        List that contains the user specified uncertainty (68%) due to right discharge extrapolation for each transect
+        List that contains the user specified uncertainty (68%) due to right
+        discharge extrapolation for each transect
     cov_68: float
         Computed uncertainty (68%) due to coefficient of variation
     sim_original: DataFrame
         Discharges (total, and subareas) computed for the processed discharge
    sim_extrap_pp_16: DataFrame
-        Discharges (total, and subareas) computed using power fit with 1/6th exponent
+        Discharges (total, and subareas) computed using power fit with 1/6th
+        exponent
     sim_extrap_pp_min: DataFrame
-        Discharges (total, and subareas) computed using power fit with minimum exponent
+        Discharges (total, and subareas) computed using power fit with
+        minimum exponent
     sim_extrap_pp_max: DataFrame
-        Discharges (total, and subareas) computed using power fit with maximum exponent
+        Discharges (total, and subareas) computed using power fit with
+        maximum exponent
     sim_extrap_cns_16: DataFrame
-        Discharges (total, and subareas) computed using constant no slip with 1/6th exponent
+        Discharges (total, and subareas) computed using constant no slip
+        with 1/6th exponent
     sim_extrap_cns_min: DataFrame
-        Discharges (total, and subareas) computed using constant no slip with minimum exponent
+        Discharges (total, and subareas) computed using constant no slip
+        with minimum exponent
     sim_extrap_cns_max: DataFrame
-        Discharges (total, and subareas) computed using constant no slip with maximum exponent
+        Discharges (total, and subareas) computed using constant no slip
+        with maximum exponent
     sim_extrap_3pns_16: DataFrame
-        Discharges (total, and subareas) computed using 3pt no slip with 1/6the exponent
+        Discharges (total, and subareas) computed using 3pt no slip with
+        1/6th exponent
     sim_extrap_3pns_opt: DataFrame
-        Discharges (total, and subareas) computed using 3pt no slip with optimized exponent
+        Discharges (total, and subareas) computed using 3pt no slip with
+        optimized exponent
     sim_edge_min: DataFrame
         Discharges (total, and subareas) computed using minimum edge q
     sim_edge_max: DataFrame
@@ -122,15 +162,20 @@
     sim_draft_max: DataFrame
         Discharges (total, and subareas) computed using maximum draft
     sim_cells_trdi: DataFrame
-        Discharges (total, and subareas) computed using TRDI method for invalid cells
+        Discharges (total, and subareas) computed using TRDI method for
+        invalid cells
     sim_cells_above: DataFrame
-        Discharges (total, and subareas) computed using cells above for invalid cells
+        Discharges (total, and subareas) computed using cells above for
+        invalid cells
     sim_cells_below: DataFrame
-        Discharges (total, and subareas) computed using cells below for invalid cells
+        Discharges (total, and subareas) computed using cells below for
+        invalid cells
     sim_cells_before: DataFrame
-        Discharges (total, and subareas) computed for using cells before for invalid cells
+        Discharges (total, and subareas) computed using cells before for
+        invalid cells
     sim_cells_after: DataFrame
-        Discharges (total, and subareas) computed for using cells before for invalid cells
+        Discharges (total, and subareas) computed using cells after for
+        invalid cells
     nb_transects: float
         Number of transects used
     checked_idx: list
@@ -154,9 +199,11 @@
     left_edge_dist_prct_user: float
         User specified percent error in left edge distance
     gga_boat_user: float
-        User specified standard deviation of boat velocities based on gga in m/s
+        User specified standard deviation of boat velocities based on
+        gga in m/s
     vtg_boat_user: float
-        User specified standard deviation of boat velocities based on vtg in m/s
+        User specified standard deviation of boat velocities based on
+        vtg in m/s
     compass_error_user: float
         User specified compass error in degrees
     default_advanced_settings: dict
@@ -184,97 +231,127 @@
             compass_error: float
                 Default compass error in degrees
     user_specified_u: dict
-        Dictionary of user specified uncertainties as standard deviation in percent
+        Dictionary of user specified uncertainties as standard deviation in
+        percent
         u_syst_mean_user: float
             User specified uncertainty (bias) due to the system, in percent
         u_movbed_user: float
-            User specified uncertianty (bias) due to the moving-bed conditions, in percent
+            User specified uncertainty (bias) due to the moving-bed
+            conditions, in percent
         u_compass_user: float
-            User specified uncertianty (bias) due to the compass error, in percent
+            User specified uncertainty (bias) due to the compass error,
+            in percent
        u_ens_user: float
-            User specified uncertianty (bias) due to the number of ensembles collected, in percent
+            User specified uncertainty (bias) due to the number of ensembles
+            collected, in percent
        u_meas_mean_user: float
-            User specified uncertianty (random) of the measured portion of the cross section, in percent
+            User specified uncertainty (random) of the measured portion of
+            the cross section, in percent
        u_top_mean_user: float
-            User specified uncertianty (bias) due to the top extrapolation, in percent
+            User specified uncertainty (bias) due to the top extrapolation,
+            in percent
        u_bot_mean_user: float
-            User specified uncertianty (bias) due to the bottom extrapolation, in percent
+            User specified uncertainty (bias) due to the bottom
+            extrapolation, in percent
        u_right_mean_user: float
-            User specified uncertianty (bias) due to the right edge discharge estimate, in percent
+            User specified uncertainty (bias) due to the right edge
+            discharge estimate, in percent
        u_left_mean_user: float
-            User specified uncertianty (bias) due to the left edge discharge estimate, in percent
+            User specified uncertainty (bias) due to the left edge discharge
+            estimate, in percent
        u_invalid_boat_user: float
-            User specified uncertianty (bias) due to invalid boat velocities, in percent
+            User specified uncertainty (bias) due to invalid boat
+            velocities, in percent
        u_invalid_depth_user: float
            User specified uncertainty (bias) due to invalid depths, in percent
        u_invalid_water_user: float
-            User specified uncertianty (bias) due to invalid water velocities, in percent
+            User specified uncertainty (bias) due to invalid water
+            velocities, in percent
    u: DataFrame
-        DataFrame containing standard deviations in percent for each transect: u_syst, u_compass, u_movbed, u_ens,
-        u_meas, u_top, u_bot, u_left, u_right, u_boat, u_depth, u_water, u_cov, total, and total_95
+        DataFrame containing standard deviations in percent for each
+        transect: u_syst, u_compass, u_movbed, u_ens,
+        u_meas, u_top, u_bot, u_left, u_right, u_boat, u_depth, u_water,
+        u_cov, total, and total_95
    u_contribution_meas: DataFrame
-        DataFrame containing measured discharge uncertainty contribution from: boat, water, depth, and dzi
+        DataFrame containing measured discharge uncertainty contribution
+        from: boat, water, depth, and dzi
    u_measurement: DataFrame
-        DataFrame containing standard deviations in percent for the whole measurement: u_syst, u_compass, u_movbed,
-        u_ens, u_meas, u_top, u_bot, u_left, u_right, u_boat, u_depth, u_water, u_cov, total, and total_95
+        DataFrame containing standard deviations in percent for the whole
+        measurement: u_syst, u_compass, u_movbed,
+        u_ens, u_meas, u_top, u_bot, u_left, u_right, u_boat, u_depth,
+        u_water, u_cov, total, and total_95
    u_contribution_measurement: DataFrame
-        DataFrame containing uncertainty contribution in percent from: u_syst, u_compass, u_movbed,
-        u_ens, u_meas, u_top, u_bot, u_left,
u_right, u_boat, u_depth, u_water, u_cov, and total + DataFrame containing uncertainty contribution in percent from: + u_syst, u_compass, u_movbed, + u_ens, u_meas, u_top, u_bot, u_left, u_right, u_boat, u_depth, + u_water, u_cov, and total u_user: DataFrame - DataFrame containing standard deviations in percent for each transect: u_syst, u_compass, u_movbed, u_ens, - u_meas, u_top, u_bot, u_left, u_right, u_boat, u_depth, u_water, u_cov, total, and total_95 + DataFrame containing standard deviations in percent for each + transect: u_syst, u_compass, u_movbed, u_ens, + u_meas, u_top, u_bot, u_left, u_right, u_boat, u_depth, u_water, + u_cov, total, and total_95 u_measurement_user: DataFrame - DataFrame containing standard deviations in percent for the whole measurement: u_syst, u_compass, u_movbed, - u_ens, u_meas, u_top, u_bot, u_left, u_right, u_boat, u_depth, u_water, u_cov, total, and total_95 + DataFrame containing standard deviations in percent for the whole + measurement: u_syst, u_compass, u_movbed, + u_ens, u_meas, u_top, u_bot, u_left, u_right, u_boat, u_depth, + u_water, u_cov, total, and total_95 u_contribution_measurement_user: DataFrame - DataFrame containing uncertainty contribution in percent from: u_syst, u_compass, u_movbed, - u_ens, u_meas, u_top, u_bot, u_left, u_right, u_boat, u_depth, u_water, u_cov, and total + DataFrame containing uncertainty contribution in percent from: + u_syst, u_compass, u_movbed, + u_ens, u_meas, u_top, u_bot, u_left, u_right, u_boat, u_depth, + u_water, u_cov, and total """ def __init__(self): """Initialize class and instance variables.""" # User provided parameters - self.user_advanced_settings = {'exp_pp_min_user': np.nan, - 'exp_pp_max_user': np.nan, - 'exp_ns_min_user': np.nan, - 'exp_ns_max_user': np.nan, - 'draft_error_m_user': np.nan, - 'dzi_prct_user': np.nan, - 'right_edge_dist_prct_user': np.nan, - 'left_edge_dist_prct_user': np.nan, - 'gga_boat_mps_user': np.nan, - 'vtg_boat_mps_user': np.nan, - 'compass_error_deg_user': np.nan, - 'cov_prior_user': np.nan, - 'cov_prior_u_user': np.nan} - - self.default_advanced_settings = {'exp_pp_min': 'computed', - 'exp_pp_max': 'computed', - 'exp_ns_min': 'computed', - 'exp_ns_max': 'computed', - 'draft_error_m': 'computed', - 'dzi_prct': 0.5, - 'right_edge_dist_prct': 20, - 'left_edge_dist_prct': 20, - 'gga_boat_mps': 'computed', - 'vtg_boat_mps': 0.05, - 'compass_error_deg': 1, - 'cov_prior': 0.03, - 'cov_prior_u': 0.20} - - self.user_specified_u = {'u_syst_mean_user': np.nan, - 'u_movbed_user': np.nan, - 'u_compass_user': np.nan, - 'u_ens_user': np.nan, - 'u_meas_mean_user': np.nan, - 'u_top_mean_user': np.nan, - 'u_bot_mean_user': np.nan, - 'u_right_mean_user': np.nan, - 'u_left_mean_user': np.nan, - 'u_invalid_boat_user': np.nan, - 'u_invalid_depth_user': np.nan, - 'u_invalid_water_user': np.nan} + self.user_advanced_settings = { + "exp_pp_min_user": np.nan, + "exp_pp_max_user": np.nan, + "exp_ns_min_user": np.nan, + "exp_ns_max_user": np.nan, + "draft_error_m_user": np.nan, + "dzi_prct_user": np.nan, + "right_edge_dist_prct_user": np.nan, + "left_edge_dist_prct_user": np.nan, + "gga_boat_mps_user": np.nan, + "vtg_boat_mps_user": np.nan, + "compass_error_deg_user": np.nan, + "cov_prior_user": np.nan, + "cov_prior_u_user": np.nan, + } + + self.default_advanced_settings = { + "exp_pp_min": "computed", + "exp_pp_max": "computed", + "exp_ns_min": "computed", + "exp_ns_max": "computed", + "draft_error_m": "computed", + "dzi_prct": 0.5, + "right_edge_dist_prct": 20, + "left_edge_dist_prct": 20, + 
"gga_boat_mps": "computed", + "vtg_boat_mps": 0.05, + "compass_error_deg": 1, + "cov_prior": 0.03, + "cov_prior_u": 0.20, + } + + self.user_specified_u = { + "u_syst_mean_user": np.nan, + "u_movbed_user": np.nan, + "u_compass_user": np.nan, + "u_ens_user": np.nan, + "u_meas_mean_user": np.nan, + "u_top_mean_user": np.nan, + "u_bot_mean_user": np.nan, + "u_right_mean_user": np.nan, + "u_left_mean_user": np.nan, + "u_invalid_boat_user": np.nan, + "u_invalid_depth_user": np.nan, + "u_invalid_water_user": np.nan, + } # Extrap results self.bot_meth = [] @@ -327,91 +404,223 @@ class Oursin(object): self.nb_transects = np.nan self.checked_idx = [] - # --- Store results of all simulations in DataFrame - self.sim_original = pd.DataFrame(columns=['q_total', 'q_top', 'q_bot', 'q_left', 'q_right', 'q_middle']) - self.sim_extrap_pp_16 = pd.DataFrame(columns=['q_total', 'q_top', 'q_bot']) - self.sim_extrap_pp_opt = pd.DataFrame(columns=['q_total', 'q_top', 'q_bot']) - self.sim_extrap_pp_min = pd.DataFrame(columns=['q_total', 'q_top', 'q_bot']) - self.sim_extrap_pp_max = pd.DataFrame(columns=['q_total', 'q_top', 'q_bot']) - self.sim_extrap_cns_16 = pd.DataFrame(columns=['q_total', 'q_top', 'q_bot']) - self.sim_extrap_cns_opt = pd.DataFrame(columns=['q_total', 'q_top', 'q_bot']) - self.sim_extrap_cns_min = pd.DataFrame(columns=['q_total', 'q_top', 'q_bot']) - self.sim_extrap_cns_max = pd.DataFrame(columns=['q_total', 'q_top', 'q_bot']) - self.sim_extrap_3pns_16 = pd.DataFrame(columns=['q_total', 'q_top', 'q_bot']) - self.sim_extrap_3pns_opt = pd.DataFrame(columns=['q_total', 'q_top', 'q_bot']) - self.sim_edge_min = pd.DataFrame(columns=['q_total', 'q_left', 'q_right']) - self.sim_edge_max = pd.DataFrame(columns=['q_total', 'q_left', 'q_right']) - self.sim_draft_min = pd.DataFrame(columns=['q_total', 'q_top', 'q_left', 'q_right']) - self.sim_draft_max = pd.DataFrame(columns=['q_total', 'q_top', 'q_left', 'q_right']) - self.sim_cells_trdi = pd.DataFrame(columns=['q_total', 'q_middle']) - self.sim_cells_above = pd.DataFrame(columns=['q_total', 'q_middle']) - self.sim_cells_below = pd.DataFrame(columns=['q_total', 'q_middle']) - self.sim_cells_before = pd.DataFrame(columns=['q_total', 'q_middle']) - self.sim_cells_after = pd.DataFrame(columns=['q_total', 'q_middle']) - self.sim_shallow = pd.DataFrame(columns=['q_total', 'q_middle']) - self.sim_depth_hold = pd.DataFrame(columns=['q_total', 'q_middle']) - self.sim_depth_next = pd.DataFrame(columns=['q_total', 'q_middle']) - self.sim_boat_hold = pd.DataFrame(columns=['q_total', 'q_middle']) - self.sim_boat_next = pd.DataFrame(columns=['q_total', 'q_middle']) - self.u_contribution_meas = pd.DataFrame(columns=['boat', 'water', 'depth', 'dzi']) - self.u = pd.DataFrame(columns=['u_syst', 'u_compass', 'u_movbed', 'u_ens', 'u_meas', 'u_top', 'u_bot', - 'u_left', 'u_right', 'u_boat', 'u_depth', 'u_water', 'u_cov', 'total', - 'total_95']) - self.u_measurement = pd.DataFrame(columns=['u_syst', 'u_compass', 'u_movbed', 'u_ens', 'u_meas', 'u_top', - 'u_bot', 'u_left', 'u_right', 'u_boat', 'u_depth', 'u_water', - 'u_cov', 'total', 'total_95']) - self.u_contribution = pd.DataFrame(columns=['u_syst', 'u_compass', 'u_movbed', 'u_ens', 'u_meas', 'u_top', - 'u_bot', 'u_left', 'u_right', 'u_boat', 'u_depth', 'u_water', - 'u_cov', 'total']) - self.u_contribution_measurement = pd.DataFrame(columns=['u_syst', 'u_compass', 'u_movbed', 'u_ens', 'u_meas', - 'u_top', 'u_bot', 'u_left', 'u_right', 'u_boat', - 'u_depth', 'u_water', 'u_cov', 'total']) - self.u_user = 
pd.DataFrame(columns=['u_syst', 'u_compass', 'u_movbed', 'u_ens', 'u_meas', 'u_top', 'u_bot', - 'u_left', 'u_right', 'u_boat', 'u_depth', 'u_water', 'u_cov', 'total', - 'total_95']) - self.u_measurement_user = pd.DataFrame(columns=['u_syst', 'u_compass', 'u_movbed', 'u_ens', 'u_meas', 'u_top', - 'u_bot', 'u_left', 'u_right', 'u_boat', 'u_depth', 'u_water', - 'u_cov', 'total', 'total_95']) - self.u_contribution_user = pd.DataFrame(columns=['u_syst', 'u_compass', 'u_movbed', 'u_ens', 'u_meas', 'u_top', - 'u_bot', 'u_left', 'u_right', 'u_boat', 'u_depth', 'u_water', - 'u_cov', 'total']) - self.u_contribution_measurement_user = pd.DataFrame(columns=['u_syst', 'u_compass', 'u_movbed', 'u_ens', - 'u_meas', 'u_top', 'u_bot', 'u_left', 'u_right', - 'u_boat', 'u_depth', 'u_water', 'u_cov', 'total']) + # Store results of all simulations in DataFrame + self.sim_original = pd.DataFrame( + columns=["q_total", "q_top", "q_bot", "q_left", "q_right", "q_middle"] + ) + self.sim_extrap_pp_16 = pd.DataFrame(columns=["q_total", "q_top", "q_bot"]) + self.sim_extrap_pp_opt = pd.DataFrame(columns=["q_total", "q_top", "q_bot"]) + self.sim_extrap_pp_min = pd.DataFrame(columns=["q_total", "q_top", "q_bot"]) + self.sim_extrap_pp_max = pd.DataFrame(columns=["q_total", "q_top", "q_bot"]) + self.sim_extrap_cns_16 = pd.DataFrame(columns=["q_total", "q_top", "q_bot"]) + self.sim_extrap_cns_opt = pd.DataFrame(columns=["q_total", "q_top", "q_bot"]) + self.sim_extrap_cns_min = pd.DataFrame(columns=["q_total", "q_top", "q_bot"]) + self.sim_extrap_cns_max = pd.DataFrame(columns=["q_total", "q_top", "q_bot"]) + self.sim_extrap_3pns_16 = pd.DataFrame(columns=["q_total", "q_top", "q_bot"]) + self.sim_extrap_3pns_opt = pd.DataFrame(columns=["q_total", "q_top", "q_bot"]) + self.sim_edge_min = pd.DataFrame(columns=["q_total", "q_left", "q_right"]) + self.sim_edge_max = pd.DataFrame(columns=["q_total", "q_left", "q_right"]) + self.sim_draft_min = pd.DataFrame( + columns=["q_total", "q_top", "q_left", "q_right"] + ) + self.sim_draft_max = pd.DataFrame( + columns=["q_total", "q_top", "q_left", "q_right"] + ) + self.sim_cells_trdi = pd.DataFrame(columns=["q_total", "q_middle"]) + self.sim_cells_above = pd.DataFrame(columns=["q_total", "q_middle"]) + self.sim_cells_below = pd.DataFrame(columns=["q_total", "q_middle"]) + self.sim_cells_before = pd.DataFrame(columns=["q_total", "q_middle"]) + self.sim_cells_after = pd.DataFrame(columns=["q_total", "q_middle"]) + self.sim_shallow = pd.DataFrame(columns=["q_total", "q_middle"]) + self.sim_depth_hold = pd.DataFrame(columns=["q_total", "q_middle"]) + self.sim_depth_next = pd.DataFrame(columns=["q_total", "q_middle"]) + self.sim_boat_hold = pd.DataFrame(columns=["q_total", "q_middle"]) + self.sim_boat_next = pd.DataFrame(columns=["q_total", "q_middle"]) + self.u_contribution_meas = pd.DataFrame( + columns=["boat", "water", "depth", "dzi"] + ) + self.u = pd.DataFrame( + columns=[ + "u_syst", + "u_compass", + "u_movbed", + "u_ens", + "u_meas", + "u_top", + "u_bot", + "u_left", + "u_right", + "u_boat", + "u_depth", + "u_water", + "u_cov", + "total", + "total_95", + ] + ) + self.u_measurement = pd.DataFrame( + columns=[ + "u_syst", + "u_compass", + "u_movbed", + "u_ens", + "u_meas", + "u_top", + "u_bot", + "u_left", + "u_right", + "u_boat", + "u_depth", + "u_water", + "u_cov", + "total", + "total_95", + ] + ) + self.u_contribution = pd.DataFrame( + columns=[ + "u_syst", + "u_compass", + "u_movbed", + "u_ens", + "u_meas", + "u_top", + "u_bot", + "u_left", + "u_right", + "u_boat", + "u_depth", + 
"u_water", + "u_cov", + "total", + ] + ) + self.u_contribution_measurement = pd.DataFrame( + columns=[ + "u_syst", + "u_compass", + "u_movbed", + "u_ens", + "u_meas", + "u_top", + "u_bot", + "u_left", + "u_right", + "u_boat", + "u_depth", + "u_water", + "u_cov", + "total", + ] + ) + self.u_user = pd.DataFrame( + columns=[ + "u_syst", + "u_compass", + "u_movbed", + "u_ens", + "u_meas", + "u_top", + "u_bot", + "u_left", + "u_right", + "u_boat", + "u_depth", + "u_water", + "u_cov", + "total", + "total_95", + ] + ) + self.u_measurement_user = pd.DataFrame( + columns=[ + "u_syst", + "u_compass", + "u_movbed", + "u_ens", + "u_meas", + "u_top", + "u_bot", + "u_left", + "u_right", + "u_boat", + "u_depth", + "u_water", + "u_cov", + "total", + "total_95", + ] + ) + self.u_contribution_user = pd.DataFrame( + columns=[ + "u_syst", + "u_compass", + "u_movbed", + "u_ens", + "u_meas", + "u_top", + "u_bot", + "u_left", + "u_right", + "u_boat", + "u_depth", + "u_water", + "u_cov", + "total", + ] + ) + self.u_contribution_measurement_user = pd.DataFrame( + columns=[ + "u_syst", + "u_compass", + "u_movbed", + "u_ens", + "u_meas", + "u_top", + "u_bot", + "u_left", + "u_right", + "u_boat", + "u_depth", + "u_water", + "u_cov", + "total", + ] + ) def populate_from_qrev_mat(self, meas_struct): # User provided parameters - self.user_advanced_settings = {'exp_pp_min_user': meas_struct.oursin.user_advanced_settings.exp_pp_min_user, - 'exp_pp_max_user': meas_struct.oursin.user_advanced_settings.exp_pp_max_user, - 'exp_ns_min_user': meas_struct.oursin.user_advanced_settings.exp_ns_min_user, - 'exp_ns_max_user': meas_struct.oursin.user_advanced_settings.exp_ns_max_user, - 'draft_error_m_user': - meas_struct.oursin.user_advanced_settings.draft_error_m_user, - 'dzi_prct_user': meas_struct.oursin.user_advanced_settings.dzi_prct_user, - 'right_edge_dist_prct_user': - meas_struct.oursin.user_advanced_settings.right_edge_dist_prct_user, - 'left_edge_dist_prct_user': - meas_struct.oursin.user_advanced_settings.left_edge_dist_prct_user, - 'gga_boat_mps_user': meas_struct.oursin.user_advanced_settings.gga_boat_mps_user, - 'vtg_boat_mps_user': meas_struct.oursin.user_advanced_settings.vtg_boat_mps_user, - 'compass_error_deg_user': - meas_struct.oursin.user_advanced_settings.compass_error_deg_user, - 'cov_prior_user': meas_struct.oursin.user_advanced_settings.cov_prior_user, - 'cov_prior_u_user': meas_struct.oursin.user_advanced_settings.cov_prior_u_user} - - self.user_specified_u = {'u_syst_mean_user': meas_struct.oursin.user_specified_u.u_syst_mean_user, - 'u_movbed_user': meas_struct.oursin.user_specified_u.u_movbed_user, - 'u_compass_user': meas_struct.oursin.user_specified_u.u_compass_user, - 'u_ens_user': meas_struct.oursin.user_specified_u.u_ens_user, - 'u_meas_mean_user': meas_struct.oursin.user_specified_u.u_meas_mean_user, - 'u_top_mean_user': meas_struct.oursin.user_specified_u.u_top_mean_user, - 'u_bot_mean_user': meas_struct.oursin.user_specified_u.u_bot_mean_user, - 'u_right_mean_user': meas_struct.oursin.user_specified_u.u_right_mean_user, - 'u_left_mean_user': meas_struct.oursin.user_specified_u.u_left_mean_user, - 'u_invalid_boat_user': meas_struct.oursin.user_specified_u.u_invalid_boat_user, - 'u_invalid_depth_user': meas_struct.oursin.user_specified_u.u_invalid_depth_user, - 'u_invalid_water_user': meas_struct.oursin.user_specified_u.u_invalid_water_user} + self.user_advanced_settings = { + "exp_pp_min_user": meas_struct.oursin.user_advanced_settings.exp_pp_min_user, + "exp_pp_max_user": 
meas_struct.oursin.user_advanced_settings.exp_pp_max_user, + "exp_ns_min_user": meas_struct.oursin.user_advanced_settings.exp_ns_min_user, + "exp_ns_max_user": meas_struct.oursin.user_advanced_settings.exp_ns_max_user, + "draft_error_m_user": meas_struct.oursin.user_advanced_settings.draft_error_m_user, + "dzi_prct_user": meas_struct.oursin.user_advanced_settings.dzi_prct_user, + "right_edge_dist_prct_user": meas_struct.oursin.user_advanced_settings.right_edge_dist_prct_user, + "left_edge_dist_prct_user": meas_struct.oursin.user_advanced_settings.left_edge_dist_prct_user, + "gga_boat_mps_user": meas_struct.oursin.user_advanced_settings.gga_boat_mps_user, + "vtg_boat_mps_user": meas_struct.oursin.user_advanced_settings.vtg_boat_mps_user, + "compass_error_deg_user": meas_struct.oursin.user_advanced_settings.compass_error_deg_user, + "cov_prior_user": meas_struct.oursin.user_advanced_settings.cov_prior_user, + "cov_prior_u_user": meas_struct.oursin.user_advanced_settings.cov_prior_u_user, + } + + self.user_specified_u = { + "u_syst_mean_user": meas_struct.oursin.user_specified_u.u_syst_mean_user, + "u_movbed_user": meas_struct.oursin.user_specified_u.u_movbed_user, + "u_compass_user": meas_struct.oursin.user_specified_u.u_compass_user, + "u_ens_user": meas_struct.oursin.user_specified_u.u_ens_user, + "u_meas_mean_user": meas_struct.oursin.user_specified_u.u_meas_mean_user, + "u_top_mean_user": meas_struct.oursin.user_specified_u.u_top_mean_user, + "u_bot_mean_user": meas_struct.oursin.user_specified_u.u_bot_mean_user, + "u_right_mean_user": meas_struct.oursin.user_specified_u.u_right_mean_user, + "u_left_mean_user": meas_struct.oursin.user_specified_u.u_left_mean_user, + "u_invalid_boat_user": meas_struct.oursin.user_specified_u.u_invalid_boat_user, + "u_invalid_depth_user": meas_struct.oursin.user_specified_u.u_invalid_depth_user, + "u_invalid_water_user": meas_struct.oursin.user_specified_u.u_invalid_water_user, + } # Extrap results if type(meas_struct.oursin.bot_meth) is str: @@ -481,9 +690,15 @@ class Oursin(object): self.u_top_mean_user_list = [meas_struct.oursin.u_top_mean_user_list] self.u_bot_mean_user_list = [meas_struct.oursin.u_bot_mean_user_list] self.u_left_mean_user_list = [meas_struct.oursin.u_left_mean_user_list] - self.u_invalid_boat_user_list = [meas_struct.oursin.u_invalid_boat_user_list] - self.u_invalid_depth_user_list = [meas_struct.oursin.u_invalid_depth_user_list] - self.u_invalid_water_user_list = [meas_struct.oursin.u_invalid_water_user_list] + self.u_invalid_boat_user_list = [ + meas_struct.oursin.u_invalid_boat_user_list + ] + self.u_invalid_depth_user_list = [ + meas_struct.oursin.u_invalid_depth_user_list + ] + self.u_invalid_water_user_list = [ + meas_struct.oursin.u_invalid_water_user_list + ] else: self.u_syst_list = meas_struct.oursin.u_syst_list.tolist() self.u_compass_list = meas_struct.oursin.u_compass_list.tolist() @@ -498,18 +713,32 @@ class Oursin(object): self.u_left_list = meas_struct.oursin.u_left_list.tolist() self.u_right_list = meas_struct.oursin.u_right_list.tolist() - self.u_syst_mean_user_list = meas_struct.oursin.u_syst_mean_user_list.tolist() + self.u_syst_mean_user_list = ( + meas_struct.oursin.u_syst_mean_user_list.tolist() + ) self.u_compass_user_list = meas_struct.oursin.u_compass_user_list.tolist() self.u_movbed_user_list = meas_struct.oursin.u_movbed_user_list.tolist() - self.u_meas_mean_user_list = meas_struct.oursin.u_meas_mean_user_list.tolist() + self.u_meas_mean_user_list = ( + meas_struct.oursin.u_meas_mean_user_list.tolist() + ) 
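Note: the type checks and tolist() calls in this method exist because scipy.io.loadmat, with squeezing, returns a bare scalar (or str) when a saved Matlab array holds a single element, as happens for one-transect measurements. A hypothetical normalization helper showing the pattern; it is not part of the module:

    import numpy as np

    def as_list(mat_field):
        # scipy.io.loadmat can hand back either an ndarray (several
        # transects) or a bare scalar (one transect); always return a list.
        if isinstance(mat_field, np.ndarray):
            return mat_field.tolist()
        return [mat_field]

    # as_list(np.array([1.2, 3.4])) -> [1.2, 3.4]
    # as_list(5.6) -> [5.6]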
self.u_ens_user_list = meas_struct.oursin.u_ens_user_list.tolist() self.u_top_mean_user_list = meas_struct.oursin.u_top_mean_user_list.tolist() self.u_bot_mean_user_list = meas_struct.oursin.u_bot_mean_user_list.tolist() - self.u_left_mean_user_list = meas_struct.oursin.u_left_mean_user_list.tolist() - self.u_right_mean_user_list = meas_struct.oursin.u_right_mean_user_list.tolist() - self.u_invalid_boat_user_list = meas_struct.oursin.u_invalid_boat_user_list.tolist() - self.u_invalid_depth_user_list = meas_struct.oursin.u_invalid_depth_user_list.tolist() - self.u_invalid_water_user_list = meas_struct.oursin.u_invalid_water_user_list.tolist() + self.u_left_mean_user_list = ( + meas_struct.oursin.u_left_mean_user_list.tolist() + ) + self.u_right_mean_user_list = ( + meas_struct.oursin.u_right_mean_user_list.tolist() + ) + self.u_invalid_boat_user_list = ( + meas_struct.oursin.u_invalid_boat_user_list.tolist() + ) + self.u_invalid_depth_user_list = ( + meas_struct.oursin.u_invalid_depth_user_list.tolist() + ) + self.u_invalid_water_user_list = ( + meas_struct.oursin.u_invalid_water_user_list.tolist() + ) # COV self.cov_68 = meas_struct.oursin.cov_68 @@ -518,91 +747,266 @@ class Oursin(object): self.checked_idx = meas_struct.oursin.checked_idx # Reconstruct data frames from Matlab arrays - self.sim_original = pd.DataFrame(self.checkshape(meas_struct.oursin.sim_original), - columns=['q_total', 'q_top', 'q_bot', 'q_left', 'q_right', 'q_middle']) - self.sim_extrap_pp_16 = pd.DataFrame(self.checkshape(meas_struct.oursin.sim_extrap_pp_16), - columns=['q_total', 'q_top', 'q_bot']) - self.sim_extrap_pp_opt = pd.DataFrame(self.checkshape(meas_struct.oursin.sim_extrap_pp_opt), - columns=['q_total', 'q_top', 'q_bot']) - self.sim_extrap_pp_min = pd.DataFrame(self.checkshape(meas_struct.oursin.sim_extrap_pp_min), - columns=['q_total', 'q_top', 'q_bot']) - self.sim_extrap_pp_max = pd.DataFrame(self.checkshape(meas_struct.oursin.sim_extrap_pp_max), - columns=['q_total', 'q_top', 'q_bot']) - self.sim_extrap_cns_16 = pd.DataFrame(self.checkshape(meas_struct.oursin.sim_extrap_cns_16), - columns=['q_total', 'q_top', 'q_bot']) - self.sim_extrap_cns_opt = pd.DataFrame(self.checkshape(meas_struct.oursin.sim_extrap_cns_opt), - columns=['q_total', 'q_top', 'q_bot']) - self.sim_extrap_cns_min = pd.DataFrame(self.checkshape(meas_struct.oursin.sim_extrap_cns_min), - columns=['q_total', 'q_top', 'q_bot']) - self.sim_extrap_cns_max = pd.DataFrame(self.checkshape(meas_struct.oursin.sim_extrap_cns_max), - columns=['q_total', 'q_top', 'q_bot']) - self.sim_extrap_3pns_16 = pd.DataFrame(self.checkshape(meas_struct.oursin.sim_extrap_3pns_16), - columns=['q_total', 'q_top', 'q_bot']) - self.sim_extrap_3pns_opt = pd.DataFrame(self.checkshape(meas_struct.oursin.sim_extrap_3pns_opt), - columns=['q_total', 'q_top', 'q_bot']) - self.sim_edge_min = pd.DataFrame(self.checkshape(meas_struct.oursin.sim_edge_min), - columns=['q_total', 'q_left', 'q_right']) - self.sim_edge_max = pd.DataFrame(self.checkshape(meas_struct.oursin.sim_edge_max), - columns=['q_total', 'q_left', 'q_right']) - self.sim_draft_min = pd.DataFrame(self.checkshape(meas_struct.oursin.sim_draft_min), - columns=['q_total', 'q_top', 'q_left', 'q_right']) - self.sim_draft_max = pd.DataFrame(self.checkshape(meas_struct.oursin.sim_draft_max), - columns=['q_total', 'q_top', 'q_left', 'q_right']) - self.sim_cells_trdi = pd.DataFrame(self.checkshape(meas_struct.oursin.sim_cells_trdi), - columns=['q_total', 'q_middle']) - self.sim_cells_above = 
pd.DataFrame(self.checkshape(meas_struct.oursin.sim_cells_above), - columns=['q_total', 'q_middle']) - self.sim_cells_below = pd.DataFrame(self.checkshape(meas_struct.oursin.sim_cells_below), - columns=['q_total', 'q_middle']) - self.sim_cells_before = pd.DataFrame(self.checkshape(meas_struct.oursin.sim_cells_before), - columns=['q_total', 'q_middle']) - self.sim_cells_after = pd.DataFrame(self.checkshape(meas_struct.oursin.sim_cells_after), - columns=['q_total', 'q_middle']) - self.sim_shallow = pd.DataFrame(self.checkshape(meas_struct.oursin.sim_shallow), - columns=['q_total', 'q_middle']) - self.sim_depth_hold = pd.DataFrame(self.checkshape(meas_struct.oursin.sim_depth_hold), - columns=['q_total', 'q_middle']) - self.sim_depth_next = pd.DataFrame(self.checkshape(meas_struct.oursin.sim_depth_next), - columns=['q_total', 'q_middle']) - self.sim_boat_hold = pd.DataFrame(self.checkshape(meas_struct.oursin.sim_boat_hold), - columns=['q_total', 'q_middle']) - self.sim_boat_next = pd.DataFrame(self.checkshape(meas_struct.oursin.sim_boat_next), - columns=['q_total', 'q_middle']) - self.u_contribution_meas = pd.DataFrame(self.checkshape(meas_struct.oursin.u_contribution_meas), - columns=['boat', 'water', 'dzi']) - self.u = pd.DataFrame(self.checkshape(meas_struct.oursin.u), - columns=['u_syst', 'u_compass', 'u_movbed', 'u_ens', 'u_meas', 'u_top', 'u_bot', - 'u_left', 'u_right', 'u_boat', 'u_depth', 'u_water', 'u_cov', 'total', - 'total_95']) - self.u_measurement = pd.DataFrame(self.checkshape(meas_struct.oursin.u_measurement), - columns=['u_syst', 'u_compass', 'u_movbed', 'u_ens', 'u_meas', 'u_top', - 'u_bot', 'u_left', 'u_right', 'u_boat', 'u_depth', 'u_water', - 'u_cov', 'total', 'total_95']) - self.u_contribution = pd.DataFrame(self.checkshape(meas_struct.oursin.u_contribution), - columns=['u_syst', 'u_compass', 'u_movbed', 'u_ens', 'u_meas', 'u_top', - 'u_bot', 'u_left', 'u_right', 'u_boat', 'u_depth', 'u_water', - 'u_cov', 'total']) - self.u_contribution_measurement = pd.DataFrame(self.checkshape(meas_struct.oursin.u_contribution_measurement), - columns=['u_syst', 'u_compass', 'u_movbed', 'u_ens', 'u_meas', - 'u_top', 'u_bot', 'u_left', 'u_right', 'u_boat', - 'u_depth', 'u_water', 'u_cov', 'total']) - self.u_user = pd.DataFrame(self.checkshape(meas_struct.oursin.u_user), - columns=['u_syst', 'u_compass', 'u_movbed', 'u_ens', 'u_meas', 'u_top', 'u_bot', - 'u_left', 'u_right', 'u_boat', 'u_depth', 'u_water', 'u_cov', 'total', - 'total_95']) - self.u_measurement_user = pd.DataFrame(self.checkshape(meas_struct.oursin.u_measurement_user), - columns=['u_syst', 'u_compass', 'u_movbed', 'u_ens', 'u_meas', 'u_top', - 'u_bot', 'u_left', 'u_right', 'u_boat', 'u_depth', 'u_water', - 'u_cov', 'total', 'total_95']) - self.u_contribution_user = pd.DataFrame(self.checkshape(meas_struct.oursin.u_contribution_user), - columns=['u_syst', 'u_compass', 'u_movbed', 'u_ens', 'u_meas', 'u_top', - 'u_bot', 'u_left', 'u_right', 'u_boat', 'u_depth', 'u_water', - 'u_cov', 'total']) + self.sim_original = pd.DataFrame( + self.checkshape(meas_struct.oursin.sim_original), + columns=["q_total", "q_top", "q_bot", "q_left", "q_right", "q_middle"], + ) + self.sim_extrap_pp_16 = pd.DataFrame( + self.checkshape(meas_struct.oursin.sim_extrap_pp_16), + columns=["q_total", "q_top", "q_bot"], + ) + self.sim_extrap_pp_opt = pd.DataFrame( + self.checkshape(meas_struct.oursin.sim_extrap_pp_opt), + columns=["q_total", "q_top", "q_bot"], + ) + self.sim_extrap_pp_min = pd.DataFrame( + 
self.checkshape(meas_struct.oursin.sim_extrap_pp_min), + columns=["q_total", "q_top", "q_bot"], + ) + self.sim_extrap_pp_max = pd.DataFrame( + self.checkshape(meas_struct.oursin.sim_extrap_pp_max), + columns=["q_total", "q_top", "q_bot"], + ) + self.sim_extrap_cns_16 = pd.DataFrame( + self.checkshape(meas_struct.oursin.sim_extrap_cns_16), + columns=["q_total", "q_top", "q_bot"], + ) + self.sim_extrap_cns_opt = pd.DataFrame( + self.checkshape(meas_struct.oursin.sim_extrap_cns_opt), + columns=["q_total", "q_top", "q_bot"], + ) + self.sim_extrap_cns_min = pd.DataFrame( + self.checkshape(meas_struct.oursin.sim_extrap_cns_min), + columns=["q_total", "q_top", "q_bot"], + ) + self.sim_extrap_cns_max = pd.DataFrame( + self.checkshape(meas_struct.oursin.sim_extrap_cns_max), + columns=["q_total", "q_top", "q_bot"], + ) + self.sim_extrap_3pns_16 = pd.DataFrame( + self.checkshape(meas_struct.oursin.sim_extrap_3pns_16), + columns=["q_total", "q_top", "q_bot"], + ) + self.sim_extrap_3pns_opt = pd.DataFrame( + self.checkshape(meas_struct.oursin.sim_extrap_3pns_opt), + columns=["q_total", "q_top", "q_bot"], + ) + self.sim_edge_min = pd.DataFrame( + self.checkshape(meas_struct.oursin.sim_edge_min), + columns=["q_total", "q_left", "q_right"], + ) + self.sim_edge_max = pd.DataFrame( + self.checkshape(meas_struct.oursin.sim_edge_max), + columns=["q_total", "q_left", "q_right"], + ) + self.sim_draft_min = pd.DataFrame( + self.checkshape(meas_struct.oursin.sim_draft_min), + columns=["q_total", "q_top", "q_left", "q_right"], + ) + self.sim_draft_max = pd.DataFrame( + self.checkshape(meas_struct.oursin.sim_draft_max), + columns=["q_total", "q_top", "q_left", "q_right"], + ) + self.sim_cells_trdi = pd.DataFrame( + self.checkshape(meas_struct.oursin.sim_cells_trdi), + columns=["q_total", "q_middle"], + ) + self.sim_cells_above = pd.DataFrame( + self.checkshape(meas_struct.oursin.sim_cells_above), + columns=["q_total", "q_middle"], + ) + self.sim_cells_below = pd.DataFrame( + self.checkshape(meas_struct.oursin.sim_cells_below), + columns=["q_total", "q_middle"], + ) + self.sim_cells_before = pd.DataFrame( + self.checkshape(meas_struct.oursin.sim_cells_before), + columns=["q_total", "q_middle"], + ) + self.sim_cells_after = pd.DataFrame( + self.checkshape(meas_struct.oursin.sim_cells_after), + columns=["q_total", "q_middle"], + ) + self.sim_shallow = pd.DataFrame( + self.checkshape(meas_struct.oursin.sim_shallow), + columns=["q_total", "q_middle"], + ) + self.sim_depth_hold = pd.DataFrame( + self.checkshape(meas_struct.oursin.sim_depth_hold), + columns=["q_total", "q_middle"], + ) + self.sim_depth_next = pd.DataFrame( + self.checkshape(meas_struct.oursin.sim_depth_next), + columns=["q_total", "q_middle"], + ) + self.sim_boat_hold = pd.DataFrame( + self.checkshape(meas_struct.oursin.sim_boat_hold), + columns=["q_total", "q_middle"], + ) + self.sim_boat_next = pd.DataFrame( + self.checkshape(meas_struct.oursin.sim_boat_next), + columns=["q_total", "q_middle"], + ) + self.u_contribution_meas = pd.DataFrame( + self.checkshape(meas_struct.oursin.u_contribution_meas), + columns=["boat", "water", "dzi"], + ) + self.u = pd.DataFrame( + self.checkshape(meas_struct.oursin.u), + columns=[ + "u_syst", + "u_compass", + "u_movbed", + "u_ens", + "u_meas", + "u_top", + "u_bot", + "u_left", + "u_right", + "u_boat", + "u_depth", + "u_water", + "u_cov", + "total", + "total_95", + ], + ) + self.u_measurement = pd.DataFrame( + self.checkshape(meas_struct.oursin.u_measurement), + columns=[ + "u_syst", + "u_compass", + "u_movbed", + 
"u_ens", + "u_meas", + "u_top", + "u_bot", + "u_left", + "u_right", + "u_boat", + "u_depth", + "u_water", + "u_cov", + "total", + "total_95", + ], + ) + self.u_contribution = pd.DataFrame( + self.checkshape(meas_struct.oursin.u_contribution), + columns=[ + "u_syst", + "u_compass", + "u_movbed", + "u_ens", + "u_meas", + "u_top", + "u_bot", + "u_left", + "u_right", + "u_boat", + "u_depth", + "u_water", + "u_cov", + "total", + ], + ) + self.u_contribution_measurement = pd.DataFrame( + self.checkshape(meas_struct.oursin.u_contribution_measurement), + columns=[ + "u_syst", + "u_compass", + "u_movbed", + "u_ens", + "u_meas", + "u_top", + "u_bot", + "u_left", + "u_right", + "u_boat", + "u_depth", + "u_water", + "u_cov", + "total", + ], + ) + self.u_user = pd.DataFrame( + self.checkshape(meas_struct.oursin.u_user), + columns=[ + "u_syst", + "u_compass", + "u_movbed", + "u_ens", + "u_meas", + "u_top", + "u_bot", + "u_left", + "u_right", + "u_boat", + "u_depth", + "u_water", + "u_cov", + "total", + "total_95", + ], + ) + self.u_measurement_user = pd.DataFrame( + self.checkshape(meas_struct.oursin.u_measurement_user), + columns=[ + "u_syst", + "u_compass", + "u_movbed", + "u_ens", + "u_meas", + "u_top", + "u_bot", + "u_left", + "u_right", + "u_boat", + "u_depth", + "u_water", + "u_cov", + "total", + "total_95", + ], + ) + self.u_contribution_user = pd.DataFrame( + self.checkshape(meas_struct.oursin.u_contribution_user), + columns=[ + "u_syst", + "u_compass", + "u_movbed", + "u_ens", + "u_meas", + "u_top", + "u_bot", + "u_left", + "u_right", + "u_boat", + "u_depth", + "u_water", + "u_cov", + "total", + ], + ) self.u_contribution_measurement_user = pd.DataFrame( self.checkshape(meas_struct.oursin.u_contribution_measurement_user), - columns=['u_syst', 'u_compass', 'u_movbed', 'u_ens', - 'u_meas', 'u_top', 'u_bot', 'u_left', 'u_right', - 'u_boat', 'u_depth', 'u_water', 'u_cov', 'total']) + columns=[ + "u_syst", + "u_compass", + "u_movbed", + "u_ens", + "u_meas", + "u_top", + "u_bot", + "u_left", + "u_right", + "u_boat", + "u_depth", + "u_water", + "u_cov", + "total", + ], + ) @staticmethod def checkshape(a): @@ -611,9 +1015,11 @@ class Oursin(object): return a # @profile - def compute_oursin(self, meas, user_advanced_settings=None, u_measurement_user=None): - """Computes the uncertainty for the components of the discharge measurement - using measurement data or user provided values. + def compute_oursin( + self, meas, user_advanced_settings=None, u_measurement_user=None + ): + """Computes the uncertainty for the components of the discharge + measurement using measurement data or user provided values. Parameters ---------- @@ -622,7 +1028,8 @@ class Oursin(object): user_advanced_settings: dict Dictionary of user input on advanced settings u_measurement_user: dict - Dictionary of user estimates of uncertainty, standard deviation in percent, for each component + Dictionary of user estimates of uncertainty, standard deviation + in percent, for each component """ if user_advanced_settings is not None: @@ -647,7 +1054,7 @@ class Oursin(object): # Prep data for computations self.data_prep(meas) - self.compute_measurement_cov(meas=meas, method='Bayes') + self.compute_measurement_cov(meas=meas, method="Bayes") # 1. Systematic terms + correction terms (moving bed) self.uncertainty_system() @@ -661,7 +1068,8 @@ class Oursin(object): # 3. Run all the simulations to compute possible discharges self.run_simulations(meas) - # 4. Compute uncertainty terms based on simulations and assuming a rectangular law + # 4. 
Compute uncertainty terms based on simulations and assuming a + # rectangular law self.uncertainty_top_discharge() self.uncertainty_bottom_discharge() self.uncertainty_left_discharge() @@ -671,39 +1079,64 @@ class Oursin(object): self.uncertainty_invalid_water_data() # 6. Compute combined uncertainty - self.u, self.u_measurement, self.u_contribution, self.u_contribution_measurement = \ - self.compute_combined_uncertainty(u_syst=self.u_syst_list, - u_compass=self.u_compass_list, - u_movbed=self.u_movbed_list, - u_meas=self.u_meas_list, - u_ens=self.u_ens_list, - u_top=self.u_top_list, - u_bot=self.u_bot_list, - u_left=self.u_left_list, - u_right=self.u_right_list, - u_boat=self.u_invalid_boat_list, - u_depth=self.u_invalid_depth_list, - u_water=self.u_invalid_water_list, - cov_68=self.cov_68) - - self.u_user, self.u_measurement_user, self.u_contribution_user, self.u_contribution_measurement_user = \ - self.compute_combined_uncertainty(u_syst=self.u_syst_mean_user_list, - u_compass=self.u_compass_user_list, - u_movbed=self.u_movbed_user_list, - u_meas=self.u_meas_mean_user_list, - u_ens=self.u_ens_user_list, - u_top=self.u_top_mean_user_list, - u_bot=self.u_bot_mean_user_list, - u_left=self.u_left_mean_user_list, - u_right=self.u_right_mean_user_list, - u_boat=self.u_invalid_boat_user_list, - u_depth=self.u_invalid_depth_user_list, - u_water=self.u_invalid_water_user_list, - cov_68=self.cov_68) + ( + self.u, + self.u_measurement, + self.u_contribution, + self.u_contribution_measurement, + ) = self.compute_combined_uncertainty( + u_syst=self.u_syst_list, + u_compass=self.u_compass_list, + u_movbed=self.u_movbed_list, + u_meas=self.u_meas_list, + u_ens=self.u_ens_list, + u_top=self.u_top_list, + u_bot=self.u_bot_list, + u_left=self.u_left_list, + u_right=self.u_right_list, + u_boat=self.u_invalid_boat_list, + u_depth=self.u_invalid_depth_list, + u_water=self.u_invalid_water_list, + cov_68=self.cov_68, + ) + + ( + self.u_user, + self.u_measurement_user, + self.u_contribution_user, + self.u_contribution_measurement_user, + ) = self.compute_combined_uncertainty( + u_syst=self.u_syst_mean_user_list, + u_compass=self.u_compass_user_list, + u_movbed=self.u_movbed_user_list, + u_meas=self.u_meas_mean_user_list, + u_ens=self.u_ens_user_list, + u_top=self.u_top_mean_user_list, + u_bot=self.u_bot_mean_user_list, + u_left=self.u_left_mean_user_list, + u_right=self.u_right_mean_user_list, + u_boat=self.u_invalid_boat_user_list, + u_depth=self.u_invalid_depth_user_list, + u_water=self.u_invalid_water_user_list, + cov_68=self.cov_68, + ) @staticmethod - def compute_combined_uncertainty(u_syst, u_compass, u_movbed, u_meas, u_ens, u_top, u_bot, u_left, u_right, - u_boat, u_depth, u_water, cov_68): + def compute_combined_uncertainty( + u_syst, + u_compass, + u_movbed, + u_meas, + u_ens, + u_top, + u_bot, + u_left, + u_right, + u_boat, + u_depth, + u_water, + cov_68, + ): """Combined the uncertainty for each transect and for the measurement Parameters @@ -721,11 +1154,14 @@ class Oursin(object): u_top: list List of uncertainties due to top extrapolation in each transect u_bot: list - List of uncertainties due to the bottom extrapolation in each transect + List of uncertainties due to the bottom extrapolation in each + transect u_left: list - List of uncertainties due to the left edge discharge in each transect + List of uncertainties due to the left edge discharge in each + transect u_right: list - List of uncertainties due to the right edge discharge in each transect + List of uncertainties due to the right 
edge discharge in each + transect u_boat: list List of uncertainties due to invalid boat velocities u_depth: list @@ -738,34 +1174,57 @@ class Oursin(object): Returns ------- u_contribution_meas: DataFrame - DataFrame containing measured discharge uncertainty contribution from: boat, water, depth, and dzi + DataFrame containing measured discharge uncertainty contribution + from: boat, water, depth, and dzi u: DataFrame - DataFrame containing standard deviations in percent for each transect: u_syst, u_compass, u_movbed, u_ens, - u_meas, u_top, u_bot, u_left, u_right, u_boat, u_depth, u_water, u_cov, total, and total_95 + DataFrame containing standard deviations in percent for each + transect: u_syst, u_compass, u_movbed, u_ens, + u_meas, u_top, u_bot, u_left, u_right, u_boat, u_depth, u_water, + u_cov, total, and total_95 u_measurement: DataFrame - DataFrame containing standard deviations in percent for the whole measurement: u_syst, u_compass, u_movbed, - u_ens, u_meas, u_top, u_bot, u_left, u_right, u_boat, u_depth, u_water, u_cov, total, and total_95 + DataFrame containing standard deviations in percent for the + whole measurement: u_syst, u_compass, u_movbed, + u_ens, u_meas, u_top, u_bot, u_left, u_right, u_boat, u_depth, + u_water, u_cov, total, and total_95 u_contribution_measurement: DataFrame - DataFrame containing uncertainty contribution in percent from: u_syst, u_compass, u_movbed, - u_ens, u_meas, u_top, u_bot, u_left, u_right, u_boat, u_depth, u_water, u_cov, and total + DataFrame containing uncertainty contribution in percent from: + u_syst, u_compass, u_movbed, + u_ens, u_meas, u_top, u_bot, u_left, u_right, u_boat, u_depth, + u_water, u_cov, and total """ - # Create a Dataframe with all computed uncertainty for each checked transect - u = pd.DataFrame(columns=['u_syst', 'u_compass', 'u_movbed', 'u_ens', 'u_meas', 'u_top', 'u_bot', - 'u_left', 'u_right', 'u_boat', 'u_depth', 'u_water', 'u_cov']) - u['u_syst'] = u_syst - u['u_compass'] = u_compass - u['u_movbed'] = u_movbed - u['u_meas'] = u_meas - u['u_ens'] = u_ens - u['u_water'] = u_water - u['u_top'] = u_top - u['u_bot'] = u_bot - u['u_left'] = u_left - u['u_right'] = u_right - u['u_cov'] = cov_68 - u['u_boat'] = u_boat - u['u_depth'] = u_depth + # Create a Dataframe with all computed uncertainty for each checked + # transect + u = pd.DataFrame( + columns=[ + "u_syst", + "u_compass", + "u_movbed", + "u_ens", + "u_meas", + "u_top", + "u_bot", + "u_left", + "u_right", + "u_boat", + "u_depth", + "u_water", + "u_cov", + ] + ) + u["u_syst"] = u_syst + u["u_compass"] = u_compass + u["u_movbed"] = u_movbed + u["u_meas"] = u_meas + u["u_ens"] = u_ens + u["u_water"] = u_water + u["u_top"] = u_top + u["u_bot"] = u_bot + u["u_left"] = u_left + u["u_right"] = u_right + u["u_cov"] = cov_68 + u["u_boat"] = u_boat + u["u_depth"] = u_depth n_transects = len(u_ens) @@ -775,45 +1234,56 @@ class Oursin(object): u2_measurement = u2.mean(axis=0, skipna=False).to_frame().T # Combined uncertainty by transect - # Sum of variance of each component, then sqrt, then multiply by 100 for percentage - # u['total'] = u2.drop(['u_cov'], axis=1).sum(axis=1, skipna=False) ** 0.5 - u['total'] = (u2.sum(axis=1, skipna=False) ** 0.5) - u['total_95'] = u['total'] * 2 + # Sum of variance of each component, then sqrt, then multiply by 100 + # for percentage + # u['total'] = u2.drop(['u_cov'], axis=1).sum(axis=1, skipna=False) + # ** 0.5 + u["total"] = u2.sum(axis=1, skipna=False) ** 0.5 + u["total_95"] = u["total"] * 2 u = u.mul(100) # Uncertainty for the 
measurement - # The random error is computed as a mean of the random error from the measured portion and the overall + # The random error is computed as a mean of the random error from + # the measured portion and the overall # random error from the COV. - u2_random = u2['u_meas'].mean(skipna=False) + u2['u_cov'].mean(skipna=False) + u2_random = u2["u_meas"].mean(skipna=False) + u2["u_cov"].mean(skipna=False) - # All other sources are systematic (mostly due to computation method and values from user) - u2_bias = u2_measurement.drop(['u_meas', 'u_cov'], axis=1).sum(axis=1, skipna=False) + # All other sources are systematic (mostly due to computation method + # and values from user) + u2_bias = u2_measurement.drop(["u_meas", "u_cov"], axis=1).sum( + axis=1, skipna=False + ) # Combined all uncertainty sources - u2_measurement['total'] = (1 / n_transects) * u2_random + u2_bias[0] - u_measurement = u2_measurement ** 0.5 - u_measurement['total_95'] = u_measurement['total'] * 2 + u2_measurement["total"] = (1 / n_transects) * u2_random + u2_bias[0] + u_measurement = u2_measurement**0.5 + u_measurement["total_95"] = u_measurement["total"] * 2 u_measurement = u_measurement * 100 # Compute relative contributions from each source u_contribution_measurement = u2_measurement.copy() - # Adjust contribution of u_meas and u_cov to account for number of transects - u_contribution_measurement['u_meas'] = u2_measurement['u_meas'] / n_transects - u_contribution_measurement['u_cov'] = u2_measurement['u_cov'] / n_transects - u_contribution_measurement = u_contribution_measurement.div(u_contribution_measurement['total'], axis=0) + # Adjust contribution of u_meas and u_cov to account for number of + # transects + u_contribution_measurement["u_meas"] = u2_measurement["u_meas"] / n_transects + u_contribution_measurement["u_cov"] = u2_measurement["u_cov"] / n_transects + u_contribution_measurement = u_contribution_measurement.div( + u_contribution_measurement["total"], axis=0 + ) - # Adjust contribution of u_meas and u_cov to account for number of transects + # Adjust contribution of u_meas and u_cov to account for number of + # transects u_contribution = u2.copy() - u_contribution['u_meas'] = u2['u_meas'].div(n_transects, axis=0) - u_contribution['u_cov'] = u2['u_cov'].div(n_transects, axis=0) - u_contribution['total'] = u_contribution.sum(axis=1) - u_contribution = u_contribution.div(u_contribution['total'], axis=0) + u_contribution["u_meas"] = u2["u_meas"].div(n_transects, axis=0) + u_contribution["u_cov"] = u2["u_cov"].div(n_transects, axis=0) + u_contribution["total"] = u_contribution.sum(axis=1) + u_contribution = u_contribution.div(u_contribution["total"], axis=0) return u, u_measurement, u_contribution, u_contribution_measurement def data_prep(self, meas): - """Determine checked transects and max and min exponents for power and no slip extrapolation. + """Determine checked transects and max and min exponents for power + and no slip extrapolation. 
Parameters ---------- @@ -837,14 +1307,19 @@ class Oursin(object): # Bottom method selected using data from each transect only self.bot_meth.append(meas.extrap_fit.sel_fit[n].bot_method_auto) - # Store 95 percent bounds on power fit exponent for each transect if power selected + # Store 95 percent bounds on power fit exponent for each + # transect if power selected if meas.extrap_fit.sel_fit[n].bot_method_auto == "Power": try: - self.exp_95ic_min.append(meas.extrap_fit.sel_fit[n].exponent_95_ci[0]) + self.exp_95ic_min.append( + meas.extrap_fit.sel_fit[n].exponent_95_ci[0] + ) except TypeError: self.exp_95ic_min.append(np.nan) try: - self.exp_95ic_max.append(meas.extrap_fit.sel_fit[n].exponent_95_ci[1]) + self.exp_95ic_max.append( + meas.extrap_fit.sel_fit[n].exponent_95_ci[1] + ) except TypeError: self.exp_95ic_max.append(np.nan) @@ -857,7 +1332,8 @@ class Oursin(object): self.nb_transects = len(self.checked_idx) def run_simulations(self, meas): - """Compute discharges (top, bot, right, left, total, middle) based on possible scenarios + """Compute discharges (top, bot, right, left, total, middle) based + on possible scenarios Parameters ---------- @@ -866,37 +1342,49 @@ class Oursin(object): """ # If list have not be saved recompute q_sensitivity - if not hasattr(meas.extrap_fit.q_sensitivity, 'q_pp_list'): - meas.extrap_fit.q_sensitivity.populate_data(meas.transects, meas.extrap_fit.sel_fit) + if not hasattr(meas.extrap_fit.q_sensitivity, "q_pp_list"): + meas.extrap_fit.q_sensitivity.populate_data( + meas.transects, meas.extrap_fit.sel_fit + ) # Simulation original self.sim_orig(meas) # Simulation power / power default 1/6 - self.sim_extrap_pp_16['q_total'] = meas.extrap_fit.q_sensitivity.q_pp_list - self.sim_extrap_pp_16['q_top'] = meas.extrap_fit.q_sensitivity.q_top_pp_list - self.sim_extrap_pp_16['q_bot'] = meas.extrap_fit.q_sensitivity.q_bot_pp_list + self.sim_extrap_pp_16["q_total"] = meas.extrap_fit.q_sensitivity.q_pp_list + self.sim_extrap_pp_16["q_top"] = meas.extrap_fit.q_sensitivity.q_top_pp_list + self.sim_extrap_pp_16["q_bot"] = meas.extrap_fit.q_sensitivity.q_bot_pp_list # Simulations power / power optimized self.sim_pp_min_max_opt(meas=meas) # Simulation cns default 1/6 - self.sim_extrap_cns_16['q_total'] = meas.extrap_fit.q_sensitivity.q_cns_list - self.sim_extrap_cns_16['q_top'] = meas.extrap_fit.q_sensitivity.q_top_cns_list - self.sim_extrap_cns_16['q_bot'] = meas.extrap_fit.q_sensitivity.q_bot_cns_list + self.sim_extrap_cns_16["q_total"] = meas.extrap_fit.q_sensitivity.q_cns_list + self.sim_extrap_cns_16["q_top"] = meas.extrap_fit.q_sensitivity.q_top_cns_list + self.sim_extrap_cns_16["q_bot"] = meas.extrap_fit.q_sensitivity.q_bot_cns_list # Simulation cns optimized self.sim_cns_min_max_opt(meas=meas) # Simulation 3pt no slip default 1/6 - self.sim_extrap_3pns_16['q_total'] = meas.extrap_fit.q_sensitivity.q_3p_ns_list - self.sim_extrap_3pns_16['q_top'] = meas.extrap_fit.q_sensitivity.q_top_3p_ns_list - self.sim_extrap_3pns_16['q_bot'] = meas.extrap_fit.q_sensitivity.q_bot_3p_ns_list + self.sim_extrap_3pns_16["q_total"] = meas.extrap_fit.q_sensitivity.q_3p_ns_list + self.sim_extrap_3pns_16[ + "q_top" + ] = meas.extrap_fit.q_sensitivity.q_top_3p_ns_list + self.sim_extrap_3pns_16[ + "q_bot" + ] = meas.extrap_fit.q_sensitivity.q_bot_3p_ns_list # Simulation 3pt no slip optimized - self.sim_extrap_3pns_opt['q_total'] = meas.extrap_fit.q_sensitivity.q_3p_ns_opt_list - self.sim_extrap_3pns_opt['q_top'] = meas.extrap_fit.q_sensitivity.q_top_3p_ns_opt_list - 
self.sim_extrap_3pns_opt['q_bot'] = meas.extrap_fit.q_sensitivity.q_bot_3p_ns_opt_list
+        self.sim_extrap_3pns_opt[
+            "q_total"
+        ] = meas.extrap_fit.q_sensitivity.q_3p_ns_opt_list
+        self.sim_extrap_3pns_opt[
+            "q_top"
+        ] = meas.extrap_fit.q_sensitivity.q_top_3p_ns_opt_list
+        self.sim_extrap_3pns_opt[
+            "q_bot"
+        ] = meas.extrap_fit.q_sensitivity.q_bot_3p_ns_opt_list
 
         # Simulations edge min and max
         self.sim_edge_min_max(meas=meas)
@@ -925,85 +1413,110 @@
             Object of class Measurement
         """
 
-        self.u_contribution_meas = pd.DataFrame(columns=['boat', 'water', 'dzi'])
+        self.u_contribution_meas = pd.DataFrame(columns=["boat", "water", "dzi"])
 
         # Set uncertainty of cell size
-        if np.isnan(self.user_advanced_settings['dzi_prct_user']):
-            u_dzi = self.default_advanced_settings['dzi_prct'] * 0.01
+        if np.isnan(self.user_advanced_settings["dzi_prct_user"]):
+            u_dzi = self.default_advanced_settings["dzi_prct"] * 0.01
         else:
-            u_dzi = self.user_advanced_settings['dzi_prct_user'] * 0.01
+            u_dzi = self.user_advanced_settings["dzi_prct_user"] * 0.01
 
         # Compute the uncertainty due to the measured area
         for transect_id in self.checked_idx:
             # Relative standard deviation of error velocity (Water Track)
-            std_ev_wt_ens = self.water_std_by_error_velocity(meas.transects[transect_id])
+            std_ev_wt_ens = self.water_std_by_error_velocity(
+                meas.transects[transect_id]
+            )
 
             u_boat = np.nan
-            if meas.transects[transect_id].boat_vel.selected == 'bt_vel':
+            if meas.transects[transect_id].boat_vel.selected == "bt_vel":
                 # Relative standard deviation of error velocity (Bottom Track)
                 u_boat = self.boat_std_by_error_velocity(meas.transects[transect_id])
 
-            elif meas.transects[transect_id].boat_vel.selected == 'gga_vel':
+            elif meas.transects[transect_id].boat_vel.selected == "gga_vel":
                 boat_std = np.nan
-                if np.isnan(self.user_advanced_settings['gga_boat_mps_user']):
+                if np.isnan(self.user_advanced_settings["gga_boat_mps_user"]):
                     if meas.transects[transect_id].gps.altitude_ens_m is not None:
-                        # Estimate the uncertainty in gga boat velocity as 1/3 of the standard deviation of
-                        # the elevation (estimate of horizontal position uncertainty) divided by time
-                        boat_std = (np.nanstd(meas.transects[transect_id].gps.altitude_ens_m, ddof=1) / 3) / \
-                                   np.nanmean(np.diff(meas.transects[transect_id].gps.gga_serial_time_ens))
+                        # Estimate the uncertainty in gga boat velocity as
+                        # 1/3 of the standard deviation of
+                        # the elevation (estimate of horizontal position
+                        # uncertainty) divided by time
+                        boat_std = (
+                            np.nanstd(
+                                meas.transects[transect_id].gps.altitude_ens_m, ddof=1
+                            )
+                            / 3
+                        ) / np.nanmean(
+                            np.diff(meas.transects[transect_id].gps.gga_serial_time_ens)
+                        )
                 else:
-                    boat_std = self.user_advanced_settings['gga_boat_mps']
+                    boat_std = self.user_advanced_settings["gga_boat_mps_user"]
 
                 if meas.transects[transect_id].boat_vel.gga_vel is not None:
                     u = meas.transects[transect_id].boat_vel.gga_vel.u_processed_mps
                     v = meas.transects[transect_id].boat_vel.gga_vel.v_processed_mps
-                    speed = np.sqrt(u ** 2 + v ** 2)
+                    speed = np.sqrt(u**2 + v**2)
                     u_boat = boat_std / speed
 
-            elif meas.transects[transect_id].boat_vel.selected == 'vtg_vel':
-                if np.isnan(self.user_advanced_settings['vtg_boat_mps_user']):
+            elif meas.transects[transect_id].boat_vel.selected == "vtg_vel":
+                if np.isnan(self.user_advanced_settings["vtg_boat_mps_user"]):
                    boat_std = np.nan
                     if meas.transects[transect_id].gps is not None:
-                        boat_std = self.default_advanced_settings['vtg_boat_mps']
+                        boat_std = self.default_advanced_settings["vtg_boat_mps"]
                 else:
-                    
boat_std = self.user_advanced_settings['vtg_boat_mps_user'] + boat_std = self.user_advanced_settings["vtg_boat_mps_user"] if meas.transects[transect_id].boat_vel.vtg_vel is not None: u = meas.transects[transect_id].boat_vel.vtg_vel.u_processed_mps v = meas.transects[transect_id].boat_vel.vtg_vel.v_processed_mps - speed = np.sqrt(u ** 2 + v ** 2) + speed = np.sqrt(u**2 + v**2) u_boat = boat_std / speed # Computation of u_meas q_2_tran = meas.discharge[transect_id].total ** 2 q_2_ens = meas.discharge[transect_id].middle_ens ** 2 - n_cell_ens = meas.transects[transect_id].w_vel.cells_above_sl.sum(axis=0) # number of cells by ens + n_cell_ens = meas.transects[transect_id].w_vel.cells_above_sl.sum( + axis=0 + ) # number of cells by ens n_cell_ens = np.where(n_cell_ens == 0, np.nan, n_cell_ens) # Variance for each ensembles - u_2_meas = q_2_ens * (u_boat ** 2 + (1 / n_cell_ens) * (std_ev_wt_ens ** 2 + u_dzi ** 2)) + u_2_meas = q_2_ens * ( + u_boat**2 + (1 / n_cell_ens) * (std_ev_wt_ens**2 + u_dzi**2) + ) u_2_prct_meas = np.nansum(u_2_meas) / q_2_tran # Standard deviation - u_prct_meas = u_2_prct_meas ** 0.5 + u_prct_meas = u_2_prct_meas**0.5 self.u_meas_list.append(u_prct_meas) - # Compute the contribution of all terms to u_meas (sum of a0 to g0 =1) - u_contrib_boat = (np.nan_to_num(q_2_ens * (u_boat ** 2)).sum() / q_2_tran) / u_2_prct_meas - u_contrib_water = (np.nan_to_num(q_2_ens * ((1 / n_cell_ens) * (std_ev_wt_ens ** 2))).sum() - / q_2_tran) / u_2_prct_meas - u_contrib_dzi = (np.nan_to_num(q_2_ens * ((1 / n_cell_ens) * (u_dzi ** 2))).sum() - / q_2_tran) / u_2_prct_meas - - self.u_contribution_meas.loc[len(self.u_contribution_meas)] = [u_contrib_boat, - u_contrib_water, - u_contrib_dzi] + # Compute the contribution of all terms to u_meas (sum of a0 to + # g0 =1) + u_contrib_boat = ( + np.nan_to_num(q_2_ens * (u_boat**2)).sum() / q_2_tran + ) / u_2_prct_meas + u_contrib_water = ( + np.nan_to_num(q_2_ens * ((1 / n_cell_ens) * (std_ev_wt_ens**2))).sum() + / q_2_tran + ) / u_2_prct_meas + u_contrib_dzi = ( + np.nan_to_num(q_2_ens * ((1 / n_cell_ens) * (u_dzi**2))).sum() + / q_2_tran + ) / u_2_prct_meas + + self.u_contribution_meas.loc[len(self.u_contribution_meas)] = [ + u_contrib_boat, + u_contrib_water, + u_contrib_dzi, + ] # Apply user specified uncertainty - if np.isnan(self.user_specified_u['u_meas_mean_user']): + if np.isnan(self.user_specified_u["u_meas_mean_user"]): self.u_meas_mean_user_list = self.u_meas_list else: - self.u_meas_mean_user_list = [0.01 * self.user_specified_u['u_meas_mean_user']] * self.nb_transects + self.u_meas_mean_user_list = [ + 0.01 * self.user_specified_u["u_meas_mean_user"] + ] * self.nb_transects def uncertainty_moving_bed(self, meas): """Computes the moving-bed uncertainty @@ -1015,7 +1528,10 @@ class Oursin(object): """ # Compute moving-bed uncertainty - if len(self.checked_idx) and meas.transects[self.checked_idx[0]].boat_vel.selected == 'bt_vel': + if ( + len(self.checked_idx) + and meas.transects[self.checked_idx[0]].boat_vel.selected == "bt_vel" + ): # Boat velocity based on bottom track, moving-bed possible if len(meas.mb_tests) > 0: # Moving_bed tests recorded @@ -1025,7 +1541,7 @@ class Oursin(object): used = [] for test in meas.mb_tests: user_valid.append(test.user_valid) - if test.test_quality == 'Errors': + if test.test_quality == "Errors": quality.append(False) else: quality.append(True) @@ -1037,11 +1553,13 @@ class Oursin(object): # Check to see if the valid tests indicate a moving bed moving_bed_bool = [] for result in moving_bed: - if result == 
'Yes': + if result == "Yes": moving_bed_bool.append(True) else: moving_bed_bool.append(False) - valid_moving_bed = np.logical_and(quality, np.asarray(moving_bed_bool)) + valid_moving_bed = np.logical_and( + quality, np.asarray(moving_bed_bool) + ) if np.any(valid_moving_bed): # Check to see that a correction was used if np.any(np.logical_and(valid_moving_bed, np.asarray(used))): @@ -1068,26 +1586,30 @@ class Oursin(object): self.u_movbed_list = [0.01 * moving_bed_uncertainty / 2] * self.nb_transects # Apply user specified - if np.isnan(self.user_specified_u['u_movbed_user']): + if np.isnan(self.user_specified_u["u_movbed_user"]): self.u_movbed_user_list = self.u_movbed_list else: - self.u_movbed_user_list = [self.user_specified_u['u_movbed_user'] * 0.01] * self.nb_transects + self.u_movbed_user_list = [ + self.user_specified_u["u_movbed_user"] * 0.01 + ] * self.nb_transects def uncertainty_system(self): - """Compute systematic uncertainty - """ + """Compute systematic uncertainty""" # Assume 1.31% systematic bias at 68% self.u_syst_list = [0.01 * 1.31] * self.nb_transects # Override with user specification if available - if np.isnan(self.user_specified_u['u_syst_mean_user']): + if np.isnan(self.user_specified_u["u_syst_mean_user"]): self.u_syst_mean_user_list = self.u_syst_list else: - self.u_syst_mean_user_list = [self.user_specified_u['u_syst_mean_user'] * 0.01] * self.nb_transects + self.u_syst_mean_user_list = [ + self.user_specified_u["u_syst_mean_user"] * 0.01 + ] * self.nb_transects def uncertainty_number_ensembles(self, meas): - """Computes the uncertainty due to the number of ensembles in a transect. + """Computes the uncertainty due to the number of ensembles in a + transect. Parameters ---------- @@ -1096,16 +1618,22 @@ class Oursin(object): """ for trans_id in self.checked_idx: - # Compute uncertainty due to limited number of ensembles (ISO 748; Le Coz et al., 2012) - self.u_ens_list.append(0.01 * 32 * len(meas.discharge[trans_id].middle_ens) ** (-0.88)) + # Compute uncertainty due to limited number of ensembles (ISO + # 748; Le Coz et al., 2012) + self.u_ens_list.append( + 0.01 * 32 * len(meas.discharge[trans_id].middle_ens) ** (-0.88) + ) - if np.isnan(self.user_specified_u['u_ens_user']): + if np.isnan(self.user_specified_u["u_ens_user"]): self.u_ens_user_list = self.u_ens_list else: - self.u_ens_user_list = [0.01 * self.user_specified_u['u_ens_user']] * self.nb_transects + self.u_ens_user_list = [ + 0.01 * self.user_specified_u["u_ens_user"] + ] * self.nb_transects def uncertainty_compass(self, meas): - """Compute the potential bias in the measurement due to dynamic compass errors when using GPS as + """Compute the potential bias in the measurement due to dynamic + compass errors when using GPS as the navigation reference. The method is based on Mueller (2018, https://doi.org/10.1016/j.flowmeasinst.2018.10.004, equation 41. 
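
Several of the per-transect terms in this stretch reduce to closed-form expressions: the measured-discharge variance assembled in uncertainty_measured_discharge, the ISO 748 ensemble-count term, and the Mueller (2018, equation 41) compass bias applied in the hunk that follows. A compact numeric sketch under stated assumptions; all input values are illustrative, and numpy radian trig stands in for the degree-based cosd/sind helpers the class imports from MiscLibs.common_functions:

    import numpy as np

    # Measured-discharge term for one transect (illustrative values).
    q_ens = np.array([0.8, 1.1, 0.9])       # middle-ensemble discharges, m3/s
    n_cells = np.array([12, 15, 10])        # valid depth cells per ensemble
    u_boat = 0.02                           # relative boat-velocity uncertainty
    std_ev = np.array([0.03, 0.025, 0.04])  # relative error-velocity std (water)
    u_dzi = 0.005                           # relative cell-size uncertainty

    # Variance of each ensemble discharge, then the transect-level relative
    # standard uncertainty. The class divides by the squared transect total;
    # the ensemble sum is a stand-in here because edges and extrapolated
    # zones are omitted.
    u2_meas = q_ens**2 * (u_boat**2 + (1 / n_cells) * (std_ev**2 + u_dzi**2))
    u_prct_meas = (np.nansum(u2_meas) / np.nansum(q_ens) ** 2) ** 0.5

    # Ensemble-count term (ISO 748; Le Coz et al., 2012): about 0.12 percent
    # for a 600-ensemble transect.
    u_ens = 0.01 * 32 * 600 ** (-0.88)

    # Compass bias for GPS-referenced transects (Mueller 2018, equation 41),
    # here for an assumed 1 degree error and a boat/water speed ratio of 0.5.
    error_rad = np.deg2rad(1.0)
    speed_ratio = 0.5
    u_compass = abs(1 - (np.cos(error_rad) + 0.5 * speed_ratio * np.sin(error_rad)))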
@@ -1116,157 +1644,217 @@ class Oursin(object): """ # No compass error component for bottom track referenced discharges - if meas.transects[self.checked_idx[0]].boat_vel.selected == 'bt_vel': + if meas.transects[self.checked_idx[0]].boat_vel.selected == "bt_vel": self.u_compass_list = [0] * self.nb_transects else: # Assume a default compass error unless one is provided by the user - if np.isnan(self.user_advanced_settings['compass_error_deg_user']): - compass_error = self.default_advanced_settings['compass_error_deg'] + if np.isnan(self.user_advanced_settings["compass_error_deg_user"]): + compass_error = self.default_advanced_settings["compass_error_deg"] else: - compass_error = self.user_advanced_settings['compass_error_deg_user'] + compass_error = self.user_advanced_settings["compass_error_deg_user"] # Compute discharge bias based on compass error and boat speed meas_stats = meas.compute_measurement_properties(meas) - speed_ratio = meas_stats['avg_boat_speed'][self.checked_idx] / \ - meas_stats['avg_water_speed'][self.checked_idx] - self.u_compass_list = np.abs(1 - (cosd(compass_error) + 0.5 * speed_ratio * sind(compass_error))) + speed_ratio = ( + meas_stats["avg_boat_speed"][self.checked_idx] + / meas_stats["avg_water_speed"][self.checked_idx] + ) + self.u_compass_list = np.abs( + 1 - (cosd(compass_error) + 0.5 * speed_ratio * sind(compass_error)) + ) # Override if user provides uncertainty due to compass - if np.isnan(self.user_specified_u['u_compass_user']): + if np.isnan(self.user_specified_u["u_compass_user"]): self.u_compass_user_list = self.u_compass_list else: - self.u_compass_user_list = [self.user_specified_u['u_compass_user'] * 0.01] * self.nb_transects + self.u_compass_user_list = [ + self.user_specified_u["u_compass_user"] * 0.01 + ] * self.nb_transects def uncertainty_top_discharge(self): - """Computes the uncertainty in the top discharge using simulations and rectangular law. + """Computes the uncertainty in the top discharge using simulations + and rectangular law. """ - self.u_top_list = list(Oursin.apply_u_rect(list_sims=[self.sim_original, - self.sim_extrap_pp_opt, - self.sim_extrap_pp_min, - self.sim_extrap_pp_max, - self.sim_extrap_cns_opt, - self.sim_extrap_cns_min, - self.sim_extrap_cns_max, - self.sim_extrap_3pns_opt, - self.sim_draft_max, - self.sim_draft_min], - col_name='q_top') - / np.abs(self.sim_original['q_total'])) - - if np.isnan(self.user_specified_u['u_top_mean_user']): + self.u_top_list = list( + Oursin.apply_u_rect( + list_sims=[ + self.sim_original, + self.sim_extrap_pp_opt, + self.sim_extrap_pp_min, + self.sim_extrap_pp_max, + self.sim_extrap_cns_opt, + self.sim_extrap_cns_min, + self.sim_extrap_cns_max, + self.sim_extrap_3pns_opt, + self.sim_draft_max, + self.sim_draft_min, + ], + col_name="q_top", + ) + / np.abs(self.sim_original["q_total"]) + ) + + if np.isnan(self.user_specified_u["u_top_mean_user"]): self.u_top_mean_user_list = self.u_top_list else: - self.u_top_mean_user_list = [0.01 * self.user_specified_u['u_top_mean_user']] * self.nb_transects + self.u_top_mean_user_list = [ + 0.01 * self.user_specified_u["u_top_mean_user"] + ] * self.nb_transects def uncertainty_bottom_discharge(self): - """Computes uncertainty of bottom discharge using simulations and rectangular law. + """Computes uncertainty of bottom discharge using simulations and + rectangular law. 
""" - self.u_bot_list = list(Oursin.apply_u_rect(list_sims=[self.sim_original, - self.sim_extrap_pp_opt, - self.sim_extrap_pp_min, - self.sim_extrap_pp_max, - self.sim_extrap_cns_opt, - self.sim_extrap_cns_min, - self.sim_extrap_cns_max, - self.sim_extrap_3pns_opt], - col_name='q_bot') - / np.abs(self.sim_original['q_total'])) - - if np.isnan(self.user_specified_u['u_bot_mean_user']): + self.u_bot_list = list( + Oursin.apply_u_rect( + list_sims=[ + self.sim_original, + self.sim_extrap_pp_opt, + self.sim_extrap_pp_min, + self.sim_extrap_pp_max, + self.sim_extrap_cns_opt, + self.sim_extrap_cns_min, + self.sim_extrap_cns_max, + self.sim_extrap_3pns_opt, + ], + col_name="q_bot", + ) + / np.abs(self.sim_original["q_total"]) + ) + + if np.isnan(self.user_specified_u["u_bot_mean_user"]): self.u_bot_mean_user_list = self.u_bot_list else: - self.u_bot_mean_user_list = [0.01 * self.user_specified_u['u_bot_mean_user']] * self.nb_transects + self.u_bot_mean_user_list = [ + 0.01 * self.user_specified_u["u_bot_mean_user"] + ] * self.nb_transects def uncertainty_left_discharge(self): - """Computes the uncertianty of the left edge discharge using simulations and the rectangular law. + """Computes the uncertianty of the left edge discharge using + simulations and the rectangular law. """ - self.u_left_list = list(Oursin.apply_u_rect(list_sims=[self.sim_original['q_left'], - self.sim_edge_min, - self.sim_edge_max, - self.sim_draft_min, - self.sim_draft_max], - col_name='q_left') - / np.abs(self.sim_original['q_total'])) - - if np.isnan(self.user_specified_u['u_left_mean_user']): + self.u_left_list = list( + Oursin.apply_u_rect( + list_sims=[ + self.sim_original["q_left"], + self.sim_edge_min, + self.sim_edge_max, + self.sim_draft_min, + self.sim_draft_max, + ], + col_name="q_left", + ) + / np.abs(self.sim_original["q_total"]) + ) + + if np.isnan(self.user_specified_u["u_left_mean_user"]): self.u_left_mean_user_list = self.u_left_list else: - self.u_left_mean_user_list = [0.01 * self.user_specified_u['u_left_mean_user']] * self.nb_transects + self.u_left_mean_user_list = [ + 0.01 * self.user_specified_u["u_left_mean_user"] + ] * self.nb_transects def uncertainty_right_discharge(self): - """Computes the uncertainty of the right edge discharge using simulations and the rectangular law. + """Computes the uncertainty of the right edge discharge using + simulations and the rectangular law. """ - self.u_right_list = list(Oursin.apply_u_rect(list_sims=[self.sim_original['q_right'], - self.sim_edge_min, - self.sim_edge_max, - self.sim_draft_min, - self.sim_draft_max], - col_name='q_right') - / np.abs(self.sim_original['q_total'])) - - if np.isnan(self.user_specified_u['u_right_mean_user']): + self.u_right_list = list( + Oursin.apply_u_rect( + list_sims=[ + self.sim_original["q_right"], + self.sim_edge_min, + self.sim_edge_max, + self.sim_draft_min, + self.sim_draft_max, + ], + col_name="q_right", + ) + / np.abs(self.sim_original["q_total"]) + ) + + if np.isnan(self.user_specified_u["u_right_mean_user"]): self.u_right_mean_user_list = self.u_right_list else: - self.u_right_mean_user_list = [0.01 * self.user_specified_u['u_right_mean_user']] * self.nb_transects + self.u_right_mean_user_list = [ + 0.01 * self.user_specified_u["u_right_mean_user"] + ] * self.nb_transects def uncertainty_invalid_depth_data(self): - """Computes the uncertainty due to invalid depth data using simulations and the retangular law. + """Computes the uncertainty due to invalid depth data using + simulations and the rectangular law. 
""" - self.u_invalid_depth_list = list(Oursin.apply_u_rect(list_sims=[self.sim_original, - self.sim_depth_hold, - self.sim_depth_next], - col_name='q_total') - / np.abs(self.sim_original['q_total'])) + self.u_invalid_depth_list = list( + Oursin.apply_u_rect( + list_sims=[self.sim_original, self.sim_depth_hold, self.sim_depth_next], + col_name="q_total", + ) + / np.abs(self.sim_original["q_total"]) + ) - if np.isnan(self.user_specified_u['u_invalid_depth_user']): + if np.isnan(self.user_specified_u["u_invalid_depth_user"]): self.u_invalid_depth_user_list = self.u_invalid_depth_list else: - self.u_invalid_depth_user_list = [0.01 * self.user_specified_u[ - 'u_invalid_depth_user']] * self.nb_transects + self.u_invalid_depth_user_list = [ + 0.01 * self.user_specified_u["u_invalid_depth_user"] + ] * self.nb_transects def uncertainty_invalid_boat_data(self): - """Computes the uncertainty due to invalid boat data using simulations and the rectangular law. + """Computes the uncertainty due to invalid boat data using + simulations and the rectangular law. """ - self.u_invalid_boat_list = list(Oursin.apply_u_rect(list_sims=[self.sim_original, - self.sim_boat_hold, - self.sim_boat_next], - col_name='q_total') - / np.abs(self.sim_original['q_total'])) + self.u_invalid_boat_list = list( + Oursin.apply_u_rect( + list_sims=[self.sim_original, self.sim_boat_hold, self.sim_boat_next], + col_name="q_total", + ) + / np.abs(self.sim_original["q_total"]) + ) - if np.isnan(self.user_specified_u['u_invalid_boat_user']): + if np.isnan(self.user_specified_u["u_invalid_boat_user"]): self.u_invalid_boat_user_list = self.u_invalid_boat_list else: - self.u_invalid_boat_user_list = [0.01 * self.user_specified_u['u_invalid_boat_user']] * self.nb_transects + self.u_invalid_boat_user_list = [ + 0.01 * self.user_specified_u["u_invalid_boat_user"] + ] * self.nb_transects def uncertainty_invalid_water_data(self): - """Computes the uncertainty due to invalid water data assuming rectangular law. + """Computes the uncertainty due to invalid water data assuming + rectangular law. """ # Uncertainty due to invalid cells and ensembles - self.u_invalid_water_list = list(Oursin.apply_u_rect(list_sims=[self.sim_original, - self.sim_cells_trdi, - self.sim_cells_above, - self.sim_cells_below, - self.sim_cells_before, - self.sim_cells_after, - self.sim_shallow], - col_name='q_total') - / np.abs(self.sim_original['q_total'])) - - if np.isnan(self.user_specified_u['u_invalid_water_user']): + self.u_invalid_water_list = list( + Oursin.apply_u_rect( + list_sims=[ + self.sim_original, + self.sim_cells_trdi, + self.sim_cells_above, + self.sim_cells_below, + self.sim_cells_before, + self.sim_cells_after, + self.sim_shallow, + ], + col_name="q_total", + ) + / np.abs(self.sim_original["q_total"]) + ) + + if np.isnan(self.user_specified_u["u_invalid_water_user"]): self.u_invalid_water_user_list = self.u_invalid_water_list else: - self.u_invalid_water_user_list = [0.01 * self.user_specified_u['u_invalid_water_user']] \ - * self.nb_transects + self.u_invalid_water_user_list = [ + 0.01 * self.user_specified_u["u_invalid_water_user"] + ] * self.nb_transects - def compute_measurement_cov(self, meas, method='Bayes'): - """Compute the coefficient of variation of the total transect discharges used in the measurement. + def compute_measurement_cov(self, meas, method="Bayes"): + """Compute the coefficient of variation of the total transect + discharges used in the measurement. 
Parameters ---------- @@ -1278,7 +1866,7 @@ class Oursin(object): self.cov_68 = np.nan - if method == 'QRev': + if method == "QRev": # Only compute for multiple transects if self.nb_transects > 1: @@ -1291,26 +1879,32 @@ class Oursin(object): # Inflate the cov to the 95% value if len(total_q) == 2: - # Use the approximate method as taught in class to reduce the high coverage factor for 2 transects - # and account for prior knowledge related to 720 second duration analysis + # Use the approximate method as taught in class to + # reduce the high coverage factor for 2 transects + # and account for prior knowledge related to 720 second + # duration analysis cov_95 = cov * 3.3 self.cov_68 = cov_95 / 2 else: # Use Student's t to inflate COV for n > 2 - cov_95 = t.interval(0.95, len(total_q) - 1)[1] * cov / len(total_q) ** 0.5 + cov_95 = ( + t.interval(0.95, len(total_q) - 1)[1] + * cov + / len(total_q) ** 0.5 + ) self.cov_68 = cov_95 / 2 - elif method == 'Bayes': + elif method == "Bayes": # Set prior - if np.isnan(meas.oursin.user_advanced_settings['cov_prior_user']): - cov_prior = meas.oursin.default_advanced_settings['cov_prior'] + if np.isnan(meas.oursin.user_advanced_settings["cov_prior_user"]): + cov_prior = meas.oursin.default_advanced_settings["cov_prior"] else: - cov_prior = meas.oursin.user_advanced_settings['cov_prior_user'] + cov_prior = meas.oursin.user_advanced_settings["cov_prior_user"] - if np.isnan(meas.oursin.user_advanced_settings['cov_prior_u_user']): - cov_prior_u = meas.oursin.default_advanced_settings['cov_prior_u'] + if np.isnan(meas.oursin.user_advanced_settings["cov_prior_u_user"]): + cov_prior_u = meas.oursin.default_advanced_settings["cov_prior_u"] else: - cov_prior_u = meas.oursin.user_advanced_settings['cov_prior_u_user'] + cov_prior_u = meas.oursin.user_advanced_settings["cov_prior_u_user"] # Create list of observations transects_total_q = [] @@ -1318,7 +1912,9 @@ class Oursin(object): transects_total_q.append(meas.discharge[idx].total) # Compute COV - self.cov_68 = bayes_cov(np.array(transects_total_q), cov_prior, cov_prior_u, 20000) + self.cov_68 = bayes_cov( + np.array(transects_total_q), cov_prior, cov_prior_u, 20000 + ) def sim_orig(self, meas): """Stores original measurement results in a data frame @@ -1331,17 +1927,19 @@ class Oursin(object): self.sim_original = self.sim_original.iloc[0:0] transect_q = dict() for trans_id in self.checked_idx: - transect_q['q_total'] = meas.discharge[trans_id].total - transect_q['q_top'] = meas.discharge[trans_id].top - transect_q['q_bot'] = meas.discharge[trans_id].bottom - transect_q['q_right'] = meas.discharge[trans_id].right - transect_q['q_left'] = meas.discharge[trans_id].left - transect_q['q_middle'] = meas.discharge[trans_id].middle - self.sim_original = self.sim_original.append(transect_q, ignore_index=True, sort=False) + transect_q["q_total"] = meas.discharge[trans_id].total + transect_q["q_top"] = meas.discharge[trans_id].top + transect_q["q_bot"] = meas.discharge[trans_id].bottom + transect_q["q_right"] = meas.discharge[trans_id].right + transect_q["q_left"] = meas.discharge[trans_id].left + transect_q["q_middle"] = meas.discharge[trans_id].middle + self.sim_original = self.sim_original.append( + transect_q, ignore_index=True, sort=False + ) def sim_cns_min_max_opt(self, meas): - """Computes simulations resulting in the the min and max discharges for a constant no slip extrapolation - fit. + """Computes simulations resulting in the min and max discharges + for a constant no slip extrapolation fit. 
Parameters ---------- @@ -1350,49 +1948,84 @@ class Oursin(object): """ # Compute min-max no slip exponent - skip_ns_min_max, self.exp_ns_max, self.exp_ns_min = \ - self.compute_ns_max_min(meas=meas, - ns_exp=self.ns_exp, - exp_ns_min_user=self.user_advanced_settings['exp_ns_min_user'], - exp_ns_max_user=self.user_advanced_settings['exp_ns_max_user']) + skip_ns_min_max, self.exp_ns_max, self.exp_ns_min = self.compute_ns_max_min( + meas=meas, + ns_exp=self.ns_exp, + exp_ns_min_user=self.user_advanced_settings["exp_ns_min_user"], + exp_ns_max_user=self.user_advanced_settings["exp_ns_max_user"], + ) # Optimized - self.sim_extrap_cns_opt['q_total'] = meas.extrap_fit.q_sensitivity.q_cns_opt_list - self.sim_extrap_cns_opt['q_top'] = meas.extrap_fit.q_sensitivity.q_top_cns_opt_list - self.sim_extrap_cns_opt['q_bot'] = meas.extrap_fit.q_sensitivity.q_bot_cns_opt_list + self.sim_extrap_cns_opt[ + "q_total" + ] = meas.extrap_fit.q_sensitivity.q_cns_opt_list + self.sim_extrap_cns_opt[ + "q_top" + ] = meas.extrap_fit.q_sensitivity.q_top_cns_opt_list + self.sim_extrap_cns_opt[ + "q_bot" + ] = meas.extrap_fit.q_sensitivity.q_bot_cns_opt_list # Max min if skip_ns_min_max: # If cns not used both max and min are equal to the optimized value - self.sim_extrap_cns_min['q_total'] = meas.extrap_fit.q_sensitivity.q_cns_opt_list - self.sim_extrap_cns_min['q_top'] = meas.extrap_fit.q_sensitivity.q_top_cns_opt_list - self.sim_extrap_cns_min['q_bot'] = meas.extrap_fit.q_sensitivity.q_bot_cns_opt_list - self.sim_extrap_cns_max['q_total'] = meas.extrap_fit.q_sensitivity.q_cns_opt_list - self.sim_extrap_cns_max['q_top'] = meas.extrap_fit.q_sensitivity.q_top_cns_opt_list - self.sim_extrap_cns_max['q_bot'] = meas.extrap_fit.q_sensitivity.q_bot_cns_opt_list + self.sim_extrap_cns_min[ + "q_total" + ] = meas.extrap_fit.q_sensitivity.q_cns_opt_list + self.sim_extrap_cns_min[ + "q_top" + ] = meas.extrap_fit.q_sensitivity.q_top_cns_opt_list + self.sim_extrap_cns_min[ + "q_bot" + ] = meas.extrap_fit.q_sensitivity.q_bot_cns_opt_list + self.sim_extrap_cns_max[ + "q_total" + ] = meas.extrap_fit.q_sensitivity.q_cns_opt_list + self.sim_extrap_cns_max[ + "q_top" + ] = meas.extrap_fit.q_sensitivity.q_top_cns_opt_list + self.sim_extrap_cns_max[ + "q_bot" + ] = meas.extrap_fit.q_sensitivity.q_bot_cns_opt_list else: # Compute q for min and max values q = QComp() - self.sim_extrap_cns_min = pd.DataFrame(columns=self.sim_extrap_cns_min.columns) - self.sim_extrap_cns_max = pd.DataFrame(columns=self.sim_extrap_cns_max.columns) + self.sim_extrap_cns_min = pd.DataFrame( + columns=self.sim_extrap_cns_min.columns + ) + self.sim_extrap_cns_max = pd.DataFrame( + columns=self.sim_extrap_cns_max.columns + ) for trans_id in self.checked_idx: # Compute min values - q.populate_data(data_in=meas.transects[trans_id], - top_method='Constant', - bot_method='No Slip', - exponent=self.exp_ns_min) - self.sim_extrap_cns_min.loc[len(self.sim_extrap_cns_min)] = [q.total, q.top, q.bottom] + q.populate_data( + data_in=meas.transects[trans_id], + top_method="Constant", + bot_method="No Slip", + exponent=self.exp_ns_min, + ) + self.sim_extrap_cns_min.loc[len(self.sim_extrap_cns_min)] = [ + q.total, + q.top, + q.bottom, + ] # Compute max values - q.populate_data(data_in=meas.transects[trans_id], - top_method='Constant', - bot_method='No Slip', - exponent=self.exp_ns_max) - self.sim_extrap_cns_max.loc[len(self.sim_extrap_cns_max)] = [q.total, q.top, q.bottom] + q.populate_data( + data_in=meas.transects[trans_id], + top_method="Constant", + bot_method="No Slip", + 
exponent=self.exp_ns_max,
+            )
+            self.sim_extrap_cns_max.loc[len(self.sim_extrap_cns_max)] = [
+                q.total,
+                q.top,
+                q.bottom,
+            ]
 
     def sim_pp_min_max_opt(self, meas):
-        """Computes simulations resulting in the the min and max discharges for a power power extrapolation
-        fit.
+        """Computes simulations resulting in the min and max discharges
+        for a power power extrapolation fit.
 
         Parameters
         ----------
@@ -1402,51 +2035,86 @@
 
         # A power fit is not applicable to bi-directional flow
        mean_q = meas.mean_discharges(meas)
-        if np.sign(mean_q['top_mean']) != np.sign(mean_q['bot_mean']):
-            self.sim_extrap_pp_min = self.sim_original[['q_total', 'q_top', 'q_bot']]
-            self.sim_extrap_pp_max = self.sim_original[['q_total', 'q_top', 'q_bot']]
-            self.sim_extrap_pp_opt = self.sim_original[['q_total', 'q_top', 'q_bot']]
+        if np.sign(mean_q["top_mean"]) != np.sign(mean_q["bot_mean"]):
+            self.sim_extrap_pp_min = self.sim_original[["q_total", "q_top", "q_bot"]]
+            self.sim_extrap_pp_max = self.sim_original[["q_total", "q_top", "q_bot"]]
+            self.sim_extrap_pp_opt = self.sim_original[["q_total", "q_top", "q_bot"]]
 
         else:
             # Compute min-max power exponent
-            skip_pp_min_max, self.exp_pp_max, self.exp_pp_min = \
-                self.compute_pp_max_min(meas=meas,
-                                        exp_95ic_min=self.exp_95ic_min,
-                                        exp_95ic_max=self.exp_95ic_max,
-                                        pp_exp=self.pp_exp,
-                                        exp_pp_min_user=self.user_advanced_settings['exp_pp_min_user'],
-                                        exp_pp_max_user=self.user_advanced_settings['exp_pp_max_user'])
+            skip_pp_min_max, self.exp_pp_max, self.exp_pp_min = self.compute_pp_max_min(
+                meas=meas,
+                exp_95ic_min=self.exp_95ic_min,
+                exp_95ic_max=self.exp_95ic_max,
+                pp_exp=self.pp_exp,
+                exp_pp_min_user=self.user_advanced_settings["exp_pp_min_user"],
+                exp_pp_max_user=self.user_advanced_settings["exp_pp_max_user"],
+            )
 
             # Optimized
-            self.sim_extrap_pp_opt['q_total'] = meas.extrap_fit.q_sensitivity.q_pp_opt_list
-            self.sim_extrap_pp_opt['q_top'] = meas.extrap_fit.q_sensitivity.q_top_pp_opt_list
-            self.sim_extrap_pp_opt['q_bot'] = meas.extrap_fit.q_sensitivity.q_bot_pp_opt_list
+            self.sim_extrap_pp_opt[
+                "q_total"
+            ] = meas.extrap_fit.q_sensitivity.q_pp_opt_list
+            self.sim_extrap_pp_opt[
+                "q_top"
+            ] = meas.extrap_fit.q_sensitivity.q_top_pp_opt_list
+            self.sim_extrap_pp_opt[
+                "q_bot"
+            ] = meas.extrap_fit.q_sensitivity.q_bot_pp_opt_list
 
             # Max min
             if skip_pp_min_max:
-                self.sim_extrap_pp_min['q_total'] = meas.extrap_fit.q_sensitivity.q_pp_opt_list
-                self.sim_extrap_pp_min['q_top'] = meas.extrap_fit.q_sensitivity.q_top_pp_opt_list
-                self.sim_extrap_pp_min['q_bot'] = meas.extrap_fit.q_sensitivity.q_bot_pp_opt_list
-                self.sim_extrap_pp_max['q_total'] = meas.extrap_fit.q_sensitivity.q_pp_opt_list
-                self.sim_extrap_pp_max['q_top'] = meas.extrap_fit.q_sensitivity.q_top_pp_opt_list
-                self.sim_extrap_pp_max['q_bot'] = meas.extrap_fit.q_sensitivity.q_bot_pp_opt_list
+                self.sim_extrap_pp_min[
+                    "q_total"
+                ] = meas.extrap_fit.q_sensitivity.q_pp_opt_list
+                self.sim_extrap_pp_min[
+                    "q_top"
+                ] = meas.extrap_fit.q_sensitivity.q_top_pp_opt_list
+                self.sim_extrap_pp_min[
+                    "q_bot"
+                ] = meas.extrap_fit.q_sensitivity.q_bot_pp_opt_list
+                self.sim_extrap_pp_max[
+                    "q_total"
+                ] = meas.extrap_fit.q_sensitivity.q_pp_opt_list
+                self.sim_extrap_pp_max[
+                    "q_top"
+                ] = meas.extrap_fit.q_sensitivity.q_top_pp_opt_list
+                self.sim_extrap_pp_max[
+                    "q_bot"
+                ] = meas.extrap_fit.q_sensitivity.q_bot_pp_opt_list
 
             else:
                 q = QComp()
-                self.sim_extrap_pp_min = pd.DataFrame(columns=self.sim_extrap_pp_min.columns)
-                self.sim_extrap_pp_max = 
pd.DataFrame(columns=self.sim_extrap_pp_max.columns) + self.sim_extrap_pp_min = pd.DataFrame( + columns=self.sim_extrap_pp_min.columns + ) + self.sim_extrap_pp_max = pd.DataFrame( + columns=self.sim_extrap_pp_max.columns + ) for trans_id in self.checked_idx: - q.populate_data(data_in=meas.transects[trans_id], - top_method='Power', - bot_method='Power', - exponent=self.exp_pp_min) - self.sim_extrap_pp_min.loc[len(self.sim_extrap_pp_min)] = [q.total, q.top, q.bottom] - - q.populate_data(data_in=meas.transects[trans_id], - top_method='Power', - bot_method='Power', - exponent=self.exp_pp_max) - self.sim_extrap_pp_max.loc[len(self.sim_extrap_pp_max)] = [q.total, q.top, q.bottom] + q.populate_data( + data_in=meas.transects[trans_id], + top_method="Power", + bot_method="Power", + exponent=self.exp_pp_min, + ) + self.sim_extrap_pp_min.loc[len(self.sim_extrap_pp_min)] = [ + q.total, + q.top, + q.bottom, + ] + + q.populate_data( + data_in=meas.transects[trans_id], + top_method="Power", + bot_method="Power", + exponent=self.exp_pp_max, + ) + self.sim_extrap_pp_max.loc[len(self.sim_extrap_pp_max)] = [ + q.total, + q.top, + q.bottom, + ] def sim_edge_min_max(self, meas): """Computes simulations for the maximum and minimum edge discharges. @@ -1471,39 +2139,53 @@ class Oursin(object): # Process each checked transect for trans_id in self.checked_idx: # Compute max and min edge distances - max_left_dist, max_right_dist, min_left_dist, min_right_dist = \ - self.compute_edge_dist_max_min(transect=meas.transects[trans_id], - user_settings=self.user_advanced_settings, - default_settings=self.default_advanced_settings) + ( + max_left_dist, + max_right_dist, + min_left_dist, + min_right_dist, + ) = self.compute_edge_dist_max_min( + transect=meas.transects[trans_id], + user_settings=self.user_advanced_settings, + default_settings=self.default_advanced_settings, + ) # Compute edge minimum self.d_right_error_min.append(min_right_dist) self.d_left_error_min.append(min_left_dist) meas_temp.transects[trans_id].edges.left.distance_m = min_left_dist meas_temp.transects[trans_id].edges.right.distance_m = min_right_dist - meas_temp.transects[trans_id].edges.left.type = 'Triangular' - meas_temp.transects[trans_id].edges.right.type = 'Triangular' - meas_temp.discharge[trans_id].populate_data(data_in=meas_temp.transects[trans_id], - moving_bed_data=meas_temp.mb_tests) - self.sim_edge_min.loc[len(self.sim_edge_min)] = [meas_temp.discharge[trans_id].total, - meas_temp.discharge[trans_id].left, - meas_temp.discharge[trans_id].right] + meas_temp.transects[trans_id].edges.left.type = "Triangular" + meas_temp.transects[trans_id].edges.right.type = "Triangular" + meas_temp.discharge[trans_id].populate_data( + data_in=meas_temp.transects[trans_id], + moving_bed_data=meas_temp.mb_tests, + ) + self.sim_edge_min.loc[len(self.sim_edge_min)] = [ + meas_temp.discharge[trans_id].total, + meas_temp.discharge[trans_id].left, + meas_temp.discharge[trans_id].right, + ] # Compute edge maximum self.d_right_error_max.append(max_right_dist) self.d_left_error_max.append(max_left_dist) meas_temp.transects[trans_id].edges.left.distance_m = max_left_dist meas_temp.transects[trans_id].edges.right.distance_m = max_right_dist - meas_temp.transects[trans_id].edges.left.type = 'Rectangular' - meas_temp.transects[trans_id].edges.right.type = 'Rectangular' - meas_temp.discharge[trans_id].populate_data(data_in=meas_temp.transects[trans_id], - moving_bed_data=meas_temp.mb_tests) - self.sim_edge_max.loc[len(self.sim_edge_max)] = 
[meas_temp.discharge[trans_id].total, - meas_temp.discharge[trans_id].left, - meas_temp.discharge[trans_id].right] + meas_temp.transects[trans_id].edges.left.type = "Rectangular" + meas_temp.transects[trans_id].edges.right.type = "Rectangular" + meas_temp.discharge[trans_id].populate_data( + data_in=meas_temp.transects[trans_id], + moving_bed_data=meas_temp.mb_tests, + ) + self.sim_edge_max.loc[len(self.sim_edge_max)] = [ + meas_temp.discharge[trans_id].total, + meas_temp.discharge[trans_id].left, + meas_temp.discharge[trans_id].right, + ] def sim_draft_max_min(self, meas): - """Compute the simulations for the max and min draft errror. + """Compute the simulations for the max and min draft error. Parameters ---------- @@ -1521,30 +2203,40 @@ class Oursin(object): for trans_id in self.checked_idx: # Compute max and min draft - draft_max, draft_min, draft_error = \ - self.compute_draft_max_min(transect=meas.transects[trans_id], - draft_error_m_user=self.user_advanced_settings['draft_error_m_user']) + draft_max, draft_min, draft_error = self.compute_draft_max_min( + transect=meas.transects[trans_id], + draft_error_m_user=self.user_advanced_settings["draft_error_m_user"], + ) self.draft_error_list.append(draft_error) # Compute discharge for draft min meas_temp.transects[trans_id].change_draft(draft_min) - meas_temp.discharge[trans_id].populate_data(data_in=meas_temp.transects[trans_id], - moving_bed_data=meas_temp.mb_tests) - self.sim_draft_min.loc[len(self.sim_draft_min)] = [meas_temp.discharge[trans_id].total, - meas_temp.discharge[trans_id].top, - meas_temp.discharge[trans_id].left, - meas_temp.discharge[trans_id].right] + meas_temp.discharge[trans_id].populate_data( + data_in=meas_temp.transects[trans_id], + moving_bed_data=meas_temp.mb_tests, + ) + self.sim_draft_min.loc[len(self.sim_draft_min)] = [ + meas_temp.discharge[trans_id].total, + meas_temp.discharge[trans_id].top, + meas_temp.discharge[trans_id].left, + meas_temp.discharge[trans_id].right, + ] # Compute discharge for draft max meas_temp.transects[trans_id].change_draft(draft_max) - meas_temp.discharge[trans_id].populate_data(data_in=meas_temp.transects[trans_id], - moving_bed_data=meas_temp.mb_tests) - self.sim_draft_max.loc[len(self.sim_draft_max)] = [meas_temp.discharge[trans_id].total, - meas_temp.discharge[trans_id].top, - meas_temp.discharge[trans_id].left, - meas_temp.discharge[trans_id].right] + meas_temp.discharge[trans_id].populate_data( + data_in=meas_temp.transects[trans_id], + moving_bed_data=meas_temp.mb_tests, + ) + self.sim_draft_max.loc[len(self.sim_draft_max)] = [ + meas_temp.discharge[trans_id].total, + meas_temp.discharge[trans_id].top, + meas_temp.discharge[trans_id].left, + meas_temp.discharge[trans_id].right, + ] def sim_invalid_cells(self, meas): - """Computes simulations using different methods to interpolate for invalid cells and ensembles. + """Computes simulations using different methods to interpolate for + invalid cells and ensembles. 
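The edge, draft, and interpolation simulations in this class all follow one perturb-and-recompute pattern: deep-copy the measurement, change a single input, repopulate the discharge, and log the totals. A minimal sketch of that pattern, with a hypothetical compute_discharge() standing in for QComp/populate_data and toy numbers throughout:

    import copy

    def compute_discharge(transect):
        # Hypothetical stand-in: total q grows with edge distance in this toy model.
        return 10.0 + 0.5 * (transect["left_dist_m"] + transect["right_dist_m"])

    transect = {"left_dist_m": 2.0, "right_dist_m": 3.0}
    sims = []
    for scale in (0.8, 1.2):  # e.g. a +/- 20 percent edge-distance error
        perturbed = copy.deepcopy(transect)
        perturbed["left_dist_m"] *= scale
        perturbed["right_dist_m"] *= scale
        sims.append(compute_discharge(perturbed))
    q_min, q_max = min(sims), max(sims)  # 12.0 and 13.0 here

The spread between the logged extremes is what the rectangular law further below turns into a standard uncertainty.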
Parameters ---------- @@ -1563,40 +2255,70 @@ class Oursin(object): meas_temp = copy.deepcopy(meas) for trans_id in self.checked_idx: # TRDI method - meas_temp.transects[trans_id].w_vel.interpolate_cells_trdi(meas_temp.transects[trans_id]) - meas_temp.discharge[trans_id].populate_data(data_in=meas_temp.transects[trans_id], - moving_bed_data=meas_temp.mb_tests) - self.sim_cells_trdi.loc[len(self.sim_cells_trdi)] = [meas_temp.discharge[trans_id].total, - meas_temp.discharge[trans_id].middle] + meas_temp.transects[trans_id].w_vel.interpolate_cells_trdi( + meas_temp.transects[trans_id] + ) + meas_temp.discharge[trans_id].populate_data( + data_in=meas_temp.transects[trans_id], + moving_bed_data=meas_temp.mb_tests, + ) + self.sim_cells_trdi.loc[len(self.sim_cells_trdi)] = [ + meas_temp.discharge[trans_id].total, + meas_temp.discharge[trans_id].middle, + ] # Above only - meas_temp.transects[trans_id].w_vel.interpolate_abba(meas_temp.transects[trans_id], search_loc=['above']) - meas_temp.discharge[trans_id].populate_data(data_in=meas_temp.transects[trans_id], - moving_bed_data=meas_temp.mb_tests) - self.sim_cells_above.loc[len(self.sim_cells_above)] = [meas_temp.discharge[trans_id].total, - meas_temp.discharge[trans_id].middle] + meas_temp.transects[trans_id].w_vel.interpolate_abba( + meas_temp.transects[trans_id], search_loc=["above"] + ) + meas_temp.discharge[trans_id].populate_data( + data_in=meas_temp.transects[trans_id], + moving_bed_data=meas_temp.mb_tests, + ) + self.sim_cells_above.loc[len(self.sim_cells_above)] = [ + meas_temp.discharge[trans_id].total, + meas_temp.discharge[trans_id].middle, + ] # Below only - meas_temp.transects[trans_id].w_vel.interpolate_abba(meas_temp.transects[trans_id], search_loc=['below']) - meas_temp.discharge[trans_id].populate_data(data_in=meas_temp.transects[trans_id], - moving_bed_data=meas_temp.mb_tests) - self.sim_cells_below.loc[len(self.sim_cells_below)] = [meas_temp.discharge[trans_id].total, - meas_temp.discharge[trans_id].middle] + meas_temp.transects[trans_id].w_vel.interpolate_abba( + meas_temp.transects[trans_id], search_loc=["below"] + ) + meas_temp.discharge[trans_id].populate_data( + data_in=meas_temp.transects[trans_id], + moving_bed_data=meas_temp.mb_tests, + ) + self.sim_cells_below.loc[len(self.sim_cells_below)] = [ + meas_temp.discharge[trans_id].total, + meas_temp.discharge[trans_id].middle, + ] # Before only - meas_temp.transects[trans_id].w_vel.interpolate_abba(meas_temp.transects[trans_id], search_loc=['before']) - meas_temp.discharge[trans_id].populate_data(data_in=meas_temp.transects[trans_id], - moving_bed_data=meas_temp.mb_tests) - self.sim_cells_before.loc[len(self.sim_cells_before)] = [meas_temp.discharge[trans_id].total, - meas_temp.discharge[trans_id].middle] + meas_temp.transects[trans_id].w_vel.interpolate_abba( + meas_temp.transects[trans_id], search_loc=["before"] + ) + meas_temp.discharge[trans_id].populate_data( + data_in=meas_temp.transects[trans_id], + moving_bed_data=meas_temp.mb_tests, + ) + self.sim_cells_before.loc[len(self.sim_cells_before)] = [ + meas_temp.discharge[trans_id].total, + meas_temp.discharge[trans_id].middle, + ] # After only - meas_temp.transects[trans_id].w_vel.interpolate_abba(meas_temp.transects[trans_id], search_loc=['after']) - meas_temp.discharge[trans_id].populate_data(data_in=meas_temp.transects[trans_id], - moving_bed_data=meas_temp.mb_tests) - self.sim_cells_after.loc[len(self.sim_cells_after)] = [meas_temp.discharge[trans_id].total, - meas_temp.discharge[trans_id].middle] + 
meas_temp.transects[trans_id].w_vel.interpolate_abba( + meas_temp.transects[trans_id], search_loc=["after"] + ) + meas_temp.discharge[trans_id].populate_data( + data_in=meas_temp.transects[trans_id], + moving_bed_data=meas_temp.mb_tests, + ) + self.sim_cells_after.loc[len(self.sim_cells_after)] = [ + meas_temp.discharge[trans_id].total, + meas_temp.discharge[trans_id].middle, + ] def sim_shallow_ens(self, meas): - """Computes simulations assuming no interpolation of discharge for ensembles where depths are too shallow - for any valid cells. + """Computes simulations assuming no interpolation of discharge for + ensembles where depths are too shallow for any valid cells. Parameters ---------- @@ -1608,17 +2330,23 @@ class Oursin(object): self.sim_shallow = pd.DataFrame(columns=self.sim_shallow.columns) for trans_id in self.checked_idx: - shallow_estimate = np.nansum(meas.discharge[trans_id].middle_ens) \ - - np.nansum(np.nansum(meas.discharge[trans_id].middle_cells)) + shallow_estimate = np.nansum( + meas.discharge[trans_id].middle_ens + ) - np.nansum(np.nansum(meas.discharge[trans_id].middle_cells)) if np.abs(shallow_estimate) > 0: - self.sim_shallow.loc[len(self.sim_shallow)] = [meas.discharge[trans_id].total - shallow_estimate, - meas.discharge[trans_id].middle - shallow_estimate] + self.sim_shallow.loc[len(self.sim_shallow)] = [ + meas.discharge[trans_id].total - shallow_estimate, + meas.discharge[trans_id].middle - shallow_estimate, + ] else: - self.sim_shallow.loc[len(self.sim_shallow)] = [meas.discharge[trans_id].total, - meas.discharge[trans_id].middle] + self.sim_shallow.loc[len(self.sim_shallow)] = [ + meas.discharge[trans_id].total, + meas.discharge[trans_id].middle, + ] def sim_invalid_depth(self, meas): - """Computes simulations using different methods to interpolate for invalid depths. + """Computes simulations using different methods to interpolate for + invalid depths. 
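To see what the shallow estimate in sim_shallow_ens above isolates, here is a toy check with made-up numbers: the gap between the ensemble-level and cell-level middle discharge is exactly the portion supplied by interpolation over ensembles that have no valid cells.

    import numpy as np

    middle_ens = np.array([2.0, 1.5, 0.8])       # per-ensemble middle q
    middle_cells = np.array([[1.0, 1.0],
                             [0.75, 0.75],
                             [np.nan, np.nan]])  # last ensemble: no valid cells

    shallow_estimate = np.nansum(middle_ens) - np.nansum(np.nansum(middle_cells))
    print(shallow_estimate)  # 0.8, the interpolated shallow contribution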
Parameters ---------- @@ -1633,24 +2361,40 @@ class Oursin(object): # Simulations for invalid depths meas_temp = copy.deepcopy(meas) for trans_id in self.checked_idx: - depths = getattr(meas_temp.transects[trans_id].depths, meas_temp.transects[trans_id].depths.selected) + depths = getattr( + meas_temp.transects[trans_id].depths, + meas_temp.transects[trans_id].depths.selected, + ) # Hold last depths.interpolate_hold_last() - meas_temp.transects[trans_id].w_vel.adjust_side_lobe(meas_temp.transects[trans_id]) - meas_temp.discharge[trans_id].populate_data(data_in=meas_temp.transects[trans_id], - moving_bed_data=meas_temp.mb_tests) - self.sim_depth_hold.loc[len(self.sim_depth_hold)] = [meas_temp.discharge[trans_id].total, - meas_temp.discharge[trans_id].middle] + meas_temp.transects[trans_id].w_vel.adjust_side_lobe( + meas_temp.transects[trans_id] + ) + meas_temp.discharge[trans_id].populate_data( + data_in=meas_temp.transects[trans_id], + moving_bed_data=meas_temp.mb_tests, + ) + self.sim_depth_hold.loc[len(self.sim_depth_hold)] = [ + meas_temp.discharge[trans_id].total, + meas_temp.discharge[trans_id].middle, + ] # Fill with next depths.interpolate_next() - meas_temp.transects[trans_id].w_vel.adjust_side_lobe(meas_temp.transects[trans_id]) - meas_temp.discharge[trans_id].populate_data(data_in=meas_temp.transects[trans_id], - moving_bed_data=meas_temp.mb_tests) - self.sim_depth_next.loc[len(self.sim_depth_next)] = [meas_temp.discharge[trans_id].total, - meas_temp.discharge[trans_id].middle] + meas_temp.transects[trans_id].w_vel.adjust_side_lobe( + meas_temp.transects[trans_id] + ) + meas_temp.discharge[trans_id].populate_data( + data_in=meas_temp.transects[trans_id], + moving_bed_data=meas_temp.mb_tests, + ) + self.sim_depth_next.loc[len(self.sim_depth_next)] = [ + meas_temp.discharge[trans_id].total, + meas_temp.discharge[trans_id].middle, + ] def sim_invalid_boat_velocity(self, meas): - """Computes simulations using different methods to interpolate for invalid boat velocity. + """Computes simulations using different methods to interpolate for + invalid boat velocity. 
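The two depth fills used above, and the boat-velocity fills that follow, reduce to forward- and backward-filling invalid values along the ensemble axis. A pandas sketch of the idea (QRevInt's interpolate_hold_last and interpolate_next operate on the depth arrays directly, but the effect is roughly this):

    import numpy as np
    import pandas as pd

    depth_m = pd.Series([1.2, np.nan, np.nan, 1.8])
    hold_last = depth_m.ffill()  # 1.2, 1.2, 1.2, 1.8
    fill_next = depth_m.bfill()  # 1.2, 1.8, 1.8, 1.8
    print(hold_last.tolist(), fill_next.tolist())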
Parameters ---------- @@ -1666,24 +2410,39 @@ class Oursin(object): meas_temp = copy.deepcopy(meas) for trans_id in self.checked_idx: # Hold last - boat_data = getattr(meas_temp.transects[trans_id].boat_vel, meas_temp.transects[trans_id].boat_vel.selected) + boat_data = getattr( + meas_temp.transects[trans_id].boat_vel, + meas_temp.transects[trans_id].boat_vel.selected, + ) if boat_data is not None: boat_data.interpolate_hold_last() - meas_temp.discharge[trans_id].populate_data(data_in=meas_temp.transects[trans_id], - moving_bed_data=meas_temp.mb_tests) - self.sim_boat_hold.loc[len(self.sim_boat_hold)] = [meas_temp.discharge[trans_id].total, - meas_temp.discharge[trans_id].middle] + meas_temp.discharge[trans_id].populate_data( + data_in=meas_temp.transects[trans_id], + moving_bed_data=meas_temp.mb_tests, + ) + self.sim_boat_hold.loc[len(self.sim_boat_hold)] = [ + meas_temp.discharge[trans_id].total, + meas_temp.discharge[trans_id].middle, + ] # Fill with next boat_data.interpolate_next() - meas_temp.discharge[trans_id].populate_data(data_in=meas_temp.transects[trans_id], - moving_bed_data=meas_temp.mb_tests) - self.sim_boat_next.loc[len(self.sim_boat_next)] = [meas_temp.discharge[trans_id].total, - meas_temp.discharge[trans_id].middle] + meas_temp.discharge[trans_id].populate_data( + data_in=meas_temp.transects[trans_id], + moving_bed_data=meas_temp.mb_tests, + ) + self.sim_boat_next.loc[len(self.sim_boat_next)] = [ + meas_temp.discharge[trans_id].total, + meas_temp.discharge[trans_id].middle, + ] else: - self.sim_boat_next.loc[len(self.sim_boat_next)] = [meas.discharge[trans_id].total, - meas_temp.discharge[trans_id].middle] - self.sim_boat_hold.loc[len(self.sim_boat_hold)] = [meas.discharge[trans_id].total, - meas_temp.discharge[trans_id].middle] + self.sim_boat_next.loc[len(self.sim_boat_next)] = [ + meas.discharge[trans_id].total, + meas_temp.discharge[trans_id].middle, + ] + self.sim_boat_hold.loc[len(self.sim_boat_hold)] = [ + meas.discharge[trans_id].total, + meas_temp.discharge[trans_id].middle, + ] @staticmethod def compute_draft_max_min(transect, draft_error_m_user=np.nan): @@ -1728,22 +2487,21 @@ class Oursin(object): @staticmethod def compute_edge_dist_max_min(transect, user_settings, default_settings): - """Compute the max and min edge distances. 
- """ + """Compute the max and min edge distances.""" init_dist_right = transect.edges.right.distance_m init_dist_left = transect.edges.left.distance_m # Select user percentage or default - if np.isnan(user_settings['right_edge_dist_prct_user']): - d_right_error_prct = default_settings['right_edge_dist_prct'] + if np.isnan(user_settings["right_edge_dist_prct_user"]): + d_right_error_prct = default_settings["right_edge_dist_prct"] else: - d_right_error_prct = user_settings['right_edge_dist_prct_user'] + d_right_error_prct = user_settings["right_edge_dist_prct_user"] - if np.isnan(user_settings['left_edge_dist_prct_user']): - d_left_error_prct = default_settings['left_edge_dist_prct'] + if np.isnan(user_settings["left_edge_dist_prct_user"]): + d_left_error_prct = default_settings["left_edge_dist_prct"] else: - d_left_error_prct = user_settings['left_edge_dist_prct_user'] + d_left_error_prct = user_settings["left_edge_dist_prct_user"] # Compute min distance for both edges min_left_dist = (1 - d_left_error_prct * 0.01) * init_dist_left @@ -1761,7 +2519,9 @@ class Oursin(object): return max_left_dist, max_right_dist, min_left_dist, min_right_dist @staticmethod - def compute_pp_max_min(meas, exp_95ic_min, exp_95ic_max, pp_exp, exp_pp_min_user, exp_pp_max_user): + def compute_pp_max_min( + meas, exp_95ic_min, exp_95ic_max, pp_exp, exp_pp_min_user, exp_pp_max_user + ): """Determine the max and min exponents for power fit. Parameters @@ -1769,9 +2529,11 @@ class Oursin(object): meas: MeasurementData Object of MeasurementData exp_95ic_min: list - Minimum power fit exponent from the 95% confidence interval for each transect + Minimum power fit exponent from the 95% confidence interval for + each transect exp_95ic_max: list - Maximum power fit exponent from the 95% confidence interval for each transect + Maximum power fit exponent from the 95% confidence interval for + each transect pp_exp: list Optimized power fit exponent for each transect exp_pp_min_user: float @@ -1799,8 +2561,8 @@ class Oursin(object): else: mean_pp = np.nanmean(pp_exp) - # If all transects have confidence intervals, use the mean of the confidence interval min/max - # Otherwise adjust average +/- 0.2 + # If all transects have confidence intervals, use the mean of + # the confidence interval min/max. Otherwise adjust average +/- 0.2 if np.isnan(exp_95ic_min).any(): min_pp = mean_pp - 0.2 else: @@ -1837,7 +2599,9 @@ class Oursin(object): return skip_pp_min_max, exp_pp_max, exp_pp_min @staticmethod - def compute_ns_max_min(meas, ns_exp, exp_ns_min_user=np.nan, exp_ns_max_user=np.nan): + def compute_ns_max_min( + meas, ns_exp, exp_ns_min_user=np.nan, exp_ns_max_user=np.nan + ): """Determine the max and min no slip exponents. 
Parameters @@ -1868,8 +2632,8 @@ class Oursin(object): else: mean_ns = np.nanmean(ns_exp) if len(ns_exp) == 1: - min_ns = ns_exp[0]-0.05 - max_ns = ns_exp[0]+0.05 + min_ns = ns_exp[0] - 0.05 + max_ns = ns_exp[0] + 0.05 else: min_ns = np.nanmin(ns_exp) max_ns = np.nanmax(ns_exp) @@ -1913,7 +2677,7 @@ class Oursin(object): ------- relative_error_depth: float Random depth error by ensemble - """ + """ d_ens = transect.depths.bt_depths.depth_processed_m depth_vv = transect.boat_vel.bt_vel.w_mps * transect.date_time.ens_duration_sec @@ -1923,8 +2687,9 @@ class Oursin(object): @staticmethod def water_std_by_error_velocity(transect): - """Compute the relative standard deviation of the water velocity using the fact that the error velocity is - scaled so that the standard deviation of the error velocity is the same as the standard deviation + """Compute the relative standard deviation of the water velocity + using the fact that the error velocity is scaled so that the + standard deviation of the error velocity is the same as the standard deviation of the horizontal water velocity. Parameters ---------- @@ -1941,11 +2706,14 @@ class Oursin(object): # Compute water speed u_water = transect.w_vel.u_processed_mps v_water = transect.w_vel.v_processed_mps - v_wa_cell_abs = np.sqrt(u_water ** 2 + v_water ** 2) + v_wa_cell_abs = np.sqrt(u_water**2 + v_water**2) + v_wa_cell_abs[np.round(v_wa_cell_abs, 3) == 0.000] = np.nan # Use only valid error velocity data d_vel_filtered = np.tile([np.nan], transect.w_vel.d_mps.shape) - d_vel_filtered[transect.w_vel.valid_data[0]] = transect.w_vel.d_mps[transect.w_vel.valid_data[0]] + d_vel_filtered[transect.w_vel.valid_data[0]] = transect.w_vel.d_mps[ + transect.w_vel.valid_data[0] + ] # Compute relative standard deviation of error velocity std_ev_wt = np.nanstd(d_vel_filtered) / np.abs(v_wa_cell_abs) @@ -1958,9 +2726,10 @@ class Oursin(object): @staticmethod def boat_std_by_error_velocity(transect): - """Compute the relative standard deviation of the boat velocity using the fact that the error velocity is - scaled so that the standard deviation of the error velocity is the same as the standard deviation - of the horizontal boat velocity. + """Compute the relative standard deviation of the boat velocity + using the fact that the error velocity is scaled so that the + standard deviation of the error velocity is the same as the + standard deviation of the horizontal boat velocity.
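Both water_std_by_error_velocity above and this boat variant rest on the same scaling property: the error velocity d is scaled so that its standard deviation matches that of the horizontal velocity, so std(d) divided by |speed| estimates the relative standard deviation of that velocity. A toy illustration with made-up numbers:

    import numpy as np

    d_mps = np.array([0.02, -0.01, 0.03, -0.02])  # error velocities
    speed = np.array([1.0, 1.1, 0.9, 1.05])       # horizontal speeds

    rel_std = np.nanstd(d_mps) / np.abs(speed)    # one value per cell/ensemble
    print(rel_std)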
Parameters ---------- @@ -1976,12 +2745,14 @@ class Oursin(object): # Compute boat speed u_boat = transect.boat_vel.bt_vel.u_processed_mps v_boat = transect.boat_vel.bt_vel.v_processed_mps - speed = np.sqrt(u_boat ** 2 + v_boat ** 2) + speed = np.sqrt(u_boat**2 + v_boat**2) + speed[np.round(speed, 3) == 0.000] = np.nan # Use only valid error velocity data d_vel_filtered = np.tile([np.nan], transect.boat_vel.bt_vel.d_mps.shape) - d_vel_filtered[transect.boat_vel.bt_vel.valid_data[0]] = \ - transect.boat_vel.bt_vel.d_mps[transect.boat_vel.bt_vel.valid_data[0]] + d_vel_filtered[ + transect.boat_vel.bt_vel.valid_data[0] + ] = transect.boat_vel.bt_vel.d_mps[transect.boat_vel.bt_vel.valid_data[0]] # Compute relative standard deviation of error velocity all_std_ev_bt = np.nanstd(d_vel_filtered) @@ -1994,7 +2765,8 @@ class Oursin(object): @staticmethod def apply_u_rect(list_sims, col_name): - """Compute the uncertainty using list of simulated discharges following a ranctangular law + """Compute the uncertainty using a list of simulated discharges + following a rectangular law Parameters ---------- @@ -2013,8 +2785,10 @@ class Oursin(object): vertical_stack = pd.concat(list_sims, axis=0, sort=True) # Apply rectangular law - u_rect = (vertical_stack.groupby(vertical_stack.index)[col_name].max() - - vertical_stack.groupby(vertical_stack.index)[col_name].min()) / (2 * (3 ** 0.5)) + u_rect = ( + vertical_stack.groupby(vertical_stack.index)[col_name].max() + - vertical_stack.groupby(vertical_stack.index)[col_name].min() + ) / (2 * (3**0.5)) return u_rect @@ -2022,15 +2796,16 @@ class Oursin(object): # ============ @staticmethod def bayes_cov(transects_total_q, cov_prior=0.03, cov_prior_u=0.2, nsim=20000): - """Computes the coefficient of variation using a Bayesian approach and an assumed posterior - log-normal distribution. + """Computes the coefficient of variation using a Bayesian approach + and an assumed posterior log-normal distribution. Parameters ---------- transects_total_q: list List of total discharge for each transect cov_prior: float - Expected COV (68%) based on prior knowledge. Assumed to be 3% by default. + Expected COV (68%) based on prior knowledge. Assumed to be 3% by + default. cov_prior_u: float Uncertainty (68%) of cov_prior. Assumed to be 20%. nsim: int @@ -2042,23 +2817,29 @@ Coefficient of variation """ - sav = Oursin.metropolis(theta0=[np.mean(transects_total_q), cov_prior], - obs_data=transects_total_q, - cov_prior=cov_prior, - cov_prior_u=cov_prior_u, - nsim=nsim, - theta_std=np.abs(np.array([np.mean(transects_total_q), cov_prior]))* cov_prior_u / np.sqrt(len(transects_total_q))) - + sav = Oursin.metropolis( + theta0=[np.mean(transects_total_q), cov_prior], + obs_data=transects_total_q, + cov_prior=cov_prior, + cov_prior_u=cov_prior_u, + nsim=nsim, + theta_std=np.abs(np.array([np.mean(transects_total_q), cov_prior])) + * cov_prior_u + / np.sqrt(len(transects_total_q)), + ) n_burn = int(nsim / 2) - cov = np.mean(sav['sam'][n_burn:nsim, 1]) + cov = np.mean(sav["sam"][n_burn:nsim, 1]) return cov @staticmethod - def metropolis(theta0, obs_data, cov_prior, cov_prior_u, nsim=1000, theta_std=np.nan): - """Implements the Metropolis_Hastings Markov chain Monte Carlo (MCMC) algorithm for sampling the + def metropolis( + theta0, obs_data, cov_prior, cov_prior_u, nsim=1000, theta_std=np.nan + ): + """Implements the Metropolis-Hastings Markov chain Monte Carlo + (MCMC) algorithm for sampling the posterior distribution, assuming a log-normal posterior distribution.
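Stepping back to apply_u_rect above: the rectangular law treats a quantity known only to lie between two simulated extremes as uniformly distributed, and the standard deviation of a uniform distribution on [a, b] is (b - a)/sqrt(12), i.e. the half-range divided by sqrt(3). A numeric check with illustrative values:

    import numpy as np

    q_min, q_max = 9.6, 10.4                     # simulated extremes, m3/s
    u_rect = (q_max - q_min) / (2 * np.sqrt(3))  # == (q_max - q_min) / sqrt(12)
    print(u_rect)  # ~0.23 m3/s, i.e. ~2.3 percent of a 10 m3/s discharge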
Parameters @@ -2074,7 +2855,8 @@ class Oursin(object): nsim: int Number of simulations. theta_std: float - Standard deviation for the gaussian Jump distribution. If blank a default value is computed. + Standard deviation for the Gaussian jump distribution. If blank + a default value is computed. Returns ------- @@ -2083,26 +2865,33 @@ class Oursin(object): sam: np.array(float) Matrix containing the MCMC samples obj_funk: np.array(float) - Vector containing the corresponding values of the objective function + Vector containing the corresponding values of the objective + function (i.e. of the unnormalized log-posterior) """ - + # Initialize npar = len(theta0) - sam = np.zeros((nsim + 1, npar)) - obj_funk = np.zeros((nsim + 1, 1)) + sam = np.zeros((nsim + 1, npar)) + obj_funk = np.zeros((nsim + 1, 1)) - # Parameters - used for automatic computation of starting stds of the Gaussian Jump distribution + # Parameters - used for automatic computation of starting stds of + # the Gaussian jump distribution if np.any(np.isnan(theta_std)): std_factor = 0.1 theta_std = std_factor * np.abs(theta0) # Check if starting point is feasible - abandon otherwise - f_current = Oursin.log_post(param=theta0, measures=obs_data, cov_prior=cov_prior, cov_prior_u=cov_prior_u) + f_current = Oursin.log_post( + param=theta0, + measures=obs_data, + cov_prior=cov_prior, + cov_prior_u=cov_prior_u, + ) if not Oursin.is_feasible(f_current): - print('Metropolis:FATAL:unfeasible starting point') - w = {'sam': sam, 'obj_funk': obj_funk} + print("Metropolis:FATAL:unfeasible starting point") + w = {"sam": sam, "obj_funk": obj_funk} return w else: sam[0, :] = list(theta0) @@ -2117,13 +2906,17 @@ class Oursin(object): # Propose a new candidate candid = np.random.normal(loc=current, scale=theta_std) # Change for use in Numba - # candid[0] = np.random.normal(loc=current[0], scale=theta_std[0]) - # candid[1] = np.random.normal(loc=current[1], scale=theta_std[1]) + # candid[0] = np.random.normal(loc=current[0], + # scale=theta_std[0]) + # candid[1] = np.random.normal(loc=current[1], + # scale=theta_std[1]) # Evaluate objective function at candidate - f_candid = Oursin.log_post(param=candid, - measures=obs_data, - cov_prior=cov_prior, - cov_prior_u=cov_prior_u) + f_candid = Oursin.log_post( + param=candid, + measures=obs_data, + cov_prior=cov_prior, + cov_prior_u=cov_prior_u, + ) if not Oursin.is_feasible(f_candid): sam[i + 1, :] = current @@ -2135,7 +2928,8 @@ class Oursin(object): # Compute Metropolis acceptance ratio # Changed for use in Numba ratio = math.exp(min(max(-100, f_candid - f_current), 0)) - # ratio = np.exp(min(((np.max(np.hstack((float(-100), f_candid - f_current))), float(0))))) + # ratio = np.exp(min(((np.max(np.hstack((float(-100), + # f_candid - f_current))), float(0))))) # Apply acceptance rule if u <= ratio: @@ -2145,32 +2939,35 @@ class Oursin(object): sam[i + 1, :] = current obj_funk[i + 1] = f_current - w = {'sam': sam, 'obj_funk': obj_funk} + w = {"sam": sam, "obj_funk": obj_funk} return w @staticmethod def log_post(param, measures, cov_prior, cov_prior_u): - """Define function returning the posterior log-pdf using the model measures ~ N(true_value,cov*true_value), - with a flat prior on true_value and a log-normal prior for cov (= coefficient of variation) - + """Computes the posterior log-pdf using the model + measures ~ N(true_value, cov*true_value), + with a flat prior on true_value and a log-normal prior for cov + (the coefficient of variation). + Parameters ---------- param: np.array(float)
Array containing the true value and COV - + measures: np.array(float) Array of observations cov_prior: float Expected COV (68%) based on prior knowledge. cov_prior_u: float Uncertainty (68%) of cov_prior. - + Returns ------- - logp: + logp: float + Unnormalized log-posterior of the parameters given the measures """ # Check if any parameter is <=0 - # since both true_value and cov have to be positive - otherwise sigma = true_value*cov does not make sense + # since both true_value and cov have to be positive - otherwise + # sigma = true_value*cov does not make sense if any(item <= 0 for item in param): return -math.inf @@ -2178,22 +2975,30 @@ class Oursin(object): cov = param[1] sigma = cov * true_value # standard deviation - # Compute log-likelihood under the model: measures ~ N(true_value,sigma) - # You can easily change this model (e.g. lognormal for a positive measurand?) + # Compute log-likelihood under the model: + # measures ~ N(true_value, sigma) + # You can easily change this model (e.g. lognormal for a positive + # measurand?) # OPTION 1 : the model follows a Normal distribution - log_likelihood = np.sum(scipy.stats.norm.logpdf(measures, loc=true_value, scale=sigma)) + log_likelihood = np.sum( + scipy.stats.norm.logpdf(measures, loc=true_value, scale=sigma) + ) # Change for Numba - # log_likelihood = np.sum(np.log(np.exp(-(((measures - true_value) / sigma) ** 2) / 2) + # log_likelihood = np.sum(np.log(np.exp(-(((measures - true_value) / + # sigma) ** 2) / 2) # / (np.sqrt(2 * np.pi) * sigma))) - # Prior on true_value - flat prior used here but you may change this if you have prior knowledge + # Prior on true_value - flat prior used here but you may change this + # if you have prior knowledge log_prior_1 = 0 # Lognormal prior x = cov mu = np.log(cov_prior) scale = cov_prior_u - pdf = np.exp(-(np.log(x) - mu) ** 2 / (2 * scale ** 2)) / (x * scale * np.sqrt(2 * np.pi)) + pdf = np.exp(-((np.log(x) - mu) ** 2) / (2 * scale**2)) / ( + x * scale * np.sqrt(2 * np.pi) + ) log_prior_2 = np.log(pdf) # Joint prior (prior independence) @@ -2202,17 +3007,18 @@ class Oursin(object): # Return (unnormalized) log-posterior logp = log_likelihood + log_prior if np.isnan(logp): - logp = -math.inf # returns -Inf rather than NaN's (required by the MCMC sampler used subsequently) + logp = -math.inf # returns -Inf rather than NaN's (required by + # the MCMC sampler used subsequently) return logp @staticmethod def is_feasible(value): """Checks that a value is a real value (not infinity or nan) - + Parameters ---------- value: float or int - + Returns ------- bool @@ -2226,10 +3032,11 @@ class Oursin(object): # --------------------------------------------------- @staticmethod def hh_random_meas(meas): - """Implements the semi-empirical method for computing the random uncertainty of an ADCP discharge transect, - as presented in Hening Huang (2018) Estimating uncertainty of streamflow measurements with - moving-boat acoustic Doppler current profilers, Hydrological Sciences Journal, 63:3, 353-368, - DOI:10.1080/02626667.2018.1433833 + """Implements the semi-empirical method for computing the random + uncertainty of an ADCP discharge transect, as presented in + Hening Huang (2018) Estimating uncertainty of streamflow measurements with + moving-boat acoustic Doppler current profilers, Hydrological + Sciences Journal, 63:3, 353-368, DOI:10.1080/02626667.2018.1433833 Parameters ---------- @@ -2252,26 +3059,30 @@ class Oursin(object): r1_numerator = [] r1_denominator = [] for n in range(len(q_i) - 1): - r1_numerator.append((q_i[n] - q_bar) * (q_i[n+1] - q_bar)) -
r1_denominator.append((q_i[n] - q_bar)**2) - r1_denominator.append((q_i[-1] - q_bar)**2) + r1_numerator.append((q_i[n] - q_bar) * (q_i[n + 1] - q_bar)) + r1_denominator.append((q_i[n] - q_bar) ** 2) + r1_denominator.append((q_i[-1] - q_bar) ** 2) r1 = np.nansum(r1_numerator) / np.nansum(r1_denominator) # Compute g(r1) - g_r1 = 0.6 + (0.1 * np.exp(r1)) + (0.01 * (1 - np.exp((r1**0.6)-1)**-0.08)) + g_r1 = ( + 0.6 + + (0.1 * np.exp(r1)) + + (0.01 * (1 - np.exp((r1**0.6) - 1) ** -0.08)) + ) if g_r1 < 1: g_r1 = 1.0 # Compute (delta qi)**2 - alpha = 1. / 3. + alpha = 1.0 / 3.0 c1 = 0.5 * (1 - alpha) delta_list = [] for n in range(1, len(q_i) - 1): q_i_hat = c1 * q_i[n - 1] + alpha * q_i[n] + c1 * q_i[n + 1] - delta_list.append((q_i[n] - q_i_hat)**2) + delta_list.append((q_i[n] - q_i_hat) ** 2) # Compute unbiased residual sum of squares - urss = ((2. / 3.) * (1 / (1 - alpha))**2) * np.nansum(delta_list) + urss = ((2.0 / 3.0) * (1 / (1 - alpha)) ** 2) * np.nansum(delta_list) # Compute random uncertainty random_u.append(g_r1 * np.sqrt(urss) / q_m) diff --git a/Classes/Pd0TRDI.py b/Classes/Pd0TRDI.py index 05bb1b9..dd149c0 100644 --- a/Classes/Pd0TRDI.py +++ b/Classes/Pd0TRDI.py @@ -35,7 +35,7 @@ class Pd0TRDI(object): Nmea: Nmea Object of Nmea to hold Nmea data """ - + def __init__(self, file_name): """Constructor initializing instance variables. @@ -44,7 +44,7 @@ class Pd0TRDI(object): file_name: str Full name including path of pd0 file to be read """ - + self.file_name = file_name self.Hdr = None self.Inst = None @@ -57,10 +57,12 @@ class Pd0TRDI(object): self.Surface = None self.AutoMode = None self.Nmea = None - + self.pd0_read(file_name) - - def create_objects(self, n_ensembles, n_types, n_bins, max_surface_bins, n_velocities, wr2=False): + + def create_objects( + self, n_ensembles, n_types, n_bins, max_surface_bins, n_velocities, wr2=False + ): """Create objects for instance variables. Parameters @@ -92,7 +94,8 @@ class Pd0TRDI(object): self.Nmea = Nmea(n_ensembles) def pd0_read(self, fullname, wr2=False): - """Reads the binary pd0 file and assigns values to object instance variables. + """Reads the binary pd0 file and assigns values to object instance + variables. 
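The ensemble scan that follows hinges on finding the 0x7f7f leader id. A toy version of the byte-by-byte search on an in-memory stream, where np.frombuffer stands in for the np.fromfile calls used on the real file handle:

    import io
    import numpy as np

    stream = io.BytesIO(b"\x00\x7f\x7f\x7f rest-of-ensemble")
    leader_id = hex(np.frombuffer(stream.read(2), np.uint16)[0])
    while leader_id != "0x7f7f":
        stream.seek(-1, 1)  # back up one byte and retry on the next pair
        leader_id = hex(np.frombuffer(stream.read(2), np.uint16)[0])
    print(leader_id, stream.tell())  # marker found; pointer sits just past it

As in pd0_read itself, the two-byte marker is interpreted little-endian, so a false start (here the stray 0x00) is recovered by stepping back a single byte rather than a whole word.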
Parameters ---------- @@ -112,35 +115,38 @@ class Pd0TRDI(object): if file_info > 0: # Open file for processing - with open(fullname, 'rb') as f: + with open(fullname, "rb") as f: # Read leader ID leader_id = hex(np.fromfile(f, np.uint16, count=1)[0]) # Leader ID 7f7f marks beginning of ensemble - if leader_id != '0x7f7f': - while leader_id != '0x7f7f': + if leader_id != "0x7f7f": + while leader_id != "0x7f7f": f.seek(-1, 1) leader_id = hex(np.fromfile(f, np.uint16, count=1)[0]) # Read header information - initial_pos = f.tell()-2 + initial_pos = f.tell() - 2 bytes_per_ens = np.fromfile(f, dtype=np.uint16, count=1)[0] f.seek(1, 1) n_types = np.fromfile(f, np.uint8, count=1)[0] offset = np.fromfile(f, np.uint16, count=1)[0] - f.seek(initial_pos+offset+8, 0) + f.seek(initial_pos + offset + 8, 0) n_beams = np.fromfile(f, np.uint8, count=1)[0] n_bins = np.fromfile(f, np.uint8, count=1)[0] - # Determine number of ensembles in the file to allow pre-allocation of arrays + # Determine number of ensembles in the file to allow + # pre-allocation of arrays n_ensembles = Pd0TRDI.number_of_ensembles(f, file_info) # Create objects and pre-allocate arrays - self.create_objects(n_ensembles=n_ensembles, - n_types=n_types, - n_bins=n_bins, - max_surface_bins=max_surface_bins, - n_velocities=n_velocities) + self.create_objects( + n_ensembles=n_ensembles, + n_types=n_types, + n_bins=n_bins, + max_surface_bins=max_surface_bins, + n_velocities=n_velocities, + ) # Initialize counters and variables i_ens = -1 @@ -161,17 +167,20 @@ class Pd0TRDI(object): # Read leader ID leader_id = hex(np.fromfile(f, np.uint16, count=1)[0]) - if i_data_types >= n_data_types and leader_id != '0x7f7f': - leader_id = '0x9999' + if i_data_types >= n_data_types and leader_id != "0x7f7f": + leader_id = "0x9999" # 7f7f marks the beginning of an ensemble - if leader_id == '0x7f7f': + if leader_id == "0x7f7f": i2022 = 0 file_loc = f.tell() - 2 # Check for last ensemble in file - if file_loc+bytes_per_ens > end_file and i_ens >= n_ensembles: - end_file_check = end_file+1 + if ( + file_loc + bytes_per_ens > end_file + and i_ens >= n_ensembles + ): + end_file_check = end_file + 1 else: # Process ensemble @@ -181,24 +190,33 @@ class Pd0TRDI(object): # Check check_sum if self.check_sum(f, file_loc, bytes_per_ens): - f.seek(file_loc+5, 0) + f.seek(file_loc + 5, 0) n_data_types = np.fromfile(f, np.uint8, count=1)[0] - data_offsets = np.fromfile(f, np.uint16, count=n_data_types) + data_offsets = np.fromfile( + f, np.uint16, count=n_data_types + ) # Find variable leader ID - while i_data_types+1 <= n_data_types and leader_id != '0x80': - f.seek(data_offsets[i_data_types]+file_loc, 0) - leader_id = hex(np.fromfile(f, np.uint16, count=1)[0]) + while ( + i_data_types + 1 <= n_data_types + and leader_id != "0x80" + ): + f.seek(data_offsets[i_data_types] + file_loc, 0) + leader_id = hex( + np.fromfile(f, np.uint16, count=1)[0] + ) i_data_types += 1 # Check for consecutive ensemble numbers - if i_ens > -1 and leader_id == '0x80': + if i_ens > -1 and leader_id == "0x80": ens_num = np.fromfile(f, np.uint16, count=1)[0] ens_num_diff = ens_num - self.Sensor.num[i_ens] if ens_num_diff > 1: - for nn in range(0, int(ens_num_diff-1)): + for nn in range(0, int(ens_num_diff - 1)): if i_ens < n_ensembles: - self.Sensor.num[i_ens] = self.Sensor.num[i_ens-1]+1 + self.Sensor.num[i_ens] = ( + self.Sensor.num[i_ens - 1] + 1 + ) i_ens += 1 elif ens_num_diff < 1: i_ens -= 1 @@ -212,38 +230,57 @@ class Pd0TRDI(object): i_ens += 1 # Read bytes in this ensemble - 
self.Hdr.bytes_per_ens[i_ens] = np.fromfile(f, np.uint16, count=1)[0] + self.Hdr.bytes_per_ens[i_ens] = np.fromfile( + f, np.uint16, count=1 + )[0] # If checksum is valid read header data - if self.check_sum(f, file_loc, int(self.Hdr.bytes_per_ens[i_ens])): + if self.check_sum( + f, file_loc, int(self.Hdr.bytes_per_ens[i_ens]) + ): # Read number of data types - f.seek(file_loc+5, 0) - self.Hdr.n_data_types[i_ens] = np.fromfile(f, np.uint8, count=1)[0] + f.seek(file_loc + 5, 0) + self.Hdr.n_data_types[i_ens] = np.fromfile( + f, np.uint8, count=1 + )[0] # Read data offsets - test = np.fromfile(f, np.uint16, count=int(self.Hdr.n_data_types[i_ens])) + test = np.fromfile( + f, + np.uint16, + count=int(self.Hdr.n_data_types[i_ens]), + ) if test.shape[0] > self.Hdr.data_offsets.shape[1]: - self.Hdr.data_offsets = np.resize(self.Hdr.data_offsets, - (n_ensembles, test.shape[0])) - self.Hdr.data_offsets[i_ens, 0:int(self.Hdr.n_data_types[i_ens])] = \ - test[0:int(self.Hdr.n_data_types[i_ens])] + self.Hdr.data_offsets = np.resize( + self.Hdr.data_offsets, + (n_ensembles, test.shape[0]), + ) + self.Hdr.data_offsets[ + i_ens, 0 : int(self.Hdr.n_data_types[i_ens]) + ] = test[0 : int(self.Hdr.n_data_types[i_ens])] # Check for end of data types - self.end_reading(f, file_loc, i_data_types, i_ens, bytes_per_ens) + self.end_reading( + f, file_loc, i_data_types, i_ens, bytes_per_ens + ) else: self.bad_check_sum(f, file_loc) i_data_types = -1 # Read binary fixed leader data - elif leader_id == '0x0': + elif leader_id == "0x0": # Update data types counter i_data_types += 1 # Read and decode firmware version - self.Inst.firm_ver[i_ens] = np.fromfile(f, np.uint8, count=1)[0] - self.Inst.firm_ver[i_ens] = self.Inst.firm_ver[i_ens] + \ - np.fromfile(f, np.uint8, count=1)[0] / 100 + self.Inst.firm_ver[i_ens] = np.fromfile( + f, np.uint8, count=1 + )[0] + self.Inst.firm_ver[i_ens] = ( + self.Inst.firm_ver[i_ens] + + np.fromfile(f, np.uint8, count=1)[0] / 100 + ) # Read and decode instrument characteristics bitls = np.fromfile(f, np.uint8, count=1)[0] @@ -267,29 +304,29 @@ class Pd0TRDI(object): val = int(bitls[4], 2) if val == 0: - self.Inst.pat[i_ens] = 'Concave' + self.Inst.pat[i_ens] = "Concave" elif val == 1: - self.Inst.pat[i_ens] = 'Convex' + self.Inst.pat[i_ens] = "Convex" else: - self.Inst.pat[i_ens] = 'n/a' + self.Inst.pat[i_ens] = "n/a" self.Inst.sensor_CFG[i_ens] = int(bitls[2:3], 2) + 1 val = int(bitls[1], 2) if val == 0: - self.Inst.xducer[i_ens] = 'Not Attached' + self.Inst.xducer[i_ens] = "Not Attached" elif val == 1: - self.Inst.xducer[i_ens] = 'Attached' + self.Inst.xducer[i_ens] = "Attached" else: - self.Inst.xducer[i_ens] = 'n/a' + self.Inst.xducer[i_ens] = "n/a" val = int(bitls[0], 2) if val == 0: - self.Sensor.orient[i_ens] = 'Down' + self.Sensor.orient[i_ens] = "Down" elif val == 1: - self.Sensor.orient[i_ens] = 'Up' + self.Sensor.orient[i_ens] = "Up" else: - self.Sensor.orient[i_ens] = 'n/a' + self.Sensor.orient[i_ens] = "n/a" bitms = np.fromfile(f, np.uint8, count=1)[0] bitms = "{0:08b}".format(bitms) @@ -321,259 +358,425 @@ class Pd0TRDI(object): val = np.fromfile(f, np.uint8, count=1)[0] if val == 0: - self.Inst.data_type[i_ens] = 'Real' + self.Inst.data_type[i_ens] = "Real" else: - self.Inst.data_type[i_ens] = 'Simu' + self.Inst.data_type[i_ens] = "Simu" - # Position file pointer and read configuration information + # Position file pointer and read configuration + # information f.seek(1, 1) - self.Cfg.n_beams[i_ens] = np.fromfile(f, np.uint8, count=1)[0] + self.Cfg.n_beams[i_ens] = 
np.fromfile(f, np.uint8, count=1)[ + 0 + ] self.Cfg.wn[i_ens] = np.fromfile(f, np.uint8, count=1)[0] self.Cfg.wp[i_ens] = np.fromfile(f, np.uint16, count=1)[0] - self.Cfg.ws_cm[i_ens] = np.fromfile(f, np.uint16, count=1)[0] - self.Cfg.wf_cm[i_ens] = np.fromfile(f, np.uint16, count=1)[0] + self.Cfg.ws_cm[i_ens] = np.fromfile(f, np.uint16, count=1)[ + 0 + ] + self.Cfg.wf_cm[i_ens] = np.fromfile(f, np.uint16, count=1)[ + 0 + ] self.Cfg.wm[i_ens] = np.fromfile(f, np.uint8, count=1)[0] self.Cfg.wc[i_ens] = np.fromfile(f, np.uint8, count=1)[0] - self.Cfg.code_reps[i_ens] = np.fromfile(f, np.uint8, count=1)[0] - self.Cfg.wg_per[i_ens] = np.fromfile(f, np.uint8, count=1)[0] - self.Cfg.we_mmps[i_ens] = np.fromfile(f, np.uint16, count=1)[0] - self.Cfg.tp_sec[i_ens] = np.sum(np.fromfile(f, np.uint8, count=3) * np.array([60, 1, 0.01])) + self.Cfg.code_reps[i_ens] = np.fromfile( + f, np.uint8, count=1 + )[0] + self.Cfg.wg_per[i_ens] = np.fromfile(f, np.uint8, count=1)[ + 0 + ] + self.Cfg.we_mmps[i_ens] = np.fromfile( + f, np.uint16, count=1 + )[0] + self.Cfg.tp_sec[i_ens] = np.sum( + np.fromfile(f, np.uint8, count=3) + * np.array([60, 1, 0.01]) + ) self.Cfg.ex[i_ens] = "{0:08b}".format(ord(f.read(1))) val = int(self.Cfg.ex[i_ens][3:5], 2) if val == 0: - self.Cfg.coord_sys[i_ens] = 'Beam' + self.Cfg.coord_sys[i_ens] = "Beam" elif val == 1: - self.Cfg.coord_sys[i_ens] = 'Inst' + self.Cfg.coord_sys[i_ens] = "Inst" elif val == 2: - self.Cfg.coord_sys[i_ens] = 'Ship' + self.Cfg.coord_sys[i_ens] = "Ship" elif val == 3: - self.Cfg.coord_sys[i_ens] = 'Earth' + self.Cfg.coord_sys[i_ens] = "Earth" else: self.Cfg.coord_sys[i_ens] = "N/a" val = int(self.Cfg.ex[i_ens][5], 2) if val == 0: - self.Cfg.use_pr = 'No' + self.Cfg.use_pr = "No" elif val == 1: - self.Cfg.use_pr = 'Yes' + self.Cfg.use_pr = "Yes" else: - self.Cfg.use_pr = 'N/a' + self.Cfg.use_pr = "N/a" val = int(self.Cfg.ex[i_ens][6], 2) if val == 0: - self.Cfg.use_3beam = 'No' + self.Cfg.use_3beam = "No" elif val == 1: - self.Cfg.use_3beam = 'Yes' + self.Cfg.use_3beam = "Yes" else: - self.Cfg.use_3beam = 'N/a' + self.Cfg.use_3beam = "N/a" val = int(self.Cfg.ex[i_ens][7], 2) if val == 0: - self.Cfg.map_bins = 'No' + self.Cfg.map_bins = "No" elif val == 1: - self.Cfg.map_bins = 'Yes' + self.Cfg.map_bins = "Yes" else: - self.Cfg.map_bins = 'N/a' - - self.Cfg.ea_deg[i_ens] = np.fromfile(f, np.int16, count=1)[0] * 0.01 - self.Cfg.eb_deg[i_ens] = np.fromfile(f, np.uint16, count=1)[0] * 0.01 - self.Cfg.ez[i_ens] = "{0:08b}".format(np.fromfile(f, np.uint8, count=1)[0]) + self.Cfg.map_bins = "N/a" + + self.Cfg.ea_deg[i_ens] = ( + np.fromfile(f, np.int16, count=1)[0] * 0.01 + ) + self.Cfg.eb_deg[i_ens] = ( + np.fromfile(f, np.uint16, count=1)[0] * 0.01 + ) + self.Cfg.ez[i_ens] = "{0:08b}".format( + np.fromfile(f, np.uint8, count=1)[0] + ) val = int(self.Cfg.ez[i_ens][:2], 2) if val == 0: - self.Cfg.sos_src[i_ens] = 'Manual EC' + self.Cfg.sos_src[i_ens] = "Manual EC" elif val == 1: - self.Cfg.sos_src[i_ens] = 'Calculated' + self.Cfg.sos_src[i_ens] = "Calculated" elif val == 3: - self.Cfg.sos_src[i_ens] = 'SVSS Sensor' + self.Cfg.sos_src[i_ens] = "SVSS Sensor" else: - self.Cfg.sos_src[i_ens] = 'N/a' + self.Cfg.sos_src[i_ens] = "N/a" val = int(self.Cfg.ez[i_ens][2], 2) if val == 0: - self.Cfg.xdcr_dep_srs[i_ens] = 'Manual ED' + self.Cfg.xdcr_dep_srs[i_ens] = "Manual ED" if val == 1: - self.Cfg.xdcr_dep_srs[i_ens] = 'Sensor' + self.Cfg.xdcr_dep_srs[i_ens] = "Sensor" else: - self.Cfg.xdcr_dep_srs[i_ens] = 'N/a' + self.Cfg.xdcr_dep_srs[i_ens] = "N/a" val = 
int(self.Cfg.ez[i_ens][3], 2) if val == 0: - self.Cfg.head_src[i_ens] = 'Manual EH' + self.Cfg.head_src[i_ens] = "Manual EH" if val == 1: - self.Cfg.head_src[i_ens] = 'Int. Sensor' + self.Cfg.head_src[i_ens] = "Int. Sensor" else: - self.Cfg.head_src[i_ens] = 'N/a' + self.Cfg.head_src[i_ens] = "N/a" val = int(self.Cfg.ez[i_ens][4], 2) if val == 0: - self.Cfg.pitch_src[i_ens] = 'Manual EP' + self.Cfg.pitch_src[i_ens] = "Manual EP" if val == 1: - self.Cfg.pitch_src[i_ens] = 'Int. Sensor' + self.Cfg.pitch_src[i_ens] = "Int. Sensor" else: - self.Cfg.pitch_src[i_ens] = 'N/a' + self.Cfg.pitch_src[i_ens] = "N/a" val = int(self.Cfg.ez[i_ens][5], 2) if val == 0: - self.Cfg.roll_src[i_ens] = 'Manual ER' + self.Cfg.roll_src[i_ens] = "Manual ER" if val == 1: - self.Cfg.roll_src[i_ens] = 'Int. Sensor' + self.Cfg.roll_src[i_ens] = "Int. Sensor" else: - self.Cfg.roll_src[i_ens] = 'N/a' + self.Cfg.roll_src[i_ens] = "N/a" val = int(self.Cfg.ez[i_ens][6], 2) if val == 0: - self.Cfg.xdcr_dep_srs[i_ens] = 'Manual ES' + self.Cfg.xdcr_dep_srs[i_ens] = "Manual ES" if val == 1: - self.Cfg.xdcr_dep_srs[i_ens] = 'Int. Sensor' + self.Cfg.xdcr_dep_srs[i_ens] = "Int. Sensor" else: - self.Cfg.xdcr_dep_srs[i_ens] = 'N/a' + self.Cfg.xdcr_dep_srs[i_ens] = "N/a" val = int(self.Cfg.ez[i_ens][7], 2) if val == 0: - self.Cfg.temp_src[i_ens] = 'Manual ET' + self.Cfg.temp_src[i_ens] = "Manual ET" if val == 1: - self.Cfg.temp_src[i_ens] = 'Int. Sensor' + self.Cfg.temp_src[i_ens] = "Int. Sensor" else: - self.Cfg.temp_src[i_ens] = 'N/a' - - self.Cfg.sensor_avail[i_ens] = "{0:08b}".format(np.fromfile(f, np.uint8, count=1)[0]) - self.Cfg.dist_bin1_cm[i_ens] = np.fromfile(f, np.uint16, count=1)[0] - self.Cfg.xmit_pulse_cm[i_ens] = np.fromfile(f, np.uint16, count=1)[0] - self.Cfg.ref_lay_str_cell[i_ens] = np.fromfile(f, np.uint8, count=1)[0] - self.Cfg.ref_lay_end_cell[i_ens] = np.fromfile(f, np.uint8, count=1)[0] + self.Cfg.temp_src[i_ens] = "N/a" + + self.Cfg.sensor_avail[i_ens] = "{0:08b}".format( + np.fromfile(f, np.uint8, count=1)[0] + ) + self.Cfg.dist_bin1_cm[i_ens] = np.fromfile( + f, np.uint16, count=1 + )[0] + self.Cfg.xmit_pulse_cm[i_ens] = np.fromfile( + f, np.uint16, count=1 + )[0] + self.Cfg.ref_lay_str_cell[i_ens] = np.fromfile( + f, np.uint8, count=1 + )[0] + self.Cfg.ref_lay_end_cell[i_ens] = np.fromfile( + f, np.uint8, count=1 + )[0] self.Cfg.wa[i_ens] = np.fromfile(f, np.uint8, count=1)[0] self.Cfg.cx[i_ens] = np.fromfile(f, np.uint8, count=1)[0] - self.Cfg.lag_cm[i_ens] = np.fromfile(f, np.uint16, count=1)[0] - self.Cfg.cpu_ser_no[i_ens] = np.fromfile(f, np.uint8, count=1)[0] + self.Cfg.lag_cm[i_ens] = np.fromfile(f, np.uint16, count=1)[ + 0 + ] + self.Cfg.cpu_ser_no[i_ens] = np.fromfile( + f, np.uint8, count=1 + )[0] self.Cfg.wb[i_ens] = np.fromfile(f, np.uint8, count=1)[0] self.Cfg.cq[i_ens] = np.fromfile(f, np.uint8, count=1)[0] - # Check if more data types need to be read and position the pointer - self.end_reading(f, file_loc, i_data_types, i_ens, bytes_per_ens) + # Check if more data types need to be read and + # position the pointer + self.end_reading( + f, file_loc, i_data_types, i_ens, bytes_per_ens + ) # Read variable leader data - elif leader_id == '0x80': + elif leader_id == "0x80": # Update the data types counter i_data_types += 1 # Read instrument clock and sensor data - self.Sensor.num[i_ens] = np.fromfile(f, np.uint16, count=1)[0] - self.Sensor.date_not_y2k[i_ens, :] = np.fromfile(f, np.uint8, count=3) - self.Sensor.time[i_ens, :] = np.fromfile(f, np.uint8, count=4) - self.Sensor.num_fact[i_ens] = 
np.fromfile(f, np.uint8, count=1)[0] - self.Sensor.num_tot[i_ens] = self.Sensor.num[i_ens] + self.Sensor.num_fact[i_ens]*65535 - self.Sensor.bit_test[i_ens] = np.fromfile(f, np.uint16, count=1)[0] - self.Sensor.sos_mps[i_ens] = np.fromfile(f, np.uint16, count=1)[0] - self.Sensor.xdcr_depth_dm[i_ens] = np.fromfile(f, np.uint16, count=1)[0] - self.Sensor.heading_deg[i_ens] = np.fromfile(f, np.uint16, count=1)[0] / 100. - self.Sensor.pitch_deg[i_ens] = np.fromfile(f, np.int16, count=1)[0] / 100. - self.Sensor.roll_deg[i_ens] = np.fromfile(f, np.int16, count=1)[0] / 100. - self.Sensor.salinity_ppt[i_ens] = np.fromfile(f, np.uint16, count=1)[0] - self.Sensor.temperature_deg_c[i_ens] = np.fromfile(f, np.int16, count=1)[0] / 100. - self.Sensor.mpt_msc[i_ens, :] = np.fromfile(f, np.uint8, count=3) - self.Sensor.heading_std_dev_deg[i_ens] = np.fromfile(f, np.uint8, count=1)[0] - self.Sensor.pitch_std_dev_deg[i_ens] = np.fromfile(f, np.uint8, count=1)[0] / 10. - self.Sensor.roll_std_dev_deg[i_ens] = np.fromfile(f, np.uint8, count=1) / 10. - self.Sensor.xmit_current[i_ens] = np.fromfile(f, np.uint8, count=1)[0] - self.Sensor.xmit_voltage[i_ens] = np.fromfile(f, np.uint8, count=1)[0] - self.Sensor.ambient_temp[i_ens] = np.fromfile(f, np.uint8, count=1)[0] - self.Sensor.pressure_pos[i_ens] = np.fromfile(f, np.uint8, count=1)[0] - self.Sensor.pressure_neg[i_ens] = np.fromfile(f, np.uint8, count=1)[0] - self.Sensor.attitude_temp[i_ens] = np.fromfile(f, np.uint8, count=1)[0] - self.Sensor.attitude[i_ens] = np.fromfile(f, np.uint8, count=1)[0] - self.Sensor.contam_sensor[i_ens] = np.fromfile(f, np.uint8, count=1)[0] - self.Sensor.error_status_word[i_ens] = ["{0:08b}".format(x) - for x in np.fromfile(f, np.uint8, count=4)] + self.Sensor.num[i_ens] = np.fromfile(f, np.uint16, count=1)[ + 0 + ] + self.Sensor.date_not_y2k[i_ens, :] = np.fromfile( + f, np.uint8, count=3 + ) + self.Sensor.time[i_ens, :] = np.fromfile( + f, np.uint8, count=4 + ) + self.Sensor.num_fact[i_ens] = np.fromfile( + f, np.uint8, count=1 + )[0] + self.Sensor.num_tot[i_ens] = ( + self.Sensor.num[i_ens] + + self.Sensor.num_fact[i_ens] * 65535 + ) + self.Sensor.bit_test[i_ens] = np.fromfile( + f, np.uint16, count=1 + )[0] + self.Sensor.sos_mps[i_ens] = np.fromfile( + f, np.uint16, count=1 + )[0] + self.Sensor.xdcr_depth_dm[i_ens] = np.fromfile( + f, np.uint16, count=1 + )[0] + self.Sensor.heading_deg[i_ens] = ( + np.fromfile(f, np.uint16, count=1)[0] / 100.0 + ) + self.Sensor.pitch_deg[i_ens] = ( + np.fromfile(f, np.int16, count=1)[0] / 100.0 + ) + self.Sensor.roll_deg[i_ens] = ( + np.fromfile(f, np.int16, count=1)[0] / 100.0 + ) + self.Sensor.salinity_ppt[i_ens] = np.fromfile( + f, np.uint16, count=1 + )[0] + self.Sensor.temperature_deg_c[i_ens] = ( + np.fromfile(f, np.int16, count=1)[0] / 100.0 + ) + self.Sensor.mpt_msc[i_ens, :] = np.fromfile( + f, np.uint8, count=3 + ) + self.Sensor.heading_std_dev_deg[i_ens] = np.fromfile( + f, np.uint8, count=1 + )[0] + self.Sensor.pitch_std_dev_deg[i_ens] = ( + np.fromfile(f, np.uint8, count=1)[0] / 10.0 + ) + self.Sensor.roll_std_dev_deg[i_ens] = ( + np.fromfile(f, np.uint8, count=1) / 10.0 + ) + self.Sensor.xmit_current[i_ens] = np.fromfile( + f, np.uint8, count=1 + )[0] + self.Sensor.xmit_voltage[i_ens] = np.fromfile( + f, np.uint8, count=1 + )[0] + self.Sensor.ambient_temp[i_ens] = np.fromfile( + f, np.uint8, count=1 + )[0] + self.Sensor.pressure_pos[i_ens] = np.fromfile( + f, np.uint8, count=1 + )[0] + self.Sensor.pressure_neg[i_ens] = np.fromfile( + f, np.uint8, count=1 + )[0] + 
self.Sensor.attitude_temp[i_ens] = np.fromfile( + f, np.uint8, count=1 + )[0] + self.Sensor.attitude[i_ens] = np.fromfile( + f, np.uint8, count=1 + )[0] + self.Sensor.contam_sensor[i_ens] = np.fromfile( + f, np.uint8, count=1 + )[0] + self.Sensor.error_status_word[i_ens] = [ + "{0:08b}".format(x) + for x in np.fromfile(f, np.uint8, count=4) + ] f.seek(2, 1) - self.Sensor.pressure_pascal[i_ens] = np.fromfile(f, np.uint32, count=1)[0] - self.Sensor.pressure_var_pascal[i_ens] = np.fromfile(f, np.uint32, count=1)[0] + self.Sensor.pressure_pascal[i_ens] = np.fromfile( + f, np.uint32, count=1 + )[0] + self.Sensor.pressure_var_pascal[i_ens] = np.fromfile( + f, np.uint32, count=1 + )[0] f.seek(1, 1) - self.Sensor.date_y2k[i_ens, :] = np.fromfile(f, np.uint8, count=4) - self.Sensor.time_y2k[i_ens, :] = np.fromfile(f, np.uint8, count=4) - self.Sensor.date[i_ens, :] = self.Sensor.date_not_y2k[i_ens, :] - self.Sensor.date[i_ens, 0] = self.Sensor.date_y2k[i_ens, 0] * 100 + \ - self.Sensor.date_y2k[i_ens, 1] - self.Cfg.lag_near_bottom[i_ens] = np.fromfile(f, np.uint8, count=1)[0] - - # Check if more data types need to be read and position the pointer - self.end_reading(f, file_loc, i_data_types, i_ens, bytes_per_ens) + self.Sensor.date_y2k[i_ens, :] = np.fromfile( + f, np.uint8, count=4 + ) + self.Sensor.time_y2k[i_ens, :] = np.fromfile( + f, np.uint8, count=4 + ) + self.Sensor.date[i_ens, :] = self.Sensor.date_not_y2k[ + i_ens, : + ] + self.Sensor.date[i_ens, 0] = ( + self.Sensor.date_y2k[i_ens, 0] * 100 + + self.Sensor.date_y2k[i_ens, 1] + ) + self.Cfg.lag_near_bottom[i_ens] = np.fromfile( + f, np.uint8, count=1 + )[0] + + # Check if more data types need to be read and + # position the pointer + self.end_reading( + f, file_loc, i_data_types, i_ens, bytes_per_ens + ) # Read water-tracking velocity data - elif leader_id == '0x100': + elif leader_id == "0x100": # Update the data types counter i_data_types += 1 if self.Cfg.wn[i_ens] > self.Wt.vel_mps.shape[1]: - append = np.zeros([self.Wt.vel_mps.shape[0], - int(self.Cfg.wn[i_ens] - self.Wt.vel_mps.shape[1]), - self.Wt.vel_mps.shape[2]]) + append = np.zeros( + [ + self.Wt.vel_mps.shape[0], + int( + self.Cfg.wn[i_ens] + - self.Wt.vel_mps.shape[1] + ), + self.Wt.vel_mps.shape[2], + ] + ) self.Wt.vel_mps = np.hstack([self.Wt.vel_mps, append]) - dummy = np.fromfile(f, np.int16, count=int(self.Cfg.wn[i_ens]*4)) - dummy = np.reshape(dummy, [int(self.Cfg.wn[i_ens]), n_velocities]) - self.Wt.vel_mps[:n_velocities, :int(self.Cfg.wn[i_ens]), i_ens] = dummy.T - - # Check if more data types need to be read and position the pointer - self.end_reading(f, file_loc, i_data_types, i_ens, bytes_per_ens) + dummy = np.fromfile( + f, np.int16, count=int(self.Cfg.wn[i_ens] * 4) + ) + dummy = np.reshape( + dummy, [int(self.Cfg.wn[i_ens]), n_velocities] + ) + self.Wt.vel_mps[ + :n_velocities, : int(self.Cfg.wn[i_ens]), i_ens + ] = dummy.T + + # Check if more data types need to be read and + # position the pointer + self.end_reading( + f, file_loc, i_data_types, i_ens, bytes_per_ens + ) # Read correlation magnitude - elif leader_id == '0x200': + elif leader_id == "0x200": # Update the data types counter i_data_types += 1 if self.Cfg.wn[i_ens] > self.Wt.corr.shape[1]: - append = np.zeros([self.Wt.corr.shape[0], - int(self.Cfg.wn[i_ens] - self.Wt.corr.shape[1]), - self.Wt.corr.shape[2]]) + append = np.zeros( + [ + self.Wt.corr.shape[0], + int(self.Cfg.wn[i_ens] - self.Wt.corr.shape[1]), + self.Wt.corr.shape[2], + ] + ) self.Wt.corr = np.hstack([self.Wt.corr, append]) - dummy = 
np.fromfile(f, np.uint8, count=int(self.Cfg.wn[i_ens]*4)) - dummy = np.reshape(dummy, [int(self.Cfg.wn[i_ens]), n_velocities]) - self.Wt.corr[:n_velocities, :int(self.Cfg.wn[i_ens]), i_ens] = dummy.T - - # Check if more data types need to be read and position the pointer - self.end_reading(f, file_loc, i_data_types, i_ens, bytes_per_ens) + dummy = np.fromfile( + f, np.uint8, count=int(self.Cfg.wn[i_ens] * 4) + ) + dummy = np.reshape( + dummy, [int(self.Cfg.wn[i_ens]), n_velocities] + ) + self.Wt.corr[ + :n_velocities, : int(self.Cfg.wn[i_ens]), i_ens + ] = dummy.T + + # Check if more data types need to be read and + # position the pointer + self.end_reading( + f, file_loc, i_data_types, i_ens, bytes_per_ens + ) # Read echo intensity - elif leader_id == '0x300': + elif leader_id == "0x300": # Update the data types counter i_data_types += 1 if self.Cfg.wn[i_ens] > self.Wt.rssi.shape[1]: - append = np.zeros([self.Wt.rssi.shape[0], - int(self.Cfg.wn[i_ens] - self.Wt.rssi.shape[1]), - self.Wt.rssi.shape[2]]) + append = np.zeros( + [ + self.Wt.rssi.shape[0], + int(self.Cfg.wn[i_ens] - self.Wt.rssi.shape[1]), + self.Wt.rssi.shape[2], + ] + ) self.Wt.rssi = np.hstack([self.Wt.rssi, append]) - dummy = np.fromfile(f, np.uint8, count=int(self.Cfg.wn[i_ens]*4)) - dummy = np.reshape(dummy, [int(self.Cfg.wn[i_ens]), n_velocities]) - self.Wt.rssi[:n_velocities, :int(self.Cfg.wn[i_ens]), i_ens] = dummy.T - - # Check if more data types need to be read and position the pointer - self.end_reading(f, file_loc, i_data_types, i_ens, bytes_per_ens) + dummy = np.fromfile( + f, np.uint8, count=int(self.Cfg.wn[i_ens] * 4) + ) + dummy = np.reshape( + dummy, [int(self.Cfg.wn[i_ens]), n_velocities] + ) + self.Wt.rssi[ + :n_velocities, : int(self.Cfg.wn[i_ens]), i_ens + ] = dummy.T + + # Check if more data types need to be read and + # position the pointer + self.end_reading( + f, file_loc, i_data_types, i_ens, bytes_per_ens + ) # Read percent-good data - elif leader_id == '0x400': + elif leader_id == "0x400": # Update the data types counter i_data_types += 1 if self.Cfg.wn[i_ens] > self.Wt.pergd.shape[1]: - append = np.zeros([self.Wt.pergd.shape[0], - int(self.Cfg.wn[i_ens] - self.Wt.pergd.shape[1]), - self.Wt.pergd.shape[2]]) + append = np.zeros( + [ + self.Wt.pergd.shape[0], + int( + self.Cfg.wn[i_ens] - self.Wt.pergd.shape[1] + ), + self.Wt.pergd.shape[2], + ] + ) self.Wt.pergd = np.hstack([self.Wt.pergd, append]) - dummy = np.fromfile(f, np.uint8, count=int(self.Cfg.wn[i_ens]*4)) - dummy = np.reshape(dummy, [int(self.Cfg.wn[i_ens]), n_velocities]) - self.Wt.pergd[:n_velocities, :int(self.Cfg.wn[i_ens]), i_ens] = dummy.T - - # Check if more data types need to be read and position the pointer - self.end_reading(f, file_loc, i_data_types, i_ens, bytes_per_ens) + dummy = np.fromfile( + f, np.uint8, count=int(self.Cfg.wn[i_ens] * 4) + ) + dummy = np.reshape( + dummy, [int(self.Cfg.wn[i_ens]), n_velocities] + ) + self.Wt.pergd[ + :n_velocities, : int(self.Cfg.wn[i_ens]), i_ens + ] = dummy.T + + # Check if more data types need to be read and + # position the pointer + self.end_reading( + f, file_loc, i_data_types, i_ens, bytes_per_ens + ) # Read bottom track data - elif leader_id == '0x600': + elif leader_id == "0x600": # Update the data types counter i_data_types += 1 @@ -584,10 +787,14 @@ class Pd0TRDI(object): self.Cfg.ba[i_ens] = np.fromfile(f, np.uint8, count=1)[0] self.Cfg.bg[i_ens] = np.fromfile(f, np.uint8, count=1)[0] self.Cfg.bm[i_ens] = np.fromfile(f, np.uint8, count=1)[0] - self.Cfg.be_mmps[i_ens] = 
np.fromfile(f, np.uint16, count=1)[0] + self.Cfg.be_mmps[i_ens] = np.fromfile( + f, np.uint16, count=1 + )[0] # Read winriver 10.06 format GPS data - self.Gps.lat_deg[i_ens] = (np.fromfile(f, np.int32, count=1)[0]/2**31) * 180 + self.Gps.lat_deg[i_ens] = ( + np.fromfile(f, np.int32, count=1)[0] / 2**31 + ) * 180 # Read the Least Significant Bytes for beam depths dummy = np.fromfile(f, np.uint16, count=4) @@ -612,16 +819,20 @@ class Pd0TRDI(object): # Read WinRiver 10.06 format GPS data dummy = np.fromfile(f, np.uint16, count=1)[0] if dummy != -32768: - self.Gps.alt_m[i_ens] = (dummy-32768)/10 + self.Gps.alt_m[i_ens] = (dummy - 32768) / 10 else: self.Gps.altm[i_ens] = np.nan long2 = np.fromfile(f, np.uint16, count=1)[0] - self.Gps.long_deg[i_ens] = ((long1+long2*2**16)/2**31)*180 + self.Gps.long_deg[i_ens] = ( + (long1 + long2 * 2**16) / 2**31 + ) * 180 if self.Gps.long_deg[i_ens] > 180: self.Gps.long_deg[i_ens] -= 360 - self.Bt.ext_depth_cm[i_ens] = np.fromfile(f, np.int16, count=1)[0] + self.Bt.ext_depth_cm[i_ens] = np.fromfile( + f, np.int16, count=1 + )[0] dummy = np.fromfile(f, np.int16, count=1)[0] if dummy != -32768: self.Gps.gga_vel_e_mps[i_ens] = dummy * -1 / 1000 @@ -657,43 +868,73 @@ class Pd0TRDI(object): self.Gps.gga_n_stats[i_ens] = dummy f.seek(1, 1) - self.Gps.gsa_sat[i_ens, 4] = np.fromfile(f, np.uint8, count=1)[0] - self.Gps.gsa_sat[i_ens, 5] = np.fromfile(f, np.uint8, count=1)[0] - self.Gps.gga_diff[i_ens] = np.fromfile(f, np.uint8, count=1)[0] + self.Gps.gsa_sat[i_ens, 4] = np.fromfile( + f, np.uint8, count=1 + )[0] + self.Gps.gsa_sat[i_ens, 5] = np.fromfile( + f, np.uint8, count=1 + )[0] + self.Gps.gga_diff[i_ens] = np.fromfile( + f, np.uint8, count=1 + )[0] dummy = np.fromfile(f, np.uint8, count=1)[0] if dummy != 0: self.Gps.gga_hdop[i_ens] = dummy / 10 - self.Gps.gsa_sat[i_ens, 0] = np.fromfile(f, np.uint8, count=1)[0] - self.Gps.gsa_sat[i_ens, 1] = np.fromfile(f, np.uint8, count=1)[0] - self.Gps.gsa_sat[i_ens, 2] = np.fromfile(f, np.uint8, count=1)[0] - self.Gps.gsa_sat[i_ens, 3] = np.fromfile(f, np.uint8, count=1)[0] + self.Gps.gsa_sat[i_ens, 0] = np.fromfile( + f, np.uint8, count=1 + )[0] + self.Gps.gsa_sat[i_ens, 1] = np.fromfile( + f, np.uint8, count=1 + )[0] + self.Gps.gsa_sat[i_ens, 2] = np.fromfile( + f, np.uint8, count=1 + )[0] + self.Gps.gsa_sat[i_ens, 3] = np.fromfile( + f, np.uint8, count=1 + )[0] # Read bx configuration setting - self.Cfg.bx_dm[i_ens] = np.fromfile(f, np.uint16, count=1)[0] + self.Cfg.bx_dm[i_ens] = np.fromfile(f, np.uint16, count=1)[ + 0 + ] # Read bottom-tracking RSSI - self.Bt.rssi[0, i_ens] = np.fromfile(f, np.uint8, count=1)[0] - self.Bt.rssi[1, i_ens] = np.fromfile(f, np.uint8, count=1)[0] - self.Bt.rssi[2, i_ens] = np.fromfile(f, np.uint8, count=1)[0] - self.Bt.rssi[3, i_ens] = np.fromfile(f, np.uint8, count=1)[0] + self.Bt.rssi[0, i_ens] = np.fromfile(f, np.uint8, count=1)[ + 0 + ] + self.Bt.rssi[1, i_ens] = np.fromfile(f, np.uint8, count=1)[ + 0 + ] + self.Bt.rssi[2, i_ens] = np.fromfile(f, np.uint8, count=1)[ + 0 + ] + self.Bt.rssi[3, i_ens] = np.fromfile(f, np.uint8, count=1)[ + 0 + ] # Read wj configuration setting self.Cfg.wj[i_ens] = np.fromfile(f, np.uint8, count=1)[0] - # Read most significant byte and compute beam depths + # Read most significant byte and compute beam + # depths dummy = np.fromfile(f, np.uint8, count=1)[0] rr_bt_depth_correction[0:4, i_ens] = dummy.T * 2e16 / 100 - # Check if more data types need to be read and position the pointer - self.end_reading(f, file_loc, i_data_types, i_ens, bytes_per_ens) + # 
Check if more data types need to be read and
+                            # position the pointer
+                            self.end_reading(
+                                f, file_loc, i_data_types, i_ens, bytes_per_ens
+                            )

                         # Read General NMEA Structure
-                        # Data type '2022' contains sub data types the identify specfic NMEA
-                        # 0183 data types that will be decoded. There may be multiple values
+                        # Data type '2022' contains sub-data types that
+                        # identify specific NMEA
+                        # 0183 data types that will be decoded. There may be
+                        # multiple values
                         # for a single ensemble.
-                        elif leader_id == '0x2022':
+                        elif leader_id == "0x2022":
                             i2022 += 1
                             # Update the data types counter
                             i_data_types += 1

@@ -705,52 +946,92 @@ class Pd0TRDI(object):
                             # GGA
                             if specific_id == 100:
                                 j100 += 1
-                                # If the number of values exceeds 20 expand arrays
+                                # If the number of values exceeds 20 expand
+                                # arrays
                                 if j100 > self.Gps2.gga_delta_time.shape[1] - 1:
                                     self.Gps2.gga_expand(n_ensembles)

                                 self.Gps2.gga_delta_time[i_ens, j100] = delta_time
-                                self.Gps2.gga_header[i_ens][j100] = ''.join([chr(x) for x in f.read(10)])
+                                self.Gps2.gga_header[i_ens][j100] = "".join(
+                                    [chr(x) for x in f.read(10)]
+                                )
                                 try:
-                                    temp = ''.join([chr(x) for x in f.read(10)])
-                                    self.Gps2.utc[i_ens, j100] = float(re.findall('^\d+\.\d+|\d+', temp)[0])
+                                    temp = "".join([chr(x) for x in f.read(10)])
+                                    self.Gps2.utc[i_ens, j100] = float(
+                                        re.findall("^\d+\.\d+|\d+", temp)[0]
+                                    )
                                 except ValueError:
                                     self.Gps2.utc[i_ens, j100] = np.nan

-                                self.Gps2.lat_deg[i_ens, j100] = np.fromfile(f, np.float64, count=1)[0]
+                                self.Gps2.lat_deg[i_ens, j100] = np.fromfile(
+                                    f, np.float64, count=1
+                                )[0]
                                 self.Gps2.lat_ref[i_ens][j100] = chr(f.read(1)[0])
-                                self.Gps2.lon_deg[i_ens, j100] = np.fromfile(f, np.float64, count=1)[0]
+                                self.Gps2.lon_deg[i_ens, j100] = np.fromfile(
+                                    f, np.float64, count=1
+                                )[0]
                                 self.Gps2.lon_ref[i_ens][j100] = chr(f.read(1)[0])
-                                self.Gps2.corr_qual[i_ens, j100] = np.fromfile(f, np.uint8, count=1)[0]
-                                self.Gps2.num_sats[i_ens, j100] = np.fromfile(f, np.uint8, count=1)[0]
-                                self.Gps2.hdop[i_ens, j100] = np.fromfile(f, np.float32, count=1)[0]
-                                self.Gps2.alt[i_ens, j100] = np.fromfile(f, np.float32, count=1)[0]
+                                self.Gps2.corr_qual[i_ens, j100] = np.fromfile(
+                                    f, np.uint8, count=1
+                                )[0]
+                                self.Gps2.num_sats[i_ens, j100] = np.fromfile(
+                                    f, np.uint8, count=1
+                                )[0]
+                                self.Gps2.hdop[i_ens, j100] = np.fromfile(
+                                    f, np.float32, count=1
+                                )[0]
+                                self.Gps2.alt[i_ens, j100] = np.fromfile(
+                                    f, np.float32, count=1
+                                )[0]
                                 self.Gps2.alt_unit[i_ens][j100] = chr(f.read(1)[0])
-                                self.Gps2.geoid[i_ens, j100] = np.fromfile(f, np.float32, count=1)[0]
+                                self.Gps2.geoid[i_ens, j100] = np.fromfile(
+                                    f, np.float32, count=1
+                                )[0]
                                 self.Gps2.geoid_unit[i_ens][j100] = chr(f.read(1)[0])
-                                self.Gps2.d_gps_age[i_ens, j100] = np.fromfile(f, np.float32, count=1)[0]
-                                self.Gps2.ref_stat_id[i_ens, j100] = np.fromfile(f, np.int16, count=0)[0]
+                                self.Gps2.d_gps_age[i_ens, j100] = np.fromfile(
+                                    f, np.float32, count=1
+                                )[0]
+                                self.Gps2.ref_stat_id[i_ens, j100] = np.fromfile(
+                                    f, np.int16, count=1
+                                )[0]

                             # VTG
                             elif specific_id == 101:
                                 j101 += 1
-                                # If the number of values exceeds 20 expand arrays
+                                # If the number of values exceeds 20 expand
+                                # arrays
                                 if j101 > self.Gps2.vtg_delta_time.shape[1] - 1:
                                     self.Gps2.vtg_expand(n_ensembles)

                                 self.Gps2.vtg_delta_time[i_ens, j101] = delta_time
-                                self.Gps2.vtg_header[i_ens][j101] = ''.join([chr(x) for x in f.read(10)])
-                                self.Gps2.course_true[i_ens, j101] = np.fromfile(f, np.float32, count=1)[0]
-                                self.Gps2.true_indicator[i_ens][j101] = chr(f.read(1)[0])
-                                self.Gps2.course_mag[i_ens, j101] = np.fromfile(f, np.float32, 
count=1)[0] + self.Gps2.vtg_header[i_ens][j101] = "".join( + [chr(x) for x in f.read(10)] + ) + self.Gps2.course_true[i_ens, j101] = np.fromfile( + f, np.float32, count=1 + )[0] + self.Gps2.true_indicator[i_ens][j101] = chr( + f.read(1)[0] + ) + self.Gps2.course_mag[i_ens, j101] = np.fromfile( + f, np.float32, count=1 + )[0] self.Gps2.mag_indicator[i_ens][j101] = chr(f.read(1)[0]) - self.Gps2.speed_knots[i_ens, j101] = np.fromfile(f, np.float32, count=1)[0] - self.Gps2.knots_indicator[i_ens][j101] = chr(f.read(1)[0]) - self.Gps2.speed_kph[i_ens, j101] = np.fromfile(f, np.float32, count=1)[0] + self.Gps2.speed_knots[i_ens, j101] = np.fromfile( + f, np.float32, count=1 + )[0] + self.Gps2.knots_indicator[i_ens][j101] = chr( + f.read(1)[0] + ) + self.Gps2.speed_kph[i_ens, j101] = np.fromfile( + f, np.float32, count=1 + )[0] self.Gps2.kph_indicator[i_ens][j101] = chr(f.read(1)[0]) - self.Gps2.mode_indicator[i_ens][j101] = chr(f.read(1)[0]) + self.Gps2.mode_indicator[i_ens][j101] = chr( + f.read(1)[0] + ) # Depth sounder elif specific_id == 102: @@ -760,13 +1041,23 @@ class Pd0TRDI(object): self.Gps2.dbt_expand(n_ensembles) self.Gps2.dbt_delta_time[i_ens, j102] = delta_time - self.Gps2.dbt_header[i_ens][j102] = ''.join([chr(x) for x in f.read(10)]) - self.Gps2.depth_ft[i_ens, j102] = np.fromfile(f, np.float32, count=1)[0] + self.Gps2.dbt_header[i_ens][j102] = "".join( + [chr(x) for x in f.read(10)] + ) + self.Gps2.depth_ft[i_ens, j102] = np.fromfile( + f, np.float32, count=1 + )[0] self.Gps2.ft_indicator[i_ens][j102] = chr(f.read(1)[0]) - self.Gps2.depth_m[i_ens, j102] = np.fromfile(f, np.float32, count=1)[0] + self.Gps2.depth_m[i_ens, j102] = np.fromfile( + f, np.float32, count=1 + )[0] self.Gps2.m_indicator[i_ens][j102] = chr(f.read(1)[0]) - self.Gps2.depth_fath[i_ens, j102] = np.fromfile(f, np.float32, count=1)[0] - self.Gps2.fath_indicator[i_ens][j102] = chr(f.read(1)[0]) + self.Gps2.depth_fath[i_ens, j102] = np.fromfile( + f, np.float32, count=1 + )[0] + self.Gps2.fath_indicator[i_ens][j102] = chr( + f.read(1)[0] + ) # External heading elif specific_id == 103: @@ -776,9 +1067,15 @@ class Pd0TRDI(object): self.Gps2.hdt_expand(n_ensembles) self.Gps2.hdt_delta_time[i_ens, j103] = delta_time - self.Gps2.hdt_header[i_ens][j103] = ''.join([chr(x) for x in f.read(10)]) - self.Gps2.heading_deg[i_ens, j103] = np.fromfile(f, np.double, count=1)[0] - self.Gps2.h_true_indicator[i_ens][j103] = chr(f.read(1)[0]) + self.Gps2.hdt_header[i_ens][j103] = "".join( + [chr(x) for x in f.read(10)] + ) + self.Gps2.heading_deg[i_ens, j103] = np.fromfile( + f, np.double, count=1 + )[0] + self.Gps2.h_true_indicator[i_ens][j103] = chr( + f.read(1)[0] + ) # GGA elif specific_id == 104: @@ -789,30 +1086,51 @@ class Pd0TRDI(object): self.Gps2.gga_delta_time[i_ens, j100] = delta_time try: - self.Gps2.gga_header[i_ens][j100] = ''.join([chr(x) for x in f.read(7)]) + self.Gps2.gga_header[i_ens][j100] = "".join( + [chr(x) for x in f.read(7)] + ) except IndexError: - self.Gps2.gga_header[i_ens][j100] = ' ' + self.Gps2.gga_header[i_ens][j100] = " " try: - temp = ''.join([chr(x) for x in f.read(10)]) - self.Gps2.utc[i_ens, j100] = \ - float(re.findall('^\d+\.\d+|\d+', temp)[0]) + temp = "".join([chr(x) for x in f.read(10)]) + self.Gps2.utc[i_ens, j100] = float( + re.findall("^\d+\.\d+|\d+", temp)[0] + ) except (ValueError, AttributeError, IndexError): self.Gps2.utc[i_ens, j100] = np.nan - self.Gps2.lat_deg[i_ens, j100] = np.fromfile(f, np.float64, count=1)[0] + self.Gps2.lat_deg[i_ens, j100] = np.fromfile( + f, np.float64, 
count=1 + )[0] self.Gps2.lat_ref[i_ens][j100] = chr(f.read(1)[0]) - self.Gps2.lon_deg[i_ens, j100] = np.fromfile(f, np.float64, count=1)[0] + self.Gps2.lon_deg[i_ens, j100] = np.fromfile( + f, np.float64, count=1 + )[0] self.Gps2.lon_ref[i_ens][j100] = chr(f.read(1)[0]) - self.Gps2.corr_qual[i_ens, j100] = np.fromfile(f, np.uint8, count=1)[0] - self.Gps2.num_sats[i_ens, j100] = np.fromfile(f, np.uint8, count=1)[0] - self.Gps2.hdop[i_ens, j100] = np.fromfile(f, np.float32, count=1)[0] - self.Gps2.alt[i_ens, j100] = np.fromfile(f, np.float32, count=1)[0] + self.Gps2.corr_qual[i_ens, j100] = np.fromfile( + f, np.uint8, count=1 + )[0] + self.Gps2.num_sats[i_ens, j100] = np.fromfile( + f, np.uint8, count=1 + )[0] + self.Gps2.hdop[i_ens, j100] = np.fromfile( + f, np.float32, count=1 + )[0] + self.Gps2.alt[i_ens, j100] = np.fromfile( + f, np.float32, count=1 + )[0] self.Gps2.alt_unit[i_ens][j100] = chr(f.read(1)[0]) - self.Gps2.geoid[i_ens, j100] = np.fromfile(f, np.float32, count=1)[0] + self.Gps2.geoid[i_ens, j100] = np.fromfile( + f, np.float32, count=1 + )[0] self.Gps2.geoid_unit[i_ens][j100] = chr(f.read(1)[0]) - self.Gps2.d_gps_age[i_ens, j100] = np.fromfile(f, np.float32, count=1)[0] - self.Gps2.ref_stat_id[i_ens, j100] = np.fromfile(f, np.int16, count=1)[0] + self.Gps2.d_gps_age[i_ens, j100] = np.fromfile( + f, np.float32, count=1 + )[0] + self.Gps2.ref_stat_id[i_ens, j100] = np.fromfile( + f, np.int16, count=1 + )[0] # VTG elif specific_id == 105: @@ -822,16 +1140,32 @@ class Pd0TRDI(object): self.Gps2.vtg_expand(n_ensembles) self.Gps2.vtg_delta_time[i_ens, j101] = delta_time - self.Gps2.vtg_header[i_ens][j101] = ''.join([chr(x) for x in f.read(7)]) - self.Gps2.course_true[i_ens, j101] = np.fromfile(f, np.float32, count=1)[0] - self.Gps2.true_indicator[i_ens][j101] = chr(f.read(1)[0]) - self.Gps2.course_mag[i_ens, j101] = np.fromfile(f, np.float32, count=1)[0] + self.Gps2.vtg_header[i_ens][j101] = "".join( + [chr(x) for x in f.read(7)] + ) + self.Gps2.course_true[i_ens, j101] = np.fromfile( + f, np.float32, count=1 + )[0] + self.Gps2.true_indicator[i_ens][j101] = chr( + f.read(1)[0] + ) + self.Gps2.course_mag[i_ens, j101] = np.fromfile( + f, np.float32, count=1 + )[0] self.Gps2.mag_indicator[i_ens][j101] = chr(f.read(1)[0]) - self.Gps2.speed_knots[i_ens, j101] = np.fromfile(f, np.float32, count=1)[0] - self.Gps2.knots_indicator[i_ens][j101] = chr(f.read(1)[0]) - self.Gps2.speed_kph[i_ens, j101] = np.fromfile(f, np.float32, count=1)[0] + self.Gps2.speed_knots[i_ens, j101] = np.fromfile( + f, np.float32, count=1 + )[0] + self.Gps2.knots_indicator[i_ens][j101] = chr( + f.read(1)[0] + ) + self.Gps2.speed_kph[i_ens, j101] = np.fromfile( + f, np.float32, count=1 + )[0] self.Gps2.kph_indicator[i_ens][j101] = chr(f.read(1)[0]) - self.Gps2.mode_indicator[i_ens][j101] = chr(f.read(1)[0]) + self.Gps2.mode_indicator[i_ens][j101] = chr( + f.read(1)[0] + ) # Depth sounder elif specific_id == 106: @@ -841,13 +1175,23 @@ class Pd0TRDI(object): self.Gps2.dbt_expand(n_ensembles) self.Gps2.dbt_delta_time[i_ens, j102] = delta_time - self.Gps2.dbt_header[i_ens][j102] = ''.join([chr(x) for x in f.read(7)]) - self.Gps2.depth_ft[i_ens, j102] = np.fromfile(f, np.float32, count=1)[0] + self.Gps2.dbt_header[i_ens][j102] = "".join( + [chr(x) for x in f.read(7)] + ) + self.Gps2.depth_ft[i_ens, j102] = np.fromfile( + f, np.float32, count=1 + )[0] self.Gps2.ft_indicator[i_ens][j102] = chr(f.read(1)[0]) - self.Gps2.depth_m[i_ens, j102] = np.fromfile(f, np.float32, count=1)[0] + self.Gps2.depth_m[i_ens, j102] = 
np.fromfile( + f, np.float32, count=1 + )[0] self.Gps2.m_indicator[i_ens][j102] = chr(f.read(1)[0]) - self.Gps2.depth_fath[i_ens, j102] = np.fromfile(f, np.float32, count=1)[0] - self.Gps2.fath_indicator[i_ens][j102] = chr(f.read(1)[0]) + self.Gps2.depth_fath[i_ens, j102] = np.fromfile( + f, np.float32, count=1 + )[0] + self.Gps2.fath_indicator[i_ens][j102] = chr( + f.read(1)[0] + ) # External heading elif specific_id == 107: @@ -857,9 +1201,15 @@ class Pd0TRDI(object): self.Gps2.hdt_expand(n_ensembles) self.Gps2.hdt_delta_time[i_ens, j103] = delta_time - self.Gps2.hdt_header[i_ens][j103] = ''.join([chr(x) for x in f.read(7)]) - self.Gps2.heading_deg[i_ens, j103] = np.fromfile(f, np.double, count=1)[0] - self.Gps2.h_true_indicator[i_ens][j103] = chr(f.read(1)[0]) + self.Gps2.hdt_header[i_ens][j103] = "".join( + [chr(x) for x in f.read(7)] + ) + self.Gps2.heading_deg[i_ens, j103] = np.fromfile( + f, np.double, count=1 + )[0] + self.Gps2.h_true_indicator[i_ens][j103] = chr( + f.read(1)[0] + ) # GGA elif specific_id == 204: @@ -868,10 +1218,10 @@ class Pd0TRDI(object): if j100 > self.Gps2.gga_delta_time.shape[1] - 1: self.Gps2.gga_expand(n_ensembles) - temp = ''.join([chr(x) for x in f.read(msg_size)]) + temp = "".join([chr(x) for x in f.read(msg_size)]) self.Gps2.gga_sentence[i_ens][j100] = temp - temp_array = np.array(temp.split(',')) - temp_array[temp_array == '999.9'] = '' + temp_array = np.array(temp.split(",")) + temp_array[temp_array == "999.9"] = "" try: self.Gps2.gga_delta_time[i_ens, j100] = delta_time @@ -879,25 +1229,36 @@ class Pd0TRDI(object): self.Gps2.utc[i_ens, j100] = float(temp_array[1]) lat_str = temp_array[2] lat_deg = float(lat_str[0:2]) - lat_deg = lat_deg+float(lat_str[2:]) / 60 + lat_deg = lat_deg + float(lat_str[2:]) / 60 self.Gps2.lat_deg[i_ens, j100] = lat_deg self.Gps2.lat_ref[i_ens][j100] = temp_array[3] lon_str = temp_array[4] lon_num = float(lon_str) lon_deg = np.floor(lon_num / 100) - lon_deg = lon_deg + (((lon_num / 100.) - lon_deg) * 100.) / 60. 
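+                                    # The GGA longitude field is ddmm.mmmm,
+                                    # so the value is split into whole
+                                    # degrees and decimal minutes and then
+                                    # recombined below; e.g. (illustrative
+                                    # value) lon_str "12311.12" gives
+                                    # 123 + 11.12 / 60 = 123.18533 degrees.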
+ lon_deg = ( + lon_deg + + (((lon_num / 100.0) - lon_deg) * 100.0) / 60.0 + ) self.Gps2.lon_deg[i_ens, j100] = lon_deg self.Gps2.lon_ref[i_ens][j100] = temp_array[5] - self.Gps2.corr_qual[i_ens, j100] = float(temp_array[6]) - self.Gps2.num_sats[i_ens, j100] = float(temp_array[7]) + self.Gps2.corr_qual[i_ens, j100] = float( + temp_array[6] + ) + self.Gps2.num_sats[i_ens, j100] = float( + temp_array[7] + ) self.Gps2.hdop[i_ens, j100] = float(temp_array[8]) self.Gps2.alt[i_ens, j100] = float(temp_array[9]) self.Gps2.alt_unit[i_ens][j100] = temp_array[10] self.Gps2.geoid[i_ens, j100] = temp_array[11] self.Gps2.geoid_unit[i_ens][j100] = temp_array[12] - self.Gps2.d_gps_age[i_ens, j100] = float(temp_array[13]) - idx_star = temp_array[14].find('*') - self.Gps2.ref_stat_id[i_ens, j100] = float(temp_array[15][:idx_star]) + self.Gps2.d_gps_age[i_ens, j100] = float( + temp_array[13] + ) + idx_star = temp_array[14].find("*") + self.Gps2.ref_stat_id[i_ens, j100] = float( + temp_array[15][:idx_star] + ) except (ValueError, EOFError, IndexError): pass @@ -909,24 +1270,38 @@ class Pd0TRDI(object): if j101 > self.Gps2.vtg_delta_time.shape[1] - 1: self.Gps2.vtg_expand(n_ensembles) - temp = ''.join([chr(x) for x in f.read(msg_size)]) + temp = "".join([chr(x) for x in f.read(msg_size)]) self.Gps2.vtg_sentence[i_ens][j100] = temp - temp_array = np.array(temp.split(',')) - temp_array[temp_array == '999.9'] = '' + temp_array = np.array(temp.split(",")) + temp_array[temp_array == "999.9"] = "" try: self.Gps2.vtg_delta_time[i_ens, j101] = delta_time self.Gps2.vtg_header[i_ens][j101] = temp_array[0] - self.Gps2.course_true[i_ens, j101] = valid_number(temp_array[1]) - self.Gps2.true_indicator[i_ens][j101] = temp_array[2] - self.Gps2.course_mag[i_ens, j101] = valid_number(temp_array[3]) + self.Gps2.course_true[i_ens, j101] = valid_number( + temp_array[1] + ) + self.Gps2.true_indicator[i_ens][j101] = temp_array[ + 2 + ] + self.Gps2.course_mag[i_ens, j101] = valid_number( + temp_array[3] + ) self.Gps2.mag_indicator[i_ens][j101] = temp_array[4] - self.Gps2.speed_knots[i_ens, j101] = valid_number(temp_array[5]) - self.Gps2.knots_indicator[i_ens][j101] = temp_array[6] - self.Gps2.speed_kph[i_ens, j101] = valid_number(temp_array[7]) + self.Gps2.speed_knots[i_ens, j101] = valid_number( + temp_array[5] + ) + self.Gps2.knots_indicator[i_ens][j101] = temp_array[ + 6 + ] + self.Gps2.speed_kph[i_ens, j101] = valid_number( + temp_array[7] + ) self.Gps2.kph_indicator[i_ens][j101] = temp_array[8] - idx_star = temp_array[9].find('*') - self.Gps2.mode_indicator[i_ens][j101] = temp_array[9][:idx_star] + idx_star = temp_array[9].find("*") + self.Gps2.mode_indicator[i_ens][j101] = temp_array[ + 9 + ][:idx_star] except (ValueError, EOFError, IndexError): pass @@ -938,20 +1313,28 @@ class Pd0TRDI(object): if j102 > self.Gps2.dbt_delta_time.shape[1] - 1: self.Gps2.dbt_expand(n_ensembles) - temp = ''.join([chr(x) for x in f.read(msg_size)]) - temp_array = np.array(temp.split(',')) - temp_array[temp_array == '999.9'] = '' + temp = "".join([chr(x) for x in f.read(msg_size)]) + temp_array = np.array(temp.split(",")) + temp_array[temp_array == "999.9"] = "" try: self.Gps2.dbt_delta_time[i_ens, j102] = delta_time self.Gps2.dbt_header[i_ens][j102] = temp_array[0] - self.Gps2.depth_ft[i_ens, j102] = float(temp_array[1]) + self.Gps2.depth_ft[i_ens, j102] = float( + temp_array[1] + ) self.Gps2.ft_indicator[i_ens][j102] = temp_array[2] - self.Gps2.depth_m[i_ens, j102] = float(temp_array[3]) + self.Gps2.depth_m[i_ens, j102] = float( + temp_array[3] + 
) self.Gps2.m_indicator[i_ens][j102] = temp_array[4] - self.Gps2.depth_fath[i_ens, j102] = float(temp_array[5]) - idx_star = temp.find('*') - self.Gps2.fath_indicator[i_ens][j102] = temp_array[6][:idx_star] + self.Gps2.depth_fath[i_ens, j102] = float( + temp_array[5] + ) + idx_star = temp.find("*") + self.Gps2.fath_indicator[i_ens][j102] = temp_array[ + 6 + ][:idx_star] except (ValueError, EOFError, IndexError): pass @@ -963,274 +1346,544 @@ class Pd0TRDI(object): if j103 > self.Gps2.hdt_delta_time.shape[1] - 1: self.Gps2.hdt_expand(n_ensembles) - temp = ''.join([chr(x) for x in f.read(msg_size)]) - temp_array = np.array(temp.split(',')) - temp_array[temp_array == '999.9'] = '' + temp = "".join([chr(x) for x in f.read(msg_size)]) + temp_array = np.array(temp.split(",")) + temp_array[temp_array == "999.9"] = "" try: self.Gps2.hdt_delta_time[i_ens, j103] = delta_time self.Gps2.hdt_header[i_ens][j103] = temp_array[0] - self.Gps2.heading_deg[i_ens, j103] = float(temp_array[1]) - idx_star = temp.find('*') - self.Gps2.h_true_indicator[i_ens][j103] = temp_array[2][:idx_star] + self.Gps2.heading_deg[i_ens, j103] = float( + temp_array[1] + ) + idx_star = temp.find("*") + self.Gps2.h_true_indicator[i_ens][ + j103 + ] = temp_array[2][:idx_star] except (ValueError, EOFError, IndexError): pass - # Check if more data types need to be read and position the pointer - self.end_reading(f, file_loc, i_data_types, i_ens, bytes_per_ens) + # Check if more data types need to be read and + # position the pointer + self.end_reading( + f, file_loc, i_data_types, i_ens, bytes_per_ens + ) # Raw NMEA dbt sentence - elif leader_id == '0x2100': + elif leader_id == "0x2100": # Update data types counter i_data_types += 1 # Reposition file pointer - f.seek(int(self.Hdr.data_offsets[i_ens, i_data_types-1])+file_loc+4, 0) + f.seek( + int(self.Hdr.data_offsets[i_ens, i_data_types - 1]) + + file_loc + + 4, + 0, + ) # Determine the number of characters to read if i_data_types < self.Hdr.n_data_types[i_ens]: - num_2_read = self.Hdr.data_offsets[i_ens, i_data_types] \ - - self.Hdr.data_offsets[i_ens, i_data_types - 1] - 4 + num_2_read = ( + self.Hdr.data_offsets[i_ens, i_data_types] + - self.Hdr.data_offsets[i_ens, i_data_types - 1] + - 4 + ) else: - num_2_read = bytes_per_ens - self.Hdr.data_offsets[i_ens, i_data_types-1] - 6 + num_2_read = ( + bytes_per_ens + - self.Hdr.data_offsets[i_ens, i_data_types - 1] + - 6 + ) # Read DBT sentence - self.Nmea.dbt[i_ens] = ''.join([chr(x) for x in f.read(int(num_2_read))]) + self.Nmea.dbt[i_ens] = "".join( + [chr(x) for x in f.read(int(num_2_read))] + ) - # Check if more data types need to be read and position the pointer - self.end_reading(f, file_loc, i_data_types, i_ens, bytes_per_ens) + # Check if more data types need to be read and + # position the pointer + self.end_reading( + f, file_loc, i_data_types, i_ens, bytes_per_ens + ) # Raw NMEA gga sentence - elif leader_id == '0x2101': + elif leader_id == "0x2101": # Update data types counter i_data_types += 1 # Reposition file pointer - f.seek(int(self.Hdr.data_offsets[i_ens, i_data_types-1])+file_loc+4, 0) + f.seek( + int(self.Hdr.data_offsets[i_ens, i_data_types - 1]) + + file_loc + + 4, + 0, + ) # Determine the number of characters to read if i_data_types < self.Hdr.n_data_types[i_ens]: - num_2_read = self.Hdr.data_offsets[i_ens, i_data_types] \ - - self.Hdr.data_offsets[i_ens, i_data_types - 1] - 4 + num_2_read = ( + self.Hdr.data_offsets[i_ens, i_data_types] + - self.Hdr.data_offsets[i_ens, i_data_types - 1] + - 4 + ) else: - 
num_2_read = bytes_per_ens - self.Hdr.data_offsets[i_ens, i_data_types-1] - 6 + num_2_read = ( + bytes_per_ens + - self.Hdr.data_offsets[i_ens, i_data_types - 1] + - 6 + ) # Read GGA sentence - self.Nmea.gga[i_ens] = ''.join([chr(x) for x in f.read(int(num_2_read))]) + self.Nmea.gga[i_ens] = "".join( + [chr(x) for x in f.read(int(num_2_read))] + ) - # Check if more data types need to be read and position the pointer - self.end_reading(f, file_loc, i_data_types, i_ens, bytes_per_ens) + # Check if more data types need to be read and + # position the pointer + self.end_reading( + f, file_loc, i_data_types, i_ens, bytes_per_ens + ) # Raw NMEA vtg sentence - elif leader_id == '0x2102': + elif leader_id == "0x2102": # Update data types counter i_data_types += 1 # Reposition file pointer - f.seek(int(self.Hdr.data_offsets[i_ens, i_data_types-1])+file_loc+4, 0) + f.seek( + int(self.Hdr.data_offsets[i_ens, i_data_types - 1]) + + file_loc + + 4, + 0, + ) # Determine the number of characters to read if i_data_types < self.Hdr.n_data_types[i_ens]: - num_2_read = self.Hdr.data_offsets[i_ens, i_data_types] \ - - self.Hdr.data_offsets[i_ens, i_data_types - 1] - 4 + num_2_read = ( + self.Hdr.data_offsets[i_ens, i_data_types] + - self.Hdr.data_offsets[i_ens, i_data_types - 1] + - 4 + ) else: - num_2_read = bytes_per_ens - self.Hdr.data_offsets[i_ens, i_data_types-1] - 6 + num_2_read = ( + bytes_per_ens + - self.Hdr.data_offsets[i_ens, i_data_types - 1] + - 6 + ) # Read VTG sentence - self.Nmea.vtg[i_ens] = ''.join([chr(x) for x in f.read(int(num_2_read))]) + self.Nmea.vtg[i_ens] = "".join( + [chr(x) for x in f.read(int(num_2_read))] + ) - # Check if more data types need to be read and position the pointer - self.end_reading(f, file_loc, i_data_types, i_ens, bytes_per_ens) + # Check if more data types need to be read and + # position the pointer + self.end_reading( + f, file_loc, i_data_types, i_ens, bytes_per_ens + ) # Raw NMEA gsa sentence - elif leader_id == '0x2103': + elif leader_id == "0x2103": # Update data types counter i_data_types += 1 # Reposition file pointer - f.seek(int(self.Hdr.data_offsets[i_ens, i_data_types-1])+file_loc+4, 0) + f.seek( + int(self.Hdr.data_offsets[i_ens, i_data_types - 1]) + + file_loc + + 4, + 0, + ) # Determine the number of characters to read if i_data_types < self.Hdr.n_data_types[i_ens]: - num_2_read = self.Hdr.data_offsets[i_ens, i_data_types] \ - - self.Hdr.data_offsets[i_ens, i_data_types - 1] - 4 + num_2_read = ( + self.Hdr.data_offsets[i_ens, i_data_types] + - self.Hdr.data_offsets[i_ens, i_data_types - 1] + - 4 + ) else: - num_2_read = bytes_per_ens - self.Hdr.data_offsets[i_ens, i_data_types-1] - 6 + num_2_read = ( + bytes_per_ens + - self.Hdr.data_offsets[i_ens, i_data_types - 1] + - 6 + ) # Read GSA sentence - self.Nmea.gsa[i_ens] = ''.join([chr(x) for x in f.read(int(num_2_read))]) + self.Nmea.gsa[i_ens] = "".join( + [chr(x) for x in f.read(int(num_2_read))] + ) - # Check if more data types need to be read and position the pointer - self.end_reading(f, file_loc, i_data_types, i_ens, bytes_per_ens) + # Check if more data types need to be read and + # position the pointer + self.end_reading( + f, file_loc, i_data_types, i_ens, bytes_per_ens + ) # Surface cells: cell data - elif leader_id == '0x10': + elif leader_id == "0x10": # Update data types counter i_data_types += 1 - self.Surface.no_cells[i_ens] = np.fromfile(f, np.uint8, count=1)[0] - self.Surface.cell_size_cm[i_ens] = np.fromfile(f, np.uint16, count=1)[0] - self.Surface.dist_bin1_cm[i_ens] = 
np.fromfile(f, np.uint16, count=1)[0] - - # Check if more data types need to be read and position the pointer - self.end_reading(f, file_loc, i_data_types, i_ens, bytes_per_ens) + self.Surface.no_cells[i_ens] = np.fromfile( + f, np.uint8, count=1 + )[0] + self.Surface.cell_size_cm[i_ens] = np.fromfile( + f, np.uint16, count=1 + )[0] + self.Surface.dist_bin1_cm[i_ens] = np.fromfile( + f, np.uint16, count=1 + )[0] + + # Check if more data types need to be read and + # position the pointer + self.end_reading( + f, file_loc, i_data_types, i_ens, bytes_per_ens + ) # Surface cells: velocity data - elif leader_id == '0x110': + elif leader_id == "0x110": # Update data types counter i_data_types += 1 - dummy = np.fromfile(f, np.int16, count=int((self.Surface.no_cells[i_ens]*4))) - dummy = np.reshape(dummy, [int(self.Surface.no_cells[i_ens]), n_velocities]) - self.Surface.vel_mps[:n_velocities, :int(self.Surface.no_cells[i_ens]), i_ens] = dummy.T - - # Check if more data types need to be read and position the pointer - self.end_reading(f, file_loc, i_data_types, i_ens, bytes_per_ens) + dummy = np.fromfile( + f, + np.int16, + count=int((self.Surface.no_cells[i_ens] * 4)), + ) + dummy = np.reshape( + dummy, [int(self.Surface.no_cells[i_ens]), n_velocities] + ) + self.Surface.vel_mps[ + :n_velocities, + : int(self.Surface.no_cells[i_ens]), + i_ens, + ] = dummy.T + + # Check if more data types need to be read and + # position the pointer + self.end_reading( + f, file_loc, i_data_types, i_ens, bytes_per_ens + ) # Surface cells: correlation magnitude - elif leader_id == '0x210': + elif leader_id == "0x210": # Update data types counter i_data_types += 1 - dummy = np.fromfile(f, np.uint8, count=int((self.Surface.no_cells[i_ens]*4))) - dummy = np.reshape(dummy, [int(self.Surface.no_cells[i_ens]), n_velocities]) - self.Surface.corr[:n_velocities, :int(self.Surface.no_cells[i_ens]), i_ens] = dummy.T - - # Check if more data types need to be read and position the pointer - self.end_reading(f, file_loc, i_data_types, i_ens, bytes_per_ens) + dummy = np.fromfile( + f, + np.uint8, + count=int((self.Surface.no_cells[i_ens] * 4)), + ) + dummy = np.reshape( + dummy, [int(self.Surface.no_cells[i_ens]), n_velocities] + ) + self.Surface.corr[ + :n_velocities, + : int(self.Surface.no_cells[i_ens]), + i_ens, + ] = dummy.T + + # Check if more data types need to be read and + # position the pointer + self.end_reading( + f, file_loc, i_data_types, i_ens, bytes_per_ens + ) # Surface cells: echo intensity - elif leader_id == '0x310': + elif leader_id == "0x310": # Update data types counter i_data_types += 1 - dummy = np.fromfile(f, np.uint8, count=int((self.Surface.no_cells[i_ens]*4))) - dummy = np.reshape(dummy, [int(self.Surface.no_cells[i_ens]), n_velocities]) - self.Surface.rssi[:n_velocities, :int(self.Surface.no_cells[i_ens]), i_ens] = dummy.T - - # Check if more data types need to be read and position the pointer - self.end_reading(f, file_loc, i_data_types, i_ens, bytes_per_ens) + dummy = np.fromfile( + f, + np.uint8, + count=int((self.Surface.no_cells[i_ens] * 4)), + ) + dummy = np.reshape( + dummy, [int(self.Surface.no_cells[i_ens]), n_velocities] + ) + self.Surface.rssi[ + :n_velocities, + : int(self.Surface.no_cells[i_ens]), + i_ens, + ] = dummy.T + + # Check if more data types need to be read and + # position the pointer + self.end_reading( + f, file_loc, i_data_types, i_ens, bytes_per_ens + ) # Surface cells: percent good - elif leader_id == '0x410': + elif leader_id == "0x410": # Update data types counter 
i_data_types += 1 - dummy = np.fromfile(f, np.uint8, count=int((self.Surface.no_cells[i_ens]*4))) - dummy = np.reshape(dummy, [int(self.Surface.no_cells[i_ens]), n_velocities]) - self.Surface.pergd[:n_velocities, :self.Surface.no_cells[i_ens], i_ens] = dummy.T - - # Check if more data types need to be read and position the pointer - self.end_reading(f, file_loc, i_data_types, i_ens, bytes_per_ens) + dummy = np.fromfile( + f, + np.uint8, + count=int((self.Surface.no_cells[i_ens] * 4)), + ) + dummy = np.reshape( + dummy, [int(self.Surface.no_cells[i_ens]), n_velocities] + ) + self.Surface.pergd[ + :n_velocities, : self.Surface.no_cells[i_ens], i_ens + ] = dummy.T + + # Check if more data types need to be read and + # position the pointer + self.end_reading( + f, file_loc, i_data_types, i_ens, bytes_per_ens + ) # Undefined data skipped - elif leader_id == '0x510': + elif leader_id == "0x510": # Update data types counter i_data_types += 1 - # Check if more data types need to be read and position the pointer - self.end_reading(f, file_loc, i_data_types, i_ens, bytes_per_ens) + # Check if more data types need to be read and + # position the pointer + self.end_reading( + f, file_loc, i_data_types, i_ens, bytes_per_ens + ) # Automatic mode configuration - elif leader_id == '0x4401': + elif leader_id == "0x4401": # Update data types counter i_data_types += 1 - self.AutoMode.beam_count[i_ens] = np.fromfile(f, np.uint8, count=1)[0] - - self.AutoMode.Beam1.mode[i_ens] = np.fromfile(f, np.uint8, count=1)[0] - self.AutoMode.Beam1.depth_cm[i_ens] = np.fromfile(f, np.uint16, count=1)[0] - self.AutoMode.Beam1.ping_count[i_ens] = np.fromfile(f, np.uint8, count=1)[0] - self.AutoMode.Beam1.ping_type[i_ens] = np.fromfile(f, np.uint8, count=1)[0] - self.AutoMode.Beam1.cell_count[i_ens] = np.fromfile(f, np.uint16, count=1)[0] - self.AutoMode.Beam1.cell_size_cm[i_ens] = np.fromfile(f, np.uint16, count=1)[0] - self.AutoMode.Beam1.cell_mid_cm[i_ens] = np.fromfile(f, np.uint16, count=1)[0] - self.AutoMode.Beam1.code_repeat[i_ens] = np.fromfile(f, np.uint8, count=1)[0] - self.AutoMode.Beam1.trans_length_cm[i_ens] = np.fromfile(f, np.uint16, count=1)[0] - self.AutoMode.Beam1.lag_length_cm[i_ens] = np.fromfile(f, np.uint16, count=1)[0] - self.AutoMode.Beam1.transmit_bw[i_ens] = np.fromfile(f, np.uint8, count=1)[0] - self.AutoMode.Beam1.receive_bw[i_ens] = np.fromfile(f, np.uint8, count=1)[0] - self.AutoMode.Beam1.ping_interval_ms[i_ens] = np.fromfile(f, np.uint16, count=1)[0] - - self.AutoMode.Beam2.mode[i_ens] = np.fromfile(f, np.uint8, count=1)[0] - self.AutoMode.Beam2.depth_cm[i_ens] = np.fromfile(f, np.uint16, count=1)[0] - self.AutoMode.Beam2.ping_count[i_ens] = np.fromfile(f, np.uint8, count=1)[0] - self.AutoMode.Beam2.ping_type[i_ens] = np.fromfile(f, np.uint8, count=1)[0] - self.AutoMode.Beam2.cell_count[i_ens] = np.fromfile(f, np.uint16, count=1)[0] - self.AutoMode.Beam2.cell_size_cm[i_ens] = np.fromfile(f, np.uint16, count=1)[0] - self.AutoMode.Beam2.cell_mid_cm[i_ens] = np.fromfile(f, np.uint16, count=1)[0] - self.AutoMode.Beam2.code_repeat[i_ens] = np.fromfile(f, np.uint8, count=1)[0] - self.AutoMode.Beam2.trans_length_cm[i_ens] = np.fromfile(f, np.uint16, count=1)[0] - self.AutoMode.Beam2.lag_length_cm[i_ens] = np.fromfile(f, np.uint16, count=1)[0] - self.AutoMode.Beam2.transmit_bw[i_ens] = np.fromfile(f, np.uint8, count=1)[0] - self.AutoMode.Beam2.receive_bw[i_ens] = np.fromfile(f, np.uint8, count=1)[0] - self.AutoMode.Beam2.ping_interval_ms[i_ens] = np.fromfile(f, np.uint16, count=1)[0] - - 
self.AutoMode.Beam3.mode[i_ens] = np.fromfile(f, np.uint8, count=1)[0] - self.AutoMode.Beam3.depth_cm[i_ens] = np.fromfile(f, np.uint16, count=1)[0] - self.AutoMode.Beam3.ping_count[i_ens] = np.fromfile(f, np.uint8, count=1)[0] - self.AutoMode.Beam3.ping_type[i_ens] = np.fromfile(f, np.uint8, count=1)[0] - self.AutoMode.Beam3.cell_count[i_ens] = np.fromfile(f, np.uint16, count=1)[0] - self.AutoMode.Beam3.cell_size_cm[i_ens] = np.fromfile(f, np.uint16, count=1)[0] - self.AutoMode.Beam3.cell_mid_cm[i_ens] = np.fromfile(f, np.uint16, count=1)[0] - self.AutoMode.Beam3.code_repeat[i_ens] = np.fromfile(f, np.uint8, count=1)[0] - self.AutoMode.Beam3.trans_length_cm[i_ens] = np.fromfile(f, np.uint16, count=1)[0] - self.AutoMode.Beam3.lag_length_cm[i_ens] = np.fromfile(f, np.uint16, count=1)[0] - self.AutoMode.Beam3.transmit_bw[i_ens] = np.fromfile(f, np.uint8, count=1)[0] - self.AutoMode.Beam3.receive_bw[i_ens] = np.fromfile(f, np.uint8, count=1)[0] - self.AutoMode.Beam3.ping_interval_ms[i_ens] = np.fromfile(f, np.uint16, count=1)[0] - - self.AutoMode.Beam4.mode[i_ens] = np.fromfile(f, np.uint8, count=1)[0] - self.AutoMode.Beam4.depth_cm[i_ens] = np.fromfile(f, np.uint16, count=1)[0] - self.AutoMode.Beam4.ping_count[i_ens] = np.fromfile(f, np.uint8, count=1)[0] - self.AutoMode.Beam4.ping_type[i_ens] = np.fromfile(f, np.uint8, count=1)[0] - self.AutoMode.Beam4.cell_count[i_ens] = np.fromfile(f, np.uint16, count=1)[0] - self.AutoMode.Beam4.cell_size_cm[i_ens] = np.fromfile(f, np.uint16, count=1)[0] - self.AutoMode.Beam4.cell_mid_cm[i_ens] = np.fromfile(f, np.uint16, count=1)[0] - self.AutoMode.Beam4.code_repeat[i_ens] = np.fromfile(f, np.uint8, count=1)[0] - self.AutoMode.Beam4.trans_length_cm[i_ens] = np.fromfile(f, np.uint16, count=1)[0] - self.AutoMode.Beam4.lag_length_cm[i_ens] = np.fromfile(f, np.uint16, count=1)[0] - self.AutoMode.Beam4.transmit_bw[i_ens] = np.fromfile(f, np.uint8, count=1)[0] - self.AutoMode.Beam4.receive_bw[i_ens] = np.fromfile(f, np.uint8, count=1)[0] - self.AutoMode.Beam4.ping_interval_ms[i_ens] = np.fromfile(f, np.uint16, count=1)[0] - - self.AutoMode.Reserved[i_ens] = np.fromfile(f, np.uint8, count=1)[0] - - # Check if more data types need to be read and position the pointer - self.end_reading(f, file_loc, i_data_types, i_ens, bytes_per_ens) + self.AutoMode.beam_count[i_ens] = np.fromfile( + f, np.uint8, count=1 + )[0] + + self.AutoMode.Beam1.mode[i_ens] = np.fromfile( + f, np.uint8, count=1 + )[0] + self.AutoMode.Beam1.depth_cm[i_ens] = np.fromfile( + f, np.uint16, count=1 + )[0] + self.AutoMode.Beam1.ping_count[i_ens] = np.fromfile( + f, np.uint8, count=1 + )[0] + self.AutoMode.Beam1.ping_type[i_ens] = np.fromfile( + f, np.uint8, count=1 + )[0] + self.AutoMode.Beam1.cell_count[i_ens] = np.fromfile( + f, np.uint16, count=1 + )[0] + self.AutoMode.Beam1.cell_size_cm[i_ens] = np.fromfile( + f, np.uint16, count=1 + )[0] + self.AutoMode.Beam1.cell_mid_cm[i_ens] = np.fromfile( + f, np.uint16, count=1 + )[0] + self.AutoMode.Beam1.code_repeat[i_ens] = np.fromfile( + f, np.uint8, count=1 + )[0] + self.AutoMode.Beam1.trans_length_cm[i_ens] = np.fromfile( + f, np.uint16, count=1 + )[0] + self.AutoMode.Beam1.lag_length_cm[i_ens] = np.fromfile( + f, np.uint16, count=1 + )[0] + self.AutoMode.Beam1.transmit_bw[i_ens] = np.fromfile( + f, np.uint8, count=1 + )[0] + self.AutoMode.Beam1.receive_bw[i_ens] = np.fromfile( + f, np.uint8, count=1 + )[0] + self.AutoMode.Beam1.ping_interval_ms[i_ens] = np.fromfile( + f, np.uint16, count=1 + )[0] + + self.AutoMode.Beam2.mode[i_ens] = np.fromfile( 
+ f, np.uint8, count=1 + )[0] + self.AutoMode.Beam2.depth_cm[i_ens] = np.fromfile( + f, np.uint16, count=1 + )[0] + self.AutoMode.Beam2.ping_count[i_ens] = np.fromfile( + f, np.uint8, count=1 + )[0] + self.AutoMode.Beam2.ping_type[i_ens] = np.fromfile( + f, np.uint8, count=1 + )[0] + self.AutoMode.Beam2.cell_count[i_ens] = np.fromfile( + f, np.uint16, count=1 + )[0] + self.AutoMode.Beam2.cell_size_cm[i_ens] = np.fromfile( + f, np.uint16, count=1 + )[0] + self.AutoMode.Beam2.cell_mid_cm[i_ens] = np.fromfile( + f, np.uint16, count=1 + )[0] + self.AutoMode.Beam2.code_repeat[i_ens] = np.fromfile( + f, np.uint8, count=1 + )[0] + self.AutoMode.Beam2.trans_length_cm[i_ens] = np.fromfile( + f, np.uint16, count=1 + )[0] + self.AutoMode.Beam2.lag_length_cm[i_ens] = np.fromfile( + f, np.uint16, count=1 + )[0] + self.AutoMode.Beam2.transmit_bw[i_ens] = np.fromfile( + f, np.uint8, count=1 + )[0] + self.AutoMode.Beam2.receive_bw[i_ens] = np.fromfile( + f, np.uint8, count=1 + )[0] + self.AutoMode.Beam2.ping_interval_ms[i_ens] = np.fromfile( + f, np.uint16, count=1 + )[0] + + self.AutoMode.Beam3.mode[i_ens] = np.fromfile( + f, np.uint8, count=1 + )[0] + self.AutoMode.Beam3.depth_cm[i_ens] = np.fromfile( + f, np.uint16, count=1 + )[0] + self.AutoMode.Beam3.ping_count[i_ens] = np.fromfile( + f, np.uint8, count=1 + )[0] + self.AutoMode.Beam3.ping_type[i_ens] = np.fromfile( + f, np.uint8, count=1 + )[0] + self.AutoMode.Beam3.cell_count[i_ens] = np.fromfile( + f, np.uint16, count=1 + )[0] + self.AutoMode.Beam3.cell_size_cm[i_ens] = np.fromfile( + f, np.uint16, count=1 + )[0] + self.AutoMode.Beam3.cell_mid_cm[i_ens] = np.fromfile( + f, np.uint16, count=1 + )[0] + self.AutoMode.Beam3.code_repeat[i_ens] = np.fromfile( + f, np.uint8, count=1 + )[0] + self.AutoMode.Beam3.trans_length_cm[i_ens] = np.fromfile( + f, np.uint16, count=1 + )[0] + self.AutoMode.Beam3.lag_length_cm[i_ens] = np.fromfile( + f, np.uint16, count=1 + )[0] + self.AutoMode.Beam3.transmit_bw[i_ens] = np.fromfile( + f, np.uint8, count=1 + )[0] + self.AutoMode.Beam3.receive_bw[i_ens] = np.fromfile( + f, np.uint8, count=1 + )[0] + self.AutoMode.Beam3.ping_interval_ms[i_ens] = np.fromfile( + f, np.uint16, count=1 + )[0] + + self.AutoMode.Beam4.mode[i_ens] = np.fromfile( + f, np.uint8, count=1 + )[0] + self.AutoMode.Beam4.depth_cm[i_ens] = np.fromfile( + f, np.uint16, count=1 + )[0] + self.AutoMode.Beam4.ping_count[i_ens] = np.fromfile( + f, np.uint8, count=1 + )[0] + self.AutoMode.Beam4.ping_type[i_ens] = np.fromfile( + f, np.uint8, count=1 + )[0] + self.AutoMode.Beam4.cell_count[i_ens] = np.fromfile( + f, np.uint16, count=1 + )[0] + self.AutoMode.Beam4.cell_size_cm[i_ens] = np.fromfile( + f, np.uint16, count=1 + )[0] + self.AutoMode.Beam4.cell_mid_cm[i_ens] = np.fromfile( + f, np.uint16, count=1 + )[0] + self.AutoMode.Beam4.code_repeat[i_ens] = np.fromfile( + f, np.uint8, count=1 + )[0] + self.AutoMode.Beam4.trans_length_cm[i_ens] = np.fromfile( + f, np.uint16, count=1 + )[0] + self.AutoMode.Beam4.lag_length_cm[i_ens] = np.fromfile( + f, np.uint16, count=1 + )[0] + self.AutoMode.Beam4.transmit_bw[i_ens] = np.fromfile( + f, np.uint8, count=1 + )[0] + self.AutoMode.Beam4.receive_bw[i_ens] = np.fromfile( + f, np.uint8, count=1 + )[0] + self.AutoMode.Beam4.ping_interval_ms[i_ens] = np.fromfile( + f, np.uint16, count=1 + )[0] + + self.AutoMode.Reserved[i_ens] = np.fromfile( + f, np.uint8, count=1 + )[0] + + # Check if more data types need to be read and + # position the pointer + self.end_reading( + f, file_loc, i_data_types, i_ens, bytes_per_ens + ) # 
Vertical beam - elif leader_id == '0x4100': + elif leader_id == "0x4100": # Update data types counter i_data_types += 1 - self.Sensor.vert_beam_eval_amp[i_ens] = np.fromfile(f, np.uint8, count=1)[0] - self.Sensor.vert_beam_RSSI_amp[i_ens] = np.fromfile(f, np.uint8, count=1)[0] - self.Sensor.vert_beam_range_m[i_ens] = np.fromfile(f, np.uint32, count=1)[0] / 1000 - temp = "{0:08b}".format(np.fromfile(f, np.uint8, count=1)[0]) + self.Sensor.vert_beam_eval_amp[i_ens] = np.fromfile( + f, np.uint8, count=1 + )[0] + self.Sensor.vert_beam_RSSI_amp[i_ens] = np.fromfile( + f, np.uint8, count=1 + )[0] + self.Sensor.vert_beam_range_m[i_ens] = ( + np.fromfile(f, np.uint32, count=1)[0] / 1000 + ) + temp = "{0:08b}".format( + np.fromfile(f, np.uint8, count=1)[0] + ) self.Sensor.vert_beam_status[i_ens] = int(temp[6:], 2) - if temp[5] == '0': - self.Sensor.vert_beam_gain[i_ens] = 'L' + if temp[5] == "0": + self.Sensor.vert_beam_gain[i_ens] = "L" else: - self.Sensor.vert_beam_gain[i_ens] = 'H' + self.Sensor.vert_beam_gain[i_ens] = "H" - # Check if more data types need to be read and position the pointer - self.end_reading(f, file_loc, i_data_types, i_ens, bytes_per_ens) + # Check if more data types need to be read and + # position the pointer + self.end_reading( + f, file_loc, i_data_types, i_ens, bytes_per_ens + ) # Transformation matrix - elif leader_id == '0x3200': + elif leader_id == "0x3200": # Update data types counter i_data_types += 1 - self.Inst.t_matrix[0, :] = np.fromfile(f, np.int16, count=4) * .0001 - self.Inst.t_matrix[1, :] = np.fromfile(f, np.int16, count=4) * .0001 - self.Inst.t_matrix[2, :] = np.fromfile(f, np.int16, count=4) * .0001 - self.Inst.t_matrix[3, :] = np.fromfile(f, np.int16, count=4) * .0001 - - # Check if more data types need to be read and position the pointer - self.end_reading(f, file_loc, i_data_types, i_ens, bytes_per_ens) + self.Inst.t_matrix[0, :] = ( + np.fromfile(f, np.int16, count=4) * 0.0001 + ) + self.Inst.t_matrix[1, :] = ( + np.fromfile(f, np.int16, count=4) * 0.0001 + ) + self.Inst.t_matrix[2, :] = ( + np.fromfile(f, np.int16, count=4) * 0.0001 + ) + self.Inst.t_matrix[3, :] = ( + np.fromfile(f, np.int16, count=4) * 0.0001 + ) + + # Check if more data types need to be read and + # position the pointer + self.end_reading( + f, file_loc, i_data_types, i_ens, bytes_per_ens + ) else: @@ -1241,33 +1894,51 @@ class Pd0TRDI(object): # Find next leader ID if (i_data_types + 1) <= self.Hdr.n_data_types[i_ens]: # Reposition file pointer for next data type - f.seek(int(self.Hdr.data_offsets[i_ens, i_data_types])+file_loc, 0) + f.seek( + int(self.Hdr.data_offsets[i_ens, i_data_types]) + + file_loc, + 0, + ) else: if f.tell() < end_file: - # Locate next ensemble if no more data types - if i_data_types + 1 > self.Hdr.n_data_types[i_ens] + 1: + # Locate next ensemble if no more data + # types + if ( + i_data_types + 1 + > self.Hdr.n_data_types[i_ens] + 1 + ): current_loc = f.tell() - srch_string = struct.unpack('B'*(end_file-current_loc), - f.read(end_file-current_loc)) - hex_string = ''.join([hex(x) for x in srch_string]) - - next_ens = hex_string.find('0x7f7f') + srch_string = struct.unpack( + "B" * (end_file - current_loc), + f.read(end_file - current_loc), + ) + hex_string = "".join( + [hex(x) for x in srch_string] + ) + + next_ens = hex_string.find("0x7f7f") if next_ens > 0: next_ens = int((next_ens - 1) / 2) - f.seek(current_loc+next_ens, 0) + f.seek(current_loc + next_ens, 0) i_data_types = 0 else: end_file_check = end_file + 1 else: - f.seek(file_loc+bytes_per_ens-2, 
0) + f.seek(file_loc + bytes_per_ens - 2, 0) - # If all data types have been read, read last two bytes of ensemble + # If all data types have been read, read last two + # bytes of ensemble if i_ens <= len(self.Hdr.n_data_types): - if i_data_types >= self.Hdr.n_data_types[i_ens] and f.tell() <= end_file: + if ( + i_data_types >= self.Hdr.n_data_types[i_ens] + and f.tell() <= end_file + ): try: - self.Inst.res_RDI = np.fromfile(f, np.uint16, count=1)[0] + self.Inst.res_RDI = np.fromfile( + f, np.uint16, count=1 + )[0] # Read checksum but not used _ = np.fromfile(f, np.uint16, count=1)[0] except (ValueError, EOFError, IndexError): @@ -1296,41 +1967,52 @@ class Pd0TRDI(object): # Correct Bt.depth_m for RiverRay data if not np.isnan(rr_bt_depth_correction).any(): - rr_bt_depth_correction[rr_bt_depth_correction == (-32768 * 2e16) / 100] = np.nan + rr_bt_depth_correction[ + rr_bt_depth_correction == (-32768 * 2e16) / 100 + ] = np.nan self.Bt.depth_m += rr_bt_depth_correction - # Remove bad data from Surface structure (RR), convert where needed + # Remove bad data from Surface structure (RR), convert + # where needed self.Surface.vel_mps[self.Surface.vel_mps == -32768] = np.nan self.Surface.vel_mps = self.Surface.vel_mps / 1000 self.Surface.corr[self.Surface.corr == -32768] = np.nan self.Surface.rssi[self.Surface.rssi == -32768] = np.nan self.Surface.pergd[self.Surface.pergd == -32768] = np.nan - # If requested compute WR2 compatible GPS-based boat velocities + # If requested compute WR2 compatible GPS-based boat + # velocities if wr2: - # If vtg data are available compute north and east components - if self.Gps2.vtg_header[0, 0] == '$': + # If vtg data are available compute north and east + # components + if self.Gps2.vtg_header[0, 0] == "$": - # Find minimum of absolute value of delta time from raw data + # Find minimum of absolute value of delta time + # from raw data vtg_delta_time = np.abs(self.Gps2.vtg_delta_time) vtg_min = np.nanmin(vtg_delta_time, 1) # Compute the velocity components in m/s for i in range(len(vtg_delta_time)): idx = np.where(vtg_delta_time == vtg_min)[0][0] - self.Gps2.vtg_velE_mps[i], self.Gps2.vtg_velN_mps[i] = \ - pol2cart((90 - self.Gps2.course_true[i, idx])*np.pi/180, - self.Gps2.speed_kph[i, idx] * 0.2777778) + ( + self.Gps2.vtg_velE_mps[i], + self.Gps2.vtg_velN_mps[i], + ) = pol2cart( + (90 - self.Gps2.course_true[i, idx]) * np.pi / 180, + self.Gps2.speed_kph[i, idx] * 0.2777778, + ) - if self.Gps2.gga_header[0, 0] == '$': + if self.Gps2.gga_header[0, 0] == "$": # Initialize constants e_radius = 6378137 coeff = e_radius * np.pi / 180 ellip = 1 / 298.257223563 - # Find minimum of absolute value of delta time from raw data + # Find minimum of absolute value of delta time + # from raw data gga_delta_time = np.abs(self.Gps2.gga_delta_time) gga_min = np.nanmin(gga_delta_time, axis=1) @@ -1338,15 +2020,35 @@ class Pd0TRDI(object): for i in range(len(gga_delta_time)): idx = np.where(gga_delta_time[i:] == gga_min) if idx > 0: - lat_avg_rad = (self.Gps2.lat_deg[i, idx[i]] - + self.Gps2.lat_deg[i - 1, idx[i - 1]]) / 2 + lat_avg_rad = ( + self.Gps2.lat_deg[i, idx[i]] + + self.Gps2.lat_deg[i - 1, idx[i - 1]] + ) / 2 sin_lat_avg_rad = np.sin(np.deg2rad(lat_avg_rad)) - r_e = coeff * (1 + ellip * sin_lat_avg_rad * sin_lat_avg_rad) - rn = coeff * (1 - 2 * ellip + 3 * ellip * sin_lat_avg_rad * sin_lat_avg_rad) - dx = r_e * (self.Gps2.lon_deg[i, idx[i]] - - self.Gps2.lon_deg(i-1, idx[i-1])) * np.cos(np.deg2rad(lat_avg_rad)) - dy = rn * (self.Gps2.lat_deg[i, idx[i]] - 
self.Gps2.lat_deg[i - 1, idx[i - 1]])
-                            dt = self.Gps2.utc[i, idx[i]] - self.Gps2.utc[i-1, idx[i-1]]
+                            r_e = coeff * (
+                                1 + ellip * sin_lat_avg_rad * sin_lat_avg_rad
+                            )
+                            rn = coeff * (
+                                1
+                                - 2 * ellip
+                                + 3 * ellip * sin_lat_avg_rad * sin_lat_avg_rad
+                            )
+                            dx = (
+                                r_e
+                                * (
+                                    self.Gps2.lon_deg[i, idx[i]]
+                                    - self.Gps2.lon_deg[i - 1, idx[i - 1]]
+                                )
+                                * np.cos(np.deg2rad(lat_avg_rad))
+                            )
+                            dy = rn * (
+                                self.Gps2.lat_deg[i, idx[i]]
+                                - self.Gps2.lat_deg[i - 1, idx[i - 1]]
+                            )
+                            dt = (
+                                self.Gps2.utc[i, idx[i]]
+                                - self.Gps2.utc[i - 1, idx[i - 1]]
+                            )
                             self.Gps2.gga_velE_mps[i] = dx / dt
                             self.Gps2.gga_velN_mps[i] = dy / dt
                         else:
@@ -1371,10 +2073,10 @@ class Pd0TRDI(object):
         """
         i = 0
-        leader_id = '0000'
-
+        leader_id = "0000"
+
         # Find the first ensemble
-        while leader_id != '0x7f7f' and i < f_size:
+        while leader_id != "0x7f7f" and i < f_size:
             f.seek(i, 0)
             i = i + 1
             leader_id = hex(np.fromfile(f, np.uint16, count=1)[0])
@@ -1384,11 +2086,11 @@ class Pd0TRDI(object):

         # Find last ensemble
         i = 0
-        leader_id = '0000'
+        leader_id = "0000"
         last_num = -1
-
+
         while last_num < 0:
-            while leader_id != '0x7f7f' and i < f_size:
+            while leader_id != "0x7f7f" and i < f_size:
                 i = i + 1
                 f.seek(-i, 2)

@@ -1400,17 +2102,19 @@ class Pd0TRDI(object):
             last_num = Pd0TRDI.find_ens_no(f)
             if last_num is None or np.isnan(last_num):
                 last_num = -1
-
-            leader_id = '0000'
-        n_ensembles = last_num-first_num+1
+
+            leader_id = "0000"
+        n_ensembles = last_num - first_num + 1

         return n_ensembles

     @staticmethod
     def find_ens_no(f):
-        """This function assumes the current position of the file pointer is just
+        """This function assumes the current position of the file pointer is
+        just
         after '7F7F'. The function then reads the ensemble header and
-        works through the data offsets until the 00800 data type is found. The
+        works through the data offsets until the 0x80 data type is
+        found. The
         ensemble number is then read.
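+
+        For example, if the header lists three data types at offsets
+        [18, 77, 142], the loop seeks to fileloc + offset for each entry,
+        reads the two-byte leader ID found there, and stops when that ID
+        is 0x80; the ensemble number is the uint16 immediately after it.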
Parameters @@ -1431,7 +2135,7 @@ class Pd0TRDI(object): if Pd0TRDI.check_sum(f, fileloc): # Read header information - f.seek(fileloc+5, 0) + f.seek(fileloc + 5, 0) n_data_types = np.fromfile(f, np.uint8, count=1)[0] data_offsets = [] for x in range(n_data_types): @@ -1439,17 +2143,16 @@ class Pd0TRDI(object): # Initialize variables i = 0 - leader_id = '0000' + leader_id = "0000" # Search for 0x80 - while leader_id != '0x80' and i < n_data_types: - - f.seek(data_offsets[i]+fileloc, 0) + while leader_id != "0x80" and i < n_data_types: + f.seek(data_offsets[i] + fileloc, 0) leader_id = hex(np.fromfile(f, np.uint16, count=1)[0]) i = i + 1 - + # Read ensemble number from data type 0x80 - if leader_id == '0x80': + if leader_id == "0x80": ensemble_num = np.fromfile(f, np.uint16, count=1)[0] else: @@ -1478,42 +2181,43 @@ class Pd0TRDI(object): """ try: - + if bytes_per_ens is None: - bytes_per_ens = np.fromfile(f, np.uint16, count=1)[0] - # Go to file location from the beginning of file + bytes_per_ens = np.fromfile(f, np.uint16, count=1)[0] + # Go to file location from the beginning of file f.seek(fileloc, 0) - + # Read in the values for all of the bytes an get a check sum test_b = [] x = f.read(bytes_per_ens) for y in x: test_b.append(y) - + check_sum = sum(test_b) check_h = hex(check_sum)[2:] - - # Check for a hex that is greater than 4 (including L indicator at the end) + + # Check for a hex that is greater than 4 (including L indicator + # at the end) if len(check_h) > 4: - + # Seek to location of check sum and compared to computed - if check_h[-1] == 'L': + if check_h[-1] == "L": check_h = check_h[:-1] - - f.seek(fileloc+bytes_per_ens, 0) - check_sum = np.fromfile(f, np.uint16, count=1)[0] - if int('0x'+check_h[1:], 16) == check_sum: + + f.seek(fileloc + bytes_per_ens, 0) + check_sum = np.fromfile(f, np.uint16, count=1)[0] + if int("0x" + check_h[1:], 16) == check_sum: return True else: return False elif len(check_h) > 3: # Seek to location of check sum and compared to computed - if check_h[-1] == 'L': + if check_h[-1] == "L": check_h = check_h[:-1] - - f.seek(fileloc+bytes_per_ens, 0) - check_sum = np.fromfile(f, np.uint16, count=1)[0] - if int('0x'+check_h, 16) == check_sum: + + f.seek(fileloc + bytes_per_ens, 0) + check_sum = np.fromfile(f, np.uint16, count=1)[0] + if int("0x" + check_h, 16) == check_sum: return True else: return False @@ -1534,9 +2238,9 @@ class Pd0TRDI(object): Location in file """ - search_id = ' ' - search_loc = file_loc+2 - while search_id != '0x7f7f': + search_id = " " + search_loc = file_loc + 2 + while search_id != "0x7f7f": f.seek(search_loc, 0) search_loc += 1 try: @@ -1544,7 +2248,7 @@ class Pd0TRDI(object): except (ValueError, EOFError): continue f.seek(search_loc, 0) - + def end_reading(self, f, file_loc, i_data_types, i_ens, bytes_per_ens): """Checks if more data types need to be read and position file pointer. 
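# The decoding loop above and end_reading below both rely on the PD0
# header layout used throughout this class: bytes 0-1 hold the 0x7F7F
# marker, byte 5 the number of data types, and a table of uint16 offsets
# (one per data type, measured from the marker) starts at byte 6. A
# minimal self-contained sketch of that traversal; the function name and
# the usage shown are illustrative only, not part of QRevInt:
#
#     import numpy as np
#
#     def leader_ids(f, fileloc):
#         """Yield the two-byte leader ID of each data type in an ensemble."""
#         # Number of data types is the byte at offset 5 from the marker
#         f.seek(fileloc + 5, 0)
#         n_data_types = int(np.fromfile(f, np.uint8, count=1)[0])
#         # The offset table of uint16 values immediately follows
#         offsets = np.fromfile(f, np.uint16, count=n_data_types)
#         for offset in offsets:
#             # Each offset is relative to the 0x7F7F marker position
#             f.seek(fileloc + int(offset), 0)
#             yield hex(np.fromfile(f, np.uint16, count=1)[0])
#
#     with open("transect.pd0", "rb") as f:  # file name illustrative
#         ids = list(leader_ids(f, fileloc=0))  # e.g. ['0x0', '0x80', ...]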
@@ -1563,9 +2267,9 @@ class Pd0TRDI(object): """ if i_data_types + 1 <= self.Hdr.n_data_types[i_ens]: - f.seek(int(self.Hdr.data_offsets[i_ens, i_data_types])+file_loc, 0) + f.seek(int(self.Hdr.data_offsets[i_ens, i_data_types]) + file_loc, 0) else: - f.seek(file_loc+bytes_per_ens-2, 0) + f.seek(file_loc + bytes_per_ens - 2, 0) class Hdr(object): @@ -1599,7 +2303,7 @@ class Hdr(object): self.data_offsets = nans([n_ensembles, n_types]) self.n_data_types = nans(n_ensembles) self.data_ok = nans(n_ensembles) - self.invalid = [''] * n_ensembles + self.invalid = [""] * n_ensembles class Inst(object): @@ -1641,13 +2345,13 @@ class Inst(object): """ self.beam_ang = nans(n_ensembles) self.beams = nans(n_ensembles) - self.data_type = [''] * n_ensembles + self.data_type = [""] * n_ensembles self.firm_ver = nans(n_ensembles) self.freq = nans(n_ensembles) - self.pat = [''] * n_ensembles + self.pat = [""] * n_ensembles self.res_RDI = 0 self.sensor_CFG = nans(n_ensembles) - self.xducer = [''] * n_ensembles + self.xducer = [""] * n_ensembles self.t_matrix = np.tile([np.nan], [4, 4]) self.demod = nans(n_ensembles) @@ -1900,31 +2604,31 @@ class Cfg(object): self.bp = nans(n_ensembles) self.bx_dm = nans(n_ensembles) self.code_reps = nans(n_ensembles) - self.coord_sys = [''] * n_ensembles + self.coord_sys = [""] * n_ensembles self.cpu_ser_no = nans([n_ensembles, 8]) self.cq = nans(n_ensembles) self.cx = nans(n_ensembles) self.dist_bin1_cm = nans(n_ensembles) self.ea_deg = nans(n_ensembles) self.eb_deg = nans(n_ensembles) - self.sensor_avail = [''] * n_ensembles - self.ex = [''] * n_ensembles - self.ez = [''] * n_ensembles - self.head_src = [''] * n_ensembles + self.sensor_avail = [""] * n_ensembles + self.ex = [""] * n_ensembles + self.ez = [""] * n_ensembles + self.head_src = [""] * n_ensembles self.lag_cm = nans(n_ensembles) - self.map_bins = [''] * n_ensembles + self.map_bins = [""] * n_ensembles self.n_beams = nans(n_ensembles) - self.pitch_src = [''] * n_ensembles + self.pitch_src = [""] * n_ensembles self.ref_lay_end_cell = nans(n_ensembles) self.ref_lay_str_cell = nans(n_ensembles) - self.roll_src = [''] * n_ensembles - self.sal_src = [''] * n_ensembles + self.roll_src = [""] * n_ensembles + self.sal_src = [""] * n_ensembles self.wm = nans(n_ensembles) - self.sos_src = [''] * n_ensembles - self.temp_src = [''] * n_ensembles + self.sos_src = [""] * n_ensembles + self.temp_src = [""] * n_ensembles self.tp_sec = nans(n_ensembles) - self.use_3beam = [''] * n_ensembles - self.use_pr = [''] * n_ensembles + self.use_3beam = [""] * n_ensembles + self.use_pr = [""] * n_ensembles self.wa = nans(n_ensembles) self.wb = nans(n_ensembles) self.wc = nans(n_ensembles) @@ -1935,7 +2639,7 @@ class Cfg(object): self.wn = nans(n_ensembles) self.wp = nans(n_ensembles) self.ws_cm = nans(n_ensembles) - self.xdcr_dep_srs = [''] * n_ensembles + self.xdcr_dep_srs = [""] * n_ensembles self.xmit_pulse_cm = nans(n_ensembles) self.lag_near_bottom = nans(n_ensembles) @@ -2106,46 +2810,46 @@ class Gps2(object): """ self.gga_delta_time = np.full([n_ensembles, 20], np.nan) - self.gga_header = [x[:] for x in [[''] * 20] * n_ensembles] - self.gga_sentence = [x[:] for x in [[''] * 20] * n_ensembles] + self.gga_header = [x[:] for x in [[""] * 20] * n_ensembles] + self.gga_sentence = [x[:] for x in [[""] * 20] * n_ensembles] self.utc = np.full([n_ensembles, 20], np.nan) self.lat_deg = np.zeros([n_ensembles, 20]) - self.lat_ref = [x[:] for x in [[''] * 20] * n_ensembles] + self.lat_ref = [x[:] for x in [[""] * 20] * n_ensembles] self.lon_deg 
= np.zeros([n_ensembles, 20]) - self.lon_ref = [x[:] for x in [[''] * 20] * n_ensembles] + self.lon_ref = [x[:] for x in [[""] * 20] * n_ensembles] self.corr_qual = np.full([n_ensembles, 20], np.nan) self.num_sats = np.full([n_ensembles, 20], np.nan) self.hdop = np.full([n_ensembles, 20], np.nan) self.alt = np.full([n_ensembles, 20], np.nan) - self.alt_unit = [x[:] for x in [[''] * 20] * n_ensembles] + self.alt_unit = [x[:] for x in [[""] * 20] * n_ensembles] self.geoid = np.full([n_ensembles, 20], np.nan) - self.geoid_unit = [x[:] for x in [[''] * 20] * n_ensembles] + self.geoid_unit = [x[:] for x in [[""] * 20] * n_ensembles] self.d_gps_age = np.full([n_ensembles, 20], np.nan) self.ref_stat_id = np.full([n_ensembles, 20], np.nan) self.vtg_delta_time = np.full([n_ensembles, 20], np.nan) - self.vtg_header = [x[:] for x in [[''] * 20] * n_ensembles] - self.vtg_sentence = [x[:] for x in [[''] * 20] * n_ensembles] + self.vtg_header = [x[:] for x in [[""] * 20] * n_ensembles] + self.vtg_sentence = [x[:] for x in [[""] * 20] * n_ensembles] self.course_true = np.full([n_ensembles, 20], np.nan) - self.true_indicator = [x[:] for x in [[''] * 20] * n_ensembles] + self.true_indicator = [x[:] for x in [[""] * 20] * n_ensembles] self.course_mag = np.full([n_ensembles, 20], np.nan) - self.mag_indicator = [x[:] for x in [[''] * 20] * n_ensembles] + self.mag_indicator = [x[:] for x in [[""] * 20] * n_ensembles] self.speed_knots = np.full([n_ensembles, 20], np.nan) - self.knots_indicator = [x[:] for x in [[''] * 20] * n_ensembles] + self.knots_indicator = [x[:] for x in [[""] * 20] * n_ensembles] self.speed_kph = np.zeros([n_ensembles, 20]) - self.kph_indicator = [x[:] for x in [[''] * 20] * n_ensembles] - self.mode_indicator = [x[:] for x in [[''] * 20] * n_ensembles] + self.kph_indicator = [x[:] for x in [[""] * 20] * n_ensembles] + self.mode_indicator = [x[:] for x in [[""] * 20] * n_ensembles] self.dbt_delta_time = np.full([n_ensembles, 20], np.nan) - self.dbt_header = [x[:] for x in [[''] * 20] * n_ensembles] + self.dbt_header = [x[:] for x in [[""] * 20] * n_ensembles] self.depth_ft = np.full([n_ensembles, 20], np.nan) - self.ft_indicator = [x[:] for x in [[''] * 20] * n_ensembles] + self.ft_indicator = [x[:] for x in [[""] * 20] * n_ensembles] self.depth_m = np.zeros([n_ensembles, 20]) - self.m_indicator = [x[:] for x in [[''] * 20] * n_ensembles] + self.m_indicator = [x[:] for x in [[""] * 20] * n_ensembles] self.depth_fath = np.full([n_ensembles, 20], np.nan) - self.fath_indicator = [x[:] for x in [[''] * 20] * n_ensembles] + self.fath_indicator = [x[:] for x in [[""] * 20] * n_ensembles] self.hdt_delta_time = np.full([n_ensembles, 20], np.nan) - self.hdt_header = [x[:] for x in [[''] * 20] * n_ensembles] + self.hdt_header = [x[:] for x in [[""] * 20] * n_ensembles] self.heading_deg = np.full([n_ensembles, 20], np.nan) - self.h_true_indicator = [x[:] for x in [[''] * 20] * n_ensembles] + self.h_true_indicator = [x[:] for x in [[""] * 20] * n_ensembles] # if wr2: self.gga_velE_mps = nans(n_ensembles) @@ -2155,76 +2859,98 @@ class Gps2(object): def gga_expand(self, n_ensembles): self.gga_delta_time = np.concatenate( - (self.gga_delta_time, np.tile(np.nan, (1, n_ensembles)).T), axis=1) + (self.gga_delta_time, np.tile(np.nan, (1, n_ensembles)).T), axis=1 + ) self.utc = np.concatenate( - (self.utc, np.tile(np.nan, (1, n_ensembles)).T), axis=1) + (self.utc, np.tile(np.nan, (1, n_ensembles)).T), axis=1 + ) self.lat_deg = np.concatenate( - (self.lat_deg, np.tile(np.nan, (1, n_ensembles)).T), axis=1) + 
(self.lat_deg, np.tile(np.nan, (1, n_ensembles)).T), axis=1 + ) self.lon_deg = np.concatenate( - (self.lon_deg, np.tile(np.nan, (1, n_ensembles)).T), axis=1) + (self.lon_deg, np.tile(np.nan, (1, n_ensembles)).T), axis=1 + ) self.corr_qual = np.concatenate( - (self.corr_qual, np.tile(np.nan, (1, n_ensembles)).T), axis=1) + (self.corr_qual, np.tile(np.nan, (1, n_ensembles)).T), axis=1 + ) self.num_sats = np.concatenate( - (self.num_sats, np.tile(np.nan, (1, n_ensembles)).T), axis=1) + (self.num_sats, np.tile(np.nan, (1, n_ensembles)).T), axis=1 + ) self.hdop = np.concatenate( - (self.hdop, np.tile(np.nan, (1, n_ensembles)).T), axis=1) + (self.hdop, np.tile(np.nan, (1, n_ensembles)).T), axis=1 + ) self.alt = np.concatenate( - (self.alt, np.tile(np.nan, (1, n_ensembles)).T), axis=1) + (self.alt, np.tile(np.nan, (1, n_ensembles)).T), axis=1 + ) self.geoid = np.concatenate( - (self.geoid, np.tile(np.nan, (1, n_ensembles)).T), axis=1) + (self.geoid, np.tile(np.nan, (1, n_ensembles)).T), axis=1 + ) self.d_gps_age = np.concatenate( - (self.d_gps_age, np.tile(np.nan, (1, n_ensembles)).T), axis=1) + (self.d_gps_age, np.tile(np.nan, (1, n_ensembles)).T), axis=1 + ) self.ref_stat_id = np.concatenate( - (self.ref_stat_id, np.tile(np.nan, (1, n_ensembles)).T), axis=1) + (self.ref_stat_id, np.tile(np.nan, (1, n_ensembles)).T), axis=1 + ) for ens in range(n_ensembles): - self.gga_header[ens].append('') - self.geoid_unit[ens].append('') - self.alt_unit[ens].append('') - self.lon_ref[ens].append('') - self.lat_ref[ens].append('') + self.gga_header[ens].append("") + self.geoid_unit[ens].append("") + self.alt_unit[ens].append("") + self.lon_ref[ens].append("") + self.lat_ref[ens].append("") def vtg_expand(self, n_ensembles): self.vtg_delta_time = np.concatenate( - (self.vtg_delta_time, np.tile(np.nan, (1, n_ensembles)).T), axis=1) + (self.vtg_delta_time, np.tile(np.nan, (1, n_ensembles)).T), axis=1 + ) self.course_true = np.concatenate( - (self.course_true, np.tile(np.nan, (1, n_ensembles)).T), axis=1) + (self.course_true, np.tile(np.nan, (1, n_ensembles)).T), axis=1 + ) self.course_mag = np.concatenate( - (self.course_mag, np.tile(np.nan, (1, n_ensembles)).T), axis=1) + (self.course_mag, np.tile(np.nan, (1, n_ensembles)).T), axis=1 + ) self.speed_knots = np.concatenate( - (self.speed_knots, np.tile(np.nan, (1, n_ensembles)).T), axis=1) + (self.speed_knots, np.tile(np.nan, (1, n_ensembles)).T), axis=1 + ) self.speed_kph = np.concatenate( - (self.speed_kph, np.tile(np.nan, (1, n_ensembles)).T), axis=1) + (self.speed_kph, np.tile(np.nan, (1, n_ensembles)).T), axis=1 + ) for ens in range(n_ensembles): - self.kph_indicator[ens].append('') - self.mode_indicator[ens].append('') - self.vtg_header[ens].append('') - self.true_indicator[ens].append('') - self.mag_indicator[ens].append('') - self.knots_indicator[ens].append('') + self.kph_indicator[ens].append("") + self.mode_indicator[ens].append("") + self.vtg_header[ens].append("") + self.true_indicator[ens].append("") + self.mag_indicator[ens].append("") + self.knots_indicator[ens].append("") def dbt_expand(self, n_ensembles): self.dbt_delta_time = np.concatenate( - (self.dbt_delta_time, np.tile(np.nan, (1, n_ensembles)).T), axis=1) + (self.dbt_delta_time, np.tile(np.nan, (1, n_ensembles)).T), axis=1 + ) self.depth_ft = np.concatenate( - (self.depth_ft, np.tile(np.nan, (1, n_ensembles)).T), axis=1) + (self.depth_ft, np.tile(np.nan, (1, n_ensembles)).T), axis=1 + ) self.depth_m = np.concatenate( - (self.depth_m, np.tile(np.nan, (1, n_ensembles)).T), axis=1) + 
(self.depth_m, np.tile(np.nan, (1, n_ensembles)).T), axis=1 + ) self.depth_fath = np.concatenate( - (self.depth_fath, np.tile(np.nan, (1, n_ensembles)).T), axis=1) + (self.depth_fath, np.tile(np.nan, (1, n_ensembles)).T), axis=1 + ) for ens in range(n_ensembles): - self.fath_indicator[ens].append('') - self.dbt_header[ens].append('') - self.ft_indicator[ens].append('') - self.m_indicator[ens].append('') + self.fath_indicator[ens].append("") + self.dbt_header[ens].append("") + self.ft_indicator[ens].append("") + self.m_indicator[ens].append("") def hdt_expand(self, n_ensembles): self.hdt_delta_time = np.concatenate( - (self.hdt_delta_time, np.tile(np.nan, (1, n_ensembles)).T), axis=1) + (self.hdt_delta_time, np.tile(np.nan, (1, n_ensembles)).T), axis=1 + ) self.heading_deg = np.concatenate( - (self.heading_deg, np.tile(np.nan, (1, n_ensembles)).T), axis=1) + (self.heading_deg, np.tile(np.nan, (1, n_ensembles)).T), axis=1 + ) for ens in range(n_ensembles): - self.h_true_indicator[ens].append('') - self.hdt_header[ens].append('') + self.h_true_indicator[ens].append("") + self.hdt_header[ens].append("") class Nmea(object): @@ -2250,11 +2976,11 @@ class Nmea(object): n_ensembles: int Number of ensembles """ - self.gga = ['']*n_ensembles - self.gsa = ['']*n_ensembles - self.vtg = ['']*n_ensembles + self.gga = [""] * n_ensembles + self.gsa = [""] * n_ensembles + self.vtg = [""] * n_ensembles # self.raw = ['']*n_ensembles DSM: not sure this was used - self.dbt = ['']*n_ensembles + self.dbt = [""] * n_ensembles class Sensor(object): @@ -2355,14 +3081,14 @@ class Sensor(object): self.date = nans([n_ensembles, 3]) self.date_y2k = nans([n_ensembles, 4]) self.date_not_y2k = nans([n_ensembles, 3]) - self.error_status_word = [''] * n_ensembles + self.error_status_word = [""] * n_ensembles self.heading_deg = nans(n_ensembles) self.heading_std_dev_deg = nans(n_ensembles) self.mpt_msc = nans([n_ensembles, 3]) self.num = nans(n_ensembles) self.num_fact = nans(n_ensembles) self.num_tot = nans(n_ensembles) - self.orient = [''] * n_ensembles + self.orient = [""] * n_ensembles self.pitch_std_dev_deg = nans(n_ensembles) self.pitch_deg = nans(n_ensembles) self.pressure_neg = nans(n_ensembles) @@ -2382,7 +3108,7 @@ class Sensor(object): self.vert_beam_eval_amp = nans(n_ensembles) self.vert_beam_RSSI_amp = nans(n_ensembles) self.vert_beam_range_m = nans(n_ensembles) - self.vert_beam_gain = [''] * n_ensembles + self.vert_beam_gain = [""] * n_ensembles self.vert_beam_status = np.zeros(n_ensembles) @@ -2404,7 +3130,8 @@ class Surface(object): pergd: np.array(int) 3D array of percent good for each beam, cell, and ensemble rssi: np.array(int) - 3D array of return signal strength indicator for each beam, cell, and ensemble + 3D array of return signal strength indicator for each beam, cell, + and ensemble """ def __init__(self, n_ensembles, n_velocities, max_surface_bins): @@ -2441,7 +3168,8 @@ class Wt(object): pergd: np.array(int) 3D array of percent good for each beam, cell, and ensemble rssi: np.array(int) - 3D array of return signal strength indicator for each beam, cell, and ensemble + 3D array of return signal strength indicator for each beam, cell, + and ensemble """ def __init__(self, n_bins, n_ensembles, n_velocities): diff --git a/Classes/Pd0TRDI_2.py b/Classes/Pd0TRDI_2.py index 6a20448..ff820f4 100644 --- a/Classes/Pd0TRDI_2.py +++ b/Classes/Pd0TRDI_2.py @@ -59,49 +59,53 @@ class Pd0TRDI(object): self.Nmea = None self.data_decoders = { - 0x0000: ('fixed_leader', self.decode_fixed_leader), - 0x0080: 
('variable_leader', self.decode_variable_leader), - 0x0100: ('velocity', self.decode_velocity), - 0x0200: ('correlation', self.decode_correlation), - 0x0300: ('echo_intensity', self.decode_echo_intensity), - 0x0400: ('percent_good', self.decode_percent_good), - 0x0500: ('status', self.decode_status), - 0x0600: ('bottom_track', self.decode_bottom_track), - 0x2022: ('nmea', self.decode_nmea), - 0x2100: ('dbt_sentence', self.decode_dbt_sentence), - 0x2101: ('gga_sentence', self.decode_gga_sentence), - 0x2102: ('vtg_sentence', self.decode_vtg_sentence), - 0x2103: ('gsa_sentence', self.decode_gsa_sentence), - 0x0010: ('surface_leader', self.decode_surface_leader), - 0x0110: ('surface_velocity', self.decode_surface_velocity), - 0x0210: ('surface_correlation', self.decode_surface_correlation), - 0x0310: ('surface_intensity', self.decode_surface_intensity), - 0x0410: ('surface_percent_good', self.decode_surface_percent_good), - 0x0510: ('surface_status', self.decode_surface_status), - 0x4401: ('auto_configuration', self.decode_auto_config), - 0x4100: ('vertical_beam', self.decode_vertical_beam), - 0x3200: ('transformation_matrix', self.decode_transformation_matrix) + 0x0000: ("fixed_leader", self.decode_fixed_leader), + 0x0080: ("variable_leader", self.decode_variable_leader), + 0x0100: ("velocity", self.decode_velocity), + 0x0200: ("correlation", self.decode_correlation), + 0x0300: ("echo_intensity", self.decode_echo_intensity), + 0x0400: ("percent_good", self.decode_percent_good), + 0x0500: ("status", self.decode_status), + 0x0600: ("bottom_track", self.decode_bottom_track), + 0x2022: ("nmea", self.decode_nmea), + 0x2100: ("dbt_sentence", self.decode_dbt_sentence), + 0x2101: ("gga_sentence", self.decode_gga_sentence), + 0x2102: ("vtg_sentence", self.decode_vtg_sentence), + 0x2103: ("gsa_sentence", self.decode_gsa_sentence), + 0x0010: ("surface_leader", self.decode_surface_leader), + 0x0110: ("surface_velocity", self.decode_surface_velocity), + 0x0210: ("surface_correlation", self.decode_surface_correlation), + 0x0310: ("surface_intensity", self.decode_surface_intensity), + 0x0410: ("surface_percent_good", self.decode_surface_percent_good), + 0x0510: ("surface_status", self.decode_surface_status), + 0x4401: ("auto_configuration", self.decode_auto_config), + 0x4100: ("vertical_beam", self.decode_vertical_beam), + 0x3200: ("transformation_matrix", self.decode_transformation_matrix), } - self.nmea_decoders = {100: ('gga', self.decode_gga_100), - 101: ('vtg', self.decode_vtg_101), - 102: ('ds', self.decode_ds_102), - 103: ('ext_heading', self.decode_ext_heading_103), - 104: ('gga', self.decode_gga_104), - 105: ('vtg', self.decode_vtg_105), - 106: ('ds', self.decode_ds_106), - 107: ('ext_heading', self.decode_ext_heading_107), - 204: ('gga', self.decode_gga_204), - 205: ('vtg', self.decode_vtg_205), - 206: ('ds', self.decode_ds_206), - 207: ('ext_heading', self.decode_ext_heading_207)} + self.nmea_decoders = { + 100: ("gga", self.decode_gga_100), + 101: ("vtg", self.decode_vtg_101), + 102: ("ds", self.decode_ds_102), + 103: ("ext_heading", self.decode_ext_heading_103), + 104: ("gga", self.decode_gga_104), + 105: ("vtg", self.decode_vtg_105), + 106: ("ds", self.decode_ds_106), + 107: ("ext_heading", self.decode_ext_heading_107), + 204: ("gga", self.decode_gga_204), + 205: ("vtg", self.decode_vtg_205), + 206: ("ds", self.decode_ds_206), + 207: ("ext_heading", self.decode_ext_heading_207), + } self.n_velocities = 4 self.max_surface_bins = 5 self.pd0_read(file_name) - def create_objects(self, n_ensembles, 
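# Minimal sketch of the dispatch these tables drive: decode_pd0_bytearray
# reads the 2-byte little-endian data-type ID at each address offset and
# routes it through the matching (key, decoder) pair. `decoders` and
# `parse_one` here are illustrative names, not part of the class.
import struct

def parse_one(decoders, pd0_bytes, offset, data):
    header_id = struct.unpack("<H", pd0_bytes[offset:offset + 2])[0]
    if header_id in decoders:
        key, decoder = decoders[header_id]
        data[key] = decoder(pd0_bytes, offset, data)
    return data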
n_types, n_bins, max_surface_bins, n_velocities, wr2=False): + def create_objects( + self, n_ensembles, n_types, n_bins, max_surface_bins, n_velocities, wr2=False + ): """Create objects for instance variables. Parameters @@ -133,7 +137,8 @@ class Pd0TRDI(object): self.Nmea = Nmea(n_ensembles) def pd0_read(self, fullname, wr2=False): - """Reads the binary pd0 file and assigns values to object instance variables. + """Reads the binary pd0 file and assigns values to object instance + variables. Parameters ---------- @@ -149,13 +154,22 @@ class Pd0TRDI(object): if file_info > 0: # Open file for processing - with open(fullname, 'rb') as f: + with open(fullname, "rb") as f: pd0 = f.read() pd0_bytes = bytearray(pd0) # Intialize classes and arrays - n_ensembles, max_types, max_beams, max_bins = self.number_of_ensembles(self, file_info, pd0_bytes) - self.create_objects(n_ensembles, max_types, max_bins, self.max_surface_bins, self.n_velocities, wr2) + n_ensembles, max_types, max_beams, max_bins = self.number_of_ensembles( + self, file_info, pd0_bytes + ) + self.create_objects( + n_ensembles, + max_types, + max_bins, + self.max_surface_bins, + self.n_velocities, + wr2, + ) self.decode_all(pd0_bytes, file_info) self.screen_and_convert(wr2) @@ -188,7 +202,7 @@ class Pd0TRDI(object): if wr2: # If vtg data are available compute north and east components - if self.Gps2.vtg_header[0, 0] == '$': + if self.Gps2.vtg_header[0, 0] == "$": # Find minimum of absolute value of delta time from raw data vtg_delta_time = np.abs(self.Gps2.vtg_delta_time) @@ -197,11 +211,12 @@ class Pd0TRDI(object): # Compute the velocity components in m/s for i in range(len(vtg_delta_time)): idx = np.where(vtg_delta_time == vtg_min)[0][0] - self.Gps2.vtg_velE_mps[i], self.Gps2.vtg_velN_mps[i] = \ - pol2cart((90 - self.Gps2.course_true[i, idx]) * np.pi / 180, - self.Gps2.speed_kph[i, idx] * 0.2777778) + self.Gps2.vtg_velE_mps[i], self.Gps2.vtg_velN_mps[i] = pol2cart( + (90 - self.Gps2.course_true[i, idx]) * np.pi / 180, + self.Gps2.speed_kph[i, idx] * 0.2777778, + ) - if self.Gps2.gga_header[0, 0] == '$': + if self.Gps2.gga_header[0, 0] == "$": # Initialize constants e_radius = 6378137 @@ -216,14 +231,29 @@ class Pd0TRDI(object): for i in range(len(gga_delta_time)): idx = np.where(gga_delta_time[i:] == gga_min) if idx > 0: - lat_avg_rad = (self.Gps2.lat_deg[i, idx[i]] - + self.Gps2.lat_deg[i - 1, idx[i - 1]]) / 2 + lat_avg_rad = ( + self.Gps2.lat_deg[i, idx[i]] + + self.Gps2.lat_deg[i - 1, idx[i - 1]] + ) / 2 sin_lat_avg_rad = np.sin(np.deg2rad(lat_avg_rad)) r_e = coeff * (1 + ellip * sin_lat_avg_rad * sin_lat_avg_rad) - rn = coeff * (1 - 2 * ellip + 3 * ellip * sin_lat_avg_rad * sin_lat_avg_rad) - dx = r_e * (self.Gps2.lon_deg[i, idx[i]] - - self.Gps2.lon_deg(i - 1, idx[i - 1])) * np.cos(np.deg2rad(lat_avg_rad)) - dy = rn * (self.Gps2.lat_deg[i, idx[i]] - self.Gps2.lat_deg[i - 1, idx[i - 1]]) + rn = coeff * ( + 1 + - 2 * ellip + + 3 * ellip * sin_lat_avg_rad * sin_lat_avg_rad + ) + dx = ( + r_e + * ( + self.Gps2.lon_deg[i, idx[i]] + - self.Gps2.lon_deg(i - 1, idx[i - 1]) + ) + * np.cos(np.deg2rad(lat_avg_rad)) + ) + dy = rn * ( + self.Gps2.lat_deg[i, idx[i]] + - self.Gps2.lat_deg[i - 1, idx[i - 1]] + ) dt = self.Gps2.utc[i, idx[i]] - self.Gps2.utc[i - 1, idx[i - 1]] self.Gps2.gga_velE_mps[i] = dx / dt self.Gps2.gga_velN_mps[i] = dy / dt @@ -238,10 +268,10 @@ class Pd0TRDI(object): ensemble_number = 0 while start_byte < file_info: data = self.decode_pd0_bytearray(self.data_decoders, pd0_bytes[start_byte:]) - if data['checksum']: + if 
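# Note on the GGA-velocity block above: `self.Gps2.lon_deg(i - 1,
# idx[i - 1])` uses parentheses, which calls the ndarray and raises a
# TypeError at runtime; the neighbouring lat_deg terms show the intended
# square-bracket indexing:
#
#     dx = (
#         r_e
#         * (self.Gps2.lon_deg[i, idx[i]] - self.Gps2.lon_deg[i - 1, idx[i - 1]])
#         * np.cos(np.deg2rad(lat_avg_rad))
#     )
#
# Similarly, `idx = np.where(gga_delta_time[i:] == gga_min)` returns a
# tuple of arrays, so the guard `if idx > 0:` compares a tuple rather than
# a count; `if idx[0].size > 0:` is presumably what was meant.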
data["checksum"]: # Adjust index for lost ensembles if ensemble_number > 0: - n = n + data['variable_leader']['ensemble_number'] - ensemble_number + n = n + data["variable_leader"]["ensemble_number"] - ensemble_number try: self.Hdr.populate_data(n, data) self.Inst.populate_data(n, data) @@ -254,14 +284,13 @@ class Pd0TRDI(object): self.Surface.populate_data(n, data, self) self.AutoMode.populate_data(n, data) self.Nmea.populate_data(n, data) - start_byte = start_byte + data['header']['number_of_bytes'] + 2 - ensemble_number = data['variable_leader']['ensemble_number'] + start_byte = start_byte + data["header"]["number_of_bytes"] + 2 + ensemble_number = data["variable_leader"]["ensemble_number"] except ValueError: start_byte = Pd0TRDI.find_next(pd0_bytes, start_byte, file_info) else: start_byte = Pd0TRDI.find_next(pd0_bytes, start_byte, file_info) - @staticmethod def number_of_ensembles(self, file_info, pd0_bytes): """Determines the number of ensembles in the data file. @@ -288,8 +317,10 @@ class Pd0TRDI(object): """ # Configure data decoders to be used - data_decoders = {0x0000: ('fixed_leader', self.preload_fixed_leader), - 0x0080: ('variable_leader', self.preload_variable_leader)} + data_decoders = { + 0x0000: ("fixed_leader", self.preload_fixed_leader), + 0x0080: ("variable_leader", self.preload_variable_leader), + } # Intitialize variables start_byte = 0 @@ -303,20 +334,22 @@ class Pd0TRDI(object): data = self.decode_pd0_bytearray(data_decoders, pd0_bytes[start_byte:]) # start_byte = start_byte + data['header']['number_of_bytes'] + 2 - if data['checksum']: - # if 'number_of_bytes' in data['header'] and data['header']['number_of_bytes'] > 0: - if 'number_of_bytes' in data['header'] and 'fixed_leader' in data and 'variable_leader' in data: - n_data_types.append(data['header']['number_of_data_types']) - n_beams.append(data['fixed_leader']['number_of_beams']) - n_bins.append(data['fixed_leader']['number_of_cells']) - ens_num.append(data['variable_leader']['ensemble_number']) - start_byte = start_byte + data['header']['number_of_bytes'] + 2 + if data["checksum"]: + if ( + "number_of_bytes" in data["header"] + and "fixed_leader" in data + and "variable_leader" in data + ): + n_data_types.append(data["header"]["number_of_data_types"]) + n_beams.append(data["fixed_leader"]["number_of_beams"]) + n_bins.append(data["fixed_leader"]["number_of_cells"]) + ens_num.append(data["variable_leader"]["ensemble_number"]) + start_byte = start_byte + data["header"]["number_of_bytes"] + 2 else: start_byte = Pd0TRDI.find_next(pd0_bytes, start_byte, file_info) else: start_byte = Pd0TRDI.find_next(pd0_bytes, start_byte, file_info) - # Compute maximums max_data_types = np.nanmax(n_data_types) max_beams = np.nanmax(n_beams) @@ -326,12 +359,11 @@ class Pd0TRDI(object): return n_ensembles, max_data_types, max_beams, max_bins @staticmethod - def find_next (pd0_bytes, start_byte, file_info): + def find_next(pd0_bytes, start_byte, file_info): try: start_byte = start_byte + 1 - skip_forward = pd0_bytes[start_byte:].index(b'\x7f\x7f') - # data['header'] = Pd0TRDI.decode_fixed_header(pd0_bytes[start_byte + skip_forward:]) + skip_forward = pd0_bytes[start_byte:].index(b"\x7f\x7f") start_byte = start_byte + skip_forward except ValueError: start_byte = file_info @@ -359,7 +391,7 @@ class Pd0TRDI(object): Number of regular cells in ensemble """ - fixed_leader_format = (('number_of_beams', 'B', 8), ('number_of_cells', 'B', 9)) + fixed_leader_format = (("number_of_beams", "B", 8), ("number_of_cells", "B", 9)) return 
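# The decode loop above keeps the array index `n` aligned with the ADCP's
# own ensemble counter: when ensembles are dropped, the recorded
# ensemble_number jumps and `n` advances by the same gap, leaving NaN-filled
# rows for the missing ensembles. Worked example with illustrative values:
n, ensemble_number = 6, 7        # ensemble 7 was stored at row index 6
next_ensemble_number = 10        # ensembles 8 and 9 were lost
n = n + next_ensemble_number - ensemble_number
assert n == 9                    # rows 7 and 8 remain NaN fill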
Pd0TRDI.unpack_bytes(pd0_bytes, fixed_leader_format, offset) @@ -383,13 +415,14 @@ class Pd0TRDI(object): """ # Define format - variable_leader_format = (('ensemble_number', '<H', 2),) + variable_leader_format = (("ensemble_number", "<H", 2),) return Pd0TRDI.unpack_bytes(pd0_bytes, variable_leader_format, offset) @staticmethod def decode_pd0_bytearray(data_decoders, pd0_bytes): - """Loops through data and calls appropriate parsing method for each header ID. + """Loops through data and calls appropriate parsing method for each + header ID. Parameters ---------- @@ -404,42 +437,52 @@ class Pd0TRDI(object): Dictionary of decoded data """ - data = {} + data = {"header": Pd0TRDI.decode_fixed_header(pd0_bytes), "checksum": False} # Read in header - data['header'] = Pd0TRDI.decode_fixed_header(pd0_bytes) - data['checksum'] = False - if 'number_of_bytes' in data['header'] and data['header']['number_of_bytes'] > 0: - if 'number_of_data_types' in data['header']: - # If checksum is OK then decode address offsets to the data types - if Pd0TRDI.validate_checksum(pd0_bytes, data['header']['number_of_bytes']): - data['checksum'] = True - data['header']['address_offsets'] = Pd0TRDI.decode_address_offsets(pd0_bytes, - data['header']['number_of_data_types']) - data['header']['invalid'] = [] - # Loop to decode all data types for which a data decoder is provided - for offset in data['header']['address_offsets']: + if ( + "number_of_bytes" in data["header"] + and data["header"]["number_of_bytes"] > 0 + ): + if "number_of_data_types" in data["header"]: + # If checksum is OK then decode address offsets to the data + # types + if Pd0TRDI.validate_checksum( + pd0_bytes, data["header"]["number_of_bytes"] + ): + data["checksum"] = True + data["header"]["address_offsets"] = Pd0TRDI.decode_address_offsets( + pd0_bytes, data["header"]["number_of_data_types"] + ) + data["header"]["invalid"] = [] + # Loop to decode all data types for which a data + # decoder is provided + for offset in data["header"]["address_offsets"]: if len(pd0_bytes) > offset + 2: - header_id = struct.unpack('<H', pd0_bytes[offset: offset + 2])[0] + header_id = struct.unpack( + "<H", pd0_bytes[offset : offset + 2] + )[0] if header_id in data_decoders: key = data_decoders[header_id][0] decoder = data_decoders[header_id][1] data[key] = decoder(pd0_bytes, offset, data) else: - data['header']['invalid'].append(header_id) + data["header"]["invalid"].append(header_id) return data @staticmethod def unpack_bytes(pd0_bytes, data_format_tuples, offset=0): - """Unpackes the data based on the supplied data format tuples and offset. + """Unpackes the data based on the supplied data format tuples and + offset. 
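# Worked example of the (name, struct format, byte offset) tuple convention
# that unpack_bytes consumes throughout these decoders. The buffer contents
# here are synthetic.
import struct

buf = struct.pack("<HB", 0x7F7F, 5)
fmt = (("id", "<H", 0), ("number_of_data_types", "B", 2))
decoded = {
    name: struct.unpack(code, buf[off:off + struct.calcsize(code)])[0]
    for name, code, off in fmt
}
assert decoded == {"id": 0x7F7F, "number_of_data_types": 5}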
Parameters ---------- pd0_bytes: bytearray Bytearray of all pd0 data data_format_tuples: tuple - A tuple of tuples providing the data name, format, and byte location + A tuple of tuples providing the data name, format, and byte + location offset: int Pointer into pd0_bytes @@ -450,14 +493,17 @@ class Pd0TRDI(object): """ data = {} - # Decode data for each format specified in the data format tuples and assign to the data dictionary + # Decode data for each format specified in the data format tuples and + # assign to the data dictionary for fmt in data_format_tuples: try: struct_offset = offset + fmt[2] size = struct.calcsize(fmt[1]) - data[fmt[0]] = struct.unpack(fmt[1], pd0_bytes[struct_offset: struct_offset + size])[0] + data[fmt[0]] = struct.unpack( + fmt[1], pd0_bytes[struct_offset : struct_offset + size] + )[0] except: - print('Error parsing %s with the arguments ') + print("Error parsing %s with the arguments ") return data @@ -480,7 +526,7 @@ class Pd0TRDI(object): """ if len(pd0_bytes) > offset + 1: calc_checksum = sum(pd0_bytes[:offset]) & 0xFFFF - given_checksum = struct.unpack('<H', pd0_bytes[offset: offset + 2])[0] + given_checksum = struct.unpack("<H", pd0_bytes[offset : offset + 2])[0] if calc_checksum == given_checksum: return True @@ -492,9 +538,9 @@ class Pd0TRDI(object): def bin2str(bin_in): try: - str_out = bin_in.decode('utf-8') + str_out = bin_in.decode("utf-8") except: - str_out = '' + str_out = "" return str_out @staticmethod @@ -520,7 +566,7 @@ class Pd0TRDI(object): # Loop through each data type for bytes_start in range(offset, offset + (num_data_types * 2), 2): - data = struct.unpack_from('<H', pd0_bytes[bytes_start: bytes_start + 2])[0] + data = struct.unpack_from("<H", pd0_bytes[bytes_start : bytes_start + 2])[0] address_data.append(data) return address_data @@ -540,11 +586,13 @@ class Pd0TRDI(object): Dictionary of header data """ - header_data_format = (('id', 'B', 0), - ('data_source', 'B', 1), - ('number_of_bytes', '<H', 2), - ('spare', 'B', 4), - ('number_of_data_types', 'B', 5)) + header_data_format = ( + ("id", "B", 0), + ("data_source", "B", 1), + ("number_of_bytes", "<H", 2), + ("spare", "B", 4), + ("number_of_data_types", "B", 5), + ) header = Pd0TRDI.unpack_bytes(pd0_bytes, header_data_format) return header @@ -570,44 +618,44 @@ class Pd0TRDI(object): # Define format fixed_leader_format = ( - ('id', '<H', 0), - ('cpu_firmware_version', 'B', 2), - ('cpu_firmware_revision', 'B', 3), - ('system_configuration_ls', 'B', 4), - ('system_configuration_ms', 'B', 5), - ('simulation_data_flag', 'B', 6), - ('lag_length', 'B', 7), - ('number_of_beams', 'B', 8), - ('number_of_cells', 'B', 9), - ('number_of_water_pings', '<H', 10), - ('depth_cell_size', '<H', 12), - ('blank_after_transmit', '<H', 14), - ('water_mode', 'B', 16), - ('low_correlation_threshold', 'B', 17), - ('number_of_code_repetitions', 'B', 18), - ('minimum_percentage_water_profile_pings', 'B', 19), - ('error_velocity_threshold', '<H', 20), - ('minutes', 'B', 22), - ('seconds', 'B', 23), - ('hundredths', 'B', 24), - ('coordinate_transformation_process', 'B', 25), - ('heading_alignment', '<H', 26), - ('heading_bias', '<H', 28), - ('sensor_source', 'B', 30), - ('sensor_available', 'B', 31), - ('bin_1_distance', '<H', 32), - ('transmit_pulse_length', '<H', 34), - ('starting_depth_cell', 'B', 36), - ('ending_depth_cell', 'B', 37), - ('false_target_threshold', 'B', 38), - ('low_latency_trigger', 'B', 39), - ('transmit_lag_distance', '<H', 40), - ('cpu_board_serial_number', '<Q', 42), - ('system_bandwidth', 
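# Note on the except branch in unpack_bytes above: it prints a literal
# "%s" placeholder that is never filled, and the bare `except:` hides which
# field failed. A sketch of what was presumably intended, using the same
# loop variables:
#
#     except struct.error:
#         print("Error parsing %s with the arguments %s at offset %d"
#               % (fmt[0], fmt[1], struct_offset))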
'<H', 50), - ('system_power', 'B', 52), - ('spare', 'B', 53), - ('serial_number', '<I', 54), - ('beam_angle', 'B', 58) + ("id", "<H", 0), + ("cpu_firmware_version", "B", 2), + ("cpu_firmware_revision", "B", 3), + ("system_configuration_ls", "B", 4), + ("system_configuration_ms", "B", 5), + ("simulation_data_flag", "B", 6), + ("lag_length", "B", 7), + ("number_of_beams", "B", 8), + ("number_of_cells", "B", 9), + ("number_of_water_pings", "<H", 10), + ("depth_cell_size", "<H", 12), + ("blank_after_transmit", "<H", 14), + ("water_mode", "B", 16), + ("low_correlation_threshold", "B", 17), + ("number_of_code_repetitions", "B", 18), + ("minimum_percentage_water_profile_pings", "B", 19), + ("error_velocity_threshold", "<H", 20), + ("minutes", "B", 22), + ("seconds", "B", 23), + ("hundredths", "B", 24), + ("coordinate_transformation_process", "B", 25), + ("heading_alignment", "<H", 26), + ("heading_bias", "<H", 28), + ("sensor_source", "B", 30), + ("sensor_available", "B", 31), + ("bin_1_distance", "<H", 32), + ("transmit_pulse_length", "<H", 34), + ("starting_depth_cell", "B", 36), + ("ending_depth_cell", "B", 37), + ("false_target_threshold", "B", 38), + ("low_latency_trigger", "B", 39), + ("transmit_lag_distance", "<H", 40), + ("cpu_board_serial_number", "<Q", 42), + ("system_bandwidth", "<H", 50), + ("system_power", "B", 52), + ("spare", "B", 53), + ("serial_number", "<I", 54), + ("beam_angle", "B", 58), ) return Pd0TRDI.unpack_bytes(pd0_bytes, fixed_leader_format, offset) @@ -633,60 +681,64 @@ class Pd0TRDI(object): # Define format variable_leader_format = ( - ('id', '<H', 0), - ('ensemble_number', '<H', 2), - ('rtc_year', 'B', 4), - ('rtc_month', 'B', 5), - ('rtc_day', 'B', 6), - ('rtc_hour', 'B', 7), - ('rtc_minutes', 'B', 8), - ('rtc_seconds', 'B', 9), - ('rtc_hundredths', 'B', 10), - ('ensemble_number_msb', 'B', 11), - ('bit_fault', 'B', 12), - ('bit_count', 'B', 13), - ('speed_of_sound', '<H', 14), - ('depth_of_transducer', '<H', 16), - ('heading', '<H', 18), - ('pitch', '<h', 20), - ('roll', '<h', 22), - ('salinity', '<H', 24), - ('temperature', '<h', 26), - ('mpt_minutes', 'B', 28), - ('mpt_seconds', 'B', 29), - ('mpt_hundredths', 'B', 30), - ('heading_standard_deviation', 'B', 31), - ('pitch_standard_deviation', 'B', 32), - ('roll_standard_deviation', 'B', 33), - ('transmit_current', 'B', 34), - ('transmit_voltage', 'B', 35), - ('ambient_temperature', 'B', 36), - ('pressure_positive', 'B', 37), - ('pressure_negative', 'B', 38), - ('attitude_temperature', 'B', 39), - ('attitude', 'B', 40), - ('contamination_sensor', 'B', 41), - ('error_status_word', '<I', 42), - ('reserved', '<H', 46), - ('pressure', '<I', 48), - ('pressure_variance', '<I', 52), - ('spare', 'B', 56), - ('rtc_y2k_century', 'B', 57), - ('rtc_y2k_year', 'B', 58), - ('rtc_y2k_month', 'B', 59), - ('rtc_y2k_day', 'B', 60), - ('rtc_y2k_hour', 'B', 61), - ('rtc_y2k_minutes', 'B', 62), - ('rtc_y2k_seconds', 'B', 63), - ('rtc_y2k_hundredths', 'B', 64), - ('lag_near_bottom', 'B', 65) + ("id", "<H", 0), + ("ensemble_number", "<H", 2), + ("rtc_year", "B", 4), + ("rtc_month", "B", 5), + ("rtc_day", "B", 6), + ("rtc_hour", "B", 7), + ("rtc_minutes", "B", 8), + ("rtc_seconds", "B", 9), + ("rtc_hundredths", "B", 10), + ("ensemble_number_msb", "B", 11), + ("bit_fault", "B", 12), + ("bit_count", "B", 13), + ("speed_of_sound", "<H", 14), + ("depth_of_transducer", "<H", 16), + ("heading", "<H", 18), + ("pitch", "<h", 20), + ("roll", "<h", 22), + ("salinity", "<H", 24), + ("temperature", "<h", 26), + ("mpt_minutes", "B", 28), + 
("mpt_seconds", "B", 29), + ("mpt_hundredths", "B", 30), + ("heading_standard_deviation", "B", 31), + ("pitch_standard_deviation", "B", 32), + ("roll_standard_deviation", "B", 33), + ("transmit_current", "B", 34), + ("transmit_voltage", "B", 35), + ("ambient_temperature", "B", 36), + ("pressure_positive", "B", 37), + ("pressure_negative", "B", 38), + ("attitude_temperature", "B", 39), + ("attitude", "B", 40), + ("contamination_sensor", "B", 41), + ("error_status_word", "<I", 42), + ("reserved", "<H", 46), + ("pressure", "<I", 48), + ("pressure_variance", "<I", 52), + ("spare", "B", 56), + ("rtc_y2k_century", "B", 57), + ("rtc_y2k_year", "B", 58), + ("rtc_y2k_month", "B", 59), + ("rtc_y2k_day", "B", 60), + ("rtc_y2k_hour", "B", 61), + ("rtc_y2k_minutes", "B", 62), + ("rtc_y2k_seconds", "B", 63), + ("rtc_y2k_hundredths", "B", 64), + ("lag_near_bottom", "B", 65), ) return Pd0TRDI.unpack_bytes(pd0_bytes, variable_leader_format, offset) - def decode_per_cell_per_beam(pd0_bytes, offset, number_of_cells, number_of_beams, struct_format): + @staticmethod + def decode_per_cell_per_beam( + pd0_bytes, offset, number_of_cells, number_of_beams, struct_format + ): """Parses fields that are stored in serial cells and beams structures. - Returns an array of cell readings where each reading is an array containing the value at that beam. + Returns an array of cell readings where each reading is an array + containing the value at that beam. Parameters ---------- @@ -696,7 +748,7 @@ class Pd0TRDI(object): Pointer into pd0_bytes number_of_cells: int Number of cells in data - number of beams: int + number_of_beams: int Number of beams in data struct_format: str A string identifying the type of data to decode @@ -716,7 +768,7 @@ class Pd0TRDI(object): # Loop through beams in each cell for field in range(0, number_of_beams): field_start = cell_start + field * data_size - data_bytes = pd0_bytes[field_start: field_start + data_size] + data_bytes = pd0_bytes[field_start : field_start + data_size] field_data = struct.unpack(struct_format, data_bytes)[0] cell_data.append(field_data) data.append(cell_data) @@ -743,18 +795,20 @@ class Pd0TRDI(object): """ # Define format - velocity_format = (('id', '<h', 0),) + velocity_format = (("id", "<h", 0),) # Unpack data velocity_data = Pd0TRDI.unpack_bytes(pd0_bytes, velocity_format, offset) # Move past id field offset += 2 # Arrange data in list of depth cells and beams or velocity components - velocity_data['data'] = Pd0TRDI.decode_per_cell_per_beam(pd0_bytes, - offset, - data['fixed_leader']['number_of_cells'], - data['fixed_leader']['number_of_beams'], - '<h') + velocity_data["data"] = Pd0TRDI.decode_per_cell_per_beam( + pd0_bytes, + offset, + data["fixed_leader"]["number_of_cells"], + data["fixed_leader"]["number_of_beams"], + "<h", + ) return velocity_data @@ -777,17 +831,19 @@ class Pd0TRDI(object): Dictionary of decoded data """ - correlation_format = (('id', '<H', 0),) + correlation_format = (("id", "<H", 0),) # Unpack data correlation_data = Pd0TRDI.unpack_bytes(pd0_bytes, correlation_format, offset) # Move past id field offset += 2 # Arrange data in list of depth cells and beams - correlation_data['data'] = Pd0TRDI.decode_per_cell_per_beam(pd0_bytes, - offset, - data['fixed_leader']['number_of_cells'], - data['fixed_leader']['number_of_beams'], - 'B') + correlation_data["data"] = Pd0TRDI.decode_per_cell_per_beam( + pd0_bytes, + offset, + data["fixed_leader"]["number_of_cells"], + data["fixed_leader"]["number_of_beams"], + "B", + ) return correlation_data @@ -810,17 
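# decode_per_cell_per_beam above walks cells x beams with struct.unpack.
# For the fixed-width codes used in this file ('<h', '<H', 'B') the same
# layout can be read in one call with numpy; a sketch under that
# assumption (the original returns nested lists, this returns an array):
import numpy as np

def per_cell_per_beam(pd0_bytes, offset, n_cells, n_beams, dtype="<i2"):
    flat = np.frombuffer(pd0_bytes, dtype=dtype, count=n_cells * n_beams,
                         offset=offset)
    return flat.reshape(n_cells, n_beams)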
+866,21 @@ class Pd0TRDI(object): Dictionary of decoded data """ - echo_intensity_format = (('id', '<H', 0),) + echo_intensity_format = (("id", "<H", 0),) # Unpack data - echo_intensity_data = Pd0TRDI.unpack_bytes(pd0_bytes, echo_intensity_format, offset) + echo_intensity_data = Pd0TRDI.unpack_bytes( + pd0_bytes, echo_intensity_format, offset + ) # Move past id field offset += 2 # Arrange data in list of depth cells and beams - echo_intensity_data['data'] = Pd0TRDI.decode_per_cell_per_beam(pd0_bytes, - offset, - data['fixed_leader']['number_of_cells'], - data['fixed_leader']['number_of_beams'], - 'B') + echo_intensity_data["data"] = Pd0TRDI.decode_per_cell_per_beam( + pd0_bytes, + offset, + data["fixed_leader"]["number_of_cells"], + data["fixed_leader"]["number_of_beams"], + "B", + ) return echo_intensity_data @@ -843,17 +903,19 @@ class Pd0TRDI(object): Dictionary of decoded data """ - percent_good_format = (('id', '<H', 0),) + percent_good_format = (("id", "<H", 0),) # Unpack data percent_good_data = Pd0TRDI.unpack_bytes(pd0_bytes, percent_good_format, offset) # Move past id field offset += 2 # Arrange data in list of depth cells and beams - percent_good_data['data'] = Pd0TRDI.decode_per_cell_per_beam(pd0_bytes, - offset, - data['fixed_leader']['number_of_cells'], - data['fixed_leader']['number_of_beams'], - 'B') + percent_good_data["data"] = Pd0TRDI.decode_per_cell_per_beam( + pd0_bytes, + offset, + data["fixed_leader"]["number_of_cells"], + data["fixed_leader"]["number_of_beams"], + "B", + ) return percent_good_data @@ -876,17 +938,19 @@ class Pd0TRDI(object): Dictionary of decoded data """ - status_format = (('id', '<H', 0),) + status_format = (("id", "<H", 0),) # Unpack data status_data = Pd0TRDI.unpack_bytes(pd0_bytes, status_format, offset) # Move past id field offset += 2 # Arrange data in list of depth cells and beams - status_data['data'] = Pd0TRDI.decode_per_cell_per_beam(pd0_bytes, - offset, - data['fixed_leader']['number_of_cells'], - data['fixed_leader']['number_of_beams'], - 'B') + status_data["data"] = Pd0TRDI.decode_per_cell_per_beam( + pd0_bytes, + offset, + data["fixed_leader"]["number_of_cells"], + data["fixed_leader"]["number_of_beams"], + "B", + ) return status_data @@ -908,23 +972,39 @@ class Pd0TRDI(object): bottom_track_data:dict Dictionary of decoded data """ - bottom_track_format = (('id', '<H', 0), - ('pings_per_ensemble_bp', '<H', 2), - ('delay_before_reaquire', '<H', 4), - ('correlation_magnitude_minimum_bc', 'B', 6), - ('evaluation_amplitude_minimum_ba', 'B', 7), - ('percent_good_minimum_bg', 'B', 8), - ('bottom_track_mode_bm', 'B', 9), - ('error_velocity_maximum_be', '<H', 10)) + bottom_track_format = ( + ("id", "<H", 0), + ("pings_per_ensemble_bp", "<H", 2), + ("delay_before_reaquire", "<H", 4), + ("correlation_magnitude_minimum_bc", "B", 6), + ("evaluation_amplitude_minimum_ba", "B", 7), + ("percent_good_minimum_bg", "B", 8), + ("bottom_track_mode_bm", "B", 9), + ("error_velocity_maximum_be", "<H", 10), + ) bottom_track_data = Pd0TRDI.unpack_bytes(pd0_bytes, bottom_track_format, offset) - bottom_track_data['range_lsb'] = Pd0TRDI.decode_per_cell_per_beam(pd0_bytes, offset + 16, 1, 4, '<H') - bottom_track_data['velocity'] = Pd0TRDI.decode_per_cell_per_beam(pd0_bytes, offset + 24, 1, 4, '<h') - bottom_track_data['correlation'] = Pd0TRDI.decode_per_cell_per_beam(pd0_bytes, offset + 32, 1, 4, 'B') - bottom_track_data['amplitude'] = Pd0TRDI.decode_per_cell_per_beam(pd0_bytes, offset + 36, 1, 4, 'B') - bottom_track_data['percent_good'] = 
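# The bottom-track decoder above reuses decode_per_cell_per_beam with a
# single "cell" and four beams to pull one value per beam from fixed
# offsets, e.g. four little-endian int16 velocities 24 bytes into the
# block. Equivalent direct read for that one field:
import struct

def bt_velocity(pd0_bytes, offset):
    return list(struct.unpack("<4h", pd0_bytes[offset + 24:offset + 32]))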
Pd0TRDI.decode_per_cell_per_beam(pd0_bytes, offset + 40, 1, 4, 'B') - bottom_track_data['rssi'] = Pd0TRDI.decode_per_cell_per_beam(pd0_bytes, offset + 72, 1, 4, 'B') - bottom_track_data['range_msb'] = Pd0TRDI.decode_per_cell_per_beam(pd0_bytes, offset + 77, 1, 4, 'B') + bottom_track_data["range_lsb"] = Pd0TRDI.decode_per_cell_per_beam( + pd0_bytes, offset + 16, 1, 4, "<H" + ) + bottom_track_data["velocity"] = Pd0TRDI.decode_per_cell_per_beam( + pd0_bytes, offset + 24, 1, 4, "<h" + ) + bottom_track_data["correlation"] = Pd0TRDI.decode_per_cell_per_beam( + pd0_bytes, offset + 32, 1, 4, "B" + ) + bottom_track_data["amplitude"] = Pd0TRDI.decode_per_cell_per_beam( + pd0_bytes, offset + 36, 1, 4, "B" + ) + bottom_track_data["percent_good"] = Pd0TRDI.decode_per_cell_per_beam( + pd0_bytes, offset + 40, 1, 4, "B" + ) + bottom_track_data["rssi"] = Pd0TRDI.decode_per_cell_per_beam( + pd0_bytes, offset + 72, 1, 4, "B" + ) + bottom_track_data["range_msb"] = Pd0TRDI.decode_per_cell_per_beam( + pd0_bytes, offset + 77, 1, 4, "B" + ) return bottom_track_data @@ -945,15 +1025,17 @@ class Pd0TRDI(object): nmea_data:dict Dictionary of decoded data """ - nmea_leader_format = (('id', '<H', 0), - ('msg_id', '<H', 2), - ('msg_size', '<H', 4), - ('delta_time', 'd', 6)) + nmea_leader_format = ( + ("id", "<H", 0), + ("msg_id", "<H", 2), + ("msg_size", "<H", 4), + ("delta_time", "d", 6), + ) nmea_data = Pd0TRDI.unpack_bytes(pd0_bytes, nmea_leader_format, offset) - if nmea_data['msg_id'] in self.nmea_decoders: - key = self.nmea_decoders[nmea_data['msg_id']][0] - decoder = self.nmea_decoders[nmea_data['msg_id']][1] + if nmea_data["msg_id"] in self.nmea_decoders: + key = self.nmea_decoders[nmea_data["msg_id"]][0] + decoder = self.nmea_decoders[nmea_data["msg_id"]][1] if key in data: data[key].append(decoder(pd0_bytes, offset + 14, nmea_data)) else: @@ -980,34 +1062,38 @@ class Pd0TRDI(object): """ # Define format - format = (('header', '10s', 0), - ('utc', '10s', 10), - ('lat_deg', 'd', 20), - ('lat_ref', 'c', 28), - ('lon_deg', 'd', 29), - ('lon_ref', 'c', 37), - ('corr_qual', 'B', 38), - ('num_sats', 'B', 39), - ('hdop', 'f', 40), - ('alt', 'f', 44), - ('alt_unit', 'c', 48), - ('geoid', 'f', 49), - ('geoid_unit', 'c', 53), - ('d_gps_age', 'f', 54), - ('ref_stat_id', '<H', 58)) + fmt = ( + ("header", "10s", 0), + ("utc", "10s", 10), + ("lat_deg", "d", 20), + ("lat_ref", "c", 28), + ("lon_deg", "d", 29), + ("lon_ref", "c", 37), + ("corr_qual", "B", 38), + ("num_sats", "B", 39), + ("hdop", "f", 40), + ("alt", "f", 44), + ("alt_unit", "c", 48), + ("geoid", "f", 49), + ("geoid_unit", "c", 53), + ("d_gps_age", "f", 54), + ("ref_stat_id", "<H", 58), + ) # Decode data - decoded_data = Pd0TRDI.unpack_bytes(pd0_bytes, format, offset) - decoded_data['header'] = Pd0TRDI.bin2str(decoded_data['header']).rstrip('\x00') + decoded_data = Pd0TRDI.unpack_bytes(pd0_bytes, fmt, offset) + decoded_data["header"] = Pd0TRDI.bin2str(decoded_data["header"]).rstrip("\x00") try: - decoded_data['utc'] = float(re.findall(b'^\d+\.\d+|\d+', decoded_data['utc'])[0]) + decoded_data["utc"] = float( + re.findall(b"^\d+\.\d+|\d+", decoded_data["utc"])[0] + ) except BaseException: - decoded_data['utc'] = np.nan - decoded_data['lat_ref'] = Pd0TRDI.bin2str(decoded_data['lat_ref']) - decoded_data['lon_ref'] = Pd0TRDI.bin2str(decoded_data['lon_ref']) - decoded_data['geoid_unit'] = Pd0TRDI.bin2str(decoded_data['geoid_unit']) - decoded_data['alt_unit'] = Pd0TRDI.bin2str(decoded_data['alt_unit']) - decoded_data['delta_time'] = data['delta_time'] + 
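# Note on the utc parsing above: b"^\d+\.\d+|\d+" relies on the unknown
# escape "\d" surviving inside a non-raw bytes literal, which Python has
# deprecated (a SyntaxWarning from 3.12 on). The raw form is the safe
# spelling and matches the same text:
import re

UTC_RE = rb"^\d+\.\d+|\d+"
assert re.findall(UTC_RE, b"123519.00")[0] == b"123519.00"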
decoded_data["utc"] = np.nan + decoded_data["lat_ref"] = Pd0TRDI.bin2str(decoded_data["lat_ref"]) + decoded_data["lon_ref"] = Pd0TRDI.bin2str(decoded_data["lon_ref"]) + decoded_data["geoid_unit"] = Pd0TRDI.bin2str(decoded_data["geoid_unit"]) + decoded_data["alt_unit"] = Pd0TRDI.bin2str(decoded_data["alt_unit"]) + decoded_data["delta_time"] = data["delta_time"] return decoded_data @@ -1031,25 +1117,29 @@ class Pd0TRDI(object): """ # Define format - format = (('header', '10s', 0), - ('course_true', 'f', 10), - ('true_indicator', 'c', 14), - ('course_mag', 'f', 15), - ('mag_indicator', 'c', 19), - ('speed_knots', 'f', 20), - ('knots_indicator', 'c', 24), - ('speed_kph', 'f', 25), - ('kph_indicator', 'c', 29), - ('mode_indicator', 'c', 30)) + fmt = ( + ("header", "10s", 0), + ("course_true", "f", 10), + ("true_indicator", "c", 14), + ("course_mag", "f", 15), + ("mag_indicator", "c", 19), + ("speed_knots", "f", 20), + ("knots_indicator", "c", 24), + ("speed_kph", "f", 25), + ("kph_indicator", "c", 29), + ("mode_indicator", "c", 30), + ) # Decode data - decoded_data = Pd0TRDI.unpack_bytes(pd0_bytes, format, offset) - decoded_data['header'] = Pd0TRDI.bin2str(decoded_data['header']).rstrip('\x00') - decoded_data['true_indicator'] = Pd0TRDI.bin2str(decoded_data['true_indicator']) - decoded_data['mag_indicator'] = Pd0TRDI.bin2str(decoded_data['mag_indicator']) - decoded_data['knots_indicator'] = Pd0TRDI.bin2str(decoded_data['knots_indicator']) - decoded_data['kph_indicator'] = Pd0TRDI.bin2str(decoded_data['kph_indicator']) - decoded_data['delta_time'] = data['delta_time'] + decoded_data = Pd0TRDI.unpack_bytes(pd0_bytes, fmt, offset) + decoded_data["header"] = Pd0TRDI.bin2str(decoded_data["header"]).rstrip("\x00") + decoded_data["true_indicator"] = Pd0TRDI.bin2str(decoded_data["true_indicator"]) + decoded_data["mag_indicator"] = Pd0TRDI.bin2str(decoded_data["mag_indicator"]) + decoded_data["knots_indicator"] = Pd0TRDI.bin2str( + decoded_data["knots_indicator"] + ) + decoded_data["kph_indicator"] = Pd0TRDI.bin2str(decoded_data["kph_indicator"]) + decoded_data["delta_time"] = data["delta_time"] return decoded_data @@ -1073,21 +1163,23 @@ class Pd0TRDI(object): """ # Define format - format = (('header', '10s', 0), - ('depth_ft', 'f', 10), - ('ft_indicator', 'c', 14), - ('depth_m', 'f', 15), - ('m_indicator', 'c', 19), - ('depth_fath', 'f', 20), - ('fath_indicator', 'c', 24)) + fmt = ( + ("header", "10s", 0), + ("depth_ft", "f", 10), + ("ft_indicator", "c", 14), + ("depth_m", "f", 15), + ("m_indicator", "c", 19), + ("depth_fath", "f", 20), + ("fath_indicator", "c", 24), + ) # Decode data - decoded_data = Pd0TRDI.unpack_bytes(pd0_bytes, format, offset) - decoded_data['header'] = Pd0TRDI.bin2str(decoded_data['header']).rstrip('\x00') - decoded_data['ft_indicator'] = Pd0TRDI.bin2str(decoded_data['ft_indicator']) - decoded_data['m_indicator'] = Pd0TRDI.bin2str(decoded_data['m_indicator']) - decoded_data['fath_indicator'] = Pd0TRDI.bin2str(decoded_data['fath_indicator']) - decoded_data['delta_time'] = data['delta_time'] + decoded_data = Pd0TRDI.unpack_bytes(pd0_bytes, fmt, offset) + decoded_data["header"] = Pd0TRDI.bin2str(decoded_data["header"]).rstrip("\x00") + decoded_data["ft_indicator"] = Pd0TRDI.bin2str(decoded_data["ft_indicator"]) + decoded_data["m_indicator"] = Pd0TRDI.bin2str(decoded_data["m_indicator"]) + decoded_data["fath_indicator"] = Pd0TRDI.bin2str(decoded_data["fath_indicator"]) + decoded_data["delta_time"] = data["delta_time"] return decoded_data @@ -1111,21 +1203,26 @@ class 
Pd0TRDI(object): """ # Define format - format = (('header', '10s', 0), - ('heading_deg', 'd', 10), - ('h_true_indicator', 'c', 14)) + fmt = ( + ("header", "10s", 0), + ("heading_deg", "d", 10), + ("h_true_indicator", "c", 14), + ) # Decode data - decoded_data = Pd0TRDI.unpack_bytes(pd0_bytes, format, offset) - decoded_data['header'] = Pd0TRDI.bin2str(decoded_data['header']).rstrip('\x00') - decoded_data['h_true_indicator'] = Pd0TRDI.bin2str(decoded_data['h_true_indicator']) - decoded_data['delta_time'] = data['delta_time'] + decoded_data = Pd0TRDI.unpack_bytes(pd0_bytes, fmt, offset) + decoded_data["header"] = Pd0TRDI.bin2str(decoded_data["header"]).rstrip("\x00") + decoded_data["h_true_indicator"] = Pd0TRDI.bin2str( + decoded_data["h_true_indicator"] + ) + decoded_data["delta_time"] = data["delta_time"] return decoded_data @staticmethod def decode_gga_104(pd0_bytes, offset, data): - """Decodes gga data for WinRiver 2.00 and greater with ADCP's without integrated NMEA data + """Decodes gga data for WinRiver 2.00 and greater with ADCP's without + integrated NMEA data Parameters ---------- @@ -1143,40 +1240,45 @@ class Pd0TRDI(object): """ # Define format - format = (('header', '7s', 0), - ('utc', '10s', 7), - ('lat_deg', 'd', 17), - ('lat_ref', 'c', 25), - ('lon_deg', 'd', 26), - ('lon_ref', 'c', 34), - ('corr_qual', 'B', 35), - ('num_sats', 'B', 36), - ('hdop', 'f', 37), - ('alt', 'f', 41), - ('alt_unit', 'c', 45), - ('geoid', 'f', 46), - ('geoid_unit', 'c', 50), - ('d_gps_age', 'f', 51), - ('ref_stat_id', '<H', 55)) + fmt = ( + ("header", "7s", 0), + ("utc", "10s", 7), + ("lat_deg", "d", 17), + ("lat_ref", "c", 25), + ("lon_deg", "d", 26), + ("lon_ref", "c", 34), + ("corr_qual", "B", 35), + ("num_sats", "B", 36), + ("hdop", "f", 37), + ("alt", "f", 41), + ("alt_unit", "c", 45), + ("geoid", "f", 46), + ("geoid_unit", "c", 50), + ("d_gps_age", "f", 51), + ("ref_stat_id", "<H", 55), + ) # Decode data - decoded_data = Pd0TRDI.unpack_bytes(pd0_bytes, format, offset) - decoded_data['header'] = Pd0TRDI.bin2str(decoded_data['header']).rstrip('\x00') + decoded_data = Pd0TRDI.unpack_bytes(pd0_bytes, fmt, offset) + decoded_data["header"] = Pd0TRDI.bin2str(decoded_data["header"]).rstrip("\x00") try: - decoded_data['utc'] = float(re.findall(b'^\d+\.\d+|\d+', decoded_data['utc'])[0]) + decoded_data["utc"] = float( + re.findall(b"^\d+\.\d+|\d+", decoded_data["utc"])[0] + ) except BaseException: - decoded_data['utc'] = np.nan - decoded_data['lat_ref'] = Pd0TRDI.bin2str(decoded_data['lat_ref']) - decoded_data['lon_ref'] = Pd0TRDI.bin2str(decoded_data['lon_ref']) - decoded_data['geoid_unit'] = Pd0TRDI.bin2str(decoded_data['geoid_unit']) - decoded_data['alt_unit'] = Pd0TRDI.bin2str(decoded_data['alt_unit']) - decoded_data['delta_time'] = data['delta_time'] + decoded_data["utc"] = np.nan + decoded_data["lat_ref"] = Pd0TRDI.bin2str(decoded_data["lat_ref"]) + decoded_data["lon_ref"] = Pd0TRDI.bin2str(decoded_data["lon_ref"]) + decoded_data["geoid_unit"] = Pd0TRDI.bin2str(decoded_data["geoid_unit"]) + decoded_data["alt_unit"] = Pd0TRDI.bin2str(decoded_data["alt_unit"]) + decoded_data["delta_time"] = data["delta_time"] return decoded_data @staticmethod def decode_vtg_105(pd0_bytes, offset, data): - """Decodes vtg data for WinRiver 2.00 and greater with ADCP's without integrated NMEA data + """Decodes vtg data for WinRiver 2.00 and greater with ADCP's without + integrated NMEA data Parameters ---------- @@ -1194,31 +1296,36 @@ class Pd0TRDI(object): """ # Define format - format = (('header', '7s', 0), - 
('course_true', 'f', 7), - ('true_indicator', 'c', 11), - ('course_mag', 'f', 12), - ('mag_indicator', 'c', 16), - ('speed_knots', 'f', 17), - ('knots_indicator', 'c', 21), - ('speed_kph', 'f', 22), - ('kph_indicator', 'c', 26), - ('mode_indicator', 'c', 27)) + fmt = ( + ("header", "7s", 0), + ("course_true", "f", 7), + ("true_indicator", "c", 11), + ("course_mag", "f", 12), + ("mag_indicator", "c", 16), + ("speed_knots", "f", 17), + ("knots_indicator", "c", 21), + ("speed_kph", "f", 22), + ("kph_indicator", "c", 26), + ("mode_indicator", "c", 27), + ) # Decode data - decoded_data = Pd0TRDI.unpack_bytes(pd0_bytes, format, offset) - decoded_data['header'] = Pd0TRDI.bin2str(decoded_data['header']).rstrip('\x00') - decoded_data['true_indicator'] = Pd0TRDI.bin2str(decoded_data['true_indicator']) - decoded_data['mag_indicator'] = Pd0TRDI.bin2str(decoded_data['mag_indicator']) - decoded_data['knots_indicator'] = Pd0TRDI.bin2str(decoded_data['knots_indicator']) - decoded_data['kph_indicator'] = Pd0TRDI.bin2str(decoded_data['kph_indicator']) - decoded_data['delta_time'] = data['delta_time'] + decoded_data = Pd0TRDI.unpack_bytes(pd0_bytes, fmt, offset) + decoded_data["header"] = Pd0TRDI.bin2str(decoded_data["header"]).rstrip("\x00") + decoded_data["true_indicator"] = Pd0TRDI.bin2str(decoded_data["true_indicator"]) + decoded_data["mag_indicator"] = Pd0TRDI.bin2str(decoded_data["mag_indicator"]) + decoded_data["knots_indicator"] = Pd0TRDI.bin2str( + decoded_data["knots_indicator"] + ) + decoded_data["kph_indicator"] = Pd0TRDI.bin2str(decoded_data["kph_indicator"]) + decoded_data["delta_time"] = data["delta_time"] return decoded_data @staticmethod def decode_ds_106(pd0_bytes, offset, data): - """Decodes depth sounder for WinRiver 2.00 and greater with ADCP's without integrated NMEA data + """Decodes depth sounder for WinRiver 2.00 and greater with ADCP's + without integrated NMEA data Parameters ---------- @@ -1236,27 +1343,30 @@ class Pd0TRDI(object): """ # Define format - format = (('header', '7s', 0), - ('depth_ft', 'f', 7), - ('ft_indicator', 'c', 11), - ('depth_m', 'f', 12), - ('m_indicator', 'c', 16), - ('depth_fath', 'f', 17), - ('fath_indicator', 'c', 21)) + fmt = ( + ("header", "7s", 0), + ("depth_ft", "f", 7), + ("ft_indicator", "c", 11), + ("depth_m", "f", 12), + ("m_indicator", "c", 16), + ("depth_fath", "f", 17), + ("fath_indicator", "c", 21), + ) # Decode data - decoded_data = Pd0TRDI.unpack_bytes(pd0_bytes, format, offset) - decoded_data['header'] = Pd0TRDI.bin2str(decoded_data['header']).rstrip('\x00') - decoded_data['ft_indicator'] = Pd0TRDI.bin2str(decoded_data['ft_indicator']) - decoded_data['m_indicator'] = Pd0TRDI.bin2str(decoded_data['m_indicator']) - decoded_data['fath_indicator'] = Pd0TRDI.bin2str(decoded_data['fath_indicator']) - decoded_data['delta_time'] = data['delta_time'] + decoded_data = Pd0TRDI.unpack_bytes(pd0_bytes, fmt, offset) + decoded_data["header"] = Pd0TRDI.bin2str(decoded_data["header"]).rstrip("\x00") + decoded_data["ft_indicator"] = Pd0TRDI.bin2str(decoded_data["ft_indicator"]) + decoded_data["m_indicator"] = Pd0TRDI.bin2str(decoded_data["m_indicator"]) + decoded_data["fath_indicator"] = Pd0TRDI.bin2str(decoded_data["fath_indicator"]) + decoded_data["delta_time"] = data["delta_time"] return decoded_data @staticmethod def decode_ext_heading_107(pd0_bytes, offset, data): - """Decodes external heading for WinRiver 2.00 and greater with ADCP's without integrated NMEA data + """Decodes external heading for WinRiver 2.00 and greater with + ADCP's without integrated 
NMEA data Parameters ---------- @@ -1274,22 +1384,26 @@ class Pd0TRDI(object): """ # Define format - format = (('header', '7s', 0), - ('heading_deg', 'd', 7), - ('h_true_indicator', 'c', 15)) + fmt = ( + ("header", "7s", 0), + ("heading_deg", "d", 7), + ("h_true_indicator", "c", 15), + ) # Decode data - decoded_data = Pd0TRDI.unpack_bytes(pd0_bytes, format, offset) - decoded_data['header'] = Pd0TRDI.bin2str(decoded_data['header']).rstrip('\x00') - if abs(decoded_data['heading_deg']) < 360: + decoded_data = Pd0TRDI.unpack_bytes(pd0_bytes, fmt, offset) + decoded_data["header"] = Pd0TRDI.bin2str(decoded_data["header"]).rstrip("\x00") + if abs(decoded_data["heading_deg"]) < 360: try: - decoded_data['h_true_indicator'] = Pd0TRDI.bin2str(decoded_data['h_true_indicator']) + decoded_data["h_true_indicator"] = Pd0TRDI.bin2str( + decoded_data["h_true_indicator"] + ) except: - decoded_data['h_true_indicator'] = '' + decoded_data["h_true_indicator"] = "" else: - decoded_data['heading_deg'] = np.nan - decoded_data['h_true_indicator'] = '' - decoded_data['delta_time'] = data['delta_time'] + decoded_data["heading_deg"] = np.nan + decoded_data["h_true_indicator"] = "" + decoded_data["delta_time"] = data["delta_time"] return decoded_data @@ -1313,54 +1427,61 @@ class Pd0TRDI(object): """ # Initialize dictionary - decoded_data = {} - decoded_data['header'] = '' - decoded_data['utc'] = np.nan - decoded_data['lat_deg'] = np.nan - decoded_data['lat_ref'] = '' - decoded_data['lon_deg'] = np.nan - decoded_data['lon_ref'] = '' - decoded_data['corr_qual'] = np.nan - decoded_data['num_sats'] = np.nan - decoded_data['hdop'] = np.nan - decoded_data['alt'] = np.nan - decoded_data['alt_unit'] = '' - decoded_data['geoid'] = '' - decoded_data['geoid_unit'] = '' - decoded_data['d_gps_age'] = np.nan - decoded_data['ref_stat_id'] = np.nan - decoded_data['delta_time'] = np.nan + decoded_data = { + "header": "", + "utc": np.nan, + "lat_deg": np.nan, + "lat_ref": "", + "lon_deg": np.nan, + "lon_ref": "", + "corr_qual": np.nan, + "num_sats": np.nan, + "hdop": np.nan, + "alt": np.nan, + "alt_unit": "", + "geoid": "", + "geoid_unit": "", + "d_gps_age": np.nan, + "ref_stat_id": np.nan, + "delta_time": np.nan, + } # Decode NMEA sentence and split into an array - format = str(data['msg_size']) + 'c' - sentence = Pd0TRDI.bin2str(b''.join(list(struct.unpack(format, pd0_bytes[offset: offset + data['msg_size']])))) - temp_array = np.array(sentence.split(',')) - temp_array[temp_array == '999.9'] = '' + fmt = str(data["msg_size"]) + "c" + sentence = Pd0TRDI.bin2str( + b"".join( + list(struct.unpack(fmt, pd0_bytes[offset : offset + data["msg_size"]])) + ) + ) + temp_array = np.array(sentence.split(",")) + temp_array[temp_array == "999.9"] = "" # Assign parts of array to dictionary try: - decoded_data['delta_time'] = data['delta_time'] - decoded_data['header'] = temp_array[0] - decoded_data['utc'] = valid_number(temp_array[1]) + decoded_data["delta_time"] = data["delta_time"] + decoded_data["header"] = temp_array[0] + decoded_data["utc"] = valid_number(temp_array[1]) lat_str = temp_array[2] lat_deg = valid_number(lat_str[0:2]) - decoded_data['lat_deg'] = lat_deg + valid_number(lat_str[2:]) / 60 - decoded_data['lat_ref'] = temp_array[3] + decoded_data["lat_deg"] = lat_deg + valid_number(lat_str[2:]) / 60 + decoded_data["lat_ref"] = temp_array[3] lon_str = temp_array[4] lon_num = valid_number(lon_str) - lon_deg = np.floor(lon_num / 100.) - decoded_data['lon_deg'] = lon_deg + (((lon_num / 100.) - lon_deg) * 100.) / 60. 
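# Worked example of the NMEA ddmm.mmmm -> decimal-degrees conversion used
# in decode_gga_204 above: the leading digit pair(s) are whole degrees and
# the remainder is minutes. The coordinate values are illustrative.
lat_str = "4916.45"                          # 49 deg, 16.45 min
lat_deg = float(lat_str[0:2]) + float(lat_str[2:]) / 60
assert abs(lat_deg - 49.2741667) < 1e-6

lon_num = 12311.12                           # 123 deg, 11.12 min
lon_whole = lon_num // 100
lon_deg = lon_whole + ((lon_num / 100.0) - lon_whole) * 100.0 / 60.0
assert abs(lon_deg - 123.1853333) < 1e-6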
- decoded_data['lon_ref'] = temp_array[5] - decoded_data['corr_qual'] = valid_number(temp_array[6]) - decoded_data['num_sats'] = valid_number(temp_array[7]) - decoded_data['hdop'] = valid_number(temp_array[8]) - decoded_data['alt'] = valid_number(temp_array[9]) - decoded_data['alt_unit'] = temp_array[10] - decoded_data['geoid'] = temp_array[11] - decoded_data['geoid_unit'] = temp_array[12] - decoded_data['d_gps_age'] = valid_number(temp_array[13]) - idx_star = temp_array[14].find('*') - decoded_data['ref_stat_id'] = valid_number(temp_array[15][:idx_star]) + lon_deg = np.floor(lon_num / 100.0) + decoded_data["lon_deg"] = ( + lon_deg + (((lon_num / 100.0) - lon_deg) * 100.0) / 60.0 + ) + decoded_data["lon_ref"] = temp_array[5] + decoded_data["corr_qual"] = valid_number(temp_array[6]) + decoded_data["num_sats"] = valid_number(temp_array[7]) + decoded_data["hdop"] = valid_number(temp_array[8]) + decoded_data["alt"] = valid_number(temp_array[9]) + decoded_data["alt_unit"] = temp_array[10] + decoded_data["geoid"] = temp_array[11] + decoded_data["geoid_unit"] = temp_array[12] + decoded_data["d_gps_age"] = valid_number(temp_array[13]) + idx_star = temp_array[14].find("*") + decoded_data["ref_stat_id"] = valid_number(temp_array[15][:idx_star]) except (ValueError, EOFError, IndexError): pass @@ -1387,39 +1508,44 @@ class Pd0TRDI(object): """ # Initialize dictionary - decoded_data = {} - decoded_data['header'] = '' - decoded_data['course_true'] = np.nan - decoded_data['true_indicator'] = '' - decoded_data['course_mag'] = np.nan - decoded_data['mag_indicator'] = '' - decoded_data['speed_knots'] = np.nan - decoded_data['knots_indicator'] = '' - decoded_data['speed_kph'] = np.nan - decoded_data['kph_indicator'] = '' - decoded_data['mode_indicator'] = '' - decoded_data['delta_time'] = np.nan + decoded_data = { + "header": "", + "course_true": np.nan, + "true_indicator": "", + "course_mag": np.nan, + "mag_indicator": "", + "speed_knots": np.nan, + "knots_indicator": "", + "speed_kph": np.nan, + "kph_indicator": "", + "mode_indicator": "", + "delta_time": np.nan, + } # Decode NMEA sentence and split into an array - format = str(data['msg_size']) + 'c' - sentence = Pd0TRDI.bin2str(b''.join(list(struct.unpack(format, pd0_bytes[offset: offset + data['msg_size']])))) - temp_array = np.array(sentence.split(',')) - temp_array[temp_array == '999.9'] = '' + fmt = str(data["msg_size"]) + "c" + sentence = Pd0TRDI.bin2str( + b"".join( + list(struct.unpack(fmt, pd0_bytes[offset : offset + data["msg_size"]])) + ) + ) + temp_array = np.array(sentence.split(",")) + temp_array[temp_array == "999.9"] = "" # Assign parts of array to dictionary try: - decoded_data['vtg_header'] = temp_array[0] - decoded_data['course_true'] = valid_number(temp_array[1]) - decoded_data['true_indicator'] = temp_array[2] - decoded_data['course_mag'] = valid_number(temp_array[3]) - decoded_data['mag_indicator'] = temp_array[4] - decoded_data['speed_knots'] = valid_number(temp_array[5]) - decoded_data['knots_indicator'] = temp_array[6] - decoded_data['speed_kph'] = valid_number(temp_array[7]) - decoded_data['kph_indicator'] = temp_array[8] - idx_star = temp_array[9].find('*') - decoded_data['mode_indicator'] = temp_array[9][:idx_star] - decoded_data['delta_time'] = data['delta_time'] + decoded_data["vtg_header"] = temp_array[0] + decoded_data["course_true"] = valid_number(temp_array[1]) + decoded_data["true_indicator"] = temp_array[2] + decoded_data["course_mag"] = valid_number(temp_array[3]) + decoded_data["mag_indicator"] = temp_array[4] + 
decoded_data["speed_knots"] = valid_number(temp_array[5]) + decoded_data["knots_indicator"] = temp_array[6] + decoded_data["speed_kph"] = valid_number(temp_array[7]) + decoded_data["kph_indicator"] = temp_array[8] + idx_star = temp_array[9].find("*") + decoded_data["mode_indicator"] = temp_array[9][:idx_star] + decoded_data["delta_time"] = data["delta_time"] except (ValueError, EOFError, IndexError): pass @@ -1446,33 +1572,38 @@ class Pd0TRDI(object): """ # Initialize dictionary - decoded_data = {} - decoded_data['header'] = '' - decoded_data['depth_ft'] = np.nan - decoded_data['ft_indicator'] = '' - decoded_data['depth_m'] = np.nan - decoded_data['m_indicator'] = '' - decoded_data['depth_fath'] = np.nan - decoded_data['fath_indicator'] = '' - decoded_data['delta_time'] = np.nan + decoded_data = { + "header": "", + "depth_ft": np.nan, + "ft_indicator": "", + "depth_m": np.nan, + "m_indicator": "", + "depth_fath": np.nan, + "fath_indicator": "", + "delta_time": np.nan, + } # Decode NMEA sentence and split into an array - format = str(data['msg_size']) + 'c' - sentence = Pd0TRDI.bin2str(b''.join(list(struct.unpack(format, pd0_bytes[offset: offset + data['msg_size']])))) - temp_array = np.array(sentence.split(',')) - temp_array[temp_array == '999.9'] = '' + fmt = str(data["msg_size"]) + "c" + sentence = Pd0TRDI.bin2str( + b"".join( + list(struct.unpack(fmt, pd0_bytes[offset : offset + data["msg_size"]])) + ) + ) + temp_array = np.array(sentence.split(",")) + temp_array[temp_array == "999.9"] = "" # Assign parts of array to dictionary try: - decoded_data['dbt_header'] = temp_array[0] - decoded_data['depth_ft'] = valid_number(temp_array[1]) - decoded_data['ft_indicator'] = temp_array[2] - decoded_data['depth_m'] = valid_number(temp_array[3]) - decoded_data['m_indicator'] = temp_array[4] - decoded_data['depth_fath'] = valid_number(temp_array[5]) - idx_star = temp_array[6].find('*') - decoded_data['fath_indicator'] = temp_array[6][:idx_star] - decoded_data['delta_time'] = data['delta_time'] + decoded_data["dbt_header"] = temp_array[0] + decoded_data["depth_ft"] = valid_number(temp_array[1]) + decoded_data["ft_indicator"] = temp_array[2] + decoded_data["depth_m"] = valid_number(temp_array[3]) + decoded_data["m_indicator"] = temp_array[4] + decoded_data["depth_fath"] = valid_number(temp_array[5]) + idx_star = temp_array[6].find("*") + decoded_data["fath_indicator"] = temp_array[6][:idx_star] + decoded_data["delta_time"] = data["delta_time"] except (ValueError, EOFError, IndexError): pass @@ -1499,25 +1630,30 @@ class Pd0TRDI(object): """ # Initialize dictionary - decoded_data = {} - decoded_data['header'] = '' - decoded_data['heading_deg'] = np.nan - decoded_data['h_true_indicator'] = '' - decoded_data['delta_time'] = np.nan + decoded_data = { + "header": "", + "heading_deg": np.nan, + "h_true_indicator": "", + "delta_time": np.nan, + } # Decode NMEA sentence and split into an array - format = str(data['msg_size']) + 'c' - sentence = Pd0TRDI.bin2str(b''.join(list(struct.unpack(format, pd0_bytes[offset: offset + data['msg_size']])))) - temp_array = np.array(sentence.split(',')) - temp_array[temp_array == '999.9'] = '' + fmt = str(data["msg_size"]) + "c" + sentence = Pd0TRDI.bin2str( + b"".join( + list(struct.unpack(fmt, pd0_bytes[offset : offset + data["msg_size"]])) + ) + ) + temp_array = np.array(sentence.split(",")) + temp_array[temp_array == "999.9"] = "" # Assign parts of array to dictionary try: - decoded_data['header'] = temp_array[0] - decoded_data['heading_deg'] = 
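# The find("*") trimming above strips the NMEA checksum that rides on the
# final field of a sentence. Minimal illustration (note that, as in the
# code above, a field without "*" would return -1 and silently drop its
# last character):
field = "T*2C"
assert field[:field.find("*")] == "T"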
valid_number(temp_array[1]) - idx_star = temp_array[2].find('*') - decoded_data['h_true_indicator'] = temp_array[2][:idx_star] - decoded_data['delta_time'] = data['delta_time'] + decoded_data["header"] = temp_array[0] + decoded_data["heading_deg"] = valid_number(temp_array[1]) + idx_star = temp_array[2].find("*") + decoded_data["h_true_indicator"] = temp_array[2][:idx_star] + decoded_data["delta_time"] = data["delta_time"] except (ValueError, EOFError, IndexError): pass @@ -1543,7 +1679,7 @@ class Pd0TRDI(object): Dictionary of decoded data """ - return Pd0TRDI.decode_nmea_sentence(pd0_bytes, offset, data, 'dbt_sentence') + return Pd0TRDI.decode_nmea_sentence(pd0_bytes, offset, data, "dbt_sentence") @staticmethod def decode_gga_sentence(pd0_bytes, offset, data): @@ -1564,7 +1700,7 @@ class Pd0TRDI(object): Dictionary of decoded data """ - return Pd0TRDI.decode_nmea_sentence(pd0_bytes, offset, data, 'gga_sentence') + return Pd0TRDI.decode_nmea_sentence(pd0_bytes, offset, data, "gga_sentence") @staticmethod def decode_vtg_sentence(pd0_bytes, offset, data): @@ -1585,7 +1721,7 @@ class Pd0TRDI(object): Dictionary of decoded data """ - return Pd0TRDI.decode_nmea_sentence(pd0_bytes, offset, data, 'vtg_sentence') + return Pd0TRDI.decode_nmea_sentence(pd0_bytes, offset, data, "vtg_sentence") @staticmethod def decode_gsa_sentence(pd0_bytes, offset, data): @@ -1606,7 +1742,7 @@ class Pd0TRDI(object): Dictionary of decoded data """ - return Pd0TRDI.decode_nmea_sentence(pd0_bytes, offset, data, 'gsa_sentence') + return Pd0TRDI.decode_nmea_sentence(pd0_bytes, offset, data, "gsa_sentence") @staticmethod def decode_nmea_sentence(pd0_bytes, offset, data, target): @@ -1630,25 +1766,28 @@ class Pd0TRDI(object): """ # Compute number of characters in the sentence - offset_idx = data['header']['address_offsets'].index(offset) + offset_idx = data["header"]["address_offsets"].index(offset) - if offset_idx + 1 == data['header']['number_of_data_types']: - end_offset = data['header']['number_of_bytes'] + if offset_idx + 1 == data["header"]["number_of_data_types"]: + end_offset = data["header"]["number_of_bytes"] else: - end_offset = data['header']['address_offsets'][offset_idx + 1] - number_of_characters = end_offset - data['header']['address_offsets'][offset_idx] + end_offset = data["header"]["address_offsets"][offset_idx + 1] + number_of_characters = ( + end_offset - data["header"]["address_offsets"][offset_idx] + ) # Generate format string - format_str = str(number_of_characters - 4) + 'c' - format = (('sentence', format_str, 0)) - offset = data['header']['address_offsets'][offset_idx] + format_str = str(number_of_characters - 4) + "c" + offset = data["header"]["address_offsets"][offset_idx] # Decode data - sentence = struct.unpack(format_str, pd0_bytes[offset + 4: offset + number_of_characters ]) + sentence = struct.unpack( + format_str, pd0_bytes[offset + 4 : offset + number_of_characters] + ) try: - end_of_sentence = sentence.index(b'\n') + 1 - sentence = b''.join(sentence[0:end_of_sentence]).decode('utf-8') + end_of_sentence = sentence.index(b"\n") + 1 + sentence = b"".join(sentence[0:end_of_sentence]).decode("utf-8") except ValueError: - sentence = '' + sentence = "" # Create or add to list of target sentences if target in data: decoded_data = data[target] @@ -1676,12 +1815,16 @@ class Pd0TRDI(object): surface_leader_data:dict Dictionary of decoded data """ - surface_leader_format = (('id', '<H', 0), - ('cell_count', 'B', 2), - ('cell_size', '<H', 3), - ('range_cell_1', '<H', 5)) + surface_leader_format = ( 
+ ("id", "<H", 0), + ("cell_count", "B", 2), + ("cell_size", "<H", 3), + ("range_cell_1", "<H", 5), + ) - surface_leader_data = Pd0TRDI.unpack_bytes(pd0_bytes, surface_leader_format, offset) + surface_leader_data = Pd0TRDI.unpack_bytes( + pd0_bytes, surface_leader_format, offset + ) return surface_leader_data @staticmethod @@ -1702,12 +1845,14 @@ class Pd0TRDI(object): surface_velocity_data:dict Dictionary of decoded data """ - surface_velocity_format = (('id', '<H', 0),) + surface_velocity_format = (("id", "<H", 0),) - surface_velocity_data = Pd0TRDI.unpack_bytes(pd0_bytes, surface_velocity_format, offset) - surface_velocity_data['velocity'] = Pd0TRDI.decode_per_cell_per_beam(pd0_bytes, offset + 2, - data['surface_leader']['cell_count'], - 4, '<h') + surface_velocity_data = Pd0TRDI.unpack_bytes( + pd0_bytes, surface_velocity_format, offset + ) + surface_velocity_data["velocity"] = Pd0TRDI.decode_per_cell_per_beam( + pd0_bytes, offset + 2, data["surface_leader"]["cell_count"], 4, "<h" + ) return surface_velocity_data @staticmethod @@ -1728,12 +1873,14 @@ class Pd0TRDI(object): surface_velocity_data:dict Dictionary of decoded data """ - surface_correlation_format = (('id', '<H', 0),) + surface_correlation_format = (("id", "<H", 0),) - surface_correlation_data = Pd0TRDI.unpack_bytes(pd0_bytes, surface_correlation_format, offset) - surface_correlation_data['correlation'] = Pd0TRDI.decode_per_cell_per_beam(pd0_bytes, offset + 2, - data['surface_leader']['cell_count'], - 4, 'B') + surface_correlation_data = Pd0TRDI.unpack_bytes( + pd0_bytes, surface_correlation_format, offset + ) + surface_correlation_data["correlation"] = Pd0TRDI.decode_per_cell_per_beam( + pd0_bytes, offset + 2, data["surface_leader"]["cell_count"], 4, "B" + ) return surface_correlation_data @staticmethod @@ -1754,12 +1901,12 @@ class Pd0TRDI(object): surface_rssi_data:dict Dictionary of decoded data """ - surface_rssi_format = (('id', '<H', 0),) + surface_rssi_format = (("id", "<H", 0),) surface_rssi_data = Pd0TRDI.unpack_bytes(pd0_bytes, surface_rssi_format, offset) - surface_rssi_data['rssi'] = Pd0TRDI.decode_per_cell_per_beam(pd0_bytes, offset + 2, - data['surface_leader']['cell_count'], - 4, 'B') + surface_rssi_data["rssi"] = Pd0TRDI.decode_per_cell_per_beam( + pd0_bytes, offset + 2, data["surface_leader"]["cell_count"], 4, "B" + ) return surface_rssi_data @staticmethod @@ -1780,12 +1927,14 @@ class Pd0TRDI(object): surface_per_good_data:dict Dictionary of decoded data """ - surface_per_good_format = (('id', '<H', 0),) + surface_per_good_format = (("id", "<H", 0),) - surface_per_good_data = Pd0TRDI.unpack_bytes(pd0_bytes, surface_per_good_format, offset) - surface_per_good_data['percent_good'] = Pd0TRDI.decode_per_cell_per_beam(pd0_bytes, offset + 2, - data['surface_leader']['cell_count'], - 4, 'B') + surface_per_good_data = Pd0TRDI.unpack_bytes( + pd0_bytes, surface_per_good_format, offset + ) + surface_per_good_data["percent_good"] = Pd0TRDI.decode_per_cell_per_beam( + pd0_bytes, offset + 2, data["surface_leader"]["cell_count"], 4, "B" + ) return surface_per_good_data @staticmethod @@ -1806,12 +1955,14 @@ class Pd0TRDI(object): surface_statusdata:dict Dictionary of decoded data """ - surface_status_format = (('id', '<H', 0),) + surface_status_format = (("id", "<H", 0),) - surface_status_data = Pd0TRDI.unpack_bytes(pd0_bytes, surface_status_format, offset) - surface_status_data['percent_good'] = Pd0TRDI.decode_per_cell_per_beam(pd0_bytes, offset + 2, - data['surface_leader']['cell_count'], - 4, 'B') + 
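# --- Illustrative sketch, not part of the patch ---------------------------
# unpack_bytes itself is defined elsewhere in this file. A plausible
# minimal equivalent, inferred from the (name, struct format, relative
# offset) tuples used throughout these decoders (a sketch, not the
# project's actual implementation):
import struct

def unpack_bytes(pd0_bytes, data_format_tuples, offset=0):
    data = {}
    for name, fmt, field_offset in data_format_tuples:
        start = offset + field_offset
        size = struct.calcsize(fmt)
        data[name] = struct.unpack(fmt, pd0_bytes[start : start + size])[0]
    return data

surface_leader_format = (
    ("id", "<H", 0),
    ("cell_count", "B", 2),
    ("cell_size", "<H", 3),
    ("range_cell_1", "<H", 5),
)
# 7-byte toy block: id=0x0010, 5 cells of 20 cm starting at 50 cm
block = struct.pack("<HBHH", 0x0010, 5, 20, 50)
print(unpack_bytes(block, surface_leader_format))
# ---------------------------------------------------------------------------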
surface_status_data = Pd0TRDI.unpack_bytes( + pd0_bytes, surface_status_format, offset + ) + surface_status_data["percent_good"] = Pd0TRDI.decode_per_cell_per_beam( + pd0_bytes, offset + 2, data["surface_leader"]["cell_count"], 4, "B" + ) return surface_status_data @staticmethod @@ -1832,27 +1983,32 @@ class Pd0TRDI(object): auto_config_data:dict Dictionary of decoded data """ - auto_config_leader_format = (('id', '<H', 0), ('beam_count', 'B', 2)) - auto_config_beam_format = (('setup', 'B', 0), - ('depth', '<H', 1), - ('ping_count', 'B', 3), - ('ping_type', 'B', 4), - ('cell_count', '<H', 5), - ('cell_size', '<H', 7), - ('bin_1_mid', '<H', 9), - ('code_reps', 'B', 11), - ('transmit_length', '<H', 12), - ('lag_length', '<H', 15), - ('transmit_bandwidth', 'B', 16), - ('receive_bandwidth', 'B', 17), - ('min_ping_interval', '<H', 18)) - auto_config_data = {} - auto_config_data['leader'] = Pd0TRDI.unpack_bytes(pd0_bytes, auto_config_leader_format, offset) - - for n in range(1, auto_config_data['leader']['beam_count'] + 1): - label = 'beam_' + str(n) + auto_config_leader_format = (("id", "<H", 0), ("beam_count", "B", 2)) + auto_config_beam_format = ( + ("setup", "B", 0), + ("depth", "<H", 1), + ("ping_count", "B", 3), + ("ping_type", "B", 4), + ("cell_count", "<H", 5), + ("cell_size", "<H", 7), + ("bin_1_mid", "<H", 9), + ("code_reps", "B", 11), + ("transmit_length", "<H", 12), + ("lag_length", "<H", 15), + ("transmit_bandwidth", "B", 16), + ("receive_bandwidth", "B", 17), + ("min_ping_interval", "<H", 18), + ) + auto_config_data = { + "leader": Pd0TRDI.unpack_bytes(pd0_bytes, auto_config_leader_format, offset) + } + + for n in range(1, auto_config_data["leader"]["beam_count"] + 1): + label = "beam_" + str(n) beam_offset = offset + 3 + (20 * (n - 1)) - auto_config_data[label] = Pd0TRDI.unpack_bytes(pd0_bytes, auto_config_beam_format, beam_offset) + auto_config_data[label] = Pd0TRDI.unpack_bytes( + pd0_bytes, auto_config_beam_format, beam_offset + ) return auto_config_data @@ -1874,13 +2030,17 @@ class Pd0TRDI(object): vertical_beam_data:dict Dictionary of decoded data """ - vertical_beam_format = (('id', '<H', 0), - ('eval_amp', 'B', 2), - ('rssi', 'B', 3), - ('range', 'L', 4), - ('status', 'B', 8)) + vertical_beam_format = ( + ("id", "<H", 0), + ("eval_amp", "B", 2), + ("rssi", "B", 3), + ("range", "L", 4), + ("status", "B", 8), + ) - vertical_beam_data = Pd0TRDI.unpack_bytes(pd0_bytes, vertical_beam_format, offset) + vertical_beam_data = Pd0TRDI.unpack_bytes( + pd0_bytes, vertical_beam_format, offset + ) return vertical_beam_data @staticmethod @@ -1901,8 +2061,8 @@ class Pd0TRDI(object): matrix_data:dict Dictionary of decoded data """ - matrix_id_format = (('id', '<H', 0),) - matrix_data_format = (('element', '<h', 0),) + matrix_id_format = (("id", "<H", 0),) + matrix_data_format = (("element", "<h", 0),) matrix_data = Pd0TRDI.unpack_bytes(pd0_bytes, matrix_id_format, offset) matrix = [] @@ -1910,10 +2070,13 @@ class Pd0TRDI(object): row_list = [] for col in range(4): offset = offset + 2 - # row.append(struct.unpack('<H', pd0_bytes[offset: offset + 2])[0]) - row_list.append(Pd0TRDI.unpack_bytes(pd0_bytes, matrix_data_format, offset)['element']) + row_list.append( + Pd0TRDI.unpack_bytes(pd0_bytes, matrix_data_format, offset)[ + "element" + ] + ) matrix.append(row_list) - matrix_data['matrix'] = matrix + matrix_data["matrix"] = matrix return matrix_data @@ -1945,29 +2108,30 @@ class Hdr(object): n_types: int Number of data types """ - self.bytes_per_ens = nans(n_ensembles) - self.data_offsets = 
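# --- Illustrative sketch, not part of the patch ---------------------------
# decode_per_cell_per_beam is referenced above but not shown in this hunk.
# A minimal sketch consistent with its call sites, i.e.
# (pd0_bytes, start offset, number of cells, values per cell, struct
# format per value); the real helper may differ in detail:
import struct

def decode_per_cell_per_beam(pd0_bytes, offset, number_of_cells, number_of_beams, fmt):
    size = struct.calcsize(fmt)
    cells = []
    for cell in range(number_of_cells):
        beams = []
        for beam in range(number_of_beams):
            start = offset + size * (cell * number_of_beams + beam)
            beams.append(struct.unpack(fmt, pd0_bytes[start : start + size])[0])
        cells.append(beams)
    return cells

# Two cells, four beams of little-endian int16 velocities (mm/s)
raw = struct.pack("<8h", 100, -100, 50, -50, 200, -200, 75, -75)
print(decode_per_cell_per_beam(raw, 0, 2, 4, "<h"))
# ---------------------------------------------------------------------------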
nans([n_ensembles, n_types]) - self.n_data_types = nans(n_ensembles) - self.data_ok = nans(n_ensembles) - self.invalid = [''] * n_ensembles + self.bytes_per_ens = nans((n_ensembles,)) + self.data_offsets = nans((n_ensembles, n_types)) + self.n_data_types = nans((n_ensembles,)) + self.data_ok = nans((n_ensembles,)) + self.invalid = [""] * n_ensembles def populate_data(self, n_ens, data): """Populates the class with data for an ensemble. Parameters ---------- - i_ens: int + n_ens: int Ensemble index data: dict Dictionary of all data for this ensemble """ - if 'header' in data: - self.bytes_per_ens[n_ens] = data['header']['number_of_bytes'] - self.data_offsets[n_ens, :len(data['header']['address_offsets'])] = \ - np.array(data['header']['address_offsets']) - self.n_data_types[n_ens] = data['header']['number_of_data_types'] - self.invalid[n_ens] = data['header']['invalid'] + if "header" in data: + self.bytes_per_ens[n_ens] = data["header"]["number_of_bytes"] + self.data_offsets[ + n_ens, : len(data["header"]["address_offsets"]) + ] = np.array(data["header"]["address_offsets"]) + self.n_data_types[n_ens] = data["header"]["number_of_data_types"] + self.invalid[n_ens] = data["header"]["invalid"] class Inst(object): @@ -2010,18 +2174,18 @@ class Inst(object): Number of ensembles """ - #TODO change n_ensembles to (ensembles,) - self.beam_ang = nans(n_ensembles) - self.beams = nans(n_ensembles) - self.data_type = [''] * n_ensembles - self.firm_ver = nans(n_ensembles) - self.freq = nans(n_ensembles) - self.pat = [''] * n_ensembles + # TODO change n_ensembles to (ensembles,) + self.beam_ang = nans((n_ensembles,)) + self.beams = nans((n_ensembles,)) + self.data_type = [""] * n_ensembles + self.firm_ver = nans((n_ensembles,)) + self.freq = nans((n_ensembles,)) + self.pat = [""] * n_ensembles self.res_RDI = 0 - self.sensor_CFG = nans(n_ensembles) - self.xducer = [''] * n_ensembles + self.sensor_CFG = nans((n_ensembles,)) + self.xducer = [""] * n_ensembles self.t_matrix = np.tile([np.nan], [4, 4]) - self.demod = nans(n_ensembles) + self.demod = nans((n_ensembles,)) self.serial_number = np.nan def populate_data(self, i_ens, data): @@ -2035,13 +2199,13 @@ class Inst(object): Dictionary of all data for this ensemble """ - - if 'fixed_leader' in data: - self.firm_ver[i_ens] = data['fixed_leader']['cpu_firmware_version'] + \ - (data['fixed_leader']['cpu_firmware_revision'] / 100) + if "fixed_leader" in data: + self.firm_ver[i_ens] = data["fixed_leader"]["cpu_firmware_version"] + ( + data["fixed_leader"]["cpu_firmware_revision"] / 100 + ) # Convert system_configuration_ls to individual bits - bitls = "{0:08b}".format(data['fixed_leader']['system_configuration_ls']) + bitls = "{0:08b}".format(data["fixed_leader"]["system_configuration_ls"]) val = int(bitls[5:], 2) if val == 0: self.freq[i_ens] = 75 @@ -2060,24 +2224,24 @@ class Inst(object): val = int(bitls[4], 2) if val == 0: - self.pat[i_ens] = 'Concave' + self.pat[i_ens] = "Concave" elif val == 1: - self.pat[i_ens] = 'Convex' + self.pat[i_ens] = "Convex" else: - self.pat[i_ens] = 'n/a' + self.pat[i_ens] = "n/a" self.sensor_CFG[i_ens] = int(bitls[2:3], 2) + 1 val = int(bitls[1], 2) if val == 0: - self.xducer[i_ens] = 'Not Attached' + self.xducer[i_ens] = "Not Attached" elif val == 1: - self.xducer[i_ens] = 'Attached' + self.xducer[i_ens] = "Attached" else: - self.xducer[i_ens] = 'n/a' + self.xducer[i_ens] = "n/a" # Convert system_configuration_ms to individual bits - bitms = "{0:08b}".format(data['fixed_leader']['system_configuration_ms']) + bitms = 
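# --- Illustrative sketch, not part of the patch ---------------------------
# The frequency/beam-pattern decoding above works on the MSB-first binary
# string of system_configuration_ls. A compact sketch of the same idea;
# only the 75 kHz branch is visible in the hunk above, so the remaining
# frequency codes here follow TRDI's usual sequence and are an assumption,
# and the byte value is made up:
system_configuration_ls = 0b01001010

bitls = "{0:08b}".format(system_configuration_ls)
freq_khz = {0: 75, 1: 150, 2: 300, 3: 600, 4: 1200, 5: 2400}.get(int(bitls[5:], 2))
pattern = "Concave" if int(bitls[4], 2) == 0 else "Convex"
xducer = "Attached" if int(bitls[1], 2) == 1 else "Not Attached"
print(freq_khz, pattern, xducer)  # 300 Convex Attached
# ---------------------------------------------------------------------------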
"{0:08b}".format(data["fixed_leader"]["system_configuration_ms"]) val = int(bitms[6:], 2) if val == 0: @@ -2104,17 +2268,17 @@ class Inst(object): self.beams[i_ens] = np.nan self.demod[i_ens] = np.nan - if data['fixed_leader']['simulation_data_flag'] == 0: - self.data_type[i_ens] = 'Real' + if data["fixed_leader"]["simulation_data_flag"] == 0: + self.data_type[i_ens] = "Real" else: - self.data_type[i_ens] = 'Simu' + self.data_type[i_ens] = "Simu" - self.serial_number = data['fixed_leader']['serial_number'] + self.serial_number = data["fixed_leader"]["serial_number"] - if 'transformation_matrix' in data: + if "transformation_matrix" in data: self.res_RDI = 0 # Scale transformation matrix - self.t_matrix = np.array(data['transformation_matrix']['matrix']) / 10000 + self.t_matrix = np.array(data["transformation_matrix"]["matrix"]) / 10000 class AutoMode(object): @@ -2143,12 +2307,12 @@ class AutoMode(object): n_ensembles: int Number of ensembles """ - self.beam_count = nans(n_ensembles) + self.beam_count = nans((n_ensembles,)) self.Beam1 = Beam(n_ensembles) self.Beam2 = Beam(n_ensembles) self.Beam3 = Beam(n_ensembles) self.Beam4 = Beam(n_ensembles) - self.Reserved = nans(n_ensembles) + self.Reserved = nans((n_ensembles,)) def populate_data(self, i_ens, data): """Populates the class with data for an ensemble. @@ -2161,12 +2325,12 @@ class AutoMode(object): Dictionary of all data for this ensemble """ - if 'auto_configuration' in data: - self.beam_count[i_ens] = data['auto_configuration']['leader']['beam_count'] - self.Beam1.populate_data(i_ens, data['auto_configuration']['beam_1']) - self.Beam2.populate_data(i_ens, data['auto_configuration']['beam_2']) - self.Beam3.populate_data(i_ens, data['auto_configuration']['beam_3']) - self.Beam4.populate_data(i_ens, data['auto_configuration']['beam_4']) + if "auto_configuration" in data: + self.beam_count[i_ens] = data["auto_configuration"]["leader"]["beam_count"] + self.Beam1.populate_data(i_ens, data["auto_configuration"]["beam_1"]) + self.Beam2.populate_data(i_ens, data["auto_configuration"]["beam_2"]) + self.Beam3.populate_data(i_ens, data["auto_configuration"]["beam_3"]) + self.Beam4.populate_data(i_ens, data["auto_configuration"]["beam_4"]) class Beam(object): @@ -2211,19 +2375,19 @@ class Beam(object): Number of ensembles """ - self.mode = nans(n_ensembles) - self.depth_cm = nans(n_ensembles) - self.ping_count = nans(n_ensembles) - self.ping_type = nans(n_ensembles) - self.cell_count = nans(n_ensembles) - self.cell_size_cm = nans(n_ensembles) - self.cell_mid_cm = nans(n_ensembles) - self.code_repeat = nans(n_ensembles) - self.trans_length_cm = nans(n_ensembles) - self.lag_length_cm = nans(n_ensembles) - self.transmit_bw = nans(n_ensembles) - self.receive_bw = nans(n_ensembles) - self.ping_interval_ms = nans(n_ensembles) + self.mode = nans((n_ensembles,)) + self.depth_cm = nans((n_ensembles,)) + self.ping_count = nans((n_ensembles,)) + self.ping_type = nans((n_ensembles,)) + self.cell_count = nans((n_ensembles,)) + self.cell_size_cm = nans((n_ensembles,)) + self.cell_mid_cm = nans((n_ensembles,)) + self.code_repeat = nans((n_ensembles,)) + self.trans_length_cm = nans((n_ensembles,)) + self.lag_length_cm = nans((n_ensembles,)) + self.transmit_bw = nans((n_ensembles,)) + self.receive_bw = nans((n_ensembles,)) + self.ping_interval_ms = nans((n_ensembles,)) def populate_data(self, i_ens, beam_data): """Populates the class with data for an ensemble. 
@@ -2232,23 +2396,23 @@ class Beam(object):
         ----------
         i_ens: int
             Ensemble index
-        data: dict
+        beam_data: dict
             Dictionary of all data for this ensemble
         """

-        self.mode = beam_data['setup']
-        self.depth_cm = beam_data['depth']
-        self.ping_count = beam_data['ping_count']
-        self.ping_type = beam_data['ping_type']
-        self.cell_count = beam_data['cell_count']
-        self.cell_size_cm = beam_data['cell_size']
-        self.cell_mid_cm = beam_data['bin_1_mid']
-        self.code_repeat = beam_data['code_reps']
-        self.trans_length_cm = beam_data['transmit_length']
-        self.lag_length_cm = beam_data['lag_length']
-        self.transmit_bw = beam_data['transmit_bandwidth']
-        self.receive_bw = beam_data['receive_bandwidth']
-        self.ping_interval_ms = beam_data['min_ping_interval']
+        self.mode[i_ens] = beam_data["setup"]
+        self.depth_cm[i_ens] = beam_data["depth"]
+        self.ping_count[i_ens] = beam_data["ping_count"]
+        self.ping_type[i_ens] = beam_data["ping_type"]
+        self.cell_count[i_ens] = beam_data["cell_count"]
+        self.cell_size_cm[i_ens] = beam_data["cell_size"]
+        self.cell_mid_cm[i_ens] = beam_data["bin_1_mid"]
+        self.code_repeat[i_ens] = beam_data["code_reps"]
+        self.trans_length_cm[i_ens] = beam_data["transmit_length"]
+        self.lag_length_cm[i_ens] = beam_data["lag_length"]
+        self.transmit_bw[i_ens] = beam_data["transmit_bandwidth"]
+        self.receive_bw[i_ens] = beam_data["receive_bandwidth"]
+        self.ping_interval_ms[i_ens] = beam_data["min_ping_interval"]


class Bt(object):
@@ -2283,13 +2447,13 @@ class Bt(object):
             Number of velocity beams
         """

-        self.corr = nans([n_velocities, n_ensembles])
-        self.depth_m = nans([n_velocities, n_ensembles])
-        self.eval_amp = nans([n_velocities, n_ensembles])
-        self.ext_depth_cm = nans(n_ensembles)
-        self.pergd = nans([n_velocities, n_ensembles])
-        self.rssi = nans([n_velocities, n_ensembles])
-        self.vel_mps = nans([n_velocities, n_ensembles])
+        self.corr = nans((n_velocities, n_ensembles))
+        self.depth_m = nans((n_velocities, n_ensembles))
+        self.eval_amp = nans((n_velocities, n_ensembles))
+        self.ext_depth_cm = nans((n_ensembles,))
+        self.pergd = nans((n_velocities, n_ensembles))
+        self.rssi = nans((n_velocities, n_ensembles))
+        self.vel_mps = nans((n_velocities, n_ensembles))

    def populate_data(self, i_ens, data):
        """Populates the class with data for an ensemble.
@@ -2302,15 +2466,25 @@ class Bt(object): Dictionary of all data for this ensemble """ - if 'bottom_track' in data: + if "bottom_track" in data: # Combine bytes to compute depth - self.depth_m[0:4, i_ens] = np.squeeze(np.array(data['bottom_track']['range_lsb']).T) + \ - np.squeeze(np.array(data['bottom_track']['range_msb']).T) * 2e16 / 100 - self.vel_mps[0:4, i_ens] = np.squeeze(np.array(data['bottom_track']['velocity']).T) - self.corr[0:4, i_ens] = np.squeeze(np.array(data['bottom_track']['correlation']).T) - self.eval_amp[0:4, i_ens] = np.squeeze(np.array(data['bottom_track']['amplitude']).T) - self.pergd[0:4, i_ens] = np.squeeze(np.array(data['bottom_track']['percent_good']).T) - self.rssi[0:4, i_ens] = np.squeeze(np.array(data['bottom_track']['rssi']).T) + self.depth_m[0:4, i_ens] = ( + np.squeeze(np.array(data["bottom_track"]["range_lsb"]).T) + + np.squeeze(np.array(data["bottom_track"]["range_msb"]).T) * 2e16 / 100 + ) + self.vel_mps[0:4, i_ens] = np.squeeze( + np.array(data["bottom_track"]["velocity"]).T + ) + self.corr[0:4, i_ens] = np.squeeze( + np.array(data["bottom_track"]["correlation"]).T + ) + self.eval_amp[0:4, i_ens] = np.squeeze( + np.array(data["bottom_track"]["amplitude"]).T + ) + self.pergd[0:4, i_ens] = np.squeeze( + np.array(data["bottom_track"]["percent_good"]).T + ) + self.rssi[0:4, i_ens] = np.squeeze(np.array(data["bottom_track"]["rssi"]).T) class Cfg(object): @@ -2421,54 +2595,54 @@ class Cfg(object): Number of ensembles """ - self.ba = nans(n_ensembles) - self.bc = nans(n_ensembles) - self.be_mmps = nans(n_ensembles) - self.bg = nans(n_ensembles) - self.bm = nans(n_ensembles) - self.bp = nans(n_ensembles) - self.bx_dm = nans(n_ensembles) - self.code_reps = nans(n_ensembles) - self.coord_sys = [''] * n_ensembles - self.cpu_ser_no = nans([n_ensembles, 8]) - self.cq = nans(n_ensembles) - self.cx = nans(n_ensembles) - self.dist_bin1_cm = nans(n_ensembles) - self.ea_deg = nans(n_ensembles) - self.eb_deg = nans(n_ensembles) - self.sensor_avail = [''] * n_ensembles - self.ex = [''] * n_ensembles - self.ez = [''] * n_ensembles - self.head_src = [''] * n_ensembles - self.lag_cm = nans(n_ensembles) - self.map_bins = [''] * n_ensembles - self.n_beams = nans(n_ensembles) - self.pitch_src = [''] * n_ensembles - self.ref_lay_end_cell = nans(n_ensembles) - self.ref_lay_str_cell = nans(n_ensembles) - self.roll_src = [''] * n_ensembles - self.sal_src = [''] * n_ensembles - self.wm = nans(n_ensembles) - self.sos_src = [''] * n_ensembles - self.temp_src = [''] * n_ensembles - self.tp_sec = nans(n_ensembles) - self.use_3beam = [''] * n_ensembles - self.use_pr = [''] * n_ensembles - self.wa = nans(n_ensembles) - self.wb = nans(n_ensembles) - self.wc = nans(n_ensembles) - self.we_mmps = nans(n_ensembles) - self.wf_cm = nans(n_ensembles) - self.wg_per = nans(n_ensembles) - self.wj = nans(n_ensembles) - self.wn = nans(n_ensembles) - self.wp = nans(n_ensembles) - self.ws_cm = nans(n_ensembles) - self.xdcr_dep_srs = [''] * n_ensembles - self.xmit_pulse_cm = nans(n_ensembles) - self.lag_near_bottom = nans(n_ensembles) - - def populate_data (self, i_ens, data): + self.ba = nans((n_ensembles,)) + self.bc = nans((n_ensembles,)) + self.be_mmps = nans((n_ensembles,)) + self.bg = nans((n_ensembles,)) + self.bm = nans((n_ensembles,)) + self.bp = nans((n_ensembles,)) + self.bx_dm = nans((n_ensembles,)) + self.code_reps = nans((n_ensembles,)) + self.coord_sys = [""] * n_ensembles + self.cpu_ser_no = nans((n_ensembles, 8)) + self.cq = nans((n_ensembles,)) + self.cx = nans((n_ensembles,)) + 
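# --- Illustrative sketch, not part of the patch ---------------------------
# Note on the bottom-track depth computation above: range_msb is scaled by
# 2e16, which is the float 2x10**16, not 2**16 = 65536. Combining a 16-bit
# LSB/MSB pair conventionally looks like the sketch below; treating that
# as the intent here is an assumption, not a confirmed fix of the patch.
range_lsb = 12345  # low 16 bits of the bottom range, in cm
range_msb = 1      # high 16 bits (number of 65536 cm rollovers)

range_cm = range_lsb + range_msb * 2 ** 16  # 77881 cm
depth_m = range_cm / 100.0                  # 778.81 m
print(depth_m)
# ---------------------------------------------------------------------------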
self.dist_bin1_cm = nans((n_ensembles,)) + self.ea_deg = nans((n_ensembles,)) + self.eb_deg = nans((n_ensembles,)) + self.sensor_avail = [""] * n_ensembles + self.ex = [""] * n_ensembles + self.ez = [""] * n_ensembles + self.head_src = [""] * n_ensembles + self.lag_cm = nans((n_ensembles,)) + self.map_bins = [""] * n_ensembles + self.n_beams = nans((n_ensembles,)) + self.pitch_src = [""] * n_ensembles + self.ref_lay_end_cell = nans((n_ensembles,)) + self.ref_lay_str_cell = nans((n_ensembles,)) + self.roll_src = [""] * n_ensembles + self.sal_src = [""] * n_ensembles + self.wm = nans((n_ensembles,)) + self.sos_src = [""] * n_ensembles + self.temp_src = [""] * n_ensembles + self.tp_sec = nans((n_ensembles,)) + self.use_3beam = [""] * n_ensembles + self.use_pr = [""] * n_ensembles + self.wa = nans((n_ensembles,)) + self.wb = nans((n_ensembles,)) + self.wc = nans((n_ensembles,)) + self.we_mmps = nans((n_ensembles,)) + self.wf_cm = nans((n_ensembles,)) + self.wg_per = nans((n_ensembles,)) + self.wj = nans((n_ensembles,)) + self.wn = nans((n_ensembles,)) + self.wp = nans((n_ensembles,)) + self.ws_cm = nans((n_ensembles,)) + self.xdcr_dep_srs = [""] * n_ensembles + self.xmit_pulse_cm = nans((n_ensembles,)) + self.lag_near_bottom = nans((n_ensembles,)) + + def populate_data(self, i_ens, data): """Populates the class with data for an ensemble. Parameters @@ -2479,146 +2653,154 @@ class Cfg(object): Dictionary of all data for this ensemble """ - if 'fixed_leader' in data: - self.n_beams[i_ens] = data['fixed_leader']['number_of_beams'] - self.wn[i_ens] = data['fixed_leader']['number_of_cells'] - self.wp[i_ens] = data['fixed_leader']['number_of_water_pings'] - self.ws_cm[i_ens] = data['fixed_leader']['depth_cell_size'] - self.wf_cm[i_ens] = data['fixed_leader']['blank_after_transmit'] - self.wm[i_ens] = data['fixed_leader']['water_mode'] - self.wc[i_ens] = data['fixed_leader']['low_correlation_threshold'] - self.code_reps[i_ens] = data['fixed_leader']['number_of_code_repetitions'] - self.wg_per[i_ens] = data['fixed_leader']['minimum_percentage_water_profile_pings'] - self.we_mmps[i_ens] = data['fixed_leader']['error_velocity_threshold'] - self.tp_sec[i_ens] = data['fixed_leader']['minutes'] * 60. 
+ \
-                                data['fixed_leader']['seconds'] + \
-                                data['fixed_leader']['hundredths'] * 0.01
+        if "fixed_leader" in data:
+            self.n_beams[i_ens] = data["fixed_leader"]["number_of_beams"]
+            self.wn[i_ens] = data["fixed_leader"]["number_of_cells"]
+            self.wp[i_ens] = data["fixed_leader"]["number_of_water_pings"]
+            self.ws_cm[i_ens] = data["fixed_leader"]["depth_cell_size"]
+            self.wf_cm[i_ens] = data["fixed_leader"]["blank_after_transmit"]
+            self.wm[i_ens] = data["fixed_leader"]["water_mode"]
+            self.wc[i_ens] = data["fixed_leader"]["low_correlation_threshold"]
+            self.code_reps[i_ens] = data["fixed_leader"]["number_of_code_repetitions"]
+            self.wg_per[i_ens] = data["fixed_leader"][
+                "minimum_percentage_water_profile_pings"
+            ]
+            self.we_mmps[i_ens] = data["fixed_leader"]["error_velocity_threshold"]
+            self.tp_sec[i_ens] = (
+                data["fixed_leader"]["minutes"] * 60.0
+                + data["fixed_leader"]["seconds"]
+                + data["fixed_leader"]["hundredths"] * 0.01
+            )

            # Convert coordinate_transformation_process to individual bits
-            self.ex[i_ens] = "{0:08b}".format(data['fixed_leader']['coordinate_transformation_process'])
+            self.ex[i_ens] = "{0:08b}".format(
+                data["fixed_leader"]["coordinate_transformation_process"]
+            )

            val = int(self.ex[i_ens][3:5], 2)
            if val == 0:
-                self.coord_sys[i_ens] = 'Beam'
+                self.coord_sys[i_ens] = "Beam"
            elif val == 1:
-                self.coord_sys[i_ens] = 'Inst'
+                self.coord_sys[i_ens] = "Inst"
            elif val == 2:
-                self.coord_sys[i_ens] = 'Ship'
+                self.coord_sys[i_ens] = "Ship"
            elif val == 3:
-                self.coord_sys[i_ens] = 'Earth'
+                self.coord_sys[i_ens] = "Earth"
            else:
                self.coord_sys[i_ens] = "N/a"

            val = int(self.ex[i_ens][5], 2)
            if val == 0:
-                self.use_pr = 'No'
+                self.use_pr[i_ens] = "No"
            elif val == 1:
-                self.use_pr = 'Yes'
+                self.use_pr[i_ens] = "Yes"
            else:
-                self.use_pr = 'N/a'
+                self.use_pr[i_ens] = "N/a"

            val = int(self.ex[i_ens][6], 2)
            if val == 0:
-                self.use_3beam = 'No'
+                self.use_3beam[i_ens] = "No"
            elif val == 1:
-                self.use_3beam = 'Yes'
+                self.use_3beam[i_ens] = "Yes"
            else:
-                self.use_3beam = 'N/a'
+                self.use_3beam[i_ens] = "N/a"

            val = int(self.ex[i_ens][7], 2)
            if val == 0:
-                self.map_bins = 'No'
+                self.map_bins[i_ens] = "No"
            elif val == 1:
-                self.map_bins = 'Yes'
+                self.map_bins[i_ens] = "Yes"
            else:
-                self.map_bins = 'N/a'
+                self.map_bins[i_ens] = "N/a"

-            self.ea_deg[i_ens] = data['fixed_leader']['heading_alignment'] * 0.01
-            self.eb_deg[i_ens] = data['fixed_leader']['heading_bias'] * 0.01
+            self.ea_deg[i_ens] = data["fixed_leader"]["heading_alignment"] * 0.01
+            self.eb_deg[i_ens] = data["fixed_leader"]["heading_bias"] * 0.01

-            # Convert sensour_source to individual bits
-            self.ez[i_ens] = "{0:08b}".format(data['fixed_leader']['sensor_source'])
+            # Convert sensor_source to individual bits
+            self.ez[i_ens] = "{0:08b}".format(data["fixed_leader"]["sensor_source"])

            val = int(self.ez[i_ens][:2], 2)
            if val == 0:
-                self.sos_src[i_ens] = 'Manual EC'
+                self.sos_src[i_ens] = "Manual EC"
            elif val == 1:
-                self.sos_src[i_ens] = 'Calculated'
+                self.sos_src[i_ens] = "Calculated"
            elif val == 3:
-                self.sos_src[i_ens] = 'SVSS Sensor'
+                self.sos_src[i_ens] = "SVSS Sensor"
            else:
-                self.sos_src[i_ens] = 'N/a'
+                self.sos_src[i_ens] = "N/a"

            val = int(self.ez[i_ens][2], 2)
            if val == 0:
-                self.xdcr_dep_srs[i_ens] = 'Manual ED'
-            if val == 1:
-                self.xdcr_dep_srs[i_ens] = 'Sensor'
+                self.xdcr_dep_srs[i_ens] = "Manual ED"
+            elif val == 1:
+                self.xdcr_dep_srs[i_ens] = "Sensor"
            else:
-                self.xdcr_dep_srs[i_ens] = 'N/a'
+                self.xdcr_dep_srs[i_ens] = "N/a"

            val = int(self.ez[i_ens][3], 2)
            if val == 0:
-                self.head_src[i_ens] = 'Manual EH'
-            if val == 1:
-                self.head_src[i_ens] = 'Int. Sensor'
+                self.head_src[i_ens] = "Manual EH"
+            elif val == 1:
+                self.head_src[i_ens] = "Int. Sensor"
            else:
-                self.head_src[i_ens] = 'N/a'
+                self.head_src[i_ens] = "N/a"

            val = int(self.ez[i_ens][4], 2)
            if val == 0:
-                self.pitch_src[i_ens] = 'Manual EP'
-            if val == 1:
-                self.pitch_src[i_ens] = 'Int. Sensor'
+                self.pitch_src[i_ens] = "Manual EP"
+            elif val == 1:
+                self.pitch_src[i_ens] = "Int. Sensor"
            else:
-                self.pitch_src[i_ens] = 'N/a'
+                self.pitch_src[i_ens] = "N/a"

            val = int(self.ez[i_ens][5], 2)
            if val == 0:
-                self.roll_src[i_ens] = 'Manual ER'
-            if val == 1:
-                self.roll_src[i_ens] = 'Int. Sensor'
+                self.roll_src[i_ens] = "Manual ER"
+            elif val == 1:
+                self.roll_src[i_ens] = "Int. Sensor"
            else:
-                self.roll_src[i_ens] = 'N/a'
+                self.roll_src[i_ens] = "N/a"

            val = int(self.ez[i_ens][6], 2)
            if val == 0:
-                self.xdcr_dep_srs[i_ens] = 'Manual ES'
-            if val == 1:
-                self.xdcr_dep_srs[i_ens] = 'Int. Sensor'
+                self.xdcr_dep_srs[i_ens] = "Manual ES"
+            elif val == 1:
+                self.xdcr_dep_srs[i_ens] = "Int. Sensor"
            else:
-                self.xdcr_dep_srs[i_ens] = 'N/a'
+                self.xdcr_dep_srs[i_ens] = "N/a"

            val = int(self.ez[i_ens][7], 2)
            if val == 0:
-                self.temp_src[i_ens] = 'Manual ET'
-            if val == 1:
-                self.temp_src[i_ens] = 'Int. Sensor'
+                self.temp_src[i_ens] = "Manual ET"
+            elif val == 1:
+                self.temp_src[i_ens] = "Int. Sensor"
            else:
-                self.temp_src[i_ens] = 'N/a'
-
-            self.sensor_avail[i_ens] = "{0:08b}".format(data['fixed_leader']['sensor_available'])
-            self.dist_bin1_cm[i_ens] = data['fixed_leader']['bin_1_distance']
-            self.xmit_pulse_cm[i_ens] = data['fixed_leader']['transmit_pulse_length']
-            self.ref_lay_str_cell[i_ens] = data['fixed_leader']['starting_depth_cell']
-            self.ref_lay_end_cell[i_ens] = data['fixed_leader']['ending_depth_cell']
-            self.wa[i_ens] = data['fixed_leader']['false_target_threshold']
-            self.cx[i_ens] = data['fixed_leader']['low_latency_trigger']
-            self.lag_cm[i_ens] = data['fixed_leader']['transmit_lag_distance']
-            self.cpu_ser_no[i_ens] = data['fixed_leader']['cpu_board_serial_number']
-            self.wb[i_ens] = data['fixed_leader']['system_bandwidth']
-            self.cq[i_ens] = data['fixed_leader']['system_power']
-
-        if 'variable_leader' in data:
-            self.lag_near_bottom[i_ens] = data['variable_leader']['lag_near_bottom']
-
-        if 'bottom_track' in data:
-            self.bp[i_ens] = data['bottom_track']['pings_per_ensemble_bp']
-            self.bc[i_ens] = data['bottom_track']['correlation_magnitude_minimum_bc']
-            self.ba[i_ens] = data['bottom_track']['evaluation_amplitude_minimum_ba']
-            self.bg[i_ens] = data['bottom_track']['percent_good_minimum_bg']
-            self.bm[i_ens] = data['bottom_track']['bottom_track_mode_bm']
-            self.be_mmps[i_ens] = data['bottom_track']['error_velocity_maximum_be']
+                self.temp_src[i_ens] = "N/a"
+
+            self.sensor_avail[i_ens] = "{0:08b}".format(
+                data["fixed_leader"]["sensor_available"]
+            )
+            self.dist_bin1_cm[i_ens] = data["fixed_leader"]["bin_1_distance"]
+            self.xmit_pulse_cm[i_ens] = data["fixed_leader"]["transmit_pulse_length"]
+            self.ref_lay_str_cell[i_ens] = data["fixed_leader"]["starting_depth_cell"]
+            self.ref_lay_end_cell[i_ens] = data["fixed_leader"]["ending_depth_cell"]
+            self.wa[i_ens] = data["fixed_leader"]["false_target_threshold"]
+            self.cx[i_ens] = data["fixed_leader"]["low_latency_trigger"]
+            self.lag_cm[i_ens] = data["fixed_leader"]["transmit_lag_distance"]
+            self.cpu_ser_no[i_ens] = data["fixed_leader"]["cpu_board_serial_number"]
+            self.wb[i_ens] = data["fixed_leader"]["system_bandwidth"]
+            self.cq[i_ens] = data["fixed_leader"]["system_power"]
+
+        if "variable_leader" in data:
+            self.lag_near_bottom[i_ens] = data["variable_leader"]["lag_near_bottom"]
+
+        if "bottom_track" in data:
+            self.bp[i_ens] = data["bottom_track"]["pings_per_ensemble_bp"]
+
self.bc[i_ens] = data["bottom_track"]["correlation_magnitude_minimum_bc"] + self.ba[i_ens] = data["bottom_track"]["evaluation_amplitude_minimum_ba"] + self.bg[i_ens] = data["bottom_track"]["percent_good_minimum_bg"] + self.bm[i_ens] = data["bottom_track"]["bottom_track_mode_bm"] + self.be_mmps[i_ens] = data["bottom_track"]["error_velocity_maximum_be"] class Gps(object): @@ -2663,19 +2845,19 @@ class Gps(object): Number of ensembles """ - self.alt_m = nans(n_ensembles) - self.gga_diff = nans(n_ensembles) - self.gga_hdop = nans(n_ensembles) - self.gga_n_stats = nans(n_ensembles) - self.gga_vel_e_mps = nans(n_ensembles) - self.gga_vel_n_mps = nans(n_ensembles) - self.gsa_p_dop = nans(n_ensembles) - self.gsa_sat = nans([n_ensembles, 6]) - self.gsa_v_dop = nans(n_ensembles) - self.lat_deg = nans(n_ensembles) - self.long_deg = nans(n_ensembles) - self.vtg_vel_e_mps = nans(n_ensembles) - self.vtg_vel_n_mps = nans(n_ensembles) + self.alt_m = nans((n_ensembles,)) + self.gga_diff = nans((n_ensembles,)) + self.gga_hdop = nans((n_ensembles,)) + self.gga_n_stats = nans((n_ensembles,)) + self.gga_vel_e_mps = nans((n_ensembles,)) + self.gga_vel_n_mps = nans((n_ensembles,)) + self.gsa_p_dop = nans((n_ensembles,)) + self.gsa_sat = nans((n_ensembles, 6)) + self.gsa_v_dop = nans((n_ensembles,)) + self.lat_deg = nans((n_ensembles,)) + self.long_deg = nans((n_ensembles,)) + self.vtg_vel_e_mps = nans((n_ensembles,)) + self.vtg_vel_n_mps = nans((n_ensembles,)) class Gps2(object): @@ -2787,52 +2969,52 @@ class Gps2(object): """ self.gga_delta_time = np.full([n_ensembles, 20], np.nan) - self.gga_header = np.full([n_ensembles, 20], ' ') - self.gga_sentence = np.full([n_ensembles, 20], '') + self.gga_header = np.tile(" ", [n_ensembles, 20]) + self.gga_sentence = np.tile("", [n_ensembles, 20]) self.utc = np.full([n_ensembles, 20], np.nan) self.lat_deg = np.zeros([n_ensembles, 20]) - self.lat_ref = np.full([n_ensembles, 20], '') + self.lat_ref = np.tile("", [n_ensembles, 20]) self.lon_deg = np.zeros([n_ensembles, 20]) - self.lon_ref = np.full([n_ensembles, 20], '') + self.lon_ref = np.tile("", [n_ensembles, 20]) self.corr_qual = np.full([n_ensembles, 20], np.nan) self.num_sats = np.full([n_ensembles, 20], np.nan) self.hdop = np.full([n_ensembles, 20], np.nan) self.alt = np.full([n_ensembles, 20], np.nan) - self.alt_unit = np.full([n_ensembles, 20], '') + self.alt_unit = np.tile("", [n_ensembles, 20]) self.geoid = np.full([n_ensembles, 20], np.nan) - self.geoid_unit = np.full([n_ensembles, 20], '') + self.geoid_unit = np.tile("", [n_ensembles, 20]) self.d_gps_age = np.full([n_ensembles, 20], np.nan) self.ref_stat_id = np.full([n_ensembles, 20], np.nan) self.vtg_delta_time = np.full([n_ensembles, 20], np.nan) - self.vtg_header = np.full([n_ensembles, 20], ' ') - self.vtg_sentence = np.full([n_ensembles, 20], '') + self.vtg_header = np.tile(" ", [n_ensembles, 20]) + self.vtg_sentence = np.tile("", [n_ensembles, 20]) self.course_true = np.full([n_ensembles, 20], np.nan) - self.true_indicator = np.full([n_ensembles, 20], '') + self.true_indicator = np.tile("", [n_ensembles, 20]) self.course_mag = np.full([n_ensembles, 20], np.nan) - self.mag_indicator = np.full([n_ensembles, 20], '') + self.mag_indicator = np.tile("", [n_ensembles, 20]) self.speed_knots = np.full([n_ensembles, 20], np.nan) - self.knots_indicator = np.full([n_ensembles, 20], '') + self.knots_indicator = np.tile("", [n_ensembles, 20]) self.speed_kph = np.zeros([n_ensembles, 20]) - self.kph_indicator = np.full([n_ensembles, 20], '') - self.mode_indicator = 
np.full([n_ensembles, 20], '') + self.kph_indicator = np.tile("", [n_ensembles, 20]) + self.mode_indicator = np.tile("", [n_ensembles, 20]) self.dbt_delta_time = np.full([n_ensembles, 20], np.nan) - self.dbt_header = np.full([n_ensembles, 20], ' ') + self.dbt_header = np.tile(" ", [n_ensembles, 20]) self.depth_ft = np.full([n_ensembles, 20], np.nan) - self.ft_indicator = np.full([n_ensembles, 20], '') + self.ft_indicator = np.tile("", [n_ensembles, 20]) self.depth_m = np.zeros([n_ensembles, 20]) - self.m_indicator = np.full([n_ensembles, 20], '') + self.m_indicator = np.tile("", [n_ensembles, 20]) self.depth_fath = np.full([n_ensembles, 20], np.nan) - self.fath_indicator = np.full([n_ensembles, 20], '') + self.fath_indicator = np.tile("", [n_ensembles, 20]) self.hdt_delta_time = np.full([n_ensembles, 20], np.nan) - self.hdt_header = np.full([n_ensembles, 20], ' ') + self.hdt_header = np.tile(" ", [n_ensembles, 20]) self.heading_deg = np.full([n_ensembles, 20], np.nan) - self.h_true_indicator = np.full([n_ensembles, 20], '') + self.h_true_indicator = np.tile("", [n_ensembles, 20]) # if wr2: - self.gga_velE_mps = nans(n_ensembles) - self.gga_velN_mps = nans(n_ensembles) - self.vtg_velE_mps = nans(n_ensembles) - self.vtg_velN_mps = nans(n_ensembles) + self.gga_velE_mps = nans((n_ensembles,)) + self.gga_velN_mps = nans((n_ensembles,)) + self.vtg_velE_mps = nans((n_ensembles,)) + self.vtg_velN_mps = nans((n_ensembles,)) def populate_data(self, i_ens, data): """Populates the class with data for an ensemble. @@ -2845,94 +3027,98 @@ class Gps2(object): Dictionary of all data for this ensemble """ - if 'gga' in data: + if "gga" in data: # Check size and expand if needed - if len(data['gga']) > self.gga_delta_time.shape[1]: - self.gga_expand(len(data['gga'])) + if len(data["gga"]) > self.gga_delta_time.shape[1]: + self.gga_expand(len(data["gga"])) - for n, gga_data in enumerate(data['gga']): + for n, gga_data in enumerate(data["gga"]): # Try implemented because of occasional garbage in data stream. 
- # This prevents a crash and data after garbage are not used, but any data before garbage is saved + # This prevents a crash and data after garbage are not used, + # but any data before garbage is saved try: - self.gga_delta_time[i_ens, n] = gga_data['delta_time'] - self.gga_header[i_ens, n] = gga_data['header'] - self.utc[i_ens, n] = gga_data['utc'] - self.lat_deg[i_ens, n] = gga_data['lat_deg'] - self.lat_ref[i_ens, n] = gga_data['lat_ref'] - self.lon_deg[i_ens, n] = gga_data['lon_deg'] - self.lon_ref[i_ens, n] = gga_data['lon_ref'] - self.corr_qual[i_ens, n] = gga_data['corr_qual'] - self.num_sats[i_ens, n] = gga_data['num_sats'] - self.hdop[i_ens, n] = gga_data['hdop'] - self.alt[i_ens, n] = gga_data['alt'] - self.alt_unit[i_ens, n] = gga_data['alt_unit'] - self.geoid[i_ens, n] = gga_data['geoid'] - self.geoid_unit[i_ens, n] = gga_data['geoid_unit'] - self.d_gps_age[i_ens, n] = gga_data['d_gps_age'] - self.ref_stat_id[i_ens, n] = gga_data['ref_stat_id'] + self.gga_delta_time[i_ens, n] = gga_data["delta_time"] + self.gga_header[i_ens, n] = gga_data["header"] + self.utc[i_ens, n] = gga_data["utc"] + self.lat_deg[i_ens, n] = gga_data["lat_deg"] + self.lat_ref[i_ens, n] = gga_data["lat_ref"] + self.lon_deg[i_ens, n] = gga_data["lon_deg"] + self.lon_ref[i_ens, n] = gga_data["lon_ref"] + self.corr_qual[i_ens, n] = gga_data["corr_qual"] + self.num_sats[i_ens, n] = gga_data["num_sats"] + self.hdop[i_ens, n] = gga_data["hdop"] + self.alt[i_ens, n] = gga_data["alt"] + self.alt_unit[i_ens, n] = gga_data["alt_unit"] + self.geoid[i_ens, n] = gga_data["geoid"] + self.geoid_unit[i_ens, n] = gga_data["geoid_unit"] + self.d_gps_age[i_ens, n] = gga_data["d_gps_age"] + self.ref_stat_id[i_ens, n] = gga_data["ref_stat_id"] except: pass - if 'vtg' in data: + if "vtg" in data: # Check size and expand if needed - if len(data['vtg']) > self.vtg_delta_time.shape[1]: - self.vtg_expand(len(data['vtg'])) + if len(data["vtg"]) > self.vtg_delta_time.shape[1]: + self.vtg_expand(len(data["vtg"])) - for n, vtg_data in enumerate(data['vtg']): + for n, vtg_data in enumerate(data["vtg"]): # Try implemented because of occasional garbage in data stream. 
- # This prevents a crash and data after garbage are not used, but any data before garbage is saved + # This prevents a crash and data after garbage are not used, + # but any data before garbage is saved try: - self.vtg_delta_time[i_ens, n] = vtg_data['delta_time'] - self.vtg_header[i_ens, n] = vtg_data['header'] - self.course_true[i_ens, n] = vtg_data['course_true'] - self.true_indicator[i_ens, n] = vtg_data['true_indicator'] - self.course_mag[i_ens, n] = vtg_data['course_mag'] - self.mag_indicator[i_ens, n] = vtg_data['mag_indicator'] - self.speed_knots[i_ens, n] = vtg_data['speed_knots'] - self.knots_indicator[i_ens, n] = vtg_data['knots_indicator'] - self.speed_kph[i_ens, n] = vtg_data['speed_kph'] - self.kph_indicator[i_ens, n] = vtg_data['kph_indicator'] - self.mode_indicator[i_ens, n] = vtg_data['mode_indicator'] + self.vtg_delta_time[i_ens, n] = vtg_data["delta_time"] + self.vtg_header[i_ens, n] = vtg_data["header"] + self.course_true[i_ens, n] = vtg_data["course_true"] + self.true_indicator[i_ens, n] = vtg_data["true_indicator"] + self.course_mag[i_ens, n] = vtg_data["course_mag"] + self.mag_indicator[i_ens, n] = vtg_data["mag_indicator"] + self.speed_knots[i_ens, n] = vtg_data["speed_knots"] + self.knots_indicator[i_ens, n] = vtg_data["knots_indicator"] + self.speed_kph[i_ens, n] = vtg_data["speed_kph"] + self.kph_indicator[i_ens, n] = vtg_data["kph_indicator"] + self.mode_indicator[i_ens, n] = vtg_data["mode_indicator"] except: pass - if 'ds' in data: + if "ds" in data: # Check size and expand if needed - if len(data['ds']) > self.dbt_delta_time.shape[1]: - self.dbt_expand(len(data['ds'])) + if len(data["ds"]) > self.dbt_delta_time.shape[1]: + self.dbt_expand(len(data["ds"])) - for n, dbt_data in enumerate(data['ds']): + for n, dbt_data in enumerate(data["ds"]): # Try implemented because of occasional garbage in data stream. - # This prevents a crash and data after garbage are not used, but any data before garbage is saved + # This prevents a crash and data after garbage are not used, + # but any data before garbage is saved try: - self.dbt_delta_time[i_ens, n] = dbt_data['delta_time'] - self.dbt_header[i_ens, n] = dbt_data['header'] - self.depth_ft[i_ens, n] = dbt_data['depth_ft'] - self.ft_indicator[i_ens, n] = dbt_data['ft_indicator'] - self.depth_m[i_ens, n] = dbt_data['depth_m'] - self.m_indicator[i_ens, n] = dbt_data['m_indicator'] - self.depth_fath[i_ens, n] = dbt_data['depth_fath'] - self.fath_indicator[i_ens, n] = dbt_data['fath_indicator'] + self.dbt_delta_time[i_ens, n] = dbt_data["delta_time"] + self.dbt_header[i_ens, n] = dbt_data["header"] + self.depth_ft[i_ens, n] = dbt_data["depth_ft"] + self.ft_indicator[i_ens, n] = dbt_data["ft_indicator"] + self.depth_m[i_ens, n] = dbt_data["depth_m"] + self.m_indicator[i_ens, n] = dbt_data["m_indicator"] + self.depth_fath[i_ens, n] = dbt_data["depth_fath"] + self.fath_indicator[i_ens, n] = dbt_data["fath_indicator"] except: pass - if 'ext_heading' in data: + if "ext_heading" in data: # Check size and expand if needed - if len(data['ext_heading']) > self.hdt_delta_time.shape[1]: - self.hdt_expand(len(data['ext_heading'])) + if len(data["ext_heading"]) > self.hdt_delta_time.shape[1]: + self.hdt_expand(len(data["ext_heading"])) - for n, hdt_data in enumerate(data['ext_heading']): + for n, hdt_data in enumerate(data["ext_heading"]): # Try implemented because of occasional garbage in data stream. 
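# --- Illustrative sketch, not part of the patch ---------------------------
# The bare "except: pass" blocks above deliberately skip garbled NMEA
# records, but they also hide programming errors. A variant restricted to
# the failures garbage data can plausibly raise (the exception list is an
# assumption; the upstream code does not enumerate them) would be:
record = {"delta_time": 0.5, "heading_deg": "garbage*47"}

try:
    heading = float(record["heading_deg"])
except (KeyError, TypeError, ValueError):
    heading = float("nan")  # keep fields parsed so far, skip the bad one
print(heading)  # nan
# ---------------------------------------------------------------------------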
- # This prevents a crash and data after garbage are not used, but any data before garbage is saved + # This prevents a crash and data after garbage are not used, + # but any data before garbage is saved try: - self.hdt_delta_time[i_ens, n] = hdt_data['delta_time'] - self.hdt_header[i_ens, n] = hdt_data['header'] - self.heading_deg[i_ens, n] = hdt_data['heading_deg'] - self.h_true_indicator[i_ens, n] = hdt_data['h_true_indicator'] + self.hdt_delta_time[i_ens, n] = hdt_data["delta_time"] + self.hdt_header[i_ens, n] = hdt_data["header"] + self.heading_deg[i_ens, n] = hdt_data["heading_deg"] + self.h_true_indicator[i_ens, n] = hdt_data["h_true_indicator"] except: pass @@ -2951,38 +3137,54 @@ class Gps2(object): # Expand arrays self.gga_delta_time = np.concatenate( - (self.gga_delta_time, np.tile(np.nan, (n_ensembles, n_expansion))), axis=1) + (self.gga_delta_time, np.tile(np.nan, (n_ensembles, n_expansion))), axis=1 + ) self.utc = np.concatenate( - (self.utc, np.tile(np.nan, (n_ensembles, n_expansion))), axis=1) + (self.utc, np.tile(np.nan, (n_ensembles, n_expansion))), axis=1 + ) self.lat_deg = np.concatenate( - (self.lat_deg, np.tile(np.nan, (n_ensembles, n_expansion))), axis=1) + (self.lat_deg, np.tile(np.nan, (n_ensembles, n_expansion))), axis=1 + ) self.lon_deg = np.concatenate( - (self.lon_deg, np.tile(np.nan, (n_ensembles, n_expansion))), axis=1) + (self.lon_deg, np.tile(np.nan, (n_ensembles, n_expansion))), axis=1 + ) self.corr_qual = np.concatenate( - (self.corr_qual, np.tile(np.nan, (n_ensembles, n_expansion))), axis=1) + (self.corr_qual, np.tile(np.nan, (n_ensembles, n_expansion))), axis=1 + ) self.num_sats = np.concatenate( - (self.num_sats, np.tile(np.nan, (n_ensembles, n_expansion))), axis=1) + (self.num_sats, np.tile(np.nan, (n_ensembles, n_expansion))), axis=1 + ) self.hdop = np.concatenate( - (self.hdop, np.tile(np.nan, (n_ensembles, n_expansion))), axis=1) + (self.hdop, np.tile(np.nan, (n_ensembles, n_expansion))), axis=1 + ) self.alt = np.concatenate( - (self.alt, np.tile(np.nan, (n_ensembles, n_expansion))), axis=1) + (self.alt, np.tile(np.nan, (n_ensembles, n_expansion))), axis=1 + ) self.geoid = np.concatenate( - (self.geoid, np.tile(np.nan, (n_ensembles, n_expansion))), axis=1) + (self.geoid, np.tile(np.nan, (n_ensembles, n_expansion))), axis=1 + ) self.d_gps_age = np.concatenate( - (self.d_gps_age, np.tile(np.nan, (n_ensembles, n_expansion))), axis=1) + (self.d_gps_age, np.tile(np.nan, (n_ensembles, n_expansion))), axis=1 + ) self.ref_stat_id = np.concatenate( - (self.ref_stat_id, np.tile(np.nan, (n_ensembles, n_expansion))), axis=1) + (self.ref_stat_id, np.tile(np.nan, (n_ensembles, n_expansion))), axis=1 + ) self.gga_header = np.concatenate( - (self.gga_header, np.tile('', (n_ensembles, n_expansion))), axis=1) + (self.gga_header, np.tile("", (n_ensembles, n_expansion))), axis=1 + ) self.geoid_unit = np.concatenate( - (self.geoid_unit, np.tile('', (n_ensembles, n_expansion))), axis=1) + (self.geoid_unit, np.tile("", (n_ensembles, n_expansion))), axis=1 + ) self.alt_unit = np.concatenate( - (self.alt_unit, np.tile('', (n_ensembles, n_expansion))), axis=1) + (self.alt_unit, np.tile("", (n_ensembles, n_expansion))), axis=1 + ) self.lon_ref = np.concatenate( - (self.lon_ref, np.tile('', (n_ensembles, n_expansion))), axis=1) + (self.lon_ref, np.tile("", (n_ensembles, n_expansion))), axis=1 + ) self.lat_ref = np.concatenate( - (self.lat_ref, np.tile('', (n_ensembles, n_expansion))), axis=1) + (self.lat_ref, np.tile("", (n_ensembles, n_expansion))), axis=1 + ) def 
vtg_expand(self, n_samples): """Expand arrays. @@ -2999,28 +3201,39 @@ class Gps2(object): # Expand arrays self.vtg_delta_time = np.concatenate( - (self.vtg_delta_time, np.tile(np.nan, (n_ensembles, n_expansion))), axis=1) + (self.vtg_delta_time, np.tile(np.nan, (n_ensembles, n_expansion))), axis=1 + ) self.course_true = np.concatenate( - (self.course_true, np.tile(np.nan, (n_ensembles, n_expansion))), axis=1) + (self.course_true, np.tile(np.nan, (n_ensembles, n_expansion))), axis=1 + ) self.course_mag = np.concatenate( - (self.course_mag, np.tile(np.nan, (n_ensembles, n_expansion))), axis=1) + (self.course_mag, np.tile(np.nan, (n_ensembles, n_expansion))), axis=1 + ) self.speed_knots = np.concatenate( - (self.speed_knots, np.tile(np.nan, (n_ensembles, n_expansion))), axis=1) + (self.speed_knots, np.tile(np.nan, (n_ensembles, n_expansion))), axis=1 + ) self.speed_kph = np.concatenate( - (self.speed_kph, np.tile(np.nan, (n_ensembles, n_expansion))), axis=1) + (self.speed_kph, np.tile(np.nan, (n_ensembles, n_expansion))), axis=1 + ) self.kph_indicator = np.concatenate( - (self.kph_indicator, np.tile('', (n_ensembles, n_expansion))), axis=1) + (self.kph_indicator, np.tile("", (n_ensembles, n_expansion))), axis=1 + ) self.mode_indicator = np.concatenate( - (self.mode_indicator, np.tile('', (n_ensembles, n_expansion))), axis=1) + (self.mode_indicator, np.tile("", (n_ensembles, n_expansion))), axis=1 + ) self.vtg_header = np.concatenate( - (self.vtg_header, np.tile('', (n_ensembles, n_expansion))), axis=1) + (self.vtg_header, np.tile("", (n_ensembles, n_expansion))), axis=1 + ) self.true_indicator = np.concatenate( - (self.true_indicator, np.tile('', (n_ensembles, n_expansion))), axis=1) + (self.true_indicator, np.tile("", (n_ensembles, n_expansion))), axis=1 + ) self.mag_indicator = np.concatenate( - (self.mag_indicator, np.tile('', (n_ensembles, n_expansion))), axis=1) + (self.mag_indicator, np.tile("", (n_ensembles, n_expansion))), axis=1 + ) self.knots_indicator = np.concatenate( - (self.knots_indicator, np.tile('', (n_ensembles, n_expansion))), axis=1) + (self.knots_indicator, np.tile("", (n_ensembles, n_expansion))), axis=1 + ) def dbt_expand(self, n_samples): """Expand arrays. 
@@ -3037,22 +3250,30 @@ class Gps2(object): # Expand arrays self.dbt_delta_time = np.concatenate( - (self.dbt_delta_time, np.tile(np.nan, (n_ensembles, n_expansion))), axis=1) + (self.dbt_delta_time, np.tile(np.nan, (n_ensembles, n_expansion))), axis=1 + ) self.depth_ft = np.concatenate( - (self.depth_ft, np.tile(np.nan, (n_ensembles, n_expansion))), axis=1) + (self.depth_ft, np.tile(np.nan, (n_ensembles, n_expansion))), axis=1 + ) self.depth_m = np.concatenate( - (self.depth_m, np.tile(np.nan, (n_ensembles, n_expansion))), axis=1) + (self.depth_m, np.tile(np.nan, (n_ensembles, n_expansion))), axis=1 + ) self.depth_fath = np.concatenate( - (self.depth_fath, np.tile(np.nan, (n_ensembles, n_expansion))), axis=1) + (self.depth_fath, np.tile(np.nan, (n_ensembles, n_expansion))), axis=1 + ) self.fath_indicator = np.concatenate( - (self.fath_indicator, np.tile(np.nan, (n_ensembles, n_expansion))), axis=1) + (self.fath_indicator, np.tile(np.nan, (n_ensembles, n_expansion))), axis=1 + ) self.dbt_header = np.concatenate( - (self.dbt_header, np.tile(np.nan, (n_ensembles, n_expansion))), axis=1) + (self.dbt_header, np.tile(np.nan, (n_ensembles, n_expansion))), axis=1 + ) self.ft_indicator = np.concatenate( - (self.ft_indicator, np.tile(np.nan, (n_ensembles, n_expansion))), axis=1) + (self.ft_indicator, np.tile(np.nan, (n_ensembles, n_expansion))), axis=1 + ) self.m_indicator = np.concatenate( - (self.m_indicator, np.tile(np.nan, (n_ensembles, n_expansion))), axis=1) + (self.m_indicator, np.tile(np.nan, (n_ensembles, n_expansion))), axis=1 + ) def hdt_expand(self, n_samples): """Expand arrays. @@ -3069,13 +3290,17 @@ class Gps2(object): # Expand the arrays self.hdt_delta_time = np.concatenate( - (self.hdt_delta_time, np.tile(np.nan, (n_ensembles, n_expansion))), axis=1) + (self.hdt_delta_time, np.tile(np.nan, (n_ensembles, n_expansion))), axis=1 + ) self.heading_deg = np.concatenate( - (self.heading_deg, np.tile(np.nan, (n_ensembles, n_expansion))), axis=1) + (self.heading_deg, np.tile(np.nan, (n_ensembles, n_expansion))), axis=1 + ) self.h_true_indicator = np.concatenate( - (self.h_true_indicator, np.tile(np.nan, (n_ensembles, n_expansion))), axis=1) + (self.h_true_indicator, np.tile(np.nan, (n_ensembles, n_expansion))), axis=1 + ) self.hdt_header = np.concatenate( - (self.hdt_header, np.tile(np.nan, (n_ensembles, n_expansion))), axis=1) + (self.hdt_header, np.tile(np.nan, (n_ensembles, n_expansion))), axis=1 + ) class Nmea(object): @@ -3101,11 +3326,11 @@ class Nmea(object): n_ensembles: int Number of ensembles """ - self.gga = [''] * n_ensembles - self.gsa = [''] * n_ensembles - self.vtg = [''] * n_ensembles + self.gga = [""] * n_ensembles + self.gsa = [""] * n_ensembles + self.vtg = [""] * n_ensembles # self.raw = ['']*n_ensembles DSM: not sure this was used - self.dbt = [''] * n_ensembles + self.dbt = [""] * n_ensembles def populate_data(self, i_ens, data): """Populates the class with data for an ensemble. 
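# --- Illustrative sketch, not part of the patch ---------------------------
# The gga/vtg/dbt/hdt *_expand methods above all grow the 20-column
# per-ensemble buffers with the same recipe: pad on axis 1 with a fill
# value sized to the overflow. A generic sketch of that pattern (the
# helper name is illustrative):
import numpy as np

def expand_columns(arr, n_samples, fill):
    n_ensembles, n_cols = arr.shape
    pad = np.tile(fill, (n_ensembles, n_samples - n_cols))
    return np.concatenate((arr, pad), axis=1)

buf = np.full((3, 2), np.nan)
print(expand_columns(buf, 5, np.nan).shape)  # (3, 5)

# One detail worth flagging: dbt_expand and hdt_expand pad the string-
# valued header/indicator arrays with np.nan, while gga_expand and
# vtg_expand use "". NumPy promotes the mixed concatenation to a unicode
# dtype, so those pads become the literal text "nan"; that looks
# unintended, but the patch keeps it, so it is only noted here.
# ---------------------------------------------------------------------------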
@@ -3118,17 +3343,17 @@ class Nmea(object):
             Dictionary of all data for this ensemble
         """

-        if 'gga_sentence' in data:
-            self.gga[i_ens] = data['gga_sentence']
+        if "gga_sentence" in data:
+            self.gga[i_ens] = data["gga_sentence"]

-        if 'vtg_sentence' in data:
-            self.vtg[i_ens] = data['vtg_sentence']
+        if "vtg_sentence" in data:
+            self.vtg[i_ens] = data["vtg_sentence"]

-        if 'gsa_sentence' in data:
-            self.gsa[i_ens] = data['gsa_sentence']
+        if "gsa_sentence" in data:
+            self.gsa[i_ens] = data["gsa_sentence"]

-        if 'dbt_sentence' in data:
-            self.dbt[i_ens] = data['dbt_sentence']
+        if "dbt_sentence" in data:
+            self.dbt[i_ens] = data["dbt_sentence"]


class Sensor(object):
@@ -3200,7 +3425,7 @@ class Sensor(object):
            Transducer depth in decimeters
        xmit_current: np.array(int)
            Transmit current
-        self.xmit_voltage = nans(n_ensembles)
+        xmit_voltage: np.array(int)
            Transmit voltage
-        self.vert_beam_eval_amp: np.array(int)
+        vert_beam_eval_amp: np.array(int)
            Vertical beam amplitude
@@ -3223,43 +3448,43 @@ class Sensor(object):
            Number of ensembles
        """

-        self.ambient_temp = nans(n_ensembles)
-        self.attitude_temp = nans(n_ensembles)
-        self.attitude = nans(n_ensembles)
-        self.bit_test = nans(n_ensembles)
-        self.bit_test_count = nans(n_ensembles)
-        self.contam_sensor = nans(n_ensembles)
-        self.date = nans([n_ensembles, 3])
-        self.date_y2k = nans([n_ensembles, 4])
-        self.date_not_y2k = nans([n_ensembles, 3])
-        self.error_status_word = [''] * n_ensembles
-        self.heading_deg = nans(n_ensembles)
-        self.heading_std_dev_deg = nans(n_ensembles)
-        self.mpt_msc = nans([n_ensembles, 3])
-        self.num = nans(n_ensembles)
-        self.num_fact = nans(n_ensembles)
-        self.num_tot = nans(n_ensembles)
-        self.orient = [''] * n_ensembles
-        self.pitch_std_dev_deg = nans(n_ensembles)
-        self.pitch_deg = nans(n_ensembles)
-        self.pressure_neg = nans(n_ensembles)
-        self.pressure_pos = nans(n_ensembles)
-        self.pressure_pascal = nans(n_ensembles)
-        self.pressure_var_pascal = nans(n_ensembles)
-        self.roll_std_dev_deg = nans(n_ensembles)
-        self.roll_deg = nans(n_ensembles)
-        self.salinity_ppt = nans(n_ensembles)
-        self.sos_mps = nans(n_ensembles)
-        self.temperature_deg_c = nans(n_ensembles)
-        self.time = nans([n_ensembles, 4])
-        self.time_y2k = nans([n_ensembles, 4])
-        self.xdcr_depth_dm = nans(n_ensembles)
-        self.xmit_current = nans(n_ensembles)
-        self.xmit_voltage = nans(n_ensembles)
-        self.vert_beam_eval_amp = nans(n_ensembles)
-        self.vert_beam_RSSI_amp = nans(n_ensembles)
-        self.vert_beam_range_m = nans(n_ensembles)
-        self.vert_beam_gain = [''] * n_ensembles
+        self.ambient_temp = nans((n_ensembles,))
+        self.attitude_temp = nans((n_ensembles,))
+        self.attitude = nans((n_ensembles,))
+        self.bit_test = nans((n_ensembles,))
+        self.bit_test_count = nans((n_ensembles,))
+        self.contam_sensor = nans((n_ensembles,))
+        self.date = nans((n_ensembles, 3))
+        self.date_y2k = nans((n_ensembles, 4))
+        self.date_not_y2k = nans((n_ensembles, 3))
+        self.error_status_word = [""] * n_ensembles
+        self.heading_deg = nans((n_ensembles,))
+        self.heading_std_dev_deg = nans((n_ensembles,))
+        self.mpt_msc = nans((n_ensembles, 3))
+        self.num = nans((n_ensembles,))
+        self.num_fact = nans((n_ensembles,))
+        self.num_tot = nans((n_ensembles,))
+        self.orient = [""] * n_ensembles
+        self.pitch_std_dev_deg = nans((n_ensembles,))
+        self.pitch_deg = nans((n_ensembles,))
+        self.pressure_neg = nans((n_ensembles,))
+        self.pressure_pos = nans((n_ensembles,))
+        self.pressure_pascal = nans((n_ensembles,))
+        self.pressure_var_pascal = nans((n_ensembles,))
+        self.roll_std_dev_deg = nans((n_ensembles,))
+
self.roll_deg = nans((n_ensembles,)) + self.salinity_ppt = nans((n_ensembles,)) + self.sos_mps = nans((n_ensembles,)) + self.temperature_deg_c = nans((n_ensembles,)) + self.time = nans((n_ensembles, 4)) + self.time_y2k = nans((n_ensembles, 4)) + self.xdcr_depth_dm = nans((n_ensembles,)) + self.xmit_current = nans((n_ensembles,)) + self.xmit_voltage = nans((n_ensembles,)) + self.vert_beam_eval_amp = nans((n_ensembles,)) + self.vert_beam_RSSI_amp = nans((n_ensembles,)) + self.vert_beam_range_m = nans((n_ensembles,)) + self.vert_beam_gain = [""] * n_ensembles self.vert_beam_status = np.zeros(n_ensembles) def populate_data(self, i_ens, data): @@ -3273,84 +3498,108 @@ class Sensor(object): Dictionary of all data for this ensemble """ - if 'fixed_leader' in data and 'variable_leader' in data: + if "fixed_leader" in data and "variable_leader" in data: # Convert system_configuration_ls to 1s and 0s - bitls = "{0:08b}".format(data['fixed_leader']['system_configuration_ls']) + bitls = "{0:08b}".format(data["fixed_leader"]["system_configuration_ls"]) # Convert first two bits to integer val = int(bitls[0], 2) if val == 0: - self.orient[i_ens] = 'Down' + self.orient[i_ens] = "Down" elif val == 1: - self.orient[i_ens] = 'Up' + self.orient[i_ens] = "Up" else: - self.orient[i_ens] = 'n/a' + self.orient[i_ens] = "n/a" - self.num[i_ens] = data['variable_leader']['ensemble_number'] + self.num[i_ens] = data["variable_leader"]["ensemble_number"] # Store data and time as list - self.date_not_y2k[i_ens, :] = [data['variable_leader']['rtc_year'], - data['variable_leader']['rtc_month'], - data['variable_leader']['rtc_day']] - self.time[i_ens, :] = [data['variable_leader']['rtc_hour'], - data['variable_leader']['rtc_minutes'], - data['variable_leader']['rtc_seconds'], - data['variable_leader']['rtc_hundredths']] - - self.num_fact[i_ens] = data['variable_leader']['ensemble_number_msb'] + self.date_not_y2k[i_ens, :] = [ + data["variable_leader"]["rtc_year"], + data["variable_leader"]["rtc_month"], + data["variable_leader"]["rtc_day"], + ] + self.time[i_ens, :] = [ + data["variable_leader"]["rtc_hour"], + data["variable_leader"]["rtc_minutes"], + data["variable_leader"]["rtc_seconds"], + data["variable_leader"]["rtc_hundredths"], + ] + + self.num_fact[i_ens] = data["variable_leader"]["ensemble_number_msb"] self.num_tot[i_ens] = self.num[i_ens] + self.num_fact[i_ens] * 65535 - self.bit_test[i_ens] = data['variable_leader']['bit_fault'] - self.bit_test_count[i_ens] = data['variable_leader']['bit_count'] - self.sos_mps[i_ens] = data['variable_leader']['speed_of_sound'] - self.xdcr_depth_dm[i_ens] = data['variable_leader']['depth_of_transducer'] - self.heading_deg[i_ens] = data['variable_leader']['heading'] / 100. - self.pitch_deg[i_ens] = data['variable_leader']['pitch'] / 100. - self.roll_deg[i_ens] = data['variable_leader']['roll'] / 100. - self.salinity_ppt[i_ens] = data['variable_leader']['salinity'] - self.temperature_deg_c[i_ens] = data['variable_leader']['temperature'] / 100. - self.mpt_msc[i_ens, :] = [data['variable_leader']['mpt_minutes'], - data['variable_leader']['mpt_seconds'], - data['variable_leader']['mpt_hundredths']] - self.heading_std_dev_deg[i_ens] = data['variable_leader']['heading_standard_deviation'] - self.pitch_std_dev_deg[i_ens] = data['variable_leader']['pitch_standard_deviation'] / 10. - self.roll_std_dev_deg[i_ens] = data['variable_leader']['roll_standard_deviation'] / 10. 
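# --- Illustrative sketch, not part of the patch ---------------------------
# Note on num_tot above: an MSB rollover counter is conventionally worth
# 2**16 = 65536 per increment, while the code multiplies
# ensemble_number_msb by 65535. The sketch below shows the conventional
# combination; reading that as the intent is an assumption, not a
# confirmed fix.
ensemble_number = 10      # 16-bit ensemble counter (LSB part)
ensemble_number_msb = 2   # number of 65536 rollovers

num_tot = ensemble_number + ensemble_number_msb * 2 ** 16
print(num_tot)  # 131082
# ---------------------------------------------------------------------------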
- self.xmit_current[i_ens] = data['variable_leader']['transmit_current'] - self.xmit_voltage[i_ens] = data['variable_leader']['transmit_voltage'] - self.ambient_temp[i_ens] = data['variable_leader']['ambient_temperature'] - self.pressure_pos[i_ens] = data['variable_leader']['pressure_positive'] - self.pressure_neg[i_ens] = data['variable_leader']['pressure_negative'] - self.attitude_temp[i_ens] = data['variable_leader']['attitude_temperature'] - self.attitude[i_ens] = data['variable_leader']['attitude'] - self.contam_sensor[i_ens] = data['variable_leader']['contamination_sensor'] - self.error_status_word[i_ens] = "{0:032b}".format(data['variable_leader']['error_status_word']) - self.pressure_pascal[i_ens] = data['variable_leader']['pressure'] - self.pressure_var_pascal[i_ens] = data['variable_leader']['pressure_variance'] + self.bit_test[i_ens] = data["variable_leader"]["bit_fault"] + self.bit_test_count[i_ens] = data["variable_leader"]["bit_count"] + self.sos_mps[i_ens] = data["variable_leader"]["speed_of_sound"] + self.xdcr_depth_dm[i_ens] = data["variable_leader"]["depth_of_transducer"] + self.heading_deg[i_ens] = data["variable_leader"]["heading"] / 100.0 + self.pitch_deg[i_ens] = data["variable_leader"]["pitch"] / 100.0 + self.roll_deg[i_ens] = data["variable_leader"]["roll"] / 100.0 + self.salinity_ppt[i_ens] = data["variable_leader"]["salinity"] + self.temperature_deg_c[i_ens] = ( + data["variable_leader"]["temperature"] / 100.0 + ) + self.mpt_msc[i_ens, :] = [ + data["variable_leader"]["mpt_minutes"], + data["variable_leader"]["mpt_seconds"], + data["variable_leader"]["mpt_hundredths"], + ] + self.heading_std_dev_deg[i_ens] = data["variable_leader"][ + "heading_standard_deviation" + ] + self.pitch_std_dev_deg[i_ens] = ( + data["variable_leader"]["pitch_standard_deviation"] / 10.0 + ) + self.roll_std_dev_deg[i_ens] = ( + data["variable_leader"]["roll_standard_deviation"] / 10.0 + ) + self.xmit_current[i_ens] = data["variable_leader"]["transmit_current"] + self.xmit_voltage[i_ens] = data["variable_leader"]["transmit_voltage"] + self.ambient_temp[i_ens] = data["variable_leader"]["ambient_temperature"] + self.pressure_pos[i_ens] = data["variable_leader"]["pressure_positive"] + self.pressure_neg[i_ens] = data["variable_leader"]["pressure_negative"] + self.attitude_temp[i_ens] = data["variable_leader"]["attitude_temperature"] + self.attitude[i_ens] = data["variable_leader"]["attitude"] + self.contam_sensor[i_ens] = data["variable_leader"]["contamination_sensor"] + self.error_status_word[i_ens] = "{0:032b}".format( + data["variable_leader"]["error_status_word"] + ) + self.pressure_pascal[i_ens] = data["variable_leader"]["pressure"] + self.pressure_var_pascal[i_ens] = data["variable_leader"][ + "pressure_variance" + ] # Store Y2K date and time as list - self.date_y2k[i_ens, :] = [data['variable_leader']['rtc_y2k_century'], - data['variable_leader']['rtc_y2k_year'], - data['variable_leader']['rtc_y2k_month'], - data['variable_leader']['rtc_y2k_day']] - self.time_y2k[i_ens, :] = [data['variable_leader']['rtc_y2k_hour'], - data['variable_leader']['rtc_y2k_minutes'], - data['variable_leader']['rtc_y2k_seconds'], - data['variable_leader']['rtc_y2k_hundredths']] + self.date_y2k[i_ens, :] = [ + data["variable_leader"]["rtc_y2k_century"], + data["variable_leader"]["rtc_y2k_year"], + data["variable_leader"]["rtc_y2k_month"], + data["variable_leader"]["rtc_y2k_day"], + ] + self.time_y2k[i_ens, :] = [ + data["variable_leader"]["rtc_y2k_hour"], + data["variable_leader"]["rtc_y2k_minutes"], + 
data["variable_leader"]["rtc_y2k_seconds"], + data["variable_leader"]["rtc_y2k_hundredths"], + ] self.date[i_ens, :] = self.date_not_y2k[i_ens, :] - self.date[i_ens, 0] = self.date_y2k[i_ens, 0] * 100 + \ - self.date_y2k[i_ens, 1] - - if 'vertical_beam' in data: - self.vert_beam_eval_amp[i_ens] = data['vertical_beam']['eval_amp'] - self.vert_beam_RSSI_amp[i_ens] = data['vertical_beam']['rssi'] - self.vert_beam_range_m[i_ens] = data['vertical_beam']['range'] / 1000 - - # Use first 8 bits of status and the 6 the bit to determine the gain - temp = "{0:08b}".format(data['vertical_beam']['status']) + self.date[i_ens, 0] = ( + self.date_y2k[i_ens, 0] * 100 + self.date_y2k[i_ens, 1] + ) + + if "vertical_beam" in data: + self.vert_beam_eval_amp[i_ens] = data["vertical_beam"]["eval_amp"] + self.vert_beam_RSSI_amp[i_ens] = data["vertical_beam"]["rssi"] + self.vert_beam_range_m[i_ens] = data["vertical_beam"]["range"] / 1000 + + # Use first 8 bits of status and the 6 the bit to determine + # the gain + temp = "{0:08b}".format(data["vertical_beam"]["status"]) self.vert_beam_status[i_ens] = int(temp[6:], 2) - if temp[5] == '0': - self.vert_beam_gain[i_ens] = 'L' + if temp[5] == "0": + self.vert_beam_gain[i_ens] = "L" else: - self.vert_beam_gain[i_ens] = 'H' + self.vert_beam_gain[i_ens] = "H" class Surface(object): @@ -3371,7 +3620,8 @@ class Surface(object): pergd: np.array(int) 3D array of percent good for each beam, cell, and ensemble rssi: np.array(int) - 3D array of return signal strength indicator for each beam, cell, and ensemble + 3D array of return signal strength indicator for each beam, cell, + and ensemble """ def __init__(self, n_ensembles, n_velocities, max_surface_bins): @@ -3388,12 +3638,12 @@ class Surface(object): """ self.no_cells = np.zeros(n_ensembles) - self.cell_size_cm = nans(n_ensembles) - self.dist_bin1_cm = nans(n_ensembles) + self.cell_size_cm = nans((n_ensembles,)) + self.dist_bin1_cm = nans((n_ensembles,)) self.vel_mps = np.tile([np.nan], [n_velocities, max_surface_bins, n_ensembles]) - self.corr = nans([n_velocities, max_surface_bins, n_ensembles]) - self.pergd = nans([n_velocities, max_surface_bins, n_ensembles]) - self.rssi = nans([n_velocities, max_surface_bins, n_ensembles]) + self.corr = nans((n_velocities, max_surface_bins, n_ensembles)) + self.pergd = nans((n_velocities, max_surface_bins, n_ensembles)) + self.rssi = nans((n_velocities, max_surface_bins, n_ensembles)) def populate_data(self, i_ens, data, main_data): """Populates the class with data for an ensemble. 
@@ -3408,26 +3658,38 @@ class Surface(object): Object of PD0TRDI """ - if 'surface_leader' in data: - self.no_cells[i_ens] = data['surface_leader']['cell_count'] - self.cell_size_cm[i_ens] = data['surface_leader']['cell_size'] - self.dist_bin1_cm[i_ens] = data['surface_leader']['range_cell_1'] - - if 'surface_velocity' in data: - self.vel_mps[:main_data.n_velocities, :len(data['surface_velocity']['velocity']), i_ens] = \ - np.array(data['surface_velocity']['velocity']).T - - if 'surface_correlation' in data: - self.corr[:main_data.n_velocities, :len(data['surface_correlation']['correlation']), i_ens] = \ - np.array(data['surface_correlation']['correlation']).T - - if 'surface_intensity' in data: - self.rssi[:main_data.n_velocities, :len(data['surface_intensity']['rssi']), i_ens] = \ - np.array(data['surface_intensity']['rssi']).T - - if 'surface_percent_good' in data: - self.pergd[:main_data.n_velocities, :len(data['surface_percent_good']['percent_good']), i_ens] = \ - np.array(data['surface_percent_good']['percent_good']).T + if "surface_leader" in data: + self.no_cells[i_ens] = data["surface_leader"]["cell_count"] + self.cell_size_cm[i_ens] = data["surface_leader"]["cell_size"] + self.dist_bin1_cm[i_ens] = data["surface_leader"]["range_cell_1"] + + if "surface_velocity" in data: + self.vel_mps[ + : main_data.n_velocities, + : len(data["surface_velocity"]["velocity"]), + i_ens, + ] = np.array(data["surface_velocity"]["velocity"]).T + + if "surface_correlation" in data: + self.corr[ + : main_data.n_velocities, + : len(data["surface_correlation"]["correlation"]), + i_ens, + ] = np.array(data["surface_correlation"]["correlation"]).T + + if "surface_intensity" in data: + self.rssi[ + : main_data.n_velocities, + : len(data["surface_intensity"]["rssi"]), + i_ens, + ] = np.array(data["surface_intensity"]["rssi"]).T + + if "surface_percent_good" in data: + self.pergd[ + : main_data.n_velocities, + : len(data["surface_percent_good"]["percent_good"]), + i_ens, + ] = np.array(data["surface_percent_good"]["percent_good"]).T class Wt(object): @@ -3442,7 +3704,8 @@ class Wt(object): pergd: np.array(int) 3D array of percent good for each beam, cell, and ensemble rssi: np.array(int) - 3D array of return signal strength indicator for each beam, cell, and ensemble + 3D array of return signal strength indicator for each beam, cell, + and ensemble """ def __init__(self, n_bins, n_ensembles, n_velocities): @@ -3458,10 +3721,10 @@ class Wt(object): Maximum number of bins in an ensemble in the transect """ - self.corr = nans([n_velocities, n_bins, n_ensembles]) - self.pergd = nans([n_velocities, n_bins, n_ensembles]) - self.rssi = nans([n_velocities, n_bins, n_ensembles]) - self.vel_mps = nans([n_velocities, n_bins, n_ensembles]) + self.corr = nans((n_velocities, n_bins, n_ensembles)) + self.pergd = nans((n_velocities, n_bins, n_ensembles)) + self.rssi = nans((n_velocities, n_bins, n_ensembles)) + self.vel_mps = nans((n_velocities, n_bins, n_ensembles)) def populate_data(self, i_ens, data, main_data): """Populates the class with data for an ensemble. 
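# Illustrative sketch, not part of the patch: the assignment pattern used by
# Surface.populate_data above. Each message arrives as (cells x beams);
# transposing gives (beams x cells) so it drops into the pre-allocated
# (beams x max_bins x ensembles) NaN array, and bins without data stay NaN.
# Sizes and values are hypothetical.
import numpy as np

n_velocities, max_surface_bins, n_ensembles = 4, 5, 3
vel_mps = np.tile([np.nan], [n_velocities, max_surface_bins, n_ensembles])

i_ens = 0
velocity = [[1, 2, 3, 4], [5, 6, 7, 8]]  # two surface cells x four beams
vel_mps[:n_velocities, : len(velocity), i_ens] = np.array(velocity).T
print(vel_mps[:, :, i_ens])  # remaining surface bins stay NaN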
@@ -3476,28 +3739,35 @@ class Wt(object):
             Object of PD0TRDI
         """
-
-        if 'velocity' in data:
+        if "velocity" in data:
             # Check size in case array needs to be expanded
             if main_data.Cfg.wn[i_ens] > self.vel_mps.shape[1]:
-                append = np.zeros([self.vel_mps.shape[0],
-                                   int(main_data.Cfg.wn[i_ens] - self.vel_mps.shape[1]),
-                                   self.vel_mps.shape[2]])
+                append = np.zeros(
+                    [
+                        self.vel_mps.shape[0],
+                        int(main_data.Cfg.wn[i_ens] - self.vel_mps.shape[1]),
+                        self.vel_mps.shape[2],
+                    ]
+                )
                 self.vel_mps = np.hstack([self.vel_mps, append])
                 self.corr = np.hstack([self.corr, append])
                 self.rssi = np.hstack([self.rssi, append])
                 self.pergd = np.hstack([self.pergd, append])
 
             # Reformat and assign data
-            if 'velocity' in data:
-                self.vel_mps[:main_data.n_velocities, :int(main_data.Cfg.wn[i_ens]), i_ens] = \
-                    np.array(data['velocity']['data']).T
-            if 'correlation' in data:
-                self.corr[:main_data.n_velocities, :int(main_data.Cfg.wn[i_ens]), i_ens] = \
-                    np.array(data['correlation']['data']).T
-            if 'echo_intensity' in data:
-                self.rssi[:main_data.n_velocities, :int(main_data.Cfg.wn[i_ens]), i_ens] = \
-                    np.array(data['echo_intensity']['data']).T
-            if 'percent_good' in data:
-                self.pergd[:main_data.n_velocities, :int(main_data.Cfg.wn[i_ens]), i_ens] = \
-                    np.array(data['percent_good']['data']).T
+            if "velocity" in data:
+                self.vel_mps[
+                    : main_data.n_velocities, : int(main_data.Cfg.wn[i_ens]), i_ens
+                ] = np.array(data["velocity"]["data"]).T
+            if "correlation" in data:
+                self.corr[
+                    : main_data.n_velocities, : int(main_data.Cfg.wn[i_ens]), i_ens
+                ] = np.array(data["correlation"]["data"]).T
+            if "echo_intensity" in data:
+                self.rssi[
+                    : main_data.n_velocities, : int(main_data.Cfg.wn[i_ens]), i_ens
+                ] = np.array(data["echo_intensity"]["data"]).T
+            if "percent_good" in data:
+                self.pergd[
+                    : main_data.n_velocities, : int(main_data.Cfg.wn[i_ens]), i_ens
+                ] = np.array(data["percent_good"]["data"]).T
diff --git a/Classes/PreMeasurement.py b/Classes/PreMeasurement.py
index ed2b5c5..cbb09a6 100644
--- a/Classes/PreMeasurement.py
+++ b/Classes/PreMeasurement.py
@@ -4,7 +4,8 @@
 import numpy as np
 
 
 class PreMeasurement(object):
-    """Stores tests, calibrations, and evaluations conducted prior ot measurement.
+    """Stores tests, calibrations, and evaluations conducted prior to
+    measurement.
 
     Attributes
     ----------
@@ -15,14 +16,14 @@ class PreMeasurement(object):
     result: dict
         Dictionary of test results. Varies by test.
     """
-
+
     def __init__(self):
         """Initialize instance variables."""
 
         self.time_stamp = None
         self.data = None
         self.result = {}
-
+
     def populate_data(self, time_stamp, data_in, data_type):
         """Coordinates storing of test, calibration, and evaluation data.
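# Illustrative sketch, not part of the patch: the array-growing step in
# Wt.populate_data above. When an ensemble reports more water-track cells
# (Cfg.wn) than were allocated, a zero-filled block is stacked onto axis 1
# (the bin axis) of each (beams x bins x ensembles) array. Sizes are
# hypothetical.
import numpy as np

vel_mps = np.full((4, 3, 2), np.nan)  # beams x bins x ensembles
wn = 5                                # this ensemble carries 5 cells
if wn > vel_mps.shape[1]:
    append = np.zeros(
        [vel_mps.shape[0], int(wn - vel_mps.shape[1]), vel_mps.shape[2]]
    )
    vel_mps = np.hstack([vel_mps, append])
print(vel_mps.shape)  # (4, 5, 2)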
@@ -41,42 +42,45 @@ class PreMeasurement(object):
         self.data = data_in
 
         # Process data depending on data type and store result
-        if data_type[1] == 'C':
+        if data_type[1] == "C":
             self.compass_read()
-        elif data_type == 'TST':
+        elif data_type == "TST":
             self.sys_test_read()
             self.pt3_data()
-        elif data_type == 'SST':
+        elif data_type == "SST":
             self.sys_test_read()
 
     def compass_read(self):
         """Method for getting compass evaluation data"""
 
         # Match regex for compass evaluation error:
-        splits = re.split('(Total error:|Double Cycle Errors:|Error from calibration:)', self.data)
+        splits = re.split(
+            "(Total error:|Double Cycle Errors:|Error from calibration:)", self.data
+        )
         if len(splits) > 1:
-            error = float(re.search('\d+\.*\d*', splits[-1])[0])
+            error = float(re.search("\d+\.*\d*", splits[-1])[0])
         else:
-            error = 'N/A'
-        self.result['compass'] = {'error': error}
+            error = "N/A"
+        self.result["compass"] = {"error": error}
 
     @staticmethod
     def cc_qrev_mat_in(meas_struct):
-        """Processes the Matlab data structure to obtain a list of Premeasurement objects containing compass calibration
-        data from the Matlab data structure.
-
-        Parameters
-        ----------
-        meas_struct: mat_struct
-            Matlab data structure obtained from sio.loadmat
-
-        Returns
-        -------
-        cc: list
-            List of Premeasurement data objects
-        """
+        """Processes the Matlab data structure to obtain a list of
+        Premeasurement objects containing compass calibration
+        data from the Matlab data structure.
+
+        Parameters
+        ----------
+        meas_struct: mat_struct
+            Matlab data structure obtained from sio.loadmat
+
+        Returns
+        -------
+        cc: list
+            List of Premeasurement data objects
+        """
         cc = []
-        if hasattr(meas_struct, 'compassCal'):
+        if hasattr(meas_struct, "compassCal"):
             if type(meas_struct.compassCal) is np.ndarray:
                 for cal in meas_struct.compassCal:
                     pm = PreMeasurement()
@@ -91,21 +95,22 @@ class PreMeasurement(object):
 
     @staticmethod
     def ce_qrev_mat_in(meas_struct):
-        """Processes the Matlab data structure to obtain a list of Premeasurement objects containing compass evaluation
-        data from the Matlab data structure.
-
-        Parameters
-        ----------
-        meas_struct: mat_struct
-            Matlab data structure obtained from sio.loadmat
-
-        Returns
-        -------
-        ce: list
-            List of Premeasurement data objects
-        """
+        """Processes the Matlab data structure to obtain a list of
+        Premeasurement objects containing compass evaluation
+        data from the Matlab data structure.
+
+        Parameters
+        ----------
+        meas_struct: mat_struct
+            Matlab data structure obtained from sio.loadmat
+
+        Returns
+        -------
+        ce: list
+            List of Premeasurement data objects
+        """
         ce = []
-        if hasattr(meas_struct, 'compassEval'):
+        if hasattr(meas_struct, "compassEval"):
             if type(meas_struct.compassEval) is np.ndarray:
                 for comp_eval in meas_struct.compassEval:
                     pm = PreMeasurement()
@@ -118,7 +123,8 @@ class PreMeasurement(object):
         return ce
 
     def compass_populate_from_qrev_mat(self, data_in):
-        """Populated Premeasurement instance variables with data from QRev Matlab file.
+        """Populates PreMeasurement instance variables with data from QRev
+        Matlab file.
         Parameters
         ----------
         data_in: mat_struct
         """
         self.data = data_in.data
         self.time_stamp = data_in.timeStamp
-        if hasattr(data_in, 'result'):
-            self.result = {'compass': {'error': data_in.result.compass.error}}
+        if hasattr(data_in, "result"):
+            self.result = {"compass": {"error": data_in.result.compass.error}}
         else:
             # Match regex for compass evaluation error:
-            splits = re.split('(Total error:|Double Cycle Errors:|Error from calibration:)', self.data)
+            splits = re.split(
+                "(Total error:|Double Cycle Errors:|Error from calibration:)", self.data
+            )
             if len(splits) > 1:
-                error = float(re.search('\d+\.*\d*', splits[-1])[0])
+                error = float(re.search("\d+\.*\d*", splits[-1])[0])
             else:
-                error = 'N/A'
-            self.result['compass'] = {'error': error}
-
+                error = "N/A"
+            self.result["compass"] = {"error": error}
+
     def sys_test_read(self):
         """Method for reading the system test data"""
         if self.data is not None:
             # Match regex for number of tests and number of failures
-            num_tests = re.findall('(Fail|FAIL|F A I L|Pass|PASS|NOT DETECTED|P A S S)', self.data)
-            num_fails = re.findall('(Fail|FAIL|F A I L)', self.data)
+            num_tests = re.findall(
+                "(Fail|FAIL|F A I L|Pass|PASS|NOT DETECTED|P A S S)", self.data
+            )
+            num_fails = re.findall("(Fail|FAIL|F A I L)", self.data)
 
             # Store results
-            self.result = {'sysTest': {'n_tests': len(num_tests)}}
-            self.result['sysTest']['n_failed'] = len(num_fails)
+            self.result = {"sysTest": {"n_tests": len(num_tests)}}
+            self.result["sysTest"]["n_failed"] = len(num_fails)
         else:
-            self.result = {'sysTest': {'n_tests': None}}
-            self.result['sysTest']['n_failed'] = None
+            self.result = {"sysTest": {"n_tests": None}}
+            self.result["sysTest"]["n_failed"] = None
 
     @staticmethod
     def sys_test_qrev_mat_in(meas_struct):
-        """Processes the Matlab data structure to obtain a list of Premeasurement objects containing system test data
-        from the Matlab data structure.
+        """Processes the Matlab data structure to obtain a list of
+        Premeasurement objects containing system test data
+        from the Matlab data structure.
 
         Parameters
         ----------
@@ -166,9 +177,9 @@
         -------
         system_tst: list
             List of Premeasurement data objects
-        """
+        """
         system_tst = []
-        if hasattr(meas_struct, 'sysTest'):
+        if hasattr(meas_struct, "sysTest"):
             try:
                 if type(meas_struct.sysTest) == np.ndarray:
                     for test in meas_struct.sysTest:
@@ -184,7 +195,8 @@ class PreMeasurement(object):
         return system_tst
 
     def sys_tst_populate_from_qrev_mat(self, test_in):
-        """Populated Premeasurement instance variables with data from QRev Matlab file.
+        """Populates PreMeasurement instance variables with data from QRev
+        Matlab file.
Parameters ---------- @@ -194,119 +206,201 @@ class PreMeasurement(object): try: self.data = test_in.data self.time_stamp = test_in.timeStamp - self.result = {'sysTest': {'n_failed': test_in.result.sysTest.nFailed}} - self.result['sysTest']['n_tests'] = test_in.result.sysTest.nTests - - if hasattr(test_in.result, 'pt3'): - data_types = {'corr_table': np.array([]), 'sdc': np.array([]), 'cdc': np.array([]), - 'noise_floor': np.array([])} - test_types = {'high_wide': data_types.copy(), 'high_narrow': data_types.copy(), - 'low_wide': data_types.copy(), - 'low_narrow': data_types.copy()} - pt3 = {'hard_limit': copy.deepcopy(test_types), 'linear': copy.deepcopy(test_types)} - if hasattr(test_in.result.pt3, 'hardLimit'): - if hasattr(test_in.result.pt3.hardLimit, 'hw'): - pt3['hard_limit']['high_wide']['corr_table'] = test_in.result.pt3.hardLimit.hw.corrTable - pt3['hard_limit']['high_wide']['sdc'] = test_in.result.pt3.hardLimit.hw.sdc - pt3['hard_limit']['high_wide']['cdc'] = test_in.result.pt3.hardLimit.hw.cdc - pt3['hard_limit']['high_wide']['noise_floor'] = test_in.result.pt3.hardLimit.hw.noiseFloor - if hasattr(test_in.result.pt3.hardLimit, 'lw'): - pt3['hard_limit']['low_wide']['corr_table'] = test_in.result.pt3.hardLimit.lw.corrTable - pt3['hard_limit']['low_wide']['sdc'] = test_in.result.pt3.hardLimit.lw.sdc - pt3['hard_limit']['low_wide']['cdc'] = test_in.result.pt3.hardLimit.lw.cdc - pt3['hard_limit']['low_wide']['noise_floor'] = test_in.result.pt3.hardLimit.lw.noiseFloor - if hasattr(test_in.result.pt3.hardLimit, 'hn'): - pt3['hard_limit']['high_narrow']['corr_table'] = test_in.result.pt3.hardLimit.hn.corrTable - pt3['hard_limit']['high_narrow']['sdc'] = test_in.result.pt3.hardLimit.hn.sdc - pt3['hard_limit']['high_narrow']['cdc'] = test_in.result.pt3.hardLimit.hn.cdc - pt3['hard_limit']['high_narrow']['noise_floor'] = test_in.result.pt3.hardLimit.hn.noiseFloor - if hasattr(test_in.result.pt3.hardLimit, 'ln'): - pt3['hard_limit']['low_narrow']['corr_table'] = test_in.result.pt3.hardLimit.ln.corrTable - pt3['hard_limit']['low_narrow']['sdc'] = test_in.result.pt3.hardLimit.ln.sdc - pt3['hard_limit']['low_narrow']['cdc'] = test_in.result.pt3.hardLimit.ln.cdc - pt3['hard_limit']['low_narrow']['noise_floor'] = test_in.result.pt3.hardLimit.ln.noiseFloor - if hasattr(test_in.result.pt3, 'linear'): - if hasattr(test_in.result.pt3.linear, 'hw'): - pt3['linear']['high_wide']['corr_table'] = test_in.result.pt3.linear.hw.corrTable - pt3['linear']['high_wide']['noise_floor'] = test_in.result.pt3.linear.hw.noiseFloor - if hasattr(test_in.result.pt3.linear, 'lw'): - pt3['linear']['low_wide']['corr_table'] = test_in.result.pt3.linear.lw.corrTable - pt3['linear']['low_wide']['noise_floor'] = test_in.result.pt3.linear.lw.noiseFloor - if hasattr(test_in.result.pt3.linear, 'hn'): - pt3['linear']['high_narrow']['corr_table'] = test_in.result.pt3.linear.hn.corrTable - pt3['linear']['high_narrow']['noise_floor'] = test_in.result.pt3.linear.hn.noiseFloor - if hasattr(test_in.result.pt3.linear, 'ln'): - pt3['linear']['low_narrow']['corr_table'] = test_in.result.pt3.linear.ln.corrTable - pt3['linear']['low_narrow']['noise_floor'] = test_in.result.pt3.linear.ln.noiseFloor - - self.result['pt3'] = pt3 + self.result = {"sysTest": {"n_failed": test_in.result.sysTest.nFailed}} + self.result["sysTest"]["n_tests"] = test_in.result.sysTest.nTests + + if hasattr(test_in.result, "pt3"): + data_types = { + "corr_table": np.array([]), + "sdc": np.array([]), + "cdc": np.array([]), + "noise_floor": np.array([]), + } + 
test_types = { + "high_wide": data_types.copy(), + "high_narrow": data_types.copy(), + "low_wide": data_types.copy(), + "low_narrow": data_types.copy(), + } + pt3 = { + "hard_limit": copy.deepcopy(test_types), + "linear": copy.deepcopy(test_types), + } + if hasattr(test_in.result.pt3, "hardLimit"): + if hasattr(test_in.result.pt3.hardLimit, "hw"): + pt3["hard_limit"]["high_wide"][ + "corr_table" + ] = test_in.result.pt3.hardLimit.hw.corrTable + pt3["hard_limit"]["high_wide"][ + "sdc" + ] = test_in.result.pt3.hardLimit.hw.sdc + pt3["hard_limit"]["high_wide"][ + "cdc" + ] = test_in.result.pt3.hardLimit.hw.cdc + pt3["hard_limit"]["high_wide"][ + "noise_floor" + ] = test_in.result.pt3.hardLimit.hw.noiseFloor + if hasattr(test_in.result.pt3.hardLimit, "lw"): + pt3["hard_limit"]["low_wide"][ + "corr_table" + ] = test_in.result.pt3.hardLimit.lw.corrTable + pt3["hard_limit"]["low_wide"][ + "sdc" + ] = test_in.result.pt3.hardLimit.lw.sdc + pt3["hard_limit"]["low_wide"][ + "cdc" + ] = test_in.result.pt3.hardLimit.lw.cdc + pt3["hard_limit"]["low_wide"][ + "noise_floor" + ] = test_in.result.pt3.hardLimit.lw.noiseFloor + if hasattr(test_in.result.pt3.hardLimit, "hn"): + pt3["hard_limit"]["high_narrow"][ + "corr_table" + ] = test_in.result.pt3.hardLimit.hn.corrTable + pt3["hard_limit"]["high_narrow"][ + "sdc" + ] = test_in.result.pt3.hardLimit.hn.sdc + pt3["hard_limit"]["high_narrow"][ + "cdc" + ] = test_in.result.pt3.hardLimit.hn.cdc + pt3["hard_limit"]["high_narrow"][ + "noise_floor" + ] = test_in.result.pt3.hardLimit.hn.noiseFloor + if hasattr(test_in.result.pt3.hardLimit, "ln"): + pt3["hard_limit"]["low_narrow"][ + "corr_table" + ] = test_in.result.pt3.hardLimit.ln.corrTable + pt3["hard_limit"]["low_narrow"][ + "sdc" + ] = test_in.result.pt3.hardLimit.ln.sdc + pt3["hard_limit"]["low_narrow"][ + "cdc" + ] = test_in.result.pt3.hardLimit.ln.cdc + pt3["hard_limit"]["low_narrow"][ + "noise_floor" + ] = test_in.result.pt3.hardLimit.ln.noiseFloor + if hasattr(test_in.result.pt3, "linear"): + if hasattr(test_in.result.pt3.linear, "hw"): + pt3["linear"]["high_wide"][ + "corr_table" + ] = test_in.result.pt3.linear.hw.corrTable + pt3["linear"]["high_wide"][ + "noise_floor" + ] = test_in.result.pt3.linear.hw.noiseFloor + if hasattr(test_in.result.pt3.linear, "lw"): + pt3["linear"]["low_wide"][ + "corr_table" + ] = test_in.result.pt3.linear.lw.corrTable + pt3["linear"]["low_wide"][ + "noise_floor" + ] = test_in.result.pt3.linear.lw.noiseFloor + if hasattr(test_in.result.pt3.linear, "hn"): + pt3["linear"]["high_narrow"][ + "corr_table" + ] = test_in.result.pt3.linear.hn.corrTable + pt3["linear"]["high_narrow"][ + "noise_floor" + ] = test_in.result.pt3.linear.hn.noiseFloor + if hasattr(test_in.result.pt3.linear, "ln"): + pt3["linear"]["low_narrow"][ + "corr_table" + ] = test_in.result.pt3.linear.ln.corrTable + pt3["linear"]["low_narrow"][ + "noise_floor" + ] = test_in.result.pt3.linear.ln.noiseFloor + + self.result["pt3"] = pt3 except AttributeError: # Match regex for number of tests and number of failures - num_tests = re.findall('(Fail|FAIL|F A I L|Pass|PASS|NOT DETECTED|P A S S)', test_in.data) - num_fails = re.findall('(Fail|FAIL|F A I L)', test_in.data) + num_tests = re.findall( + "(Fail|FAIL|F A I L|Pass|PASS|NOT DETECTED|P A S S)", test_in.data + ) + num_fails = re.findall("(Fail|FAIL|F A I L)", test_in.data) # Store results - self.result = {'sysTest': {'n_tests': len(num_tests)}} - self.result['sysTest']['n_failed'] = len(num_fails) + self.result = {"sysTest": {"n_tests": len(num_tests)}} + 
self.result["sysTest"]["n_failed"] = len(num_fails) def pt3_data(self): """Method for processing the data in the correlation matrices.""" try: - data_types = {'corr_table': np.array([]), 'sdc': np.array([]), 'cdc': np.array([]), - 'noise_floor': np.array([])} - test_types = {'high_wide': data_types.copy(), 'high_narrow': data_types.copy(), - 'low_wide': data_types.copy(), - 'low_narrow': data_types.copy()} - pt3 = {'hard_limit': copy.deepcopy(test_types), 'linear': copy.deepcopy(test_types)} + data_types = { + "corr_table": np.array([]), + "sdc": np.array([]), + "cdc": np.array([]), + "noise_floor": np.array([]), + } + test_types = { + "high_wide": data_types.copy(), + "high_narrow": data_types.copy(), + "low_wide": data_types.copy(), + "low_narrow": data_types.copy(), + } + pt3 = { + "hard_limit": copy.deepcopy(test_types), + "linear": copy.deepcopy(test_types), + } # Match regex for correlation tables - matches = re.findall('Lag.*?0', self.data, re.DOTALL) + matches = re.findall("Lag.*?0", self.data, re.DOTALL) # Count the number or correlation tables to process correl_count = 0 for match in matches: - bm1_matches = re.findall('Bm1', match) + bm1_matches = re.findall("Bm1", match) correl_count += len(bm1_matches) # Correlation table match - lag_matches = re.findall('Lag.*?^\s*$', self.data, re.MULTILINE | re.DOTALL) + lag_matches = re.findall("Lag.*?^\s*$", self.data, re.MULTILINE | re.DOTALL) # Sin match - sin_match = re.findall('((Sin|SIN).*?^\s*$)', self.data, re.MULTILINE | re.DOTALL)[0][0] - sin_array = np.array(re.findall('\d+\.*\d*', sin_match), dtype=int) + sin_match = re.findall( + "((Sin|SIN).*?^\s*$)", self.data, re.MULTILINE | re.DOTALL + )[0][0] + sin_array = np.array(re.findall("\d+\.*\d*", sin_match), dtype=int) # Cos match - cos_match = re.findall('((Cos|COS).*?^\s*$)', self.data, re.MULTILINE | re.DOTALL)[0][0] - cos_array = np.array(re.findall('\d+\.*\d*', cos_match), dtype=int) + cos_match = re.findall( + "((Cos|COS).*?^\s*$)", self.data, re.MULTILINE | re.DOTALL + )[0][0] + cos_array = np.array(re.findall("\d+\.*\d*", cos_match), dtype=int) # RSSI match rssi_array = np.array([]) - rssi_matches = re.findall('RSSI.*?^\s*$', self.data, re.MULTILINE | re.DOTALL) + rssi_matches = re.findall( + "RSSI.*?^\s*$", self.data, re.MULTILINE | re.DOTALL + ) for rssi_match in rssi_matches: - rssi_array = np.hstack((rssi_array, np.array(re.findall('\d+\.*\d*', rssi_match), dtype=int))) + rssi_array = np.hstack( + ( + rssi_array, + np.array(re.findall("\d+\.*\d*", rssi_match), dtype=int), + ) + ) # Process each set of correlation tables for n, lag_match in enumerate(lag_matches): # Count the Bm1 string to know how many tables to read - bm_count = len(re.findall('Bm1', lag_match)) + bm_count = len(re.findall("Bm1", lag_match)) # Extract the table into list - numbers = re.findall('\d+\.*\d*', lag_match) + numbers = re.findall("\d+\.*\d*", lag_match) # Create array from data in table - corr_data = np.array(numbers[(bm_count * 4):(bm_count * 44)], - dtype=int).reshape([8, (bm_count * 4) + 1])[:, 1::] + corr_data = np.array( + numbers[(bm_count * 4) : (bm_count * 44)], dtype=int + ).reshape([8, (bm_count * 4) + 1])[:, 1::] # Only one pt3 test. 
Typical of Rio Grande and Streampro if bm_count == 1: # Assign matrix slices to corresponding variables # corr_hlimit_hgain_wband = corr_data - pt3['hard_limit']['high_wide']['corr_table'] = corr_data - pt3['hard_limit']['high_wide']['sdc'] = sin_array[0:4] - pt3['hard_limit']['high_wide']['cdc'] = cos_array[0:4] - pt3['hard_limit']['high_wide']['noise_floor'] = rssi_array[0:4] + pt3["hard_limit"]["high_wide"]["corr_table"] = corr_data + pt3["hard_limit"]["high_wide"]["sdc"] = sin_array[0:4] + pt3["hard_limit"]["high_wide"]["cdc"] = cos_array[0:4] + pt3["hard_limit"]["high_wide"]["noise_floor"] = rssi_array[0:4] # 4 tests arranged in groups of 2. All data are hard limited. elif bm_count == 2 and correl_count == 4: @@ -314,117 +408,217 @@ class PreMeasurement(object): # Hard limited wide bandwidth (n=0) if n == 0: - pt3['hard_limit']['high_wide']['corr_table'] = corr_data[:, 0:4] - pt3['hard_limit']['high_wide']['sdc'] = sin_array[n * 4: (n + 1) * 4] - pt3['hard_limit']['high_wide']['cdc'] = cos_array[n * 4: (n + 1) * 4] - pt3['hard_limit']['high_wide']['noise_floor'] = rssi_array[n * 4: (n + 1) * 4] - - pt3['hard_limit']['low_wide']['corr_table'] = corr_data[:, 4::] - pt3['hard_limit']['low_wide']['sdc'] = sin_array[(n + 1) * 4: (n + 2) * 4] - pt3['hard_limit']['low_wide']['cdc'] = cos_array[(n + 1) * 4: (n + 2) * 4] - pt3['hard_limit']['low_wide']['noise_floor'] = rssi_array[(n + 1) * 4: (n + 2) * 4] + pt3["hard_limit"]["high_wide"]["corr_table"] = corr_data[:, 0:4] + pt3["hard_limit"]["high_wide"]["sdc"] = sin_array[ + n * 4 : (n + 1) * 4 + ] + pt3["hard_limit"]["high_wide"]["cdc"] = cos_array[ + n * 4 : (n + 1) * 4 + ] + pt3["hard_limit"]["high_wide"]["noise_floor"] = rssi_array[ + n * 4 : (n + 1) * 4 + ] + + pt3["hard_limit"]["low_wide"]["corr_table"] = corr_data[:, 4::] + pt3["hard_limit"]["low_wide"]["sdc"] = sin_array[ + (n + 1) * 4 : (n + 2) * 4 + ] + pt3["hard_limit"]["low_wide"]["cdc"] = cos_array[ + (n + 1) * 4 : (n + 2) * 4 + ] + pt3["hard_limit"]["low_wide"]["noise_floor"] = rssi_array[ + (n + 1) * 4 : (n + 2) * 4 + ] # Hard limited narrow bandwidth (n=1) elif n == 1: - pt3['hard_limit']['high_narrow']['corr_table'] = corr_data[:, 0:4] - pt3['hard_limit']['high_narrow']['sdc'] = sin_array[(n + 1) * 4: (n + 2) * 4] - pt3['hard_limit']['high_narrow']['cdc'] = cos_array[(n + 1) * 4: (n + 2) * 4] - pt3['hard_limit']['high_narrow']['noise_floor'] = rssi_array[(n + 1) * 4: (n + 2) * 4] - - pt3['hard_limit']['low_narrow']['corr_table'] = corr_data[:, 4::] - pt3['hard_limit']['low_narrow']['sdc'] = sin_array[(n + 2) * 4: (n + 3) * 4] - pt3['hard_limit']['low_narrow']['cdc'] = cos_array[(n + 2) * 4: (n + 3) * 4] - pt3['hard_limit']['low_narrow']['noise_floor'] = rssi_array[(n + 2) * 4: (n + 3) * 4] - - # 8 tests arranged in sets of 2. The linear is 1st followed by the hard limit. 
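# Illustrative sketch, not part of the patch: the indexing convention used in
# pt3_data above. The sin/cos/RSSI values are parsed into flat arrays in
# which each correlation table owns a contiguous block of four beam values,
# so slices of the form [k * 4 : (k + 1) * 4] select the block for table k.
# Values are hypothetical.
import numpy as np

rssi_array = np.arange(16)  # four tables x four beams, toy values
for k in range(4):
    print(k, rssi_array[k * 4 : (k + 1) * 4])
# 0 [0 1 2 3] / 1 [4 5 6 7] / 2 [8 9 10 11] / 3 [12 13 14 15]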
+ pt3["hard_limit"]["high_narrow"]["corr_table"] = corr_data[ + :, 0:4 + ] + pt3["hard_limit"]["high_narrow"]["sdc"] = sin_array[ + (n + 1) * 4 : (n + 2) * 4 + ] + pt3["hard_limit"]["high_narrow"]["cdc"] = cos_array[ + (n + 1) * 4 : (n + 2) * 4 + ] + pt3["hard_limit"]["high_narrow"]["noise_floor"] = rssi_array[ + (n + 1) * 4 : (n + 2) * 4 + ] + + pt3["hard_limit"]["low_narrow"]["corr_table"] = corr_data[ + :, 4:: + ] + pt3["hard_limit"]["low_narrow"]["sdc"] = sin_array[ + (n + 2) * 4 : (n + 3) * 4 + ] + pt3["hard_limit"]["low_narrow"]["cdc"] = cos_array[ + (n + 2) * 4 : (n + 3) * 4 + ] + pt3["hard_limit"]["low_narrow"]["noise_floor"] = rssi_array[ + (n + 2) * 4 : (n + 3) * 4 + ] + + # 8 tests arranged in sets of 2. The linear is 1st followed + # by the hard limit. elif bm_count == 2 and correl_count == 8: # Hard limit bandwidth (n=0) if n == 0: - pt3['hard_limit']['high_wide']['corr_table'] = corr_data[:, 0:4] - pt3['hard_limit']['high_wide']['sdc'] = sin_array[n * 4: (n + 1) * 4] - pt3['hard_limit']['high_wide']['cdc'] = cos_array[n * 4: (n + 1) * 4] - pt3['hard_limit']['high_wide']['noise_floor'] = rssi_array[n * 4: (n + 1) * 4] - - pt3['hard_limit']['low_wide']['corr_table'] = corr_data[:, 4::] - pt3['hard_limit']['low_wide']['sdc'] = sin_array[(n + 1) * 4: (n + 2) * 4] - pt3['hard_limit']['low_wide']['cdc'] = cos_array[(n + 1) * 4: (n + 2) * 4] - pt3['hard_limit']['low_wide']['noise_floor'] = rssi_array[(n + 1) * 4: (n + 2) * 4] + pt3["hard_limit"]["high_wide"]["corr_table"] = corr_data[:, 0:4] + pt3["hard_limit"]["high_wide"]["sdc"] = sin_array[ + n * 4 : (n + 1) * 4 + ] + pt3["hard_limit"]["high_wide"]["cdc"] = cos_array[ + n * 4 : (n + 1) * 4 + ] + pt3["hard_limit"]["high_wide"]["noise_floor"] = rssi_array[ + n * 4 : (n + 1) * 4 + ] + + pt3["hard_limit"]["low_wide"]["corr_table"] = corr_data[:, 4::] + pt3["hard_limit"]["low_wide"]["sdc"] = sin_array[ + (n + 1) * 4 : (n + 2) * 4 + ] + pt3["hard_limit"]["low_wide"]["cdc"] = cos_array[ + (n + 1) * 4 : (n + 2) * 4 + ] + pt3["hard_limit"]["low_wide"]["noise_floor"] = rssi_array[ + (n + 1) * 4 : (n + 2) * 4 + ] # Hard limit narrow bandwidth (n=1) elif n == 1: - pt3['hard_limit']['high_narrow']['corr_table'] = corr_data[:, 0:4] - pt3['hard_limit']['high_narrow']['sdc'] = sin_array[(n + 1) * 4: (n + 2) * 4] - pt3['hard_limit']['high_narrow']['cdc'] = cos_array[(n + 1) * 4: (n + 2) * 4] - pt3['hard_limit']['high_narrow']['noise_floor'] = rssi_array[(n + 1) * 4: (n + 2) * 4] - - pt3['hard_limit']['low_narrow']['corr_table'] = corr_data[:, 4::] - pt3['hard_limit']['low_narrow']['sdc'] = sin_array[(n + 2) * 4: (n + 3) * 4] - pt3['hard_limit']['low_narrow']['cdc'] = cos_array[(n + 2) * 4: (n + 3) * 4] - pt3['hard_limit']['low_narrow']['noise_floor'] = rssi_array[(n + 2) * 4: (n + 3) * 4] + pt3["hard_limit"]["high_narrow"]["corr_table"] = corr_data[ + :, 0:4 + ] + pt3["hard_limit"]["high_narrow"]["sdc"] = sin_array[ + (n + 1) * 4 : (n + 2) * 4 + ] + pt3["hard_limit"]["high_narrow"]["cdc"] = cos_array[ + (n + 1) * 4 : (n + 2) * 4 + ] + pt3["hard_limit"]["high_narrow"]["noise_floor"] = rssi_array[ + (n + 1) * 4 : (n + 2) * 4 + ] + + pt3["hard_limit"]["low_narrow"]["corr_table"] = corr_data[ + :, 4:: + ] + pt3["hard_limit"]["low_narrow"]["sdc"] = sin_array[ + (n + 2) * 4 : (n + 3) * 4 + ] + pt3["hard_limit"]["low_narrow"]["cdc"] = cos_array[ + (n + 2) * 4 : (n + 3) * 4 + ] + pt3["hard_limit"]["low_narrow"]["noise_floor"] = rssi_array[ + (n + 2) * 4 : (n + 3) * 4 + ] # Linear wide bandwidth (n=2) elif n == 2: - 
pt3['linear']['high_wide']['corr_table'] = corr_data[:, 0:4] - pt3['linear']['high_wide']['noise_floor'] = rssi_array[(n + 2) * 4: (n + 3) * 4] + pt3["linear"]["high_wide"]["corr_table"] = corr_data[:, 0:4] + pt3["linear"]["high_wide"]["noise_floor"] = rssi_array[ + (n + 2) * 4 : (n + 3) * 4 + ] - pt3['linear']['low_wide']['corr_table'] = corr_data[:, 4::] - pt3['linear']['low_wide']['noise_floor'] = rssi_array[(n + 3) * 4: (n + 4) * 4] + pt3["linear"]["low_wide"]["corr_table"] = corr_data[:, 4::] + pt3["linear"]["low_wide"]["noise_floor"] = rssi_array[ + (n + 3) * 4 : (n + 4) * 4 + ] # Linear narrow bandwidth (n=3) elif n == 3: - pt3['linear']['high_narrow']['corr_table'] = corr_data[:, 0:4] - pt3['linear']['high_narrow']['noise_floor'] = rssi_array[(n + 3) * 4: (n + 4) * 4] + pt3["linear"]["high_narrow"]["corr_table"] = corr_data[:, 0:4] + pt3["linear"]["high_narrow"]["noise_floor"] = rssi_array[ + (n + 3) * 4 : (n + 4) * 4 + ] - pt3['linear']['low_narrow']['corr_table'] = corr_data[:, 4::] - pt3['linear']['low_narrow']['noise_floor'] = rssi_array[(n + 4) * 4: (n + 5) * 4] + pt3["linear"]["low_narrow"]["corr_table"] = corr_data[:, 4::] + pt3["linear"]["low_narrow"]["noise_floor"] = rssi_array[ + (n + 4) * 4 : (n + 5) * 4 + ] - # 8 tests in groups of 4. Hard limit is the first group then the linear. + # 8 tests in groups of 4. Hard limit is the first group then + # the linear. elif bm_count == 4: # Hard limit data (n=0) if n == 0: - pt3['hard_limit']['high_wide']['corr_table'] = corr_data[:, 0:4] - pt3['hard_limit']['high_wide']['sdc'] = sin_array[n * 4: (n + 1) * 4] - pt3['hard_limit']['high_wide']['cdc'] = cos_array[n * 4: (n + 1) * 4] - pt3['hard_limit']['high_wide']['noise_floor'] = rssi_array[n * 4: (n + 1) * 4] - - pt3['hard_limit']['low_wide']['corr_table'] = corr_data[:, 4:8] - pt3['hard_limit']['low_wide']['sdc'] = sin_array[(n + 1) * 4: (n + 2) * 4] - pt3['hard_limit']['low_wide']['cdc'] = cos_array[(n + 1) * 4: (n + 2) * 4] - pt3['hard_limit']['low_wide']['noise_floor'] = rssi_array[(n + 1) * 4: (n + 2) * 4] - - pt3['hard_limit']['high_narrow']['corr_table'] = corr_data[:, 8:12] - pt3['hard_limit']['high_narrow']['sdc'] = sin_array[(n + 2) * 4: (n + 3) * 4] - pt3['hard_limit']['high_narrow']['cdc'] = cos_array[(n + 2) * 4: (n + 3) * 4] - pt3['hard_limit']['high_narrow']['noise_floor'] = rssi_array[(n + 2) * 4: (n + 3) * 4] - - pt3['hard_limit']['low_narrow']['corr_table'] = corr_data[:, 12::] - pt3['hard_limit']['low_narrow']['sdc'] = sin_array[(n + 3) * 4: (n + 4) * 4] - pt3['hard_limit']['low_narrow']['cdc'] = cos_array[(n + 3) * 4: (n + 4) * 4] - pt3['hard_limit']['low_narrow']['noise_floor'] = rssi_array[(n + 3) * 4: (n + 4) * 4] + pt3["hard_limit"]["high_wide"]["corr_table"] = corr_data[:, 0:4] + pt3["hard_limit"]["high_wide"]["sdc"] = sin_array[ + n * 4 : (n + 1) * 4 + ] + pt3["hard_limit"]["high_wide"]["cdc"] = cos_array[ + n * 4 : (n + 1) * 4 + ] + pt3["hard_limit"]["high_wide"]["noise_floor"] = rssi_array[ + n * 4 : (n + 1) * 4 + ] + + pt3["hard_limit"]["low_wide"]["corr_table"] = corr_data[:, 4:8] + pt3["hard_limit"]["low_wide"]["sdc"] = sin_array[ + (n + 1) * 4 : (n + 2) * 4 + ] + pt3["hard_limit"]["low_wide"]["cdc"] = cos_array[ + (n + 1) * 4 : (n + 2) * 4 + ] + pt3["hard_limit"]["low_wide"]["noise_floor"] = rssi_array[ + (n + 1) * 4 : (n + 2) * 4 + ] + + pt3["hard_limit"]["high_narrow"]["corr_table"] = corr_data[ + :, 8:12 + ] + pt3["hard_limit"]["high_narrow"]["sdc"] = sin_array[ + (n + 2) * 4 : (n + 3) * 4 + ] + pt3["hard_limit"]["high_narrow"]["cdc"] = 
cos_array[ + (n + 2) * 4 : (n + 3) * 4 + ] + pt3["hard_limit"]["high_narrow"]["noise_floor"] = rssi_array[ + (n + 2) * 4 : (n + 3) * 4 + ] + + pt3["hard_limit"]["low_narrow"]["corr_table"] = corr_data[ + :, 12:: + ] + pt3["hard_limit"]["low_narrow"]["sdc"] = sin_array[ + (n + 3) * 4 : (n + 4) * 4 + ] + pt3["hard_limit"]["low_narrow"]["cdc"] = cos_array[ + (n + 3) * 4 : (n + 4) * 4 + ] + pt3["hard_limit"]["low_narrow"]["noise_floor"] = rssi_array[ + (n + 3) * 4 : (n + 4) * 4 + ] # Linear data (n=1) else: - pt3['linear']['high_wide']['corr_table'] = corr_data[:, 0:4] - pt3['linear']['high_wide']['noise_floor'] = rssi_array[(n + 3) * 4: (n + 4) * 4] - - pt3['linear']['low_wide']['corr_table'] = corr_data[:, 4:8] - pt3['linear']['low_wide']['noise_floor'] = rssi_array[(n + 4) * 4: (n + 5) * 4] - - pt3['linear']['high_narrow']['corr_table'] = corr_data[:, 8:12] - pt3['linear']['high_narrow']['noise_floor'] = rssi_array[(n + 5) * 4: (n + 6) * 4] - - pt3['linear']['low_narrow']['corr_table'] = corr_data[:, 12::] - pt3['linear']['low_narrow']['noise_floor'] = rssi_array[(n + 6) * 4: (n + 7) * 4] - self.result['pt3'] = pt3 + pt3["linear"]["high_wide"]["corr_table"] = corr_data[:, 0:4] + pt3["linear"]["high_wide"]["noise_floor"] = rssi_array[ + (n + 3) * 4 : (n + 4) * 4 + ] + + pt3["linear"]["low_wide"]["corr_table"] = corr_data[:, 4:8] + pt3["linear"]["low_wide"]["noise_floor"] = rssi_array[ + (n + 4) * 4 : (n + 5) * 4 + ] + + pt3["linear"]["high_narrow"]["corr_table"] = corr_data[:, 8:12] + pt3["linear"]["high_narrow"]["noise_floor"] = rssi_array[ + (n + 5) * 4 : (n + 6) * 4 + ] + + pt3["linear"]["low_narrow"]["corr_table"] = corr_data[:, 12::] + pt3["linear"]["low_narrow"]["noise_floor"] = rssi_array[ + (n + 6) * 4 : (n + 7) * 4 + ] + self.result["pt3"] = pt3 except Exception: pass - # pt3 = None - # self.result['pt3'] = pt3 diff --git a/Classes/Python2Matlab.py b/Classes/Python2Matlab.py index 13d9570..e36cbd6 100644 --- a/Classes/Python2Matlab.py +++ b/Classes/Python2Matlab.py @@ -15,7 +15,8 @@ class Python2Matlab(object): """ def __init__(self, meas, checked): - """Initialize dictionaries and convert Python data to Matlab structures. + """Initialize dictionaries and convert Python data to Matlab + structures. 
         Parameters
         ----------
@@ -29,41 +30,52 @@
         # Initialize Matlab dictionary
         self.matlab_dict = dict()
 
-        # Apply conversion of Python data to be compatible with Matlab conventions
+        # Apply conversion of Python data to be compatible with Matlab
+        # conventions
         meas_mat = self.data2matlab(meas)
         checked_idx = np.array(checked)
         checked_idx_meas = np.copy(checked_idx)
-        np.append(checked_idx_meas, len(meas_mat.extrap_fit.sel_fit)-1)
+        checked_idx_meas = np.append(
+            checked_idx_meas, len(meas_mat.extrap_fit.sel_fit) - 1
+        )
 
         # Convert Python data structure to Matlab
-        self.matlab_dict['stationName'] = meas_mat.station_name
-        if self.matlab_dict['stationName'] is None:
-            self.matlab_dict['stationName'] = ''
-        self.matlab_dict['stationNumber'] = meas_mat.station_number
-        if self.matlab_dict['stationNumber'] is None:
-            self.matlab_dict['stationNumber'] = ''
-        self.matlab_dict['persons'] = meas_mat.persons
-        self.matlab_dict['meas_number'] = meas_mat.meas_number
-        self.matlab_dict['stage_start_m'] = meas_mat.stage_start_m
-        self.matlab_dict['stage_end_m'] = meas_mat.stage_end_m
-        self.matlab_dict['stage_meas_m'] = meas_mat.stage_meas_m
-        self.matlab_dict['processing'] = meas_mat.processing
-        self.matlab_dict['extTempChk'] = meas_mat.ext_temp_chk
-        self.matlab_dict['userRating'] = meas_mat.user_rating
-        self.matlab_dict['initialSettings'] = meas_mat.initial_settings
-        self.matlab_dict['comments'] = self.comment2struct(meas_mat.comments)
-        self.matlab_dict['compassCal'] = self.listobj2struct(meas_mat.compass_cal, py_2_mat_dict)
-        self.matlab_dict['compassEval'] = self.listobj2struct(meas_mat.compass_eval, py_2_mat_dict)
-        self.matlab_dict['sysTest'] = self.listobj2struct(meas_mat.system_tst, py_2_mat_dict)
+        self.matlab_dict["stationName"] = meas_mat.station_name
+        if self.matlab_dict["stationName"] is None:
+            self.matlab_dict["stationName"] = ""
+        self.matlab_dict["stationNumber"] = meas_mat.station_number
+        if self.matlab_dict["stationNumber"] is None:
+            self.matlab_dict["stationNumber"] = ""
+        self.matlab_dict["persons"] = meas_mat.persons
+        self.matlab_dict["meas_number"] = meas_mat.meas_number
+        self.matlab_dict["stage_start_m"] = meas_mat.stage_start_m
+        self.matlab_dict["stage_end_m"] = meas_mat.stage_end_m
+        self.matlab_dict["stage_meas_m"] = meas_mat.stage_meas_m
+        self.matlab_dict["processing"] = meas_mat.processing
+        self.matlab_dict["extTempChk"] = meas_mat.ext_temp_chk
+        self.matlab_dict["userRating"] = meas_mat.user_rating
+        self.matlab_dict["initialSettings"] = meas_mat.initial_settings
+        self.matlab_dict["comments"] = self.comment2struct(meas_mat.comments)
+        self.matlab_dict["compassCal"] = self.listobj2struct(
+            meas_mat.compass_cal, py_2_mat_dict
+        )
+        self.matlab_dict["compassEval"] = self.listobj2struct(
+            meas_mat.compass_eval, py_2_mat_dict
+        )
+        self.matlab_dict["sysTest"] = self.listobj2struct(
+            meas_mat.system_tst, py_2_mat_dict
+        )
 
         discharge = np.copy(meas_mat.discharge)
         discharge_sel = [discharge[i] for i in checked_idx]
-        self.matlab_dict['discharge'] = self.listobj2struct(discharge_sel, py_2_mat_dict)
+        self.matlab_dict["discharge"] = self.listobj2struct(
+            discharge_sel, py_2_mat_dict
+        )
         transects = np.copy(meas_mat.transects)
         transects_sel = [transects[i] for i in checked_idx]
-        self.matlab_dict['transects'] = self.listobj2struct(transects_sel, py_2_mat_dict)
+        self.matlab_dict["transects"] = self.listobj2struct(
+            transects_sel, py_2_mat_dict
+        )
 
         extrap = copy.deepcopy(meas_mat.extrap_fit)
-        self.matlab_dict['extrapFit'] = self.listobj2struct([extrap], py_2_mat_dict)
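# Illustrative sketch, not part of the patch: how the checked-transect
# indices above subset the parallel discharge/transect lists before they are
# converted to Matlab structures. Values are hypothetical stand-ins.
import numpy as np

checked = [0, 2]                # transects marked as used in the measurement
discharge = ["q0", "q1", "q2"]  # stand-ins for discharge objects
checked_idx = np.array(checked)
discharge_sel = [discharge[i] for i in checked_idx]
print(discharge_sel)            # ['q0', 'q2']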
+ self.matlab_dict["extrapFit"] = self.listobj2struct([extrap], py_2_mat_dict) # Check for multiple moving-bed tests if type(meas_mat.mb_tests) == list: mb_tests = self.listobj2struct(meas_mat.mb_tests, py_2_mat_dict) @@ -72,16 +84,19 @@ class Python2Matlab(object): if len(mb_tests) == 0: mb_tests = np.array([]) - self.matlab_dict['mbTests'] = mb_tests + self.matlab_dict["mbTests"] = mb_tests - self.matlab_dict['observed_no_moving_bed'] = meas_mat.observed_no_moving_bed + self.matlab_dict["observed_no_moving_bed"] = meas_mat.observed_no_moving_bed - self.matlab_dict['uncertainty'] = self.listobj2struct([meas_mat.uncertainty], py_2_mat_dict) - self.matlab_dict['qa'] = self.listobj2struct([meas_mat.qa], py_2_mat_dict) - self.matlab_dict['run_oursin'] = meas_mat.run_oursin + self.matlab_dict["uncertainty"] = self.listobj2struct( + [meas_mat.uncertainty], py_2_mat_dict + ) + self.matlab_dict["qa"] = self.listobj2struct([meas_mat.qa], py_2_mat_dict) + self.matlab_dict["run_oursin"] = meas_mat.run_oursin if meas_mat.oursin is not None: - self.matlab_dict['oursin'] = self.listobj2struct([meas_mat.oursin], py_2_mat_dict) - + self.matlab_dict["oursin"] = self.listobj2struct( + [meas_mat.oursin], py_2_mat_dict + ) @staticmethod def listobj2struct(list_in, new_key_dict=None): @@ -92,7 +107,8 @@ class Python2Matlab(object): list_in: list List of objects new_key_dict: dict - Dictionary to translate python variable names to Matlab variable names + Dictionary to translate python variable names to Matlab variable + names Returns ------- @@ -141,7 +157,8 @@ class Python2Matlab(object): @staticmethod def change_dict_keys(dict_in, new_key_dict): - """Recursively changes the name of dictionary keys and checks for str data types and converts them to arrays. + """Recursively changes the name of dictionary keys and checks for str + data types and converts them to arrays. Parameters ---------- @@ -156,15 +173,20 @@ class Python2Matlab(object): for key in dict_in: # Iterate on nested dictionaries if type(dict_in[key]) is dict: - dict_in[key] = Python2Matlab.change_dict_keys(dict_in[key], new_key_dict) + dict_in[key] = Python2Matlab.change_dict_keys( + dict_in[key], new_key_dict + ) - # If a list contains a str variable, such as messages, convert the string to an array + # If a list contains a str variable, such as messages, convert + # the string to an array if type(dict_in[key]) is list: for line in range(len(dict_in[key])): if type(line) == str: for col in range(len(dict_in[key][line])): if type(dict_in[key][line][col]) is str: - dict_in[key][line][col] = np.array([list(dict_in[key][line][col])]) + dict_in[key][line][col] = np.array( + [list(dict_in[key][line][col])] + ) # Change key if needed if new_key_dict is not None and key in new_key_dict: @@ -176,14 +198,16 @@ class Python2Matlab(object): @staticmethod def obj2dict(obj, new_key_dict=None): - """Converts object variables to dictionaries. Works recursively to all levels of objects. + """Converts object variables to dictionaries. Works recursively to all + levels of objects. 
Parameters ---------- obj: object Object of some class new_key_dict: dict - Dictionary to translate python variable names to Matlab variable names + Dictionary to translate python variable names to Matlab variable + names Returns ------- @@ -195,21 +219,29 @@ class Python2Matlab(object): for key in obj_dict: # If variable is another object convert to dictionary recursively - if str(type(obj_dict[key]))[8:13] == 'Class': + if str(type(obj_dict[key]))[8:13] == "Class": obj_dict[key] = Python2Matlab.obj2dict(obj_dict[key], new_key_dict) # If variable is a list of objects convert to dictionary - elif type(obj_dict[key]) is list and len(obj_dict[key]) > 0 \ - and str(type(obj_dict[key][0]))[8:13] == 'Class': - obj_dict[key] = Python2Matlab.listobj2struct(obj_dict[key], new_key_dict) + elif ( + type(obj_dict[key]) is list + and len(obj_dict[key]) > 0 + and str(type(obj_dict[key][0]))[8:13] == "Class" + ): + obj_dict[key] = Python2Matlab.listobj2struct( + obj_dict[key], new_key_dict + ) elif type(obj_dict[key]) is dict: - obj_dict[key] = Python2Matlab.change_dict_keys(obj_dict[key], new_key_dict) + obj_dict[key] = Python2Matlab.change_dict_keys( + obj_dict[key], new_key_dict + ) elif type(obj_dict[key]) is pd.DataFrame: obj_dict[key] = obj_dict[key].to_numpy() - # If variable is None rename as necessary and convert None to empty list + # If variable is None rename as necessary and convert None to + # empty list if obj_dict[key] is None: if new_key_dict is not None and key in new_key_dict: new_dict[new_key_dict[key]] = [] @@ -247,14 +279,16 @@ class Python2Matlab(object): @staticmethod def listobj2dict(list_in, new_key_dict=None): - """Converts list of objects to list of dictionaries. Works recursively to all levels of objects. + """Converts list of objects to list of dictionaries. Works recursively + to all levels of objects. 
Parameters ---------- list_in: list List of objects of some class new_key_dict: dict - Dictionary to translate python variable names to Matlab variable names + Dictionary to translate python variable names to Matlab variable + names Returns ------- @@ -268,7 +302,8 @@ class Python2Matlab(object): @staticmethod def create_py_2_mat_dict(): - """Creates a dictionary to cross reference Python names with Matlab names + """Creates a dictionary to cross reference Python names with Matlab + names Returns ------- @@ -276,308 +311,310 @@ class Python2Matlab(object): Dictionary of python key to Matlab variable """ - py_2_mat_dict = {'Python': 'Matlab', - 'align_correction_deg': 'alignCorrection_deg', - 'altitude_ens_m': 'altitudeEns_m', - 'avg_method': 'avgMethod', - 'beam_angle_deg': 'beamAngle_deg', - 'beam_filter': 'beamFilter', - 'beam_pattern': 'beamPattern', - 'blanking_distance_m': 'blankingDistance_m', - 'boat_vel': 'boatVel', - 'bot_diff': 'botdiff', - 'bot_method': 'botMethod', - 'bot_method_auto': 'botMethodAuto', - 'bot_method_orig': 'botMethodOrig', - 'bot_r2': 'botrsqr', - 'bottom_ens': 'bottomEns', - 'bottom_mode': 'bottomMode', - 'bt_depths': 'btDepths', - 'bt_vel': 'btVel', - 'cell_depth_normalized': 'cellDepthNormalized', - 'cells_above_sl': 'cellsAboveSL', - 'cells_above_sl_bt': 'cellsAboveSLbt', - 'compass_cal': 'compassCal', - 'compass_diff_deg': 'compassDiff_deg', - 'compass_eval': 'compassEval', - 'configuration_commands': 'configurationCommands', - 'coord_sys': 'coordSys', - 'corr_table': 'corrTable', - 'correction_factor': 'correctionFactor', - 'cov_95': 'cov95', - 'cov_95_user': 'cov95User', - 'cust_coef': 'custCoef', - 'd_filter': 'dFilter', - 'd_filter_thresholds': 'dFilterThreshold', - 'data_extent': 'dataExtent', - 'data_orig': 'dataOrig', - 'data_type': 'dataType', - 'date_time': 'dateTime', - 'depth_beams_m': 'depthBeams_m', - 'depth_cell_depth_m': 'depthCellDepth_m', - 'depth_cell_depth_orig_m': 'depthCellDepthOrig_m', - 'depth_cell_size_m': 'depthCellSize_m', - 'depth_cell_size_orig_m': 'depthCellSizeOrig_m', - 'depth_depth_m': 'depthCellDepth_m', - 'depth_source_ens': 'depthSourceEns', - 'depth_freq_kHz': 'depthFreq_Hz', - 'depth_invalid_index': 'depthInvalidIndex', - 'depth_orig_m': 'depthOrig_m', - 'depth_processed_m': 'depthProcessed_m', - 'depth_source': 'depthSource', - 'depths': 'depths', - 'diff_qual_ens': 'diffQualEns', - 'dist_us_m': 'distUS_m', - 'distance_m': 'dist_m', - 'draft_orig_m': 'draftOrig_m', - 'draft_use_m': 'draftUse_m', - 'ds_depths': 'dsDepths', - 'edges_95': 'edges95', - 'edges_95_user': 'edges95User', - 'end_serial_time': 'endSerialTime', - 'ens_duration_sec': 'ensDuration_sec', - 'excluded_dist_m': 'excludedDist', - 'exp_method': 'expMethod', - 'exponent_95_ci': 'exponent95confint', - 'exponent_auto': 'exponentAuto', - 'exponent_orig': 'exponentOrig', - 'ext_gga_altitude_m': 'extGGAAltitude_m', - 'ext_gga_differential': 'extGGADifferential', - 'ext_gga_hdop': 'extGGAHDOP', - 'ext_gga_lat_deg': 'extGGALat_deg', - 'ext_gga_lon_deg': 'extGGALon_deg', - 'ext_gga_num_sats': 'extGGANumSats', - 'ext_gga_serial_time': 'extGGASerialTime', - 'ext_gga_utc': 'extGGAUTC', - 'ext_temp_chk': 'extTempChk', - 'ext_vtg_course_deg': 'extVTGCourse_deg', - 'ext_vtg_speed_mps': 'extVTGSpeed_mps', - 'extrap_fit': 'extrapFit', - 'extrapolation_95': 'extrapolation95', - 'extrapolation_95_user': 'extrapolation95User', - 'file_name': 'fileName', - 'filter_type': 'filterType', - 'fit_method': 'fitMethod', - 'fit_r2': 'fitrsqr', - 'flow_dir_deg': 'flowDir_deg', - 
'flow_dir': 'flowDir_deg', - 'flow_spd_mps': 'flowSpd_mps', - 'frequency_khz': 'frequency_hz', - 'gga_lat_ens_deg': 'ggaLatEns_deg', - 'gga_lon_ens_deg': 'ggaLonEns_deg', - 'gga_position_method': 'ggaPositionMethod', - 'gga_serial_time_ens': 'ggaSerialTimeEns', - 'gga_vel': 'ggaVel', - 'gga_velocity_ens_mps': 'ggaVelocityEns_mps', - 'gga_velocity_method': 'ggaVelocityMethod', - 'gps_HDOP_filter': 'gpsHDOPFilter', - 'gps_HDOP_filter_change': 'gpsHDOPFilterChange', - 'gps_HDOP_filter_max': 'gpsHDOPFilterMax', - 'gps_altitude_filter': 'gpsAltitudeFilter', - 'gps_altitude_filter_change': 'gpsAltitudeFilterChange', - 'gps_diff_qual_filter': 'gpsDiffQualFilter', - 'hard_limit': 'hardLimit', - 'hdop_ens': 'hdopEns', - 'high_narrow': 'hn', - 'high_wide': 'hw', - 'in_transect_idx': 'inTransectIdx', - 'initial_settings': 'initialSettings', - 'int_cells': 'intCells', - 'int_ens': 'intEns', - 'interp_type': 'interpType', - 'interpolate_cells': 'interpolateCells', - 'interpolate_ens': 'interpolateEns', - 'invalid_95': 'invalid95', - 'invalid_index': 'invalidIndex', - 'invalid_95_user': 'invalid95User', - 'left_idx': 'leftidx', - 'low_narrow': 'ln', - 'low_wide': 'lw', - 'mag_error': 'magError', - 'mag_var_orig_deg': 'magVarOrig_deg', - 'mag_var_deg': 'magVar_deg', - 'man_bot': 'manBot', - 'man_exp': 'manExp', - 'man_top': 'manTop', - 'mb_dir': 'mbDir_deg', - 'mb_spd_mps': 'mbSpd_mps', - 'mb_tests': 'mbTests', - 'meas': 'meas_struct', - 'middle_cells': 'middleCells', - 'middle_ens': 'middleEns', - 'moving_bed': 'movingBed', - 'moving_bed_95': 'movingBed95', - 'moving_bed_95_user': 'movingBed95User', - 'n_failed': 'nFailed', - 'n_tests': 'nTests', - 'nav_ref': 'navRef', - 'near_bed_speed_mps': 'nearBedSpeed_mps', - 'noise_floor': 'noiseFloor', - 'norm_data': 'normData', - 'ns_exp': 'nsExponent', - 'ns_exponent': 'nsexponent', - 'num_invalid': 'numInvalid', - 'num_sats_ens': 'numSatsEns', - 'number_ensembles': 'numEns2Avg', - 'orig_coord_sys': 'origCoordSys', - 'orig_ref': 'origNavRef', - 'orig_nav_ref': 'origNavRef', - 'orig_sys': 'origCoordSys', - 'original_data': 'originalData', - 'per_good_ens': 'perGoodEns', - 'percent_invalid_bt': 'percentInvalidBT', - 'percent_mb': 'percentMB', - 'pitch_limit': 'pitchLimit', - 'pp_exp': 'ppExponent', - 'pp_exponent': 'ppexponent', - 'processed_source': 'processedSource', - 'q_cns_mean': 'qCNSmean', - 'q_cns_opt_mean': 'qCNSoptmean', - 'q_cns_opt_per_diff': 'qCNSoptperdiff', - 'q_cns_per_diff': 'qCNSperdiff', - 'q_man_mean': 'qManmean', - 'q_man_per_diff': 'qManperdiff', - 'q_3p_ns_mean': 'q3pNSmean', - 'q_3p_ns_opt_mean': 'q3pNSoptmean', - 'q_3p_ns_opt_per_diff': 'q3pNSoptperdiff', - 'q_3p_ns_per_diff': 'q3pNSperdiff', - 'q_pp_mean': 'qPPmean', - 'q_pp_opt_mean': 'qPPoptmean', - 'q_pp_opt_per_diff': 'qPPoptperdiff', - 'q_pp_per_diff': 'qPPperdiff', - 'q_run_threshold_caution': 'qRunThresholdCaution', - 'q_run_threshold_warning': 'qRunThresholdWarning', - 'q_sensitivity': 'qSensitivity', - 'q_total_threshold_caution': 'qTotalThresholdWarning', - 'q_total_threshold_warning': 'qTotalThresholdCaution', - 'raw_gga_altitude_m': 'rawGGAAltitude_m', - 'raw_gga_delta_time': 'rawGGADeltaTime', - 'raw_gga_differential': 'rawGGADifferential', - 'raw_gga_hdop': 'rawGGAHDOP', - 'raw_gga_lat_deg': 'rawGGALat_deg', - 'raw_gga_lon_deg': 'rawGGALon_deg', - 'raw_gga_serial_time': 'rawGGASerialTime', - 'raw_gga_utc': 'rawGGAUTC', - 'raw_gga_num_sats': 'rawGGANumSats', - 'raw_vel_mps': 'rawVel_mps', - 'raw_vtg_course_deg': 'rawVTGCourse_deg', - 'raw_vtg_delta_time': 'rawVTGDeltaTime', 
- 'raw_vtg_mode_indicator': 'rawVTGModeIndicator', - 'raw_vtg_speed_mps': 'rawVTGSpeed_mps', - 'rec_edge_method': 'recEdgeMethod', - 'right_idx': 'rightidx', - 'roll_limit': 'rollLimit', - 'rssi_units': 'rssiUnits', - 'sel_fit': 'selFit', - 'serial_num': 'serialNum', - 'sl_lag_effect_m': 'slLagEffect_m', - 'sl_cutoff_number': 'slCutoffNum', - 'sl_cutoff_percent': 'slCutoffPer', - 'sl_cutoff_type': 'slCutoffType', - 'sl_cutoff_m': 'slCutoff_m', - 'smooth_depth': 'smoothDepth', - 'smooth_filter': 'smoothFilter', - 'smooth_lower_limit': 'smoothLowerLimit', - 'smooth_speed': 'smoothSpeed', - 'smooth_upper_limit': 'smoothUpperLimit', - 'snr_filter': 'snrFilter', - 'speed_of_sound_mps': 'speedOfSound_mps', - 'snr_rng': 'snrRng', - 'start_edge': 'startEdge', - 'start_serial_time': 'startSerialTime', - 'station_name': 'stationName', - 'station_number': 'stationNumber', - 'stationary_cs_track': 'stationaryCSTrack', - 'stationary_mb_vel': 'stationaryMBVel', - 'stationary_us_track': 'stationaryUSTrack', - 'system_test': 'sysTest', - 'system_tst': 'systemTest', - 'systematic_user': 'systematicUser', - 't_matrix': 'tMatrix', - 'temperature': 'temperature', - 'temperature_deg_c': 'temperature_degC', - 'test_quality': 'testQuality', - 'time_stamp': 'timeStamp', - 'top_ens': 'topEns', - 'top_fit_r2': 'topfitr2', - 'top_max_diff': 'topmaxdiff', - 'top_method': 'topMethod', - 'top_method_auto': 'topMethodAuto', - 'top_method_orig': 'topMethodOrig', - 'top_r2': 'topr2', - 'total_95': 'total95', - 'total_uncorrected': 'totalUncorrected', - 'total_95_user': 'total95User', - 'transect_duration_sec': 'transectDuration_sec', - 'u_auto': 'uAuto', - 'u_processed_mps': 'uProcessed_mps', - 'u_earth_no_ref_mps': 'uEarthNoRef_mps', - 'unit_normalized_z': 'unitNormalizedz', - 'unit_normalized': 'unitNormalized', - 'unit_normalized_25': 'unitNormalized25', - 'unit_normalized_75': 'unitNormalized75', - 'unit_normalized_med': 'unitNormalizedMed', - 'unit_normalized_no': 'unitNormalizedNo', - 'use_2_correct': 'use2Correct', - 'user_discharge_cms': 'userQ_cms', - 'user_rating': 'userRating', - 'user_valid': 'userValid', - 'utm_ens_m': 'UTMEns_m', - 'v_processed_mps': 'vProcessed_mps', - 'v_earth_no_ref_mps': 'vEarthNoRef_mps', - 'valid_beams': 'validBeams', - 'valid_data': 'validData', - 'valid_data_method': 'validDataMethod', - 'vb_depths': 'vbDepths', - 'vel_method': 'velMethod', - 'vtg_vel': 'vtgVel', - 'vtg_velocity_ens_mps': 'vtgVelocityEns_mps', - 'vtg_velocity_method': 'vtgVelocityMethod', - 'w_filter': 'wFilter', - 'w_filter_thresholds': 'wFilterThreshold', - 'w_vel': 'wVel', - 'water_mode': 'waterMode', - 'wt_depth_filter': 'wtDepthFilter', - 'z_auto': 'zAuto', - 'all_invalid': 'allInvalid', - 'q_max_run': 'qMaxRun', - 'q_max_run_caution': 'qRunCaution', - 'q_max_run_warning': 'qRunWarning', - 'q_total': 'qTotal', - 'q_total_caution': 'qTotalCaution', - 'q_total_warning': 'qTotalWarning', - 'sta_name': 'staName', - 'sta_number': 'staNumber', - 'left_q': 'leftQ', - 'left_q_idx': 'leftQIdx', - 'right_q': 'rightQ', - 'right_q_idx': 'rightQIdx', - 'left_sign': 'leftSign', - 'right_sign': 'rightSign', - 'right_dist_moved_idx': 'rightDistMovedIdx', - 'left_dist_moved_idx': 'leftDistMovedIdx', - 'left_zero': 'leftzero', - 'left_zero_idx': 'leftZeroIdx', - 'right_zero': 'rightzero', - 'right_zero_idx': 'rightZeroIdx', - 'left_type': 'leftType', - 'right_type': 'rightType', - 'pitch_mean_warning_idx': 'pitchMeanWarningIdx', - 'pitch_mean_caution_idx': 'pitchMeanCautionIdx', - 'pitch_std_caution_idx': 'pitchStdCautionIdx', - 
'roll_mean_warning_idx': 'rollMeanWarningIdx', - 'roll_mean_caution_idx': 'rollMeanCautionIdx', - 'roll_std_caution_idx': 'rollStdCautionIdx', - 'magvar_idx': 'magvarIdx', - 'mag_error_idx': 'magErrorIdx', - 'invalid_transect_left_idx': 'invalidTransLeftIdx', - 'invalid_transect_right_idx': 'invalidTransRightIdx', - } + py_2_mat_dict = { + "Python": "Matlab", + "align_correction_deg": "alignCorrection_deg", + "altitude_ens_m": "altitudeEns_m", + "avg_method": "avgMethod", + "beam_angle_deg": "beamAngle_deg", + "beam_filter": "beamFilter", + "beam_pattern": "beamPattern", + "blanking_distance_m": "blankingDistance_m", + "boat_vel": "boatVel", + "bot_diff": "botdiff", + "bot_method": "botMethod", + "bot_method_auto": "botMethodAuto", + "bot_method_orig": "botMethodOrig", + "bot_r2": "botrsqr", + "bottom_ens": "bottomEns", + "bottom_mode": "bottomMode", + "bt_depths": "btDepths", + "bt_vel": "btVel", + "cell_depth_normalized": "cellDepthNormalized", + "cells_above_sl": "cellsAboveSL", + "cells_above_sl_bt": "cellsAboveSLbt", + "compass_cal": "compassCal", + "compass_diff_deg": "compassDiff_deg", + "compass_eval": "compassEval", + "configuration_commands": "configurationCommands", + "coord_sys": "coordSys", + "corr_table": "corrTable", + "correction_factor": "correctionFactor", + "cov_95": "cov95", + "cov_95_user": "cov95User", + "cust_coef": "custCoef", + "d_filter": "dFilter", + "d_filter_thresholds": "dFilterThreshold", + "data_extent": "dataExtent", + "data_orig": "dataOrig", + "data_type": "dataType", + "date_time": "dateTime", + "depth_beams_m": "depthBeams_m", + "depth_cell_depth_m": "depthCellDepth_m", + "depth_cell_depth_orig_m": "depthCellDepthOrig_m", + "depth_cell_size_m": "depthCellSize_m", + "depth_cell_size_orig_m": "depthCellSizeOrig_m", + "depth_depth_m": "depthCellDepth_m", + "depth_source_ens": "depthSourceEns", + "depth_freq_kHz": "depthFreq_Hz", + "depth_invalid_index": "depthInvalidIndex", + "depth_orig_m": "depthOrig_m", + "depth_processed_m": "depthProcessed_m", + "depth_source": "depthSource", + "depths": "depths", + "diff_qual_ens": "diffQualEns", + "dist_us_m": "distUS_m", + "distance_m": "dist_m", + "draft_orig_m": "draftOrig_m", + "draft_use_m": "draftUse_m", + "ds_depths": "dsDepths", + "edges_95": "edges95", + "edges_95_user": "edges95User", + "end_serial_time": "endSerialTime", + "ens_duration_sec": "ensDuration_sec", + "excluded_dist_m": "excludedDist", + "exp_method": "expMethod", + "exponent_95_ci": "exponent95confint", + "exponent_auto": "exponentAuto", + "exponent_orig": "exponentOrig", + "ext_gga_altitude_m": "extGGAAltitude_m", + "ext_gga_differential": "extGGADifferential", + "ext_gga_hdop": "extGGAHDOP", + "ext_gga_lat_deg": "extGGALat_deg", + "ext_gga_lon_deg": "extGGALon_deg", + "ext_gga_num_sats": "extGGANumSats", + "ext_gga_serial_time": "extGGASerialTime", + "ext_gga_utc": "extGGAUTC", + "ext_temp_chk": "extTempChk", + "ext_vtg_course_deg": "extVTGCourse_deg", + "ext_vtg_speed_mps": "extVTGSpeed_mps", + "extrap_fit": "extrapFit", + "extrapolation_95": "extrapolation95", + "extrapolation_95_user": "extrapolation95User", + "file_name": "fileName", + "filter_type": "filterType", + "fit_method": "fitMethod", + "fit_r2": "fitrsqr", + "flow_dir_deg": "flowDir_deg", + "flow_dir": "flowDir_deg", + "flow_spd_mps": "flowSpd_mps", + "frequency_khz": "frequency_hz", + "gga_lat_ens_deg": "ggaLatEns_deg", + "gga_lon_ens_deg": "ggaLonEns_deg", + "gga_position_method": "ggaPositionMethod", + "gga_serial_time_ens": "ggaSerialTimeEns", + "gga_vel": "ggaVel", + 
"gga_velocity_ens_mps": "ggaVelocityEns_mps", + "gga_velocity_method": "ggaVelocityMethod", + "gps_HDOP_filter": "gpsHDOPFilter", + "gps_HDOP_filter_change": "gpsHDOPFilterChange", + "gps_HDOP_filter_max": "gpsHDOPFilterMax", + "gps_altitude_filter": "gpsAltitudeFilter", + "gps_altitude_filter_change": "gpsAltitudeFilterChange", + "gps_diff_qual_filter": "gpsDiffQualFilter", + "hard_limit": "hardLimit", + "hdop_ens": "hdopEns", + "high_narrow": "hn", + "high_wide": "hw", + "in_transect_idx": "inTransectIdx", + "initial_settings": "initialSettings", + "int_cells": "intCells", + "int_ens": "intEns", + "interp_type": "interpType", + "interpolate_cells": "interpolateCells", + "interpolate_ens": "interpolateEns", + "invalid_95": "invalid95", + "invalid_index": "invalidIndex", + "invalid_95_user": "invalid95User", + "left_idx": "leftidx", + "low_narrow": "ln", + "low_wide": "lw", + "mag_error": "magError", + "mag_var_orig_deg": "magVarOrig_deg", + "mag_var_deg": "magVar_deg", + "man_bot": "manBot", + "man_exp": "manExp", + "man_top": "manTop", + "mb_dir": "mbDir_deg", + "mb_spd_mps": "mbSpd_mps", + "mb_tests": "mbTests", + "meas": "meas_struct", + "middle_cells": "middleCells", + "middle_ens": "middleEns", + "moving_bed": "movingBed", + "moving_bed_95": "movingBed95", + "moving_bed_95_user": "movingBed95User", + "n_failed": "nFailed", + "n_tests": "nTests", + "nav_ref": "navRef", + "near_bed_speed_mps": "nearBedSpeed_mps", + "noise_floor": "noiseFloor", + "norm_data": "normData", + "ns_exp": "nsExponent", + "ns_exponent": "nsexponent", + "num_invalid": "numInvalid", + "num_sats_ens": "numSatsEns", + "number_ensembles": "numEns2Avg", + "orig_coord_sys": "origCoordSys", + "orig_ref": "origNavRef", + "orig_nav_ref": "origNavRef", + "orig_sys": "origCoordSys", + "original_data": "originalData", + "per_good_ens": "perGoodEns", + "percent_invalid_bt": "percentInvalidBT", + "percent_mb": "percentMB", + "pitch_limit": "pitchLimit", + "pp_exp": "ppExponent", + "pp_exponent": "ppexponent", + "processed_source": "processedSource", + "q_cns_mean": "qCNSmean", + "q_cns_opt_mean": "qCNSoptmean", + "q_cns_opt_per_diff": "qCNSoptperdiff", + "q_cns_per_diff": "qCNSperdiff", + "q_man_mean": "qManmean", + "q_man_per_diff": "qManperdiff", + "q_3p_ns_mean": "q3pNSmean", + "q_3p_ns_opt_mean": "q3pNSoptmean", + "q_3p_ns_opt_per_diff": "q3pNSoptperdiff", + "q_3p_ns_per_diff": "q3pNSperdiff", + "q_pp_mean": "qPPmean", + "q_pp_opt_mean": "qPPoptmean", + "q_pp_opt_per_diff": "qPPoptperdiff", + "q_pp_per_diff": "qPPperdiff", + "q_run_threshold_caution": "qRunThresholdCaution", + "q_run_threshold_warning": "qRunThresholdWarning", + "q_sensitivity": "qSensitivity", + "q_total_threshold_caution": "qTotalThresholdWarning", + "q_total_threshold_warning": "qTotalThresholdCaution", + "raw_gga_altitude_m": "rawGGAAltitude_m", + "raw_gga_delta_time": "rawGGADeltaTime", + "raw_gga_differential": "rawGGADifferential", + "raw_gga_hdop": "rawGGAHDOP", + "raw_gga_lat_deg": "rawGGALat_deg", + "raw_gga_lon_deg": "rawGGALon_deg", + "raw_gga_serial_time": "rawGGASerialTime", + "raw_gga_utc": "rawGGAUTC", + "raw_gga_num_sats": "rawGGANumSats", + "raw_vel_mps": "rawVel_mps", + "raw_vtg_course_deg": "rawVTGCourse_deg", + "raw_vtg_delta_time": "rawVTGDeltaTime", + "raw_vtg_mode_indicator": "rawVTGModeIndicator", + "raw_vtg_speed_mps": "rawVTGSpeed_mps", + "rec_edge_method": "recEdgeMethod", + "right_idx": "rightidx", + "roll_limit": "rollLimit", + "rssi_units": "rssiUnits", + "sel_fit": "selFit", + "serial_num": "serialNum", + 
"sl_lag_effect_m": "slLagEffect_m", + "sl_cutoff_number": "slCutoffNum", + "sl_cutoff_percent": "slCutoffPer", + "sl_cutoff_type": "slCutoffType", + "sl_cutoff_m": "slCutoff_m", + "smooth_depth": "smoothDepth", + "smooth_filter": "smoothFilter", + "smooth_lower_limit": "smoothLowerLimit", + "smooth_speed": "smoothSpeed", + "smooth_upper_limit": "smoothUpperLimit", + "snr_filter": "snrFilter", + "speed_of_sound_mps": "speedOfSound_mps", + "snr_rng": "snrRng", + "start_edge": "startEdge", + "start_serial_time": "startSerialTime", + "station_name": "stationName", + "station_number": "stationNumber", + "stationary_cs_track": "stationaryCSTrack", + "stationary_mb_vel": "stationaryMBVel", + "stationary_us_track": "stationaryUSTrack", + "system_test": "sysTest", + "system_tst": "systemTest", + "systematic_user": "systematicUser", + "t_matrix": "tMatrix", + "temperature": "temperature", + "temperature_deg_c": "temperature_degC", + "test_quality": "testQuality", + "time_stamp": "timeStamp", + "top_ens": "topEns", + "top_fit_r2": "topfitr2", + "top_max_diff": "topmaxdiff", + "top_method": "topMethod", + "top_method_auto": "topMethodAuto", + "top_method_orig": "topMethodOrig", + "top_r2": "topr2", + "total_95": "total95", + "total_uncorrected": "totalUncorrected", + "total_95_user": "total95User", + "transect_duration_sec": "transectDuration_sec", + "u_auto": "uAuto", + "u_processed_mps": "uProcessed_mps", + "u_earth_no_ref_mps": "uEarthNoRef_mps", + "unit_normalized_z": "unitNormalizedz", + "unit_normalized": "unitNormalized", + "unit_normalized_25": "unitNormalized25", + "unit_normalized_75": "unitNormalized75", + "unit_normalized_med": "unitNormalizedMed", + "unit_normalized_no": "unitNormalizedNo", + "use_2_correct": "use2Correct", + "user_discharge_cms": "userQ_cms", + "user_rating": "userRating", + "user_valid": "userValid", + "utm_ens_m": "UTMEns_m", + "v_processed_mps": "vProcessed_mps", + "v_earth_no_ref_mps": "vEarthNoRef_mps", + "valid_beams": "validBeams", + "valid_data": "validData", + "valid_data_method": "validDataMethod", + "vb_depths": "vbDepths", + "vel_method": "velMethod", + "vtg_vel": "vtgVel", + "vtg_velocity_ens_mps": "vtgVelocityEns_mps", + "vtg_velocity_method": "vtgVelocityMethod", + "w_filter": "wFilter", + "w_filter_thresholds": "wFilterThreshold", + "w_vel": "wVel", + "water_mode": "waterMode", + "wt_depth_filter": "wtDepthFilter", + "z_auto": "zAuto", + "all_invalid": "allInvalid", + "q_max_run": "qMaxRun", + "q_max_run_caution": "qRunCaution", + "q_max_run_warning": "qRunWarning", + "q_total": "qTotal", + "q_total_caution": "qTotalCaution", + "q_total_warning": "qTotalWarning", + "sta_name": "staName", + "sta_number": "staNumber", + "left_q": "leftQ", + "left_q_idx": "leftQIdx", + "right_q": "rightQ", + "right_q_idx": "rightQIdx", + "left_sign": "leftSign", + "right_sign": "rightSign", + "right_dist_moved_idx": "rightDistMovedIdx", + "left_dist_moved_idx": "leftDistMovedIdx", + "left_zero": "leftzero", + "left_zero_idx": "leftZeroIdx", + "right_zero": "rightzero", + "right_zero_idx": "rightZeroIdx", + "left_type": "leftType", + "right_type": "rightType", + "pitch_mean_warning_idx": "pitchMeanWarningIdx", + "pitch_mean_caution_idx": "pitchMeanCautionIdx", + "pitch_std_caution_idx": "pitchStdCautionIdx", + "roll_mean_warning_idx": "rollMeanWarningIdx", + "roll_mean_caution_idx": "rollMeanCautionIdx", + "roll_std_caution_idx": "rollStdCautionIdx", + "magvar_idx": "magvarIdx", + "mag_error_idx": "magErrorIdx", + "invalid_transect_left_idx": "invalidTransLeftIdx", + 
"invalid_transect_right_idx": "invalidTransRightIdx", + } return py_2_mat_dict @staticmethod def save_matlab_file(meas, file_name, version, checked=None): - """Saves the measurement class and all data into a Matlab file using the variable names and structure + """Saves the measurement class and all data into a Matlab file using + the variable names and structure from the QRev Matlab version. Parameters @@ -596,18 +633,24 @@ class Python2Matlab(object): checked = list(range(len(meas.transects))) # Convert Python objects to Matlab structure - mat_struct = {'meas_struct': Python2Matlab(meas, checked).matlab_dict, 'version': version} - sio.savemat(file_name=file_name, - mdict=mat_struct, - appendmat=True, - format='5', - long_field_names=True, - do_compression=True, - oned_as='row') + mat_struct = { + "meas_struct": Python2Matlab(meas, checked).matlab_dict, + "version": version, + } + sio.savemat( + file_name=file_name, + mdict=mat_struct, + appendmat=True, + format="5", + long_field_names=True, + do_compression=True, + oned_as="row", + ) @staticmethod def data2matlab(meas): - """Apply changes to the Python data to replicate QRev for Matlab conventions. + """Apply changes to the Python data to replicate QRev for Matlab + conventions. Parameters ---------- @@ -617,7 +660,8 @@ class Python2Matlab(object): Returns ------- meas_mat: Measurement - Deepcopy of meas with changes to replicate QRev for Matlab conventions + Deepcopy of meas with changes to replicate QRev for Matlab + conventions """ # Make copy to prevent changing Python meas data @@ -647,7 +691,8 @@ class Python2Matlab(object): for dat in meas_mat.extrap_fit.norm_data: dat.valid_data = dat.valid_data + 1 - # If system tests, compass calibrations, or compass evaluations don't exist create empty objects + # If system tests, compass calibrations, or compass evaluations don't + # exist create empty objects if len(meas_mat.system_tst) == 0: meas_mat.system_tst = [PreMeasurement()] if len(meas_mat.compass_eval) == 0: @@ -660,19 +705,22 @@ class Python2Matlab(object): meas_mat.mb_tests = meas_mat.mb_tests[0] # Convert message to cell array for Matlab if len(meas_mat.mb_tests.messages) > 0: - meas_mat.mb_tests.messages = np.array(meas_mat.mb_tests.messages).astype(np.object) + meas_mat.mb_tests.messages = np.array( + meas_mat.mb_tests.messages + ).astype(np.object) # Fix user and adcp temperature for QRev Matlab - if np.isnan(meas_mat.ext_temp_chk['user']): - meas_mat.ext_temp_chk['user'] = '' - if np.isnan(meas_mat.ext_temp_chk['adcp']): - meas_mat.ext_temp_chk['adcp'] = '' + if np.isnan(meas_mat.ext_temp_chk["user"]): + meas_mat.ext_temp_chk["user"] = "" + if np.isnan(meas_mat.ext_temp_chk["adcp"]): + meas_mat.ext_temp_chk["adcp"] = "" return meas_mat @staticmethod def reconfigure_transect(transect): - """Changes variable names, rearranges arrays, and adjusts time for consistency with original QRev Matlab output. + """Changes variable names, rearranges arrays, and adjusts time for + consistency with original QRev Matlab output. 
Parameters ---------- @@ -686,20 +734,20 @@ class Python2Matlab(object): """ # Change selected boat velocity identification - if transect.boat_vel.selected == 'bt_vel': - transect.boat_vel.selected = 'btVel' - elif transect.boat_vel.selected == 'gga_vel': - transect.boat_vel.selected = 'ggaVel' - elif transect.boat_vel.selected == 'vtg_vel': - transect.boat_vel.selected = 'vtgVel' + if transect.boat_vel.selected == "bt_vel": + transect.boat_vel.selected = "btVel" + elif transect.boat_vel.selected == "gga_vel": + transect.boat_vel.selected = "ggaVel" + elif transect.boat_vel.selected == "vtg_vel": + transect.boat_vel.selected = "vtgVel" # Change selected depth identification - if transect.depths.selected == 'bt_depths': - transect.depths.selected = 'btDepths' - elif transect.depths.selected == 'vb_depths': - transect.depths.selected = 'vbDepths' - elif transect.depths.selected == 'ds_depths': - transect.depths.selected = 'dsDepths' + if transect.depths.selected == "bt_depths": + transect.depths.selected = "btDepths" + elif transect.depths.selected == "vb_depths": + transect.depths.selected = "vbDepths" + elif transect.depths.selected == "ds_depths": + transect.depths.selected = "dsDepths" # Adjust in transect number for 1 base rather than 0 base transect.in_transect_idx = transect.in_transect_idx + 1 @@ -710,16 +758,24 @@ class Python2Matlab(object): transect.w_vel.rssi = np.moveaxis(transect.w_vel.rssi, 0, 2) transect.w_vel.valid_data = np.moveaxis(transect.w_vel.valid_data, 0, 2) if len(transect.adcp.t_matrix.matrix.shape) == 3: - transect.adcp.t_matrix.matrix = np.moveaxis(transect.adcp.t_matrix.matrix, 2, 0) + transect.adcp.t_matrix.matrix = np.moveaxis( + transect.adcp.t_matrix.matrix, 2, 0 + ) # Adjust 2-D array to be row based if transect.adcp.configuration_commands is not None: - transect.adcp.configuration_commands = transect.adcp.configuration_commands.reshape(-1, 1) + transect.adcp.configuration_commands = ( + transect.adcp.configuration_commands.reshape(-1, 1) + ) # Adjust serial time to Matlab convention seconds_day = 86400 time_correction = 719529.0000000003 - transect.date_time.start_serial_time = (transect.date_time.start_serial_time / seconds_day) \ - + time_correction - transect.date_time.end_serial_time = (transect.date_time.end_serial_time / seconds_day) + time_correction + transect.date_time.start_serial_time = ( + transect.date_time.start_serial_time / seconds_day + ) + time_correction + transect.date_time.end_serial_time = ( + transect.date_time.end_serial_time / seconds_day + ) + time_correction + return transect diff --git a/Classes/QAData.py b/Classes/QAData.py index 05c49d7..6a6cd73 100644 --- a/Classes/QAData.py +++ b/Classes/QAData.py @@ -12,21 +12,27 @@ class QAData(object): Attributes ---------- q_run_threshold_caution: int - Caution threshold for interpolated discharge for a run of invalid ensembles, in percent. + Caution threshold for interpolated discharge for a run of invalid + ensembles, in percent. q_run_threshold_warning: int - Warning threshold for interpolated discharge for a run of invalid ensembles, in percent. + Warning threshold for interpolated discharge for a run of invalid + ensembles, in percent. q_total_threshold_caution: int - Caution threshold for total interpolated discharge for invalid ensembles, in percent. + Caution threshold for total interpolated discharge for invalid + ensembles, in percent. q_total_threshold_warning: int - Warning threshold for total interpolated discharge for invalid ensembles, in percent. 
+ Warning threshold for total interpolated discharge for invalid
+ ensembles, in percent.
 transects: dict
 Dictionary of quality assurance checks for transects
 system_tst: dict
 Dictionary of quality assurance checks on the system test(s)
 compass: dict
- Dictionary of quality assurance checks on compass calibration and evaluations
+ Dictionary of quality assurance checks on compass calibration and
+ evaluations
 temperature: dict
- Dictionary of quality assurance checks on temperature comparions and variation
+ Dictionary of quality assurance checks on temperature comparisons
+ and variation
 movingbed: dict
 Dictionary of quality assurance checks on moving-bed tests
 user: dict
@@ -78,15 +84,15 @@ class QAData(object):
 self.extrapolation = dict()
 self.edges = dict()
 self.settings_dict = dict()
- self.settings_dict['tab_compass'] = 'Default'
- self.settings_dict['tab_tempsal'] = 'Default'
- self.settings_dict['tab_mbt'] = 'Default'
- self.settings_dict['tab_bt'] = 'Default'
- self.settings_dict['tab_gps'] = 'Default'
- self.settings_dict['tab_depth'] = 'Default'
- self.settings_dict['tab_wt'] = 'Default'
- self.settings_dict['tab_extrap'] = 'Default'
- self.settings_dict['tab_edges'] = 'Default'
+ self.settings_dict["tab_compass"] = "Default"
+ self.settings_dict["tab_tempsal"] = "Default"
+ self.settings_dict["tab_mbt"] = "Default"
+ self.settings_dict["tab_bt"] = "Default"
+ self.settings_dict["tab_gps"] = "Default"
+ self.settings_dict["tab_depth"] = "Default"
+ self.settings_dict["tab_wt"] = "Default"
+ self.settings_dict["tab_extrap"] = "Default"
+ self.settings_dict["tab_edges"] = "Default"

 if compute:
 # Apply QA checks
@@ -116,7 +122,8 @@ class QAData(object):
 self.populate_from_qrev_mat(meas, mat_struct)

 def populate_from_qrev_mat(self, meas, meas_struct):
- """Populates the object using data from previously saved QRev Matlab file.
+ """Populates the object using data from a previously saved QRev
+ Matlab file.

 Parameters
 ----------
@@ -126,15 +133,16 @@ class QAData(object):
 Matlab data structure obtained from sio.loadmat
 """

- # Generate a new QA object using the measurement data and the current QA code.
- # When QA checks from the current QA are not available from old QRev files, these
+ # Generate a new QA object using the measurement data and the
+ # current QA code. When QA checks from the current QA are not
+ # available from old QRev files, these
 # checks will be included to supplement the old QRev file data.
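
(The hasattr/else pattern that fills the rest of this method can be read as
one rule: prefer the check stored in the loaded Matlab struct, otherwise fall
back to the freshly computed QA. A minimal illustrative sketch follows; the
helper name backfill and its signature are hypothetical and not part of QRev,
while QAData.make_array is the converter used throughout this class.)

    def backfill(mat_obj, mat_name, new_dict, py_name, convert=None):
        # Prefer the attribute saved in the old QRev .mat file
        if hasattr(mat_obj, mat_name):
            value = getattr(mat_obj, mat_name)
            return convert(value) if convert is not None else value
        # Otherwise use the check recomputed by the current QA code
        return new_dict[py_name]

    # Hypothetical usage, matching the assignments below:
    # self.compass["magvar_idx"] = backfill(
    #     meas_struct.qa.compass, "magvarIdx",
    #     new_qa.compass, "magvar_idx", convert=QAData.make_array)
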
new_qa = QAData(meas) - if hasattr(meas_struct, 'qa'): + if hasattr(meas_struct, "qa"): # Set default thresholds self.q_run_threshold_caution = meas_struct.qa.qRunThresholdCaution self.q_run_threshold_warning = meas_struct.qa.qRunThresholdWarning - if hasattr(meas_struct.qa, 'qTotalThresholdCaution'): + if hasattr(meas_struct.qa, "qTotalThresholdCaution"): self.q_total_threshold_caution = meas_struct.qa.qTotalThresholdCaution else: self.q_total_threshold_caution = 10 @@ -142,285 +150,342 @@ class QAData(object): # Initialize instance variables self.transects = dict() - self.transects['duration'] = meas_struct.qa.transects.duration - self.transects['messages'] = self.make_list(meas_struct.qa.transects.messages) - self.transects['number'] = meas_struct.qa.transects.number - self.transects['recip'] = meas_struct.qa.transects.recip - self.transects['sign'] = meas_struct.qa.transects.sign - self.transects['status'] = meas_struct.qa.transects.status - self.transects['uncertainty'] = meas_struct.qa.transects.uncertainty + self.transects["duration"] = meas_struct.qa.transects.duration + self.transects["messages"] = self.make_list( + meas_struct.qa.transects.messages + ) + self.transects["number"] = meas_struct.qa.transects.number + self.transects["recip"] = meas_struct.qa.transects.recip + self.transects["sign"] = meas_struct.qa.transects.sign + self.transects["status"] = meas_struct.qa.transects.status + self.transects["uncertainty"] = meas_struct.qa.transects.uncertainty self.system_tst = dict() - self.system_tst['messages'] = self.make_list(meas_struct.qa.systemTest.messages) - self.system_tst['status'] = meas_struct.qa.systemTest.status + self.system_tst["messages"] = self.make_list( + meas_struct.qa.systemTest.messages + ) + self.system_tst["status"] = meas_struct.qa.systemTest.status self.compass = dict() - self.compass['messages'] = self.make_list(meas_struct.qa.compass.messages) - self.compass['status'] = meas_struct.qa.compass.status - if hasattr(meas_struct.qa.compass, 'status1'): - self.compass['status1'] = meas_struct.qa.compass.status1 - self.compass['status2'] = meas_struct.qa.compass.status2 + self.compass["messages"] = self.make_list(meas_struct.qa.compass.messages) + self.compass["status"] = meas_struct.qa.compass.status + if hasattr(meas_struct.qa.compass, "status1"): + self.compass["status1"] = meas_struct.qa.compass.status1 + self.compass["status2"] = meas_struct.qa.compass.status2 else: - self.compass['status1'] = 'good' - self.compass['status2'] = 'good' + self.compass["status1"] = "good" + self.compass["status2"] = "good" # If QA check not available, get check from new QA - if hasattr(meas_struct.qa.compass, 'magvar'): - self.compass['magvar'] = meas_struct.qa.compass.magvar + if hasattr(meas_struct.qa.compass, "magvar"): + self.compass["magvar"] = meas_struct.qa.compass.magvar else: - self.compass['magvar'] = new_qa.compass['magvar'] - self.compass['status'] = new_qa.compass['status'] + self.compass["magvar"] = new_qa.compass["magvar"] + self.compass["status"] = new_qa.compass["status"] # If QA check not available, get check from new QA - if hasattr(meas_struct.qa.compass, 'magvarIdx'): - self.compass['magvar_idx'] = self.make_array(meas_struct.qa.compass.magvarIdx) + if hasattr(meas_struct.qa.compass, "magvarIdx"): + self.compass["magvar_idx"] = self.make_array( + meas_struct.qa.compass.magvarIdx + ) else: - self.compass['magvar_idx'] = new_qa.compass['magvar_idx'] - self.compass['status'] = new_qa.compass['status'] + self.compass["magvar_idx"] = 
new_qa.compass["magvar_idx"] + self.compass["status"] = new_qa.compass["status"] # Changed mag_error_idx from bool to int array in QRevPy - self.compass['mag_error_idx'] = new_qa.compass['mag_error_idx'] - self.compass['status'] = new_qa.compass['status'] + self.compass["mag_error_idx"] = new_qa.compass["mag_error_idx"] + self.compass["status"] = new_qa.compass["status"] # If QA check not available, get check from new QA - if hasattr(meas_struct.qa.compass, 'pitchMeanWarningIdx'): - self.compass['pitch_mean_warning_idx'] = self.make_array(meas_struct.qa.compass.pitchMeanWarningIdx) + if hasattr(meas_struct.qa.compass, "pitchMeanWarningIdx"): + self.compass["pitch_mean_warning_idx"] = self.make_array( + meas_struct.qa.compass.pitchMeanWarningIdx + ) else: - self.compass['pitch_mean_warning_idx'] = new_qa.compass['pitch_mean_warning_idx'] - self.compass['status'] = new_qa.compass['status'] + self.compass["pitch_mean_warning_idx"] = new_qa.compass[ + "pitch_mean_warning_idx" + ] + self.compass["status"] = new_qa.compass["status"] # If QA check not available, get check from new QA - if hasattr(meas_struct.qa.compass, 'rollMeanWarningIdx'): - self.compass['roll_mean_warning_idx'] = self.make_array(meas_struct.qa.compass.rollMeanWarningIdx) + if hasattr(meas_struct.qa.compass, "rollMeanWarningIdx"): + self.compass["roll_mean_warning_idx"] = self.make_array( + meas_struct.qa.compass.rollMeanWarningIdx + ) else: - self.compass['roll_mean_warning_idx'] = new_qa.compass['roll_mean_warning_idx'] - self.compass['status'] = new_qa.compass['status'] + self.compass["roll_mean_warning_idx"] = new_qa.compass[ + "roll_mean_warning_idx" + ] + self.compass["status"] = new_qa.compass["status"] # If QA check not available, get check from new QA - if hasattr(meas_struct.qa.compass, 'pitchMeanCautionIdx'): - self.compass['pitch_mean_caution_idx'] = self.make_array(meas_struct.qa.compass.pitchMeanCautionIdx) + if hasattr(meas_struct.qa.compass, "pitchMeanCautionIdx"): + self.compass["pitch_mean_caution_idx"] = self.make_array( + meas_struct.qa.compass.pitchMeanCautionIdx + ) else: - self.compass['pitch_mean_caution_idx'] = new_qa.compass['pitch_mean_caution_idx'] - self.compass['status'] = new_qa.compass['status'] + self.compass["pitch_mean_caution_idx"] = new_qa.compass[ + "pitch_mean_caution_idx" + ] + self.compass["status"] = new_qa.compass["status"] # If QA check not available, get check from new QA - if hasattr(meas_struct.qa.compass, 'rollMeanCautionIdx'): - self.compass['roll_mean_caution_idx'] = self.make_array(meas_struct.qa.compass.rollMeanCautionIdx) + if hasattr(meas_struct.qa.compass, "rollMeanCautionIdx"): + self.compass["roll_mean_caution_idx"] = self.make_array( + meas_struct.qa.compass.rollMeanCautionIdx + ) else: - self.compass['roll_mean_caution_idx'] = new_qa.compass['roll_mean_caution_idx'] - self.compass['status'] = new_qa.compass['status'] + self.compass["roll_mean_caution_idx"] = new_qa.compass[ + "roll_mean_caution_idx" + ] + self.compass["status"] = new_qa.compass["status"] # If QA check not available, get check from new QA - if hasattr(meas_struct.qa.compass, 'pitchStdCautionIdx'): - self.compass['pitch_std_caution_idx'] = self.make_array(meas_struct.qa.compass.pitchStdCautionIdx) + if hasattr(meas_struct.qa.compass, "pitchStdCautionIdx"): + self.compass["pitch_std_caution_idx"] = self.make_array( + meas_struct.qa.compass.pitchStdCautionIdx + ) else: - self.compass['pitch_std_caution_idx'] = new_qa.compass['pitch_std_caution_idx'] - self.compass['status'] = new_qa.compass['status'] + 
self.compass["pitch_std_caution_idx"] = new_qa.compass[ + "pitch_std_caution_idx" + ] + self.compass["status"] = new_qa.compass["status"] # If QA check not available, get check from new QA - if hasattr(meas_struct.qa.compass, 'rollStdCautionIdx'): - self.compass['roll_std_caution_idx'] = self.make_array(meas_struct.qa.compass.rollStdCautionIdx) + if hasattr(meas_struct.qa.compass, "rollStdCautionIdx"): + self.compass["roll_std_caution_idx"] = self.make_array( + meas_struct.qa.compass.rollStdCautionIdx + ) else: - self.compass['roll_std_caution_idx'] = new_qa.compass['roll_std_caution_idx'] - self.compass['status'] = new_qa.compass['status'] + self.compass["roll_std_caution_idx"] = new_qa.compass[ + "roll_std_caution_idx" + ] + self.compass["status"] = new_qa.compass["status"] self.temperature = dict() - self.temperature['messages'] = self.make_list(meas_struct.qa.temperature.messages) - self.temperature['status'] = meas_struct.qa.temperature.status + self.temperature["messages"] = self.make_list( + meas_struct.qa.temperature.messages + ) + self.temperature["status"] = meas_struct.qa.temperature.status self.movingbed = dict() - self.movingbed['messages'] = self.make_list(meas_struct.qa.movingbed.messages) - self.movingbed['status'] = meas_struct.qa.movingbed.status - self.movingbed['code'] = meas_struct.qa.movingbed.code + self.movingbed["messages"] = self.make_list( + meas_struct.qa.movingbed.messages + ) + self.movingbed["status"] = meas_struct.qa.movingbed.status + self.movingbed["code"] = meas_struct.qa.movingbed.code self.user = dict() - self.user['messages'] = self.make_list(meas_struct.qa.user.messages) - self.user['sta_name'] = bool(meas_struct.qa.user.staName) - self.user['sta_number'] = bool(meas_struct.qa.user.staNumber) - self.user['status'] = meas_struct.qa.user.status + self.user["messages"] = self.make_list(meas_struct.qa.user.messages) + self.user["sta_name"] = bool(meas_struct.qa.user.staName) + self.user["sta_number"] = bool(meas_struct.qa.user.staNumber) + self.user["status"] = meas_struct.qa.user.status # If QA check not available, get check from new QA self.depths = self.create_qa_dict(self, meas_struct.qa.depths) - if 'draft' not in self.depths: - self.depths['draft'] = new_qa.depths['draft'] - self.depths['status'] = new_qa.depths['status'] + if "draft" not in self.depths: + self.depths["draft"] = new_qa.depths["draft"] + self.depths["status"] = new_qa.depths["status"] - if 'all_invalid' not in self.depths: - self.depths['all_invalid'] = new_qa.depths['all_invalid'] - self.depths['status'] = new_qa.depths['status'] + if "all_invalid" not in self.depths: + self.depths["all_invalid"] = new_qa.depths["all_invalid"] + self.depths["status"] = new_qa.depths["status"] # If QA check not available, get check from new QA self.bt_vel = self.create_qa_dict(self, meas_struct.qa.btVel, ndim=2) - if 'all_invalid' not in self.bt_vel: - self.bt_vel['all_invalid'] = new_qa.bt_vel['all_invalid'] - self.bt_vel['status'] = new_qa.bt_vel['status'] + if "all_invalid" not in self.bt_vel: + self.bt_vel["all_invalid"] = new_qa.bt_vel["all_invalid"] + self.bt_vel["status"] = new_qa.bt_vel["status"] # If QA check not available, get check from new QA self.gga_vel = self.create_qa_dict(self, meas_struct.qa.ggaVel, ndim=2) - if 'all_invalid' not in self.gga_vel: - self.gga_vel['all_invalid'] = new_qa.gga_vel['all_invalid'] - if 'lag_status' not in self.gga_vel: - self.gga_vel['lag_status'] = new_qa.gga_vel['lag_status'] - self.gga_vel['status'] = new_qa.gga_vel['status'] + if "all_invalid" not in 
self.gga_vel: + self.gga_vel["all_invalid"] = new_qa.gga_vel["all_invalid"] + if "lag_status" not in self.gga_vel: + self.gga_vel["lag_status"] = new_qa.gga_vel["lag_status"] + self.gga_vel["status"] = new_qa.gga_vel["status"] # If QA check not available, get check from new QA self.vtg_vel = self.create_qa_dict(self, meas_struct.qa.vtgVel, ndim=2) - if 'all_invalid' not in self.vtg_vel: - self.vtg_vel['all_invalid'] = new_qa.vtg_vel['all_invalid'] - if 'lag_status' not in self.vtg_vel: - self.vtg_vel['lag_status'] = new_qa.vtg_vel['lag_status'] - self.vtg_vel['status'] = new_qa.vtg_vel['status'] + if "all_invalid" not in self.vtg_vel: + self.vtg_vel["all_invalid"] = new_qa.vtg_vel["all_invalid"] + if "lag_status" not in self.vtg_vel: + self.vtg_vel["lag_status"] = new_qa.vtg_vel["lag_status"] + self.vtg_vel["status"] = new_qa.vtg_vel["status"] # If QA check not available, get check from new QA self.w_vel = self.create_qa_dict(self, meas_struct.qa.wVel, ndim=2) - if 'all_invalid' not in self.w_vel: - self.w_vel['all_invalid'] = new_qa.w_vel['all_invalid'] - self.w_vel['status'] = new_qa.w_vel['status'] + if "all_invalid" not in self.w_vel: + self.w_vel["all_invalid"] = new_qa.w_vel["all_invalid"] + self.w_vel["status"] = new_qa.w_vel["status"] self.extrapolation = dict() - self.extrapolation['messages'] = self.make_list(meas_struct.qa.extrapolation.messages) - self.extrapolation['status'] = meas_struct.qa.extrapolation.status + self.extrapolation["messages"] = self.make_list( + meas_struct.qa.extrapolation.messages + ) + self.extrapolation["status"] = meas_struct.qa.extrapolation.status self.edges = dict() - self.edges['messages'] = self.make_list(meas_struct.qa.edges.messages) - self.edges['status'] = meas_struct.qa.edges.status - self.edges['left_q'] = meas_struct.qa.edges.leftQ - self.edges['right_q'] = meas_struct.qa.edges.rightQ - self.edges['left_sign'] = meas_struct.qa.edges.leftSign - self.edges['right_sign'] = meas_struct.qa.edges.rightSign - self.edges['left_zero'] = meas_struct.qa.edges.leftzero - self.edges['right_zero'] = meas_struct.qa.edges.rightzero - self.edges['left_type'] = meas_struct.qa.edges.leftType - self.edges['right_type'] = meas_struct.qa.edges.rightType + self.edges["messages"] = self.make_list(meas_struct.qa.edges.messages) + self.edges["status"] = meas_struct.qa.edges.status + self.edges["left_q"] = meas_struct.qa.edges.leftQ + self.edges["right_q"] = meas_struct.qa.edges.rightQ + self.edges["left_sign"] = meas_struct.qa.edges.leftSign + self.edges["right_sign"] = meas_struct.qa.edges.rightSign + self.edges["left_zero"] = meas_struct.qa.edges.leftzero + self.edges["right_zero"] = meas_struct.qa.edges.rightzero + self.edges["left_type"] = meas_struct.qa.edges.leftType + self.edges["right_type"] = meas_struct.qa.edges.rightType # If QA check not available, get check from new QA - if hasattr(meas_struct.qa.edges, 'rightDistMovedIdx'): - self.edges['right_dist_moved_idx'] = self.make_array(meas_struct.qa.edges.rightDistMovedIdx) + if hasattr(meas_struct.qa.edges, "rightDistMovedIdx"): + self.edges["right_dist_moved_idx"] = self.make_array( + meas_struct.qa.edges.rightDistMovedIdx + ) else: - self.edges['right_dist_moved_idx'] = new_qa.edges['right_dist_moved_idx'] - self.edges['status'] = new_qa.edges['status'] + self.edges["right_dist_moved_idx"] = new_qa.edges[ + "right_dist_moved_idx" + ] + self.edges["status"] = new_qa.edges["status"] # If QA check not available, get check from new QA - if hasattr(meas_struct.qa.edges, 'leftDistMovedIdx'): - 
self.edges['left_dist_moved_idx'] = self.make_array(meas_struct.qa.edges.leftDistMovedIdx) + if hasattr(meas_struct.qa.edges, "leftDistMovedIdx"): + self.edges["left_dist_moved_idx"] = self.make_array( + meas_struct.qa.edges.leftDistMovedIdx + ) else: - self.edges['left_dist_moved_idx'] = new_qa.edges['left_dist_moved_idx'] - self.edges['status'] = new_qa.edges['status'] + self.edges["left_dist_moved_idx"] = new_qa.edges["left_dist_moved_idx"] + self.edges["status"] = new_qa.edges["status"] # If QA check not available, get check from new QA - if hasattr(meas_struct.qa.edges, 'leftQIdx'): - self.edges['left_q_idx'] = self.make_array(meas_struct.qa.edges.leftQIdx) + if hasattr(meas_struct.qa.edges, "leftQIdx"): + self.edges["left_q_idx"] = self.make_array( + meas_struct.qa.edges.leftQIdx + ) else: - self.edges['left_q_idx'] = new_qa.edges['left_q_idx'] - self.edges['status'] = new_qa.edges['status'] + self.edges["left_q_idx"] = new_qa.edges["left_q_idx"] + self.edges["status"] = new_qa.edges["status"] # If QA check not available, get check from new QA - if hasattr(meas_struct.qa.edges, 'rightQIdx'): - self.edges['right_q_idx'] = self.make_array(meas_struct.qa.edges.rightQIdx) + if hasattr(meas_struct.qa.edges, "rightQIdx"): + self.edges["right_q_idx"] = self.make_array( + meas_struct.qa.edges.rightQIdx + ) else: - self.edges['right_q_idx'] = new_qa.edges['right_q_idx'] - self.edges['status'] = new_qa.edges['status'] + self.edges["right_q_idx"] = new_qa.edges["right_q_idx"] + self.edges["status"] = new_qa.edges["status"] # If QA check not available, get check from new QA - if hasattr(meas_struct.qa.edges, 'leftZeroIdx'): - self.edges['left_zero_idx'] = self.make_array(meas_struct.qa.edges.leftZeroIdx) + if hasattr(meas_struct.qa.edges, "leftZeroIdx"): + self.edges["left_zero_idx"] = self.make_array( + meas_struct.qa.edges.leftZeroIdx + ) else: - self.edges['left_zero_idx'] = new_qa.edges['left_zero_idx'] - self.edges['status'] = new_qa.edges['status'] + self.edges["left_zero_idx"] = new_qa.edges["left_zero_idx"] + self.edges["status"] = new_qa.edges["status"] # If QA check not available, get check from new QA - if hasattr(meas_struct.qa.edges, 'rightZeroIdx'): - self.edges['right_zero_idx'] = self.make_array(meas_struct.qa.edges.rightZeroIdx) + if hasattr(meas_struct.qa.edges, "rightZeroIdx"): + self.edges["right_zero_idx"] = self.make_array( + meas_struct.qa.edges.rightZeroIdx + ) else: - self.edges['right_zero_idx'] = new_qa.edges['right_zero_idx'] - self.edges['status'] = new_qa.edges['status'] + self.edges["right_zero_idx"] = new_qa.edges["right_zero_idx"] + self.edges["status"] = new_qa.edges["status"] # If QA check not available, get check from new QA - if hasattr(meas_struct.qa.edges, 'invalid_transect_left_idx'): - self.edges['invalid_transect_left_idx'] = \ - self.make_array(meas_struct.qa.edges.invalid_transect_left_idx) - elif hasattr(meas_struct.qa.edges, 'invalidTransLeftIdx'): - self.edges['invalid_transect_left_idx'] = \ - self.make_array(meas_struct.qa.edges.invalidTransLeftIdx) + if hasattr(meas_struct.qa.edges, "invalid_transect_left_idx"): + self.edges["invalid_transect_left_idx"] = self.make_array( + meas_struct.qa.edges.invalid_transect_left_idx + ) + elif hasattr(meas_struct.qa.edges, "invalidTransLeftIdx"): + self.edges["invalid_transect_left_idx"] = self.make_array( + meas_struct.qa.edges.invalidTransLeftIdx + ) else: - self.edges['invalid_transect_left_idx'] = new_qa.edges['invalid_transect_left_idx'] - self.edges['status'] = new_qa.edges['status'] + 
self.edges["invalid_transect_left_idx"] = new_qa.edges[ + "invalid_transect_left_idx" + ] + self.edges["status"] = new_qa.edges["status"] # If QA check not available, get check from new QA - if hasattr(meas_struct.qa.edges, 'invalid_transect_right_idx'): - self.edges['invalid_transect_right_idx'] = \ - self.make_array(meas_struct.qa.edges.invalid_transect_right_idx) - elif hasattr(meas_struct.qa, 'invalidTransRightIdx'): - self.edges['invalid_transect_right_idx'] = \ - self.make_array(meas_struct.qa.edges.invalidTransRightIdx) + if hasattr(meas_struct.qa.edges, "invalid_transect_right_idx"): + self.edges["invalid_transect_right_idx"] = self.make_array( + meas_struct.qa.edges.invalid_transect_right_idx + ) + elif hasattr(meas_struct.qa, "invalidTransRightIdx"): + self.edges["invalid_transect_right_idx"] = self.make_array( + meas_struct.qa.edges.invalidTransRightIdx + ) else: - self.edges['invalid_transect_right_idx'] = new_qa.edges['invalid_transect_right_idx'] - self.edges['status'] = new_qa.edges['status'] + self.edges["invalid_transect_right_idx"] = new_qa.edges[ + "invalid_transect_right_idx" + ] + self.edges["status"] = new_qa.edges["status"] - if hasattr(meas_struct.qa, 'settings_dict'): + if hasattr(meas_struct.qa, "settings_dict"): self.settings_dict = dict() try: - self.settings_dict['tab_compass'] = \ - meas_struct.qa.settings_dict.tab_compass + self.settings_dict[ + "tab_compass" + ] = meas_struct.qa.settings_dict.tab_compass except AttributeError: - self.settings_dict['tab_compass'] = \ - new_qa.settings_dict['tab_compass'] + self.settings_dict["tab_compass"] = new_qa.settings_dict[ + "tab_compass" + ] try: - self.settings_dict['tab_tempsal'] = \ - meas_struct.qa.settings_dict.tab_tempsal + self.settings_dict[ + "tab_tempsal" + ] = meas_struct.qa.settings_dict.tab_tempsal except AttributeError: - self.settings_dict['tab_tempsal'] = \ - new_qa.settings_dict['tab_tempsal'] + self.settings_dict["tab_tempsal"] = new_qa.settings_dict[ + "tab_tempsal" + ] try: - self.settings_dict['tab_mbt'] = \ - meas_struct.qa.settings_dict.tab_mbt + self.settings_dict["tab_mbt"] = meas_struct.qa.settings_dict.tab_mbt except AttributeError: - self.settings_dict['tab_mbt'] = \ - new_qa.settings_dict['tab_mbt'] + self.settings_dict["tab_mbt"] = new_qa.settings_dict["tab_mbt"] try: - self.settings_dict['tab_bt'] = \ - meas_struct.qa.settings_dict.tab_bt + self.settings_dict["tab_bt"] = meas_struct.qa.settings_dict.tab_bt except AttributeError: - self.settings_dict['tab_bt'] = \ - new_qa.settings_dict['tab_bt'] + self.settings_dict["tab_bt"] = new_qa.settings_dict["tab_bt"] try: - self.settings_dict['tab_gps'] = \ - meas_struct.qa.settings_dict.tab_gps + self.settings_dict["tab_gps"] = meas_struct.qa.settings_dict.tab_gps except AttributeError: - self.settings_dict['tab_gps'] = \ - new_qa.settings_dict['tab_gps'] + self.settings_dict["tab_gps"] = new_qa.settings_dict["tab_gps"] try: - self.settings_dict['tab_depth'] = \ - meas_struct.qa.settings_dict.tab_depth + self.settings_dict[ + "tab_depth" + ] = meas_struct.qa.settings_dict.tab_depth except AttributeError: - self.settings_dict['tab_depth'] = \ - new_qa.settings_dict['tab_depth'] + self.settings_dict["tab_depth"] = new_qa.settings_dict["tab_depth"] try: - self.settings_dict['tab_wt'] = \ - meas_struct.qa.settings_dict.tab_wt + self.settings_dict["tab_wt"] = meas_struct.qa.settings_dict.tab_wt except AttributeError: - self.settings_dict['tab_wt'] = \ - new_qa.settings_dict['tab_wt'] + self.settings_dict["tab_wt"] = new_qa.settings_dict["tab_wt"] 
try: - self.settings_dict['tab_extrap'] = \ - meas_struct.qa.settings_dict.tab_extrap + self.settings_dict[ + "tab_extrap" + ] = meas_struct.qa.settings_dict.tab_extrap except AttributeError: - self.settings_dict['tab_extrap'] = \ - new_qa.settings_dict['tab_extrap'] + self.settings_dict["tab_extrap"] = new_qa.settings_dict[ + "tab_extrap" + ] try: - self.settings_dict['tab_edges'] = \ - meas_struct.qa.settings_dict.tab_edges + self.settings_dict[ + "tab_edges" + ] = meas_struct.qa.settings_dict.tab_edges except AttributeError: - self.settings_dict['tab_edges'] = \ - new_qa.settings_dict['tab_edges'] + self.settings_dict["tab_edges"] = new_qa.settings_dict["tab_edges"] @staticmethod def create_qa_dict(self, mat_data, ndim=1): - """Creates the dictionary used to store QA checks associated with the percent of discharge estimated - by interpolation. This dictionary is used by BT, GPS, Depth, and WT. + """Creates the dictionary used to store QA checks associated with + the percent of discharge estimated by interpolation. This dictionary + is used by BT, GPS, Depth, and WT. Parameters ---------- @@ -436,28 +501,40 @@ class QAData(object): qa_dict = dict() # Populate dictionary from Matlab data - qa_dict['messages'] = QAData.make_list(mat_data.messages) + qa_dict["messages"] = QAData.make_list(mat_data.messages) # allInvalid not available in older QRev data - if hasattr(mat_data, 'allInvalid'): - qa_dict['all_invalid'] = self.make_array(mat_data.allInvalid, 1).astype(bool) - - qa_dict['q_max_run_caution'] = self.make_array(mat_data.qRunCaution, ndim).astype(bool) - qa_dict['q_max_run_warning'] = self.make_array(mat_data.qRunWarning, ndim).astype(bool) - if hasattr(mat_data, 'qTotalCaution'): - qa_dict['q_total_caution'] = self.make_array(mat_data.qTotalCaution, ndim).astype(bool) + if hasattr(mat_data, "allInvalid"): + qa_dict["all_invalid"] = self.make_array(mat_data.allInvalid, 1).astype( + bool + ) + + qa_dict["q_max_run_caution"] = self.make_array( + mat_data.qRunCaution, ndim + ).astype(bool) + qa_dict["q_max_run_warning"] = self.make_array( + mat_data.qRunWarning, ndim + ).astype(bool) + if hasattr(mat_data, "qTotalCaution"): + qa_dict["q_total_caution"] = self.make_array( + mat_data.qTotalCaution, ndim + ).astype(bool) else: - qa_dict['q_total_caution'] = self.make_array(mat_data.qTotalWarning, ndim).astype(bool) - qa_dict['q_total_warning'] = self.make_array(mat_data.qTotalWarning, ndim).astype(bool) - qa_dict['status'] = mat_data.status + qa_dict["q_total_caution"] = self.make_array( + mat_data.qTotalWarning, ndim + ).astype(bool) + qa_dict["q_total_warning"] = self.make_array( + mat_data.qTotalWarning, ndim + ).astype(bool) + qa_dict["status"] = mat_data.status # q_max_run and q_total not available in older QRev data try: - qa_dict['q_max_run'] = self.make_array(mat_data.qMaxRun, ndim) - qa_dict['q_total'] = self.make_array(mat_data.qTotal, ndim) + qa_dict["q_max_run"] = self.make_array(mat_data.qMaxRun, ndim) + qa_dict["q_total"] = self.make_array(mat_data.qTotal, ndim) except AttributeError: - qa_dict['q_max_run'] = np.tile(np.nan, (len(mat_data.qRunCaution), 6)) - qa_dict['q_total'] = np.tile(np.nan, (len(mat_data.qRunCaution), 6)) + qa_dict["q_max_run"] = np.tile(np.nan, (len(mat_data.qRunCaution), 6)) + qa_dict["q_total"] = np.tile(np.nan, (len(mat_data.qRunCaution), 6)) return qa_dict @staticmethod @@ -502,13 +579,16 @@ class QAData(object): list_out = [] # Single message with integer codes at end elif array_in.size == 3: - if type(array_in[1]) is int or len(array_in[1].strip()) 
== 1: - temp = array_in.tolist() - if len(temp) > 0: - internal_list = [] - for item in temp: - internal_list.append(item) - list_out = [internal_list] + if not type(array_in[1]) is np.ndarray: + if type(array_in[1]) is int or len(array_in[1].strip()) == 1: + temp = array_in.tolist() + if len(temp) > 0: + internal_list = [] + for item in temp: + internal_list.append(item) + list_out = [internal_list] + else: + list_out = array_in.tolist() else: list_out = array_in.tolist() # Either multiple messages with or without integer codes @@ -527,15 +607,18 @@ class QAData(object): """ # Assume good results - self.transects['status'] = 'good' + self.transects["status"] = "good" # Initialize keys - self.transects['messages'] = [] - self.transects['recip'] = 0 - self.transects['sign'] = 0 - self.transects['duration'] = 0 - self.transects['number'] = 0 - self.transects['uncertainty'] = 0 + self.transects["messages"] = [] + self.transects["recip"] = 0 + self.transects["sign"] = 0 + self.transects["duration"] = 0 + self.transects["number"] = 0 + self.transects["uncertainty"] = 0 + self.transects["batt_voltage"] = [] + # Battery voltage + batt_threshold = 10.5 # Initialize lists checked = [] @@ -558,67 +641,179 @@ class QAData(object): if transect.checked: total_duration += transect.date_time.transect_duration_sec - # Check duration against USGS policy + # Check duration against policy if total_duration < meas.min_duration: - self.transects['status'] = 'caution' - text = 'Transects: Duration of selected transects is less than ' + str(meas.min_duration) + ' seconds;' - self.transects['messages'].append([text, 2, 0]) - self.transects['duration'] = 1 + self.transects["status"] = "caution" + text = ( + "Transects: Duration of selected transects is less than " + + str(meas.min_duration) + + " seconds;" + ) + self.transects["messages"].append([text, 2, 0]) + self.transects["duration"] = 1 # Check transects for missing ensembles + left_invalid_exceeded = False + right_invalid_exceeded = False for transect in meas.transects: if transect.checked: # Determine number of missing ensembles - if transect.adcp.manufacturer == 'SonTek': + if transect.adcp.manufacturer == "SonTek": # Determine number of missing ensembles for SonTek data idx_missing = np.where(transect.date_time.ens_duration_sec > 1.5)[0] if len(idx_missing) > 0: - average_ensemble_duration = (np.nansum(transect.date_time.ens_duration_sec) - - np.nansum(transect.date_time.ens_duration_sec[idx_missing])) \ - / (len(transect.date_time.ens_duration_sec) - len(idx_missing)) - num_missing = np.round(np.nansum(transect.date_time.ens_duration_sec[idx_missing]) - / average_ensemble_duration) - len(idx_missing) + average_ensemble_duration = ( + np.nansum(transect.date_time.ens_duration_sec) + - np.nansum( + transect.date_time.ens_duration_sec[idx_missing] + ) + ) / ( + len(transect.date_time.ens_duration_sec) - len(idx_missing) + ) + num_missing = np.round( + np.nansum(transect.date_time.ens_duration_sec[idx_missing]) + / average_ensemble_duration + ) - len(idx_missing) else: num_missing = 0 else: # Determine number of lost ensembles for TRDI data - idx_missing = np.where(np.isnan(transect.date_time.ens_duration_sec))[0] + idx_missing = np.where( + np.isnan(transect.date_time.ens_duration_sec) + )[0] num_missing = len(idx_missing) - 1 # Save caution message if num_missing > 0: - self.transects['messages'].append(['Transects: ' + str(transect.file_name) + ' is missing ' - + str(int(num_missing)) + ' ensembles;', 2, 0]) - self.transects['status'] = 'caution' + 
self.transects["messages"].append( + [ + "Transects: " + + str(transect.file_name) + + " is missing " + + str(int(num_missing)) + + " ensembles;", + 2, + 0, + ] + ) + self.transects["status"] = "caution" + + # Invalid ensembles at left and/or right edge + boat_selected = getattr(transect.boat_vel, transect.boat_vel.selected) + valid_bt = boat_selected.valid_data[0, :] + valid_wt = np.any(transect.w_vel.valid_data[0, :, :], axis=0) + depth_selected = getattr(transect.depths, transect.depths.selected) + valid_depth = depth_selected.valid_data + valid_all = np.vstack([valid_bt, valid_wt, valid_depth]) + valid = np.all(valid_all, axis=0) + + threshold = np.floor(0.05 * valid.shape[0]) + if transect.start_edge == "Left": + idx = np.where(np.logical_not(valid)) + if idx[0].size > threshold: + left_invalid_exceeded = True + idx = np.where(np.logical_not(np.flip(valid))) + if idx[0].size > threshold: + right_invalid_exceeded = True + else: + idx = np.where(np.logical_not(valid)) + if idx[0].size > threshold: + right_invalid_exceeded = True + idx = np.where(np.logical_not(np.flip(valid))) + if idx[0].size > threshold: + left_invalid_exceeded = True + + if transect.sensors is not None: + if hasattr(transect.sensors, "battery_voltage"): + if transect.sensors.battery_voltage.internal is not None: + if transect.adcp.model == "RS5": + batt_threshold = 3.3 + + if ( + np.nanmin( + transect.sensors.battery_voltage.internal.data + ) + < batt_threshold + ): + self.transects["batt_voltage"].append( + transect.file_name[:-4] + ) + + # Message for invalid ensembles at left or right + if left_invalid_exceeded: + self.transects["messages"].append( + [ + "Transects: " + + " The number of invalid ensembles at the left" + + " edge exceeds 5 percent;", + 2, + 0, + ] + ) + self.transects["status"] = "caution" + if right_invalid_exceeded: + self.transects["messages"].append( + [ + "Transects: " + + " The number of invalid ensembles at the right" + + " edge exceeds 5 percent;", + 2, + 0, + ] + ) + self.transects["status"] = "caution" + + # Message for low battery + if len(self.transects["batt_voltage"]) > 0: + self.transects["status"] = "caution" + text = ( + "Transects: " + + str(self.transects["batt_voltage"]) + + " have battery voltage less than " + + str(batt_threshold) + ) + self.transects["messages"].append([text, 2, 0]) # Check number of transects checked if num_checked == 0: # No transects selected - self.transects['status'] = 'warning' - self.transects['messages'].append(['TRANSECTS: No transects selected;', 1, 0]) - self.transects['number'] = 2 + self.transects["status"] = "warning" + self.transects["messages"].append( + ["TRANSECTS: No transects selected;", 1, 0] + ) + self.transects["number"] = 2 elif num_checked == 1: # Only one transect selected - self.transects['status'] = 'caution' - self.transects['messages'].append(['Transects: Only one transect selected;', 2, 0]) - self.transects['number'] = 2 + self.transects["status"] = "caution" + self.transects["messages"].append( + ["Transects: Only one transect selected;", 2, 0] + ) + self.transects["number"] = 2 else: - self.transects['number'] = num_checked + self.transects["number"] = num_checked if num_checked == 2: # Only 2 transects selected - cov, _ = Uncertainty.uncertainty_q_random(discharges, 'total') + cov, _ = Uncertainty.uncertainty_q_random(discharges, "total") # Check uncertainty if cov > 2: - self.transects['status'] = 'caution' - self.transects['messages'].append( - ['Transects: Uncertainty would be reduced by additional transects;', 2, 0]) + 
self.transects["status"] = "caution" + self.transects["messages"].append( + [ + "Transects: Uncertainty would be reduced by " + "additional transects;", + 2, + 0, + ] + ) if num_checked < meas.min_transects: - self.transects['status'] = 'caution' - text = 'Transects: Number of transects is below the required minimum of ' \ - + str(meas.min_transects) + ';' - self.transects['messages'].append([text, 2, 0]) + self.transects["status"] = "caution" + text = ( + "Transects: Number of transects is below the " + "required minimum of " + str(meas.min_transects) + ";" + ) + self.transects["messages"].append([text, 2, 0]) # Check for consistent sign q_positive = [] @@ -628,17 +823,31 @@ class QAData(object): else: q_positive.append(False) if len(np.unique(q_positive)) > 1: - self.transects['status'] = 'warning' - self.transects['messages'].append( - ['TRANSECTS: Sign of total Q is not consistent. One or more start banks may be incorrect;', 1, 0]) + self.transects["status"] = "warning" + self.transects["messages"].append( + [ + "TRANSECTS: Sign of total Q is not consistent. One " + "or more start banks may be incorrect;", + 1, + 0, + ] + ) # Check for reciprocal transects - num_left = start_edge.count('Left') - num_right = start_edge.count('Right') + num_left = start_edge.count("Left") + num_right = start_edge.count("Right") if not num_left == num_right: - self.transects['status'] = 'warning' - self.transects['messages'].append(['TRANSECTS: Transects selected are not reciprocal transects;', 1, 0]) + self.transects["status"] = "warning" + self.transects["messages"].append( + [ + "TRANSECTS: Transects " + "selected are not " + "reciprocal transects;", + 1, + 0, + ] + ) # Check for zero discharge transects q_zero = False @@ -646,8 +855,10 @@ class QAData(object): if q.total == 0: q_zero = True if q_zero: - self.transects['status'] = 'warning' - self.transects['messages'].append(['TRANSECTS: One or more transects have zero Q;', 1, 0]) + self.transects["status"] = "warning" + self.transects["messages"].append( + ["TRANSECTS: One or more transects have zero Q;", 1, 0] + ) def system_tst_qa(self, meas): """Apply QA checks to system test. 
@@ -658,60 +869,127 @@ class QAData(object): Object of class Measurement """ - self.system_tst['messages'] = [] - self.system_tst['status'] = 'good' + self.system_tst["messages"] = [] + self.system_tst["status"] = "good" # Determine if a system test was recorded if not meas.system_tst: # No system test data recorded - self.system_tst['status'] = 'warning' - self.system_tst['messages'].append(['SYSTEM TEST: No system test;', 1, 3]) + self.system_tst["status"] = "warning" + self.system_tst["messages"].append(["SYSTEM TEST: No system test;", 1, 3]) else: pt3_fail = False num_tests_with_failure = 0 for test in meas.system_tst: - if hasattr(test, 'result'): + if hasattr(test, "result"): # Check for presence of pt3 test - if 'pt3' in test.result and test.result['pt3'] is not None: + if "pt3" in test.result and test.result["pt3"] is not None: # Check hard_limit, high gain, wide bandwidth - if 'hard_limit' in test.result['pt3']: - if 'high_wide' in test.result['pt3']['hard_limit']: - corr_table = test.result['pt3']['hard_limit']['high_wide']['corr_table'] + if "hard_limit" in test.result["pt3"]: + if "high_wide" in test.result["pt3"]["hard_limit"]: + corr_table = test.result["pt3"]["hard_limit"][ + "high_wide" + ]["corr_table"] if len(corr_table) > 0: - # All lags past lag 2 should be less than 50% of lag 0 + # All lags past lag 2 should be less + # than 50% of lag 0 qa_threshold = corr_table[0, :] * 0.5 - all_lag_check = np.greater(corr_table[3::, :], qa_threshold) + all_lag_check = np.greater( + corr_table[3::, :], qa_threshold + ) # Lag 7 should be less than 25% of lag 0 - lag_7_check = np.greater(corr_table[7, :], corr_table[0, :] * 0.25) - - # If either condition is met for any beam the test fails - if np.sum(np.sum(all_lag_check)) + np.sum(lag_7_check) > 1: + lag_7_check = np.greater( + corr_table[7, :], corr_table[0, :] * 0.25 + ) + + # If either condition is met for any + # beam the test fails + if ( + np.sum(np.sum(all_lag_check)) + + np.sum(lag_7_check) + > 1 + ): pt3_fail = True - if test.result['sysTest']['n_failed'] is not None and test.result['sysTest']['n_failed'] > 0: + if ( + test.result["sysTest"]["n_failed"] is not None + and test.result["sysTest"]["n_failed"] > 0 + ): num_tests_with_failure += 1 # pt3 test failure message if pt3_fail: - self.system_tst['status'] = 'caution' - self.system_tst['messages'].append( - ['System Test: One or more PT3 tests in the system test indicate potential EMI;', 2, 3]) + self.system_tst["status"] = "caution" + self.system_tst["messages"].append( + [ + "System Test: One or more PT3 tests in the system " + "test indicate potential EMI;", + 2, + 3, + ] + ) # Check for failed tests if num_tests_with_failure == len(meas.system_tst): # All tests had a failure - self.system_tst['status'] = 'warning' - self.system_tst['messages'].append( - ['SYSTEM TEST: All system test sets have at least one test that failed;', 1, 3]) + self.system_tst["status"] = "warning" + self.system_tst["messages"].append( + [ + "SYSTEM TEST: All system test sets have at least one " + "test that failed;", + 1, + 3, + ] + ) elif num_tests_with_failure > 0: - self.system_tst['status'] = 'caution' - self.system_tst['messages'].append( - ['System Test: One or more system test sets have at least one test that failed;', 2, 3]) + self.system_tst["status"] = "caution" + self.system_tst["messages"].append( + [ + "System Test: One or more system test sets have at " + "least one test that failed;", + 2, + 3, + ] + ) + + # Check for a custom transformation matrix + for transect in 
meas.transects: + if transect.checked: + if transect.adcp.t_matrix.source == "Nominal": + # This secondary check is necessary due to an earlier QRev bug + # that indicated the source as Nominal even though it was a + # custom matrix obtained from the ADCP. Thus loading a measurement + # saved from a earlier version could create a false alert. + # RiverRay matrices are always a standard matrix based on 30 degree + # beams, but other TRDI ADCPs should be different from their + # standard matrix based on 20 degree beams. + # This secondary check is not necessary for Sontek ADCPs. + if ( + transect.adcp.model != "RiverRay" + and transect.adcp.manufacturer == "TRDI" + ): + nominal_matrix = [ + [1.4619, -1.4619, 0, 0], + [0, 0, -1.4619, 1.4619], + [0.2661, 0.2661, 0.2661, 0.2661], + [1.0337, 1.0337, -1.0337, -1.0337], + ] + if np.allclose(nominal_matrix, transect.adcp.t_matrix.matrix): + self.system_tst["status"] = "caution" + self.system_tst["messages"].append( + [ + "System Test: ADCP is using a nominal matrix rather " + "than a custom matrix;", + 2, + 3, + ] + ) + break def compass_qa(self, meas): """Apply QA checks to compass calibration and evaluation. @@ -722,30 +1000,32 @@ class QAData(object): Object of class Measurement """ - self.compass['messages'] = [] + self.compass["messages"] = [] checked = [] for transect in meas.transects: checked.append(transect.checked) if np.any(checked): - heading = np.unique(meas.transects[checked.index(1)].sensors.heading_deg.internal.data) + heading = np.unique( + meas.transects[checked.index(1)].sensors.heading_deg.internal.data + ) else: heading = np.array([0]) # Initialize variable as if ADCP has no compass - self.compass['status'] = 'inactive' - self.compass['status1'] = 'good' - self.compass['status2'] = 'good' - self.compass['magvar'] = 0 - self.compass['magvar_idx'] = [] - self.compass['mag_error_idx'] = [] - self.compass['pitch_mean_warning_idx'] = [] - self.compass['pitch_mean_caution_idx'] = [] - self.compass['pitch_std_caution_idx'] = [] - self.compass['roll_mean_warning_idx'] = [] - self.compass['roll_mean_caution_idx'] = [] - self.compass['roll_std_caution_idx'] = [] + self.compass["status"] = "inactive" + self.compass["status1"] = "good" + self.compass["status2"] = "good" + self.compass["magvar"] = 0 + self.compass["magvar_idx"] = [] + self.compass["mag_error_idx"] = [] + self.compass["pitch_mean_warning_idx"] = [] + self.compass["pitch_mean_caution_idx"] = [] + self.compass["pitch_std_caution_idx"] = [] + self.compass["roll_mean_warning_idx"] = [] + self.compass["roll_mean_caution_idx"] = [] + self.compass["roll_std_caution_idx"] = [] if len(heading) > 1 and np.any(np.not_equal(heading, 0)): # ADCP has a compass @@ -754,71 +1034,100 @@ class QAData(object): # Check for loop test loop = False for test in meas.mb_tests: - if test.type == 'Loop': + if test.type == "Loop": loop = True # Check for GPS data gps = False - if meas.transects[checked.index(True)].boat_vel.gga_vel is not None or \ - meas.transects[checked.index(True)].boat_vel.vtg_vel is not None: + if ( + meas.transects[checked.index(True)].boat_vel.gga_vel is not None + or meas.transects[checked.index(True)].boat_vel.vtg_vel is not None + ): gps = True if gps or loop: # Compass calibration is required # Determine the ADCP manufacturer - if meas.transects[checked.index(True)].adcp.manufacturer == 'SonTek': + if meas.transects[checked.index(True)].adcp.manufacturer == "SonTek": # SonTek ADCP if len(meas.compass_cal) == 0: # No compass calibration - self.compass['status1'] = 'warning' 
- self.compass['messages'].append(['COMPASS: No compass calibration;', 1, 4]) - elif meas.compass_cal[-1].result['compass']['error'] == 'N/A': - # If the error cannot be decoded from the calibration assume the calibration is good - self.compass['status1'] = 'good' + self.compass["status1"] = "warning" + self.compass["messages"].append( + ["COMPASS: No compass calibration;", 1, 4] + ) + elif meas.compass_cal[-1].result["compass"]["error"] == "N/A": + # If the error cannot be decoded from the + # calibration assume the calibration is good + self.compass["status1"] = "good" else: - if meas.compass_cal[-1].result['compass']['error'] <= 0.2: - self.compass['status1'] = 'good' + if meas.compass_cal[-1].result["compass"]["error"] <= 0.2: + self.compass["status1"] = "good" else: - self.compass['status1'] = 'caution' - self.compass['messages'].append(['Compass: Calibration result > 0.2 deg;', 2, 4]) + self.compass["status1"] = "caution" + self.compass["messages"].append( + ["Compass: Calibration result > 0.2 deg;", 2, 4] + ) - elif meas.transects[checked.index(True)].adcp.manufacturer == 'TRDI': + elif meas.transects[checked.index(True)].adcp.manufacturer == "TRDI": # TRDI ADCP if len(meas.compass_cal) == 0: # No compass calibration if len(meas.compass_eval) == 0: # No calibration or evaluation - self.compass['status1'] = 'warning' - self.compass['messages'].append(['COMPASS: No compass calibration or evaluation;', 1, 4]) + self.compass["status1"] = "warning" + self.compass["messages"].append( + [ + "COMPASS: No " + "compass " + "calibration " + "or " + "evaluation;", + 1, + 4, + ] + ) + else: # No calibration but an evaluation was completed - self.compass['status1'] = 'caution' - self.compass['messages'].append(['Compass: No compass calibration;', 2, 4]) + self.compass["status1"] = "caution" + self.compass["messages"].append( + ["Compass: No compass calibration;", 2, 4] + ) else: # Compass was calibrated if len(meas.compass_eval) == 0: # No compass evaluation - self.compass['status1'] = 'caution' - self.compass['messages'].append(['Compass: No compass evaluation;', 2, 4]) + self.compass["status1"] = "caution" + self.compass["messages"].append( + ["Compass: No compass evaluation;", 2, 4] + ) else: # Check results of evaluation try: - if float(meas.compass_eval[-1].result['compass']['error']) <= 1: - self.compass['status1'] = 'good' + if ( + float( + meas.compass_eval[-1].result["compass"]["error"] + ) + <= 1 + ): + self.compass["status1"] = "good" else: - self.compass['status1'] = 'caution' - self.compass['messages'].append(['Compass: Evaluation result > 1 deg;', 2, 4]) + self.compass["status1"] = "caution" + self.compass["messages"].append( + ["Compass: Evaluation result > 1 deg;", 2, 4] + ) except ValueError: - self.compass['status1'] = 'good' + self.compass["status1"] = "good" else: # Compass not required if len(meas.compass_cal) == 0 and len(meas.compass_eval) == 0: # No compass calibration or evaluation - self.compass['status1'] = 'default' + self.compass["status1"] = "default" else: # Compass was calibrated and evaluated - self.compass['status1'] = 'good' + self.compass["status1"] = "good" # Check for consistent magvar and pitch and roll mean and variation magvar = [] @@ -835,29 +1144,43 @@ class QAData(object): if transect.checked: transect_idx.append(n) heading_source_selected = getattr( - transect.sensors.heading_deg, transect.sensors.heading_deg.selected) - pitch_source_selected = getattr(transect.sensors.pitch_deg, transect.sensors.pitch_deg.selected) - roll_source_selected = 
getattr(transect.sensors.roll_deg, transect.sensors.roll_deg.selected) + transect.sensors.heading_deg, + transect.sensors.heading_deg.selected, + ) + pitch_source_selected = getattr( + transect.sensors.pitch_deg, transect.sensors.pitch_deg.selected + ) + roll_source_selected = getattr( + transect.sensors.roll_deg, transect.sensors.roll_deg.selected + ) magvar.append(transect.sensors.heading_deg.internal.mag_var_deg) if transect.sensors.heading_deg.external is not None: - align.append(transect.sensors.heading_deg.external.align_correction_deg) + align.append( + transect.sensors.heading_deg.external.align_correction_deg + ) pitch_mean.append(np.nanmean(pitch_source_selected.data)) pitch_std.append(np.nanstd(pitch_source_selected.data, ddof=1)) roll_mean.append(np.nanmean(roll_source_selected.data)) roll_std.append(np.nanstd(roll_source_selected.data, ddof=1)) - # SonTek G3 compass provides pitch, roll, and magnetic error parameters that can be checked - if transect.adcp.manufacturer == 'SonTek': + # SonTek G3 compass provides pitch, roll, and magnetic + # error parameters that can be checked + if transect.adcp.manufacturer == "SonTek": if heading_source_selected.pitch_limit is not None: - # Check for bug in SonTek data where pitch and roll was n x 3 use n x 1 + # Check for bug in SonTek data where pitch and + # roll was n x 3 use n x 1 if len(pitch_source_selected.data.shape) == 1: pitch_data = pitch_source_selected.data else: pitch_data = pitch_source_selected.data[:, 0] - idx_max = np.where(pitch_data > heading_source_selected.pitch_limit[0])[0] - idx_min = np.where(pitch_data < heading_source_selected.pitch_limit[1])[0] + idx_max = np.where( + pitch_data > heading_source_selected.pitch_limit[0] + )[0] + idx_min = np.where( + pitch_data < heading_source_selected.pitch_limit[1] + )[0] if len(idx_max) > 0 or len(idx_min) > 0: pitch_exceeded.append(True) else: @@ -868,8 +1191,12 @@ class QAData(object): roll_data = roll_source_selected.data else: roll_data = roll_source_selected.data[:, 0] - idx_max = np.where(roll_data > heading_source_selected.pitch_limit[0])[0] - idx_min = np.where(roll_data < heading_source_selected.pitch_limit[1])[0] + idx_max = np.where( + roll_data > heading_source_selected.pitch_limit[0] + )[0] + idx_min = np.where( + roll_data < heading_source_selected.pitch_limit[1] + )[0] if len(idx_max) > 0 or len(idx_min) > 0: roll_exceeded.append(True) else: @@ -881,120 +1208,193 @@ class QAData(object): mag_error_exceeded.append(n) # Check magvar consistency if len(np.unique(magvar)) > 1: - self.compass['status2'] = 'caution' - self.compass['messages'].append( - ['Compass: Magnetic variation is not consistent among transects;', 2, 4]) - self.compass['magvar'] = 1 + self.compass["status2"] = "caution" + self.compass["messages"].append( + [ + "Compass: Magnetic variation is not consistent among " + "transects;", + 2, + 4, + ] + ) + self.compass["magvar"] = 1 # Check magvar consistency if len(np.unique(align)) > 1: - self.compass['status2'] = 'caution' - self.compass['messages'].append( - ['Compass: Heading offset is not consistent among transects;', 2, 4]) - self.compass['align'] = 1 + self.compass["status2"] = "caution" + self.compass["messages"].append( + [ + "Compass: Heading offset is not consistent among " "transects;", + 2, + 4, + ] + ) + self.compass["align"] = 1 # Check that magvar was set if GPS data are available if gps: if 0 in magvar: - self.compass['status2'] = 'warning' - self.compass['messages'].append( - ['COMPASS: Magnetic variation is 0 and GPS data are 
present;', 1, 4]) - self.compass['magvar'] = 2 - self.compass['magvar_idx'] = np.where(np.array(magvar) == 0)[0].tolist() + self.compass["status2"] = "warning" + self.compass["messages"].append( + [ + "COMPASS: Magnetic variation is 0 and GPS data " + "are present;", + 1, + 4, + ] + ) + self.compass["magvar"] = 2 + self.compass["magvar_idx"] = np.where(np.array(magvar) == 0)[ + 0 + ].tolist() # Check pitch mean if np.any(np.asarray(np.abs(pitch_mean)) > 8): - self.compass['status2'] = 'warning' - self.compass['messages'].append(['PITCH: One or more transects have a mean pitch > 8 deg;', 1, 4]) + self.compass["status2"] = "warning" + self.compass["messages"].append( + ["PITCH: One or more transects have a mean pitch > 8 deg;", 1, 4] + ) temp = np.where(np.abs(pitch_mean) > 8)[0] if len(temp) > 0: - self.compass['pitch_mean_warning_idx'] = np.array(transect_idx)[temp] + self.compass["pitch_mean_warning_idx"] = np.array(transect_idx)[ + temp + ] else: - self.compass['pitch_mean_warning_idx'] = [] + self.compass["pitch_mean_warning_idx"] = [] elif np.any(np.asarray(np.abs(pitch_mean)) > 4): - if self.compass['status2'] == 'good': - self.compass['status2'] = 'caution' - self.compass['messages'].append(['Pitch: One or more transects have a mean pitch > 4 deg;', 2, 4]) + if self.compass["status2"] == "good": + self.compass["status2"] = "caution" + self.compass["messages"].append( + ["Pitch: One or more transects have a mean pitch > 4 deg;", 2, 4] + ) temp = np.where(np.abs(pitch_mean) > 4)[0] if len(temp) > 0: - self.compass['pitch_mean_caution_idx'] = np.array(transect_idx)[temp] + self.compass["pitch_mean_caution_idx"] = np.array(transect_idx)[ + temp + ] else: - self.compass['pitch_mean_caution_idx'] = [] + self.compass["pitch_mean_caution_idx"] = [] # Check roll mean if np.any(np.asarray(np.abs(roll_mean)) > 8): - self.compass['status2'] = 'warning' - self.compass['messages'].append(['ROLL: One or more transects have a mean roll > 8 deg;', 1, 4]) + self.compass["status2"] = "warning" + self.compass["messages"].append( + ["ROLL: One or more transects have a mean roll > 8 deg;", 1, 4] + ) temp = np.where(np.abs(roll_mean) > 8)[0] if len(temp) > 0: - self.compass['roll_mean_warning_idx'] = np.array(transect_idx)[temp] + self.compass["roll_mean_warning_idx"] = np.array(transect_idx)[temp] else: - self.compass['roll_mean_warning_idx'] = [] + self.compass["roll_mean_warning_idx"] = [] elif np.any(np.asarray(np.abs(roll_mean)) > 4): - if self.compass['status2'] == 'good': - self.compass['status2'] = 'caution' - self.compass['messages'].append(['Roll: One or more transects have a mean roll > 4 deg;', 2, 4]) + if self.compass["status2"] == "good": + self.compass["status2"] = "caution" + self.compass["messages"].append( + ["Roll: One or more transects have a mean roll > 4 deg;", 2, 4] + ) temp = np.where(np.abs(roll_mean) > 4)[0] if len(temp) > 0: - self.compass['roll_mean_caution_idx'] = np.array(transect_idx)[temp] + self.compass["roll_mean_caution_idx"] = np.array(transect_idx)[temp] else: - self.compass['roll_mean_caution_idx'] = [] + self.compass["roll_mean_caution_idx"] = [] # Check pitch standard deviation if np.any(np.asarray(pitch_std) > 5): - if self.compass['status2'] == 'good': - self.compass['status2'] = 'caution' - self.compass['messages'].append(['Pitch: One or more transects have a pitch std dev > 5 deg;', 2, 4]) + if self.compass["status2"] == "good": + self.compass["status2"] = "caution" + self.compass["messages"].append( + [ + "Pitch: One or more " + "transects have a pitch " + "std 
dev > 5 deg;", + 2, + 4, + ] + ) + temp = np.where(np.abs(pitch_std) > 5)[0] if len(temp) > 0: - self.compass['pitch_std_caution_idx'] = np.array(transect_idx)[temp] + self.compass["pitch_std_caution_idx"] = np.array(transect_idx)[temp] else: - self.compass['pitch_std_caution_idx'] = [] + self.compass["pitch_std_caution_idx"] = [] # Check roll standard deviation if np.any(np.asarray(roll_std) > 5): - if self.compass['status2'] == 'good': - self.compass['status2'] = 'caution' - self.compass['messages'].append(['Roll: One or more transects have a roll std dev > 5 deg;', 2, 4]) + if self.compass["status2"] == "good": + self.compass["status2"] = "caution" + self.compass["messages"].append( + [ + "Roll: One or more " + "transects have a roll " + "std dev > 5 deg;", + 2, + 4, + ] + ) + temp = np.where(np.abs(roll_std) > 5)[0] if len(temp) > 0: - self.compass['roll_std_caution_idx'] = np.array(transect_idx)[temp] + self.compass["roll_std_caution_idx"] = np.array(transect_idx)[temp] else: - self.compass['roll_std_caution_idx'] = [] + self.compass["roll_std_caution_idx"] = [] # Additional checks for SonTek G3 compass - if meas.transects[checked.index(True)].adcp.manufacturer == 'SonTek': + if meas.transects[checked.index(True)].adcp.manufacturer == "SonTek": # Check if pitch limits were exceeded if any(pitch_exceeded): - if self.compass['status2'] == 'good': - self.compass['status2'] = 'caution' - self.compass['messages'].append( - ['Compass: One or more transects have pitch exceeding calibration limits;', 2, 4]) + if self.compass["status2"] == "good": + self.compass["status2"] = "caution" + self.compass["messages"].append( + [ + "Compass: One or more transects have pitch " + "exceeding calibration limits;", + 2, + 4, + ] + ) # Check if roll limits were exceeded if any(roll_exceeded): - if self.compass['status2'] == 'good': - self.compass['status2'] = 'caution' - self.compass['messages'].append( - ['Compass: One or more transects have roll exceeding calibration limits;', 2, 4]) + if self.compass["status2"] == "good": + self.compass["status2"] = "caution" + self.compass["messages"].append( + [ + "Compass: One or more transects have roll " + "exceeding calibration limits;", + 2, + 4, + ] + ) # Check if magnetic error was exceeded - self.compass['mag_error_idx'] = [] + self.compass["mag_error_idx"] = [] if len(mag_error_exceeded) > 0: - self.compass['mag_error_idx'] = np.array(mag_error_exceeded) - if self.compass['status2'] == 'good': - self.compass['status2'] = 'caution' - self.compass['messages'].append( - ['Compass: One or more transects have a change in mag field exceeding 2%;', 2, 4]) - - if self.compass['status1'] == 'warning' or self.compass['status2'] == 'warning': - self.compass['status'] = 'warning' - elif self.compass['status1'] == 'caution' or self.compass['status2'] == 'caution': - self.compass['status'] = 'caution' + self.compass["mag_error_idx"] = np.array(mag_error_exceeded) + if self.compass["status2"] == "good": + self.compass["status2"] = "caution" + self.compass["messages"].append( + [ + "Compass: One or more transects have a change in " + "mag field exceeding 2%;", + 2, + 4, + ] + ) + + if ( + self.compass["status1"] == "warning" + or self.compass["status2"] == "warning" + ): + self.compass["status"] = "warning" + elif ( + self.compass["status1"] == "caution" + or self.compass["status2"] == "caution" + ): + self.compass["status"] = "caution" else: - self.compass['status'] = 'good' + self.compass["status"] = "good" def temperature_qa(self, meas): """Apply QA checks to temperature. 
@@ -1005,7 +1405,7 @@ class QAData(object): Object of class Measurement """ - self.temperature['messages'] = [] + self.temperature["messages"] = [] check = [0, 0] # Create array of all temperatures @@ -1014,7 +1414,10 @@ class QAData(object): for transect in meas.transects: if transect.checked: checked.append(transect.checked) - temp_selected = getattr(transect.sensors.temperature_deg_c, transect.sensors.temperature_deg_c.selected) + temp_selected = getattr( + transect.sensors.temperature_deg_c, + transect.sensors.temperature_deg_c.selected, + ) if len(temp) == 0: temp = temp_selected.data else: @@ -1028,37 +1431,56 @@ class QAData(object): if temp_range > 2: check[0] = 3 - self.temperature['messages'].append(['TEMPERATURE: Temperature range is ' - + '{:3.1f}'.format(temp_range) - + ' degrees C which is greater than 2 degrees;', 1, 5]) + self.temperature["messages"].append( + [ + "TEMPERATURE: Temperature range is " + + "{:3.1f}".format(temp_range) + + " degrees C which is greater than 2 degrees;", + 1, + 5, + ] + ) elif temp_range > 1: check[0] = 2 - self.temperature['messages'].append(['Temperature: Temperature range is ' - + '{:3.1f}'.format(temp_range) - + ' degrees C which is greater than 1 degree;', 2, 5]) + self.temperature["messages"].append( + [ + "Temperature: Temperature range is " + + "{:3.1f}".format(temp_range) + + " degrees C which is greater than 1 degree;", + 2, + 5, + ] + ) else: check[0] = 1 # Check for independent temperature reading - if 'user' in meas.ext_temp_chk: + if "user" in meas.ext_temp_chk: try: - user = float(meas.ext_temp_chk['user']) + user = float(meas.ext_temp_chk["user"]) except (ValueError, TypeError): user = None if user is None or np.isnan(user): # No independent temperature reading check[1] = 2 - self.temperature['messages'].append(['Temperature: No independent temperature reading;', 2, 5]) - elif not np.isnan(meas.ext_temp_chk['adcp']): + self.temperature["messages"].append( + ["Temperature: No independent temperature reading;", 2, 5] + ) + elif not np.isnan(meas.ext_temp_chk["adcp"]): # Compare user to manually entered ADCP temperature - diff = np.abs(user - meas.ext_temp_chk['adcp']) + diff = np.abs(user - meas.ext_temp_chk["adcp"]) if diff < 2: check[1] = 1 else: check[1] = 3 - self.temperature['messages'].append( - ['TEMPERATURE: The difference between ADCP and reference is > 2: ' - + '{:3.1f}'.format(diff) + ' C;', 1, 5]) + self.temperature["messages"].append( + [ + "TEMPERATURE: The difference between ADCP and " + "reference is > 2: " + "{:3.1f}".format(diff) + " C;", + 1, + 5, + ] + ) else: # Compare user to mean of all temperature data diff = np.abs(user - np.nanmean(temp)) @@ -1066,18 +1488,23 @@ class QAData(object): check[1] = 1 else: check[1] = 3 - self.temperature['messages'].append( - ['TEMPERATURE: The difference between ADCP and reference is > 2: ' - + '{:3.1f}'.format(diff) + ' C;', 1, 5]) + self.temperature["messages"].append( + [ + "TEMPERATURE: The difference between ADCP and " + "reference is > 2: " + "{:3.1f}".format(diff) + " C;", + 1, + 5, + ] + ) # Assign temperature status max_check = max(check) if max_check == 1: - self.temperature['status'] = 'good' + self.temperature["status"] = "good" elif max_check == 2: - self.temperature['status'] = 'caution' + self.temperature["status"] = "caution" elif max_check == 3: - self.temperature['status'] = 'warning' + self.temperature["status"] = "warning" def moving_bed_qa(self, meas): """Applies quality checks to moving-bed tests. 
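temperature_qa above scores two independent sub-checks, the temperature range
across transects and the agreement with an independent reading, as 1 = good,
2 = caution, 3 = warning, and the worst score sets the status. A condensed
sketch with the same thresholds (names are illustrative, and the reference
comparison shown is the mean-of-all-temperatures path, not the manually
entered ADCP temperature path):

import numpy as np


def temperature_status(temp_series, reference=None):
    temp_range = np.nanmax(temp_series) - np.nanmin(temp_series)
    range_score = 3 if temp_range > 2 else 2 if temp_range > 1 else 1
    if reference is None:
        ref_score = 2  # a missing independent reading is a caution
    else:
        ref_score = 1 if np.abs(reference - np.nanmean(temp_series)) < 2 else 3
    return {1: "good", 2: "caution", 3: "warning"}[max(range_score, ref_score)]

For example, temperature_status(np.array([21.0, 21.4, 23.6]), reference=21.2)
returns "warning": the 2.6 degree range exceeds the 2 degree threshold even
though the reference agrees with the mean.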
@@ -1088,20 +1515,24 @@ class QAData(object): Object of class Measurement """ - self.movingbed['messages'] = [] - self.movingbed['code'] = 0 + self.movingbed["messages"] = [] + self.movingbed["code"] = 0 # Are there moving-bed tests? if len(meas.mb_tests) < 1: if meas.observed_no_moving_bed: - self.movingbed['messages'].append(['Moving-Bed Test: Visually observed no moving bed;', 2, 6]) - self.movingbed['status'] = 'caution' - self.movingbed['code'] = 2 + self.movingbed["messages"].append( + ["Moving-Bed Test: Visually observed no moving bed;", 2, 6] + ) + self.movingbed["status"] = "caution" + self.movingbed["code"] = 2 else: # No moving-bed test - self.movingbed['messages'].append(['MOVING-BED TEST: No moving bed test;', 1, 6]) - self.movingbed['status'] = 'warning' - self.movingbed['code'] = 3 + self.movingbed["messages"].append( + ["MOVING-BED TEST: No moving bed test;", 1, 6] + ) + self.movingbed["status"] = "warning" + self.movingbed["code"] = 3 else: # Moving-bed tests available @@ -1124,12 +1555,15 @@ class QAData(object): if test.user_valid: user_valid_test.append(True) file_names.append(test.transect.file_name) - if test.type == 'Loop' and not test.test_quality == 'Errors': + if test.type == "Loop" and not test.test_quality == "Errors": loop.append(test.moving_bed) + if not np.isnan(test.gps_percent_mb): if np.abs(test.bt_percent_mb - test.gps_percent_mb) > 2: gps_diff2 = True - if np.logical_xor(test.bt_percent_mb >= 1, test.gps_percent_mb >= 1): + if np.logical_xor( + test.bt_percent_mb >= 1, test.gps_percent_mb >= 1 + ): gps_diff1 = True # Selected test if test.selected: @@ -1144,54 +1578,109 @@ class QAData(object): if not any(user_valid_test): # No valid test according to user - self.movingbed['messages'].append(['MOVING-BED TEST: No valid moving-bed test based on user input;', - 1, 6]) - self.movingbed['status'] = 'warning' - self.movingbed['code'] = 3 + self.movingbed["messages"].append( + [ + "MOVING-BED TEST: No " + "valid moving-bed test " + "based on user input;", + 1, + 6, + ] + ) + + self.movingbed["status"] = "warning" + self.movingbed["code"] = 3 else: # Check for duplicate valid moving-bed tests if len(np.unique(file_names)) < len(file_names): - self.movingbed['messages'].append([ - 'MOVING-BED TEST: Duplicate moving-bed test files marked valid;', 1, 6]) - self.movingbed['status'] = 'warning' - self.movingbed['code'] = 3 - - if self.movingbed['code'] == 0: + self.movingbed["messages"].append( + [ + "MOVING-BED TEST: Duplicate moving-bed test files " + "marked valid;", + 1, + 6, + ] + ) + self.movingbed["status"] = "warning" + self.movingbed["code"] = 3 + + if self.movingbed["code"] == 0: # Check test quality - if len(test_quality) > 0 and sum(np.array(test_quality) == 'Good') > 0: - self.movingbed['status'] = 'good' - self.movingbed['code'] = 1 + if len(test_quality) > 0 and sum(np.array(test_quality) == "Good") > 0: + self.movingbed["status"] = "good" + self.movingbed["code"] = 1 # Check if there is a moving-bed - if 'Yes' in mb: + if "Yes" in mb: # Moving-bed present - self.movingbed['messages'].append( - ['Moving-Bed Test: A moving-bed is present.', 2, 6]) - self.movingbed['code'] = 2 - self.movingbed['status'] = 'caution' - if meas.transects[meas.checked_transect_idx[0]].boat_vel.composite == 'On': - self.movingbed['messages'].append( - ['Moving-Bed: Use of composite tracks could cause inaccurate results.', 2, 6]) - - if meas.transects[meas.checked_transect_idx[0]].boat_vel.selected == 'bt_vel': + self.movingbed["messages"].append( + ["Moving-Bed Test: A 
moving-bed is present.", 2, 6] + ) + self.movingbed["code"] = 2 + self.movingbed["status"] = "caution" + if ( + meas.transects[ + meas.checked_transect_idx[0] + ].boat_vel.composite + == "On" + ): + self.movingbed["messages"].append( + [ + "Moving-Bed: Use of composite tracks " + "could cause inaccurate results.", + 2, + 6, + ] + ) + + if ( + meas.transects[ + meas.checked_transect_idx[0] + ].boat_vel.selected + == "bt_vel" + ): if any(use_2_correct): - self.movingbed['messages'].append( - ['Moving-Bed: BT based moving-bed correction applied.', 2, 6]) + self.movingbed["messages"].append( + [ + "Moving-Bed: BT based moving-bed " + "correction applied.", + 2, + 6, + ] + ) else: - self.movingbed['messages'].append( - ['MOVING-BED: Moving-bed present and BT used, but no correction applied.', 1, 6]) - self.movingbed['code'] = 3 - self.movingbed['status'] = 'warning' - elif meas.transects[meas.checked_transect_idx[0]].boat_vel.selected == 'gga_vel': - self.movingbed['messages'].append( - ['Moving-Bed: GGA used.', 2, 6]) - elif meas.transects[meas.checked_transect_idx[0]].boat_vel.selected == 'vtg_vel': - self.movingbed['messages'].append( - ['Moving-Bed: VTG used.', 2, 6]) + self.movingbed["messages"].append( + [ + "MOVING-BED: Moving-bed present and " + "BT used, but no correction applied.", + 1, + 6, + ] + ) + self.movingbed["code"] = 3 + self.movingbed["status"] = "warning" + elif ( + meas.transects[ + meas.checked_transect_idx[0] + ].boat_vel.selected + == "gga_vel" + ): + self.movingbed["messages"].append( + ["Moving-Bed: GGA used.", 2, 6] + ) + elif ( + meas.transects[ + meas.checked_transect_idx[0] + ].boat_vel.selected + == "vtg_vel" + ): + self.movingbed["messages"].append( + ["Moving-Bed: VTG used.", 2, 6] + ) # Check for test type - if sum(np.array(mb_test_type) == 'Stationary'): + if sum(np.array(mb_test_type) == "Stationary"): # Check for GPS or 3 stationary tests if len(mb_tests) < 3: gps = [] @@ -1202,55 +1691,132 @@ class QAData(object): else: gps.append(True) if not all(gps): - # GPS not available for all selected transects - self.movingbed['messages'].append([ - 'Moving-Bed Test: ' - + 'Less than 3 stationary tests available for moving-bed correction;', - 2, 6]) - - elif len(test_quality) > 0 and sum(np.array(test_quality) == 'Warnings') > 0: + # GPS not available for all selected + # transects + self.movingbed["messages"].append( + [ + "Moving-Bed Test: " + + "Less than 3 stationary tests " + "available for moving-bed " + "correction;", + 2, + 6, + ] + ) + + elif ( + len(test_quality) > 0 + and sum(np.array(test_quality) == "Warnings") > 0 + ): # Quality check has warnings - self.movingbed['messages'].append(['Moving-Bed Test: The moving-bed test(s) has warnings, ' - + 'please review tests to determine validity;', 2, 6]) - self.movingbed['status'] = 'caution' - self.movingbed['code'] = 2 - - elif len(test_quality) > 0 and sum(np.array(test_quality) == 'Manual') > 0: + self.movingbed["messages"].append( + [ + "Moving-Bed Test: " + "The moving-bed " + "test(s) has " + "warnings, " + "please review " + "tests to " + "determine " + "validity;", + 2, + 6, + ] + ) + + self.movingbed["status"] = "caution" + self.movingbed["code"] = 2 + + elif ( + len(test_quality) > 0 + and sum(np.array(test_quality) == "Manual") > 0 + ): # Manual override used - self.movingbed['messages'].append(['MOVING-BED TEST: ' - + 'The user has manually forced the use of some tests;', 1, 6]) - self.movingbed['status'] = 'warning' - self.movingbed['code'] = 3 + self.movingbed["messages"].append( + [ + 
"MOVING-BED TEST: " + "The user has " + "manually forced " + "the use of some " + "tests;", + 1, + 6, + ] + ) + self.movingbed["status"] = "warning" + self.movingbed["code"] = 3 else: # Test has critical errors - self.movingbed['messages'].append(['MOVING-BED TEST: The moving-bed test(s) have critical errors ' - + 'and will not be used;', 1, 6]) - self.movingbed['status'] = 'warning' - self.movingbed['code'] = 3 + self.movingbed["messages"].append( + [ + "MOVING-BED TEST: " + "The moving-bed " + "test(s) have " + "critical errors " + "and will not be " + "used;", + 1, + 6, + ] + ) + + self.movingbed["status"] = "warning" + self.movingbed["code"] = 3 # Check multiple loops for consistency if len(np.unique(loop)) > 1: - self.movingbed['messages'].append(['Moving-Bed Test: Results of valid loops are not consistent, ' - + 'review moving-bed tests;', 2, 6]) - if self.movingbed['code'] < 3: - self.movingbed['code'] = 2 - self.movingbed['status'] = 'caution' + self.movingbed["messages"].append( + [ + "Moving-Bed Test: " + "Results of valid " + "loops are not " + "consistent, " + "review " + "moving-bed " + "tests;", + 2, + 6, + ] + ) + if self.movingbed["code"] < 3: + self.movingbed["code"] = 2 + self.movingbed["status"] = "caution" # Notify of differences in results of test between BT and GPS if gps_diff2: - self.movingbed['messages'].append(['Moving-Bed Test: Bottom track and ' - 'GPS results differ by more than 2%.', 2, 6]) - if self.movingbed['code'] < 3: - self.movingbed['code'] = 2 - self.movingbed['status'] = 'caution' + self.movingbed["messages"].append( + [ + "Moving-Bed Test: Bottom track and " + "GPS results differ by more than 2%.", + 2, + 6, + ] + ) + if self.movingbed["code"] < 3: + self.movingbed["code"] = 2 + self.movingbed["status"] = "caution" if gps_diff1: - self.movingbed['messages'].append(['Moving-Bed Test: Bottom track and GPS results do not agree.', - 2, 6]) - if self.movingbed['code'] < 3: - self.movingbed['code'] = 2 - self.movingbed['status'] = 'caution' + self.movingbed["messages"].append( + [ + "Moving-Bed Test: " + "Bottom track and " + "GPS results do " + "not agree.", + 2, + 6, + ] + ) + if self.movingbed["code"] < 3: + self.movingbed["code"] = 2 + self.movingbed["status"] = "caution" + + if len(loop) > 0: + if self.compass["status1"] != "good": + self.movingbed["messages"].append( + "Moving-Bed Test: Loop test used but compass calibration is " + + self.compass["status1"] + ) + if self.movingbed["code"] < 3: + self.movingbed["code"] = 2 + self.movingbed["status"] = "caution" self.check_mbt_settings(meas) @@ -1263,27 +1829,31 @@ class QAData(object): Object of class Measurement """ - self.user['messages'] = [] - self.user['status'] = 'good' + self.user["messages"] = [] + self.user["status"] = "good" # Check for Station Name - self.user['sta_name'] = False + self.user["sta_name"] = False if meas.station_name is None or len(meas.station_name.strip()) < 1: - self.user['messages'].append(['Site Info: Station name not entered;', 2, 2]) - self.user['status'] = 'caution' - self.user['sta_name'] = True + self.user["messages"].append(["Site Info: Station name not entered;", 2, 2]) + self.user["status"] = "caution" + self.user["sta_name"] = True # Check for Station Number - self.user['sta_number'] = False + self.user["sta_number"] = False try: if meas.station_number is None or len(meas.station_number.strip()) < 1: - self.user['messages'].append(['Site Info: Station number not entered;', 2, 2]) - self.user['status'] = 'caution' - self.user['sta_number'] = True + 
self.user["messages"].append( + ["Site Info: Station number not entered;", 2, 2] + ) + self.user["status"] = "caution" + self.user["sta_number"] = True except AttributeError: - self.user['messages'].append(['Site Info: Station number not entered;', 2, 2]) - self.user['status'] = 'caution' - self.user['sta_number'] = True + self.user["messages"].append( + ["Site Info: Station number not entered;", 2, 2] + ) + self.user["status"] = "caution" + self.user["sta_number"] = True def depths_qa(self, meas): """Apply quality checks to depth data. @@ -1296,16 +1866,16 @@ class QAData(object): # Initialize variables n_transects = len(meas.transects) - self.depths['q_total'] = np.tile(np.nan, n_transects) - self.depths['q_max_run'] = np.tile(np.nan, n_transects) - self.depths['q_total_caution'] = np.tile(False, n_transects) - self.depths['q_max_run_caution'] = np.tile(False, n_transects) - self.depths['q_total_warning'] = np.tile(False, n_transects) - self.depths['q_max_run_warning'] = np.tile(False, n_transects) - self.depths['all_invalid'] = np.tile(False, n_transects) - self.depths['messages'] = [] - self.depths['status'] = 'good' - self.depths['draft'] = 0 + self.depths["q_total"] = np.tile(np.nan, n_transects) + self.depths["q_max_run"] = np.tile(np.nan, n_transects) + self.depths["q_total_caution"] = np.tile(False, n_transects) + self.depths["q_max_run_caution"] = np.tile(False, n_transects) + self.depths["q_total_warning"] = np.tile(False, n_transects) + self.depths["q_max_run_warning"] = np.tile(False, n_transects) + self.depths["all_invalid"] = np.tile(False, n_transects) + self.depths["messages"] = [] + self.depths["status"] = "good" + self.depths["draft"] = 0 checked = [] drafts = [] for n, transect in enumerate(meas.transects): @@ -1318,21 +1888,25 @@ class QAData(object): # Determine valid measured depths if transect.depths.composite: - depth_na = depths_selected.depth_source_ens[in_transect_idx] != 'NA' - depth_in = depths_selected.depth_source_ens[in_transect_idx] != 'IN' + depth_na = depths_selected.depth_source_ens[in_transect_idx] != "NA" + depth_in = depths_selected.depth_source_ens[in_transect_idx] != "IN" depth_valid = np.all(np.vstack((depth_na, depth_in)), 0) else: depth_valid_temp = depths_selected.valid_data[in_transect_idx] - depth_nan = depths_selected.depth_processed_m[in_transect_idx] != np.nan + depth_nan = ( + depths_selected.depth_processed_m[in_transect_idx] != np.nan + ) depth_valid = np.all(np.vstack((depth_nan, depth_valid_temp)), 0) if not np.any(depth_valid): - self.depths['all_invalid'][n] = True + self.depths["all_invalid"][n] = True # Compute QA characteristics - q_total, q_max_run, number_invalid_ensembles = QAData.invalid_qa(depth_valid, meas.discharge[n]) - self.depths['q_total'][n] = q_total - self.depths['q_max_run'][n] = q_max_run + q_total, q_max_run, number_invalid_ensembles = QAData.invalid_qa( + depth_valid, meas.discharge[n] + ) + self.depths["q_total"][n] = q_total + self.depths["q_max_run"][n] = q_max_run # Compute percentage compared to total if meas.discharge[n].total == 0.0: @@ -1340,19 +1914,21 @@ class QAData(object): q_max_run_percent = np.nan else: q_total_percent = np.abs((q_total / meas.discharge[n].total) * 100) - q_max_run_percent = np.abs((q_max_run / meas.discharge[n].total) * 100) + q_max_run_percent = np.abs( + (q_max_run / meas.discharge[n].total) * 100 + ) # Apply total interpolated discharge threshold if q_total_percent > self.q_total_threshold_warning: - self.depths['q_total_warning'][n] = True + self.depths["q_total_warning"][n] 
= True elif q_total_percent > self.q_total_threshold_caution: - self.depths['q_total_caution'][n] = True + self.depths["q_total_caution"][n] = True # Apply interpolated discharge run thresholds if q_max_run_percent > self.q_run_threshold_warning: - self.depths['q_max_run_warning'][n] = True + self.depths["q_max_run_warning"][n] = True elif q_max_run_percent > self.q_run_threshold_caution: - self.depths['q_max_run_caution'][n] = True + self.depths["q_max_run_caution"][n] = True if checked: @@ -1361,45 +1937,93 @@ class QAData(object): # Check draft consistency if len(draft_check) > 1: - self.depths['status'] = 'caution' - self.depths['draft'] = 1 - self.depths['messages'].append(['Depth: Transducer depth is not consistent among transects;', 2, 10]) + self.depths["status"] = "caution" + self.depths["draft"] = 1 + self.depths["messages"].append( + [ + "Depth: Transducer depth " + "is not consistent among " + "transects;", + 2, + 10, + ] + ) # Check for zero draft if np.any(np.less(draft_check, 0.01)): - self.depths['status'] = 'warning' - self.depths['draft'] = 2 - self.depths['messages'].append(['DEPTH: Transducer depth is too shallow, likely 0;', 1, 10]) - - # Check consecutive interpolated discharge criteria - if np.any(self.depths['q_max_run_warning']): - self.depths['messages'].append(['DEPTH: Int. Q for consecutive invalid ensembles exceeds ' - + '%2.0f' % self.q_run_threshold_warning + '%;', 1, 10]) - self.depths['status'] = 'warning' - elif np.any(self.depths['q_max_run_caution']): - self.depths['messages'].append(['Depth: Int. Q for consecutive invalid ensembles exceeds ' - + '%2.0f' % self.q_run_threshold_caution + '%;', 2, 10]) - self.depths['status'] = 'caution' - - # Check total interpolated discharge criteria - if np.any(self.depths['q_total_warning']): - self.depths['messages'].append(['DEPTH: Int. Q for invalid ensembles in a transect exceeds ' - + '%2.0f' % self.q_total_threshold_warning + '%;', 1, 10]) - self.depths['status'] = 'warning' - elif np.any(self.depths['q_total_caution']): - self.depths['messages'].append(['Depth: Int. Q for invalid ensembles in a transect exceeds ' - + '%2.0f' % self.q_total_threshold_caution + '%;', 2, 10]) - self.depths['status'] = 'caution' + self.depths["status"] = "warning" + self.depths["draft"] = 2 + self.depths["messages"].append( + ["DEPTH: Transducer depth is too shallow, likely 0;", 1, 10] + ) + + # Check interpolated discharge criteria + if np.any(self.depths["q_max_run_warning"]): + self.depths["messages"].append( + [ + "DEPTH: Int. Q for consecutive invalid ensembles exceeds " + + "%2.0f" % self.q_run_threshold_warning + + "%;", + 1, + 10, + ] + ) + self.depths["status"] = "warning" + + elif np.any(self.depths["q_total_warning"]): + self.depths["messages"].append( + [ + "DEPTH: Int. Q for invalid ensembles in a " + "transect exceeds " + + "%2.0f" % self.q_total_threshold_warning + + "%;", + 1, + 10, + ] + ) + self.depths["status"] = "warning" + elif np.any(self.depths["q_max_run_caution"]): + self.depths["messages"].append( + [ + "Depth: Int. Q for consecutive invalid ensembles exceeds " + + "%2.0f" % self.q_run_threshold_caution + + "%;", + 2, + 10, + ] + ) + self.depths["status"] = "caution" + elif np.any(self.depths["q_total_caution"]): + self.depths["messages"].append( + [ + "Depth: Int. 
Q for " + "invalid ensembles in a " + "transect exceeds " + + "%2.0f" % self.q_total_threshold_caution + + "%;", + 2, + 10, + ] + ) + self.depths["status"] = "caution" # Check if all depths are invalid - if np.any(self.depths['all_invalid']): - self.depths['messages'].append(['DEPTH: There are no valid depths for one or more transects.', 2, 10]) - self.depths['status'] = 'warning' + if np.any(self.depths["all_invalid"]): + self.depths["messages"].append( + [ + "DEPTH: There are no " + "valid depths for one or " + "more transects.", + 2, + 10, + ] + ) + self.depths["status"] = "warning" else: - self.depths['status'] = 'inactive' + self.depths["status"] = "inactive" - def boat_qa(self, meas): + def boat_qa1(self, meas): """Apply quality checks to boat data. Parameters @@ -1410,34 +2034,65 @@ class QAData(object): # Initialize variables n_transects = len(meas.transects) - data_type = {'BT': {'class': 'bt_vel', 'warning': 'BT-', 'caution': 'bt-', - 'filter': [('All: ', 0), ('Original: ', 1), ('ErrorVel: ', 2), - ('VertVel: ', 3), ('Other: ', 4), ('3Beams: ', 5)]}, - 'GGA': {'class': 'gga_vel', 'warning': 'GGA-', 'caution': 'gga-', - 'filter': [('All: ', 0), ('Original: ', 1), ('DGPS: ', 2), - ('Altitude: ', 3), ('Other: ', 4), ('HDOP: ', 5)]}, - 'VTG': {'class': 'vtg_vel', 'warning': 'VTG-', 'caution': 'vtg-', - 'filter': [('All: ', 0), ('Original: ', 1), ('Other: ', 4), ('HDOP: ', 5)]}} - self.boat['messages'] = [] + data_type = { + "BT": { + "class": "bt_vel", + "warning": "BT-", + "caution": "bt-", + "filter": [ + ("All: ", 0), + ("Original: ", 1), + ("ErrorVel: ", 2), + ("VertVel: ", 3), + ("Other: ", 4), + ("3Beams: ", 5), + ], + }, + "GGA": { + "class": "gga_vel", + "warning": "GGA-", + "caution": "gga-", + "filter": [ + ("All: ", 0), + ("Original: ", 1), + ("DGPS: ", 2), + ("Altitude: ", 3), + ("Other: ", 4), + ("HDOP: ", 5), + ], + }, + "VTG": { + "class": "vtg_vel", + "warning": "VTG-", + "caution": "vtg-", + "filter": [ + ("All: ", 0), + ("Original: ", 1), + ("Other: ", 4), + ("HDOP: ", 5), + ], + }, + } + self.boat["messages"] = [] for dt_key, dt_value in data_type.items(): - boat = getattr(self, dt_value['class']) + boat = getattr(self, dt_value["class"]) # Initialize dictionaries for each data type - boat['q_total_caution'] = np.tile(False, (n_transects, 6)) - boat['q_max_run_caution'] = np.tile(False, (n_transects, 6)) - boat['q_total_warning'] = np.tile(False, (n_transects, 6)) - boat['q_max_run_warning'] = np.tile(False, (n_transects, 6)) - boat['all_invalid'] = np.tile(False, n_transects) - boat['q_total'] = np.tile(np.nan, (n_transects, 6)) - boat['q_max_run'] = np.tile(np.nan, (n_transects, 6)) - boat['messages'] = [] + boat["q_total_caution"] = np.tile(False, (n_transects, 6)) + boat["q_max_run_caution"] = np.tile(False, (n_transects, 6)) + boat["q_total_warning"] = np.tile(False, (n_transects, 6)) + boat["q_max_run_warning"] = np.tile(False, (n_transects, 6)) + boat["all_invalid"] = np.tile(False, n_transects) + boat["q_total"] = np.tile(np.nan, (n_transects, 6)) + boat["q_max_run"] = np.tile(np.nan, (n_transects, 6)) + boat["messages"] = [] status_switch = 0 avg_speed_check = 0 # Check the results of each filter - for dt_filter in dt_value['filter']: - boat['status'] = 'inactive' + for dt_filter in dt_value["filter"]: + boat["status"] = "inactive" # Quality check each transect for n, transect in enumerate(meas.transects): @@ -1448,123 +2103,179 @@ class QAData(object): in_transect_idx = transect.in_transect_idx # Check to see if data are available for the data_type - if 
getattr(transect.boat_vel, dt_value['class']) is not None:
-                        boat['status'] = 'good'
+                    if getattr(transect.boat_vel, dt_value["class"]) is not None:
+                        boat["status"] = "good"

                         # Compute quality characteristics
-                        valid = getattr(transect.boat_vel, dt_value['class']).valid_data[dt_filter[1],
-                                                                                         in_transect_idx]
-                        q_total, q_max_run, number_invalid_ens = QAData.invalid_qa(valid, meas.discharge[n])
-                        boat['q_total'][n, dt_filter[1]] = q_total
-                        boat['q_max_run'][n, dt_filter[1]] = q_max_run
+                        valid = getattr(
+                            transect.boat_vel, dt_value["class"]
+                        ).valid_data[dt_filter[1], in_transect_idx]
+                        q_total, q_max_run, number_invalid_ens = QAData.invalid_qa(
+                            valid, meas.discharge[n]
+                        )
+                        boat["q_total"][n, dt_filter[1]] = q_total
+                        boat["q_max_run"][n, dt_filter[1]] = q_max_run

                         # Compute percentage compared to total
                         if meas.discharge[n].total == 0.0:
                             q_total_percent = np.nan
                             q_max_run_percent = np.nan
                         else:
-                            q_total_percent = np.abs((q_total / meas.discharge[n].total) * 100)
-                            q_max_run_percent = np.abs((q_max_run / meas.discharge[n].total) * 100)
+                            q_total_percent = np.abs(
+                                (q_total / meas.discharge[n].total) * 100
+                            )
+                            q_max_run_percent = np.abs(
+                                (q_max_run / meas.discharge[n].total) * 100
+                            )

                         # Check if all invalid
                         if dt_filter[1] == 0 and not np.any(valid):
-                            boat['all_invalid'][n] = True
+                            boat["all_invalid"][n] = True

                         # Apply total interpolated discharge threshold
                         if q_total_percent > self.q_total_threshold_warning:
-                            boat['q_total_warning'][n, dt_filter[1]] = True
+                            boat["q_total_warning"][n, dt_filter[1]] = True
                         elif q_total_percent > self.q_total_threshold_caution:
-                            boat['q_total_caution'][n, dt_filter[1]] = True
+                            boat["q_total_caution"][n, dt_filter[1]] = True

                         # Apply interpolated discharge run thresholds
                         if q_max_run_percent > self.q_run_threshold_warning:
-                            boat['q_max_run_warning'][n, dt_filter[1]] = True
+                            boat["q_max_run_warning"][n, dt_filter[1]] = True
                         elif q_max_run_percent > self.q_run_threshold_caution:
-                            boat['q_max_run_caution'][n, dt_filter[1]] = True
+                            boat["q_max_run_caution"][n, dt_filter[1]] = True

                         # Check boat velocity for vtg data
-                        if dt_key == 'VTG' and transect.boat_vel.selected == 'vtg_vel' and avg_speed_check == 0:
+                        if (
+                            dt_key == "VTG"
+                            and transect.boat_vel.selected == "vtg_vel"
+                            and avg_speed_check == 0
+                        ):
                             if transect.boat_vel.vtg_vel.u_mps is not None:
-                                avg_speed = np.nanmean((transect.boat_vel.vtg_vel.u_mps ** 2
-                                                        + transect.boat_vel.vtg_vel.v_mps ** 2) ** 0.5)
+                                avg_speed = np.nanmean(
+                                    (
+                                        transect.boat_vel.vtg_vel.u_mps**2
+                                        + transect.boat_vel.vtg_vel.v_mps**2
+                                    )
+                                    ** 0.5
+                                )
                                 if avg_speed < 0.24:
-                                    boat['q_total_caution'][n, 2] = True
+                                    boat["q_total_caution"][n, 2] = True
                                     if status_switch < 1:
                                         status_switch = 1
-                                    boat['messages'].append(
-                                        ['vtg-AvgSpeed: VTG data may not be accurate for average boat speed '
-                                         'less than' + '0.24 m/s (0.8 ft/s);', 2, 8])
+                                    boat["messages"].append(
+                                        [
+                                            "vtg-AvgSpeed: VTG data may "
+                                            "not be accurate for average "
+                                            "boat speed less than "
+                                            "0.24 m/s (0.8 ft/s);",
+                                            2,
+                                            8,
+                                        ]
+                                    )
                                     avg_speed_check = 1

             # Create message for consecutive invalid discharge
-            if boat['q_max_run_warning'][:, dt_filter[1]].any():
-                if dt_key == 'BT':
+            if boat["q_max_run_warning"][:, dt_filter[1]].any():
+                if dt_key == "BT":
                     module_code = 7
                 else:
                     module_code = 8
-                boat['messages'].append(
-                    [dt_value['warning'] + dt_filter[0] +
-                     'Int. 
Q for consecutive invalid ensembles exceeds ' + - '%3.1f' % self.q_run_threshold_warning + '%;', 1, module_code]) + boat["messages"].append( + [ + dt_value["warning"] + + dt_filter[0] + + "Int. Q for consecutive invalid ensembles exceeds " + + "%3.1f" % self.q_run_threshold_warning + + "%;", + 1, + module_code, + ] + ) status_switch = 2 - elif boat['q_max_run_caution'][:, dt_filter[1]].any(): - if dt_key == 'BT': + elif boat["q_max_run_caution"][:, dt_filter[1]].any(): + if dt_key == "BT": module_code = 7 else: module_code = 8 - boat['messages'].append( - [dt_value['caution'] + dt_filter[0] + - 'Int. Q for consecutive invalid ensembles exceeds ' + - '%3.1f' % self.q_run_threshold_caution + '%;', 2, module_code]) + boat["messages"].append( + [ + dt_value["caution"] + + dt_filter[0] + + "Int. Q for consecutive invalid ensembles exceeds " + + "%3.1f" % self.q_run_threshold_caution + + "%;", + 2, + module_code, + ] + ) if status_switch < 1: status_switch = 1 # Create message for total invalid discharge - if boat['q_total_warning'][:, dt_filter[1]].any(): - if dt_key == 'BT': + if boat["q_total_warning"][:, dt_filter[1]].any(): + if dt_key == "BT": module_code = 7 else: module_code = 8 - boat['messages'].append( - [dt_value['warning'] + dt_filter[0] + - 'Int. Q for invalid ensembles in a transect exceeds ' + - '%3.1f' % self.q_total_threshold_warning + '%;', 1, module_code]) + boat["messages"].append( + [ + dt_value["warning"] + + dt_filter[0] + + "Int. Q for invalid ensembles in a transect exceeds" + " " + "%3.1f" % self.q_total_threshold_warning + "%;", + 1, + module_code, + ] + ) status_switch = 2 - elif boat['q_total_caution'][:, dt_filter[1]].any(): - if dt_key == 'BT': + elif boat["q_total_caution"][:, dt_filter[1]].any(): + if dt_key == "BT": module_code = 7 else: module_code = 8 - boat['messages'].append( - [dt_value['caution'] + dt_filter[0] + - 'Int. Q for invalid ensembles in a transect exceeds ' + - '%3.1f' % self.q_total_threshold_caution + '%;', 2, module_code]) + boat["messages"].append( + [ + dt_value["caution"] + + dt_filter[0] + + "Int. 
Q for invalid ensembles in a transect exceeds "
+                        "" "%3.1f" % self.q_total_threshold_caution + "%;",
+                        2,
+                        module_code,
+                    ]
+                )
                 if status_switch < 1:
                     status_switch = 1

             # Create message for all data invalid
-            if boat['all_invalid'].any():
-                boat['status'] = 'warning'
-                if dt_key == 'BT':
+            if boat["all_invalid"].any():
+                boat["status"] = "warning"
+                if dt_key == "BT":
                     module_code = 7
                 else:
                     module_code = 8
-                boat['messages'].append(
-                    [dt_value['warning'] + dt_value['filter'][0][0] +
-                     'There are no valid data for one or more transects.;', 1, module_code])
+                boat["messages"].append(
+                    [
+                        dt_value["warning"]
+                        + dt_value["filter"][0][0]
+                        + "There are no valid data for one or more transects.;",
+                        1,
+                        module_code,
+                    ]
+                )

             # Set status
             if status_switch == 2:
-                boat['status'] = 'warning'
+                boat["status"] = "warning"
             elif status_switch == 1:
-                boat['status'] = 'caution'
+                boat["status"] = "caution"

-            setattr(self, dt_value['class'], boat)
+            setattr(self, dt_value["class"], boat)

         lag_gga = []
         lag_vtg = []
-        self.gga_vel['lag_status'] = 'good'
-        self.vtg_vel['lag_status'] = 'good'
+        self.gga_vel["lag_status"] = "good"
+        self.vtg_vel["lag_status"] = "good"
         for transect in meas.transects:
             gga, vtg = TransectData.compute_gps_lag(transect)
             if gga is not None:
@@ -1573,26 +2284,323 @@ class QAData(object):
                 lag_vtg.append(vtg)
         if len(lag_gga) > 0:
             if np.mean(np.abs(lag_gga)) > 10:
-                self.gga_vel['messages'].append(['GGA: BT and GGA do not appear to be sychronized', 1, 8])
-                if self.gga_vel['status'] != 'warning':
-                    self.gga_vel['status'] = 'warning'
-                    self.gga_vel['lag_status'] = 'warning'
+                self.gga_vel["messages"].append(
+                    ["GGA: BT and GGA do not appear to be synchronized", 1, 8]
+                )
+                if self.gga_vel["status"] != "warning":
+                    self.gga_vel["status"] = "warning"
+                    self.gga_vel["lag_status"] = "warning"
             elif np.mean(np.abs(lag_gga)) > 2:
-                self.gga_vel['messages'].append(['gga: Lag between BT and GGA > 2 sec', 2, 8])
-                if self.gga_vel['status'] != 'warning':
-                    self.gga_vel['status'] = 'caution'
-                    self.gga_vel['lag_status'] = 'caution'
+                self.gga_vel["messages"].append(
+                    ["gga: Lag between BT and GGA > 2 sec", 2, 8]
+                )
+                if self.gga_vel["status"] != "warning":
+                    self.gga_vel["status"] = "caution"
+                    self.gga_vel["lag_status"] = "caution"
         if len(lag_vtg) > 0:
             if np.mean(np.abs(lag_vtg)) > 10:
-                self.vtg_vel['messages'].append(['VTG: BT and VTG do not appear to be sychronized', 1, 8])
-                if self.vtg_vel['status'] != 'warning':
-                    self.vtg_vel['status'] = 'warning'
-                    self.vtg_vel['lag status'] = 'warning'
+                self.vtg_vel["messages"].append(
+                    ["VTG: BT and VTG do not appear to be synchronized", 1, 8]
+                )
+                if self.vtg_vel["status"] != "warning":
+                    self.vtg_vel["status"] = "warning"
+                    self.vtg_vel["lag_status"] = "warning"
             elif np.mean(np.abs(lag_vtg)) > 2:
-                self.vtg_vel['messages'].append(['vtg: Lag between BT and VTG > 2 sec', 2, 8])
-                if self.vtg_vel['status'] != 'warning':
-                    self.vtg_vel['status'] = 'caution'
-                    self.vtg_vel['lag_status'] = 'caution'
+                self.vtg_vel["messages"].append(
+                    ["vtg: Lag between BT and VTG > 2 sec", 2, 8]
+                )
+                if self.vtg_vel["status"] != "warning":
+                    self.vtg_vel["status"] = "caution"
+                    self.vtg_vel["lag_status"] = "caution"
+
+    def boat_qa(self, meas):
+        """Apply quality checks to boat data.
+ + Parameters + ---------- + meas: Measurement + Object of class Measurement + """ + + # Initialize variables + n_transects = len(meas.transects) + data_type = { + "BT": { + "class": "bt_vel", + "warning": "BT-", + "caution": "bt-", + "filter": [ + ("All: ", 0), + ("Original: ", 1), + ("ErrorVel: ", 2), + ("VertVel: ", 3), + ("Other: ", 4), + ("3Beams: ", 5), + ], + }, + "GGA": { + "class": "gga_vel", + "warning": "GGA-", + "caution": "gga-", + "filter": [ + ("All: ", 0), + ("Original: ", 1), + ("DGPS: ", 2), + ("Altitude: ", 3), + ("Other: ", 4), + ("HDOP: ", 5), + ], + }, + "VTG": { + "class": "vtg_vel", + "warning": "VTG-", + "caution": "vtg-", + "filter": [ + ("All: ", 0), + ("Original: ", 1), + ("Other: ", 4), + ("HDOP: ", 5), + ], + }, + } + self.boat["messages"] = [] + + for dt_key, dt_value in data_type.items(): + boat = getattr(self, dt_value["class"]) + + # Initialize dictionaries for each data type + boat["q_total_caution"] = np.tile(False, (n_transects, 6)) + boat["q_max_run_caution"] = np.tile(False, (n_transects, 6)) + boat["q_total_warning"] = np.tile(False, (n_transects, 6)) + boat["q_max_run_warning"] = np.tile(False, (n_transects, 6)) + boat["all_invalid"] = np.tile(False, n_transects) + boat["q_total"] = np.tile(np.nan, (n_transects, 6)) + boat["q_max_run"] = np.tile(np.nan, (n_transects, 6)) + boat["messages"] = [] + status_switch = 0 + avg_speed_check = 0 + + # Check the results of each filter + for dt_filter in dt_value["filter"]: + boat["status"] = "inactive" + + # Quality check each transect + for n, transect in enumerate(meas.transects): + + # Evaluate on transects used in the discharge computation + if transect.checked: + + in_transect_idx = transect.in_transect_idx + + # Check to see if data are available for the data_type + if getattr(transect.boat_vel, dt_value["class"]) is not None: + boat["status"] = "good" + + # Compute quality characteristics + valid = getattr( + transect.boat_vel, dt_value["class"] + ).valid_data[dt_filter[1], in_transect_idx] + + # Check if all invalid + if dt_filter[1] == 0 and not np.any(valid): + boat["all_invalid"][n] = True + + else: + ( + q_total, + q_max_run, + number_invalid_ens, + ) = QAData.invalid_qa(valid, meas.discharge[n]) + boat["q_total"][n, dt_filter[1]] = q_total + boat["q_max_run"][n, dt_filter[1]] = q_max_run + + # Compute percentage compared to total + if meas.discharge[n].total == 0.0: + q_total_percent = np.nan + q_max_run_percent = np.nan + else: + q_total_percent = np.abs( + (q_total / meas.discharge[n].total) * 100 + ) + q_max_run_percent = np.abs( + (q_max_run / meas.discharge[n].total) * 100 + ) + + # Apply total interpolated discharge threshold + if q_total_percent > self.q_total_threshold_warning: + boat["q_total_warning"][n, dt_filter[1]] = True + elif q_total_percent > self.q_total_threshold_caution: + boat["q_total_caution"][n, dt_filter[1]] = True + + # Apply interpolated discharge run thresholds + if q_max_run_percent > self.q_run_threshold_warning: + boat["q_max_run_warning"][n, dt_filter[1]] = True + elif q_max_run_percent > self.q_run_threshold_caution: + boat["q_max_run_caution"][n, dt_filter[1]] = True + + # Check boat velocity for vtg data + if ( + dt_key == "VTG" + and transect.boat_vel.selected == "vtg_vel" + and avg_speed_check == 0 + ): + if transect.boat_vel.vtg_vel.u_mps is not None: + avg_speed = np.nanmean( + ( + transect.boat_vel.vtg_vel.u_mps**2 + + transect.boat_vel.vtg_vel.v_mps**2 + ) + ** 0.5 + ) + if avg_speed < 0.24: + boat["q_total_caution"][n, 2] = True + if status_switch < 1: + 
status_switch = 1
+                                        boat["messages"].append(
+                                            [
+                                                "vtg-AvgSpeed: VTG data may "
+                                                "not be accurate for average "
+                                                "boat speed less than "
+                                                "0.24 m/s (0.8 ft/s);",
+                                                2,
+                                                8,
+                                            ]
+                                        )
+                                        avg_speed_check = 1
+
+                # Create message for consecutive invalid discharge
+                if boat["q_max_run_warning"].any():
+                    if dt_key == "BT":
+                        module_code = 7
+                    else:
+                        module_code = 8
+                    boat["messages"].append(
+                        [
+                            dt_value["warning"]
+                            + "Int. Q for consecutive invalid ensembles exceeds "
+                            + "%3.1f" % self.q_run_threshold_warning
+                            + "%;",
+                            1,
+                            module_code,
+                        ]
+                    )
+                    status_switch = 2
+                elif boat["q_total_warning"].any():
+                    if dt_key == "BT":
+                        module_code = 7
+                    else:
+                        module_code = 8
+                    boat["messages"].append(
+                        [
+                            dt_value["warning"]
+                            + "Int. Q for invalid ensembles in a transect exceeds"
+                            " " "%3.1f" % self.q_total_threshold_warning + "%;",
+                            1,
+                            module_code,
+                        ]
+                    )
+                    status_switch = 2
+                elif boat["q_max_run_caution"].any():
+                    if dt_key == "BT":
+                        module_code = 7
+                    else:
+                        module_code = 8
+                    boat["messages"].append(
+                        [
+                            dt_value["caution"]
+                            + "Int. Q for consecutive invalid ensembles exceeds "
+                            + "%3.1f" % self.q_run_threshold_caution
+                            + "%;",
+                            2,
+                            module_code,
+                        ]
+                    )
+                    if status_switch < 1:
+                        status_switch = 1
+
+                elif boat["q_total_caution"].any():
+                    if dt_key == "BT":
+                        module_code = 7
+                    else:
+                        module_code = 8
+                    boat["messages"].append(
+                        [
+                            dt_value["caution"]
+                            + "Int. Q for invalid ensembles in a transect exceeds "
+                            "" "%3.1f" % self.q_total_threshold_caution + "%;",
+                            2,
+                            module_code,
+                        ]
+                    )
+                    if status_switch < 1:
+                        status_switch = 1
+
+            # Create message for all data invalid
+            if boat["all_invalid"].any():
+                boat["status"] = "warning"
+                if dt_key == "BT":
+                    module_code = 7
+                else:
+                    module_code = 8
+                boat["messages"].append(
+                    [
+                        dt_value["warning"]
+                        + dt_value["filter"][0][0]
+                        + "There are no valid data for one or more transects.;",
+                        1,
+                        module_code,
+                    ]
+                )
+
+            # Set status
+            if status_switch == 2:
+                boat["status"] = "warning"
+            elif status_switch == 1:
+                boat["status"] = "caution"
+
+            setattr(self, dt_value["class"], boat)
+
+        lag_gga = []
+        lag_vtg = []
+        self.gga_vel["lag_status"] = "good"
+        self.vtg_vel["lag_status"] = "good"
+        for transect in meas.transects:
+            gga, vtg = TransectData.compute_gps_lag(transect)
+            if gga is not None:
+                lag_gga.append(gga)
+            if vtg is not None:
+                lag_vtg.append(vtg)
+        if len(lag_gga) > 0:
+            if np.mean(np.abs(lag_gga)) > 10:
+                self.gga_vel["messages"].append(
+                    ["GGA: BT and GGA do not appear to be synchronized", 1, 8]
+                )
+                if self.gga_vel["status"] != "warning":
+                    self.gga_vel["status"] = "warning"
+                    self.gga_vel["lag_status"] = "warning"
+            elif np.mean(np.abs(lag_gga)) > 2:
+                self.gga_vel["messages"].append(
+                    ["gga: Lag between BT and GGA > 2 sec", 2, 8]
+                )
+                if self.gga_vel["status"] != "warning":
+                    self.gga_vel["status"] = "caution"
+                    self.gga_vel["lag_status"] = "caution"
+        if len(lag_vtg) > 0:
+            if np.mean(np.abs(lag_vtg)) > 10:
+                self.vtg_vel["messages"].append(
+                    ["VTG: BT and VTG do not appear to be synchronized", 1, 8]
+                )
+                if self.vtg_vel["status"] != "warning":
+                    self.vtg_vel["status"] = "warning"
+                    self.vtg_vel["lag_status"] = "warning"
+            elif np.mean(np.abs(lag_vtg)) > 2:
+                self.vtg_vel["messages"].append(
+                    ["vtg: Lag between BT and VTG > 2 sec", 2, 8]
+                )
+                if self.vtg_vel["status"] != "warning":
+                    self.vtg_vel["status"] = "caution"
+                    self.vtg_vel["lag_status"] = "caution"

     def water_qa(self, meas):
         """Apply quality checks to water data.
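Both boat_qa variants above finish with the same GPS lag classification: the
mean absolute lag between bottom track and each GPS stream maps to a warning
above 10 s and a caution above 2 s. A hedged sketch of that rule, assuming
(as with TransectData.compute_gps_lag) one lag value in seconds per transect:

import numpy as np


def lag_status(lags_sec):
    if len(lags_sec) == 0:
        return "good"
    mean_abs_lag = np.mean(np.abs(lags_sec))
    if mean_abs_lag > 10:
        return "warning"  # BT and GPS do not appear to be synchronized
    if mean_abs_lag > 2:
        return "caution"  # lag between BT and GPS > 2 sec
    return "good"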
@@ -1604,8 +2612,7 @@ class QAData(object): """ # Initialize filter labels and indices - prefix = ['All: ', 'Original: ', 'ErrorVel: ', 'VertVel: ', 'Other: ', '3Beams: ', 'SNR:'] - if meas.transects[0].adcp.manufacturer == 'TRDI': + if meas.transects[0].adcp.manufacturer == "TRDI": filter_index = [0, 1, 2, 3, 4, 5] else: filter_index = [0, 1, 2, 3, 4, 5, 7] @@ -1613,17 +2620,18 @@ class QAData(object): n_transects = len(meas.transects) n_filters = len(filter_index) + 1 # Initialize dictionaries for each data type - self.w_vel['q_total_caution'] = np.tile(False, (n_transects, n_filters)) - self.w_vel['q_max_run_caution'] = np.tile(False, (n_transects, n_filters)) - self.w_vel['q_total_warning'] = np.tile(False, (n_transects, n_filters)) - self.w_vel['q_max_run_warning'] = np.tile(False, (n_transects, n_filters)) - self.w_vel['all_invalid'] = np.tile(False, n_transects) - self.w_vel['q_total'] = np.tile(np.nan, (n_transects, n_filters)) - self.w_vel['q_max_run'] = np.tile(np.nan, (n_transects, n_filters)) - self.w_vel['messages'] = [] + self.w_vel["q_total_caution"] = np.tile(False, (n_transects, n_filters)) + self.w_vel["q_max_run_caution"] = np.tile(False, (n_transects, n_filters)) + self.w_vel["q_total_warning"] = np.tile(False, (n_transects, n_filters)) + self.w_vel["q_max_run_warning"] = np.tile(False, (n_transects, n_filters)) + self.w_vel["all_invalid"] = np.tile(False, n_transects) + self.w_vel["q_total"] = np.tile(np.nan, (n_transects, n_filters)) + self.w_vel["q_max_run"] = np.tile(np.nan, (n_transects, n_filters)) + self.w_vel["messages"] = [] status_switch = 0 - # TODO if meas had a property checked as list it would save creating that list multiple times + # TODO if meas had a property checked as list it would save creating + # that list multiple times checked = [] for transect in meas.transects: checked.append(transect.checked) @@ -1635,10 +2643,19 @@ class QAData(object): # Loop through transects for n, transect in enumerate(meas.transects): if transect.checked: - valid_original = np.any(transect.w_vel.valid_data[1, :, transect.in_transect_idx].T, 0) - - # Determine what data each filter have marked invalid. Original invalid data are excluded - valid = np.any(transect.w_vel.valid_data[filter_idx, :, transect.in_transect_idx].T, 0) + valid_original = np.any( + transect.w_vel.valid_data[1, :, transect.in_transect_idx].T, + 0, + ) + + # Determine what data each filter have marked + # invalid. Original invalid data are excluded + valid = np.any( + transect.w_vel.valid_data[ + filter_idx, :, transect.in_transect_idx + ].T, + 0, + ) if filter_idx > 1: valid_int = valid.astype(int) - valid_original.astype(int) valid = valid_int != -1 @@ -1646,88 +2663,140 @@ class QAData(object): # Check if all data are invalid if filter_idx == 0: if np.nansum(valid.astype(int)) < 1: - self.w_vel['all_invalid'][n] = True - # TODO seems like the rest of this should be under else of all invalid or multiple messages - # generated. + self.w_vel["all_invalid"][n] = True + # TODO seems like the rest of this should be under + # else of all invalid or multiple messages generated. 
# Compute characteristics - q_total, q_max_run, number_invalid_ens = QAData.invalid_qa(valid, meas.discharge[n]) - self.w_vel['q_total'][n, filter_idx] = q_total - self.w_vel['q_max_run'][n, filter_idx] = q_max_run + q_total, q_max_run, number_invalid_ens = QAData.invalid_qa( + valid, meas.discharge[n] + ) + self.w_vel["q_total"][n, filter_idx] = q_total + self.w_vel["q_max_run"][n, filter_idx] = q_max_run # Compute percentage compared to total if meas.discharge[n].total == 0.0: q_total_percent = np.nan q_max_run_percent = np.nan else: - q_total_percent = np.abs((q_total / meas.discharge[n].total) * 100) - q_max_run_percent = np.abs((q_max_run / meas.discharge[n].total) * 100) - - # Check total invalid discharge in ensembles for warning + q_total_percent = np.abs( + (q_total / meas.discharge[n].total) * 100 + ) + q_max_run_percent = np.abs( + (q_max_run / meas.discharge[n].total) * 100 + ) + + # Check total invalid discharge in ensembles for + # warning if q_total_percent > self.q_total_threshold_warning: - self.w_vel['q_total_warning'][n, filter_idx] = True + self.w_vel["q_total_warning"][n, filter_idx] = True # Apply run or cluster thresholds if q_max_run_percent > self.q_run_threshold_warning: - self.w_vel['q_max_run_warning'][n, filter_idx] = True + self.w_vel["q_max_run_warning"][n, filter_idx] = True elif q_max_run_percent > self.q_run_threshold_caution: - self.w_vel['q_max_run_caution'][n, filter_idx] = True + self.w_vel["q_max_run_caution"][n, filter_idx] = True - # Compute percent discharge interpolated for both cells and ensembles + # Compute percent discharge interpolated for both + # cells and ensembles # This approach doesn't exclude original data - valid_cells = transect.w_vel.valid_data[filter_idx, :, transect.in_transect_idx].T - q_invalid_total = np.nansum(meas.discharge[n].middle_cells[np.logical_not(valid_cells)]) \ - + np.nansum(meas.discharge[n].top_ens[np.logical_not(valid)]) \ - + np.nansum(meas.discharge[n].bottom_ens[np.logical_not(valid)]) - q_invalid_total_percent = (q_invalid_total / meas.discharge[n].total) * 100 + valid_cells = transect.w_vel.valid_data[ + filter_idx, :, transect.in_transect_idx + ].T + q_invalid_total = ( + np.nansum( + meas.discharge[n].middle_cells[ + np.logical_not(valid_cells) + ] + ) + + np.nansum( + meas.discharge[n].top_ens[np.logical_not(valid)] + ) + + np.nansum( + meas.discharge[n].bottom_ens[np.logical_not(valid)] + ) + ) + q_invalid_total_percent = ( + q_invalid_total / meas.discharge[n].total + ) * 100 if q_invalid_total_percent > self.q_total_threshold_caution: - self.w_vel['q_total_caution'][n, filter_idx] = True - - # Generate messages for ensemble run or clusters - if np.any(self.w_vel['q_max_run_warning'][:, filter_idx]): - self.w_vel['messages'].append(['WT-' + prefix[prefix_idx] - + 'Int. Q for consecutive invalid ensembles exceeds ' - + '%3.0f' % self.q_run_threshold_warning - + '%;', 1, 11]) - status_switch = 2 - elif np.any(self.w_vel['q_max_run_caution'][:, filter_idx]): - self.w_vel['messages'].append(['wt-' + prefix[prefix_idx] - + 'Int. Q for consecutive invalid ensembles exceeds ' - + '%3.0f' % self.q_run_threshold_caution - + '%;', 2, 11]) - if status_switch < 1: - status_switch = 1 + self.w_vel["q_total_caution"][n, filter_idx] = True + + # Generate messages for ensemble run or clusters + if np.any(self.w_vel["q_max_run_warning"]): + self.w_vel["messages"].append( + [ + "WT-" + "Int. 
Q for consecutive " + "invalid ensembles " + "exceeds " + "%3.0f" % self.q_run_threshold_warning + "%;", + 1, + 11, + ] + ) + status_switch = 2 - # Generate message for total_invalid Q - if np.any(self.w_vel['q_total_warning'][:, filter_idx]): - self.w_vel['messages'].append(['WT-' + prefix[prefix_idx] - + 'Int. Q for invalid cells and ensembles in a transect exceeds ' - + '%3.0f' % self.q_total_threshold_warning - + '%;', 1, 11]) - status_switch = 2 - elif np.any(self.w_vel['q_total_caution'][:, filter_idx]): - self.w_vel['messages'].append(['wt-' + prefix[prefix_idx] - + 'Int. Q for invalid cells and ensembles in a transect exceeds ' - + '%3.0f' % self.q_total_threshold_caution - + '%;', 2, 11]) - if status_switch < 1: - status_switch = 1 + elif np.any(self.w_vel["q_total_warning"]): + self.w_vel["messages"].append( + [ + "WT-" + "Int. Q for invalid " + "cells and ensembles in " + "a transect exceeds " + + "%3.0f" % self.q_total_threshold_warning + + "%;", + 1, + 11, + ] + ) + status_switch = 2 + + elif np.any(self.w_vel["q_max_run_caution"]): + self.w_vel["messages"].append( + [ + "wt-" + "Int. Q for consecutive " + "invalid ensembles " + "exceeds " + "%3.0f" % self.q_run_threshold_caution + "%;", + 2, + 11, + ] + ) + if status_switch < 1: + status_switch = 1 + + elif np.any(self.w_vel["q_total_caution"]): + self.w_vel["messages"].append( + [ + "wt-" + "Int. Q for invalid " + "cells and ensembles in " + "a transect exceeds " + + "%3.0f" % self.q_total_threshold_caution + + "%;", + 2, + 11, + ] + ) + if status_switch < 1: + status_switch = 1 # Generate message for all invalid - if np.any(self.w_vel['all_invalid']): - self.w_vel['messages'].append(['WT-' + prefix[0] + 'There are no valid data for one or more transects.', - 1, 11]) + if np.any(self.w_vel["all_invalid"]): + self.w_vel["messages"].append( + [ + "WT-" + "There are no valid data for one or more transects.", + 1, + 11, + ] + ) status_switch = 2 # Set status - self.w_vel['status'] = 'good' + self.w_vel["status"] = "good" if status_switch == 2: - self.w_vel['status'] = 'warning' + self.w_vel["status"] = "warning" elif status_switch == 1: - self.w_vel['status'] = 'caution' + self.w_vel["status"] = "caution" else: - self.w_vel['status'] = 'inactive' + self.w_vel["status"] = "inactive" def extrapolation_qa(self, meas): """Apply quality checks to extrapolation methods @@ -1738,7 +2807,7 @@ class QAData(object): Object of class Measurement """ - self.extrapolation['messages'] = [] + self.extrapolation["messages"] = [] checked = [] discharges = [] @@ -1748,18 +2817,28 @@ class QAData(object): discharges.append(meas.discharge[n]) if any(checked): - self.extrapolation['status'] = 'good' + self.extrapolation["status"] = "good" extrap_uncertainty = Uncertainty.uncertainty_extrapolation(meas, discharges) if np.abs(extrap_uncertainty) > 2: - self.extrapolation['messages'].append(['Extrapolation: The extrapolation uncertainty is more than ' - + '2 percent;', 2, 12]) - self.extrapolation['messages'].append([' Carefully review the extrapolation;', 2, 12]) - self.extrapolation['status'] = 'caution' + self.extrapolation["messages"].append( + [ + "Extrapolation: " + "The extrapolation " + "uncertainty is " + "more than " + "2 percent;", + 2, + 12, + ] + ) + self.extrapolation["messages"].append( + [" Carefully review the extrapolation;", 2, 12] + ) + self.extrapolation["status"] = "caution" else: - self.extrapolation['status'] = 'inactive' + self.extrapolation["status"] = "inactive" - def edges_qa(self, meas): + def edges_qa1(self, meas): """Apply 
quality checks to edge estimates Parameters @@ -1769,7 +2848,7 @@ class QAData(object): """ # Initialize variables - self.edges['messages'] = [] + self.edges["messages"] = [] checked = [] left_q = [] right_q = [] @@ -1802,71 +2881,91 @@ class QAData(object): if any(checked): # Set default status to good - self.edges['status'] = 'good' + self.edges["status"] = "good" mean_total_q = np.nanmean(total_q) # Check left edge q > 5% - self.edges['left_q'] = 0 + self.edges["left_q"] = 0 left_q_percent = (np.nanmean(left_q) / mean_total_q) * 100 temp_idx = np.where(left_q / mean_total_q > 0.05)[0] if len(temp_idx) > 0: - self.edges['left_q_idx'] = np.array(transect_idx)[temp_idx] + self.edges["left_q_idx"] = np.array(transect_idx)[temp_idx] else: - self.edges['left_q_idx'] = [] + self.edges["left_q_idx"] = [] if np.abs(left_q_percent) > 5: - self.edges['status'] = 'caution' - self.edges['messages'].append(['Edges: Left edge Q is greater than 5%;', 1, 13]) - self.edges['left_q'] = 1 - elif len(self.edges['left_q_idx']) > 0: - self.edges['status'] = 'caution' - self.edges['messages'].append( - ['Edges: One or more transects have a left edge Q greater than 5%;', 1, 13]) - self.edges['left_q'] = 1 + self.edges["status"] = "caution" + self.edges["messages"].append( + ["Edges: Left edge Q is greater than 5%;", 1, 13] + ) + self.edges["left_q"] = 1 + elif len(self.edges["left_q_idx"]) > 0: + self.edges["status"] = "caution" + self.edges["messages"].append( + [ + "Edges: One or more transects have a left edge Q " + "greater than 5%;", + 1, + 13, + ] + ) + self.edges["left_q"] = 1 # Check right edge q > 5% - self.edges['right_q'] = 0 + self.edges["right_q"] = 0 right_q_percent = (np.nanmean(right_q) / mean_total_q) * 100 temp_idx = np.where(right_q / mean_total_q > 0.05)[0] if len(temp_idx) > 0: - self.edges['right_q_idx'] = np.array(transect_idx)[temp_idx] + self.edges["right_q_idx"] = np.array(transect_idx)[temp_idx] else: - self.edges['right_q_idx'] = [] + self.edges["right_q_idx"] = [] if np.abs(right_q_percent) > 5: - self.edges['status'] = 'caution' - self.edges['messages'].append(['Edges: Right edge Q is greater than 5%;', 1, 13]) - self.edges['right_q'] = 1 - elif len(self.edges['right_q_idx']) > 0: - self.edges['status'] = 'caution' - self.edges['messages'].append( - ['Edges: One or more transects have a right edge Q greater than 5%;', 1, 13]) - self.edges['right_q'] = 1 + self.edges["status"] = "caution" + self.edges["messages"].append( + ["Edges: Right edge Q is greater than 5%;", 1, 13] + ) + self.edges["right_q"] = 1 + elif len(self.edges["right_q_idx"]) > 0: + self.edges["status"] = "caution" + self.edges["messages"].append( + [ + "Edges: One or more transects have a right edge Q " + "greater than 5%;", + 1, + 13, + ] + ) + self.edges["right_q"] = 1 # Check for consistent sign q_positive = [] - self.edges['left_sign'] = 0 + self.edges["left_sign"] = 0 for q in left_q: if q >= 0: q_positive.append(True) else: q_positive.append(False) if len(np.unique(q_positive)) > 1 and left_q_percent > 0.5: - self.edges['status'] = 'caution' - self.edges['messages'].append(['Edges: Sign of left edge Q is not consistent;', 2, 13]) - self.edges['left_sign'] = 1 + self.edges["status"] = "caution" + self.edges["messages"].append( + ["Edges: Sign of left edge Q is not consistent;", 2, 13] + ) + self.edges["left_sign"] = 1 q_positive = [] - self.edges['right_sign'] = 0 + self.edges["right_sign"] = 0 for q in right_q: if q >= 0: q_positive.append(True) else: q_positive.append(False) if len(np.unique(q_positive)) > 1 
and right_q_percent > 0.5: - self.edges['status'] = 'caution' - self.edges['messages'].append(['Edges: Sign of right edge Q is not consistent;', 2, 13]) - self.edges['right_sign'] = 1 + self.edges["status"] = "caution" + self.edges["messages"].append( + ["Edges: Sign of right edge Q is not consistent;", 2, 13] + ) + self.edges["right_sign"] = 1 # Check distance moved dmg_5_percent = 0.05 * np.nanmean(dist_made_good) @@ -1874,97 +2973,378 @@ class QAData(object): right_threshold = np.nanmin([dmg_5_percent, avg_right_edge_dist]) temp_idx = np.where(dist_moved_right > right_threshold)[0] if len(temp_idx) > 0: - self.edges['right_dist_moved_idx'] = np.array(transect_idx)[temp_idx] - self.edges['status'] = 'caution' - self.edges['messages'].append(['Edges: Excessive boat movement in right edge ensembles;', 2, 13]) + self.edges["right_dist_moved_idx"] = np.array(transect_idx)[temp_idx] + self.edges["status"] = "caution" + self.edges["messages"].append( + ["Edges: Excessive boat movement in right edge ensembles;", 2, 13] + ) else: - self.edges['right_dist_moved_idx'] = [] + self.edges["right_dist_moved_idx"] = [] avg_left_edge_dist = np.nanmean(edge_dist_left) left_threshold = np.nanmin([dmg_5_percent, avg_left_edge_dist]) temp_idx = np.where(dist_moved_left > left_threshold)[0] if len(temp_idx) > 0: - self.edges['left_dist_moved_idx'] = np.array(transect_idx)[temp_idx] - self.edges['status'] = 'caution' - self.edges['messages'].append(['Edges: Excessive boat movement in left edge ensembles;', 2, 13]) + self.edges["left_dist_moved_idx"] = np.array(transect_idx)[temp_idx] + self.edges["status"] = "caution" + self.edges["messages"].append( + ["Edges: Excessive boat movement in left edge ensembles;", 2, 13] + ) else: - self.edges['left_dist_moved_idx'] = [] + self.edges["left_dist_moved_idx"] = [] # Check for edge ensembles marked invalid due to excluded distance - self.edges['invalid_transect_left_idx'] = [] - self.edges['invalid_transect_right_idx'] = [] + self.edges["invalid_transect_left_idx"] = [] + self.edges["invalid_transect_right_idx"] = [] for n, transect in enumerate(meas.transects): if transect.checked: ens_invalid = np.nansum(transect.w_vel.valid_data[0, :, :], 0) > 0 ens_cells_above_sl = np.nansum(transect.w_vel.cells_above_sl, 0) > 0 - ens_invalid = np.logical_not(np.logical_and(ens_invalid, ens_cells_above_sl)) + ens_invalid = np.logical_not( + np.logical_and(ens_invalid, ens_cells_above_sl) + ) if np.any(ens_invalid): - if transect.start_edge == 'Left': - invalid_left = ens_invalid[0:int(transect.edges.left.number_ensembles)] - invalid_right = ens_invalid[-int(transect.edges.right.number_ensembles):] + if transect.start_edge == "Left": + invalid_left = ens_invalid[ + 0 : int(transect.edges.left.number_ensembles) + ] + invalid_right = ens_invalid[ + -int(transect.edges.right.number_ensembles) : + ] else: - invalid_right = ens_invalid[0:int(transect.edges.right.number_ensembles)] - invalid_left = ens_invalid[-int(transect.edges.left.number_ensembles):] + invalid_right = ens_invalid[ + 0 : int(transect.edges.right.number_ensembles) + ] + invalid_left = ens_invalid[ + -int(transect.edges.left.number_ensembles) : + ] if len(invalid_left) > 0: left_invalid_percent = sum(invalid_left) / len(invalid_left) else: left_invalid_percent = 0 if len(invalid_right) > 0: - right_invalid_percent = sum(invalid_right) / len(invalid_right) + right_invalid_percent = sum(invalid_right) / len( + invalid_right + ) else: right_invalid_percent = 0 - max_invalid_percent = max([left_invalid_percent, 
right_invalid_percent]) * 100 + max_invalid_percent = ( + max([left_invalid_percent, right_invalid_percent]) * 100 + ) if max_invalid_percent > 25: - self.edges['status'] = 'caution' + self.edges["status"] = "caution" if np.any(invalid_left): - self.edges['invalid_transect_left_idx'].append(n) + self.edges["invalid_transect_left_idx"].append(n) if np.any(invalid_right): - self.edges['invalid_transect_right_idx'].append(n) + self.edges["invalid_transect_right_idx"].append(n) + + if ( + len(self.edges["invalid_transect_left_idx"]) > 0 + or len(self.edges["invalid_transect_right_idx"]) > 0 + ): + self.edges["messages"].append( + [ + "Edges: The percent of invalid ensembles exceeds 25% in" + + " one or more transects.", + 2, + 13, + ] + ) + + # Check edges for zero discharge + self.edges["left_zero"] = 0 + temp_idx = np.where(np.round(left_q, 4) == 0)[0] + if len(temp_idx) > 0: + self.edges["left_zero_idx"] = np.array(transect_idx)[temp_idx] + self.edges["status"] = "warning" + self.edges["messages"].append(["EDGES: Left edge has zero Q;", 1, 13]) + self.edges["left_zero"] = 2 + else: + self.edges["left_zero_idx"] = [] + + self.edges["right_zero"] = 0 + temp_idx = np.where(np.round(right_q, 4) == 0)[0] + if len(temp_idx) > 0: + self.edges["right_zero_idx"] = np.array(transect_idx)[temp_idx] + self.edges["status"] = "warning" + self.edges["messages"].append(["EDGES: Right edge has zero Q;", 1, 13]) + self.edges["right_zero"] = 2 + else: + self.edges["right_zero_idx"] = [] + + # Check consistent edge type + self.edges["left_type"] = 0 + if len(np.unique(left_type)) > 1: + self.edges["status"] = "warning" + self.edges["messages"].append( + ["EDGES: Left edge type is not consistent;", 1, 13] + ) + self.edges["left_type"] = 2 + + self.edges["right_type"] = 0 + if len(np.unique(right_type)) > 1: + self.edges["status"] = "warning" + self.edges["messages"].append( + ["EDGES: Right edge type is not consistent;", 1, 13] + ) + self.edges["right_type"] = 2 + else: + self.edges["status"] = "inactive" + + def edges_qa(self, meas): + """Apply quality checks to edge estimates - if len(self.edges['invalid_transect_left_idx']) > 0 or len(self.edges['invalid_transect_right_idx']) > 0: - self.edges['messages'].append(['Edges: The percent of invalid ensembles exceeds 25% in' + - ' one or more transects.', 2, 13]) + Parameters + ---------- + meas: Measurement + Object of class Measurement + """ + + # Initialize variables + self.edges["messages"] = [] + checked = [] + left_q = [] + right_q = [] + total_q = [] + edge_dist_left = [] + edge_dist_right = [] + dist_moved_left = [] + dist_moved_right = [] + dist_made_good = [] + left_type = [] + right_type = [] + transect_idx = [] + + for n, transect in enumerate(meas.transects): + checked.append(transect.checked) + + if transect.checked: + left_q.append(meas.discharge[n].left) + right_q.append(meas.discharge[n].right) + total_q.append(meas.discharge[n].total) + dmr, dml, dmg = QAData.edge_distance_moved(transect) + dist_moved_right.append(dmr) + dist_moved_left.append(dml) + dist_made_good.append(dmg) + edge_dist_left.append(transect.edges.left.distance_m) + edge_dist_right.append(transect.edges.right.distance_m) + left_type.append(transect.edges.left.type) + right_type.append(transect.edges.right.type) + transect_idx.append(n) + + if any(checked): + # Set default status to good + self.edges["status"] = "good" + + mean_total_q = np.nanmean(total_q) + + # Check left edge q > 5% + self.edges["left_q"] = 0 + + left_q_percent = (np.nanmean(left_q) / mean_total_q) * 100 + 
temp_idx = np.where(left_q / mean_total_q > 0.05)[0] + + if len(temp_idx) > 0: + self.edges["left_q_idx"] = np.array(transect_idx)[temp_idx] + else: + self.edges["left_q_idx"] = [] + + if np.abs(left_q_percent) > 5: + self.edges["left_q"] = 1 + elif len(self.edges["left_q_idx"]) > 0: + self.edges["left_q"] = 1 + + # Check right edge q > 5% + self.edges["right_q"] = 0 + right_q_percent = (np.nanmean(right_q) / mean_total_q) * 100 + temp_idx = np.where(right_q / mean_total_q > 0.05)[0] + if len(temp_idx) > 0: + self.edges["right_q_idx"] = np.array(transect_idx)[temp_idx] + else: + self.edges["right_q_idx"] = [] + if np.abs(right_q_percent) > 5: + self.edges["right_q"] = 1 + elif len(self.edges["right_q_idx"]) > 0: + self.edges["right_q"] = 1 + + # Edge Q message + if np.abs(right_q_percent) > 5 or np.abs(left_q_percent) > 5: + self.edges["status"] = "caution" + self.edges["messages"].append( + ["Edges: Edge Q is greater than 5%;", 1, 13] + ) + elif ( + len(self.edges["right_q_idx"]) > 0 or len(self.edges["left_q_idx"]) > 0 + ): + self.edges["status"] = "caution" + self.edges["messages"].append( + [ + "Edges: One or more transects have an edge Q " + "greater than 5%;", + 1, + 13, + ] + ) + + # Check for consistent sign + q_positive = [] + self.edges["left_sign"] = 0 + for q in left_q: + if q >= 0: + q_positive.append(True) + else: + q_positive.append(False) + if len(np.unique(q_positive)) > 1 and left_q_percent > 0.5: + self.edges["left_sign"] = 1 + + q_positive = [] + self.edges["right_sign"] = 0 + for q in right_q: + if q >= 0: + q_positive.append(True) + else: + q_positive.append(False) + if len(np.unique(q_positive)) > 1 and right_q_percent > 0.5: + self.edges["right_sign"] = 1 + + if self.edges["right_sign"] or self.edges["left_sign"]: + self.edges["status"] = "caution" + self.edges["messages"].append( + ["Edges: Sign of edge Q is not consistent;", 2, 13] + ) + + # Check distance moved + dmg_5_percent = 0.05 * np.nanmean(dist_made_good) + avg_right_edge_dist = np.nanmean(edge_dist_right) + right_threshold = np.nanmin([dmg_5_percent, avg_right_edge_dist]) + temp_idx = np.where(dist_moved_right > right_threshold)[0] + if len(temp_idx) > 0: + self.edges["right_dist_moved_idx"] = np.array(transect_idx)[temp_idx] + else: + self.edges["right_dist_moved_idx"] = [] + + avg_left_edge_dist = np.nanmean(edge_dist_left) + left_threshold = np.nanmin([dmg_5_percent, avg_left_edge_dist]) + temp_idx = np.where(dist_moved_left > left_threshold)[0] + if len(temp_idx) > 0: + self.edges["left_dist_moved_idx"] = np.array(transect_idx)[temp_idx] + else: + self.edges["left_dist_moved_idx"] = [] + + # Excessive movement message + if ( + len(self.edges["right_dist_moved_idx"]) > 0 + or len(self.edges["left_dist_moved_idx"]) > 0 + ): + self.edges["status"] = "caution" + self.edges["messages"].append( + ["Edges: Excessive boat movement in edge ensembles;", 2, 13] + ) + + # Check for edge ensembles marked invalid due to excluded distance + self.edges["invalid_transect_left_idx"] = [] + self.edges["invalid_transect_right_idx"] = [] + for n, transect in enumerate(meas.transects): + if transect.checked: + ens_invalid = np.nansum(transect.w_vel.valid_data[0, :, :], 0) > 0 + ens_cells_above_sl = np.nansum(transect.w_vel.cells_above_sl, 0) > 0 + ens_invalid = np.logical_not( + np.logical_and(ens_invalid, ens_cells_above_sl) + ) + if np.any(ens_invalid): + if transect.start_edge == "Left": + invalid_left = ens_invalid[ + 0 : int(transect.edges.left.number_ensembles) + ] + invalid_right = ens_invalid[ +
-int(transect.edges.right.number_ensembles) : + ] + else: + invalid_right = ens_invalid[ + 0 : int(transect.edges.right.number_ensembles) + ] + invalid_left = ens_invalid[ + -int(transect.edges.left.number_ensembles) : + ] + if len(invalid_left) > 0: + left_invalid_percent = sum(invalid_left) / len(invalid_left) + else: + left_invalid_percent = 0 + if len(invalid_right) > 0: + right_invalid_percent = sum(invalid_right) / len( + invalid_right + ) + else: + right_invalid_percent = 0 + max_invalid_percent = ( + max([left_invalid_percent, right_invalid_percent]) * 100 + ) + if max_invalid_percent > 25: + self.edges["status"] = "caution" + if np.any(invalid_left): + self.edges["invalid_transect_left_idx"].append(n) + if np.any(invalid_right): + self.edges["invalid_transect_right_idx"].append(n) + + if ( + len(self.edges["invalid_transect_left_idx"]) > 0 + or len(self.edges["invalid_transect_right_idx"]) > 0 + ): + self.edges["messages"].append( + [ + "Edges: The percent of invalid ensembles exceeds 25% in" + + " one or more transects.", + 2, + 13, + ] + ) # Check edges for zero discharge - self.edges['left_zero'] = 0 + self.edges["left_zero"] = 0 temp_idx = np.where(np.round(left_q, 4) == 0)[0] if len(temp_idx) > 0: - self.edges['left_zero_idx'] = np.array(transect_idx)[temp_idx] - self.edges['status'] = 'warning' - self.edges['messages'].append(['EDGES: Left edge has zero Q;', 1, 13]) - self.edges['left_zero'] = 2 + self.edges["left_zero_idx"] = np.array(transect_idx)[temp_idx] + self.edges["left_zero"] = 2 else: - self.edges['left_zero_idx'] = [] + self.edges["left_zero_idx"] = [] - self.edges['right_zero'] = 0 + self.edges["right_zero"] = 0 temp_idx = np.where(np.round(right_q, 4) == 0)[0] if len(temp_idx) > 0: - self.edges['right_zero_idx'] = np.array(transect_idx)[temp_idx] - self.edges['status'] = 'warning' - self.edges['messages'].append(['EDGES: Right edge has zero Q;', 1, 13]) - self.edges['right_zero'] = 2 + self.edges["right_zero_idx"] = np.array(transect_idx)[temp_idx] + self.edges["right_zero"] = 2 else: - self.edges['right_zero_idx'] = [] + self.edges["right_zero_idx"] = [] + + # Zero Q Message + if self.edges["right_zero"] == 2 or self.edges["left_zero"] == 2: + self.edges["status"] = "warning" + self.edges["messages"].append(["EDGES: Edge has zero Q;", 1, 13]) # Check consistent edge type - self.edges['left_type'] = 0 + self.edges["left_type"] = 0 if len(np.unique(left_type)) > 1: - self.edges['status'] = 'warning' - self.edges['messages'].append(['EDGES: Left edge type is not consistent;', 1, 13]) - self.edges['left_type'] = 2 + self.edges["left_type"] = 2 - self.edges['right_type'] = 0 + self.edges["right_type"] = 0 if len(np.unique(right_type)) > 1: - self.edges['status'] = 'warning' - self.edges['messages'].append(['EDGES: Right edge type is not consistent;', 1, 13]) - self.edges['right_type'] = 2 + self.edges["right_type"] = 2 + + # Inconsistent type message + if self.edges["right_type"] == 2 or self.edges["left_type"] == 2: + self.edges["status"] = "warning" + self.edges["messages"].append( + ["EDGES: An edge has an inconsistent edge type;", 1, 13] + ) + else: - self.edges['status'] = 'inactive' + self.edges["status"] = "inactive" @staticmethod def invalid_qa(valid, discharge): - """Computes the total invalid discharge in ensembles that have invalid data. The function also computes - the maximum run or cluster of ensembles with the maximum interpolated discharge. + """Computes the total invalid discharge in ensembles that have + invalid data. 
The function also computes the maximum run or cluster + of ensembles with the maximum interpolated discharge. Parameters ---------- @@ -1978,26 +3358,29 @@ class QAData(object): q_invalid_total: float Total interpolated discharge in invalid ensembles q_invalid_max_run: float - Maximum interpolated discharge in a run or cluster of invalid ensembles + Maximum interpolated discharge in a run or cluster of invalid + ensembles ens_invalid: int Total number of invalid ensembles """ # Create bool for invalid data invalid = np.logical_not(valid) - q_invalid_total = np.nansum(discharge.middle_ens[invalid]) + np.nansum(discharge.top_ens[invalid]) \ + q_invalid_total = ( + np.nansum(discharge.middle_ens[invalid]) + + np.nansum(discharge.top_ens[invalid]) + np.nansum(discharge.bottom_ens[invalid]) + ) # Compute total number of invalid ensembles ens_invalid = np.sum(invalid) # Compute the indices of where changes occur - valid_int = np.insert(valid.astype(int), 0, -1) valid_int = np.append(valid_int, -1) valid_run = np.where(np.diff(valid_int) != 0)[0] run_length = np.diff(valid_run) - run_length0 = run_length[(valid[0] == 1)::2] + run_length0 = run_length[(valid[0] == 1) :: 2] n_runs = len(run_length0) @@ -2015,9 +3398,11 @@ class QAData(object): m += 1 idx_start = valid_run[n] idx_end = valid_run[n + 1] - q_invalid_run.append(np.nansum(discharge.middle_ens[idx_start:idx_end]) - + np.nansum(discharge.top_ens[idx_start:idx_end]) - + np.nansum(discharge.bottom_ens[idx_start:idx_end])) + q_invalid_run.append( + np.nansum(discharge.middle_ens[idx_start:idx_end]) + + np.nansum(discharge.top_ens[idx_start:idx_end]) + + np.nansum(discharge.bottom_ens[idx_start:idx_end]) + ) # Determine the maximum discharge in a single run q_invalid_max_run = np.nanmax(np.abs(q_invalid_run)) @@ -2054,8 +3439,12 @@ class QAData(object): u_processed = boat_selected.u_processed_mps v_processed = boat_selected.v_processed_mps else: - u_processed = np.tile(np.nan, transect.boat_vel.bt_vel.u_processed_mps.shape) - v_processed = np.tile(np.nan, transect.boat_vel.bt_vel.v_processed_mps.shape) + u_processed = np.tile( + np.nan, transect.boat_vel.bt_vel.u_processed_mps.shape + ) + v_processed = np.tile( + np.nan, transect.boat_vel.bt_vel.v_processed_mps.shape + ) # Compute boat coordinates x_processed = np.nancumsum(u_processed * ens_duration) @@ -2064,29 +3453,29 @@ class QAData(object): # Compute left distance moved # TODO should be a dist moved function - left_edge_idx = QComp.edge_ensembles('left', transect) + left_edge_idx = QComp.edge_ensembles("left", transect) if len(left_edge_idx) > 0: boat_x = x_processed[left_edge_idx[-1]] - x_processed[left_edge_idx[0]] boat_y = y_processed[left_edge_idx[-1]] - y_processed[left_edge_idx[0]] - left_dist_moved = (boat_x ** 2 + boat_y ** 2) ** 0.5 + left_dist_moved = (boat_x**2 + boat_y**2) ** 0.5 else: left_dist_moved = np.nan # Compute right distance moved - right_edge_idx = QComp.edge_ensembles('right', transect) + right_edge_idx = QComp.edge_ensembles("right", transect) if len(right_edge_idx) > 0: boat_x = x_processed[right_edge_idx[-1]] - x_processed[right_edge_idx[0]] boat_y = y_processed[right_edge_idx[-1]] - y_processed[right_edge_idx[0]] - right_dist_moved = (boat_x ** 2 + boat_y ** 2) ** 0.5 + right_dist_moved = (boat_x**2 + boat_y**2) ** 0.5 else: right_dist_moved = np.nan return right_dist_moved, left_dist_moved, dmg - + # check for user changes def check_bt_setting(self, meas): """Checks the bt settings to see if they are still on the default - settings. + settings. 
Parameters ---------- @@ -2094,30 +3483,38 @@ class QAData(object): Object of class Measurement """ - self.settings_dict['tab_bt'] = 'Default' + self.settings_dict["tab_bt"] = "Default" s = meas.current_settings() d = meas.qrev_default_settings() - if s['BTbeamFilter'] != d['BTbeamFilter']: - self.bt_vel['messages'].append(['BT: User modified default beam setting.', 3, 8]) - self.settings_dict['tab_bt'] = 'Custom' - - if s['BTdFilter'] != d['BTdFilter']: - self.bt_vel['messages'].append(['BT: User modified default error velocity filter.', 3, 8]) - self.settings_dict['tab_bt'] = 'Custom' - - if s['BTwFilter'] != d['BTwFilter']: - self.bt_vel['messages'].append(['BT: User modified default vertical velocity filter.', 3, 8]) - self.settings_dict['tab_bt'] = 'Custom' - - if s['BTsmoothFilter'] != d['BTsmoothFilter']: - self.bt_vel['messages'].append(['BT: User modified default smooth filter.', 3, 8]) - self.settings_dict['tab_bt'] = 'Custom' + if s["BTbeamFilter"] != d["BTbeamFilter"]: + self.bt_vel["messages"].append( + ["BT: User modified default beam setting.", 3, 8] + ) + self.settings_dict["tab_bt"] = "Custom" + + if s["BTdFilter"] != d["BTdFilter"]: + self.bt_vel["messages"].append( + ["BT: User modified default error velocity filter.", 3, 8] + ) + self.settings_dict["tab_bt"] = "Custom" + + if s["BTwFilter"] != d["BTwFilter"]: + self.bt_vel["messages"].append( + ["BT: User modified default vertical velocity filter.", 3, 8] + ) + self.settings_dict["tab_bt"] = "Custom" + + if s["BTsmoothFilter"] != d["BTsmoothFilter"]: + self.bt_vel["messages"].append( + ["BT: User modified default smooth filter.", 3, 8] + ) + self.settings_dict["tab_bt"] = "Custom" def check_wt_settings(self, meas): """Checks the wt settings to see if they are still on the default - settings. + settings. 
Parameters ---------- @@ -2125,30 +3522,40 @@ class QAData(object): Object of class Measurement """ - self.settings_dict['tab_wt'] = 'Default' + self.settings_dict["tab_wt"] = "Default" s = meas.current_settings() d = meas.qrev_default_settings() - if round(s['WTExcludedDistance'], 2) != round(d['WTExcludedDistance'], 2): - self.w_vel['messages'].append(['WT: User modified excluded distance.', 3, 11]) - self.settings_dict['tab_wt'] = 'Custom' - - if s['WTbeamFilter'] != d['WTbeamFilter']: - self.w_vel['messages'].append(['WT: User modified default beam setting.', 3, 11]) - self.settings_dict['tab_wt'] = 'Custom' - - if s['WTdFilter'] != d['WTdFilter']: - self.w_vel['messages'].append(['WT: User modified default error velocity filter.', 3, 11]) - self.settings_dict['tab_wt'] = 'Custom' - - if s['WTwFilter'] != d['WTwFilter']: - self.w_vel['messages'].append(['WT: User modified default vertical velocity filter.', 3, 11]) - self.settings_dict['tab_wt'] = 'Custom' - - if s['WTsnrFilter'] != d['WTsnrFilter']: - self.w_vel['messages'].append(['WT: User modified default SNR filter.', 3, 11]) - self.settings_dict['tab_wt'] = 'Custom' + if round(s["WTExcludedDistance"], 2) != round(d["WTExcludedDistance"], 2): + self.w_vel["messages"].append( + ["WT: User modified excluded distance.", 3, 11] + ) + self.settings_dict["tab_wt"] = "Custom" + + if s["WTbeamFilter"] != d["WTbeamFilter"]: + self.w_vel["messages"].append( + ["WT: User modified default beam setting.", 3, 11] + ) + self.settings_dict["tab_wt"] = "Custom" + + if s["WTdFilter"] != d["WTdFilter"]: + self.w_vel["messages"].append( + ["WT: User modified default error velocity filter.", 3, 11] + ) + self.settings_dict["tab_wt"] = "Custom" + + if s["WTwFilter"] != d["WTwFilter"]: + self.w_vel["messages"].append( + ["WT: User modified default vertical velocity filter.", 3, 11] + ) + self.settings_dict["tab_wt"] = "Custom" + + if s["WTsnrFilter"] != d["WTsnrFilter"]: + self.w_vel["messages"].append( + ["WT: User modified default SNR filter.", 3, 11] + ) + self.settings_dict["tab_wt"] = "Custom" def check_extrap_settings(self, meas): """Checks the extrap to see if they are still on the default @@ -2160,25 +3567,33 @@ class QAData(object): Object of class Measurement """ - self.settings_dict['tab_extrap'] = 'Default' + self.settings_dict["tab_extrap"] = "Default" # Check fit parameters - if meas.extrap_fit.sel_fit[0].fit_method != 'Automatic': - self.settings_dict['tab_extrap'] = 'Custom' - self.extrapolation['messages'].append(['Extrapolation: User modified default automatic setting.', 3, 12]) + if meas.extrap_fit.sel_fit[0].fit_method != "Automatic": + self.settings_dict["tab_extrap"] = "Custom" + self.extrapolation["messages"].append( + ["Extrapolation: User modified default automatic setting.", 3, 12] + ) # Check data parameters - if meas.extrap_fit.sel_fit[-1].data_type.lower() != 'q': - self.settings_dict['tab_extrap'] = 'Custom' - self.extrapolation['messages'].append(['Extrapolation: User modified data type ', 3, 12]) + if meas.extrap_fit.sel_fit[-1].data_type.lower() != "q": + self.settings_dict["tab_extrap"] = "Custom" + self.extrapolation["messages"].append( + ["Extrapolation: User modified data type ", 3, 12] + ) if meas.extrap_fit.threshold != 20: - self.settings_dict['tab_extrap'] = 'Custom' - self.extrapolation['messages'].append(['Extrapolation: User modified default threshold.', 3, 12]) + self.settings_dict["tab_extrap"] = "Custom" + self.extrapolation["messages"].append( + ["Extrapolation: User modified default threshold.", 3, 12] + 
) if meas.extrap_fit.subsection[0] != 0 or meas.extrap_fit.subsection[1] != 100: - self.settings_dict['tab_extrap'] = 'Custom' - self.extrapolation['messages'].append(['Extrapolation: User modified subsectioning', 3, 12]) + self.settings_dict["tab_extrap"] = "Custom" + self.extrapolation["messages"].append( + ["Extrapolation: User modified subsectioning", 3, 12] + ) def check_tempsal_settings(self, meas): """Checks the temp and salinity settings to see if they are still on @@ -2190,7 +3605,7 @@ class QAData(object): Object of class Measurement """ - self.settings_dict['tab_tempsal'] = 'Default' + self.settings_dict["tab_tempsal"] = "Default" t_source_change = False salinity_change = False @@ -2198,12 +3613,16 @@ class QAData(object): t_user_change = False t_adcp_change = False - if not all(np.isnan([meas.ext_temp_chk['user'], meas.ext_temp_chk['user_orig']])): - if meas.ext_temp_chk['user'] != meas.ext_temp_chk['user_orig']: + if not all( + np.isnan([meas.ext_temp_chk["user"], meas.ext_temp_chk["user_orig"]]) + ): + if meas.ext_temp_chk["user"] != meas.ext_temp_chk["user_orig"]: t_user_change = True - if not all(np.isnan([meas.ext_temp_chk['adcp'], meas.ext_temp_chk['adcp_orig']])): - if meas.ext_temp_chk['adcp'] != meas.ext_temp_chk['adcp_orig']: + if not all( + np.isnan([meas.ext_temp_chk["adcp"], meas.ext_temp_chk["adcp_orig"]]) + ): + if meas.ext_temp_chk["adcp"] != meas.ext_temp_chk["adcp_orig"]: t_adcp_change = True # Check each checked transect @@ -2211,36 +3630,58 @@ class QAData(object): transect = meas.transects[idx] # Temperature source - if transect.sensors.temperature_deg_c.selected != 'internal': + if transect.sensors.temperature_deg_c.selected != "internal": t_source_change = True - if transect.sensors.salinity_ppt.selected != 'internal': - sal = getattr(transect.sensors.salinity_ppt, transect.sensors.salinity_ppt.selected) - if np.all(np.equal(sal.data, transect.sensors.salinity_ppt.internal.data)): - salinity_change = False - else: - salinity_change = True + if transect.sensors.salinity_ppt.selected != "internal": + sal = getattr( + transect.sensors.salinity_ppt, + transect.sensors.salinity_ppt.selected, + ) + if transect.sensors.salinity_ppt.internal is not None: + if np.all( + np.equal(sal.data, transect.sensors.salinity_ppt.internal.data) + ): + salinity_change = False + else: + salinity_change = True # Speed of Sound - if transect.sensors.speed_of_sound_mps.selected != 'internal': + if transect.sensors.speed_of_sound_mps.selected != "internal": s_sound_change = True # Report condition and messages - if any([t_source_change, salinity_change, s_sound_change, t_adcp_change, t_user_change]): - self.settings_dict['tab_tempsal'] = 'Custom' - + if any( + [ + t_source_change, + salinity_change, + s_sound_change, + t_adcp_change, + t_user_change, + ] + ): + self.settings_dict["tab_tempsal"] = "Custom" + if t_source_change: - self.temperature['messages'].append(['Temperature: User modified temperature source.', 3, 5]) + self.temperature["messages"].append( + ["Temperature: User modified temperature source.", 3, 5] + ) if s_sound_change: - self.temperature['messages'].append(['Temperature: User modified speed of sound source.', 3, 5]) + self.temperature["messages"].append( + ["Temperature: User modified speed of sound source.", 3, 5] + ) if t_user_change: - self.temperature['messages'].append(['Temperature: User modified independent temperature.', 3, 5]) + self.temperature["messages"].append( + ["Temperature: User modified independent temperature.", 3, 5] + ) if t_adcp_change: - 
self.temperature['messages'].append(['Temperature: User modified ADCP temperature.', 3, 5]) - + self.temperature["messages"].append( + ["Temperature: User modified ADCP temperature.", 3, 5] + ) + def check_gps_settings(self, meas): """Checks the gps settings to see if they are still on the default settings. @@ -2252,12 +3693,15 @@ class QAData(object): """ gps = False - self.settings_dict['tab_gps'] = 'Default' + self.settings_dict["tab_gps"] = "Default" # Check for transects with gga or vtg data for idx in meas.checked_transect_idx: transect = meas.transects[idx] - if transect.boat_vel.gga_vel is not None or transect.boat_vel.gga_vel is not None: + if ( + transect.boat_vel.gga_vel is not None + or transect.boat_vel.vtg_vel is not None + ): gps = True break @@ -2267,25 +3711,33 @@ class QAData(object): s = meas.current_settings() d = meas.qrev_default_settings() - if s['ggaDiffQualFilter'] != d['ggaDiffQualFilter']: - self.gga_vel['messages'].append(['GPS: User modified default quality setting.', 3, 8]) - self.settings_dict['tab_gps'] = 'Custom' - - if s['ggaAltitudeFilter'] != d['ggaAltitudeFilter']: - self.gga_vel['messages'].append(['GPS: User modified default altitude filter.', 3, 8]) - self.settings_dict['tab_gps'] = 'Custom' - - if s['GPSHDOPFilter'] != d['GPSHDOPFilter']: - self.gga_vel['messages'].append(['GPS: User modified default HDOP filter.', 3, 8]) - self.settings_dict['tab_gps'] = 'Custom' - - if s['GPSSmoothFilter'] != d['GPSSmoothFilter']: - self.gga_vel['messages'].append(['GPS: User modified default smooth filter.', 3, 8]) - self.settings_dict['tab_gps'] = 'Custom' + if s["ggaDiffQualFilter"] != d["ggaDiffQualFilter"]: + self.gga_vel["messages"].append( + ["GPS: User modified default quality setting.", 3, 8] + ) + self.settings_dict["tab_gps"] = "Custom" + + if s["ggaAltitudeFilter"] != d["ggaAltitudeFilter"]: + self.gga_vel["messages"].append( + ["GPS: User modified default altitude filter.", 3, 8] + ) + self.settings_dict["tab_gps"] = "Custom" + + if s["GPSHDOPFilter"] != d["GPSHDOPFilter"]: + self.gga_vel["messages"].append( + ["GPS: User modified default HDOP filter.", 3, 8] + ) + self.settings_dict["tab_gps"] = "Custom" + + if s["GPSSmoothFilter"] != d["GPSSmoothFilter"]: + self.gga_vel["messages"].append( + ["GPS: User modified default smooth filter.", 3, 8] + ) + self.settings_dict["tab_gps"] = "Custom" def check_depth_settings(self, meas): """Checks the depth settings to see if they are still on the default - settings. + settings.
Parameters ---------- @@ -2293,42 +3745,50 @@ class QAData(object): Object of class Measurement """ - self.settings_dict['tab_depth'] = 'Default' + self.settings_dict["tab_depth"] = "Default" s = meas.current_settings() d = meas.qrev_default_settings() - if s['depthReference'] != d['depthReference']: - self.depths['messages'].append(['Depths: User modified ' - 'depth reference.', 3, 10]) - self.settings_dict['tab_depth'] = 'Custom' - - if s['depthComposite'] != d['depthComposite']: - self.depths['messages'].append(['Depths: User modified ' - 'depth reference.', 3, 10]) - self.settings_dict['tab_depth'] = 'Custom' - - if s['depthAvgMethod'] != d['depthAvgMethod']: - self.depths['messages'].append(['Depths: User modified ' - 'averaging method.', 3, 10]) - self.settings_dict['tab_depth'] = 'Custom' - - if s['depthFilterType'] != d['depthFilterType']: - self.depths['messages'].append(['Depths: User modified ' - 'filter type.', 3, 10]) - self.settings_dict['tab_depth'] = 'Custom' + if s["depthReference"] != d["depthReference"]: + self.depths["messages"].append( + ["Depths: User modified " "depth reference.", 3, 10] + ) + self.settings_dict["tab_depth"] = "Custom" + + if s["depthComposite"] != d["depthComposite"]: + self.depths["messages"].append( + ["Depths: User modified " "composite depths.", 3, 10] + ) + self.settings_dict["tab_depth"] = "Custom" + + if s["depthAvgMethod"] != d["depthAvgMethod"]: + self.depths["messages"].append( + ["Depths: User modified " "averaging method.", 3, 10] + ) + self.settings_dict["tab_depth"] = "Custom" + + if s["depthFilterType"] != d["depthFilterType"]: + self.depths["messages"].append( + ["Depths: User modified " "filter type.", 3, 10] + ) + self.settings_dict["tab_depth"] = "Custom" for idx in meas.checked_transect_idx: transect = meas.transects[idx] - if transect.depths.bt_depths.draft_orig_m != transect.depths.bt_depths.draft_use_m: - self.depths['messages'].append(['Depths: User modified ' - 'draft.', 3, 10]) - self.settings_dict['tab_depth'] = 'Custom' + if ( + transect.depths.bt_depths.draft_orig_m + != transect.depths.bt_depths.draft_use_m + ): + self.depths["messages"].append( + ["Depths: User modified " "draft.", 3, 10] + ) + self.settings_dict["tab_depth"] = "Custom" break def check_edge_settings(self, meas): """Checks the edge settings to see if they are still on the original - settings. + settings.
Parameters ---------- @@ -2360,10 +3820,16 @@ class QAData(object): if transect.edges.left.distance_m != transect.edges.left.orig_distance_m: left_edge_dist_change = True - if transect.edges.left.number_ensembles != transect.edges.left.orig_number_ensembles: + if ( + transect.edges.left.number_ensembles + != transect.edges.left.orig_number_ensembles + ): left_edge_ens_change = True - if transect.edges.left.user_discharge_cms != transect.edges.left.orig_user_discharge_cms: + if ( + transect.edges.left.user_discharge_cms + != transect.edges.left.orig_user_discharge_cms + ): left_edge_q_change = True if transect.edges.left.cust_coef != transect.edges.left.orig_cust_coef: @@ -2375,48 +3841,88 @@ class QAData(object): if transect.edges.right.distance_m != transect.edges.right.orig_distance_m: right_edge_dist_change = True - if transect.edges.right.number_ensembles != transect.edges.right.orig_number_ensembles: + if ( + transect.edges.right.number_ensembles + != transect.edges.right.orig_number_ensembles + ): right_edge_ens_change = True - if transect.edges.right.user_discharge_cms != transect.edges.right.orig_user_discharge_cms: + if ( + transect.edges.right.user_discharge_cms + != transect.edges.right.orig_user_discharge_cms + ): right_edge_q_change = True if transect.edges.right.cust_coef != transect.edges.right.orig_cust_coef: right_edge_coef_change = True - if any([start_edge_change, left_edge_type_change, left_edge_dist_change, left_edge_ens_change, - left_edge_q_change, left_edge_coef_change, right_edge_type_change, right_edge_dist_change, - right_edge_ens_change, right_edge_q_change, right_edge_coef_change]): - self.settings_dict['tab_edges'] = 'Custom' + if any( + [ + start_edge_change, + left_edge_type_change, + left_edge_dist_change, + left_edge_ens_change, + left_edge_q_change, + left_edge_coef_change, + right_edge_type_change, + right_edge_dist_change, + right_edge_ens_change, + right_edge_q_change, + right_edge_coef_change, + ] + ): + self.settings_dict["tab_edges"] = "Custom" if start_edge_change: - self.edges['messages'].append(['Edges: User modified start edge.', 3, 10]) + self.edges["messages"].append( + ["Edges: User modified start edge.", 3, 10] + ) if left_edge_type_change: - self.edges['messages'].append(['Edges: User modified left edge type.', 3, 10]) + self.edges["messages"].append( + ["Edges: User modified left edge type.", 3, 10] + ) if left_edge_dist_change: - self.edges['messages'].append(['Edges: User modified left edge distance.', 3, 10]) + self.edges["messages"].append( + ["Edges: User modified left edge distance.", 3, 10] + ) if left_edge_ens_change: - self.edges['messages'].append(['Edges: User modified left number of ensembles.', 3, 10]) + self.edges["messages"].append( + ["Edges: User modified left number of ensembles.", 3, 10] + ) if left_edge_q_change: - self.edges['messages'].append(['Edges: User modified left user discharge.', 3, 10]) + self.edges["messages"].append( + ["Edges: User modified left user discharge.", 3, 10] + ) if left_edge_coef_change: - self.edges['messages'].append(['Edges: User modified left custom coefficient.', 3, 10]) + self.edges["messages"].append( + ["Edges: User modified left custom coefficient.", 3, 10] + ) if right_edge_type_change: - self.edges['messages'].append(['Edges: User modified right edge type.', 3, 10]) + self.edges["messages"].append( + ["Edges: User modified right edge type.", 3, 10] + ) if right_edge_dist_change: - self.edges['messages'].append(['Edges: User modified right edge distance.', 3, 10]) + 
self.edges["messages"].append( + ["Edges: User modified right edge distance.", 3, 10] + ) if right_edge_ens_change: - self.edges['messages'].append(['Edges: User modified right number of ensembles.', 3, 10]) + self.edges["messages"].append( + ["Edges: User modified right number of ensembles.", 3, 10] + ) if right_edge_q_change: - self.edges['messages'].append(['Edges: User modified right user discharge.', 3, 10]) + self.edges["messages"].append( + ["Edges: User modified right user discharge.", 3, 10] + ) if right_edge_coef_change: - self.edges['messages'].append(['Edges: User modified right custom coefficient.', 3, 10]) + self.edges["messages"].append( + ["Edges: User modified right custom coefficient.", 3, 10] + ) else: - self.settings_dict['tab_edges'] = 'Default' + self.settings_dict["tab_edges"] = "Default" def check_mbt_settings(self, meas): """Checks the mbt settings to see if they are still on the original - settings. + settings. Parameters ---------- @@ -2441,26 +3947,36 @@ class QAData(object): else: mb_user_valid.append(True) - if mbt[n].use_2_correct != auto[n].use_2_correct and \ - meas.transects[meas.checked_transect_idx[0]].boat_vel.selected == 'bt_vel': + if ( + mbt[n].use_2_correct != auto[n].use_2_correct + and meas.transects[meas.checked_transect_idx[0]].boat_vel.selected + == "bt_vel" + ): mb_used.append(True) else: mb_used.append(False) - self.settings_dict['tab_mbt'] = 'Default' + self.settings_dict["tab_mbt"] = "Default" if any(mb_user_valid): - self.settings_dict['tab_mbt'] = 'Custom' - self.movingbed['messages'].append(['Moving-Bed Test: ' - 'User modified ' - 'valid test settings.', 3, 6]) + self.settings_dict["tab_mbt"] = "Custom" + self.movingbed["messages"].append( + ["Moving-Bed Test: " "User modified " "valid test settings.", 3, 6] + ) if any(mb_used): - self.settings_dict['tab_mbt'] = 'Custom' - self.movingbed['messages'].append(['Moving-Bed Test: ' - 'User modified ' - 'use to correct settings.', 3, 6]) + self.settings_dict["tab_mbt"] = "Custom" + self.movingbed["messages"].append( + [ + "Moving-Bed Test: " "User modified " "use to correct settings.", + 3, + 6, + ] + ) + + if meas.observed_no_moving_bed: + self.settings_dict["tab_mbt"] = "Custom" if meas.observed_no_moving_bed: - self.settings_dict['tab_mbt'] = 'Custom' + self.settings_dict["tab_mbt"] = "Custom" def check_compass_settings(self, meas): """Checks the compass settings for changes. 
@@ -2471,7 +3987,7 @@ class QAData(object): Object of class Measurement """ - self.settings_dict['tab_compass'] = 'Default' + self.settings_dict["tab_compass"] = "Default" magvar_change = False align_change = False @@ -2481,25 +3997,33 @@ class QAData(object): transect = meas.transects[idx] # Magvar - if transect.sensors.heading_deg.internal.mag_var_deg != \ - transect.sensors.heading_deg.internal.mag_var_orig_deg: + if ( + transect.sensors.heading_deg.internal.mag_var_deg + != transect.sensors.heading_deg.internal.mag_var_orig_deg + ): magvar_change = True # Heading offset if transect.sensors.heading_deg.external is not None: - if transect.sensors.heading_deg.external.align_correction_deg != \ - transect.sensors.heading_deg.external.align_correction_orig_deg: + if ( + transect.sensors.heading_deg.external.align_correction_deg + != transect.sensors.heading_deg.external.align_correction_orig_deg + ): align_change = True # Report condition and messages if any([magvar_change, align_change]): - self.settings_dict['tab_compass'] = 'Custom' + self.settings_dict["tab_compass"] = "Custom" if magvar_change: - self.compass['messages'].append(['Compass: User modified magnetic variation.', 3, 4]) + self.compass["messages"].append( + ["Compass: User modified magnetic variation.", 3, 4] + ) if align_change: - self.compass['messages'].append(['Compass: User modified heading offset.', 3, 4]) + self.compass["messages"].append( + ["Compass: User modified heading offset.", 3, 4] + ) def check_oursin(self, meas): """Checks the compass settings for changes. @@ -2510,16 +4034,16 @@ class QAData(object): Object of class Measurement """ - self.settings_dict['tab_uncertainty_2_advanced'] = 'Default' - self.settings_dict['tab_uncertainty'] = 'Default' + self.settings_dict["tab_uncertainty_2_advanced"] = "Default" + self.settings_dict["tab_uncertainty"] = "Default" for key in meas.oursin.user_advanced_settings.keys(): if not np.isnan(meas.oursin.user_advanced_settings[key]): - self.settings_dict['tab_uncertainty_2_advanced'] = 'Custom' - self.settings_dict['tab_uncertainty'] = 'Custom' + self.settings_dict["tab_uncertainty_2_advanced"] = "Custom" + self.settings_dict["tab_uncertainty"] = "Custom" break for key in meas.oursin.user_specified_u.keys(): if not np.isnan(meas.oursin.user_specified_u[key]): - self.settings_dict['tab_uncertainty'] = 'Custom' + self.settings_dict["tab_uncertainty"] = "Custom" break diff --git a/Classes/QComp.py b/Classes/QComp.py index f227e5e..eacdcce 100644 --- a/Classes/QComp.py +++ b/Classes/QComp.py @@ -1,7 +1,9 @@ import numpy as np from Classes.TransectData import TransectData from Classes.BoatStructure import BoatStructure -from MiscLibs.common_functions import cart2pol, pol2cart, nan_less, nan_greater +from MiscLibs.common_functions import cart2pol, pol2cart +from MiscLibs.compute_edge_cd import compute_edge_cd + # from profilehooks import profile from DischargeFunctions.top_discharge_extrapolation import extrapolate_top from DischargeFunctions.bottom_discharge_extrapolation import extrapolate_bot @@ -41,38 +43,52 @@ class QComp(object): correction_factor: float Moving-bed correction factor, if required int_cells: float - Total discharge computed for invalid depth cells excluding invalid ensembles + Total discharge computed for invalid depth cells excluding invalid + ensembles int_ens: float Total discharge computed for invalid ensembles """ - + def __init__(self): """Initialize class and instance variables.""" - self.top = None # Transect total extrapolated top discharge - 
self.middle = None # Transect toal measured middle discharge including interpolations - self.bottom = None # ETransect total extrapolated bottom discharge - self.top_ens = None # Extrapolated top discharge by ensemble - self.middle_cells = None # Measured middle discharge including interpolation by cell - self.middle_ens = None # Measured middle discharge including interpolation by ensemble - self.bottom_ens = None # Extrapolate bottom discharge by ensemble - self.left = None # Left edge discharge - self.left_idx = [] # Ensembles used for left edge - self.right = None # Right edge discharge - self.right_idx = [] # Ensembles used for right edge - self.total_uncorrected = None # Total discharge for transect uncorrected for moving-bed, if required - self.total = None # Total discharge with moving-bed correction applied if necessary - self.correction_factor = 1 # Moving-bed correction factor, if required - self.int_cells = None # Total discharge computed for invalid depth cells excluding invalid ensembles - self.int_ens = None # Total discharge computed for invalid ensembles + self.top = None + self.middle = None + self.bottom = None + self.top_ens = None + self.middle_cells = None + self.middle_ens = None + self.bottom_ens = None + self.left = None + self.left_idx = [] + self.right = None + self.right_idx = [] + self.total_uncorrected = None + self.total = None + self.correction_factor = 1 + self.int_cells = None + self.int_ens = None + self.top_speed = np.nan + self.bottom_speed = np.nan + self.left_edge_speed = np.nan + self.right_edge_speed = np.nan # @profile - def populate_data(self, data_in, moving_bed_data=None, top_method=None, bot_method=None, exponent=None): + def populate_data( + self, + data_in, + moving_bed_data=None, + top_method=None, + bot_method=None, + exponent=None, + ): """Discharge is computed using the data provided to the method. - Water data provided are assumed to be corrected for the navigation reference. - If a moving-bed correction is to be applied it is computed and applied. - The TRDI method using expanded delta time is applied if the processing method is WR2. - + Water data provided are assumed to be corrected for the navigation + reference. + If a moving-bed correction is to be applied, it is computed and applied. + The TRDI method using expanded delta time is applied if the + processing method is WR2. + Parameters ---------- data_in: TransectData @@ -87,32 +103,35 @@ class QComp(object): Extrapolation exponent """ - # Use bottom track interpolation settings to determine the appropriate algorithms to apply - if data_in.boat_vel.bt_vel.interpolate == 'None': - processing = 'WR2' - elif data_in.boat_vel.bt_vel.interpolate == 'Linear': - processing = 'QRev' + # Use bottom track interpolation settings to determine the + # appropriate algorithms to apply + if data_in.boat_vel.bt_vel.interpolate == "None": + processing = "WR2" + elif data_in.boat_vel.bt_vel.interpolate == "Linear": + processing = "QRev" else: - processing = 'RSL' + processing = "RSL" # Compute cross product x_prod = QComp.cross_product(data_in) - + # Get index of ensembles in moving-boat portion of transect in_transect_idx = data_in.in_transect_idx - - if processing == 'WR2': - # TRDI uses expanded delta time to handle invalid ensembles which can be caused by invalid BT - # WT, or depth. QRev by default handles this invalid data through linear interpolation of the - # invalid data through linear interpolation of the invalid data type. 
This if statement and - associated code is required to maintain compatibility with WinRiver II discharge computations. - + + if processing == "WR2": + # TRDI uses expanded delta time to handle invalid ensembles + # which can be caused by invalid BT, WT, or depth. QRev by default + # handles this invalid data through linear interpolation of the + # invalid data type. This if statement and associated code is + # required to maintain compatibility with WinRiver II discharge + # computations. + # Determine valid ensembles valid_ens = np.any(np.logical_not(np.isnan(x_prod))) valid_ens = valid_ens[in_transect_idx] - - # Compute the ensemble duration using TRDI approach of expanding delta time to compensate - # for invalid ensembles + + # Compute the ensemble duration using TRDI approach of expanding + # delta time to compensate for invalid ensembles n_ens = len(valid_ens) ens_dur = data_in.date_time.ens_duration_sec[in_transect_idx] delta_t = np.tile([np.nan], n_ens) @@ -123,67 +142,114 @@ class QComp(object): if valid_ens[j]: delta_t[j] = cum_dur cum_dur = 0 - + else: # For non-WR2 processing use actual ensemble duration delta_t = data_in.date_time.ens_duration_sec[in_transect_idx] - + # Compute measured or middle discharge self.middle_cells = QComp.discharge_middle_cells(x_prod, data_in, delta_t) self.middle_ens = np.nansum(self.middle_cells, 0) self.middle = np.nansum(self.middle_ens) - + # Compute the top discharge trans_select = getattr(data_in.depths, data_in.depths.selected) - num_top_method = {'Power': 0, 'Constant': 1, '3-Point': 2, None: -1} - self.top_ens = extrapolate_top(x_prod, data_in.w_vel.valid_data[0, :, :], - num_top_method[data_in.extrap.top_method], - data_in.extrap.exponent, data_in.in_transect_idx, trans_select.depth_cell_size_m, - trans_select.depth_cell_depth_m, trans_select.depth_processed_m, delta_t, - num_top_method[top_method], exponent) + num_top_method = {"Power": 0, "Constant": 1, "3-Point": 2, None: -1} + try: + self.top_ens = extrapolate_top( + x_prod, + data_in.w_vel.valid_data[0, :, :], + num_top_method[data_in.extrap.top_method], + data_in.extrap.exponent, + data_in.in_transect_idx, + trans_select.depth_cell_size_m, + trans_select.depth_cell_depth_m, + trans_select.depth_processed_m, + delta_t, + num_top_method[top_method], + exponent, + ) + except SystemError: + self.top_ens = QComp.extrapolate_top( + x_prod, + data_in.w_vel.valid_data[0, :, :], + num_top_method[data_in.extrap.top_method], + data_in.extrap.exponent, + data_in.in_transect_idx, + trans_select.depth_cell_size_m, + trans_select.depth_cell_depth_m, + trans_select.depth_processed_m, + delta_t, + num_top_method[top_method], + exponent, + ) self.top = np.nansum(self.top_ens) # Compute the bottom discharge - num_bot_method = {'Power': 0, 'No Slip': 1, None: -1} - self.bottom_ens = extrapolate_bot(x_prod, - data_in.w_vel.valid_data[0, :, :], - num_bot_method[data_in.extrap.bot_method], - data_in.extrap.exponent, - data_in.in_transect_idx, - trans_select.depth_cell_size_m, - trans_select.depth_cell_depth_m, - trans_select.depth_processed_m, delta_t, - num_bot_method[bot_method], - exponent) + num_bot_method = {"Power": 0, "No Slip": 1, None: -1} + try: + self.bottom_ens = extrapolate_bot( + x_prod, + data_in.w_vel.valid_data[0, :, :], + num_bot_method[data_in.extrap.bot_method], + data_in.extrap.exponent, + data_in.in_transect_idx, + trans_select.depth_cell_size_m, + trans_select.depth_cell_depth_m, + trans_select.depth_processed_m, + delta_t, +
num_bot_method[bot_method], + exponent, + ) + except SystemError: + self.bottom_ens = QComp.extrapolate_bot( + x_prod, + data_in.w_vel.valid_data[0, :, :], + num_bot_method[data_in.extrap.bot_method], + data_in.extrap.exponent, + data_in.in_transect_idx, + trans_select.depth_cell_size_m, + trans_select.depth_cell_depth_m, + trans_select.depth_processed_m, + delta_t, + num_bot_method[bot_method], + exponent, + ) self.bottom = np.nansum(self.bottom_ens) # Compute interpolated cell and ensemble discharge from computed # measured discharge self.interpolate_no_cells(data_in) self.middle = np.nansum(self.middle_ens) - self.int_cells, self.int_ens = QComp.discharge_interpolated(self.top_ens, self.middle_cells, - self.bottom_ens, data_in) - + self.int_cells, self.int_ens = QComp.discharge_interpolated( + self.top_ens, self.middle_cells, self.bottom_ens, data_in + ) + # Compute right edge discharge - if data_in.edges.right.type != 'User Q': - self.right, self.right_idx = QComp.discharge_edge('right', data_in, top_method, bot_method, exponent) + if data_in.edges.right.type != "User Q": + self.right, self.right_idx = QComp.discharge_edge( + "right", data_in, top_method, bot_method, exponent + ) else: self.right = data_in.edges.right.user_discharge_cms self.right_idx = [] # Compute left edge discharge - if data_in.edges.left.type != 'User Q': - self.left, self.left_idx = QComp.discharge_edge('left', data_in, top_method, bot_method, exponent) + if data_in.edges.left.type != "User Q": + self.left, self.left_idx = QComp.discharge_edge( + "left", data_in, top_method, bot_method, exponent + ) else: self.left = data_in.edges.left.user_discharge_cms self.left_idx = [] - - # Compute moving-bed correction, if applicable. Two checks are used to account for the - # way the meas object is created. - # Moving-bed corrections are only applied to bottom track referenced computations + # Compute moving-bed correction, if applicable. Two checks are used + # to account for the way the meas object is created. 
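The paired try/except blocks above implement a single pattern: call the Numba AOT-compiled extrapolation function first and, if the compiled module raises a SystemError, fall back to the equivalent pure-Python static method. A minimal sketch of that pattern, with hypothetical helper names:

    def extrapolate_with_fallback(compiled_func, python_func, *args):
        # Prefer the AOT-compiled implementation; fall back to the
        # pure-Python version if the compiled call fails.
        try:
            return compiled_func(*args)
        except SystemError:
            return python_func(*args)
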
+ + # Moving-bed corrections are only applied to bottom track referenced + # computations mb_type = None - if data_in.boat_vel.selected == 'bt_vel': + if data_in.boat_vel.selected == "bt_vel": if moving_bed_data is not None: # Determine if a moving-bed test is to be used for correction @@ -196,31 +262,52 @@ class QComp(object): if any(use_2_correct): # Make sure composite tracks are turned off - if data_in.boat_vel.composite == 'Off': + if data_in.boat_vel.composite == "Off": # Apply appropriate moving-bed test correction method - if mb_type == 'Stationary': - self.correction_factor = self.stationary_correction_factor(self.top, self.middle, - self.bottom, data_in, - moving_bed_data, delta_t) + if mb_type == "Stationary": + self.correction_factor = self.stationary_correction_factor( + self.top, + self.middle, + self.bottom, + data_in, + moving_bed_data, + delta_t, + ) else: - self.correction_factor = \ - self.loop_correction_factor(self.top, self.middle, - self.bottom, data_in, - moving_bed_data[use_2_correct.index(True)], - delta_t) - - self.total_uncorrected = self.left + self.right + self.middle + self.bottom + self.top + self.correction_factor = self.loop_correction_factor( + self.top, + self.middle, + self.bottom, + data_in, + moving_bed_data[use_2_correct.index(True)], + delta_t, + ) + + self.total_uncorrected = ( + self.left + self.right + self.middle + self.bottom + self.top + ) # Compute final discharge using correction if applicable - if self.correction_factor is None or self.correction_factor == 1 or np.isnan(self.correction_factor): + if ( + self.correction_factor is None + or self.correction_factor == 1 + or np.isnan(self.correction_factor) + ): self.total = self.total_uncorrected else: - self.total = self.left + self.right + (self.middle + self.bottom + self.top) * self.correction_factor + self.total = ( + self.left + + self.right + + (self.middle + self.bottom + self.top) * self.correction_factor + ) + + self.compute_topbot_speed(transect=data_in) + self.compute_edge_speed(transect=data_in) @staticmethod def qrev_mat_in(meas_struct): - """Processes the Matlab data structure to obtain a list of QComp objects containing the discharge data from the - Matlab data structure. + """Processes the Matlab data structure to obtain a list of QComp + objects containing the discharge data from the Matlab data structure. 
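A hedged sketch of the struct handling this method performs (hypothetical helper; the single-transect case is detected below via hasattr(..., "bottom"), while multiple transects arrive as an iterable of structs, one QComp per transect):

    def qcomp_list_from_mat(discharge_struct):
        discharge = []
        if hasattr(discharge_struct, "bottom"):  # single transect
            q = QComp()
            q.populate_from_qrev_mat(discharge_struct)
            discharge.append(q)
        else:  # one struct per transect
            for q_struct in discharge_struct:
                q = QComp()
                q.populate_from_qrev_mat(q_struct)
                discharge.append(q)
        return discharge
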
Parameters ---------- @@ -234,7 +321,7 @@ class QComp(object): """ discharge = [] - if hasattr(meas_struct.discharge, 'bottom'): + if hasattr(meas_struct.discharge, "bottom"): # Measurement has discharge data from only one transect q = QComp() q.populate_from_qrev_mat(meas_struct.discharge) @@ -279,15 +366,18 @@ class QComp(object): # One ensemble, multiple cells self.middle_cells = self.middle_cells[:, np.newaxis] - # If only one value, it will be read in as int but needs to be an array of len 1 + # If only one value, it will be read in as int but needs to be an + # array of len 1 self.left = q_in.left - # If only one value, it will be read in as int but needs to be an array of len 1 + # If only one value, it will be read in as int but needs to be an + # array of len 1 if type(q_in.leftidx) is int: self.left_idx = np.array([q_in.leftidx]) else: self.left_idx = q_in.leftidx self.right = q_in.right - # If only one value, it will be read in as int but needs to be an array of len 1 + # If only one value, it will be read in as int but needs to be an + # array of len 1 if type(q_in.rightidx) is int: self.right_idx = np.array([q_in.rightidx]) else: @@ -305,19 +395,19 @@ class QComp(object): def interpolate_no_cells(self, transect_data): """Computes discharge for ensembles where the depth is too - shallow for any valid depth cells. The computation is done - using interpolation of unit discharge defined as the ensemble - discharge divided by the depth of the ensemble and the - duration of the ensemble. The independent variable for the - interpolation is the track distance. After interpolation the - discharge for the interpolated ensembles is computed by - multiplying the interpolated value by the depth and duration - of those ensembles to achieve discharge for those ensembles. - - Parameters - ---------- - transect_data: TransectData - Object of TransectData + shallow for any valid depth cells. The computation is done + using interpolation of unit discharge defined as the ensemble + discharge divided by the depth of the ensemble and the + duration of the ensemble. The independent variable for the + interpolation is the track distance. After interpolation the + discharge for the interpolated ensembles is computed by + multiplying the interpolated value by the depth and duration + of those ensembles to achieve discharge for those ensembles. 
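As a self-contained illustration of the unit-discharge interpolation described above (made-up 1-D arrays; the real method also builds a strictly monotonic track distance first):

    import numpy as np

    q_ens = np.array([1.2, np.nan, 1.0])     # ensemble discharge (m^3/s)
    depth = np.array([2.0, 0.4, 1.8])        # processed depth (m)
    duration = np.array([1.1, 1.0, 1.2])     # ensemble duration (s)
    dist = np.array([0.0, 1.5, 3.0])         # track distance (m)

    unit_q = q_ens / depth / duration        # unit discharge
    valid = np.logical_not(np.isnan(unit_q))
    unit_q_int = np.interp(dist, dist[valid], unit_q[valid],
                           left=np.nan, right=np.nan)
    q_filled = unit_q_int * depth * duration  # back to ensemble discharge
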
+ + Parameters + ---------- + transect_data: TransectData + Object of TransectData """ # Compute the discharge in each ensemble @@ -329,16 +419,24 @@ class QComp(object): if len(idx) > 0: # Compute the unit discharge by depth for each ensemble - depth_selected = getattr(transect_data.depths, transect_data.depths.selected) - unit_q_depth = (q_ensemble / depth_selected.depth_processed_m[transect_data.in_transect_idx]) \ - / transect_data.date_time.ens_duration_sec[transect_data.in_transect_idx] + depth_selected = getattr( + transect_data.depths, transect_data.depths.selected + ) + unit_q_depth = ( + q_ensemble + / depth_selected.depth_processed_m[transect_data.in_transect_idx] + ) / transect_data.date_time.ens_duration_sec[ + transect_data.in_transect_idx + ] # Compute boat track - boat_track = BoatStructure.compute_boat_track(transect_data, transect_data.boat_vel.selected) + boat_track = BoatStructure.compute_boat_track( + transect_data, transect_data.boat_vel.selected + ) # Create strict monotonic vector for 1-D interpolation q_mono = unit_q_depth - x_mono = boat_track['distance_m'][transect_data.in_transect_idx] + x_mono = boat_track["distance_m"][transect_data.in_transect_idx] # Identify duplicate values, and replace with an average dups = self.group_consecutives(x_mono) @@ -355,20 +453,30 @@ class QComp(object): # Interpolate unit q if np.any(valid): - unit_q_int = np.interp(boat_track['distance_m'][transect_data.in_transect_idx], x_mono[valid], - q_mono[valid], left=np.nan, right=np.nan) + unit_q_int = np.interp( + boat_track["distance_m"][transect_data.in_transect_idx], + x_mono[valid], + q_mono[valid], + left=np.nan, + right=np.nan, + ) else: unit_q_int = 0 - # Compute the discharge in each ensemble based on interpolated data - q_int = unit_q_int * depth_selected.depth_processed_m[transect_data.in_transect_idx] \ - * transect_data.date_time.ens_duration_sec[transect_data.in_transect_idx] + # Compute the discharge in each ensemble based on + # interpolated data + q_int = ( + unit_q_int + * depth_selected.depth_processed_m[transect_data.in_transect_idx] + * transect_data.date_time.ens_duration_sec[ + transect_data.in_transect_idx + ] + ) self.middle_ens[idx] = q_int[idx] @staticmethod def group_consecutives(vals): - """Return list of consecutive lists of numbers from vals (number list). - """ + """Return list of consecutive lists of numbers from vals (number list).""" run = [] result = [] @@ -380,7 +488,7 @@ class QComp(object): if j > 1: run.append(n) elif j > 0: - run.append(n-1) + run.append(n - 1) run.append(n) elif j > 0: result.append(run) @@ -390,10 +498,18 @@ class QComp(object): return result @staticmethod - def cross_product(transect=None, w_vel_x=None, w_vel_y=None, b_vel_x=None, b_vel_y=None, start_edge=None): + def cross_product( + transect=None, + w_vel_x=None, + w_vel_y=None, + b_vel_x=None, + b_vel_y=None, + start_edge=None, + ): """Computes the cross product of the water and boat velocity. - Input data can be a transect or component vectors for the water and boat velocities with the start edge. + Input data can be a transect or component vectors for the water and + boat velocities with the start edge. 
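A short numeric sketch of the cross product and the start-edge sign convention used here (illustrative values only):

    import numpy as np

    w_vel_x = np.array([1.0, 1.2])   # water velocity components (m/s)
    w_vel_y = np.array([0.1, 0.0])
    b_vel_x = np.array([0.0, 0.1])   # boat velocity components (m/s)
    b_vel_y = np.array([0.8, 0.9])

    xprod = w_vel_x * b_vel_y - w_vel_y * b_vel_x   # m^2/s^2 per cell
    start_edge = "Left"
    xprod = xprod * (1 if start_edge == "Right" else -1)
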
Parameters
         ----------
         transect: TransectData
@@ -429,8 +545,12 @@ class QComp(object):
                 b_vel_x = trans_select.u_processed_mps
                 b_vel_y = trans_select.v_processed_mps
             else:
-                b_vel_x = np.tile([np.nan], transect.boat_vel.bt_vel.u_processed_mps.shape)
-                b_vel_y = np.tile([np.nan], transect.boat_vel.bt_vel.v_processed_mps.shape)
+                b_vel_x = np.tile(
+                    [np.nan], transect.boat_vel.bt_vel.u_processed_mps.shape
+                )
+                b_vel_y = np.tile(
+                    [np.nan], transect.boat_vel.bt_vel.v_processed_mps.shape
+                )
 
             start_edge = transect.start_edge
 
@@ -438,7 +558,7 @@ class QComp(object):
         xprod = np.multiply(w_vel_x, b_vel_y) - np.multiply(w_vel_y, b_vel_x)
 
         # Correct the sign of the cross product based on the start edge
-        if start_edge == 'Right':
+        if start_edge == "Right":
             direction = 1
         else:
             direction = -1
@@ -448,7 +568,8 @@ class QComp(object):
 
     @staticmethod
    def discharge_middle_cells(xprod, transect, delta_t):
-        """Computes the discharge in the measured or middle portion of the cross section.
+        """Computes the discharge in the measured or middle portion of the
+        cross section.
 
         Parameters
         ----------
@@ -471,12 +592,16 @@ class QComp(object):
         cell_size = trans_select.depth_cell_size_m
 
         # Determine if xprod contains edge data and process appropriately
-        q_mid_cells = np.multiply(xprod[:, in_transect_idx] * cell_size[:, in_transect_idx], delta_t)
+        q_mid_cells = np.multiply(
+            xprod[:, in_transect_idx] * cell_size[:, in_transect_idx], delta_t
+        )
 
         return q_mid_cells
 
     @staticmethod
-    def discharge_edge(edge_loc, transect, top_method=None, bot_method=None, exponent=None):
+    def discharge_edge(
+        edge_loc, transect, top_method=None, bot_method=None, exponent=None
+    ):
         """Computes edge discharge.
 
         Parameters
         ----------
@@ -514,7 +639,9 @@ class QComp(object):
         edge_dist = edge_selected.distance_m
 
         # Compute edge velocity and sign
-        edge_vel_sign, edge_vel_mag = QComp.edge_velocity(edge_idx, transect, top_method, bot_method, exponent)
+        edge_vel_sign, edge_vel_mag = QComp.edge_velocity(
+            edge_idx, transect, top_method, bot_method, exponent
+        )
 
         # Compute edge coefficient
         coef = QComp.edge_coef(edge_loc, transect)
@@ -528,10 +655,13 @@ class QComp(object):
 
     @staticmethod
     def edge_ensembles(edge_loc, transect):
-        """This function computes the starting and ending ensemble numbers for an edge.
+        """This function computes the starting and ending ensemble numbers
+        for an edge.
 
-        This method uses either the method used by TRDI which used the specified number of valid ensembles or SonTek
-        which uses the specified number of ensembles prior to screening for valid data
+        This method uses either the TRDI method, which uses the specified
+        number of valid ensembles, or the SonTek method, which uses the
+        specified number of ensembles prior to screening for valid data.
 
         Parameters
         ----------
@@ -551,10 +681,9 @@ class QComp(object):
         num_edge_ens = int(edge_select.number_ensembles)
 
         # TRDI method
-        if transect.adcp.manufacturer == 'TRDI':
+        if transect.adcp.manufacturer == "TRDI":
             # Determine the indices of the edge ensembles which contain
             # the specified number of valid ensembles
             # noinspection PyTypeChecker
             valid_ens = QComp.valid_edge_ens(transect)
             if num_edge_ens > len(valid_ens):
                 num_edge_ens = len(valid_ens)
@@ -565,7 +694,8 @@ class QComp(object):
 
         # Sontek Method
         else:
-            # Determine the indices of the edge ensembles as collected by RiverSurveyor. There
+            # Determine the indices of the edge ensembles as collected by
+            # RiverSurveyor. There
             # is no check as to whether the ensembles contain valid data
             trans_select = getattr(transect.depths, transect.depths.selected)
             n_ensembles = len(trans_select.depth_processed_m)
@@ -579,7 +709,9 @@ class QComp(object):
         return edge_idx
 
     @staticmethod
-    def edge_velocity(edge_idx, transect, top_method=None, bot_method=None, exponent=None):
+    def edge_velocity(
+        edge_idx, transect, top_method=None, bot_method=None, exponent=None
+    ):
         """Computes the edge velocity.
 
         Different methods may be used depending on settings in transect.
@@ -614,17 +746,22 @@ class QComp(object):
 
         # Compute edge velocity using specified method
         # Used by TRDI
-        if transect.edges.vel_method == 'MeasMag':
-            edge_vel_mag, edge_vel_sign = QComp.edge_velocity_trdi(edge_idx, transect)
+        if transect.edges.vel_method == "MeasMag":
+            edge_vel_mag, edge_vel_sign = QComp.edge_velocity_trdi(
+                edge_idx, transect
+            )
 
         # Used by Sontek
-        elif transect.edges.vel_method == 'VectorProf':
-            edge_val_mag, edge_vel_sign = QComp.edge_velocity_sontek(edge_idx, transect, top_method,
-                                                                     bot_method, exponent)
+        elif transect.edges.vel_method == "VectorProf":
+            edge_vel_mag, edge_vel_sign = QComp.edge_velocity_sontek(
+                edge_idx, transect, top_method, bot_method, exponent
+            )
 
         # USGS proposed method
-        elif transect.edges.vel_method == 'Profile':
-            edge_vel_mag, edge_vel_sign = QComp.edge_velocity_profile(edge_idx, transect)
+        elif transect.edges.vel_method == "Profile":
+            edge_vel_mag, edge_vel_sign = QComp.edge_velocity_profile(
+                edge_idx, transect
+            )
 
         return edge_vel_sign, edge_vel_mag
 
@@ -667,7 +804,7 @@ class QComp(object):
 
         # Compute unit vector to help determine sign
         unit_water_x, unit_water_y = pol2cart(edge_dir, 1)
-        if transect.start_edge == 'Right':
+        if transect.start_edge == "Right":
             dir_sign = 1
         else:
             dir_sign = -1
@@ -683,22 +820,30 @@ class QComp(object):
             b_vel_x = np.tile([np.nan], transect.boat_vel.bt_vel.u_processed_mps.shape)
             b_vel_y = np.tile([np.nan], transect.boat_vel.bt_vel.v_processed_mps.shape)
 
-        track_x = np.nancumsum(b_vel_x[in_transect_idx] * ens_delta_time[in_transect_idx])
-        track_y = np.nancumsum(b_vel_y[in_transect_idx] * ens_delta_time[in_transect_idx])
+        track_x = np.nancumsum(
+            b_vel_x[in_transect_idx] * ens_delta_time[in_transect_idx]
+        )
+        track_y = np.nancumsum(
+            b_vel_y[in_transect_idx] * ens_delta_time[in_transect_idx]
+        )
 
         boat_dir, boat_mag = cart2pol(track_x[-1], track_y[-1])
         unit_track_x, unit_track_y = pol2cart(boat_dir, 1)
-        unit_x_prod = (unit_water_x * unit_track_y - unit_water_y * unit_track_x) * dir_sign
+        unit_x_prod = (
+            unit_water_x * unit_track_y - unit_water_y * unit_track_x
+        ) * dir_sign
        edge_vel_sign = np.sign(unit_x_prod)

        return edge_vel_mag, edge_vel_sign

    @staticmethod
-    def edge_velocity_sontek(edge_idx, transect, top_method=None, bot_method=None, exponent=None):
+    def edge_velocity_sontek(
+        edge_idx, transect, top_method=None, bot_method=None, exponent=None
+    ):
         """Computes the edge velocity using SonTek's method.
 
-        SonTek's method uses the profile extrapolation to estimate the velocities in the
-        unmeasured top and bottom and then projects the velocity perpendicular to the
-        course made good.
+        SonTek's method uses the profile extrapolation to estimate the
+        velocities in the unmeasured top and bottom and then projects the
+        velocity perpendicular to the course made good.
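A hedged numeric sketch of the final projection step this method performs, with made-up numbers: the mean edge velocity is projected perpendicular to the unit vector of the course made good.

    import numpy as np

    track_x, track_y = 25.0, 5.0                 # boat track components (m)
    mag = np.sqrt(track_x**2 + track_y**2)
    unit_track_x, unit_track_y = track_x / mag, track_y / mag
    v_edge_x, v_edge_y = 0.05, 0.40              # mean edge velocity (m/s)

    # Signed component of the edge velocity perpendicular to the track
    edge_vel_mag = (v_edge_x * -1 * unit_track_y) + (v_edge_y * unit_track_x)
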
Parameters ---------- @@ -740,8 +885,12 @@ class QComp(object): b_vel_x = np.tile([np.nan], transect.boat_vel.u_processed_mps.shape) b_vel_y = np.tile([np.nan], transect.boat_vel.v_processed_mps.shape) - track_x = np.nancumsum(b_vel_x[in_transect_idx] * ens_delta_time[in_transect_idx]) - track_y = np.nancumsum(b_vel_y[in_transect_idx] * ens_delta_time[in_transect_idx]) + track_x = np.nancumsum( + b_vel_x[in_transect_idx] * ens_delta_time[in_transect_idx] + ) + track_y = np.nancumsum( + b_vel_y[in_transect_idx] * ens_delta_time[in_transect_idx] + ) # Compute the unit vector for the boat track boat_dir, boat_mag = cart2pol(track_x[-1], track_y[-1]) @@ -789,14 +938,17 @@ class QComp(object): cell_depth[:, np.logical_not(valid)] = np.nan cell_depth_edge = np.nanmean(cell_size, 1) - # SonTek cuts off the mean profile based on the side lobe cutoff of - # the mean of the shallowest beams in the edge ensembles. + # SonTek cuts off the mean profile based on the side lobe + # cutoff of the mean of the shallowest beams in + # the edge ensembles. # Determine valid original beam and cell depths depth_bt_beam_orig = transect.depths.bt_depths.depth_orig_m[:, edge_idx] depth_bt_beam_orig[:, np.logical_not(valid)] = np.nan draft_bt_beam_orig = transect.depths.bt_depths.draft_orig_m - depth_cell_depth_orig = transect.depths.bt_depths.depth_cell_depth_orig_m[:, edge_idx] + depth_cell_depth_orig = ( + transect.depths.bt_depths.depth_cell_depth_orig_m[:, edge_idx] + ) depth_cell_depth_orig[:, np.logical_not(valid)] = np.nan # Compute minimum mean depth @@ -804,25 +956,33 @@ class QComp(object): min_depth = np.nanmin(min_raw_depths) min_depth = min_depth - draft_bt_beam_orig - # Compute last valid cell by computing the side lobe cutoff based - # on the mean of the minimum beam depths of the valid edge - # ensembles - if transect.w_vel.sl_cutoff_type == 'Percent': - sl_depth = min_depth - ((transect.w_vel.sl_cutoff_percent / 100.) * min_depth) + # Compute last valid cell by computing the side lobe cutoff + # based on the mean of the minimum beam depths + # of the valid edge ensembles + if transect.w_vel.sl_cutoff_type == "Percent": + sl_depth = min_depth - ( + (transect.w_vel.sl_cutoff_percent / 100.0) * min_depth + ) else: - sl_depth = min_depth - ((transect.w_vel.sl_cutoff_percent / 100.) 
* min_depth) \ + sl_depth = ( + min_depth + - ((transect.w_vel.sl_cutoff_percent / 100.0) * min_depth) - (transect.w_vel.sl_cutoff_number * cell_size[0, 0]) + ) # Adjust side lobe depth for draft sl_depth = sl_depth + draft_bt_beam_orig above_sl = cell_depth < (sl_depth + np.nanmax(cell_size)) above_sl_profile = np.nansum(above_sl, 1) # TODO this line doesn't make sense to me - valid_idx = np.logical_and(np.less(above_sl_profile, np.nanmax(above_sl_profile)+1), - np.greater(above_sl_profile, 0)) + valid_idx = np.logical_and( + np.less(above_sl_profile, np.nanmax(above_sl_profile) + 1), + np.greater(above_sl_profile, 0), + ) # Compute the number of cells above the side lobe cutoff - # remaining_depth = sl_depth - cell_depth_edge[idx_first_valid_cell] + # remaining_depth = sl_depth - cell_depth_edge[ + # idx_first_valid_cell] idx = np.where(np.logical_not(np.isnan(cell_size)))[0] # TODO this is not consistent with Matlab code n_cells = 0 @@ -837,15 +997,20 @@ class QComp(object): x_profile[not valid_idx] = np.nan y_profile[not valid_idx] = np.nan else: - idx_last_valid_cell = np.where(np.logical_not(np.isnan(x_profile[:idx_last_valid_cell])))[0][0] + idx_last_valid_cell = np.where( + np.logical_not(np.isnan(x_profile[:idx_last_valid_cell])) + )[0][0] # Mark the cells in the profile below the sidelobe invalid - x_profile[(idx_last_valid_cell+1):] = np.nan - y_profile[(idx_last_valid_cell + 1):] = np.nan + x_profile[(idx_last_valid_cell + 1) :] = np.nan + y_profile[(idx_last_valid_cell + 1) :] = np.nan # Find the top most 3 valid cells - idx_first_3_valid_cells = np.where(np.logical_not(np.isnan(x_profile)))[0][:3] + idx_first_3_valid_cells = np.where(np.logical_not(np.isnan(x_profile)))[ + 0 + ][:3] - # Compute the mean measured velocity components for the edge profile + # Compute the mean measured velocity components for the edge + # profile x_profile_mean = np.nanmean(x_profile) y_profile_mean = np.nanmean(y_profile) @@ -855,85 +1020,108 @@ class QComp(object): depth_avg = np.nanmean(depth_ens) # Determine top, mid, bottom range for the profile - top_rng_edge = cell_depth_edge[idx_first_valid_cell] - 0.5 * ref_cell_size + top_rng_edge = ( + cell_depth_edge[idx_first_valid_cell] - 0.5 * ref_cell_size + ) if idx_last_valid_cell > len(x_profile): mid_rng_edge = np.nansum(cell_size_edge[valid_idx]) else: - mid_rng_edge = np.nansum(cell_size_edge[idx_first_valid_cell:idx_last_valid_cell+1]) + mid_rng_edge = np.nansum( + cell_size_edge[idx_first_valid_cell : idx_last_valid_cell + 1] + ) # Compute z z_edge = depth_avg - cell_depth_edge - z_edge[idx_last_valid_cell+1:] = np.nan + z_edge[idx_last_valid_cell + 1 :] = np.nan z_edge[z_edge > 0] = np.nan idx_last_valid_cell = np.where(np.logical_not(np.isnan(z_edge)))[0][-1] - bot_rng_edge = depth_avg - cell_depth_edge[idx_last_valid_cell] - 0.5 * \ - cell_size_edge[idx_last_valid_cell] + bot_rng_edge = ( + depth_avg + - cell_depth_edge[idx_last_valid_cell] + - 0.5 * cell_size_edge[idx_last_valid_cell] + ) # Compute the top extrapolation for x-component - top_vel_x = QComp.discharge_top(top_method=top_method, - exponent=exponent, - idx_top=idx_first_valid_cell, - idx_top_3=idx_first_3_valid_cells, - top_rng=top_rng_edge, - component=x_profile, - cell_size=cell_size_edge, - cell_depth=cell_depth_edge, - depth_ens=depth_avg, - delta_t=1, - z=z_edge) + top_vel_x = QComp.discharge_top( + top_method=top_method, + exponent=exponent, + idx_top=idx_first_valid_cell, + idx_top_3=idx_first_3_valid_cells, + top_rng=top_rng_edge, + component=x_profile, + 
cell_size=cell_size_edge, + cell_depth=cell_depth_edge, + depth_ens=depth_avg, + delta_t=1, + z=z_edge, + ) top_vel_x = top_vel_x / top_rng_edge # Compute the bottom extrapolation for x-component - bot_vel_x = QComp.discharge_bot(bot_method=bot_method, - exponent=exponent, - idx_bot=idx_last_valid_cell, - bot_rng=bot_rng_edge, - component=x_profile, - cell_size=cell_size_edge, - cell_depth=cell_depth_edge, - depth_ens=depth_avg, - delta_t=1, - z=z_edge) + bot_vel_x = QComp.discharge_bot( + bot_method=bot_method, + exponent=exponent, + bot_rng=bot_rng_edge, + component=x_profile, + cell_size=cell_size_edge, + cell_depth=cell_depth_edge, + depth_ens=depth_avg, + delta_t=1, + z=z_edge, + ) bot_vel_x = bot_vel_x / bot_rng_edge # Compute the top extrapolation for the y-component - top_vel_y = QComp.discharge_top(top_method=top_method, - exponent=exponent, - idx_top=idx_first_valid_cell, - idx_top_3=idx_first_3_valid_cells, - top_rng=top_rng_edge, - component=y_profile, - cell_size=cell_size_edge, - cell_depth=cell_depth_edge, - depth_ens=depth_avg, - delta_t=1, - z=z_edge) + top_vel_y = QComp.discharge_top( + top_method=top_method, + exponent=exponent, + idx_top=idx_first_valid_cell, + idx_top_3=idx_first_3_valid_cells, + top_rng=top_rng_edge, + component=y_profile, + cell_size=cell_size_edge, + cell_depth=cell_depth_edge, + depth_ens=depth_avg, + delta_t=1, + z=z_edge, + ) top_vel_y = top_vel_y / top_rng_edge # Compute the bottom extrapolation for y-component - bot_vel_y = QComp.discharge_bot(bot_method=bot_method, - exponent=exponent, - idx_bot=idx_last_valid_cell, - bot_rng=bot_rng_edge, - component=y_profile, - cell_size=cell_size_edge, - cell_depth=cell_depth_edge, - depth_ens=depth_avg, - delta_t=1, - z=z_edge) + bot_vel_y = QComp.discharge_bot( + bot_method=bot_method, + exponent=exponent, + bot_rng=bot_rng_edge, + component=y_profile, + cell_size=cell_size_edge, + cell_depth=cell_depth_edge, + depth_ens=depth_avg, + delta_t=1, + z=z_edge, + ) bot_vel_y = bot_vel_y / bot_rng_edge - # Compute edge velocity vector including extrapolated velocities - v_edge_x = ((top_vel_x * top_rng_edge) + (x_profile_mean * mid_rng_edge) + (bot_vel_x * bot_rng_edge) - / depth_avg) - v_edge_y = ((top_vel_y * top_rng_edge) + (y_profile_mean * mid_rng_edge) + (bot_vel_y * bot_rng_edge) - / depth_avg) - - # Compute magnitude of edge velocity perpendicular to course made good - edge_vel_mag = (v_edge_x * -1 * unit_track_y) + (v_edge_y * unit_track_x) + # Compute edge velocity vector including extrapolated + # velocities + v_edge_x = ( + (top_vel_x * top_rng_edge) + + (x_profile_mean * mid_rng_edge) + + (bot_vel_x * bot_rng_edge) / depth_avg + ) + v_edge_y = ( + (top_vel_y * top_rng_edge) + + (y_profile_mean * mid_rng_edge) + + (bot_vel_y * bot_rng_edge) / depth_avg + ) + + # Compute magnitude of edge velocity perpendicular to course + # made good + edge_vel_mag = (v_edge_x * -1 * unit_track_y) + ( + v_edge_y * unit_track_x + ) # Determine edge sign - if transect.start_edge == 'Right': + if transect.start_edge == "Right": edge_vel_sign = -1 else: edge_vel_sign = 1 @@ -948,7 +1136,8 @@ class QComp(object): @staticmethod def edge_velocity_profile(edge_idx, transect): - """Compute edge velocity magnitude using the mean velocity of each ensemble. + """Compute edge velocity magnitude using the mean velocity of each + ensemble. 
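A hedged numeric sketch of the 1/6th power-law fit and integration this profile method applies to each ensemble (made-up values mirroring the computation further below):

    import numpy as np

    b = 1.0 / 6.0
    v = np.array([0.50, 0.46, 0.40])    # projected cell speeds (m/s)
    z = np.array([1.8, 1.3, 0.8])       # cell height above bed (m)
    dz = np.array([0.5, 0.5, 0.5])      # cell sizes (m)
    depth = 2.2

    a = (b + 1) * np.nansum(v * dz) / np.nansum(
        (z + 0.5 * dz) ** (b + 1) - (z - 0.5 * dz) ** (b + 1)
    )
    mean_speed = ((a / (b + 1)) * depth ** (b + 1)) / depth
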
The mean velocity of each ensemble is computed by first computing the mean direction of the velocities in the ensemble, @@ -1005,9 +1194,15 @@ class QComp(object): # Setup variables v_x = x_vel[:, n] v_y = y_vel[:, n] - depth_cell_size = transect.depths.bt_depths.depth_cell_size_m[:, selected_ensemble] - depth_cell_depth = transect.depths.bt_depths.depth_cell_depth_m[:, selected_ensemble] - depth = transect.depths.bt_depths.depth_processed_m[:, selected_ensemble] + depth_cell_size = transect.depths.bt_depths.depth_cell_size_m[ + :, selected_ensemble + ] + depth_cell_depth = transect.depths.bt_depths.depth_cell_depth_m[ + :, selected_ensemble + ] + depth = transect.depths.bt_depths.depth_processed_m[ + :, selected_ensemble + ] depth_cell_size[np.isnan(v_x)] = np.nan depth_cell_depth[np.isnan(v_x)] = np.nan @@ -1016,22 +1211,31 @@ class QComp(object): v_y_avg = np.nansum(v_y * depth_cell_size) / np.nansum(depth_cell_size) ens_dir, _ = cart2pol(v_x_avg, v_y_avg) v_unit[0], v_unit[1] = pol2cart(ens_dir, 1) - v_projected_mag = np.dot(np.hstack([v_x, v_y]), np.tile(v_unit, v_x.shape)) + v_projected_mag = np.dot( + np.hstack([v_x, v_y]), np.tile(v_unit, v_x.shape) + ) # Compute z value for each cell - z = (depth - depth_cell_depth) + z = depth - depth_cell_depth z[np.isnan(v_projected_mag)] = np.nan # Compute coefficient for 1/6th power curve b = 1.0 / 6.0 - a = (b + 1) * (np.nansum((v_projected_mag * depth_cell_size)) - / (np.nansum(((z + 0.5 * depth_cell_size)**(b + 1)) - - ((z - 0.5 * depth_cell_size)**(b + 1))))) + a = (b + 1) * ( + np.nansum((v_projected_mag * depth_cell_size)) + / ( + np.nansum( + ((z + 0.5 * depth_cell_size) ** (b + 1)) + - ((z - 0.5 * depth_cell_size) ** (b + 1)) + ) + ) + ) # Compute mean water speed by integrating power curve - vel_ensembles[n] = ((a / (b + 1)) * (depth**(b + 1))) / depth + vel_ensembles[n] = ((a / (b + 1)) * (depth ** (b + 1))) / depth - # Compute the mean velocity components from the mean water speed and direction + # Compute the mean velocity components from the mean water + # speed and direction u[n], v[n] = pol2cart(ens_dir, vel_ensembles) else: @@ -1041,7 +1245,8 @@ class QComp(object): u[n] = np.nan v[n] = np.nan - # Compute the mean velocity components of the edge velocity as the mean of the mean ensemble components + # Compute the mean velocity components of the edge velocity as the + # mean of the mean ensemble components u_avg = np.nanmean(u) v_avg = np.nanmean(v) @@ -1053,7 +1258,7 @@ class QComp(object): unit_water_x, unit_water_y = pol2cart(edge_vel_dir, 1) # Account for direction of boat travel - if transect.start_edge == 'Right': + if transect.start_edge == "Right": dir_sign = 1 else: dir_sign = -1 @@ -1069,13 +1274,19 @@ class QComp(object): b_vel_x = np.tile([np.nan], transect.boat_vel.u_processed_mps.shape) b_vel_y = np.tile([np.nan], transect.boat_vel.v_processed_mps.shape) - track_x = np.nancumsum(b_vel_x[in_transect_idx] * ens_delta_time[in_transect_idx]) - track_y = np.nancumsum(b_vel_y[in_transect_idx] * ens_delta_time[in_transect_idx]) + track_x = np.nancumsum( + b_vel_x[in_transect_idx] * ens_delta_time[in_transect_idx] + ) + track_y = np.nancumsum( + b_vel_y[in_transect_idx] * ens_delta_time[in_transect_idx] + ) boat_dir, boat_mag = cart2pol(track_x[-1], track_y[-1]) unit_track_x, unit_track_y = pol2cart(boat_dir, 1) # Compute cross product from unit vectors - unit_x_prod = (unit_water_x * unit_track_y - unit_water_y * unit_track_x) * dir_sign + unit_x_prod = ( + unit_water_x * unit_track_y - unit_water_y * unit_track_x + ) * 
dir_sign # Determine sign edge_vel_sign = np.sign(unit_x_prod) @@ -1084,7 +1295,8 @@ class QComp(object): @staticmethod def edge_coef(edge_loc, transect): - """Returns the edge coefficient based on the edge settings and transect object. + """Returns the edge coefficient based on the edge settings and + transect object. Parameters ---------- @@ -1096,20 +1308,21 @@ class QComp(object): Returns ------- coef: float - Edge coefficient for accounting for velocity distribution and edge shape + Edge coefficient for accounting for velocity distribution and + edge shape """ # Process appropriate edge type edge_select = getattr(transect.edges, edge_loc) - if edge_select.type == 'Triangular': + if edge_select.type == "Triangular": coef = 0.3535 - elif edge_select.type == 'Rectangular': + elif edge_select.type == "Rectangular": # Rectangular edge coefficient depends on the rec_edge_method. # 'Fixed' is compatible with the method used by TRDI. # 'Variable is compatible with the method used by SonTek - if transect.edges.rec_edge_method == 'Fixed': + if transect.edges.rec_edge_method == "Fixed": # Fixed Method coef = 0.91 @@ -1125,11 +1338,18 @@ class QComp(object): trans_select = getattr(transect.depths, transect.depths.selected) depth_edge = np.nanmean(trans_select.depth_processed_m[edge_idx]) - # Compute coefficient using equation 34 from Principle of River Discharge Measurement, SonTek, 2003 - coef = (1 - ((0.35 / 4) * (depth_edge / dist) * (1 - np.exp(-4 * (dist / depth_edge))))) / \ - (1 - 0.35 * np.exp(-4 * (dist / depth_edge))) - - elif edge_select.type == 'Custom': + # Compute coefficient using equation 34 from Principle of + # River Discharge Measurement, SonTek, 2003 + coef = ( + 1 + - ( + (0.35 / 4) + * (depth_edge / dist) + * (1 - np.exp(-4 * (dist / depth_edge))) + ) + ) / (1 - 0.35 * np.exp(-4 * (dist / depth_edge))) + + elif edge_select.type == "Custom": # Custom user supplied coefficient coef = edge_select.cust_coef @@ -1160,7 +1380,8 @@ class QComp(object): Returns ------- correction_factor: float - Correction factor to be applied to the discharge to correct for moving-bed effects + Correction factor to be applied to the discharge to correct for + moving-bed effects """ # Assign object properties to local variables @@ -1180,7 +1401,9 @@ class QComp(object): if q_orig != 0: # Compute near-bed velocities - nb_u, nb_v, unit_nb_u, unit_nb_v = QComp.near_bed_velocity(u, v, depth, depth_cell_depth) + nb_u, nb_v, unit_nb_u, unit_nb_v = QComp.near_bed_velocity( + u, v, depth, depth_cell_depth + ) nb_speed = np.sqrt(nb_u**2 + nb_v**2) nb_u_mean = np.nanmean(nb_u) nb_v_mean = np.nanmean(nb_v) @@ -1198,31 +1421,53 @@ class QComp(object): # Compute corrected cross product xprod = QComp.cross_product(transect=trans_data) - xprod_in = QComp.cross_product(w_vel_x=u_adj, - w_vel_y=v_adj, - b_vel_x=bt_u_adj, - b_vel_y=bt_v_adj, - start_edge=trans_data.start_edge) + xprod_in = QComp.cross_product( + w_vel_x=u_adj, + w_vel_y=v_adj, + b_vel_x=bt_u_adj, + b_vel_y=bt_v_adj, + start_edge=trans_data.start_edge, + ) xprod[:, in_transect_idx] = xprod_in # Compute corrected discharges - q_middle_cells = QComp.discharge_middle_cells(xprod=xprod, transect=trans_data, delta_t=delta_t) + q_middle_cells = QComp.discharge_middle_cells( + xprod=xprod, transect=trans_data, delta_t=delta_t + ) trans_select = getattr(trans_data.depths, trans_data.depths.selected) - num_top_method = {'Power': 0, 'Constant': 1, '3-Point': 2, None: -1} - q_top = extrapolate_top(xprod, trans_data.w_vel.valid_data[0, :, :], - 
num_top_method[trans_data.extrap.top_method], - trans_data.extrap.exponent, trans_data.in_transect_idx, - trans_select.depth_cell_size_m, - trans_select.depth_cell_depth_m, trans_select.depth_processed_m, delta_t, - -1, 0.1667) - num_bot_method = {'Power': 0, 'No Slip': 1, None: -1} - q_bot = extrapolate_bot(xprod, trans_data.w_vel.valid_data[0, :, :], - num_bot_method[trans_data.extrap.bot_method], - trans_data.extrap.exponent, trans_data.in_transect_idx, - trans_select.depth_cell_size_m, - trans_select.depth_cell_depth_m, trans_select.depth_processed_m, delta_t, - -1, 0.1667) - q_adj = np.nansum(np.nansum(q_middle_cells)) + np.nansum(q_top) + np.nansum(q_bot) + num_top_method = {"Power": 0, "Constant": 1, "3-Point": 2, None: -1} + q_top = extrapolate_top( + xprod, + trans_data.w_vel.valid_data[0, :, :], + num_top_method[trans_data.extrap.top_method], + trans_data.extrap.exponent, + trans_data.in_transect_idx, + trans_select.depth_cell_size_m, + trans_select.depth_cell_depth_m, + trans_select.depth_processed_m, + delta_t, + -1, + 0.1667, + ) + num_bot_method = {"Power": 0, "No Slip": 1, None: -1} + q_bot = extrapolate_bot( + xprod, + trans_data.w_vel.valid_data[0, :, :], + num_bot_method[trans_data.extrap.bot_method], + trans_data.extrap.exponent, + trans_data.in_transect_idx, + trans_select.depth_cell_size_m, + trans_select.depth_cell_depth_m, + trans_select.depth_processed_m, + delta_t, + -1, + 0.1667, + ) + q_adj = ( + np.nansum(np.nansum(q_middle_cells)) + + np.nansum(q_top) + + np.nansum(q_bot) + ) # Compute correction factor correction_factor = q_adj / q_orig @@ -1230,10 +1475,13 @@ class QComp(object): correction_factor = 1.0 return correction_factor - + @staticmethod - def stationary_correction_factor(top_q, middle_q, bottom_q, trans_data, mb_data, delta_t): - """Computes the discharge correction factor from stationary moving-bed tests. + def stationary_correction_factor( + top_q, middle_q, bottom_q, trans_data, mb_data, delta_t + ): + """Computes the discharge correction factor from stationary + moving-bed tests. 
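A minimal sketch of the zero-intercept regression this method uses to relate near-bed speed to moving-bed speed (illustrative data; with a single-column design matrix and no intercept term, the fit passes through the origin):

    import numpy as np

    near_bed_speed = np.array([0.0, 0.85, 0.92])   # m/s
    mb_speed = np.array([0.0, 0.06, 0.08])         # m/s

    x = np.vstack(near_bed_speed)                  # column vector
    corr_coef = np.linalg.lstsq(x, mb_speed, rcond=None)[0]
    mb_est = corr_coef[0] * 0.9                    # moving-bed speed at 0.9 m/s
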
Parameters ---------- @@ -1253,23 +1501,26 @@ class QComp(object): Returns ------- correction_factor: float - Correction factor to be applied to the discharge to correct for moving-bed effects + Correction factor to be applied to the discharge to correct for + moving-bed effects """ - + n_mb_tests = len(mb_data) n_sta_tests = 0 mb_speed = np.array([0]) near_bed_speed = np.array([0]) for n in range(n_mb_tests): - if (mb_data[n].type == 'Stationary') and mb_data[n].use_2_correct: + if (mb_data[n].type == "Stationary") and mb_data[n].use_2_correct: n_sta_tests += 1 mb_speed = np.append(mb_speed, mb_data[n].mb_spd_mps) - near_bed_speed = np.append(near_bed_speed, mb_data[n].near_bed_speed_mps) + near_bed_speed = np.append( + near_bed_speed, mb_data[n].near_bed_speed_mps + ) if n_sta_tests > 0: - # Compute linear regression coefficient forcing through zero to relate - # near-bed velocity to moving-bed velocity + # Compute linear regression coefficient forcing through zero to + # relate near-bed velocity to moving-bed velocity x = np.vstack(near_bed_speed) corr_coef = np.linalg.lstsq(x, mb_speed, rcond=None)[0] @@ -1285,7 +1536,9 @@ class QComp(object): bt_v = trans_data.boat_vel.bt_vel.v_processed_mps[in_transect_idx] # Compute near-bed velocities - nb_u, nb_v, unit_nb_u, unit_nb_v = QComp.near_bed_velocity(u, v, depth, depth_cell_depth) + nb_u, nb_v, unit_nb_u, unit_nb_v = QComp.near_bed_velocity( + u, v, depth, depth_cell_depth + ) # Compute moving-bed vector for each ensemble mb_u = corr_coef * nb_u @@ -1300,36 +1553,55 @@ class QComp(object): # Compute uncorrected discharge excluding the edges q_orig = top_q + middle_q + bottom_q if q_orig != 0: - # Compute corrected discharge excluding edges # Compute corrected cross product xprod = QComp.cross_product(transect=trans_data) - xprod_in = QComp.cross_product(w_vel_x=u_adj, - w_vel_y=v_adj, - b_vel_x=bt_u_adj, - b_vel_y=bt_v_adj, - start_edge=trans_data.start_edge) + xprod_in = QComp.cross_product( + w_vel_x=u_adj, + w_vel_y=v_adj, + b_vel_x=bt_u_adj, + b_vel_y=bt_v_adj, + start_edge=trans_data.start_edge, + ) xprod[:, in_transect_idx] = xprod_in # Compute corrected discharges - q_middle_cells = QComp.discharge_middle_cells(xprod=xprod, transect=trans_data, delta_t=delta_t) + q_middle_cells = QComp.discharge_middle_cells( + xprod=xprod, transect=trans_data, delta_t=delta_t + ) trans_select = getattr(trans_data.depths, trans_data.depths.selected) - num_top_method = {'Power': 0, 'Constant': 1, '3-Point': 2, None: -1} - q_top = extrapolate_top(xprod, - trans_data.w_vel.valid_data[0, :, :], - num_top_method[trans_data.extrap.top_method], - trans_data.extrap.exponent, trans_data.in_transect_idx, - trans_select.depth_cell_size_m, - trans_select.depth_cell_depth_m, trans_select.depth_processed_m, delta_t, - -1, 0.1667) - num_bot_method = {'Power': 0, 'No Slip': 1, None: -1} - q_bot = extrapolate_bot(xprod, - trans_data.w_vel.valid_data[0, :, :], - num_bot_method[trans_data.extrap.bot_method], - trans_data.extrap.exponent, trans_data.in_transect_idx, - trans_select.depth_cell_size_m, - trans_select.depth_cell_depth_m, trans_select.depth_processed_m, delta_t, - -1, 0.1667) - q_adj = np.nansum(np.nansum(q_middle_cells)) + np.nansum(q_top) + np.nansum(q_bot) + num_top_method = {"Power": 0, "Constant": 1, "3-Point": 2, None: -1} + q_top = extrapolate_top( + xprod, + trans_data.w_vel.valid_data[0, :, :], + num_top_method[trans_data.extrap.top_method], + trans_data.extrap.exponent, + trans_data.in_transect_idx, + trans_select.depth_cell_size_m, + 
trans_select.depth_cell_depth_m, + trans_select.depth_processed_m, + delta_t, + -1, + 0.1667, + ) + num_bot_method = {"Power": 0, "No Slip": 1, None: -1} + q_bot = extrapolate_bot( + xprod, + trans_data.w_vel.valid_data[0, :, :], + num_bot_method[trans_data.extrap.bot_method], + trans_data.extrap.exponent, + trans_data.in_transect_idx, + trans_select.depth_cell_size_m, + trans_select.depth_cell_depth_m, + trans_select.depth_processed_m, + delta_t, + -1, + 0.1667, + ) + q_adj = ( + np.nansum(np.nansum(q_middle_cells)) + + np.nansum(q_top) + + np.nansum(q_bot) + ) # Compute correction factor correction_factor = q_adj / q_orig @@ -1387,8 +1659,12 @@ class QComp(object): z_depth[n] = depth[n] - np.nanmean(bin_depth[idx, n], 0) u_mean[n] = np.nanmean(u[idx, n], 0) v_mean[n] = np.nanmean(v[idx, n], 0) - nb_u[n] = (u_mean[n] / z_depth[n] ** (1. / 6.)) * (z_near_bed[n] ** (1. / 6.)) - nb_v[n] = (v_mean[n] / z_depth[n] ** (1. / 6.)) * (z_near_bed[n] ** (1. / 6.)) + nb_u[n] = (u_mean[n] / z_depth[n] ** (1.0 / 6.0)) * ( + z_near_bed[n] ** (1.0 / 6.0) + ) + nb_v[n] = (v_mean[n] / z_depth[n] ** (1.0 / 6.0)) * ( + z_near_bed[n] ** (1.0 / 6.0) + ) speed_near_bed[n] = np.sqrt(nb_u[n] ** 2 + nb_v[n] ** 2) unit_nbu[n] = nb_u[n] / speed_near_bed[n] unit_nbv[n] = nb_v[n] / speed_near_bed[n] @@ -1397,16 +1673,17 @@ class QComp(object): @staticmethod def valid_edge_ens(trans_data): - """Determines which ensembles contain sufficient valid data to allow computation of discharge. - + """Determines which ensembles contain sufficient valid data to allow + computation of discharge. + Allows interpolated depth and boat velocity but requires valid non-interpolated water velocity. - + Parameters ---------- trans_data: TransectData Object of TransectData - + Returns ------- validEns: np.array(bool) @@ -1420,16 +1697,20 @@ class QComp(object): boat_vel_selected = getattr(trans_data.boat_vel, trans_data.boat_vel.selected) - # Depending on type of interpolation determine the valid navigation ensembles + # Depending on type of interpolation determine the valid navigation + # ensembles if boat_vel_selected is not None and len(boat_vel_selected.u_processed_mps) > 0: - if boat_vel_selected.interpolate == 'TRDI': + if boat_vel_selected.interpolate == "TRDI": nav_valid = boat_vel_selected.valid_data[0, in_transect_idx] else: - nav_valid = np.logical_not(np.isnan(boat_vel_selected.u_processed_mps[in_transect_idx])) + nav_valid = np.logical_not( + np.isnan(boat_vel_selected.u_processed_mps[in_transect_idx]) + ) else: nav_valid = np.tile(False, len(in_transect_idx)) - # Depending on type of interpolation determine the valid water track ensembles + # Depending on type of interpolation determine the valid water track + # ensembles if len(in_transect_idx) > 1: water_valid = np.any(trans_data.w_vel.valid_data[0, :, in_transect_idx], 1) else: @@ -1438,9 +1719,12 @@ class QComp(object): # Determine the ensembles with valid depth depths_select = getattr(trans_data.depths, trans_data.depths.selected) if depths_select is not None: - depth_valid = np.logical_not(np.isnan(depths_select.depth_processed_m[in_transect_idx])) + depth_valid = np.logical_not( + np.isnan(depths_select.depth_processed_m[in_transect_idx]) + ) - # Determine the ensembles with valid depth, navigation, and water data + # Determine the ensembles with valid depth, navigation, + # and water data valid_ens = np.all(np.vstack((nav_valid, water_valid, depth_valid)), 0) else: valid_ens = [] @@ -1449,7 +1733,8 @@ class QComp(object): @staticmethod def 
discharge_interpolated(q_top_ens, q_mid_cells, q_bot_ens, transect): - """Determines the amount of discharge in interpolated cells and ensembles. + """Determines the amount of discharge in interpolated cells and + ensembles. Parameters ---------- @@ -1477,8 +1762,9 @@ class QComp(object): # Method to compute invalid ensemble discharge depends on if # navigation data are interpolated (QRev) or if expanded delta # time is used to compute discharge for invalid ensembles(TRDI) - if transect.boat_vel.bt_vel.interpolate == 'None': - # Compute discharge in invalid ensembles for expanded delta time situation + if transect.boat_vel.bt_vel.interpolate == "None": + # Compute discharge in invalid ensembles for expanded delta time + # situation # Find index of invalid ensembles followed by a valid ensemble idx_next_valid = np.where(np.diff(np.hstack((-2, valid_ens))) == 1)[0] if len(idx_next_valid) == 0: @@ -1488,30 +1774,42 @@ class QComp(object): idx_next_valid += 1 # Sum discharge in valid ensembles following invalid ensemble - q_int_ens = np.nansum(q_mid_cells[:, idx_next_valid]) \ - + q_bot_ens[idx_next_valid] + q_top_ens[idx_next_valid] - - # Determine number of invalid ensembles preceding valid ensemble + q_int_ens = ( + np.nansum(q_mid_cells[:, idx_next_valid]) + + q_bot_ens[idx_next_valid] + + q_top_ens[idx_next_valid] + ) + + # Determine number of invalid ensembles preceding valid + # ensemble run_length_false, _ = QComp.compute_run_length(valid_ens) - # Adjust run_length_false for situation where the transect ends with invalid ensembles + # Adjust run_length_false for situation where the transect + # ends with invalid ensembles if len(run_length_false) > len(q_int_ens): run_length_false = run_length_false[:-1] - # Adjust discharge to remove the discharge that would have been measured in the valid ensemble - q_int_ens = np.nansum(q_int_ens * (run_length_false / (run_length_false+1))) + # Adjust discharge to remove the discharge that would have + # been measured in the valid ensemble + q_int_ens = np.nansum( + q_int_ens * (run_length_false / (run_length_false + 1)) + ) else: - # Compute discharge in invalid ensembles where all data were interpolated - q_int_ens = np.nansum(np.nansum(q_mid_cells[:, np.logical_not(valid_ens)])) \ - + np.nansum(q_top_ens[np.logical_not(valid_ens)]) \ - + np.nansum(q_bot_ens[np.logical_not(valid_ens)]) + # Compute discharge in invalid ensembles where all data were + # interpolated + q_int_ens = ( + np.nansum(np.nansum(q_mid_cells[:, np.logical_not(valid_ens)])) + + np.nansum(q_top_ens[np.logical_not(valid_ens)]) + + np.nansum(q_bot_ens[np.logical_not(valid_ens)]) + ) return q_int_cells, q_int_ens @staticmethod def compute_run_length(bool_vector): - """Compute how many false or true consecutive values are in every run of true or false in the + """Compute how many false or true consecutive values are in every + run of true or false in the provided boolean vector. Parameters @@ -1539,32 +1837,286 @@ class QComp(object): else: true_start = 1 false_start = 0 - run_length_false = run_length[bool_vector[false_start]::2] - run_length_true = run_length[bool_vector[true_start]::2] + run_length_false = run_length[bool_vector[false_start] :: 2] + run_length_true = run_length[bool_vector[true_start] :: 2] return run_length_false, run_length_true - # ============================================================================================ + def compute_topbot_speed(self, transect): + """Compute top and bottom extrapolated speed. 
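A hedged sketch of how an extrapolated layer "transport" becomes a speed in this method: the per-ensemble extrapolated component is divided by the unmeasured layer thickness, then the components are combined into a magnitude (hypothetical values).

    import numpy as np

    top_u_transport = np.array([0.12, 0.10])  # extrapolated u x thickness (m^2/s)
    top_v_transport = np.array([0.02, 0.03])
    top_cell_size = np.array([0.40, 0.38])    # unmeasured top thickness (m)

    u_top = top_u_transport / top_cell_size   # mean u in top layer (m/s)
    v_top = top_v_transport / top_cell_size
    top_speed = np.sqrt(u_top**2 + v_top**2)
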
+ + Parameters + ---------- + transect: TransectData + """ + + delta_t = np.tile(1.0, transect.w_vel.u_processed_mps.shape[1]) + + # Compute extrapolated cell size and depth + n_ensembles = transect.w_vel.u_processed_mps.shape[1] + depth_selected = getattr(transect.depths, transect.depths.selected) + top_cell_size = np.repeat(np.nan, n_ensembles) + top_cell_depth = np.repeat(np.nan, n_ensembles) + bottom_cell_size = np.repeat(np.nan, n_ensembles) + bottom_cell_depth = np.repeat(np.nan, n_ensembles) + for n in range(n_ensembles): + # Identify topmost 1 and 3 valid cells + idx_temp = np.where( + np.logical_not(np.isnan(transect.w_vel.u_processed_mps[:, n])) + )[0] + if len(idx_temp) > 0: + # Compute top + top_cell_size[n] = ( + depth_selected.depth_cell_depth_m[idx_temp[0], n] + - 0.5 * depth_selected.depth_cell_size_m[idx_temp[0], n] + ) + top_cell_depth[n] = top_cell_size[n] / 2 + # Compute bottom + bottom_cell_size[n] = depth_selected.depth_processed_m[n] - ( + depth_selected.depth_cell_depth_m[idx_temp[-1], n] + + 0.5 * depth_selected.depth_cell_size_m[idx_temp[-1], n] + ) + bottom_cell_depth[n] = ( + depth_selected.depth_processed_m[n] - 0.5 * bottom_cell_size[n] + ) + else: + top_cell_size[n] = 0 + top_cell_depth[n] = 0 + bottom_cell_size[n] = 0 + bottom_cell_depth[n] = 0 + # Compute top speed + u = ( + self.compute_top_component( + transect=transect, + component=transect.w_vel.u_processed_mps, + delta_t=delta_t, + ) + / top_cell_size + ) + v = ( + self.compute_top_component( + transect=transect, + component=transect.w_vel.v_processed_mps, + delta_t=delta_t, + ) + / top_cell_size + ) + self.top_speed = np.sqrt(u**2 + v**2) + + # Compute bottom speed + u = ( + self.compute_bottom_component( + transect=transect, + component=transect.w_vel.u_processed_mps, + delta_t=delta_t, + ) + / bottom_cell_size + ) + v = ( + self.compute_bottom_component( + transect=transect, + component=transect.w_vel.v_processed_mps, + delta_t=delta_t, + ) + / bottom_cell_size + ) + self.bottom_speed = np.sqrt(u**2 + v**2) + + @staticmethod + def compute_top_component(transect, component, delta_t): + """Compute the extrapolated top value for the specified component. 
+ + Parameters + ---------- + transect: TransectData + Object of TransectData + component: np.array(float) + Component to be extrapolated + delta_t: np.array(float) + Duration of each ensemble computed from QComp + + Returns + ------- + top_component: np.array(float) + Top extrapolated values + """ + + depth_selected = getattr(transect.depths, transect.depths.selected) + num_top_method = {"Power": 0, "Constant": 1, "3-Point": 2, None: -1} + try: + # Top extrapolated speed + top_component = extrapolate_top( + component, + transect.w_vel.valid_data[0, :, :], + num_top_method[transect.extrap.top_method], + transect.extrap.exponent, + transect.in_transect_idx, + depth_selected.depth_cell_size_m, + depth_selected.depth_cell_depth_m, + depth_selected.depth_processed_m, + delta_t, + -1, + 0.1667, + ) + except SystemError: + top_component = QComp.extrapolate_top( + xprod=component, + w_valid_data=transect.w_vel.valid_data[0, :, :], + transect_top_method=num_top_method[transect.extrap.top_method], + transect_exponent=transect.extrap.exponent, + in_transect_idx=transect.in_transect_idx, + depth_cell_size_m=depth_selected.depth_cell_size_m, + depth_cell_depth_m=depth_selected.depth_cell_depth_m, + depth_processed_m=depth_selected.depth_processed_m, + delta_t=delta_t, + ) + return top_component + + @staticmethod + def compute_bottom_component(transect, component, delta_t): + """Compute the extrapolated bottom value for the specified component. + + Parameters + ---------- + transect: TransectData + Object of TransectData + component: np.array(float) + Component to be extrapolated + delta_t: np.array(float) + Duration of each ensemble computed from QComp + + Returns + ------- + bottom_component: np.array(float) + Bottom extrapolated values + """ + + depth_selected = getattr(transect.depths, transect.depths.selected) + num_bot_method = {"Power": 0, "No Slip": 1, None: -1} + try: + bottom_component = extrapolate_bot( + component, + transect.w_vel.valid_data[0, :, :], + num_bot_method[transect.extrap.bot_method], + transect.extrap.exponent, + transect.in_transect_idx, + depth_selected.depth_cell_size_m, + depth_selected.depth_cell_depth_m, + depth_selected.depth_processed_m, + delta_t, + -1, + 0.1667, + ) + except SystemError: + bottom_component = QComp.extrapolate_bot( + xprod=component, + w_valid_data=transect.w_vel.valid_data[0, :, :], + transect_bot_method=num_bot_method[transect.extrap.bot_method], + transect_exponent=transect.extrap.exponent, + in_transect_idx=transect.in_transect_idx, + depth_cell_size_m=depth_selected.depth_cell_size_m, + depth_cell_depth_m=depth_selected.depth_cell_depth_m, + depth_processed_m=depth_selected.depth_processed_m, + delta_t=delta_t, + ) + return bottom_component + + def compute_edge_speed(self, transect): + + # Left edge + + # Determine what ensembles to use for edge computation. 
+        # The method of determining varies by manufacturer
+        edge_idx = QComp.edge_ensembles("left", transect)
+
+        # Average depth for the edge ensembles
+        depth_selected = getattr(transect.depths, transect.depths.selected)
+        depth = depth_selected.depth_processed_m[edge_idx]
+        depth_avg = np.nanmean(depth)
+
+        # Compute area
+        if transect.edges.left.type == "Triangular":
+            a = 0.5 * transect.edges.left.distance_m * depth_avg
+        elif transect.edges.left.type == "Rectangular":
+            a = transect.edges.left.distance_m * depth_avg
+        else:
+            cd = compute_edge_cd(transect.edges.left)
+            a = (
+                transect.edges.left.distance_m * depth_avg * cd
+                + 0.5 * transect.edges.left.distance_m * depth_avg * (1 - cd)
+            )
+        self.left_edge_speed = np.abs(self.left / a)
+
+        # Right edge
+
+        # Determine what ensembles to use for edge computation.
+        # The method of determining varies by manufacturer
+        edge_idx = QComp.edge_ensembles("right", transect)
+
+        # Average depth for the edge ensembles
+        depth_selected = getattr(transect.depths, transect.depths.selected)
+        depth = depth_selected.depth_processed_m[edge_idx]
+        depth_avg = np.nanmean(depth)
+
+        # Compute area
+        if transect.edges.right.type == "Triangular":
+            a = 0.5 * transect.edges.right.distance_m * depth_avg
+        elif transect.edges.right.type == "Rectangular":
+            a = transect.edges.right.distance_m * depth_avg
+        else:
+            cd = compute_edge_cd(transect.edges.right)
+            a = (
+                transect.edges.right.distance_m * depth_avg * cd
+                + 0.5 * transect.edges.right.distance_m * depth_avg * (1 - cd)
+            )
+        self.right_edge_speed = np.abs(self.right / a)
+
+    # ========================================================================
     # The methods below are not being used in the discharge computations.
-    # The methods for extrapolating the top and bottom discharge have been moved to separate files
-    # and compiled using Numba AOT. The methods below are included here for historical purposes
-    # and may provide an easier approach to adding new features/algorithms prior to recoding
-    # them in a manner that can be compiled using Numba AOT.
-    # =============================================================================================
+    # The methods for extrapolating the top and bottom discharge have been
+    # moved to separate files and compiled using Numba AOT. The methods below
+    # are included here for historical purposes and may provide an easier
+    # approach to adding new features/algorithms prior to recoding them in a
+    # manner that can be compiled using Numba AOT.
+    # ========================================================================
 
     @staticmethod
-    def extrapolate_top(xprod, transect, delta_t, top_method=None, exponent=None):
+    def extrapolate_top(
+        xprod,
+        w_valid_data,
+        transect_top_method,
+        transect_exponent,
+        in_transect_idx,
+        depth_cell_size_m,
+        depth_cell_depth_m,
+        depth_processed_m,
+        delta_t,
+        top_method=-1,
+        exponent=0.1667,
+    ):
        """Computes the extrapolated top discharge.
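A single-ensemble numeric sketch of the power-fit top integral computed in this method (illustrative values):

    exponent = 0.1667      # 1/6th power law
    coef = 0.9             # fitted power coefficient
    depth_ens = 3.0        # ensemble depth (m)
    top_rng = 0.6          # unmeasured range below the surface (m)
    delta_t = 1.2          # ensemble duration (s)

    q_top = delta_t * (coef / (exponent + 1)) * (
        depth_ens ** (exponent + 1) - (depth_ens - top_rng) ** (exponent + 1)
    )
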
Parameters ---------- xprod: np.array(float) Cross product computed from the cross product method - transect: TransectData - Object of TransectData + w_valid_data: np.array(bool) + Valid water data + transect_top_method: int + Stored top method (power = 0, constant = 1, 3-point = 2) + transect_exponent: float + Exponent for power fit + in_transect_idx: np.array(int) + Indices of ensembles in transect to be used for discharge + depth_cell_size_m: np.array(float) + Size of each depth cell in m + depth_cell_depth_m: np.array(float) + Depth of each depth cell in m + depth_processed_m: np.array(float) + Depth for each ensemble in m delta_t: np.array(float) Duration of each ensemble computed from QComp - top_method: str + top_method: int Specifies method to use for top extrapolation exponent: float Exponent to use for power extrapolation @@ -1575,62 +2127,84 @@ class QComp(object): Top extrapolated discharge for each ensemble """ - if top_method is None: - top_method = transect.extrap.top_method - exponent = transect.extrap.exponent - - # Get index for ensembles in moving-boat portion of transect - in_transect_idx = transect.in_transect_idx + if top_method == -1: + top_method = transect_top_method + exponent = transect_exponent # Compute top variables - idx_top, idx_top3, top_rng = QComp.top_variables(xprod, transect) + idx_top, idx_top3, top_rng = QComp.top_variables( + xprod, w_valid_data, depth_cell_size_m, depth_cell_depth_m + ) idx_top = idx_top[in_transect_idx] idx_top3 = idx_top3[:, in_transect_idx] top_rng = top_rng[in_transect_idx] # Get data from transect object - trans_select = getattr(transect.depths, transect.depths.selected) - cell_size = trans_select.depth_cell_size_m[:, in_transect_idx] - cell_depth = trans_select.depth_cell_depth_m[:, in_transect_idx] - depth_ens = trans_select.depth_processed_m[in_transect_idx] + cell_size = depth_cell_size_m[:, in_transect_idx] + cell_depth = depth_cell_depth_m[:, in_transect_idx] + depth_ens = depth_processed_m[in_transect_idx] # Compute z z = np.subtract(depth_ens, cell_depth) # Use only valid data valid_data = np.logical_not(np.isnan(xprod[:, in_transect_idx])) - z[np.logical_not(valid_data)] = np.nan - cell_size[np.logical_not(valid_data)] = np.nan - cell_depth[np.logical_not(valid_data)] = np.nan + for row in range(valid_data.shape[0]): + for col in range(valid_data.shape[1]): + if not valid_data[row, col]: + z[row, col] = np.nan + cell_size[row, col] = np.nan + cell_depth[row, col] = np.nan # Compute top discharge - q_top = QComp.discharge_top(top_method, exponent, idx_top, idx_top3, top_rng, - xprod[:, in_transect_idx], cell_size, cell_depth, - depth_ens, delta_t, z) + q_top = QComp.discharge_top( + top_method, + exponent, + idx_top, + idx_top3, + top_rng, + xprod[:, in_transect_idx], + cell_size, + cell_depth, + depth_ens, + delta_t, + z, + ) return q_top @staticmethod - def discharge_top(top_method, exponent, idx_top, idx_top_3, top_rng, component, cell_size, cell_depth, - depth_ens, delta_t, z): + def discharge_top( + top_method, + exponent, + idx_top, + idx_top_3, + top_rng, + component, + cell_size, + cell_depth, + depth_ens, + delta_t, + z, + ): """Computes the top extrapolated value of the provided component. 
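For reference, the explicit per-ensemble loop used below for Numba compatibility is equivalent to this vectorized form of the power-fit coefficient (a sketch; arrays are cells x ensembles, matching the removed vectorized code):

    import numpy as np

    def power_fit_coef(component, cell_size, z, exponent):
        numerator = (exponent + 1) * np.nansum(component * cell_size, 0)
        denominator = np.nansum(
            ((z + 0.5 * cell_size) ** (exponent + 1))
            - ((z - 0.5 * cell_size) ** (exponent + 1)),
            0,
        )
        coef = np.divide(numerator, denominator, where=denominator != 0)
        coef[denominator == 0] = np.nan
        return coef
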
Parameters
        ----------
-        top_method: str
-            Top extrapolation method (Power, Constant, 3-Point)
+        top_method: int
+            Top extrapolation method (Power = 0, Constant = 1, 3-Point = 2)
        exponent: float
            Exponent for the power extrapolation method
-        idx_top:
+        idx_top: np.array(int)
            Index to the topmost valid depth cell in each ensemble
-        idx_top_3:
+        idx_top_3: np.array(int)
            Index to the top 3 valid depth cells in each ensemble
        top_rng: np.array(float)
            Range from the water surface to the top of the topmost cell
        component: np.array(float)
            The variable to be extrapolated (xprod, u-velocity, v-velocity)
        cell_size: np.array(float)
-            Array of cellsizes (n cells x n ensembles)
+            Array of cell sizes (n cells x n ensembles)
        cell_depth: np.array(float)
            Depth of each cell (n cells x n ensembles)
        depth_ens: np.array(float)
@@ -1638,65 +2212,121 @@ class QComp(object):
        delta_t: np.array(float)
            Duration of each ensemble computed by QComp
        z: np.array(float)
-            Relative depth from the bottom of each depth cell computed in discharge top method
+            Relative depth from the bottom of each depth cell computed in
+            discharge top method

        Returns
        -------
-        top_value: total for the specified component integrated over the top range
+        top_value: np.array(float)
+            Total for the specified component integrated over the top range
        """

        # Initialize return
-        top_value = 0
+        top_value = np.array([0.0])

        # Top power extrapolation
-        if top_method == 'Power':
-            numerator = ((exponent + 1) * np.nansum(component * cell_size, 0))
-            denominator = np.nansum(((z + 0.5 * cell_size)**(exponent+1)) - ((z - 0.5 * cell_size)**(exponent+1)), 0)
-            coef = np.divide(numerator, denominator, where=denominator != 0)
-            coef[denominator == 0] = np.nan
-            top_value = delta_t * (coef / (exponent + 1)) * \
-                (depth_ens**(exponent + 1) - (depth_ens-top_rng)**(exponent + 1))
+        if top_method == 0:
+            coef = np.repeat(np.nan, int(component.shape[1]))
+
+            # Compute the coefficient for each ensemble
+            # Loops are used for Numba compile purposes
+
+            # Loop through ensembles
+            for col in range(component.shape[1]):
+                # Initialize variables
+                numerator = 0.0
+                numerator_valid = False
+                denominator_valid = False
+                denominator = 0.0
+
+                # Loop through depth cells in an ensemble
+                for row in range(component.shape[0]):
+
+                    # Compute the numerator
+                    numerator_temp = component[row, col] * cell_size[row, col]
+                    if np.logical_not(np.isnan(numerator_temp)):
+                        numerator_valid = True
+                        numerator = numerator + numerator_temp
+
+                    # Compute the denominator
+                    denominator_temp = (
+                        (z[row, col] + 0.5 * cell_size[row, col]) ** (exponent + 1)
+                    ) - ((z[row, col] - 0.5 * cell_size[row, col]) ** (exponent + 1))
+                    if (
+                        np.logical_not(np.isnan(denominator_temp))
+                        and denominator_temp != 0
+                    ):
+                        denominator_valid = True
+                        denominator = denominator + denominator_temp
+
+                # If both numerator and denominator are valid compute the coefficient
+                if numerator_valid and denominator_valid:
+                    coef[col] = (numerator * (1 + exponent)) / denominator
+
+            # Compute the top discharge for each ensemble
+            top_value = (
+                delta_t
+                * (coef / (exponent + 1))
+                * (
+                    depth_ens ** (exponent + 1)
+                    - (depth_ens - top_rng) ** (exponent + 1)
+                )
+            )

        # Top constant extrapolation
-        elif top_method == 'Constant':
+        elif top_method == 1:
            n_ensembles = len(delta_t)
-            top_value = np.tile([np.nan], n_ensembles)
+            top_value = np.repeat(np.nan, n_ensembles)
            for j in range(n_ensembles):
                if idx_top[j] >= 0:
                    top_value[j] = delta_t[j] * component[idx_top[j], j] * top_rng[j]

        # Top 3-point extrapolation
-        elif top_method == 
'3-Point': + elif top_method == 2: # Determine number of bins available in each profile valid_data = np.logical_not(np.isnan(component)) - n_bins = np.nansum(valid_data, 0) + n_bins = np.sum(valid_data, axis=0) # Determine number of ensembles n_ensembles = len(delta_t) # Preallocate qtop vector - top_value = np.tile([np.nan], n_ensembles) + top_value = np.repeat(np.nan, n_ensembles) + # Loop through ensembles for j in range(n_ensembles): + # Set default to constant if (n_bins[j] < 6) and (n_bins[j] > 0) and (idx_top[j] >= 0): top_value[j] = delta_t[j] * component[idx_top[j], j] * top_rng[j] # If 6 or more bins use 3-pt at top if n_bins[j] > 5: - sumd = np.nansum(cell_depth[idx_top_3[0:3, j], j]) - sumd2 = np.nansum(cell_depth[idx_top_3[0:3, j], j]**2) - sumq = np.nansum(component[idx_top_3[0:3, j], j]) - sumqd = np.nansum(component[idx_top_3[0:3, j], j] * cell_depth[idx_top_3[0:3, j], j]) + sumd = 0.0 + sumd2 = 0.0 + sumq = 0.0 + sumqd = 0.0 + + # Use loop to sum data from top 3 cells + for k in range(3): + if not np.isnan(cell_depth[idx_top_3[k, j], j]): + sumd = sumd + cell_depth[idx_top_3[k, j], j] + sumd2 = sumd2 + cell_depth[idx_top_3[k, j], j] ** 2 + sumq = sumq + component[idx_top_3[k, j], j] + sumqd = sumqd + ( + component[idx_top_3[k, j], j] + * cell_depth[idx_top_3[k, j], j] + ) delta = 3 * sumd2 - sumd**2 a = (3 * sumqd - sumq * sumd) / delta b = (sumq * sumd2 - sumqd * sumd) / delta + # Compute discharge for 3-pt fit - qo = (a * top_rng[j]**2) / 2 + b * top_rng[j] + qo = (a * top_rng[j] ** 2) / 2 + b * top_rng[j] top_value[j] = delta_t[j] * qo return top_value @staticmethod - def top_variables(xprod, transect): + def top_variables(xprod, w_valid_data, depth_cell_size_m, depth_cell_depth_m): """Computes the index to the top and top three valid cells in each ensemble and the range from the water surface to the top of the topmost cell. 
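# ------------------------------------------------------------------------
# An illustrative 3-point top fit mirroring the least-squares slope and
# intercept solution used above for top_method == 2; the three cells and
# their values are hypothetical.
import numpy as np

d = np.array([0.6, 0.85, 1.1])      # depths of the top 3 valid cells, m
q = np.array([0.50, 0.48, 0.45])    # component value in each cell
top_rng, delta_t = 0.5, 1.1

sumd, sumd2 = d.sum(), (d**2).sum()
sumq, sumqd = q.sum(), (q * d).sum()
delta = 3 * sumd2 - sumd**2
a = (3 * sumqd - sumq * sumd) / delta      # slope of component vs depth
b = (sumq * sumd2 - sumqd * sumd) / delta  # intercept
qo = (a * top_rng**2) / 2 + b * top_rng    # integral of (a*d + b) over top_rng
top_value = delta_t * qo
# ------------------------------------------------------------------------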
@@ -1704,32 +2334,35 @@ class QComp(object):
        ----------
        xprod: np.array(float)
            Cross product computed from the cross product method
-        transect: TransectData
-            Object of TransectData
+        w_valid_data: np.array(bool)
+            Valid water data
+        depth_cell_size_m: np.array(float)
+            Size of each depth cell in m
+        depth_cell_depth_m: np.array(float)
+            Depth of each depth cell in m

        Returns
        -------
-        idx_top: np.array
+        idx_top: np.array(int)
            Index to the topmost valid depth cell in each ensemble
-        idx_top_3: np.array
+        idx_top_3: np.array(int)
            Index to the top 3 valid depth cells in each ensemble
        top_rng: np.array(float)
            Range from the water surface to the top of the topmost cell
        """

        # Get data from transect object
-        valid_data1 = np.copy(transect.w_vel.valid_data[0, :, :])
+        valid_data1 = np.copy(w_valid_data)
        valid_data2 = np.logical_not(np.isnan(xprod))
-        valid_data = valid_data1 * valid_data2
-        trans_select = getattr(transect.depths, transect.depths.selected)
-        cell_size = trans_select.depth_cell_size_m
-        cell_depth = trans_select.depth_cell_depth_m
+        valid_data = np.logical_and(valid_data1, valid_data2)

        # Preallocate variables
-        n_ensembles = valid_data.shape[1]
-        idx_top = np.tile(-1, valid_data.shape[1]).astype(int)
-        idx_top_3 = np.tile(-1, (3, valid_data.shape[1])).astype(int)
-        top_rng = np.tile([np.nan], n_ensembles)
+        # NOTE: Numba does not support np.tile
+        n_ensembles = int(valid_data.shape[1])
+        idx_top = np.repeat(-1, int(valid_data.shape[1]))
+        idx_top_3 = np.ones((3, int(valid_data.shape[1])), dtype=np.int32)
+        idx_top_3[:] = int(-1)
+        top_rng = np.repeat(np.nan, n_ensembles)

        # Loop through ensembles
        for n in range(n_ensembles):
@@ -1738,9 +2371,13 @@ class QComp(object):
            if len(idx_temp) > 0:
                idx_top[n] = idx_temp[0]
                if len(idx_temp) > 2:
-                    idx_top_3[:, n] = idx_temp[0:3]
+                    for k in range(3):
+                        idx_top_3[k, n] = idx_temp[k]

                # Compute top range
-                top_rng[n] = cell_depth[idx_top[n], n] - 0.5 * cell_size[idx_top[n], n]
+                top_rng[n] = (
+                    depth_cell_depth_m[idx_top[n], n]
+                    - 0.5 * depth_cell_size_m[idx_top[n], n]
+                )
            else:
                top_rng[n] = 0
                idx_top[n] = 0
@@ -1748,21 +2385,45 @@ class QComp(object):
        return idx_top, idx_top_3, top_rng

    @staticmethod
-    def extrapolate_bot(xprod, transect, delta_t, bot_method=None, exponent=None):
+    def extrapolate_bot(
+        xprod,
+        w_valid_data,
+        transect_bot_method,
+        transect_exponent,
+        in_transect_idx,
+        depth_cell_size_m,
+        depth_cell_depth_m,
+        depth_processed_m,
+        delta_t,
+        bot_method=-1,
+        exponent=0.1667,
+    ):
        """Computes the extrapolated bottom discharge

        Parameters
        ----------
        xprod: np.array(float)
-            Cross product of the water and boat velocities
-        transect: TransectData
-            Object of TransectData
+            Cross product computed from the cross product method
+        w_valid_data: np.array(bool)
+            Valid water data
+        transect_bot_method: int
+            Stored bottom method (power = 0, no slip = 1)
+        transect_exponent: float
+            Exponent for power fit
+        in_transect_idx: np.array(int)
+            Indices of ensembles in transect to be used for discharge
+        depth_cell_size_m: np.array(float)
+            Size of each depth cell in m
+        depth_cell_depth_m: np.array(float)
+            Depth of each depth cell in m
+        depth_processed_m: np.array(float)
+            Depth for each ensemble in m
        delta_t: np.array(float)
-            Duration of each ensemble
-        bot_method: str
-            Bottom extrapolation method
+            Duration of each ensemble computed from QComp
+        bot_method: int
+            Specifies method to use for bottom extrapolation
        exponent: float
-            Bottom extrapolation exponent
+            Exponent to use for power extrapolation

        Returns
        -------
@@ -1771,49 +2432,70 
@@ class QComp(object): """ # Determine extrapolation methods and exponent - if bot_method is None: - bot_method = transect.extrap.bot_method - exponent = transect.extrap.exponent + if bot_method == -1: + bot_method = transect_bot_method + exponent = transect_exponent - # Get index for ensembles in moving-boat portion of transect - in_transect_idx = transect.in_transect_idx + # Use only data in transect + w_valid_data = w_valid_data[:, in_transect_idx] xprod = xprod[:, in_transect_idx] + cell_size = depth_cell_size_m[:, in_transect_idx] + cell_depth = depth_cell_depth_m[:, in_transect_idx] + depth_ens = depth_processed_m[in_transect_idx] + delta_t = delta_t[in_transect_idx] # Compute bottom variables - idx_bot, bot_rng = QComp.bot_variables(xprod, transect) - - # Get data from transect properties - trans_select = getattr(transect.depths, transect.depths.selected) - cell_size = trans_select.depth_cell_size_m[:, in_transect_idx] - cell_depth = trans_select.depth_cell_depth_m[:, in_transect_idx] - depth_ens = trans_select.depth_processed_m[in_transect_idx] + bot_rng = QComp.bot_variables( + xprod, w_valid_data, cell_size, cell_depth, depth_ens + ) # Compute z z = np.subtract(depth_ens, cell_depth) + + # Use only valid data valid_data = np.logical_not(np.isnan(xprod)) - z[np.logical_not(valid_data)] = np.nan - z[nan_less(z, 0)] = np.nan - cell_size[np.logical_not(valid_data)] = np.nan - cell_depth[np.logical_not(valid_data)] = np.nan + for row in range(valid_data.shape[0]): + for col in range(valid_data.shape[1]): + if not valid_data[row, col]: + z[row, col] = np.nan + cell_size[row, col] = np.nan + cell_depth[row, col] = np.nan + # Compute bottom discharge - q_bot = QComp.discharge_bot(bot_method, exponent, idx_bot, bot_rng, xprod, - cell_size, cell_depth, depth_ens, delta_t, z) + q_bot = QComp.discharge_bot( + bot_method, + exponent, + bot_rng, + xprod, + cell_size, + cell_depth, + depth_ens, + delta_t, + z, + ) return q_bot @staticmethod - def discharge_bot(bot_method, exponent, idx_bot, bot_rng, component, - cell_size, cell_depth, depth_ens, delta_t, z): + def discharge_bot( + bot_method, + exponent, + bot_rng, + component, + cell_size, + cell_depth, + depth_ens, + delta_t, + z, + ): """Computes the bottom extrapolated value of the provided component. 
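# ------------------------------------------------------------------------
# Sketch of the closing step of discharge_bot: once the power-fit
# coefficient is known, the unmeasured bottom discharge integrates the
# power law from the streambed up through bot_rng. All values below are
# illustrative per-ensemble stand-ins.
import numpy as np

p = 0.1667
coef = np.array([0.85, 0.90])      # per-ensemble power-fit coefficients
bot_rng = np.array([0.40, 0.35])   # streambed to bottom of last cell, m
delta_t = np.array([1.1, 1.0])     # ensemble durations, s

q_bot = delta_t * (coef / (p + 1)) * bot_rng ** (p + 1)
# ------------------------------------------------------------------------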
Parameters
        ----------
-        bot_method: str
+        bot_method: int
            Bottom extrapolation method (Power = 0, No Slip = 1)
        exponent: float
            Exponent for power and no slip
-        idx_bot:
-            Index to the bottom most valid depth cell in each ensemble
        bot_rng: np.array(float)
            Range from the streambed to the bottom of the bottom most cell
        component: np.array(float)
@@ -1832,94 +2514,183 @@ class QComp(object):

        Returns
        -------
        bot_value: np.array(float)
-            Total for the specified component integrated over the bottom range for each ensemble
+            Total for the specified component integrated over the bottom range
+            for each ensemble
        """

        # Initialize
-        coef = 0
+        coef = np.repeat(np.nan, int(component.shape[1]))

        # Bottom power extrapolation
-        if bot_method == 'Power':
-            numerator = ((exponent+1) * np.nansum(component * cell_size, 0))
-            denominator = np.nansum(((z + 0.5 * cell_size)**(exponent + 1)) - (z - 0.5 * cell_size)**(exponent + 1), 0)
-            coef = np.divide(numerator, denominator, where=denominator != 0)
-            coef[denominator == 0] = np.nan
+        if bot_method == 0:
+            # Compute the coefficient for each ensemble
+            # Loops are used for Numba compile purposes
+
+            # Loop through ensembles
+            for col in range(component.shape[1]):
+                numerator = 0.0
+                numerator_valid = False
+                denominator_valid = False
+                denominator = 0.0
+
+                # Loop through depth cells in an ensemble
+                for row in range(component.shape[0]):
+
+                    # Compute the numerator
+                    numerator_temp = component[row, col] * cell_size[row, col]
+                    if np.logical_not(np.isnan(numerator_temp)):
+                        numerator_valid = True
+                        numerator = numerator + numerator_temp
+
+                    # Compute the denominator
+                    denominator_temp = (
+                        (z[row, col] + 0.5 * cell_size[row, col]) ** (exponent + 1)
+                    ) - ((z[row, col] - 0.5 * cell_size[row, col]) ** (exponent + 1))
+                    if (
+                        np.logical_not(np.isnan(denominator_temp))
+                        and denominator_temp != 0
+                    ):
+                        denominator_valid = True
+                        denominator = denominator + denominator_temp
+
+                # If both numerator and denominator are valid compute the coefficient
+                if numerator_valid and denominator_valid:
+                    coef[col] = (numerator * (1 + exponent)) / denominator

        # Bottom no slip extrapolation
-        elif bot_method == 'No Slip':
+        elif bot_method == 1:
            # Valid data in the lower 20% of the water column or
            # the last valid depth cell are used to compute the no slip power fit
            cutoff_depth = 0.8 * depth_ens
-            depth_ok = (nan_greater(cell_depth, np.tile(cutoff_depth, (cell_depth.shape[0], 1))))
-            component_ok = np.logical_not(np.isnan(component))
-            use_ns = depth_ok * component_ok
-            for j in range(len(delta_t)):
-                if idx_bot[j] >= 0:
-                    use_ns[idx_bot[j], j] = 1
-
-            # Create cross product and z arrays for the data to be used in
-            # no slip computations
-            component_ns = np.copy(component)
-            component_ns[np.logical_not(use_ns)] = np.nan
-            z_ns = np.copy(z)
-            z_ns[np.logical_not(use_ns)] = np.nan
-            numerator = ((exponent + 1) * np.nansum(component_ns * cell_size, 0))
-            denominator = np.nansum(((z_ns + 0.5 * cell_size) ** (exponent + 1))
-                                    - ((z_ns - 0.5 * cell_size) ** (exponent + 1)), 0)
-            coef = np.divide(numerator, denominator, where=denominator != 0)
-            coef[denominator == 0] = np.nan
+
+            # Loop through the ensembles
+            for col in range(cell_depth.shape[1]):
+                numerator = 0.0
+                denominator = 0.0
+                numerator_valid = False
+                denominator_valid = False
+                cells_below_cutoff = False
+                last_cell_depth = np.nan
+                last_cell_size = np.nan
+                last_z = np.nan
+                last_component = np.nan
+
+                # Verify there are valid depth cutoffs
+                if np.any(np.logical_not(np.isnan(cutoff_depth))):
+
+                    # Loop through depth cells
+                    for 
row in range(cell_depth.shape[0]):
+
+                        # Identify last valid cell by end of loop
+                        if np.logical_not(np.isnan(cell_depth[row, col])):
+                            last_cell_depth = cell_depth[row, col]
+                            last_cell_size = cell_size[row, col]
+                            last_z = z[row, col]
+                            last_component = component[row, col]
+
+                        # Use all depth cells below the cutoff (1 per loop)
+                        if (cell_depth[row, col] - cutoff_depth[col]) >= 0:
+                            cells_below_cutoff = True
+
+                            # Compute numerator
+                            numerator_temp = (
+                                component[row, col] * cell_size[row, col]
+                            )
+                            if np.logical_not(np.isnan(numerator_temp)):
+                                numerator_valid = True
+                                numerator = numerator + numerator_temp
+
+                                # If numerator computed, compute denominator
+                                denominator_temp = (
+                                    (z[row, col] + 0.5 * cell_size[row, col])
+                                    ** (exponent + 1)
+                                ) - (
+                                    (z[row, col] - 0.5 * cell_size[row, col])
+                                    ** (exponent + 1)
+                                )
+                                if (
+                                    np.logical_not(np.isnan(denominator_temp))
+                                    and denominator_temp != 0
+                                ):
+                                    denominator_valid = True
+                                    denominator = denominator + denominator_temp
+
+                # If there are no cells below the cutoff, use the last valid depth cell
+                if np.logical_not(cells_below_cutoff):
+                    if np.logical_not(np.isnan(last_cell_depth)):
+                        # Compute numerator
+                        numerator_temp = last_component * last_cell_size
+                        if np.logical_not(np.isnan(numerator_temp)):
+                            numerator_valid = True
+                            numerator = numerator + numerator_temp
+
+                            # If numerator computed, compute denominator
+                            denominator_temp = (
+                                (last_z + 0.5 * last_cell_size) ** (exponent + 1)
+                            ) - ((last_z - 0.5 * last_cell_size) ** (exponent + 1))
+                            if (
+                                np.logical_not(np.isnan(denominator_temp))
+                                and denominator_temp != 0
+                            ):
+                                denominator_valid = True
+                                denominator = denominator + denominator_temp
+
+                # If both numerator and denominator are valid compute the coefficient
+                if numerator_valid and denominator_valid:
+                    coef[col] = (numerator * (1 + exponent)) / denominator

        # Compute the bottom discharge of each profile
-        bot_value = delta_t * (coef / (exponent + 1)) * (bot_rng**(exponent + 1))
+        bot_value = delta_t * (coef / (exponent + 1)) * (bot_rng ** (exponent + 1))

        return bot_value

    @staticmethod
-    def bot_variables(x_prod, transect):
-        """Computes the index to the bottom most valid cell in each ensemble and the range from
-        the bottom to the bottom of the bottom most cell.
+    def bot_variables(x_prod, w_valid_data, cell_size, cell_depth, depth_ens):
+        """Computes the range from the streambed to the bottom of the
+        bottommost valid depth cell in each ensemble. 
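# ------------------------------------------------------------------------
# Toy example of the bot_variables computation, assuming one ensemble: the
# bottom range is the gap between the streambed and the bottom of the last
# valid depth cell. The arrays below are illustrative.
import numpy as np

depth_ens = 3.0                             # processed depth, m
cell_depth = np.array([0.5, 1.0, 1.5, 2.0])
cell_size = np.full(4, 0.5)
xprod = np.array([0.2, 0.3, np.nan, 0.1])

valid = np.where(np.logical_not(np.isnan(xprod)))[0]
idx_bot = valid[-1]                         # bottommost valid cell
bot_rng = depth_ens - cell_depth[idx_bot] - 0.5 * cell_size[idx_bot]  # 0.75 m
# ------------------------------------------------------------------------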
Parameters
        ----------
        x_prod: np.array(float)
            Cross product computed from the cross product method
-        transect: TransectData
-            Object of TransectData
+        w_valid_data: np.array(bool)
+            Valid water data
+        cell_size: np.array(float)
+            Size of each depth cell in m
+        cell_depth: np.array(float)
+            Depth of each depth cell in m
+        depth_ens: np.array(float)
+            Processed depth for each ensemble

        Returns
        -------
-        idx_bot: np.array
-            Index to the bottom most valid depth cell in each ensemble
        bot_rng: np.array(float)
            Range from the streambed to the bottom of the bottom most cell
        """

        # Identify valid data
-        in_transect_idx = transect.in_transect_idx
-        valid_data1 = np.copy(transect.w_vel.valid_data[0, :, in_transect_idx].T)
+        valid_data1 = np.copy(w_valid_data)
        valid_data2 = np.logical_not(np.isnan(x_prod))
-        valid_data = valid_data1 * valid_data2
-
-        # Assign transect properties to local variables
-        trans_selected = getattr(transect.depths, transect.depths.selected)
-        cell_size = trans_selected.depth_cell_size_m[:, in_transect_idx]
-        cell_depth = trans_selected.depth_cell_depth_m[:, in_transect_idx]
-        depth_ens = trans_selected.depth_processed_m[in_transect_idx]
+        valid_data = np.logical_and(valid_data1, valid_data2)

        # Preallocate variables
-        n_ensembles = valid_data.shape[1]
-        idx_bot = np.tile(-1, (valid_data.shape[1])).astype(int)
-        bot_rng = np.tile([np.nan], n_ensembles)
+        n_ensembles = int(valid_data.shape[1])
+        bot_rng = np.repeat(np.nan, n_ensembles)

+        # Loop through each ensemble
        for n in range(n_ensembles):
+            # Identifying bottom most valid cell
            idx_temp = np.where(np.logical_not(np.isnan(x_prod[:, n])))[0]
            if len(idx_temp) > 0:
-                idx_temp = idx_temp[-1]
-                idx_bot[n] = idx_temp
+                idx_bot = idx_temp[-1]

                # Compute bottom range
-                bot_rng[n] = depth_ens[n] - cell_depth[idx_bot[n], n] - 0.5 * cell_size[idx_bot[n], n]
+                bot_rng[n] = (
+                    depth_ens[n] - cell_depth[idx_bot, n] - 0.5 * cell_size[idx_bot, n]
+                )
            else:
                bot_rng[n] = 0

-        return idx_bot, bot_rng
+        return bot_rng
diff --git a/Classes/SelectFit.py b/Classes/SelectFit.py
index bb771a0..c65d53b 100644
--- a/Classes/SelectFit.py
+++ b/Classes/SelectFit.py
@@ -60,12 +60,13 @@ class SelectFit(object):
    """

    def __init__(self):
-        """Intialize object and instance variables."""
+        """Initialize object and instance variables."""

-        self.fit_method = 'Automatic'  # User selected method Automatic or Manual
-        self.top_method = 'Power'
-        self.bot_method = 'Power'
-        self.exponent = '0.1667'
+        # User selected method Automatic or Manual
+        self.fit_method = "Automatic"
+        self.top_method = "Power"
+        self.bot_method = "Power"
+        self.exponent = "0.1667"
        self.exp_method = None
        self.u = None
        self.u_auto = None
@@ -73,20 +74,20 @@ class SelectFit(object):
        self.z_auto = None
        self.residuals = np.array([])
        self.coef = 0
-        self.bot_method_auto = 'Power'  # Selected extrapolation for top
-        self.top_method_auto = 'Power'  # Selected extrapolation for bottom
-        self.exponent_auto = 0.1667  # Selected exponent
-        self.top_fit_r2 = 0  # Top fit custom r^2
-        self.top_max_diff = 0  # Maximum difference between power and 3-pt at top
-        self.bot_diff = 0  # Difference between power and no slop at z = 0.1
-        self.bot_r2 = 0  # Bottom fit r^2
-        self.fit_r2 = 0  # Selected fit of selected power/no slip fit
-        self.ns_exponent = 0.1667  # No slip optimized exponent
-        self.pp_exponent = 0.1667  # Power Power optimized exponent
+        self.bot_method_auto = "Power"
+        self.top_method_auto = "Power"
+        self.exponent_auto = 0.1667
+        self.top_fit_r2 = 0
+        self.top_max_diff = 0
+        self.bot_diff 
= 0 + self.bot_r2 = 0 + self.fit_r2 = 0 + self.ns_exponent = 0.1667 + self.pp_exponent = 0.1667 self.top_r2 = 0 self.rsqr = 0 self.exponent_95_ci = 0 - self.data_type = 'q' + self.data_type = "q" def populate_data(self, normalized, fit_method, top=None, bot=None, exponent=None): """Determine selected fit. @@ -113,14 +114,13 @@ class SelectFit(object): update_fd = FitData() - if fit_method == 'Automatic': - # Compute power fit with optimized exponent as reference to determine - # if constant no slip will be more appropriate + if fit_method == "Automatic": + # Compute power fit with optimized exponent as reference to + # determine if constant no slip will be more appropriate ppobj = FitData() - ppobj.populate_data(norm_data=normalized, - top='Power', - bot='Power', - method='optimize') + ppobj.populate_data( + norm_data=normalized, top="Power", bot="Power", method="optimize" + ) # Store results in object self.pp_exponent = ppobj.exponent @@ -130,75 +130,103 @@ class SelectFit(object): # Begin automatic fit - # More than 6 cells are required to compute an optimized fit. For fewer - # than 7 cells the default power/power fit is selected due to lack of sufficient - # data for a good analysis + # More than 6 cells are required to compute an optimized fit. + # For fewer than 7 cells the default power/power fit is selected + # due to lack of sufficient data for a good analysis if len(self.residuals) > 6: - # DSM (6/4/2021) the top and bottom were mislabeled (even in Matlab). I corrected. The computations - # are unaffected as the top2 and bot2 are only used in the c_shape_condition equation - # c_shape_condition = (np.sign(bot2) * np.sign(top2) == np.sign(mid2) and np.abs(bot2 + top2) > 0.1) - # Compute the difference between the bottom two cells of data and the optimized power fit - bot2 = np.nansum(normalized.unit_normalized_med[valid_data[-2:]] - - ppobj.coef * normalized.unit_normalized_z[valid_data[-2:]] ** ppobj.exponent) - - # Compute the difference between the top two cells of data and the optimized power fit - top2 = np.nansum(normalized.unit_normalized_med[valid_data[:2]] - - ppobj.coef * normalized.unit_normalized_z[valid_data[:2]] ** ppobj.exponent) - - # Compute the difference between the middle two cells of data and the optimized power fit + # DSM (6/4/2021) the top and bottom were mislabeled + # (even in Matlab). I corrected. 
The computations
+            # are unaffected as the top2 and bot2 are only used in the
+            # c_shape_condition equation
+            # c_shape_condition = (np.sign(bot2) * np.sign(top2) ==
+            # np.sign(mid2) and np.abs(bot2 + top2) > 0.1)
+            # Compute the difference between the bottom two cells of
+            # data and the optimized power fit
+            bot2 = np.nansum(
+                normalized.unit_normalized_med[valid_data[-2:]]
+                - ppobj.coef
+                * normalized.unit_normalized_z[valid_data[-2:]] ** ppobj.exponent
+            )
+
+            # Compute the difference between the top two cells of data and
+            # the optimized power fit
+            top2 = np.nansum(
+                normalized.unit_normalized_med[valid_data[:2]]
+                - ppobj.coef
+                * normalized.unit_normalized_z[valid_data[:2]] ** ppobj.exponent
+            )
+
+            # Compute the difference between the middle two cells of data
+            # and the optimized power fit
            mid1 = int(np.floor(len(np.isnan(valid_data) == False) / 2)) - 1
-            mid2 = np.nansum(normalized.unit_normalized_med[valid_data[mid1:mid1 + 2]] -
-                             ppobj.coef * normalized.unit_normalized_z[valid_data[mid1:mid1 + 2]]
-                             ** ppobj.exponent)
+            mid2 = np.nansum(
+                normalized.unit_normalized_med[valid_data[mid1 : mid1 + 2]]
+                - ppobj.coef
+                * normalized.unit_normalized_z[valid_data[mid1 : mid1 + 2]]
+                ** ppobj.exponent
+            )

-            self.top_method_auto = 'Power'
-            self.bot_method_auto = 'Power'
+            self.top_method_auto = "Power"
+            self.bot_method_auto = "Power"

-            # Evaluate difference in data and power fit at water surface using a linear fit through the top 4
+            # Evaluate difference in data and power fit at water surface
+            # using a linear fit through the top 4
            # median cells and save results
            y = normalized.unit_normalized_med[valid_data[:4]]
            x = normalized.unit_normalized_z[valid_data[:4]]
            coeffs = np.polyfit(x, y, 1)
-            resid = y - (coeffs[0]*x + coeffs[1])
+            resid = y - (coeffs[0] * x + coeffs[1])
            corr = np.corrcoef(x, y)[0, 1]
-            self.top_fit_r2 = 1 - (np.sum(resid ** 2) / np.mean(np.abs(resid)))
+            self.top_fit_r2 = 1 - (np.sum(resid**2) / np.mean(np.abs(resid)))
            self.top_r2 = corr**2

            # Evaluate overall fit
-            # If the optimized power fit does not have an r^2 better than 0.8 or if the optimized
-            # exponent if 0.1667 falls within the 95% confidence interval of the optimized fit,
-            # there is insufficient justification to change the exponent from 0.1667
-            if (ppobj.r_squared < 0.8) or ((0.1667 > self.exponent_95_ci[0]) and (0.1667 < self.exponent_95_ci[1])):
-                # If an optimized exponent cannot be justified the linear fit is used to determine if a constant
-                # fit at the top is a better alternative than a power fit. If the power fit is the better
-                # alternative the exponent is set to the default 0.1667 and the data is refit
+            # If the optimized power fit does not have an r^2 better than
+            # 0.8 or if the optimized exponent of 0.1667 falls within the
+            # 95% confidence interval of the optimized fit,
+            # there is insufficient justification to change the exponent
+            # from 0.1667
+            if (ppobj.r_squared < 0.8) or (
+                (0.1667 > self.exponent_95_ci[0])
+                and (0.1667 < self.exponent_95_ci[1])
+            ):
+                # If an optimized exponent cannot be justified the linear
+                # fit is used to determine if a constant
+                # fit at the top is a better alternative than a power fit. 
+                # If the power fit is the better alternative the exponent is
+                # set to the default 0.1667 and the data is refit
                if np.abs(self.top_fit_r2 < 0.8 or self.top_r2 < 0.9):
                    ppobj = FitData()
-                    ppobj.populate_data(norm_data=normalized,
-                                        top='Power',
-                                        bot='Power',
-                                        method='Manual',
-                                        exponent=0.1667)
+                    ppobj.populate_data(
+                        norm_data=normalized,
+                        top="Power",
+                        bot="Power",
+                        method="Manual",
+                        exponent=0.1667,
+                    )

            # Evaluate fit of top and bottom portions of the profile
            # Save the selected exponent and associated fit statistics
            self.exponent_auto = ppobj.exponent
            self.fit_r2 = ppobj.r_squared

-            # Compute the difference at the water surface between a linear fit of the top 4 measured cells
-            # and the best selected power fit of the whole profile
+            # Compute the difference at the water surface between a linear
+            # fit of the top 4 measured cells and the best selected
+            # power fit of the whole profile
            self.top_max_diff = ppobj.u[-1] - np.sum(coeffs)

-            # Evaluate the difference at the bottom between power using the whole profile and power using
-            # only the bottom third
+            # Evaluate the difference at the bottom between power using
+            # the whole profile and power using only the bottom third
            ns_fd = FitData()
-            ns_fd.populate_data(normalized, 'Constant', 'No Slip', 'Optimize')
+            ns_fd.populate_data(normalized, "Constant", "No Slip", "Optimize")
            self.ns_exponent = ns_fd.exponent
            self.bot_r2 = ns_fd.r_squared
-            self.bot_diff = ppobj.u[np.round(ppobj.z, 2) == 0.1][0] \
+            self.bot_diff = (
+                ppobj.u[np.round(ppobj.z, 2) == 0.1][0]
                - ns_fd.u[np.round(ns_fd.z, 2) == 0.1][0]
+            )

            # Begin automatic selection logic
            # -----------------------------------
@@ -212,8 +240,13 @@ class SelectFit(object):
            # (b) the difference is either positive or the difference
            # of the top measured cell differs from the best
            # selected power fit by more than 5%.
-            top_condition = (np.abs(self.top_max_diff) > 0.1 and ((self.top_max_diff > 0)
-                             or np.abs(normalized.unit_normalized_med[valid_data[0]] - ppobj.u[-1]) > 0.05))
+            top_condition = np.abs(self.top_max_diff) > 0.1 and (
+                (self.top_max_diff > 0)
+                or np.abs(
+                    normalized.unit_normalized_med[valid_data[0]] - ppobj.u[-1]
+                )
+                > 0.05
+            )

            # OR

@@ -223,15 +256,16 @@ class SelectFit(object):
            # and the selected best power fit of the whole profile
            # is greater than 10% and (b) the optimized no slip fit has
            # an r^2 greater than 0.6.
-            bottom_condition = ((np.abs(self.bot_diff) > 0.1) and self.bot_r2 > 0.6)
+            bottom_condition = (np.abs(self.bot_diff) > 0.1) and self.bot_r2 > 0.6

            # OR

            # 3) Flow is bidirectional. The sign of the top of the
            # profile is different from the sign of the bottom of
            # the profile.
-            bidirectional_condition = (np.sign(normalized.unit_normalized_med[valid_data[0]])
-                                       != np.sign(normalized.unit_normalized_med[valid_data[-1]]))
+            bidirectional_condition = np.sign(
+                normalized.unit_normalized_med[valid_data[0]]
+            ) != np.sign(normalized.unit_normalized_med[valid_data[-1]])

            # OR

            # 4) The profile is C-shaped. This is determined by
@@ -241,14 +275,23 @@ class SelectFit(object):
            # power fit and (b) the combined difference of the top
            # and bottom difference from the best selected power
            # fit being greater than 10%. 
- c_shape_condition = (np.sign(bot2) * np.sign(top2) == np.sign(mid2) and np.abs(bot2 + top2) > 0.1) - - if top_condition or bottom_condition or bidirectional_condition or c_shape_condition: + c_shape_condition = ( + np.sign(bot2) * np.sign(top2) == np.sign(mid2) + and np.abs(bot2 + top2) > 0.1 + ) + + if ( + top_condition + or bottom_condition + or bidirectional_condition + or c_shape_condition + ): # Set the bottom to no slip - self.bot_method_auto = 'No Slip' - # If the no slip fit with an optimized exponent does not have r^2 better than 0.8 use - # the default 0.1667 for the no slip exponent + self.bot_method_auto = "No Slip" + # If the no slip fit with an optimized exponent does not + # have r^2 better than 0.8 use the default 0.1667 for + # the no slip exponent if ns_fd.r_squared > 0.8: self.exponent_auto = ns_fd.exponent self.fit_r2 = ns_fd.r_squared @@ -256,9 +299,11 @@ class SelectFit(object): self.exponent_auto = 0.1667 self.fit_r2 = np.nan - # Use the no slip 95% confidence intervals if they are available + # Use the no slip 95% confidence intervals if they are + # available if ns_fd.exponent_95_ci is not None and np.all( - np.isnan(ns_fd.exponent_95_ci) == False): + np.isnan(ns_fd.exponent_95_ci) == False + ): self.exponent_95_ci[0] = ns_fd.exponent_95_ci[0] self.exponent_95_ci[1] = ns_fd.exponent_95_ci[1] else: @@ -266,34 +311,37 @@ class SelectFit(object): self.exponent_95_ci[1] = np.nan # Set the top method to constant - self.top_method_auto = 'Constant' + self.top_method_auto = "Constant" else: - # Leave fit power/power and set the best selected optimized exponent as the automatic fit exponent + # Leave fit power/power and set the best selected + # optimized exponent as the automatic fit exponent self.exponent_auto = ppobj.exponent else: - # If the data are insufficient for a valid analysis use the power/power fit - # with the default 0.1667 exponent - self.top_method_auto = 'Power' - self.bot_method_auto = 'Power' + # If the data are insufficient for a valid analysis use the + # power/power fit with the default 0.1667 exponent + self.top_method_auto = "Power" + self.bot_method_auto = "Power" self.exponent_auto = 0.1667 self.ns_exponent = 0.1667 # Update the fit using the automatically selected methods - update_fd.populate_data(norm_data=normalized, - top=self.top_method_auto, - bot=self.bot_method_auto, - method='Manual', - exponent=self.exponent_auto) + update_fd.populate_data( + norm_data=normalized, + top=self.top_method_auto, + bot=self.bot_method_auto, + method="Manual", + exponent=self.exponent_auto, + ) self.u = update_fd.u self.u_auto = update_fd.u self.z_auto = update_fd.z self.z = update_fd.z - elif fit_method == 'Manual': + elif fit_method == "Manual": # Identify changes in fit settings if top is None: @@ -304,11 +352,13 @@ class SelectFit(object): exponent = self.exponent # Update fit with manual settings - update_fd.populate_data(norm_data=normalized, - top=top, - bot=bot, - method=fit_method, - exponent=exponent) + update_fd.populate_data( + norm_data=normalized, + top=top, + bot=bot, + method=fit_method, + exponent=exponent, + ) self.u = update_fd.u self.z = update_fd.z @@ -322,21 +372,21 @@ class SelectFit(object): @staticmethod def qrev_mat_in(mat_data): - """Processes the Matlab data structure to obtain a list of NormData objects containing transect - data from the Matlab data structure. + """Processes the Matlab data structure to obtain a list of NormData + objects containing transect data from the Matlab data structure. 
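# ------------------------------------------------------------------------
# Compact restatement of the four automatic-selection triggers described
# above, with hypothetical scalar stand-ins for the fitted statistics;
# top_cell_diff stands in for the top-cell deviation from the power fit.
import numpy as np

top_max_diff, top_cell_diff = 0.12, 0.06
bot_diff, bot_r2 = 0.15, 0.7
top2, mid2, bot2 = 0.08, -0.05, 0.07
top_sign, bot_sign = 1.0, 1.0

top_condition = abs(top_max_diff) > 0.1 and (top_max_diff > 0 or top_cell_diff > 0.05)
bottom_condition = abs(bot_diff) > 0.1 and bot_r2 > 0.6
bidirectional_condition = np.sign(top_sign) != np.sign(bot_sign)
c_shape_condition = (np.sign(bot2) * np.sign(top2) == np.sign(mid2)
                     and abs(bot2 + top2) > 0.1)

# Any one trigger switches the fit to constant top / no slip bottom
use_no_slip = (top_condition or bottom_condition
               or bidirectional_condition or c_shape_condition)
# ------------------------------------------------------------------------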
-        Parameters
-        ----------
-        mat_data: mat_struct
-           Matlab data structure obtained from sio.loadmat
+        Parameters
+        ----------
+        mat_data: mat_struct
+            Matlab data structure obtained from sio.loadmat

-        Returns
-        -------
-        norm_data: list
-            List of NormData objects
-        """
+        Returns
+        -------
+        norm_data: list
+            List of NormData objects
+        """
        fit_data = []
-        if hasattr(mat_data, 'selFit'):
+        if hasattr(mat_data, "selFit"):
            for n, data in enumerate(mat_data.selFit):
                temp = SelectFit()
                temp.populate_from_qrev_mat(data, mat_data.normData[n])
@@ -344,7 +394,8 @@ class SelectFit(object):
        return fit_data

    def populate_from_qrev_mat(self, mat_data, norm_data):
-        """Populates the object using data from previously saved QRev Matlab file.
+        """Populates the object using data from previously saved QRev Matlab
+        file.

        Parameters
        ----------
diff --git a/Classes/SensorData.py b/Classes/SensorData.py
index 0c8f869..70298c6 100644
--- a/Classes/SensorData.py
+++ b/Classes/SensorData.py
@@ -2,7 +2,8 @@ import numpy as np

 class SensorData(object):
-    """Class stores data for pitch, roll, temperature, salinity, and speed of sound and its source/
+    """Class stores data for pitch, roll, temperature, salinity, and speed of
+    sound and its source.

    Attributes
    ----------
@@ -13,14 +14,14 @@ class SensorData(object):
    source: str
        Source of data, examples Int. Sensor, Ext. Sensor, User
    """
-    
+
    def __init__(self):
        """Initializes class and variables."""

        self.data = None
        self.data_orig = None
        self.source = None
-    
+
    def populate_data(self, data_in, source_in):
        """Store data in class.

@@ -37,7 +38,8 @@ class SensorData(object):
        self.source = source_in

    def populate_from_qrev_mat(self, mat_data):
-        """Populates the object using data from previously saved QRev Matlab file.
+        """Populates the object using data from previously saved QRev Matlab
+        file.

        Parameters
        ----------
@@ -60,7 +62,7 @@ class SensorData(object):
        else:
            self.data_orig = np.array([float(mat_data.dataOrig)])
        self.source = mat_data.source
-    
+
    def change_data(self, data_in):
        """Change data to be applied in computations.

@@ -70,7 +72,7 @@ class SensorData(object):
        """

        self.data = data_in
-    
+
    def set_source(self, source_in):
        """Change source of data.

diff --git a/Classes/SensorStructure.py b/Classes/SensorStructure.py
index 20a7cf7..11ac78a 100644
--- a/Classes/SensorStructure.py
+++ b/Classes/SensorStructure.py
@@ -17,14 +17,14 @@ class SensorStructure(object):
    self.user: SensorData
        Contains user supplied value, object of SensorData
    """
-    
+
    def __init__(self):
        """Initialize class and set variable to None."""

-        self.selected = None  # The selected sensor reference name ('internal', 'external', 'user')
-        self.internal = None  # Contains the data from the internal sensor
-        self.external = None  # Contains the data from an external sensor
-        self.user = None  # Contains user supplied value
+        self.selected = None
+        self.internal = None
+        self.external = None
+        self.user = None

    def populate_from_qrev_mat(self, mat_data, heading=False):
        """Populates the object using data from previously saved QRev Matlab file. 
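# ------------------------------------------------------------------------
# A minimal usage sketch for SensorData, assuming the Classes package layout
# shown in this patch is importable; the temperature values are illustrative.
import numpy as np
from Classes.SensorData import SensorData

temperature = SensorData()
temperature.populate_data(
    data_in=np.array([18.2, 18.3, 18.3]), source_in="Int. Sensor"
)
temperature.change_data(np.array([18.0, 18.1, 18.1]))  # apply a user correction
temperature.set_source("User")
# ------------------------------------------------------------------------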
@@ -61,7 +61,7 @@ class SensorStructure(object):
            self.user = HeadingData()
            self.user.populate_from_qrev_mat(mat_data.user)
        self.selected = mat_data.selected
-    
+
    def set_selected(self, selected_name):
        """Set the selected source for the specified object

diff --git a/Classes/Sensors.py b/Classes/Sensors.py
index 5734ff7..fc0c1ff 100644
--- a/Classes/Sensors.py
+++ b/Classes/Sensors.py
@@ -7,6 +7,8 @@ class Sensors(object):

    Attributes
    ----------
+    battery_voltage: SensorStructure
+        Battery voltage supplying power to ADCP
    heading_deg: HeadingData
        Object of HeadingData.
    pitch_deg: SensorStructure
@@ -24,12 +26,13 @@ class Sensors(object):

    def __init__(self):
        """Initialize class and create variable objects"""

-        self.heading_deg = SensorStructure()  # Object of HeadingData
-        self.pitch_deg = SensorStructure()  # Pitch data, object of SensorStructure
-        self.roll_deg = SensorStructure()  # Roll data, object of SensorStructure
-        self.temperature_deg_c = SensorStructure()  # Temperature data, object of SensorStructure
-        self.salinity_ppt = SensorStructure()  # Salinity data, object of SensorStructure
-        self.speed_of_sound_mps = SensorStructure()  # Speed of sound, object of SensorStructure
+        self.heading_deg = SensorStructure()
+        self.pitch_deg = SensorStructure()
+        self.roll_deg = SensorStructure()
+        self.temperature_deg_c = SensorStructure()
+        self.salinity_ppt = SensorStructure()
+        self.speed_of_sound_mps = SensorStructure()
+        self.battery_voltage = SensorStructure()

    def populate_from_qrev_mat(self, transect):
        """Populates the object using data from previously saved QRev Matlab file.

@@ -39,19 +42,25 @@ class Sensors(object):
        transect: mat_struct
            Matlab data structure obtained from sio.loadmat
        """
-        if hasattr(transect, 'sensors'):
-            if hasattr(transect.sensors, 'heading_deg'):
-                self.heading_deg.populate_from_qrev_mat(transect.sensors.heading_deg, heading=True)
-            if hasattr(transect.sensors, 'pitch_deg'):
+        if hasattr(transect, "sensors"):
+            if hasattr(transect.sensors, "heading_deg"):
+                self.heading_deg.populate_from_qrev_mat(
+                    transect.sensors.heading_deg, heading=True
+                )
+            if hasattr(transect.sensors, "pitch_deg"):
                self.pitch_deg.populate_from_qrev_mat(transect.sensors.pitch_deg)
-            if hasattr(transect.sensors, 'roll_deg'):
+            if hasattr(transect.sensors, "roll_deg"):
                self.roll_deg.populate_from_qrev_mat(transect.sensors.roll_deg)
-            if hasattr(transect.sensors, 'salinity_ppt'):
+            if hasattr(transect.sensors, "salinity_ppt"):
                self.salinity_ppt.populate_from_qrev_mat(transect.sensors.salinity_ppt)
-            if hasattr(transect.sensors, 'speedOfSound_mps'):
-                self.speed_of_sound_mps.populate_from_qrev_mat(transect.sensors.speedOfSound_mps)
-            if hasattr(transect.sensors, 'temperature_degC'):
-                self.temperature_deg_c.populate_from_qrev_mat(transect.sensors.temperature_degC)
+            if hasattr(transect.sensors, "speedOfSound_mps"):
+                self.speed_of_sound_mps.populate_from_qrev_mat(
+                    transect.sensors.speedOfSound_mps
+                )
+            if hasattr(transect.sensors, "temperature_degC"):
+                self.temperature_deg_c.populate_from_qrev_mat(
+                    transect.sensors.temperature_degC
+                )

    @staticmethod
    def speed_of_sound(temperature, salinity):
@@ -65,10 +74,15 @@ class Sensors(object):
            Water salinity at transducer face, in ppt. 
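# ------------------------------------------------------------------------
# Numeric illustration of the simplified speed-of-sound equation used just
# below, with hypothetical inputs of 20 deg C and 0.2 ppt (fresh water).
temperature = 20.0
salinity = 0.2
sos = (1449.2 + 4.6 * temperature - 0.055 * temperature**2
       + 0.00029 * temperature**3
       + (1.34 - 0.01 * temperature) * (salinity - 35.0))
# sos evaluates to roughly 1482 m/s for fresh water near 20 deg C
# ------------------------------------------------------------------------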
""" - # Not provided in RS Matlab file computed from equation used in TRDI BBSS, from Urick (1983) - # May not be the same equation as used by SonTek - sos = 1449.2 + 4.6 * temperature - 0.055 * temperature**2 + 0.00029 * temperature**3 \ + # Not provided in RS Matlab file computed from equation used in + # TRDI BBSS + sos = ( + 1449.2 + + 4.6 * temperature + - 0.055 * temperature**2 + + 0.00029 * temperature**3 + (1.34 - 0.01 * temperature) * (salinity - 35.0) + ) return sos @@ -79,25 +93,34 @@ class Sensors(object): sr = np.sqrt(np.abs(s)) # S ** 2 TERM - d = 1.727E-3 - 7.9836E-6 * p + d = 1.727e-3 - 7.9836e-6 * p # S ** 3 / 2 TERM - b1 = 7.3637E-5 + 1.7945E-7 * t - b0 = -1.922E-2 - 4.42E-5 * t + b1 = 7.3637e-5 + 1.7945e-7 * t + b0 = -1.922e-2 - 4.42e-5 * t b = b0 + b1 * p # S ** 1 TERM - a3 = (-3.389E-13 * t + 6.649E-12) * t + 1.100E-10 - a2 = ((7.988E-12 * t - 1.6002E-10) * t + 9.1041E-9) * t - 3.9064E-7 - a1 = (((-2.0122E-10 * t + 1.0507E-8) * t - 6.4885E-8) * t - 1.2580E-5) * t + 9.4742E-5 - a0 = (((-3.21E-8 * t + 2.006E-6) * t + 7.164E-5) * t - 1.262E-2) * t + 1.389 + a3 = (-3.389e-13 * t + 6.649e-12) * t + 1.100e-10 + a2 = ((7.988e-12 * t - 1.6002e-10) * t + 9.1041e-9) * t - 3.9064e-7 + a1 = ( + ((-2.0122e-10 * t + 1.0507e-8) * t - 6.4885e-8) * t - 1.2580e-5 + ) * t + 9.4742e-5 + a0 = (((-3.21e-8 * t + 2.006e-6) * t + 7.164e-5) * t - 1.262e-2) * t + 1.389 a = ((a3 * p + a2) * p + a1) * p + a0 # S ** 0 TERM - c3 = (-2.3643E-12 * t + 3.8504E-10) * t - 9.7729E-9 - c2 = (((1.0405E-12 * t - 2.5335E-10) * t + 2.5974E-8) * t - 1.7107E-6) * t + 3.1260E-5 - c1 = (((-6.1185E-10 * t + 1.3621E-7) * t - 8.1788E-6) * t + 6.8982E-4) * t + 0.153563 - c0 = ((((3.1464E-9 * t - 1.47800E-6) * t + 3.3420E-4) * t - 5.80852E-2) * t + 5.03711) * t + 1402.388 + c3 = (-2.3643e-12 * t + 3.8504e-10) * t - 9.7729e-9 + c2 = ( + ((1.0405e-12 * t - 2.5335e-10) * t + 2.5974e-8) * t - 1.7107e-6 + ) * t + 3.1260e-5 + c1 = ( + ((-6.1185e-10 * t + 1.3621e-7) * t - 8.1788e-6) * t + 6.8982e-4 + ) * t + 0.153563 + c0 = ( + (((3.1464e-9 * t - 1.47800e-6) * t + 3.3420e-4) * t - 5.80852e-2) * t + + 5.03711 + ) * t + 1402.388 c = ((c3 * p + c2) * p + c1) * p + c0 # SOUND SPEED @@ -118,5 +141,7 @@ class Sensors(object): temps = np.array([]) for transect in transects: if transect.checked: - temps = np.append(temps, transect.sensors.temperature_deg_c.internal.data) + temps = np.append( + temps, transect.sensors.temperature_deg_c.internal.data + ) return np.nanmean(temps) diff --git a/Classes/TransectData.py b/Classes/TransectData.py index 1d7a2ee..8c3f824 100644 --- a/Classes/TransectData.py +++ b/Classes/TransectData.py @@ -1,12 +1,10 @@ import os -import time import warnings -import concurrent.futures import numpy as np from datetime import datetime from datetime import timezone from scipy import signal, fftpack -# from Classes.Pd0TRDI import Pd0TRDI + from Classes.Pd0TRDI_2 import Pd0TRDI from Classes.DepthStructure import DepthStructure from Classes.WaterData import WaterData @@ -19,9 +17,17 @@ from Classes.SensorData import SensorData from Classes.HeadingData import HeadingData from Classes.DateTime import DateTime from Classes.InstrumentData import InstrumentData -from Classes.MultiThread import MultiThread from Classes.CoordError import CoordError -from MiscLibs.common_functions import nandiff, cosd, arctand, tand, nans, cart2pol, rad2azdeg, nan_less +from MiscLibs.common_functions import ( + nandiff, + cosd, + arctand, + tand, + nans, + cart2pol, + rad2azdeg, + nan_less, +) class TransectData(object): @@ -36,13 +42,15 
@@ class TransectData(object):
    w_vel: WaterData
        Object of WaterData
    boat_vel: BoatStructure
-        Object of BoatStructure containing objects of BoatData for BT, GGA, and VTG
+        Object of BoatStructure containing objects of BoatData for BT, GGA,
+        and VTG
    gps: GPSData
        Object of GPSData
    sensors: SensorData
        Object of SensorData
    depths: DepthStructure
-        Object of DepthStructure containing objects of Depth data for bt_depths, vb_depths, ds_depths)
+        Object of DepthStructure containing objects of Depth data for
+        bt_depths, vb_depths, ds_depths)
    edges: Edges
        Object of Edges (left and right object of clsEdgeData)
    extrap: ExtrapData
@@ -54,26 +62,28 @@ class TransectData(object):
    date_time: DateTime
        Object of DateTime
    checked: bool
-        Setting for if transect was checked for use in mmt file assumed checked for SonTek
+        Indicates if the transect was checked for use in the mmt file;
+        assumed checked for SonTek
    in_transect_idx: np.array(int)
-        Index of ensemble data associated with the moving-boat portion of the transect
+        Index of ensemble data associated with the moving-boat portion of the
+        transect
    """

    def __init__(self):
-        self.adcp = None  # object of clsInstrument
-        self.file_name = None  # filename of transect data file
-        self.w_vel = None  # object of clsWaterData
-        self.boat_vel = None  # class for various boat velocity references (btVel, ggaVel, vtgVel)
-        self.gps = None  # object of clsGPSData
-        self.sensors = None  # object of clsSensorData
-        self.depths = None  # object of clsDepthStructure for depth data including cell depths & ref depths
-        self.edges = None  # object of clsEdges(left and right object of clsEdgeData)
-        self.extrap = None  # object of clsExtrapData
-        self.start_edge = None  # starting edge of transect looking downstream (Left or Right)
+        self.adcp = None
+        self.file_name = None
+        self.w_vel = None
+        self.boat_vel = None
+        self.gps = None
+        self.sensors = None
+        self.depths = None
+        self.edges = None
+        self.extrap = None
+        self.start_edge = None
        self.orig_start_edge = None
-        self.date_time = None  # object of DateTime
-        self.checked = None  # transect was checked for use in mmt file assumed checked for SonTek
-        self.in_transect_idx = None  # index of ensemble data associated with the moving-boat portion of the transect
+        self.date_time = None
+        self.checked = None
+        self.in_transect_idx = None

    def trdi(self, mmt_transect, pd0_data, mmt):
        """Create object, lists, and instance variables for TRDI data. 
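# ------------------------------------------------------------------------
# Sketch of the ensemble-time arithmetic used in trdi() below, assuming a
# per-ensemble time array of [hour, minute, second, hundredths]; np.diff is
# a simplification of the nandiff-over-valid-indices logic in the method,
# which also adjusts for transects that last past midnight.
import numpy as np

time = np.array([[13, 59, 58, 50],
                 [13, 59, 59, 75],
                 [14, 0, 0, 95]], dtype=float)
ens_time_sec = time[:, 0] * 3600 + time[:, 1] * 60 + time[:, 2] + time[:, 3] / 100
ens_delta_time = np.diff(ens_time_sec)  # durations between ensembles, s
# ------------------------------------------------------------------------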
@@ -88,25 +98,28 @@ class TransectData(object):
            Object of MMT_TRDI
        """

-        # Get file name of pd0 file which is first file in list of file associated with the transect
+        # Get file name of pd0 file which is first file in list of files
+        # associated with the transect
        self.file_name = mmt_transect.Files[0]

        # Get the active configuration data for the transect
-        mmt_config = getattr(mmt_transect, 'active_config')
+        mmt_config = getattr(mmt_transect, "active_config")

        # If the pd0 file has water track data process all of the data
        if pd0_data.Wt is not None:

            # Ensemble times
            # Compute time for each ensemble in seconds
-            ens_time_sec = pd0_data.Sensor.time[:, 0] * 3600 \
-                + pd0_data.Sensor.time[:, 1] * 60 \
-                + pd0_data.Sensor.time[:, 2] \
-                + pd0_data.Sensor.time[:, 3] / 100
+            ens_time_sec = (
+                pd0_data.Sensor.time[:, 0] * 3600
+                + pd0_data.Sensor.time[:, 1] * 60
+                + pd0_data.Sensor.time[:, 2]
+                + pd0_data.Sensor.time[:, 3] / 100
+            )

            # Compute the duration of each ensemble in seconds adjusting for lost data
            ens_delta_time = np.tile([np.nan], ens_time_sec.shape)
-            idx_time = np.where(np.isnan(ens_time_sec) == False)[0]
+            idx_time = np.where(np.logical_not(np.isnan(ens_time_sec)))[0]
            ens_delta_time[idx_time[1:]] = nandiff(ens_time_sec[idx_time])

            # Adjust for transects that last past midnight
@@ -115,56 +128,102 @@ class TransectData(object):
            ens_delta_time = ens_delta_time.T

            # Start date and time
-            idx = np.where(np.isnan(pd0_data.Sensor.time[:, 0]) == False)[0][0]
+            idx = np.where(np.logical_not(np.isnan(pd0_data.Sensor.time[:, 0])))[0][0]
            start_year = int(pd0_data.Sensor.date[idx, 0])

-            # StreamPro doesn't include y2k dates
-            if start_year < 100:
+            # Handle data that is not Y2K compliant
+            if pd0_data.Sensor.date_not_y2k[idx, 0] > 80:
+                start_year = 1900 + int(pd0_data.Sensor.date_not_y2k[idx, 0])
+            elif pd0_data.Sensor.date_not_y2k[idx, 0] < 81:
                start_year = 2000 + int(pd0_data.Sensor.date_not_y2k[idx, 0])

            start_month = int(pd0_data.Sensor.date[idx, 1])
            start_day = int(pd0_data.Sensor.date[idx, 2])
            start_hour = int(pd0_data.Sensor.time[idx, 0])
            start_min = int(pd0_data.Sensor.time[idx, 1])
-            start_sec = int(pd0_data.Sensor.time[idx, 2] + pd0_data.Sensor.time[idx, 3] / 100)
+            start_sec = int(
+                pd0_data.Sensor.time[idx, 2] + pd0_data.Sensor.time[idx, 3] / 100
+            )
            start_micro = int(
-                ((pd0_data.Sensor.time[idx, 2] + pd0_data.Sensor.time[idx, 3] / 100) - start_sec) * 10 ** 6)
-
-            start_dt = datetime(start_year, start_month, start_day, start_hour, start_min, start_sec, start_micro,
-                                tzinfo=timezone.utc)
+                (
+                    (pd0_data.Sensor.time[idx, 2] + pd0_data.Sensor.time[idx, 3] / 100)
+                    - start_sec
+                )
+                * 10**6
+            )
+
+            start_dt = datetime(
+                start_year,
+                start_month,
+                start_day,
+                start_hour,
+                start_min,
+                start_sec,
+                start_micro,
+                tzinfo=timezone.utc,
+            )
            start_serial_time = start_dt.timestamp()
-            start_date = datetime.strftime(datetime.utcfromtimestamp(start_serial_time), '%m/%d/%Y')
+            start_date = datetime.strftime(
+                datetime.utcfromtimestamp(start_serial_time), "%m/%d/%Y"
+            )

            # End date and time
-            idx = np.where(np.isnan(pd0_data.Sensor.time[:, 0]) == False)[0][-1]
+            idx = np.where(np.logical_not(np.isnan(pd0_data.Sensor.time[:, 0])))[0][-1]
            end_year = int(pd0_data.Sensor.date[idx, 0])
-            # StreamPro does not include Y@K dates
-            if end_year < 100:
+
+            # Handle data that is not Y2K compliant
+            if pd0_data.Sensor.date_not_y2k[idx, 0] > 80:
+                end_year = 1900 + int(pd0_data.Sensor.date_not_y2k[idx, 0])
+            elif pd0_data.Sensor.date_not_y2k[idx, 0] < 81:
                end_year = 2000 + 
int(pd0_data.Sensor.date_not_y2k[idx, 0])
            end_month = int(pd0_data.Sensor.date[idx, 1])
            end_day = int(pd0_data.Sensor.date[idx, 2])
            end_hour = int(pd0_data.Sensor.time[idx, 0])
            end_min = int(pd0_data.Sensor.time[idx, 1])
-            end_sec = int(pd0_data.Sensor.time[idx, 2] + pd0_data.Sensor.time[idx, 3] / 100)
-            end_micro = int(((pd0_data.Sensor.time[idx, 2] + pd0_data.Sensor.time[idx, 3] / 100) - end_sec) * 10 ** 6)
-
-            end_dt = datetime(end_year, end_month, end_day, end_hour, end_min, end_sec, end_micro, tzinfo=timezone.utc)
+            end_sec = int(
+                pd0_data.Sensor.time[idx, 2] + pd0_data.Sensor.time[idx, 3] / 100
+            )
+            end_micro = int(
+                (
+                    (pd0_data.Sensor.time[idx, 2] + pd0_data.Sensor.time[idx, 3] / 100)
+                    - end_sec
+                )
+                * 10**6
+            )
+
+            end_dt = datetime(
+                end_year,
+                end_month,
+                end_day,
+                end_hour,
+                end_min,
+                end_sec,
+                end_micro,
+                tzinfo=timezone.utc,
+            )
            end_serial_time = end_dt.timestamp()

            # Create date/time object
            self.date_time = DateTime()
-            self.date_time.populate_data(date_in=start_date,
-                                         start_in=start_serial_time,
-                                         end_in=end_serial_time,
-                                         ens_dur_in=ens_delta_time)
+            self.date_time.populate_data(
+                date_in=start_date,
+                start_in=start_serial_time,
+                end_in=end_serial_time,
+                ens_dur_in=ens_delta_time,
+            )

            # Transect checked for use in discharge computation
            self.checked = mmt_transect.Checked

            # Create class for adcp information
            self.adcp = InstrumentData()
-            self.adcp.populate_data(manufacturer='TRDI', raw_data=pd0_data, mmt_transect=mmt_transect, mmt=mmt)
+            self.adcp.populate_data(
+                manufacturer="TRDI",
+                raw_data=pd0_data,
+                mmt_transect=mmt_transect,
+                mmt=mmt,
+            )

            # Create valid frequency time series
            freq_ts = self.valid_frequencies(pd0_data.Inst.freq)
@@ -172,21 +231,23 @@ class TransectData(object):
            # Initialize boat vel
            self.boat_vel = BoatStructure()
            # Apply 3-beam setting from mmt file
-            if mmt_config['Proc_Use_3_Beam_BT'] < 0.5:
+            if mmt_config["Proc_Use_3_Beam_BT"] < 0.5:
                min_beams = 4
            else:
                min_beams = 3
-            self.boat_vel.add_boat_object(source='TRDI',
-                                          vel_in=pd0_data.Bt.vel_mps,
-                                          freq_in=freq_ts,
-                                          coord_sys_in=pd0_data.Cfg.coord_sys[0],
-                                          nav_ref_in='BT',
-                                          min_beams=min_beams,
-                                          bottom_mode=pd0_data.Cfg.bm[0],
-                                          corr_in=pd0_data.Bt.corr,
-                                          rssi_in=pd0_data.Bt.rssi)
-
-            self.boat_vel.set_nav_reference('BT')
+            self.boat_vel.add_boat_object(
+                source="TRDI",
+                vel_in=pd0_data.Bt.vel_mps,
+                freq_in=freq_ts,
+                coord_sys_in=pd0_data.Cfg.coord_sys[0],
+                nav_ref_in="BT",
+                min_beams=min_beams,
+                bottom_mode=pd0_data.Cfg.bm[0],
+                corr_in=pd0_data.Bt.corr,
+                rssi_in=pd0_data.Bt.rssi,
+            )
+
+            self.boat_vel.set_nav_reference("BT")

            # Compute velocities from GPS Data
            # ------------------------------------
@@ -197,12 +258,12 @@ class TransectData(object):

            # Determine correct sign for latitude
            for n, lat_ref in enumerate(pd0_data.Gps2.lat_ref):
-                idx = np.nonzero(np.array(lat_ref) == 'S')
+                idx = np.nonzero(np.array(lat_ref) == "S")
                raw_gga_lat[n, idx] = raw_gga_lat[n, idx] * -1

            # Determine correct sign for longitude
            for n, lon_ref in enumerate(pd0_data.Gps2.lon_ref):
-                idx = np.nonzero(np.array(lon_ref) == 'W')
+                idx = np.nonzero(np.array(lon_ref) == "W")
                raw_gga_lon[n, idx] = raw_gga_lon[n, idx] * -1

            # Assign data to local variables
@@ -228,54 +289,61 @@ class TransectData(object):
                ext_vtg_speed = []

            # QRev GPS processing methods
-            gga_p_method = 'Mindt'
-            gga_v_method = 'Mindt'
-            vtg_method = 'Mindt'
+            gga_p_method = "Mindt"
+            gga_v_method = "Mindt"
+            vtg_method = "Mindt"

            # If valid gps data exist, process the data
-            if (np.nansum(np.nansum(np.abs(raw_gga_lat))) > 
0) \ - or (np.nansum(np.nansum(np.abs(raw_vtg_speed))) > 0): + if (np.nansum(np.nansum(np.abs(raw_gga_lat))) > 0) or ( + np.nansum(np.nansum(np.abs(raw_vtg_speed))) > 0 + ): # Process raw GPS data self.gps = GPSData() - self.gps.populate_data(raw_gga_utc=raw_gga_utc, - raw_gga_lat=raw_gga_lat, - raw_gga_lon=raw_gga_lon, - raw_gga_alt=raw_gga_alt, - raw_gga_diff=raw_gga_diff, - raw_gga_hdop=raw_gga_hdop, - raw_gga_num_sats=raw_gga_num_sats, - raw_gga_delta_time=raw_gga_delta_time, - raw_vtg_course=raw_vtg_course, - raw_vtg_speed=raw_vtg_speed, - raw_vtg_delta_time=raw_vtg_delta_time, - raw_vtg_mode_indicator=raw_vtg_mode_indicator, - ext_gga_utc=ext_gga_utc, - ext_gga_lat=ext_gga_lat, - ext_gga_lon=ext_gga_lon, - ext_gga_alt=ext_gga_alt, - ext_gga_diff=ext_gga_diff, - ext_gga_hdop=ext_gga_hdop, - ext_gga_num_sats=ext_gga_num_sats, - ext_vtg_course=ext_vtg_course, - ext_vtg_speed=ext_vtg_speed, - gga_p_method=gga_p_method, - gga_v_method=gga_v_method, - vtg_method=vtg_method) + self.gps.populate_data( + raw_gga_utc=raw_gga_utc, + raw_gga_lat=raw_gga_lat, + raw_gga_lon=raw_gga_lon, + raw_gga_alt=raw_gga_alt, + raw_gga_diff=raw_gga_diff, + raw_gga_hdop=raw_gga_hdop, + raw_gga_num_sats=raw_gga_num_sats, + raw_gga_delta_time=raw_gga_delta_time, + raw_vtg_course=raw_vtg_course, + raw_vtg_speed=raw_vtg_speed, + raw_vtg_delta_time=raw_vtg_delta_time, + raw_vtg_mode_indicator=raw_vtg_mode_indicator, + ext_gga_utc=ext_gga_utc, + ext_gga_lat=ext_gga_lat, + ext_gga_lon=ext_gga_lon, + ext_gga_alt=ext_gga_alt, + ext_gga_diff=ext_gga_diff, + ext_gga_hdop=ext_gga_hdop, + ext_gga_num_sats=ext_gga_num_sats, + ext_vtg_course=ext_vtg_course, + ext_vtg_speed=ext_vtg_speed, + gga_p_method=gga_p_method, + gga_v_method=gga_v_method, + vtg_method=vtg_method, + ) # If valid gga data exists create gga boat velocity object if np.nansum(np.nansum(np.abs(raw_gga_lat))) > 0: - self.boat_vel.add_boat_object(source='TRDI', - vel_in=self.gps.gga_velocity_ens_mps, - coord_sys_in='Earth', - nav_ref_in='GGA') + self.boat_vel.add_boat_object( + source="TRDI", + vel_in=self.gps.gga_velocity_ens_mps, + coord_sys_in="Earth", + nav_ref_in="GGA", + ) # If valid vtg data exist create vtg boat velocity object if np.nansum(np.nansum(np.abs(raw_vtg_speed))) > 0: - self.boat_vel.add_boat_object(source='TRDI', - vel_in=self.gps.vtg_velocity_ens_mps, - coord_sys_in='Earth', - nav_ref_in='VTG') + self.boat_vel.add_boat_object( + source="TRDI", + vel_in=self.gps.vtg_velocity_ens_mps, + coord_sys_in="Earth", + nav_ref_in="VTG", + ) # Get and compute ensemble beam depths temp_depth_bt = np.array(pd0_data.Bt.depth_m) @@ -284,32 +352,39 @@ class TransectData(object): temp_depth_bt[temp_depth_bt < 0.01] = np.nan # Add draft - temp_depth_bt += mmt_config['Offsets_Transducer_Depth'] + temp_depth_bt += mmt_config["Offsets_Transducer_Depth"] # Get instrument cell data - cell_size_all_m, cell_depth_m, sl_cutoff_per, sl_lag_effect_m = \ - TransectData.compute_cell_data(pd0_data) + ( + cell_size_all_m, + cell_depth_m, + sl_cutoff_per, + sl_lag_effect_m, + ) = TransectData.compute_cell_data(pd0_data) # Adjust cell depth of draft - cell_depth_m = np.add(mmt_config['Offsets_Transducer_Depth'], cell_depth_m) + cell_depth_m = np.add(mmt_config["Offsets_Transducer_Depth"], cell_depth_m) # Create depth data object for BT self.depths = DepthStructure() - self.depths.add_depth_object(depth_in=temp_depth_bt, - source_in='BT', - freq_in=freq_ts, - draft_in=mmt_config['Offsets_Transducer_Depth'], - cell_depth_in=cell_depth_m, - cell_size_in=cell_size_all_m) + 
self.depths.add_depth_object( + depth_in=temp_depth_bt, + source_in="BT", + freq_in=freq_ts, + draft_in=mmt_config["Offsets_Transducer_Depth"], + cell_depth_in=cell_depth_m, + cell_size_in=cell_size_all_m, + ) # Compute cells above side lobe - cells_above_sl, sl_cutoff_m = \ - TransectData.side_lobe_cutoff(depths=self.depths.bt_depths.depth_orig_m, - draft=self.depths.bt_depths.draft_orig_m, - cell_depth=self.depths.bt_depths.depth_cell_depth_m, - sl_lag_effect=sl_lag_effect_m, - slc_type='Percent', - value=1 - sl_cutoff_per / 100) + cells_above_sl, sl_cutoff_m = TransectData.side_lobe_cutoff( + depths=self.depths.bt_depths.depth_orig_m, + draft=self.depths.bt_depths.draft_orig_m, + cell_depth=self.depths.bt_depths.depth_cell_depth_m, + sl_lag_effect=sl_lag_effect_m, + slc_type="Percent", + value=1 - sl_cutoff_per / 100, + ) # Check for the presence of vertical beam data if np.nanmax(np.nanmax(pd0_data.Sensor.vert_beam_status)) > 0: @@ -320,15 +395,17 @@ class TransectData(object): temp_depth_vb[temp_depth_vb < 0.01] = np.nan # Add draft - temp_depth_vb = temp_depth_vb + mmt_config['Offsets_Transducer_Depth'] + temp_depth_vb = temp_depth_vb + mmt_config["Offsets_Transducer_Depth"] # Create depth data object for vertical beam - self.depths.add_depth_object(depth_in=temp_depth_vb, - source_in='VB', - freq_in=freq_ts, - draft_in=mmt_config['Offsets_Transducer_Depth'], - cell_depth_in=cell_depth_m, - cell_size_in=cell_size_all_m) + self.depths.add_depth_object( + depth_in=temp_depth_vb, + source_in="VB", + freq_in=freq_ts, + draft_in=mmt_config["Offsets_Transducer_Depth"], + cell_depth_in=cell_depth_m, + cell_size_in=cell_size_all_m, + ) # Check for the presence of depth sounder if np.nansum(np.nansum(pd0_data.Gps2.depth_m)) > 1e-5: @@ -338,87 +415,98 @@ class TransectData(object): temp_depth_ds[temp_depth_ds < 0.01] = np.nan # Use the last valid depth for each ensemble - last_depth_col_idx = np.sum(np.isnan(temp_depth_ds) == False, axis=1) - 1 + last_depth_col_idx = ( + np.sum(np.logical_not(np.isnan(temp_depth_ds)), axis=1) - 1 + ) last_depth_col_idx[last_depth_col_idx == -1] = 0 row_index = np.arange(len(temp_depth_ds)) last_depth = nans(row_index.size) for row in row_index: last_depth[row] = temp_depth_ds[row, last_depth_col_idx[row]] - # Determine if mmt file has a scale factor and offset for the depth sounder - if mmt_config['DS_Cor_Spd_Sound'] == 0: - scale_factor = mmt_config['DS_Scale_Factor'] + # Determine if mmt file has a scale factor and offset for the + # depth sounder + if mmt_config["DS_Cor_Spd_Sound"] == 0: + scale_factor = mmt_config["DS_Scale_Factor"] else: - scale_factor = pd0_data.Sensor.sos_mps / 1500. + scale_factor = pd0_data.Sensor.sos_mps / 1500.0 # Apply scale factor, offset, and draft # Note: Only the ADCP draft is stored. 
The transducer - # draft or scaling for depth sounder data cannot be changed in QRev + # draft or scaling for depth sounder data cannot be changed in + # QRev ds_depth = np.tile(np.nan, (1, cell_depth_m.shape[1])) - ds_depth[0, :] = (last_depth * scale_factor) \ - + mmt_config['DS_Transducer_Depth'] \ - + mmt_config['DS_Transducer_Offset'] - - self.depths.add_depth_object(depth_in=ds_depth, - source_in='DS', - freq_in=np.tile(np.nan, pd0_data.Inst.freq.shape), - draft_in=mmt_config['Offsets_Transducer_Depth'], - cell_depth_in=cell_depth_m, - cell_size_in=cell_size_all_m) + ds_depth[0, :] = ( + (last_depth * scale_factor) + + mmt_config["DS_Transducer_Depth"] + + mmt_config["DS_Transducer_Offset"] + ) + + self.depths.add_depth_object( + depth_in=ds_depth, + source_in="DS", + freq_in=np.tile(np.nan, pd0_data.Inst.freq.shape), + draft_in=mmt_config["Offsets_Transducer_Depth"], + cell_depth_in=cell_depth_m, + cell_size_in=cell_size_all_m, + ) # Set depth reference to value from mmt file - if 'Proc_River_Depth_Source' in mmt_config: - if mmt_config['Proc_River_Depth_Source'] == 0: - self.depths.selected = 'bt_depths' - self.depths.composite_depths(transect=self, setting='Off') + if "Proc_River_Depth_Source" in mmt_config: + if mmt_config["Proc_River_Depth_Source"] == 0: + self.depths.selected = "bt_depths" + self.depths.composite_depths(transect=self, setting="Off") - elif mmt_config['Proc_River_Depth_Source'] == 1: + elif mmt_config["Proc_River_Depth_Source"] == 1: if self.depths.ds_depths is not None: - self.depths.selected = 'ds_depths' + self.depths.selected = "ds_depths" else: - self.depths.selected = 'bt_depths' - self.depths.composite_depths(transect=self, setting='Off') + self.depths.selected = "bt_depths" + self.depths.composite_depths(transect=self, setting="Off") - elif mmt_config['Proc_River_Depth_Source'] == 2: + elif mmt_config["Proc_River_Depth_Source"] == 2: if self.depths.vb_depths is not None: - self.depths.selected = 'vb_depths' + self.depths.selected = "vb_depths" else: - self.depths.selected = 'bt_depths' - self.depths.composite_depths(transect=self, setting='Off') + self.depths.selected = "bt_depths" + self.depths.composite_depths(transect=self, setting="Off") - elif mmt_config['Proc_River_Depth_Source'] == 3: + elif mmt_config["Proc_River_Depth_Source"] == 3: if self.depths.vb_depths is None: - self.depths.selected = 'bt_depths' - self.depths.composite_depths(transect=self, setting='Off') + self.depths.selected = "bt_depths" + self.depths.composite_depths(transect=self, setting="Off") else: - self.depths.selected = 'vb_depths' - self.depths.composite_depths(transect=self, setting='On') + self.depths.selected = "vb_depths" + self.depths.composite_depths(transect=self, setting="On") - elif mmt_config['Proc_River_Depth_Source'] == 4: + elif mmt_config["Proc_River_Depth_Source"] == 4: if self.depths.bt_depths is not None: - self.depths.selected = 'bt_depths' - if self.depths.vb_depths is not None or self.depths.ds_depths is not None: - self.depths.composite_depths(transect=self, setting='On') + self.depths.selected = "bt_depths" + if ( + self.depths.vb_depths is not None + or self.depths.ds_depths is not None + ): + self.depths.composite_depths(transect=self, setting="On") else: - self.depths.composite_depths(transect=self, setting='Off') + self.depths.composite_depths(transect=self, setting="Off") elif self.depths.vb_depths is not None: - self.depths.selected = 'vb_depths' - self.depths.composite_depths(transect=self, setting='On') + self.depths.selected = "vb_depths" + 
self.depths.composite_depths(transect=self, setting="On") elif self.depths.ds_depths is not None: - self.depths.selected = 'ds_depths' - self.depths.composite_depths(transect=self, setting='On') + self.depths.selected = "ds_depths" + self.depths.composite_depths(transect=self, setting="On") else: - self.depths.selected = 'bt_depths' - self.depths.composite_depths(transect=self, setting='Off') + self.depths.selected = "bt_depths" + self.depths.composite_depths(transect=self, setting="Off") else: - if mmt_config['DS_Use_Process'] > 0: + if mmt_config["DS_Use_Process"] > 0: if self.depths.ds_depths is not None: - self.depths.selected = 'ds_depths' + self.depths.selected = "ds_depths" else: - self.depths.selected = 'bt_depths' + self.depths.selected = "bt_depths" else: - self.depths.selected = 'bt_depths' - self.depths.composite_depths(transect=self, setting='Off') + self.depths.selected = "bt_depths" + self.depths.composite_depths(transect=self, setting="Off") # Create water_data object # ------------------------ @@ -428,166 +516,197 @@ class TransectData(object): # Check for RiverRay and RiverPro data firmware = str(pd0_data.Inst.firm_ver[0]) excluded_dist = 0 - if (firmware[:2] == '56') and (np.nanmax(pd0_data.Sensor.vert_beam_status) < 0.9): + if (firmware[:2] == "56") and ( + np.nanmax(pd0_data.Sensor.vert_beam_status) < 0.9 + ): excluded_dist = 0.25 - if (firmware[:2] == '44') or (firmware[:2] == '56'): + if (firmware[:2] == "44") or (firmware[:2] == "56"): # Process water velocities for RiverRay and RiverPro self.w_vel = WaterData() - self.w_vel.populate_data(vel_in=pd0_data.Wt.vel_mps, - freq_in=freq_ts, - coord_sys_in=pd0_data.Cfg.coord_sys, - nav_ref_in='None', - rssi_in=pd0_data.Wt.rssi, - rssi_units_in='Counts', - excluded_dist_in=excluded_dist, - cells_above_sl_in=cells_above_sl, - sl_cutoff_per_in=sl_cutoff_per, - sl_cutoff_num_in=0, - sl_cutoff_type_in='Percent', - sl_lag_effect_in=sl_lag_effect_m, - sl_cutoff_m=sl_cutoff_m, - wm_in=pd0_data.Cfg.wm[0], - blank_in=pd0_data.Cfg.wf_cm[0] / 100, - corr_in=pd0_data.Wt.corr, - surface_vel_in=pd0_data.Surface.vel_mps, - surface_rssi_in=pd0_data.Surface.rssi, - surface_corr_in=pd0_data.Surface.corr, - surface_num_cells_in=pd0_data.Surface.no_cells, - ping_type=ensemble_ping_type) + self.w_vel.populate_data( + vel_in=pd0_data.Wt.vel_mps, + freq_in=freq_ts, + coord_sys_in=pd0_data.Cfg.coord_sys, + nav_ref_in="None", + rssi_in=pd0_data.Wt.rssi, + rssi_units_in="Counts", + excluded_dist_in=excluded_dist, + cells_above_sl_in=cells_above_sl, + sl_cutoff_per_in=sl_cutoff_per, + sl_cutoff_num_in=0, + sl_cutoff_type_in="Percent", + sl_lag_effect_in=sl_lag_effect_m, + sl_cutoff_m=sl_cutoff_m, + wm_in=pd0_data.Cfg.wm[0], + blank_in=pd0_data.Cfg.wf_cm[0] / 100, + corr_in=pd0_data.Wt.corr, + surface_vel_in=pd0_data.Surface.vel_mps, + surface_rssi_in=pd0_data.Surface.rssi, + surface_corr_in=pd0_data.Surface.corr, + surface_num_cells_in=pd0_data.Surface.no_cells, + ping_type=ensemble_ping_type, + ) else: # Process water velocities for non-RiverRay ADCPs self.w_vel = WaterData() - self.w_vel.populate_data(vel_in=pd0_data.Wt.vel_mps, - freq_in=freq_ts, - coord_sys_in=pd0_data.Cfg.coord_sys[0], - nav_ref_in='None', - rssi_in=pd0_data.Wt.rssi, - rssi_units_in='Counts', - excluded_dist_in=excluded_dist, - cells_above_sl_in=cells_above_sl, - sl_cutoff_per_in=sl_cutoff_per, - sl_cutoff_num_in=0, - sl_cutoff_type_in='Percent', - sl_lag_effect_in=sl_lag_effect_m, - sl_cutoff_m=sl_cutoff_m, - wm_in=pd0_data.Cfg.wm[0], - blank_in=pd0_data.Cfg.wf_cm[0] / 100, - 
corr_in=pd0_data.Wt.corr, - ping_type=ensemble_ping_type) + self.w_vel.populate_data( + vel_in=pd0_data.Wt.vel_mps, + freq_in=freq_ts, + coord_sys_in=pd0_data.Cfg.coord_sys[0], + nav_ref_in="None", + rssi_in=pd0_data.Wt.rssi, + rssi_units_in="Counts", + excluded_dist_in=excluded_dist, + cells_above_sl_in=cells_above_sl, + sl_cutoff_per_in=sl_cutoff_per, + sl_cutoff_num_in=0, + sl_cutoff_type_in="Percent", + sl_lag_effect_in=sl_lag_effect_m, + sl_cutoff_m=sl_cutoff_m, + wm_in=pd0_data.Cfg.wm[0], + blank_in=pd0_data.Cfg.wf_cm[0] / 100, + corr_in=pd0_data.Wt.corr, + ping_type=ensemble_ping_type, + ) # Create Edges Object self.edges = Edges() - self.edges.populate_data(rec_edge_method='Fixed', vel_method='MeasMag') + self.edges.populate_data(rec_edge_method="Fixed", vel_method="MeasMag") # Determine number of ensembles to average - n_ens_left = mmt_config['Q_Shore_Pings_Avg'] + n_ens_left = mmt_config["Q_Shore_Pings_Avg"] # TRDI uses same number on left and right edges n_ens_right = n_ens_left - # Set indices for ensembles in the moving-boat portion of the transect + # Set indices for ensembles in the moving-boat portion of the + # transect self.in_transect_idx = np.arange(0, pd0_data.Bt.vel_mps.shape[1]) # Determine left and right edge distances - if mmt_config['Edge_Begin_Left_Bank']: - dist_left = float(mmt_config['Edge_Begin_Shore_Distance']) - dist_right = float(mmt_config['Edge_End_Shore_Distance']) - if 'Edge_End_Manual_Discharge' in mmt_config: - user_discharge_left = float(mmt_config['Edge_Begin_Manual_Discharge']) - user_discharge_right = float(mmt_config['Edge_End_Manual_Discharge']) - edge_method_left = mmt_config['Edge_Begin_Method_Distance'] - edge_method_right = mmt_config['Edge_End_Method_Distance'] + if mmt_config["Edge_Begin_Left_Bank"]: + dist_left = float(mmt_config["Edge_Begin_Shore_Distance"]) + dist_right = float(mmt_config["Edge_End_Shore_Distance"]) + if "Edge_End_Manual_Discharge" in mmt_config: + user_discharge_left = float( + mmt_config["Edge_Begin_Manual_Discharge"] + ) + user_discharge_right = float( + mmt_config["Edge_End_Manual_Discharge"] + ) + edge_method_left = mmt_config["Edge_Begin_Method_Distance"] + edge_method_right = mmt_config["Edge_End_Method_Distance"] else: user_discharge_left = None user_discharge_right = None - edge_method_left = 'Yes' - edge_method_right = 'Yes' - self.start_edge = 'Left' - self.orig_start_edge = 'Left' + edge_method_left = "Yes" + edge_method_right = "Yes" + self.start_edge = "Left" + self.orig_start_edge = "Left" else: - dist_left = float(mmt_config['Edge_End_Shore_Distance']) - dist_right = float(mmt_config['Edge_Begin_Shore_Distance']) - if 'Edge_End_Manual_Discharge' in mmt_config: - user_discharge_left = float(mmt_config['Edge_End_Manual_Discharge']) - user_discharge_right = float(mmt_config['Edge_Begin_Manual_Discharge']) - edge_method_left = mmt_config['Edge_End_Method_Distance'] - edge_method_right = mmt_config['Edge_Begin_Method_Distance'] + dist_left = float(mmt_config["Edge_End_Shore_Distance"]) + dist_right = float(mmt_config["Edge_Begin_Shore_Distance"]) + if "Edge_End_Manual_Discharge" in mmt_config: + user_discharge_left = float(mmt_config["Edge_End_Manual_Discharge"]) + user_discharge_right = float( + mmt_config["Edge_Begin_Manual_Discharge"] + ) + edge_method_left = mmt_config["Edge_End_Method_Distance"] + edge_method_right = mmt_config["Edge_Begin_Method_Distance"] else: user_discharge_left = None user_discharge_right = None - edge_method_left = 'Yes' - edge_method_right = 'Yes' - self.start_edge = 'Right' - 
self.orig_start_edge = 'Right' + edge_method_left = "Yes" + edge_method_right = "Yes" + self.start_edge = "Right" + self.orig_start_edge = "Right" # Create left edge - if edge_method_left == 'NO': - self.edges.left.populate_data(edge_type='User Q', - distance=dist_left, - number_ensembles=n_ens_left, - user_discharge=user_discharge_left) - - elif mmt_config['Q_Left_Edge_Type'] == 0: - self.edges.left.populate_data(edge_type='Triangular', - distance=dist_left, - number_ensembles=n_ens_left, - user_discharge=user_discharge_left) - - elif mmt_config['Q_Left_Edge_Type'] == 1: - self.edges.left.populate_data(edge_type='Rectangular', - distance=dist_left, - number_ensembles=n_ens_left, - user_discharge=user_discharge_left) - - elif mmt_config['Q_Left_Edge_Type'] == 2: - self.edges.left.populate_data(edge_type='Custom', - distance=dist_left, - number_ensembles=n_ens_left, - coefficient=mmt_config['Q_Left_Edge_Coeff'], - user_discharge=user_discharge_left) + if edge_method_left == "NO": + self.edges.left.populate_data( + edge_type="User Q", + distance=dist_left, + number_ensembles=n_ens_left, + user_discharge=user_discharge_left, + ) + + elif mmt_config["Q_Left_Edge_Type"] == 0: + self.edges.left.populate_data( + edge_type="Triangular", + distance=dist_left, + number_ensembles=n_ens_left, + user_discharge=user_discharge_left, + ) + + elif mmt_config["Q_Left_Edge_Type"] == 1: + self.edges.left.populate_data( + edge_type="Rectangular", + distance=dist_left, + number_ensembles=n_ens_left, + user_discharge=user_discharge_left, + ) + + elif mmt_config["Q_Left_Edge_Type"] == 2: + self.edges.left.populate_data( + edge_type="Custom", + distance=dist_left, + number_ensembles=n_ens_left, + coefficient=mmt_config["Q_Left_Edge_Coeff"], + user_discharge=user_discharge_left, + ) # Create right edge - if edge_method_right == 'NO': - self.edges.right.populate_data(edge_type='User Q', - distance=dist_right, - number_ensembles=n_ens_right, - user_discharge=user_discharge_right) - elif mmt_config['Q_Right_Edge_Type'] == 0: - self.edges.right.populate_data(edge_type='Triangular', - distance=dist_right, - number_ensembles=n_ens_right, - user_discharge=user_discharge_right) - - elif mmt_config['Q_Right_Edge_Type'] == 1: - self.edges.right.populate_data(edge_type='Rectangular', - distance=dist_right, - number_ensembles=n_ens_right, - user_discharge=user_discharge_right) - - elif mmt_config['Q_Right_Edge_Type'] == 2: - self.edges.right.populate_data(edge_type='Custom', - distance=dist_right, - number_ensembles=n_ens_right, - coefficient=mmt_config['Q_Right_Edge_Coeff'], - user_discharge=user_discharge_right) + if edge_method_right == "NO": + self.edges.right.populate_data( + edge_type="User Q", + distance=dist_right, + number_ensembles=n_ens_right, + user_discharge=user_discharge_right, + ) + elif mmt_config["Q_Right_Edge_Type"] == 0: + self.edges.right.populate_data( + edge_type="Triangular", + distance=dist_right, + number_ensembles=n_ens_right, + user_discharge=user_discharge_right, + ) + + elif mmt_config["Q_Right_Edge_Type"] == 1: + self.edges.right.populate_data( + edge_type="Rectangular", + distance=dist_right, + number_ensembles=n_ens_right, + user_discharge=user_discharge_right, + ) + + elif mmt_config["Q_Right_Edge_Type"] == 2: + self.edges.right.populate_data( + edge_type="Custom", + distance=dist_right, + number_ensembles=n_ens_right, + coefficient=mmt_config["Q_Right_Edge_Coeff"], + user_discharge=user_discharge_right, + ) # Create extrap object # -------------------- # Determine top method - top = 'Power' - 
if mmt_config['Q_Top_Method'] == 1: - top = 'Constant' - elif mmt_config['Q_Top_Method'] == 2: - top = '3-Point' + top = "Power" + if mmt_config["Q_Top_Method"] == 1: + top = "Constant" + elif mmt_config["Q_Top_Method"] == 2: + top = "3-Point" # Determine bottom method - bot = 'Power' - if mmt_config['Q_Bottom_Method'] == 2: - bot = 'No Slip' + bot = "Power" + if mmt_config["Q_Bottom_Method"] == 2: + bot = "No Slip" self.extrap = ExtrapData() - self.extrap.populate_data(top=top, bot=bot, exp=mmt_config['Q_Power_Curve_Coeff']) + self.extrap.populate_data( + top=top, bot=bot, exp=mmt_config["Q_Power_Curve_Coeff"] + ) # Sensor Data self.sensors = Sensors() @@ -596,15 +715,19 @@ class TransectData(object): # Internal Heading self.sensors.heading_deg.internal = HeadingData() - self.sensors.heading_deg.internal.populate_data(data_in=pd0_data.Sensor.heading_deg.T, - source_in='internal', - magvar=mmt_config['Offsets_Magnetic_Variation'], - align=mmt_config['Ext_Heading_Offset']) + self.sensors.heading_deg.internal.populate_data( + data_in=pd0_data.Sensor.heading_deg.T, + source_in="internal", + magvar=mmt_config["Offsets_Magnetic_Variation"], + align=mmt_config["Ext_Heading_Offset"], + ) # External Heading - ext_heading_check = np.where(np.isnan(pd0_data.Gps2.heading_deg) == False) + ext_heading_check = np.where( + np.logical_not(np.isnan(pd0_data.Gps2.heading_deg)) + ) if len(ext_heading_check[0]) <= 0: - self.sensors.heading_deg.selected = 'internal' + self.sensors.heading_deg.selected = "internal" else: # Determine external heading for each ensemble # Using the minimum time difference @@ -623,26 +746,32 @@ class TransectData(object): # Create external heading sensor self.sensors.heading_deg.external = HeadingData() - self.sensors.heading_deg.external.populate_data(data_in=ext_heading_deg, - source_in='external', - magvar=mmt_config['Offsets_Magnetic_Variation'], - align=mmt_config['Ext_Heading_Offset']) + self.sensors.heading_deg.external.populate_data( + data_in=ext_heading_deg, + source_in="external", + magvar=mmt_config["Offsets_Magnetic_Variation"], + align=mmt_config["Ext_Heading_Offset"], + ) # Determine heading source to use from mmt setting - source_used = mmt_config['Ext_Heading_Use'] + source_used = mmt_config["Ext_Heading_Use"] if source_used: - self.sensors.heading_deg.selected = 'external' + self.sensors.heading_deg.selected = "external" else: - self.sensors.heading_deg.selected = 'internal' + self.sensors.heading_deg.selected = "internal" # Pitch - pitch = arctand(tand(pd0_data.Sensor.pitch_deg) * cosd(pd0_data.Sensor.roll_deg)) + pitch = arctand( + tand(pd0_data.Sensor.pitch_deg) * cosd(pd0_data.Sensor.roll_deg) + ) pitch_src = pd0_data.Cfg.pitch_src[0] # Create pitch sensor self.sensors.pitch_deg.internal = SensorData() - self.sensors.pitch_deg.internal.populate_data(data_in=pitch, source_in=pitch_src) - self.sensors.pitch_deg.selected = 'internal' + self.sensors.pitch_deg.internal.populate_data( + data_in=pitch, source_in=pitch_src + ) + self.sensors.pitch_deg.selected = "internal" # Roll roll = pd0_data.Sensor.roll_deg.T @@ -650,8 +779,10 @@ class TransectData(object): # Create Roll sensor self.sensors.roll_deg.internal = SensorData() - self.sensors.roll_deg.internal.populate_data(data_in=roll, source_in=roll_src) - self.sensors.roll_deg.selected = 'internal' + self.sensors.roll_deg.internal.populate_data( + data_in=roll, source_in=roll_src + ) + self.sensors.roll_deg.selected = "internal" # Temperature temperature = pd0_data.Sensor.temperature_deg_c.T @@ -659,8 +790,10 @@ 
class TransectData(object):
 
         # Create temperature sensor
         self.sensors.temperature_deg_c.internal = SensorData()
-        self.sensors.temperature_deg_c.internal.populate_data(data_in=temperature, source_in=temperature_src)
-        self.sensors.temperature_deg_c.selected = 'internal'
+        self.sensors.temperature_deg_c.internal.populate_data(
+            data_in=temperature, source_in=temperature_src
+        )
+        self.sensors.temperature_deg_c.selected = "internal"
 
         # Salinity
         pd0_salinity = pd0_data.Sensor.salinity_ppt.T
@@ -668,30 +801,54 @@ class TransectData(object):
 
         # Create salinity sensor from pd0 data
         self.sensors.salinity_ppt.internal = SensorData()
-        self.sensors.salinity_ppt.internal.populate_data(data_in=pd0_salinity, source_in=pd0_salinity_src)
+        self.sensors.salinity_ppt.internal.populate_data(
+            data_in=pd0_salinity, source_in=pd0_salinity_src
+        )
 
         # Create salinity sensor from mmt data
-        mmt_salinity = mmt_config['Proc_Salinity']
+        mmt_salinity = mmt_config["Proc_Salinity"]
         mmt_salinity = np.tile(mmt_salinity, pd0_salinity.shape)
         self.sensors.salinity_ppt.user = SensorData()
-        self.sensors.salinity_ppt.user.populate_data(data_in=mmt_salinity, source_in='mmt')
+        self.sensors.salinity_ppt.user.populate_data(
+            data_in=mmt_salinity, source_in="mmt"
+        )
 
         # Set selected salinity
-        self.sensors.salinity_ppt.selected = 'internal'
+        self.sensors.salinity_ppt.selected = "internal"
 
         # Speed of Sound
         speed_of_sound = pd0_data.Sensor.sos_mps.T
         speed_of_sound_src = pd0_data.Cfg.sos_src[0]
         self.sensors.speed_of_sound_mps.internal = SensorData()
-        self.sensors.speed_of_sound_mps.internal.populate_data(data_in=speed_of_sound, source_in=speed_of_sound_src)
+        self.sensors.speed_of_sound_mps.internal.populate_data(
+            data_in=speed_of_sound, source_in=speed_of_sound_src
+        )
 
         # The raw data are referenced to the internal SOS
-        self.sensors.speed_of_sound_mps.selected = 'internal'
+        self.sensors.speed_of_sound_mps.selected = "internal"
+
+        # Battery voltage
+        self.sensors.battery_voltage.internal = SensorData()
+
+        # Determine TRDI model
+        num = float(pd0_data.Inst.firm_ver[0])
+        model_switch = np.floor(num)
+
+        # Rio Grande voltage does not represent battery voltage and is set
+        # to nan
+        if model_switch == 10:
+            scale_factor = np.nan
+        else:
+            scale_factor = 0.1
+
+        self.sensors.battery_voltage.internal.populate_data(
+            data_in=pd0_data.Sensor.xmit_voltage * scale_factor,
+            source_in="internal",
+        )
 
     @staticmethod
     def trdi_ping_type(pd0_data):
-        """Determines if the ping is coherent on incoherent based on the lag near bottom. A coherent ping will have
-        the lag near the bottom.
+        """Determines if the ping is coherent or incoherent based on the lag
+        near bottom. A coherent ping will have the lag near the bottom.
 
Parameters ---------- @@ -707,36 +864,37 @@ class TransectData(object): firmware = str(pd0_data.Inst.firm_ver[0]) # RiverRay, RiverPro, and RioPro - if (firmware[:2] == '44') or (firmware[:2] == '56'): - if hasattr(pd0_data.Cfg, 'lag_near_bottom'): + if (firmware[:2] == "44") or (firmware[:2] == "56"): + if hasattr(pd0_data.Cfg, "lag_near_bottom"): ping_temp = pd0_data.Cfg.lag_near_bottom > 0 - ping_type = np.tile(['U'], ping_temp.shape) - ping_type[ping_temp == 0] = 'I' - ping_type[ping_temp == 1] = 'C' + ping_type = np.tile(["U"], ping_temp.shape) + ping_type[ping_temp == 0] = "I" + ping_type[ping_temp == 1] = "C" # StreamPro - elif firmware[:2] == '31': + elif firmware[:2] == "31": if pd0_data.Cfg.wm[0] == 12: - ping_type = np.tile(['I'], pd0_data.Wt.vel_mps.shape[2]) + ping_type = np.tile(["I"], pd0_data.Wt.vel_mps.shape[2]) elif pd0_data.Cfg.wm[0] == 13: - ping_type = np.tile(['C'], pd0_data.Wt.vel_mps.shape[2]) + ping_type = np.tile(["C"], pd0_data.Wt.vel_mps.shape[2]) else: - ping_type = np.tile(['U'], pd0_data.Wt.vel_mps.shape[2]) + ping_type = np.tile(["U"], pd0_data.Wt.vel_mps.shape[2]) # Rio Grande - elif firmware[:2] == '10': + elif firmware[:2] == "10": if pd0_data.Cfg.wm[0] == 1 or pd0_data.Cfg.wm[0] == 12: - ping_type = np.tile(['I'], pd0_data.Wt.vel_mps.shape[2]) + ping_type = np.tile(["I"], pd0_data.Wt.vel_mps.shape[2]) elif pd0_data.Cfg.wm[0] == 5 or pd0_data.Cfg.wm[0] == 8: - ping_type = np.tile(['C'], pd0_data.Wt.vel_mps.shape[2]) + ping_type = np.tile(["C"], pd0_data.Wt.vel_mps.shape[2]) else: - ping_type = np.tile(['U'], pd0_data.Wt.vel_mps.shape[2]) + ping_type = np.tile(["U"], pd0_data.Wt.vel_mps.shape[2]) else: - ping_type = np.tile(['U'], pd0_data.Wt.vel_mps.shape[2]) + ping_type = np.tile(["U"], pd0_data.Wt.vel_mps.shape[2]) return ping_type - def sontek(self, rsdata, file_name): - """Reads Matlab file produced by RiverSurveyor Live and populates the transect instance variables. + def sontek(self, rsdata, file_name, snr_3beam_comp): + """Reads Matlab file produced by RiverSurveyor Live and populates the + transect instance variables. Parameters ---------- @@ -744,6 +902,8 @@ class TransectData(object): Object of Matlab data from SonTek Matlab files file_name: str Name of SonTek Matlab file not including path. + snr_3beam_comp: bool + Indicates the use of 3-beam velocity computations when invalid SNR is found """ self.file_name = os.path.basename(file_name) @@ -751,28 +911,28 @@ class TransectData(object): # ADCP instrument information # --------------------------- self.adcp = InstrumentData() - if hasattr(rsdata.System, 'InstrumentModel'): - self.adcp.populate_data(manufacturer='Nortek', raw_data=rsdata) + if hasattr(rsdata.System, "InstrumentModel"): + self.adcp.populate_data(manufacturer="Nortek", raw_data=rsdata) else: - self.adcp.populate_data(manufacturer='SonTek', raw_data=rsdata) + self.adcp.populate_data(manufacturer="SonTek", raw_data=rsdata) # Ensemble times ensemble_delta_time = np.append([0], np.diff(rsdata.System.Time)) - # TODO potentially add popup message when there are missing ensembles. Matlab did that. - - # idx_missing = np.where(ensemble_delta_time > 1.5) - # if len(idx_missing[0]) > 0: - # number_missing = np.sum(ensemble_delta_time[idx_missing]) - len(idx_missing) - # error_str = self.file_name + ' is missing ' + str(number_missing) + ' samples' + # TODO potentially add popup message when there are missing ensembles. + # Matlab did that. 
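
The gap check sketched by the commented-out lines deleted above can be kept as a standalone helper. A minimal sketch, reusing the 1.5 s gap threshold and the arithmetic from those deleted lines (the helper name, the roughly 1 Hz ensemble-rate assumption the threshold implies, and returning a message rather than raising a popup are assumptions):

import numpy as np

def missing_ensemble_message(file_name, ensemble_delta_time, gap_s=1.5):
    # Ensembles arrive roughly once per second, so deltas well above 1 s
    # mark dropped samples.
    idx_missing = np.where(ensemble_delta_time > gap_s)[0]
    if idx_missing.size > 0:
        # A gap of n seconds spans roughly n samples, one of which was
        # actually recorded.
        number_missing = np.sum(ensemble_delta_time[idx_missing]) - idx_missing.size
        return file_name + " is missing " + str(number_missing) + " samples"
    return None
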
start_serial_time = rsdata.System.Time[0] + ((30 * 365) + 7) * 24 * 60 * 60
         end_serial_time = rsdata.System.Time[-1] + ((30 * 365) + 7) * 24 * 60 * 60
-        meas_date = datetime.strftime(datetime.fromtimestamp(start_serial_time), '%m/%d/%Y')
+        meas_date = datetime.strftime(
+            datetime.fromtimestamp(start_serial_time), "%m/%d/%Y"
+        )
         self.date_time = DateTime()
-        self.date_time.populate_data(date_in=meas_date,
-                                     start_in=start_serial_time,
-                                     end_in=end_serial_time,
-                                     ens_dur_in=ensemble_delta_time)
+        self.date_time.populate_data(
+            date_in=meas_date,
+            start_in=start_serial_time,
+            end_in=end_serial_time,
+            ens_dur_in=ensemble_delta_time,
+        )
 
         # Transect checked for use in discharge computations
         self.checked = True
@@ -780,61 +940,75 @@ class TransectData(object):
 
         # Coordinate system
         ref_coord = None
 
-        # The initial coordinate system must be set to earth for early versions of RiverSurveyor firmware.
-        # This implementation forces all versions to use the earth coordinate system.
+        # The initial coordinate system must be set to earth for early versions
+        # of RiverSurveyor firmware. This implementation forces all versions to use
+        # the earth coordinate system.
         if rsdata.Setup.coordinateSystem == 0:
-            # ref_coord = 'Beam'
-            raise CoordError('Beam Coordinates are not supported for all RiverSuveyor firmware releases, ' +
-                             'use Earth coordinates.')
+            # ref_coord = "Beam"
+            raise CoordError(
+                "Beam Coordinates are not supported for all "
+                "RiverSurveyor firmware releases, "
+                "use Earth coordinates."
+            )
         elif rsdata.Setup.coordinateSystem == 1:
-            # ref_coord = 'Inst'
-            raise CoordError('Instrument Coordinates are not supported for all RiverSuveyor firmware releases, ' +
-                             'use Earth coordinates.')
+            # ref_coord = "Inst"
+            raise CoordError(
+                "Instrument Coordinates are not supported for all"
+                " RiverSurveyor firmware releases, "
+                "use Earth coordinates."
+            )
         elif rsdata.Setup.coordinateSystem == 2:
-            ref_coord = 'Earth'
+            ref_coord = "Earth"
 
         # Speed of Sound Parameters
         # -------------------------
-        # In SonTek's Matlab file the BT velocity, VB Depth, and WT Velocity are not reported as raw data but rather
-        # are reported as processed values based on manual settings of temperature, salinity, and speed of sound.
+        # In SonTek's Matlab file the BT velocity, VB Depth, and WT Velocity
+        # are not reported as raw data but rather are reported as processed values
+        # based on manual settings of temperature, salinity, and speed of sound.
         # Note: the 4 beam depths are raw data and are not adjusted.
-        # QRev expects raw data to be independent of user settings. Therefore, manual settings must be identified
-        # and the Matlab data adjusted to reflect the raw data before creating the data classes in QRev.
+        # QRev expects raw data to be independent of user settings. Therefore,
+        # manual settings must be identified and the Matlab data adjusted to reflect
+        # the raw data before creating the data classes in QRev.
        # The manual values will then be applied during processing.
         self.sensors = Sensors()
 
         # Temperature
-        if rsdata.System.Units.Temperature.find('C') >= 0:
+        if rsdata.System.Units.Temperature.find("C") >= 0:
             temperature = rsdata.System.Temperature
         else:
-            temperature = (5. / 9.)
* (rsdata.System.Temperature - 32) + temperature = (5.0 / 9.0) * (rsdata.System.Temperature - 32) self.sensors.temperature_deg_c.internal = SensorData() - self.sensors.temperature_deg_c.internal.populate_data(data_in=temperature, source_in='internal') - self.sensors.temperature_deg_c.selected = 'internal' + self.sensors.temperature_deg_c.internal.populate_data( + data_in=temperature, source_in="internal" + ) + self.sensors.temperature_deg_c.selected = "internal" - if hasattr(rsdata.Setup, 'userTemperature'): + if hasattr(rsdata.Setup, "userTemperature"): if rsdata.Setup.useMeasuredTemperature == 0: - if rsdata.Setup.Units.userTemperature.find('C') >= 0: + if rsdata.Setup.Units.userTemperature.find("C") >= 0: temperature = rsdata.Setup.userTemperature else: - temperature = (5. / 9.) * (rsdata.Setup.userTemperature - 32) + temperature = (5.0 / 9.0) * (rsdata.Setup.userTemperature - 32) self.sensors.temperature_deg_c.user = SensorData() - self.sensors.temperature_deg_c.user.populate_data(data_in=temperature, source_in='Manual') - self.sensors.temperature_deg_c.selected = 'user' + self.sensors.temperature_deg_c.user.populate_data( + data_in=temperature, source_in="Manual" + ) + self.sensors.temperature_deg_c.selected = "user" # Salinity - # Create internal salinity using a zero value since salinity can only be applied in RSL and not in the raw data + # Create internal salinity using a zero value since salinity can only + # be applied in RSL and not in the raw data self.sensors.salinity_ppt.internal = SensorData() - self.sensors.salinity_ppt.internal.populate_data(data_in=0, source_in='QRev') + self.sensors.salinity_ppt.internal.populate_data(data_in=0, source_in="QRev") self.sensors.salinity_ppt.user = SensorData() - self.sensors.salinity_ppt.user.populate_data(data_in=rsdata.Setup.userSalinity, source_in='Manual') + self.sensors.salinity_ppt.user.populate_data( + data_in=rsdata.Setup.userSalinity, source_in="Manual" + ) # Set salinity source if rsdata.Setup.userSalinity > 0: - self.sensors.salinity_ppt.selected = 'user' + self.sensors.salinity_ppt.selected = "user" else: - self.sensors.salinity_ppt.selected = 'internal' + self.sensors.salinity_ppt.selected = "internal" # Speed of sound # Internal sos provided in SonTek data but is computed from equation. 
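
Velocities and ranges both scale linearly with the assumed speed of sound, so recovering raw values only requires the ratio of the internally computed speed of sound to the user-entered one. A minimal standalone sketch of the rescaling applied in the hunks that follow (all numbers are made up):

import numpy as np

# Hypothetical values: sos computed from measured temperature/salinity
# versus a fixed sos the user keyed into RiverSurveyor Live.
sos_internal_mps = 1481.0
sos_user_mps = 1500.0
sos_correction = sos_internal_mps / sos_user_mps

# Velocities scale directly with the assumed speed of sound, so the raw
# (internal-sos) values are recovered by multiplying by the ratio.
bt_vel_processed = np.array([0.52, 0.47, 0.03, 0.01])
bt_vel_raw = np.around(bt_vel_processed * sos_correction, 3)

# Ranges (cell depths) scale the same way, but only over the travel path
# below the transducer draft.
sensor_depth_m = 0.3
cell_depth_processed = np.array([0.8, 1.2, 1.6])
cell_depth_raw = np.around(
    (cell_depth_processed - sensor_depth_m) * sos_correction + sensor_depth_m, 6
)
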
@@ -842,30 +1016,43 @@ class TransectData(object): salinity = self.sensors.salinity_ppt.internal.data speed_of_sound = Sensors.unesco_speed_of_sound(t=temperature, s=salinity) self.sensors.speed_of_sound_mps.internal = SensorData() - self.sensors.speed_of_sound_mps.internal.populate_data(data_in=speed_of_sound, source_in='QRev') - self.sensors.speed_of_sound_mps.selected = 'internal' + self.sensors.speed_of_sound_mps.internal.populate_data( + data_in=speed_of_sound, source_in="QRev" + ) + self.sensors.speed_of_sound_mps.selected = "internal" - if hasattr(rsdata.Setup, 'useFixedSoundSpeed'): + if hasattr(rsdata.Setup, "useFixedSoundSpeed"): if rsdata.Setup.useFixedSoundSpeed > 0: self.sensors.speed_of_sound_mps.user = SensorData() user_sos = rsdata.Setup.fixedSoundSpeed - self.sensors.speed_of_sound_mps.user.populate_data(data_in=user_sos, source_in='Manual') - self.sensors.speed_of_sound_mps.selected = 'user' + self.sensors.speed_of_sound_mps.user.populate_data( + data_in=user_sos, source_in="Manual" + ) + self.sensors.speed_of_sound_mps.selected = "user" # Speed of sound correction to obtain raw data sos_correction = None - if self.sensors.speed_of_sound_mps.selected == 'user': - sos_correction = self.sensors.speed_of_sound_mps.internal.data / self.sensors.speed_of_sound_mps.user.data - - elif self.sensors.salinity_ppt.selected == 'user' or self.sensors.temperature_deg_c.selected == 'user': - selected_temperature = getattr(self.sensors.temperature_deg_c, self.sensors.temperature_deg_c.selected) + if self.sensors.speed_of_sound_mps.selected == "user": + sos_correction = ( + self.sensors.speed_of_sound_mps.internal.data + / self.sensors.speed_of_sound_mps.user.data + ) + + elif ( + self.sensors.salinity_ppt.selected == "user" + or self.sensors.temperature_deg_c.selected == "user" + ): + selected_temperature = getattr( + self.sensors.temperature_deg_c, self.sensors.temperature_deg_c.selected + ) temperature = selected_temperature.data - selected_salinity = getattr(self.sensors.salinity_ppt, self.sensors.salinity_ppt.selected) + selected_salinity = getattr( + self.sensors.salinity_ppt, self.sensors.salinity_ppt.selected + ) salinity = selected_salinity.data sos_user = Sensors.unesco_speed_of_sound(t=temperature, s=salinity) sos_correction = self.sensors.speed_of_sound_mps.internal.data / sos_user - # Bottom Track # ------------ @@ -880,6 +1067,13 @@ class TransectData(object): # Create valid frequency time series freq_ts = self.valid_frequencies(freq) + # Add ping types + if hasattr(rsdata.BottomTrack, "BT_PingType_Text"): + # RS5 + ping_ts = self.rs5_bt_ping_type(rsdata.BottomTrack.BT_PingType_Text) + else: + ping_ts = None + bt_vel = np.swapaxes(rsdata.BottomTrack.BT_Vel, 1, 0) # Apply correction for manual sos parameters to obtain raw values @@ -887,11 +1081,14 @@ class TransectData(object): bt_vel = np.around(bt_vel * sos_correction, 3) self.boat_vel = BoatStructure() - self.boat_vel.add_boat_object(source='SonTek', - vel_in=bt_vel, - freq_in=freq_ts, - coord_sys_in=ref_coord, - nav_ref_in='BT') + self.boat_vel.add_boat_object( + source="SonTek", + vel_in=bt_vel, + freq_in=freq_ts, + coord_sys_in=ref_coord, + nav_ref_in="BT", + ping_type=ping_ts, + ) # GPS Data # -------- @@ -900,80 +1097,97 @@ class TransectData(object): if len(rsdata.RawGPSData.GgaLatitude.shape) > 1: - self.gps.populate_data(raw_gga_utc=rsdata.RawGPSData.GgaUTC, - raw_gga_lat=rsdata.RawGPSData.GgaLatitude, - raw_gga_lon=rsdata.RawGPSData.GgaLongitude, - raw_gga_alt=rsdata.RawGPSData.GgaAltitude, - 
raw_gga_diff=rsdata.RawGPSData.GgaQuality, - raw_gga_hdop=np.swapaxes(np.tile(rsdata.GPS.HDOP, - (rsdata.RawGPSData.GgaLatitude.shape[1], - 1)), 1, 0), - raw_gga_num_sats=np.swapaxes(np.tile(rsdata.GPS.Satellites, - (rsdata.RawGPSData.GgaLatitude.shape[1], - 1)), 1, 0), - raw_gga_delta_time=None, - raw_vtg_course=rsdata.RawGPSData.VtgTmgTrue, - raw_vtg_speed=rsdata.RawGPSData.VtgSogMPS, - raw_vtg_delta_time=None, - raw_vtg_mode_indicator=rsdata.RawGPSData.VtgMode, - ext_gga_utc=rsdata.GPS.Utc, - ext_gga_lat=rsdata.GPS.Latitude, - ext_gga_lon=rsdata.GPS.Longitude, - ext_gga_alt=rsdata.GPS.Altitude, - ext_gga_diff=rsdata.GPS.GPS_Quality, - ext_gga_hdop=rsdata.GPS.HDOP, - ext_gga_num_sats=rsdata.GPS.Satellites, - ext_vtg_course=np.tile(np.nan, rsdata.GPS.Latitude.shape), - ext_vtg_speed=np.tile(np.nan, rsdata.GPS.Latitude.shape), - gga_p_method='End', - gga_v_method='End', - vtg_method='Average') + self.gps.populate_data( + raw_gga_utc=rsdata.RawGPSData.GgaUTC, + raw_gga_lat=rsdata.RawGPSData.GgaLatitude, + raw_gga_lon=rsdata.RawGPSData.GgaLongitude, + raw_gga_alt=rsdata.RawGPSData.GgaAltitude, + raw_gga_diff=rsdata.RawGPSData.GgaQuality, + raw_gga_hdop=np.swapaxes( + np.tile( + rsdata.GPS.HDOP, (rsdata.RawGPSData.GgaLatitude.shape[1], 1) + ), + 1, + 0, + ), + raw_gga_num_sats=np.swapaxes( + np.tile( + rsdata.GPS.Satellites, + (rsdata.RawGPSData.GgaLatitude.shape[1], 1), + ), + 1, + 0, + ), + raw_gga_delta_time=None, + raw_vtg_course=rsdata.RawGPSData.VtgTmgTrue, + raw_vtg_speed=rsdata.RawGPSData.VtgSogMPS, + raw_vtg_delta_time=None, + raw_vtg_mode_indicator=rsdata.RawGPSData.VtgMode, + ext_gga_utc=rsdata.GPS.Utc, + ext_gga_lat=rsdata.GPS.Latitude, + ext_gga_lon=rsdata.GPS.Longitude, + ext_gga_alt=rsdata.GPS.Altitude, + ext_gga_diff=rsdata.GPS.GPS_Quality, + ext_gga_hdop=rsdata.GPS.HDOP, + ext_gga_num_sats=rsdata.GPS.Satellites, + ext_vtg_course=np.tile(np.nan, rsdata.GPS.Latitude.shape), + ext_vtg_speed=np.tile(np.nan, rsdata.GPS.Latitude.shape), + gga_p_method="End", + gga_v_method="End", + vtg_method="Average", + ) else: # Nortek data rows = rsdata.RawGPSData.GgaLatitude.shape[0] - self.gps.populate_data(raw_gga_utc=rsdata.GPS.Utc.reshape(rows, 1), - raw_gga_lat=rsdata.GPS.Latitude.reshape(rows, 1), - raw_gga_lon=rsdata.GPS.Longitude.reshape(rows, 1), - raw_gga_alt=rsdata.GPS.Altitude.reshape(rows, 1), - raw_gga_diff=rsdata.GPS.GPS_Quality.reshape(rows, 1), - raw_gga_hdop=rsdata.GPS.HDOP.reshape(rows, 1), - raw_gga_num_sats=rsdata.GPS.Satellites.reshape(rows, 1), - raw_gga_delta_time=None, - raw_vtg_course=rsdata.RawGPSData.VtgTmgTrue.reshape(rows, 1), - raw_vtg_speed=rsdata.RawGPSData.VtgSogMPS.reshape(rows, 1), - raw_vtg_delta_time=None, - raw_vtg_mode_indicator=rsdata.RawGPSData.VtgMode.reshape(rows, 1), - ext_gga_utc=rsdata.GPS.Utc, - ext_gga_lat=rsdata.GPS.Latitude, - ext_gga_lon=rsdata.GPS.Longitude, - ext_gga_alt=rsdata.GPS.Altitude, - ext_gga_diff=rsdata.GPS.GPS_Quality, - ext_gga_hdop=rsdata.GPS.HDOP, - ext_gga_num_sats=rsdata.GPS.Satellites, - ext_vtg_course=np.tile(np.nan, rsdata.GPS.Latitude.shape), - ext_vtg_speed=np.tile(np.nan, rsdata.GPS.Latitude.shape), - gga_p_method='End', - gga_v_method='End', - vtg_method='Average') - - self.boat_vel.add_boat_object(source='SonTek', - vel_in=self.gps.gga_velocity_ens_mps, - freq_in=None, - coord_sys_in='Earth', - nav_ref_in='GGA') - - self.boat_vel.add_boat_object(source='SonTek', - vel_in=self.gps.vtg_velocity_ens_mps, - freq_in=None, - coord_sys_in='Earth', - nav_ref_in='VTG') - ref = 'BT' + self.gps.populate_data( + 
raw_gga_utc=rsdata.GPS.Utc.reshape(rows, 1), + raw_gga_lat=rsdata.GPS.Latitude.reshape(rows, 1), + raw_gga_lon=rsdata.GPS.Longitude.reshape(rows, 1), + raw_gga_alt=rsdata.GPS.Altitude.reshape(rows, 1), + raw_gga_diff=rsdata.GPS.GPS_Quality.reshape(rows, 1), + raw_gga_hdop=rsdata.GPS.HDOP.reshape(rows, 1), + raw_gga_num_sats=rsdata.GPS.Satellites.reshape(rows, 1), + raw_gga_delta_time=None, + raw_vtg_course=rsdata.RawGPSData.VtgTmgTrue.reshape(rows, 1), + raw_vtg_speed=rsdata.RawGPSData.VtgSogMPS.reshape(rows, 1), + raw_vtg_delta_time=None, + raw_vtg_mode_indicator=rsdata.RawGPSData.VtgMode.reshape(rows, 1), + ext_gga_utc=rsdata.GPS.Utc, + ext_gga_lat=rsdata.GPS.Latitude, + ext_gga_lon=rsdata.GPS.Longitude, + ext_gga_alt=rsdata.GPS.Altitude, + ext_gga_diff=rsdata.GPS.GPS_Quality, + ext_gga_hdop=rsdata.GPS.HDOP, + ext_gga_num_sats=rsdata.GPS.Satellites, + ext_vtg_course=np.tile(np.nan, rsdata.GPS.Latitude.shape), + ext_vtg_speed=np.tile(np.nan, rsdata.GPS.Latitude.shape), + gga_p_method="End", + gga_v_method="End", + vtg_method="Average", + ) + + self.boat_vel.add_boat_object( + source="SonTek", + vel_in=self.gps.gga_velocity_ens_mps, + freq_in=None, + coord_sys_in="Earth", + nav_ref_in="GGA", + ) + + self.boat_vel.add_boat_object( + source="SonTek", + vel_in=self.gps.vtg_velocity_ens_mps, + freq_in=None, + coord_sys_in="Earth", + nav_ref_in="VTG", + ) + ref = "BT" if rsdata.Setup.trackReference == 1: - ref = 'BT' + ref = "BT" elif rsdata.Setup.trackReference == 2: - ref = 'GGA' + ref = "GGA" elif rsdata.Setup.trackReference == 3: - ref = 'VTG' + ref = "VTG" self.boat_vel.set_nav_reference(ref) # Depth @@ -990,32 +1204,38 @@ class TransectData(object): cell_size = rsdata.System.Cell_Size.reshape(1, num_ens) cell_size_all = np.tile(cell_size, (max_cells, 1)) top_of_cells = rsdata.System.Cell_Start.reshape(1, num_ens) - cell_depth = ((np.tile(np.arange(1, max_cells + 1, 1).reshape(max_cells, 1), (1, num_ens)) - 0.5) - * cell_size_all) + np.tile(top_of_cells, (max_cells, 1)) + cell_depth = ( + ( + np.tile( + np.arange(1, max_cells + 1, 1).reshape(max_cells, 1), (1, num_ens) + ) + - 0.5 + ) + * cell_size_all + ) + np.tile(top_of_cells, (max_cells, 1)) # Adjust cell size and depth for user supplied temp, sal, or sos if sos_correction is not None: cell_size_all = np.around(cell_size_all * sos_correction, 6) - cell_depth = \ - np.around(((cell_depth - rsdata.Setup.sensorDepth) * sos_correction) + rsdata.Setup.sensorDepth, 6) + cell_depth = np.around( + ((cell_depth - rsdata.Setup.sensorDepth) * sos_correction) + + rsdata.Setup.sensorDepth, + 6, + ) # Prepare bottom track depth variable depth = rsdata.BottomTrack.BT_Beam_Depth.T depth[depth == 0] = np.nan - # Convert frequency to kHz - if np.nanmean(rsdata.BottomTrack.BT_Frequency) > 10000: - freq = rsdata.BottomTrack.BT_Frequency / 1000 - else: - freq = rsdata.BottomTrack.BT_Frequency - # Create depth object for bottom track beams - self.depths.add_depth_object(depth_in=depth, - source_in='BT', - freq_in=freq_ts, - draft_in=rsdata.Setup.sensorDepth, - cell_depth_in=cell_depth, - cell_size_in=cell_size_all) + self.depths.add_depth_object( + depth_in=depth, + source_in="BT", + freq_in=freq_ts, + draft_in=rsdata.Setup.sensorDepth, + cell_depth_in=cell_depth, + cell_size_in=cell_size_all, + ) # Prepare vertical beam depth variable depth_vb = np.tile(np.nan, (1, cell_depth.shape[1])) @@ -1024,21 +1244,29 @@ class TransectData(object): # Apply correction for manual sos parameters to obtain raw values if sos_correction is not None: - depth_vb = 
np.around(((depth_vb - rsdata.Setup.sensorDepth) * sos_correction) + rsdata.Setup.sensorDepth, 5)
+            depth_vb = np.around(
+                ((depth_vb - rsdata.Setup.sensorDepth) * sos_correction)
+                + rsdata.Setup.sensorDepth,
+                5,
+            )
 
         # Create depth object for vertical beam
-        self.depths.add_depth_object(depth_in=depth_vb,
-                                     source_in='VB',
-                                     freq_in=np.array([rsdata.Transformation_Matrices.Frequency[1]] * depth.shape[-1]),
-                                     draft_in=rsdata.Setup.sensorDepth,
-                                     cell_depth_in=cell_depth,
-                                     cell_size_in=cell_size_all)
+        self.depths.add_depth_object(
+            depth_in=depth_vb,
+            source_in="VB",
+            freq_in=np.array(
+                [rsdata.Transformation_Matrices.Frequency[1]] * depth.shape[-1]
+            ),
+            draft_in=rsdata.Setup.sensorDepth,
+            cell_depth_in=cell_depth,
+            cell_size_in=cell_size_all,
+        )
 
         # Set depth reference
         if rsdata.Setup.depthReference < 0.5:
-            self.depths.selected = 'vb_depths'
+            self.depths.selected = "vb_depths"
         else:
-            self.depths.selected = 'bt_depths'
+            self.depths.selected = "bt_depths"
 
         # Water Velocity
         # --------------
@@ -1059,170 +1287,189 @@ class TransectData(object):
         if sos_correction is not None:
             vel = np.around(vel * sos_correction, 3)
         snr = np.swapaxes(rsdata.System.SNR, 1, 0)
-        if hasattr(rsdata.WaterTrack, 'Correlation'):
+        if hasattr(rsdata.WaterTrack, "Correlation"):
             corr = np.swapaxes(rsdata.WaterTrack.Correlation, 1, 0)
         else:
             corr = np.array([])
 
-        # Correct SonTek difference velocity for error in earlier transformation matrices.
+        # Correct SonTek difference velocity for error in earlier
+        # transformation matrices.
         if abs(rsdata.Transformation_Matrices.Matrix[3, 0, 0]) < 0.5:
             vel[3, :, :] = vel[3, :, :] * 2
 
-        # Apply TRDI scaling to SonTek difference velocity to convert to a TRDI compatible error velocity
-        vel[3, :, :] = vel[3, :, :] / ((2 ** 0.5) * np.tan(np.deg2rad(25)))
+        # Apply TRDI scaling to SonTek difference velocity to convert to a
+        # TRDI compatible error velocity
+        vel[3, :, :] = vel[3, :, :] / ((2**0.5) * np.tan(np.deg2rad(25)))
 
-        # Convert velocity reference from what was used in RiverSurveyor Live to None by adding the boat velocity
-        # to the reported water velocity
+        # Convert velocity reference from what was used in RiverSurveyor Live
+        # to None by adding the boat velocity to the reported water velocity
         boat_vel = np.swapaxes(rsdata.Summary.Boat_Vel, 1, 0)
         vel[0, :, :] = vel[0, :, :] + boat_vel[0, :]
         vel[1, :, :] = vel[1, :, :] + boat_vel[1, :]
-        ref_water = 'None'
+        ref_water = "None"
 
-        # Compute side lobe cutoff using Transmit Length information if availalbe, if not it is assumed to be equal
-        # to 1/2 depth_cell_size_m. The percent method is use for the side lobe cutoff computation.
+        # Compute side lobe cutoff using Transmit Length information if
+        # available; if not, it is assumed to be equal
+        # to 1/2 depth_cell_size_m. The percent method is used for the side
+        # lobe cutoff computation.
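        # (Worked example, assuming the usual QRev relation
        # cutoff = draft + (min depth - draft) * value - lag effect:
        # a 10 m minimum beam depth, 0.5 m draft, 10 percent discard, and
        # 0.4 m lag effect give 0.5 + 9.5 * 0.9 - 0.4 = 8.65 m, so cells
        # centered deeper than 8.65 m are excluded.)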
sl_cutoff_percent = rsdata.Setup.extrapolation_dDiscardPercent sl_cutoff_number = rsdata.Setup.extrapolation_nDiscardCells - if hasattr(rsdata.Summary, 'Transmit_Length'): - sl_lag_effect_m = (rsdata.Summary.Transmit_Length - + self.depths.bt_depths.depth_cell_size_m[0, :]) / 2.0 + if hasattr(rsdata.Summary, "Transmit_Length"): + sl_lag_effect_m = ( + rsdata.Summary.Transmit_Length + + self.depths.bt_depths.depth_cell_size_m[0, :] + ) / 2.0 else: sl_lag_effect_m = np.copy(self.depths.bt_depths.depth_cell_size_m[0, :]) - sl_cutoff_type = 'Percent' - cells_above_sl, sl_cutoff_m = TransectData.side_lobe_cutoff(depths=self.depths.bt_depths.depth_orig_m, - draft=self.depths.bt_depths.draft_orig_m, - cell_depth=self.depths.bt_depths.depth_cell_depth_m, - sl_lag_effect=sl_lag_effect_m, - slc_type=sl_cutoff_type, - value=1 - sl_cutoff_percent / 100) + sl_cutoff_type = "Percent" + cells_above_sl, sl_cutoff_m = TransectData.side_lobe_cutoff( + depths=self.depths.bt_depths.depth_orig_m, + draft=self.depths.bt_depths.draft_orig_m, + cell_depth=self.depths.bt_depths.depth_cell_depth_m, + sl_lag_effect=sl_lag_effect_m, + slc_type=sl_cutoff_type, + value=1 - sl_cutoff_percent / 100, + ) # Determine water mode if len(corr) > 0: corr_nan = np.isnan(corr) number_of_nan = np.count_nonzero(corr_nan) if number_of_nan == 0: - wm = 'HD' + wm = "HD" elif corr_nan.size == number_of_nan: - wm = 'IC' + wm = "IC" else: - wm = 'Variable' + wm = "Variable" else: - wm = 'Unknown' + wm = "Unknown" # Determine excluded distance (Similar to SonTek's screening distance) excluded_distance = rsdata.Setup.screeningDistance - rsdata.Setup.sensorDepth if excluded_distance < 0: excluded_distance = 0 - if hasattr(rsdata.WaterTrack, 'Vel_Expected_StdDev'): + if hasattr(rsdata.WaterTrack, "Vel_Expected_StdDev"): # RS5 - ping_type = self.sontek_ping_type(corr=corr, freq=rsdata.WaterTrack.WT_Frequency, - expected_std=rsdata.WaterTrack.Vel_Expected_StdDev) + ping_type = self.sontek_ping_type( + corr=corr, + freq=rsdata.WaterTrack.WT_Frequency, + expected_std=rsdata.WaterTrack.Vel_Expected_StdDev, + ) else: # M9 or S5 - ping_type = self.sontek_ping_type(corr=corr, freq=rsdata.WaterTrack.WT_Frequency) + ping_type = self.sontek_ping_type( + corr=corr, freq=rsdata.WaterTrack.WT_Frequency + ) # Create water velocity object self.w_vel = WaterData() - self.w_vel.populate_data(vel_in=vel, - freq_in=freq_ts, - coord_sys_in=ref_coord, - nav_ref_in=ref_water, - rssi_in=snr, - rssi_units_in='SNR', - excluded_dist_in=excluded_distance, - cells_above_sl_in=cells_above_sl, - sl_cutoff_per_in=sl_cutoff_percent, - sl_cutoff_num_in=sl_cutoff_number, - sl_cutoff_type_in=sl_cutoff_type, - sl_lag_effect_in=sl_lag_effect_m, - sl_cutoff_m=sl_cutoff_m, - wm_in=wm, - blank_in=excluded_distance, - corr_in=corr, - ping_type=ping_type) + self.w_vel.populate_data( + vel_in=vel, + freq_in=freq_ts, + coord_sys_in=ref_coord, + nav_ref_in=ref_water, + rssi_in=snr, + rssi_units_in="SNR", + excluded_dist_in=excluded_distance, + cells_above_sl_in=cells_above_sl, + sl_cutoff_per_in=sl_cutoff_percent, + sl_cutoff_num_in=sl_cutoff_number, + sl_cutoff_type_in=sl_cutoff_type, + sl_lag_effect_in=sl_lag_effect_m, + sl_cutoff_m=sl_cutoff_m, + wm_in=wm, + blank_in=excluded_distance, + corr_in=corr, + ping_type=ping_type, + snr_3beam_comp=snr_3beam_comp, + ) # Edges # ----- # Create edge object self.edges = Edges() - self.edges.populate_data(rec_edge_method='Variable', - vel_method='VectorProf') + self.edges.populate_data(rec_edge_method="Variable", vel_method="VectorProf") # 
Determine number of ensembles for each edge if rsdata.Setup.startEdge > 0.1: ensembles_right = np.nansum(rsdata.System.Step == 2) ensembles_left = np.nansum(rsdata.System.Step == 4) - self.start_edge = 'Right' - self.orig_start_edge = 'Right' + self.start_edge = "Right" + self.orig_start_edge = "Right" else: ensembles_right = np.nansum(rsdata.System.Step == 4) ensembles_left = np.nansum(rsdata.System.Step == 2) - self.start_edge = 'Left' - self.orig_start_edge = 'Left' + self.start_edge = "Left" + self.orig_start_edge = "Left" self.in_transect_idx = np.where(rsdata.System.Step == 3)[0] # Create left edge object edge_type = None if rsdata.Setup.Edges_0__Method == 2: - edge_type = 'Triangular' + edge_type = "Triangular" elif rsdata.Setup.Edges_0__Method == 1: - edge_type = 'Rectangular' + edge_type = "Rectangular" elif rsdata.Setup.Edges_0__Method == 0: - edge_type = 'User Q' + edge_type = "User Q" if np.isnan(rsdata.Setup.Edges_0__EstimatedQ): user_discharge = None else: user_discharge = rsdata.Setup.Edges_0__EstimatedQ - self.edges.left.populate_data(edge_type=edge_type, - distance=rsdata.Setup.Edges_0__DistanceToBank, - number_ensembles=ensembles_left, - coefficient=None, - user_discharge=user_discharge) + self.edges.left.populate_data( + edge_type=edge_type, + distance=rsdata.Setup.Edges_0__DistanceToBank, + number_ensembles=ensembles_left, + coefficient=None, + user_discharge=user_discharge, + ) # Create right edge object if rsdata.Setup.Edges_1__Method == 2: - edge_type = 'Triangular' + edge_type = "Triangular" elif rsdata.Setup.Edges_1__Method == 1: - edge_type = 'Rectangular' + edge_type = "Rectangular" elif rsdata.Setup.Edges_1__Method == 0: - edge_type = 'User Q' + edge_type = "User Q" if np.isnan(rsdata.Setup.Edges_1__EstimatedQ): user_discharge = None else: user_discharge = rsdata.Setup.Edges_1__EstimatedQ - self.edges.right.populate_data(edge_type=edge_type, - distance=rsdata.Setup.Edges_1__DistanceToBank, - number_ensembles=ensembles_right, - coefficient=None, - user_discharge=user_discharge) + self.edges.right.populate_data( + edge_type=edge_type, + distance=rsdata.Setup.Edges_1__DistanceToBank, + number_ensembles=ensembles_right, + coefficient=None, + user_discharge=user_discharge, + ) # Extrapolation # ------------- - top = '' - bottom = '' + top = "" + bottom = "" # Top extrapolation if rsdata.Setup.extrapolation_Top_nFitType == 0: - top = 'Constant' + top = "Constant" elif rsdata.Setup.extrapolation_Top_nFitType == 1: - top = 'Power' + top = "Power" elif rsdata.Setup.extrapolation_Top_nFitType == 2: - top = '3-Point' + top = "3-Point" # Bottom extrapolation if rsdata.Setup.extrapolation_Bottom_nFitType == 0: - bottom = 'Constant' + bottom = "Constant" elif rsdata.Setup.extrapolation_Bottom_nFitType == 1: if rsdata.Setup.extrapolation_Bottom_nEntirePro > 1.1: - bottom = 'No Slip' + bottom = "No Slip" else: - bottom = 'Power' + bottom = "Power" # Create extrapolation object self.extrap = ExtrapData() - self.extrap.populate_data(top=top, - bot=bottom, - exp=rsdata.Setup.extrapolation_Bottom_dExponent) + self.extrap.populate_data( + top=top, bot=bottom, exp=rsdata.Setup.extrapolation_Bottom_dExponent + ) # Sensor data # ----------- @@ -1231,74 +1478,127 @@ class TransectData(object): self.sensors.heading_deg.internal = HeadingData() # Check for firmware supporting G3 compass and associated data - if hasattr(rsdata, 'Compass'): - # TODO need to find older file that had 3 columns in Magnetic error to test and modify code + if hasattr(rsdata, "Compass"): + # TODO need to find 
older file that had 3 columns in Magnetic + # error to test and modify code mag_error = rsdata.Compass.Magnetic_error - pitch_limit = np.array((rsdata.Compass.Maximum_Pitch, rsdata.Compass.Minimum_Pitch)).T - roll_limit = np.array((rsdata.Compass.Maximum_Roll, rsdata.Compass.Minimum_Roll)).T - if np.any(np.greater_equal(np.abs(pitch_limit), 90)) or np.any(np.greater_equal(np.abs(roll_limit), 90)): + pitch_limit = np.array( + (rsdata.Compass.Maximum_Pitch, rsdata.Compass.Minimum_Pitch) + ).T + roll_limit = np.array( + (rsdata.Compass.Maximum_Roll, rsdata.Compass.Minimum_Roll) + ).T + if np.any(np.greater_equal(np.abs(pitch_limit), 90)) or np.any( + np.greater_equal(np.abs(roll_limit), 90) + ): pitch_limit = None roll_limit = None else: mag_error = None pitch_limit = None roll_limit = None - self.sensors.heading_deg.internal.populate_data(data_in=rsdata.System.Heading, - source_in='internal', - magvar=rsdata.Setup.magneticDeclination, - mag_error=mag_error, - pitch_limit=pitch_limit, - roll_limit=roll_limit) + self.sensors.heading_deg.internal.populate_data( + data_in=rsdata.System.Heading, + source_in="internal", + magvar=rsdata.Setup.magneticDeclination, + mag_error=mag_error, + pitch_limit=pitch_limit, + roll_limit=roll_limit, + ) # External heading ext_heading = rsdata.System.GPS_Compass_Heading if np.nansum(np.abs(np.diff(ext_heading))) > 0: self.sensors.heading_deg.external = HeadingData() - self.sensors.heading_deg.external.populate_data(data_in=ext_heading, - source_in='external', - magvar=rsdata.Setup.magneticDeclination, - align=rsdata.Setup.hdtHeadingCorrection) + self.sensors.heading_deg.external.populate_data( + data_in=ext_heading, + source_in="external", + magvar=rsdata.Setup.magneticDeclination, + align=rsdata.Setup.hdtHeadingCorrection, + ) # Set selected reference if rsdata.Setup.headingSource > 1.1: - self.sensors.heading_deg.selected = 'external' + self.sensors.heading_deg.selected = "external" else: - self.sensors.heading_deg.selected = 'internal' + self.sensors.heading_deg.selected = "internal" # Pitch and roll pitch = None roll = None - if hasattr(rsdata, 'Compass'): + if hasattr(rsdata, "Compass"): pitch = rsdata.Compass.Pitch roll = rsdata.Compass.Roll - elif hasattr(rsdata.System, 'Pitch'): + elif hasattr(rsdata.System, "Pitch"): pitch = rsdata.System.Pitch roll = rsdata.System.Roll + if len(pitch.shape) > 1: + pitch = np.squeeze(pitch[:, 0]) + roll = np.squeeze(roll[:, 0]) + self.sensors.pitch_deg.internal = SensorData() - self.sensors.pitch_deg.internal.populate_data(data_in=pitch, source_in='internal') - self.sensors.pitch_deg.selected = 'internal' + self.sensors.pitch_deg.internal.populate_data( + data_in=pitch, source_in="internal" + ) + self.sensors.pitch_deg.selected = "internal" self.sensors.roll_deg.internal = SensorData() - self.sensors.roll_deg.internal.populate_data(data_in=roll, source_in='internal') - self.sensors.roll_deg.selected = 'internal' - + self.sensors.roll_deg.internal.populate_data(data_in=roll, source_in="internal") + self.sensors.roll_deg.selected = "internal" + + # Battery voltage + self.sensors.battery_voltage.internal = SensorData() + if hasattr(rsdata.System, "Voltage"): + self.sensors.battery_voltage.internal.populate_data( + data_in=rsdata.System.Voltage, + source_in="internal", + ) + elif hasattr(rsdata.System, "Battery_Voltage"): + self.sensors.battery_voltage.internal.populate_data( + data_in=rsdata.System.Battery_Voltage, + source_in="internal", + ) # Set composite depths as this is the only option in RiverSurveyor Live 
self.depths.composite_depths(transect=self, setting="On")
 
+    @staticmethod
+    def rs5_bt_ping_type(mat_data):
+        """Pulls ping type from mat_struct object
+
+        Parameters
+        ----------
+        mat_data: mat_struct
+            BottomTrack.BT_PingType_Text"""
+
+        pt = []
+        for data in mat_data:
+            for item in data._fieldnames:
+                ping = data.__dict__[item]
+                pt.append(ping)
+
+        pt = np.array(pt)
+        pt[pt == "1"] = "U"
+
+        return pt
+
     @staticmethod
     def sontek_ping_type(corr, freq, expected_std=None):
-        """Determines ping type based on the fact that HD has correlation but incoherent does not.
+        """Determines ping type based on the fact that HD has correlation but
+        incoherent does not.
 
         Parameters
         ----------
-        corr: np.ndarray(int)
+        corr: np.array(int)
             Water track correlation
         freq:
             Frequency of ping in Hz
+        expected_std: np.array(float)
+            Expected standard deviation
 
         Returns
         -------
         ping_type: np.array(int)
-            Ping_type for each ensemble, 3 - 1 MHz Incoherent, 4 - 1 MHz HD, 5 - 3 MHz Incoherent, 6 - 3 MHz HD
+            Ping_type for each ensemble, 3 - 1 MHz Incoherent, 4 - 1 MHz HD,
+            5 - 3 MHz Incoherent, 6 - 3 MHz HD
         """
 
         # Determine ping type
@@ -1313,14 +1613,14 @@ class TransectData(object):
         for n in range(len(coherent)):
             if coherent[n]:
                 if freq[n] == 3000:
-                    ping_type.append('3C')
+                    ping_type.append("3C")
                 else:
-                    ping_type.append('1C')
+                    ping_type.append("1C")
             else:
                 if freq[n] == 3000:
-                    ping_type.append('3I')
+                    ping_type.append("3I")
                 else:
-                    ping_type.append('1I')
+                    ping_type.append("1I")
             ping_type = np.array(ping_type)
         else:
             # RS5
@@ -1332,31 +1632,33 @@
 
             ves_avg = np.nanmean(ves, axis=0)
 
-            ping_type = np.tile(['PC/BB'], ves_avg.size)
-            ping_type[ves_avg < 0.01] = 'PC'
-            ping_type[ves_avg > 0.025] = 'BB'
+            ping_type = np.tile(["PC/BB"], ves_avg.size)
+            ping_type[ves_avg < 0.01] = "PC"
+            ping_type[ves_avg > 0.025] = "BB"
 
         return ping_type
 
     @staticmethod
     def qrev_mat_in(meas_struct):
-        """Processes the Matlab data structure to obtain a list of TransectData objects containing transect
-        data from the Matlab data structure.
+        """Processes the Matlab data structure to obtain a list of
+        TransectData objects containing transect
+        data from the Matlab data structure.
 
-        Parameters
-        ----------
-        meas_struct: mat_struct
-            Matlab data structure obtained from sio.loadmat
+            Parameters
+            ----------
+            meas_struct: mat_struct
+                Matlab data structure obtained from sio.loadmat
 
-        Returns
-        -------
-        transects: list
-            List of TransectData objects
+            Returns
+            -------
+            transects: list
+                List of TransectData objects
+            """
         transects = []
-        if hasattr(meas_struct, 'transects'):
-            # If only one transect the data are not a list or array of transects
+        if hasattr(meas_struct, "transects"):
+            # If only one transect the data are not a list or array of
+            # transects
             try:
                 if len(meas_struct.transects) > 0:
                     for transect in meas_struct.transects:
@@ -1371,7 +1673,8 @@ class TransectData(object):
         return transects
 
     def populate_from_qrev_mat(self, transect):
-        """Populates the object using data from previously saved QRev Matlab file.
+        """Populates the object using data from previously saved QRev Matlab
+        file.
 
Parameters ---------- @@ -1397,7 +1700,7 @@ class TransectData(object): self.extrap = ExtrapData() self.extrap.populate_from_qrev_mat(transect) self.start_edge = transect.startEdge - if hasattr(transect, 'orig_start_edge'): + if hasattr(transect, "orig_start_edge"): self.orig_start_edge = transect.orig_start_edge else: self.orig_start_edge = transect.startEdge @@ -1411,7 +1714,8 @@ class TransectData(object): @staticmethod def valid_frequencies(frequency_in): - """Create frequency time series for BT and WT with all valid frequencies. + """Create frequency time series for BT and WT with all valid + frequencies. Parameters ---------- @@ -1432,7 +1736,8 @@ class TransectData(object): if np.any(invalid_freq): # Identify the first valid frequency valid = frequency_in[np.logical_not(invalid_freq)][0] - # Forward fill for invalid frequencies beyond first valid, backfill until 1st valid + # Forward fill for invalid frequencies beyond first valid, backfill + # until 1st valid for n in range(frequency_in.size): if invalid_freq[n]: frequency_out[n] = valid @@ -1453,8 +1758,8 @@ class TransectData(object): dist_cell_1_m = pd0.Cfg.dist_bin1_cm / 100 num_reg_cells = pd0.Wt.vel_mps.shape[1] - # Surf data are to accommodate RiverRay and RiverPro. pd0_read sets these - # values to nan when reading Rio Grande or StreamPro data + # Surf data are to accommodate RiverRay and RiverPro. pd0_read sets + # these values to nan when reading Rio Grande or StreamPro data no_surf_cells = pd0.Surface.no_cells no_surf_cells[np.isnan(no_surf_cells)] = 0 max_surf_cells = np.nanmax(no_surf_cells) @@ -1478,19 +1783,32 @@ class TransectData(object): # Compute cell depth if no_surf_cells[i] > 1e-5: - cell_depth[:int(no_surf_cells[i]), i] = surf_cell_dist[i] + \ - np.arange(0, (no_surf_cells[i] - 1) * surf_cell_size[i] + 0.001, - surf_cell_size[i]) - cell_depth[int(no_surf_cells[i]):, i] = cell_depth[int(no_surf_cells[i] - 1), i] \ - + (.5 * surf_cell_size[i] + 0.5 * reg_cell_size[i]) \ - + np.arange(0, (num_reg_cells - 1) * reg_cell_size[i] + 0.001, - reg_cell_size[i]) - cell_size_all[0:int(no_surf_cells[i]), i] = np.repeat(surf_cell_size[i], int(no_surf_cells[i])) - cell_size_all[int(no_surf_cells[i]):, i] = np.repeat(reg_cell_size[i], int(num_reg_cells)) + cell_depth[: int(no_surf_cells[i]), i] = surf_cell_dist[i] + np.arange( + 0, + (no_surf_cells[i] - 1) * surf_cell_size[i] + 0.001, + surf_cell_size[i], + ) + cell_depth[int(no_surf_cells[i]) :, i] = ( + cell_depth[int(no_surf_cells[i] - 1), i] + + (0.5 * surf_cell_size[i] + 0.5 * reg_cell_size[i]) + + np.arange( + 0, + (num_reg_cells - 1) * reg_cell_size[i] + 0.001, + reg_cell_size[i], + ) + ) + cell_size_all[0 : int(no_surf_cells[i]), i] = np.repeat( + surf_cell_size[i], int(no_surf_cells[i]) + ) + cell_size_all[int(no_surf_cells[i]) :, i] = np.repeat( + reg_cell_size[i], int(num_reg_cells) + ) else: - cell_depth[:int(num_reg_cells), i] = dist_cell_1_m[i] + \ - np.linspace(0, int(num_reg_cells) - 1, - int(num_reg_cells)) * reg_cell_size[i] + cell_depth[: int(num_reg_cells), i] = ( + dist_cell_1_m[i] + + np.linspace(0, int(num_reg_cells) - 1, int(num_reg_cells)) + * reg_cell_size[i] + ) cell_size_all[:, i] = np.repeat(reg_cell_size[i], num_reg_cells) # Firmware is used to ID RiverRay data with variable modes and lags @@ -1498,7 +1816,7 @@ class TransectData(object): # Compute sl_lag_effect lag = pd0.Cfg.lag_cm / 100 - if firmware[0:2] == '44' or firmware[0:2] == '56': + if firmware[0:2] == "44" or firmware[0:2] == "56": lag_near_bottom = np.array(pd0.Cfg.lag_near_bottom) 
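        # Note: in NumPy `x == np.nan` is always False, so the assignment
        # below never replaces NaN entries; `np.isnan` is the usual test,
        # e.g. lag_near_bottom[np.isnan(lag_near_bottom)] = 0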
-            lag_near_bottom[lag_near_bottom == np.nan] = 0
+            lag_near_bottom[np.isnan(lag_near_bottom)] = 0
             lag[lag_near_bottom != 0] = 0
@@ -1510,8 +1828,8 @@ class TransectData(object):
         return cell_size_all, cell_depth, sl_cutoff_per, sl_lag_effect_m

     def change_q_ensembles(self, proc_method):
-        """Sets in_transect_idx to all ensembles, except in the case of SonTek data
-        where RSL processing is applied.
+        """Sets in_transect_idx to all ensembles, except in the case of SonTek
+        data where RSL processing is applied.

         Parameters
         ----------
@@ -1519,22 +1837,29 @@ class TransectData(object):
             Processing method (WR2, RSL, QRev)
         """

-        if proc_method == 'RSL':
+        if proc_method == "RSL":
             num_ens = self.boat_vel.bt_vel.u_processed_mps.shape[1]
             # Determine number of ensembles for each edge
-            if self.start_edge == 'Right':
-                self.in_transect_idx = np.arange(self.edges.right.num_ens_2_avg,
-                                                 num_ens - self.edges.left.num_ens_2_avg)
+            if self.start_edge == "Right":
+                self.in_transect_idx = np.arange(
+                    self.edges.right.num_ens_2_avg,
+                    num_ens - self.edges.left.num_ens_2_avg,
+                )
             else:
-                self.in_transect_idx = np.arange(self.edges.left.num_ens_2_avg,
-                                                 num_ens - self.edges.right.num_ens_2_avg)
+                self.in_transect_idx = np.arange(
+                    self.edges.left.num_ens_2_avg,
+                    num_ens - self.edges.right.num_ens_2_avg,
+                )
         else:
-            self.in_transect_idx = np.arange(0, self.boat_vel.bt_vel.u_processed_mps.shape[0])
+            self.in_transect_idx = np.arange(
+                0, self.boat_vel.bt_vel.u_processed_mps.shape[0]
+            )

     def change_coord_sys(self, new_coord_sys):
         """Changes the coordinate system of the water and boat data.

-        Current implementation only allows changes for original to higher order coordinate
+        Current implementation only allows changes for original to higher
+        order coordinate
         systems: Beam - Inst - Ship - Earth.

         Parameters
@@ -1572,31 +1897,32 @@ class TransectData(object):

         # Update object
         if self.sensors.heading_deg.external is not None:
-            self.sensors.heading_deg.external.set_mag_var(magvar, 'external')
+            self.sensors.heading_deg.external.set_mag_var(magvar, "external")

-        if self.sensors.heading_deg.selected == 'internal':
-            heading_selected = getattr(self.sensors.heading_deg, self.sensors.heading_deg.selected)
+        if self.sensors.heading_deg.selected == "internal":
+            heading_selected = getattr(
+                self.sensors.heading_deg, self.sensors.heading_deg.selected
+            )
             old_magvar = heading_selected.mag_var_deg
             magvar_change = magvar - old_magvar
-            heading_selected.set_mag_var(magvar, 'internal')
+            heading_selected.set_mag_var(magvar, "internal")
             self.boat_vel.bt_vel.change_heading(magvar_change)
             self.w_vel.change_heading(self.boat_vel, magvar_change)
         else:
-            self.sensors.heading_deg.internal.set_mag_var(magvar, 'internal')
-
-        # self.update_water()
+            self.sensors.heading_deg.internal.set_mag_var(magvar, "internal")

     def change_offset(self, h_offset):
-        """Change the heading offset (alignment correction). Only affects external heading.
+        """Change the heading offset (alignment correction). Only affects
+        external heading.
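change_magvar and change_offset both funnel a heading delta into
change_heading on the boat and water velocities, which amounts to a 2-D
rotation of the earth-coordinate components. A minimal sketch; the sign
convention follows the BoatData implementation, which is not shown in this
hunk, so treat the signs as illustrative.

import numpy as np

def rotate_velocity(u_mps, v_mps, heading_change_deg):
    # Rotate earth-coordinate velocities to absorb a heading change.
    rad = np.deg2rad(heading_change_deg)
    u_new = u_mps * np.cos(rad) + v_mps * np.sin(rad)
    v_new = -u_mps * np.sin(rad) + v_mps * np.cos(rad)
    return u_new, v_new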
Parameters ---------- h_offset: float Heading offset in degrees """ - self.sensors.heading_deg.internal.set_align_correction(h_offset, 'internal') + self.sensors.heading_deg.internal.set_align_correction(h_offset, "internal") - if self.sensors.heading_deg.selected == 'external': + if self.sensors.heading_deg.selected == "external": old = getattr(self.sensors.heading_deg, self.sensors.heading_deg.selected) old_offset = old.align_correction_deg offset_change = h_offset - old_offset @@ -1604,7 +1930,7 @@ class TransectData(object): self.w_vel.change_heading(self.boat_vel, offset_change) if self.sensors.heading_deg.external is not None: - self.sensors.heading_deg.external.set_align_correction(h_offset, 'external') + self.sensors.heading_deg.external.set_align_correction(h_offset, "external") self.update_water() @@ -1618,21 +1944,24 @@ class TransectData(object): """ # If source is user, check to see if it was created, if not create it - if h_source == 'user': + if h_source == "user": if self.sensors.heading_deg.user is None: self.sensors.heading_deg.user = HeadingData() - self.sensors.heading_deg.user.populate_data(data_in=np.zeros( - self.boat_vel.bt_vel.u_processed_mps.shape), - source_in='user', + self.sensors.heading_deg.user.populate_data( + data_in=np.zeros(self.boat_vel.bt_vel.u_processed_mps.shape), + source_in="user", magvar=0, - align=0) + align=0, + ) # Get new heading object new_heading_selection = getattr(self.sensors.heading_deg, h_source) # Change source to that requested if h_source is not None: - old_heading_selection = getattr(self.sensors.heading_deg, self.sensors.heading_deg.selected) + old_heading_selection = getattr( + self.sensors.heading_deg, self.sensors.heading_deg.selected + ) old_heading = old_heading_selection.data new_heading = new_heading_selection.data heading_change = new_heading - old_heading @@ -1643,7 +1972,8 @@ class TransectData(object): self.update_water() def update_water(self): - """Method called from set_nav_reference, boat_interpolation and boat filters + """Method called from set_nav_reference, boat_interpolation and boat + filters to ensure that changes in boatvel are reflected in the water data""" self.w_vel.set_nav_reference(self.boat_vel) @@ -1657,7 +1987,9 @@ class TransectData(object): self.w_vel.apply_interpolation(transect=self) @staticmethod - def side_lobe_cutoff(depths, draft, cell_depth, sl_lag_effect, slc_type='Percent', value=None): + def side_lobe_cutoff( + depths, draft, cell_depth, sl_lag_effect, slc_type="Percent", value=None + ): """Computes side lobe cutoff. The side lobe cutoff is based on the beam angle and is computed to @@ -1667,17 +1999,20 @@ class TransectData(object): Parameters ---------- depths: np.array - Bottom track (all 4 beams) and vertical beam depths for each ensemble, in m. + Bottom track (all 4 beams) and vertical beam depths for each + ensemble, in m. draft: float Draft of transducers, in m. cell_depth: np.array Depth to the centerline of each depth cell, in m. sl_lag_effect: np.array - The extra depth below the last depth cell that must be above the side lobe cutoff, in m. + The extra depth below the last depth cell that must be above the + side lobe cutoff, in m. slc_type: str Method used for side lobe cutoff computation. value: float - Value used in specified method to use for side lobe cutoff computation. + Value used in specified method to use for side lobe cutoff + computation. 
""" # Compute minimum depths for each ensemble @@ -1690,9 +2025,9 @@ class TransectData(object): # Adjust for transducer angle coeff = None - if slc_type == 'Percent': + if slc_type == "Percent": coeff = value - elif slc_type == 'Angle': + elif slc_type == "Angle": coeff = np.cos(np.deg2rad(value)) # Compute sidelobe cutoff to centerline @@ -1716,18 +2051,24 @@ class TransectData(object): """ # Interpolate bottom track data - if target == 'BT': - self.boat_vel.bt_vel.apply_interpolation(transect=self, interpolation_method=method) + if target == "BT": + self.boat_vel.bt_vel.apply_interpolation( + transect=self, interpolation_method=method + ) - if target == 'GPS': + if target == "GPS": # Interpolate GGA data - vel = getattr(self.boat_vel, 'gga_vel') + vel = getattr(self.boat_vel, "gga_vel") if vel is not None: - self.boat_vel.gga_vel.apply_interpolation(transect=self, interpolation_method=method) + self.boat_vel.gga_vel.apply_interpolation( + transect=self, interpolation_method=method + ) # Interpolate VTG data - vel = getattr(self.boat_vel, 'vtg_vel') + vel = getattr(self.boat_vel, "vtg_vel") if vel is not None: - self.boat_vel.vtg_vel.apply_interpolation(transect=self, interpolation_method=method) + self.boat_vel.vtg_vel.apply_interpolation( + transect=self, interpolation_method=method + ) # Apply composite tracks setting self.composite_tracks(update=False) @@ -1784,7 +2125,7 @@ class TransectData(object): # Apply filter to transect self.boat_vel.bt_vel.apply_filter(self, **kwargs) - if self.boat_vel.selected == 'bt_vel' and update: + if self.boat_vel.selected == "bt_vel" and update: self.update_water() def gps_filters(self, update, **kwargs): @@ -1816,7 +2157,9 @@ class TransectData(object): if self.boat_vel.vtg_vel is not None: self.boat_vel.vtg_vel.apply_gps_filter(self, **kwargs) - if (self.boat_vel.selected == 'VTG' or self.boat_vel.selected == 'GGA') and update == True: + if ( + self.boat_vel.selected == "VTG" or self.boat_vel.selected == "GGA" + ) and update: self.update_water() def set_depth_reference(self, update, setting): @@ -1837,8 +2180,9 @@ class TransectData(object): self.w_vel.adjust_side_lobe(self) def apply_averaging_method(self, setting): - """Method to apply the selected averaging method to the BT team depths to achieve a single - average depth. It is only applicable to the multiple beams used for BT, not VB or DS. + """Method to apply the selected averaging method to the BT team depths + to achieve a single average depth. It is only applicable to the + multiple beams used for BT, not VB or DS. Input: setting: averaging method (IDW, Simple) @@ -1848,10 +2192,18 @@ class TransectData(object): self.process_depths(update=False) - def process_depths(self, update=False, filter_method=None, interpolation_method=None, composite_setting=None, - avg_method=None, valid_method=None): - """Method applies filter, composite, and interpolation settings to depth objects - so that all are updated using the same filter and interpolation settings. + def process_depths( + self, + update=False, + filter_method=None, + interpolation_method=None, + composite_setting=None, + avg_method=None, + valid_method=None, + ): + """Method applies filter, composite, and interpolation settings to + depth objects so that all are updated using the same filter and interpolation + settings. Parameters ---------- @@ -1864,7 +2216,8 @@ class TransectData(object): composite_setting: str Specifies use of composite depths ("On" or "Off"). 
avg_method: str - Defines averaging method: "Simple", "IDW", only applicable to bottom track. + Defines averaging method: "Simple", "IDW", only applicable + to bottom track. valid_method: Defines method to determine if depth is valid (QRev or TRDI). """ @@ -1910,67 +2263,75 @@ class TransectData(object): if self.depths.bt_depths is not None: self.depths.bt_depths.change_draft(draft_in) - def change_sos(self, parameter=None, salinity=None, temperature=None, selected=None, speed=None): + def change_sos( + self, parameter=None, salinity=None, temperature=None, selected=None, speed=None + ): """Coordinates changing the speed of sound. Parameters ---------- parameter: str - Speed of sound parameter to be changed ('temperatureSrc', 'temperature', 'salinity', 'sosSrc') + Speed of sound parameter to be changed ('temperatureSrc', + 'temperature', 'salinity', 'sosSrc') salinity: float Salinity in ppt temperature: float Temperature in deg C selected: str - Selected speed of sound ('internal', 'computed', 'user') or temperature ('internal', 'user') + Selected speed of sound ('internal', 'computed', 'user') or + temperature ('internal', 'user') speed: float Manually supplied speed of sound for 'user' source """ - if parameter == 'temperatureSrc': + if parameter == "temperatureSrc": - temperature_internal = getattr(self.sensors.temperature_deg_c, 'internal') - if selected == 'user': + temperature_internal = getattr(self.sensors.temperature_deg_c, "internal") + if selected == "user": if self.sensors.temperature_deg_c.user is None: self.sensors.temperature_deg_c.user = SensorData() ens_temperature = np.tile(temperature, temperature_internal.data.shape) self.sensors.temperature_deg_c.user.change_data(data_in=ens_temperature) - self.sensors.temperature_deg_c.user.set_source(source_in='Manual Input') + self.sensors.temperature_deg_c.user.set_source(source_in="Manual Input") # Set the temperature data to the selected source self.sensors.temperature_deg_c.set_selected(selected_name=selected) # Update the speed of sound self.update_sos() - elif parameter == 'temperature': + elif parameter == "temperature": adcp_temp = self.sensors.temperature_deg_c.internal.data new_user_temperature = np.tile(temperature, adcp_temp.shape) - self.sensors.temperature_deg_c.user.change_data(data_in=new_user_temperature) - self.sensors.temperature_deg_c.user.set_source(source_in='Manual Input') + self.sensors.temperature_deg_c.user.change_data( + data_in=new_user_temperature + ) + self.sensors.temperature_deg_c.user.set_source(source_in="Manual Input") # Set the temperature data to the selected source - self.sensors.temperature_deg_c.set_selected(selected_name='user') + self.sensors.temperature_deg_c.set_selected(selected_name="user") # Update the speed of sound self.update_sos() - elif parameter == 'salinity': + elif parameter == "salinity": if salinity is not None: self.sensors.salinity_ppt.user.change_data(data_in=salinity) if type(self.sensors.salinity_ppt.internal.data) is float: salinity_internal = self.sensors.salinity_ppt.internal.data else: salinity_internal = self.sensors.salinity_ppt.internal.data - if np.all(np.equal(self.sensors.salinity_ppt.user.data, salinity_internal)): - self.sensors.salinity_ppt.set_selected(selected_name='internal') + if np.all( + np.equal(self.sensors.salinity_ppt.user.data, salinity_internal) + ): + self.sensors.salinity_ppt.set_selected(selected_name="internal") else: - self.sensors.salinity_ppt.set_selected(selected_name='user') + self.sensors.salinity_ppt.set_selected(selected_name="user") 
            self.update_sos()

-        elif parameter == 'sosSrc':
-            if selected == 'internal':
+        elif parameter == "sosSrc":
+            if selected == "internal":
                 self.update_sos()
-            elif selected == 'user':
-                self.update_sos(speed=speed, selected='user', source='Manual Input')
+            elif selected == "user":
+                self.update_sos(speed=speed, selected="user", source="Manual Input")

     def update_sos(self, selected=None, source=None, speed=None):
         """Sets a new specified speed of sound.
@@ -1988,54 +2349,69 @@ class TransectData(object):
         """

         # Get current speed of sound
-        sos_selected = getattr(self.sensors.speed_of_sound_mps, self.sensors.speed_of_sound_mps.selected)
+        sos_selected = getattr(
+            self.sensors.speed_of_sound_mps, self.sensors.speed_of_sound_mps.selected
+        )
         old_sos = sos_selected.data
         new_sos = None

         # Manual input for speed of sound
-        if selected == 'user' and source == 'Manual Input':
+        if selected == "user" and source == "Manual Input":
             self.sensors.speed_of_sound_mps.set_selected(selected_name=selected)
             self.sensors.speed_of_sound_mps.user = SensorData()
             self.sensors.speed_of_sound_mps.user.populate_data(speed, source)

-        # If called with no input set source to internal and determine whether computed or calculated based on
-        # availability of user supplied temperature or salinity
+        # If called with no input set source to internal and determine whether
+        # computed or calculated based on availability of user supplied
+        # temperature or salinity
         elif selected is None and source is None:
-            self.sensors.speed_of_sound_mps.set_selected('internal')
-            # If temperature or salinity is set by the user the speed of sound is computed otherwise it is consider
-            # calculated by the ADCP.
+            self.sensors.speed_of_sound_mps.set_selected("internal")
+            # If temperature or salinity is set by the user the speed of
+            # sound is computed; otherwise it is considered calculated by
+            # the ADCP.
+ if (self.sensors.temperature_deg_c.selected == "user") or ( + self.sensors.salinity_ppt.selected == "user" + ): + self.sensors.speed_of_sound_mps.internal.set_source("Computed") else: - self.sensors.speed_of_sound_mps.internal.set_source('Calculated') + self.sensors.speed_of_sound_mps.internal.set_source("Calculated") # Determine new speed of sound - if self.sensors.speed_of_sound_mps.selected == 'internal': + if self.sensors.speed_of_sound_mps.selected == "internal": - if self.sensors.speed_of_sound_mps.internal.source == 'Calculated': + if self.sensors.speed_of_sound_mps.internal.source == "Calculated": # Internal: Calculated new_sos = self.sensors.speed_of_sound_mps.internal.data_orig self.sensors.speed_of_sound_mps.internal.change_data(data_in=new_sos) # Change temperature and salinity selected to internal - self.sensors.temperature_deg_c.set_selected(selected_name='internal') - self.sensors.salinity_ppt.set_selected(selected_name='internal') + self.sensors.temperature_deg_c.set_selected(selected_name="internal") + self.sensors.salinity_ppt.set_selected(selected_name="internal") else: # Internal: Computed - temperature_selected = getattr(self.sensors.temperature_deg_c, self.sensors.temperature_deg_c.selected) + temperature_selected = getattr( + self.sensors.temperature_deg_c, + self.sensors.temperature_deg_c.selected, + ) temperature = temperature_selected.data - salinity_selected = getattr(self.sensors.salinity_ppt, self.sensors.salinity_ppt.selected) + salinity_selected = getattr( + self.sensors.salinity_ppt, self.sensors.salinity_ppt.selected + ) salinity = salinity_selected.data - new_sos = Sensors.speed_of_sound(temperature=temperature, salinity=salinity) + new_sos = Sensors.speed_of_sound( + temperature=temperature, salinity=salinity + ) self.sensors.speed_of_sound_mps.internal.change_data(data_in=new_sos) else: if speed is not None: - new_sos = np.tile(speed, len(self.sensors.speed_of_sound_mps.internal.data_orig)) + new_sos = np.tile( + speed, len(self.sensors.speed_of_sound_mps.internal.data_orig) + ) self.sensors.speed_of_sound_mps.user.change_data(data_in=new_sos) self.apply_sos_change(old_sos=old_sos, new_sos=new_sos) def apply_sos_change(self, old_sos, new_sos): - """Computes the ratio and calls methods in WaterData and BoatData to apply change. + """Computes the ratio and calls methods in WaterData and BoatData to + apply change. Parameters ---------- @@ -2047,8 +2423,9 @@ class TransectData(object): ratio = new_sos / old_sos - # RiverRay horizontal velocities are not affected by changes in speed of sound - if self.adcp.model != 'RiverRay': + # RiverRay horizontal velocities are not affected by changes in + # speed of sound + if self.adcp.model != "RiverRay": # Apply speed of sound change to water and boat data self.w_vel.sos_correction(ratio=ratio) self.boat_vel.bt_vel.sos_correction(ratio=ratio) @@ -2057,12 +2434,14 @@ class TransectData(object): @staticmethod def raw_valid_data(transect): - """Determines ensembles and cells with no interpolated water or boat data. + """Determines ensembles and cells with no interpolated water or + boat data. For valid water track cells both non-interpolated valid water data and boat velocity data must be available. Interpolated depths are allowed. - For valid ensembles water, boat, and depth data must all be non-interpolated. + For valid ensembles water, boat, and depth data must all be + non-interpolated. 
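apply_sos_change reduces to scaling velocities by the ratio of new to old
speed of sound, as the sos_correction calls on WaterData and BoatData do. A
one-function sketch with illustrative names:

import numpy as np

def sos_correction(vel_mps, old_sos_mps, new_sos_mps):
    # Along-beam velocities scale linearly with the assumed speed of sound.
    return np.asarray(vel_mps) * (np.asarray(new_sos_mps) / np.asarray(old_sos_mps))

# A 1481 -> 1500 m/s change scales a 1.000 m/s velocity to about 1.013 m/s.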
Parameters ---------- @@ -2079,9 +2458,13 @@ class TransectData(object): in_transect_idx = transect.in_transect_idx - # Determine valid water track ensembles based on water track and navigation data. + # Determine valid water track ensembles based on water track and + # navigation data. boat_vel_select = getattr(transect.boat_vel, transect.boat_vel.selected) - if boat_vel_select is not None and np.nansum(np.logical_not(np.isnan(boat_vel_select.u_processed_mps))) > 0: + if ( + boat_vel_select is not None + and np.nansum(np.logical_not(np.isnan(boat_vel_select.u_processed_mps))) > 0 + ): valid_nav = boat_vel_select.valid_data[0, in_transect_idx] else: valid_nav = np.tile(False, in_transect_idx.shape[0]) @@ -2093,15 +2476,21 @@ class TransectData(object): depths_select = getattr(transect.depths, transect.depths.selected) if transect.depths.composite: valid_depth = np.tile(True, (depths_select.depth_source_ens.shape[0])) - idx_na = np.where(depths_select.depth_source_ens[in_transect_idx] == 'NA')[0] + idx_na = np.where(depths_select.depth_source_ens[in_transect_idx] == "NA")[ + 0 + ] if len(idx_na) > 0: valid_depth[idx_na] = False - interpolated_depth_idx = np.where(depths_select.depth_source_ens[in_transect_idx] == 'IN')[0] + interpolated_depth_idx = np.where( + depths_select.depth_source_ens[in_transect_idx] == "IN" + )[0] if len(interpolated_depth_idx) > 0: valid_depth[interpolated_depth_idx] = False else: valid_depth = depths_select.valid_data[in_transect_idx] - idx = np.where(np.isnan(depths_select.depth_processed_m[in_transect_idx]))[0] + idx = np.where(np.isnan(depths_select.depth_processed_m[in_transect_idx]))[ + 0 + ] if len(idx) > 0: valid_depth[idx] = False @@ -2112,7 +2501,8 @@ class TransectData(object): @staticmethod def compute_gps_lag(transect): - """Computes the lag between bottom track and GGA and/or VTG using an autocorrelation method. + """Computes the lag between bottom track and GGA and/or VTG using an + autocorrelation method. 
Parameters ---------- @@ -2131,36 +2521,56 @@ class TransectData(object): lag_gga = None lag_vtg = None - bt_speed = np.sqrt(transect.boat_vel.bt_vel.u_processed_mps ** 2 - + transect.boat_vel.bt_vel.v_processed_mps ** 2) + bt_speed = np.sqrt( + transect.boat_vel.bt_vel.u_processed_mps**2 + + transect.boat_vel.bt_vel.v_processed_mps**2 + ) avg_ens_dur = np.nanmean(transect.date_time.ens_duration_sec) # Compute lag for gga, if available if transect.boat_vel.gga_vel is not None: - gga_speed = np.sqrt(transect.boat_vel.gga_vel.u_processed_mps ** 2 - + transect.boat_vel.gga_vel.v_processed_mps ** 2) + gga_speed = np.sqrt( + transect.boat_vel.gga_vel.u_processed_mps**2 + + transect.boat_vel.gga_vel.v_processed_mps**2 + ) # Compute lag if both bottom track and gga have valid data - valid_data = np.all(np.logical_not(np.isnan(np.vstack((bt_speed, gga_speed)))), axis=0) + valid_data = np.all( + np.logical_not(np.isnan(np.vstack((bt_speed, gga_speed)))), axis=0 + ) if np.sometrue(valid_data): # Compute lag - lag_gga = (np.count_nonzero(valid_data) - - np.argmax(signal.correlate(bt_speed[valid_data], gga_speed[valid_data])) - 1) * avg_ens_dur + lag_gga = ( + np.count_nonzero(valid_data) + - np.argmax( + signal.correlate(bt_speed[valid_data], gga_speed[valid_data]) + ) + - 1 + ) * avg_ens_dur else: lag_gga = None # Compute lag for vtg, if available if transect.boat_vel.vtg_vel is not None: - vtg_speed = np.sqrt(transect.boat_vel.vtg_vel.u_processed_mps ** 2 - + transect.boat_vel.vtg_vel.v_processed_mps ** 2) + vtg_speed = np.sqrt( + transect.boat_vel.vtg_vel.u_processed_mps**2 + + transect.boat_vel.vtg_vel.v_processed_mps**2 + ) # Compute lag if both bottom track and gga have valid data - valid_data = np.all(np.logical_not(np.isnan(np.vstack((bt_speed, vtg_speed)))), axis=0) + valid_data = np.all( + np.logical_not(np.isnan(np.vstack((bt_speed, vtg_speed)))), axis=0 + ) if np.sometrue(valid_data): # Compute lag - lag_vtg = (np.count_nonzero(valid_data) - - np.argmax(signal.correlate(bt_speed[valid_data], vtg_speed[valid_data])) - 1) * avg_ens_dur + lag_vtg = ( + np.count_nonzero(valid_data) + - np.argmax( + signal.correlate(bt_speed[valid_data], vtg_speed[valid_data]) + ) + - 1 + ) * avg_ens_dur else: lag_vtg = None @@ -2168,7 +2578,8 @@ class TransectData(object): @staticmethod def compute_gps_lag_fft(transect): - """Computes the lag between bottom track and GGA and/or VTG using fft method. + """Computes the lag between bottom track and GGA and/or VTG using fft + method. 
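The lag arithmetic in compute_gps_lag, reduced to two 1-D speed series: with
full cross correlation, zero lag sits at index n - 1 for length-n inputs, so
the offset of the correlation peak from that index, scaled by the mean
ensemble duration, gives the BT-to-GPS lag in seconds. The helper name is
illustrative.

import numpy as np
from scipy import signal

def bt_gps_lag_sec(bt_speed, gps_speed, avg_ens_dur_sec):
    valid = np.logical_not(np.isnan(bt_speed) | np.isnan(gps_speed))
    if not np.any(valid):
        return None
    xcorr = signal.correlate(bt_speed[valid], gps_speed[valid])
    # Zero lag is at index n - 1 of the full correlation of two length-n series
    return (np.count_nonzero(valid) - np.argmax(xcorr) - 1) * avg_ens_dur_sec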
        Parameters
         ----------
@@ -2185,23 +2596,32 @@ class TransectData(object):
         lag_gga = None
         lag_vtg = None

-        bt_speed = np.sqrt(transect.boat_vel.bt_vel.u_processed_mps ** 2
-                           + transect.boat_vel.bt_vel.v_processed_mps ** 2)
+        bt_speed = np.sqrt(
+            transect.boat_vel.bt_vel.u_processed_mps**2
+            + transect.boat_vel.bt_vel.v_processed_mps**2
+        )

-        avg_ens_dur = np.nanmean(transect.date_time.ens_duration_sec)
         if transect.boat_vel.gga_vel is not None:
-            gga_speed = np.sqrt(transect.boat_vel.gga_vel.u_processed_mps ** 2
-                                + transect.boat_vel.gga_vel.v_processed_mps ** 2)
-            valid_data = np.all(np.logical_not(np.isnan(np.vstack((bt_speed, gga_speed)))), axis=0)
+            gga_speed = np.sqrt(
+                transect.boat_vel.gga_vel.u_processed_mps**2
+                + transect.boat_vel.gga_vel.v_processed_mps**2
+            )
+            valid_data = np.all(
+                np.logical_not(np.isnan(np.vstack((bt_speed, gga_speed)))), axis=0
+            )
             b = fftpack.fft(bt_speed[valid_data])
             g = fftpack.fft(gga_speed[valid_data])
-            br = -b.conjugat()
+            br = -b.conjugate()
             lag_gga = np.argmax(np.abs(fftpack.ifft(br * g)))

         if transect.boat_vel.vtg_vel is not None:
-            vtg_speed = np.sqrt(transect.boat_vel.vtg_vel.u_processed_mps ** 2
-                                + transect.boat_vel.vtg_vel.v_processed_mps ** 2)
-            valid_data = np.all(np.logical_not(np.isnan(np.vstack((bt_speed, vtg_speed)))), axis=0)
+            vtg_speed = np.sqrt(
+                transect.boat_vel.vtg_vel.u_processed_mps**2
+                + transect.boat_vel.vtg_vel.v_processed_mps**2
+            )
+            valid_data = np.all(
+                np.logical_not(np.isnan(np.vstack((bt_speed, vtg_speed)))), axis=0
+            )
             b = fftpack.fft(bt_speed[valid_data])
             g = fftpack.fft(vtg_speed[valid_data])
-            br = -b.conjugat()
+            br = -b.conjugate()
@@ -2210,8 +2630,9 @@ class TransectData(object):
         return lag_gga, lag_vtg

     @staticmethod
-    def compute_gps_bt(transect, gps_ref='gga_vel'):
-        """Computes properties describing the difference between bottom track and the specified GPS reference.
+    def compute_gps_bt(transect, gps_ref="gga_vel"):
+        """Computes properties describing the difference between bottom track
+        and the specified GPS reference.
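A standalone sketch of the FFT lag estimate used above: ifft(conj(B) * G) is
circular cross correlation, and its peak marks the shift that best aligns the
two series. The abs() keeps the estimate insensitive to an overall sign, so
the stray negation in the original code does not change the argmax.

import numpy as np
from scipy import fftpack

def fft_lag(bt_speed, gps_speed):
    b = fftpack.fft(bt_speed)
    g = fftpack.fft(gps_speed)
    return np.argmax(np.abs(fftpack.ifft(b.conjugate() * g)))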
Parameters ---------- @@ -2226,239 +2647,75 @@ class TransectData(object): course: float Difference in course computed from gps and bt, in degrees ratio: float - Ratio of final distance made good for bt and gps (bt dmg / gps dmg) + Ratio of final distance made good for bt and gps + (bt dmg / gps dmg) dir: float - Direction of vector from end of GPS track to end of bottom track + Direction of vector from end of GPS track to end of bottom + track mag: float Length of vector from end of GPS track to end of bottom track """ gps_bt = dict() gps_vel = getattr(transect.boat_vel, gps_ref) - if gps_vel is not None and \ - 1 < np.sum(np.logical_not(np.isnan(gps_vel.u_processed_mps))) and \ - 1 < np.sum(np.logical_not(np.isnan(transect.boat_vel.bt_vel.u_processed_mps))): + if ( + gps_vel is not None + and 1 < np.sum(np.logical_not(np.isnan(gps_vel.u_processed_mps))) + and 1 + < np.sum(np.logical_not(np.isnan(transect.boat_vel.bt_vel.u_processed_mps))) + ): # Data prep - bt_track = BoatStructure.compute_boat_track(transect, ref='bt_vel') + bt_track = BoatStructure.compute_boat_track(transect, ref="bt_vel") try: - bt_course, _ = cart2pol(bt_track['track_x_m'][-1], bt_track['track_y_m'][-1]) + bt_course, _ = cart2pol( + bt_track["track_x_m"][-1], bt_track["track_y_m"][-1] + ) bt_course = rad2azdeg(bt_course) except TypeError: bt_course = np.nan gps_track = BoatStructure.compute_boat_track(transect, ref=gps_ref) - gps_course, _ = cart2pol(gps_track['track_x_m'][-1], gps_track['track_y_m'][-1]) + gps_course, _ = cart2pol( + gps_track["track_x_m"][-1], gps_track["track_y_m"][-1] + ) gps_course = rad2azdeg(gps_course) # Compute course - gps_bt['course'] = gps_course - bt_course - if gps_bt['course'] < 0: - gps_bt['course'] = gps_bt['course'] + 360 + gps_bt["course"] = gps_course - bt_course + if gps_bt["course"] < 0: + gps_bt["course"] = gps_bt["course"] + 360 # Compute ratio try: - gps_bt['ratio'] = bt_track['dmg_m'][-1] / gps_track['dmg_m'][-1] + gps_bt["ratio"] = bt_track["dmg_m"][-1] / gps_track["dmg_m"][-1] except TypeError: - gps_bt['ratio'] = np.nan + gps_bt["ratio"] = np.nan # Compute closure vector try: - x_diff = bt_track['track_x_m'][-1] - gps_track['track_x_m'][-1] + x_diff = bt_track["track_x_m"][-1] - gps_track["track_x_m"][-1] except TypeError: x_diff = np.nan try: - y_diff = bt_track['track_y_m'][-1] - gps_track['track_y_m'][-1] + y_diff = bt_track["track_y_m"][-1] - gps_track["track_y_m"][-1] except TypeError: y_diff = np.nan try: - gps_bt['dir'], gps_bt['mag'] = cart2pol(x_diff, y_diff) - gps_bt['dir'] = rad2azdeg(gps_bt['dir']) + gps_bt["dir"], gps_bt["mag"] = cart2pol(x_diff, y_diff) + gps_bt["dir"] = rad2azdeg(gps_bt["dir"]) except TypeError: - gps_bt['dir'] = np.nan - gps_bt['mag'] = np.nan + gps_bt["dir"] = np.nan + gps_bt["mag"] = np.nan return gps_bt -# ======================================================================== -# Begin multithread function included in module but not TransectData class -# Currently this is coded only for TRDI data -# ======================================================================== - - -# DSM changed 1/23/2018 def allocate_transects(source, mmt, kargs) -# TODO This needs a complete rewrite from what Greg did. 
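The closure vector at the end of compute_gps_bt is plain endpoint arithmetic.
This sketch substitutes np.arctan2 for the cart2pol and rad2azdeg helpers
from MiscLibs.common_functions, assuming the usual math-angle-to-azimuth
conversion performed by those helpers.

import numpy as np

def closure_vector(bt_end_xy, gps_end_xy):
    # Vector from the end of the GPS track to the end of the BT track.
    x_diff = bt_end_xy[0] - gps_end_xy[0]
    y_diff = bt_end_xy[1] - gps_end_xy[1]
    mag_m = np.hypot(x_diff, y_diff)
    azimuth_deg = (90.0 - np.degrees(np.arctan2(y_diff, x_diff))) % 360.0
    return azimuth_deg, mag_m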
However it works with no multi-threading for now - -# def add_transect(mmt, filename, index, type): -# pd0_data = Pd0TRDI(filename) -# -# if type == 'MB': -# mmt_transect = mmt.mbt_transects[index] -# else: -# mmt_transect = mmt.transects[index] -# -# transect = TransectData() -# transect.trdi(mmt=mmt, -# mmt_transect=mmt_transect, -# pd0_data=pd0_data) -# return transect -# -# -# def allocate_transects(mmt, transect_type='Q', checked=False): -# """Method to load transect data. Changed from Matlab approach by Greg to allow possibility -# of multi-thread approach. -# -# Parameters -# ---------- -# mmt: MMT_TRDI -# Object of MMT_TRDI -# transect_type: str -# Type of transect (Q: discharge or MB: moving-bed test) -# checked: bool -# Determines if all files are loaded (False) or only checked files (True) -# """ -# -# # DEBUG, set threaded to false to get manual serial commands -# multi_threaded = False -# -# file_names = [] -# file_idx = [] -# -# # Setup processing for discharge or moving-bed transects -# if transect_type == 'Q': -# # Identify discharge transect files to load -# if checked: -# for idx, transect in enumerate(mmt.transects): -# if transect.Checked == 1: -# file_names.append(transect.Files[0]) -# file_idx.append(idx) -# # file_names = [transect.Files[0] for transect in mmt.transects if transect.Checked == 1] -# else: -# file_names = [transect.Files[0] for transect in mmt.transects] -# file_idx = list(range(0, len(file_names))) -# elif transect_type == 'MB': -# file_names = [transect.Files[0] for transect in mmt.mbt_transects] -# file_idx = list(range(0, len(file_names))) -# -# # Determine if any files are missing -# valid_files = [] -# valid_indices = [] -# for index, name in enumerate(file_names): -# fullname = os.path.join(mmt.path, name) -# if os.path.exists(fullname): -# valid_files.append(fullname) -# valid_indices.append(file_idx[index]) -# -# -# start = time.perf_counter() -# transects = [] -# num = len(valid_indices) -# # num = 1 -# multi_process = True -# if multi_process: -# with concurrent.futures.ProcessPoolExecutor() as executor: -# results = [executor.submit(add_transect, mmt, valid_files[k], valid_indices[k], transect_type) for k in range(num)] -# -# for f in concurrent.futures.as_completed(results): -# transects.append(f.result()) -# else: -# for k in range(num): -# transects.append(add_transect(mmt, valid_files[k], valid_indices[k], transect_type)) -# -# # # Multi-thread for Pd0 files -# # # ------------------------- -# # # Seems like this section belongs in Pd0TRDI.py -# # # Initialize thread variables -# # pd0_data = [] -# # pd0_threads = [] -# # thread_id = 0 -# # -# # # DSM 1/24/2018 could this be moved to Pd0TRDI.py as a method -# # def add_pd0(file_name): -# # pd0_data.append(Pd0TRDI(file_name)) -# # -# # if multi_threaded: -# # # TODO this belongs in the pd0 class -# # for file in valid_files: -# # pd0_thread = MultiThread(thread_id=thread_id, function=add_pd0, args={'file_name': file}) -# # thread_id += 1 -# # pd0_thread.start() -# # pd0_threads.append(pd0_thread) -# # else: -# # for file in valid_files: -# # pd0_data.append(Pd0TRDI(file)) -# # -# # for thrd in pd0_threads: -# # thrd.join() -# # -# # # Multi-thread for transect data -# # -# # # Initialize thread variables -# # processed_transects = [] -# # transect_threads = [] -# # thread_id = 0 -# # -# # # DSM 1/24/2018 couldn't this be added to the TransectData class -# # def add_transect(transect_data, mmt_transect, mt_pd0_data, mt_mmt): -# # transect_data.trdi(mmt=mt_mmt, -# # 
mmt_transect=mmt_transect, -# # pd0_data=mt_pd0_data) -# # processed_transects.append(transect_data) -# # -# # # Process each transect -# # for k in range(len(pd0_data)): -# # transect = TransectData() -# # if pd0_data[k].Wt is not None: -# # if transect_type == 'MB': -# # # Process moving-bed transect -# # if multi_threaded: -# # t_thread = MultiThread(thread_id=thread_id, -# # function=add_transect, -# # args={'transect': transect, -# # 'mmt_transect': mmt.mbt_transects[valid_indices[k]], -# # 'mt_pd0_data': pd0_data[k], -# # 'mt_mmt': mmt}) -# # t_thread.start() -# # transect_threads.append(t_thread) -# # -# # else: -# # transect = TransectData() -# # add_transect(transect_data=transect, -# # mmt_transect=mmt.mbt_transects[valid_indices[k]], -# # mt_pd0_data=pd0_data[k], -# # mt_mmt=mmt) -# # -# # else: -# # # Process discharge transects -# # if multi_threaded: -# # t_thread = MultiThread(thread_id=thread_id, -# # function=add_transect, -# # args={'transect': transect, -# # 'mmt_transect': mmt.transects[valid_indices[k]], -# # 'mt_pd0_data': pd0_data[k], -# # 'mt_mmt': mmt}) -# # t_thread.start() -# # transect_threads.append(t_thread) -# # -# # else: -# # add_transect(transect_data=transect, -# # mmt_transect=mmt.transects[valid_indices[k]], -# # mt_pd0_data=pd0_data[k], -# # mt_mmt=mmt) -# # -# # if multi_threaded: -# # for x in transect_threads: -# # x.join() -# finish = time.perf_counter() -# print(f'Finished in {finish - start}') -# return processed_transects - - def adjusted_ensemble_duration(transect, trans_type=None): - """Applies the TRDI method of expanding the ensemble time when data are invalid. + """Applies the TRDI method of expanding the ensemble time when data are + invalid. Parameters ---------- @@ -2473,14 +2730,16 @@ def adjusted_ensemble_duration(transect, trans_type=None): Array of delta time in seconds for each ensemble. """ - if transect.adcp.manufacturer == 'TRDI': + if transect.adcp.manufacturer == "TRDI": if trans_type is None: # Determine valid data from water track - valid = np.isnan(transect.w_vel.u_processed_mps) == False + valid = np.logical_not(np.isnan(transect.w_vel.u_processed_mps)) valid_sum = np.sum(valid) else: # Determine valid data from bottom track - valid_sum = np.isnan(transect.boat_vel.bt_vel.u_processed_mps) == False + valid_sum = np.logical_not( + np.isnan(transect.boat_vel.bt_vel.u_processed_mps) + ) valid_ens = valid_sum > 0 n_ens = len(valid_ens) diff --git a/Classes/TransformationMatrix.py b/Classes/TransformationMatrix.py index 4bd3986..36933a6 100644 --- a/Classes/TransformationMatrix.py +++ b/Classes/TransformationMatrix.py @@ -2,7 +2,8 @@ import numpy as np class TransformationMatrix(object): - """Determines the transformation matrix and source for the specified ADCP model from the data provided. + """Determines the transformation matrix and source for the specified ADCP + model from the data provided. Attributes ---------- @@ -10,15 +11,16 @@ class TransformationMatrix(object): Source of transformation matrix, either Nominal or ADCP matrix: np.array One or more 4x4 transformation matrices. - """ + """ def __init__(self): """Constructor initializes variable to None""" self.source = None self.matrix = None - + def populate_data(self, manufacturer, model=None, data_in=None): - """Uses the manufacturer and model to determine how to parse the transformation matrix. + """Uses the manufacturer and model to determine how to parse the + transformation matrix. 
        Parameters
         ----------
@@ -29,54 +31,61 @@ class TransformationMatrix(object):
             data_in:
                 System test data or 'Nominal'
         """
-        
-        if manufacturer == 'TRDI':
+
+        if manufacturer == "TRDI":
             self.trdi(model, data_in)
-        elif manufacturer == 'SonTek':
+        elif manufacturer == "SonTek":
             self.sontek(data_in)

     def trdi(self, model=None, data_in=None):
-        """Processes the data to store the transformation matrix for TRDI ADCPs.
-        If no transformation matrix information is available a nominal transformation
-        matrix for that model is assumed.
+        """Processes the data to store the transformation matrix for TRDI
+        ADCPs. If no transformation matrix information is available a
+        nominal transformation matrix for that model is assumed.

         Parameters
         ----------
         model: str
             Model of ADCP
-        data_in: 
+        data_in: str or Pd0TRDI
             System test data or 'Nominal'
         """

         adcp_model = model
         # Set nominal matrix based on model
-        self.matrix = [[1.4619, -1.4619, 0, 0],
-                       [0, 0, -1.4619, 1.4619],
-                       [0.2661, 0.2661, 0.2661, 0.2661],
-                       [1.0337, 1.0337, -1.0337, -1.0337]]
-
-        if adcp_model == 'RiverRay':
-            self.matrix = [[1, -1, 0, 0],
-                           [0, 0, -1, 1],
-                           [0.2887, 0.2887, 0.2887, 0.2887],
-                           [0.7071, 0.7071, -0.7071, -0.7071]]
-
-        # Overwrite nominal transformation matrix with custom matrix from test data, if available
-        self.source = 'Nominal'
-        if data_in == 'Nominal':
-            self.source = 'Nominal'
-        elif adcp_model == 'Rio Grande':
+        self.matrix = [
+            [1.4619, -1.4619, 0, 0],
+            [0, 0, -1.4619, 1.4619],
+            [0.2661, 0.2661, 0.2661, 0.2661],
+            [1.0337, 1.0337, -1.0337, -1.0337],
+        ]
+        self.source = "Nominal"
+
+        if adcp_model == "RiverRay":
+            self.matrix = [
+                [1, -1, 0, 0],
+                [0, 0, -1, 1],
+                [0.2887, 0.2887, 0.2887, 0.2887],
+                [0.7071, 0.7071, -0.7071, -0.7071],
+            ]
+
+        # Overwrite nominal transformation matrix with custom matrix from
+        # test data, if available
+
+        if data_in == "Nominal":
+            self.source = "Nominal"
+        elif adcp_model == "Rio Grande":
             self.riogrande(data_in)
-        elif adcp_model == 'StreamPro':
+        elif adcp_model == "StreamPro":
            self.streampro(data_in)
-        elif adcp_model == 'RiverRay':
+        elif adcp_model == "RiverRay":
            self.riverray(data_in)
-        elif adcp_model == 'RiverPro':
+        elif adcp_model == "RiverPro":
            self.riverpro(data_in)
-        elif adcp_model == 'RioPro':
+        elif adcp_model == "RioPro":
            self.riopro(data_in)
-        elif adcp_model == 'pd0':
+        elif adcp_model == "pd0":
             self.matrix = data_in.Inst.t_matrix
+            self.source = "ADCP"

         if np.array(self.matrix).size < 16:
             self.trdi(model=model, data_in=None)
@@ -93,13 +102,15 @@ class TransformationMatrix(object):
             System test data
         """
         if data_in is not None:
-            idx = data_in.find('Instrument Transformation Matrix (Down):')
+            idx = data_in.find("Instrument Transformation Matrix (Down):")
             if idx != -1:
-                cell_matrix = np.fromstring(data_in[idx + 50:idx + 356], dtype=np.float64, sep=' ')
+                cell_matrix = np.fromstring(
+                    data_in[idx + 50 : idx + 356], dtype=np.float64, sep=" "
+                )
                 try:
                     self.matrix = np.reshape(cell_matrix, (-1, 8))[:, 0:4]
-                    self.source = 'ADCP'
+                    self.source = "ADCP"
                 except ValueError:
                     pass

@@ -113,15 +124,15 @@ class TransformationMatrix(object):
         """

         if data_in is not None:
-            idx = data_in.find('>PS3')
+            idx = data_in.find(">PS3")
             if idx != -1:
-                temp_str = data_in[idx + 5:idx + 138]
-                temp_str = temp_str.replace('-', ' -')
-                temp_str = temp_str[:temp_str.find('>')]
-                cell_matrix = np.fromstring(temp_str, dtype=np.float64, sep=' ')
+                temp_str = data_in[idx + 5 : idx + 138]
+                temp_str = temp_str.replace("-", " -")
+                temp_str = temp_str[: temp_str.find(">")]
+                cell_matrix = np.fromstring(temp_str, dtype=np.float64, sep=" ")
                 try:
                     self.matrix = cell_matrix.reshape(4, 4)
-                    self.source = 'ADCP'
+                    self.source = "ADCP"
                 except ValueError:
                     pass

@@ -134,16 +145,19 @@ class TransformationMatrix(object):
             System test data
         """
         if data_in is not None:
-            idx = data_in.find('Instrument Transformation Matrix')
+            idx = data_in.find("Instrument Transformation Matrix")
             if idx != -1:
-                idx2 = data_in[idx:].find(':')
-                idx3 = idx + idx2
+                idx2 = data_in[idx:].find(":")
+                idx3 = idx + idx2 + 1
                 if idx2 != -1:
-                    idx4 = data_in[idx3:].find('>')
+                    idx4 = data_in[idx3:].find(">")
                     idx5 = idx3 + idx4 - 2
                     if idx4 != -1:
-                        self.matrix = float(data_in[idx3:idx5])
-                        self.source = 'ADCP'
+                        cell_matrix = np.fromstring(
+                            data_in[idx3:idx5], dtype=np.float64, sep=" "
+                        )
+                        self.matrix = cell_matrix.reshape(4, 4)
+                        self.source = "ADCP"

     def riverpro(self, data_in):
         """Process RiverPro test data for transformation matrix.
@@ -154,16 +168,16 @@ class TransformationMatrix(object):
             System test data
         """
         if data_in is not None:
-            idx = data_in.find('Instrument Transformation Matrix')
+            idx = data_in.find("Instrument Transformation Matrix")
             if idx != -1:
-                idx2 = data_in[idx:].find(':')
+                idx2 = data_in[idx:].find(":")
                 idx3 = idx + idx2
                 if idx2 != -1:
-                    idx4 = data_in[idx3:].find('Has V-Beam')
+                    idx4 = data_in[idx3:].find("Has V-Beam")
                     idx5 = idx3 + idx4 - 2
                     if idx4 != -1:
-                        self.matrix = float(data_in[idx3:idx5])
-                        self.source = 'ADCP'
+                        cell_matrix = np.fromstring(
+                            data_in[idx3:idx5], dtype=np.float64, sep=" "
+                        )
+                        self.matrix = cell_matrix.reshape(4, 4)
+                        self.source = "ADCP"

     def riopro(self, data_in):
         """Process RioPro test data for transformation matrix.
@@ -175,16 +189,16 @@ class TransformationMatrix(object):
         """

         if data_in is not None:
-            idx = data_in.find('Instrument Transformation Matrix')
+            idx = data_in.find("Instrument Transformation Matrix")
             if idx != -1:
-                idx2 = data_in[idx:].find(':')
+                idx2 = data_in[idx:].find(":")
                 idx3 = idx + idx2
                 if idx2 != -1:
-                    idx4 = data_in[idx3:].find('Has V-Beam')
+                    idx4 = data_in[idx3:].find("Has V-Beam")
                     idx5 = idx3 + idx4 - 2
                     if idx4 != -1:
-                        self.matrix = float(data_in[idx3:idx5])
-                        self.source = 'ADCP'
+                        cell_matrix = np.fromstring(
+                            data_in[idx3:idx5], dtype=np.float64, sep=" "
+                        )
+                        self.matrix = cell_matrix.reshape(4, 4)
+                        self.source = "ADCP"

     def sontek(self, data_in):
         """Store SonTek transformation matrix data.
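The Rio Grande, RiverRay, RiverPro, and RioPro handlers all follow one
pattern: find a marker in the system test text, slice out the numeric block
that follows, and parse it into a 4x4 matrix, falling back to the nominal
matrix on failure. A generic sketch of that pattern; the marker and offsets
are placeholders, and np.fromstring mirrors the patch even though text
parsing with it is deprecated in recent NumPy.

import numpy as np

def parse_test_matrix(data_in, marker, start_offset, end_offset):
    idx = data_in.find(marker)
    if idx == -1:
        return None  # marker absent: caller keeps the nominal matrix
    block = data_in[idx + start_offset : idx + end_offset]
    values = np.fromstring(block, dtype=np.float64, sep=" ")
    try:
        return values.reshape(4, 4)
    except ValueError:
        return None  # malformed block: caller keeps the nominal matrix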
@@ -196,7 +210,7 @@ class TransformationMatrix(object):
         """

         if data_in is not None:
-            self.source = 'ADCP'
+            self.source = "ADCP"
             # Note: for M9 this is a 4x4x3 matrix (300,500,1000)
             # Note: for S5 this is a 4x4x2 matrix (3000,1000)
             self.matrix = data_in
diff --git a/Classes/Uncertainty.py b/Classes/Uncertainty.py
index 0af2deb..5e260b8 100644
--- a/Classes/Uncertainty.py
+++ b/Classes/Uncertainty.py
@@ -8,15 +8,16 @@ class Uncertainty(object):
     Attributes
     ----------
     cov: float
-        Coefficient of variation for all transects used in dicharge computation
+        Coefficient of variation for all transects used in discharge computation
     cov_95: float
         Coefficient of variation inflated to the 95% percent coverage
     invalid_95: float
-        Estimated 95% uncertainty for dicharge in invalid bins and ensembles
+        Estimated 95% uncertainty for discharge in invalid bins and ensembles
     edges_95: float
         Estimated 95% uncertainty for the computed edge discharges
     extrapolation_95: float
-        Estimated 95% uncertainty in discharge due to top and bottom extrapolations
+        Estimated 95% uncertainty in discharge due to top and bottom
+        extrapolations
     moving_bed_95: float
         Estimated 95% uncertainty due to moving-bed tests and conditions
     systematic: float
@@ -24,19 +25,25 @@ class Uncertainty(object):
     total_95: float
         Estimated 95% uncertainty in discharge using automated values
     cov_95_user: float
-        User provided estimate of coefficient of variation inflated to the 95% percent coverage
+        User provided estimate of coefficient of variation inflated to the
+        95% percent coverage
     invalid_95_user: float
-        User provided estimate of95% uncertainty for discharge in invalid bins and ensembles
+        User provided estimate of 95% uncertainty for discharge in invalid
+        bins and ensembles
     edges_95_user: float
-        User provided estimate of 95% uncertainty for the computed edge discharges
+        User provided estimate of 95% uncertainty for the computed edge
+        discharges
     extrapolation_95_user: float
-        User provided estimate of 95% uncertainty in discharge due to top and bottom extrapolations
+        User provided estimate of 95% uncertainty in discharge due to top
+        and bottom extrapolations
     moving_bed_95_user: float
-        User provided estimate of 95% uncertainty due to moving-bed tests and conditions
+        User provided estimate of 95% uncertainty due to moving-bed tests
+        and conditions
     systematic_user: float
         User provided estimate of systematic error estimated at 1.5% at 1 sigma
     total_95_user: float
-        Estimated 95% uncertainty in discharge using user provide values to override automated values
+        Estimated 95% uncertainty in discharge using user provided values to
+        override automated values
     """

     def __init__(self):
@@ -57,9 +64,18 @@ class Uncertainty(object):
         self.systematic_user = None
         self.total_95_user = None

-    def compute_uncertainty(self, meas, cov_95_user=None, invalid_95_user=None, edges_95_user=None,
-                            extrapolation_95_user=None, moving_bed_95_user=None, systematic_user=None):
-        """Computes the uncertainty for the components of the discharge measurement
-        using measurement data or user provided values.
+    def compute_uncertainty(
+        self,
+        meas,
+        cov_95_user=None,
+        invalid_95_user=None,
+        edges_95_user=None,
+        extrapolation_95_user=None,
+        moving_bed_95_user=None,
+        systematic_user=None,
+    ):
+        """Computes the uncertainty for the components of the discharge
+        measurement using measurement data or user provided values.
        Parameters
@@ -67,17 +83,23 @@ class Uncertainty(object):
         meas: Measurement
             Object of class Measurement
         cov_95_user: float
-            User provided estimate of coefficient of variation inflated to the 95% percent coverage
+            User provided estimate of coefficient of variation inflated to
+            the 95% percent coverage
         invalid_95_user: float
-            User provided estimate of95% uncertainty for discharge in invalid bins and ensembles
+            User provided estimate of 95% uncertainty for discharge in invalid
+            bins and ensembles
         edges_95_user: float
-            User provided estimate of 95% uncertainty for the computed edge discharges
+            User provided estimate of 95% uncertainty for the computed edge
+            discharges
         extrapolation_95_user: float
-            User provided estimate of 95% uncertainty in discharge due to top and bottom extrapolations
+            User provided estimate of 95% uncertainty in discharge due to top
+            and bottom extrapolations
         moving_bed_95_user: float
-            User provided estimate of 95% uncertainty due to moving-bed tests and conditions
+            User provided estimate of 95% uncertainty due to moving-bed tests
+            and conditions
         systematic_user: float
-            User provided estimate of systematic error estimated at 1.5% at 1 sigma
+            User provided estimate of systematic error estimated at 1.5% at 1
+            sigma
         """

         # Use only checked discharges
@@ -89,7 +111,7 @@ class Uncertainty(object):
             discharges.append(meas.discharge[n])

         # Compute uncertainties from the data
-        self.cov, self.cov_95 = self.uncertainty_q_random(discharges, 'total')
+        self.cov, self.cov_95 = self.uncertainty_q_random(discharges, "total")
         self.invalid_95 = self.uncertainty_invalid_data(discharges)
         self.edges_95 = self.uncertainty_edges(discharges)
         self.extrapolation_95 = self.uncertainty_extrapolation(meas, discharges)
@@ -108,14 +130,15 @@ class Uncertainty(object):
         self.estimate_total_uncertainty()

     def populate_from_qrev_mat(self, meas_struct):
-        """Populates the object using data from previously saved QRev Matlab file.
+        """Populates the object using data from previously saved QRev Matlab
+        file.

         Parameters
         ----------
         meas_struct: mat_struct
             Matlab data structure obtained from sio.loadmat
         """
-        if hasattr(meas_struct, 'uncertainty'):
+        if hasattr(meas_struct, "uncertainty"):
             self.cov = meas_struct.uncertainty.cov
             self.cov_95 = meas_struct.uncertainty.cov95
             self.invalid_95 = meas_struct.uncertainty.invalid95
@@ -139,17 +162,23 @@ class Uncertainty(object):
             self.total_95_user = meas_struct.uncertainty.total95User

     def estimate_total_uncertainty(self):
-        """Compute the uncertainty of the measurement using the automatically computed uncertainties and
-        user overrides.
+        """Compute the uncertainty of the measurement using the automatically
+        computed uncertainties and user overrides.
         """

-        self.total_95 = 2.0 * ((self.cov_95 / 2)**2
-                               + (self.invalid_95 / 2)**2
-                               + (self.edges_95 / 2)**2
-                               + (self.extrapolation_95 / 2)**2
-                               + (self.moving_bed_95 / 2)**2
-                               + self.systematic**2
-                               )**0.5
+        self.total_95 = (
+            2.0
+            * (
+                (self.cov_95 / 2) ** 2
+                + (self.invalid_95 / 2) ** 2
+                + (self.edges_95 / 2) ** 2
+                + (self.extrapolation_95 / 2) ** 2
+                + (self.moving_bed_95 / 2) ** 2
+                + self.systematic**2
+            )
+            ** 0.5
+        )

         if self.cov_95_user is None:
             cov_95_user = self.cov_95
@@ -169,7 +198,7 @@ class Uncertainty(object):
         if self.extrapolation_95_user is None:
             extrapolation_95_user = self.extrapolation_95
         else:
-            extrapolation_95_user = self. extrapolation_95_user
+            extrapolation_95_user = self.extrapolation_95_user

         if self.moving_bed_95_user is None:
             moving_bed_95_user = self.moving_bed_95
@@ -181,17 +210,23 @@ class Uncertainty(object):
         else:
             systematic_user = self.systematic_user

-        self.total_95_user = 2.0 * ((cov_95_user / 2)**2
-                                    + (invalid_95_user / 2)**2
-                                    + (edges_95_user / 2)**2
-                                    + (extrapolation_95_user / 2)**2
-                                    + (moving_bed_95_user / 2)**2
-                                    + systematic_user**2
-                                    )**0.5
+        self.total_95_user = (
+            2.0
+            * (
+                (cov_95_user / 2) ** 2
+                + (invalid_95_user / 2) ** 2
+                + (edges_95_user / 2) ** 2
+                + (extrapolation_95_user / 2) ** 2
+                + (moving_bed_95_user / 2) ** 2
+                + systematic_user**2
+            )
+            ** 0.5
+        )

     @staticmethod
     def get_array_attr(list_in, prop):
-        """Create an array of the requested attribute from a list of objects containing the requested attribute.
+        """Create an array of the requested attribute from a list of objects
+        containing the requested attribute.

         Parameters
         ----------
@@ -242,12 +277,13 @@ class Uncertainty(object):

         # Inflate the cov to the 95% value
         if n_max == 2:
-            # Use the approximate method as taught in class to reduce the high coverage factor for 2 transects
-            # and account for prior knowledge related to 720 second duration analysis
+            # Use the approximate method as taught in class to reduce the
+            # high coverage factor for 2 transects and account for prior
+            # knowledge related to 720 second duration analysis
             cov_95 = cov * 3.3
         else:
             # Use Student's t to inflate COV for n > 2
-            cov_95 = t.interval(0.95, n_max-1)[1] * cov / n_max**0.5
+            cov_95 = t.interval(0.95, n_max - 1)[1] * cov / n_max**0.5
         else:
             cov = np.nan
             cov_95 = np.nan
@@ -256,7 +292,8 @@ class Uncertainty(object):

     @staticmethod
     def uncertainty_edges(discharges):
-        """Compute uncertainty of edge discharge. Currently assuming random plus bias
+        """Compute uncertainty of edge discharge. Currently assuming random
+        plus bias
         is within 30% of actual value.
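The two computations above in isolation: each 95% component is halved back to
roughly 1 sigma, combined in quadrature with the systematic term, and
re-expanded by 2; the COV of the n transect discharges is inflated with a
Student's t coverage factor for n > 2, with 3.3 as the approximation for
n == 2. The COV itself is assumed here to be the sample standard deviation
over the mean; that line is elided from the hunk.

import numpy as np
from scipy.stats import t

def total_95(components_95_percent, systematic_percent):
    one_sigma_sq = sum((c / 2.0) ** 2 for c in components_95_percent)
    return 2.0 * (one_sigma_sq + systematic_percent**2) ** 0.5

def cov_and_cov_95(discharges):
    q = np.asarray(discharges, dtype=float)
    cov = np.nanstd(q, ddof=1) / np.nanmean(q)  # assumed COV definition
    n = q.size
    if n == 2:
        return cov, cov * 3.3
    return cov, t.interval(0.95, n - 1)[1] * cov / n**0.5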
Parameters @@ -271,9 +308,9 @@ class Uncertainty(object): """ # Compute mean discharge values for total, left, and right - mean_q = np.nanmean(Uncertainty.get_array_attr(discharges, 'total')) - mean_left = np.nanmean(Uncertainty.get_array_attr(discharges, 'left')) - mean_right = np.nanmean(Uncertainty.get_array_attr(discharges, 'right')) + mean_q = np.nanmean(Uncertainty.get_array_attr(discharges, "total")) + mean_left = np.nanmean(Uncertainty.get_array_attr(discharges, "left")) + mean_right = np.nanmean(Uncertainty.get_array_attr(discharges, "right")) # Compute combined edge uncertainty percent_edge = ((np.abs(mean_left) + np.abs(mean_right)) / mean_q) * 100 @@ -299,15 +336,21 @@ class Uncertainty(object): """ # Compute mean total uncorrected discharge - q_selected = np.nanmean(Uncertainty.get_array_attr(discharges, 'total_uncorrected')) + q_selected = np.nanmean( + Uncertainty.get_array_attr(discharges, "total_uncorrected") + ) # Create array of discharges from the various extrapolation methods - q_possible = np.array([meas.extrap_fit.q_sensitivity.q_pp_mean, - meas.extrap_fit.q_sensitivity.q_pp_opt_mean, - meas.extrap_fit.q_sensitivity.q_cns_mean, - meas.extrap_fit.q_sensitivity.q_cns_opt_mean, - meas.extrap_fit.q_sensitivity.q_3p_ns_mean, - meas.extrap_fit.q_sensitivity.q_3p_ns_opt_mean]) + q_possible = np.array( + [ + meas.extrap_fit.q_sensitivity.q_pp_mean, + meas.extrap_fit.q_sensitivity.q_pp_opt_mean, + meas.extrap_fit.q_sensitivity.q_cns_mean, + meas.extrap_fit.q_sensitivity.q_cns_opt_mean, + meas.extrap_fit.q_sensitivity.q_3p_ns_mean, + meas.extrap_fit.q_sensitivity.q_3p_ns_opt_mean, + ] + ) # Compute difference in discharges from the selected method q_diff = np.abs(q_possible - q_selected) @@ -322,7 +365,8 @@ class Uncertainty(object): @staticmethod def uncertainty_invalid_data(discharges): - """Computes an estimate of the uncertainty for the discharge computed for invalid bins and ensembles. + """Computes an estimate of the uncertainty for the discharge computed + for invalid bins and ensembles. Parameters ---------- @@ -336,22 +380,25 @@ class Uncertainty(object): """ # Compute mean discharges - q_mean = np.nanmean(Uncertainty.get_array_attr(discharges, 'total')) - q_cells = np.nanmean(Uncertainty.get_array_attr(discharges, 'int_cells')) - q_ensembles = np.nanmean(Uncertainty.get_array_attr(discharges, 'int_ens')) + q_mean = np.nanmean(Uncertainty.get_array_attr(discharges, "total")) + q_cells = np.nanmean(Uncertainty.get_array_attr(discharges, "int_cells")) + q_ensembles = np.nanmean(Uncertainty.get_array_attr(discharges, "int_ens")) # Compute percentages percent_cells = (q_cells / q_mean) * 100 percent_ensembles = (q_ensembles / q_mean) * 100 # Compute uncertainty for combined invalid cells and ensembles - invalid_data_uncertainty = (np.abs(percent_cells) + np.abs(percent_ensembles)) * 0.2 + invalid_data_uncertainty = ( + np.abs(percent_cells) + np.abs(percent_ensembles) + ) * 0.2 return invalid_data_uncertainty @staticmethod def uncertainty_moving_bed(meas, checked): - """Estimates the 95% uncertainty of the discharge due to a moving-bed and navigation reference. + """Estimates the 95% uncertainty of the discharge due to a + moving-bed and navigation reference. 
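uncertainty_edges in miniature: the edge fraction of total discharge, in
percent, scaled by the 30% accuracy assumption stated in the docstring. The
0.3 factor is inferred from that docstring because the scaling line itself is
elided from the hunk.

def uncertainty_edges_95(mean_q, mean_left, mean_right):
    percent_edge = (abs(mean_left) + abs(mean_right)) / mean_q * 100.0
    return percent_edge * 0.3  # inferred 30% scaling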
        Parameters
         ----------
@@ -366,7 +413,10 @@ class Uncertainty(object):
             95% uncertainty associated with moving-bed conditions
         """

-        if np.any(checked) and meas.transects[checked.index(1)].boat_vel.selected == 'bt_vel':
+        if (
+            np.any(checked)
+            and meas.transects[checked.index(1)].boat_vel.selected == "bt_vel"
+        ):
             # Boat velocity based on bottom track, moving-bed possible
             if len(meas.mb_tests) > 0:
                 # Moving_bed tests recorded
@@ -376,7 +426,7 @@ class Uncertainty(object):
                 used = []
                 for test in meas.mb_tests:
                     user_valid.append(test.user_valid)
-                    if test.test_quality == 'Errors':
+                    if test.test_quality == "Errors":
                         quality.append(False)
                     else:
                         quality.append(True)
@@ -388,11 +438,13 @@ class Uncertainty(object):
                 # Check to see if the valid tests indicate a moving bed
                 moving_bed_bool = []
                 for result in moving_bed:
-                    if result == 'Yes':
+                    if result == "Yes":
                         moving_bed_bool.append(True)
                     else:
                         moving_bed_bool.append(False)
-                valid_moving_bed = np.logical_and(quality, np.asarray(moving_bed_bool))
+                valid_moving_bed = np.logical_and(
+                    quality, np.asarray(moving_bed_bool)
+                )
                 if np.any(valid_moving_bed):
                     # Check to see that a correction was used
                     if np.any(np.logical_and(valid_moving_bed, np.asarray(used))):
diff --git a/Classes/WaterData.py b/Classes/WaterData.py
index 56e1b68..8d3f097 100644
--- a/Classes/WaterData.py
+++ b/Classes/WaterData.py
@@ -15,14 +15,16 @@ class WaterData(object):
     ----------
     Original data provided to the class:
     raw_vel_mps: np.array(float)
-        Contains the raw unfiltered velocity in m/s. 1st index 1-4 are beams 1,2,3,4 if beam or
-        u,v,w,d if otherwise.
+        Contains the raw unfiltered velocity in m/s. 1st index 1-4 are
+        beams 1,2,3,4 if beam or u,v,w,d if otherwise.
     frequency: np.array(float)
         Defines ADCP frequency used for velocity measurement, in kHz.
     orig_coord_sys: str
-        Defines the original raw velocity coordinate system "Beam", "Inst", "Ship", "Earth".
+        Defines the original raw velocity coordinate system "Beam",
+        "Inst", "Ship", "Earth".
     orig_nav_ref: str
-        Defines the original taw data naviagation reference: "None", "BT", "GGA", "VTG".
+        Defines the original raw data navigation reference: "None", "BT",
+        "GGA", "VTG".
     corr: np.array(float)
         Correlation values for WT, if available.
     rssi: np.array(float)
@@ -32,9 +34,11 @@ class WaterData(object):
     water_mode: str
         WaterMode for TRDI or 'Variable' for SonTek.
     blanking_distance_m: float
-        Distance below transducer where data are marked invalid due to potential ringing.
+        Distance below transducer where data are marked invalid due to
+        potential ringing.
     cells_above_sl: np.array(bool)
-        Logical array of depth cells above sidelobe cutoff based on selected depth reference.
+        Logical array of depth cells above sidelobe cutoff based on
+        selected depth reference.
     cells_above_sl_bt: np.array(bool)
         Logical array of depth cells above the sidelobe cutoff based on BT
     sl_lag_effect_m: np.array(float)
@@ -42,25 +46,33 @@ class WaterData(object):

     Data computed in this class:
     u_earth_no_ref_mps: np.array(float)
-        Horizontal velocity in x-direction with no boat reference applied, in m/s.
+        Horizontal velocity in x-direction with no boat reference applied,
+        in m/s.
     v_earth_no_ref_mps: np.array(float)
-        Horizontal velocity in y-direction with no boat reference applied, in m/s.
+        Horizontal velocity in y-direction with no boat reference applied,
+        in m/s.
     u_mps: np.array(float)
-        Horizontal velocity in x-direction, earth coord, nav referenced, in m/s.
+        Horizontal velocity in x-direction, earth coord, nav referenced,
+        in m/s.
    v_mps: np.array(float)
-        Horizontal velocity in y-direction, earth coord, nav referenced, in m/s.
+        Horizontal velocity in y-direction, earth coord, nav referenced,
+        in m/s.
     u_processed_mps: np.array(float)
-        Horizontal velocity in x-direction, earth coord, nav ref, filtered, and interpolated.
+        Horizontal velocity in x-direction, earth coord, nav ref,
+        filtered, and interpolated.
     v_processed_mps: np.array(float)
-        Horizontal veloctiy in y-direction, earth coord, nav ref, filtered, and interpolated.
+        Horizontal velocity in y-direction, earth coord, nav ref,
+        filtered, and interpolated.
     w_mps: np.array(float)
         Vertical velocity (+ up), in m/s.
     d_mps: np.array(float)
-        Difference in vertical velocities compute from opposing beam pairs, in m/s.
+        Difference in vertical velocities computed from opposing beam
+        pairs, in m/s.
     invalid_index: np.array(bool)
         Index of ensembles with no valid raw velocity data.
     num_invalid: float
-        Estimated number of depth cells in ensembles with no valid raw velocity data.
+        Estimated number of depth cells in ensembles with no valid raw
+        velocity data.
     valid_data: np.array(float)
         3-D logical array of valid data
         Dim1 0 - composite
@@ -85,9 +97,11 @@ class WaterData(object):
     w_filter_thresholds: float, dict, tuple
         Threshold(s) for vertical velocity filter.
     excluded_dist_m: float
-        Distance below transucer for which data are excluded or marked invalid, in m.
+        Distance below transducer for which data are excluded or marked
+        invalid, in m.
     orig_excluded_dist_m: float
-        Original distance below transucer for which data are excluded or marked invalid, in m.
+        Original distance below transducer for which data are excluded or
+        marked invalid, in m.
     smooth_filter: str
         Set filter based on smoothing function "On", "Off".
     smooth_speed: np.array(float)
@@ -100,6 +114,8 @@ class WaterData(object):
         Set SNR filter for SonTek data "On", "Off".
     snr_rng: np.array(float)
         Range of beam averaged SNR
+    snr_3beam_comp: bool
+        Indicates the use of 3-beam velocity computations when invalid SNR
+        is found
     wt_depth_filter: np.array(bool)
         WT in ensembles with invalid depths are marked invalid.
     interpolate_ens: str
@@ -107,29 +123,33 @@ class WaterData(object):
     interpolate_cells: str
         Type of cell interpolation: "None", "TRDI", "Linear", 'abba'
     coord_sys: str
-        Defines the velocity coordinate system "Beam", "Inst", "Ship", "Earth"
+        Defines the velocity coordinate system "Beam", "Inst", "Ship",
+        "Earth"
     nav_ref: str
         Defines the navigation reference: "None", "BT", "GGA", "VTG"
     sl_cutoff_percent: float
         Percent cutoff defined by cos(angle)
     sl_cutoff_number: float
-        User specified number of cells to cutoff from SonTek, not implemented, undefined
+        User specified number of cells to cutoff from SonTek, not
+        implemented, undefined
     sl_cutoff_type: str
         Type of cutoff method "Percent" or "Number".
ping_type: np.array(int) - Indicates type of ping for each cell: 0-incoherent, 1-coherent, 2-surface + Indicates type of ping for each cell: 0-incoherent, 1-coherent, + 2-surface d_meas_thresholds: list - Dictionary of difference velocity thresholds computed using the whole measurement by ping type + Dictionary of difference velocity thresholds computed using the + whole measurement by ping type w_meas_thresholds: list - Dictionary of vertical velocity thresholds computed using the whole measurement by ping type + Dictionary of vertical velocity thresholds computed using the + whole measurement by ping type use_measurement_thresholds: bool Indicates if the measurement based thresholds should be used """ def __init__(self): - """Initialize instance variables. - """ + """Initialize instance variables.""" # Data input to this class self.raw_vel_mps = None @@ -144,7 +164,7 @@ class WaterData(object): self.cells_above_sl = None self.cells_above_sl_bt = None self.sl_lag_effect_m = None - + # Data computed in this class self.u_earth_no_ref_mps = None self.v_earth_no_ref_mps = None @@ -157,7 +177,7 @@ class WaterData(object): self.invalid_index = None self.num_invalid = [] self.valid_data = None - + # Settings self.beam_filter = None self.d_filter = None @@ -170,8 +190,10 @@ class WaterData(object): self.smooth_speed = None self.smooth_upper_limit = None self.smooth_lower_limit = None - self.snr_filter = 'Off' + self.snr_filter = "Off" self.snr_rng = [] + self.snr_beam_velocities = None + self.snr_3beam_comp = False self.wt_depth_filter = True self.interpolate_ens = None self.interpolate_cells = None @@ -189,12 +211,33 @@ class WaterData(object): self.use_measurement_thresholds = False - def populate_data(self, vel_in, freq_in, coord_sys_in, nav_ref_in, rssi_in, rssi_units_in, - excluded_dist_in, cells_above_sl_in, sl_cutoff_per_in, sl_cutoff_num_in, - sl_cutoff_type_in, sl_lag_effect_in, wm_in, blank_in, corr_in=None, - surface_vel_in=None, surface_rssi_in=None, surface_corr_in=None, sl_cutoff_m=None, - surface_num_cells_in=0, ping_type='U', use_measurement_thresholds=False): - + def populate_data( + self, + vel_in, + freq_in, + coord_sys_in, + nav_ref_in, + rssi_in, + rssi_units_in, + excluded_dist_in, + cells_above_sl_in, + sl_cutoff_per_in, + sl_cutoff_num_in, + sl_cutoff_type_in, + sl_lag_effect_in, + wm_in, + blank_in, + corr_in=None, + surface_vel_in=None, + surface_rssi_in=None, + surface_corr_in=None, + sl_cutoff_m=None, + surface_num_cells_in=0, + ping_type="U", + use_measurement_thresholds=False, + snr_3beam_comp=False, + ): + """Populates the variables with input, computed, or default values. Parameters @@ -205,17 +248,21 @@ class WaterData(object): freq_in: np.array(float) Defines ADCP frequency used for velocity measurement. coord_sys_in: str - Defines the original raw velocity coordinate system "Beam", "Inst", "Ship", "Earth". + Defines the original raw velocity coordinate system "Beam", + "Inst", "Ship", "Earth". nav_ref_in: str - Defines the original raw data navigation reference: "None", "BT", "GGA", "VTG". + Defines the original raw data navigation reference: "None", "BT", + "GGA", "VTG". rssi_in: np.array(float) Returned acoustic signal strength. rssi_units_in: str Units for returned acoustic signal strength: "Counts", "dB", "SNR". excluded_dist_in: float - Distance below transducer for which data are excluded or marked invalid. + Distance below transducer for which data are excluded or marked + invalid. 
cells_above_sl_in: np.array(bool) - Bool array of depth cells above the sidelobe cutoff based on selected depth reference. + Bool array of depth cells above the sidelobe cutoff based on + selected depth reference. sl_cutoff_per_in: float Percent cutoff defined by cos(angle). sl_cutoff_num_in: float @@ -224,6 +271,8 @@ class WaterData(object): Method used to compute cutoff "Percent" or "Number". sl_lag_effect_in: np.array(float) Lag effect for each ensemble, in m. + snr_3beam_comp: bool + Indicates the use of 3-beam velocity computations when invalid SNR is found wm_in: str Watermode for TRDI or 'Variable' for SonTek. blank_in: float @@ -233,19 +282,28 @@ class WaterData(object): surface_vel_in: np.array(float) Surface velocity data for RiverRay, RiverPro, RioPro. Optional. surface_rssi_in: np.array(float) - Returned acoust signal strength for RiverRay, RiverPro, RioPro. Optional. + Returned acoustic signal strength for RiverRay, RiverPro, RioPro. + Optional. surface_corr_in: np.array(float) - Surface velocity correlations for RiverRay, RiverPro, RioPro. Optional. + Surface velocity correlations for RiverRay, RiverPro, RioPro. + Optional. surface_num_cells_in: np.array(float) - Number of surface cells in each ensemble for RiverRay, RiverPro, RioPro. Optional. + Number of surface cells in each ensemble for RiverRay, RiverPro, + RioPro. Optional. sl_cutoff_m: np.array(float) Depth in meters of side lobe cutoff to center of cells. ping_type: np.array(str) Indicates type of ping used for water tracking + use_measurement_thresholds: bool + Indicates if thresholds should be computed using the entire measurement """ # Set object properties from input data standard for all ADCPs - self.frequency = freq_in + if np.nanmean(freq_in) < 10: + self.frequency = freq_in * 1000 + else: + self.frequency = freq_in + self.orig_coord_sys = coord_sys_in self.coord_sys = coord_sys_in self.orig_nav_ref = nav_ref_in @@ -256,8 +314,11 @@ class WaterData(object): max_cells = cells_above_sl_in.shape[0] self.ping_type = np.tile(np.array([ping_type]), (max_cells, 1)) self.use_measurement_thresholds = use_measurement_thresholds + self.snr_beam_velocities = None + self.snr_3beam_comp = snr_3beam_comp - # Set object properties that depend on the presence or absence of surface cells + # Set object properties that depend on the presence or absence of + # surface cells if np.sum(surface_num_cells_in) > 0: surface_num_cells_in[np.isnan(surface_num_cells_in)] = 0 @@ -265,27 +326,43 @@ class WaterData(object): num_reg_cells = vel_in.shape[1] max_surf_cells = max_cells - num_reg_cells - # Combine surface velocity bins and regular velocity bins into one matrix + # Combine surface velocity bins and regular velocity bins into + # one matrix self.raw_vel_mps = np.tile([np.nan], [4, max_cells, num_ens]) self.rssi = np.tile([np.nan], [4, max_cells, num_ens]) self.corr = np.tile([np.nan], [4, max_cells, num_ens]) if max_surf_cells > 0: - self.raw_vel_mps[:, :max_surf_cells, :] = surface_vel_in[:, :max_surf_cells, :] - self.rssi[:, :max_surf_cells, :] = surface_rssi_in[:, :max_surf_cells, :] - self.corr[:, :max_surf_cells, :] = surface_corr_in[:, :max_surf_cells, :] + self.raw_vel_mps[:, :max_surf_cells, :] = surface_vel_in[ + :, :max_surf_cells, : + ] + self.rssi[:, :max_surf_cells, :] = surface_rssi_in[ + :, :max_surf_cells, : + ] + self.corr[:, :max_surf_cells, :] = surface_corr_in[ + :, :max_surf_cells, : + ] for i_ens in range(num_ens): - self.raw_vel_mps[:, - int(surface_num_cells_in[i_ens]):int(surface_num_cells_in[i_ens]) - +
num_reg_cells, i_ens] = vel_in[:, :num_reg_cells, i_ens] - self.rssi[:, - int(surface_num_cells_in[i_ens]):int(surface_num_cells_in[i_ens]) - + num_reg_cells, i_ens] = rssi_in[:, :num_reg_cells, i_ens] - self.corr[:, - int(surface_num_cells_in[i_ens]):int(surface_num_cells_in[i_ens]) - + num_reg_cells, i_ens] = corr_in[:, :num_reg_cells, i_ens] - self.ping_type[:int(surface_num_cells_in[i_ens]), i_ens] = 'S' + self.raw_vel_mps[ + :, + int(surface_num_cells_in[i_ens]): int(surface_num_cells_in[i_ens]) + + num_reg_cells, + i_ens, + ] = vel_in[:, :num_reg_cells, i_ens] + self.rssi[ + :, + int(surface_num_cells_in[i_ens]): int(surface_num_cells_in[i_ens]) + + num_reg_cells, + i_ens, + ] = rssi_in[:, :num_reg_cells, i_ens] + self.corr[ + :, + int(surface_num_cells_in[i_ens]): int(surface_num_cells_in[i_ens]) + + num_reg_cells, + i_ens, + ] = corr_in[:, :num_reg_cells, i_ens] + self.ping_type[: int(surface_num_cells_in[i_ens]), i_ens] = "S" else: # No surface cells self.raw_vel_mps = vel_in @@ -296,7 +373,8 @@ class WaterData(object): # No correlations input self.corr = np.tile(np.nan, rssi_in.shape) - #TODO This doesn't seem correct. If raw data in beam coordinates this is not correct. + # TODO This doesn't seem correct. If raw data in beam coordinates + # this is not correct. self.u_mps = np.copy(self.raw_vel_mps)[0, :, :] self.v_mps = np.copy(self.raw_vel_mps)[1, :, :] self.w_mps = np.copy(self.raw_vel_mps)[2, :, :] @@ -306,13 +384,14 @@ class WaterData(object): self.excluded_dist_m = excluded_dist_in self.orig_excluded_dist_m = excluded_dist_in - # In some rare situations the blank is empty so it is set to the excluded_dist_in + # In some rare situations the blank is empty so it is set to the + # excluded_dist_in try: blank_in = float(blank_in) self.blanking_distance_m = blank_in except ValueError: self.blanking_distance_m = excluded_dist_in - + self.cells_above_sl = cells_above_sl_in self.cells_above_sl_bt = cells_above_sl_in self.sl_cutoff_percent = sl_cutoff_per_in @@ -320,37 +399,37 @@ class WaterData(object): self.sl_cutoff_type = sl_cutoff_type_in self.sl_lag_effect_m = sl_lag_effect_in self.sl_cutoff_m = sl_cutoff_m - + # Set filter defaults to no filtering and no interruption self.beam_filter = 3 - self.d_filter = 'Off' + self.d_filter = "Off" self.d_filter_thresholds = {} - self.w_filter = 'Off' + self.w_filter = "Off" self.w_filter_thresholds = {} self.smooth_filter = False - self.interpolate_ens = 'None' - self.interpolate_cells = 'None' - + self.interpolate_ens = "None" + self.interpolate_cells = "None" + # Determine original valid # Initialize valid data property self.valid_data = np.tile(self.cells_above_sl, [9, 1, 1]) - + # Find invalid raw data valid_vel = np.tile(self.cells_above_sl, [4, 1, 1]) valid_vel[np.isnan(self.raw_vel_mps)] = False - + # Identify invalid velocity data (less than 3 valid beams) valid_vel_sum = np.sum(valid_vel, axis=0) valid_data2 = np.copy(self.cells_above_sl) valid_data2[valid_vel_sum < 3] = False - + # Set valid_data property for original data self.valid_data[1, :, :] = valid_data2 - + # Combine all filter data to composite valid data self.all_valid_data() - + # Estimate the number of cells in invalid ensembles using # Adjacent valid ensembles valid_data_2_sum = np.nansum(self.valid_data[1], 0) @@ -358,34 +437,37 @@ class WaterData(object): n_invalid = len(self.invalid_index) for n in range(n_invalid): # Find first valid ensemble - idx1 = np.where(valid_data_2_sum[:self.invalid_index[n]] > 0)[0] + idx1 = np.where(valid_data_2_sum[: 
self.invalid_index[n]] > 0)[0] if len(idx1) > 0: idx1 = idx1[0] else: idx1 = self.invalid_index[n] - + # Find next valid ensemble - idx2 = np.where(valid_data_2_sum[:self.invalid_index[n]] > 0)[0] + idx2 = np.where(valid_data_2_sum[: self.invalid_index[n]] > 0)[0] if len(idx2) > 0: idx2 = idx2[-1] else: idx2 = self.invalid_index[n] - + # Estimate number of cells in invalid ensemble - self.num_invalid.append(np.floor((valid_data_2_sum[idx1]+valid_data_2_sum[idx2]) / 2)) - + self.num_invalid.append( + np.floor((valid_data_2_sum[idx1] + valid_data_2_sum[idx2]) / 2) + ) + # Set processed data to non-interpolated valid data self.u_processed_mps = np.copy(self.u_mps) self.v_processed_mps = np.copy(self.v_mps) - self.u_processed_mps[self.valid_data[0] == False] = np.nan - self.v_processed_mps[self.valid_data[0] == False] = np.nan - + self.u_processed_mps[np.logical_not(self.valid_data[0])] = np.nan + self.v_processed_mps[np.logical_not(self.valid_data[0])] = np.nan + # Compute SNR range if SNR data is provided - if rssi_units_in == 'SNR': + if rssi_units_in == "SNR": self.compute_snr_rng() def populate_from_qrev_mat(self, transect): - """Populates the object using data from previously saved QRev Matlab file. + """Populates the object using data from previously saved QRev Matlab + file. Parameters ---------- @@ -398,105 +480,164 @@ class WaterData(object): if len(transect.boatVel.btVel.rawVel_mps.shape) > 1: # Multiple ensembles with one cell self.raw_vel_mps = np.moveaxis(transect.wVel.rawVel_mps, 1, 0) - self.raw_vel_mps = self.raw_vel_mps.reshape(self.raw_vel_mps.shape[0], 1, self.raw_vel_mps.shape[1]) + self.raw_vel_mps = self.raw_vel_mps.reshape( + (self.raw_vel_mps.shape[0], 1, self.raw_vel_mps.shape[1]) + ) self.corr = np.moveaxis(transect.wVel.corr, 1, 0) - self.corr = self.corr.reshape(self.corr.shape[0], 1, self.corr.shape[1]) + self.corr = self.corr.reshape( + (self.corr.shape[0], 1, self.corr.shape[1]) + ) self.rssi = np.moveaxis(transect.wVel.rssi, 1, 0) - self.rssi = self.rssi.reshape(self.rssi.shape[0], 1, self.rssi.shape[1]) + self.rssi = self.rssi.reshape( + (self.rssi.shape[0], 1, self.rssi.shape[1]) + ) self.valid_data = np.moveaxis(transect.wVel.validData, 1, 0) - self.valid_data = self.valid_data.reshape(self.valid_data.shape[0], 1, self.valid_data.shape[1]) + self.valid_data = self.valid_data.reshape( + (self.valid_data.shape[0], 1, self.valid_data.shape[1]) + ) self.u_earth_no_ref_mps = transect.wVel.uEarthNoRef_mps - self.u_earth_no_ref_mps = self.u_earth_no_ref_mps.reshape(1, self.u_earth_no_ref_mps.shape[0]) + self.u_earth_no_ref_mps = self.u_earth_no_ref_mps.reshape( + (1, self.u_earth_no_ref_mps.shape[0]) + ) self.v_earth_no_ref_mps = transect.wVel.vEarthNoRef_mps - self.v_earth_no_ref_mps = self.v_earth_no_ref_mps.reshape(1, self.v_earth_no_ref_mps.shape[0]) + self.v_earth_no_ref_mps = self.v_earth_no_ref_mps.reshape( + (1, self.v_earth_no_ref_mps.shape[0]) + ) self.u_mps = transect.wVel.u_mps - self.u_mps = self.u_mps.reshape(1, self.u_mps.shape[0]) + self.u_mps = self.u_mps.reshape((1, self.u_mps.shape[0])) self.v_mps = transect.wVel.v_mps - self.v_mps = self.v_mps.reshape(1, self.v_mps.shape[0]) + self.v_mps = self.v_mps.reshape((1, self.v_mps.shape[0])) self.u_processed_mps = transect.wVel.uProcessed_mps - self.u_processed_mps = self.u_processed_mps.reshape(1, self.u_processed_mps.shape[0]) + self.u_processed_mps = self.u_processed_mps.reshape( + (1, self.u_processed_mps.shape[0]) + ) self.v_processed_mps = transect.wVel.vProcessed_mps - self.v_processed_mps = 
self.v_processed_mps.reshape(1, self.v_processed_mps.shape[0]) + self.v_processed_mps = self.v_processed_mps.reshape( + 1, self.v_processed_mps.shape[0] + ) self.w_mps = transect.wVel.w_mps - self.w_mps = self.w_mps.reshape(1, self.w_mps.shape[0]) + self.w_mps = self.w_mps.reshape((1, self.w_mps.shape[0])) self.d_mps = transect.wVel.d_mps - self.d_mps = self.d_mps.reshape(1, self.d_mps.shape[0]) - self.snr_rng = transect.wVel.snrRng - self.snr_rng = self.snr_rng.reshape(1, self.snr_rng.shape[0]) + self.d_mps = self.d_mps.reshape((1, self.d_mps.shape[0])) + # self.snr_rng = transect.wVel.snrRng + # self.snr_rng = self.snr_rng.reshape(1, self.snr_rng.shape[0]) self.cells_above_sl = transect.wVel.cellsAboveSL.astype(bool) - self.cells_above_sl = self.cells_above_sl.reshape(1, self.cells_above_sl.shape[0]) + self.cells_above_sl = self.cells_above_sl.reshape( + 1, self.cells_above_sl.shape[0] + ) self.cells_above_sl_bt = transect.wVel.cellsAboveSLbt.astype(bool) - self.cells_above_sl_bt = self.cells_above_sl_bt.reshape(1, self.cells_above_sl_bt.shape[0]) + self.cells_above_sl_bt = self.cells_above_sl_bt.reshape( + 1, self.cells_above_sl_bt.shape[0] + ) self.sl_lag_effect_m = np.array([transect.wVel.slLagEffect_m]) # Ping type - if hasattr(transect.wVel, 'ping_type'): + if hasattr(transect.wVel, "ping_type"): if type(transect.wVel.ping_type) == str: - self.ping_type = np.tile(transect.wVel.ping_type, self.d_mps.shape) + self.ping_type = np.tile( + transect.wVel.ping_type, self.d_mps.shape + ) else: self.ping_type = transect.wVel.ping_type[np.newaxis, :] + + self.ping_type = np.char.strip(self.ping_type) else: - self.ping_type = np.tile('U', self.d_mps.shape) + self.ping_type = np.tile("U", self.d_mps.shape) else: # One ensemble with multiple cells self.raw_vel_mps = np.moveaxis(transect.wVel.rawVel_mps, 1, 0) - self.raw_vel_mps = self.raw_vel_mps.reshape(self.raw_vel_mps.shape[0], self.raw_vel_mps.shape[1], 1) + self.raw_vel_mps = self.raw_vel_mps.reshape( + (self.raw_vel_mps.shape[0], self.raw_vel_mps.shape[1], 1) + ) self.corr = np.moveaxis(transect.wVel.corr, 1, 0) - self.corr = self.corr.reshape(self.corr.shape[0], self.corr.shape[1], 1) + self.corr = self.corr.reshape( + (self.corr.shape[0], self.corr.shape[1], 1) + ) self.rssi = np.moveaxis(transect.wVel.rssi, 1, 0) - self.rssi = self.rssi.reshape(self.rssi.shape[0], self.rssi.shape[1], 1) + self.rssi = self.rssi.reshape( + (self.rssi.shape[0], self.rssi.shape[1], 1) + ) self.valid_data = np.moveaxis(transect.wVel.validData, 1, 0) - self.valid_data = self.valid_data.reshape(self.valid_data.shape[0], self.valid_data.shape[1], 1) + self.valid_data = self.valid_data.reshape( + (self.valid_data.shape[0], self.valid_data.shape[1], 1) + ) self.u_earth_no_ref_mps = transect.wVel.uEarthNoRef_mps - self.u_earth_no_ref_mps = self.u_earth_no_ref_mps.reshape(self.u_earth_no_ref_mps.shape[0], 1) + self.u_earth_no_ref_mps = self.u_earth_no_ref_mps.reshape( + self.u_earth_no_ref_mps.shape[0], 1 + ) self.v_earth_no_ref_mps = transect.wVel.vEarthNoRef_mps - self.v_earth_no_ref_mps = self.v_earth_no_ref_mps.reshape(self.v_earth_no_ref_mps.shape[0], 1) + self.v_earth_no_ref_mps = self.v_earth_no_ref_mps.reshape( + self.v_earth_no_ref_mps.shape[0], 1 + ) self.u_mps = transect.wVel.u_mps self.u_mps = self.u_mps.reshape(self.u_mps.shape[0], 1) self.v_mps = transect.wVel.v_mps self.v_mps = self.v_mps.reshape(self.v_mps.shape[0], 1) self.u_processed_mps = transect.wVel.uProcessed_mps - self.u_processed_mps = 
self.u_processed_mps.reshape(self.u_processed_mps.shape[0], 1) + self.u_processed_mps = self.u_processed_mps.reshape( + self.u_processed_mps.shape[0], 1 + ) self.v_processed_mps = transect.wVel.vProcessed_mps - self.v_processed_mps = self.v_processed_mps.reshape(self.v_processed_mps.shape[0], 1) + self.v_processed_mps = self.v_processed_mps.reshape( + self.v_processed_mps.shape[0], 1 + ) self.w_mps = transect.wVel.w_mps self.w_mps = self.w_mps.reshape(self.w_mps.shape[0], 1) self.d_mps = transect.wVel.d_mps self.d_mps = self.d_mps.reshape(self.d_mps.shape[0], 1) - self.snr_rng = transect.wVel.snrRng - self.snr_rng = self.snr_rng.reshape(self.snr_rng.shape[0], 1) + # self.snr_rng = transect.wVel.snrRng + # self.snr_rng = self.snr_rng.reshape(self.snr_rng.shape[0], 1) self.cells_above_sl = transect.wVel.cellsAboveSL.astype(bool) - self.cells_above_sl = self.cells_above_sl.reshape(self.cells_above_sl.shape[0], 1) + self.cells_above_sl = self.cells_above_sl.reshape( + self.cells_above_sl.shape[0], 1 + ) self.cells_above_sl_bt = transect.wVel.cellsAboveSLbt.astype(bool) - self.cells_above_sl_bt = self.cells_above_sl_bt.reshape(self.cells_above_sl_bt.shape[0], 1) + self.cells_above_sl_bt = self.cells_above_sl_bt.reshape( + self.cells_above_sl_bt.shape[0], 1 + ) self.sl_lag_effect_m = np.array([transect.wVel.slLagEffect_m]) # Ping type - if hasattr(transect.wVel, 'ping_type'): + if hasattr(transect.wVel, "ping_type"): if type(transect.wVel.ping_type) == str: - self.ping_type = np.tile(transect.wVel.ping_type, self.d_mps.shape) + self.ping_type = np.tile( + transect.wVel.ping_type, self.d_mps.shape + ) else: self.ping_type = transect.wVel.ping_type[:, np.newaxis] + self.ping_type = np.char.strip(self.ping_type) else: - self.ping_type = np.tile('U', self.d_mps.shape) + self.ping_type = np.tile("U", self.d_mps.shape) else: n_ensembles = transect.wVel.u_mps.shape[1] n_cells = transect.wVel.u_mps.shape[0] - if transect.wVel.rawVel_mps.shape[2] != n_ensembles or transect.wVel.rawVel_mps.shape[1] != n_cells: + if ( + transect.wVel.rawVel_mps.shape[2] != n_ensembles + or transect.wVel.rawVel_mps.shape[1] != n_cells + ): self.raw_vel_mps = np.moveaxis(transect.wVel.rawVel_mps, 2, 0) else: self.raw_vel_mps = transect.wVel.rawVel_mps - if transect.wVel.corr.shape[2] != n_ensembles or transect.wVel.corr.shape[1] != n_cells: + if ( + transect.wVel.corr.shape[2] != n_ensembles + or transect.wVel.corr.shape[1] != n_cells + ): self.corr = np.moveaxis(transect.wVel.corr, 2, 0) else: self.corr = transect.wVel.corr - if transect.wVel.rssi.shape[2] != n_ensembles or transect.wVel.rssi.shape[1] != n_cells: + if ( + transect.wVel.rssi.shape[2] != n_ensembles + or transect.wVel.rssi.shape[1] != n_cells + ): self.rssi = np.moveaxis(transect.wVel.rssi, 2, 0) else: self.rssi = transect.wVel.rssi - if transect.wVel.validData.shape[2] != n_ensembles or transect.wVel.validData.shape[1] != n_cells: + if ( + transect.wVel.validData.shape[2] != n_ensembles + or transect.wVel.validData.shape[1] != n_cells + ): self.valid_data = np.moveaxis(transect.wVel.validData, 2, 0) else: self.valid_data = transect.wVel.validData @@ -508,26 +649,21 @@ class WaterData(object): self.v_processed_mps = transect.wVel.vProcessed_mps self.w_mps = transect.wVel.w_mps self.d_mps = transect.wVel.d_mps - self.snr_rng = transect.wVel.snrRng + # self.snr_rng = transect.wVel.snrRng self.cells_above_sl = transect.wVel.cellsAboveSL.astype(bool) self.cells_above_sl_bt = transect.wVel.cellsAboveSLbt.astype(bool) self.sl_lag_effect_m = 
transect.wVel.slLagEffect_m # Ping type - if hasattr(transect.wVel, 'ping_type'): + if hasattr(transect.wVel, "ping_type"): if type(transect.wVel.ping_type) == str: self.ping_type = np.tile(transect.wVel.ping_type, self.d_mps.shape) else: self.ping_type = transect.wVel.ping_type + self.ping_type = np.char.strip(self.ping_type) else: - self.ping_type = np.tile('U', self.d_mps.shape) + self.ping_type = np.tile("U", self.d_mps.shape) self.valid_data = self.valid_data.astype(bool) - # Fix for moving-bed transects that did not have 3D array indices adjusted properly when saved - # if self.valid_data.shape[0] == self.u_processed_mps.shape[1]: - # self.valid_data = np.moveaxis(self.valid_data, 0, 2) - # self.raw_vel_mps = np.moveaxis(self.raw_vel_mps, 0, 2) - # self.corr = np.moveaxis(self.corr, 0, 2) - # self.rssi = np.moveaxis(self.rssi, 0, 2) self.frequency = transect.wVel.frequency self.orig_coord_sys = transect.wVel.origCoordSys self.orig_nav_ref = transect.wVel.origNavRef @@ -547,7 +683,7 @@ class WaterData(object): self.w_filter = transect.wVel.wFilter self.w_filter_thresholds = self.struct_to_dict(transect.wVel.wFilterThreshold) self.excluded_dist_m = transect.wVel.excludedDist - if hasattr(transect.wVel, 'orig_excludedDist'): + if hasattr(transect.wVel, "orig_excludedDist"): self.orig_excluded_dist_m = transect.wVel.orig_excludedDist else: self.orig_excluded_dist_m = transect.wVel.excludedDist @@ -566,10 +702,16 @@ class WaterData(object): self.sl_cutoff_type = transect.wVel.slCutoffType # Use measurement for filter - if hasattr(transect.wVel, 'use_measurement_thresholds'): - self.use_measurement_thresholds = self.struct_to_dict(transect.wVel.use_measurement_thresholds) - self.d_meas_thresholds = self.struct_to_dict(transect.wVel.d_meas_thresholds) - self.w_meas_thresholds = self.struct_to_dict(transect.wVel.w_meas_thresholds) + if hasattr(transect.wVel, "use_measurement_thresholds"): + self.use_measurement_thresholds = self.struct_to_dict( + transect.wVel.use_measurement_thresholds + ) + self.d_meas_thresholds = self.struct_to_dict( + transect.wVel.d_meas_thresholds + ) + self.w_meas_thresholds = self.struct_to_dict( + transect.wVel.w_meas_thresholds + ) else: self.use_measurement_thresholds = False self.d_meas_thresholds = {} @@ -614,16 +756,23 @@ class WaterData(object): adcp: InstrumentData Object of instrument data """ + if type(self.orig_coord_sys) is list or type(self.orig_coord_sys) is np.ndarray: o_coord_sys = self.orig_coord_sys[0].strip() else: o_coord_sys = self.orig_coord_sys.strip() + if self.snr_beam_velocities is None: + data = self.raw_vel_mps + else: + data = self.snr_beam_velocities + o_coord_sys = "Beam" + orig_sys = None new_sys = None if o_coord_sys != new_coord_sys: - + # Assign the transformation matrix and retrieve the sensor data t_matrix = copy.deepcopy(adcp.t_matrix.matrix) t_matrix_freq = copy.deepcopy(adcp.frequency_khz) @@ -631,36 +780,36 @@ class WaterData(object): p = getattr(sensors.pitch_deg, sensors.pitch_deg.selected).data r = getattr(sensors.roll_deg, sensors.roll_deg.selected).data h = getattr(sensors.heading_deg, sensors.heading_deg.selected).data - + # Modify the transformation matrix and heading, pitch # and roll values based on the original coordinate # system so that only the needed values are used in # computing the new coordinate system.
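A minimal standalone sketch (not part of this patch) of the heading/pitch/roll rotation that change_coord_sys constructs per ensemble below; the matrix matches hpr_matrix in the patch, while the variable names and numeric values here are illustrative assumptions only.

import numpy as np

# Hypothetical heading, pitch, and roll for one ensemble, converted to radians
h, p, r = np.deg2rad([95.0, 2.0, -1.5])
ch, sh = np.cos(h), np.sin(h)
cp, sp = np.cos(p), np.sin(p)
cr, sr = np.cos(r), np.sin(r)

# Same rotation as hpr_matrix constructed in the loop below
hpr = np.array(
    [
        [ch * cr + sh * sp * sr, sh * cp, ch * sr - sh * sp * cr],
        [-sh * cr + ch * sp * sr, ch * cp, -sh * sr - ch * sp * cr],
        [-cp * sr, sp, cp * cr],
    ]
)

# Hypothetical u, v, w velocities in the instrument frame, m/s
vel_inst = np.array([0.40, 0.10, -0.02])

# Rotate to earth-frame u, v, w; the difference (error) velocity is
# carried through unrotated, as in the patch
vel_earth = hpr @ vel_inst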
- if o_coord_sys.strip() == 'Beam': + if o_coord_sys.strip() == "Beam": orig_sys = 1 - elif o_coord_sys.strip() == 'Inst': + elif o_coord_sys.strip() == "Inst": orig_sys = 2 - elif o_coord_sys.strip() == 'Ship': + elif o_coord_sys.strip() == "Ship": orig_sys = 3 p = np.zeros(h.shape) r = np.zeros(h.shape) - t_matrix = np.eye(len(t_matrix)) - elif o_coord_sys.strip() == 'Earth': + elif o_coord_sys.strip() == "Earth": orig_sys = 4 # Assign a value to the new coordinate system - if new_coord_sys.strip() == 'Beam': + if new_coord_sys.strip() == "Beam": new_sys = 1 - elif new_coord_sys.strip() == 'Inst': + elif new_coord_sys.strip() == "Inst": new_sys = 2 - elif new_coord_sys.strip() == 'Ship': + elif new_coord_sys.strip() == "Ship": new_sys = 3 - elif new_coord_sys.strip() == 'Earth': + elif new_coord_sys.strip() == "Earth": new_sys = 4 - - # Check to ensure the new coordinate system is a higher order than the original system + + # Check to ensure the new coordinate system is a higher order than + # the original system if new_sys - orig_sys > 0: - + # Compute trig function for heading, pitch and roll ch = np.cos(np.deg2rad(h)) sh = np.sin(np.deg2rad(h)) @@ -670,73 +819,80 @@ class WaterData(object): sr = np.sin(np.deg2rad(r)) n_ens = self.raw_vel_mps.shape[2] - + for ii in range(n_ens): - + # Compute matrix for heading, pitch, and roll - hpr_matrix = np.array([[((ch[ii] * cr[ii]) + (sh[ii]*sp[ii] * sr[ii])), - (sh[ii] * cp[ii]), - ((ch[ii] * sr[ii]) - sh[ii]*sp[ii] * cr[ii])], - [(-1 * sh[ii] * cr[ii]) + (ch[ii] * sp[ii] * sr[ii]), - ch[ii] * cp[ii], - (-1 * sh[ii] * sr[ii])-(ch[ii] * sp[ii] * cr[ii])], - [(-1.*cp[ii] * sr[ii]), - sp[ii], - cp[ii] * cr[ii]]]) - + hpr_matrix = np.array( + [ + [ + ((ch[ii] * cr[ii]) + (sh[ii] * sp[ii] * sr[ii])), + (sh[ii] * cp[ii]), + ((ch[ii] * sr[ii]) - sh[ii] * sp[ii] * cr[ii]), + ], + [ + (-1 * sh[ii] * cr[ii]) + (ch[ii] * sp[ii] * sr[ii]), + ch[ii] * cp[ii], + (-1 * sh[ii] * sr[ii]) - (ch[ii] * sp[ii] * cr[ii]), + ], + [(-1.0 * cp[ii] * sr[ii]), sp[ii], cp[ii] * cr[ii]], + ] + ) + # Transform beam coordinates - if o_coord_sys == 'Beam': - + if o_coord_sys == "Beam": + # Determine frequency index for transformation if len(t_matrix.shape) > 2: idx_freq = np.where(t_matrix_freq == self.frequency[ii]) - t_mult = np.copy(t_matrix[:, :, idx_freq]) + t_mult = np.copy(np.squeeze(t_matrix[:, :, idx_freq[0][0]])) else: t_mult = np.copy(t_matrix) - + # Get velocity data - vel_beams = np.copy(self.raw_vel_mps[:, :, ii]) - + vel_beams = np.copy(data[:, :, ii]) + # Apply transformation matrix for 4 beam solutions temp_t = t_mult.dot(vel_beams) - + # Apply hpr_matrix temp_thpr = hpr_matrix.dot(temp_t[:3]) temp_thpr = np.vstack([temp_thpr, temp_t[3]]) - + # Check for invalid beams invalid_idx = np.isnan(vel_beams) - + # Identify rows requiring 3 beam solutions n_invalid_col = np.sum(invalid_idx, axis=0) col_idx = np.where(n_invalid_col == 1)[0] - + # Compute 3 beam solution, if necessary if len(col_idx) > 0: for i3 in range(len(col_idx)): - # Id invalid beam vel_3_beam = vel_beams[:, col_idx[i3]] idx_3_beam = np.where(np.isnan(vel_3_beam))[0] - + # 3 beam solution for non-RiverRay vel_3_beam_zero = vel_3_beam vel_3_beam_zero[np.isnan(vel_3_beam)] = 0 vel_error = t_mult[3, :].dot(vel_3_beam_zero) - vel_3_beam[idx_3_beam] = -1 * vel_error / t_mult[3, idx_3_beam] + vel_3_beam[idx_3_beam] = ( + -1 * vel_error / t_mult[3, idx_3_beam] + ) temp_t = t_mult.dot(vel_3_beam) - + # Apply transformation matrix for 3 # beam solutions temp_thpr[0:3, col_idx[i3]] =
hpr_matrix.dot(temp_t[:3]) temp_thpr[3, col_idx[i3]] = np.nan - + else: # Get velocity data vel_raw = np.copy(np.squeeze(self.raw_vel_mps[:, :, ii])) temp_thpr = np.array(hpr_matrix).dot(vel_raw[:3, :]) temp_thpr = np.vstack([temp_thpr, vel_raw[3, :]]) - + # Update object temp_thpr = temp_thpr.T self.u_mps[:, ii] = temp_thpr[:, 0] @@ -744,63 +900,157 @@ class WaterData(object): self.w_mps[:, ii] = temp_thpr[:, 2] self.d_mps[:, ii] = temp_thpr[:, 3] - # Because of padded arrays with zeros and RR has a variable number of bins, - # the raw data may be padded with zeros. The next 4 statements changes - # those to nan - self.u_mps[self.u_mps == 0] = np.nan - self.v_mps[self.v_mps == 0] = np.nan - self.w_mps[self.w_mps == 0] = np.nan - self.d_mps[self.d_mps == 0] = np.nan - + # Because arrays are padded with zeros and RiverRay has a variable + # number of bins, the raw data may be padded with zeros. The next 4 + # statements change those to nan + find_padded = ( + np.abs(self.u_mps) + + np.abs(self.v_mps) + + np.abs(self.w_mps) + + np.abs(self.d_mps) + ) + self.u_mps[find_padded == 0] = np.nan + self.v_mps[find_padded == 0] = np.nan + self.w_mps[find_padded == 0] = np.nan + self.d_mps[find_padded == 0] = np.nan + # Assign processed object properties self.u_processed_mps = np.copy(self.u_mps) self.v_processed_mps = np.copy(self.v_mps) - + # Assign coordinate system and reference properties self.coord_sys = new_coord_sys self.nav_ref = self.orig_nav_ref - + else: - + # Reset velocity properties to raw values self.u_mps = np.copy(self.raw_vel_mps[0]) self.v_mps = np.copy(self.raw_vel_mps[1]) self.w_mps = np.copy(self.raw_vel_mps[2]) self.d_mps = np.copy(self.raw_vel_mps[3]) - - if adcp.manufacturer == 'TRDI': - self.u_mps[self.u_mps == 0] = np.nan - self.v_mps[self.v_mps == 0] = np.nan - self.w_mps[self.w_mps == 0] = np.nan - self.d_mps[self.d_mps == 0] = np.nan - + + if adcp.manufacturer == "TRDI": + find_padded = ( + np.abs(self.u_mps) + + np.abs(self.v_mps) + + np.abs(self.w_mps) + + np.abs(self.d_mps) + ) + self.u_mps[find_padded == 0] = np.nan + self.v_mps[find_padded == 0] = np.nan + self.w_mps[find_padded == 0] = np.nan + self.d_mps[find_padded == 0] = np.nan + # Assign processed properties self.u_processed_mps = np.copy(self.u_mps) self.v_processed_mps = np.copy(self.v_mps) - + else: - + # Reset velocity properties to raw values self.u_mps = np.copy(self.raw_vel_mps[0]) self.v_mps = np.copy(self.raw_vel_mps[1]) self.w_mps = np.copy(self.raw_vel_mps[2]) self.d_mps = np.copy(self.raw_vel_mps[3]) - - if adcp.manufacturer == 'TRDI': - self.u_mps[self.u_mps == 0] = np.nan - self.v_mps[self.v_mps == 0] = np.nan - self.w_mps[self.w_mps == 0] = np.nan - self.d_mps[self.d_mps == 0] = np.nan - + + if adcp.manufacturer == "TRDI": + find_padded = ( + np.abs(self.u_mps) + + np.abs(self.v_mps) + + np.abs(self.w_mps) + + np.abs(self.d_mps) + ) + self.u_mps[find_padded == 0] = np.nan + self.v_mps[find_padded == 0] = np.nan + self.w_mps[find_padded == 0] = np.nan + self.d_mps[find_padded == 0] = np.nan + + # Assign processed properties self.u_processed_mps = np.copy(self.u_mps) self.v_processed_mps = np.copy(self.v_mps) - - if new_coord_sys == 'Earth': + + if new_coord_sys == "Earth": self.u_earth_no_ref_mps = np.copy(self.u_mps) self.v_earth_no_ref_mps = np.copy(self.v_mps) - - def set_nav_reference(self, boat_vel): + + def earth_to_beam(self, sensors, adcp): + """Converts earth coordinates to beam coordinates.
+ + Parameters + ---------- + sensors: Sensors + Object of class Sensors + adcp: InstrumentData + Object of class InstrumentData + """ + + # Create matrix to store results + vel_beam = np.tile(np.nan, self.raw_vel_mps.shape) + + # Assign the transformation matrix and frequencies + t_matrix = copy.deepcopy(adcp.t_matrix.matrix) + t_matrix_freq = copy.deepcopy(adcp.frequency_khz) + + # Retrieve the sensor data + p = getattr(sensors.pitch_deg, sensors.pitch_deg.selected).data + r = getattr(sensors.roll_deg, sensors.roll_deg.selected).data + h = getattr(sensors.heading_deg, sensors.heading_deg.selected).data + + # Compute trig function for heading, pitch and roll + ch = np.cos(np.deg2rad(h)) + sh = np.sin(np.deg2rad(h)) + cp = np.cos(np.deg2rad(p)) + sp = np.sin(np.deg2rad(p)) + cr = np.cos(np.deg2rad(r)) + sr = np.sin(np.deg2rad(r)) + + # Process each ensemble + n_ens = self.raw_vel_mps.shape[2] + for ii in range(n_ens): + + # Compute matrix for heading, pitch, and roll + hpr_matrix = np.array( + [ + [ + ((ch[ii] * cr[ii]) + (sh[ii] * sp[ii] * sr[ii])), + (sh[ii] * cp[ii]), + ((ch[ii] * sr[ii]) - sh[ii] * sp[ii] * cr[ii]), + ], + [ + (-1 * sh[ii] * cr[ii]) + (ch[ii] * sp[ii] * sr[ii]), + ch[ii] * cp[ii], + (-1 * sh[ii] * sr[ii]) - (ch[ii] * sp[ii] * cr[ii]), + ], + [(-1.0 * cp[ii] * sr[ii]), sp[ii], cp[ii] * cr[ii]], + ] + ) + + # Determine frequency index for transformation + if len(t_matrix.shape) > 2: + idx_freq = np.where(t_matrix_freq == self.frequency[ii]) + t_mult = np.copy(np.squeeze(t_matrix[:, :, idx_freq[0][0]])) + else: + t_mult = np.copy(t_matrix) + + # Construct earth velocity matrix for ensemble + vel_enu = np.vstack( + [ + self.u_earth_no_ref_mps[:, ii], + self.v_earth_no_ref_mps[:, ii], + self.w_mps[:, ii], + self.d_mps[:, ii], + ] + ) + + # Compute beam velocities + vel_xyz = np.copy(vel_enu) + vel_xyz[0:3, :] = np.matmul(np.linalg.inv(hpr_matrix), vel_enu[:3]) + vel_beam[:, :, ii] = np.matmul(np.linalg.inv(t_mult), vel_xyz) + + return vel_beam + + def set_nav_reference(self, boat_vel): """This function sets the navigation reference.
The current reference is first removed from the velocity and then the @@ -811,7 +1061,7 @@ class WaterData(object): boat_vel: BoatStructure Object of BoatStructure """ - + # Apply selected navigation reference boat_select = getattr(boat_vel, boat_vel.selected) if boat_select is not None: @@ -819,29 +1069,33 @@ class WaterData(object): self.v_mps = np.add(self.v_earth_no_ref_mps, boat_select.v_processed_mps) self.nav_ref = boat_select.nav_ref else: - self.u_mps = repmat([np.nan], - self.u_earth_no_ref_mps.shape[0], - self.u_earth_no_ref_mps.shape[1]) - self.v_mps = repmat([np.nan], - self.v_earth_no_ref_mps.shape[0], - self.v_earth_no_ref_mps.shape[1]) - if boat_vel.selected == 'bt_vel': - self.nav_ref = 'BT' - elif boat_vel.selected == 'gga_vel': - self.nav_ref = 'GGA' - elif boat_vel.selected == 'vtg_vel': - self.nav_ref = 'VTG' - + self.u_mps = repmat( + [np.nan], + self.u_earth_no_ref_mps.shape[0], + self.u_earth_no_ref_mps.shape[1], + ) + self.v_mps = repmat( + [np.nan], + self.v_earth_no_ref_mps.shape[0], + self.v_earth_no_ref_mps.shape[1], + ) + if boat_vel.selected == "bt_vel": + self.nav_ref = "BT" + elif boat_vel.selected == "gga_vel": + self.nav_ref = "GGA" + elif boat_vel.selected == "vtg_vel": + self.nav_ref = "VTG" + valid_data2 = np.copy(self.cells_above_sl) valid_data2[np.isnan(self.u_mps)] = False self.valid_data[1] = valid_data2 - + # Duplicate original to other filters that have yet to be applied self.valid_data[2:] = np.tile(self.valid_data[1], [7, 1, 1]) - + # Combine all filter data and update processed properties self.all_valid_data() - + def change_heading(self, boat_vel, heading_chng): """Adjusts the velocity vectors for a change in heading due to a change in magnetic variation or heading offset. @@ -862,11 +1116,13 @@ class WaterData(object): # Reprocess water data to get navigation reference corrected velocities self.set_nav_reference(boat_vel) - + def change_heading_source(self, boat_vel, heading): - """Applies changes to water velocity when the heading source is changed. + """Applies changes to water velocity when the heading source is + changed. - Typically called when the heading source is changed between external and internal. + Typically called when the heading source is changed between external + and internal. Parameters ---------- @@ -878,14 +1134,15 @@ class WaterData(object): u_nr = self.u_earth_no_ref_mps v_nr = self.v_earth_no_ref_mps direction, mag = cart2pol(u_nr, v_nr) - u_nr_rotated, v_nr_rotated = pol2cart(direction - - np.deg2rad(repmat(heading, len(mag), 1)), mag) + u_nr_rotated, v_nr_rotated = pol2cart( + direction - np.deg2rad(repmat(heading, len(mag), 1)), mag + ) self.u_earth_no_ref_mps = u_nr_rotated self.v_earth_no_ref_mps = v_nr_rotated self.set_nav_reference(boat_vel) - - def apply_interpolation(self, transect, ens_interp='None', cells_interp='None'): + + def apply_interpolation(self, transect, ens_interp="None", cells_interp="None"): """Coordinates the application of water velocity interpolation.
Parameters @@ -902,59 +1159,76 @@ class WaterData(object): self.v_processed_mps = np.tile([np.nan], self.v_mps.shape) self.u_processed_mps[self.valid_data[0]] = self.u_mps[self.valid_data[0]] self.v_processed_mps[self.valid_data[0]] = self.v_mps[self.valid_data[0]] - + # Determine interpolation methods to apply - if ens_interp == 'None': + if ens_interp == "None": ens_interp = self.interpolate_ens else: self.interpolate_ens = ens_interp - if cells_interp == 'None': + if cells_interp == "None": cells_interp = self.interpolate_cells else: self.interpolate_cells = cells_interp - if ens_interp == 'abba' or cells_interp == 'abba': - self.interpolate_ens = 'abba' - self.interpolate_cells = 'abba' + if ens_interp == "abba" or cells_interp == "abba": + self.interpolate_ens = "abba" + self.interpolate_cells = "abba" self.interpolate_abba(transect) else: - if ens_interp == 'None': + if ens_interp == "None": # Sets invalid data to nan with no interpolation self.interpolate_ens_none() - elif ens_interp == 'ExpandedT': - # Sets interpolate to None as the interpolation is done in class QComp + elif ens_interp == "ExpandedT": + # Sets interpolate to None as the interpolation is done in + # class QComp self.interpolate_ens_next() - elif ens_interp == 'Hold9': - # Interpolates using SonTek's method of holding last valid for up to 9 samples + elif ens_interp == "Hold9": + # Interpolates using SonTek's method of holding last valid + # for up to 9 samples self.interpolate_ens_hold_last_9() - elif ens_interp == 'Hold': + elif ens_interp == "Hold": # Interpolates by holding last valid indefinitely self.interpolate_ens_hold_last() - elif ens_interp == 'Linear': + elif ens_interp == "Linear": # Interpolates using linear interpolation self.interpolate_ens_linear(transect) - elif ens_interp == 'TRDI': + elif ens_interp == "TRDI": # TRDI is applied in discharge self.interpolate_ens_none() self.interpolate_ens = ens_interp # Apply specified cell interpolation method - if cells_interp == 'None': + if cells_interp == "None": # Sets invalid data to nan with no interpolation self.interpolate_cells_none() - elif cells_interp == 'TRDI': + elif cells_interp == "TRDI": # Use TRDI method to interpolate invalid interior cells self.interpolate_cells_trdi(transect) - elif cells_interp == 'Linear': + elif cells_interp == "Linear": # Uses linear interpolation to interpolate velocity for all # invalid bins including those in invalid ensembles # up to 9 samples self.interpolate_cells_linear(transect) - - def apply_filter(self, transect, beam=None, difference=None, difference_threshold=None, vertical=None, - vertical_threshold=None, other=None, excluded=None, snr=None, wt_depth=None): - """Coordinates application of specified filters and subsequent interpolation. + elif cells_interp == "Froude": + self.interpolate_cells_froude() + + def apply_filter( + self, + transect, + beam=None, + difference=None, + difference_threshold=None, + vertical=None, + vertical_threshold=None, + other=None, + excluded=None, + snr=None, + wt_depth=None, + ): + """Coordinates application of specified filters and subsequent + interpolation. 
+ Parameters ---------- transect: TransectData @@ -980,16 +1254,36 @@ class WaterData(object): """ # Determine filters to apply - if len({beam, difference, difference_threshold, vertical, vertical_threshold, other, excluded, snr, - wt_depth}) > 1: - + if ( + len( + { + beam, + difference, + difference_threshold, + vertical, + vertical_threshold, + other, + excluded, + snr, + wt_depth, + } + ) + > 1 + ): + + # Because the snr filter may apply 3-beam solutions the result + # could affect other filters, thus it should be run first + if snr is not None: + self.filter_snr(transect=transect, setting=snr) if difference is not None: - if difference == 'Manual': - self.filter_diff_vel(setting=difference, threshold=difference_threshold) + if difference == "Manual": + self.filter_diff_vel( + setting=difference, threshold=difference_threshold + ) else: self.filter_diff_vel(setting=difference) if vertical is not None: - if vertical == 'Manual': + if vertical == "Manual": self.filter_vert_vel(setting=vertical, threshold=vertical_threshold) else: self.filter_vert_vel(setting=vertical) @@ -997,23 +1291,22 @@ class WaterData(object): self.filter_smooth(transect=transect, setting=other) if excluded is not None: self.filter_excluded(transect=transect, setting=excluded) - if snr is not None: - self.filter_snr(setting=snr) if wt_depth is not None: self.filter_wt_depth(transect=transect, setting=wt_depth) if beam is not None: self.filter_beam(setting=beam, transect=transect) else: - self.filter_diff_vel(setting=self.d_filter, threshold=self.d_filter_thresholds) - self.filter_vert_vel(setting=self.w_filter, threshold=self.w_filter_thresholds) + self.filter_snr(transect=transect, setting=self.snr_filter) + self.filter_diff_vel( + setting=self.d_filter, threshold=self.d_filter_thresholds + ) + self.filter_vert_vel( + setting=self.w_filter, threshold=self.w_filter_thresholds + ) self.filter_smooth(transect=transect, setting=self.smooth_filter) self.filter_excluded(transect=transect, setting=self.excluded_dist_m) - self.filter_snr(setting=self.snr_filter) self.filter_beam(setting=self.beam_filter, transect=transect) - # After filters have been applied, interpolate to estimate values for invalid data. - # self.apply_interpolation(transect=transect) - def sos_correction(self, ratio): """Corrects water velocities for a change in speed of sound. @@ -1031,7 +1324,8 @@ class WaterData(object): self.v_earth_no_ref_mps = self.v_earth_no_ref_mps * ratio def adjust_side_lobe(self, transect): - """Adjust the side lobe cutoff for vertical beam and interpolated depths. + """Adjust the side lobe cutoff for vertical beam and interpolated + depths. 
Parameters ---------- @@ -1042,21 +1336,28 @@ class WaterData(object): selected = transect.depths.selected depth_selected = getattr(transect.depths, transect.depths.selected) cells_above_slbt = np.copy(self.cells_above_sl_bt) - + # Compute cutoff for vertical beam depths - if selected == 'vb_depths': - sl_cutoff_vb = (depth_selected.depth_processed_m - depth_selected.draft_use_m) \ - * np.cos(np.deg2rad(transect.adcp.beam_angle_deg)) \ - - self.sl_lag_effect_m + depth_selected.draft_use_m - cells_above_slvb = np.round(depth_selected.depth_cell_depth_m, 2) < np.round(sl_cutoff_vb, 2) - idx = np.where(transect.depths.bt_depths.valid_data == False) + if selected == "vb_depths": + sl_cutoff_vb = ( + (depth_selected.depth_processed_m - depth_selected.draft_use_m) + * np.cos(np.deg2rad(transect.adcp.beam_angle_deg)) + - self.sl_lag_effect_m + + depth_selected.draft_use_m + ) + cells_above_slvb = np.round( + depth_selected.depth_cell_depth_m, 2 + ) < np.round(sl_cutoff_vb, 2) + idx = np.where(np.logical_not(transect.depths.bt_depths.valid_data)) cells_above_slbt[:, idx] = cells_above_slvb[:, idx] cells_above_sl = np.logical_and(cells_above_slbt, cells_above_slvb) else: cells_above_sl = cells_above_slbt # Compute cutoff from interpolated depths - n_valid_beams = np.nansum(depth_selected.valid_beams, 0) + valid_beams = depth_selected.depth_beams_m > 0 + n_valid_beams = np.nansum(valid_beams, 0) + # n_valid_beams = np.nansum(depth_selected.valid_beams, 0) # Find ensembles with no valid beam depths idx = np.where(n_valid_beams == 0)[0] @@ -1067,62 +1368,79 @@ class WaterData(object): sl_lag_effect_m = self.sl_lag_effect_m[idx] else: sl_lag_effect_m = self.sl_lag_effect_m - - sl_cutoff_int = (depth_selected.depth_processed_m[idx] - depth_selected.draft_use_m) \ - * np.cos(np.deg2rad(transect.adcp.beam_angle_deg)) - sl_lag_effect_m + \ - depth_selected.draft_use_m + + sl_cutoff_int = ( + (depth_selected.depth_processed_m[idx] - depth_selected.draft_use_m) + * np.cos(np.deg2rad(transect.adcp.beam_angle_deg)) + - sl_lag_effect_m + + depth_selected.draft_use_m + ) for i in range(len(idx)): - cells_above_sl[:, idx[i]] = np.less(depth_selected.depth_cell_depth_m[:, idx[i]], sl_cutoff_int[i]) - - # Find ensembles with at least 1 invalid beam depth + cells_above_sl[:, idx[i]] = np.less( + depth_selected.depth_cell_depth_m[:, idx[i]], sl_cutoff_int[i] + ) + # Find ensembles with at least 1 invalid beam depth idx = np.where(np.logical_and(n_valid_beams < 4, n_valid_beams > 0))[0] if len(idx) > 0: if len(self.sl_lag_effect_m) > 1: sl_lag_effect_m = self.sl_lag_effect_m[idx] else: sl_lag_effect_m = self.sl_lag_effect_m - - sl_cutoff_int = (depth_selected.depth_processed_m[idx] - depth_selected.draft_use_m)\ - * np.cos(np.deg2rad(transect.adcp.beam_angle_deg)) \ - - sl_lag_effect_m + depth_selected.draft_use_m + + sl_cutoff_int = ( + (depth_selected.depth_processed_m[idx] - depth_selected.draft_use_m) + * np.cos(np.deg2rad(transect.adcp.beam_angle_deg)) + - sl_lag_effect_m + + depth_selected.draft_use_m + ) cells_above_sl_int = np.tile(True, cells_above_sl.shape) for i in range(len(idx)): - cells_above_sl_int[:, idx[i]] = np.less(depth_selected.depth_cell_depth_m[:, idx[i]], sl_cutoff_int[i]) - + cells_above_sl_int[:, idx[i]] = np.less( + depth_selected.depth_cell_depth_m[:, idx[i]], sl_cutoff_int[i] + ) + cells_above_sl[cells_above_sl_int == 0] = 0 - + self.cells_above_sl = np.copy(cells_above_sl) valid_vel = np.logical_not(np.isnan(self.u_mps)) self.valid_data[1, :, :] = self.cells_above_sl * valid_vel 
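A minimal standalone sketch (not part of this patch) of the side-lobe cutoff geometry that adjust_side_lobe applies above: for a beam angle theta, cells with centers deeper than roughly (depth - draft) * cos(theta) - lag + draft are discarded; all numeric values here are illustrative assumptions.

import numpy as np

depth_m = 4.0           # hypothetical processed depth to the bed, m
draft_m = 0.3           # hypothetical transducer draft, m
beam_angle_deg = 20.0   # hypothetical ADCP beam angle, degrees
sl_lag_effect_m = 0.15  # hypothetical lag effect for the water mode, m

# Depth of the side-lobe cutoff (about 3.63 m for these values)
sl_cutoff_m = (
    (depth_m - draft_m) * np.cos(np.deg2rad(beam_angle_deg))
    - sl_lag_effect_m
    + draft_m
)

# Cells with centers below the cutoff are marked invalid
cell_depth_m = np.array([0.5, 1.5, 2.5, 3.5, 4.5])
cells_above_sl = cell_depth_m < sl_cutoff_m  # the deepest cell is excluded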
self.all_valid_data() self.compute_snr_rng() self.apply_filter(transect) - # self.apply_interpolation(transect) def all_valid_data(self): - """Combines the results of all filters to determine a final set of valid data""" + """Combines the results of all filters to determine a final set of + valid data""" n_filters = len(self.valid_data[1:, 0, 0]) - sum_filters = np.nansum(self.valid_data[1:, :, :], 0) / n_filters + + # Since cells with invalid SNR are replaced with 3-beam solutions, + # set these values to true as they will not need interpolation + valid_data = np.copy(self.valid_data) + if self.snr_3beam_comp: + valid_data[7, :, :] = True + + sum_filters = np.nansum(valid_data[1:, :, :], 0) / n_filters valid = np.tile([True], self.cells_above_sl.shape) valid[sum_filters < 1] = False self.valid_data[0] = valid - + def filter_beam(self, setting, transect=None): """Applies beam filter to water velocity data. - The determination of invalid data depends on whether - 3-beam or 4-beam solutions are acceptable. This function can be applied by + The determination of invalid data depends on whether 3-beam or + 4-beam solutions are acceptable. This function can be applied by specifying 3 or 4 beam solutions and setting self.beam_filter to -1 - which will trigger an automatic mode. The automatic mode will find all 3 beam - solutions and them compare the velocity of the 3 beam solutions to nearest 4 - beam solutions. If the 3 beam solution is within 50% of the average of the - neighboring 4 beam solutions the data are deemed valid, if not they are marked - invalid. Thus in automatic mode only those data from 3 beam solutions - that are sufficiently different from the 4 beam solutions are marked invalid. - If the number of beams is specified manually, it is applied + which will trigger an automatic mode. The automatic mode will find + all 3 beam solutions and then compare the velocity of the 3 beam + solutions to the nearest 4 beam solutions. If the 3 beam solution is + within 50% of the average of the neighboring 4 beam solutions the + data are deemed valid, if not they are marked invalid. Thus in + automatic mode only those data from 3 beam solutions that are + sufficiently different from the 4 beam solutions are marked + invalid. If the number of beams is specified manually, it is applied uniformly for the whole transect.
Parameters @@ -1132,23 +1450,29 @@ class WaterData(object): transect: TransectData Object of TransectData """ - + self.beam_filter = setting - - # In manual mode (3 or 4) determine number of raw invalid and number of 2 beam solutions + + # In manual mode (3 or 4) determine number of raw invalid and number + # of 2 beam solutions if self.beam_filter > 0: - + # Find invalid raw data valid_vel = np.array([self.cells_above_sl] * 4) - valid_vel[np.isnan(self.raw_vel_mps)] = 0 - + if self.snr_beam_velocities is None: + valid_vel[np.isnan(self.raw_vel_mps)] = 0 + else: + valid_vel[np.isnan(self.snr_beam_velocities)] = 0 # Determine how many beams or transformed coordinates are valid valid_vel_sum = np.sum(valid_vel, 0) valid = copy.deepcopy(self.cells_above_sl) - - # Compare number of valid beams or velocity coordinates to filter value - valid[np.logical_and((valid_vel_sum < self.beam_filter), (valid_vel_sum > 2))] = False - + + # Compare number of valid beams or velocity coordinates to + # filter value + valid[ + np.logical_and((valid_vel_sum < self.beam_filter), (valid_vel_sum > 2)) + ] = False + # Save logical of valid data to object self.valid_data[5, :, :] = valid @@ -1161,7 +1485,8 @@ class WaterData(object): self.automatic_beam_filter_abba_interpolation(transect) def automatic_beam_filter_abba_interpolation(self, transect): - """Applies abba interpolation to allow comparison of interpolated and 3-beam solutions. + """Applies abba interpolation to allow comparison of interpolated and + 3-beam solutions. Parameters ---------- @@ -1169,12 +1494,14 @@ class WaterData(object): Object of TransectData """ - # Create array indicating which cells do not have 4-beam solutions and all cells below side lobe are nan + # Create array indicating which cells do not have 4-beam solutions + # and all cells below side lobe are nan temp = copy.deepcopy(self) temp.filter_beam(4) valid_bool = temp.valid_data[5, :, :] valid = valid_bool.astype(float) - valid[temp.cells_above_sl == False] = np.nan + valid[np.logical_not(temp.cells_above_sl)] = 0 + valid[np.logical_not(temp.valid_data[1, :, :])] = 0 # Initialize processed velocity data variables temp.u_processed_mps = copy.deepcopy(temp.u_mps) @@ -1196,18 +1523,31 @@ class WaterData(object): u = u[:, transect.in_transect_idx] v = v[:, transect.in_transect_idx] - u[np.logical_not(temp.valid_data[5, :, :])] = np.nan - v[np.logical_not(temp.valid_data[5, :, :])] = np.nan - interpolated_data = self.compute_abba_interpolation(wt_data=temp, - data_list=[u, v], - valid=temp.valid_data[5, :, :], - transect=transect) + u[ + np.logical_not(temp.valid_data[5, :, transect.in_transect_idx].T) + ] = np.nan + v[ + np.logical_not(temp.valid_data[5, :, transect.in_transect_idx].T) + ] = np.nan + interpolated_data = self.compute_abba_interpolation( + wt_data=temp, + data_list=[u, v], + valid=valid, + transect=transect, + ) if interpolated_data is not None: - # Compute interpolated to measured ratios and apply filter criteria + # Compute interpolated to measured ratios and apply filter + # criteria for n in range(len(interpolated_data[0])): - u_ratio = (temp.u_mps[interpolated_data[0][n][0]] / interpolated_data[0][n][1]) - 1 - v_ratio = (temp.v_mps[interpolated_data[1][n][0]] / interpolated_data[1][n][1]) - 1 + u_ratio = ( + temp.u_mps[interpolated_data[0][n][0]] + / interpolated_data[0][n][1] + ) - 1 + v_ratio = ( + temp.v_mps[interpolated_data[1][n][0]] + / interpolated_data[1][n][1] + ) - 1 if np.abs(u_ratio) < 0.5 and np.abs(v_ratio) < 0.5: valid_bool[interpolated_data[0][n][0]] 
= True else: @@ -1228,18 +1568,19 @@ class WaterData(object): """Applies filter to difference velocity. Applies either manual or automatic filtering of the difference (error) - velocity. The automatic mode is based on the following: This filter is - based on the assumption that the water error velocity should follow a gaussian - distribution. Therefore, 5 standard deviations should encompass all of the - valid data. The standard deviation and limits (multiplier*std dev) are computed - in an iterative process until filtering out additional data does not change the - computed standard deviation. + velocity. The automatic mode is based on the following: + This filter is based on the assumption that the water error velocity + should follow a gaussian distribution. Therefore, 5 standard + deviations should encompass all of the valid data. + The standard deviation and limits (multiplier*std dev) are computed + in an iterative process until filtering out additional data does not + change the computed standard deviation. Parameters ---------- setting: str Filter setting (Auto, Off, Manual) - threshold: float + threshold: float, dict Threshold value for Manual setting. """ @@ -1251,41 +1592,49 @@ class WaterData(object): # Get difference data from object d_vel = copy.deepcopy(self.d_mps) - # NOTE: Versions prior to 1.01 did not apply this step to remove data below the side lobe cutoff + # NOTE: Versions prior to 1.01 did not apply this step to remove data + # below the side lobe cutoff d_vel[np.logical_not(self.cells_above_sl)] = np.nan - d_vel_min_ref = None - d_vel_max_ref = None - bad_idx_rows = np.array([]).astype(int) bad_idx_cols = np.array([]).astype(int) # Apply selected method - if self.d_filter == 'Manual': + if self.d_filter == "Manual": d_vel_max_ref = np.abs(self.d_filter_thresholds) d_vel_min_ref = -1 * d_vel_max_ref # Set valid data row 2 for difference velocity filter results - bad_idx_rows, bad_idx_cols = np.where(np.logical_or(nan_greater(d_vel, d_vel_max_ref), - nan_less(d_vel, d_vel_min_ref))) - elif self.d_filter == 'Off': + bad_idx_rows, bad_idx_cols = np.where( + np.logical_or( + nan_greater(d_vel, d_vel_max_ref), nan_less(d_vel, d_vel_min_ref) + ) + ) + elif self.d_filter == "Off": d_vel_max_ref = np.nanmax(np.nanmax(d_vel)) + 1 d_vel_min_ref = np.nanmin(np.nanmin(d_vel)) - 1 # Set valid data row 2 for difference velocity filter results - bad_idx_rows, bad_idx_cols = np.where(np.logical_or(nan_greater(d_vel, d_vel_max_ref), - nan_less(d_vel, d_vel_min_ref))) + bad_idx_rows, bad_idx_cols = np.where( + np.logical_or( + nan_greater(d_vel, d_vel_max_ref), nan_less(d_vel, d_vel_min_ref) + ) + ) self.d_filter_thresholds = d_vel_max_ref - elif self.d_filter == 'Auto': - # Apply threshold from entire measurement processing to each transect + elif self.d_filter == "Auto": + # Apply threshold from entire measurement processing to each + # transect if self.use_measurement_thresholds: self.d_filter_thresholds = self.d_meas_thresholds for p_type in self.d_meas_thresholds.keys(): data_max_ref = self.d_meas_thresholds[p_type][0] data_min_ref = self.d_meas_thresholds[p_type][1] data = np.copy(self.d_mps) - data[self.ping_type!=p_type] = np.nan - idx_invalid_rows, idx_invalid_cols = np.where(np.logical_or(np.greater(data, data_max_ref), - np.less(data, data_min_ref))) + data[self.ping_type != p_type] = np.nan + idx_invalid_rows, idx_invalid_cols = np.where( + np.logical_or( + np.greater(data, data_max_ref), np.less(data, data_min_ref) + ) + ) if len(idx_invalid_rows) > 0: if 
len(bad_idx_rows) > 0: bad_idx_rows = np.hstack((bad_idx_rows, idx_invalid_rows)) @@ -1307,9 +1656,11 @@ class WaterData(object): # Copy of difference velocities vel_2_filter = copy.deepcopy(d_vel) # Remove data not associated with the specified ping type - vel_2_filter[self.ping_type!=p_type] = np.nan + vel_2_filter[self.ping_type != p_type] = np.nan # Apply filter to data of a single ping type - idx_invalid_rows, idx_invalid_cols, threshold = self.iqr_filter(vel_2_filter) + idx_invalid_rows, idx_invalid_cols, threshold = self.iqr_filter( + vel_2_filter + ) # Combine indices of invalid data for all ping types if len(idx_invalid_rows) > 0: if len(bad_idx_rows) > 0: @@ -1321,17 +1672,20 @@ class WaterData(object): thresholds[p_type] = threshold self.d_filter_thresholds = thresholds - # Compute unique threshold for each transect when no ping types are available + # Compute unique threshold for each transect when no ping types + # are available else: - self.ping_type = np.array(['U']) + self.ping_type = np.array(["U"]) bad_idx_rows, bad_idx_cols, threshold = self.iqr_filter(d_vel) - self.d_filter_thresholds = {'U': threshold} + self.d_filter_thresholds = {"U": threshold} valid = copy.deepcopy(self.cells_above_sl) if len(bad_idx_rows) > 0: valid[bad_idx_rows, bad_idx_cols] = False - # TODO Seems like if the difference velocity doesn't exist due to a 3-beam solution it shouldn't be - # flagged as invalid however this is the way it was in Matlab. May change this in future. + # TODO Seems like if the difference velocity doesn't exist due to a + # 3-beam solution it shouldn't be + # flagged as invalid however this is the way it was in Matlab. + # May change this in future. # valid[np.isnan(self.d_mps)] = True self.valid_data[2, :, :] = valid @@ -1382,8 +1736,11 @@ class WaterData(object): data_min_ref = np.nanmedian(data) - multiplier * data_iqr # Identify valid and invalid data - data_bad_rows, data_bad_cols = np.where(np.logical_or(nan_greater(data, data_max_ref), - nan_less(data, data_min_ref))) + data_bad_rows, data_bad_cols = np.where( + np.logical_or( + nan_greater(data, data_max_ref), nan_less(data, data_min_ref) + ) + ) # Update filtered data array data[data_bad_rows, data_bad_cols] = np.nan @@ -1395,13 +1752,18 @@ class WaterData(object): iqr_diff = 0 # Determine row and column index of invalid cells with invalid data - bad_idx_rows, bad_idx_cols = np.where(np.logical_or(nan_greater(data_orig, data_max_ref), - nan_less(data_orig, data_min_ref))) + bad_idx_rows, bad_idx_cols = np.where( + np.logical_or( + nan_greater(data_orig, data_max_ref), + nan_less(data_orig, data_min_ref), + ) + ) else: # All data are invalid # Determine row and column index of invalid cells with invalid data - bad_idx_rows, bad_idx_cols = np.where(np.logical_or(np.greater(data, -1), - np.less(data, 1))) + bad_idx_rows, bad_idx_cols = np.where( + np.logical_or(np.greater(data, -1), np.less(data, 1)) + ) threshold = [data_max_ref, data_min_ref] @@ -1444,8 +1806,11 @@ class WaterData(object): data_min_ref = np.nanmedian(data) - multiplier * data_iqr # Identify valid and invalid data - bad_idx = np.where(np.logical_or(nan_greater(data, data_max_ref), - nan_less(data, data_min_ref))) + bad_idx = np.where( + np.logical_or( + nan_greater(data, data_max_ref), nan_less(data, data_min_ref) + ) + ) # Update filtered data array data[bad_idx] = np.nan @@ -1463,20 +1828,23 @@ class WaterData(object): def filter_vert_vel(self, setting, threshold=None): """Applies filter to vertical velocity. 
-        Applies either manual or automatic filter of the difference (error) velocity. The automatic
-        mode is based on the following: This filter is based on the assumption that the water error
-        velocity should follow a gaussian distribution. Therefore, 4 standard deviations should
-        encompass all of the valid data. The standard deviation and limits (multplier * standard deviation)
-        are computed in an iterative process until filtering out additional data does not change
-        the computed standard deviation.
+        Applies either manual or automatic filtering of the vertical
+        velocity. The automatic mode is based on the assumption that the
+        water vertical velocity should follow a Gaussian distribution.
+        Therefore, 4 standard deviations should encompass all of the valid
+        data. The standard deviation and limits
+        (multiplier * standard deviation) are computed in an iterative
+        process until filtering out additional data does not change the
+        computed standard deviation.

         Parameters
         ---------
         setting: str
             Filter setting (Auto, Off, Manual)
-        threshold: float
-            Threshold value for Manual setting."""
-
+        threshold: float, dict
+            Threshold value for Manual setting.
+        """
+
         # Set vertical velocity filter properties
         self.w_filter = setting
         if threshold is not None:
@@ -1485,32 +1853,39 @@ class WaterData(object):
         # Get difference data from object
         w_vel = copy.deepcopy(self.w_mps)

-        # NOTE: Versions prior to 1.01 did not apply this step to remove data below the side lobe cutoff
+        # NOTE: Versions prior to 1.01 did not apply this step to remove data
+        # below the side lobe cutoff
         w_vel[np.logical_not(self.cells_above_sl)] = np.nan

-        w_vel_min_ref = None
         w_vel_max_ref = None

         bad_idx_rows = np.array([]).astype(int)
         bad_idx_cols = np.array([]).astype(int)

         # Apply selected method
-        if self.w_filter == 'Manual':
+        if self.w_filter == "Manual":
             w_vel_max_ref = np.abs(self.w_filter_thresholds)
             w_vel_min_ref = -1 * w_vel_max_ref

             # Identify valid and invalid data
-            bad_idx_rows, bad_idx_cols = np.where(np.logical_or(nan_greater(w_vel, w_vel_max_ref),
-                                                                nan_less(w_vel, w_vel_min_ref)))
-        elif self.w_filter == 'Off':
+            bad_idx_rows, bad_idx_cols = np.where(
+                np.logical_or(
+                    nan_greater(w_vel, w_vel_max_ref), nan_less(w_vel, w_vel_min_ref)
+                )
+            )
+        elif self.w_filter == "Off":
             w_vel_max_ref = np.nanmax(np.nanmax(w_vel)) + 1
             w_vel_min_ref = np.nanmin(np.nanmin(w_vel)) - 1

             # Identify valid and invalid data
-            bad_idx_rows, bad_idx_cols = np.where(np.logical_or(nan_greater(w_vel, w_vel_max_ref),
-                                                                nan_less(w_vel, w_vel_min_ref)))
+            bad_idx_rows, bad_idx_cols = np.where(
+                np.logical_or(
+                    nan_greater(w_vel, w_vel_max_ref), nan_less(w_vel, w_vel_min_ref)
+                )
+            )

             self.w_filter_thresholds = w_vel_max_ref

-        elif self.w_filter == 'Auto':
-            # Apply threshold from entire measurement processing to each transect
+        elif self.w_filter == "Auto":
+            # Apply threshold from entire measurement processing to each
+            # transect
            if self.use_measurement_thresholds:
                 self.w_filter_thresholds = self.w_meas_thresholds
                 for p_type in self.w_meas_thresholds.keys():
@@ -1518,8 +1893,11 @@ class WaterData(object):
                     data_min_ref = self.w_meas_thresholds[p_type][1]
                     data = np.copy(self.w_mps)
                     data[self.ping_type != p_type] = np.nan
-                    idx_invalid_rows, idx_invalid_cols = np.where(np.logical_or(np.greater(data, data_max_ref),
-                                                                                np.less(data, data_min_ref)))
+                    idx_invalid_rows, idx_invalid_cols = np.where(
+                        np.logical_or(
+                            np.greater(data, data_max_ref), np.less(data, data_min_ref)
+                        )
+                    )
                    if len(idx_invalid_rows)
> 0:
                        if len(bad_idx_rows) > 0:
                            bad_idx_rows = np.hstack((bad_idx_rows, idx_invalid_rows))
@@ -1543,7 +1921,9 @@ class WaterData(object):
                         # Remove data not associated with the specified ping type
                         vel_2_filter[self.ping_type != p_type] = np.nan
                         # Apply filter to data of a single ping type
-                        idx_invalid_rows, idx_invalid_cols, threshold = self.iqr_filter(vel_2_filter)
+                        idx_invalid_rows, idx_invalid_cols, threshold = self.iqr_filter(
+                            vel_2_filter
+                        )
                         # Combine indices of invalid data for all ping types
                         if len(idx_invalid_rows) > 0:
                             if len(bad_idx_rows) > 0:
@@ -1554,11 +1934,12 @@ class WaterData(object):
                                 bad_idx_cols = idx_invalid_cols
                         thresholds[p_type] = threshold
                 self.w_filter_thresholds = thresholds
-            # Compute unique threshold for each transect when no ping types are available
+            # Compute unique threshold for each transect when no ping types
+            # are available
             else:
-                self.ping_type = np.array(['U'])
+                self.ping_type = np.array(["U"])
                 bad_idx_rows, bad_idx_cols, threshold = self.iqr_filter(w_vel)
-                self.w_filter_thresholds = {'U': threshold}
+                self.w_filter_thresholds = {"U": threshold}

         valid = copy.deepcopy(self.cells_above_sl)
         if len(bad_idx_rows) > 0:
@@ -1571,7 +1952,7 @@ class WaterData(object):

         # Combine all filter data and update processed properties
         self.all_valid_data()
-
+
     def filter_smooth(self, transect, setting):
         """Filter water speed using a smooth filter.

@@ -1582,13 +1963,14 @@ class WaterData(object):
         the raw data and the smoothed line are computed. The trimmed standard
         deviation is computed by selecting the number of residuals specified by
         "halfwidth" before the target point and after the target point, but not
-        including the target point. These values are then sorted, and the points
-        with the highest and lowest values are removed from the subset, and the
-        standard deviation of the trimmed subset is computed. The filter
-        criteria are determined by multiplying the standard deviation by a user
-        specified multiplier. This criteria defines a maximum and minimum
-        acceptable residual. Data falling outside the criteria are set to nan.
-
+        including the target point. These values are then sorted, and the
+        points with the highest and lowest values are removed from the subset,
+        and the standard deviation of the trimmed subset is computed.
+        The filter criteria are determined by multiplying the standard
+        deviation by a user-specified multiplier. These criteria define a
+        maximum and a minimum acceptable residual. Data falling outside the
+        criteria are set to nan.
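For orientation, the trimmed standard deviation described above can be sketched in isolation. This is a rough outline under stated assumptions (a 1-D residual series and a hypothetical half_width parameter), not the BoatData.run_std_trim implementation:

import numpy as np

def trimmed_std(residuals, half_width=10):
    # Sketch: trimmed standard deviation of the residuals around each point
    residuals = np.asarray(residuals, dtype=float)
    n = len(residuals)
    out = np.full(n, np.nan)
    for i in range(n):
        # Take up to half_width residuals before and after the target
        # point, excluding the target point itself
        window = np.concatenate((residuals[max(0, i - half_width):i],
                                 residuals[i + 1:i + 1 + half_width]))
        window = np.sort(window[~np.isnan(window)])
        if window.size > 2:
            # Drop the highest and lowest residual, then compute std dev
            out[i] = np.std(window[1:-1])
    return out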
+
+        Recommended filter settings are:
             filter_width = 10
             half_width = 10
             multiplier = 9

@@ -1601,22 +1983,22 @@ class WaterData(object):
         setting: str
             Set filter (Auto, Off)
         """
-
+
         self.smooth_filter = setting
         upper_limit = None
         lower_limit = None
         wt_bad_idx = None
-
+
         # Compute ens_time
         ens_time = np.nancumsum(transect.date_time.ens_duration_sec)
-
+
         # Determine if smooth filter should be applied
-        if self.smooth_filter == 'Auto':
-
+        if self.smooth_filter == "Auto":
+
             # Water velocity components
             w_vele = self.u_mps
             w_veln = self.v_mps
-
+
             # Set filter parameters
             filter_width = 10
             half_width = 10
@@ -1627,72 +2009,107 @@ class WaterData(object):
             w_vele_avg = np.nanmean(w_vele, 0)
             w_veln_avg = np.nanmean(w_veln, 0)
             _, speed = cart2pol(w_vele_avg, w_veln_avg)
-
+
             # Compute residuals from a robust Loess smooth
             speed_smooth = rloess(ens_time, speed, filter_width)
             speed_res = speed - speed_smooth
-
+
             # Apply a trimmed standard deviation filter multiple times
             for i in range(cycles):
                 fill_array = BoatData.run_std_trim(half_width, speed_res.T)
-
+
                 # Compute filter bounds
                 upper_limit = speed_smooth + multiplier * fill_array
                 lower_limit = speed_smooth - multiplier * fill_array
-
+
                 # Apply filter to residuals
-                wt_bad_idx = np.where((speed > upper_limit) or (speed < lower_limit))[0]
+                wt_bad_idx = np.where(
+                    np.logical_or(speed > upper_limit, speed < lower_limit)
+                )[0]
                 speed_res[wt_bad_idx] = np.nan
-
+
             valid = np.copy(self.cells_above_sl)
-
+
             valid[:, wt_bad_idx] = False
             self.valid_data[4, :, :] = valid
             self.smooth_upper_limit = upper_limit
             self.smooth_lower_limit = lower_limit
             self.smooth_speed = speed_smooth
-
+
         else:
             # No filter applied
             self.valid_data[4, :, :] = np.copy(self.cells_above_sl)
             self.smooth_upper_limit = np.nan
             self.smooth_lower_limit = np.nan
             self.smooth_speed = np.nan
-
+
         self.all_valid_data()
-
-    def filter_snr(self, setting):
+
+    def filter_snr(self, transect, setting):
         """Filters SonTek data based on SNR.

-        Computes the average SNR for all cells above the side lobe cutoff for each beam in
-        each ensemble. If the range in average SNR in an ensemble is greater than 12 dB the
-        water velocity in that ensemble is considered invalid.
+        Computes the average SNR for all cells above the side lobe cutoff for
+        each beam in each ensemble. If the range in average SNR in an
+        ensemble is greater than 12 dB the water velocity in that ensemble
+        is considered invalid.
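In isolation, the 12 dB test amounts to comparing the beam-averaged SNR values within each ensemble. A minimal sketch, assuming snr is shaped (beams, cells, ensembles) and cells_above_sl is a boolean mask over (cells, ensembles); this is an illustration, not the class code:

import numpy as np

def snr_range_invalid(snr, cells_above_sl, max_range_db=12.0):
    # Average SNR over cells above the side lobe for each beam and ensemble
    mask = np.where(cells_above_sl, 1.0, np.nan)
    snr_avg = np.nanmean(snr * mask, axis=1)            # (beams, ensembles)
    # Flag ensembles whose beam-to-beam SNR spread exceeds the threshold
    snr_rng = np.nanmax(snr_avg, axis=0) - np.nanmin(snr_avg, axis=0)
    return snr_rng > max_range_db                       # (ensembles,)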
 Parameters
         ----------
+        transect: TransectData
+            Object of TransectData
         setting: str
             Setting for filter (Auto, Off)
         """
-        self.snr_filter = setting
-
-        if setting == 'Auto':
-            if self.snr_rng is not None:
+        self.snr_filter = setting
+
+        if setting == "Auto":
+
+            # Determines if invalid data should use 3-beam computations
+            if self.snr_3beam_comp and self.d_filter != 3:
+
+                cells_above_sl = np.copy(self.cells_above_sl.astype(float))
+                cells_above_sl[cells_above_sl < 0.5] = np.nan
+                snr_adjusted = self.rssi * cells_above_sl
+                snr_average = np.nanmean(snr_adjusted, 1)
+
+                # Find invalid beams
+                snr_beam_invalid = (np.max(snr_average, axis=0) - snr_average) > 12
+
+                ens_bad_snr = np.any(snr_beam_invalid, axis=0)
+                valid = np.copy(self.cells_above_sl)
+
+                bad_snr_array = np.tile(ens_bad_snr, (valid.shape[0], 1))
+                valid[bad_snr_array] = False
+                w_vel_copy = copy.deepcopy(self)
+                beam_velocities = w_vel_copy.earth_to_beam(
+                    sensors=transect.sensors, adcp=transect.adcp
+                )
+                invalid_beam_value = np.tile(np.nan, valid.shape[0])
+
+                invalid_snr_idx = np.where(snr_beam_invalid)
+
+                beam_velocities[
+                    invalid_snr_idx[0], :, invalid_snr_idx[1]
+                ] = invalid_beam_value
+                self.snr_beam_velocities = beam_velocities
+
+            else:
                 bad_snr_idx = np.greater(self.snr_rng, 12)
                 valid = np.copy(self.cells_above_sl)
-
+
                 bad_snr_array = np.tile(bad_snr_idx, (valid.shape[0], 1))
                 valid[bad_snr_array] = False
-                self.valid_data[7, :, :] = valid

-                # Combine all filter data and update processed properties
-                self.all_valid_data()
-        else:
+            self.valid_data[7, :, :] = valid
+
+            # Combine all filter data and update processed properties
+            self.all_valid_data()
+        elif transect.adcp.manufacturer == "SonTek":
+            self.snr_beam_velocities = None
             self.valid_data[7, :, :] = np.copy(self.cells_above_sl)
             self.all_valid_data()
-
+
     def filter_wt_depth(self, transect, setting):
-        """Marks water velocity data invalid if there is no valid or interpolated average depth.
+        """Marks water velocity data invalid if there is no valid or
+        interpolated average depth.

         Parameters
         ----------
@@ -1703,16 +2120,17 @@ class WaterData(object):
         """
         self.wt_depth_filter = setting
         valid = np.copy(self.cells_above_sl)
-
+
         if setting:
             trans_select = getattr(transect.depths, transect.depths.selected)
             valid[:, np.isnan(trans_select.depth_processed_m)] = False
         self.valid_data[8, :, :] = valid
-
+
         self.all_valid_data()
-
+
     def filter_excluded(self, transect, setting):
-        """Marks all data invalid that are closer to the transducer than the setting.
+        """Marks all data invalid that are closer to the transducer than the
+        setting.

         Parameters
         ----------
@@ -1728,31 +2146,36 @@ class WaterData(object):
         cell_size = trans_select.depth_cell_size_m
         draft = trans_select.draft_use_m
         top_cell_depth = cell_depth - 0.5 * cell_size
-        threshold = np.round((setting+draft), 3)
+        threshold = np.round((setting + draft), 3)

         # Apply filter
         exclude = np.round(top_cell_depth, 3) <= threshold
         valid = np.copy(self.cells_above_sl)
         valid[exclude] = False
         self.valid_data[6, :, :] = valid
-
+
         # Set threshold property
         self.excluded_dist_m = setting
-
+
         self.all_valid_data()

-    def interpolate_abba(self, transect, search_loc=['above', 'below', 'before', 'after']):
-        """" Interpolates all data marked invalid using the abba interpolation algorithm.
+    def interpolate_abba(
+        self, transect, search_loc=("above", "below", "before", "after")
+    ):
+        """Interpolates all data marked invalid using the abba interpolation
+        algorithm.
 Parameters
         ----------
         transect: TransectData
             Object of TransectData
+        search_loc: tuple
+            Locations to search for abba interpolation
         """
         # Set properties
-        self.interpolate_cells = 'abba'
-        self.interpolate_ens = 'abba'
+        self.interpolate_cells = "abba"
+        self.interpolate_ens = "abba"

         # Get valid data based on all filters applied
         valid = self.valid_data[0, :, :]
@@ -1769,11 +2192,13 @@ class WaterData(object):
         u[np.logical_not(valid)] = np.nan
         v[np.logical_not(valid)] = np.nan

-        interpolated_data = self.compute_abba_interpolation(wt_data=self,
-                                                            data_list=[u, v],
-                                                            valid=valid,
-                                                            transect=transect,
-                                                            search_loc=search_loc)
+        interpolated_data = self.compute_abba_interpolation(
+            wt_data=self,
+            data_list=[u, v],
+            valid=valid,
+            transect=transect,
+            search_loc=search_loc,
+        )

         if interpolated_data is not None:
             # Incorporate interpolated values
@@ -1781,16 +2206,23 @@ class WaterData(object):
                 u[interpolated_data[0][n][0]] = interpolated_data[0][n][1]
                 v[interpolated_data[1][n][0]] = interpolated_data[1][n][1]

-            # Save interpolated data, while retaining of the ensembles including those that are not
-            # in the in_transect_idx array
+            # Save interpolated data, while retaining all of the ensembles
+            # including those that are not in the in_transect_idx array
             self.u_processed_mps[:, :] = np.nan
             self.v_processed_mps[:, :] = np.nan
             self.u_processed_mps[:, transect.in_transect_idx] = u
             self.v_processed_mps[:, transect.in_transect_idx] = v

     @staticmethod
-    def compute_abba_interpolation(wt_data, data_list, valid, transect, search_loc=['above', 'below', 'before', 'after']):
-        """Computes the interpolated values for invalid cells using the abba method.
+    def compute_abba_interpolation(
+        wt_data,
+        data_list,
+        valid,
+        transect,
+        search_loc=("above", "below", "before", "after"),
+    ):
+        """Computes the interpolated values for invalid cells using the abba
+        method.
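Conceptually, the abba method estimates an invalid cell from its nearest valid neighbors above, below, before, and after, weighted by inverse distance. A toy sketch of that weighting for a single cell; the real abba_idw_interpolation also normalizes the vertical coordinate and honors the side lobe cutoff:

import numpy as np

def idw_estimate(neighbor_values, neighbor_distances):
    # Inverse-distance weights: nearer neighbors count more
    values = np.asarray(neighbor_values, dtype=float)
    weights = 1.0 / np.asarray(neighbor_distances, dtype=float)
    return np.sum(weights * values) / np.sum(weights)

# Valid neighbors above, below, before, and after an invalid cell
u_fill = idw_estimate([0.52, 0.48, 0.55, 0.50], [0.25, 0.25, 1.0, 1.0])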
 Parameters
         ----------

@@ -1802,6 +2234,8 @@ class WaterData(object):
             Array indicating valid to be used for interpolation
         transect: TransectData
             Object of TransectData
+        search_loc: tuple
+            Locations to search for abba interpolation

         Returns
         -------
@@ -1818,33 +2252,40 @@ class WaterData(object):

         if not np.all(valid_cells) and np.nansum(boat_valid) > 1:
             # Compute distance along shiptrack to be used in interpolation
-            distance_along_shiptrack = transect.boat_vel.compute_boat_track(transect)['distance_m']
+            distance_along_shiptrack = transect.boat_vel.compute_boat_track(transect)[
+                "distance_m"
+            ]

-            # Where there is invalid boat speed at beginning or end of transect mark the distance nan to avoid
+            # Where there is invalid boat speed at beginning or end of
+            # transect mark the distance nan to avoid
             # interpolating velocities that won't be used for discharge
             if type(distance_along_shiptrack) is np.ndarray:
-                distance_along_shiptrack[0:np.argmax(boat_valid == True)] = np.nan
-                end_nan = np.argmax(np.flip(boat_valid) == True)
+                distance_along_shiptrack[0: np.argmax(boat_valid)] = np.nan
+                end_nan = np.argmax(np.flip(boat_valid))
                 if end_nan > 0:
                     distance_along_shiptrack[-1 * end_nan:] = np.nan

-            # if type(distance_along_shiptrack) is np.ndarray:
+            # if type(distance_along_shiptrack) is np.ndarray:
             depth_selected = getattr(transect.depths, transect.depths.selected)
             cells_above_sl = wt_data.valid_data[6, :, :]
             cells_above_sl = cells_above_sl[:, transect.in_transect_idx]
-            # Interpolate values for invalid cells with from neighboring data
-            interpolated_data = abba_idw_interpolation(data_list=data_list,
-                                                       valid_data=valid,
-                                                       cells_above_sl=cells_above_sl,
-                                                       y_centers=
-                                                       depth_selected.depth_cell_depth_m[:, transect.in_transect_idx],
-                                                       y_cell_size=
-                                                       depth_selected.depth_cell_size_m[:, transect.in_transect_idx],
-                                                       y_depth=
-                                                       depth_selected.depth_processed_m[transect.in_transect_idx],
-                                                       x_shiptrack=distance_along_shiptrack,
-                                                       search_loc=search_loc,
-                                                       normalize=True)
+            # Interpolate values for invalid cells from neighboring
+            # data
+            interpolated_data = abba_idw_interpolation(
+                data_list=data_list,
+                valid_data=valid,
+                cells_above_sl=cells_above_sl,
+                y_centers=depth_selected.depth_cell_depth_m[
+                    :, transect.in_transect_idx
+                ],
+                y_cell_size=depth_selected.depth_cell_size_m[
+                    :, transect.in_transect_idx
+                ],
+                y_depth=depth_selected.depth_processed_m[transect.in_transect_idx],
+                x_shiptrack=distance_along_shiptrack,
+                search_loc=search_loc,
+                normalize=True,
+            )
             return interpolated_data
         else:
             return None
@@ -1852,57 +2293,58 @@ class WaterData(object):
             return None

     def interpolate_ens_next(self):
-        """Applies data from the next valid ensemble for ensembles with invalid water velocities.
+        """Applies data from the next valid ensemble for ensembles with
+        invalid water velocities.
""" # Set interpolation property for ensembles - self.interpolate_ens = 'ExpandedT' - + self.interpolate_ens = "ExpandedT" + # Set processed data to nan for all invalid data valid = self.valid_data[0] self.u_processed_mps = np.copy(self.u_mps) self.v_processed_mps = np.copy(self.v_mps) - self.u_processed_mps[valid == False] = np.nan - self.v_processed_mps[valid == False] = np.nan - + self.u_processed_mps[np.logical_not(valid)] = np.nan + self.v_processed_mps[np.logical_not(valid)] = np.nan + # Identifying ensembles with no valid data valid_ens = np.any(valid, axis=0) n_ens = len(valid_ens) - + # Set the invalid ensembles to the data in the next valid ensemble - for n in np.arange(0, n_ens-1)[::-1]: + for n in np.arange(0, n_ens - 1)[::-1]: if not valid_ens[n]: - self.u_processed_mps[:, n] = self.u_processed_mps[:, n+1] - self.v_processed_mps[:, n] = self.v_processed_mps[:, n+1] - + self.u_processed_mps[:, n] = self.u_processed_mps[:, n + 1] + self.v_processed_mps[:, n] = self.v_processed_mps[:, n + 1] + def interpolate_ens_hold_last(self): """Interpolates velocity data for invalid ensembles by repeating the the last valid data until new valid data is found """ - - self.interpolate_ens = 'HoldLast' - + + self.interpolate_ens = "HoldLast" + valid = self.valid_data[0] - + # Initialize processed velocity data variables self.u_processed_mps = np.copy(self.u_mps) self.v_processed_mps = np.copy(self.v_mps) - + # Set invalid data to nan in processed velocity data variables - self.u_processed_mps[valid == False] = np.nan - self.v_processed_mps[valid == False] = np.nan - + self.u_processed_mps[np.logical_not(valid)] = np.nan + self.v_processed_mps[np.logical_not(valid)] = np.nan + # Determine ensembles with valid data valid_ens = np.any(valid, axis=0) - + # Process each ensemble beginning with the second ensemble n_ens = len(valid_ens) - + for n in np.arange(1, n_ens): # If ensemble is invalid fill in with previous ensemble if not valid_ens[n]: - self.u_processed_mps[:, n] = self.u_processed_mps[:, n-1] - self.v_processed_mps[:, n] = self.v_processed_mps[:, n-1] + self.u_processed_mps[:, n] = self.u_processed_mps[:, n - 1] + self.v_processed_mps[:, n] = self.v_processed_mps[:, n - 1] def interpolate_ens_hold_last_9(self): """Apply SonTek's approach to invalid data. @@ -1913,56 +2355,56 @@ class WaterData(object): ensembles beyond the 9th remain invalid. This is for compatibility with SonTek RiverSurveyor Live. 
""" - - self.interpolate_ens = 'Hold9' - + + self.interpolate_ens = "Hold9" + valid = self.valid_data[0] - + # Initialize processed velocity data variables self.u_processed_mps = np.copy(self.u_mps) self.v_processed_mps = np.copy(self.v_mps) - + # Set invalid data to nan in processed velocity data variables - self.u_processed_mps[valid == False] = np.nan - self.v_processed_mps[valid == False] = np.nan - + self.u_processed_mps[np.logical_not(valid)] = np.nan + self.v_processed_mps[np.logical_not(valid)] = np.nan + # Determine ensembles with valid data valid_ens = np.any(valid, axis=0) - + # Process each ensemble beginning with the second ensemble n_ens = len(valid_ens) n_invalid = 0 - + for n in np.arange(1, n_ens): # If ensemble is invalid fill in with previous ensemble - if valid_ens[n] == False and n_invalid < 10: + if not valid_ens[n] and n_invalid < 10: n_invalid += 1 - self.u_processed_mps[:, n] = self.u_processed_mps[:, n-1] - self.v_processed_mps[:, n] = self.v_processed_mps[:, n-1] + self.u_processed_mps[:, n] = self.u_processed_mps[:, n - 1] + self.v_processed_mps[:, n] = self.v_processed_mps[:, n - 1] else: n_invalid = 0 def interpolate_ens_none(self): """Applies no interpolation for invalid ensembles.""" - - self.interpolate_ens = 'None' - + + self.interpolate_ens = "None" + valid = self.valid_data[0] - + # Initialize processed velocity data variables self.u_processed_mps = np.copy(self.u_mps) self.v_processed_mps = np.copy(self.v_mps) - + # Set invalid data to nan in processed velocity data variables - self.u_processed_mps[valid == False] = np.nan - self.v_processed_mps[valid == False] = np.nan + self.u_processed_mps[np.logical_not(valid)] = np.nan + self.v_processed_mps[np.logical_not(valid)] = np.nan def interpolate_cells_none(self): """Applies no interpolation for invalid cells that are not part of an invalid ensemble.""" - self.interpolate_cells = 'None' - + self.interpolate_cells = "None" + valid = self.valid_data[0] # Determine ensembles with valid data @@ -1974,21 +2416,21 @@ class WaterData(object): # Initialize processed velocity data variables self.u_processed_mps = np.copy(self.u_mps) self.v_processed_mps = np.copy(self.v_mps) - + for n in range(n_ens): # If ensemble is invalid fill in with previous ensemble if valid_ens[n]: invalid_cells = np.logical_not(valid[:, n]) - self.u_processed_mps[invalid_cells, - n] = np.nan - self.v_processed_mps[invalid_cells, - n] = np.nan - + self.u_processed_mps[invalid_cells, n] = np.nan + self.v_processed_mps[invalid_cells, n] = np.nan + def interpolate_ens_linear(self, transect): - """Uses 2D linear interpolation to estimate values for invalid ensembles. + """Uses 2D linear interpolation to estimate values for invalid + ensembles. Use linear interpolation as computed by scipy's interpolation - function to interpolated velocity data for ensembles with no valid velocities. + function to interpolated velocity data for ensembles with no valid + velocities. 
 Parameters
         ----------
@@ -1996,8 +2438,8 @@ class WaterData(object):
             Object of TransectData
         """

-        self.interpolate_ens = 'Linear'
-
+        self.interpolate_ens = "Linear"
+
         valid = self.valid_data[0, :, :]

         # Initialize processed velocity data variables
@@ -2006,16 +2448,17 @@ class WaterData(object):
         # Determine ensembles with valid data
         valid_ens = np.any(valid, 0)
-
+
         if np.sum(valid_ens) > 1:
-            # Determine the number of ensembles
-            # n_ens = len(valid_ens)
-
             trans_select = getattr(transect.depths, transect.depths.selected)
             # Compute z
-            z = np.divide(np.subtract(trans_select.depth_processed_m, trans_select.depth_cell_depth_m),
-                          trans_select.depth_processed_m)
-
+            z = np.divide(
+                np.subtract(
+                    trans_select.depth_processed_m, trans_select.depth_cell_depth_m
+                ),
+                trans_select.depth_processed_m,
+            )
+
             # Create position array
             boat_select = getattr(transect.boat_vel, transect.boat_vel.selected)
             if boat_select is not None:
@@ -2024,32 +2467,43 @@ class WaterData(object):
                     boat_vel_y = boat_select.v_processed_mps
                     track_x = boat_vel_x * transect.date_time.ens_duration_sec
                     track_y = boat_vel_y * transect.date_time.ens_duration_sec
-                    track = np.nancumsum(np.sqrt(track_x**2 + track_y**2))
+                    track = np.nancumsum(np.sqrt(track_x ** 2 + track_y ** 2))
                     track_array = np.tile(track, (self.u_processed_mps.shape[0], 1))
-
+
                     # Determine index of all valid data
-                    valid_z = np.isnan(z) == False
+                    valid_z = np.logical_not(np.isnan(z))
                     valid_combined = np.logical_and(valid, valid_z)

-                    u = interpolate.griddata(np.vstack((z[valid_combined], track_array[valid_combined])).T,
-                                             self.u_processed_mps[valid_combined],
-                                             (z, track_array))
-
-                    v = interpolate.griddata(np.vstack((z[valid_combined], track_array[valid_combined])).T,
-                                             self.v_processed_mps[valid_combined],
-                                             (z, track_array))
+                    u = interpolate.griddata(
+                        np.vstack((z[valid_combined], track_array[valid_combined])).T,
+                        self.u_processed_mps[valid_combined],
+                        (z, track_array),
+                    )
+
+                    v = interpolate.griddata(
+                        np.vstack((z[valid_combined], track_array[valid_combined])).T,
+                        self.v_processed_mps[valid_combined],
+                        (z, track_array),
+                    )

                     self.u_processed_mps = np.tile(np.nan, self.u_mps.shape)
-                    self.u_processed_mps = np.tile(np.nan, self.u_mps.shape)
-                    processed_valid_cells = self.estimate_processed_valid_cells(transect)
-                    self.u_processed_mps[processed_valid_cells] = u[processed_valid_cells]
-                    self.v_processed_mps[processed_valid_cells] = v[processed_valid_cells]
+                    self.v_processed_mps = np.tile(np.nan, self.u_mps.shape)
+                    processed_valid_cells = self.estimate_processed_valid_cells(
+                        transect
+                    )
+                    self.u_processed_mps[processed_valid_cells] = u[
+                        processed_valid_cells
+                    ]
+                    self.v_processed_mps[processed_valid_cells] = v[
+                        processed_valid_cells
+                    ]

     def interpolate_cells_linear(self, transect):
         """Uses 2D linear interpolation to estimate values for invalid cells.

         Use linear interpolation as computed by scipy's interpolation
-        function to interpolated velocity data for cells with no valid velocities.
+        function to interpolate velocity data for cells with no valid
+        velocities.
 Parameters
         ----------
@@ -2057,7 +2511,7 @@ class WaterData(object):
             Object of TransectData
         """

-        self.interpolate_ens = 'Linear'
+        self.interpolate_cells = "Linear"

         valid = self.valid_data[0, :, :]

         trans_select = getattr(transect.depths, transect.depths.selected)

         # Compute z
-        z = np.divide(np.subtract(trans_select.depth_processed_m, trans_select.depth_cell_depth_m),
-                      trans_select.depth_processed_m)
+        z = np.divide(
+            np.subtract(
+                trans_select.depth_processed_m, trans_select.depth_cell_depth_m
+            ),
+            trans_select.depth_processed_m,
+        )

         # Create position array
         boat_select = getattr(transect.boat_vel, transect.boat_vel.selected)
@@ -2083,18 +2541,24 @@ class WaterData(object):
             track_array = np.tile(track, (self.u_processed_mps.shape[0], 1))

             # Determine index of all valid data
-            valid_z = np.isnan(z) == False
+            valid_z = np.logical_not(np.isnan(z))
             valid_combined = np.logical_and(valid, valid_z)

-            u = interpolate.griddata(np.array([z[valid_combined].ravel(),
-                                               track_array[valid_combined].ravel()]).T,
-                                     self.u_processed_mps[valid_combined].ravel(),
-                                     (z, track_array))
-
-            v = interpolate.griddata(np.array([z[valid_combined].ravel(),
-                                               track_array[valid_combined].ravel()]).T,
-                                     self.v_processed_mps[valid_combined].ravel(),
-                                     (z, track_array))
+            u = interpolate.griddata(
+                np.array(
+                    [z[valid_combined].ravel(), track_array[valid_combined].ravel()]
+                ).T,
+                self.u_processed_mps[valid_combined].ravel(),
+                (z, track_array),
+            )
+
+            v = interpolate.griddata(
+                np.array(
+                    [z[valid_combined].ravel(), track_array[valid_combined].ravel()]
+                ).T,
+                self.v_processed_mps[valid_combined].ravel(),
+                (z, track_array),
+            )

             self.u_processed_mps = np.tile(np.nan, self.u_mps.shape)
             self.v_processed_mps = np.tile(np.nan, self.u_mps.shape)
             processed_valid_cells = self.estimate_processed_valid_cells(transect)
             self.u_processed_mps[processed_valid_cells] = u[processed_valid_cells]
             self.v_processed_mps[processed_valid_cells] = v[processed_valid_cells]

     def interpolate_cells_trdi(self, transect):
-        """Interpolates values for invalid cells using methods similar to WinRiver II.
+        """Interpolates values for invalid cells using methods similar to
+        WinRiver II.

         This function computes the velocity for the invalid cells using
         the methods in WinRiver II, but applied to velocity components.
@@ -2111,7 +2576,8 @@ class WaterData(object):
         more correct, mathematically applying to discharge or velocity
         components is identical. By applying to velocity components the user
         can see the velocity data interpolated.
-        Power fit uses the power fit equation and no slip uses linear interpolation.
+        Power fit uses the power fit equation and no slip uses linear
+        interpolation.
Parameters ---------- @@ -2120,7 +2586,7 @@ class WaterData(object): """ # Set property - self.interpolate_cells = 'TRDI' + self.interpolate_cells = "TRDI" # Construct variables depths = getattr(transect.depths, transect.depths.selected) @@ -2138,41 +2604,59 @@ class WaterData(object): for n in range(n_ens): # Identify first and last valid depth cell - idx = np.where(valid[:, n] == True)[0] + idx = np.where(valid[:, n])[0] if len(idx) > 0: idx_first = idx[0] idx_last = idx[-1] - idx_middle = np.where(valid[idx_first:idx_last + 1, n] == False)[0] + idx_middle = np.where( + np.logical_not(valid[idx_first: idx_last + 1, n]) + )[0] - # For invalid middle depth cells perform interpolation based on bottom method + # For invalid middle depth cells perform interpolation based + # on bottom method if len(idx_middle) > 0: idx_middle = idx_middle + idx_first z_adj[idx_middle, n] = z_all[idx_middle, n] # Interpolate velocities using power fit - if bot_method == 'Power': + if bot_method == "Power": # Compute interpolated u-velocities z2 = z[:, n] - (0.5 * cell_size[:, n]) z2[nan_less(z2, 0)] = np.nan - coef = ((exponent + 1) * np.nansum(self.u_processed_mps[:, n] * cell_size[:, n], 0)) / \ - np.nansum(((z[:, n] + 0.5 * cell_size[:, n]) ** (exponent + 1)) - (z2 ** (exponent + 1)), 0) + coef = ( + (exponent + 1) + * np.nansum(self.u_processed_mps[:, n] * cell_size[:, n], 0) + ) / np.nansum( + ((z[:, n] + 0.5 * cell_size[:, n]) ** (exponent + 1)) + - (z2 ** (exponent + 1)), + 0, + ) temp = coef * z_adj[:, n] ** exponent self.u_processed_mps[idx_middle, n] = temp[idx_middle] # Compute interpolated v-Velocities - coef = ((exponent + 1) * np.nansum(self.v_processed_mps[:, n] * cell_size[:, n])) / \ - np.nansum(((z[:, n] + 0.5 * cell_size[:, n]) ** (exponent + 1)) - (z2 ** (exponent + 1))) + coef = ( + (exponent + 1) + * np.nansum(self.v_processed_mps[:, n] * cell_size[:, n]) + ) / np.nansum( + ((z[:, n] + 0.5 * cell_size[:, n]) ** (exponent + 1)) + - (z2 ** (exponent + 1)) + ) temp = coef * z_adj[:, n] ** exponent self.v_processed_mps[idx_middle, n] = temp[idx_middle] # Interpolate velocities using linear interpolation - elif bot_method == 'No Slip': - self.u_processed_mps[idx_middle, n] = np.interp(x=cell_depth[idx_middle, n], - xp=cell_depth[valid[:, n], n], - fp=self.u_processed_mps[valid[:, n], n]) - self.v_processed_mps[idx_middle, n] = np.interp(x=cell_depth[idx_middle, n], - xp=cell_depth[valid[:, n], n], - fp=self.v_processed_mps[valid[:, n], n]) + elif bot_method == "No Slip": + self.u_processed_mps[idx_middle, n] = np.interp( + x=cell_depth[idx_middle, n], + xp=cell_depth[valid[:, n], n], + fp=self.u_processed_mps[valid[:, n], n], + ) + self.v_processed_mps[idx_middle, n] = np.interp( + x=cell_depth[idx_middle, n], + xp=cell_depth[valid[:, n], n], + fp=self.v_processed_mps[valid[:, n], n], + ) def estimate_processed_valid_cells(self, transect): """Estimate the number of valid cells for invalid ensembles. 
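In the power-fit branch above, a single coefficient is chosen so the power law reproduces the measured portion of the profile, coef = (m + 1) * sum(u * h) / sum((z + h/2)**(m + 1) - (z - h/2)**(m + 1)), after which an invalid cell at height z above the bed gets u = coef * z**m. A one-ensemble numeric sketch with toy values, not transect data:

import numpy as np

m = 0.1667                                   # power-fit exponent
u = np.array([0.60, 0.58, np.nan, 0.52])     # cell velocities, one invalid
h = np.full(4, 0.25)                         # cell sizes (m)
z_all = np.array([1.9, 1.65, 1.4, 1.15])     # cell-center height above bed (m)
z = np.where(np.isnan(u), np.nan, z_all)     # fit uses only valid cells

coef = ((m + 1) * np.nansum(u * h)) / np.nansum(
    (z + 0.5 * h) ** (m + 1) - (z - 0.5 * h) ** (m + 1)
)
u_filled = np.where(np.isnan(u), coef * z_all ** m, u)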
@@ -2196,11 +2680,11 @@ class WaterData(object): for n in range(n_invalid): # Find nearest valid ensembles on either side of invalid ensemble - idx1 = np.where(valid_data_sum[:invalid_ens_idx[n]] > 0)[0] + idx1 = np.where(valid_data_sum[: invalid_ens_idx[n]] > 0)[0] if len(idx1) > 0: idx1 = idx1[-1] # Find the last cell in the neighboring valid ensembles - idx1_cell = np.where(processed_valid_cells[:, idx1] == True)[0][-1] + idx1_cell = np.where(processed_valid_cells[:, idx1])[0][-1] # Determine valid cells for invalid ensemble idx1_cell_depth = depth_cell_depth[idx1_cell, idx1] else: @@ -2211,14 +2695,16 @@ class WaterData(object): idx2 = idx2[0] idx2 = invalid_ens_idx[n] + idx2 # Find the last cell in the neighboring valid ensembles - idx2_cell = np.where(processed_valid_cells[:, idx2] == True)[0][-1] + idx2_cell = np.where(processed_valid_cells[:, idx2])[0][-1] # Determine valid cells for invalid ensemble idx2_cell_depth = depth_cell_depth[idx2_cell, idx2] else: idx2_cell_depth = 0 cutoff = np.nanmax([idx1_cell_depth, idx2_cell_depth]) - processed_valid_cells[depth_cell_depth[:, invalid_ens_idx[n]] < cutoff, invalid_ens_idx[n]] = True + processed_valid_cells[ + depth_cell_depth[:, invalid_ens_idx[n]] < cutoff, invalid_ens_idx[n] + ] = True # Apply excluded distance processed_valid_cells = processed_valid_cells * self.valid_data[6, :, :] @@ -2229,7 +2715,7 @@ class WaterData(object): """Computes the range between the average snr for all beams. The average is computed using only data above the side lobe cutoff. """ - if self.rssi_units == 'SNR': + if self.rssi_units == "SNR": cells_above_sl = np.copy(self.cells_above_sl.astype(float)) cells_above_sl[cells_above_sl < 0.5] = np.nan snr_adjusted = self.rssi * cells_above_sl @@ -2237,15 +2723,15 @@ class WaterData(object): self.snr_rng = np.nanmax(snr_average, 0) - np.nanmin(snr_average, 0) def automated_beam_filter_old(self): - """Older version of automatic beam filter. Not currently used. - """ + """Older version of automatic beam filter. Not currently used.""" - # Create array indicating which cells do not have 4-beam solutions and all cells below side lobe are nan + # Create array indicating which cells do not have 4-beam solutions + # and all cells below side lobe are nan temp = copy.deepcopy(self) temp.filter_beam(4) valid_bool = temp.valid_data[5, :, :] valid = valid_bool.astype(float) - valid[temp.cells_above_sl == False] = np.nan + valid[np.logical_not(temp.cells_above_sl)] = np.nan # Find cells with 3 beams solutions rows_3b, cols_3b = np.where(np.abs(valid) == 0) @@ -2257,24 +2743,36 @@ class WaterData(object): valid_v = temp.v_mps[valid == 1] # Use interpolate water velocity of cells with 3 beam solutions - # The following code duplicates Matlab scatteredInterpolant which seems to only estimate along columns - # as long as there is data in the ensemble above and below the value being estimated. + # The following code duplicates Matlab scatteredInterpolant which + # seems to only estimate along columns as long as there is data in + # the ensemble above and below the value being estimated. row_numbers = np.linspace(0, valid.shape[0] - 1, valid.shape[0]) n = 0 for col in cols_3b: - # If the cell has valid data above and below it linearly interpolate using data in that ensemble. + # If the cell has valid data above and below it linearly + # interpolate using data in that ensemble. # If not, use other means of interpolation. 
- if np.any(valid_bool[rows_3b[n] + 1::, col]) and np.any(valid_bool[0:rows_3b[n], col]): - est_u = np.interp(x=rows_3b[n], - xp=row_numbers[valid_bool[:, col]], - fp=temp.u_mps[valid_bool[:, col], col]) - - est_v = np.interp(x=rows_3b[n], - xp=row_numbers[valid_bool[:, col]], - fp=temp.v_mps[valid_bool[:, col], col]) + if np.any(valid_bool[rows_3b[n] + 1::, col]) and np.any( + valid_bool[0: rows_3b[n], col] + ): + est_u = np.interp( + x=rows_3b[n], + xp=row_numbers[valid_bool[:, col]], + fp=temp.u_mps[valid_bool[:, col], col], + ) + + est_v = np.interp( + x=rows_3b[n], + xp=row_numbers[valid_bool[:, col]], + fp=temp.v_mps[valid_bool[:, col], col], + ) else: - est_u = interpolate.griddata(np.array((valid_rows, valid_cols)).T, valid_u, (col, rows_3b[n])) - est_v = interpolate.griddata(np.array((valid_cols, valid_rows)).T, valid_v, (col, rows_3b[n])) + est_u = interpolate.griddata( + np.array((valid_rows, valid_cols)).T, valid_u, (col, rows_3b[n]) + ) + est_v = interpolate.griddata( + np.array((valid_cols, valid_rows)).T, valid_v, (col, rows_3b[n]) + ) u_ratio = (temp.u_mps[rows_3b[n], col] / est_u) - 1 v_ratio = (temp.v_mps[rows_3b[n], col] / est_v) - 1 @@ -2291,79 +2789,9 @@ class WaterData(object): self.all_valid_data() -# Code from Aurelien - def interpolate_cells_above(self, transect): - """Interpolates values for invalid cells using below valid cell - Written by Aurelien Despax - Modified by dsm - - Parameters - ---------- - transect: TransectData - Object of TransectData - """ - - # Set property - self.interpolate_cells = 'Above' - - # Construct variables - - valid = self.valid_data[0] - n_cells, n_ens = self.u_processed_mps.shape - - for n in range(n_ens): - - # Identify first and last valid depth cell - idx = np.where(valid[:, n] == True)[0] - if len(idx) > 0: - idx_first = idx[0] - idx_last = idx[-1] - idx_middle = np.where(valid[idx_first:idx_last + 1, n] == False)[0] - - # For invalid middle depth cells assign value of shallower valid depth cell - # TODO this assigns the value of the shallowest depth cell not the next valid depth cell - if len(idx_middle) > 0: - idx_middle = idx_middle + idx_first - self.u_processed_mps[idx_middle, n] = self.u_processed_mps[idx_first, n] - self.v_processed_mps[idx_middle, n] = self.v_processed_mps[idx_first, n] - - def interpolate_cells_below(self, transect): - """Interpolates values for invalid cells using above valid cell - Written by Aurelien Despax - Modified by dsm - - Parameters - ---------- - transect: TransectData - Object of TransectData - """ - - # Set property - self.interpolate_cells = 'Below' - - # Construct variables - valid = self.valid_data[0] - n_cells, n_ens = self.u_processed_mps.shape - - for n in range(n_ens): - - # Identify first and last valid depth cell - idx = np.where(valid[:, n] == True)[0] - if len(idx) > 0: - idx_first = idx[0] - idx_last = idx[-1] - idx_middle = np.where(valid[idx_first:idx_last + 1, n] == False)[0] - - # For invalid middle depth cells assign the value of the next deeper valid depth cells - # TODO this assigns the value of the shallowest depth cell not the next valid depth cell - if len(idx_middle) > 0: - idx_middle = idx_middle + idx_first - self.u_processed_mps[idx_middle, n] = self.u_processed_mps[idx_last, n] - self.v_processed_mps[idx_middle, n] = self.v_processed_mps[idx_last, n] - - def interpolate_cells_before(self, transect): - """Interpolates values for invalid cells using above valid cell - Written by Aurelien Despax + def interpolate_cells_froude(self, transect): + """Uses Froude 
interpolation to estimate values for invalid + cells. Parameters ---------- @@ -2371,75 +2799,113 @@ class WaterData(object): Object of TransectData """ - # Set property - self.interpolate_cells = 'Before' + self.interpolate_ens = "Froude" # Construct variables - depths = getattr(transect.depths, transect.depths.selected) - valid = self.valid_data[0] - cell_depth = depths.depth_cell_depth_m - z_all = np.subtract(depths.depth_processed_m, cell_depth) - z = np.copy(z_all) - z[np.isnan(self.u_processed_mps)] = np.nan - z_adj = np.tile(np.nan, z.shape) - n_cells, n_ens = self.u_processed_mps.shape - - for n in range(n_ens): + g = 9.80665 - # Identify first and last valid depth cell - idx = np.where(valid[:, n] == True)[0] - if len(idx) > 0: - idx_first = idx[0] - idx_last = idx[-1] - idx_middle = np.where(valid[idx_first:idx_last + 1, n] == False)[0] - - # For invalid middle depth cells perform interpolation based on bottom method - if len(idx_middle) > 0: - idx_middle = idx_middle + idx_first - z_adj[idx_middle, n] = z_all[idx_middle, n] - - # Interpolate velocities using linear interpolation - self.u_processed_mps[idx_middle, n] = self.u_processed_mps[idx_middle, n - 1] - self.v_processed_mps[idx_middle, n] = self.v_processed_mps[idx_middle, n - 1] - - def interpolate_cells_after(self, transect): - """Interpolates values for invalid cells using above valid cell - Written by Aurelien Despax - - Parameters - ---------- - transect: TransectData - Object of TransectData - """ + valid = self.valid_data[0, :, :] - # Set property - self.interpolate_cells = 'After' + # Initialize processed velocity data variables + u = np.copy(self.u_mps) + v = np.copy(self.v_mps) + + # Compute mean velocity components in each ensemble + w_vel_mean_u = np.nanmean(u, 0) + w_vel_mean_v = np.nanmean(v, 0) + + # Compute a unit vector + direction, _ = cart2pol(w_vel_mean_u, w_vel_mean_v) + unit_vec_1, unit_vec_2 = pol2cart(direction, 1) + unit_vec = np.vstack([unit_vec_1, unit_vec_2]) + + # Compute the velocity magnitude in the direction of the mean velocity of each + # ensemble using the dot product and unit vector + w_vel_prim = np.tile([np.nan], u.shape) + w_vel_sec = np.tile([np.nan], u.shape) + for i in range(u.shape[0]): + w_vel_prim[i, :] = np.sum( + np.vstack([u[i, :], v[i, :]]) * unit_vec, 0 + ) + w_vel_sec[i, :] = ( + unit_vec_2 * u[i, :] - unit_vec_1 * v[i, :] + ) + + w_vel_mean_prim = np.nanmean(w_vel_prim, axis=0) - # Construct variables - depths = getattr(transect.depths, transect.depths.selected) - valid = self.valid_data[0] - cell_depth = depths.depth_cell_depth_m - z_all = np.subtract(depths.depth_processed_m, cell_depth) - z = np.copy(z_all) - z[np.isnan(self.u_processed_mps)] = np.nan - z_adj = np.tile(np.nan, z.shape) - n_cells, n_ens = self.u_processed_mps.shape + # Determine ensembles with valid data + valid_ens = np.any(valid, 0) - for n in list(reversed(list(range(n_ens)))): + if np.sum(valid_ens) > 1: + trans_select = getattr(transect.depths, transect.depths.selected) + # Compute z + z = np.divide( + np.subtract( + trans_select.depth_processed_m, trans_select.depth_cell_depth_m + ), + trans_select.depth_processed_m, + ) - # Identify first and last valid depth cell - idx = np.where(valid[:, n] == True)[0] - if len(idx) > 0: - idx_first = idx[0] - idx_last = idx[-1] - idx_middle = np.where(valid[idx_first:idx_last + 1, n] == False)[0] + # Create position array + boat_select = getattr(transect.boat_vel, transect.boat_vel.selected) + if boat_select is not None: + if 
np.nansum(boat_select.valid_data[0]) > 0:
+                boat_vel_x = boat_select.u_processed_mps
+                boat_vel_y = boat_select.v_processed_mps
+                track_x = boat_vel_x * transect.date_time.ens_duration_sec
+                track_y = boat_vel_y * transect.date_time.ens_duration_sec
+                track = np.nancumsum(np.sqrt(track_x ** 2 + track_y ** 2))
+                track_array = np.tile(track, (self.u_processed_mps.shape[0], 1))
+
+                # Determine index of all valid data
+                valid_z = np.logical_not(np.isnan(z))
+                valid_combined = np.logical_and(valid, valid_z)
+
+                # Compute Froude
+                depth_transect = trans_select.depth_processed_m
+                froude = np.divide(w_vel_mean_prim,
+                                   np.sqrt(np.multiply(g, depth_transect)))
+                froude[np.logical_not(valid_ens)] = np.nan
+
+                # Interpolate Froude values
+                interp_froude = interpolate.griddata(track[valid_ens],
+                                                     froude[valid_ens],
+                                                     track)
+
+                # Compute mean primary velocity
+                interp_mean_ens_prim = np.multiply(
+                    interp_froude,
+                    np.sqrt(np.multiply(g, depth_transect))
+                )
+                # TODO find a better way to extend Froude to velocity
+                interp_w_vel_prim = np.tile(interp_mean_ens_prim,
+                                            (self.u_processed_mps.shape[0], 1))
+
+                # Linear interpolation for secondary velocity
+                interp_w_vel_sec = interpolate.griddata(
+                    np.vstack((z[valid_combined], track_array[valid_combined])).T,
+                    w_vel_sec[valid_combined],
+                    (z, track_array),
+                )
+
+                # Define interpolated cells
+                processed_valid_cells = self.estimate_processed_valid_cells(
+                    transect
+                )
+                interp_cells = np.logical_xor(processed_valid_cells, valid_combined)
+
+                # Replace with interpolated values
+                w_vel_prim[interp_cells] = interp_w_vel_prim[interp_cells]
+                w_vel_sec[interp_cells] = interp_w_vel_sec[interp_cells]
+
+                # Initialize processed velocity data variables
+                self.u_processed_mps = np.copy(self.u_mps)
+                self.v_processed_mps = np.copy(self.v_mps)
+
+                for i in range(u.shape[0]):
+                    self.u_processed_mps[i, :] = np.sum(
+                        np.vstack([w_vel_prim[i, :], w_vel_sec[i, :]]) * unit_vec, 0
+                    )
+                    self.v_processed_mps[i, :] = (
+                        unit_vec_2 * w_vel_prim[i, :] - unit_vec_1 * w_vel_sec[i, :]
+                    )
diff --git a/Classes/stickysettings.py b/Classes/stickysettings.py
index 8900467..6888ac0 100644
--- a/Classes/stickysettings.py
+++ b/Classes/stickysettings.py
@@ -3,14 +3,17 @@ import json


 class StickySettings(object):
-    """Provides methods to quickly store and retrieve settings to and from disk.
+    """Provides methods to quickly store and retrieve settings to and from
+    disk.

-    This class is intended to be used to store simple settings that need to be retained between session of the subject
-    application, such as, last folder opened, or units setting. Any setting that the application needs to know
+    This class is intended to be used to store simple settings that need to
+    be retained between sessions of the subject application, such as the last
+    folder opened or the units setting. Any setting that the application
+    needs to know
     when it is run again can be stored using the methods in this class.
-    Data are stored a dictionary which is then written to a json file having the filename provided by the user and
-    stored in the folder defined by the APPDATA environment variable.
+    Data are stored in a dictionary which is then written to a json file
+    having the filename provided by the user and stored in the folder defined
+    by the APPDATA environment variable.

     Note
     ----
@@ -28,32 +31,35 @@ class StickySettings(object):
     def __init__(self, arg):
         """Constructor method which establishes the json file.

-        If the filename (arg) provided by the user cannot be found a new file is created. If the filename (arg)
-        provided by the user is found the file is opened and all keys and values are read and stored in settings for
-        quick modification by the calling application.
+        If the filename (arg) provided by the user cannot be found a new file
+        is created. If the filename (arg) provided by the user is found
+        the file is opened and all keys and values are read and stored
+        in settings for quick modification by the calling application.

         Parameters
         ----------
         arg : str
-            User supplied filename excluding the suffix. Example 'myFile' but not 'myFile.json'
+            User supplied filename excluding the suffix. Example 'myFile' but
+            not 'myFile.json'
         """

         # Construct filename from user input.
-        self.settings_file = os.path.join(os.getenv('APPDATA'), arg + '.json')
+        self.settings_file = os.path.join(os.getenv("APPDATA"), arg + ".json")

         if os.path.isfile(self.settings_file):
             # Read json into dictionary
-            with open(self.settings_file, 'r') as f:
+            with open(self.settings_file, "r") as f:
                 self.settings = json.load(f)
         else:
             # Create json file with default dictionary
             self.settings = {}
-            with open(self.settings_file, 'w') as f:
+            with open(self.settings_file, "w") as f:
                 json.dump(self.settings, f)

     def new(self, key, value):
         """Create new key value pair in settings.

-        Method checks to see if key exists. If it exists an error is raised. If the key does not exist it is created.
+        Method checks to see if key exists. If it exists an error is raised.
+        If the key does not exist it is created.

         Parameters
         ----------
@@ -69,16 +75,17 @@ class StickySettings(object):
         """

         if key in self.settings:
-            raise KeyError('Key already exists in settings')
+            raise KeyError("Key already exists in settings")
         else:
             self.settings[key] = value
-            with open(self.settings_file, 'w') as f:
+            with open(self.settings_file, "w") as f:
                 json.dump(self.settings, f)

     def set(self, key, value):
         """Set value of existing key.

-        Method checks to ensure the key exists and sets the value of the key to value. If the key does not exist an
+        Method checks to ensure the key exists and sets the value of the key
+        to value. If the key does not exist an
         error is raised.

         Parameters
@@ -95,10 +102,10 @@ class StickySettings(object):
         """
         if key in self.settings:
             self.settings[key] = value
-            with open(self.settings_file, 'w') as f:
+            with open(self.settings_file, "w") as f:
                 json.dump(self.settings, f)
         else:
-            raise KeyError('Key does not exist in settings')
+            raise KeyError("Key does not exist in settings")

     def get(self, item):
         """Get value of item from settings.
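Taken together, new, set, and get form a small persistent key-value store. Typical usage, assuming the APPDATA environment variable is set; the file name "QRev" is only an example:

from Classes.stickysettings import StickySettings

settings = StickySettings("QRev")        # opens or creates %APPDATA%\QRev.json
if "Folder" not in settings.settings:
    settings.new("Folder", "C:/Data")    # new() raises KeyError if key exists
settings.set("Folder", "C:/Data/2023")   # set() raises KeyError if key missing
last_folder = settings.get("Folder")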
@@ -117,7 +124,7 @@ class StickySettings(object):

         """

-        with open(self.settings_file, 'r') as f:
+        with open(self.settings_file, "r") as f:
             self.settings = json.load(f)
         return self.settings[item]
diff --git a/Classes/test_stickysettings.py b/Classes/test_stickysettings.py
index 2c3b6ec..ff6c1ec 100644
--- a/Classes/test_stickysettings.py
+++ b/Classes/test_stickysettings.py
@@ -3,16 +3,18 @@ import os
 import sys

 from Classes.stickysettings import StickySettings as ss

+# Todo Move this to the test package...
+

 def create_filename():
     """Create filename to use in testing"""
     file_exists = True
     n = 0
-    testfile = ''
+    testfile = ""
     while file_exists:
         n += 1
-        testfile = os.path.join(os.getenv('APPDATA'), 'xyz123')
-        if os.path.join(os.getenv('APPDATA'), 'xyz123'):
-            testfile = testfile + str(n)
-        else:
-            file_exists = False
+        testfile = os.path.join(os.getenv("APPDATA"), "xyz123") + str(n)
+        if not os.path.isfile(testfile + ".json"):
+            file_exists = False
@@ -25,44 +27,44 @@ def test_file_creation():
     """Test initialization of StickySettings and file creation"""
     testfile = create_filename()
     _ = ss(testfile)
-    assert os.path.isfile(testfile + '.json')
-    os.remove(testfile + '.json')
+    assert os.path.isfile(testfile + ".json")
+    os.remove(testfile + ".json")


 def test_store_value():
     """Test creating a file and key value pair and getting the value"""
     testfile = create_filename()
     test_user = ss(testfile)
-    test_user.new('test', True)
-    assert test_user.get('test')
-    os.remove(testfile + '.json')
+    test_user.new("test", True)
+    assert test_user.get("test")
+    os.remove(testfile + ".json")


 def test_set_value():
     """Test setting a value of an existing key"""
     testfile = create_filename()
     test_user = ss(testfile)
-    test_user.new('test', False)
-    test_user.set('test', True)
-    assert test_user.get('test')
-    os.remove(testfile + '.json')
+    test_user.new("test", False)
+    test_user.set("test", True)
+    assert test_user.get("test")
+    os.remove(testfile + ".json")


 def test_set_value_failure():
     """Test failure when setting a value for a key that does not exist"""
     testfile = create_filename()
     test_user = ss(testfile)
-    test_user.new('test', True)
+    test_user.new("test", True)
     with pytest.raises(KeyError):
-        test_user.set('Folder', 'AnyFolder')
-    os.remove(testfile + '.json')
+        test_user.set("Folder", "AnyFolder")
+    os.remove(testfile + ".json")


 def test_get_value_failure():
     """Test failure when requesting a value for a key that does not exist"""
     testfile = create_filename()
     test_user = ss(testfile)
-    test_user.new('test', True)
+    test_user.new("test", True)
     with pytest.raises(KeyError):
-        test_user.get('Folder')
-    os.remove(testfile + '.json')
+        test_user.get("Folder")
+    os.remove(testfile + ".json")
diff --git a/DischargeFunctions/bottom_discharge_extrapolation.py b/DischargeFunctions/bottom_discharge_extrapolation.py
index 71e5a5b..d1dca08 100644
--- a/DischargeFunctions/bottom_discharge_extrapolation.py
+++ b/DischargeFunctions/bottom_discharge_extrapolation.py
@@ -1,6 +1,6 @@
 """bottom_discharge_extrapolation
-Computes the extrapolated discharge in the bottom unmeasured portion of an ADCP transect. Methods are consistent with
-equations used by TRDI and SonTek.
+Computes the extrapolated discharge in the bottom unmeasured portion of an ADCP transect.
+Methods are consistent with equations used by TRDI and SonTek.
 Example
 -------

 from DischargeFunctions.bottom_discharge_extrapolation import

   trans_select = getattr(data_in.depths, data_in.depths.selected)
   num_top_method = {'Power': 0, 'Constant': 1, '3-Point': 2, None: -1}
-  self.top_ens = extrapolate_top(x_prod, data_in.w_vel.valid_data[0, :, :],
+  self.top_ens = extrapolate_top(x_prod,
+                                 data_in.w_vel.valid_data[0, :, :],
                                  num_top_method[data_in.extrap.top_method],
-                                 data_in.extrap.exponent, data_in.in_transect_idx, trans_select.depth_cell_size_m,
-                                 trans_select.depth_cell_depth_m, trans_select.depth_processed_m, delta_t,
+                                 data_in.extrap.exponent,
+                                 data_in.in_transect_idx,
+                                 trans_select.depth_cell_size_m,
+                                 trans_select.depth_cell_depth_m,
+                                 trans_select.depth_processed_m,
+                                 delta_t,
                                  num_top_method[top_method], exponent)
 """

 import numpy as np
 from numba.pycc import CC
 from numba import njit

-cc = CC('bottom_discharge_extrapolation')
+cc = CC("bottom_discharge_extrapolation")


 # Bottom Discharge Extrapolation with Numba
 # =========================================
-@cc.export('extrapolate_bot', 'f8[:](f8[:, :], b1[:, :], i8, f8, i4[:], f8[:, :], f8[:, :], f8[:], f8[:], '
-                              'optional(i8), optional(f8))')
-def extrapolate_bot(xprod,
-                    w_valid_data,
-                    transect_bot_method,
-                    transect_exponent,
-                    in_transect_idx,
-                    depth_cell_size_m,
-                    depth_cell_depth_m,
-                    depth_processed_m,
-                    delta_t,
-                    bot_method=-1,
-                    exponent=0.1667):
+@cc.export(
+    "extrapolate_bot",
+    "f8[:](f8[:, :], b1[:, :], i8, f8, i4[:], f8[:, :], f8[:, :], f8[:], f8[:], "
+    "optional(i8), optional(f8))",
+)
+def extrapolate_bot(
+    xprod,
+    w_valid_data,
+    transect_bot_method,
+    transect_exponent,
+    in_transect_idx,
+    depth_cell_size_m,
+    depth_cell_depth_m,
+    depth_processed_m,
+    delta_t,
+    bot_method=-1,
+    exponent=0.1667,
+):
     """Computes the extrapolated bottom discharge

     Parameters
@@ -100,16 +110,37 @@ def extrapolate_bot(xprod,
             cell_depth[row, col] = np.nan

     # Compute bottom discharge
-    q_bot = discharge_bot(bot_method, exponent, bot_rng, xprod,
-                          cell_size, cell_depth, depth_ens, delta_t, z)
+    q_bot = discharge_bot(
+        bot_method,
+        exponent,
+        bot_rng,
+        xprod,
+        cell_size,
+        cell_depth,
+        depth_ens,
+        delta_t,
+        z,
+    )

     return q_bot


 @njit
-@cc.export('discharge_top', 'f8[:](i8, f8, f8[:], f8[:, :], f8[:, :], f8[:, :], f8[:], f8[:], f8[:, :])')
-def discharge_bot(bot_method, exponent, bot_rng, component,
-                  cell_size, cell_depth, depth_ens, delta_t, z):
+@cc.export(
+    "discharge_bot",
+    "f8[:](i8, f8, f8[:], f8[:, :], f8[:, :], f8[:, :], f8[:], f8[:], f8[:, :])",
+)
+def discharge_bot(
+    bot_method,
+    exponent,
+    bot_rng,
+    component,
+    cell_size,
+    cell_depth,
+    depth_ens,
+    delta_t,
+    z,
+):
     """Computes the bottom extrapolated value of the provided component.
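For the power method, the fitted coefficient is integrated from the streambed (z = 0) up to bot_rng, so each ensemble contributes q_bot = delta_t * (coef / (exponent + 1)) * bot_rng**(exponent + 1), which is the bot_value expression below. A quick check with illustrative numbers only:

m = 0.1667      # exponent
coef = 0.75     # fitted from the measured portion of the profile
bot_rng = 0.9   # unmeasured range above the streambed (m)
delta_t = 1.2   # ensemble duration (s)

q_bot = delta_t * (coef / (m + 1)) * bot_rng ** (m + 1)  # about 0.68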
 Parameters

@@ -136,7 +167,8 @@ def discharge_bot(bot_method, exponent, bot_rng, component,

     Returns
     -------
     bot_value: np.array(float)
-        Total for the specified component integrated over the bottom range for each ensemble
+        Total for the specified component integrated over the bottom range
+        for each ensemble
     """

     # Initialize
@@ -164,8 +196,9 @@ def discharge_bot(bot_method, exponent, bot_rng, component,
                     numerator = numerator + numerator_temp

                 # Compute the denominator
-                denominator_temp = ((z[row, col] + 0.5 * cell_size[row, col]) ** (exponent + 1)) \
-                    - ((z[row, col] - 0.5 * cell_size[row, col]) ** (exponent + 1))
+                denominator_temp = (
+                    (z[row, col] + 0.5 * cell_size[row, col]) ** (exponent + 1)
+                ) - ((z[row, col] - 0.5 * cell_size[row, col]) ** (exponent + 1))
                 if np.logical_not(np.isnan(denominator_temp)) and denominator_temp != 0:
                     denominator_valid = True
                     denominator = denominator + denominator_temp
@@ -216,9 +249,17 @@ def discharge_bot(bot_method, exponent, bot_rng, component,
                             numerator = numerator + numerator_temp

                             # If numerator computed, compute denominator
-                            denominator_temp = ((z[row, col] + 0.5 * cell_size[row, col]) ** (exponent + 1)) \
-                                - ((z[row, col] - 0.5 * cell_size[row, col]) ** (exponent + 1))
-                            if np.logical_not(np.isnan(denominator_temp)) and denominator_temp != 0:
+                            denominator_temp = (
+                                (z[row, col] + 0.5 * cell_size[row, col])
+                                ** (exponent + 1)
+                            ) - (
+                                (z[row, col] - 0.5 * cell_size[row, col])
+                                ** (exponent + 1)
+                            )
+                            if (
+                                np.logical_not(np.isnan(denominator_temp))
+                                and denominator_temp != 0
+                            ):
                                 denominator_valid = True
                                 denominator = denominator + denominator_temp

@@ -232,9 +273,13 @@ def discharge_bot(bot_method, exponent, bot_rng, component,
                         numerator = numerator + numerator_temp

                         # If numerator computed, compute denominator
-                        denominator_temp = ((last_z + 0.5 * last_cell_size) ** (exponent + 1)) \
-                            - ((last_z - 0.5 * last_cell_size) ** (exponent + 1))
-                        if np.logical_not(np.isnan(denominator_temp)) and denominator_temp != 0:
+                        denominator_temp = (
+                            (last_z + 0.5 * last_cell_size) ** (exponent + 1)
+                        ) - ((last_z - 0.5 * last_cell_size) ** (exponent + 1))
+                        if (
+                            np.logical_not(np.isnan(denominator_temp))
+                            and denominator_temp != 0
+                        ):
                             denominator_valid = True
                             denominator = denominator + denominator_temp

@@ -243,16 +288,16 @@ def discharge_bot(bot_method, exponent, bot_rng, component,
             coef[col] = (numerator * (1 + exponent)) / denominator

     # Compute the bottom discharge of each profile
-    bot_value = delta_t * (coef / (exponent + 1)) * (bot_rng**(exponent + 1))
+    bot_value = delta_t * (coef / (exponent + 1)) * (bot_rng ** (exponent + 1))

     return bot_value


 @njit
-@cc.export('top_variables', 'f8[:](f8[:, :], b1[:, :], f8[:, :], f8[:, :], f8[:])')
+@cc.export("bot_variables", "f8[:](f8[:, :], b1[:, :], f8[:, :], f8[:, :], f8[:])")
 def bot_variables(x_prod, w_valid_data, cell_size, cell_depth, depth_ens):
-    """Computes the index to the bottom most valid cell in each ensemble and the range from
-    the bottom to the bottom of the bottom most cell.
+    """Computes the index to the bottommost valid cell in each ensemble
+    and the range from the streambed to the bottom of that cell.
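Put differently, the unmeasured bottom range for an ensemble is the processed depth minus the lower edge of the deepest valid cell. A one-ensemble sketch with toy values:

depth_ens = 4.0    # processed depth (m)
cell_depth = 2.8   # center depth of the deepest valid cell (m)
cell_size = 0.25   # size of that cell (m)

bot_rng = depth_ens - cell_depth - 0.5 * cell_size  # 1.075 m unmeasured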
Parameters ---------- @@ -292,13 +337,15 @@ def bot_variables(x_prod, w_valid_data, cell_size, cell_depth, depth_ens): if len(idx_temp) > 0: idx_bot = idx_temp[-1] # Compute bottom range - bot_rng[n] = depth_ens[n] - cell_depth[idx_bot, n] - 0.5 * cell_size[idx_bot, n] + bot_rng[n] = ( + depth_ens[n] - cell_depth[idx_bot, n] - 0.5 * cell_size[idx_bot, n] + ) else: bot_rng[n] = 0 return bot_rng -if __name__ == '__main__': +if __name__ == "__main__": # Used to compile code - cc.compile() \ No newline at end of file + cc.compile() diff --git a/DischargeFunctions/top_discharge_extrapolation.py b/DischargeFunctions/top_discharge_extrapolation.py index e6b19ad..e494fad 100644 --- a/DischargeFunctions/top_discharge_extrapolation.py +++ b/DischargeFunctions/top_discharge_extrapolation.py @@ -9,10 +9,15 @@ from DischargeFunctions.top_discharge_extrapolation import trans_select = getattr(data_in.depths, data_in.depths.selected) num_top_method = {'Power': 0, 'Constant': 1, '3-Point': 2, None: -1} - self.top_ens = extrapolate_top(x_prod, data_in.w_vel.valid_data[0, :, :], + self.top_ens = extrapolate_top(x_prod, + data_in.w_vel.valid_data[0, :, :], num_top_method[data_in.extrap.top_method], - data_in.extrap.exponent, data_in.in_transect_idx, trans_select.depth_cell_size_m, - trans_select.depth_cell_depth_m, trans_select.depth_processed_m, delta_t, + data_in.extrap.exponent, + data_in.in_transect_idx, + trans_select.depth_cell_size_m, + trans_select.depth_cell_depth_m, + trans_select.depth_processed_m, + delta_t, num_top_method[top_method], exponent) """ @@ -20,24 +25,29 @@ import numpy as np from numba.pycc import CC from numba import njit -cc = CC('top_discharge_extrapolation') +cc = CC("top_discharge_extrapolation") # Top Discharge Extrapolation with Numba # ====================================== -@cc.export('extrapolate_top', 'f8[:](f8[:, :], b1[:, :], i8, f8, i4[:], f8[:, :], f8[:, :], f8[:], f8[:], ' - 'optional(i8), optional(f8))') -def extrapolate_top(xprod, - w_valid_data, - transect_top_method, - transect_exponent, - in_transect_idx, - depth_cell_size_m, - depth_cell_depth_m, - depth_processed_m, - delta_t, - top_method=-1, - exponent=0.1667): +@cc.export( + "extrapolate_top", + "f8[:](f8[:, :], b1[:, :], i8, f8, i4[:], f8[:, :], f8[:, :], f8[:], f8[:], " + "optional(i8), optional(f8))", +) +def extrapolate_top( + xprod, + w_valid_data, + transect_top_method, + transect_exponent, + in_transect_idx, + depth_cell_size_m, + depth_cell_depth_m, + depth_processed_m, + delta_t, + top_method=-1, + exponent=0.1667, +): """Computes the extrapolated top discharge. 
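
# Sketch of the power-fit top extrapolation (top_method == 0) for one
# ensemble, with hypothetical values; coef is the same power-law profile
# coefficient used for the bottom extrapolation.
exponent = 0.1667  # 1/6th power law
coef = 0.15        # fitted profile coefficient
depth_ens = 1.6    # ensemble depth (m)
top_rng = 0.4      # water surface to top of the first valid cell (m)
delta_t = 1.0      # ensemble duration (s)

q_top = (
    delta_t
    * (coef / (exponent + 1))
    * (depth_ens ** (exponent + 1) - (depth_ens - top_rng) ** (exponent + 1))
)
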
Parameters @@ -76,7 +86,9 @@ def extrapolate_top(xprod, exponent = transect_exponent # Compute top variables - idx_top, idx_top3, top_rng = top_variables(xprod, w_valid_data, depth_cell_size_m, depth_cell_depth_m) + idx_top, idx_top3, top_rng = top_variables( + xprod, w_valid_data, depth_cell_size_m, depth_cell_depth_m + ) idx_top = idx_top[in_transect_idx] idx_top3 = idx_top3[:, in_transect_idx] top_rng = top_rng[in_transect_idx] @@ -99,18 +111,42 @@ def extrapolate_top(xprod, cell_depth[row, col] = np.nan # Compute top discharge - q_top = discharge_top(top_method, exponent, idx_top, idx_top3, top_rng, - xprod[:, in_transect_idx], cell_size, cell_depth, - depth_ens, delta_t, z) + q_top = discharge_top( + top_method, + exponent, + idx_top, + idx_top3, + top_rng, + xprod[:, in_transect_idx], + cell_size, + cell_depth, + depth_ens, + delta_t, + z, + ) return q_top @njit -@cc.export('discharge_top', 'f8[:](i8, f8, i4[:], i4[:, :], f8[:], f8[:, :], f8[:, :], f8[:, :], f8[:], ' - 'f8[:], f8[:, :])') -def discharge_top(top_method, exponent, idx_top, idx_top_3, top_rng, component, cell_size, cell_depth, - depth_ens, delta_t, z): +@cc.export( + "discharge_top", + "f8[:](i8, f8, i4[:], i4[:, :], f8[:], f8[:, :], f8[:, :], f8[:, :], f8[:], " + "f8[:], f8[:, :])", +) +def discharge_top( + top_method, + exponent, + idx_top, + idx_top_3, + top_rng, + component, + cell_size, + cell_depth, + depth_ens, + delta_t, + z, +): """Computes the top extrapolated value of the provided component. Parameters @@ -136,7 +172,8 @@ def discharge_top(top_method, exponent, idx_top, idx_top_3, top_rng, component, delta_t: np.array(float) Duration of each ensemble compute by QComp z: np.array(float) - Relative depth from the bottom of each depth cell computed in discharge top method + Relative depth from the bottom of each depth cell computed in + discharge top method Returns ------- @@ -172,8 +209,9 @@ def discharge_top(top_method, exponent, idx_top, idx_top_3, top_rng, component, numerator = numerator + numerator_temp # Compute the denominator - denominator_temp = ((z[row, col] + 0.5 * cell_size[row, col]) ** (exponent + 1)) \ - - ((z[row, col] - 0.5 * cell_size[row, col]) ** (exponent + 1)) + denominator_temp = ( + (z[row, col] + 0.5 * cell_size[row, col]) ** (exponent + 1) + ) - ((z[row, col] - 0.5 * cell_size[row, col]) ** (exponent + 1)) if np.logical_not(np.isnan(denominator_temp)) and denominator_temp != 0: denominator_valid = True denominator = denominator + denominator_temp @@ -183,8 +221,11 @@ def discharge_top(top_method, exponent, idx_top, idx_top_3, top_rng, component, coef[col] = (numerator * (1 + exponent)) / denominator # Compute the top discharge for each ensemble - top_value = delta_t * (coef / (exponent + 1)) * \ - (depth_ens**(exponent + 1) - (depth_ens-top_rng)**(exponent + 1)) + top_value = ( + delta_t + * (coef / (exponent + 1)) + * (depth_ens ** (exponent + 1) - (depth_ens - top_rng) ** (exponent + 1)) + ) # Top constant extrapolation elif top_method == 1: @@ -222,22 +263,25 @@ def discharge_top(top_method, exponent, idx_top, idx_top_3, top_rng, component, for k in range(3): if np.isnan(cell_depth[idx_top_3[k, j], j]) == False: sumd = sumd + cell_depth[idx_top_3[k, j], j] - sumd2 = sumd2 + cell_depth[idx_top_3[k, j], j]**2 + sumd2 = sumd2 + cell_depth[idx_top_3[k, j], j] ** 2 sumq = sumq + component[idx_top_3[k, j], j] - sumqd = sumqd + (component[idx_top_3[k, j], j] * cell_depth[idx_top_3[k, j], j]) + sumqd = sumqd + ( + component[idx_top_3[k, j], j] + * cell_depth[idx_top_3[k, j], j] + ) delta = 
3 * sumd2 - sumd**2
                     a = (3 * sumqd - sumq * sumd) / delta
                     b = (sumq * sumd2 - sumqd * sumd) / delta

                     # Compute discharge for 3-pt fit
-                    qo = (a * top_rng[j]**2) / 2 + b * top_rng[j]
+                    qo = (a * top_rng[j] ** 2) / 2 + b * top_rng[j]

                     top_value[j] = delta_t[j] * qo

     return top_value


 @njit
-@cc.export('top_variables', '(f8[:, :], b1[:, :], f8[:, :], f8[:, :])')
+@cc.export("top_variables", "(f8[:, :], b1[:, :], f8[:, :], f8[:, :])")
 def top_variables(xprod, w_valid_data, depth_cell_size_m, depth_cell_depth_m):
     """Computes the index to the top and top three valid cells in each ensemble
     and the range from the water surface to the top of the topmost cell.
@@ -286,7 +330,10 @@ def top_variables(xprod, w_valid_data, depth_cell_size_m, depth_cell_depth_m):
             for k in range(3):
                 idx_top_3[k, n] = idx_temp[k]
             # Compute top range
-            top_rng[n] = depth_cell_depth_m[idx_top[n], n] - 0.5 * depth_cell_size_m[idx_top[n], n]
+            top_rng[n] = (
+                depth_cell_depth_m[idx_top[n], n]
+                - 0.5 * depth_cell_size_m[idx_top[n], n]
+            )
         else:
             top_rng[n] = 0
             idx_top[n] = 0
@@ -294,6 +341,6 @@ def top_variables(xprod, w_valid_data, depth_cell_size_m, depth_cell_depth_m):

     return idx_top, idx_top_3, top_rng


-if __name__ == '__main__':
+if __name__ == "__main__":
     # Used to compile code
     cc.compile()
diff --git a/MiscLibs/abba_2d_interpolation.py b/MiscLibs/abba_2d_interpolation.py
index 296806d..83e8b3a 100644
--- a/MiscLibs/abba_2d_interpolation.py
+++ b/MiscLibs/abba_2d_interpolation.py
@@ -1,19 +1,24 @@
 """abba_2d_interpolation

-This module performs 2-D interpolation on data that is assumed to be arranged in row-column format rather
-than in a random pattern. The rows represent vertical location or y-coordinate of each cell
-in the data array. The columns represent a horizontal location or x-coordinate of the data.
-The cell size and thus the y-coordinate of a cell can change from cell to cell or ensemble to ensemble.
-The interpolation algorithm searches for the all valid cells above, below, before, and after
-that touch the cell to be interpolated. Bathymetry is honored by checking to see if the depth of the streambed
-of the cell before or after is greater than the bottom of the target cell. When searching before or after, if the
-streambed is encountered before a valid cell then no valid cell is used in that direction.
-
-The methods provide the flexibility to determine neighbors based on either a raw vertical location
-or a normalized location. To use a normalized location set normalize to True.
-
-For efficiency the data_list can contain multiple types of data that lie on the same x-y locations.
-This allows multiple interpolations without having to recompute neighbors and distances.
+This module performs 2-D interpolation on data that is assumed to be arranged
+in row-column format rather than in a random pattern. The rows represent vertical
+location or y-coordinate of each cell in the data array. The columns represent a
+horizontal location or x-coordinate of the data. The cell size and thus the
+y-coordinate of a cell can change from cell to cell or ensemble to ensemble.
+The interpolation algorithm searches for all valid cells above, below,
+before, and after that touch the cell to be interpolated. Bathymetry is honored
+by checking to see if the depth of the streambed of the cell before or after is
+greater than the bottom of the target cell. When searching before or after,
+if the streambed is encountered before a valid cell then no valid cell is used
+in that direction.
+ +The methods provide the flexibility to determine neighbors based on either +a raw vertical location or a normalized location. To use a normalized location +set normalize to True. + +For efficiency the data_list can contain multiple types of data that lie on +the same x-y locations. This allows multiple interpolations without having to +recompute neighbors and distances. Example ------- @@ -36,9 +41,17 @@ interpolated_v_values = interpolated_data[1] import numpy as np -def find_neighbors(valid_data, cells_above_sl, y_cell_centers, y_cell_size, y_depth, search_loc, normalize=False): - """ Finds the nearest valid cells above, below, before, and after each invalid cell. The before and after - Cells must have data in the same y range as the invalid cell. +def find_neighbors( + valid_data, + cells_above_sl, + y_cell_centers, + y_cell_size, + y_depth, + search_loc, + normalize=False, +): + """Finds the nearest valid cells above, below, before, and after each invalid cell. + The before and after cells must have data in the same y range as the invalid cell. Parameters ---------- @@ -51,8 +64,8 @@ def find_neighbors(valid_data, cells_above_sl, y_cell_centers, y_cell_size, y_de y_cell_size: np.array(float) Size of each cell in the y-direction y_depth: np.array(float) - 1-D array containing values that will be used to normalize the data and specifying the lower boundary for - identifying neighbors + 1-D array containing values that will be used to normalize the data and + specifying the lower boundary for identifying neighbors search_loc: list Identifies location to search (above, below, before, after) normalize: bool @@ -61,7 +74,8 @@ def find_neighbors(valid_data, cells_above_sl, y_cell_centers, y_cell_size, y_de Returns ------- neighbors: list - List of dictionaries providing the indices of the above, below, before, and after valid cells. + List of dictionaries providing the indices of the above, below, + before, and after valid cells. """ # Compute cell extents @@ -86,40 +100,40 @@ def find_neighbors(valid_data, cells_above_sl, y_cell_centers, y_cell_size, y_de points = [] target = (cell, ens) - if 'above' in search_loc: + if "above" in search_loc: # Identify indices of cells above and below target above = find_above(target, valid_data) if above is not None: points.append(above) - if 'below' in search_loc: + if "below" in search_loc: below = find_below(target, valid_data) if below is not None: points.append(below) - # Find all cells in ensembles before or after the target ensemble that overlap the target cell - # This is a change implemented on 2/27/2020 - dsm - y_match = np.logical_and(y_top[target] <= y_bottom, y_bottom[target] >= y_top) + # Find all cells in ensembles before or after the target ensemble + # that overlap the target cell. + y_match = np.logical_and(y_top[target] < y_bottom, y_bottom[target] > y_top) y_match = np.logical_and(y_match, valid_data) - if 'before' in search_loc: + if "before" in search_loc: # Identify indices of cells before and after target before = find_before(target, y_match, y_depth, y_bottom_actual) if before: points = points + before - if 'after' in search_loc: + if "after" in search_loc: after = find_after(target, y_match, y_depth, y_bottom_actual) if after: points = points + after - neighbors.append({'target': target, 'neighbors': points}) + neighbors.append({"target": target, "neighbors": points}) return neighbors def find_above(target, valid_data): - """ Finds the nearest valid cell above the target. + """Finds the nearest valid cell above the target. 
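
# Sketch of the scan find_above performs, on a hypothetical 4-cell ensemble:
# starting one cell above the target, step upward until a valid cell is
# found, returning None when none exists.
import numpy as np

valid_data = np.array([[False], [True], [False], [False]])
target = (3, 0)  # (cell, ensemble)

above_idx = target[0] - 1
while above_idx >= 0 and not valid_data[above_idx, target[1]]:
    above_idx -= 1
above = (above_idx, target[1]) if above_idx >= 0 else None  # -> (1, 0)
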
Parameters ---------- @@ -148,7 +162,7 @@ def find_above(target, valid_data): def find_below(target, valid_data): - """ Finds the nearest valid cell below the target. + """Finds the nearest valid cell below the target. Parameters ---------- @@ -166,7 +180,7 @@ def find_below(target, valid_data): below_idx = target[0] + 1 # Determine cell row index limit - n_cells = len(valid_data[:, target[1]])-1 + n_cells = len(valid_data[:, target[1]]) - 1 # Find nearest valid cell below target while below_idx <= n_cells and not valid_data[below_idx, target[1]]: @@ -180,7 +194,8 @@ def find_below(target, valid_data): def find_before(target, y_match, y_depth, y_bottom): - """ Finds the nearest ensemble before the target that has valid cells within the vertical range of the target + """Finds the nearest ensemble before the target that has valid cells within the + vertical range of the target Parameters ---------- @@ -189,28 +204,29 @@ def find_before(target, y_match, y_depth, y_bottom): y_match: np.array(logical) 2-D array of all cells that are within the vertical range of the target cell y_depth: np.array(float) - 1-D array containing values that will be used to normalize the data and specifying the lower boundary for - identifying neighbors + 1-D array containing values that will be used to normalize the data and + specifying the lower boundary for identifying neighbors y_bottom: np.array(float) Bottom depth of each cell Returns ------- before_idx: list - List of tuples of indices of all cells in the nearest ensemble before that target that are within - the vertical range of the target cell + List of tuples of indices of all cells in the nearest ensemble before + that target that are within the vertical range of the target cell """ # Initialize ensemble counter before_ens = target[1] - 1 - # Loop until an ensemble is found that has valid data within the vertical range of the target while honoring - # the bathymetry. If the streambed is encountered while searching for a previously valid ensemble then - # it is determined that there is no available valid data before the target that can be used. + # Loop until an ensemble is found that has valid data within the vertical range + # of the target while honoring the bathymetry. If the streambed is encountered + # while searching for a previously valid ensemble then it is determined that + # there is no available valid data before the target that can be used. 
found = False while (before_ens >= 0) and not found: - if y_bottom[target] < y_depth[before_ens] and np.any(y_match[:, before_ens]): + if y_bottom[target] <= y_depth[before_ens] and np.any(y_match[:, before_ens]): found = True elif y_bottom[target] > y_depth[before_ens]: before_ens = -999 @@ -232,7 +248,8 @@ def find_before(target, y_match, y_depth, y_bottom): def find_after(target, y_match, y_depth, y_bottom): - """ Finds the nearest ensemble after the target that has valid cells within the vertical range of the target + """Finds the nearest ensemble after the target that has valid cells within the + vertical range of the target Parameters ---------- @@ -241,27 +258,28 @@ def find_after(target, y_match, y_depth, y_bottom): y_match: np.array(logical) 2-D array of all cells that are within the vertical range of the target cell y_depth: np.array(float) - 1-D array containing values that will be used to normalize the data and specifying the lower boundary for - identifying neighbors + 1-D array containing values that will be used to normalize the data and + specifying the lower boundary for identifying neighbors y_bottom: np.array(float) Bottom depth of each cell Returns ------- after_idx: list - List of tuples of indices of all cells in the nearest ensemble after that target that are within - the vertical range of the target cell + List of tuples of indices of all cells in the nearest ensemble after + that target that are within the vertical range of the target cell """ # Initialize ensemble counter after_ens = target[1] + 1 - # Loop until an ensemble is found that has valid data within the vertical range of the target while honoring - # the bathymetry. If the streambed is encountered while searching for a next valid ensemble then - # it is determined that there is no available valid data after the target that can be used. + # Loop until an ensemble is found that has valid data within the vertical range + # of the target while honoring the bathymetry. If the streambed is encountered + # while searching for a next valid ensemble then it is determined that there is + # no available valid data after the target that can be used. found = False while (after_ens <= y_match.shape[1] - 1) and not found: - if (y_bottom[target] < y_depth[after_ens]) and np.any(y_match[:, after_ens]): + if (y_bottom[target] <= y_depth[after_ens]) and np.any(y_match[:, after_ens]): found = True elif y_bottom[target] > y_depth[after_ens]: after_ens = -999 @@ -271,7 +289,7 @@ def find_after(target, y_match, y_depth, y_bottom): # Find and store the indices all cells from the identified ensemble # that are within the vertical range of the target - if (after_ens <= y_match.shape[1]-1) and (after_ens > 0): + if (after_ens <= y_match.shape[1] - 1) and (after_ens > 0): rows = np.where(y_match[:, after_ens])[0] after_idx = [] for row in rows: @@ -283,7 +301,7 @@ def find_after(target, y_match, y_depth, y_bottom): def compute_distances(target, neighbors, x, y): - """ Computes distances between the target and neighbors. + """Computes distances between the target and neighbors. 
    Parameters
    ----------
@@ -309,13 +327,15 @@
     # Compute distance from target cell to each neighbor
     distances = []
     for neighbor in neighbors:
-        distances.append(np.sqrt((y[neighbor] - target_y) ** 2 + (x[neighbor[1]] - target_x) ** 2))
+        distances.append(
+            np.sqrt((y[neighbor] - target_y) ** 2 + (x[neighbor[1]] - target_x) ** 2)
+        )

     return distances


 def idw_interpolation(data, neighbor_indices, distances):
-    """ Interpolate value using neighbors and inverse distance weighting.
+    """Interpolate value using neighbors and inverse distance weighting.

     Parameters
     ----------
@@ -336,8 +356,9 @@
     sum_of_weights = 0
     weighted_sum = 0
     for n, index in enumerate(neighbor_indices):
-        sum_of_weights = sum_of_weights + (1/distances[n])
-        weighted_sum = weighted_sum + data[index] * (1/distances[n])
+        if distances[n] > 0:
+            sum_of_weights = sum_of_weights + (1 / distances[n])
+            weighted_sum = weighted_sum + data[index] * (1 / distances[n])

     # Compute interpolated value
     if sum_of_weights > 0:
@@ -348,10 +369,19 @@
     return interpolated_value


-def abba_idw_interpolation(data_list, valid_data, cells_above_sl, y_centers, y_cell_size, y_depth,
-                           x_shiptrack, normalize, search_loc=('above', 'below', 'before', 'after')):
-    """ Interpolates values for invalid cells using the neighboring cells above, below, before, and after and
-    and inverse distance averaging.
+def abba_idw_interpolation(
+    data_list,
+    valid_data,
+    cells_above_sl,
+    y_centers,
+    y_cell_size,
+    y_depth,
+    x_shiptrack,
+    normalize,
+    search_loc=("above", "below", "before", "after"),
+):
+    """Interpolates values for invalid cells using the neighboring cells above,
+    below, before, and after and inverse distance averaging.
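
# Sketch of the inverse-distance weighting applied to each target cell, with
# hypothetical neighbor values and distances (zero distances are skipped by
# idw_interpolation after this change):
import numpy as np

values = np.array([0.52, 0.48, 0.61])   # data at the neighbor cells
distances = np.array([0.3, 0.5, 1.2])   # target-to-neighbor distances (m)

weights = 1.0 / distances
interpolated = np.sum(values * weights) / np.sum(weights)
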
    Parameters
    ----------
@@ -366,8 +396,8 @@ def abba_idw_interpolation(data_list, valid_data, cells_above_sl, y_centers, y_c
     y_cell_size: np.array(float)
         Size of each cell in the y-direction
     y_depth: np.array(float)
-        1-D array containing values that will be used to normalize the data and specifying the lower boundary for
-        identifying neighbors
+        1-D array containing values that will be used to normalize the data and
+        specifying the lower boundary for identifying neighbors
     x_shiptrack: np.array(float)
         X coordinate of cumulative shiptrack
     normalize: bool
@@ -387,27 +417,31 @@ def abba_idw_interpolation(data_list, valid_data, cells_above_sl, y_centers, y_c
     valid_cells = np.logical_and(cells_above_sl, valid_data)

     if not np.all(valid_cells):
         # Find neighbors associated with each target
-        interpolation_points = find_neighbors(valid_data=valid_data,
-                                              cells_above_sl=cells_above_sl,
-                                              y_cell_centers=y_centers,
-                                              y_cell_size=y_cell_size,
-                                              y_depth=y_depth,
-                                              search_loc=search_loc,
-                                              normalize=normalize)
-
+        interpolation_points = find_neighbors(
+            valid_data=valid_data,
+            cells_above_sl=cells_above_sl,
+            y_cell_centers=y_centers,
+            y_cell_size=y_cell_size,
+            y_depth=y_depth,
+            search_loc=search_loc,
+            normalize=normalize,
+        )
+        dist = []
         # Process each target
         for point in interpolation_points:

             # Compute distance from target to neighbors
-            distances = compute_distances(target=point['target'],
-                                          neighbors=point['neighbors'],
-                                          x=x_shiptrack,
-                                          y=y_centers)
-
+            distances = compute_distances(
+                target=point["target"],
+                neighbors=point["neighbors"],
+                x=x_shiptrack,
+                y=y_centers,
+            )
+            dist.append(distances)
             # Interpolate target for each data set in data_list
             for n, data in enumerate(data_list):
-                interpolated_value = idw_interpolation(data=data,
-                                                       neighbor_indices=point['neighbors'],
-                                                       distances=distances)
-                interpolated_data[n].append([point['target'], interpolated_value])
+                interpolated_value = idw_interpolation(
+                    data=data, neighbor_indices=point["neighbors"], distances=distances
+                )
+                interpolated_data[n].append([point["target"], interpolated_value])

     return interpolated_data
diff --git a/MiscLibs/bayes_cov_compiled.py b/MiscLibs/bayes_cov_compiled.py
index 9f627e8..913d173 100644
--- a/MiscLibs/bayes_cov_compiled.py
+++ b/MiscLibs/bayes_cov_compiled.py
@@ -14,15 +14,15 @@ import numpy as np
 from numba.pycc import CC
 from numba import njit

-cc = CC('bayes_cov_compiled')
+cc = CC("bayes_cov_compiled")


 # Bayesian COV
 # ============
-@cc.export('bayes_cov', 'f8(f8[::1], f8, f8, i4)')
+@cc.export("bayes_cov", "f8(f8[::1], f8, f8, i4)")
 def bayes_cov(transects_total_q, cov_prior=0.03, cov_prior_u=0.2, nsim=20000):
-    """Computes the coefficient of variation using a Bayesian approach and an assumed posterior
-    log-normal distribution.
+    """Computes the coefficient of variation using a Bayesian approach and
+    an assumed posterior log-normal distribution.
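
# Sketch contrasting the classical coefficient of variation with what
# bayes_cov estimates, using hypothetical transect discharges: the Bayesian
# value is the posterior mean of the sampled cov, shrunk toward cov_prior
# when only a few transects are available.
import numpy as np

q = np.array([100.2, 101.5, 99.8, 100.9])   # total discharge per transect
classical_cov = np.std(q) / np.mean(q)
# bayesian_cov = bayes_cov(q, cov_prior=0.03, cov_prior_u=0.2, nsim=20000)
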
    Parameters
    ----------
@@ -41,16 +41,21 @@ def bayes_cov(transects_total_q, cov_prior=0.03, cov_prior_u=0.2, nsim=20000):
         Coefficient of variation
     """

-    theta_std = np.abs(np.array([np.mean(transects_total_q), cov_prior])) * cov_prior_u \
+    theta_std = (
+        np.abs(np.array([np.mean(transects_total_q), cov_prior]))
+        * cov_prior_u
         / np.sqrt(len(transects_total_q))
+    )

     # Modified for compatibility with Numba
-    sam, obj_funk = metropolis(theta0=np.array([np.mean(transects_total_q), cov_prior]),
-                               obs_data=transects_total_q,
-                               cov_prior=cov_prior,
-                               cov_prior_u=cov_prior_u,
-                               nsim=nsim,
-                               theta_std=theta_std)
+    sam, obj_funk = metropolis(
+        theta0=np.array([np.mean(transects_total_q), cov_prior]),
+        obs_data=transects_total_q,
+        cov_prior=cov_prior,
+        cov_prior_u=cov_prior_u,
+        nsim=nsim,
+        theta_std=theta_std,
+    )

     n_burn = int(nsim / 2)

@@ -60,10 +65,11 @@


 @njit
-@cc.export('metropolis', '(f8[:], f8[:], f8, f8, i4, f8[:])')
+@cc.export("metropolis", "(f8[:], f8[:], f8, f8, i4, f8[:])")
 def metropolis(theta0, obs_data, cov_prior, cov_prior_u, nsim, theta_std):
-    """Implements the Metropolis_Hastings Markov chain Monte Carlo (MCMC) algorithm for sampling the
-    posterior distribution, assuming a log-normal posterior distribution.
+    """Implements the Metropolis-Hastings Markov chain Monte Carlo (MCMC)
+    algorithm for sampling the posterior distribution, assuming a
+    log-normal posterior distribution.

     Parameters
     ----------
@@ -78,7 +84,8 @@ def metropolis(theta0, obs_data, cov_prior, cov_prior_u, nsim, theta_std):
     nsim: int
         Number of simulations.
     theta_std: np.array(float)
-        Standard deviation for the gaussian Jump distribution. If blank a default value is computed.
+        Standard deviation for the Gaussian jump distribution.
+        If blank a default value is computed.
Returns ------- @@ -94,16 +101,19 @@ def metropolis(theta0, obs_data, cov_prior, cov_prior_u, nsim, theta_std): sam = np.zeros((nsim + 1, npar)) obj_funk = np.zeros((nsim + 1, 1)) - # Parameters - used for automatic computation of starting stds of the Gaussian Jump distribution + # Parameters - used for automatic computation of starting stds of the + # Gaussian Jump distribution if np.any(np.isnan(theta_std)): std_factor = 0.1 theta_std = std_factor * np.abs(theta0) # Check if starting point is feasible - abandon otherwise - f_current = log_post(param=theta0, measures=obs_data, cov_prior=cov_prior, cov_prior_u=cov_prior_u) + f_current = log_post( + param=theta0, measures=obs_data, cov_prior=cov_prior, cov_prior_u=cov_prior_u + ) if not is_feasible(f_current): - print('Metropolis:FATAL:unfeasible starting point') + print("Metropolis:FATAL:unfeasible starting point") return sam, obj_funk else: sam[0, :] = list(theta0) @@ -122,10 +132,12 @@ def metropolis(theta0, obs_data, cov_prior, cov_prior_u, nsim, theta_std): candid[1] = np.random.normal(loc=current[1], scale=theta_std[1]) # Evaluate objective function at candidate - f_candid = log_post(param=candid, - measures=obs_data, - cov_prior=cov_prior, - cov_prior_u=cov_prior_u) + f_candid = log_post( + param=candid, + measures=obs_data, + cov_prior=cov_prior, + cov_prior_u=cov_prior_u, + ) if not is_feasible(f_candid): sam[i + 1, :] = current @@ -135,7 +147,18 @@ def metropolis(theta0, obs_data, cov_prior, cov_prior_u, nsim, theta_std): u = np.random.uniform(0, 1) # Compute Metropolis acceptance ratio - ratio = np.exp(min((np.max(np.hstack((np.array([float(-100)]), f_candid - f_current))), float(0)))) + ratio = np.exp( + min( + ( + np.max( + np.hstack( + (np.array([float(-100)]), f_candid - f_current) + ) + ), + float(0), + ) + ) + ) # Apply acceptance rule if u <= ratio: @@ -150,10 +173,11 @@ def metropolis(theta0, obs_data, cov_prior, cov_prior_u, nsim, theta_std): @njit -@cc.export('log_post', 'f8(f8[:], f8[:], f8, f8)') +@cc.export("log_post", "f8(f8[:], f8[:], f8, f8)") def log_post(param, measures, cov_prior, cov_prior_u): - """Define function returning the posterior log-pdf using the model measures ~ N(true_value,cov*true_value), - with a flat prior on true_value and a log-normal prior for cov (= coefficient of variation) + """Define function returning the posterior log-pdf using the model measures + ~ N(true_value,cov*true_value), with a flat prior on true_value and a log-normal + prior for cov (= coefficient of variation) Parameters ---------- @@ -173,9 +197,9 @@ def log_post(param, measures, cov_prior, cov_prior_u): Unnormalized log-posterior """ # Check if any parameter is <=0 - # since both true_value and cov have to be positive - otherwise sigma = true_value*cov does not make sense - # if any(item <= 0 for item in param): - # return -math.inf + # since both true_value and cov have to be positive - + # otherwise sigma = true_value*cov does not make sense + # Changed for compatibility with Numba if np.any(np.less_equal(param, 0)): return np.NINF @@ -187,18 +211,26 @@ def log_post(param, measures, cov_prior, cov_prior_u): # Compute log-likelihood under the model: measures ~ N(true_value,sigma) # You can easily change this model (e.g. lognormal for a positive measurand?) 
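# For reference, outside of Numba the log-likelihood written out below is
# equivalent to calling scipy.stats.norm directly (a sketch, assuming scipy
# is available):
#
#     from scipy.stats import norm
#     log_likelihood = np.sum(norm.logpdf(measures, loc=true_value, scale=sigma))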
# OPTION 1 : the model follows a Normal distribution - # This equation is used for compatibility with Numba, instead of call to scipy.stats.norm.logpdf - log_likelihood = np.sum(np.log(np.exp(-(((measures - true_value) / sigma) ** 2) / 2) - / (np.sqrt(2 * np.pi) * sigma))) - - # Prior on true_value - flat prior used here but you may change this if you have prior knowledge + # This equation is used for compatibility with Numba, + # instead of call to scipy.stats.norm.logpdf + log_likelihood = np.sum( + np.log( + np.exp(-(((measures - true_value) / sigma) ** 2) / 2) + / (np.sqrt(2 * np.pi) * sigma) + ) + ) + + # Prior on true_value - flat prior used here but you may change this + # if you have prior knowledge log_prior_1 = 0 # Lognormal prior x = cov mu = np.log(cov_prior) scale = cov_prior_u - pdf = np.exp(-(np.log(x) - mu) ** 2 / (2 * scale ** 2)) / (x * scale * np.sqrt(2 * np.pi)) + pdf = np.exp(-((np.log(x) - mu) ** 2) / (2 * scale**2)) / ( + x * scale * np.sqrt(2 * np.pi) + ) log_prior_2 = np.log(pdf) # Joint prior (prior independence) @@ -208,12 +240,14 @@ def log_post(param, measures, cov_prior, cov_prior_u): logp = log_likelihood + log_prior if np.isnan(logp): # Used np to eliminate the need for math package - logp = np.NINF # returns -Inf rather than NaN's (required by the MCMC sampler used subsequently) + logp = np.NINF + # returns -Inf rather than NaN's + # (required by the MCMC sampler used subsequently) return logp @njit -@cc.export('is_feasible', 'b1(f8)') +@cc.export("is_feasible", "b1(f8)") def is_feasible(value): """Checks that a value is a real value (not infinity or nan) @@ -231,6 +265,6 @@ def is_feasible(value): return True -if __name__ == '__main__': +if __name__ == "__main__": # Used to compile code cc.compile() diff --git a/MiscLibs/common_functions.py b/MiscLibs/common_functions.py index e4c337c..65abafa 100644 --- a/MiscLibs/common_functions.py +++ b/MiscLibs/common_functions.py @@ -1,6 +1,7 @@ import numpy as np import scipy.stats as sp -from decimal import Decimal +# from decimal import Decimal +from sigfig import round as sigfig_round def cosd(angle): @@ -11,44 +12,44 @@ def cosd(angle): angle: float Angle in degrees """ - - return np.cos(np.pi * angle/180) + + return np.cos(np.pi * angle / 180) def sind(angle): """Compute sine of angle in degrees. - Parameters - ---------- - angle: float - Angle in degrees - """ - - return np.sin(np.pi * angle/180) + Parameters + ---------- + angle: float + Angle in degrees + """ + + return np.sin(np.pi * angle / 180) def tand(angle): """Compute tangent of angle in degrees. - Parameters - ---------- - angle: float - Angle in degrees - """ - - return np.tan(np.pi * angle/180) + Parameters + ---------- + angle: float + Angle in degrees + """ + + return np.tan(np.pi * angle / 180) def arctand(angle): """Compute arctangent of angle in degrees. - Parameters - ---------- - angle: float - Angle in degrees - """ - - return np.arctan(angle) * 180/np.pi + Parameters + ---------- + angle: float + Angle in degrees + """ + + return np.arctan(angle) * 180 / np.pi def cart2pol(x, y): @@ -63,40 +64,40 @@ def cart2pol(x, y): Returns ------- - phi: float + phi: np.array(float) Angle in radians - rho: float + rho: np.array(float) Magnitude """ - + rho = np.sqrt(x**2 + y**2) phi = np.arctan2(y, x) - + return phi, rho def pol2cart(phi, rho): """Convert polar coordinates to cartesian coordinates. 
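
# Round-trip sketch for cart2pol/pol2cart with hypothetical values:
import numpy as np

x, y = np.array([3.0]), np.array([4.0])
phi, rho = np.arctan2(y, x), np.sqrt(x**2 + y**2)  # cart2pol -> (0.927..., 5.0)
x2, y2 = rho * np.cos(phi), rho * np.sin(phi)      # pol2cart recovers (3.0, 4.0)
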
- Parameters - ---------- - phi: np.array(float) - Angle in radians - rho: np.array(float) - Magnitude - - Returns - ------- - x: float - x coordinate - y: float - y coordinate - - """ - + Parameters + ---------- + phi: np.array(float) + Angle in radians + rho: np.array(float) + Magnitude + + Returns + ------- + x: float + x coordinate + y: float + y coordinate + + """ + x = rho * np.cos(phi) y = rho * np.sin(phi) - + return x, y @@ -158,7 +159,7 @@ def iqr_2d(data): return sp_iqr -def azdeg2rad(angle) -> float: +def azdeg2rad(angle): """Converts an azimuth angle in degrees to radians. Parameters @@ -173,7 +174,7 @@ def azdeg2rad(angle) -> float: """ # Convert to radians - direction = np.deg2rad(90-angle) + direction = np.deg2rad(90 - angle) # Create postive angle idx = np.where(direction < 0)[0] @@ -181,11 +182,11 @@ def azdeg2rad(angle) -> float: direction[idx] = direction[idx] + 2 * np.pi else: direction = direction + 2 * np.pi - + return direction -def rad2azdeg(angle) -> float: +def rad2azdeg(angle): """Converts an angle in radians to an azimuth in degrees. Parameters @@ -204,7 +205,7 @@ def rad2azdeg(angle) -> float: deg = 90 - deg if deg < 0: deg += 360 - + return deg else: # Multiple values @@ -212,7 +213,7 @@ def rad2azdeg(angle) -> float: deg = 90 - deg sub_zero = np.where(nan_less(deg, 0)) deg[sub_zero] = deg[sub_zero] + 360 - + return deg @@ -229,7 +230,7 @@ def nandiff(values): final_values: np.ndarray() 1-D array of differences of consecutive non nan numbers """ - + final_values = [] for n in range(len(values) - 1): # Check for nan and add nan to final values @@ -240,9 +241,9 @@ def nandiff(values): i = n + 1 while np.isnan(values[i]) and i < len(values) - 1: i += 1 - + final_values.append(values[i] - values[n]) - + return np.array(final_values) @@ -309,7 +310,7 @@ def checked_idx(transects): return checked -def units_conversion(units_id='SI'): +def units_conversion(units_id="SI"): """Computes the units conversion from SI units used internally to the desired display units. @@ -324,27 +325,31 @@ def units_conversion(units_id='SI'): dictionary of unit conversion and labels """ - if units_id == 'SI': - units = {'L': 1, - 'Q': 1, - 'A': 1, - 'V': 1, - 'label_L': 'm', - 'label_Q': 'm3/s', - 'label_A': 'm2', - 'label_V': 'm/s', - 'ID': 'SI'} + if units_id == "SI": + units = { + "L": 1, + "Q": 1, + "A": 1, + "V": 1, + "label_L": "(m)", + "label_Q": "(m3/s)", + "label_A": "(m2)", + "label_V": "(m/s)", + "ID": "SI", + } else: - units = {'L': 1.0 / 0.3048, - 'Q': (1.0 / 0.3048)**3, - 'A': (1.0 / 0.3048)**2, - 'V': 1.0 / 0.3048, - 'label_L': 'ft', - 'label_Q': 'ft3/s', - 'label_A': 'ft2', - 'label_V': 'ft/s', - 'ID': 'English'} + units = { + "L": 1.0 / 0.3048, + "Q": (1.0 / 0.3048) ** 3, + "A": (1.0 / 0.3048) ** 2, + "V": 1.0 / 0.3048, + "label_L": "(ft)", + "label_Q": "(ft3/s)", + "label_A": "(ft2)", + "label_V": "(ft/s)", + "ID": "English", + } return units @@ -368,17 +373,17 @@ def convert_temperature(temp_in, units_in, units_out) -> float: """ temp_out = None - if units_in == 'F': - if units_out == 'C': - temp_out = (temp_in - 32) * (5./9.) + if units_in == "F": + if units_out == "C": + temp_out = (temp_in - 32) * (5.0 / 9.0) else: temp_out = temp_in - elif units_in == 'C': - if units_out == 'C': + elif units_in == "C": + if units_out == "C": temp_out = temp_in else: - temp_out = (temp_in * (9./5.)) + 32 + temp_out = (temp_in * (9.0 / 5.0)) + 32 return temp_out @@ -400,7 +405,7 @@ def nan_less_equal(data1, data2) -> bool: """ d3 = data2 - data1 - d3[np.isnan(d3)] = -999. 
+    d3[np.isnan(d3)] = -999.0

     return d3 >= 0

@@ -416,12 +421,12 @@

     Returns
     -------
-    :bool
+    :np.array(bool)
         Result of comparison.
     """

     d3 = data2 - data1
-    d3[np.isnan(d3)] = -999.
+    d3[np.isnan(d3)] = -999.0

     return d3 > 0

@@ -437,12 +442,12 @@

     Returns
     -------
-    :bool
+    :np.array(bool)
         Result of comparison.
     """

     d3 = data1 - data2
-    d3[np.isnan(d3)] = -999.
+    d3[np.isnan(d3)] = -999.0

     return d3 >= 0

@@ -463,40 +468,63 @@
     """

     d3 = data1 - data2
-    d3[np.isnan(d3)] = -999.
+    d3[np.isnan(d3)] = -999.0

     return d3 > 0


+def ari2geodeg(ari_ang):
+    """Ported from Matlab VMT.
+    ARI2GEODEG converts arithmetic angles to geographic angles.
+
+    Parameters
+    ----------
+    ari_ang: np.array
+        Arithmetic angles in degrees, measured counterclockwise from east
+
+    Returns
+    -------
+    geo_ang: np.array
+        Angles converted to geographic (azimuth) headings in degrees
+    """
+
+    geo_ang = (360 - ari_ang + 90) % 360
+
+    return geo_ang
+
+
 def scientific_notation(n, sig: int = 3):
-    """Return scientific notation
-
-    Parameters
-    ----------
-    n: float or int
-        Value to convert in scientific notation
-    sig: int
-        Number of significant digits
-    """
-    if not np.isnan(n) and not np.isinf(n):
-        n_str = str(n)
-        b = '%.'+str(sig-1)+'E'
-        a = b % Decimal(n_str)
-        [val, exp] = a.split('E')
-        exp = int(exp)
-        # if exp < 0:
-        #     n_sn = a
-        # elif exp == 0:
-        #     if float(val) == 0:
-        #         n_sn = '0.'+'0'*(sig-1)
-        #     else:
-        #         n_sn = val
-        # else:
-        n_sn_temp = np.round(float(val) * 10**exp, 3)
-        if abs(n_sn_temp) > 10**(sig-1):
-            n_sn = str(n_sn_temp).split('.')[0]
-        else:
-            n_sn = str(n_sn_temp)
-        # n_sn = sfrnd(n, sig)
-        return n_sn
-    else:
-        return str(n)
+    """Return a value rounded to the requested number of significant digits.
+
+    Parameters
+    ----------
+    n: float or int
+        Value to round
+    sig: int
+        Number of significant digits
+    """
+    return sigfig_round(n, sig)
+
+
+def rotate_coordinates(x, y, angle_d):
+    """Rotate the x, y coordinates clockwise by angle_d degrees."""
+    xr = x * cosd(angle_d) + y * sind(angle_d)
+    yr = -x * sind(angle_d) + y * cosd(angle_d)
+    return xr, yr
diff --git a/MiscLibs/non_uniform_savgol.py b/MiscLibs/non_uniform_savgol.py
index 5f52b8a..c151ae2 100644
--- a/MiscLibs/non_uniform_savgol.py
+++ b/MiscLibs/non_uniform_savgol.py
@@ -6,7 +6,8 @@ def non_uniform_savgol(x, y, window, polynom):
     Applies a Savitzky-Golay filter to y with non-uniform spacing
     as defined in x

-    This is based on https://dsp.stackexchange.com/questions/1676/savitzky-golay-smoothing-filter-for-not-equally-spaced-data
+    This is based on https://dsp.stackexchange.com/questions/1676
+    /savitzky-golay-smoothing-filter-for-not-equally-spaced-data
     The borders are interpolated like scipy.signal.savgol_filter would do

     Parameters
@@ -30,7 +31,7 @@ def non_uniform_savgol(x, y, window, polynom):
         raise ValueError('"x" and "y" must be of the same size')

     if len(x) < window:
-        raise ValueError('The data size must be larger than the window size')
+        raise ValueError("The data size must be larger than the window size")

     if type(window) is not int:
         raise TypeError('"window" must be an integer')
@@ -48,9 +49,9 @@ def non_uniform_savgol(x, y, window, polynom):
         polynom += 1

     # Initialize variables
-    A = np.empty((window, polynom))     # Matrix
-    tA = np.empty((polynom, window))    # Transposed matrix
-    t = np.empty(window)                # Local x variables
+    A = np.empty((window, polynom))  # Matrix
+    tA = np.empty((polynom, window))  # Transposed matrix
+    t = np.empty(window)  # Local x variables
     y_smoothed = np.full(len(y), np.nan)

     # Start smoothing
diff --git a/MiscLibs/robust_loess.py b/MiscLibs/robust_loess.py
index 805a848..d201c28 100644
--- a/MiscLibs/robust_loess.py
+++ b/MiscLibs/robust_loess.py
@@ -15,7 +15,7 @@ import numpy as np
 from numba import jit, njit

 # Set constants used in multiple functions
-eps = np.finfo('float').eps
+eps = np.finfo("float").eps
 seps = np.sqrt(eps)

 # @jit(cache=True, nopython=True)
@@ -50,19 +50,22 @@ def nearest_neighbors(num_neighbors, idx, x, valid_x):
     distance_sorted = np.sort(distance[valid_x])
     distance_neighbors = distance_sorted[num_neighbors - 1]

-    # Find all points that are as close as or closer than the num_neighbors closest points
-    # close = np.array(distance <= distance_neighbors)
+    # Find all points that are as close as or closer than the num_neighbors
+    # closest points
     close = np.less_equal(distance, distance_neighbors)

     # Find the indices of x that are both close and valid
-    neighbors_idx = np.where(np.equal(np.logical_and(close, valid_x), np.repeat(True, len(valid_x))))[0]
+    neighbors_idx = np.where(
+        np.equal(np.logical_and(close, valid_x), np.repeat(True, len(valid_x)))
+    )[0]

     return neighbors_idx

+
 # @jit(cache=True, nopython=True)
 @njit
 def tricube_weights(distance):
-    """ Convert distances into weights using tri-cubic weight function.
+    """Convert distances into weights using tri-cubic weight function.
     Note for Matlab: This function returns the square-root of the weights.

     Parameters
@@ -79,13 +82,15 @@ def tricube_weights(distance):
     max_distance = np.max(distance)
     if max_distance > 0:
         distance = distance / max_distance
-    weights = (1 - distance ** 3) ** 1.5
+    weights = (1 - distance**3) ** 1.5
     return weights

+
 # @jit(cache=True, nopython=True)
 @njit
 def bisquare(data):
-    """Bisqure weight function which for values greater than are equal to 1 are set to zero.
+    """Bisquare weight function; values greater than or equal
+    to 1 are set to zero.

     Parameters
     ----------
@@ -100,12 +105,13 @@ def bisquare(data):
     """
     weights = np.zeros(data.shape)
     d3 = 1 - np.abs(data)
-    d3[np.isnan(d3)] = -999.
+    d3[np.isnan(d3)] = -999.0
     idx = d3 > 0
     # idx = nan_less(np.abs(data), 1)
     weights[idx] = np.abs(1 - data[idx] ** 2)
     return weights

+
 # @jit(cache=True, nopython=True)
 @njit
 def robust_weights(residuals, max_eps):
@@ -133,10 +139,12 @@ def robust_weights(residuals, max_eps):

     return weights

+
 # @jit(cache=True, nopython=True)
 @njit
 def compute_loess(x, y, neighbors_idx, idx, r_weights=None):
-    """Computes the loess smooth for the specified point x[i]. If robust weights are specified the computed weights
+    """Computes the loess smooth for the specified point x[i].
+    If robust weights are specified the computed weights
     are adjusted by the robust weights.
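
# Sketch of the tri-cube weighting used for each local fit, with
# hypothetical distances; as noted above, the function returns the square
# root of the usual tri-cube weights to match the Matlab implementation.
import numpy as np

distance = np.array([0.0, 0.5, 1.0])
weights = (1 - (distance / distance.max()) ** 3) ** 1.5  # approx [1.0, 0.82, 0.0]
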
Parameters @@ -174,23 +182,23 @@ def compute_loess(x, y, neighbors_idx, idx, r_weights=None): weights = weights * r_weights[neighbors_idx] weighted_x_matrix = np.vstack((np.ones(distances.shape), distances)) - weighted_x_matrix = np.vstack((weighted_x_matrix, np.expand_dims(distances * distances, axis=0))) - weighted_x_matrix = weights.repeat(weighted_x_matrix.shape[0]).reshape(-1, 3).T * weighted_x_matrix + weighted_x_matrix = np.vstack( + (weighted_x_matrix, np.expand_dims(distances * distances, axis=0)) + ) + weighted_x_matrix = ( + weights.repeat(weighted_x_matrix.shape[0]).reshape(-1, 3).T + * weighted_x_matrix + ) neighbors_y = weights * neighbors_y # Solve using least squares - # try: - # mask = ~np.isnan(weighted_x_matrix.T) & ~np.isnan(neighbors_y.T) - # smoothed_values, _, _, _ = np.linalg.lstsq(weighted_x_matrix.T[mask], - # neighbors_y.T[mask], rcond=None) - # except (IndexError, ValueError): - smoothed_values, _, _, _ = np.linalg.lstsq(weighted_x_matrix.T, - neighbors_y.T) + smoothed_values, _, _, _ = np.linalg.lstsq(weighted_x_matrix.T, neighbors_y.T) smoothed_value = smoothed_values[0] else: smoothed_value = np.nan return smoothed_value + # @jit(cache=True, nopython=True) @njit def rloess(x, y, span): @@ -236,9 +244,9 @@ def rloess(x, y, span): # if x[i] and x[i-1] are equal just use previous fit if the_diffs[n] == 0: - smoothed_values[n] = smoothed_values[n-1] - lower_bound[n] = int(lower_bound[n-1]) - upper_bound[n] = int(upper_bound[n-1]) + smoothed_values[n] = smoothed_values[n - 1] + lower_bound[n] = int(lower_bound[n - 1]) + upper_bound[n] = int(upper_bound[n - 1]) else: @@ -264,17 +272,25 @@ def rloess(x, y, span): # Find new value for each point for n in range(n_points): - if n > 0 and x[n] == x[n-1]: - smoothed_values[n] = smoothed_values[n-1] + if n > 0 and x[n] == x[n - 1]: + smoothed_values[n] = smoothed_values[n - 1] else: if not np.isnan(smoothed_values[n]): - neighbors_idx = np.array(list(range(lower_bound[n], upper_bound[n] + 1))) + neighbors_idx = np.array( + list(range(lower_bound[n], upper_bound[n] + 1)) + ) if any_nans: - neighbors_idx = neighbors_idx[np.logical_not(y_nan[neighbors_idx])] + neighbors_idx = neighbors_idx[ + np.logical_not(y_nan[neighbors_idx]) + ] if np.any(r_weights[neighbors_idx] <= 0): - neighbors_idx = nearest_neighbors(span, n, x, (r_weights > 0)) + neighbors_idx = nearest_neighbors( + span, n, x, (r_weights > 0) + ) - smoothed_values[n] = compute_loess(x, y, neighbors_idx, n, r_weights) + smoothed_values[n] = compute_loess( + x, y, neighbors_idx, n, r_weights + ) return smoothed_values diff --git a/MiscLibs/robust_loess_compiled.py b/MiscLibs/robust_loess_compiled.py index 55e2405..91a01fc 100644 --- a/MiscLibs/robust_loess_compiled.py +++ b/MiscLibs/robust_loess_compiled.py @@ -15,14 +15,15 @@ import numpy as np from numba.pycc import CC from numba import njit -cc = CC('robust_loess_compiled') +cc = CC("robust_loess_compiled") # Set constants -eps = np.finfo('float').eps +eps = np.finfo("float").eps seps = np.sqrt(eps) + @njit -@cc.export('nearest_neighbors', 'i8[:](i4, i4, f8[:], b1[:])') +@cc.export("nearest_neighbors", "i8[:](i4, i4, f8[:], b1[:])") def nearest_neighbors(num_neighbors, idx, x, valid_x): """Find the nearest k neighbors to x[i] that are not nan. 
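
# Usage sketch for the rloess smoother defined in this module, on
# hypothetical noisy data; span is the number of neighbors used in each
# local quadratic fit.
import numpy as np

x = np.linspace(0.0, 10.0, 50)
y = np.sin(x) + np.random.normal(0.0, 0.1, 50)
# smoothed = rloess(x, y, span=10)
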
@@ -53,18 +54,22 @@
     distance_sorted = np.sort(distance[valid_x])
     distance_neighbors = distance_sorted[num_neighbors - 1]

-    # Find all points that are as close as or closer than the num_neighbors closest points
+    # Find all points that are as close as or closer than the num_neighbors
+    # closest points
     close = np.less_equal(distance, distance_neighbors)

     # Find the indices of x that are both close and valid
-    neighbors_idx = np.where(np.equal(np.logical_and(close, valid_x), np.repeat(True, len(valid_x))))[0]
+    neighbors_idx = np.where(
+        np.equal(np.logical_and(close, valid_x), np.repeat(True, len(valid_x)))
+    )[0]

     return neighbors_idx

+
 @njit
-@cc.export('tricube_weights', 'f8[:](f8[:])')
+@cc.export("tricube_weights", "f8[:](f8[:])")
 def tricube_weights(distance):
-    """ Convert distances into weights using tri-cubic weight function.
+    """Convert distances into weights using tri-cubic weight function.
     Note for Matlab: This function returns the square-root of the weights.

     Parameters
@@ -81,13 +86,15 @@ def tricube_weights(distance):
     max_distance = np.max(distance)
     if max_distance > 0:
         distance = distance / max_distance
-    weights = (1 - distance ** 3) ** 1.5
+    weights = (1 - distance**3) ** 1.5
     return weights

+
 @njit
-@cc.export('bisquare', 'f8[:](f8[:])')
+@cc.export("bisquare", "f8[:](f8[:])")
 def bisquare(data):
-    """Bisqure weight function which for values greater than are equal to 1 are set to zero.
+    """Bisquare weight function; values greater than or equal
+    to 1 are set to zero.

     Parameters
     ----------
@@ -104,14 +111,15 @@ def bisquare(data):

     # Code to compute less than with nan's and no runtime warnings
     d3 = 1 - np.abs(data)
-    d3[np.isnan(d3)] = -999.
+    d3[np.isnan(d3)] = -999.0
     idx = d3 > 0

     weights[idx] = np.abs(1 - data[idx] ** 2)
     return weights

+
 @njit
-@cc.export('robust_weights', 'f8[:](f8[:], f8)')
+@cc.export("robust_weights", "f8[:](f8[:], f8)")
 def robust_weights(residuals, max_eps):
     """Compute robust weights using residuals.

@@ -137,8 +145,9 @@ def robust_weights(residuals, max_eps):

     return weights

+
 @njit
-@cc.export('compute_loess', 'f8(f8[:], f8[:], i8[:], i4, optional(f8[:]))')
+@cc.export("compute_loess", "f8(f8[:], f8[:], i8[:], i4, optional(f8[:]))")
 def compute_loess(x, y, neighbors_idx, idx, r_weights=None):
     """Computes the loess smooth for the specified point x[i]. If robust weights
     are specified the computed weights are adjusted by the robust weights.
@@ -178,24 +187,24 @@ def compute_loess(x, y, neighbors_idx, idx, r_weights=None): weights = weights * r_weights[neighbors_idx] weighted_x_matrix = np.vstack((np.ones(distances.shape), distances)) - weighted_x_matrix = np.vstack((weighted_x_matrix, np.expand_dims(distances * distances, axis=0))) - weighted_x_matrix = weights.repeat(weighted_x_matrix.shape[0]).reshape(-1, 3).T * weighted_x_matrix + weighted_x_matrix = np.vstack( + (weighted_x_matrix, np.expand_dims(distances * distances, axis=0)) + ) + weighted_x_matrix = ( + weights.repeat(weighted_x_matrix.shape[0]).reshape(-1, 3).T + * weighted_x_matrix + ) neighbors_y = weights * neighbors_y # Solve using least squares - # try: - # mask = ~np.isnan(weighted_x_matrix.T) & ~np.isnan(neighbors_y.T) - # smoothed_values, _, _, _ = np.linalg.lstsq(weighted_x_matrix.T[mask], - # neighbors_y.T[mask], rcond=None) - # except (IndexError, ValueError): - smoothed_values, _, _, _ = np.linalg.lstsq(weighted_x_matrix.T, - neighbors_y.T) + smoothed_values, _, _, _ = np.linalg.lstsq(weighted_x_matrix.T, neighbors_y.T) smoothed_value = smoothed_values[0] else: smoothed_value = np.nan return smoothed_value -@cc.export('rloess', 'f8[:](f8[::1], f8[::1], i4)') + +@cc.export("rloess", "f8[:](f8[::1], f8[::1], i4)") def rloess(x, y, span): """This function computes a robust loess smooth using a quadratic model as defined by W.S.Cleveland, (1979) "Robust Locally Weighted Regression and Smoothing Scatterplots", @@ -237,9 +246,9 @@ def rloess(x, y, span): # if x[i] and x[i-1] are equal just use previous fit if the_diffs[n] == 0: - smoothed_values[n] = smoothed_values[n-1] - lower_bound[n] = int(lower_bound[n-1]) - upper_bound[n] = int(upper_bound[n-1]) + smoothed_values[n] = smoothed_values[n - 1] + lower_bound[n] = int(lower_bound[n - 1]) + upper_bound[n] = int(upper_bound[n - 1]) else: @@ -265,20 +274,29 @@ def rloess(x, y, span): # Find new value for each point for n in range(n_points): - if n > 0 and x[n] == x[n-1]: - smoothed_values[n] = smoothed_values[n-1] + if n > 0 and x[n] == x[n - 1]: + smoothed_values[n] = smoothed_values[n - 1] else: if not np.isnan(smoothed_values[n]): - neighbors_idx = np.array(list(range(lower_bound[n], upper_bound[n] + 1))) + neighbors_idx = np.array( + list(range(lower_bound[n], upper_bound[n] + 1)) + ) if any_nans: - neighbors_idx = neighbors_idx[np.logical_not(y_nan[neighbors_idx])] + neighbors_idx = neighbors_idx[ + np.logical_not(y_nan[neighbors_idx]) + ] if np.any(r_weights[neighbors_idx] <= 0): - neighbors_idx = nearest_neighbors(span, n, x, (r_weights > 0)) + neighbors_idx = nearest_neighbors( + span, n, x, (r_weights > 0) + ) - smoothed_values[n] = compute_loess(x, y, neighbors_idx, n, r_weights) + smoothed_values[n] = compute_loess( + x, y, neighbors_idx, n, r_weights + ) return smoothed_values -if __name__ == '__main__': - cc.compile() \ No newline at end of file + +if __name__ == "__main__": + cc.compile() diff --git a/MiscLibs/run_iqr.py b/MiscLibs/run_iqr.py index 72a20db..d84ed28 100644 --- a/MiscLibs/run_iqr.py +++ b/MiscLibs/run_iqr.py @@ -2,9 +2,10 @@ import numpy as np from numba.pycc import CC from numba import njit -cc = CC('run_iqr') +cc = CC("run_iqr") -@cc.export('run_iqr', 'f8[:](i4, f8[::1])') + +@cc.export("run_iqr", "f8[:](i4, f8[::1])") def run_iqr(half_width, data): """Computes a running Innerquartile Range The routine accepts a column vector as input. 
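
# Sketch of one running-IQR window in the body of the data set, with
# hypothetical values; the window is the half_width points on either side
# of n, excluding point n itself. NumPy's default percentile method is used
# here and can differ slightly from the Matlab-consistent quantile above.
import numpy as np

data = np.array([1.0, 2.0, 9.0, 3.0, 4.0, 5.0, 6.0])
half_width, n = 2, 3
sample = np.hstack((data[n - half_width:n], data[n + 1:n + half_width + 1]))
q75, q25 = np.percentile(sample, [75, 25])
running_iqr = q75 - q25
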
"halfWidth" number of data @@ -18,7 +19,8 @@ def run_iqr(half_width, data): Parameters ---------- half_width: int - Number of ensembles before and after current ensemble which are used to compute the IQR + Number of ensembles before and after current ensemble which are used + to compute the IQR data: np.array(float) Data for which the IQR is computed """ @@ -35,26 +37,29 @@ def run_iqr(half_width, data): # Sample selection for 1st point if n == 0: - sample = data[1:1 + half_width] + sample = data[1 : 1 + half_width] # Sample selection a end of data set elif n + half_width > npts: - sample = np.hstack((data[n - half_width - 1:n - 1], data[n:npts])) + sample = np.hstack((data[n - half_width - 1 : n - 1], data[n:npts])) # Sample selection at beginning of data set elif half_width >= n + 1: - sample = np.hstack((data[0:n], data[n + 1:n + half_width + 1])) + sample = np.hstack((data[0:n], data[n + 1 : n + half_width + 1])) # Sample selection in body of data set else: - sample = np.hstack((data[n - half_width:n], data[n + 1:n + half_width + 1])) + sample = np.hstack( + (data[n - half_width : n], data[n + 1 : n + half_width + 1]) + ) iqr_array.append(iqr(sample)) return np.array(iqr_array) + @njit -@cc.export('iqr', 'f8(f8[::1])') +@cc.export("iqr", "f8(f8[::1])") def iqr(data_1d): """This function computes the iqr consistent with Matlab @@ -83,8 +88,9 @@ def iqr(data_1d): return sp_iqr + @njit -@cc.export('compute_quantile', 'f8(f8[::1], f8)') +@cc.export("compute_quantile", "f8(f8[::1], f8)") def compute_quantile(data_1d, q): sorted_data = np.sort(data_1d) @@ -93,10 +99,13 @@ def compute_quantile(data_1d, q): x1 = int(np.floor(sample_idx)) x2 = int(np.ceil(sample_idx)) if x1 != x2: - result = (sample_idx - x1) * (sorted_data[x2] - sorted_data[x1]) + sorted_data[x1] + result = (sample_idx - x1) * (sorted_data[x2] - sorted_data[x1]) + sorted_data[ + x1 + ] else: result = sorted_data[x1] return result -if __name__ is '__main__': - cc.compile() \ No newline at end of file + +if __name__ == "__main__": + cc.compile() diff --git a/UI/main.py b/UI/main.py index a755eb0..0014c2e 100644 --- a/UI/main.py +++ b/UI/main.py @@ -91,7 +91,7 @@ class ApplicationWindow(QMainWindow, Ui_MainWindow): self.tableResume.resizeColumnToContents(0) # self.__iter = 0 - self.QRame_version = 'QRame 0.1' + self.QRame_version = 'QRame 1.0' self.setWindowTitle(self.QRame_version) self.setWindowIcon(QtGui.QIcon('QRame.ico')) @@ -1001,7 +1001,10 @@ class ApplicationWindow(QMainWindow, Ui_MainWindow): update_settings = 'nav_ref' gps = True for transect_idx in meas.checked_transect_idx: - if meas.transects[transect_idx].boat_vel.gga_vel is None: + if meas.transects[transect_idx].boat_vel.gga_vel is None or np.all( + meas.transects[transect_idx].gps.diff_qual_ens[ + ~np.isnan(meas.transects[transect_idx].gps.diff_qual_ens) + ] < 2): gps = False break if gps: -- GitLab