diff --git a/Classes/BoatData.py b/Classes/BoatData.py
new file mode 100644
index 0000000000000000000000000000000000000000..b748f176eb8c844fe56b0c1f9d047ba169070825
--- /dev/null
+++ b/Classes/BoatData.py
@@ -0,0 +1,1767 @@
+import copy
+import numpy as np
+from numpy.matlib import repmat
+from MiscLibs.common_functions import cosd, sind, cart2pol, iqr, pol2cart, nan_less_equal, \
+    nan_greater_equal, nan_greater, nan_less
+from MiscLibs.robust_loess import rloess
+
+
+class BoatData(object):
+    """Class to process and store boat velocity data.
+
+    Attributes
+    ----------
+
+    Original data provided to the class
+        raw_vel_mps: np.array
+            Contains the raw unfiltered velocity data in m/s.
+            First index 1-4 corresponds to beams 1, 2, 3, 4 if beam coordinates or u, v, w, d otherwise.
+        frequency_khz: np.array or float
+            Defines the ADCP frequency used for the velocity measurement.
+        orig_coord_sys: str
+            Defines the original raw data velocity coordinate system: "Beam", "Inst", "Ship", "Earth".
+        nav_ref: str
+            Defines the original raw data navigation reference: "None", "BT", "GGA", "VTG".
+        corr: np.array
+            Correlation values for bottom track
+        rssi: np.array
+            Returned signal strength for bottom track
+
+    Coordinate transformed data
+        coord_sys: str
+            Defines the current coordinate system "Beam", "Inst", "Ship", "Earth" used to compute u, v, w, and d.
+        u_mps: np.array(float)
+            Horizontal velocity in x-direction, in m/s.
+        v_mps: np.array(float)
+            Horizontal velocity in y-direction, in m/s.
+        w_mps: np.array(float)
+            Vertical velocity (+ up), m/s.
+        d_mps: np.array(float)
+            Difference in vertical velocities computed from opposing beam pairs, in m/s.
+        num_invalid: float
+            Number of ensembles with invalid velocity data.
+        bottom_mode: str
+            BT mode for TRDI, 'Variable' for SonTek.
+
+    Processed data
+        u_processed_mps: np.array(float)
+            Horizontal velocity in x-direction filtered and interpolated.
+        v_processed_mps: np.array(float)
+            Horizontal velocity in y-direction filtered and interpolated.
+        processed_source: np.array(object)
+            Source of velocity: BT, VTG, GGA, INT.
+
+    Settings variables
+        d_filter: str
+            Difference velocity filter "Manual", "Off", "Auto".
+        d_filter_thresholds: float or dict
+            Threshold(s) for difference velocity filter; a dict keyed by frequency when set automatically.
+        w_filter: str
+            Vertical velocity filter "Manual", "Off", "Auto".
+        w_filter_thresholds: float or dict
+            Threshold(s) for vertical velocity filter; a dict keyed by frequency when set automatically.
+        gps_diff_qual_filter: integer
+            Differential correction quality (1,2,4).
+        gps_altitude_filter: str
+            Change in altitude filter "Auto", "Manual", "Off".
+        gps_altitude_filter_change: float
+            Threshold from mean for altitude filter.
+        gps_HDOP_filter: str
+            HDOP filter "Auto", "Manual", "Off".
+        gps_HDOP_filter_max: float
+            Max acceptable value for HDOP.
+        gps_HDOP_filter_change: float
+            Maximum change allowed from mean.
+        smooth_filter: str
+            Setting to use filter based on smoothing function ("On", "Off")
+        smooth_speed: np.array(float)
+            Smoothed boat speed.
+        smooth_upper_limit: np.array(float)
+            Smooth function upper limit of window.
+        smooth_lower_limit: np.array(float)
+            Smooth function lower limit of window.
+        interpolate: str
+            Type of interpolation: "None", "Linear", "Smooth" etc.
+        beam_filter: integer
+            Minimum number of beams for valid data, 3 for 3-beam solutions, 4 for 4-beam.
+        valid_data: np.array(bool)
+            Logical array identifying valid and invalid data for each filter applied.
+                Row 1 [0] - composite
+                Row 2 [1] - original
+                Row 3 [2] - d_filter or diff_qual
+                Row 4 [3] - w_filter or altitude
+                Row 5 [4] - smooth_filter
+                Row 6 [5] - beam_filter or HDOP
+
+        d_meas_thresholds: dict
+            Dictionary of difference velocity thresholds computed using the whole measurement
+        w_meas_thresholds: dict
+            Dictionary of vertical velocity thresholds computed using the whole measurement
+        use_measurement_thresholds: bool
+            Indicates if the measurement based thresholds should be used
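+
+    Example
+    -------
+    Illustrative sketch only; vel, freq, and transect are hypothetical
+    inputs, not values provided by this module:
+
+        boat_data = BoatData()
+        boat_data.populate_data(source='TRDI', vel_in=vel, freq_in=freq,
+                                coord_sys_in='Beam', nav_ref_in='BT')
+        boat_data.apply_filter(transect)
+        boat_data.apply_interpolation(transect)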
+    """
+
+    def __init__(self):
+        """Initialize instance variables."""
+
+        # Variables passed to the constructor
+        self.raw_vel_mps = None  # contains the raw unfiltered velocity data in m/s.
+        self.frequency_khz = None  # Defines ADCP frequency used for velocity measurement
+        self.orig_coord_sys = None  # Defines the original raw data velocity coordinate system
+        self.nav_ref = None  # Defines the original raw data navigation reference
+        self.corr = np.array([])
+        self.rssi = np.array([])
+
+        # Coordinate transformed data
+        self.coord_sys = None  # Defines the current coordinate system "Beam", "Inst", "Ship", "Earth"
+        self.u_mps = None  # Horizontal velocity in x-direction, in m/s
+        self.v_mps = None  # Horizontal velocity in y-direction, in m/s
+        self.w_mps = None  # Vertical velocity (+ up), m/s
+        self.d_mps = None  # Difference in vertical velocities computed from opposing beam pairs in m/s
+        self.num_invalid = None  # Number of ensembles with invalid velocity data
+        self.bottom_mode = None  # BT mode for TRDI, 'Variable' for SonTek
+
+        # Processed data
+        self.u_processed_mps = None  # Horizontal velocity in x-direction filtered and interpolated
+        self.v_processed_mps = None  # Horizontal velocity in y-direction filtered and interpolated
+        self.processed_source = None  # Source of data, BT, GGA, VTG, INT
+
+        # Filter and interpolation properties
+        self.d_filter = None  # Difference velocity filter "Manual", "Off", "Auto"
+        self.d_filter_thresholds = {}  # Threshold for difference velocity filter
+        self.w_filter = None  # Vertical velocity filter "Manual", "Off", "Auto"
+        self.w_filter_thresholds = {}  # Threshold for vertical velocity filter
+        self.gps_diff_qual_filter = None  # Differential correction quality (1,2,4)
+        self.gps_altitude_filter = None  # Change in altitude filter "Auto", "Manual", "Off"
+        self.gps_altitude_filter_change = None  # Threshold from mean for altitude filter
+        self.gps_HDOP_filter = None  # HDOP filter "Auto", "Manual", "Off"
+        self.gps_HDOP_filter_max = None  # Max acceptable value for HDOP
+        self.gps_HDOP_filter_change = None  # Maximum change allowed from mean
+        self.smooth_filter = None  # Filter based on smoothing function
+        self.smooth_speed = None  # Smoothed boat speed
+        self.smooth_upper_limit = None  # Smooth function upper limit of window
+        self.smooth_lower_limit = None  # Smooth function lower limit of window
+        self.interpolate = None  # Type of interpolation: "None", "Linear", "Smooth" etc.
+        self.beam_filter = None  # 3 for 3-beam solutions, 4 for 4-beam solutions
+        self.valid_data = None  # Logical array identifying valid and invalid data for each filter applied
+
+        # Filter settings populated from Measurement.create_filter_composites
+        self.d_meas_thresholds = {}
+        self.w_meas_thresholds = {}
+
+        self.use_measurement_thresholds = False
+
+    def populate_data(self, source, vel_in, freq_in, coord_sys_in, nav_ref_in, beam_filter_in=3,
+                      bottom_mode_in='Variable', corr_in=None, rssi_in=None):
+        """Assigns data to instance variables.
+
+        Parameters
+        ----------
+        source: str
+            Manufacturer (TRDI, SonTek)
+        vel_in: np.array(float)
+            Boat velocity array
+        freq_in: np.array(float)
+            Acoustic frequency of boat velocity measurements
+        coord_sys_in: str
+            Coordinate system of boat velocity
+        nav_ref_in: str
+            Source of boat velocity (BT, GGA, VTG)
+        beam_filter_in: int
+            Minimum number of valid beams for valid data.
+        bottom_mode_in: str
+            Bottom mode for TRDI ADCP
+        corr_in: np.array
+            Correlation values for bottom track
+        rssi_in: np.array
+            Returned signal strength for bottom track
+        """
+
+        # Identify invalid ensembles for SonTek data.
+        if source == 'SonTek':
+            vel_in = BoatData.filter_sontek(vel_in)
+
+        # Store input data
+        self.raw_vel_mps = vel_in
+        self.frequency_khz = freq_in
+        self.coord_sys = coord_sys_in
+        self.orig_coord_sys = coord_sys_in
+        self.nav_ref = nav_ref_in
+        self.beam_filter = beam_filter_in
+        self.bottom_mode = bottom_mode_in
+        if corr_in is not None:
+            self.corr = corr_in
+        if rssi_in is not None:
+            self.rssi = rssi_in
+
+        if nav_ref_in == 'BT':
+
+            # Boat velocities are referenced to ADCP not the streambed and thus must be reversed
+            self.u_mps = np.copy(-1 * vel_in[0, :])
+            self.v_mps = np.copy(-1 * vel_in[1, :])
+            self.w_mps = np.copy(vel_in[2, :])
+            self.d_mps = np.copy(vel_in[3, :])
+
+            # Default filtering applied during initial construction of object
+            self.d_filter = 'Off'
+            self.d_filter_thresholds = {}
+            self.w_filter = 'Off'
+            self.w_filter_thresholds = {}
+            self.smooth_filter = 'Off'
+            self.interpolate = 'None'
+
+        else:
+
+            # GPS referenced boat velocity
+            self.u_mps = np.copy(vel_in[0, :])
+            self.v_mps = np.copy(vel_in[1, :])
+            self.w_mps = np.nan
+            self.d_mps = np.nan
+
+            # Default filtering
+            self.gps_diff_qual_filter = 2
+            self.gps_altitude_filter = 'Off'
+            self.gps_altitude_filter_change = 3
+            self.gps_HDOP_filter = 'Off'
+            self.gps_HDOP_filter_max = 2.5
+            self.gps_HDOP_filter_change = 1
+            self.smooth_filter = 'Off'
+            self.interpolate = 'None'
+
+        # Assign data to processed property
+        self.u_processed_mps = np.copy(self.u_mps)
+        self.v_processed_mps = np.copy(self.v_mps)
+
+        # Preallocate arrays
+        n_ensembles = vel_in.shape[1]
+        self.valid_data = repmat([True], 6, n_ensembles)
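+        # Rows of valid_data: 0 composite, 1 original, 2 d_filter/diff_qual,
+        # 3 w_filter/altitude, 4 smooth_filter, 5 beam_filter/HDOP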
+        self.smooth_speed = np.nan
+        self.smooth_upper_limit = np.nan
+        self.smooth_lower_limit = np.nan
+
+        # Determine number of raw invalid
+        # --------------------------------
+        # Find invalid raw data
+        valid_vel = np.tile([True], self.raw_vel_mps.shape)
+        valid_vel[np.isnan(self.raw_vel_mps)] = False
+
+        # Identify invalid ensembles
+        if nav_ref_in == 'BT':
+            self.valid_data[1, np.sum(valid_vel, 0) < 3] = False
+        else:
+            self.valid_data[1, np.sum(valid_vel, 0) < 2] = False
+
+        # Combine all filter data to composite valid data
+        self.valid_data[0, :] = np.all(self.valid_data[1:, :], 0)
+        self.num_invalid = np.sum(self.valid_data[0, :] == False)
+        self.processed_source = np.array([''] * self.u_mps.shape[0], dtype=object)
+        self.processed_source[np.where(self.valid_data[0, :] == True)] = nav_ref_in
+        self.processed_source[np.where(self.valid_data[0, :] == False)] = "INT"
+
+    def populate_from_qrev_mat(self, mat_data):
+        """Populates the object using data from previously saved QRev Matlab file.
+
+        Parameters
+        ----------
+        mat_data: mat_struct
+           Matlab data structure obtained from sio.loadmat
+        """
+
+        # Variables passed to the constructor
+
+        if type(mat_data.frequency_hz) is np.ndarray:
+            self.frequency_khz = mat_data.frequency_hz
+        elif np.isnan(mat_data.frequency_hz):
+            self.frequency_khz = None
+        else:
+            self.frequency_khz = np.array([mat_data.frequency_hz])
+        self.orig_coord_sys = mat_data.origCoordSys
+        self.nav_ref = mat_data.navRef
+
+        # Data requiring manipulation if only 1 ensemble
+        if type(mat_data.u_mps) is float:
+            self.raw_vel_mps = mat_data.rawVel_mps.reshape(mat_data.rawVel_mps.shape[0], 1)
+            # Coordinate transformed data
+            self.coord_sys = np.array([mat_data.coordSys])
+            self.u_mps = np.array([mat_data.u_mps])
+            self.v_mps = np.array([mat_data.v_mps])
+            self.w_mps = np.array([mat_data.w_mps])
+            self.d_mps = np.array([mat_data.d_mps])
+            if hasattr(mat_data, 'corr'):
+                self.corr = mat_data.corr.reshape(mat_data.corr.shape[0], 1)
+            if hasattr(mat_data, 'rssi'):
+                self.rssi = mat_data.rssi.reshape(mat_data.rssi.shape[0], 1)
+
+            # self.bottom_mode = np.array([mat_data.bottomMode])
+
+            # Processed data
+            self.u_processed_mps = np.array([mat_data.uProcessed_mps])
+            self.v_processed_mps = np.array([mat_data.vProcessed_mps])
+            self.processed_source = np.array([mat_data.processedSource])
+            self.valid_data = np.array([mat_data.validData]).astype(bool)
+            if self.valid_data.shape[1] > 1:
+                self.valid_data = self.valid_data.reshape(-1, 1)
+            self.smooth_speed = np.array([mat_data.smoothSpeed])
+            self.smooth_upper_limit = np.array([mat_data.smoothUpperLimit])
+            self.smooth_lower_limit = np.array([mat_data.smoothLowerLimit])
+        else:
+            self.raw_vel_mps = mat_data.rawVel_mps
+            # Coordinate transformed data
+            self.coord_sys = mat_data.coordSys
+            self.u_mps = mat_data.u_mps
+            self.v_mps = mat_data.v_mps
+            self.w_mps = mat_data.w_mps
+            self.d_mps = mat_data.d_mps
+
+            if hasattr(mat_data, 'corr'):
+                self.corr = mat_data.corr
+            if hasattr(mat_data, 'rssi'):
+                self.rssi = mat_data.rssi
+
+            # self.bottom_mode = mat_data.bottomMode
+
+            # Processed data
+            self.u_processed_mps = mat_data.uProcessed_mps
+            self.v_processed_mps = mat_data.vProcessed_mps
+            self.processed_source = mat_data.processedSource
+            self.valid_data = mat_data.validData.astype(bool)
+            self.smooth_speed = mat_data.smoothSpeed
+            self.smooth_upper_limit = mat_data.smoothUpperLimit
+            self.smooth_lower_limit = mat_data.smoothLowerLimit
+
+        self.bottom_mode = mat_data.bottomMode
+        self.num_invalid = mat_data.numInvalid
+        # Error velocity filter
+        if type(mat_data.dFilter) is np.ndarray:
+            self.d_filter = None
+        else:
+            self.d_filter = mat_data.dFilter
+
+        # Error velocity threshold
+        if type(mat_data.dFilterThreshold) is np.ndarray:
+            self.d_filter_thresholds = {}
+        else:
+            self.d_filter_thresholds = self.struct_to_dict(mat_data.dFilterThreshold)
+
+        # Vertical velocity filter
+        if type(mat_data.wFilter) is np.ndarray:
+            self.w_filter = None
+        else:
+            self.w_filter = self.struct_to_dict(mat_data.wFilter)
+
+        # Vertical velocity threshold
+        if type(mat_data.wFilterThreshold) is np.ndarray:
+            self.w_filter_thresholds = {}
+        else:
+            self.w_filter_thresholds = self.struct_to_dict(mat_data.wFilterThreshold)
+
+        # GPS quality filter
+        if type(mat_data.gpsDiffQualFilter) is np.ndarray:
+            self.gps_diff_qual_filter = None
+        else:
+            self.gps_diff_qual_filter = mat_data.gpsDiffQualFilter
+
+        # GPS altitude filter
+        if type(mat_data.gpsAltitudeFilter) is np.ndarray:
+            self.gps_altitude_filter = None
+        else:
+            self.gps_altitude_filter = mat_data.gpsAltitudeFilter
+
+        # GPS altitude threshold
+        if type(mat_data.gpsAltitudeFilterChange) is np.ndarray:
+            self.gps_altitude_filter_change = None
+        else:
+            self.gps_altitude_filter_change = mat_data.gpsAltitudeFilterChange
+
+        # HDOP filter
+        if type(mat_data.gpsHDOPFilter) is np.ndarray:
+            self.gps_HDOP_filter = None
+        else:
+            self.gps_HDOP_filter = mat_data.gpsHDOPFilter
+
+        # HDOP max threshold
+        if type(mat_data.gpsHDOPFilterMax) is np.ndarray:
+            self.gps_HDOP_filter_max = None
+        else:
+            self.gps_HDOP_filter_max = mat_data.gpsHDOPFilterMax
+
+        # HDOP change threshold
+        if type(mat_data.gpsHDOPFilterChange) is np.ndarray:
+            self.gps_HDOP_filter_change = None
+        else:
+            self.gps_HDOP_filter_change = mat_data.gpsHDOPFilterChange
+
+        # Other filters
+        self.smooth_filter = mat_data.smoothFilter
+        self.interpolate = mat_data.interpolate
+        self.beam_filter = mat_data.beamFilter
+
+        # Use measurement for filter
+        if hasattr(mat_data, 'use_measurement_thresholds'):
+            self.use_measurement_thresholds = mat_data.use_measurement_thresholds
+            self.d_meas_thresholds = self.struct_to_dict(mat_data.d_meas_thresholds)
+            self.w_meas_thresholds = self.struct_to_dict(mat_data.w_meas_thresholds)
+        else:
+            self.use_measurement_thresholds = False
+            self.d_meas_thresholds = {}
+            self.w_meas_thresholds = {}
+
+    @staticmethod
+    def struct_to_dict(struct):
+        """If input is a mat structure it converts it into a dictionary.
+
+        Parameters
+        ----------
+        struct: mat.struct or other
+            Data to be converted
+
+        Returns
+        -------
+        result: dict or other
+            Result of conversion
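+
+        Example
+        -------
+        A mat_struct with fields 'a' and 'b' is returned as {'a': ..., 'b': ...};
+        any input without a _fieldnames attribute is returned unchanged.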
+        """
+
+        try:
+            keys = struct._fieldnames
+            result = {}
+            for key in keys:
+                result[key] = struct.__dict__[key]
+        except AttributeError:
+            result = struct
+        return result
+
+    def change_coord_sys(self, new_coord_sys, sensors, adcp):
+        """This function allows the coordinate system to be changed.
+
+        The current implementation only allows a change to a higher order coordinate system: Beam - Inst - Ship - Earth.
+
+        Parameters
+        ----------
+        new_coord_sys: str
+            New coordinate_sys (Beam, Inst, Ship, Earth)
+        sensors: Sensors
+            Object of Sensors
+        adcp: InstrumentData
+            Object of InstrumentData
+        """
+
+        # Remove any trailing spaces
+        o_coord_sys = self.orig_coord_sys.strip()
+
+        # Initialize variables
+        orig_sys = 0
+        new_sys = 0
+        temp_t = None
+
+        if o_coord_sys != new_coord_sys.strip():
+            # Assign the transformation matrix and retrieve the sensor data
+            t_matrix = copy.deepcopy(adcp.t_matrix.matrix)
+            t_matrix_freq = copy.deepcopy(adcp.frequency_khz)
+            p = getattr(sensors.pitch_deg, sensors.pitch_deg.selected).data
+            r = getattr(sensors.roll_deg, sensors.roll_deg.selected).data
+            h = getattr(sensors.heading_deg, sensors.heading_deg.selected).data
+
+            # Modify the transformation matrix and heading, pitch, and roll values based on
+            # the original coordinate system so that only the needed values are used in
+            # computing the new coordinate system
+            if o_coord_sys == 'Beam':
+                orig_sys = 1
+            elif o_coord_sys == 'Inst':
+                orig_sys = 2
+                t_matrix[:] = np.eye(t_matrix.shape[0])
+            elif o_coord_sys == 'Ship':
+                orig_sys = 3
+                p = np.zeros(h.shape)
+                r = np.zeros(h.shape)
+                t_matrix[:] = np.eye(t_matrix.shape[0])
+            elif o_coord_sys == 'Earth':
+                orig_sys = 4
+
+            # Assign a value to the new coordinate system
+            if new_coord_sys == 'Beam':
+                new_sys = 1
+            elif new_coord_sys == 'Inst':
+                new_sys = 2
+            elif new_coord_sys == 'Ship':
+                new_sys = 3
+            elif new_coord_sys == 'Earth':
+                new_sys = 4
+
+            # Check to ensure the new coordinate system is a higher order than the original system
+            if new_sys - orig_sys > 0:
+
+                # Compute trig functions for heading, pitch, and roll
+                ch = cosd(h)
+                sh = sind(h)
+                cp = cosd(p)
+                sp = sind(p)
+                cr = cosd(r)
+                sr = sind(r)
+
+                vel_changed = np.tile([np.nan], self.raw_vel_mps.shape)
+                n_ens = self.raw_vel_mps.shape[1]
+
+                for ii in range(n_ens):
+
+                    # Compute matrix for heading, pitch, and roll
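+                    # (heading rotation about the z-axis, pitch about y, and
+                    # roll about x, composed into a single 3x3 rotation matrix)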
+                    hpr_matrix = [[((ch[ii] * cr[ii]) + (sh[ii]*sp[ii]*sr[ii])),
+                                   (sh[ii] * cp[ii]),
+                                   ((ch[ii] * sr[ii]) - sh[ii]*sp[ii]*cr[ii])],
+                                  [(-1 * sh[ii] * cr[ii])+(ch[ii] * sp[ii] * sr[ii]),
+                                   ch[ii] * cp[ii],
+                                   (-1 * sh[ii] * sr[ii])-(ch[ii] * sp[ii] * cr[ii])],
+                                  [(-1.*cp[ii] * sr[ii]),
+                                   sp[ii],
+                                   cp[ii] * cr[ii]]]
+
+                    # Transform beam coordinates
+                    if o_coord_sys == 'Beam':
+
+                        # Determine frequency index for transformation matrix
+                        if len(t_matrix.shape) > 2:
+                            idx_freq = np.where(t_matrix_freq == self.frequency_khz[ii])
+                            t_mult = np.copy(t_matrix[idx_freq])
+                        else:
+                            t_mult = np.copy(t_matrix)
+
+                        # Get velocity data
+                        vel = np.copy(np.squeeze(self.raw_vel_mps[:, ii]))
+
+                        # Check for invalid beams
+                        idx_3_beam = np.where(np.isnan(vel))
+
+                        # 3-beam solution
+                        if len(idx_3_beam[0]) == 1:
+
+                            # Special processing for RiverRay
+                            if adcp.model == 'RiverRay':
+
+                                # Set beam pairing
+                                beam_pair_1a = 0
+                                beam_pair_1b = 1
+                                beam_pair_2a = 2
+                                beam_pair_2b = 3
+
+                                # Set speed of sound correction variables Note: Currently (2013-09-06)
+                                # WinRiver II does not use a variable correction and assumes the speed
+                                # of sound and the reference speed of sound are the same.
+                                # sos = sensors.speed_of_sound_mps.selected.data[ii]
+                                # sos_reference = 1536
+                                # sos_correction = np.sqrt(((2 * sos_reference) / sos) **2 -1)
+
+                                sos_correction = np.sqrt(3)
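+                                # (with sos equal to sos_reference, the commented
+                                # formula above reduces to sqrt(2**2 - 1) = sqrt(3))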
+
+                                # Reconfigure transformation matrix based on which beam is invalid
+
+                                # Beam 1 invalid
+                                if idx_3_beam[0][0] == beam_pair_1a:
+
+                                    # Double valid beam in invalid pair
+                                    t_mult[0:2, beam_pair_1b] *= 2
+
+                                    # Eliminate invalid pair from vertical velocity computations
+                                    t_mult[2, :] = [0, 0, 1/sos_correction, 1/sos_correction]
+
+                                    # Reconstruct transformation matrix as a 3x3 matrix
+                                    t_mult = t_mult[0:3, [beam_pair_1b, beam_pair_2a, beam_pair_2b]]
+
+                                    # Reconstruct beam velocity matrix to use only valid beams
+                                    vel = vel[[beam_pair_1b, beam_pair_2a, beam_pair_2b]]
+
+                                    # Apply transformation matrix
+                                    temp_t = t_mult.dot(vel)
+
+                                    # Correct horizontal velocity for invalid pair with the vertical velocity
+                                    # and speed of sound correction
+                                    temp_t[0] = temp_t[0] + temp_t[2] * sos_correction
+
+                                # Beam 2 invalid
+                                if idx_3_beam[0][0] == beam_pair_1b:
+
+                                    # Double valid beam in invalid pair
+                                    t_mult[0:2, beam_pair_1a] = t_mult[0:2, beam_pair_1a] * 2
+
+                                    # Eliminate invalid pair from vertical velocity computations
+                                    t_mult[2, :] = [0, 0, 1/sos_correction, 1/sos_correction]
+
+                                    # Reconstruct transformation matrix as a 3x3 matrix
+                                    t_mult = t_mult[0:3, [beam_pair_1a, beam_pair_2a, beam_pair_2b]]
+
+                                    # Reconstruct beam velocity matrix to use only valid beams
+                                    vel = vel[[beam_pair_1a, beam_pair_2a, beam_pair_2b]]
+
+                                    # Apply transformation matrix
+                                    temp_t = t_mult.dot(vel)
+
+                                    # Correct horizontal velocity for invalid pair with the vertical
+                                    # velocity and speed of sound correction
+                                    temp_t[0] = temp_t[0] - temp_t[2] * sos_correction
+
+                                # Beam 3 invalid
+                                if idx_3_beam[0][0] == beam_pair_2a:
+
+                                    # Double valid beam in invalid pair
+                                    t_mult[0:2, beam_pair_2b] = t_mult[:2, beam_pair_2b] * 2
+
+                                    # Eliminate invalid pair from vertical velocity computations
+                                    t_mult[2, :] = [1/sos_correction, 1/sos_correction, 0, 0]
+
+                                    # Reconstruct transformation matrix as a 3x3 matrix
+                                    t_mult = t_mult[:3, [beam_pair_1a, beam_pair_1b, beam_pair_2b]]
+
+                                    # Reconstruct beam velocity matrix to use only valid beams
+                                    vel = vel[[beam_pair_1a, beam_pair_1b, beam_pair_2b]]
+
+                                    # Apply transformation matrix
+                                    temp_t = t_mult.dot(vel)
+
+                                    # Correct horizontal velocity for invalid pair with the vertical
+                                    # velocity and speed of sound correction
+                                    temp_t[1] = temp_t[1] - temp_t[2] * sos_correction
+
+                                # Beam 4 invalid
+                                if idx_3_beam[0][0] == beam_pair_2b:
+
+                                    # Double valid beam in invalid pair
+                                    t_mult[:2, beam_pair_2a] *= 2
+
+                                    # Eliminate invalid pair from vertical velocity computations
+                                    t_mult[2, :] = [1/sos_correction, 1/sos_correction, 0, 0]
+
+                                    # Reconstruct transformation matrix as a 3x3 matrix
+                                    t_mult = t_mult[:3, [beam_pair_1a, beam_pair_1b, beam_pair_2a]]
+
+                                    # Reconstruct beam velocity matrix to use only valid beams
+                                    vel = vel[[beam_pair_1a, beam_pair_1b, beam_pair_2a]]
+
+                                    # Apply transformation matrix
+                                    temp_t = t_mult.dot(vel)
+
+                                    # Correct horizontal velocity for invalid pair with the vertical
+                                    # velocity and speed of sound correction
+                                    temp_t[1] = temp_t[1] + temp_t[2] * sos_correction
+
+                            else:
+
+                                # 3 Beam solution for non-RiverRay
+                                vel_3_beam_zero = np.copy(vel)  # copy so zeroing does not alias vel
+                                vel_3_beam_zero[np.isnan(vel)] = 0
+                                vel_error = np.matmul(t_mult[3, :], vel_3_beam_zero)
+                                vel[idx_3_beam] = -1 * vel_error / np.squeeze(t_mult[3, idx_3_beam])
+                                temp_t = t_mult.dot(vel)
+
+                            # Apply transformation matrix for 3 beam solutions
+                            temp_thpr = np.array(hpr_matrix).dot(temp_t[:3])
+                            temp_thpr = np.hstack([temp_thpr, np.nan])
+
+                        else:
+
+                            # Apply transformation matrix for 4 beam solutions
+                            temp_t = t_mult.dot(np.squeeze(self.raw_vel_mps[:, ii]))
+
+                            # Apply hpr_matrix
+                            temp_thpr = np.array(hpr_matrix).dot(temp_t[:3])
+                            temp_thpr = np.hstack([temp_thpr, temp_t[3]])
+
+                    else:
+
+                        # Get velocity data
+                        vel = np.copy(np.squeeze(self.raw_vel_mps[:, ii]))
+
+                        # Apply heading pitch roll for inst and ship coordinate data
+                        temp_thpr = np.array(hpr_matrix).dot(vel[:3])
+                        temp_thpr = np.hstack([temp_thpr, vel[3]])
+
+                    vel_changed[:, ii] = temp_thpr.T
+
+                # Assign results to object
+                self.u_mps = -1 * vel_changed[0, :]
+                self.v_mps = -1 * vel_changed[1, :]
+                self.w_mps = vel_changed[2, :]
+                self.d_mps = vel_changed[3, :]
+                self.coord_sys = new_coord_sys
+                self.u_processed_mps = np.copy(self.u_mps)
+                self.v_processed_mps = np.copy(self.v_mps)
+
+    def change_heading(self, heading_change):
+        """Rotates the boat velocities for a change in heading due to a change in
+        magnetic variation, heading offset, or heading source.
+
+        Parameters
+        ----------
+        heading_change: float
+            Change in the magnetic variation in degrees
+        """
+
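+        # Worked example (illustrative): with u = 1, v = 0 and
+        # heading_change = 90, cart2pol gives direction = 0, mag = 1, and
+        # pol2cart(0 - pi/2, 1) returns approximately (0, -1).
+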
+        # Apply change to processed data
+        direction, mag = cart2pol(self.u_processed_mps, self.v_processed_mps)
+        self.u_processed_mps, self.v_processed_mps = pol2cart(direction - np.deg2rad(heading_change), mag)
+
+        # Apply change to unprocessed data
+        direction, mag = cart2pol(self.u_mps, self.v_mps)
+        self.u_mps, self.v_mps = pol2cart(direction - np.deg2rad(heading_change), mag)
+
+    def apply_interpolation(self, transect, interpolation_method=None):
+        """Function to apply interpolations to navigation data.
+
+        Parameters
+        ----------
+        transect: TransectData
+            Object of TransectData
+        interpolation_method: str
+            Specified interpolation method if different from that in self
+        """
+
+        # Reset processed data
+        if self.u_mps is not None:
+            self.u_processed_mps = np.copy(self.u_mps)
+            self.v_processed_mps = np.copy(self.v_mps)
+            self.u_processed_mps[self.valid_data[0, :] == False] = np.nan
+            self.v_processed_mps[self.valid_data[0, :] == False] = np.nan
+
+            # Determine interpolation methods to apply
+            if interpolation_method is None:
+                interpolation_method = self.interpolate
+            else:
+                self.interpolate = interpolation_method
+
+            # Apply specified interpolation method
+
+            if interpolation_method == 'None':
+                # Sets invalid data to nan with no interpolation
+                self.interpolate_none()
+
+            elif interpolation_method == 'ExpandedT':
+                # The actual interpolation is done in QComp; backfill with the next valid value here
+                self.interpolate_next()
+
+            elif interpolation_method == 'Hold9':
+                # Interpolates using SonTek method of holding last valid for up to 9 samples
+                self.interpolate_hold_9()
+
+            elif interpolation_method == 'HoldLast':
+                # Interpolates by holding last valid indefinitely
+                self.interpolate_hold_last()
+
+            elif interpolation_method == 'Linear':
+                # Interpolates using linear interpolation
+                self.interpolate_linear(transect)
+
+            elif interpolation_method == 'Smooth':
+                # Interpolates using smooth interpolation
+                self.interpolate_smooth(transect)
+
+            elif interpolation_method == 'TRDI':
+                # For TRDI the interpolation is done on discharge, not on velocities
+                self.interpolate_none()
+
+    def apply_composite(self, u_composite, v_composite, composite_source):
+        """Stores composite velocities and sources.
+
+        Parameters
+        ----------
+        u_composite: np.array(float)
+            Composite u-velocity component, in m/s
+        v_composite: np.array(float)
+            Composite v-velocity component, in m/s
+        composite_source: np.array
+            Reference used for each ensemble velocity.
+        """
+
+        self.u_processed_mps = u_composite
+        self.v_processed_mps = v_composite
+        self.processed_source[composite_source == 1] = 'BT'
+        self.processed_source[composite_source == 2] = 'GGA'
+        self.processed_source[composite_source == 3] = 'VTG'
+        self.processed_source[composite_source == 0] = 'INT'
+        self.processed_source[composite_source == -1] = 'INV'
+
+    def sos_correction(self, ratio):
+        """Correct boat velocity for a change in speed of sound.
+
+        Parameters
+        ----------
+        ratio: float
+            Ratio of new and old speed of sound
+        """
+
+        # Correct velocities
+        self.u_mps = self.u_mps * ratio
+        self.v_mps = self.v_mps * ratio
+        self.w_mps = self.w_mps * ratio
+
+    def interpolate_hold_9(self):
+        """This function applies Sontek's approach to maintaining the last valid boat speed for up to 9 invalid samples.
+        """
+
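+        # Behavior sketch: the last valid velocity is copied forward for up to
+        # 9 consecutive invalid ensembles; in longer gaps the ensembles beyond
+        # the ninth are left as NaN.
+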
+        # Initialize variables
+        n_ensembles = self.u_mps.shape[0]
+
+        # Get data from object
+        self.u_processed_mps = np.copy(self.u_mps)
+        self.v_processed_mps = np.copy(self.v_mps)
+        self.u_processed_mps[self.valid_data[0, :] == False] = np.nan
+        self.v_processed_mps[self.valid_data[0, :] == False] = np.nan
+
+        n_invalid = 0
+        # Process data by ensembles
+        for n in range(n_ensembles):
+            # Check if ensemble is invalid and number of consecutive invalids is less than 9
+            if self.valid_data[0, n] == False and n_invalid < 9:
+                self.u_processed_mps[n] = self.u_processed_mps[n - 1]
+                self.v_processed_mps[n] = self.v_processed_mps[n - 1]
+                n_invalid += 1
+            else:
+                n_invalid = 0
+
+    def interpolate_none(self):
+        """This function removes any interpolation from the data and sets filtered data to nan."""
+
+        # Reset processed data
+        self.u_processed_mps = np.copy(self.u_mps)
+        self.v_processed_mps = np.copy(self.v_mps)
+        self.u_processed_mps[self.valid_data[0, :] == False] = np.nan
+        self.v_processed_mps[self.valid_data[0, :] == False] = np.nan
+
+    def interpolate_hold_last(self):
+        """This function holds the last valid value until the next valid data point."""
+
+        if self.u_mps is not None:
+            # Initialize variables
+            n_ensembles = len(self.u_mps)
+
+            # Get data from object
+            self.u_processed_mps = np.copy(self.u_mps)
+            self.v_processed_mps = np.copy(self.v_mps)
+            self.u_processed_mps[self.valid_data[0, :] == False] = np.nan
+            self.v_processed_mps[self.valid_data[0, :] == False] = np.nan
+
+            # Process data by ensembles
+            for n in range(1, n_ensembles):
+                # Hold the previous value for any invalid ensemble
+                if self.valid_data[0, n] == False:
+                    self.u_processed_mps[n] = self.u_processed_mps[n - 1]
+                    self.v_processed_mps[n] = self.v_processed_mps[n - 1]
+
+    def interpolate_next(self):
+        """This function uses the next valid data to back fill for invalid"""
+
+        # Get valid ensembles
+        valid_ens = self.valid_data[0, :]
+
+        # Process ensembles
+        n_ens = len(valid_ens)
+
+        for n in np.arange(0, n_ens-1)[::-1]:
+            if not valid_ens[n]:
+                self.u_processed_mps[n] = self.u_processed_mps[n+1]
+                self.v_processed_mps[n] = self.v_processed_mps[n+1]
+
+    def interpolate_smooth(self, transect):
+        """This function interpolates data flagged invalid using the smooth function.
+
+        Parameters
+        ----------
+        transect: TransectData
+            Object of TransectData
+        """
+
+        # Get data from object
+
+        u = np.copy(self.u_mps)
+        v = np.copy(self.v_mps)
+        u[self.valid_data[0, :] == False] = np.nan
+        v[self.valid_data[0, :] == False] = np.nan
+
+        # Compute ens_time
+        ens_time = np.nancumsum(transect.date_time.ens_duration_sec)
+
+        # Apply smooth to each component
+        u_smooth = rloess(ens_time, u, 10)
+        v_smooth = rloess(ens_time, v, 10)
+
+        # Save data in object
+        self.u_processed_mps = u
+        self.v_processed_mps = v
+        self.u_processed_mps[np.isnan(u)] = u_smooth[np.isnan(u)]
+        self.v_processed_mps[np.isnan(v)] = v_smooth[np.isnan(v)]
+
+    def interpolate_linear(self, transect):
+        """This function interpolates data flagged invalid using linear interpolation.
+
+        Parameters
+        ----------
+        transect: TransectData
+            Object of TransectData
+        """
+
+        u = np.copy(self.u_mps)
+        v = np.copy(self.v_mps)
+
+        valid = np.isnan(u) == False
+
+        # Check for valid data
+        if sum(valid) > 1 and sum(self.valid_data[0, :]) > 1:
+
+            # Compute ens_time
+            ens_time = np.nancumsum(transect.date_time.ens_duration_sec)
+
+            # Apply linear interpolation
+            self.u_processed_mps = np.interp(x=ens_time,
+                                             xp=ens_time[self.valid_data[0, :]],
+                                             fp=u[self.valid_data[0, :]],
+                                             left=np.nan,
+                                             right=np.nan)
+            # Apply linear interpolation
+            self.v_processed_mps = np.interp(x=ens_time,
+                                             xp=ens_time[self.valid_data[0, :]],
+                                             fp=v[self.valid_data[0, :]],
+                                             left=np.nan,
+                                             right=np.nan)
+
+    def interpolate_composite(self, transect):
+        """This function interpolates processed data flagged invalid using linear interpolation.
+
+        Parameters
+        ----------
+        transect: TransectData
+            Object of TransectData
+        """
+
+        u = np.copy(self.u_processed_mps)
+        v = np.copy(self.v_processed_mps)
+
+        valid = np.isnan(u) == False
+
+        # Check for valid data
+        if np.sum(valid) > 1:
+
+            # Compute ens_time
+            ens_time = np.nancumsum(transect.date_time.ens_duration_sec)
+
+            # Ensure monotonic input
+            diff_time = np.diff(ens_time[valid])
+            idx = np.where(diff_time == 0)[0]
+            mono_array = np.vstack([ens_time[valid], u[valid], v[valid]])
+            # Replace non-monotonic times with average values
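+            # (e.g., times [0, 1, 1, 2] collapse to [0, 1, 2], averaging the
+            # two samples recorded at t = 1)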
+            for i in idx[::-1]:
+                mono_array[1, i] = np.nanmean(mono_array[1, i:i+2])
+                mono_array[2, i] = np.nanmean(mono_array[2, i:i + 2])
+                mono_array = np.delete(mono_array, i+1, 1)
+            # Apply linear interpolation
+            self.u_processed_mps = np.interp(ens_time,
+                                             mono_array[0, :],
+                                             mono_array[1, :])
+            # Apply linear interpolation
+            self.v_processed_mps = np.interp(ens_time,
+                                             mono_array[0, :],
+                                             mono_array[2, :])
+
+    def apply_filter(self, transect, beam=None, difference=None, difference_threshold=None, vertical=None,
+                     vertical_threshold=None, other=None):
+        """Function to apply filters to navigation data.
+
+        More than one filter can be applied during a single call.
+
+        Parameters
+        ----------
+        transect: TransectData
+            Object of TransectData
+        beam: int
+            Setting for beam filter (3, 4, -1)
+        difference: str
+            Setting for difference velocity filter (Auto, Manual, Off)
+        difference_threshold: float
+            Threshold for manual setting
+        vertical: str
+            Setting for vertical velocity filter (Auto, Manual, Off)
+        vertical_threshold: float
+            Threshold for manual setting
+        other: bool
+            Setting for the other (smooth) filter
+        """
+
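+        # When every argument is None the set below collapses to {None} with
+        # length 1 and the previously stored filter settings are reapplied.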
+        if len({beam, difference, difference_threshold, vertical, vertical_threshold, other}) > 1:
+
+            # Filter based on number of valid beams
+            if beam is not None:
+                self.filter_beam(setting=beam)
+
+            # Filter based on difference velocity
+            if difference is not None:
+                if difference == 'Manual':
+                    self.filter_diff_vel(setting=difference, threshold=difference_threshold)
+                else:
+                    self.filter_diff_vel(setting=difference)
+
+            # Filter based on vertical velocity
+            if vertical is not None:
+                if vertical == 'Manual':
+                    self.filter_vert_vel(setting=vertical, threshold=vertical_threshold)
+                else:
+                    self.filter_vert_vel(setting=vertical)
+
+            # Filter based on robust loess smooth
+            if other is not None:
+                self.filter_smooth(setting=other, transect=transect)
+
+        else:
+            self.filter_beam(setting=self.beam_filter)
+            self.filter_diff_vel(setting=self.d_filter, threshold=self.d_filter_thresholds)
+            self.filter_vert_vel(setting=self.w_filter, threshold=self.w_filter_thresholds)
+            self.filter_smooth(setting=self.smooth_filter, transect=transect)
+
+        # Apply previously specified interpolation method
+        self.apply_interpolation(transect)
+
+    def filter_beam(self, setting):
+        """Applies beam filter.
+
+        The determination of invalid data depends on whether
+        3-beam or 4-beam solutions are acceptable. This function can be
+        applied by specifying 3 or 4 beam solutions, or by setting
+        beam_filter to -1, which will trigger an automatic mode. The
+        automatic mode will find all 3-beam solutions and then compare
+        the velocity of each 3-beam solution to the nearest 4-beam solutions
+        before and after the 3-beam solution. If the 3-beam solution is
+        within 50% of the average of the neighboring 4-beam solutions the
+        data are deemed valid, otherwise invalid. Thus, in automatic mode only
+        those data from 3-beam solutions that differ sufficiently
+        from the 4-beam solutions are marked invalid. The process happens
+        for each ensemble. If the number of beams is specified manually
+        it is applied uniformly for the whole transect.
+
+        Parameters
+        ----------
+        setting: int
+            Setting for beam filter (3, 4, -1)
+        """
+
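+        # Worked example of the automatic mode (illustrative numbers): a
+        # 3-beam ensemble with u = 1.8 m/s between 4-beam neighbors of 1.0 and
+        # 1.1 m/s gives u_ratio = 1.8 / 1.05 - 1 = 0.71 > 0.5, so the ensemble
+        # is marked invalid if v_ratio also exceeds 0.5.
+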
+        self.beam_filter = setting
+
+        # In manual mode determine the number of ensembles with too few valid
+        # beams, allowing 3-beam solutions if selected
+        if self.beam_filter > 0:
+
+            # Find invalid raw data
+            valid_vel = np.ones(self.raw_vel_mps.shape)
+            valid_vel[np.isnan(self.raw_vel_mps)] = 0
+
+            # Determine how many beams transformed coordinates are valid
+            valid_vel_sum = np.sum(valid_vel, 0)
+            valid = np.ones(valid_vel_sum.shape)
+
+            # Compare number of valid beams or coordinates to filter value
+            valid[valid_vel_sum < self.beam_filter] = False
+
+            # Save logical of valid data to object
+            self.valid_data[5, :] = valid
+
+        else:
+
+            # Apply automatic filter
+            # ----------------------
+            # Find all 3 beam solutions
+            self.filter_beam(3)
+            beam_3_valid_data = copy.deepcopy(self.valid_data)
+            self.filter_beam(4)
+            valid_3_beams = np.logical_xor(beam_3_valid_data[5, :], self.valid_data[5, :])
+            n_ens = len(self.valid_data[5, :])
+            idx = np.where(valid_3_beams == True)[0]
+
+            # If 3 beam solutions exist evaluate their validity
+            if len(idx) > 0:
+
+                # Identify 3 beam solutions that appear to be invalid
+                n3_beam_ens = len(idx)
+
+                # Check each three beam solution for validity
+                for m in range(n3_beam_ens):
+
+                    # Use before and after values to check 3-beam solution
+                    # but make sure the ensemble is not the first or last.
+                    if (idx[m] > 1) and (idx[m] < n_ens):
+
+                        # Find nearest 4 beam solutions before and after
+                        # 3 beam solution
+                        ref_idx_before = np.where(self.valid_data[5, :idx[m]] == True)[0]
+                        if len(ref_idx_before) > 0:
+                            ref_idx_before = ref_idx_before[-1]
+                        else:
+                            ref_idx_before = None
+
+                        ref_idx_after = np.where(self.valid_data[5, idx[m]:] == True)[0]
+                        if len(ref_idx_after) > 0:
+                            ref_idx_after = idx[m] + ref_idx_after[0]
+                        else:
+                            ref_idx_after = None
+
+                        if (ref_idx_after is not None) and (ref_idx_before is not None):
+                            u_ratio = (self.u_mps[idx[m]]) / ((self.u_mps[ref_idx_before]
+                                                               + self.u_mps[ref_idx_after]) / 2.) - 1
+                            v_ratio = (self.v_mps[idx[m]]) / ((self.v_mps[ref_idx_before]
+                                                               + self.v_mps[ref_idx_after]) / 2.) - 1
+                        else:
+                            u_ratio = 1
+                            v_ratio = 1
+
+                        # If both u and v of the 3-beam solution differ from
+                        # the 4-beam average by more than 50% mark it invalid
+                        if (np.abs(u_ratio) > 0.5) and (np.abs(v_ratio) > 0.5):
+                            self.valid_data[5, idx[m]] = False
+                        else:
+                            self.valid_data[5, idx[m]] = True
+
+            self.beam_filter = -1
+
+        # Combine all filter data to composite valid data
+        self.valid_data[0, :] = np.all(self.valid_data[1:, :], 0)
+        self.num_invalid = np.sum(self.valid_data[0, :] == False)
+
+    def filter_diff_vel(self, setting, threshold=None):
+        """Applies either manual or automatic filtering of the difference
+        (error) velocity.
+
+        The automatic mode is based on the following:
+        This filter is based on the assumption that the error velocity
+        should follow a Gaussian distribution. Therefore, 5 IQR
+        should encompass all of the valid data. The median and
+        limits (multiplier * IQR) are computed in an iterative
+        process until filtering out additional data does not change the
+        computed IQR.
+
+        Parameters
+        ----------
+        setting: str
+            Difference velocity setting (Off, Manual, Auto)
+        threshold: float
+            If manual, the user specified threshold
+        """
+
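+        # In Auto mode the computed thresholds are stored per frequency as
+        # {freq: [max, min]}, e.g. (illustrative values):
+        # {'600': [0.45, -0.45], '1200': [0.30, -0.30]}
+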
+        self.d_filter = setting
+        if setting == 'Manual':
+            self.d_filter_thresholds = threshold
+
+        # Apply selected method
+        if self.d_filter == 'Manual':
+            d_vel_max_ref = np.abs(self.d_filter_thresholds)
+            d_vel_min_ref = -1 * d_vel_max_ref
+            invalid_idx = np.where(np.logical_or(nan_greater(self.d_mps, d_vel_max_ref),
+                                                 nan_less(self.d_mps, d_vel_min_ref)))[0]
+        elif self.d_filter == 'Off':
+            invalid_idx = np.array([])
+
+        elif self.d_filter == 'Auto':
+            if self.use_measurement_thresholds:
+                freq_ensembles = self.frequency_khz.astype(int).astype(str)
+                invalid_idx = np.array([])
+                for freq in self.d_meas_thresholds.keys():
+                    filter_data = np.copy(self.d_mps)
+                    filter_data[freq_ensembles != freq] = np.nan
+                    idx = np.where(np.logical_or(np.greater(filter_data, self.d_meas_thresholds[freq][0]),
+                                                 np.less(filter_data, self.d_meas_thresholds[freq][1])))[0]
+                    if idx.size > 0:
+                        if invalid_idx.size > 0:
+                            invalid_idx = np.hstack((invalid_idx, idx))
+                        else:
+                            invalid_idx = idx
+            else:
+                freq_used = np.unique(self.frequency_khz).astype(int).astype(str)
+                freq_ensembles = self.frequency_khz.astype(int).astype(str)
+                self.d_filter_thresholds = {}
+                invalid_idx = np.array([])
+                for freq in freq_used:
+                    filter_data = np.copy(self.d_mps)
+                    filter_data[freq_ensembles != freq] = np.nan
+                    d_vel_max_ref, d_vel_min_ref = self.iqr_filter(filter_data)
+                    self.d_filter_thresholds[freq] = [d_vel_max_ref, d_vel_min_ref]
+                    idx = np.where(np.logical_or(nan_greater(filter_data, d_vel_max_ref),
+                                                 nan_less(filter_data, d_vel_min_ref)))[0]
+                    if idx.size > 0:
+                        if invalid_idx.size > 0:
+                            invalid_idx = np.hstack((invalid_idx, idx))
+                        else:
+                            invalid_idx = idx
+        else:
+            invalid_idx = np.array([])
+
+        # Set valid data row 3 for difference velocity filter results
+        self.valid_data[2, :] = True
+        if invalid_idx.size > 0:
+            self.valid_data[2, invalid_idx] = False
+
+        # Combine all filter data to composite filter data
+        self.valid_data[0, :] = np.all(self.valid_data[1:, :], 0)
+        self.num_invalid = np.sum(self.valid_data[0, :] == False)
+
+    def filter_vert_vel(self, setting, threshold=None):
+        """Applies either manual or automatic filtering of the vertical
+        velocity.  Uses same assumptions as difference filter.
+
+        Parameters
+        ----------
+        setting: str
+            Filter setting (Off, Manual, Auto)
+        threshold: float
+            If setting is manual, the user specified threshold
+        """
+
+        # Set vertical velocity filter properties
+        self.w_filter = setting
+        if setting == 'Manual':
+            self.w_filter_thresholds = threshold
+
+        # Apply selected method
+        if self.w_filter == 'Manual':
+            w_vel_max_ref = np.abs(self.w_filter_thresholds)
+            w_vel_min_ref = -1 * w_vel_max_ref
+            invalid_idx = np.where(np.logical_or(nan_greater(self.w_mps, w_vel_max_ref),
+                                                 nan_less(self.w_mps, w_vel_min_ref)))[0]
+
+        elif self.w_filter == 'Off':
+            invalid_idx = np.array([])
+
+        elif self.w_filter == 'Auto':
+            if self.use_measurement_thresholds:
+                freq_ensembles = self.frequency_khz.astype(int).astype(str)
+                invalid_idx = np.array([])
+                for freq in self.w_meas_thresholds.keys():
+                    filter_data = np.copy(self.w_mps.astype(float))
+                    filter_data[freq_ensembles != freq] = np.nan
+                    idx = np.where(np.logical_or(np.greater(filter_data, self.w_meas_thresholds[freq][0]),
+                                                 np.less(filter_data, self.w_meas_thresholds[freq][1])))[0]
+                    if idx.size > 0:
+                        if invalid_idx.size > 0:
+                            invalid_idx = np.hstack((invalid_idx, idx))
+                        else:
+                            invalid_idx = idx
+            else:
+                freq_used = np.unique(self.frequency_khz).astype(int).astype(str)
+                freq_ensembles = self.frequency_khz.astype(int).astype(str)
+                self.w_filter_thresholds = {}
+                invalid_idx = np.array([])
+                for freq in freq_used:
+                    filter_data = np.copy(self.w_mps)
+                    filter_data[freq_ensembles != freq] = np.nan
+                    w_vel_max_ref, w_vel_min_ref = self.iqr_filter(filter_data)
+                    self.w_filter_thresholds[freq] = [w_vel_max_ref, w_vel_min_ref]
+                    idx = np.where(np.logical_or(nan_greater(filter_data, w_vel_max_ref),
+                                                 nan_less(filter_data, w_vel_min_ref)))[0]
+                    if idx.size > 0:
+                        if invalid_idx.size > 0:
+                            invalid_idx = np.hstack((invalid_idx, idx))
+                        else:
+                            invalid_idx = idx
+        else:
+            invalid_idx = np.array([])
+
+        # Set valid data row 4 for vertical velocity filter results
+        self.valid_data[3, :] = True
+        if invalid_idx.size > 0:
+            self.valid_data[3, invalid_idx] = False
+
+        # Combine all filter data to composite valid data
+        self.valid_data[0, :] = np.all(self.valid_data[1:, :], 0)
+        self.num_invalid = np.sum(self.valid_data[0, :] == False)
+
+    @staticmethod
+    def iqr_filter(data, multiplier=5, minimum_window=0.01):
+        """Applies the IQR filter to bottom track data.
+
+        Parameters
+        ----------
+        data: np.ndarray(float)
+            Array of difference or vertical velocity data
+        multiplier: int
+            Number of IQR's to use to set the threshold
+        minimum_window: float
+            Minimum allowable threshold
+
+        Returns
+        -------
+        data_max_ref: float
+            Maximum threshold
+        data_min_ref: float
+            Minimum threshold
+        """
+        data_max_ref = np.nan
+        data_min_ref = np.nan
+
+        # Check to make sure there are data to process
+        if data.size > 0 and np.any(np.logical_not(np.isnan(data))):
+            # Initialize loop controllers
+            k = 0
+            iqr_diff = 1
+
+            # Loop until no additional data are removed
+            while iqr_diff != 0 and k < 1000:
+                k += 1
+
+                # Compute interquartile range
+                data_iqr = iqr(data)
+                threshold_window = multiplier * data_iqr
+                if threshold_window < minimum_window:
+                    threshold_window = minimum_window
+
+                # Compute maximum and minimum thresholds
+                data_max_ref = np.nanmedian(data) + threshold_window
+                data_min_ref = np.nanmedian(data) - threshold_window
+
+                # Identify valid and invalid data
+                data_less_idx = np.where(nan_less_equal(data, data_max_ref))[0]
+                data_greater_idx = np.where(nan_greater_equal(data, data_min_ref))[0]
+                data_good_idx = list(np.intersect1d(data_less_idx, data_greater_idx))
+
+                # Update filtered data array
+                data = copy.deepcopy(data[data_good_idx])
+
+                # Determine differences due to last filter iteration
+                if len(data) > 0:
+                    data_iqr2 = iqr(data)
+                    iqr_diff = data_iqr2 - data_iqr
+                else:
+                    iqr_diff = 0
+
+        return data_max_ref, data_min_ref
+
+    def filter_smooth(self, transect, setting):
+        """This filter employs a running trimmed standard deviation filter to
+        identify and mark spikes in the boat speed.
+
+        First a robust Loess smooth is fitted to the boat speed time series and
+        residuals between the raw data and the smoothed line are computed. The
+        trimmed standard deviation is computed by selecting the number of residuals
+        specified by "halfwidth" before the target point and after the target point,
+        but not including the target point. These values are then sorted, and the points
+        with the highest and lowest values are removed from the subset, and the
+        standard deviation of the trimmed subset is computed. The filter
+        criteria are determined by multiplying the standard deviation by a
+        user-specified multiplier. These criteria define a maximum and minimum
+        acceptable residual. Data falling outside the criteria are set to nan.
+
+        Recommended filter settings are:
+        filterWidth=10;
+        halfWidth=10;
+        multiplier=9;
+
+        David S. Mueller, USGS, OSW
+        9/8/2005
+
+        Parameters
+        ----------
+        transect: TransectData
+            Object of TransectData
+        setting: str
+            Filter setting ("On" or "Off")
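+
+        Example
+        -------
+        A minimal sketch (assumes `bt_data` is a populated BoatData object and
+        `transect` a populated TransectData object)::
+
+            bt_data.filter_smooth(transect=transect, setting='On')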
+        """
+
+        # Set property
+        self.smooth_filter = setting
+
+        # Compute ens_time
+        ens_time = np.nancumsum(transect.date_time.ens_duration_sec)
+        n_ensembles = len(ens_time)
+
+        # Determine if smooth filter should be applied
+        if self.smooth_filter == 'On':
+            # Initialize arrays
+            self.smooth_speed = repmat([np.nan], 1, n_ensembles)
+            self.smooth_upper_limit = repmat([np.nan], 1, n_ensembles)
+            self.smooth_lower_limit = repmat([np.nan], 1, n_ensembles)
+
+            # Boat velocity components
+            b_vele = np.copy(self.u_mps)
+            b_veln = np.copy(self.v_mps)
+
+            # Set filter parameters
+            filter_width = 10
+            half_width = 10
+            multiplier = 9
+            cycles = 3
+
+            # Initialize variables
+            bt_bad_idx = []
+            upper_limit = 0
+            lower_limit = 0
+
+            # Compute speed and direction of boat
+            direct, speed = cart2pol(b_vele, b_veln)
+
+            # Compute residuals from a robust Loess smooth
+            speed_smooth = rloess(ens_time, speed, filter_width)
+            speed_res = speed - speed_smooth
+
+            # Apply a trimmed standard deviation filter multiple times
+            for i in range(cycles):
+                filter_array = BoatData.run_std_trim(half_width, speed_res.T)
+
+                # Compute filter bounds
+                upper_limit = speed_smooth + multiplier * filter_array
+                lower_limit = speed_smooth - multiplier * filter_array
+
+                # Apply filter to residuals
+                bt_bad_idx = np.where(np.logical_or(np.greater(speed, upper_limit), np.less(speed, lower_limit)))[0]
+                speed_res[bt_bad_idx] = np.nan
+
+            # Update valid_data property
+            self.valid_data[4, :] = True
+            self.valid_data[4, bt_bad_idx] = False
+            self.valid_data[4, self.valid_data[1, :] == False] = True
+            self.smooth_upper_limit = upper_limit
+            self.smooth_lower_limit = lower_limit
+            self.smooth_speed = speed_smooth
+
+        else:
+
+            # No filter applied; all data assumed valid
+            self.valid_data[4, :] = True
+            self.smooth_upper_limit = np.nan
+            self.smooth_lower_limit = np.nan
+            self.smooth_speed = np.nan
+
+        # Combine all filter data to composite valid data
+        self.valid_data[0, :] = np.all(self.valid_data[1:, :], 0)
+        self.num_invalid = np.sum(self.valid_data[0, :] == False, 0)
+
+    def apply_gps_filter(self, transect, differential=None, altitude=None, altitude_threshold=None,
+                         hdop=None, hdop_max_threshold=None, hdop_change_threshold=None, other=None):
+        """Applies filters to GPS referenced boat velocity data.
+
+        Parameters
+        ----------
+        transect: TransectData
+            Object of TransectData
+        differential: str
+            Differential filter setting (1, 2, 4)
+        altitude: str
+            New setting for altitude filter (Off, Manual, Auto)
+        altitude_threshold: float
+            Threshold provided by user for manual altitude setting
+        hdop: str
+            Filter setting (Off, Manual, Auto)
+        hdop_max_threshold: float
+            Maximum HDOP threshold
+        hdop_change_threshold: float
+            HDOP change threshold
+        other: str
+            Other filter setting, typically the smooth filter ("On" or "Off").
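+
+        Example
+        -------
+        A minimal sketch (assumes `gga_data` is a populated BoatData object with
+        nav_ref 'GGA'; argument values are illustrative only)::
+
+            gga_data.apply_gps_filter(transect, differential='2',
+                                      altitude='Auto', hdop='Auto')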
+        """
+
+        if any(item is not None for item in (differential, altitude, altitude_threshold, hdop,
+                                             hdop_max_threshold, hdop_change_threshold, other)):
+            # Differential filter only applies to GGA data, defaults to 1 for VTG
+            if differential is not None:
+                if self.nav_ref == 'GGA':
+                    self.filter_diff_qual(gps_data=transect.gps, setting=int(differential))
+                else:
+                    self.filter_diff_qual(gps_data=transect.gps, setting=1)
+
+            # Altitude filter only applies to GGA data
+            if altitude is not None:
+                if (altitude == 'Manual') and (self.nav_ref == 'GGA'):
+                    self.filter_altitude(gps_data=transect.gps, setting=altitude, threshold=altitude_threshold)
+                elif self.nav_ref == 'GGA':
+                    self.filter_altitude(gps_data=transect.gps, setting=altitude)
+
+            if hdop is not None:
+                if hdop == 'Manual':
+                    self.filter_hdop(gps_data=transect.gps, setting=hdop, max_threshold=hdop_max_threshold,
+                                     change_threshold=hdop_change_threshold)
+                else:
+                    self.filter_hdop(gps_data=transect.gps, setting=hdop)
+
+            if other is not None:
+                self.filter_smooth(transect=transect, setting=other)
+        else:
+            self.filter_diff_qual(gps_data=transect.gps)
+            self.filter_altitude(gps_data=transect.gps)
+            self.filter_hdop(gps_data=transect.gps)
+            self.filter_smooth(transect=transect, setting=self.smooth_filter)
+
+        # Apply previously specified interpolation method
+        self.apply_interpolation(transect=transect)
+
+    def filter_diff_qual(self, gps_data, setting=None):
+        """Filters GPS data based on the minimum acceptable differential correction quality.
+
+        Parameters
+        ----------
+        gps_data: GPSData
+            Object of GPSData
+        setting: int
+            Filter setting (1, 2, 4).
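+
+        Example
+        -------
+        A minimal sketch (assumes `gga_data` is a populated BoatData object);
+        a setting of 2 marks ensembles without differential correction invalid::
+
+            gga_data.filter_diff_qual(gps_data=transect.gps, setting=2)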
+        """
+
+        # New filter setting if provided
+        if setting is not None:
+            self.gps_diff_qual_filter = setting
+
+        # Reset valid_data property
+        self.valid_data[2, :] = True
+        self.valid_data[5, :] = True
+
+        # Determine and apply appropriate filter type
+        if gps_data.diff_qual_ens is not None:
+            self.valid_data[2, np.isnan(gps_data.diff_qual_ens)] = False
+            if self.gps_diff_qual_filter is not None:
+                # Autonomous
+                if self.gps_diff_qual_filter == 1:
+                    self.valid_data[2, gps_data.diff_qual_ens < 1] = False
+                # Differential correction
+                elif self.gps_diff_qual_filter == 2:
+                    self.valid_data[2, gps_data.diff_qual_ens < 2] = False
+                # RTK
+                elif self.gps_diff_qual_filter == 4:
+                    self.valid_data[2, gps_data.diff_qual_ens < 4] = False
+
+                # If there is no indication of the quality, assume 1 for VTG
+                if self.nav_ref == 'VTG':
+                    self.valid_data[2, np.isnan(gps_data.diff_qual_ens)] = True
+            else:
+                self.valid_data[2, :] = False
+
+        # Combine all filter data to composite valid data
+        self.valid_data[0, :] = np.all(self.valid_data[1:, :], 0)
+        self.num_invalid = np.sum(self.valid_data[0, :] == False)
+
+    def filter_altitude(self, gps_data, setting=None, threshold=None):
+        """Filter GPS data based on a change in altitude.
+
+        Assuming the data are collected on the river, the altitude should not
+        change substantially during the transect. Since vertical resolution is
+        about 3x worse than horizontal resolution, the automatic filter
+        threshold is set to 3 m, which should ensure submeter horizontal
+        accuracy.
+
+        Parameters
+        ----------
+        gps_data: GPSData
+            Object of GPSData
+        setting: str
+            New setting for filter (Off, Manual, Auto)
+        threshold: float
+            Threshold provided by user for manual setting
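+
+        Example
+        -------
+        A minimal sketch (assumes `gga_data` is a populated BoatData object);
+        a manual threshold of 5 marks ensembles whose altitude deviates more
+        than 5 m from the mean invalid::
+
+            gga_data.filter_altitude(gps_data=transect.gps, setting='Manual', threshold=5)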
+        """
+
+        # New filter settings if provided
+        if setting is not None:
+            self.gps_altitude_filter = setting
+            if setting == 'Manual':
+                self.gps_altitude_filter_change = threshold
+
+        # Set threshold for Auto
+        if self.gps_altitude_filter == 'Auto':
+            self.gps_altitude_filter_change = 3
+
+        # Set all data to valid
+        self.valid_data[3, :] = True
+        # self.valid_data[5, :] = True
+
+        # Manual or Auto is selected, apply filter
+        if not self.gps_altitude_filter == 'Off':
+            # Initialize variables
+            num_valid_old = np.sum(self.valid_data[3, :])
+            k = 0
+            change = 1
+            # Loop until no change in the number of valid ensembles
+            while k < 100 and change > 0.1:
+                # Compute mean using valid ensembles
+                if self.valid_data.shape[1] == 1:
+                    if self.valid_data[1, 0]:
+                        alt_mean = gps_data.altitude_ens_m
+                    else:
+                        alt_mean = np.nan
+                else:
+                    alt_mean = np.nanmean(gps_data.altitude_ens_m[self.valid_data[1, :]])
+
+                # Compute difference for each ensemble
+                diff = np.abs(gps_data.altitude_ens_m - alt_mean)
+
+                # Mark invalid those ensembles with differences greater than the change threshold
+                self.valid_data[3, diff > self.gps_altitude_filter_change] = False
+                k += 1
+                num_valid = np.sum(self.valid_data[3, :])
+                change = num_valid_old - num_valid
+                num_valid_old = num_valid
+
+        # Combine all filter data to composite valid data
+        self.valid_data[0, :] = np.all(self.valid_data[1:, :], 0)
+        self.num_invalid = np.sum(self.valid_data[0, :] == False)
+
+    def filter_hdop(self, gps_data, setting=None, max_threshold=None, change_threshold=None):
+        """Filter GPS data based on both a maximum HDOP and a change in HDOP
+        over the transect.
+
+        Parameters
+        ----------
+        gps_data: GPSData
+            Object of GPSData
+        setting: str
+            Filter setting (Off, Manual, Auto)
+        max_threshold: float
+            Maximum threshold
+        change_threshold: float
+            Change threshold
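+
+        Example
+        -------
+        A minimal sketch (assumes `gga_data` is a populated BoatData object);
+        'Auto' applies the built-in maximum of 4 and change threshold of 3::
+
+            gga_data.filter_hdop(gps_data=transect.gps, setting='Auto')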
+        """
+
+        if gps_data.hdop_ens is None or gps_data.hdop_ens.size == 0:
+            self.valid_data[5, :] = True
+        else:
+            # New settings if provided
+            if setting is not None:
+                self.gps_HDOP_filter = setting
+                if self.gps_HDOP_filter == 'Manual':
+                    self.gps_HDOP_filter_max = max_threshold
+                    self.gps_HDOP_filter_change = change_threshold
+
+            # Settings for auto mode
+            if self.gps_HDOP_filter == 'Auto':
+                self.gps_HDOP_filter_change = 3
+                self.gps_HDOP_filter_max = 4
+
+            # Set all ensembles to valid
+            self.valid_data[5, :] = True
+
+            # Apply filter for manual or auto
+            if not self.gps_HDOP_filter == 'Off':
+
+                # Initialize variables
+                num_valid_old = np.sum(self.valid_data[5, :])
+                k = 0
+                change = 1
+
+                # Apply max filter
+                self.valid_data[5, np.greater(gps_data.hdop_ens, self.gps_HDOP_filter_max)] = False
+
+                # Loop until the number of valid ensembles does not change
+                while k < 100 and change > 0.1:
+
+                    # Compute mean HDOP for all valid ensembles
+                    if self.valid_data.shape[1] == 1:
+                        if self.valid_data[5, 0]:
+                            hdop_mean = gps_data.hdop_ens
+                        else:
+                            hdop_mean = np.nan
+                    else:
+                        hdop_mean = np.nanmean(gps_data.hdop_ens[self.valid_data[5, :]])
+
+                    # Compute the difference in HDOP and the mean for all ensembles
+                    diff = np.abs(gps_data.hdop_ens - hdop_mean)
+
+                    # If the change in HDOP from the mean is greater
+                    # than the change threshold, mark the data invalid
+                    self.valid_data[5, np.greater(diff, self.gps_HDOP_filter_change)] = False
+
+                    k += 1
+                    num_valid = np.sum(self.valid_data[5, :])
+                    change = num_valid_old - num_valid
+                    num_valid_old = num_valid
+
+        # Combine all filter data to composite data
+        self.valid_data[0, :] = np.all(self.valid_data[1:, :], 0)
+        self.num_invalid = np.sum(self.valid_data[0, :] == False)
+
+    @staticmethod
+    def filter_sontek(vel_in):
+        """Determines invalid raw bottom track samples for SonTek data.
+
+        Invalid data are those that are zero or where the velocity doesn't change between ensembles.
+
+        Parameters
+        ----------
+        vel_in: np.array(float)
+            Bottom track velocity data, in m/s.
+
+        Returns
+        -------
+        vel_out: np.array(float)
+            Filtered bottom track velocity data with all invalid data set to np.nan.
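+
+        Example
+        -------
+        A self-contained sketch with synthetic data; the second ensemble repeats
+        the first in all four beams, so it is marked invalid (nan).
+
+        >>> import numpy as np
+        >>> vel = np.array([[0.5, 0.5, 0.6], [0.4, 0.4, 0.5],
+        ...                 [0.3, 0.3, 0.2], [0.2, 0.2, 0.1]])
+        >>> vel_out = BoatData.filter_sontek(vel)
+        >>> bool(np.all(np.isnan(vel_out[:, 1])))
+        True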
+        """
+
+        # Identify all samples where the velocity did not change
+        test1 = np.abs(np.diff(vel_in, 1, 1)) < 0.00001
+
+        # Identify all samples with all zero values
+        test2 = np.nansum(np.abs(vel_in), 0) < 0.00001
+        # Slice from 1 to match test1's length (diff yields one fewer column); the weight
+        # of 4 counts an all-zero ensemble as four invalid beams
+        test2 = test2[1:] * 4
+
+        # Combine criteria
+        test_sum = np.sum(test1, 0) + test2
+
+        # Develop logical vector of invalid ensembles
+        invalid_bool = np.full(test_sum.size, False)
+        invalid_bool[test_sum > 3] = True
+        # Handle first ensemble
+        invalid_bool = np.concatenate((np.array([False]), invalid_bool), 0)
+        if np.nansum(vel_in[:, 0]) == 0:
+            invalid_bool[0] = True
+
+        # Set invalid ensembles to nan
+        vel_out = np.copy(vel_in)
+        vel_out[:, invalid_bool] = np.nan
+
+        return vel_out
+
+    @staticmethod
+    def run_std_trim(half_width, my_data):
+        """Computes a standard deviation over +/- halfwidth of points.
+
+        The routine accepts a column vector as input. "halfWidth" number of data
+        points for computing the standard deviation are selected before and
+        after the target data point, but not including the target data point.
+        Near the ends of the series the number of points before or after are
+        reduced. nan in the data are counted as points. The selected subset of
+        points are sorted and the points with the highest and lowest values are
+        removed from the subset and the standard deviation computed on the
+        remaining points in the subset. The process occurs for each point in the
+        provided column vector. A column vector with the computed standard
+        deviation at each point is returned.
+
+        Parameters
+        ----------
+        half_width: int
+             Number of ensembles on each side of the target ensemble to use
+             for computing the trimmed standard deviation
+        my_data: np.array(float)
+             Data to be processed
+
+        Returns
+        -------
+        filter_array: np.array(float)
+             Vector of the computed standard deviation at each point
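+
+        Example
+        -------
+        A self-contained sketch; with fewer than 20 points the half width is
+        reduced to half the series length.
+
+        >>> import numpy as np
+        >>> residuals = np.array([0.1, -0.2, 0.05, 0.3, -0.1, 0.0])
+        >>> sd = BoatData.run_std_trim(half_width=10, my_data=residuals)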
+        """
+
+        # Determine number of points to process
+        n_pts = my_data.shape[0]
+        if n_pts < 20:
+            half_width = int(np.floor(n_pts / 2.))
+
+        filter_array = []
+        # Compute standard deviation for each point
+        for n in range(n_pts):
+
+            # Sample selection for 1st point
+            if n == 0:
+                sample = my_data[1:1 + half_width]
+
+            # Sample selection at end of data set
+            elif n + half_width > n_pts:
+                sample = np.hstack((my_data[n - half_width - 1:n - 1], my_data[n:n_pts]))
+
+            # Sample selection at beginning of data set
+            elif half_width >= n + 1:
+                sample = np.hstack((my_data[0:n], my_data[n + 1:n + half_width + 1]))
+
+            # Samples selection in body of data set
+            else:
+                sample = np.hstack((my_data[n - half_width:n], my_data[n + 1:n + half_width + 1]))
+
+            # Sort and compute trimmed standard deviation
+            sample = np.sort(sample)
+            filter_array.append(np.nanstd(sample[1:sample.shape[0] - 1], ddof=1))
+
+        return np.array(filter_array)
diff --git a/Classes/BoatStructure.py b/Classes/BoatStructure.py
new file mode 100644
index 0000000000000000000000000000000000000000..f009c7b74aefdc84938dd8d0c22cbaadd309598a
--- /dev/null
+++ b/Classes/BoatStructure.py
@@ -0,0 +1,437 @@
+import numpy as np
+from Classes.BoatData import BoatData
+
+
+class BoatStructure(object):
+    """This class organizes the various sources for boat velocity into
+    a single structured class and establishes a selected property that
+    contains the select source for velocity and discharge computations.
+
+    Attributes
+    ----------
+    selected: str
+        Name of BoatData object to be used for discharge computations.
+    bt_vel: BoatData
+        BoatData object for bottom track velocity
+    gga_vel: BoatData
+        BoatData object for gga velocity
+    vtg_vel: BoatData
+        BoatData object for vtg velocity
+    composite: str
+        Setting to use ("On") or not ("Off") composite tracks.
+    """
+
+    def __init__(self):
+
+        self.selected = None  # Name of BoatData object to be used for discharge computations
+        self.bt_vel = None  # BoatData object for bottom track velocity
+        self.gga_vel = None  # BoatData object for gga velocity
+        self.vtg_vel = None  # BoatData object for vtg velocity
+
+        # Composite track information is not currently provided by the manufacturers.
+        # Future versions may try to determine this setting from SonTek data
+        self.composite = 'Off'  # Setting for composite tracks
+
+    def add_boat_object(self, source, vel_in, freq_in=None, coord_sys_in=None, nav_ref_in=None,
+                        min_beams=3, bottom_mode='Variable', corr_in=None, rssi_in=None):
+        """Adds a BoatData object to the appropriate property
+
+        Parameters
+        ----------
+        source: str
+            Name of manufacturer.
+        vel_in: np.array
+            Boat velocity array.
+        freq_in: np.array or float
+            Acoustic frequency
+        coord_sys_in: str
+            Coordinate system of velocity data.
+        nav_ref_in: str
+            Source of boat velocity data
+        min_beams: int
+            Setting to allow 3 beam solutions or require 4 beam solutions or set to Auto (-1)
+        bottom_mode: str
+            Bottom mode used
+        corr_in: np.array
+            Correlation values for bottom track
+        rssi_in: np.array
+            Returned signal strength for bottom track
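+
+        Example
+        -------
+        A minimal sketch (assumes `vel`, `freq`, `corr`, and `rssi` are arrays
+        from a decoded file; argument values are illustrative)::
+
+            boat = BoatStructure()
+            boat.add_boat_object(source='TRDI', vel_in=vel, freq_in=freq,
+                                 coord_sys_in='Earth', nav_ref_in='BT',
+                                 corr_in=corr, rssi_in=rssi)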
+        """
+
+        if nav_ref_in == 'BT':
+            self.bt_vel = BoatData()
+            self.bt_vel.populate_data(source, vel_in, freq_in, coord_sys_in, nav_ref_in, min_beams, bottom_mode,
+                                      corr_in, rssi_in)
+        if nav_ref_in == 'GGA':
+            self.gga_vel = BoatData()
+            self.gga_vel.populate_data(source, vel_in, freq_in, coord_sys_in, nav_ref_in)
+        if nav_ref_in == 'VTG':
+            self.vtg_vel = BoatData()
+            self.vtg_vel.populate_data(source, vel_in, freq_in, coord_sys_in, nav_ref_in)
+
+    def set_nav_reference(self, reference):
+        """This function will set the navigation reference property to the specified object reference.
+
+        Parameters
+        ----------
+        reference: str
+            Navigation reference, BT, GGA, or VTG
+        """
+
+        if reference == 'BT':
+            self.selected = 'bt_vel'
+        elif reference == 'GGA':
+            self.selected = 'gga_vel'
+        elif reference == 'VTG':
+            self.selected = 'vtg_vel'
+
+    def change_nav_reference(self, reference, transect):
+        """This function changes the navigation reference to the specified object reference and recomputes
+        the composite tracks, if necessary.
+
+        Parameters
+        ----------
+        reference: str
+            New navigation reference, BT, GGA, or VTG.
+        transect: TransectData
+            Object of TransectData.
+        """
+
+        nav_dict = {'BT': 'bt_vel', 'bt_vel': 'bt_vel',
+                    'GGA': 'gga_vel', 'gga_vel': 'gga_vel',
+                    'VTG': 'vtg_vel', 'vtg_vel': 'vtg_vel'}
+        if reference in nav_dict:
+            self.selected = nav_dict[reference]
+
+        self.composite_tracks(transect)
+
+    def change_coord_sys(self, new_coord_sys, sensors, adcp):
+        """This function will change the coordinate system of the boat velocity reference.
+        
+        Parameters
+        ----------
+        new_coord_sys: str
+            Specified new coordinate system.
+        sensors: Sensors
+            Object of Sensors.
+        adcp: InstrumentData
+            Object of InstrumentData.
+        """
+
+        # Change coordinate system for all available boat velocity sources
+        if self.bt_vel is not None:
+            self.bt_vel.change_coord_sys(new_coord_sys, sensors, adcp)
+        if self.gga_vel is not None:
+            self.gga_vel.change_coord_sys(new_coord_sys, sensors, adcp)
+        if self.vtg_vel is not None:
+            self.vtg_vel.change_coord_sys(new_coord_sys, sensors, adcp)
+
+    def composite_tracks(self, transect, setting=None):
+        """If new composite setting is provided it is used, if not the setting saved in the object is used
+        
+        Parameters
+        ----------
+        transect: TransectData
+            Object of TransectData.
+        setting: str
+            New setting for composite tracks ("On" or "Off")
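+
+        Example
+        -------
+        A minimal sketch (assumes `transect` is a populated TransectData object)::
+
+            transect.boat_vel.composite_tracks(transect=transect, setting='On')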
+        """
+
+        if setting is None:
+            setting = self.composite
+        else:
+            # New Setting
+            self.composite = setting
+
+        # Composite depths turned on
+        if setting == 'On':
+            # Initialize variables
+            u_bt = np.array([])
+            v_bt = np.array([])
+            u_gga = np.array([])
+            v_gga = np.array([])
+            u_vtg = np.array([])
+            v_vtg = np.array([])
+
+            # Prepare bt data
+            if self.bt_vel is not None:
+                u_bt = self.bt_vel.u_processed_mps
+                v_bt = self.bt_vel.v_processed_mps
+                # Set to invalid all interpolated velocities
+                valid_bt = self.bt_vel.valid_data[0, :]
+                u_bt[valid_bt == False] = np.nan
+                v_bt[valid_bt == False] = np.nan
+
+            # Prepare gga data
+            if self.gga_vel is not None:
+                # Get gga velocities
+                u_gga = self.gga_vel.u_processed_mps
+                v_gga = self.gga_vel.v_processed_mps
+                # Set to invalid all interpolated velocities
+                valid_gga = self.gga_vel.valid_data[0, :]
+                u_gga[valid_gga == False] = np.nan
+                v_gga[valid_gga == False] = np.nan
+            elif self.bt_vel is not None:
+                u_gga = np.tile([np.nan], u_bt.shape)
+                v_gga = np.tile([np.nan], v_bt.shape)
+
+            # Prepare vtg data
+            if self.vtg_vel is not None:
+                # Get vtg velocities
+                u_vtg = self.vtg_vel.u_processed_mps
+                v_vtg = self.vtg_vel.v_processed_mps
+                # Set to invalid all interpolated velocities
+                valid_vtg = self.vtg_vel.valid_data[0, :]
+                u_vtg[valid_vtg == False] = np.nan
+                v_vtg[valid_vtg == False] = np.nan
+            elif self.bt_vel is not None:
+                u_vtg = np.tile([np.nan], u_bt.shape)
+                v_vtg = np.tile([np.nan], v_bt.shape)
+
+            # Process bt as primary
+            if self.selected == 'bt_vel':
+                # Initialize composite source
+                comp_source = np.tile(np.nan, u_bt.shape)
+
+                # Process u velocity component
+                u_comp = u_bt
+                comp_source[np.isnan(u_comp) == False] = 1
+
+                # If BT data are not valid try VTG and set composite source (BUG HERE DSM)
+                u_comp[np.isnan(u_comp)] = u_vtg[np.isnan(u_comp)]
+                comp_source[np.logical_and(np.isnan(u_comp) == False, np.isnan(comp_source))] = 3
+
+                # If there are still invalid boat velocities, try GGA and set composite source
+                u_comp[np.isnan(u_comp)] = u_gga[np.isnan(u_comp)]
+                comp_source[np.logical_and(np.isnan(u_comp) == False, np.isnan(comp_source))] = 2
+
+                # If there are still invalid boat velocities, use interpolated
+                # values if present and set composite source
+                comp_source[np.logical_and(np.isnan(u_comp) == False, np.isnan(comp_source))] = 0
+
+                # Set composite source to invalid for all remaining invalid boat velocity data
+                comp_source[np.isnan(comp_source)] = -1
+
+                # Process v velocity component.  Assume that the composite source is the same
+                # as the u component
+                v_comp = v_bt
+                v_comp[np.isnan(v_comp)] = v_vtg[np.isnan(v_comp)]
+                v_comp[np.isnan(v_comp)] = v_gga[np.isnan(v_comp)]
+                v_comp[np.isnan(v_comp)] = self.bt_vel.v_processed_mps[np.isnan(v_comp)]
+
+                # Apply the composite settings to the bottom track Boatdata objects
+                self.bt_vel.apply_composite(u_comp, v_comp, comp_source)
+                self.bt_vel.interpolate_composite(transect)
+
+            # Process gga as primary
+            elif self.selected == 'gga_vel':
+                # Initialize the composite source
+                comp_source = np.tile([np.nan], u_bt.shape)
+
+                # Process the u velocity component
+                u_comp = u_gga
+                comp_source[np.isnan(u_comp) == False] = 2
+
+                # If GGA data are not valid try VTG and set composite source
+                u_comp[np.isnan(u_comp)] = u_vtg[np.isnan(u_comp)]
+                comp_source[np.logical_and(np.isnan(u_comp) == False, np.isnan(comp_source))] = 3
+
+                # If there are still invalid boat velocities, try BT and set composite source
+                u_comp[np.isnan(u_comp)] = u_bt[np.isnan(u_comp)]
+                comp_source[np.logical_and(np.isnan(u_comp) == False, np.isnan(comp_source))] = 1
+
+                # If there are still invalid boat velocities, use interpolated values,
+                # if present and set composite source
+                comp_source[np.logical_and(np.isnan(u_comp) == False, np.isnan(comp_source))] = 0
+
+                # Set composite source to invalid for all remaining invalid boat velocity data
+                comp_source[np.isnan(comp_source)] = -1
+
+                # Process v velocity component.  Assume that the composite source is the
+                # same as the u component
+                v_comp = v_gga
+                v_comp[np.isnan(v_comp)] = v_vtg[np.isnan(v_comp)]
+                v_comp[np.isnan(v_comp)] = v_bt[np.isnan(v_comp)]
+                # v_comp[np.isnan(v_comp)] = self.gga_vel.v_processed_mps[np.isnan(v_comp)]
+
+                # Apply the composite settings to the gga BoatData object
+                # For the situation where the transect has no GGA data but other transects do and composite tracks
+                # has been turned on, create the gga_vel object and populate only the u and v processed, comp_source,
+                # and valid_data attributes.
+                if self.gga_vel is None:
+                    self.gga_vel = BoatData()
+                    self.gga_vel.processed_source = np.array([''] * comp_source.shape[0], dtype=object)
+                    self.gga_vel.valid_data = np.full((6, comp_source.shape[0]), False)
+                self.gga_vel.apply_composite(u_comp, v_comp, comp_source)
+                self.gga_vel.interpolate_composite(transect)
+
+            # Process vtg as primary
+            elif self.selected == 'vtg_vel':
+                # Initialize the composite source
+                comp_source = np.tile([np.nan], u_bt.shape)
+
+                # Process the u velocity component
+                u_comp = u_vtg
+                comp_source[np.isnan(u_comp) == False] = 3
+
+                # If VTG data are not valid try GGA and set composite source
+                u_comp[np.isnan(u_comp)] = u_gga[np.isnan(u_comp)]
+                comp_source[np.logical_and(np.isnan(u_comp) == False, np.isnan(comp_source))] = 2
+
+                # If there are still invalid boat velocities, try BT and set composite source
+                u_comp[np.isnan(u_comp)] = u_bt[np.isnan(u_comp)]
+                comp_source[np.logical_and(np.isnan(u_comp) == False, np.isnan(comp_source))] = 1
+
+                # If there are still invalid boat velocities, use interpolated values,
+                # if present and set composite source
+                comp_source[np.logical_and(np.isnan(u_comp) == False, np.isnan(comp_source))] = 0
+
+                # Set composite source to invalid for all remaining invalid boat velocity data
+                comp_source[np.isnan(comp_source)] = -1
+
+                # Process v velocity component.  Assume that the composite source is the
+                # same as the u component
+                v_comp = v_vtg
+                # DSM wrong in Matlab version 1/29/2018 v_comp[np.isnan(v_comp)] = v_vtg[np.isnan(v_comp)]
+                v_comp[np.isnan(v_comp)] = v_gga[np.isnan(v_comp)]
+                v_comp[np.isnan(v_comp)] = v_bt[np.isnan(v_comp)]
+                # v_comp[np.isnan(v_comp)] = self.vtg_vel.v_processed_mps[np.isnan(v_comp)]
+
+                # Apply the composite settings to the vtg BoatData object
+                # For the situation where the transect has no VTG data but other transects do and composite tracks
+                # has been turned on, create the vtg_vel object and populate only the u and v processed, comp_source,
+                # and valid_data attributes.
+                if self.vtg_vel is None:
+                    self.vtg_vel = BoatData()
+                    self.vtg_vel.processed_source = np.array([''] * comp_source.shape[0], dtype=object)
+                    self.vtg_vel.valid_data = np.full((6, comp_source.shape[0]), False)
+                self.vtg_vel.apply_composite(u_comp, v_comp, comp_source)
+                self.vtg_vel.interpolate_composite(transect)
+        else:
+            # Composite tracks off
+
+            # Use only interpolations for bt
+            if self.bt_vel is not None:
+                self.bt_vel.apply_interpolation(transect=transect,
+                                                interpolation_method=transect.boat_vel.bt_vel.interpolate)
+                comp_source = np.tile(np.nan, self.bt_vel.u_processed_mps.shape)
+                comp_source[self.bt_vel.valid_data[0, :]] = 1
+                comp_source[np.logical_and(np.isnan(comp_source),
+                                           (np.isnan(self.bt_vel.u_processed_mps) == False))] = 0
+                comp_source[np.isnan(comp_source)] = -1
+                self.bt_vel.apply_composite(u_composite=self.bt_vel.u_processed_mps,
+                                            v_composite=self.bt_vel.v_processed_mps,
+                                            composite_source=comp_source)
+
+            # Use only interpolations for gga
+            if self.gga_vel is not None:
+                # This if statement handles the situation where there is no GPS data for a transect but there is GPS
+                # data for other transects and the user has turned on / off composite tracks.
+                if self.gga_vel.u_mps is not None:
+                    self.gga_vel.apply_interpolation(transect=transect,
+                                                     interpolation_method=transect.boat_vel.gga_vel.interpolate)
+                    comp_source = np.tile(np.nan, self.gga_vel.u_processed_mps.shape)
+                    comp_source[self.gga_vel.valid_data[0, :]] = 2
+                    comp_source[np.logical_and(np.isnan(comp_source),
+                                               (np.isnan(self.gga_vel.u_processed_mps) == False))] = 0
+                    comp_source[np.isnan(comp_source)] = -1
+                    self.gga_vel.apply_composite(u_composite=self.gga_vel.u_processed_mps,
+                                                 v_composite=self.gga_vel.v_processed_mps,
+                                                 composite_source=comp_source)
+                else:
+                    self.gga_vel = None
+
+            # Use only interpolations for vtg
+            if self.vtg_vel is not None:
+                # This if statement handles the situation where there is no GPS data for a transect but there is GPS
+                # data for other transects and the user has turned on / off composite tracks.
+                if self.vtg_vel.u_mps is not None:
+                    self.vtg_vel.apply_interpolation(transect=transect,
+                                                     interpolation_method=transect.boat_vel.vtg_vel.interpolate)
+                    comp_source = np.tile(np.nan, self.vtg_vel.u_processed_mps.shape)
+                    comp_source[self.vtg_vel.valid_data[0, :]] = 3
+                    comp_source[np.logical_and(np.isnan(comp_source),
+                                               (np.isnan(self.vtg_vel.u_processed_mps) == False))] = 0
+                    comp_source[np.isnan(comp_source)] = -1
+                    self.vtg_vel.apply_composite(u_composite=self.vtg_vel.u_processed_mps,
+                                                 v_composite=self.vtg_vel.v_processed_mps,
+                                                 composite_source=comp_source)
+                else:
+                    self.vtg_vel = None
+
+    @staticmethod
+    def compute_boat_track(transect, ref=None):
+        """Computes the shiptrack coordinates, along track distance, and distance made
+        good for the selected boat reference.
+
+        Parameters
+        ----------
+        transect: TransectData
+            Object of TransectData
+        ref: str
+            Setting to determine what navigation reference should be used. If None, use selected.
+
+        Returns
+        -------
+        boat_track: dict
+            Dictionary containing shiptrack coordinates (track_x_m, track_y_m), along track distance (distance_m),
+            and distance made good (dmg_m)
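+
+        Example
+        -------
+        A minimal sketch (assumes `transect` is a populated TransectData object
+        with valid boat velocities)::
+
+            boat_track = BoatStructure.compute_boat_track(transect)
+            total_length = boat_track['distance_m'][-1]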
+        """
+
+        # Initialize dictionary
+        boat_track = {'track_x_m': np.nan, 'track_y_m': np.nan, 'distance_m': np.nan, 'dmg_m': np.nan}
+
+        # Compute incremental track coordinates
+        if ref is None:
+            boat_vel_selected = getattr(transect.boat_vel, transect.boat_vel.selected)
+        else:
+            boat_vel_selected = getattr(transect.boat_vel, ref)
+
+        if boat_vel_selected is None:
+            boat_vel_selected = getattr(transect.boat_vel, 'bt_vel')
+        track_x = boat_vel_selected.u_processed_mps[transect.in_transect_idx] * \
+            transect.date_time.ens_duration_sec[transect.in_transect_idx]
+        track_y = boat_vel_selected.v_processed_mps[transect.in_transect_idx] * \
+            transect.date_time.ens_duration_sec[transect.in_transect_idx]
+
+        # Check for any valid data
+        idx = np.where(np.logical_not(np.isnan(track_x)))
+        if idx[0].size > 1:
+            # Compute variables
+            boat_track['distance_m'] = np.nancumsum(np.sqrt(track_x ** 2 + track_y ** 2))
+            boat_track['track_x_m'] = np.nancumsum(track_x)
+            boat_track['track_y_m'] = np.nancumsum(track_y)
+            boat_track['dmg_m'] = np.sqrt(boat_track['track_x_m'] ** 2 + boat_track['track_y_m'] ** 2)
+
+        return boat_track
+
+    def populate_from_qrev_mat(self, transect):
+        """Populates the object using data from previously saved QRev Matlab file.
+
+        Parameters
+        ----------
+        transect: mat_struct
+           Matlab data structure obtained from sio.loadmat
+        """
+
+        if hasattr(transect, 'boatVel'):
+            if hasattr(transect.boatVel, 'btVel'):
+                if hasattr(transect.boatVel.btVel, 'u_mps'):
+                    self.bt_vel = BoatData()
+                    self.bt_vel.populate_from_qrev_mat(transect.boatVel.btVel)
+            if hasattr(transect.boatVel, 'ggaVel'):
+                if hasattr(transect.boatVel.ggaVel, 'u_mps'):
+                    self.gga_vel = BoatData()
+                    self.gga_vel.populate_from_qrev_mat(transect.boatVel.ggaVel)
+            if hasattr(transect.boatVel, 'vtgVel'):
+                if hasattr(transect.boatVel.vtgVel, 'u_mps'):
+                    self.vtg_vel = BoatData()
+                    self.vtg_vel.populate_from_qrev_mat(transect.boatVel.vtgVel)
+            nav_dict = {'btVel': 'bt_vel', 'bt_vel': 'bt_vel',
+                        'ggaVel': 'gga_vel', 'gga_vel': 'gga_vel',
+                        'vtgVel': 'vtg_vel', 'vtg_vel': 'vtg_vel'}
+            self.selected = nav_dict[transect.boatVel.selected]
+
diff --git a/Classes/CompassCal.py b/Classes/CompassCal.py
new file mode 100644
index 0000000000000000000000000000000000000000..2b279e9546044410c7c86628cd69d9f01d774f9b
--- /dev/null
+++ b/Classes/CompassCal.py
@@ -0,0 +1,41 @@
+import re
+
+
+class CompassCal(object):
+    """Class stores compass calibration or evaluation data and parses the compass error from the raw data.
+
+    Attributes
+    ----------
+    time_stamp: str
+        Time of calibration or evaluation (mm/dd/yyyy).
+    data: str
+        All calibration or evaluation data provided by the manufacturer.
+    error: str
+        Remaining compass error after calibration or from evaluation, in degrees,
+        or 'N/A' if the error could not be parsed.
+    """
+
+    def __init__(self):
+        """Initialize class and instance variables."""
+
+        self.time_stamp = None
+        self.data = None
+        self.error = None
+
+    def populate_data(self, time_stamp, data_in):
+        """Store data and parse compass error from compass data.
+
+        Parameters
+        ----------
+        time_stamp: str
+            Time of calibration or evaluation (mm/dd/yyyy).
+        data_in: str
+            All calibration or evaluation data provided by the manufacturer.
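+
+        Example
+        -------
+        A self-contained sketch; the data string is an assumed sample of the
+        manufacturer output.
+
+        >>> cal = CompassCal()
+        >>> cal.populate_data('03/05/2019', 'Total error: 0.2 deg')
+        >>> cal.error
+        '0.2'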
+        """
+        self.time_stamp = time_stamp
+        self.data = data_in
+
+        splits = re.split('(Total error:|Double Cycle Errors:|Error from calibration:)', data_in)
+        if len(splits) > 1:
+            self.error = re.search(r'\d+\.*\d*', splits[2])[0]
+        else:
+            self.error = 'N/A'
diff --git a/Classes/ComputeExtrap.py b/Classes/ComputeExtrap.py
new file mode 100644
index 0000000000000000000000000000000000000000..fd272d1919231e75950931a69624ffd1fe1ca34c
--- /dev/null
+++ b/Classes/ComputeExtrap.py
@@ -0,0 +1,330 @@
+import numpy as np
+from Classes.SelectFit import SelectFit
+from Classes.ExtrapQSensitivity import ExtrapQSensitivity
+from Classes.NormData import NormData
+
+
+class ComputeExtrap(object):
+    """Class to compute the optimized or manually specified extrapolation methods
+
+    Attributes
+    ----------
+    threshold: float
+        Threshold as a percent for determining if a median is valid
+    subsection: list
+        Percent of discharge
+    fit_method: str
+        Method used to determine fit.  Automatic or manual
+    norm_data: NormData
+        Object of class NormData
+    sel_fit: SelectFit
+        Object of class SelectFit
+    q_sensitivity: ExtrapQSensitivity
+        Object of class ExtrapQSensitivity
+    messages: list
+        List of warning messages for the user
+    use_weighted: bool
+        Specifies if discharge weighted medians are used in extrapolations
+    sub_from_left: bool
+        Specifies whether subsectioning should start from the left bank and proceed to the right.
+    use_q: bool
+        Specifies whether to use discharge rather than the cross product when subsectioning
+
+    """
+    
+    def __init__(self):
+        """Initialize instance variables."""
+
+        self.threshold = None  # Threshold as a percent for determining if a median is valid
+        self.subsection = None  # Percent of discharge, does not account for transect direction
+        self.fit_method = None  # Method used to determine fit.  Automatic or manual
+        self.norm_data = []  # Object of class norm data
+        self.sel_fit = []  # Object of class SelectFit
+        self.q_sensitivity = None  # Object of class ExtrapQSensitivity
+        self.messages = []  # Variable for messages to UserWarning
+        self.use_weighted = False
+        self.use_q = False
+        self.sub_from_left = False
+        
+    def populate_data(self, transects, compute_sensitivity=True, use_weighted=False, use_q=True, sub_from_left=True):
+        """Store data in instance variables.
+
+        Parameters
+        ----------
+        transects: list
+            List of transects of TransectData
+        compute_sensitivity: bool
+            Determines if sensitivity should be computed.
+        use_weighted: bool
+            Specifies if discharge weighted medians are used in extrapolations
+        use_q: bool
+            Specifies whether to use discharge rather than the cross product when subsectioning
+        sub_from_left: bool
+            Specifies whether subsectioning should start from the left bank and proceed to the right.
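+
+        Example
+        -------
+        A minimal sketch (assumes `transects` is a list of populated TransectData
+        objects)::
+
+            extrap_fit = ComputeExtrap()
+            extrap_fit.populate_data(transects=transects, compute_sensitivity=True)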
+        """
+
+        self.threshold = 20
+        self.subsection = [0, 100]
+        self.fit_method = 'Automatic'
+        self.use_weighted = use_weighted
+        self.use_q = use_q
+        self.sub_from_left = sub_from_left
+        self.process_profiles(transects=transects, data_type='q', use_weighted=use_weighted)
+
+        # Compute the sensitivity of the final discharge to changes in extrapolation methods
+        if compute_sensitivity:
+            self.q_sensitivity = ExtrapQSensitivity()
+            self.q_sensitivity.populate_data(transects=transects, extrap_fits=self.sel_fit)
+
+    def populate_from_qrev_mat(self, meas_struct):
+        """Populates the object using data from previously saved QRev Matlab file.
+
+        Parameters
+        ----------
+        meas_struct: mat_struct
+           Matlab data structure obtained from sio.loadmat
+        """
+
+        if hasattr(meas_struct, 'extrapFit'):
+            self.threshold = meas_struct.extrapFit.threshold
+            self.subsection = meas_struct.extrapFit.subsection
+            self.fit_method = meas_struct.extrapFit.fitMethod
+
+            # Check for consistency between transects and norm_data. If only checked transects were saved, the
+            # normData and selfit will also include unchecked transects which must be removed prior to
+            # continuing to process.
+
+            # If only a single transect, meas_struct.transects will be a structure, not an array, so the len
+            # method won't work.
+            try:
+                n_transects = len(meas_struct.transects)
+            except TypeError:
+                n_transects = 1
+
+            try:
+                n_data = len(meas_struct.extrapFit.normData) - 1
+            except TypeError:
+                n_data = 1
+
+            if n_transects != n_data:
+                # normData needs adjustment to match transects
+                file_names = []
+                valid_norm_data = []
+                valid_sel_fit = []
+                # Create list of transect filenames
+                if n_transects == 1:
+                    file_names.append(meas_struct.transects.fileName)
+                else:
+                    for transect in meas_struct.transects:
+                        file_names.append(transect.fileName)
+                # Create a list of norm_data and sel_fit objects that match the filenames in transects
+                for n in range(len(meas_struct.extrapFit.normData) - 1):
+                    if meas_struct.extrapFit.normData[n].fileName in file_names:
+                        valid_norm_data.append(meas_struct.extrapFit.normData[n])
+                        valid_sel_fit.append(meas_struct.extrapFit.selFit[n])
+                # Append the whole measurement objects
+                valid_norm_data.append(meas_struct.extrapFit.normData[-1])
+                valid_sel_fit.append(meas_struct.extrapFit.selFit[-1])
+                # Update meas_struct so normData and selFit match transects
+                meas_struct.extrapFit.normData = np.array(valid_norm_data)
+                meas_struct.extrapFit.selFit = np.array(valid_sel_fit)
+
+            self.norm_data = NormData.qrev_mat_in(meas_struct.extrapFit)
+            self.sel_fit = SelectFit.qrev_mat_in(meas_struct.extrapFit)
+            self.q_sensitivity = ExtrapQSensitivity()
+            self.q_sensitivity.populate_from_qrev_mat(meas_struct.extrapFit)
+            if hasattr(meas_struct.extrapFit, 'use_weighted'):
+                self.use_weighted = meas_struct.extrapFit.use_weighted
+            else:
+                self.use_weighted = False
+
+            if hasattr(meas_struct.extrapFit, 'use_q'):
+                self.use_q = meas_struct.extrapFit.use_q
+            else:
+                self.use_q = False
+
+            if hasattr(meas_struct.extrapFit, 'sub_from_left'):
+                self.sub_from_left = meas_struct.extrapFit.sub_from_left
+            else:
+                self.sub_from_left = False
+
+            if type(meas_struct.extrapFit.messages) is str:
+                self.messages = [meas_struct.extrapFit.messages]
+            elif type(meas_struct.extrapFit.messages) is np.ndarray:
+                self.messages = meas_struct.extrapFit.messages.tolist()
+
+    def process_profiles(self, transects, data_type, use_weighted=None, use_q=True, sub_from_left=True):
+        """Function that coordinates the fitting process.
+
+        Parameters
+        ----------
+        transects: TransectData
+            Object of TransectData
+        data_type: str
+            Type of data processing (q or v)
+        use_weighted: bool
+            Specifies if discharge weighted medians are used in extrapolations
+        sub_from_left: bool
+            Specifies whether subsectioning should start from the left bank and proceed to the right.
+        use_q: bool
+            Specifies whether to use discharge rather than the cross product when subsectioning
+        """
+        if use_weighted is not None:
+            self.use_weighted = use_weighted
+        else:
+            self.use_weighted = self.norm_data[-1].use_weighted
+
+        self.use_q = use_q
+        self.sub_from_left = sub_from_left
+
+        # Compute normalized data for each transect
+        self.norm_data = []
+        for transect in transects:
+            norm_data = NormData()
+            norm_data.populate_data(transect=transect,
+                                    data_type=data_type,
+                                    threshold=self.threshold,
+                                    data_extent=self.subsection,
+                                    use_weighted=self.use_weighted,
+                                    use_q=self.use_q,
+                                    sub_from_left=self.sub_from_left)
+            self.norm_data.append(norm_data)
+
+        # Compute composite normalized data
+        comp_data = NormData()
+        comp_data.use_q = self.norm_data[-1].use_q
+        comp_data.sub_from_left = self.norm_data[-1].sub_from_left
+        comp_data.create_composite(transects=transects, norm_data=self.norm_data, threshold=self.threshold)
+        self.norm_data.append(comp_data)
+
+        # Compute the fit for the selected method
+        if self.fit_method == 'Manual':
+            for n in range(len(transects)):
+                self.sel_fit[n].populate_data(normalized=self.norm_data[n],
+                                              fit_method=self.fit_method,
+                                              top=transects[n].extrap.top_method,
+                                              bot=transects[n].extrap.bot_method,
+                                              exponent=transects[n].extrap.exponent)
+        else:
+            self.sel_fit = []
+            for n in range(len(self.norm_data)):
+                sel_fit = SelectFit()
+                sel_fit.populate_data(self.norm_data[n], self.fit_method)
+                self.sel_fit.append(sel_fit)
+
+        if self.sel_fit[-1].top_fit_r2 is not None:
+            # Evaluate if there is a potential that a 3-point top method may be appropriate
+            if (self.sel_fit[-1].top_fit_r2 > 0.9 or self.sel_fit[-1].top_r2 > 0.9) \
+                    and np.abs(self.sel_fit[-1].top_max_diff) > 0.2:
+                self.messages.append('The measurement profile may warrant a 3-point fit at the top')
+                
+    def update_q_sensitivity(self, transects):
+        """Updates the discharge sensitivity values.
+
+        Parameters
+        ----------
+        transects: list
+            List of TransectData objects
+        """
+        self.q_sensitivity = ExtrapQSensitivity()
+        self.q_sensitivity.populate_data(transects, self.sel_fit)
+        
+    def change_fit_method(self, transects, new_fit_method, idx, top=None, bot=None, exponent=None, compute_qsens=True):
+        """Function to change the extrapolation method.
+
+        Parameters
+        ----------
+        transects: list
+            List of TransectData objects
+        new_fit_method: str
+            Identifies fit method automatic or manual
+        idx: int
+            Index to the specified transect or measurement in NormData
+        top: str
+            Specifies top fit
+        bot: str
+            Specifies bottom fit
+        exponent: float
+            Specifies exponent for power or no slip fits
+        compute_qsens: bool
+            Specifies if the discharge sensitivities should be recomputed
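+
+        Example
+        -------
+        A minimal sketch; indexing the last element of norm_data targets the
+        composite measurement fit (fit-type values are illustrative)::
+
+            extrap_fit.change_fit_method(transects, new_fit_method='Manual',
+                                         idx=len(extrap_fit.norm_data) - 1,
+                                         top='Power', bot='Power', exponent=0.1667)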
+        """
+        self.fit_method = new_fit_method
+
+        self.sel_fit[idx].populate_data(self.norm_data[idx], new_fit_method, top=top, bot=bot, exponent=exponent)
+        if compute_qsens and idx == len(self.norm_data) - 1:
+            self.q_sensitivity = ExtrapQSensitivity()
+            self.q_sensitivity.populate_data(transects, self.sel_fit)
+        
+    def change_threshold(self, transects, data_type, threshold):
+        """Function to change the threshold for accepting the increment median as valid.  The threshold
+        is in percent of the median number of points in all increments.
+
+        Parameters
+        ----------
+        transects: list
+            List of TransectData objects
+        data_type: str
+            Specifies the data type (discharge or velocity)
+        threshold: float
+            Percent of data that must be in a median to include the median in the fit algorithm
+        """
+        
+        self.threshold = threshold
+        self.process_profiles(transects=transects, data_type=data_type)
+        self.q_sensitivity = ExtrapQSensitivity()
+        self.q_sensitivity.populate_data(transects=transects, extrap_fits=self.sel_fit)
+        
+    def change_extents(self, transects, data_type, extents, use_q, sub_from_left):
+        """Function allows the data to be subsection by specifying the percent cumulative discharge
+        for the start and end points.  Currently this function does not consider transect direction.
+
+        Parameters
+        ----------
+        transects: list
+            List of TransectData objects
+        data_type: str
+            Specifies the data type (discharge or velocity)
+        extents: list
+            List containing two values, the minimum and maximum discharge percentages for subsectioning
+        sub_from_left: bool
+            Specifies whether subsectioning should start from the left bank and proceed to the right.
+        use_q: bool
+            Specifies whether to use discharge rather than the cross product when subsectioning
+        """
+        
+        self.subsection = extents
+        self.use_q = use_q
+        self.sub_from_left = sub_from_left
+        self.process_profiles(transects=transects, data_type=data_type)
+        self.q_sensitivity = ExtrapQSensitivity()
+        self.q_sensitivity.populate_data(transects=transects, extrap_fits=self.sel_fit)
+        
+    def change_data_type(self, transects, data_type):
+        """Changes the data type to be processed in extrap.
+
+        Parameters
+        ----------
+        transects: list
+            List of TransectData objects
+        data_type: str
+            Specifies the data type (discharge or velocity)
+        """
+        if data_type.lower() == 'q':
+            use_weighted = self.use_weighted
+        else:
+            use_weighted = False
+
+        self.process_profiles(transects=transects, data_type=data_type, use_weighted=use_weighted)
+        self.q_sensitivity = ExtrapQSensitivity()
+        self.q_sensitivity.populate_data(transects=transects, extrap_fits=self.sel_fit)
+
+    def change_data_auto(self, transects):
+        """Changes the data selection settings to automatic.
+
+        Parameters
+        ----------
+        transects: list
+            List of TransectData objects
+        """
+        self.threshold = 20
+        self.subsection = [0, 100]
+        self.process_profiles(transects=transects, data_type='q', use_weighted=self.use_weighted)
+
+        # Compute the sensitivity of the final discharge to changes in extrapolation methods
+        self.q_sensitivity = ExtrapQSensitivity()
+        self.q_sensitivity.populate_data(transects=transects, extrap_fits=self.sel_fit)
diff --git a/Classes/CoordError.py b/Classes/CoordError.py
new file mode 100644
index 0000000000000000000000000000000000000000..1411a302024774957e85c6eca52b4ebb0e92408d
--- /dev/null
+++ b/Classes/CoordError.py
@@ -0,0 +1,4 @@
+class CoordError(Exception):
+    """Exception raised for errors in coordinate transformations."""
+
+    def __init__(self, text):
+        super().__init__(text)
+        self.text = text
\ No newline at end of file
diff --git a/Classes/DateTime.py b/Classes/DateTime.py
new file mode 100644
index 0000000000000000000000000000000000000000..9f7e3d39b61d2b5985ca34128fb62b2ae22e3d9c
--- /dev/null
+++ b/Classes/DateTime.py
@@ -0,0 +1,77 @@
+import numpy as np
+
+
+class DateTime(object):
+    """This stores the date and time data in Python compatible format.
+
+    Attributes
+    ----------
+    date: str
+        Measurement date as mm/dd/yyyy
+    start_serial_time: float
+        Python serial time for start of transect (seconds since 1/1/1970), timestamp
+    end_serial_time: float
+        Python serial time for end of transect (seconds since 1/1/1970), timestamp
+    transect_duration_sec: float
+        Duration of transect, in seconds.
+    ens_duration_sec: np.array(float)
+        Duration of each ensemble, in seconds.
+    """
+    
+    def __init__(self):
+        """Initialize class and instance variables."""
+
+        self.date = None  # Measurement date mm/dd/yyyy
+        self.start_serial_time = None  # Python serial time for start of transect, timestamp
+        self.end_serial_time = None  # Python serial time for end of transect, timestamp
+        self.transect_duration_sec = None  # Duration of transect in seconds
+        self.ens_duration_sec = None  # Duration of each ensemble in seconds
+        
+    def populate_data(self, date_in, start_in, end_in, ens_dur_in):
+        """Populate data in object.
+
+        Parameters
+        ----------
+        date_in: str
+            Measurement date as mm/dd/yyyy
+        start_in: float
+            Python serial time for start of transect.
+        end_in: float
+            Python serial time for end of transect.
+        ens_dur_in: np.array(float)
+            Duration of each ensemble, in seconds.
+        """
+        
+        self.date = date_in
+        self.start_serial_time = start_in
+        self.end_serial_time = end_in
+        self.transect_duration_sec = float(end_in - start_in)
+        self.ens_duration_sec = ens_dur_in.astype(float)
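+        # Hedged usage sketch (values illustrative): serial times are Unix
+        # epoch seconds, so a 2-minute transect of 240 half-second ensembles
+        # could be stored as
+        #   import datetime
+        #   start = datetime.datetime(2020, 6, 1, 12, 0).timestamp()
+        #   dt = DateTime()
+        #   dt.populate_data('06/01/2020', start, start + 120.0, np.full(240, 0.5))
+        #   dt.transect_duration_sec  # -> 120.0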
+
+    def populate_from_qrev_mat(self, transect):
+        """Populates the object using data from previously saved QRev Matlab file.
+
+        Parameters
+        ----------
+        transect: mat_struct
+           Matlab data structure obtained from sio.loadmat
+        """
+
+        if hasattr(transect, 'dateTime'):
+            seconds_day = 86400
+            time_correction = 719529.0000000003  # Matlab datenum of 1970-01-01 (the Unix epoch)
+
+            self.date = transect.dateTime.date
+            self.start_serial_time = (transect.dateTime.startSerialTime - time_correction) * seconds_day
+            self.end_serial_time = (transect.dateTime.endSerialTime - time_correction) * seconds_day
+            self.transect_duration_sec = float(transect.dateTime.transectDuration_sec)
+            try:
+                self.ens_duration_sec = transect.dateTime.ensDuration_sec.astype(float)
+            except AttributeError:
+                self.ens_duration_sec = np.array([np.nan])
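+            # Worked check of the conversion above: Matlab datenum 719529 is
+            # 1970-01-01, so a Matlab serial date of 719529.5 (noon on
+            # 1970-01-01) maps to roughly (719529.5 - 719529) * 86400 = 43200 s
+            # after the Unix epoch.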
+
diff --git a/Classes/DepthData.py b/Classes/DepthData.py
new file mode 100644
index 0000000000000000000000000000000000000000..946908873307ac7467ed20d1f11eff5c0355daa0
--- /dev/null
+++ b/Classes/DepthData.py
@@ -0,0 +1,1009 @@
+import copy
+import concurrent.futures
+import numpy as np
+import itertools as it
+from numpy.matlib import repmat
+from MiscLibs.common_functions import iqr, nan_less, nan_greater
+from MiscLibs.robust_loess_compiled import rloess
+from MiscLibs.non_uniform_savgol import non_uniform_savgol
+from MiscLibs.run_iqr import run_iqr, compute_quantile
+
+
+class DepthData(object):
+    """Process and store depth data.
+    Supported sources include bottom track
+    vertical beam, and external depth sounder.
+
+    Attributes
+    ----------
+        depth_orig_m: np.array
+            Original multi-beam depth data from transect file (includes draft_orig) in meters.
+        depth_beams_m: np.array
+            Depth data from transect file adjusted for any draft changes, in meters.
+        depth_processed_m: np.array
+            Depth data filtered and interpolated.
+        depth_freq_kHz: float
+            Defines acoustic frequency used to measure depth.
+        depth_invalid_index:
+            Index of depths marked invalid.
+        depth_source: str
+            Source of depth data ("BT", "VB", "DS").
+        depth_source_ens: np.array(object)
+            Source of each depth value ("BT", "VB", "DS", "IN").
+        draft_orig_m: float
+            Original draft from data files, in meters.
+        draft_use_m: float
+            Draft used in computation of depth_beams_m and depth_cell_depths_m.
+        depth_cell_depth_orig_m: np.array
+            Depth to centerline of depth cells in raw data, in meters.
+        depth_cell_depth_m: np.array
+            Depth to centerline of depth cells adjusted for draft or speed of sound changes, in meters.
+        depth_cell_size_orig_m: np.array
+            Size of depth cells in meters from raw data, in meters.
+        depth_cell_size_m:
+            Size of depth cells adjusted for draft or speed of sound changes, in meters.
+        smooth_depth: np.array
+            Smoothed beam depth, in meters.
+        smooth_upper_limit: np.array
+            Smooth function upper limit of window, in meters.
+        smooth_lower_limit: np.array
+            Smooth function lower limit of window, in meters.
+        avg_method: str
+            Defines averaging method: "Simple", "IDW", only applicable to bottom track.
+        filter_type: str
+            Type of filter: "None", "TRDI", "Smooth".
+        interp_type: str
+            Type of interpolation: "None", "Linear", "Smooth".
+        valid_data_method: str
+            QRev or TRDI.
+        valid_data: np.array
+            Logical array of valid mean depth for each ensemble.
+        valid_beams: np.array
+            Logical array, 1 row for each beam identifying valid data.
+    """
+    
+    def __init__(self):
+        """Initialize attributes.
+        """
+
+        self.depth_orig_m = None  # Original multi-beam depth data from transect file (includes draft_orig) in meters
+        self.depth_beams_m = None  # Depth data from transect file adjusted for any draft changes, in meters
+        self.depth_processed_m = None  # Depth data filtered and interpolated
+        self.depth_freq_kHz = None  # Defines acoustic frequency used to measure depth
+        self.depth_invalid_index = None  # Index of depths marked invalid
+        self.depth_source = None  # Source of depth data ("BT", "VB", "DS")
+        self.depth_source_ens = None  # Source of each depth value ("BT", "VB", "DS", "IN")
+        self.draft_orig_m = None  # Original draft from data files, in meters
+        self.draft_use_m = None  # Draft used in computation of depth_beams_m and depth_cell_depths_m
+        self.depth_cell_depth_orig_m = None  # Depth cell range from the transducer, in meters
+        self.depth_cell_depth_m = None  # Depth to centerline of depth cells, in meters
+        self.depth_cell_size_orig_m = None  # Size of depth cells in meters from raw data
+        self.depth_cell_size_m = None  # Size of depth cells in meters
+        self.smooth_depth = None  # Smoothed beam depth
+        self.smooth_upper_limit = None  # Smooth function upper limit of window
+        self.smooth_lower_limit = None  # Smooth function lower limit of window
+        self.avg_method = None  # Defines averaging method: "Simple", "IDW"
+        self.filter_type = None  # Type of filter: "None", "TRDI", "Smooth"
+        self.interp_type = None  # Type of interpolation: "None", "Linear", "Smooth"
+        self.valid_data_method = None  # QRev or TRDI
+        self.valid_data = None  # Logical array of valid mean depth for each ensemble
+        self.valid_beams = None  # Logical array, 1 row for each beam identifying valid data
+        
+    def populate_data(self, depth_in, source_in, freq_in, draft_in, cell_depth_in, cell_size_in):
+        """Stores data in DepthData.
+
+        Parameters
+        ----------
+        depth_in: np.array
+            Raw depth data, in meters.
+        source_in: str
+            Source of raw depth data.
+        freq_in: float
+            Acoustic frequency used to measure depths, in kHz.
+        draft_in: float
+            Draft of transducer used to measure depths, in meters.
+        cell_depth_in: np.array
+            Depth to centerline of each depth cell, in meters. If source does not have depth cells the depth cell depth
+            from bottom track should be used.
+        cell_size_in: np.array
+            Size of each depth cell, in meters. If source does not have depth cells the depth cell size
+            from bottom track should be used.
+        """
+
+        self.depth_orig_m = depth_in
+        self.depth_beams_m = depth_in
+        self.depth_source = source_in
+        self.depth_source_ens = np.array([source_in] * depth_in.shape[-1], dtype=object)
+        self.depth_freq_kHz = freq_in
+        self.draft_orig_m = draft_in
+        self.draft_use_m = draft_in
+        self.filter_type = 'None'
+        self.interp_type = 'None'
+        self.valid_data_method = 'QRev'
+        
+        # For BT data set method to average multiple beam depths
+        if source_in == 'BT':
+            self.avg_method = 'IDW'
+        else:
+            self.avg_method = 'None'
+
+        # Store cell data
+        self.depth_cell_depth_orig_m = cell_depth_in
+        self.depth_cell_size_orig_m = cell_size_in
+        self.depth_cell_size_m = cell_size_in
+        self.depth_cell_depth_m = cell_depth_in
+
+        # Remove all filters to initialize data. The transect argument is not
+        # used when filter_type is 'Off', so a placeholder is passed.
+        self.apply_filter('dummy', filter_type='Off')
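+        # Hedged usage sketch (synthetic values): a 4-beam bottom track source
+        # with 2 ensembles and 3 depth cells might be populated as
+        #   depths = DepthData()
+        #   depths.populate_data(depth_in=np.full((4, 2), 5.0), source_in='BT',
+        #                        freq_in=600.0, draft_in=0.2,
+        #                        cell_depth_in=np.full((3, 2), 1.0),
+        #                        cell_size_in=np.full((3, 2), 0.5))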
+
+    def populate_from_qrev_mat(self, mat_data):
+        """Populates the object using data from previously saved QRev Matlab file.
+
+        Parameters
+        ----------
+        mat_data: mat_struct
+           Matlab data structure obtained from sio.loadmat
+        """
+
+        self.depth_processed_m = mat_data.depthProcessed_m
+        self.depth_freq_kHz = mat_data.depthFreq_Hz
+
+        # Support for older files that may not have had invalid index
+        if len(mat_data.depthInvalidIndex) > 0:
+            self.depth_invalid_index = mat_data.depthInvalidIndex
+        else:
+            self.depth_invalid_index = None
+
+        self.depth_source = mat_data.depthSource
+        self.depth_source_ens = mat_data.depthSourceEns
+        self.draft_orig_m = mat_data.draftOrig_m
+        self.draft_use_m = mat_data.draftUse_m
+        self.depth_cell_depth_orig_m = mat_data.depthCellDepthOrig_m
+        self.depth_cell_depth_m = mat_data.depthCellDepth_m
+        if hasattr(mat_data, "depthCellSizeOrig_m"):
+            self.depth_cell_size_orig_m = mat_data.depthCellSizeOrig_m
+        else:
+            self.depth_cell_size_orig_m = mat_data.depthCellSize_m
+        self.depth_cell_size_m = mat_data.depthCellSize_m
+
+        # Configure arrays properly for VB and DS
+        if mat_data.depthSource == 'BT':
+            self.depth_beams_m = mat_data.depthBeams_m
+            self.depth_orig_m = mat_data.depthOrig_m
+            self.smooth_depth = mat_data.smoothDepth
+            self.smooth_upper_limit = mat_data.smoothUpperLimit
+            self.smooth_lower_limit = mat_data.smoothLowerLimit
+        else:
+            self.depth_beams_m = mat_data.depthBeams_m.reshape(1, -1)
+            self.depth_orig_m = mat_data.depthOrig_m.reshape(1, -1)
+            self.smooth_depth = mat_data.smoothDepth.reshape(1, -1)
+            self.smooth_upper_limit = mat_data.smoothUpperLimit.reshape(1, -1)
+            self.smooth_lower_limit = mat_data.smoothLowerLimit.reshape(1, -1)
+
+        self.avg_method = mat_data.avgMethod
+        self.filter_type = mat_data.filterType
+        self.interp_type = mat_data.interpType
+        self.valid_data_method = mat_data.validDataMethod
+        if type(mat_data.validData) is int:
+            self.valid_data = np.array([mat_data.validData]).astype(bool)
+        else:
+            self.valid_data = mat_data.validData.astype(bool)
+
+        # Reshape array for vertical beam and depth sounder
+        if len(mat_data.validBeams.shape) < 2:
+            self.valid_beams = mat_data.validBeams.reshape(1, -1)
+        else:
+            self.valid_beams = mat_data.validBeams
+
+        self.valid_beams = self.valid_beams.astype(bool)
+
+        # Handle data with one ensemble and multiple cells or one cell and multiple ensembles
+        if len(self.depth_beams_m.shape) == 1:
+            # One ensemble multiple cells
+            self.depth_beams_m = self.depth_beams_m.reshape(self.depth_beams_m.shape[0], 1)
+            self.depth_cell_depth_m = self.depth_cell_depth_m.reshape(self.depth_cell_depth_m.shape[0], 1)
+            self.depth_cell_depth_orig_m = self.depth_cell_depth_orig_m.reshape(
+                self.depth_cell_depth_orig_m.shape[0], 1)
+            self.depth_cell_size_m = self.depth_cell_size_m.reshape(self.depth_cell_size_m.shape[0], 1)
+            self.depth_cell_size_orig_m = self.depth_cell_size_orig_m.reshape(self.depth_cell_size_orig_m.shape[0], 1)
+            self.depth_orig_m = self.depth_orig_m.reshape(self.depth_orig_m.shape[0], 1)
+            self.depth_processed_m = np.array([self.depth_processed_m])
+            self.smooth_depth = self.smooth_depth.reshape(self.smooth_depth.shape[0], 1)
+            self.smooth_lower_limit = self.smooth_lower_limit.reshape(self.smooth_lower_limit.shape[0], 1)
+            self.smooth_upper_limit = self.smooth_upper_limit.reshape(self.smooth_upper_limit.shape[0], 1)
+            self.valid_data = np.array([self.valid_data])
+            self.depth_source_ens = np.array([mat_data.depthSourceEns])
+        elif len(self.depth_cell_depth_m.shape) == 1:
+            # One cell, multiple ensembles
+            self.depth_cell_depth_m = self.depth_cell_depth_m.reshape(1, self.depth_cell_depth_m.shape[0])
+            self.depth_cell_depth_orig_m = self.depth_cell_depth_orig_m.reshape(1,
+                                                                                self.depth_cell_depth_orig_m.shape[0])
+            self.depth_cell_size_m = self.depth_cell_size_m.reshape(1, self.depth_cell_size_m.shape[0])
+            self.depth_cell_size_orig_m = self.depth_cell_size_orig_m.reshape(1, self.depth_cell_size_orig_m.shape[0])
+
+    def change_draft(self, draft):
+        """Changes the draft for object
+        
+        draft: new draft for object
+        """
+        # Compute draft change
+        draft_change = draft - self.draft_use_m
+        self.draft_use_m = draft
+        
+        # Apply draft to ensemble depths if BT or VB
+        if self.depth_source != 'DS':
+            self.depth_beams_m = self.depth_beams_m + draft_change
+            self.depth_processed_m = self.depth_processed_m + draft_change 
+            
+        # Apply draft to depth cell locations
+        if len(self.depth_cell_depth_m) > 0:
+            self.depth_cell_depth_m = self.depth_cell_depth_m + draft_change
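+        # Worked example: if draft_use_m is 0.20 m and change_draft(0.30) is
+        # called, draft_change = 0.10 m, so beam and processed depths (BT and
+        # VB sources) and all depth cell depths shift by +0.10 m.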
+
+    def add_cell_data(self, bt_depths):
+        """Adds cell data to depth objects with no cell data
+        such as the vertical beam and depth sounder.  This allows
+        a single object to contain all the required depth data
+
+        Parameters
+        ----------
+        bt_depths: DepthData
+            Object of DepthData with bottom track depths
+        """
+        
+        self.depth_cell_depth_orig_m = bt_depths.depth_cell_depth_orig_m
+        self.depth_cell_size_m = bt_depths.depth_cell_size_m
+        self.depth_cell_depth_m = bt_depths.depth_cell_depth_m
+
+    def compute_avg_bt_depth(self, method=None):
+        """Computes average depth for BT_Depths
+
+        Parameters
+        ----------
+        method: str
+            Averaging method (Simple or IDW)
+        """
+
+        if method is not None:
+            self.avg_method = method
+
+        # Get valid depths
+        depth = np.copy(self.depth_beams_m)
+        depth[np.logical_not(self.valid_beams)] = np.nan
+
+        # Compute average depths
+        self.depth_processed_m = DepthData.average_depth(depth, self.draft_use_m, self.avg_method)
+
+        # Set processed depths to nan where the mean depth is not valid
+        self.depth_processed_m[np.equal(self.valid_data, False)] = np.nan
+
+    def apply_filter(self, transect, filter_type=None):
+        """Coordinate the application of depth filters.
+
+        Parameters
+        ----------
+        transect: TransectData
+            Object of transect data.
+        filter_type: str
+            Type of filter to apply (Off, SavGol, Smooth, TRDI).
+        """
+
+        # Compute selected filter
+        if filter_type == 'Off' or filter_type is None:
+            # No filter
+            self.filter_none()
+        elif filter_type == 'SavGol':
+            # Savitzky-Golay filter
+            self.filter_savgol(transect)
+        elif filter_type == 'Smooth':
+            # Smooth filter
+            self.filter_smooth(transect)
+        elif filter_type == 'TRDI' and self.depth_source == 'BT':
+            # TRDI filter for multiple returns
+            self.filter_trdi()
+            self.filter_type = 'TRDI'
+            
+        self.valid_mean_data()
+
+        # Update processed depth with filtered results
+        if self.depth_source == 'BT':
+            # Multiple beams require averaging to obtain 1-D array
+            self.compute_avg_bt_depth()
+        else:
+            # Single beam (VB or DS) save to 1-D array
+            self.depth_processed_m = np.array(self.depth_beams_m[0, :])
+            self.depth_processed_m[np.squeeze(np.equal(self.valid_data, 0))] = np.nan
+            
+    def apply_interpolation(self, transect, method=None):
+        """Coordinates application of interpolations
+
+        Parameters
+        ----------
+        transect: TransectData
+            Object of TransectData
+        method: str
+            Type of interpolation to apply (None, HoldLast, Smooth, Linear)
+        """
+        
+        # Determine interpolation to apply
+        if method is None:
+            method = self.interp_type
+            
+        # Apply selected interpolation
+        self.interp_type = method
+        # No interpolation
+        if method == 'None':
+            self.interpolate_none()
+
+        # Hold last valid depth indefinitely
+        elif method == 'HoldLast':
+            self.interpolate_hold_last()
+
+        # Use values from a Loess smooth
+        elif method == 'Smooth':
+            self.interpolate_smooth()
+
+        # Linear interpolation
+        else:
+            self.interpolate_linear(transect=transect)
+            
+        # Identify ensembles with interpolated depths
+        idx = np.where(np.logical_not(self.valid_data[:]))
+        if len(idx[0]) > 0:
+            idx = idx[0]
+            idx2 = np.where(np.logical_not(np.isnan(self.depth_processed_m[idx])))
+            if len(idx2[0]) > 0:
+                idx2 = idx2[0]
+                self.depth_source_ens[idx[idx2]] = 'IN'
+        
+    def apply_composite(self, comp_depth, comp_source):
+        """Applies the data from CompDepth computed in DepthStructure
+        to DepthData object
+
+        Parameters
+        ----------
+        comp_depth: np.array(float)
+            Composite depth computed in DepthStructure
+        comp_source: str
+            Source of composite depth (BT, VB, DS)
+        """
+        
+        # Assign composite depth to property
+        self.depth_processed_m = comp_depth
+        
+        # Assign appropriate composite source for each ensemble
+        self.depth_source_ens[comp_source == 1] = 'BT'
+        self.depth_source_ens[comp_source == 2] = 'VB'
+        self.depth_source_ens[comp_source == 3] = 'DS'
+        self.depth_source_ens[comp_source == 4] = 'IN'
+        self.depth_source_ens[comp_source == 0] = 'NA'
+        
+    def sos_correction(self, ratio):
+        """Correct depth for new speed of sound setting
+
+        Parameters
+        ----------
+        ratio: float
+            Ratio of new to old speed of sound value
+        """
+        
+        # Correct unprocessed depths
+        self.depth_beams_m = self.draft_use_m + np.multiply(self.depth_beams_m - self.draft_use_m, ratio)
+
+        # Correct processed depths
+        self.depth_processed_m = self.draft_use_m + np.multiply(self.depth_processed_m - self.draft_use_m, ratio)
+        
+        # Correct cell size and location
+        self.depth_cell_size_m = np.multiply(self.depth_cell_size_m, ratio)
+        self.depth_cell_depth_m = self.draft_use_m + np.multiply(self.depth_cell_depth_m - self.draft_use_m, ratio)
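+        # Worked example: with draft_use_m = 0.2 m, a beam depth of 5.2 m, and
+        # ratio = 1.01, the corrected depth is 0.2 + (5.2 - 0.2) * 1.01 = 5.25 m;
+        # cell sizes simply scale by the ratio.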
+        
+    def valid_mean_data(self):
+        """Determines if raw data are sufficient to compute a valid depth without interpolation.
+        """
+        
+        if self.depth_source == 'BT':
+            self.valid_data = np.tile(True, self.valid_beams.shape[1])
+            nvalid = np.sum(self.valid_beams, axis=0)
+            
+            if self.valid_data_method == 'TRDI':
+                self.valid_data[nvalid < 3] = False
+            else:
+                self.valid_data[nvalid < 2] = False
+        else:
+            self.valid_data = self.valid_beams[0, :]
+            
+    def filter_none(self):
+        """Applies no filter to depth data. Removes filter if one was applied.
+        """
+        
+        # Set all ensembles to have valid data
+        if len(self.depth_beams_m.shape) > 1:
+            self.valid_beams = np.tile(True, self.depth_beams_m.shape)
+        else:
+            self.valid_beams = np.tile(True, (1, self.depth_beams_m.shape[0]))
+        
+        # Set ensembles with no depth data to invalid
+        self.valid_beams[self.depth_beams_m == 0] = False
+        self.valid_beams[np.isnan(self.depth_beams_m)] = False
+        
+        self.filter_type = 'None'
+        
+    def filter_smooth(self, transect):
+        """This filter uses a moving InterQuartile Range filter on residuals from a
+        robust Loess smooth of the depths in each beam to identify unnatural spikes in the depth
+        measurements from each beam.  Each beam is filtered independently.  The filter
+        criteria are set to be the maximum of the IQR filter, 5% of the measured depth, or 0.1 meter
+
+        Parameters
+        ----------
+        transect: TransectData
+            Object of TransectData
+
+        Notes
+        -----
+        half_width - number of points to each side of target point used in computing IQR.
+            This is the raw number of points actual points used may be less if some are bad.
+
+        multiplier - number multiplied times the IQR to determine the filter criteria
+        
+        """
+
+        # If the smoothed depth has not been computed
+        if self.smooth_depth is None or len(self.smooth_depth) == 0:
+            
+            # Set filter characteristics
+            self.filter_type = 'Smooth'
+            # cycles = 3
+            # half_width = 10
+            # multiplier = 15
+            
+            # Determine number of beams
+            if len(self.depth_orig_m.shape) > 1:
+                n_beams, n_ensembles = self.depth_orig_m.shape[0], self.depth_orig_m.shape[1]
+                depth_raw = np.copy(self.depth_orig_m)
+            else:
+                n_beams = 1
+                n_ensembles = self.depth_orig_m.shape[0]
+                depth_raw = np.copy(np.reshape(self.depth_orig_m, (1, n_ensembles)))
+
+            # Set bad depths to nan
+            depth = repmat([np.nan], n_beams, n_ensembles)
+
+            # Arrays initialized
+            depth_smooth = repmat([np.nan], n_beams, n_ensembles)
+            upper_limit = repmat([np.nan], n_beams, n_ensembles)
+            lower_limit = repmat([np.nan], n_beams, n_ensembles)
+            depth_filtered = depth  # Note: alias of depth; the assignment below fills both
+            depth[nan_greater(depth_raw, 0)] = depth_raw[nan_greater(depth_raw, 0)]
+
+            # Create position array
+            boat_vel_selected = getattr(transect.boat_vel, transect.boat_vel.selected)
+            if boat_vel_selected is not None:
+                track_x = boat_vel_selected.u_processed_mps * transect.date_time.ens_duration_sec
+                track_y = boat_vel_selected.v_processed_mps * transect.date_time.ens_duration_sec
+            else:
+                track_x = np.nan
+                track_y = np.nan
+
+            idx = np.where(np.isnan(track_x))
+            if len(idx[0]) < 2:
+                x = np.nancumsum(np.sqrt(track_x**2+track_y**2))
+            else:
+                x = np.nancumsum(transect.date_time.ens_duration_sec)
+
+            multi_processing = False
+            if multi_processing:
+                with concurrent.futures.ProcessPoolExecutor() as executor:
+                    results = executor.map(self.compute_smooth, depth, depth_filtered, it.repeat(x))
+
+                for j, result in enumerate(results):
+                    depth_smooth[j] = result[0]
+                    upper_limit[j] = result[1]
+                    lower_limit[j] = result[2]
+            else:
+                # Loop for each beam, smooth is applied to each beam
+                for j in range(n_beams):
+                    depth_smooth[j], upper_limit[j], lower_limit[j] = self.compute_smooth(depth[j],
+                                                                                          depth_filtered[j],
+                                                                                          x)
+
+            # Save smooth results to avoid recomputing them if needed later
+            self.smooth_depth = depth_smooth
+            self.smooth_upper_limit = upper_limit
+            self.smooth_lower_limit = lower_limit
+
+        # Reset valid data
+        self.filter_none()
+        
+        # Set filter type
+        self.filter_type = 'Smooth'
+        
+        # Determine number of beams
+        if len(self.depth_orig_m.shape) > 1:
+            n_beams, n_ensembles = self.depth_orig_m.shape[0], self.depth_orig_m.shape[1]
+            depth_raw = np.copy(self.depth_orig_m)
+        else:
+            n_beams = 1
+            n_ensembles = self.depth_orig_m.shape[0]
+            depth_raw = np.reshape(self.depth_orig_m, (1, n_ensembles))
+
+        depth_res = repmat([np.nan], n_beams, n_ensembles)
+
+        # Set bad depths to nan
+        depth = repmat(np.nan, depth_raw.shape[0], depth_raw.shape[1])
+        depth[nan_greater(depth_raw, 0)] = depth_raw[nan_greater(depth_raw, 0)]
+        
+        # Apply filter
+        for j in range(n_beams):
+            if np.nansum(self.smooth_upper_limit[j, :]) > 0:
+                bad_idx = np.where(
+                    np.logical_or(nan_greater(depth[j], self.smooth_upper_limit[j]),
+                                  nan_less(depth[j], self.smooth_lower_limit[j])))[0]
+                # Update residual matrix
+                depth_res[j, bad_idx] = np.nan
+
+            else:
+                bad_idx = np.isnan(depth[j])
+
+            # Update valid data matrix
+            self.valid_beams[j, bad_idx] = False
+
+    @staticmethod
+    def compute_smooth(depth, depth_filtered, x):
+        cycles = 3
+        half_width = 10
+        multiplier = 15
+
+        upper_limit = np.nan
+        lower_limit = np.nan
+
+        # At least 50% of the data in a beam must be valid to apply the smooth
+        # if np.nansum((np.isnan(depth_filtered) == False) / len(depth_filtered)) > .5:
+        # Compute residuals based on robust loess smooth
+        if len(x) > 1:
+            # Fit smooth
+            try:
+                smooth_fit = rloess(x, depth_filtered, 20)
+                depth_smooth = smooth_fit
+            except ValueError:
+                depth_smooth = depth_filtered
+        else:
+            depth_smooth = depth_filtered
+
+        depth_res = depth - depth_smooth
+
+        # Run the filter multiple times
+        for n in range(cycles - 1):
+            max_upper_limit = 9999  # Default cap used when no valid depths are available
+            idx = np.where(np.logical_not(np.isnan(depth_filtered)))[0]
+            if len(idx) > 0:
+                max_upper_limit = compute_quantile(depth_filtered[idx], 0.90) * 3
+
+            # Compute inner quartile range
+            fill_array = run_iqr(half_width, depth_res)
+
+            # Compute filter criteria and apply appropriate
+            criteria = multiplier * fill_array
+            idx = np.where(nan_less(criteria, np.max(np.vstack((depth * .05,
+                                                                np.ones(depth.shape) / 10)), 0)))[0]
+            if len(idx) > 0:
+                criteria[idx] = np.max(np.vstack((depth[idx] * .05, np.ones(idx.shape) / 10)), 0)
+
+            # Compute limits
+            upper_limit = depth_smooth + criteria
+            idx = np.where(np.logical_or(np.greater(upper_limit, max_upper_limit), np.isnan(upper_limit)))[0]
+            if len(idx) > 0:
+                upper_limit[idx] = max_upper_limit
+            lower_limit = depth_smooth - criteria
+            idx = np.where(np.less(lower_limit, 0))[0]
+            lower_limit[idx] = 0
+
+            bad_idx = np.where(
+                np.logical_or(nan_greater(depth, upper_limit), nan_less(depth, lower_limit)))[0]
+            if len(bad_idx) == 0:
+                break
+            else:
+                depth_filtered[bad_idx] = np.nan
+                # Fit smooth
+                try:
+                    smooth_fit = rloess(x, depth_filtered, 20)
+                    depth_smooth = smooth_fit
+                except ValueError:
+                    depth_smooth = depth_filtered
+
+                depth_res = depth - depth_smooth
+
+        return depth_smooth, upper_limit, lower_limit
+
+    def filter_savgol(self, transect):
+        """This filter uses a moving InterQuartile Range filter on residuals from a
+        a Savitzky-Golay filter on y with non-uniform spaced x
+        of the depths in each beam to identify unnatural spikes in the depth
+        measurements from each beam.  Each beam is filtered independently.  The filter
+        criteria are set to be the maximum of the IQR filter, 5% of the measured depth, or 0.1 meter
+
+        Parameters
+        ----------
+        transect: TransectData
+            Object of TransectData
+
+        Notes
+        -----
+        half_width - number of points to each side of target point used in computing IQR.
+            This is the raw number of points actual points used may be less if some are bad.
+
+        multiplier - number multiplied times the IQR to determine the filter criteria
+
+        """
+
+        # Determine number of beams
+        if len(self.depth_orig_m.shape) > 1:
+            # For slant beams
+            n_beams, n_ensembles = self.depth_orig_m.shape[0], self.depth_orig_m.shape[1]
+            depth_raw = np.copy(self.depth_orig_m)
+        else:
+            # For vertical beam or depth sounder
+            n_beams = 1
+            n_ensembles = self.depth_orig_m.shape[0]
+            depth_raw = np.copy(np.reshape(self.depth_orig_m, (1, n_ensembles)))
+
+        # Set bad depths to nan
+        depth = repmat([np.nan], n_beams, n_ensembles)
+        depth[depth_raw > 0] = depth_raw[depth_raw > 0]
+
+        # If the smoothed depth has not been computed
+        if self.smooth_depth is None:
+
+            # Set filter characteristics
+            self.filter_type = 'SavGol'
+            cycles = 3
+            half_width = 10
+            multiplier = 15
+
+            # Arrays initialized
+            depth_smooth = repmat([np.nan], n_beams, n_ensembles)
+            depth_res = repmat([np.nan], n_beams, n_ensembles)
+            upper_limit = repmat([np.nan], n_beams, n_ensembles)
+            lower_limit = repmat([np.nan], n_beams, n_ensembles)
+
+            # Create position array. If there are insufficient track data use elapsed time
+            boat_vel_selected = getattr(transect.boat_vel, transect.boat_vel.selected)
+            if boat_vel_selected is not None and \
+                    np.nansum(np.isnan(boat_vel_selected.u_processed_mps)) < 2:
+                track_x = boat_vel_selected.u_processed_mps * transect.date_time.ens_duration_sec
+                track_y = boat_vel_selected.v_processed_mps * transect.date_time.ens_duration_sec
+                x = np.nancumsum(np.sqrt(track_x ** 2 + track_y ** 2))
+            else:
+                x = np.nancumsum(transect.date_time.ens_duration_sec)
+
+            # Loop for each beam, smooth is applied to each beam
+            for j in range(n_beams):
+                # At least 50% of the data in a beam must be valid to apply the smooth
+                # if np.nansum((np.isnan(depth[j, :]) == False) / depth.shape[0]) > .5:
+
+                # Compute residuals based on non-uniform Savitzky-Golay
+                try:
+                    valid_depth_idx = np.logical_not(np.isnan(depth[j, :]))
+                    x_fit = x[valid_depth_idx]
+                    y_fit = depth[j, valid_depth_idx]
+                    smooth_fit = non_uniform_savgol(x_fit, y_fit, 15, 3)
+                    depth_smooth[j, valid_depth_idx] = smooth_fit
+                except ValueError:
+                    depth_smooth[j, :] = depth[j, :]
+
+                depth_res[j, :] = depth[j, :] - depth_smooth[j, :]
+
+                # Run the filter multiple times
+                for n in range(cycles - 1):
+
+                    # Compute inner quartile range
+                    fill_array = run_iqr(half_width, depth_res[j, :])
+                    # Compute filter criteria
+                    criteria = multiplier * fill_array
+
+                    # Adjust criteria so that it is never less than 5% of depth or 0.1 m, whichever is greater
+                    idx = np.where(criteria < np.max(np.vstack((depth[j, :] * .05,
+                                                                np.ones(depth.shape) / 10)), 0))[0]
+                    if len(idx) > 0:
+                        criteria[idx] = np.max(np.vstack((depth[j, idx] * .05, np.ones(idx.shape) / 10)), 0)
+
+                    # Compute limits
+                    upper_limit[j] = depth_smooth[j, :] + criteria
+                    lower_limit[j] = depth_smooth[j, :] - criteria
+
+                    bad_idx = np.where(np.logical_or(np.greater(depth[j], upper_limit[j]),
+                                                     np.less(depth[j], lower_limit[j])))[0]
+                    # Update residual matrix
+                    depth_res[j, bad_idx] = np.nan
+
+            # Save smooth results to avoid recomputing them if needed later
+            self.smooth_depth = depth_smooth
+            self.smooth_upper_limit = upper_limit
+            self.smooth_lower_limit = lower_limit
+
+        # Reset valid data
+        self.filter_none()
+
+        # Set filter type
+        self.filter_type = 'SavGol'
+
+        # Apply filter
+        for j in range(n_beams):
+            if np.nansum(self.smooth_upper_limit[j]) > 0:
+                bad_idx = np.where(
+                    np.logical_or(np.greater(depth[j], self.smooth_upper_limit[j]),
+                                  np.less(depth[j], self.smooth_lower_limit[j])))[0]
+            else:
+                bad_idx = np.isnan(depth[j])
+
+            # Update valid data matrix
+            self.valid_beams[j, bad_idx] = False
+
+    def interpolate_none(self):
+        """Applies no interpolation.
+        """
+        
+        # Compute processed depth without interpolation
+        if self.depth_source == 'BT':
+            # Bottom track methods
+            self.compute_avg_bt_depth()
+        else:
+            # Vertical beam or depth sounder depths
+            self.depth_processed_m = self.depth_beams_m[0, :]
+            
+        self.depth_processed_m[np.squeeze(np.equal(self.valid_data, False))] = np.nan
+        
+        # Set interpolation type
+        self.interp_type = 'None'
+        
+    def interpolate_hold_last(self):
+        """This function holds the last valid value until the next valid data point.
+        """
+        
+        # Get number of ensembles
+        n_ensembles = len(self.depth_processed_m)
+        
+        # Process data by ensemble
+        for n in range(1, n_ensembles):
+            
+            # If the current ensemble's depth is invalid, assign the depth from the previous ensemble
+            if not self.valid_data[n]:
+                self.depth_processed_m[n] = self.depth_processed_m[n-1]
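+        # Worked example: with valid_data = [T, F, F, T] and processed depths
+        # [2.0, nan, nan, 3.0], the loop above yields [2.0, 2.0, 2.0, 3.0].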
+
+    def interpolate_next(self):
+        """This function back fills with the next valid value.
+        """
+
+        # Get number of ensembles
+        n_ens = len(self.depth_processed_m)
+
+        # Process data by ensemble
+        for n in np.arange(0, n_ens-1)[::-1]:
+
+            # If the current ensemble's depth is invalid, assign the depth from the next ensemble
+            if not self.valid_data[n]:
+                self.depth_processed_m[n] = self.depth_processed_m[n + 1]
+
+    def interpolate_smooth(self):
+        """Apply interpolation based on the robust loess smooth
+        """
+        
+        self.interp_type = 'Smooth'
+        
+        # Get a copy of the depth data so depth_beams_m is not modified
+        depth_new = np.copy(self.depth_beams_m)
+
+        # Update depth data with interpolated depths
+        depth_new[np.logical_not(self.valid_beams)] = self.smooth_depth[np.logical_not(self.valid_beams)]
+        
+        # Compute processed depths with interpolated values
+        if self.depth_source == 'BT':
+            # Temporarily change self.depth_beams_m to compute average
+            # for bottom track based depths
+            temp_save = copy.deepcopy(self.depth_beams_m)
+            self.depth_beams_m = depth_new
+            self.compute_avg_bt_depth()
+            self.depth_beams_m = temp_save
+
+        else:
+            # Assignment for VB or DS
+            self.depth_processed_m = depth_new[0, :]
+            
+    def interpolate_linear(self, transect):
+        """Apply linear interpolation
+        """
+        
+        # Set interpolation type
+        self.interp_type = 'Linear'
+
+        # Create position array
+        select = getattr(transect.boat_vel, transect.boat_vel.selected)
+        if select is not None:
+            boat_vel_x = select.u_processed_mps
+            boat_vel_y = select.v_processed_mps
+            track_x = boat_vel_x * transect.date_time.ens_duration_sec
+            track_y = boat_vel_y * transect.date_time.ens_duration_sec
+        else:
+            select = getattr(transect.boat_vel, 'bt_vel')
+            track_x = np.tile(np.nan, select.u_processed_mps.shape)
+            track_y = np.tile(np.nan, select.v_processed_mps.shape)
+              
+        idx = np.where(np.isnan(track_x[1:]))
+        
+        # If the navigation reference has no gaps use it for interpolation, if not use time
+        if len(idx[0]) < 1:
+            x = np.nancumsum(np.sqrt(track_x**2 + track_y**2))
+        else:
+            # Compute accumulated time
+            x = np.nancumsum(transect.date_time.ens_duration_sec)
+            
+        # Determine number of beams
+        n_beams = self.depth_beams_m.shape[0]
+        depth_mono = copy.deepcopy(self.depth_beams_m)
+        depth_new = copy.deepcopy(self.depth_beams_m)
+        
+        # Create strict monotonic arrays for depth and track by identifying duplicate
+        # track values.  The first track value is used and the remaining duplicates
+        # are set to nan.  The depth assigned to that first track value is the average
+        # of all duplicates.  The depths for the duplicates are then set to nan.  Only
+        # valid strictly monotonic track and depth data are used as the input to linear
+        # interpolation.  Only the interpolated data for invalid depths are added
+        # to the valid depth data to create depth_new.
+
+        x_mono = x  # Note: alias of x; nans assigned to x below also apply to x_mono
+        
+        idx0 = np.where(np.diff(x) == 0)[0]
+        if len(idx0) > 0:
+            if len(idx0) > 1:
+                # Split array into subarrays in proper sequence, e.g., [[2, 3, 4], [7, 8, 9]]
+                idx1 = np.add(np.where(np.diff(idx0) != 1)[0], 1)
+                group = np.split(idx0, idx1)
+
+            else:
+                # Group of only 1 point
+                group = np.array([idx0])
+
+            # Replace repeated values with mean
+            n_group = len(group)
+            for k in range(n_group):
+                indices = group[k]
+                indices = np.append(indices, indices[-1] + 1)
+                depth_avg = np.nanmean(depth_mono[:, indices], axis=1)
+                depth_mono[:, indices[0]] = depth_avg
+                depth_mono[:, indices[1:]] = np.nan
+                x[indices[1:]] = np.nan
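+        # Worked example of the dedup above: track x = [0, 1, 1, 2] with beam
+        # depths [4, 5, 7, 6] becomes x = [0, 1, nan, 2] and depth_mono =
+        # [4, 6, nan, 6]; the duplicates at x = 1 are averaged into the first.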
+                
+        # Interpolate each beam
+
+        for n in range(n_beams):
+            # Determine ensembles with valid depth data
+            valid_depth_mono = np.logical_not(np.isnan(depth_mono[n]))
+            valid_x_mono = np.logical_not(np.isnan(x_mono))
+            valid_data = copy.deepcopy(self.valid_beams[n])
+            valid = np.vstack([valid_depth_mono, valid_x_mono, valid_data])
+            valid = np.all(valid, 0)
+
+            if np.sum(valid) > 1:
+                # Compute interpolation function from all valid data
+                depth_int = np.interp(x_mono, x_mono[valid], depth_mono[n, valid], left=np.nan, right=np.nan)
+                # Fill in invalid data with interpolated data
+                depth_new[n, np.logical_not(self.valid_beams[n])] = depth_int[np.logical_not(self.valid_beams[n])]
+
+        if self.depth_source == 'BT':
+            # Bottom track depths
+            self.depth_processed_m = self.average_depth(depth_new, self.draft_use_m, self.avg_method)
+        else:
+            # Vertical beam or depth sounder depths
+            self.depth_processed_m = np.copy(depth_new[0, :])
+
+    @staticmethod
+    def average_depth(depth, draft, method):
+        """Compute average depth from bottom track beam depths.
+
+        Parameters
+        ----------
+        depth: np.array(float)
+            Individual beam depths for each beam in each ensemble including the draft
+        draft: float
+            Draft of ADCP
+        method: str
+            Averaging method (Simple, IDW)
+        
+        Returns
+        -------
+        avg_depth: np.array(float)
+            Average depth for each ensemble
+        
+        """
+        if method == 'Simple':
+            avg_depth = np.nanmean(depth, 0)
+        else:
+            # Compute inverse weighted mean depth
+            rng = depth - draft
+            w = 1 - np.divide(rng, np.nansum(rng, 0))
+            avg_depth = draft+np.nansum(np.divide((rng * w), np.nansum(w, 0), where=np.nansum(w, 0) != 0), 0)
+            avg_depth[avg_depth == draft] = np.nan
+
+        return avg_depth
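+        # Worked IDW example (single ensemble): draft = 0.2 m and beam depths
+        # [4.2, 5.2, 6.2] m give rng = [4, 5, 6], w = 1 - rng / 15 =
+        # [11/15, 10/15, 9/15], and avg = 0.2 + (4 * 11 + 5 * 10 + 6 * 9) / 30
+        # = 0.2 + 148 / 30, about 5.13 m.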
+
+    def filter_trdi(self):
+        """Filter used by TRDI to filter out multiple reflections that get digitized as depth.
+        """
+
+        # Assign raw depth data to local variable
+        depth_raw = np.copy(self.depth_orig_m)
+
+        # Determine number of beams
+        n_beams = depth_raw.shape[0]
+
+        # Reset filters to none
+        self.filter_none()
+
+        # Set filter type to TRDI
+        self.filter_type = 'TRDI'
+
+        for n in range(n_beams):
+            depth_ratio = depth_raw[n, :] / depth_raw
+            exceeded = depth_ratio > 1.75
+            exceeded_ens = np.nansum(exceeded, 0)
+            self.valid_beams[n, exceeded_ens > 0] = False
+
+    # ============================================================================================
+    # The methods below are not being used.
+    # The methods have been moved to separate files and compiled using Numba AOT.
+    # The methods below are included here for historical purposes
+    # and may provide an easier approach to adding new features/algorithms prior to recoding
+    # them in a manner that can be compiled using Numba AOT.
+    # =============================================================================================
+    @staticmethod
+    def run_iqr(half_width, data):
+        """Computes a running Innerquartile Range
+        The routine accepts a column vector as input.  "halfWidth" number of data
+        points for computing the Innerquartile Range are selected before and
+        after the target data point, but no including the target data point.
+        Near the ends of the series the number of points before or after are reduced.
+        Nan in the data are counted as points.  The IQR is computed on the slected
+        subset of points.  The process occurs for each point in the provided column vector.
+        A column vector with the computed IQR at each point is returned.
+
+        Parameters
+        ----------
+        half_width: int
+            Number of ensembles before and after current ensemble which are used to compute the IQR
+        data: np.array(float)
+            Data for which the IQR is computed
+        """
+        npts = len(data)
+        half_width = int(half_width)
+
+        if npts < 20:
+            half_width = int(np.floor(npts / 2))
+
+        iqr_array = []
+
+        # Compute IQR for each point
+        for n in range(npts):
+
+            # Sample selection for 1st point
+            if n == 0:
+                sample = data[1:1 + half_width]
+
+            # Sample selection at end of data set
+            elif n + half_width > npts:
+                sample = np.hstack([data[n - half_width - 1:n - 1], data[n:npts]])
+
+            # Sample selection at beginning of data set
+            elif half_width >= n + 1:
+                sample = np.hstack([data[0:n], data[n + 1:n + half_width + 1]])
+
+            # Sample selection in body of data set
+            else:
+                sample = np.hstack([data[n - half_width:n], data[n + 1:n + half_width + 1]])
+
+            iqr_array.append(iqr(sample))
+
+        return np.array(iqr_array)
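+        # Worked example of the window selection: with half_width = 2 on a
+        # series of at least 20 points (so half_width is not reduced), the
+        # sample at n = 3 is data[1:3] plus data[4:6], i.e. two points on each
+        # side with the target point excluded.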
diff --git a/Classes/DepthStructure.py b/Classes/DepthStructure.py
new file mode 100644
index 0000000000000000000000000000000000000000..4dcb2c6bbdb9445e065259fb056f92fc31c6d6bf
--- /dev/null
+++ b/Classes/DepthStructure.py
@@ -0,0 +1,368 @@
+import numpy as np
+from Classes.DepthData import DepthData
+
+
+class DepthStructure(object):
+    """This class creates the data structure used store depths from different sources
+
+    Attributes
+    ----------
+    selected: str
+        Name of object DepthData that contains depth data.
+    bt_depths: DepthData
+        Object of DepthData for bottom track based depths.
+    vb_depths: DepthData
+        Object of DepthData for vertical beam based depths.
+    ds_depths: DepthData
+        Object of DepthData for depth sounder based depths.
+    composite: str
+        Indicates use of composite depths ("On" or "Off".
+    """
+    
+    def __init__(self):
+        """Creates object and initializes variables to None"""
+
+        self.selected = None  # name of object DepthData that contains the depth data for q computation
+        self.bt_depths = None  # object of DepthData for bottom track depth data
+        self.vb_depths = None  # object of DepthData for vertical beam depth data
+        self.ds_depths = None  # object of DepthData for depth sounder depth data
+        self.composite = "On"  # Turn composite depths "on" or "off"
+
+    def add_depth_object(self, depth_in, source_in, freq_in, draft_in, cell_depth_in, cell_size_in):
+        """Adds a DepthData object to the depth structure for the specified type of depths.
+
+        Parameters
+        ----------
+        depth_in: np.array
+            Depth data in meters.
+        source_in: str
+            Specifies source of depth data: bottom track (BT), vertical beam (VB), or depth sounder (DS)
+        freq_in: np.array
+            Acoustic frequency in kHz of beams used to determine depth.
+        draft_in:
+            Draft of transducer (in meters) used to measure depths.
+        cell_depth_in
+            Depth of each cell in the profile. If the referenced depth does not have depth cells the depth cell
+            values from the bottom track (BT) depths should be used.
+        cell_size_in
+            Size of each depth cell. If the referenced depth does not have depth cells the cell size from
+            the bottom track (BT) depths should be used.
+        """
+
+        if source_in == 'BT':
+            self.bt_depths = DepthData()
+            self.bt_depths.populate_data(depth_in, source_in, freq_in, draft_in, cell_depth_in, cell_size_in)
+        elif source_in == 'VB':
+            self.vb_depths = DepthData()
+            self.vb_depths.populate_data(depth_in, source_in, freq_in, draft_in, cell_depth_in, cell_size_in)
+        elif source_in == 'DS':
+            self.ds_depths = DepthData()
+            self.ds_depths.populate_data(depth_in, source_in, freq_in, draft_in, cell_depth_in, cell_size_in)
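+        # Hedged usage sketch (variable names illustrative): BT depths are
+        # typically added first so their cell geometry can be reused by
+        # sources without depth cells, e.g.,
+        #   depths = DepthStructure()
+        #   depths.add_depth_object(bt_depth, 'BT', freq, draft, cell_depth, cell_size)
+        #   depths.add_depth_object(vb_depth, 'VB', vb_freq, draft, cell_depth, cell_size)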
+
+    def populate_from_qrev_mat(self, transect):
+        """Populates the object using data from previously saved QRev Matlab file.
+
+        Parameters
+        ----------
+        transect: mat_struct
+           Matlab data structure obtained from sio.loadmat
+        """
+        if hasattr(transect, 'depths'):
+
+            self.bt_depths = DepthData()
+            self.bt_depths.populate_from_qrev_mat(transect.depths.btDepths)
+
+            try:
+                self.vb_depths = DepthData()
+                self.vb_depths.populate_from_qrev_mat(transect.depths.vbDepths)
+            except AttributeError:
+                self.vb_depths = None
+
+            try:
+                self.ds_depths = DepthData()
+                self.ds_depths.populate_from_qrev_mat(transect.depths.dsDepths)
+            except AttributeError:
+                self.ds_depths = None
+
+            if transect.depths.selected == 'btDepths':
+                self.selected = 'bt_depths'
+            elif transect.depths.selected == 'vbDepths':
+                self.selected = 'vb_depths'
+            elif transect.depths.selected == 'dsDepths':
+                self.selected = 'ds_depths'
+            self.composite = transect.depths.composite
+            if self.vb_depths is None and self.ds_depths is None:
+                self.composite = 'Off'
+
+    def composite_depths(self, transect, setting="Off"):
+        """Depth composite is based on the following assumptions
+        
+        1. If a depth sounder is available the user must have assumed the ADCP beams
+        (BT or vertical) might have problems and it will be the second alternative if 
+        not selected as the preferred source
+        
+        2. For 4-beam BT depths, if 3 beams are valid the average is considered valid.
+        It may be based on interpolation of the invalid beam.  However, if only 2 beams
+        are valid even though the other two beams may be interpolated and included in the average the
+        average will be replaced by an alternative if available.  If no alternative is 
+        available the multi-beam average based on available beams and interpolation will
+        be used.
+
+        Parameters
+        ----------
+        transect: TransectData
+            Transect object containing all data.
+        setting: str
+            Setting to use ("On") or not use ("Off") composite depths.
+        """
+        
+        if setting is None:
+            setting = self.composite
+        else:
+            self.composite = setting
+            
+        # The primary depth reference is the selected reference
+        ref = self.selected
+        comp_depth = np.array([])
+
+        if setting == 'On':
+            # Prepare vector of valid BT averages, which are defined as having at least 2 valid beams
+            bt_valid = self.bt_depths.valid_data
+            n_ensembles = bt_valid.shape[-1]
+            bt_filtered = np.copy(self.bt_depths.depth_processed_m)
+            bt_filtered[np.logical_not(bt_valid)] = np.nan
+            
+            # Prepare vertical beam data, using only data prior to interpolation
+            if self.vb_depths is not None:
+                vb_filtered = np.copy(self.vb_depths.depth_processed_m)
+                vb_filtered[np.squeeze(np.equal(self.vb_depths.valid_data, False))] = np.nan
+            else:
+                vb_filtered = np.tile(np.nan, n_ensembles)
+                  
+            # Prepare depth sounder data, using only data prior to interpolation
+            if self.ds_depths is not None:
+                ds_filtered = np.copy(self.ds_depths.depth_processed_m)
+                ds_filtered[np.squeeze(np.equal(self.ds_depths.valid_data, False))] = np.nan
+            else:
+                ds_filtered = np.tile(np.nan, n_ensembles)
+
+            comp_source = np.tile(np.nan, bt_filtered.shape)
+
+            # Apply composite depths
+            if ref == 'bt_depths':
+                comp_depth = np.copy(bt_filtered)
+                comp_source[np.isnan(comp_depth) == False] = 1
+                comp_depth[np.isnan(comp_depth)] = np.squeeze(ds_filtered[np.isnan(comp_depth)])
+                comp_source[np.logical_and((np.isnan(comp_depth) == False), (np.isnan(comp_source) == True))] = 3
+                comp_depth[np.isnan(comp_depth)] = vb_filtered[np.isnan(comp_depth)]
+                comp_source[np.logical_and((np.isnan(comp_depth) == False), (np.isnan(comp_source) == True))] = 2
+                comp_depth = self.interpolate_composite(transect=transect, composite_depth=comp_depth)
+                comp_source[np.logical_and((np.isnan(comp_depth) == False), (np.isnan(comp_source) == True))] = 4
+                
+            elif ref == 'vb_depths':
+                comp_depth = np.copy(vb_filtered)
+                comp_source[np.isnan(comp_depth) == False] = 2
+                comp_depth[np.isnan(comp_depth)] = np.squeeze(ds_filtered[np.isnan(comp_depth)])
+                comp_source[np.logical_and((np.isnan(comp_depth) == False), (np.isnan(comp_source) == True))] = 3
+                comp_depth[np.isnan(comp_depth)] = np.squeeze(bt_filtered[np.isnan(comp_depth)])
+                comp_source[np.logical_and((np.isnan(comp_depth) == False), (np.isnan(comp_source) == True))] = 1
+                comp_depth = self.interpolate_composite(transect=transect, composite_depth=comp_depth)
+                comp_source[np.logical_and((np.isnan(comp_depth) == False), (np.isnan(comp_source) == True))] = 4
+                
+            elif ref == 'ds_depths':
+                comp_depth = np.copy(ds_filtered)
+                comp_source[np.isnan(comp_depth) == False] = 3
+                comp_depth[np.isnan(comp_depth)] = np.squeeze(vb_filtered[np.isnan(comp_depth)])
+                comp_source[np.logical_and((np.isnan(comp_depth) == False), (np.isnan(comp_source) == True))] = 2
+                comp_depth[np.isnan(comp_depth)] = np.squeeze(bt_filtered[np.isnan(comp_depth)])
+                comp_source[np.logical_and((np.isnan(comp_depth) == False), (np.isnan(comp_source) == True))] = 1
+                comp_depth = self.interpolate_composite(transect=transect, composite_depth=comp_depth)
+                comp_source[np.logical_and((np.isnan(comp_depth) == False), (np.isnan(comp_source) == True))] = 4
+
+            # Save composite depth to depth_processed of selected primary reference
+            selected_data = getattr(self, ref)
+            selected_data.apply_composite(comp_depth, comp_source.astype(int))
+                
+        else:
+            selected_data = getattr(self, ref)
+            comp_source = np.zeros(selected_data.depth_processed_m.shape)
+            
+            if ref == 'bt_depths':
+                selected_data.valid_data[np.isnan(selected_data.valid_data)] = False
+                comp_source[np.squeeze(selected_data.valid_data)] = 1
+            elif ref == 'vb_depths':
+                comp_source[np.squeeze(selected_data.valid_data)] = 2
+            elif ref == 'ds_depths':
+                comp_source[np.squeeze(selected_data.valid_data)] = 3
+
+            selected_data.apply_interpolation(transect)
+            comp_depth = selected_data.depth_processed_m
+            selected_data.apply_composite(comp_depth, comp_source.astype(int))
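For orientation, the numeric codes written to comp_source above are 1 for bottom track, 2 for the vertical beam, 3 for the depth sounder, and 4 for interpolated values. The following is a minimal sketch of the fill-only-what-is-still-NaN pattern using made-up arrays, not the module's data:

```python
import numpy as np

# Hypothetical per-ensemble depths from three sources
bt = np.array([4.2, np.nan, np.nan, 4.5])
ds = np.array([np.nan, 4.3, np.nan, 4.4])
vb = np.array([4.1, 4.2, 4.3, 4.4])

comp_depth = np.copy(bt)
comp_source = np.tile(np.nan, bt.shape)
comp_source[np.logical_not(np.isnan(comp_depth))] = 1

# Each fallback source fills only the ensembles that are still invalid,
# and the source array records which code supplied each value
for data, code in ((ds, 3), (vb, 2)):
    comp_depth[np.isnan(comp_depth)] = data[np.isnan(comp_depth)]
    comp_source[np.logical_and(np.logical_not(np.isnan(comp_depth)),
                               np.isnan(comp_source))] = code

print(comp_depth)   # [4.2 4.3 4.3 4.5]
print(comp_source)  # [1. 3. 2. 1.]
```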
+
+    def set_draft(self, target, draft):
+        """This function will change the ref_depth draft.
+
+        Parameters
+        ----------
+        target: str
+            Source of depth data.
+        draft: float
+            New draft.
+        """
+        
+        if target == 'ADCP':
+            self.bt_depths.change_draft(draft)
+            self.vb_depths.change_draft(draft)
+        else:
+            self.ds_depths.change_draft(draft)    
+            
+    def depth_filter(self, transect, filter_method):
+        """Method to apply filter to all available depth sources, so that
+        all sources have the same filter applied.
+
+        Parameters
+        ----------
+        transect: TransectData
+            Object of TransectData
+        filter_method: str
+            Method to use to filter data (Smooth, TRDI, None).
+        """
+        
+        if self.bt_depths is not None:
+            self.bt_depths.apply_filter(transect, filter_method)
+        if self.vb_depths is not None:
+            self.vb_depths.apply_filter(transect, filter_method)
+        if self.ds_depths is not None:
+            self.ds_depths.apply_filter(transect, filter_method)
+            
+    def depth_interpolation(self, transect, method=None):
+        """Method to apply interpolation to all available depth sources, so
+        that all sources have the same interpolation applied.
+
+        Parameters
+        ----------
+        transect: TransectData
+            Object of TransectData
+        method: str
+            Interpolation method (None, HoldLast, Smooth, Linear)
+            """
+        
+        if self.bt_depths is not None:
+            self.bt_depths.apply_interpolation(transect, method)
+        if self.vb_depths is not None:
+            self.vb_depths.apply_interpolation(transect, method)
+        if self.ds_depths is not None:
+            self.ds_depths.apply_interpolation(transect, method)
+            
+    def sos_correction(self, ratio):
+        """Correct depths for change in speed of sound.
+
+        Parameters
+        ----------
+        ratio: float
+            Ratio of new to old speed of sound.
+        """
+        
+        # Bottom Track Depths
+        if self.bt_depths is not None:
+            self.bt_depths.sos_correction(ratio)
+            
+        # Vertical beam depths
+        if self.vb_depths is not None:
+            self.vb_depths.sos_correction(ratio)
+
+    @staticmethod
+    def interpolate_composite(transect, composite_depth):
+
+        """Apply linear interpolation to composite depths
+
+        Parameters
+        ----------
+        transect: TransectData
+            Transect being processed
+        composite_depth: np.array(float)
+            Array of composite depths
+
+        Returns
+        -------
+        depth_new: np.array(float)
+            Array of composite depths with interpolated values
+        """
+
+        # Create position array
+        select = getattr(transect.boat_vel, transect.boat_vel.selected)
+        if select is not None:
+            boat_vel_x = select.u_processed_mps
+            boat_vel_y = select.v_processed_mps
+            track_x = boat_vel_x * transect.date_time.ens_duration_sec
+            track_y = boat_vel_y * transect.date_time.ens_duration_sec
+        else:
+            select = getattr(transect.boat_vel, 'bt_vel')
+            track_x = np.tile(np.nan, select.u_processed_mps.shape)
+            track_y = np.tile(np.nan, select.v_processed_mps.shape)
+
+        idx = np.where(np.isnan(track_x[1:]))
+
+        # If the navigation reference has no gaps, use distance made good for
+        # interpolation; otherwise use accumulated time
+        if len(idx[0]) < 1:
+            x = np.nancumsum(np.sqrt(track_x ** 2 + track_y ** 2))
+        else:
+            # Compute accumulated time
+            x = np.nancumsum(transect.date_time.ens_duration_sec)
+
+        depth_mono = np.copy(composite_depth)
+        depth_new = np.copy(composite_depth)
+
+        # Create strictly monotonic arrays for depth and track by identifying duplicate
+        # track values. The first track value is kept and the remaining duplicates are
+        # set to nan. The depth assigned to that first track value is the average of
+        # all duplicates. The depths for the duplicates are then set to nan. Only
+        # valid, strictly monotonic track and depth data are used as input to the
+        # linear interpolation. Only the interpolated values for invalid depths are
+        # added to the valid depth data to create depth_new.
+
+        # Note: x_mono references the same array as x, so the nan assignments to x
+        # below also mark those elements invalid in x_mono
+        x_mono = x
+
+        idx0 = np.where(np.diff(x) == 0)[0]
+        if len(idx0) > 0:
+            if len(idx0) > 1:
+                # Split array into subarrays of consecutive indices, e.g., [[2, 3, 4], [7, 8, 9]]
+                idx1 = np.add(np.where(np.diff(idx0) != 1)[0], 1)
+                group = np.split(idx0, idx1)
+
+            else:
+                # Group of only 1 point
+                group = np.array([idx0])
+
+            # Replace repeated values with mean
+            n_group = len(group)
+            for k in range(n_group):
+                indices = group[k]
+                indices = np.append(indices, indices[-1] + 1)
+                depth_avg = np.nanmean(depth_mono[indices])
+                depth_mono[indices[0]] = depth_avg
+                depth_mono[indices[1:]] = np.nan
+                x[indices[1:]] = np.nan
+
+        # Determine ensembles with valid depth data
+        valid_depth_mono = np.logical_not(np.isnan(depth_mono))
+        valid_x_mono = np.logical_not(np.isnan(x_mono))
+        valid = np.vstack([valid_depth_mono, valid_x_mono])
+        valid = np.all(valid, 0)
+
+        if np.sum(valid) > 1:
+            # Interpolate at every position using only the valid data points
+            depth_int = np.interp(x_mono, x_mono[valid], depth_mono[valid], left=np.nan, right=np.nan)
+            # Fill in invalid data with interpolated data
+            depth_new[np.logical_not(valid_depth_mono)] = depth_int[np.logical_not(valid_depth_mono)]
+
+        return depth_new
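To make the duplicate-track handling in interpolate_composite concrete, here is a small self-contained illustration with hypothetical values: a repeated track position is collapsed to a single point carrying the mean depth, and np.interp then fills only the invalid depths from the remaining strictly monotonic points.

```python
import numpy as np

x = np.array([0.0, 1.0, 1.0, 2.0, 3.0])          # track with one repeated position
depth = np.array([4.0, 4.2, 4.4, np.nan, 5.0])   # one invalid depth to fill

# Collapse the duplicate pair at indices 1 and 2: keep the first point with the
# mean depth and mark the repeat invalid in both arrays
depth[1] = np.nanmean(depth[1:3])
depth[2] = np.nan
x[2] = np.nan

# Interpolate from the valid, strictly monotonic points only
valid = np.logical_and(np.logical_not(np.isnan(depth)), np.logical_not(np.isnan(x)))
depth_new = np.copy(depth)
filled = np.interp(x, x[valid], depth[valid], left=np.nan, right=np.nan)
depth_new[np.isnan(depth)] = filled[np.isnan(depth)]

print(depth_new)  # [4.0, 4.3, nan, 4.65, 5.0]; index 2 stays nan as a duplicate
```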
diff --git a/Classes/EdgeData.py b/Classes/EdgeData.py
new file mode 100644
index 0000000000000000000000000000000000000000..a4fddc5b6a27f4bff7c2943af6925923854a8b8b
--- /dev/null
+++ b/Classes/EdgeData.py
@@ -0,0 +1,125 @@
+import numpy as np
+
+
+class EdgeData(object):
+    """Class used to store edge settings.
+
+    Attributes
+    ----------
+    type: str
+        Shape of edge: 'Triangular', 'Rectangular', 'Custom', 'User Q'
+    distance_m: float
+        Distance to shore, in m.
+    cust_coef: float
+        Custom coefficient provided by user.
+    number_ensembles: int
+        Number of ensembles to average for depth and velocities.
+    user_discharge_cms: float
+        Original user supplied discharge for edge, in cms.
+    orig_type: str
+        Original shape of edge: 'Triangular', 'Rectangular', 'Custom', 'User Q'
+    orig_distance_m: float
+        Original distance to shore, in m.
+    orig_cust_coef: float
+        Original custom coefficient provided by user.
+    orig_number_ensembles: int
+        Original number of ensembles to average for depth and velocities.
+    orig_user_discharge_cms: float
+        Original user supplied discharge for edge, in cms.
+    """
+    
+    def __init__(self):
+        """Initialize EdgeData.
+        """
+        
+        self.type = None       # Shape of edge: 'Triangular', 'Rectangular', 'Custom', 'User Q'
+        self.distance_m = None          # Distance to shore
+        self.cust_coef = None     # Custom coefficient provided by user
+        self.number_ensembles = None   # Number of ensembles to average for depth and velocities
+        self.user_discharge_cms = None      # User supplied edge discharge.
+
+        self.orig_type = None  # Shape of edge: 'Triangular', 'Rectangular', 'Custom', 'User Q'
+        self.orig_distance_m = None  # Distance to shore
+        self.orig_cust_coef = None  # Custom coefficient provided by user
+        self.orig_number_ensembles = None  # Number of ensembles to average for depth and velocities
+        self.orig_user_discharge_cms = None  # User supplied edge discharge.
+
+    def populate_data(self, edge_type, distance=None, number_ensembles=10, coefficient=None, user_discharge=None):
+        """Construct left or right edge object from provided inputs
+        
+        Parameters
+        ----------
+        edge_type: str
+            Type of edge (Triangular, Rectangular, Custom, User Q)
+        distance: float
+            Distance to shore, in m.
+        number_ensembles: int
+            Number of edge ensembles for all types but User Q
+        coefficient: float
+            User supplied custom edge coefficient.
+        user_discharge: float
+            User supplied edge discharge, in cms.
+        """
+
+        # Set properties for custom coefficient
+        self.type = edge_type
+        self.distance_m = distance
+        self.number_ensembles = number_ensembles
+        self.user_discharge_cms = user_discharge
+        self.cust_coef = coefficient
+
+        if self.orig_type is None:
+            self.orig_type = edge_type
+            self.orig_distance_m = distance
+            self.orig_number_ensembles = number_ensembles
+            self.orig_user_discharge_cms = user_discharge
+            self.orig_cust_coef = coefficient
+
+    def populate_from_qrev_mat(self, mat_data):
+        """Populates the object using data from previously saved QRev Matlab file.
+
+        Parameters
+        ----------
+        mat_data: mat_struct
+           Matlab data structure obtained from sio.loadmat
+        """
+
+        self.type = mat_data.type
+        self.distance_m = mat_data.dist_m
+        self.number_ensembles = mat_data.numEns2Avg
+        if type(mat_data.userQ_cms) is float:
+            if not np.isnan(mat_data.userQ_cms):
+                self.user_discharge_cms = mat_data.userQ_cms
+        if type(mat_data.custCoef) is float:
+            self.cust_coef = mat_data.custCoef
+        if hasattr(mat_data, 'orig_type'):
+            self.orig_type = mat_data.orig_type
+            self.orig_distance_m = mat_data.orig_distance_m
+            self.orig_number_ensembles = mat_data.orig_number_ensembles
+            if type(mat_data.orig_user_discharge_cms) is float:
+                if not np.isnan(mat_data.orig_user_discharge_cms):
+                    self.orig_user_discharge_cms = mat_data.orig_user_discharge_cms
+            if type(mat_data.orig_cust_coef) is float:
+                self.orig_cust_coef = mat_data.orig_cust_coef
+        else:
+            self.orig_type = mat_data.type
+            self.orig_distance_m = mat_data.dist_m
+            self.orig_number_ensembles = mat_data.numEns2Avg
+            if type(mat_data.userQ_cms) is float:
+                if not np.isnan(mat_data.userQ_cms):
+                    self.orig_user_discharge_cms = mat_data.userQ_cms
+            if type(mat_data.custCoef) is float:
+                self.orig_cust_coef = mat_data.custCoef
+
+    def change_property(self, prop, setting):
+        """Change edge data property
+
+        Parameters
+        ----------
+        prop: str
+            Property to change.
+        setting:
+            New setting for property.
+        """
+        setattr(self, prop, setting)
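A short hypothetical usage of EdgeData, illustrating how populate_data snapshots the first settings into the orig_* attributes while change_property alters only the working values:

```python
from Classes.EdgeData import EdgeData

edge = EdgeData()
edge.populate_data(edge_type='Triangular', distance=5.0, number_ensembles=10)

# Later adjustment changes the working value only; the original is preserved
edge.change_property('distance_m', 6.5)
assert edge.distance_m == 6.5
assert edge.orig_distance_m == 5.0
```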
diff --git a/Classes/Edges.py b/Classes/Edges.py
new file mode 100644
index 0000000000000000000000000000000000000000..436cec20918248fd3b72a7f6c9462bf9b577b683
--- /dev/null
+++ b/Classes/Edges.py
@@ -0,0 +1,77 @@
+from Classes.EdgeData import EdgeData
+
+
+class Edges(object):
+    """Class to store and process edge data.
+
+    Attributes
+    ----------
+    rec_edge_method: str
+        Method used to determine the coefficient for a rectangular edge: 'Fixed' or 'Variable'.
+    vel_method: str
+        Method used to compute the edge velocity: 'MeasMag' or 'VectorProf'.
+    left: EdgeData
+        Object of EdgeData for left edge.
+    right: EdgeData
+        Object of EdgeData for right edge.
+    """
+    
+    def __init__(self):
+        """Initialize Edges.
+        """
+
+        self.rec_edge_method = None
+        self.vel_method = None
+        self.left = EdgeData()
+        self.right = EdgeData()
+        
+    def populate_data(self, rec_edge_method, vel_method):
+        """Store the general methods used for edge data.
+
+        Parameters
+        ----------
+        rec_edge_method: str
+            Method used to determine the coefficient for a rectangular edge: 'Fixed' or 'Variable'.
+        vel_method: str
+            Method used to compute the edge velocity: 'MeasMag' or 'VectorProf'.
+        """
+        self.rec_edge_method = rec_edge_method
+        self.vel_method = vel_method
+
+    def populate_from_qrev_mat(self, transect):
+        """Populates the object using data from previously saved QRev Matlab file.
+
+        Parameters
+        ----------
+        transect: mat_struct
+           Matlab data structure obtained from sio.loadmat
+       """
+
+        if hasattr(transect, 'edges'):
+            if hasattr(transect.edges, 'left'):
+                self.left = EdgeData()
+                self.left.populate_from_qrev_mat(transect.edges.left)
+            if hasattr(transect.edges, 'right'):
+                self.right = EdgeData()
+                self.right.populate_from_qrev_mat(transect.edges.right)
+            self.rec_edge_method = transect.edges.recEdgeMethod
+            self.vel_method = transect.edges.velMethod
+
+    def change_property(self, prop, setting, edge=None):
+        """Change edge property
+        
+        Parameters
+        ----------
+        prop: str
+            Name of property.
+        setting:
+            New property setting.
+        edge: str
+            Edge to change (left, right)
+        """
+        
+        if edge is None:
+            setattr(self, prop, setting)
+        else:
+            temp = getattr(self, edge)
+            temp.change_property(prop, setting)
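A hypothetical example of the routing in Edges.change_property: without an edge argument the property is set on the Edges object itself; with edge='left' or 'right' the call is forwarded to the corresponding EdgeData object.

```python
from Classes.Edges import Edges

edges = Edges()
edges.populate_data(rec_edge_method='Fixed', vel_method='MeasMag')

edges.change_property('vel_method', 'VectorProf')      # set on Edges itself
edges.change_property('distance_m', 4.0, edge='left')  # forwarded to left EdgeData
assert edges.left.distance_m == 4.0
```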
diff --git a/Classes/ExtrapData.py b/Classes/ExtrapData.py
new file mode 100644
index 0000000000000000000000000000000000000000..2a6e1d445bc65e7a9e264087a0655eaea8c4ecbd
--- /dev/null
+++ b/Classes/ExtrapData.py
@@ -0,0 +1,91 @@
+class ExtrapData(object):
+    """Class to store both original and modified extrapolation settings.
+
+    Attributes
+    ----------
+    top_method_orig: str
+        Original extrapolation method for top of profile: Power, Constant, 3-Point.
+    bot_method_orig: str
+        Original extrapolation method for bottom of profile: Power, No Slip.
+    exponent_orig: float
+        Original exponent for power or no slip methods.
+    top_method: str
+        Applied extrapolation method for top of profile: Power, Constant, 3-Point.
+    bot_method: str
+        Applied extrapolation method for bottom of profile: Power, No Slip.
+    exponent: float
+        Applied exponent for power or no slip methods.
+    """
+    
+    def __init__(self):
+        """Initialize class and set defaults."""
+        self.top_method_orig = None  # Extrapolation method for top of profile: Power, Constant, 3-Point
+        self.bot_method_orig = None  # Extrapolation method for bottom of profile: Power, No Slip
+        self.exponent_orig = None  # Exponent for power or no slip methods
+        self.top_method = None  # Extrapolation method for top of profile: Power, Constant, 3-Point
+        self.bot_method = None  # Extrapolation method for bottom of profile: Power, No Slip
+        self.exponent = None  # Exponent for power or no slip methods
+        
+    def populate_data(self, top, bot, exp):
+        """Store data in class variables.
+
+        Parameters
+        ----------
+        top: str
+            Original top method.
+        bot: str
+            Original bottom method.
+        exp: float
+            Original exponent.
+        """
+        self.top_method_orig = top
+        self.bot_method_orig = bot
+        self.top_method = top
+        self.bot_method = bot
+        self.exponent_orig = float(exp)
+        self.exponent = float(exp)
+
+    def populate_from_qrev_mat(self, transect):
+        """Populates the object using data from previously saved QRev Matlab file.
+
+        Parameters
+        ----------
+        transect: mat_struct
+           Matlab data structure obtained from sio.loadmat
+        """
+
+        if hasattr(transect, 'extrap'):
+            self.top_method_orig = transect.extrap.topMethodOrig
+            self.bot_method_orig = transect.extrap.botMethodOrig
+            self.exponent_orig = transect.extrap.exponentOrig
+            self.top_method = transect.extrap.topMethod
+            self.bot_method = transect.extrap.botMethod
+            self.exponent = transect.extrap.exponent
+
+    def set_extrap_data(self, top, bot, exp):
+        """Store new extrapolation settings
+
+        Parameters
+        ----------
+        top: str
+            New top extrapolation method.
+        bot: str
+            New bottom extrapolation method.
+        exp: float
+            New exponent.
+        """
+        self.top_method = top
+        self.bot_method = bot
+        self.exponent = exp
+        
+    def set_property(self, prop, setting):
+        """Allows setting any property.
+
+        Parameters
+        ----------
+        prop: str
+            Name of property.
+        setting:
+            New setting.
+        """
+        setattr(self, prop, setting)
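A brief hypothetical round trip through ExtrapData: populate_data seeds both the original and applied settings, and set_extrap_data later changes only the applied ones, so the *_orig attributes remain available for comparison.

```python
from Classes.ExtrapData import ExtrapData

extrap = ExtrapData()
extrap.populate_data(top='Power', bot='Power', exp=0.1667)

extrap.set_extrap_data(top='Constant', bot='No Slip', exp=0.2)
assert extrap.top_method_orig == 'Power'   # original is retained
assert extrap.top_method == 'Constant'     # applied setting changed
```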
diff --git a/Classes/ExtrapQSensitivity.py b/Classes/ExtrapQSensitivity.py
new file mode 100644
index 0000000000000000000000000000000000000000..75be866e6350f063018c447403d94212dd4aca41
--- /dev/null
+++ b/Classes/ExtrapQSensitivity.py
@@ -0,0 +1,372 @@
+import numpy as np
+from Classes.QComp import QComp
+
+
+class ExtrapQSensitivity(object):
+    """Class to compute the sensitivity of the discharge to various extrapolation methods.
+
+    Attributes
+    ----------
+    q_pp_mean: float
+        Discharge power power 1/6
+    q_pp_opt_mean: float
+        Discharge power power optimized
+    q_cns_mean: float
+        Discharge constant no slip
+    q_cns_opt_mean: float
+        Discharge constant optimized no slip
+    q_3p_ns_mean: float
+        Discharge 3-pt no slip
+    q_3p_ns_opt_mean: float
+        Discharge 3-pt optimized no slip
+    q_pp_per_diff: float
+        Power power 1/6 percent difference from reference
+    q_pp_opt_per_diff: float
+        Power power optimized percent difference from reference
+    q_cns_per_diff: float
+        Constant no slip percent difference from reference
+    q_cns_opt_per_diff: float
+        Constant optimized no slip percent difference from reference
+    q_3p_ns_per_diff: float
+        3-point no slip percent difference from reference
+    q_3p_ns_opt_per_diff: float
+        3-point optimized no slip percent difference from reference
+    pp_exp: float
+        Optimized power power exponent
+    ns_exp: float
+        Optimized no slip exponent
+    man_top: str
+        Manually specified top method
+    man_bot: str
+        Manually specified bottom method
+    man_exp: float
+        Manually specified exponent
+    q_man_mean: float
+        Mean discharge for manually specified extrapolations
+    q_man_per_diff: float
+        Manually specified extrapolations percent difference from reference
+    q_pp_list: list
+        List of single transect discharges based on default 1/6 power-power law
+    q_pp_opt_list: list
+        List of single transect discharges based on optimized power-power law
+    q_cns_list: list
+        List of single transect discharges based on default 1/6 constant no slip law
+    q_cns_opt_list: list
+        List of single transect discharges based on optimized constant no slip law
+    q_3p_ns_list: list
+        List of single transect discharges based on default 3pt no slip
+    q_3p_ns_opt_list: list
+        List of single transect discharges based on optimized 3pt no slip
+    q_top_pp_list: list
+        List of single transect top discharges based on default 1/6 power-power law
+    q_top_pp_opt_list: list
+        List of single transect top discharges based on optimized power-power law
+    q_top_cns_list: list
+        List of single transect top discharges based on default 1/6 constant no slip law
+    q_top_cns_opt_list: list
+        List of single transect top discharges based on optimized constant no slip law
+    q_top_3p_ns_list: list
+        List of single transect top discharges based on default 3pt no slip
+    q_top_3p_ns_opt_list: list
+        List of single transect top discharges based on optimized 3pt no slip
+    q_bot_pp_list: list
+        List of single transect bottom discharges based on default 1/6 power-power law
+    q_bot_pp_opt_list: list
+        List of single transect bottom discharges based on optimized power-power law
+    q_bot_cns_list: list
+        List of single transect bottom discharges based on default 1/6 constant no slip law
+    q_bot_cns_opt_list: list
+        List of single transect bottom discharges based on optimized constant no slip law
+    q_bot_3p_ns_list: list
+        List of single transect bottom discharges based on default 3pt no slip
+    q_bot_3p_ns_opt_list: list
+        List of single transect bottom discharges based on optimized 3pt no slip
+    """
+    
+    def __init__(self):
+        """Initialize object and instance variables."""
+
+        self.q_pp_mean = None  # Discharge power power 1/6
+        self.q_pp_opt_mean = None  # Discharge power power optimized
+        self.q_cns_mean = None  # Discharge constant no slip
+        self.q_cns_opt_mean = None  # Discharge constant optimized no slip
+        self.q_3p_ns_mean = None  # Discharge 3-pt no slip
+        self.q_3p_ns_opt_mean = None  # Discharge 3-pt optimized no slip
+        self.q_pp_per_diff = None  # Power power 1/6 percent difference from reference
+        self.q_pp_opt_per_diff = None  # Power power optimized percent difference from reference
+        self.q_cns_per_diff = None  # Constant no slip percent difference from reference
+        self.q_cns_opt_per_diff = None  # Constant optimized no slip percent difference from reference
+        self.q_3p_ns_per_diff = None  # 3-point no slip percent difference from reference
+        self.q_3p_ns_opt_per_diff = None  # 3-point optimized no slip percent difference from reference
+        self.pp_exp = None  # Optimized power power exponent
+        self.ns_exp = None  # Optimized no slip exponent
+        self.man_top = None  # Manually specified top method
+        self.man_bot = None  # Manually specified bottom method
+        self.man_exp = None  # Manually specified exponent
+        self.q_man_mean = None  # Mean discharge for manually specified extrapolations
+        self.q_man_per_diff = None  # Manually specified extrapolations percent difference from reference
+        self.q_pp_list = []  # List of single transect discharges based on default 1/6 power-power law
+        self.q_pp_opt_list = []  # List of single transect discharges based on optimized power-power law
+        self.q_cns_list = []  # List of single transect discharges based on default 1/6 constant no slip law
+        self.q_cns_opt_list = []  # List of single transect discharges based on optimized constant no slip law
+        self.q_3p_ns_list = []  # List of single transect discharges based on default 3pt no slip
+        self.q_3p_ns_opt_list = []  # List of single transect discharges based on optimized 3pt no slip
+        self.q_top_pp_list = []  # List of single transect top discharges based on default 1/6 power-power law
+        self.q_top_pp_opt_list = []  # List of single transect top discharges based on optimized power-power law
+        self.q_top_cns_list = []  # List of single transect top discharges based on default 1/6 constant no slip law
+        self.q_top_cns_opt_list = []  # List of single transect top discharges based on optimized constant no slip law
+        self.q_top_3p_ns_list = []  # List of single transect top discharges based on default 3pt no slip
+        self.q_top_3p_ns_opt_list = []  # List of single transect top discharges based on optimized 3pt no slip
+        self.q_bot_pp_list = []  # List of single transect bottom discharges based on default 1/6 power-power law
+        self.q_bot_pp_opt_list = []  # List of single transect bottom discharges based on optimized power-power law
+        self.q_bot_cns_list = []  # List of single transect bottom discharges based on default 1/6 constant no slip law
+        self.q_bot_cns_opt_list = []  # List of single transect bottom discharges based on optimized constant no slip law
+        self.q_bot_3p_ns_list = []  # List of single transect bottom discharges based on default 3pt no slip
+        self.q_bot_3p_ns_opt_list = []  # List of single transect bottom discharges based on optimized 3pt no slip
+        
+    def populate_data(self, transects, extrap_fits):
+        """Compute means and percent differences.
+
+        Parameters
+        ----------
+        transects: list
+            List of objects of TransectData
+        extrap_fits: SelectFit
+            Object of SelectFit
+        """
+        q_pp = []
+        q_pp_opt = []
+        q_cns = []
+        q_cns_opt = []
+        q_3p_ns = []
+        q_3p_ns_opt = []
+        self.pp_exp = extrap_fits[-1].pp_exponent
+        self.ns_exp = extrap_fits[-1].ns_exponent
+
+        # Store top discharges
+        q_pp_top = []
+        q_pp_opt_top = []
+        q_cns_top = []
+        q_cns_opt_top = []
+        q_3p_ns_top = []
+        q_3p_ns_opt_top = []
+
+        # Store bottom discharges
+        q_pp_bot = []
+        q_pp_opt_bot = []
+        q_cns_bot = []
+        q_cns_opt_bot = []
+        q_3p_ns_bot = []
+        q_3p_ns_opt_bot = []
+
+        # Compute discharges for each transect for possible extrapolation combinations
+        for transect in transects:
+            if transect.checked:
+                q = QComp()
+
+                q.populate_data(data_in=transect, top_method='Power', bot_method='Power', exponent=0.1667)
+                q_pp.append(q.total)
+                q_pp_top.append(q.top)
+                q_pp_bot.append(q.bottom)
+
+                q.populate_data(data_in=transect, top_method='Power', bot_method='Power', exponent=self.pp_exp)
+                q_pp_opt.append(q.total)
+                q_pp_opt_top.append(q.top)
+                q_pp_opt_bot.append(q.bottom)
+
+                q.populate_data(data_in=transect, top_method='Constant', bot_method='No Slip', exponent=0.1667)
+                q_cns.append(q.total)
+                q_cns_top.append(q.top)
+                q_cns_bot.append(q.bottom)
+
+                q.populate_data(data_in=transect, top_method='Constant', bot_method='No Slip', exponent=self.ns_exp)
+                q_cns_opt.append(q.total)
+                q_cns_opt_top.append(q.top)
+                q_cns_opt_bot.append(q.bottom)
+
+                q.populate_data(data_in=transect, top_method='3-Point', bot_method='No Slip', exponent=0.1667)
+                q_3p_ns.append(q.total)
+                q_3p_ns_top.append(q.top)
+                q_3p_ns_bot.append(q.bottom)
+
+                q.populate_data(data_in=transect, top_method='3-Point', bot_method='No Slip', exponent=self.ns_exp)
+                q_3p_ns_opt.append(q.total)
+                q_3p_ns_opt_top.append(q.top)
+                q_3p_ns_opt_bot.append(q.bottom)
+
+        # Compute mean discharge for each combination
+        self.q_pp_mean = np.nanmean(q_pp)
+        self.q_pp_opt_mean = np.nanmean(q_pp_opt)
+        self.q_cns_mean = np.nanmean(q_cns)
+        self.q_cns_opt_mean = np.nanmean(q_cns_opt)
+        self.q_3p_ns_mean = np.nanmean(q_3p_ns)
+        self.q_3p_ns_opt_mean = np.nanmean(q_3p_ns_opt)
+
+        # Save all single-transect discharges
+        self.q_pp_list = q_pp
+        self.q_pp_opt_list = q_pp_opt
+        self.q_cns_list = q_cns
+        self.q_cns_opt_list = q_cns_opt
+        self.q_3p_ns_list = q_3p_ns
+        self.q_3p_ns_opt_list = q_3p_ns_opt
+
+        # Save all single-transect top discharges
+        self.q_top_pp_list = q_pp_top
+        self.q_top_pp_opt_list = q_pp_opt_top
+        self.q_top_cns_list = q_cns_top
+        self.q_top_cns_opt_list = q_cns_opt_top
+        self.q_top_3p_ns_list = q_3p_ns_top
+        self.q_top_3p_ns_opt_list = q_3p_ns_opt_top
+
+        # Save all single-transect bottom discharges
+        self.q_bot_pp_list = q_pp_bot
+        self.q_bot_pp_opt_list = q_pp_opt_bot
+        self.q_bot_cns_list = q_cns_bot
+        self.q_bot_cns_opt_list = q_cns_opt_bot
+        self.q_bot_3p_ns_list = q_3p_ns_bot
+        self.q_bot_3p_ns_opt_list = q_3p_ns_opt_bot
+
+        self.compute_percent_diff(extrap_fits=extrap_fits, transects=transects)
+
+    def populate_from_qrev_mat(self, mat_data):
+        """Populates the object using data from previously saved QRev Matlab file.
+
+        Parameters
+        ----------
+        mat_data: mat_struct
+           Matlab data structure obtained from sio.loadmat
+        """
+
+        if hasattr(mat_data, 'qSensitivity'):
+            self.q_pp_mean = mat_data.qSensitivity.qPPmean
+            self.q_pp_opt_mean = mat_data.qSensitivity.qPPoptmean
+            self.q_cns_mean = mat_data.qSensitivity.qCNSmean
+            self.q_cns_opt_mean = mat_data.qSensitivity.qCNSoptmean
+            self.q_3p_ns_mean = mat_data.qSensitivity.q3pNSmean
+            self.q_3p_ns_opt_mean = mat_data.qSensitivity.q3pNSoptmean
+
+            # For compatibility with older QRev.mat files
+            if hasattr(mat_data.qSensitivity, 'qPPperdiff'):
+                self.q_pp_per_diff = mat_data.qSensitivity.qPPperdiff
+            else:
+                self.q_pp_per_diff = np.nan
+
+            self.q_pp_opt_per_diff = mat_data.qSensitivity.qPPoptperdiff
+            self.q_cns_per_diff = mat_data.qSensitivity.qCNSperdiff
+            self.q_cns_opt_per_diff = mat_data.qSensitivity.qCNSoptperdiff
+            self.q_3p_ns_per_diff = mat_data.qSensitivity.q3pNSperdiff
+            self.q_3p_ns_opt_per_diff = mat_data.qSensitivity.q3pNSoptperdiff
+            self.pp_exp = mat_data.qSensitivity.ppExponent
+            self.ns_exp = mat_data.qSensitivity.nsExponent
+
+            # If a manual fit was used
+            if len(mat_data.qSensitivity.manTop) > 0:
+                self.man_top = mat_data.qSensitivity.manTop
+                self.man_bot = mat_data.qSensitivity.manBot
+                self.man_exp = mat_data.qSensitivity.manExp
+                self.q_man_mean = mat_data.qSensitivity.qManmean
+                self.q_man_per_diff = mat_data.qSensitivity.qManperdiff
+
+            # Add compatibility for Oursin uncertainty model
+            if hasattr(mat_data.qSensitivity, 'q_pp_list'):
+                self.q_pp_list = mat_data.qSensitivity.q_pp_list
+                self.q_pp_opt_list = mat_data.qSensitivity.q_pp_opt_list
+                self.q_cns_list = mat_data.qSensitivity.q_cns_list
+                self.q_cns_opt_list = mat_data.qSensitivity.q_cns_opt_list
+                self.q_3p_ns_list = mat_data.qSensitivity.q_3p_ns_list
+                self.q_3p_ns_opt_list = mat_data.qSensitivity.q_3p_ns_opt_list
+                self.q_top_pp_list = mat_data.qSensitivity.q_top_pp_list
+                self.q_top_pp_opt_list = mat_data.qSensitivity.q_top_pp_opt_list
+                self.q_top_cns_list = mat_data.qSensitivity.q_top_cns_list
+                self.q_top_cns_opt_list = mat_data.qSensitivity.q_top_cns_opt_list
+                self.q_top_3p_ns_list = mat_data.qSensitivity.q_top_3p_ns_list
+                self.q_top_3p_ns_opt_list = mat_data.qSensitivity.q_top_3p_ns_opt_list
+                self.q_bot_pp_list = mat_data.qSensitivity.q_bot_pp_list
+                self.q_bot_pp_opt_list = mat_data.qSensitivity.q_bot_pp_opt_list
+                self.q_bot_cns_list = mat_data.qSensitivity.q_bot_cns_list
+                self.q_bot_cns_opt_list = mat_data.qSensitivity.q_bot_cns_opt_list
+                self.q_bot_3p_ns_list = mat_data.qSensitivity.q_bot_3p_ns_list
+                self.q_bot_3p_ns_opt_list = mat_data.qSensitivity.q_bot_3p_ns_opt_list
+            else:
+                self.q_pp_list = []
+                self.q_pp_opt_list = []
+                self.q_cns_list = []
+                self.q_cns_opt_list = []
+                self.q_3p_ns_list = []
+                self.q_3p_ns_opt_list = []
+                self.q_top_pp_list = []
+                self.q_top_pp_opt_list = []
+                self.q_top_cns_list = []
+                self.q_top_cns_opt_list = []
+                self.q_top_3p_ns_list = []
+                self.q_top_3p_ns_opt_list = []
+                self.q_bot_pp_list = []
+                self.q_bot_pp_opt_list = []
+                self.q_bot_cns_list = []
+                self.q_bot_cns_opt_list = []
+                self.q_bot_3p_ns_list = []
+                self.q_bot_3p_ns_opt_list = []
+
+    def compute_percent_diff(self, extrap_fits, transects=None):
+        """Computes the percent difference for each of the extrapolation options as compared to selected method.
+
+        Parameters
+        ----------
+        extrap_fits: SelectFit
+            Object of SelectFit
+        transects: list
+            List of TransectData objects
+        """
+        # Determine which mean is the reference
+        if extrap_fits[-1].fit_method == 'Manual':
+            self.man_top = extrap_fits[-1].top_method
+            self.man_bot = extrap_fits[-1].bot_method
+            self.man_exp = extrap_fits[-1].exponent
+
+            if transects is not None:
+                q_man = []
+                checked = []
+                # Compute discharge for each transect
+                for transect in transects:
+                    q = QComp()
+                    checked.append(transect.checked)
+
+                    q.populate_data(data_in=transect,
+                                    top_method=self.man_top,
+                                    bot_method=self.man_bot,
+                                    exponent=self.man_exp)
+                    q_man.append(q)
+                container = []
+                for index, item in enumerate(q_man):
+                    if checked[index]:
+                        container.append(item.total)
+                self.q_man_mean = np.nanmean(container)
+            reference_mean = self.q_man_mean
+
+        else:
+            if extrap_fits[-1].top_method_auto == 'Power':
+                if np.abs(extrap_fits[-1].exponent_auto - 0.1667) < 0.0001:
+                    reference_mean = self.q_pp_mean
+                else:
+                    reference_mean = self.q_pp_opt_mean
+            elif extrap_fits[-1].top_method_auto == 'Constant':
+                if np.abs(extrap_fits[-1].exponent_auto - 0.1667) < 0.0001:
+                    reference_mean = self.q_cns_mean
+                else:
+                    reference_mean = self.q_cns_opt_mean
+            else:
+                if np.abs(extrap_fits[-1].exponent_auto - 0.1667) < 0.0001:
+                    reference_mean = self.q_3p_ns_mean
+                else:
+                    reference_mean = self.q_3p_ns_opt_mean
+
+        # Compute percent difference from reference
+        self.q_pp_per_diff = ((self.q_pp_mean - reference_mean) / reference_mean) * 100
+        self.q_pp_opt_per_diff = ((self.q_pp_opt_mean - reference_mean) / reference_mean) * 100
+        self.q_cns_per_diff = ((self.q_cns_mean - reference_mean) / reference_mean) * 100
+        self.q_cns_opt_per_diff = ((self.q_cns_opt_mean - reference_mean) / reference_mean) * 100
+        self.q_3p_ns_per_diff = ((self.q_3p_ns_mean - reference_mean) / reference_mean) * 100
+        self.q_3p_ns_opt_per_diff = ((self.q_3p_ns_opt_mean - reference_mean) / reference_mean) * 100
+
+        if extrap_fits[-1].fit_method == 'Manual':
+            self.q_man_per_diff = ((self.q_man_mean - reference_mean) / reference_mean) * 100
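As a worked sketch of the percent-difference computation at the end of compute_percent_diff, with made-up mean discharges rather than results from real data:

```python
reference_mean = 100.0   # hypothetical reference mean discharge, cms
q_pp_mean = 98.5         # hypothetical power-power 1/6 mean discharge, cms

q_pp_per_diff = ((q_pp_mean - reference_mean) / reference_mean) * 100
print(q_pp_per_diff)     # -1.5, i.e. 1.5 percent below the reference
```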
diff --git a/Classes/FitData.py b/Classes/FitData.py
new file mode 100644
index 0000000000000000000000000000000000000000..da61b868a2c4bf991104e6c8183d92b23e3d099f
--- /dev/null
+++ b/Classes/FitData.py
@@ -0,0 +1,284 @@
+import numpy as np
+from scipy.optimize import curve_fit
+from scipy.stats import t
+
+
+class FitData(object):
+    """Class to compute top and bottom extrapolation methods and associated statistics.
+
+     Data required for the constructor method include data of class
+     NormData, threshold for the minimum number of points for a valid
+     median, top extrapolation method, bottom extrapolation method, type
+     of fit, and if a manual fit, the exponent.
+
+    Attributes
+    ----------
+    file_name: str
+        Name of transect file
+    top_method: str
+        Top extrapolation method
+    bot_method: str
+        Bottom extrapolation method
+    coef: float
+        Power fit coefficient
+    exponent: float
+        Power fit exponent
+    u: np.array(float)
+        Fit values of the variable
+    u_auto: np.array(float)
+        Fit values from automatic fit
+    z_auto: np.array(float)
+        z values for automatic fit
+    z: np.array(float)
+        Distance from the streambed for fit variable
+    exp_method: str
+        Method to determine exponent (default, optimize, or manual)
+    data_type: str
+        Type of data (v, q, V, or Q)
+    exponent_95_ci: np.array(float)
+        95% confidence intervals for optimized exponent
+    residuals: np.array(float)
+        Residuals from fit
+    r_squared: float
+        R squared of model
+    """
+
+    def __init__(self):
+        """Initialize object and instance variables."""
+
+        self.file_name = None  # Name of transect file
+        self.top_method = 'Power'  # Top extrapolation method
+        self.bot_method = 'Power'  # Bottom extrapolation method
+        self.coef = 0  # Power fit coefficient
+        self.exponent = 0.1667  # Power fit exponent
+        self.u = None  # Fit values of the variable
+        self.u_auto = None  # Fit values from automatic fit
+        self.z_auto = None  # z values for automatic fit
+        self.z = None  # Distance from the streambed for fit variable
+        self.exp_method = 'Power'  # Method to determine exponent (default, optimize, or manual)
+        self.data_type = None  # Type of data (velocity or unit discharge)
+        self.exponent_95_ci = 0  # 95% confidence intervals for optimized exponent
+        self.residuals = np.array([])  # Residuals from fit
+        self.r_squared = 0  # R squared of model
+
+    def populate_data(self, norm_data, top, bot, method, exponent=None):
+        """Computes fit and stores associated data.
+
+        Parameters
+        ----------
+        norm_data: NormData
+            Object of NormData
+        top: str
+            Top extrapolation method
+        bot: str
+            Bottom extrapolation method
+        method: str
+            Method used to define the exponent (default, optimize, or manual); default is 1/6.
+        exponent: float
+            Exponent for power or no slip fit methods.
+        """
+
+        avg_z = norm_data.unit_normalized_z
+        y = norm_data.unit_normalized_med
+        idxz = norm_data.valid_data
+        zc = np.nan
+
+        lower_bound = [-np.inf, 0.01]
+        upper_bound = [np.inf, 1]
+        bounds = None
+        p0 = None
+        uc = np.nan
+
+        # Process data if available
+        if len(idxz) > 0:
+            idx_power = idxz
+
+            # Create arrays for data fitting
+            # Select median values to use in the selected extrapolation methods and
+            # create the output data arrays
+
+            # If bottom is No Slip, Power at top is not allowed
+            if bot == 'No Slip':
+                if top == 'Power':
+                    top = 'Constant'
+
+            fit_combo = ''.join([top, bot])
+            if fit_combo == 'PowerPower':
+                self.z = np.arange(0, 1.01, 0.01)
+                zc = np.nan
+                uc = np.nan
+            elif fit_combo == 'ConstantPower':
+                self.z = np.arange(0, np.max(avg_z[idxz]), 0.01)
+                self.z = np.hstack([self.z, np.nan])
+                zc = np.arange(np.max(avg_z[idxz]) + 0.01, 1.0, 0.01)
+                uc = np.tile(y[idxz[0]], zc.shape)
+            elif fit_combo == '3-PointPower':
+                self.z = np.arange(0, np.max(avg_z[idxz]), 0.01)
+                self.z = np.hstack([self.z, np.nan])
+                # If less than 6 bins use constant at the top
+                if len(idxz) < 6:
+                    zc = np.arange(np.max(avg_z[idxz]) + 0.01, 1.0, 0.01)
+                    uc = np.tile(y[idxz[0]], zc.shape)
+                else:
+                    p = np.polyfit(avg_z[idxz[0:3]], y[idxz[0:3]], 1)
+                    zc = np.arange(np.max(avg_z[idxz]) + 0.01, 1.0, 0.01)
+                    # zc = zc.T
+                    uc = zc * p[0] + p[1]
+
+            elif fit_combo == 'ConstantNo Slip':
+                # Optimize constant / no slip if sufficient cells are available
+                if method.lower() == 'optimize':
+                    idx = idxz[int(1+len(idxz) - np.floor(len(avg_z[idxz]) / 3) - 1)::]
+                    if len(idx) < 4:
+                        method = 'default'
+
+                # Compute Constant / No Slip using WinRiver II and
+                # RiverSurveyor Live default cells
+                else:
+                    idx = np.where(avg_z[idxz] <= .2)[0]
+                    if len(idx) < 1:
+                        idx = idxz[-1]
+                    else:
+                        idx = idxz[idx]
+
+                # Configures u and z arrays
+                idxns = np.array([idx]).T
+                self.z = np.arange(0, avg_z[idxns[0]], 0.01)
+                self.z = np.hstack([self.z, [np.nan]])
+                idx_power = idx
+                zc = np.arange(np.max(avg_z[idxz]) + 0.01, 1.00, 0.01)
+                uc = np.tile(y[idxz[0]], zc.shape)
+
+            elif fit_combo == '3-PointNo Slip':
+                # Optimize 3-Point / no slip if sufficient cells are available
+                if method.lower() == 'optimize':
+                    idx = idxz[int(1 + len(idxz) - np.floor(len(avg_z[idxz]) / 3) - 1)::]
+                    if len(idx) < 4:
+                        method = 'default'
+
+                # Compute 3-Point / No Slip using WinRiver II and
+                # RiverSurveyor Live default cells
+                else:
+                    idx = np.where(avg_z[idxz] <= .2)[0]
+                    if len(idx) < 1:
+                        idx = idxz[-1]
+                    else:
+                        idx = idxz[idx]
+
+                # Configures u and z arrays
+                idxns = np.array([idx]).T
+                self.z = np.arange(0, avg_z[idxns[0]], 0.01)
+                self.z = np.hstack([self.z, [np.nan]])
+                idx_power = idx
+                # If less than 6 bins use constant at the top
+                if len(idxz) < 6:
+                    zc = np.arange(np.max(avg_z[idxz]) + 0.01, 1.0, 0.01)
+                    uc = np.tile(y[idxz[0]], zc.shape)
+                else:
+                    p = np.polyfit(avg_z[idxz[0:3]], y[idxz[0:3]], 1)
+                    zc = np.arange(np.max(avg_z[idxz]) + 0.01, 1.0, 0.01)
+                    uc = zc * p[0] + p[1]
+
+            # Compute exponent
+            zfit = avg_z[idx_power]
+            yfit = y[idx_power]
+
+            # Check data validity
+            ok_ = np.logical_and(np.isfinite(zfit), np.isfinite(yfit))
+
+            self.exponent = np.nan
+            self.exponent_95_ci = np.nan
+            self.r_squared = np.nan
+            fit_func = 'linear'
+
+            lower_method = method.lower()
+
+            if lower_method == 'manual':
+                fit_func = 'linear'
+                self.exponent = exponent
+                bounds = None
+                p0 = None
+
+            elif lower_method == 'default':
+                fit_func = 'linear'
+                self.exponent = 1./6.
+                bounds = None
+                p0 = None
+
+            elif lower_method == 'optimize':
+                fit_func = 'power'
+                bounds = [lower_bound, upper_bound]
+                strt = yfit[ok_]
+                p0 = [strt[-1], 1./6]
+
+            fit_funcs = {
+                'linear': lambda x, a: a * x**self.exponent,
+                'power': lambda x, a, b: a * x**b
+            }
+
+            if ok_.size > 1:
+                if bounds is not None:
+                    popt, pcov = curve_fit(fit_funcs[fit_func],
+                                           zfit, yfit, p0=p0, bounds=bounds)
+                else:
+                    popt, pcov = curve_fit(fit_funcs[fit_func],
+                                           zfit, yfit, p0=p0)
+
+                # Extract exponent and confidence intervals from fit
+                if lower_method == 'optimize':
+                    self.exponent = popt[1]
+                    if self.exponent is None or self.exponent < 0.05:
+                        self.exponent = 0.05
+
+                if len(zfit[ok_]) > 2:
+
+                    n = len(zfit)    # number of data points
+
+                    t_val = t.ppf(.975, n-2)
+
+                    # Get 95% confidence intervals
+                    lower = (popt[-1] - t_val * np.sqrt(np.diag(pcov)[-1]))
+                    upper = (popt[-1] + t_val * np.sqrt(np.diag(pcov)[-1]))
+                    self.exponent_95_ci = np.hstack([lower, upper])
+
+                    # Get the rsquared for the model
+                    ss_tot = np.sum((y[idx_power] - np.mean(yfit))**2)
+                    ss_res = np.sum((y[idx_power] - fit_funcs[fit_func](zfit, *popt))**2)
+                    self.r_squared = 1 - (ss_res/ss_tot)
+                else:
+                    self.exponent_95_ci = [np.nan, np.nan]
+                    self.r_squared = np.nan
+
+            # Fit power curve to appropriate data
+            self.coef = ((self.exponent + 1) * 0.05 * np.nansum(y[idx_power])) / \
+                np.nansum(((avg_z[idx_power] + (0.5 * 0.05))**(self.exponent + 1)
+                           - ((avg_z[idx_power] - (0.5 * 0.05))**(self.exponent + 1))))
+
+            # Compute residuals
+            self.residuals = y[idx_power] - self.coef * avg_z[idx_power]**self.exponent
+            if self.residuals is None:
+                self.residuals = np.array([])
+
+            # Compute values (velocity or discharge) based on exponent and compute coefficient
+            self.u = self.coef * self.z**self.exponent
+            if type(zc) == np.ndarray:
+                self.u = np.append(self.u, uc)
+                self.z = np.append(self.z, zc)
+
+            # Assign variables to object properties
+            self.file_name = norm_data.file_name
+            self.top_method = top
+            self.bot_method = bot
+            self.exp_method = method
+            self.data_type = norm_data.data_type
+
+        else:
+            # If no data are valid, simply store the methods
+            self.exponent = np.nan
+            self.exponent_95_ci = [np.nan, np.nan]
+            self.r_squared = np.nan
+            self.file_name = norm_data.file_name
+            self.top_method = top
+            self.bot_method = bot
+            self.exp_method = method
+            self.data_type = norm_data.data_type
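The optimized-fit branch above can be exercised in isolation. The sketch below, on synthetic data rather than NormData, mirrors the fit used in populate_data: curve_fit estimates a * z**b with the exponent bounded to [0.01, 1], and a Student-t 95% confidence interval is formed for the exponent.

```python
import numpy as np
from scipy.optimize import curve_fit
from scipy.stats import t

z = np.linspace(0.05, 0.95, 10)
u = 1.2 * z ** 0.18                      # synthetic profile obeying a power law

popt, pcov = curve_fit(lambda x, a, b: a * x ** b, z, u,
                       p0=[u[-1], 1. / 6],
                       bounds=([-np.inf, 0.01], [np.inf, 1]))

# 95% confidence interval for the exponent from the covariance matrix
t_val = t.ppf(0.975, len(z) - 2)
half_width = t_val * np.sqrt(np.diag(pcov)[-1])
print(popt[1], popt[1] - half_width, popt[1] + half_width)
```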
diff --git a/Classes/GPSData.py b/Classes/GPSData.py
new file mode 100644
index 0000000000000000000000000000000000000000..1ca09b5eb61736ba1d11da2a79f695584b689846
--- /dev/null
+++ b/Classes/GPSData.py
@@ -0,0 +1,731 @@
+import utm
+import numpy as np
+from MiscLibs.common_functions import azdeg2rad, pol2cart, nans, nan_less
+
+
+class GPSData(object):
+    """Class containing the raw GPS data and algorithms to convert 
+    that raw data to boat velocity.
+
+    Attributes
+    ----------
+    # Raw properties:
+        raw_gga_lat_deg: np.array(float)
+            Raw latitude in degrees, [ensemble,n]
+        raw_gga_lon_deg: np.array(float)
+            Raw longitude in degrees, [ensemble,n]
+        raw_gga_altitude_m: np.array(float)
+            Raw altitude in meters, [ensemble,n]
+        raw_gga_differential: np.array(float)
+            Raw differential correction indicator, [ensemble,n]
+        raw_gga_hdop: np.array(float)
+            Raw horizontal dilution of precision, [ensemble,n]
+        raw_gga_utc: np.array(float)
+            Raw UTC time, hhmmss.ss, [ensemble,n]
+        raw_gga_serial_time: np.array(float)
+            Raw UTC time of gga data in seconds past midnight, [ensemble,n]
+        raw_gga_num_sats: np.array(float)
+            Raw number of satellites reported in gga sentence, [ensemble,n]
+        raw_vtg_course_deg: np.array(float)
+            Raw course in degrees, [ensemble,n]
+        raw_vtg_speed_mps: np.array(float)
+            Raw speed in m/s, [ensemble,n]
+        raw_vtg_delta_time: np.array(float)
+            Raw vtg delta time (sec), [ensemble,n]
+        raw_vtg_mode_indicator: np.array(float)
+            Raw vtg mode indicator, [ensemble,n]
+        raw_gga_delta_time: np.array(float)
+            Raw gga delta time (sec), [ensemble,n]
+
+    # Manufacturer assigned ensemble values:
+        ext_gga_lat_deg: np.array(float)
+            Latitude for each ensemble, in degrees [ensemble]
+        ext_gga_lon_deg: np.array(float)
+            Longitude for each ensemble, in degrees [ensemble]
+        ext_gga_altitude_m: np.array(float)
+            Altitude for each ensemble, in meters [ensemble]
+        ext_gga_differential: np.array(float)
+            Differential correction indicator for each ensemble [ensemble]
+        ext_gga_hdop: np.array(float)
+            Horizontal dilution of precision for each ensemble [ensemble]
+        ext_gga_utc: np.array(float)
+            UTC time, hhmmss.ss for each ensemble [ensemble]
+        ext_gga_serial_time: np.array(float)
+            UTC time of gga data in seconds past midnight for each ensemble [ensemble]
+        ext_gga_num_sats: np.array(float)
+            Number of satellites for each ensemble [ensemble]
+        ext_vtg_course_deg: np.array(float)
+            Course for each ensemble, in degrees [ensemble]
+        ext_vtg_speed_mps: np.array(float)
+            Speed for each ensemble, in m/s [ensemble]
+
+    # User specifications:
+        gga_position_method: str
+            Method used to process gga data for position ('End', 'Average', 'External')
+        gga_velocity_method: str
+            Method used to process gga data for velocity ('End', 'Average', 'External')
+        vtg_velocity_method: str
+            Method used to process vtg data for velocity ('Average', 'External')
+
+    # Computed properties:
+        gga_lat_ens_deg: np.array(float)
+            Processed latitude, in degrees [ensemble]
+        gga_lon_ens_deg: np.array(float)
+            Processed longitude, in degrees [ensemble]
+        utm_ens_m: np.array(float)
+            UTM position from processed gga data, in m [2,ensemble]
+        gga_velocity_ens_mps: np.array(float)
+            Boat velocity components computed from gga data, in m/s [2,ensemble]
+        gga_serial_time_ens: np.array(float)
+            UTC time of gga data, in seconds past midnight [ensemble]
+        vtg_velocity_ens_mps: np.array(float)
+            Boat velocity components computed from vtg data, in m/s [2,ensemble]
+        per_good_ens: np.array(float)
+            Percentage of available data used to compute ensemble value [ensemble]
+        hdop_ens: np.array(float)
+            HDOP for each ensemble using velocity method [ensemble]
+        num_sats_ens: np.array(float)
+            Number of satellites for each ensemble, using velocity method [ensemble]
+        altitude_ens_m: np.array(float)
+            Altitude for each ensemble, using velocity method [ensemble]
+        diff_qual_ens: np.array(float)
+            Differential quality for each ensemble, using velocity method [ensemble]
+    """
+    
+    def __init__(self):
+        """Initialize instance variables.
+        """
+        
+        # Raw properties
+        self.raw_gga_lat_deg = None         # Raw latitude, in degrees [ensemble,n]
+        self.raw_gga_lon_deg = None         # Raw longitude, in degrees [ensemble,n]
+        self.raw_gga_altitude_m = None      # Raw altitude, in meters [ensemble,n]
+        self.raw_gga_differential = None    # Differential correction indicator [ensemble,n]
+        self.raw_gga_hdop = None            # Horizontal dilution of precision [ensemble,n]
+        self.raw_gga_utc = None             # UTC time, in hhmmss.ss [ensemble,n]
+        self.raw_gga_serial_time = None     # UTC time of gga data, in seconds past midnight [ensemble,n]
+        self.raw_gga_num_sats = None        # Number of satellites reported in gga sentence [ensemble,n]
+        self.raw_vtg_course_deg = None      # Course, in degrees [ensemble,n]
+        self.raw_vtg_speed_mps = None       # Speed, in m/s [ensemble,n]
+        self.raw_vtg_delta_time = None      # vtg delta time, in sec [ensemble,n]
+        self.raw_vtg_mode_indicator = None  # vtg mode indicator [ensemble,n]
+        self.raw_gga_delta_time = None      # gga delta time, in sec [ensemble,n]
+        
+        # Manufacturer assigned ensemble values
+        self.ext_gga_lat_deg = None         # Raw latitude, in degrees [1,ensemble]
+        self.ext_gga_lon_deg = None         # Raw longitude, in degrees [1,ensemble]
+        self.ext_gga_altitude_m = None      # Raw altitude, in meters [1,ensemble]
+        self.ext_gga_differential = None    # Differential correction indicator [1,ensemble]
+        self.ext_gga_hdop = None            # Horizontal dilution of precision [1,ensemble]
+        self.ext_gga_utc = None             # UTC time, in hhmmss.ss [1, ensemble]
+        self.ext_gga_serial_time = None     # UTC time of gga data, in seconds past midnight [1,ensemble]
+        self.ext_gga_num_sats = None        # Number of satellites reported by software [1,ensemble]
+        self.ext_vtg_course_deg = None      # Course, in degrees [1, ensemble]
+        self.ext_vtg_speed_mps = None       # Speed, in m/s [1, ensemble]
+       
+        # User specification
+        self.gga_position_method = None     # Method used to process gga data for position ('End', 'Average', 'External')
+        self.gga_velocity_method = None     # Method used to process gga data for velocity ('End', 'Average', 'External')
+        self.vtg_velocity_method = None     # Method used to process vtg data for velocity ('Average', 'External')
+        
+        # Computed properties for ensembles
+        self.gga_lat_ens_deg = None         # Processed latitude in degrees, [ensemble]
+        self.gga_lon_ens_deg = None         # Processed longitude in degrees, [ensemble]
+        self.utm_ens_m = None               # UTM position from processed gga data, [2,ensemble]
+        self.gga_velocity_ens_mps = None    # Boat velocity computed from gga data [2,ensemble]
+        self.gga_serial_time_ens = None     # UTC time of gga data in seconds past midnight, [ensemble]
+        self.vtg_velocity_ens_mps = None    # Boat velocity computed from vtg data [2,ensemble]
+        self.per_good_ens = None            # Percentage of available data used to compute ensemble value [ensemble]
+        self.hdop_ens = None                # HDOP for each ensemble using velocity method [ensemble]
+        self.num_sats_ens = None            # Number of satellites for each ensemble, using velocity method [ensemble]
+        self.altitude_ens_m = None          # Altitude for each ensemble, using velocity method [ensemble]
+        self.diff_qual_ens = None           # Differential quality for each ensemble, using velocity method [ensemble]
+        
+    def populate_data(self, raw_gga_utc, raw_gga_lat, raw_gga_lon, raw_gga_alt, raw_gga_diff,
+                      raw_gga_hdop, raw_gga_num_sats, raw_gga_delta_time, raw_vtg_course, raw_vtg_speed,
+                      raw_vtg_delta_time, raw_vtg_mode_indicator, ext_gga_utc, ext_gga_lat, ext_gga_lon, ext_gga_alt,
+                      ext_gga_diff, ext_gga_hdop, ext_gga_num_sats, ext_vtg_course, ext_vtg_speed,
+                      gga_p_method, gga_v_method, vtg_method):
+        """Store and process provided data in GPSData class.
+
+        Parameters
+        ----------
+        raw_gga_utc: np.array(float)
+            Raw UTC time, hhmmss.ss, [ensemble,n]
+        raw_gga_lat: np.array(float)
+            Raw latitude in degrees, [ensemble,n]
+        raw_gga_lon: np.array(float)
+            Raw longitude in degrees, [ensemble,n]
+        raw_gga_alt: np.array(float)
+            Raw altitude in meters, [ensemble,n]
+        raw_gga_diff: np.array(float)
+            Raw differential correction indicator, [ensemble,n]
+        raw_gga_hdop: np.array(float)
+            Raw horizontal dilution of precision, [ensemble,n]
+        raw_gga_num_sats: np.array(float)
+            Raw number of satellites reported in gga sentence, [ensemble,n]
+        raw_gga_delta_time: np.array(float)
+            Raw gga delta time (sec), [ensemble,n]
+        raw_vtg_course: np.array(float)
+            Raw course in degrees, [ensemble,n]
+        raw_vtg_speed: np.array(float)
+            Raw speed in m/s, [ensemble,n]
+        raw_vtg_delta_time: np.array(float)
+            Raw vtg delta time (sec), [ensemble,n]
+        raw_vtg_mode_indicator: np.array(float)
+            Raw vtg mode indicator, [ensemble,n]
+        ext_gga_utc: np.array(float)
+            UTC time, hhmmss.ss for each ensemble [ensemble]
+        ext_gga_lat: np.array(float)
+            Latitude for each ensemble, in degrees [ensemble]
+        ext_gga_lon: np.array(float)
+            Longitude for each ensemble, in degrees [ensemble]
+        ext_gga_alt: np.array(float)
+            Altitude for each ensemble, in meters [ensemble]
+        ext_gga_diff: np.array(float)
+            Differential correction indicator for each ensemble [ensemble]
+        ext_gga_hdop: np.array(float)
+            Horizontal dilution of precision for each ensemble [ensemble]
+        ext_gga_num_sats: np.array(float)
+            Number of satellites for each ensemble [ensemble]
+        ext_vtg_course: np.array(float)
+            Course for each ensemble, in degrees [ensemble]
+        ext_vtg_speed: np.array(float)
+            Speed for each ensemble, in m/s [ensemble]
+        gga_p_method: str
+            Method used to process gga data for position ('End', 'Average', 'External')
+        gga_v_method: str
+            Method used to process gga data for velocity ('End', 'Average', 'External')
+        vtg_method: str
+            Method used to process vtg data for velocity ('Average', 'External')
+        """
+
+        # Assign input to raw properties
+        if raw_gga_utc is None:
+            self.raw_gga_utc = np.tile([np.nan], raw_gga_lat.shape)
+            self.raw_gga_serial_time = np.tile([np.nan], raw_gga_lat.shape)
+        else:
+            self.raw_gga_utc = raw_gga_utc
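+            # Convert UTC time in hhmmss.ss format to serial seconds past midnight: hh*3600 + mm*60 + ss.ss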
+            self.raw_gga_serial_time = np.floor(raw_gga_utc / 10000) * 3600 \
+                + np.floor(np.mod(raw_gga_utc, 10000, where=~np.isnan(raw_gga_utc)) / 100) * 60 \
+                + np.mod(raw_gga_utc, 100, where=~np.isnan(raw_gga_utc))
+
+        self.raw_gga_lat_deg = raw_gga_lat
+        self.raw_gga_lon_deg = raw_gga_lon
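+        # A latitude-longitude pair of (0, 0) indicates no valid GGA fix; mask both coordinates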
+        self.raw_gga_lat_deg[np.where(np.logical_and((self.raw_gga_lat_deg == 0),
+                                                     (self.raw_gga_lon_deg == 0)))] = np.nan
+        self.raw_gga_lat_deg[nan_less(raw_gga_diff, 1)] = np.nan
+        self.raw_gga_lon_deg[np.isnan(self.raw_gga_lat_deg)] = np.nan
+        self.raw_gga_altitude_m = raw_gga_alt
+        self.raw_gga_altitude_m[np.isnan(self.raw_gga_lat_deg)] = np.nan
+        self.raw_gga_differential = raw_gga_diff.astype('float')
+        self.raw_gga_differential[np.isnan(self.raw_gga_lat_deg)] = np.nan
+        self.raw_gga_hdop = raw_gga_hdop.astype('float')
+        self.raw_gga_hdop[np.isnan(self.raw_gga_lat_deg)] = np.nan
+        self.raw_gga_num_sats = raw_gga_num_sats.astype('float')
+        self.raw_gga_num_sats[np.isnan(self.raw_gga_lat_deg)] = np.nan
+        self.raw_gga_serial_time[np.isnan(self.raw_gga_lat_deg)] = np.nan
+
+        # Delta time is a TRDI only variable
+        if raw_gga_delta_time is None:
+            self.raw_gga_delta_time = np.tile(np.nan, raw_gga_lat.shape)
+        else:
+            self.raw_gga_delta_time = raw_gga_delta_time
+
+        self.raw_vtg_course_deg = raw_vtg_course
+        self.raw_vtg_speed_mps = raw_vtg_speed
+        self.raw_vtg_course_deg[np.where(np.logical_and((self.raw_vtg_course_deg == 0),
+                                                        (self.raw_vtg_speed_mps == 0)))] = np.nan
+        self.raw_vtg_speed_mps[np.isnan(self.raw_vtg_course_deg)] = np.nan
+
+        # Delta time is a TRDI only variable
+        if raw_vtg_delta_time is None:
+            self.raw_vtg_delta_time = np.tile(np.nan, raw_gga_lat.shape)
+        else:
+            self.raw_vtg_delta_time = raw_vtg_delta_time
+
+        self.raw_vtg_mode_indicator = np.array(raw_vtg_mode_indicator)
+        
+        # Assign input data to ensemble values computed by other software
+        self.ext_gga_utc = ext_gga_utc
+        self.ext_gga_lat_deg = ext_gga_lat
+        self.ext_gga_lon_deg = ext_gga_lon
+        self.ext_gga_altitude_m = ext_gga_alt
+        self.ext_gga_differential = ext_gga_diff
+        self.ext_gga_hdop = ext_gga_hdop
+        self.ext_gga_num_sats = ext_gga_num_sats
+        self.ext_gga_serial_time = np.floor(np.array(ext_gga_utc) / 10000) * 3600 + \
+            np.floor(np.mod(ext_gga_utc, 10000) / 100) * 60 + np.mod(ext_gga_utc, 100)
+        self.ext_vtg_course_deg = ext_vtg_course
+        self.ext_vtg_speed_mps = ext_vtg_speed
+        
+        # Assign input data to method properties
+        self.gga_position_method = gga_p_method
+        self.gga_velocity_method = gga_v_method
+        self.vtg_velocity_method = vtg_method
+        
+        # If gga data exist compute position and velocity
+        if np.any(np.logical_not(np.isnan(raw_gga_lat))):
+            self.process_gga()
+        
+        # If vtg data exist compute velocity
+        if np.any(np.logical_not(np.isnan(raw_vtg_speed))):
+            self.process_vtg()
+
+    def populate_from_qrev_mat(self, transect):
+        """Populates the object using data from previously saved QRev Matlab file.
+
+        Parameters
+        ----------
+        transect: mat_struct
+           Matlab data structure obtained from sio.loadmat
+        """
+
+        if hasattr(transect, 'gps'):
+            if hasattr(transect.gps, 'diffQualEns'):
+
+                # Raw properties
+                self.raw_gga_lat_deg = transect.gps.rawGGALat_deg
+                self.raw_gga_lon_deg = transect.gps.rawGGALon_deg
+                self.raw_gga_altitude_m = transect.gps.rawGGAAltitude_m
+                self.raw_gga_differential = transect.gps.rawGGADifferential
+                self.raw_gga_hdop = transect.gps.rawGGAHDOP
+                self.raw_gga_utc = transect.gps.rawGGAUTC
+                self.raw_gga_serial_time = transect.gps.rawGGASerialTime
+                self.raw_gga_num_sats = transect.gps.rawGGANumSats
+                self.raw_vtg_course_deg = transect.gps.rawVTGCourse_deg
+                self.raw_vtg_speed_mps = transect.gps.rawVTGSpeed_mps
+                self.raw_vtg_delta_time = transect.gps.rawVTGDeltaTime
+
+                # Older versions of QRev Matlab files represented the VTG mode differently.
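+                # In the older format the indicator was stored as ASCII codes; convert printable codes back to characters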
+                try:
+                    if transect.gps.rawVTGModeIndicator.ndim == 2 and \
+                            type(transect.gps.rawVTGModeIndicator[0][0]) is np.float64:
+                        indicator = []
+                        for row in transect.gps.rawVTGModeIndicator.astype(int):
+                            row_indicator = []
+                            for value in row:
+                                if 127 > value > 0:
+                                    row_indicator.append(chr(value))
+                                else:
+                                    row_indicator.append('')
+                            indicator.append(row_indicator)
+                        self.raw_vtg_mode_indicator = np.array(indicator)
+                    else:
+                        raw_vtg_mode_indicator = transect.gps.rawVTGModeIndicator.tolist()
+                        new_list = []
+                        for row in raw_vtg_mode_indicator:
+                            new_list.append(list(row))
+                        self.raw_vtg_mode_indicator = np.array(new_list)
+                except AttributeError:
+                    self.raw_vtg_mode_indicator = transect.gps.rawVTGModeIndicator
+
+                self.raw_gga_delta_time = transect.gps.rawGGADeltaTime
+
+                # Manufacturer assigned ensemble values
+                self.ext_gga_lat_deg = transect.gps.extGGALat_deg
+                self.ext_gga_lon_deg = transect.gps.extGGALon_deg
+                self.ext_gga_altitude_m = transect.gps.extGGAAltitude_m
+                self.ext_gga_differential = transect.gps.extGGADifferential
+                self.ext_gga_hdop = transect.gps.extGGAHDOP
+                self.ext_gga_utc = transect.gps.extGGAUTC
+                self.ext_gga_serial_time = transect.gps.extGGASerialTime
+                self.ext_gga_num_sats = transect.gps.extGGANumSats
+                self.ext_vtg_course_deg = transect.gps.extVTGCourse_deg
+                self.ext_vtg_speed_mps = transect.gps.extVTGSpeed_mps
+
+                # User specification
+                self.gga_position_method = transect.gps.ggaPositionMethod
+                self.gga_velocity_method = transect.gps.ggaVelocityMethod
+                self.vtg_velocity_method = transect.gps.vtgVelocityMethod
+
+                # Computed properties for ensembles
+                self.gga_lat_ens_deg = transect.gps.ggaLatEns_deg
+                self.gga_lon_ens_deg = transect.gps.ggaLonEns_deg
+                self.utm_ens_m = transect.gps.UTMEns_m
+                self.gga_velocity_ens_mps = transect.gps.ggaVelocityEns_mps
+                self.gga_serial_time_ens = transect.gps.ggaSerialTimeEns
+                self.vtg_velocity_ens_mps = transect.gps.vtgVelocityEns_mps
+                if len(transect.gps.perGoodEns) > 0:
+                    self.per_good_ens = transect.gps.perGoodEns
+                else:
+                    self.per_good_ens = None
+                if type(transect.gps.hdopEns) is np.ndarray:
+                    self.hdop_ens = transect.gps.hdopEns
+                else:
+                    self.hdop_ens = np.array([transect.gps.hdopEns])
+                self.num_sats_ens = transect.gps.numSatsEns
+                self.altitude_ens_m = transect.gps.altitudeEns_m
+                self.diff_qual_ens = transect.gps.diffQualEns
+
+    def process_gga(self, p_setting=None, v_setting=None):
+        """Computes boat velocity from gga data.
+
+        Parameters
+        ----------
+        p_setting: str
+            Specifies method to use for computing positions from gga data (External, End, First, Average, Mindt).
+        v_setting: str
+            Specifies method to use for computing velocity from gga data (External, End, First, Average, Mindt).
+        """
+
+        if p_setting is None:
+            p_setting = self.gga_position_method
+
+        if v_setting is None:
+            v_setting = self.gga_velocity_method
+            
+        # Use only valid gga data
+        valid = np.copy(self.raw_gga_lat_deg)
+        valid[np.logical_not(np.isnan(valid))] = 1
+        valid[np.isnan(valid)] = 0
+        # valid[valid > 0] = 1
+        gga_lat_deg = np.copy(self.raw_gga_lat_deg)
+        gga_lat_deg[valid == False] = np.nan
+        gga_lon_deg = np.copy(self.raw_gga_lon_deg)
+        gga_lon_deg[valid == False] = np.nan
+        gga_serial_time = np.copy(self.raw_gga_serial_time)
+        gga_serial_time[valid == False] = np.nan
+        gga_delta_time = np.copy(self.raw_gga_delta_time)
+        gga_delta_time[valid == False] = np.nan
+        gga_hdop = np.copy(self.raw_gga_hdop)
+        gga_hdop[valid == False] = np.nan
+        gga_num_sats = np.copy(self.raw_gga_num_sats)
+        gga_num_sats[valid == False] = np.nan
+        gga_altitude_m = np.copy(self.raw_gga_altitude_m)
+        gga_altitude_m[valid == False] = np.nan
+        gga_differential = np.copy(self.raw_gga_differential)
+        gga_differential[valid == False] = np.nan
+        n_ensembles = gga_lat_deg.shape[0]
+
+        # Apply method for computing position of ensemble
+
+        # Use ensemble data from other software
+        if p_setting == 'External':
+            self.gga_lat_ens_deg = self.ext_gga_lat_deg
+            self.gga_lon_ens_deg = self.ext_gga_lon_deg
+
+        # Uses last valid data for each ensemble
+        elif p_setting == 'End':
+            self.gga_lat_ens_deg = np.tile(np.nan, gga_lat_deg.shape[0])
+            self.gga_lon_ens_deg = np.tile(np.nan, gga_lon_deg.shape[0])
+            for n in range(n_ensembles):
+                idx = np.argwhere(~np.isnan(gga_lat_deg[n, :]))
+                if idx.size < 1:
+                    idx = 0
+                else:
+                    idx = idx[-1][0]
+                self.gga_lat_ens_deg[n] = gga_lat_deg[n, idx]
+                self.gga_lon_ens_deg[n] = gga_lon_deg[n, idx]
+
+        # Use first valid data for each ensemble
+        elif p_setting == 'First':
+            self.gga_lat_ens_deg = np.tile(np.nan, gga_lat_deg.shape[0])
+            self.gga_lon_ens_deg = np.tile(np.nan, gga_lon_deg.shape[0])
+            for n in range(n_ensembles):
+                idx = 0
+                self.gga_lat_ens_deg[n] = gga_lat_deg[n, idx]
+                self.gga_lon_ens_deg[n] = gga_lon_deg[n, idx]
+
+        # Use minimum delta time
+        elif p_setting == 'Mindt':
+            self.gga_lat_ens_deg = np.tile(np.nan, gga_lat_deg.shape[0])
+            self.gga_lon_ens_deg = np.tile(np.nan, gga_lon_deg.shape[0])
+            d_time = np.abs(gga_delta_time)
+            d_time_min = np.nanmin(d_time.T, 0).T
+            
+            use = []
+            for n in range(len(d_time_min)):
+                use.append(np.abs(d_time[n, :]) == d_time_min[n])
+                
+            use = np.array(use)
+            self.gga_lat_ens_deg = np.tile([np.nan], (len(d_time_min))) 
+            self.gga_lon_ens_deg = np.tile([np.nan], (len(d_time_min)))
+            for n in range(len(d_time_min)):
+                idx = np.where(use[n, :] == True)[0]
+                if len(idx) > 0:
+                    idx = idx[0]
+                    self.gga_lat_ens_deg[n] = gga_lat_deg[n, idx]
+                    self.gga_lon_ens_deg[n] = gga_lon_deg[n, idx]
+                    
+        y_utm, x_utm = self.compute_utm(self.gga_lat_ens_deg, self.gga_lon_ens_deg)
+        self.utm_ens_m = (x_utm, y_utm)
+
+        # Prepare variables for velocity computations
+        lat = np.tile([np.nan], n_ensembles)
+        lon = np.tile([np.nan], n_ensembles)
+        self.gga_serial_time_ens = np.tile([np.nan], n_ensembles)
+        self.altitude_ens_m = np.tile([np.nan], n_ensembles)
+        self.diff_qual_ens = np.tile([np.nan], n_ensembles)
+        self.hdop_ens = np.tile([np.nan], n_ensembles)
+        self.num_sats_ens = np.tile([np.nan], n_ensembles)
+        
+        # Apply method for computing velocity of ensemble
+        if v_setting == 'External':
+            lat = self.ext_gga_lat_deg
+            lon = self.ext_gga_lon_deg
+            self.gga_serial_time_ens = self.ext_gga_serial_time
+            self.hdop_ens = self.ext_gga_hdop
+            self.num_sats_ens = self.ext_gga_num_sats
+            self.altitude_ens_m = self.ext_gga_altitude_m
+            self.diff_qual_ens = self.ext_gga_differential
+            
+        # Average all position during an ensemble
+        elif v_setting == 'Average':
+            lat = np.nanmean(gga_lat_deg, 1)
+            lon = np.nanmean(gga_lon_deg, 1)
+            self.gga_serial_time_ens = np.nanmean(gga_serial_time, 1)
+            self.hdop_ens = np.nanmean(gga_hdop, 1)
+            self.num_sats_ens = np.floor(np.nanmean(gga_num_sats, 1))
+            self.altitude_ens_m = np.nanmean(gga_altitude_m, 1)
+            self.diff_qual_ens = np.floor(np.nanmean(gga_differential, 1))
+            
+        # Use the last valid data in an ensemble
+        elif v_setting == 'End':
+
+            for n in range(n_ensembles):
+                idx = np.where(np.isnan(gga_lat_deg[n, :]) == False)[0]
+                if len(idx) > 0:
+                    idx = idx[-1]
+                    lat[n] = gga_lat_deg[n, idx]
+                    lon[n] = gga_lon_deg[n, idx]
+                    self.gga_serial_time_ens[n] = gga_serial_time[n, idx]
+                    self.altitude_ens_m[n] = gga_altitude_m[n, idx]
+                    self.diff_qual_ens[n] = gga_differential[n, idx]
+
+                if idx <= len(self.raw_gga_hdop):
+                    self.hdop_ens[n] = gga_hdop[n, idx]
+                    
+                if idx <= len(gga_num_sats[n]):
+                    self.num_sats_ens[n] = gga_num_sats[n, idx]
+
+        # Use the first valid data in an ensemble
+        elif v_setting == 'First':
+            for n in range(n_ensembles):
+                idx = 0
+                lat[n] = gga_lat_deg[n, idx]
+                lon[n] = gga_lon_deg[n, idx]
+                self.gga_serial_time_ens[n] = gga_serial_time[n, idx]
+                self.altitude_ens_m[n] = gga_altitude_m[n, idx]
+                self.diff_qual_ens[n] = gga_differential[n, idx]
+                
+                if idx <= len(self.raw_gga_hdop):
+                    self.hdop_ens[n] = gga_hdop[n, idx]
+                    
+                if idx <= len(gga_num_sats[n]):
+                    self.num_sats_ens[n] = gga_num_sats[n, idx]
+
+        # Use the minimum delta time to assign data to an ensemble
+        elif v_setting == 'Mindt':
+            d_time = np.abs(gga_delta_time)
+            d_time_min = np.nanmin(d_time, 1)
+            use = []
+            for n in range(len(d_time_min)):
+                use.append(np.abs(d_time[n, :]) == d_time_min[n])
+            use = np.array(use)  
+            for n in range(len(d_time_min)):
+                idx = np.where(use[n, :] == True)[0]
+                if len(idx) > 0:
+                    idx = idx[0]
+                    lat[n] = gga_lat_deg[n, idx]
+                    lon[n] = gga_lon_deg[n, idx]
+                    self.gga_serial_time_ens[n] = gga_serial_time[n, idx]
+                    self.altitude_ens_m[n] = gga_altitude_m[n, idx]
+                    self.diff_qual_ens[n] = gga_differential[n, idx]
+                    
+                if idx <= len(gga_hdop[n]):
+                    self.hdop_ens[n] = gga_hdop[n, idx]
+                    
+                if idx <= len(gga_num_sats[n]):
+                    self.num_sats_ens[n] = gga_num_sats[n, idx]
+                    
+        # Identify valid values
+        idx_values = np.where(np.isnan(x_utm) == False)[0]
+        if len(idx_values) > 1:
+            u, v = self.gga2_vel_trdi(lat, lon, self.gga_serial_time_ens, idx_values)
+            self.gga_velocity_ens_mps = np.tile([np.nan], (2, len(lat)))
+            self.gga_velocity_ens_mps[0, idx_values[1:]] = u[idx_values[1:]]
+            self.gga_velocity_ens_mps[1, idx_values[1:]] = v[idx_values[1:]]
+        else:
+            self.gga_velocity_ens_mps = np.tile([np.nan], (2, len(lat)))
+
+    def process_vtg(self, v_setting=None):
+        """Processes raw vtg data to achieve a velocity for each ensemble containing data.
+
+        Parameters
+        ----------
+        v_setting: str
+            Method used to compute ensemble velocity.
+        """
+        
+        # Determine method used to compute ensemble velocity
+        if v_setting is None:
+            v_setting = self.vtg_velocity_method
+
+        # Use only valid data
+        vtg_speed_mps = np.copy(self.raw_vtg_speed_mps)
+        vtg_course_deg = np.copy(self.raw_vtg_course_deg)
+        vtg_delta_time = np.copy(self.raw_vtg_delta_time)
+
+        # Use mode indicator to identify invalid original data
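+        # An NMEA mode indicator of 'N' means the data are not valid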
+        idx = np.where(self.raw_vtg_mode_indicator == 'N')
+        vtg_speed_mps[idx] = np.nan
+        vtg_course_deg[idx] = np.nan
+        vtg_delta_time[idx] = np.nan
+
+        # Use average velocity for ensemble velocity
+        if v_setting == 'Average':
+            # Compute vtg velocity in x y coordinates from speed and course
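+            # azdeg2rad converts the azimuth (degrees clockwise from north) to radians in the math convention used by pol2cart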
+            direction = azdeg2rad(vtg_course_deg)
+            vx, vy = pol2cart(direction, vtg_speed_mps)
+            vx[np.logical_and(vx == 0, vy == 0)] = np.nan
+            vy[np.isnan(vx)] = np.nan
+            vx_mean = np.nanmean(vx, 1)
+            vy_mean = np.nanmean(vy, 1)
+            self.vtg_velocity_ens_mps = np.vstack([vx_mean.T, vy_mean.T])
+
+        # Use last velocity for ensemble velocity
+        elif v_setting == 'End':
+            n_ensembles = vtg_speed_mps.shape[0]
+            vtg_vel = nans(n_ensembles)
+            vtg_dir = nans(n_ensembles)
+            
+            for n in range(n_ensembles):
+                idx = np.where(~np.isnan(vtg_speed_mps[n, :]))[0]
+                if len(idx) > 0:
+                    idx = idx[-1]
+                else:
+                    idx = 0
+                vtg_vel[n] = vtg_speed_mps[n, idx]
+                vtg_dir[n] = vtg_course_deg[n, idx]
+                
+            direction = azdeg2rad(vtg_dir)
+            vx, vy = pol2cart(direction, vtg_vel)
+            vx[np.logical_and(vx == 0, vy == 0)] = np.nan
+            vy[np.isnan(vx)] = np.nan
+            self.vtg_velocity_ens_mps = np.vstack([vx, vy])
+
+        # Use first velocity for ensemble velocity
+        elif v_setting == 'First':
+            n_ensembles = vtg_speed_mps.shape[0]
+            vtg_vel = nans(n_ensembles)
+            vtg_dir = nans(n_ensembles)
+            
+            for n in range(n_ensembles):
+                idx = 0
+                vtg_vel[n] = vtg_speed_mps[n, idx]
+                vtg_dir[n] = vtg_course_deg[n, idx]
+            direction = azdeg2rad(vtg_dir)
+            vx, vy = pol2cart(direction, vtg_vel)
+            vx[np.logical_and(vx == 0, vy == 0)] = np.nan
+            vy[np.isnan(vx)] = np.nan
+            self.vtg_velocity_ens_mps = np.vstack([vx, vy])
+
+        # Use the velocity with the minimum delta time for the ensemble velocity
+        elif v_setting == 'Mindt':
+            d_time = np.abs(vtg_delta_time)
+            # d_time[d_time==0] = np.nan
+            d_time_min = np.nanmin(d_time.T, 0).T
+            
+            use = []
+            vtg_speed = []
+            vtg_dir = []
+            
+            for n in range(len(d_time_min)):
+                use.append(np.abs(d_time[n, :]) == d_time_min[n])
+                
+            use = np.array(use)
+            for n in range(len(d_time_min)):
+                idx = np.where(use[n, :] == True)[0]
+                if len(idx) > 0:
+                    idx = idx[0]
+                    vtg_speed.append(vtg_speed_mps[n, idx])
+                    vtg_dir.append(vtg_course_deg[n, idx])
+                else:
+                    vtg_speed.append(np.nan)
+                    vtg_dir.append(np.nan)
+
+            # Convert course and speed to velocity components once, after all ensembles are assigned
+            direction = azdeg2rad(np.array(vtg_dir))
+            vx, vy = pol2cart(direction, np.array(vtg_speed))
+            self.vtg_velocity_ens_mps = np.vstack([vx, vy])
+
+        # Use velocity selected by external algorithm for ensemble velocity
+        elif v_setting == 'External':
+            direction = azdeg2rad(self.ext_vtg_course_deg)
+            vx, vy = pol2cart(direction, self.ext_vtg_speed_mps)
+            self.vtg_velocity_ens_mps = np.vstack([vx.T, vy.T])
+
+    @staticmethod
+    def compute_utm(lat_in, lon_in):
+        """Compute UTM coordinates from latitude and longitude.
+
+        Parameters
+        ----------
+        lat_in: np.array(float)
+            Latitude in degrees.
+        lon_in: np.array(float)
+            Longitude in degrees.
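+
+        Returns
+        -------
+        y_utm: np.array(float)
+            UTM coordinate from utm.from_latlon for each valid position, in m; NaN where invalid.
+        x_utm: np.array(float)
+            UTM coordinate from utm.from_latlon for each valid position, in m; NaN where invalid.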
+        """
+
+        # Set invalid data to nan
+        lat_in[lat_in == 0] = np.nan
+        lon_in[lon_in == 0] = np.nan
+
+        lat2 = np.deg2rad(lat_in)
+        lon2 = np.deg2rad(lon_in)
+        
+        y = np.tile([np.nan], lat_in.shape)
+        x = np.tile([np.nan], lon_in.shape)
+        idx = np.where(np.logical_and((np.isnan(lat2) == False), (np.isnan(lon2) == False)))
+        for ind in idx[0]:
+            y[ind], x[ind], _, _ = utm.from_latlon(lat2[ind], lon2[ind])
+        x_utm = x.reshape(lon_in.shape)
+        y_utm = y.reshape(lat_in.shape)
+        
+        return y_utm, x_utm
+
+    @staticmethod
+    def gga2_vel_trdi(lat, lon, t, idx_values):
+        """Computes velocity from gga data using approach from TRDI WinRiver II.
+
+        Parameters
+        ----------
+        lat: np.array(float)
+            Latitude for each ensemble used for velocity computations, in degrees.
+        lon: np.array(float)
+            Longitude for each ensemble used for velocity computations, in degrees.
+        t: np.array(float)
+            GGA time associated with the latitude and longitude selected for velocity computations.
+        idx_values: np.array(int)
+            Index of valid lat-lon data.
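+
+        Returns
+        -------
+        u: np.array(float)
+            East component of boat velocity, in m/s.
+        v: np.array(float)
+            North component of boat velocity, in m/s.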
+        """
+        
+        u = np.zeros(lat.shape)
+        v = np.zeros(lat.shape)
+        
+        for n in range(1, len(idx_values)):
+            lat1 = lat[idx_values[n-1]]
+            lat2 = lat[idx_values[n]]
+            lon1 = lon[idx_values[n-1]]
+            lon2 = lon[idx_values[n]]
+            t1 = t[idx_values[n-1]]
+            t2 = t[idx_values[n]]
+
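+            # Scale factors approximate the length of one degree of longitude (re) and
+            # latitude (rn) on the WGS84 ellipsoid at the mean latitude, using the
+            # semi-major axis (6378137 m) and flattening (1/298.257223563)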
+            lat_avg_rad = ((lat1 + lat2) / 2) * np.pi / 180
+            sin_lat_avg_rad = np.sin(lat_avg_rad)
+            coefficient = 6378137 * np.pi / 180
+            ellipticity = 1 / 298.257223563
+            re = coefficient * (1 + ellipticity * sin_lat_avg_rad ** 2)
+            rn = coefficient * (1 - 2 * ellipticity + 3 * ellipticity * sin_lat_avg_rad ** 2)
+            delta_x = re * (lon2 - lon1) * np.cos(lat_avg_rad)
+            delta_y = rn * (lat2 - lat1)
+            delta_time = t2 - t1
+            if delta_time > 0.0001:
+                u[idx_values[n]] = delta_x / delta_time
+                v[idx_values[n]] = delta_y / delta_time
+            else:
+                u[idx_values[n]] = np.nan
+                v[idx_values[n]] = np.nan
+            
+        return u, v
+
diff --git a/Classes/HeadingData.py b/Classes/HeadingData.py
new file mode 100644
index 0000000000000000000000000000000000000000..aa660d5de5ed3ed159c452945dcd03fedacf8889
--- /dev/null
+++ b/Classes/HeadingData.py
@@ -0,0 +1,224 @@
+import numpy as np
+from operator import xor
+
+
+class HeadingData(object):
+    """This class stores and manipulates heading and associated data.
+
+    Attributes
+    ----------
+    data: np.array(float)
+        Corrected heading data, in degrees.
+    original_data: np.array(float)
+        Original uncorrected heading data, in degrees.
+    source: str
+        Source of heading data (internal, external).
+    mag_var_deg: float
+        Magnetic variation applied to get corrected data, in degrees (East +, West -).
+    mag_var_orig_deg: float
+        Original magnetic variation, in degrees (East +, West -).
+    align_correction_deg: float
+        Alignment correction to align compass with instrument (used for external heading), in degrees CW.
+    align_correction_orig_deg: float
+        Original alignment correction to align compass with instrument (used for external heading), in degrees CW.
+    mag_error: np.array(float)
+        Percent change in mean magnetic field from calibration (SonTek only).
+    pitch_limit: np.array(float)
+        Pitch limit of compass calibration (SonTek only), in degrees.
+    roll_limit: np.array(float)
+        Roll limit of compass calibration (SonTek only), in degrees.
+    """
+    
+    def __init__(self):
+        """Initialize class and set variables to None."""
+
+        self.data = None  # Corrected heading data, in degrees
+        self.original_data = None  # Original uncorrected heading data, in degrees
+        self.source = None  # Source of heading data (internal, external)
+        self.mag_var_deg = None  # Magnetic variation applied to the heading data
+        self.mag_var_orig_deg = None  # Original magnetic variation
+        self.align_correction_deg = None  # Alignment correction to align compass with instrument
+        self.align_correction_orig_deg = None  # Original alignment correction
+        self.mag_error = None  # Percent change in mean magnetic field from calibration
+        self.pitch_limit = None  # Pitch limit of compass calibration (SonTek only), in degrees
+        self.roll_limit = None  # Roll limit of compass calibration (SonTek only), in degrees
+        
+    def populate_data(self, data_in, source_in, magvar=0, align=0, mag_error=None, pitch_limit=None, roll_limit=None):
+        """Assigns values to instance variables.
+
+        Parameters
+        ----------
+        data_in: np.array(float)
+            Heading, in degrees.
+        source_in: str
+            Source of heading data (internal, external).
+        magvar: float
+            Magnetic variation, in degrees (East +, West -).
+        align: float
+            Alignment correction to align compass with instrument, in degrees
+        mag_error: np.array(float)
+            Percent change in magnetic field (SonTek only)
+        pitch_limit: np.array(float)
+            Pitch limit of compass calibration (SonTek only)
+        roll_limit: np.array(float)
+            Roll limit of compass calibration (SonTek only)
+        """
+
+        self.original_data = data_in
+        self.source = source_in
+        self.mag_var_deg = float(magvar)
+        self.mag_var_orig_deg = float(magvar)
+        self.align_correction_deg = float(align)
+        self.align_correction_orig_deg = float(align)
+        self.mag_error = mag_error
+
+        if pitch_limit is not None and len(pitch_limit.shape) > 1:
+            self.pitch_limit = pitch_limit[0, :]
+        else:
+            self.pitch_limit = pitch_limit
+
+        if roll_limit is not None and len(roll_limit.shape) > 1:
+            self.roll_limit = roll_limit[0, :]
+        else:
+            self.roll_limit = roll_limit
+
+        # Correct the original data for the magvar and alignment
+        if source_in == 'internal':
+            self.data = self.original_data + self.mag_var_deg
+        else:
+            self.data = self.original_data + self.align_correction_deg
+        self.fix_upper_limit()
+        self.interp_heading()
+
+    def populate_from_qrev_mat(self, mat_data):
+        """Populates the object using data from previously saved QRev Matlab file.
+
+        Parameters
+        ----------
+        mat_data: mat_struct
+           Matlab data structure obtained from sio.loadmat
+        """
+
+        self.data = mat_data.data
+        self.original_data = mat_data.originalData
+        self.source = mat_data.source
+        self.mag_var_deg = float(mat_data.magVar_deg)
+        self.mag_var_orig_deg = float(mat_data.magVarOrig_deg)
+        self.align_correction_deg = mat_data.alignCorrection_deg
+        if hasattr(mat_data, 'align_correction_orig_deg'):
+            self.align_correction_orig_deg = mat_data.align_correction_orig_deg
+        else:
+            self.align_correction_orig_deg = mat_data.alignCorrection_deg
+
+        # Only available for SonTek G3 compass
+        try:
+            if len(mat_data.magError) > 0:
+                self.mag_error = mat_data.magError
+
+            # Only available for SonTek G3 compass
+            if mat_data.pitchLimit.size > 0:
+                if mat_data.pitchLimit.size > 2:
+                    self.pitch_limit = mat_data.pitchLimit[0]
+                else:
+                    self.pitch_limit = mat_data.pitchLimit
+
+            # Only available for SonTek G3 compass
+            if mat_data.rollLimit.size > 0:
+                if mat_data.rollLimit.size > 2:
+                    self.roll_limit = mat_data.rollLimit[0]
+                else:
+                    self.roll_limit = mat_data.rollLimit
+        except AttributeError:
+            self.mag_error = None
+            self.pitch_limit = None
+            self.roll_limit = None
+
+    def set_mag_var(self, mag_var, h_source):
+        """Applies a new magvar to the object data.
+
+        Parameters
+        ----------
+        mag_var: float
+            Magnetic variation, in degrees
+        h_source: str
+            Heading source (internal or external)
+        """
+
+        self.mag_var_deg = mag_var
+        if h_source == 'internal':
+            self.data = self.original_data + self.mag_var_deg
+            self.fix_upper_limit()
+            
+    def set_align_correction(self, align_correction, h_source):
+        """Applies a new alignment correction to the object data.
+
+        Parameters
+        ----------
+        align_correction: float
+            Alignment correction, in degrees
+        h_source: str
+            Heading source (internal or external)
+        """
+
+        self.align_correction_deg = align_correction
+        if h_source == 'external':
+            self.data = self.original_data + self.align_correction_deg
+            self.fix_upper_limit()
+
+    def fix_upper_limit(self):
+        """Fixes heading when magvar and or alignment are applied resulting in heading greater than 360 degrees.
+        """
+
+        idx = np.where(self.data > 360)[0]
+        if len(idx) > 0:
+            self.data[idx] = self.data[idx] - 360   
+            
+    def interp_heading(self):
+        """Interpolate invalid headings. Use linear interpolation if there are
+        valid values on either side of the invalid heading. If the invalid heading
+        occurs at the beginning of the time series, back fill using the 1st valid.
+        If the invalid heading occurs at the end of the time series, forward fill
+        with the last valid self.data.
+        """
+        
+        idx_invalid = np.where(np.isnan(self.data))[0]
+        
+        if len(idx_invalid) > 0:
+            
+            first_valid_idx = np.where(np.isnan(self.data) == False)[0][0]
+            last_valid_idx = np.where(np.isnan(self.data) == False)[0][-1]
+        
+            # Process each invalid heading
+            for n in range(len(idx_invalid)):
+                before_idx = np.where(np.isnan(self.data[0:idx_invalid[n] + 1]) == False)[0]
+                after_idx = np.where(np.isnan(self.data[idx_invalid[n]:]) == False)[0]
+                
+                # If the invalid heading is at the beginning, back fill
+                if len(before_idx) < 1:
+                    self.data[idx_invalid[n]] = self.data[first_valid_idx]
+
+                # If the invalid heading is at the end, forward fill
+                elif len(after_idx) < 1:
+                    self.data[idx_invalid[n]] = self.data[last_valid_idx]
+
+                # If the invalid heading is in the middle, interpolate
+                else:
+                    before_idx = before_idx[-1]
+                    after_idx = after_idx[0] + idx_invalid[n]
+                    
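+                    # If the bounding headings fall on opposite sides of 180 degrees,
+                    # offset the difference by 360 so the interpolation wraps through
+                    # north (0/360) rather than across 180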
+                    test1 = self.data[before_idx] > 180
+                    test2 = self.data[after_idx] > 180
+                    c = None
+                    if not xor(test1, test2):
+                        c = 0
+                    elif test1:
+                        c = 360
+                    elif test2:
+                        c = -360
+                    self.data[idx_invalid[n]] = (((self.data[after_idx] - self.data[before_idx] + c) /
+                                                  (before_idx - after_idx)) *
+                                                 (before_idx - idx_invalid[n])) + self.data[before_idx]
+                    if self.data[idx_invalid[n]] > 360:
+                        self.data[idx_invalid[n]] = self.data[idx_invalid[n]] - 360
+                    elif self.data[idx_invalid[n]] < 0:
+                        self.data[idx_invalid[n]] = self.data[idx_invalid[n]] + 360
diff --git a/Classes/InstrumentData.py b/Classes/InstrumentData.py
new file mode 100644
index 0000000000000000000000000000000000000000..cffa41bdba8fdc6e7bf81e327fd57203561d2358
--- /dev/null
+++ b/Classes/InstrumentData.py
@@ -0,0 +1,268 @@
+import numpy as np
+from Classes.TransformationMatrix import TransformationMatrix
+
+
+class InstrumentData(object):
+    """Container for characteristics of the ADCP used to make the measurement
+
+    Attributes
+    ----------
+    serial_num: str
+        Serial number of ADCP.
+    manufacturer: str
+        Name of manufacturer.
+    model: str
+        Model name of ADCP.
+    firmware: str
+        Firmware version in the ADCP.
+    frequency_khz: float, str, or np.array
+        Frequency or frequencies used by ADCP, in kHz ('Multi' for multi-frequency).
+    beam_angle_deg: float
+        Angle of the beams from vertical, in degrees.
+    beam_pattern: str
+        Pattern of the beam angles, concave or convex.
+    t_matrix: TransformationMatrix
+        Object of TransformationMatrix.
+    configuration_commands: np.array
+        Commands used to configure the instrument.
+    """
+     
+    def __init__(self):
+        """Constructor initializes the variables to None.
+        """
+
+        self.serial_num = None  # Serial number of ADCP
+        self.manufacturer = None  # manufacturer of ADCP (SonTek, TRDI)
+        self.model = None  # model of ADCP (Rio Grande, StreamPro, RiverRay, M9, S5)
+        self.firmware = None  # firmware version
+        self.frequency_khz = None  # frequency of ADCP (could be "Multi")
+        self.beam_angle_deg = None  # angle of beam from vertical
+        self.beam_pattern = None  # pattern of beams
+        self.t_matrix = None  # object of TransformationMatrix
+        self.configuration_commands = np.array([])  # configuration commands sent to ADCP
+        
+    def populate_data(self, manufacturer, raw_data, mmt_transect=None, mmt=None):
+        """Manages method calls for different manufacturers.
+
+        Parameters
+        ----------
+        manufacturer: str
+            Name of manufacturer.
+        raw_data: object
+            Object of Pd0TRDI for TRDI or Object of MatSonTek for SonTek
+        mmt_transect: MMT_Transect
+            Object of Transect (mmt object)
+        mmt: MMT_TRDI
+            Object of MMT_TRDI
+        """
+
+        # Process based on manufacturer
+        if manufacturer == 'TRDI':
+            self.manufacturer = manufacturer
+            self.trdi(pd0=raw_data, mmt_transect=mmt_transect, mmt=mmt)
+        elif manufacturer == 'SonTek':
+            self.manufacturer = manufacturer
+            self.sontek(rs=raw_data)
+        elif manufacturer == 'Nortek':
+            self.manufacturer = manufacturer
+            self.nortek(rs=raw_data)
+
+    def trdi(self, pd0, mmt_transect, mmt):
+        """Populates the variables with data from TRDI ADCPs.
+
+        Parameters
+        ----------
+        pd0: Pd0TRDI
+            Object of Pd0TRDI
+        mmt_transect: MMT_Transect
+            Object of MMT_Transect
+        mmt: MMT_TRDI
+            Object of MMT_TRDI
+        """
+
+        # Instrument frequency
+        self.frequency_khz = pd0.Inst.freq[0]
+
+        # Firmware
+        self.firmware = pd0.Inst.firm_ver[0]
+
+        # Instrument beam angle and pattern
+        self.beam_angle_deg = pd0.Inst.beam_ang[0]
+        self.beam_pattern = pd0.Inst.pat[0]
+
+        # Instrument characteristics
+        mmt_site = getattr(mmt, 'site_info')
+        mmt_config = getattr(mmt_transect, 'active_config')
+
+        self.serial_num = mmt_site['ADCPSerialNmb']
+
+        # Determine TRDI model
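+        # The integer part of the firmware version identifies the model:
+        # 10 = Rio Grande, 31 = StreamPro, 44 = RiverRay, 56 = RiverPro/RioPro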
+        num = float(self.firmware)
+        model_switch = np.floor(num)
+
+        if model_switch == 10:
+            self.model = 'Rio Grande'
+            if 'Fixed_Commands' in mmt_config.keys():
+                self.configuration_commands = np.append(self.configuration_commands, 'Fixed')
+                self.configuration_commands = np.append(self.configuration_commands, mmt_config['Fixed_Commands'])
+
+        elif model_switch == 31:
+            self.model = 'StreamPro'
+            self.frequency_khz = 2000
+            if 'Fixed_Commands_StreamPro' in mmt_config.keys():
+                self.configuration_commands = np.append(self.configuration_commands, 'Fixed')
+                self.configuration_commands = np.append(self.configuration_commands,
+                                                        mmt_config['Fixed_Commands_StreamPro'])
+
+        elif model_switch == 44:
+            self.model = 'RiverRay'
+            if 'Fixed_Commands_RiverRay' in mmt_config.keys():
+                self.configuration_commands = np.append(self.configuration_commands, 'Fixed')
+                self.configuration_commands = np.append(self.configuration_commands,
+                                                        mmt_config['Fixed_Commands_RiverRay'])
+
+        elif model_switch == 56:
+            self.model = 'RiverPro'
+            if pd0.Cfg.n_beams[0] < 5:
+                if 'RG_Test' in mmt.qaqc.keys():
+                    idx = mmt.qaqc['RG_Test'][0].find('RioPro')
+                    if idx != -1:
+                        self.model = 'RioPro'
+
+            if 'Fixed_Commands_RiverPro' in mmt_config.keys():
+                self.configuration_commands = np.append(self.configuration_commands, 'Fixed')
+                self.configuration_commands = np.append(self.configuration_commands,
+                                                        mmt_config['Fixed_Commands_RiverPro'])
+            else:
+                self.configuration_commands = np.append(self.configuration_commands, 'Fixed')
+                self.configuration_commands = np.append(self.configuration_commands, ' ')
+
+        else:
+            self.model = 'Unknown'
+            if 'Fixed_Commands' in mmt_config.keys():
+                self.configuration_commands = np.append(self.configuration_commands, 'Fixed')
+                self.configuration_commands = np.append(self.configuration_commands, mmt_config['Fixed_Commands'])
+
+        if 'Wizard_Commands' in mmt_config.keys():
+            self.configuration_commands = np.append(self.configuration_commands, ['Wizard'])
+            self.configuration_commands = np.append(self.configuration_commands,
+                                                    mmt_config['Wizard_Commands'])
+
+        if 'User_Commands' in mmt_config.keys():
+            self.configuration_commands = np.append(self.configuration_commands, ['User'])
+            self.configuration_commands = np.append(self.configuration_commands,
+                                                    mmt_config['User_Commands'])
+
+        # Obtain transformation matrix from one of the available sources
+        if not np.isnan(pd0.Inst.t_matrix[0, 0]):
+            self.t_matrix = TransformationMatrix()
+            self.t_matrix.populate_data(manufacturer='TRDI', model='pd0', data_in=pd0)
+        elif self.model == 'RiverRay':
+            self.t_matrix = TransformationMatrix()
+            self.t_matrix.populate_data(manufacturer='TRDI', model=self.model, data_in='Nominal')
+        else:
+            if isinstance(mmt.qaqc, dict) and len(mmt.qaqc) > 0:
+                if 'RG_Test' in mmt.qaqc.keys():
+
+                    self.t_matrix = TransformationMatrix()
+                    self.t_matrix.populate_data(manufacturer='TRDI', model=self.model, data_in=mmt.qaqc['RG_Test'][0])
+
+                elif 'Compass_Calibration' in mmt.qaqc.keys():
+
+                    self.t_matrix = TransformationMatrix()
+                    self.t_matrix.populate_data(manufacturer='TRDI',
+                                                model=self.model,
+                                                data_in=mmt.qaqc['Compass_Calibration'][0])
+
+                elif 'Compass_Evaluation' in mmt.qaqc.keys():
+
+                    self.t_matrix = TransformationMatrix()
+                    self.t_matrix.populate_data(manufacturer='TRDI',
+                                                model=self.model,
+                                                data_in=mmt.qaqc['Compass_Evaluation'][0])
+
+                else:
+                    self.t_matrix = TransformationMatrix()
+                    self.t_matrix.populate_data(manufacturer='TRDI',
+                                                model=self.model,
+                                                data_in='Nominal')
+            else:
+                self.t_matrix = TransformationMatrix()
+                self.t_matrix.populate_data(manufacturer='TRDI',
+                                            model=self.model,
+                                            data_in='Nominal')
+
+    def sontek(self, rs):
+        """Populates the variables with data from SonTek ADCPs.
+
+        Parameters
+        ----------
+        rs: MatSonTek
+        """
+
+        self.serial_num = rs.System.SerialNumber
+        self.frequency_khz = rs.Transformation_Matrices.Frequency
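+        # A nonzero third frequency distinguishes the M9 from the S5/RS5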
+        if self.frequency_khz[2] > 0:
+            self.model = 'M9'
+        elif hasattr(rs.WaterTrack, 'Vel_Expected_StdDev'):
+            self.model = 'RS5'
+        else:
+            self.model = 'S5'
+        if hasattr(rs, 'SystemHW'):
+            revision = str(rs.SystemHW.FirmwareRevision)
+            if len(revision) < 2:
+                revision = '0' + revision
+            self.firmware = str(rs.SystemHW.FirmwareVersion) + '.' + revision
+        else:
+            self.firmware = ''
+        self.beam_angle_deg = 25
+        self.beam_pattern = 'Convex'
+        self.t_matrix = TransformationMatrix()
+        self.t_matrix.populate_data('SonTek', data_in=rs.Transformation_Matrices.Matrix)
+        self.configuration_commands = None
+
+    def nortek(self, rs):
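+        """Populates the variables with data from Nortek ADCPs.
+
+        Parameters
+        ----------
+        rs: MatSonTek
+        """
+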
+        self.serial_num = rs.System.SerialNumber
+        self.frequency_khz = rs.Transformation_Matrices.Frequency
+        self.model = rs.System.InstrumentModel
+        if hasattr(rs, 'SystemHW'):
+            revision = str(rs.SystemHW.FirmwareRevision)
+            if len(revision) < 2:
+                revision = '0' + revision
+            self.firmware = str(rs.SystemHW.FirmwareVersion) + '.' + revision
+        else:
+            self.firmware = ''
+        self.beam_angle_deg = 25
+        self.beam_pattern = 'Convex'
+        self.t_matrix = TransformationMatrix()
+        self.t_matrix.populate_data('SonTek', data_in=rs.Transformation_Matrices.Matrix)
+        self.configuration_commands = None
+
+    def populate_from_qrev_mat(self, transect):
+        """Populates the object using data from previously saved QRev Matlab file.
+
+        Parameters
+        ----------
+        transect: mat_struct
+            Matlab data structure obtained from sio.loadmat
+        """
+
+        self.serial_num = str(transect.adcp.serialNum)
+        self.manufacturer = transect.adcp.manufacturer
+        self.model = transect.adcp.model
+        self.firmware = transect.adcp.firmware
+        self.frequency_khz = transect.adcp.frequency_hz
+        self.beam_angle_deg = transect.adcp.beamAngle_deg
+        self.beam_pattern = transect.adcp.beamPattern
+        self.t_matrix = TransformationMatrix()
+        self.t_matrix.populate_from_qrev_mat(transect.adcp.tMatrix)
+
+        if len(transect.adcp.configurationCommands) > 0:
+            self.configuration_commands = []
+            for command in transect.adcp.configurationCommands:
+                if type(command) == str:
+                    self.configuration_commands.append(command)
+            self.configuration_commands = np.array(self.configuration_commands)
+
+        else:
+            self.configuration_commands = None
diff --git a/Classes/MMT_TRDI.py b/Classes/MMT_TRDI.py
new file mode 100644
index 0000000000000000000000000000000000000000..10b96762aac85803040ed92dedaf3be0fa54a9d8
--- /dev/null
+++ b/Classes/MMT_TRDI.py
@@ -0,0 +1,539 @@
+import os
+import re
+import xmltodict
+import numpy as np
+
+
+class MMTtrdi(object):
+    """Class to read and store data from a WinRiver 2 mmt file.
+
+    Attributes
+    ----------
+    project: dict
+        Dictionary of measurement information
+    site_info: dict
+        Dictionary of site information
+    transects: list
+        List of Transect objects containing information for each discharge transect
+    summary: dict
+        Dictionary of measurement summary for each available boat velocity reference
+    qaqc: dict
+        Dictionary of premeasurement tests, calibrations, and evaluations
+    mbt_transects: list
+        List of Transect objects containing information for each moving-bed test transect
+    path: str
+        Path for mmt file and associated files
+    """
+
+    def __init__(self, mmt_file):
+        """Initialize instance variables and reads mmt file.
+
+        Parameters
+        ----------
+        mmt_file: str
+            Full filename including path of mmt file.
+        """
+
+        # Initialize instance variables
+        self.project = {}
+        self.site_info = {}
+        self.transects = []
+        self.summary = {}
+        self.qaqc = {}
+        self.mbt_transects = []
+        self.path = None
+
+        # Process mmt file
+        self.process_mmt(mmt_file)
+
+    def process_mmt(self, mmt_file):
+        """Method to read and process the mmt file.
+
+        Parameters
+        ----------
+        mmt_file: str
+            Full filename including path of mmt file.
+        """
+
+        # Open the file and convert to an ordered dictionary tree
+        with open(mmt_file, 'r', encoding='utf-8') as fd:
+            xml_data = fd.read()
+
+            # Strip control characters and stray percent signs that break XML parsing
+            remove_re = re.compile(u'[\x00-\x08\x0B-\x0C\x0E-\x1F\x7F%]')
+            clean_xml_data = remove_re.sub('', xml_data)
+
+            win_river = xmltodict.parse(clean_xml_data)
+
+        win_river = win_river['WinRiver']
+
+        self.path = os.path.split(mmt_file)[0]
+
+        # Process project settings
+        self.project['Name'] = win_river['Project']['@Name']
+        self.project['Version'] = win_river['Project']['@Version']
+        if 'Locked' in win_river['Project'].keys():
+            self.project['Locked'] = win_river['Project']['Locked']
+        else:
+            self.project['Locked'] = None
+
+        # Process site information
+        siteinfo_keys = win_river['Project']['Site_Information'].keys()
+
+        # Iterate through all of the keys and values of site info
+        for x in siteinfo_keys:
+            site_data = win_river['Project']['Site_Information'][x]
+            if site_data is not None:
+                # Remove @ symbol from properties
+                if '@' in x:
+                    x = x[1:]
+                if x == 'Water_Temperature':
+                    self.site_info[x] = float(site_data)
+                    # -32768 used to denote no data
+                    if self.site_info[x] < -100:
+                        self.site_info[x] = ''
+                else:
+                    self.site_info[x] = site_data
+            else:
+                self.site_info[x] = ''
+        if 'Transect' in win_river['Project']['Site_Discharge'].keys():
+            trans = win_river['Project']['Site_Discharge']['Transect']
+
+            # Create a Transect class for each transect found under Site_Discharge
+            if type(trans) == list:
+                for i in range(len(trans)):
+                    if 'File' in trans[i]:
+                        self.transects.append(MMTtransect(trans[i]))
+            else:
+                self.transects = [MMTtransect(trans)]
+
+            # Discharge Summary
+            if 'Discharge_Summary' in win_river['Project']['Site_Discharge'].keys():
+                discharge_summary = win_river['Project']['Site_Discharge']['Discharge_Summary']
+
+                self.summary['NONE'] = self.mmtqsum(discharge_summary['None'])
+                self.summary['BT'] = self.mmtqsum(discharge_summary['BottomTrack'])
+                self.summary['GGA'] = self.mmtqsum(discharge_summary['GGA'])
+                self.summary['VTG'] = self.mmtqsum(discharge_summary['VTG'])
+
+        # QA_QC
+        if 'QA_QC' in win_river['Project'].keys():
+            qaqc = win_river['Project']['QA_QC']
+            for qaqc_type, data in qaqc.items():
+                # Parse qaqc data from dictionary if the type is a test, cal, or eval
+                if qaqc_type in ['RG_Test', 'Compass_Calibration', 'Compass_Evaluation']:
+                    # There could be multiple tests of the same type so they are stored in a list
+                    time_stamp = qaqc_type + '_TimeStamp'
+                    if not isinstance(data['TestResult'], list):
+                        self.qaqc[qaqc_type] = [data['TestResult']['Text']]
+                        self.qaqc[time_stamp] = [data['TestResult']['TimeStamp']]
+                    else:
+                        self.qaqc[qaqc_type] = []
+                        self.qaqc[time_stamp] = []
+                        for result in data['TestResult']:
+                            self.qaqc[qaqc_type].append(result['Text'])
+                            self.qaqc[time_stamp].append(result['TimeStamp'])
+
+                if qaqc_type == 'Moving_Bed_Test':
+                    if 'Transect' in data.keys():
+                        self.moving_bed_test(data)
+
+    def moving_bed_test(self, mb_data):
+        """Method to parse data from moving-bed test dictionary.
+
+        Parameters
+        ----------
+        mb_data: dict
+            Dictionary containing moving-bed test information
+        """
+
+        transects = mb_data['Transect']
+
+        # If only one transect make it a list
+        if not isinstance(transects, list):
+            transects = [transects]
+
+        # Process each transect dictionary
+        for tsect in transects:
+            transect = MMTtransect(tsect)
+
+            # Determine type of moving-bed test
+            if '@MBTType' in tsect:
+                if tsect['@MBTType'] == '0':
+                    transect.moving_bed_type = 'Loop'
+                elif tsect['@MBTType'] == '1':
+                    transect.moving_bed_type = 'Stationary'
+            else:
+                # Use the file name to determine the moving-bed test type
+                file_name = transect.Files[0]
+                fidx = file_name.rfind('.')
+                if file_name[fidx-3:fidx] == 'SBT':
+                    transect.moving_bed_type = 'Stationary'
+                elif file_name[fidx-3:fidx] == 'LBT':
+                    transect.moving_bed_type = 'Loop'
+                else:
+                    # If type can't be determined process as stationary
+                    transect.moving_bed_type = 'Stationary'
+
+            self.mbt_transects.append(transect)
+
+    @staticmethod
+    def mmtqsum(data):
+        """Method to parse the MMT Q summary data.
+
+        Parameters
+        ----------
+        data: dict
+            A summary dictionary from mmt file.
+
+        Returns
+        -------
+        sum_dict: dict
+            Dictionary of summary with a couple of key names changed.
+        """
+        
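+        # A sketch of the reshaping (hypothetical keys and values, shaped
+        # like the xmltodict output of the mmt Discharge_Summary node):
+        #   {'Transect_0': {'UseInSummary': '1', 'FileName': 'x_000.PD0'}}
+        #   becomes {'Use': [1.0], 'FileName': ['x_000.PD0'], ...}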
+        sum_dict = {
+            'Use': [],
+            'Begin_Left': [],
+            'FileName': [],
+            'LeftEdgeSlopeCoeff': [],
+            'RightEdgeSlopeCoeff': []
+            }
+
+        # Iterate through each transect
+        for transect in data.values():
+            # Iterate through each key and val in the transect summary
+            for key2, val2 in transect.items():
+                # Append value from transect to appropriate key
+                if key2 == 'UseInSummary':
+                    sum_dict['Use'].append(float(val2))
+                elif key2 == "BeginLeft":
+                    sum_dict['Begin_Left'].append(float(val2))
+                elif key2 == 'FileName':
+                    sum_dict['FileName'].append(val2)
+                elif key2 == 'LeftEdgeSlopeCoeff':
+                    sum_dict['LeftEdgeSlopeCoeff'].append(float(val2))
+                elif key2 == 'RightEdgeSlopeCoeff':
+                    sum_dict['RightEdgeSlopeCoeff'].append(float(val2))
+                else:
+                    # If the key has not been specified use key from transect summary
+                    if key2 not in sum_dict:
+                        sum_dict[key2] = []
+                    try:
+                        sum_dict[key2].append(float(val2))
+                    except ValueError:
+                        sum_dict[key2].append(np.nan)
+        return sum_dict
+
+
+class MMTtransect(object):
+    """Class to hold properties of MMT transect dictionary attributes.
+
+    Attributes
+    ----------
+    Checked: int
+    Files: list
+    Notes: list
+    """
+
+    def __init__(self, trans):
+        """Constructor immediately begins extraction of data"""
+
+        self.Checked = int(trans['@Checked'])
+        self.Files = []
+        self.Notes = []
+        self.field_config = None
+        self.active_config = None
+        self.moving_bed_type = None
+
+        files = trans['File']
+
+        # Create File classes for each file associated with transect
+        if type(files) is list:
+            for file in files:
+                self.Files.append(file['#text'])
+        else:
+            self.Files.append(files['#text'])
+
+        # Create note dictionaries for each note associated with the transect
+        if 'Note' in trans.keys():
+            notes = trans['Note']
+            if not isinstance(notes, list):
+                notes = [notes]
+            if isinstance(trans['File'], list):
+                number = trans['File'][0]['@TransectNmb']
+            else:
+                number = trans['File']['@TransectNmb']
+            for n in notes:
+                self.Notes.append(self.note_dict(n, number))
+
+        # Create configuration dictionaries for each config attribute
+        if type(trans['Configuration']) is list:
+            for config in trans['Configuration']:
+                if int(config['@Checked']) == 0:
+                    self.field_config = self.parse_config(config)
+                if int(config['@Checked']) == 1:
+                    self.active_config = self.parse_config(config)
+        else:
+            if int(trans['Configuration']['@Checked']) == 0:
+                self.field_config = self.parse_config(trans['Configuration'])
+            if int(trans['Configuration']['@Checked']) == 1:
+                self.active_config = self.parse_config(trans['Configuration'])
+
+        # Assign active config to field config if there is no field config
+        if self.field_config is None:
+            self.field_config = self.active_config
+
+    def set_moving_bed_type(self, mvb_type):
+        """Setter for moving bed type in the case of MBT Transects
+
+        Parameters
+        ----------
+        mvb_type: str
+            Type of moving-bed test.
+        """
+
+        self.moving_bed_type = mvb_type
+
+    @staticmethod
+    def parse_config(config):
+        """Method to parse configuration file from mmt xml.
+
+        Parameters
+        ----------
+        config: dict
+            Dictionary of configuration settings
+
+        Returns
+        -------
+        config_dict: dict
+            Processed dictionary of configuration settings
+        """
+
+        # Initialize dictionary for configuration
+        config_dict = {}
+
+        # Store all instrument commands
+        command_groups = config['Commands']
+        for group in command_groups.keys():
+            config_dict[group] = []
+            for key, command in command_groups[group].items():
+                if key != '@Status':
+                    config_dict[group].append(command)
+
+        # Depth sounder configuration
+        if 'Use_Depth_Sounder_In_Processing' in config['Depth_Sounder'].keys():
+            if config['Depth_Sounder']['Use_Depth_Sounder_In_Processing']['#text'] == "YES":
+                config_dict['DS_Use_Process'] = 1
+            else:
+                config_dict['DS_Use_Process'] = 0
+        else:
+            config_dict['DS_Use_Process'] = -1
+
+        config_dict['DS_Transducer_Depth'] = float(config['Depth_Sounder']['Depth_Sounder_Transducer_Depth']['#text'])
+        config_dict['DS_Transducer_Offset'] = float(config['Depth_Sounder']['Depth_Sounder_Transducer_Offset']['#text'])
+
+        if config['Depth_Sounder']['Depth_Sounder_Correct_Speed_of_Sound']['#text'] == 'YES':
+            config_dict['DS_Cor_Spd_Sound'] = 1
+        else:
+            config_dict['DS_Cor_Spd_Sound'] = 0
+
+        config_dict['DS_Scale_Factor'] = float(config['Depth_Sounder']['Depth_Sounder_Scale_Factor']['#text'])
+
+        # External heading configuration
+        config_dict['Ext_Heading_Offset'] = float(config['Ext_Heading']['Offset']['#text'])
+
+        if 'Use_Ext_Heading' in config['Ext_Heading'].keys():
+            if config['Ext_Heading']['Use_Ext_Heading']['#text'] == 'NO':
+                config_dict['Ext_Heading_Use'] = False
+            else:
+                config_dict['Ext_Heading_Use'] = True
+        else:
+            config_dict['Ext_Heading_Use'] = False
+
+        # GPS configuration
+        if 'GPS' in config.keys():
+            config_dict['GPS_Time_Delay'] = config['GPS']['Time_Delay']['#text']
+
+        # Discharge settings
+        config_dict['Q_Top_Method'] = float(config['Discharge']['Top_Discharge_Estimate']['#text'])
+        config_dict['Q_Bottom_Method'] = float(config['Discharge']['Bottom_Discharge_Estimate']['#text'])
+        config_dict['Q_Power_Curve_Coeff'] = float(config['Discharge']['Power_Curve_Coef']['#text'])
+        config_dict['Q_Cut_Top_Bins'] = float(config['Discharge']['Cut_Top_Bins']['#text'])
+        config_dict['Q_Bins_Above_Sidelobe'] = float(config['Discharge']['Cut_Bins_Above_Sidelobe']['#text'])
+        config_dict['Q_Left_Edge_Type'] = float(config['Discharge']['River_Left_Edge_Type']['#text'])
+        config_dict['Q_Left_Edge_Coeff'] = float(config['Discharge']['Left_Edge_Slope_Coeff']['#text'])
+        config_dict['Q_Right_Edge_Type'] = float(config['Discharge']['River_Right_Edge_Type']['#text'])
+        config_dict['Q_Right_Edge_Coeff'] = float(config['Discharge']['Right_Edge_Slope_Coeff']['#text'])
+        config_dict['Q_Shore_Pings_Avg'] = float(config['Discharge']['Shore_Pings_Avg']['#text'])
+
+        # Edge estimate settings
+        config_dict['Edge_Begin_Shore_Distance'] = config['Edge_Estimates']['Begin_Shore_Distance']['#text']
+        config_dict['Edge_End_Shore_Distance'] = float(config['Edge_Estimates']['End_Shore_Distance']['#text'])
+        if config['Edge_Estimates']['Begin_Left_Bank']['#text'] == 'YES':
+            config_dict['Edge_Begin_Left_Bank'] = 1
+        else:
+            config_dict['Edge_Begin_Left_Bank'] = 0
+
+        # Check for user discharge feature in mmt file
+        if 'Begin_Manual_Discharge' in config['Edge_Estimates']:
+            config_dict['Edge_Begin_Manual_Discharge'] = float(config['Edge_Estimates']['Begin_Manual_Discharge']['#text'])
+            config_dict['Edge_Begin_Method_Distance'] = \
+                config['Edge_Estimates']['Begin_Edge_Discharge_Method_Distance']['#text']
+            config_dict['Edge_End_Manual_Discharge'] = float(config['Edge_Estimates']['End_Manual_Discharge']['#text'])
+            config_dict['Edge_End_Method_Distance'] = \
+                config['Edge_Estimates']['End_Edge_Discharge_Method_Distance']['#text']
+
+        # Offsets
+        for key in config['Offsets'].keys():
+            if key == 'ADCP_Transducer_Depth':
+                child = "Offsets_Transducer_Depth"
+            else:
+                child = "Offsets_" + key
+
+            config_dict[child] = float(config['Offsets'][key]['#text'])
+
+        # Processing settings
+        for key in config['Processing'].keys():
+            if key == 'Use_3_Beam_Solution_For_BT':
+                child = 'Proc_Use_3_Beam_BT'
+            elif key == 'Use_3_Beam_Solution_For_WT':
+                child = 'Proc_Use_3_Beam_WT'
+            elif key == 'BT_Error_Velocity_Threshold':
+                child = 'Proc_BT_Error_Vel_Threshold'
+            elif key == 'WT_Error_Velocity_Threshold':
+                child = 'Proc_WT_Error_Velocity_Threshold'
+            elif key == 'BT_Up_Velocity_Threshold':
+                child = 'Proc_BT_Up_Vel_Threshold'
+            elif key == 'WT_Up_Velocity_Threshold':
+                child = 'Proc_WT_Up_Vel_Threshold'
+            elif key == 'Fixed_Speed_Of_Sound':
+                child = 'Proc_Fixed_Speed_Of_Sound'
+            elif key == 'Mark_Below_Bottom_Bad':
+                child = 'Proc_Mark_Below_Bottom_Bad'
+            elif key == 'Use_Weighted_Mean':
+                child = 'Proc_Use_Weighted_Mean'
+            elif key == 'Absorption':
+                child = 'Proc_Absorption'
+            else:
+                child = 'Proc_' + key
+
+            # Try to cast to float otherwise assign 1 or 0 based on string value
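+            # (e.g., '0.2500' -> 0.25, 'YES' -> 1, 'NO' -> 0)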
+            try:
+                config_dict[child] = float(config['Processing'][key]['#text'])
+            except ValueError:
+                if config['Processing'][key]['#text'] == 'YES':
+                    config_dict[child] = 1
+                else:
+                    config_dict[child] = 0
+
+        # Recording
+        config_dict['Rec_Filename_Prefix'] = config['Recording']['Filename_Prefix']['#text']
+        config_dict['Rec_Output_Directory'] = config['Recording']['Output_Directory']['#text']
+
+        if 'Root_Directory' in config['Recording'].keys():
+            if '#text' in config['Recording']['Root_Directory']:
+                config_dict['Rec_Root_Directory'] = config['Recording']['Root_Directory']['#text']
+            else:
+                config_dict['Rec_Root_Directory'] = None
+        else:
+            config_dict['Rec_Root_Directory'] = None
+
+        # 'MeasurmentNmb' (sic) matches the spelling of the tag in the mmt file
+        config_dict['Rec_MeasNmb'] = config['Recording']['MeasurmentNmb']
+        config_dict['Rec_GPS'] = config['Recording']['GPS_Recording']['#text']
+        config_dict['Rec_DS'] = config['Recording']['DS_Recording']['#text']
+        config_dict['Rec_EH'] = config['Recording']['EH_Recording']['#text']
+        config_dict['Rec_ASCII_Output'] = config['Recording']['ASCII_Output_Recording']['#text']
+        config_dict['Rec_Max_File_Size'] = float(config['Recording']['Maximum_File_Size']['#text'])
+        config_dict['Rec_Next_Transect_Number'] = float(config['Recording']['Next_Transect_Number']['#text'])
+        config_dict['Rec_Add_Date_Time'] = float(config['Recording']['Add_Date_Time']['#text'])
+        config_dict['Rec_Use_Delimiter'] = config['Recording']['Use_Delimiter']['#text']
+        config_dict['Rec_Delimiter'] = config['Recording']['Custom_Delimiter']['#text']
+        config_dict['Rec_Prefix'] = config['Recording']['Use_Prefix']['#text']
+        config_dict['Rec_Use_MeasNmb'] = config['Recording']['Use_MeasurementNmb']['#text']
+        config_dict['Rec_Use_TransectNmb'] = config['Recording']['Use_TransectNmb']['#text']
+        config_dict['Rec_Use_SequenceNmb'] = config['Recording']['Use_SequenceNmb']['#text']
+
+        # Wizard settings
+        config_dict['Wiz_ADCP_Type'] = float(config['Wizard_Info']['ADCP_Type'])
+        config_dict['Wiz_Firmware'] = float(config['Wizard_Info']['ADCP_FW_Version'])
+        config_dict['Wiz_Use_Ext_Heading'] = config['Wizard_Info']['Use_Ext_Heading']
+        config_dict['Wiz_Use_GPS'] = config['Wizard_Info']['Use_GPS']
+        config_dict['Wiz_Use_DS'] = config['Wizard_Info']['Use_Depth_Sounder']
+        config_dict['Wiz_Max_Water_Depth'] = float(config['Wizard_Info']['Max_Water_Depth'])
+        config_dict['Wiz_Max_Water_Speed'] = float(config['Wizard_Info']['Max_Water_Speed'])
+        config_dict['Wiz_Max_Boat_Space'] = float(config['Wizard_Info']['Max_Boat_Speed'])
+        config_dict['Wiz_Material'] = float(config['Wizard_Info']['Material'])
+        config_dict['Wiz_Water_Mode'] = float(config['Wizard_Info']['Water_Mode'])
+        config_dict['Wiz_Bottom_Mode'] = float(config['Wizard_Info']['Bottom_Mode'])
+        config_dict['Wiz_Beam_Angle'] = float(config['Wizard_Info']['Beam_Angle'])
+        config_dict['Wiz_Pressure_Sensor'] = config['Wizard_Info']['Pressure_Sensor']
+        config_dict['Wiz_Water_Mode_13'] = float(config['Wizard_Info']['Water_Mode_13_Avail'])
+        config_dict['Wiz_StreamPro_Default'] = float(config['Wizard_Info']['Use_StreamPro_Def_Cfg'])
+        config_dict['Wiz_StreamPro_Bin_Size'] = float(config['Wizard_Info']['StreamPro_Bin_Size'])
+        config_dict['Wiz_StreamPro_Bin_Number'] = float(config['Wizard_Info']['StreamPro_Bin_Num'])
+
+        if 'Use_GPS_Internal' in config['Wizard_Info'].keys():
+            config_dict['Wiz_Use_GPS_Internal'] = config['Wizard_Info']['Use_GPS_Internal']
+        if 'Internal_GPS_Baud_Rate_Index' in config['Wizard_Info'].keys():
+            config_dict['Wiz_Internal_GPS_Baud_Rate_Index'] = float(config['Wizard_Info']
+                                                                    ['Internal_GPS_Baud_Rate_Index'])
+
+        return config_dict
+
+    @staticmethod
+    def file_dict(file):
+        """Create dictionary for file information.
+
+        Parameters
+        ----------
+        file: dict
+            Dictionary for file from mmt
+
+        Returns
+        -------
+        transect_file: dict
+            Dictionary of transect file information
+                Path: str
+                    Full filename of transect including path
+                File: str
+                    Filename of transect
+                Number: str
+                    Transect number assigned in WinRiver 2
+        """
+
+        transect_file = {'Path': file['@PathName'], 'File': file['#text'], 'Number': file['@TransectNmb']}
+        return transect_file
+
+    @staticmethod
+    def note_dict(note, number):
+        """Create dictionary for notes.
+
+        Parameters
+        ----------
+        note: dict
+            Dictionary from mmt for notes
+        number: str
+            Transect number
+
+        Returns
+        -------
+        note_dict_out: dict
+            Dictionary for note information
+                NoteFileNo: str
+                    Transect number associated with the note
+                NoteDate: str
+                    Date note was entered
+                NoteText: str
+                    Text of note
+        """
+
+        note_dict_out = {'NoteFileNo': number, 'NoteDate': note['@TimeStamp'], 'NoteText': note['@Text']}
+        return note_dict_out
diff --git a/Classes/MatSonTek.py b/Classes/MatSonTek.py
new file mode 100644
index 0000000000000000000000000000000000000000..e78c3467c224b188d71eeefe7be2343392658c2d
--- /dev/null
+++ b/Classes/MatSonTek.py
@@ -0,0 +1,64 @@
+import scipy.io as sio
+import numpy as np
+
+
+class MatSonTek(object):
+    """Read SonTek Matlab files and returns a dictionary of mat_struct.
+     Any data in English units are converted to SI units.
+    """
+
+    def __init__(self, fullname):
+        """Initializes the object, reads the Matlab file, and converts all English units to metric.
+
+        Parameters
+        ----------
+        fullname: str
+            String containing both the path and filename.
+        """
+
+        # Read Matlab file
+        mat_data = sio.loadmat(fullname, struct_as_record=False, squeeze_me=True)
+
+        if 'BottomTrack' in mat_data:
+            # Convert data to SI units if in English units
+            if mat_data['BottomTrack'].Units.BT_Depth == 'ft':
+                self.convert2metric(mat_data)
+
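+            # Map numeric VTG mode codes to their ASCII characters
+            # (e.g., 65 -> 'A'); NaNs are replaced with 0 before the lookup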
+            if hasattr(mat_data['RawGPSData'], 'VtgMode'):
+                mat_data['RawGPSData'].VtgMode[np.isnan(mat_data['RawGPSData'].VtgMode)] = 0
+                mat_data['RawGPSData'].VtgMode = \
+                    np.array([chr(x) for x in range(127)])[mat_data['RawGPSData'].VtgMode.astype(int)]
+
+        # Create structure from dictionary
+        vars(self).update(mat_data)
+
+    @staticmethod
+    def convert2metric(mat_data):
+        """Converts all data in English units to metric units.
+
+        Parameters
+        ----------
+        mat_data: dict
+            Dictionary of data from Matlab file
+        """
+
+        data2correct = ['BottomTrack', 'GPS', 'Setup', 'Summary', 'System', 'WaterTrack']
+        for item in data2correct:
+            data = mat_data[item]
+            units = data.Units
+            names = units._fieldnames
+            for name in names:
+                if getattr(units, name) == 'ft':
+                    setattr(data, name, getattr(data, name) * 0.3048)
+                    setattr(units, name, 'm')
+                elif getattr(units, name) == 'ft/s':
+                    setattr(data, name, getattr(data, name) * 0.3048)
+                    setattr(units, name, 'm/s')
+                elif getattr(units, name) == 'degF':
+                    setattr(data, name, (getattr(data, name)-32) * (5.0/9.0))
+                    setattr(units, name, 'degC')
+                elif getattr(units, name) == 'cfs':
+                    setattr(data, name, getattr(data, name) * (0.3048**3))
+                    setattr(units, name, 'm3/s')
+                elif getattr(units, name) == 'ft2':
+                    setattr(data, name, getattr(data, name) * (0.3048 ** 2))
+                    setattr(units, name, 'm2')
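+
+
+# A minimal usage sketch (hypothetical file path):
+#   rsdata = MatSonTek('/data/site/20240101.mat')
+#   depth = rsdata.BottomTrack.BT_Depth  # meters, even if the file stored ft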
diff --git a/Classes/Measurement.py b/Classes/Measurement.py
new file mode 100644
index 0000000000000000000000000000000000000000..ebcba8f2eabe7bf7f1527288e2c451be60919784
--- /dev/null
+++ b/Classes/Measurement.py
@@ -0,0 +1,3961 @@
+import os
+import datetime
+import numpy as np
+import xml.etree.ElementTree as ETree
+from xml.dom.minidom import parseString
+from Classes.MMT_TRDI import MMTtrdi
+from Classes.TransectData import TransectData
+from Classes.PreMeasurement import PreMeasurement
+from Classes.MovingBedTests import MovingBedTests
+from Classes.QComp import QComp
+from Classes.MatSonTek import MatSonTek
+from Classes.ComputeExtrap import ComputeExtrap
+from Classes.ExtrapQSensitivity import ExtrapQSensitivity
+from Classes.Uncertainty import Uncertainty
+from Classes.QAData import QAData
+from Classes.BoatStructure import BoatStructure
+from Classes.BoatData import BoatData
+from Classes.WaterData import WaterData
+from Classes.Oursin import Oursin
+from Classes.Pd0TRDI_2 import Pd0TRDI
+from MiscLibs.common_functions import cart2pol, pol2cart, rad2azdeg, nans, azdeg2rad
+# from profilehooks import profile, timecall
+
+
+class Measurement(object):
+    """Class to hold all measurement details.
+
+    Attributes
+    ----------
+    station_name: str
+        Station name
+    station_number: str
+        Station number
+    meas_number: str
+        Measurement number
+    persons: str
+        Persons collecting and/or processing the measurement
+    transects: list
+        List of transect objects of TransectData
+    mb_tests: list
+        List of moving-bed test objects of MovingBedTests
+    system_tst: list
+        List of system test objects of PreMeasurement
+    compass_cal: list
+        List of compass calibration objects of PreMeasurement
+    compass_eval: list
+        List of compass evaluation objects of PreMeasurement
+    extrap_fit: ComputeExtrap
+        Object of ComputeExtrap
+    processing: str
+        Type of processing, default QRev
+    discharge: list
+        List of discharge objects of QComp
+    uncertainty: Uncertainty
+        Object of Uncertainty
+    initial_settings: dict
+        Dictionary of all initial processing settings
+    qa: QAData
+        Object of QAData
+    user_rating: str
+        Optional user rating
+    comments: list
+        List of all user supplied comments
+    ext_temp_chk: dict
+        Dictionary of external temperature readings
+    use_weighted: bool
+        Indicates the setting for use_weighted to be used for reprocessing
+    use_ping_type: bool
+        Indicates if ping types should be used in BT and WT filters
+    use_measurement_thresholds: bool
+        Indicates if the entire measurement should be used to set filter thresholds
+    stage_start_m: float
+        Stage at start of measurement
+    stage_end_m: float
+        Stage at end of measurement
+    stage_meas_m: float
+        Stage assigned to measurement
+    """
+
+    # @profile
+    def __init__(self, in_file, source, proc_type='QRev', checked=False, run_oursin=False, use_weighted=False,
+                 use_measurement_thresholds=False, use_ping_type=True, min_transects=2, min_duration=720):
+        """Initialize instance variables and initiate processing of measurement
+        data.
+
+        Parameters
+        ----------
+        in_file: str or list or dict
+            String containing fullname of mmt file for TRDI data, dict for
+            QRev data, or list of files for SonTek
+        source: str
+            Source of data. TRDI, SonTek, QRev
+        proc_type: str
+            Type of processing. QRev, None, Original
+        checked: bool
+            Boolean to determine if only checked transects should be loaded for
+            TRDI data.
+        run_oursin: bool
+            Determines if the Oursin uncertainty model should be run
+        use_weighted: bool
+            Specifies if discharge weighted medians are used for extrapolation
+        use_measurement_thresholds: bool
+            Specifies if filters are based on a transect or whole measurement
+        use_ping_type: bool
+            Specifies if filters are based on ping type and frequency
+        min_transects: int
+            Minimum number of transects required to pass QA
+        min_duration: float
+            Minimum duration in seconds of all transects to pass QA
+        """
+
+        self.use_ping_type = use_ping_type
+        self.use_measurement_thresholds = use_measurement_thresholds
+        self.run_oursin = run_oursin
+        self.min_transects = min_transects
+        self.min_duration = min_duration
+        self.station_name = None
+        self.station_number = None
+        self.persons = ''
+        self.meas_number = ''
+        self.transects = []
+        self.mb_tests = []
+        self.system_tst = []
+        self.compass_cal = []
+        self.compass_eval = []
+        self.extrap_fit = None
+        self.processing = None
+        self.discharge = []
+        self.uncertainty = None
+        self.initial_settings = None
+        self.qa = None
+        self.user_rating = 'Not Rated'
+        self.comments = []
+        self.ext_temp_chk = {'user': np.nan, 'units': 'C', 'adcp': np.nan, 'user_orig': np.nan, 'adcp_orig': np.nan}
+        self.checked_transect_idx = []
+        self.oursin = None
+        self.use_weighted = use_weighted
+        self.observed_no_moving_bed = False
+        self.stage_meas_m = 0
+        self.stage_end_m = 0
+        self.stage_start_m = 0
+
+        # Load data from selected source
+        if source == 'QRev':
+            self.load_qrev_mat(mat_data=in_file)
+            if proc_type == 'QRev':
+                # Apply QRev default settings
+                self.run_oursin = run_oursin
+                self.use_weighted = use_weighted
+                self.use_measurement_thresholds = use_measurement_thresholds
+                settings = self.current_settings()
+                settings['WTEnsInterpolation'] = 'abba'
+                settings['WTCellInterpolation'] = 'abba'
+                settings['Processing'] = 'QRev'
+                settings['UseMeasurementThresholds'] = use_measurement_thresholds
+                self.apply_settings(settings)
+
+        else:
+            if source == 'TRDI':
+                self.load_trdi(in_file, checked=checked)
+
+            elif source == 'SonTek':
+                self.load_sontek(in_file)
+
+            elif source == 'Nortek':
+                self.load_sontek(in_file)
+
+            # Process data
+            if len(self.transects) > 0:
+
+                # Save initial settings
+                self.initial_settings = self.current_settings()
+
+                # Process moving-bed tests
+                if len(self.mb_tests) > 0:
+                    # Get navigation reference
+                    select = self.initial_settings['NavRef']
+                    ref = None
+                    if select == 'bt_vel':
+                        ref = 'BT'
+                    elif select == 'gga_vel':
+                        ref = 'GGA'
+                    elif select == 'vtg_vel':
+                        ref = 'VTG'
+                    self.mb_tests = MovingBedTests.auto_use_2_correct(
+                        moving_bed_tests=self.mb_tests, boat_ref=ref)
+
+                # Set processing type
+                if proc_type == 'QRev':
+                    # Apply QRev default settings
+                    settings = self.qrev_default_settings(check_user_excluded_dist=True, use_weighted=use_weighted)
+                    settings['Processing'] = 'QRev'
+                    settings['UseMeasurementThresholds'] = use_measurement_thresholds
+                    settings['UsePingType'] = self.use_ping_type
+                    self.apply_settings(settings)
+
+                elif proc_type == 'None':
+                    # Processing with no filters and interpolation
+                    settings = self.no_filter_interp_settings(self)
+                    settings['Processing'] = 'None'
+                    self.apply_settings(settings)
+
+                elif proc_type == 'Original':
+                    # Processing for original settings
+                    # from manufacturer software
+                    for transect in self.transects:
+                        q = QComp()
+                        q.populate_data(data_in=transect,
+                                        moving_bed_data=self.mb_tests)
+                        self.discharge.append(q)
+                self.uncertainty = Uncertainty()
+                self.uncertainty.compute_uncertainty(self)
+
+                self.qa = QAData(self)
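+
+        # Typical construction (hypothetical path; parameters as documented
+        # in the docstring above):
+        #   meas = Measurement(in_file='C:/data/site.mmt', source='TRDI',
+        #                      proc_type='QRev', checked=True)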
+
+    def load_trdi(self, mmt_file, transect_type='Q', checked=False):
+        """Method to load TRDI data.
+
+        Parameters
+        ----------
+        mmt_file: str
+            Full pathname to mmt file.
+        transect_type: str
+            Type of data (Q: discharge, MB: moving-bed test)
+        checked: bool
+            Determines if all files are loaded (False) or only checked (True)
+        """
+
+        # Read mmt file
+        mmt = MMTtrdi(mmt_file)
+
+        # Get properties if they exist, otherwise set them as blank strings
+        self.station_name = str(mmt.site_info['Name'])
+        self.station_number = str(mmt.site_info['Number'])
+        self.persons = str(mmt.site_info['Party'])
+        self.meas_number = str(mmt.site_info['MeasurementNmb'])
+
+        # Get stage readings, if available. Note: mmt stage is always in m.
+        if mmt.site_info['Use_Inside_Gage_Height'] == '1':
+            stage = float(mmt.site_info['Inside_Gage_Height'])
+        else:
+            stage = float(mmt.site_info['Outside_Gage_Height'])
+
+        self.stage_start_m = stage
+        change = float(mmt.site_info['Gage_Height_Change'])
+        self.stage_end_m = stage + change
+        self.stage_meas_m = (self.stage_start_m + self.stage_end_m) / 2.
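+        # e.g., a 2.10 m gage reading with a -0.04 m change gives an end
+        # stage of 2.06 m and a measurement stage of 2.08 m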
+
+        # Initialize processing variable
+        self.processing = 'WR2'
+
+        if len(mmt.transects) > 0:
+            # Create transect objects for TRDI data
+            self.transects = self.allocate_transects(mmt=mmt,
+                                                     transect_type=transect_type,
+                                                     checked=checked)
+
+            self.checked_transect_idx = self.checked_transects(self)
+
+            # Create object for pre-measurement tests
+            if isinstance(mmt.qaqc, dict) or isinstance(mmt.mbt_transects, list):
+                self.qaqc_trdi(mmt)
+
+            # Save comments from mmt file in comments
+            self.comments.append('MMT Remarks: ' + mmt.site_info['Remarks'])
+
+            for t in range(len(self.transects)):
+                notes = getattr(mmt.transects[t], 'Notes')
+                for note in notes:
+                    note_text = ' File: ' + note['NoteFileNo'] + ' ' \
+                                + note['NoteDate'] + ': ' + note['NoteText']
+                    self.comments.append(note_text)
+
+            # Get external temperature
+            if type(mmt.site_info['Water_Temperature']) is float:
+                self.ext_temp_chk['user'] = mmt.site_info['Water_Temperature']
+                self.ext_temp_chk['units'] = 'C'
+                self.ext_temp_chk['user_orig'] = mmt.site_info['Water_Temperature']
+
+            # Initialize thresholds settings dictionary
+            threshold_settings = dict()
+            threshold_settings['wt_settings'] = {}
+            threshold_settings['bt_settings'] = {}
+            threshold_settings['depth_settings'] = {}
+
+            # Select reference transect: first checked transect, or the first
+            # transect if none are checked
+            if len(self.checked_transect_idx) > 0:
+                ref_transect = self.checked_transect_idx[0]
+            else:
+                ref_transect = 0
+
+            # Water track filter threshold settings
+            threshold_settings['wt_settings']['beam'] = \
+                self.set_num_beam_wt_threshold_trdi(mmt.transects[ref_transect])
+            threshold_settings['wt_settings']['difference'] = 'Manual'
+            threshold_settings['wt_settings']['difference_threshold'] = \
+                mmt.transects[ref_transect].active_config['Proc_WT_Error_Velocity_Threshold']
+            threshold_settings['wt_settings']['vertical'] = 'Manual'
+            threshold_settings['wt_settings']['vertical_threshold'] = \
+                mmt.transects[ref_transect].active_config['Proc_WT_Up_Vel_Threshold']
+
+            # Bottom track filter threshold settings
+            threshold_settings['bt_settings']['beam'] = \
+                self.set_num_beam_bt_threshold_trdi(mmt.transects[ref_transect])
+            threshold_settings['bt_settings']['difference'] = 'Manual'
+            threshold_settings['bt_settings']['difference_threshold'] = \
+                mmt.transects[ref_transect].active_config['Proc_BT_Error_Vel_Threshold']
+            threshold_settings['bt_settings']['vertical'] = 'Manual'
+            threshold_settings['bt_settings']['vertical_threshold'] = \
+                mmt.transects[ref_transect].active_config['Proc_BT_Up_Vel_Threshold']
+
+            # Depth filter and averaging settings
+            threshold_settings['depth_settings']['depth_weighting'] = \
+                self.set_depth_weighting_trdi(mmt.transects[ref_transect])
+            threshold_settings['depth_settings']['depth_valid_method'] = 'TRDI'
+            threshold_settings['depth_settings']['depth_screening'] = \
+                self.set_depth_screening_trdi(mmt.transects[ref_transect])
+
+            # Determine reference used in WR2 if available
+            reference = 'BT'
+            if 'Reference' in mmt.site_info.keys():
+                reference = mmt.site_info['Reference']
+                if reference == 'BT':
+                    target = 'bt_vel'
+                elif reference == 'GGA':
+                    target = 'gga_vel'
+                elif reference == 'VTG':
+                    target = 'vtg_vel'
+                else:
+                    target = 'bt_vel'
+
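+                # Fall back to bottom track if any transect lacks the
+                # reference selected in WinRiver 2 (e.g., missing GPS data)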
+                for transect in self.transects:
+                    if getattr(transect.boat_vel, target) is None:
+                        reference = 'BT'
+
+            # Convert to earth coordinates
+            for transect_idx, transect in enumerate(self.transects):
+                # Convert to earth coordinates
+                transect.change_coord_sys(new_coord_sys='Earth')
+
+                # Set navigation reference
+                transect.change_nav_reference(update=False, new_nav_ref=reference)
+
+                # Apply WR2 thresholds
+                self.thresholds_trdi(transect, threshold_settings)
+
+                # Apply boat interpolations
+                transect.boat_interpolations(update=False,
+                                             target='BT',
+                                             method='None')
+                if transect.gps is not None:
+                    transect.boat_interpolations(update=False,
+                                                 target='GPS',
+                                                 method='HoldLast')
+
+                # Update water data for changes in boat velocity
+                transect.update_water()
+
+                # Filter water data
+                transect.w_vel.apply_filter(transect=transect, wt_depth=True)
+
+                # Interpolate water data
+                transect.w_vel.apply_interpolation(transect=transect,
+                                                   ens_interp='None',
+                                                   cells_interp='None')
+
+                # Apply speed of sound computations as required
+                mmt_sos_method = mmt.transects[transect_idx].active_config[
+                    'Proc_Speed_of_Sound_Correction']
+
+                # Speed of sound computed based on user supplied values
+                if mmt_sos_method == 1:
+                    salinity = mmt.transects[transect_idx].active_config['Proc_Salinity']
+                    transect.change_sos(parameter='salinity', selected='user', salinity=salinity)
+                elif mmt_sos_method == 2:
+                    # Speed of sound set by user
+                    speed = mmt.transects[transect_idx].active_config[
+                        'Proc_Fixed_Speed_Of_Sound']
+                    transect.change_sos(parameter='sosSrc',
+                                        selected='user',
+                                        speed=speed)
+
+    def qaqc_trdi(self, mmt):
+        """Processes qaqc test, calibrations, and evaluations
+        
+        Parameters
+        ----------
+        mmt: MMTtrdi
+            Object of MMT_TRDI
+        """
+
+        # ADCP Test
+        if 'RG_Test' in mmt.qaqc:
+            for n in range(len(mmt.qaqc['RG_Test'])):
+                p_m = PreMeasurement()
+                p_m.populate_data(mmt.qaqc['RG_Test_TimeStamp'][n],
+                                  mmt.qaqc['RG_Test'][n], 'TST')
+                self.system_tst.append(p_m)
+
+        # Compass calibration
+        if 'Compass_Calibration' in mmt.qaqc:
+            for n in range(len(mmt.qaqc['Compass_Calibration'])):
+                cc = PreMeasurement()
+                cc.populate_data(mmt.qaqc['Compass_Calibration_TimeStamp'][n],
+                                 mmt.qaqc['Compass_Calibration'][n], 'TCC')
+                self.compass_cal.append(cc)
+            
+        # Compass evaluation
+        if 'Compass_Evaluation' in mmt.qaqc:
+            for n in range(len(mmt.qaqc['Compass_Evaluation'])):
+                ce = PreMeasurement()
+                ce.populate_data(mmt.qaqc['Compass_Evaluation_TimeStamp'][n],
+                                 mmt.qaqc['Compass_Evaluation'][n], 'TCC')
+                self.compass_eval.append(ce)
+
+        # Check for moving-bed tests
+        if len(mmt.mbt_transects) > 0:
+            
+            # Create transect objects
+            transects = self.allocate_transects(mmt, transect_type='MB')
+
+            # Process moving-bed tests
+            if len(transects) > 0:
+                self.mb_tests = []
+                for n in range(len(transects)):
+
+                    # Create moving-bed test object
+                    mb_test = MovingBedTests()
+                    mb_test.populate_data('TRDI', transects[n],
+                                          mmt.mbt_transects[n].moving_bed_type)
+                    
+                    # Save notes from mmt files in comments
+                    notes = getattr(mmt.mbt_transects[n], 'Notes')
+                    for note in notes:
+                        note_text = ' File: ' + note['NoteFileNo'] + ' ' \
+                                    + note['NoteDate'] + ': ' + note['NoteText']
+                        self.comments.append(note_text)
+
+                    self.mb_tests.append(mb_test)
+
+    @staticmethod
+    def thresholds_trdi(transect, settings):
+        """Retrieve and apply manual filter settings from mmt file
+
+        Parameters
+        ----------
+        transect: TransectData
+            Object of TransectData
+        settings: dict
+            Threshold settings computed before processing
+        """
+
+        # Apply WT settings
+        transect.w_vel.apply_filter(transect, **settings['wt_settings'])
+
+        # Apply BT settings
+        transect.boat_vel.bt_vel.apply_filter(transect, **settings[
+            'bt_settings'])
+
+        # Apply depth settings
+        transect.depths.bt_depths.valid_data_method = settings[
+            'depth_settings']['depth_valid_method']
+        transect.depths.depth_filter(transect=transect, filter_method=settings[
+            'depth_settings']['depth_screening'])
+        transect.depths.bt_depths.compute_avg_bt_depth(method=settings[
+            'depth_settings']['depth_weighting'])
+
+        # Apply composite depths as per setting stored in transect
+        # from TransectData
+        transect.depths.composite_depths(transect)
+
+    def load_sontek(self, fullnames):
+        """Coordinates reading of all SonTek data files.
+
+        Parameters
+        ----------
+        fullnames: list
+            File names including path for all discharge transects converted
+            to Matlab files.
+        """
+
+        # Initialize variables
+        rsdata = None
+        pathname = None
+
+        for file in fullnames:
+            # Read data file
+            rsdata = MatSonTek(file)
+            pathname, file_name = os.path.split(file)
+
+            if hasattr(rsdata, 'BottomTrack'):
+                # Create transect objects for each discharge transect
+                self.transects.append(TransectData())
+                self.transects[-1].sontek(rsdata, file_name)
+            else:
+                self.comments.append(file + ' is incomplete and is not included in measurement processing')
+
+        # Identify checked transects
+        self.checked_transect_idx = self.checked_transects(self)
+
+        # Site information pulled from last file
+        if hasattr(rsdata, 'SiteInfo'):
+            if hasattr(rsdata.SiteInfo, 'Site_Name'):
+                if len(rsdata.SiteInfo.Site_Name) > 0:
+                    self.station_name = rsdata.SiteInfo.Site_Name
+                else:
+                    self.station_name = ''
+            if hasattr(rsdata.SiteInfo, 'Station_Number'):
+                if len(rsdata.SiteInfo.Station_Number) > 0:
+                    self.station_number = rsdata.SiteInfo.Station_Number
+                else:
+                    self.station_number = ''
+            if hasattr(rsdata.SiteInfo, 'Meas_Number'):
+                if len(rsdata.SiteInfo.Meas_Number) > 0:
+                    self.meas_number = rsdata.SiteInfo.Meas_Number
+            if hasattr(rsdata.SiteInfo, 'Party'):
+                if len(rsdata.SiteInfo.Party) > 0:
+                    self.persons = rsdata.SiteInfo.Party
+
+            if hasattr(rsdata.SiteInfo, 'Comments'):
+                if len(rsdata.SiteInfo.Comments) > 0:
+                    self.comments.append('RS Comments: ' + rsdata.SiteInfo.Comments)
+
+            # Although units imply meters, the data are actually stored as m / 10,000
+            if hasattr(rsdata.Setup, 'startGaugeHeight'):
+                self.stage_start_m = rsdata.Setup.startGaugeHeight / 10000.
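+                # e.g., a stored startGaugeHeight of 21500 represents 2.15 m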
+
+            if hasattr(rsdata.Setup, 'endGaugeHeight'):
+                self.stage_end_m = rsdata.Setup.endGaugeHeight / 10000.
+
+            self.stage_meas_m = (self.stage_start_m + self.stage_end_m) / 2.
+
+        self.qaqc_sontek(pathname)
+
+        for transect in self.transects:
+            transect.change_coord_sys(new_coord_sys='Earth')
+            transect.change_nav_reference(
+                update=False,
+                new_nav_ref=self.transects[self.checked_transect_idx[0]].boat_vel.selected)
+            transect.boat_interpolations(update=False,
+                                         target='BT',
+                                         method='Hold9')
+            transect.boat_interpolations(update=False,
+                                         target='GPS',
+                                         method='None')
+            transect.apply_averaging_method(setting='Simple')
+            transect.process_depths(update=False,
+                                    interpolation_method='HoldLast')
+            transect.update_water()
+
+            # Filter water data
+            transect.w_vel.apply_filter(transect=transect, wt_depth=True)
+
+            # Interpolate water data
+            transect.w_vel.apply_interpolation(transect=transect,
+                                               ens_interp='None',
+                                               cells_interp='None')
+            transect.w_vel.apply_interpolation(transect=transect,
+                                               ens_interp='None',
+                                               cells_interp='TRDI')
+
+            if transect.sensors.speed_of_sound_mps.selected == 'user':
+                transect.sensors.speed_of_sound_mps.selected = 'internal'
+                transect.change_sos(parameter='sosSrc',
+                                    selected='user',
+                                    speed=transect.sensors.speed_of_sound_mps.user.data)
+            elif transect.sensors.salinity_ppt.selected == 'user':
+                transect.change_sos(parameter='salinity',
+                                    selected='user',
+                                    salinity=transect.sensors.salinity_ppt.user.data)
+            elif transect.sensors.temperature_deg_c.selected == 'user':
+                transect.change_sos(parameter='temperature',
+                                    selected='user',
+                                    temperature=transect.sensors.temperature_deg_c.user.data)
+
+    def qaqc_sontek(self, pathname):
+        """Reads and stores system tests, compass calibrations,
+        and moving-bed tests.
+
+        Parameters
+        ----------
+        pathname: str
+            Path to discharge transect files.
+        """
+
+        # Compass Calibration
+        compass_cal_folder = os.path.join(pathname, 'CompassCal')
+        time_stamp = None
+        if os.path.isdir(compass_cal_folder):
+            for file in os.listdir(compass_cal_folder):
+                valid_file = False
+                # G3 compasses
+                if file.endswith('.ccal'):
+                    time_stamp = file.split('_')
+                    time_stamp = time_stamp[0] + '_' + time_stamp[1]
+                    valid_file = True
+
+                # G2 compasses
+                elif file.endswith('.txt'):
+                    prefix, _ = os.path.splitext(file)
+                    time_stamp = prefix.split('l')[1]
+                    valid_file = True
+
+                if valid_file:
+                    with open(os.path.join(compass_cal_folder, file)) as f:
+                        cal_data = f.read()
+                        cal = PreMeasurement()
+                        cal.populate_data(time_stamp, cal_data, 'SCC')
+                        self.compass_cal.append(cal)
+
+        # System Test
+        system_test_folder = os.path.join(pathname, 'SystemTest')
+        if os.path.isdir(system_test_folder):
+            for file in os.listdir(system_test_folder):
+                # Find system test files.
+                if file.startswith('SystemTest'):
+                    with open(os.path.join(system_test_folder, file)) as f:
+                        test_data = f.read()
+                        test_data = test_data.replace('\x00', '')
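+                    # Characters 10:24 of the file name hold the date-time
+                    # stamp that follows the 'SystemTest' prefix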
+                    time_stamp = file[10:24]
+                    sys_test = PreMeasurement()
+                    sys_test.populate_data(time_stamp=time_stamp,
+                                           data_in=test_data,
+                                           data_type='SST')
+                    self.system_tst.append(sys_test)
+
+        # Moving-bed tests
+        self.sontek_moving_bed_tests(pathname)
+
+    def sontek_moving_bed_tests(self, pathname):
+        """Locates and processes SonTek moving-bed tests.
+
+        Searches the pathname for Matlab files that start with Loop or SMBA.
+        Processes these files as moving-bed tests.
+
+        Parameters
+        ----------
+        pathname: str
+            Path to discharge transect files.
+        """
+        for file in os.listdir(pathname):
+            # Find moving-bed test files.
+            if file.endswith('.mat'):
+                # Process Loop test
+                if file.lower().startswith('loop'):
+                    self.mb_tests.append(MovingBedTests())
+                    self.mb_tests[-1].populate_data(source='SonTek',
+                                                    file=os.path.join(pathname, file),
+                                                    test_type='Loop')
+                # Process Stationary test
+                elif file.lower().startswith('smba'):
+                    self.mb_tests.append(MovingBedTests())
+                    self.mb_tests[-1].populate_data(source='SonTek',
+                                                    file=os.path.join(pathname, file),
+                                                    test_type='Stationary')
+
+    def load_qrev_mat(self, mat_data):
+        """Loads and coordinates the mapping of existing QRev Matlab files
+        into Python instance variables.
+
+        Parameters
+        ----------
+        mat_data: dict
+            Dictionary containing Matlab data.
+        """
+
+        meas_struct = mat_data['meas_struct']
+
+        # Assign data from meas_struct to associated instance variables
+        # in Measurement and associated objects.
+        if len(meas_struct.stationName) > 0:
+            self.station_name = meas_struct.stationName
+        if len(meas_struct.stationNumber) > 0:
+            self.station_number = meas_struct.stationNumber
+        if hasattr(meas_struct, 'meas_number'):
+            if len(meas_struct.meas_number) == 0:
+                self.meas_number = ''
+            else:
+                self.meas_number = meas_struct.meas_number
+        if hasattr(meas_struct, 'persons'):
+            if len(meas_struct.persons) == 0:
+                self.persons = ''
+            else:
+                self.persons = meas_struct.persons
+        if hasattr(meas_struct, 'stage_start_m'):
+            self.stage_start_m = meas_struct.stage_start_m
+        if hasattr(meas_struct, 'stage_end_m'):
+            self.stage_end_m = meas_struct.stage_end_m
+        if hasattr(meas_struct, 'stage_meas_m'):
+            self.stage_meas_m = meas_struct.stage_meas_m
+        self.processing = meas_struct.processing
+        if type(meas_struct.comments) == np.ndarray:
+            self.comments = meas_struct.comments.tolist()
+
+            # Needed to handle comments with blank lines
+            for n, comment in enumerate(self.comments):
+                if type(comment) is not str:
+                    new_comment = ''
+                    for item in comment:
+                        if len(item.strip()) > 0:
+                            new_comment = new_comment + item
+                        else:
+                            new_comment = new_comment + '\n'
+                    self.comments[n] = new_comment
+        else:
+            self.comments = [meas_struct.comments]
+
+        # Check to make sure all comments are str
+        for n, comment in enumerate(self.comments):
+            if type(comment) is np.ndarray:
+                # Assign through the list index; rebinding the loop variable
+                # would not modify self.comments
+                self.comments[n] = np.array2string(comment)
+
+        if hasattr(meas_struct, 'userRating'):
+            self.user_rating = meas_struct.userRating
+        else:
+            self.user_rating = ''
+
+        self.initial_settings = vars(meas_struct.initialSettings)
+
+        # Update initial settings to agree with Python definitions
+        nav_dict = {'btVel': 'bt_vel', 'ggaVel': 'gga_vel', 'vtgVel': 'vtg_vel',
+                    'bt_vel': 'bt_vel', 'gga_vel': 'gga_vel', 'vtg_vel': 'vtg_vel'}
+        self.initial_settings['NavRef'] = nav_dict[self.initial_settings['NavRef']]
+
+        on_off_dict = {'Off': False, 'On': True, 0: False, 1: True}
+        self.initial_settings['WTwtDepthFilter'] = on_off_dict[self.initial_settings['WTwtDepthFilter']]
+
+        if type(self.initial_settings['WTsnrFilter']) is np.ndarray:
+            self.initial_settings['WTsnrFilter'] = 'Off'
+
+        nav_dict = {'btDepths': 'bt_depths', 'vbDepths': 'vb_depths', 'dsDepths': 'ds_depths',
+                    'bt_depths': 'bt_depths', 'vb_depths': 'vb_depths', 'ds_depths': 'ds_depths'}
+        self.initial_settings['depthReference'] = nav_dict[self.initial_settings['depthReference']]
+
+        self.ext_temp_chk = {'user': meas_struct.extTempChk.user,
+                             'units': meas_struct.extTempChk.units,
+                             'adcp': meas_struct.extTempChk.adcp}
+
+        if hasattr(meas_struct.extTempChk, 'user_orig'):
+            self.ext_temp_chk['user_orig'] = meas_struct.extTempChk.user_orig
+        else:
+            self.ext_temp_chk['user_orig'] = meas_struct.extTempChk.user
+
+        if hasattr(meas_struct.extTempChk, 'adcp_orig'):
+            self.ext_temp_chk['adcp_orig'] = meas_struct.extTempChk.adcp_orig
+        else:
+            self.ext_temp_chk['adcp_orig'] = meas_struct.extTempChk.adcp
+
+        # Replace non-numeric temperature entries (str or ndarray) with NaN
+        for key in ['user', 'adcp', 'user_orig', 'adcp_orig']:
+            if isinstance(self.ext_temp_chk[key], (str, np.ndarray)):
+                self.ext_temp_chk[key] = np.nan
+
+        self.system_tst = PreMeasurement.sys_test_qrev_mat_in(meas_struct)
+
+        # If there is no compass cal, compassCal is a mat_struct with len(data) = 0
+        try:
+            self.compass_cal = PreMeasurement.cc_qrev_mat_in(meas_struct)
+        except AttributeError:
+            self.compass_cal = []
+
+        try:
+            self.compass_eval = PreMeasurement.ce_qrev_mat_in(meas_struct)
+        except AttributeError:
+            self.compass_eval = []
+
+        self.transects = TransectData.qrev_mat_in(meas_struct)
+        self.mb_tests = MovingBedTests.qrev_mat_in(meas_struct)
+        self.extrap_fit = ComputeExtrap()
+        self.extrap_fit.populate_from_qrev_mat(meas_struct)
+
+        self.discharge = QComp.qrev_mat_in(meas_struct)
+
+        # For compatibility with older QRev.mat files that didn't have this feature
+        for n in range(len(self.transects)):
+            if len(self.discharge[n].left_idx) == 0:
+                self.discharge[n].left_idx = self.discharge[n].edge_ensembles(edge_loc='left',
+                                                                              transect=self.transects[n])
+
+            if len(self.discharge[n].right_idx) == 0:
+                self.discharge[n].right_idx = self.discharge[n].edge_ensembles(edge_loc='right',
+                                                                               transect=self.transects[n])
+
+            if type(self.discharge[n].correction_factor) is list:
+                self.discharge[n].correction_factor = self.discharge[n].total / self.discharge[n].total_uncorrected
+
+        # Identify checked transects
+        self.checked_transect_idx = self.checked_transects(self)
+
+        if hasattr(meas_struct, 'observed_no_moving_bed'):
+            self.observed_no_moving_bed = meas_struct.observed_no_moving_bed
+        else:
+            self.observed_no_moving_bed = False
+
+        self.uncertainty = Uncertainty()
+        self.uncertainty.populate_from_qrev_mat(meas_struct)
+        self.qa = QAData(self, mat_struct=meas_struct, compute=False)
+        if hasattr(meas_struct, 'run_oursin'):
+            self.run_oursin = meas_struct.run_oursin
+        else:
+            self.run_oursin = False
+        if hasattr(meas_struct, 'oursin'):
+            self.oursin = Oursin()
+            self.oursin.populate_from_qrev_mat(meas_struct=meas_struct)
+        else:
+            self.oursin = None
+
+        self.use_weighted = self.extrap_fit.use_weighted
+        self.use_measurement_thresholds = \
+            self.transects[self.checked_transect_idx[0]].boat_vel.bt_vel.use_measurement_thresholds
+
+    def create_filter_composites(self):
+        """Create composite for water and bottom track difference and vertical velocities and compute the thresholds
+        using these composites.
+        """
+
+        # Initialize dictionaries
+        wt_d = {}
+        wt_w = {}
+        bt_d = {}
+        bt_w = {}
+
+        # Create composite arrays for all checked transects
+        for transect in self.transects:
+            if transect.checked:
+                bt_freq = transect.boat_vel.bt_vel.frequency_khz.astype(int).astype(str)
+                freq = np.unique(bt_freq)
+                for f in freq:
+                    if f in bt_d:
+                        bt_d[f] = np.hstack((bt_d[f], transect.boat_vel.bt_vel.d_mps[bt_freq == f]))
+                        bt_w[f] = np.hstack((bt_w[f], transect.boat_vel.bt_vel.w_mps[bt_freq == f]))
+                    else:
+                        bt_d[f] = transect.boat_vel.bt_vel.d_mps[bt_freq == f]
+                        bt_w[f] = transect.boat_vel.bt_vel.w_mps[bt_freq == f]
+
+                if transect.w_vel.ping_type.size > 0:
+                    # Identify the ping types used in the transect
+                    p_types = np.unique(transect.w_vel.ping_type)
+                    # Composite for each ping type
+                    for p_type in p_types:
+                        if p_type in wt_d:
+                            wt_d[p_type] = np.hstack(
+                                (wt_d[p_type], transect.w_vel.d_mps[np.logical_and(transect.w_vel.ping_type == p_type,
+                                                                                   transect.w_vel.cells_above_sl)]))
+                            wt_w[p_type] = np.hstack(
+                                (wt_w[p_type], transect.w_vel.w_mps[np.logical_and(transect.w_vel.ping_type == p_type,
+                                                                                   transect.w_vel.cells_above_sl)]))
+                        else:
+                            wt_d[p_type] = transect.w_vel.d_mps[np.logical_and(transect.w_vel.ping_type == p_type,
+                                                                               transect.w_vel.cells_above_sl)]
+                            wt_w[p_type] = transect.w_vel.w_mps[np.logical_and(transect.w_vel.ping_type == p_type,
+                                                                               transect.w_vel.cells_above_sl)]
+                else:
+                    p_types = np.array(['U'])
+                    for p_type in p_types:
+                        if p_type in wt_d:
+                            wt_d[p_type] = np.hstack((wt_d[p_type],
+                                                      transect.w_vel.d_mps[transect.w_vel.cells_above_sl]))
+                            wt_w[p_type] = np.hstack((wt_w[p_type],
+                                                      transect.w_vel.w_mps[transect.w_vel.cells_above_sl]))
+                        else:
+                            wt_d[p_type] = transect.w_vel.d_mps[transect.w_vel.cells_above_sl]
+                            wt_w[p_type] = transect.w_vel.w_mps[transect.w_vel.cells_above_sl]
+
+        # Compute thresholds based on composite arrays
+
+        # Water track
+        wt_d_meas_thresholds = {}
+        wt_w_meas_thresholds = {}
+        for p_type in wt_d.keys():
+            wt_d_meas_thresholds[p_type] = WaterData.meas_iqr_filter(wt_d[p_type], multiplier=5)
+            wt_w_meas_thresholds[p_type] = WaterData.meas_iqr_filter(wt_w[p_type], multiplier=5)
+
+        # Bottom track
+        bt_d_meas_thresholds = {}
+        bt_w_meas_thresholds = {}
+        for freq in bt_d.keys():
+            bt_d_meas_thresholds[freq] = BoatData.iqr_filter(bt_d[freq])
+            bt_w_meas_thresholds[freq] = BoatData.iqr_filter(bt_w[freq])
+
+        # Assign threshold to each transect
+        for transect in self.transects:
+            transect.w_vel.d_meas_thresholds = wt_d_meas_thresholds
+            transect.w_vel.w_meas_thresholds = wt_w_meas_thresholds
+            transect.boat_vel.bt_vel.d_meas_thresholds = bt_d_meas_thresholds
+            transect.boat_vel.bt_vel.w_meas_thresholds = bt_w_meas_thresholds
+
+        if len(self.mb_tests) > 0:
+            for test in self.mb_tests:
+                transect = test.transect
+                transect.w_vel.d_meas_thresholds = wt_d_meas_thresholds
+                transect.w_vel.w_meas_thresholds = wt_w_meas_thresholds
+                transect.boat_vel.bt_vel.d_meas_thresholds = bt_d_meas_thresholds
+                transect.boat_vel.bt_vel.w_meas_thresholds = bt_w_meas_thresholds
+
+    @staticmethod
+    def set_num_beam_wt_threshold_trdi(mmt_transect):
+        """Get number of beams to use in processing for WT from mmt file
+        
+        Parameters
+        ----------
+        mmt_transect: MMT_Transect
+            Object of MMT_Transect
+        
+        Returns
+        -------
+        num_beam_wt_out: int
+            Number of beams to use in water track processing
+        """
+
+        use_3_beam_wt = mmt_transect.active_config['Proc_Use_3_Beam_WT']
+        if use_3_beam_wt == 0:
+            num_beam_wt_out = 4
+        else:
+            num_beam_wt_out = 3
+            
+        return num_beam_wt_out
+
+    @staticmethod
+    def set_num_beam_bt_threshold_trdi(mmt_transect):
+        """Get number of beams to use in processing for BT from mmt file
+
+        Parameters
+        ----------
+        mmt_transect: MMT_Transect
+            Object of MMT_Transect
+
+        Returns
+        -------
+        num_beam_bt_out: int
+            Number of beams to use in bottom track processing
+        """
+
+        use_3_beam_bt = mmt_transect.active_config['Proc_Use_3_Beam_BT']
+        if use_3_beam_bt == 0:
+            num_beam_bt_out = 4
+        else:
+            num_beam_bt_out = 3
+
+        return num_beam_bt_out
+
+    @staticmethod
+    def set_depth_weighting_trdi(mmt_transect):
+        """Get the average depth method from mmt
+        
+        Parameters
+        ----------
+        mmt_transect: MMT_Transect
+            Object of MMT_Transect
+        
+        Returns
+        -------
+        depth_weighting_setting: str
+            Method to compute mean depth
+        """
+
+        depth_weighting = mmt_transect.active_config['Proc_Use_Weighted_Mean_Depth']
+        
+        if depth_weighting == 0:
+            depth_weighting_setting = 'Simple'
+        else:
+            depth_weighting_setting = 'IDW'
+
+        return depth_weighting_setting
+
+    @staticmethod
+    def set_depth_screening_trdi(mmt_transect):
+        """Get the depth screening setting from mmt
+        
+        Parameters
+        ----------
+        mmt_transect: MMT_Transect
+            Object of MMT_Transect
+        
+        Returns
+        -------
+        depth_screening_setting: str
+            Type of depth screening to use
+        """
+
+        depth_screen = mmt_transect.active_config['Proc_Screen_Depth']
+        if depth_screen == 0:
+            depth_screening_setting = 'None'
+        else:
+            depth_screening_setting = 'TRDI'
+        
+        return depth_screening_setting
+        
+    def change_sos(self, transect_idx=None, parameter=None, salinity=None, temperature=None, selected=None, speed=None):
+        """Applies a change in speed of sound to one or all transects
+        and update the discharge and uncertainty computations
+        
+        Parameters
+        ----------
+        transect_idx: int
+            Index of transect to change
+        parameter: str
+            Speed of sound parameter to be changed ('temperatureSrc', 'temperature', 'salinity', 'sosSrc')
+        salinity: float
+            Salinity in ppt
+        temperature: float
+            Temperature in deg C
+        selected: str
+            Selected speed of sound ('internal', 'computed', 'user') or temperature ('internal', 'user')
+        speed: float
+            Manually supplied speed of sound for 'user' source
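+
+        Examples
+        --------
+        Hypothetical call applying a user temperature to all transects (sketch only):
+
+            meas.change_sos(parameter='temperature', selected='user', temperature=18.5)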
+        """
+        
+        s = self.current_settings()
+        if transect_idx is None:
+            # Apply to all transects
+            for transect in self.transects:
+                transect.change_sos(parameter=parameter,
+                                    salinity=salinity,
+                                    temperature=temperature,
+                                    selected=selected,
+                                    speed=speed)
+        else:
+            # Apply to a single transect
+            self.transects[transect_idx].change_sos(parameter=parameter,
+                                                    salinity=salinity,
+                                                    temperature=temperature,
+                                                    selected=selected,
+                                                    speed=speed)
+        # Reapply settings to newly adjusted data
+        self.apply_settings(s)
+
+    def change_magvar(self, magvar, transect_idx=None):
+        """Coordinates changing the magnetic variation.
+
+        Parameters
+        ----------
+        magvar: float
+            Magnetic variation
+        transect_idx: int
+            Index of transect to which the change is applied. None is all transects.
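+
+        Examples
+        --------
+        Hypothetical call applying a magnetic variation of -3.5 degrees to all transects:
+
+            meas.change_magvar(magvar=-3.5)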
+        """
+
+        # Get current settings
+        s = self.current_settings()
+
+        # Initialize variables
+        n_transects = len(self.transects)
+        recompute = False
+        n = 0
+
+        # If the internal compass is used, a recompute is necessary
+        while n < n_transects and recompute is False:
+            if self.transects[n].sensors.heading_deg.selected == 'internal':
+                recompute = True
+            n += 1
+
+        # Apply change
+        if transect_idx is None:
+            # Apply change to all transects
+            for transect in self.transects:
+                transect.change_mag_var(magvar)
+
+            # Apply change to moving-bed tests
+            if len(self.mb_tests) > 0:
+                for test in self.mb_tests:
+                    old_magvar = test.transect.sensors.heading_deg.internal.mag_var_deg
+                    test.transect.change_mag_var(magvar)
+                    test.magvar_change(magvar, old_magvar)
+        else:
+            self.transects[transect_idx].change_mag_var(magvar)
+
+        # Recompute if specified
+        if recompute:
+            self.apply_settings(s)
+        else:
+            self.qa.compass_qa(self)
+            self.qa.check_compass_settings(self)
+
+    def change_h_offset(self, h_offset, transect_idx=None):
+        """Coordinates changing the heading offset for external heading.
+
+        Parameters
+        ----------
+        h_offset: float
+            Heading offset
+        transect_idx: int
+            Index of transect to which the change is applied. None is all transects.
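+
+        Examples
+        --------
+        Hypothetical call applying a 1.5 degree offset to the external heading of transect 0:
+
+            meas.change_h_offset(h_offset=1.5, transect_idx=0)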
+        """
+
+        # Get current settings
+        s = self.current_settings()
+
+        # Initialize variables
+        n_transects = len(self.transects)
+        recompute = False
+        n = 0
+
+        # If the external compass is used, a recompute is necessary
+        while n < n_transects and recompute is False:
+            if self.transects[n].sensors.heading_deg.selected == 'external':
+                recompute = True
+            n += 1
+
+        # Apply change
+        if transect_idx is None:
+            for transect in self.transects:
+                transect.change_offset(h_offset)
+
+            # Apply change to moving-bed tests
+            if len(self.mb_tests) > 0:
+                for test in self.mb_tests:
+                    old_h_offset = test.transect.sensors.heading_deg.external.align_correction_deg
+                    test.transect.change_offset(h_offset)
+                    test.h_offset_change(h_offset, old_h_offset)
+        else:
+            self.transects[transect_idx].change_offset(h_offset)
+
+        # Recompute if specified
+        if recompute:
+            self.apply_settings(s)
+        else:
+            self.qa.compass_qa(self)
+            self.qa.check_compass_settings(self)
+
+    def change_h_source(self, h_source, transect_idx=None):
+        """Coordinates changing the heading source.
+
+        Parameters
+        ----------
+        h_source: str
+            Heading source (internal or external)
+        transect_idx: int
+            Index of transect to which the change is applied. None is all transects.
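+
+        Examples
+        --------
+        Hypothetical call switching all transects to the external heading source:
+
+            meas.change_h_source(h_source='external')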
+        """
+
+        # Get current settings
+        s = self.current_settings()
+
+        # Apply change
+        if transect_idx is None:
+            for transect in self.transects:
+                transect.change_heading_source(h_source)
+
+            # Apply change to moving-bed tests
+            if len(self.mb_tests) > 0:
+                for test in self.mb_tests:
+                    test.transect.change_heading_source(h_source)
+                    test.process_mb_test(source=test.transect.adcp.manufacturer)
+                settings = self.current_settings()
+                select = settings['NavRef']
+                ref = None
+                if select == 'bt_vel':
+                    ref = 'BT'
+                elif select == 'gga_vel':
+                    ref = 'GGA'
+                elif select == 'vtg_vel':
+                    ref = 'VTG'
+                self.mb_tests = MovingBedTests.auto_use_2_correct(
+                    moving_bed_tests=self.mb_tests, boat_ref=ref)
+
+        else:
+            self.transects[transect_idx].change_heading_source(h_source)
+
+        self.apply_settings(s)
+
+    def change_draft(self, draft, transect_idx=None):
+        """Coordinates changing the ADCP draft.
+
+        Parameters
+        ----------
+        draft: float
+            Draft of ADCP in m
+        transect_idx: int
+            Index of transect to which the change is applied. None is all transects.
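+
+        Examples
+        --------
+        Hypothetical call setting a 0.09 m draft on all transects:
+
+            meas.change_draft(draft=0.09)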
+        """
+
+        # Get current settings
+        s = self.current_settings()
+
+        # Apply change
+        if transect_idx is None:
+            for transect in self.transects:
+                transect.change_draft(draft)
+        else:
+            self.transects[transect_idx].change_draft(draft)
+
+        self.apply_settings(s)
+
+    @staticmethod
+    def h_external_valid(meas):
+        """Determine if valid external heading data is included in the measurement.
+
+        Parameters
+        ----------
+        meas: Measurement
+            Object of Measurement
+        """
+
+        external = False
+        for transect in meas.transects:
+            if transect.sensors.heading_deg.external is not None:
+                external = True
+                break
+        return external
+
+    def apply_settings(self, settings, force_abba=True):
+        """Applies reference, filter, and interpolation settings.
+        
+        Parameters
+        ----------
+        settings: dict
+            Dictionary of reference, filter, and interpolation settings
+        force_abba: bool
+            Allows the above, below, before, after interpolation to be applied even when the data use another approach.
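+
+        Examples
+        --------
+        Typical round trip (sketch only; assumes `meas` is a loaded Measurement):
+
+            s = meas.current_settings()
+            s['NavRef'] = 'gga_vel'
+            meas.apply_settings(s)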
+        """
+
+        self.use_ping_type = settings['UsePingType']
+
+        # If SonTek data does not have ping type identified, determine ping types
+        if self.transects[0].w_vel.ping_type.size == 1 and self.transects[0].adcp.manufacturer == 'SonTek':
+            for transect in self.transects:
+                ping_type = TransectData.sontek_ping_type(transect.w_vel.corr, transect.w_vel.frequency)
+                transect.w_vel.ping_type = np.tile(np.array([ping_type]), (transect.w_vel.corr.shape[1], 1))
+
+        # If the measurement thresholds have not been computed, compute them
+        if not self.transects[0].w_vel.d_meas_thresholds:
+            self.create_filter_composites()
+
+        # Apply settings to moving-bed tests:
+        if len(self.mb_tests) > 0:
+            self.apply_settings_to_movingbed(settings, force_abba=force_abba)
+
+        # Apply settings to discharge transects
+        for transect in self.transects:
+
+            if not settings['UsePingType']:
+                transect.w_vel.ping_type = np.tile('U', transect.w_vel.ping_type.shape)
+                transect.boat_vel.bt_vel.frequency_khz = np.tile(0, transect.boat_vel.bt_vel.frequency_khz.shape)
+
+            # Moving-boat ensembles
+            if 'Processing' in settings.keys():
+                transect.change_q_ensembles(proc_method=settings['Processing'])
+                self.processing = settings['Processing']
+
+            # Navigation reference
+            if transect.boat_vel.selected != settings['NavRef']:
+                transect.change_nav_reference(update=False, new_nav_ref=settings['NavRef'])
+                if len(self.mb_tests) > 0:
+                    self.mb_tests = MovingBedTests.auto_use_2_correct(
+                        moving_bed_tests=self.mb_tests,
+                        boat_ref=settings['NavRef'])
+
+            # Changing the nav reference applies the current setting for
+            # Composite tracks, check to see if a change is needed
+            if transect.boat_vel.composite != settings['CompTracks']:
+                transect.composite_tracks(update=False, setting=settings['CompTracks'])
+
+            # Set difference velocity BT filter
+            bt_kwargs = {}
+            if settings['BTdFilter'] == 'Manual':
+                bt_kwargs['difference'] = settings['BTdFilter']
+                bt_kwargs['difference_threshold'] = settings['BTdFilterThreshold']
+            else:
+                bt_kwargs['difference'] = settings['BTdFilter']
+
+            # Set vertical velocity BT filter
+            if settings['BTwFilter'] == 'Manual':
+                bt_kwargs['vertical'] = settings['BTwFilter']
+                bt_kwargs['vertical_threshold'] = settings['BTwFilterThreshold']
+            else:
+                bt_kwargs['vertical'] = settings['BTwFilter']
+
+            # Apply beam filter
+            bt_kwargs['beam'] = settings['BTbeamFilter']
+
+            # Apply smooth filter
+            bt_kwargs['other'] = settings['BTsmoothFilter']
+
+            transect.boat_vel.bt_vel.use_measurement_thresholds = settings['UseMeasurementThresholds']
+
+            # Apply BT settings
+            transect.boat_filters(update=False, **bt_kwargs)
+
+            # BT Interpolation
+            transect.boat_interpolations(update=False,
+                                         target='BT',
+                                         method=settings['BTInterpolation'])
+
+            # GPS filter settings
+            if transect.gps is not None:
+                gga_kwargs = {}
+                if transect.boat_vel.gga_vel is not None:
+                    # GGA
+                    gga_kwargs['differential'] = settings['ggaDiffQualFilter']
+                    if settings['ggaAltitudeFilter'] == 'Manual':
+                        gga_kwargs['altitude'] = settings['ggaAltitudeFilter']
+                        gga_kwargs['altitude_threshold'] = settings['ggaAltitudeFilterChange']
+                    else:
+                        gga_kwargs['altitude'] = settings['ggaAltitudeFilter']
+
+                    # Set GGA HDOP Filter
+                    if settings['GPSHDOPFilter'] == 'Manual':
+                        gga_kwargs['hdop'] = settings['GPSHDOPFilter']
+                        gga_kwargs['hdop_max_threshold'] = settings['GPSHDOPFilterMax']
+                        gga_kwargs['hdop_change_threshold'] = settings['GPSHDOPFilterChange']
+                    else:
+                        gga_kwargs['hdop'] = settings['GPSHDOPFilter']
+
+                    gga_kwargs['other'] = settings['GPSSmoothFilter']
+                    # Apply GGA filters
+                    transect.gps_filters(update=False, **gga_kwargs)
+
+                if transect.boat_vel.vtg_vel is not None:
+                    vtg_kwargs = {}
+                    if settings['GPSHDOPFilter'] == 'Manual':
+                        vtg_kwargs['hdop'] = settings['GPSHDOPFilter']
+                        vtg_kwargs['hdop_max_threshold'] = settings['GPSHDOPFilterMax']
+                        vtg_kwargs['hdop_change_threshold'] = settings['GPSHDOPFilterChange']
+                        vtg_kwargs['other'] = settings['GPSSmoothFilter']
+                    else:
+                        vtg_kwargs['hdop'] = settings['GPSHDOPFilter']
+                        vtg_kwargs['other'] = settings['GPSSmoothFilter']
+
+                    # Apply VTG filters
+                    transect.gps_filters(update=False, **vtg_kwargs)
+
+                transect.boat_interpolations(update=False,
+                                             target='GPS',
+                                             method=settings['GPSInterpolation'])
+
+            # Set depth reference
+            transect.set_depth_reference(update=False, setting=settings['depthReference'])
+
+            transect.process_depths(update=True,
+                                    filter_method=settings['depthFilterType'],
+                                    interpolation_method=settings['depthInterpolation'],
+                                    composite_setting=settings['depthComposite'],
+                                    avg_method=settings['depthAvgMethod'],
+                                    valid_method=settings['depthValidMethod'])
+
+            # Set WT difference velocity filter
+            wt_kwargs = {}
+            if settings['WTdFilter'] == 'Manual':
+                wt_kwargs['difference'] = settings['WTdFilter']
+                wt_kwargs['difference_threshold'] = settings['WTdFilterThreshold']
+            else:
+                wt_kwargs['difference'] = settings['WTdFilter']
+
+            # Set WT vertical velocity filter
+            if settings['WTwFilter'] == 'Manual':
+                wt_kwargs['vertical'] = settings['WTwFilter']
+                wt_kwargs['vertical_threshold'] = settings['WTwFilterThreshold']
+            else:
+                wt_kwargs['vertical'] = settings['WTwFilter']
+
+            wt_kwargs['beam'] = settings['WTbeamFilter']
+            wt_kwargs['other'] = settings['WTsmoothFilter']
+            wt_kwargs['snr'] = settings['WTsnrFilter']
+            wt_kwargs['wt_depth'] = settings['WTwtDepthFilter']
+            wt_kwargs['excluded'] = settings['WTExcludedDistance']
+
+            # Data loaded from old QRev.mat files are set to use this new interpolation method. When reprocessing
+            # any data, the interpolation method should be 'abba'.
+            if force_abba:
+                transect.w_vel.interpolate_cells = 'abba'
+                transect.w_vel.interpolate_ens = 'abba'
+                settings['WTEnsInterpolation'] = 'abba'
+                settings['WTCellInterpolation'] = 'abba'
+
+            transect.w_vel.use_measurement_thresholds = settings['UseMeasurementThresholds']
+            if transect.w_vel.ping_type.size == 0 and transect.adcp.manufacturer == 'SonTek':
+                # Correlation and frequency can be used to determine ping type
+                transect.w_vel.ping_type = TransectData.sontek_ping_type(corr=transect.w_vel.corr,
+                                                                         freq=transect.w_vel.frequency)
+
+            transect.w_vel.apply_filter(transect=transect, **wt_kwargs)
+
+            # Edge methods
+            transect.edges.rec_edge_method = settings['edgeRecEdgeMethod']
+            transect.edges.vel_method = settings['edgeVelMethod']
+
+        if settings['UseWeighted'] and not self.use_weighted:
+            if self.extrap_fit.norm_data[-1].weights is None:
+                # Compute normalized data for each transect to obtain the weights
+                self.extrap_fit.process_profiles(self.transects, self.extrap_fit.norm_data[-1].data_type,
+                                                 use_weighted=settings['UseWeighted'])
+
+        self.use_weighted = settings['UseWeighted']
+
+        if len(self.checked_transect_idx) > 0:
+            ref_transect = self.checked_transect_idx[0]
+        else:
+            ref_transect = 0
+
+        if self.transects[ref_transect].w_vel.interpolate_cells == 'TRDI':
+            if self.extrap_fit is None:
+                self.extrap_fit = ComputeExtrap()
+                self.extrap_fit.populate_data(transects=self.transects, compute_sensitivity=False,
+                                              use_weighted=settings['UseWeighted'])
+                self.change_extrapolation(self.extrap_fit.fit_method, compute_q=False,
+                                          use_weighted=settings['UseWeighted'])
+            elif self.extrap_fit.fit_method == 'Automatic':
+                self.change_extrapolation(self.extrap_fit.fit_method, compute_q=False,
+                                          use_weighted=settings['UseWeighted'])
+            else:
+                if 'extrapTop' not in settings.keys():
+                    settings['extrapTop'] = self.extrap_fit.sel_fit[-1].top_method
+                    settings['extrapBot'] = self.extrap_fit.sel_fit[-1].bot_method
+                    settings['extrapExp'] = self.extrap_fit.sel_fit[-1].exponent
+
+            self.change_extrapolation(self.extrap_fit.fit_method,
+                                      top=settings['extrapTop'],
+                                      bot=settings['extrapBot'],
+                                      exp=settings['extrapExp'],
+                                      compute_q=False,
+                                      use_weighted=settings['UseWeighted'])
+
+        for transect in self.transects:
+
+            # Water track interpolations
+            transect.w_vel.apply_interpolation(transect=transect,
+                                               ens_interp=settings['WTEnsInterpolation'],
+                                               cells_interp=settings['WTCellInterpolation'])
+
+        if self.extrap_fit is None:
+            self.extrap_fit = ComputeExtrap()
+            self.extrap_fit.populate_data(transects=self.transects, compute_sensitivity=False,
+                                          use_weighted=settings['UseWeighted'])
+            self.change_extrapolation(self.extrap_fit.fit_method, compute_q=False,
+                                      use_weighted=settings['UseWeighted'])
+        elif self.extrap_fit.fit_method == 'Automatic':
+            self.change_extrapolation(self.extrap_fit.fit_method, compute_q=False,
+                                      use_weighted=settings['UseWeighted'])
+        else:
+            if 'extrapTop' not in settings.keys():
+                settings['extrapTop'] = self.extrap_fit.sel_fit[-1].top_method
+                settings['extrapBot'] = self.extrap_fit.sel_fit[-1].bot_method
+                settings['extrapExp'] = self.extrap_fit.sel_fit[-1].exponent
+
+        self.change_extrapolation(self.extrap_fit.fit_method,
+                                  top=settings['extrapTop'],
+                                  bot=settings['extrapBot'],
+                                  exp=settings['extrapExp'],
+                                  compute_q=False,
+                                  use_weighted=settings['UseWeighted'])
+
+        self.extrap_fit.q_sensitivity = ExtrapQSensitivity()
+        self.extrap_fit.q_sensitivity.populate_data(transects=self.transects,
+                                                    extrap_fits=self.extrap_fit.sel_fit)
+
+        self.compute_discharge()
+
+        self.compute_uncertainty()
+
+    def apply_settings_to_movingbed(self, settings, force_abba=True):
+        """Applies reference, filter, and interpolation settings.
+
+        Parameters
+        ----------
+        settings: dict
+            Dictionary of reference, filter, and interpolation settings
+        force_abba: bool
+            Allows the above, below, before, after interpolation to be applied even when the data use another approach.
+        """
+
+        self.use_ping_type = settings['UsePingType']
+        # If SonTek data does not have ping type identified, determine ping types
+        if self.mb_tests[0].transect.w_vel.ping_type.size == 1 and self.transects[0].adcp.manufacturer == 'SonTek':
+            for test in self.mb_tests:
+                transect = test.transect
+                ping_type = TransectData.sontek_ping_type(transect.w_vel.corr, transect.w_vel.frequency)
+                transect.w_vel.ping_type = np.tile(np.array([ping_type]), (transect.w_vel.corr.shape[1], 1))
+
+        for test in self.mb_tests:
+            transect = test.transect
+
+            if not settings['UsePingType']:
+                transect.w_vel.ping_type = np.tile('U', transect.w_vel.ping_type.shape)
+                transect.boat_vel.bt_vel.frequency_khz = np.tile(0, transect.boat_vel.bt_vel.frequency_khz.shape)
+
+            # Moving-boat ensembles
+            if 'Processing' in settings.keys():
+                transect.change_q_ensembles(proc_method=settings['Processing'])
+                self.processing = settings['Processing']
+
+            # Set difference velocity BT filter
+            bt_kwargs = {}
+            if settings['BTdFilter'] == 'Manual':
+                bt_kwargs['difference'] = settings['BTdFilter']
+                bt_kwargs['difference_threshold'] = settings['BTdFilterThreshold']
+            else:
+                bt_kwargs['difference'] = settings['BTdFilter']
+
+            # Set vertical velocity BT filter
+            if settings['BTwFilter'] == 'Manual':
+                bt_kwargs['vertical'] = settings['BTwFilter']
+                bt_kwargs['vertical_threshold'] = settings['BTwFilterThreshold']
+            else:
+                bt_kwargs['vertical'] = settings['BTwFilter']
+
+            # Apply beam filter
+            bt_kwargs['beam'] = settings['BTbeamFilter']
+
+            # Apply smooth filter
+            bt_kwargs['other'] = settings['BTsmoothFilter']
+
+            transect.boat_vel.bt_vel.use_measurement_thresholds = settings['UseMeasurementThresholds']
+
+            # Apply BT settings
+            transect.boat_filters(update=False, **bt_kwargs)
+
+            # Don't interpolate for stationary tests
+            if test.type == 'Loop':
+                # BT Interpolation
+                transect.boat_interpolations(update=False,
+                                             target='BT',
+                                             method=settings['BTInterpolation'])
+
+            # GPS filter settings
+            if transect.gps is not None:
+                gga_kwargs = {}
+                if transect.boat_vel.gga_vel is not None:
+                    # GGA
+                    gga_kwargs['differential'] = settings['ggaDiffQualFilter']
+                    if settings['ggaAltitudeFilter'] == 'Manual':
+                        gga_kwargs['altitude'] = settings['ggaAltitudeFilter']
+                        gga_kwargs['altitude_threshold'] = settings['ggaAltitudeFilterChange']
+                    else:
+                        gga_kwargs['altitude'] = settings['ggaAltitudeFilter']
+
+                    # Set GGA HDOP Filter
+                    if settings['GPSHDOPFilter'] == 'Manual':
+                        gga_kwargs['hdop'] = settings['GPSHDOPFilter']
+                        gga_kwargs['hdop_max_threshold'] = settings['GPSHDOPFilterMax']
+                        gga_kwargs['hdop_change_threshold'] = settings['GPSHDOPFilterChange']
+                    else:
+                        gga_kwargs['hdop'] = settings['GPSHDOPFilter']
+
+                    gga_kwargs['other'] = settings['GPSSmoothFilter']
+                    # Apply GGA filters
+                    transect.gps_filters(update=False, **gga_kwargs)
+
+                if transect.boat_vel.vtg_vel is not None:
+                    vtg_kwargs = {}
+                    if settings['GPSHDOPFilter'] == 'Manual':
+                        vtg_kwargs['hdop'] = settings['GPSHDOPFilter']
+                        vtg_kwargs['hdop_max_threshold'] = settings['GPSHDOPFilterMax']
+                        vtg_kwargs['hdop_change_threshold'] = settings['GPSHDOPFilterChange']
+                        vtg_kwargs['other'] = settings['GPSSmoothFilter']
+                    else:
+                        vtg_kwargs['hdop'] = settings['GPSHDOPFilter']
+                        vtg_kwargs['other'] = settings['GPSSmoothFilter']
+
+                    # Apply VTG filters
+                    transect.gps_filters(update=False, **vtg_kwargs)
+
+                # Don't interpolate for stationary tests
+                if test.type == 'Loop':
+                    transect.boat_interpolations(update=False,
+                                                 target='GPS',
+                                                 method=settings['GPSInterpolation'])
+
+            # Set depth reference
+            transect.set_depth_reference(update=False, setting=settings['depthReference'])
+            transect.process_depths(update=False,
+                                    filter_method=settings['depthFilterType'],
+                                    interpolation_method=settings['depthInterpolation'],
+                                    composite_setting=settings['depthComposite'],
+                                    avg_method=settings['depthAvgMethod'],
+                                    valid_method=settings['depthValidMethod'])
+
+            # Set WT difference velocity filter
+            wt_kwargs = {}
+            if settings['WTdFilter'] == 'Manual':
+                wt_kwargs['difference'] = settings['WTdFilter']
+                wt_kwargs['difference_threshold'] = settings['WTdFilterThreshold']
+            else:
+                wt_kwargs['difference'] = settings['WTdFilter']
+
+            # Set WT vertical velocity filter
+            if settings['WTwFilter'] == 'Manual':
+                wt_kwargs['vertical'] = settings['WTwFilter']
+                wt_kwargs['vertical_threshold'] = settings['WTwFilterThreshold']
+            else:
+                wt_kwargs['vertical'] = settings['WTwFilter']
+
+            wt_kwargs['beam'] = settings['WTbeamFilter']
+            wt_kwargs['other'] = settings['WTsmoothFilter']
+            wt_kwargs['snr'] = settings['WTsnrFilter']
+            wt_kwargs['wt_depth'] = settings['WTwtDepthFilter']
+            wt_kwargs['excluded'] = settings['WTExcludedDistance']
+
+            # Data loaded from old QRev.mat files are set to use this new interpolation method. When reprocessing
+            # any data, the interpolation method should be 'abba'.
+            if force_abba:
+                transect.w_vel.interpolate_cells = 'abba'
+                transect.w_vel.interpolate_ens = 'abba'
+                settings['WTEnsInterpolation'] = 'abba'
+                settings['WTCellInterpolation'] = 'abba'
+
+            transect.w_vel.use_measurement_thresholds = settings['UseMeasurementThresholds']
+            if transect.w_vel.ping_type.size == 0 and transect.adcp.manufacturer == 'SonTek':
+                # Correlation and frequency can be used to determine ping type
+                transect.w_vel.ping_type = TransectData.sontek_ping_type(corr=transect.w_vel.corr,
+                                                                         freq=transect.w_vel.frequency)
+
+            transect.w_vel.apply_filter(transect=transect, **wt_kwargs)
+
+            transect.w_vel.apply_interpolation(transect=transect,
+                                               ens_interp=settings['WTEnsInterpolation'],
+                                               cells_interp=settings['WTCellInterpolation'])
+
+            test.process_mb_test(source=self.transects[0].adcp.manufacturer)
+
+    def current_settings(self):
+        """Saves the current settings for a measurement. Since all settings
+        in QRev are consistent among all transects in a measurement only the
+        settings from the first transect are saved
+        """
+
+        settings = {}
+
+        if len(self.checked_transect_idx) > 0:
+            ref_transect = self.checked_transect_idx[0]
+        else:
+            ref_transect = 0
+        transect = self.transects[ref_transect]
+        
+        # Navigation reference
+        settings['NavRef'] = transect.boat_vel.selected
+        
+        # Composite tracks
+        settings['CompTracks'] = transect.boat_vel.composite
+        
+        # Water track settings
+        settings['WTbeamFilter'] = transect.w_vel.beam_filter
+        settings['WTdFilter'] = transect.w_vel.d_filter
+        settings['WTdFilterThreshold'] = transect.w_vel.d_filter_thresholds
+        settings['WTwFilter'] = transect.w_vel.w_filter
+        settings['WTwFilterThreshold'] = transect.w_vel.w_filter_thresholds
+        settings['WTsmoothFilter'] = transect.w_vel.smooth_filter
+        settings['WTsnrFilter'] = transect.w_vel.snr_filter
+        settings['WTwtDepthFilter'] = transect.w_vel.wt_depth_filter
+        settings['WTEnsInterpolation'] = transect.w_vel.interpolate_ens
+        settings['WTCellInterpolation'] = transect.w_vel.interpolate_cells
+        settings['WTExcludedDistance'] = transect.w_vel.excluded_dist_m
+        
+        # Bottom track settings
+        settings['BTbeamFilter'] = transect.boat_vel.bt_vel.beam_filter
+        settings['BTdFilter'] = transect.boat_vel.bt_vel.d_filter
+        settings['BTdFilterThreshold'] = transect.boat_vel.bt_vel.d_filter_thresholds
+        settings['BTwFilter'] = transect.boat_vel.bt_vel.w_filter
+        settings['BTwFilterThreshold'] = transect.boat_vel.bt_vel.w_filter_thresholds
+        settings['BTsmoothFilter'] = transect.boat_vel.bt_vel.smooth_filter
+        settings['BTInterpolation'] = transect.boat_vel.bt_vel.interpolate
+        
+        # GPS settings
+
+        gga_present = False
+        for idx in self.checked_transect_idx:
+            if self.transects[idx].boat_vel.gga_vel is not None:
+                gga_present = True
+                transect = self.transects[idx]
+                break
+
+        # GGA settings
+        if gga_present:
+            settings['ggaDiffQualFilter'] = transect.boat_vel.gga_vel.gps_diff_qual_filter
+            settings['ggaAltitudeFilter'] = transect.boat_vel.gga_vel.gps_altitude_filter
+            settings['ggaAltitudeFilterChange'] = \
+                transect.boat_vel.gga_vel.gps_altitude_filter_change
+            settings['GPSHDOPFilter'] = transect.boat_vel.gga_vel.gps_HDOP_filter
+            settings['GPSHDOPFilterMax'] = transect.boat_vel.gga_vel.gps_HDOP_filter_max
+            settings['GPSHDOPFilterChange'] = transect.boat_vel.gga_vel.gps_HDOP_filter_change
+            settings['GPSSmoothFilter'] = transect.boat_vel.gga_vel.smooth_filter
+            settings['GPSInterpolation'] = transect.boat_vel.gga_vel.interpolate
+        else:
+            settings['ggaDiffQualFilter'] = 1
+            settings['ggaAltitudeFilter'] = 'Off'
+            settings['ggaAltitudeFilterChange'] = []
+
+            settings['ggaSmoothFilter'] = 'Off'
+            if 'GPSInterpolation' not in settings.keys():
+                settings['GPSInterpolation'] = 'None'
+            if 'GPSHDOPFilter' not in settings.keys():
+                settings['GPSHDOPFilter'] = 'Off'
+                settings['GPSHDOPFilterMax'] = []
+                settings['GPSHDOPFilterChange'] = []
+            if 'GPSSmoothFilter' not in settings.keys():
+                settings['GPSSmoothFilter'] = 'Off'
+
+        # VTG settings
+        vtg_present = False
+        for idx in self.checked_transect_idx:
+            if self.transects[idx].boat_vel.vtg_vel is not None:
+                vtg_present = True
+                transect = self.transects[idx]
+                break
+
+        if vtg_present:
+            settings['GPSHDOPFilter'] = transect.boat_vel.vtg_vel.gps_HDOP_filter
+            settings['GPSHDOPFilterMax'] = transect.boat_vel.vtg_vel.gps_HDOP_filter_max
+            settings['GPSHDOPFilterChange'] = transect.boat_vel.vtg_vel.gps_HDOP_filter_change
+            settings['GPSSmoothFilter'] = transect.boat_vel.vtg_vel.smooth_filter
+            settings['GPSInterpolation'] = transect.boat_vel.vtg_vel.interpolate
+
+        # Depth Settings
+        settings['depthAvgMethod'] = transect.depths.bt_depths.avg_method
+        settings['depthValidMethod'] = transect.depths.bt_depths.valid_data_method
+        
+        # Depth settings are always applied to all available depth sources.
+        # Only those saved in the bt_depths are used here but are applied to all sources
+        settings['depthFilterType'] = transect.depths.bt_depths.filter_type
+        settings['depthReference'] = transect.depths.selected
+        settings['depthComposite'] = transect.depths.composite
+        select = getattr(transect.depths, transect.depths.selected)
+        settings['depthInterpolation'] = select.interp_type
+        
+        # Extrap Settings
+        if self.extrap_fit is None:
+            settings['extrapTop'] = transect.extrap.top_method
+            settings['extrapBot'] = transect.extrap.bot_method
+            settings['extrapExp'] = transect.extrap.exponent
+        else:
+            settings['extrapTop'] = self.extrap_fit.sel_fit[-1].top_method
+            settings['extrapBot'] = self.extrap_fit.sel_fit[-1].bot_method
+            settings['extrapExp'] = self.extrap_fit.sel_fit[-1].exponent
+
+        # Using self.use_weighted allows a QRev mat file to be loaded and initially processed with the settings from
+        # the QRev file; upon reprocessing, self.use_weighted is set to the use_weighted option in the settings.
+        settings['UseWeighted'] = self.use_weighted
+
+        # Edge Settings
+        settings['edgeVelMethod'] = transect.edges.vel_method
+        settings['edgeRecEdgeMethod'] = transect.edges.rec_edge_method
+
+        settings['UseMeasurementThresholds'] = transect.w_vel.use_measurement_thresholds
+        settings['UsePingType'] = self.use_ping_type
+
+        return settings
+
+    def qrev_default_settings(self, check_user_excluded_dist=False, use_weighted=False):
+        """QRev default and filter settings for a measurement.
+        """
+
+        settings = dict()
+
+        if len(self.checked_transect_idx) > 0:
+            ref_transect = self.checked_transect_idx[0]
+        else:
+            ref_transect = 0
+
+        # Navigation reference
+        settings['NavRef'] = self.transects[ref_transect].boat_vel.selected
+
+        # Composite tracks
+        settings['CompTracks'] = 'Off'
+
+        # Water track filter settings
+        settings['WTbeamFilter'] = -1
+        settings['WTdFilter'] = 'Auto'
+        settings['WTdFilterThreshold'] = np.nan
+        settings['WTwFilter'] = 'Auto'
+        settings['WTwFilterThreshold'] = np.nan
+        settings['WTsmoothFilter'] = 'Off'
+
+        if self.transects[ref_transect].adcp.manufacturer == 'TRDI':
+            settings['WTsnrFilter'] = 'Off'
+        else:
+            settings['WTsnrFilter'] = 'Auto'
+
+        if check_user_excluded_dist:
+            temp = [x.w_vel for x in self.transects]
+            excluded_dist = np.nanmin([x.excluded_dist_m for x in temp])
+        else:
+            excluded_dist = 0
+        if excluded_dist < 0.158 and self.transects[ref_transect].adcp.model == 'M9':
+            settings['WTExcludedDistance'] = 0.16
+        elif excluded_dist < 0.248 and self.transects[ref_transect].adcp.model == 'RioPro':
+            settings['WTExcludedDistance'] = 0.25
+        else:
+            settings['WTExcludedDistance'] = excluded_dist
+
+        # Bottom track filter settings
+        settings['BTbeamFilter'] = -1
+        settings['BTdFilter'] = 'Auto'
+        settings['BTdFilterThreshold'] = np.nan
+        settings['BTwFilter'] = 'Auto'
+        settings['BTwFilterThreshold'] = np.nan
+        settings['BTsmoothFilter'] = 'Off'
+
+        # GGA Filter settings
+        settings['ggaDiffQualFilter'] = 2
+        settings['ggaAltitudeFilter'] = 'Auto'
+        settings['ggaAltitudeFilterChange'] = np.nan
+
+        # VTG filter settings
+        settings['vtgsmoothFilter'] = 'Off'
+
+        # GGA and VTG filter settings
+        settings['GPSHDOPFilter'] = 'Auto'
+        settings['GPSHDOPFilterMax'] = np.nan
+        settings['GPSHDOPFilterChange'] = np.nan
+        settings['GPSSmoothFilter'] = 'Off'
+
+        # Depth Averaging
+        settings['depthAvgMethod'] = 'IDW'
+        settings['depthValidMethod'] = 'QRev'
+
+        # Depth Reference
+
+        # Default to 4 beam depth average
+        settings['depthReference'] = 'bt_depths'
+        # Depth settings
+        settings['depthFilterType'] = 'Smooth'
+        settings['depthComposite'] = 'Off'
+        for transect in self.transects:
+            if transect.checked:
+                if transect.depths.vb_depths is not None or transect.depths.ds_depths is not None:
+                    settings['depthComposite'] = 'On'
+                    break
+                else:
+                    settings['depthComposite'] = 'Off'
+                    break
+
+        # Interpolation settings
+        settings = self.qrev_default_interpolation_methods(settings)
+
+        # Edge settings
+        settings['edgeVelMethod'] = 'MeasMag'
+        settings['edgeRecEdgeMethod'] = 'Fixed'
+
+        # Extrapolation Settings
+        settings['extrapTop'] = 'Power'
+        settings['extrapBot'] = 'Power'
+        settings['extrapExp'] = 0.1667
+        settings['UseWeighted'] = use_weighted
+
+        settings['UseMeasurementThresholds'] = False
+        settings['UsePingType'] = True
+
+        return settings
+
+    def update_qa(self):
+        """Updates the quality assurance evaluation for the measurement."""
+        self.qa = QAData(self)
+
+    @staticmethod
+    def no_filter_interp_settings(self):
+        """Settings to turn off all filters and interpolations.
+
+        Returns
+        -------
+        settings: dict
+            Dictionary of all processing settings.
+        """
+
+        settings = dict()
+        if len(self.checked_transect_idx) > 0:
+            ref_transect = self.checked_transect_idx[0]
+        else:
+            ref_transect = 0
+
+        settings['NavRef'] = self.transects[ref_transect].boat_vel.selected
+
+        # Composite tracks
+        settings['CompTracks'] = 'Off'
+
+        # Water track filter settings
+        settings['WTbeamFilter'] = 3
+        settings['WTdFilter'] = 'Off'
+        settings['WTdFilterThreshold'] = np.nan
+        settings['WTwFilter'] = 'Off'
+        settings['WTwFilterThreshold'] = np.nan
+        settings['WTsmoothFilter'] = 'Off'
+        settings['WTsnrFilter'] = 'Off'
+
+        temp = [x.w_vel for x in self.transects]
+        excluded_dist = np.nanmin([x.excluded_dist_m for x in temp])
+
+        settings['WTExcludedDistance'] = excluded_dist
+
+        # Bottom track filter settings
+        settings['BTbeamFilter'] = 3
+        settings['BTdFilter'] = 'Off'
+        settings['BTdFilterThreshold'] = np.nan
+        settings['BTwFilter'] = 'Off'
+        settings['BTwFilterThreshold'] = np.nan
+        settings['BTsmoothFilter'] = 'Off'
+
+        # GGA filter settings
+        settings['ggaDiffQualFilter'] = 1
+        settings['ggaAltitudeFilter'] = 'Off'
+        settings['ggaAltitudeFilterChange'] = np.nan
+
+        # VTG filter settings
+        settings['vtgsmoothFilter'] = 'Off'
+
+        # GGA and VTG filter settings
+        settings['GPSHDOPFilter'] = 'Off'
+        settings['GPSHDOPFilterMax'] = np.nan
+        settings['GPSHDOPFilterChange'] = np.nan
+        settings['GPSSmoothFilter'] = 'Off'
+
+        # Depth Averaging
+        settings['depthAvgMethod'] = 'IDW'
+        settings['depthValidMethod'] = 'QRev'
+
+        # Depth Reference
+
+        # Default to 4 beam depth average
+        settings['depthReference'] = 'bt_depths'
+        # Depth settings
+        settings['depthFilterType'] = 'None'
+        settings['depthComposite'] = 'Off'
+
+        # Interpolation settings
+        settings['BTInterpolation'] = 'None'
+        settings['WTEnsInterpolation'] = 'None'
+        settings['WTCellInterpolation'] = 'None'
+        settings['GPSInterpolation'] = 'None'
+        settings['depthInterpolation'] = 'None'
+        settings['WTwtDepthFilter'] = 'Off'
+
+        # Edge Settings
+        settings['edgeVelMethod'] = 'MeasMag'
+        settings['edgeRecEdgeMethod'] = 'Fixed'
+
+        return settings
+
+    def selected_transects_changed(self, selected_transects_idx):
+        """Handle changes in the transects selected for computing discharge.
+
+        Parameters
+        ----------
+        selected_transects_idx: list
+            List of indices of the transects used to compute discharge
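+
+        Examples
+        --------
+        Hypothetical call computing discharge from only the first two transects:
+
+            meas.selected_transects_changed([0, 1])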
+        """
+
+        # Update transect settings
+        self.checked_transect_idx = []
+        for n in range(len(self.transects)):
+            if n in selected_transects_idx:
+                self.transects[n].checked = True
+                self.checked_transect_idx.append(n)
+            else:
+                self.transects[n].checked = False
+
+        # Update computations
+        self.create_filter_composites()
+        settings = self.current_settings()
+        self.apply_settings(settings=settings)
+
+    def compute_discharge(self):
+        """Computes the discharge for all transects in the measurement.
+        """
+
+        self.discharge = []
+        for transect in self.transects:
+            q = QComp()
+            q.populate_data(data_in=transect, moving_bed_data=self.mb_tests)
+            self.discharge.append(q)
+
+    def compute_uncertainty(self):
+        """Computes uncertainty using QRev model and Oursin model if selected.
+        """
+
+        self.uncertainty = Uncertainty()
+        self.uncertainty.compute_uncertainty(self)
+        self.qa = QAData(self)
+
+        if self.run_oursin:
+            if self.oursin is None:
+                self.oursin = Oursin()
+                user_advanced_settings = None
+                u_measurement_user = None
+            else:
+                user_advanced_settings = self.oursin.user_advanced_settings
+                u_measurement_user = self.oursin.u_measurement_user
+                self.oursin = Oursin()
+            self.oursin.compute_oursin(self,
+                                       user_advanced_settings=user_advanced_settings,
+                                       u_measurement_user=u_measurement_user)
+
+    @staticmethod
+    def compute_edi(meas, selected_idx, percents):
+        """Computes the locations and vertical properties for the user selected transect and
+        flow percentages.
+
+        Parameters
+        ----------
+        meas: Measurement
+            Object of class Measurement
+        selected_idx: int
+            Index of selected transect
+        percents: list
+            List of selected flow percents
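+
+        Returns
+        -------
+        edi_results: dict
+            Dictionary of results with keys 'percent', 'target_q', 'actual_q',
+            'distance', 'depth', 'velocity', 'lat', and 'lon'
+
+        Examples
+        --------
+        Hypothetical call locating verticals at 10, 30, 50, 70, and 90 percent of flow:
+
+            results = Measurement.compute_edi(meas, selected_idx=0,
+                                              percents=[10, 30, 50, 70, 90])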
+        """
+
+        # Get transect and discharge data
+        transect = meas.transects[selected_idx]
+        discharge = meas.discharge[selected_idx]
+
+        # Sort the percents in ascending order
+        percents.sort()
+
+        # Compute cumulative discharge
+        q_cum = np.nancumsum(discharge.middle_ens + discharge.top_ens + discharge.bottom_ens)
+
+        # Adjust for moving-bed conditions
+        q_cum = q_cum * discharge.correction_factor
+
+        # Adjust q for starting edge
+        if transect.start_edge == 'Left':
+            q_cum = q_cum + discharge.left
+            q_cum[-1] = q_cum[-1] + discharge.right
+            start_dist = transect.edges.left.distance_m
+        else:
+            q_cum = q_cum + discharge.right
+            q_cum[-1] = q_cum[-1] + discharge.left
+            start_dist = transect.edges.right.distance_m
+
+        # Determine ensemble at each percent
+        ensembles = []
+        q_target = []
+        for percent in percents:
+            q_target.append(q_cum[-1] * percent / 100)
+            if q_target[-1] > 0:
+                ensembles.append(np.where(q_cum > q_target[-1])[0][0])
+            if q_target[-1] < 0:
+                ensembles.append(np.where(q_cum < q_target[-1])[0][0])
+
+        # Compute distance from start bank
+        boat_vel_selected = getattr(transect.boat_vel, transect.boat_vel.selected)
+        track_x = np.nancumsum(boat_vel_selected.u_processed_mps[transect.in_transect_idx] *
+                               transect.date_time.ens_duration_sec[transect.in_transect_idx])
+        track_y = np.nancumsum(boat_vel_selected.v_processed_mps[transect.in_transect_idx] *
+                               transect.date_time.ens_duration_sec[transect.in_transect_idx])
+
+        dist = np.sqrt(track_x ** 2 + track_y ** 2) + start_dist
+
+        # Initialize variables for computing vertical data
+        n_pts_in_avg = int(len(q_cum) * 0.01)
+        depth_selected = getattr(transect.depths, transect.depths.selected)
+        q_actual = []
+        distance = []
+        lat = []
+        lon = []
+        depth = []
+        velocity = []
+
+        # Compute data for each vertical
+        for ensemble in ensembles:
+            q_actual.append(q_cum[ensemble])
+            distance.append(dist[ensemble])
+            # Report lat and lon if available
+            try:
+                lat.append(transect.gps.gga_lat_ens_deg[ensemble])
+                lon.append(transect.gps.gga_lon_ens_deg[ensemble])
+            except (ValueError, AttributeError, TypeError):
+                lat.append('')
+                lon.append('')
+            depth.append(depth_selected.depth_processed_m[ensemble])
+
+            # The velocity is an average velocity for ensembles +/- 1% of the total ensembles
+            # about the selected ensemble
+            u = np.nanmean(transect.w_vel.u_processed_mps[:, ensemble - n_pts_in_avg: ensemble + n_pts_in_avg + 1], 1)
+            v = np.nanmean(transect.w_vel.v_processed_mps[:, ensemble - n_pts_in_avg: ensemble + n_pts_in_avg + 1], 1)
+            velocity.append(np.sqrt(np.nanmean(u)**2 + np.nanmean(v)**2))
+
+        # Save computed results in a dictionary
+        edi_results = {'percent': percents, 'target_q': q_target, 'actual_q': q_actual, 'distance': distance,
+                       'depth': depth, 'velocity': velocity, 'lat': lat, 'lon': lon}
+        return edi_results
+
+    @staticmethod
+    def qrev_default_interpolation_methods(settings):
+        """Adds QRev default interpolation settings to existing settings data structure
+
+        Parameters
+        ----------
+        settings: dict
+            Dictionary of reference and filter settings
+
+        Returns
+        -------
+        settings: dict
+            Dictionary with reference, filter, and interpolation settings
+        """
+
+        settings['BTInterpolation'] = 'Linear'
+        settings['WTEnsInterpolation'] = 'abba'
+        settings['WTCellInterpolation'] = 'abba'
+        settings['GPSInterpolation'] = 'Linear'
+        settings['depthInterpolation'] = 'Linear'
+        settings['WTwtDepthFilter'] = 'On'
+
+        return settings
+
+    def change_extrapolation(self, method, top=None, bot=None, exp=None, extents=None, threshold=None, compute_q=True,
+                             use_weighted=False):
+        """Applies the selected extrapolation method to each transect.
+
+        Parameters
+        ----------
+        method: str
+            Method of computation ('Automatic' or 'Manual')
+        top: str
+            Top extrapolation method
+        bot: str
+            Bottom extrapolation method
+        exp: float
+            Exponent for power or no slip methods
+        threshold: float
+            Threshold as a percent for determining if a median is valid
+        extents: list
+            Percent of discharge, does not account for transect direction
+        compute_q: bool
+            Specifies if the discharge should be computed
+        use_weighted: bool
+            Specifies if discharge weighting is used
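+
+        Examples
+        --------
+        Hypothetical manual fit using power top and bottom with the 1/6th-power exponent:
+
+            meas.change_extrapolation('Manual', top='Power', bot='Power', exp=0.1667)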
+        """
+
+        if top is None:
+            top = self.extrap_fit.sel_fit[-1].top_method
+        if bot is None:
+            bot = self.extrap_fit.sel_fit[-1].bot_method
+        if exp is None:
+            exp = self.extrap_fit.sel_fit[-1].exponent
+        if extents is not None:
+            self.extrap_fit.subsection = extents
+        if threshold is not None:
+            self.extrap_fit.threshold = threshold
+
+        data_type = self.extrap_fit.norm_data[-1].data_type
+        if data_type is None:
+            data_type = 'q'
+
+        if method == 'Manual':
+            self.extrap_fit.fit_method = 'Manual'
+            for transect in self.transects:
+                transect.extrap.set_extrap_data(top=top, bot=bot, exp=exp)
+            self.extrap_fit.process_profiles(transects=self.transects, data_type=data_type, use_weighted=use_weighted)
+        else:
+            self.extrap_fit.fit_method = 'Automatic'
+            self.extrap_fit.process_profiles(transects=self.transects, data_type=data_type, use_weighted=use_weighted)
+            for transect in self.transects:
+                transect.extrap.set_extrap_data(top=self.extrap_fit.sel_fit[-1].top_method,
+                                                bot=self.extrap_fit.sel_fit[-1].bot_method,
+                                                exp=self.extrap_fit.sel_fit[-1].exponent)
+
+        if compute_q:
+            self.extrap_fit.q_sensitivity = ExtrapQSensitivity()
+            self.extrap_fit.q_sensitivity.populate_data(transects=self.transects,
+                                                        extrap_fits=self.extrap_fit.sel_fit)
+
+            self.compute_discharge()
+
+    @staticmethod
+    def measurement_duration(self):
+        """Computes the duration of the measurement.
+        """
+
+        duration = 0
+        for transect in self.transects:
+            if transect.checked:
+                duration += transect.date_time.transect_duration_sec
+        return duration
+
+    @staticmethod
+    def mean_discharges(meas):
+        """Computes the mean discharge for the measurement.
+        """
+
+        # Initialize lists
+        total_q = []
+        uncorrected_q = []
+        top_q = []
+        bot_q = []
+        mid_q = []
+        left_q = []
+        right_q = []
+        int_cells_q = []
+        int_ensembles_q = []
+
+        for n, transect in enumerate(meas.transects):
+            if transect.checked:
+                total_q.append(meas.discharge[n].total)
+                uncorrected_q.append(meas.discharge[n].total_uncorrected)
+                top_q.append(meas.discharge[n].top)
+                mid_q.append(meas.discharge[n].middle)
+                bot_q.append(meas.discharge[n].bottom)
+                left_q.append(meas.discharge[n].left)
+                right_q.append(meas.discharge[n].right)
+                int_cells_q.append(meas.discharge[n].int_cells)
+                int_ensembles_q.append(meas.discharge[n].int_ens)
+
+        discharge = {'total_mean': np.nanmean(total_q),
+                     'uncorrected_mean': np.nanmean(uncorrected_q),
+                     'top_mean': np.nanmean(top_q),
+                     'mid_mean': np.nanmean(mid_q),
+                     'bot_mean': np.nanmean(bot_q),
+                     'left_mean': np.nanmean(left_q),
+                     'right_mean': np.nanmean(right_q),
+                     'int_cells_mean': np.nanmean(int_cells_q),
+                     'int_ensembles_mean': np.nanmean(int_ensembles_q)}
+
+        return discharge
+
+    @staticmethod
+    def compute_measurement_properties(meas):
+        """Computes characteristics of the transects and measurement that assist in evaluating the consistency
+        of the transects.
+
+        Parameters
+        ----------
+        meas: Measurement
+            Object of class Measurement
+
+        Returns
+        -------
+        trans_prop: dict
+            Dictionary of transect properties. Each key maps to an np.array with
+            one element per transect plus a final element holding the measurement
+            mean (avg_boat_course is the exception, with one element per transect only).
+            width: np.array(float)
+                width in m
+            width_cov: np.array(float)
+                coefficient of variation of width in percent
+            area: np.array(float)
+                cross sectional area in m**2
+            area_cov: np.array(float)
+                coefficient of variation of area in percent
+            avg_boat_speed: np.array(float)
+                average boat speed in mps
+            avg_boat_course: np.array(float)
+                average boat course in degrees
+            avg_water_speed: np.array(float)
+                average water speed in mps
+            avg_water_dir: np.array(float)
+                average water direction in degrees
+            avg_depth: np.array(float)
+                average depth in m
+            max_depth: np.array(float)
+                maximum depth in m
+            max_water_speed: np.array(float)
+                99th percentile of water speed in mps
+        """
+
+        # Initialize variables
+        checked_idx = np.array([], dtype=int)
+        n_transects = len(meas.transects)
+        trans_prop = {'width': np.array([np.nan] * (n_transects + 1)),
+                      'width_cov': np.array([np.nan] * (n_transects + 1)),
+                      'area': np.array([np.nan] * (n_transects + 1)),
+                      'area_cov': np.array([np.nan] * (n_transects + 1)),
+                      'avg_boat_speed': np.array([np.nan] * (n_transects + 1)),
+                      'avg_boat_course': np.array([np.nan] * n_transects),
+                      'avg_water_speed': np.array([np.nan] * (n_transects + 1)),
+                      'avg_water_dir': np.array([np.nan] * (n_transects + 1)),
+                      'avg_depth': np.array([np.nan] * (n_transects + 1)),
+                      'max_depth': np.array([np.nan] * (n_transects + 1)),
+                      'max_water_speed': np.array([np.nan] * (n_transects + 1))}
+
+        # Process each transect
+        for n, transect in enumerate(meas.transects):
+
+            # Compute boat track properties
+            boat_track = BoatStructure.compute_boat_track(transect)
+
+            # Get boat speeds
+            in_transect_idx = transect.in_transect_idx
+            if getattr(transect.boat_vel, transect.boat_vel.selected) is not None:
+                boat_selected = getattr(transect.boat_vel, transect.boat_vel.selected)
+                u_boat = boat_selected.u_processed_mps[in_transect_idx]
+                v_boat = boat_selected.v_processed_mps[in_transect_idx]
+            else:
+                u_boat = nans(transect.boat_vel.bt_vel.u_processed_mps[in_transect_idx].shape)
+                v_boat = nans(transect.boat_vel.bt_vel.v_processed_mps[in_transect_idx].shape)
+
+            if np.logical_not(np.all(np.isnan(boat_track['track_x_m']))):
+
+                # Compute boat course and mean speed
+                course_radians, dmg = cart2pol(boat_track['track_x_m'][-1], boat_track['track_y_m'][-1])
+                trans_prop['avg_boat_course'][n] = rad2azdeg(course_radians)
+                trans_prop['avg_boat_speed'][n] = np.nanmean(np.sqrt(u_boat**2 + v_boat**2))
+
+                # Compute width
+                trans_prop['width'][n] = np.nansum([dmg, transect.edges.left.distance_m,
+                                                    transect.edges.right.distance_m])
+
+                # Project the shiptrack onto a line from the beginning to end of the transect
+                unit_x, unit_y = pol2cart(course_radians, 1)
+                bt = np.array([boat_track['track_x_m'], boat_track['track_y_m']]).T
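+                # The dot product of each track point with the unit vector along the
+                # mean course gives the along-course distance ("station") of each
+                # ensemble, used as the abscissa for the trapezoidal area integration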
+                dot_prod = bt @ np.array([unit_x, unit_y])
+                projected_x = dot_prod * unit_x
+                projected_y = dot_prod * unit_y
+                station = np.sqrt(projected_x**2 + projected_y**2)
+
+                # Get selected depth object
+                depth = getattr(transect.depths, transect.depths.selected)
+                depth_a = np.copy(depth.depth_processed_m)
+                depth_a[np.isnan(depth_a)] = 0
+                # Compute area of the moving-boat portion of the cross section using trapezoidal integration.
+                # This method is consistent with AreaComp but is different from QRev in Matlab
+                area_moving_boat = np.abs(np.trapz(depth_a[in_transect_idx], station[in_transect_idx]))
+
+                # Compute area of left edge
+                edge_type = transect.edges.left.type
+                coef = 1
+                if edge_type == 'Triangular':
+                    coef = 0.5
+                elif edge_type == 'Rectangular':
+                    coef = 1.0
+                elif edge_type == 'Custom':
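+                    # The area coefficient is offset from the triangular value (0.5)
+                    # by the difference between the user coefficient and 0.3535, the
+                    # nominal default edge coefficient (assumed rationale)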
+                    coef = 0.5 + (transect.edges.left.cust_coef - 0.3535)
+                elif edge_type == 'User Q':
+                    coef = 0.5
+                edge_idx = QComp.edge_ensembles('left', transect)
+                edge_depth = np.nanmean(depth.depth_processed_m[edge_idx])
+                area_left = edge_depth * transect.edges.left.distance_m * coef
+
+                # Compute area of right edge
+                edge_type = transect.edges.right.type
+                coef = 1
+                if edge_type == 'Triangular':
+                    coef = 0.5
+                elif edge_type == 'Rectangular':
+                    coef = 1.0
+                elif edge_type == 'Custom':
+                    coef = 0.5 + (transect.edges.right.cust_coef - 0.3535)
+                elif edge_type == 'User Q':
+                    coef = 0.5
+                edge_idx = QComp.edge_ensembles('right', transect)
+                edge_depth = np.nanmean(depth.depth_processed_m[edge_idx])
+                area_right = edge_depth * transect.edges.right.distance_m * coef
+
+                # Compute total cross sectional area
+                trans_prop['area'][n] = np.nansum([area_left, area_moving_boat, area_right])
+
+                # Compute average water speed
+                trans_prop['avg_water_speed'][n] = meas.discharge[n].total / trans_prop['area'][n]
+
+                # Compute flow direction using discharge weighting
+                u_water = transect.w_vel.u_processed_mps[:, in_transect_idx]
+                v_water = transect.w_vel.v_processed_mps[:, in_transect_idx]
+                weight = np.abs(meas.discharge[n].middle_cells)
+                u = np.nansum(np.nansum(u_water * weight)) / np.nansum(np.nansum(weight))
+                v = np.nansum(np.nansum(v_water * weight)) / np.nansum(np.nansum(weight))
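+                # arctan2(east, north) converts the mean velocity components to a
+                # geographic azimuth (degrees clockwise from north)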
+                trans_prop['avg_water_dir'][n] = np.arctan2(u, v) * 180 / np.pi
+                if trans_prop['avg_water_dir'][n] < 0:
+                    trans_prop['avg_water_dir'][n] = trans_prop['avg_water_dir'][n] + 360
+
+                # Compute average and max depth
+                # This is a deviation from QRev in Matlab which simply averaged all the depths
+                trans_prop['avg_depth'][n] = trans_prop['area'][n] / trans_prop['width'][n]
+                trans_prop['max_depth'][n] = np.nanmax(depth.depth_processed_m[in_transect_idx])
+
+                # Compute max water speed using the 99th percentile
+                water_speed = np.sqrt(u_water**2 + v_water**2)
+                trans_prop['max_water_speed'][n] = np.nanpercentile(water_speed, 99)
+                if transect.checked:
+                    checked_idx = np.append(checked_idx, n)
+
+        # Only transects used for discharge are included in measurement properties
+        if len(checked_idx) > 0:
+            n = n_transects
+            trans_prop['width'][n] = np.nanmean(trans_prop['width'][checked_idx])
+            trans_prop['width_cov'][n] = (np.nanstd(trans_prop['width'][checked_idx], ddof=1) /
+                                          trans_prop['width'][n]) * 100
+            trans_prop['area'][n] = np.nanmean(trans_prop['area'][checked_idx])
+            trans_prop['area_cov'][n] = (np.nanstd(trans_prop['area'][checked_idx], ddof=1) /
+                                         trans_prop['area'][n]) * 100
+            trans_prop['avg_boat_speed'][n] = np.nanmean(trans_prop['avg_boat_speed'][checked_idx])
+            trans_prop['avg_water_speed'][n] = np.nanmean(trans_prop['avg_water_speed'][checked_idx])
+            trans_prop['avg_depth'][n] = np.nanmean(trans_prop['avg_depth'][checked_idx])
+            trans_prop['max_depth'][n] = np.nanmax(trans_prop['max_depth'][checked_idx])
+            trans_prop['max_water_speed'][n] = np.nanmax(trans_prop['max_water_speed'][checked_idx])
+
+            # Compute average water direction using vector coordinates to avoid the problem of averaging
+            # fluctuations that cross zero degrees
+            x_coord = []
+            y_coord = []
+            for idx in checked_idx:
+                water_dir_rad = azdeg2rad(trans_prop['avg_water_dir'][idx])
+                x, y = pol2cart(water_dir_rad, 1)
+                x_coord.append(x)
+                y_coord.append(y)
+            avg_water_dir_rad, _ = cart2pol(np.mean(x_coord), np.mean(y_coord))
+            trans_prop['avg_water_dir'][n] = rad2azdeg(avg_water_dir_rad)
+        return trans_prop
+
+    @staticmethod
+    def checked_transects(meas):
+        """Create a list of indices of the checked transects.
+        """
+
+        checked_transect_idx = []
+        for n in range(len(meas.transects)):
+            if meas.transects[n].checked:
+                checked_transect_idx.append(n)
+        return checked_transect_idx
+
+    @staticmethod
+    def compute_time_series(meas, variable=None):
+        """Computes the time series using serial time for any variable.
+
+        Parameters
+        ----------
+        meas: Measurement
+            Object of class Measurement
+        variable: str
+            Name of the variable for which the time series is requested.
+            Currently only 'Temperature' is supported; other values return an
+            empty data array.
+
+        Returns
+        -------
+        data: np.ndarray
+            Time series values for the requested variable
+        serial_time: np.ndarray
+            Serial time, in sec, for each value
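+
+        Examples
+        --------
+        Illustrative only; assumes `meas` is a processed Measurement:
+
+        >>> data, serial_time = Measurement.compute_time_series(meas, variable='Temperature')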
+        """
+
+        # Initialize variables
+        data = np.array([])
+        serial_time = np.array([])
+        idx_transects = Measurement.checked_transects(meas)
+
+        # Process transects
+        for idx in idx_transects:
+            if variable == 'Temperature':
+                data = np.append(data, meas.transects[idx].sensors.temperature_deg_c.internal.data)
+            ens_cum_time = np.nancumsum(meas.transects[idx].date_time.ens_duration_sec)
+            ens_time = meas.transects[idx].date_time.start_serial_time + ens_cum_time
+            serial_time = np.append(serial_time, ens_time)
+
+        return data, serial_time
+
+    def xml_output(self, version, file_name):
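+        """Builds the 'Channel' node of the QRev XML output for this measurement.
+
+        Parameters
+        ----------
+        version: str
+            QRev version string, written as the QRevVersion attribute
+        file_name: str
+            Name of the output file; its base name (without the 4-character
+            extension) is written as the QRevFilename attribute
+        """
+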
+        channel = ETree.Element('Channel', QRevFilename=os.path.basename(file_name[:-4]), QRevVersion=version)
+
+        # (2) SiteInformation Node
+        if self.station_name or self.station_number:
+            site_info = ETree.SubElement(channel, 'SiteInformation')
+
+            # (3) StationName Node
+            if self.station_name:
+                ETree.SubElement(site_info, 'StationName', type='char').text = self.station_name
+
+            # (3) SiteID Node
+            ETree.SubElement(site_info, 'SiteID', type='char').text = str(self.station_number)
+
+            # (3) Persons
+            ETree.SubElement(site_info, 'Persons', type='char').text = self.persons
+
+            # (3) Measurement Number
+            ETree.SubElement(site_info, 'MeasurementNumber', type='char').text = self.meas_number
+
+            # (3) StageStart Node
+            temp = self.stage_start_m
+            ETree.SubElement(site_info, 'StageStart', type='double', unitsCode='m').text = '{:.5f}'.format(temp)
+
+            # (3) StageEnd Node
+            temp = self.stage_end_m
+            ETree.SubElement(site_info, 'StageEnd', type='double', unitsCode='m').text = '{:.5f}'.format(temp)
+
+            # (3) StageMeasurement Node
+            temp = self.stage_meas_m
+            ETree.SubElement(site_info, 'StageMeasurement', type='double', unitsCode='m').text = '{:.5f}'.format(temp)
+
+        # (2) QA Node
+        qa = ETree.SubElement(channel, 'QA')
+
+        # (3) DiagnosticTestResult Node
+        if len(self.system_tst) > 0:
+            last_test = self.system_tst[-1].data
+            num_failed = last_test.count('FAIL')
+            if num_failed == 0:
+                test_result = 'Pass'
+            else:
+                test_result = str(num_failed) + ' Failed'
+        else:
+            test_result = 'None'
+        ETree.SubElement(qa, 'DiagnosticTestResult', type='char').text = test_result
+
+        # (3) CompassCalibrationResult Node
+        try:
+            last_eval = self.compass_eval[-1]
+            # StreamPro, RR
+            idx = last_eval.data.find('Typical Heading Error: <')
+            if idx == (-1):
+                # Rio Grande
+                idx = last_eval.data.find('>>> Total error:')
+                if idx != (-1):
+                    idx_start = idx + 17
+                    idx_end = idx_start + 10
+                    comp_error = last_eval.data[idx_start:idx_end]
+                    comp_error = ''.join([n for n in comp_error if n.isdigit() or n == '.'])
+                else:
+                    comp_error = ''
+            else:
+                # StreamPro, RR
+                idx_start = idx + 24
+                idx_end = idx_start + 10
+                comp_error = last_eval.data[idx_start:idx_end]
+                comp_error = ''.join([n for n in comp_error if n.isdigit() or n == '.'])
+
+            # An evaluation exists, but no error value could be parsed from its text
+            if not comp_error:
+                ETree.SubElement(qa, 'CompassCalibrationResult', type='char').text = 'Yes'
+            else:
+                ETree.SubElement(qa, 'CompassCalibrationResult', type='char').text = 'Max ' + comp_error
+
+        except (IndexError, TypeError, AttributeError):
+            try:
+                if len(self.compass_cal) > 0:
+                    ETree.SubElement(qa, 'CompassCalibrationResult', type='char').text = 'Yes'
+                else:
+                    ETree.SubElement(qa, 'CompassCalibrationResult', type='char').text = 'No'
+            except (IndexError, TypeError):
+                ETree.SubElement(qa, 'CompassCalibrationResult', type='char').text = 'No'
+
+        # (3) MovingBedTestType Node
+        if not self.mb_tests:
+            ETree.SubElement(qa, 'MovingBedTestType', type='char').text = 'None'
+        else:
+            selected_idx = [i for (i, val) in enumerate(self.mb_tests) if val.selected is True]
+            if len(selected_idx) >= 1:
+                temp = self.mb_tests[selected_idx[0]].type
+            else:
+                temp = self.mb_tests[-1].type
+            ETree.SubElement(qa, 'MovingBedTestType', type='char').text = str(temp)
+
+            # MovingBedTestResult Node
+            temp = 'Unknown'
+            for idx in selected_idx:
+                if self.mb_tests[idx].moving_bed == 'Yes':
+                    temp = 'Yes'
+                    break
+                elif self.mb_tests[idx].moving_bed == 'No':
+                    temp = 'No'
+
+            ETree.SubElement(qa, 'MovingBedTestResult', type='char').text = temp
+
+        # (3) DiagnosticTest and Text Node
+        if self.system_tst:
+            test_text = ''
+            for test in self.system_tst:
+                test_text += test.data
+            diag_test = ETree.SubElement(qa, 'DiagnosticTest')
+            ETree.SubElement(diag_test, 'Text', type='char').text = test_text
+
+        # (3) CompassCalibration and Text Node
+        compass_text = ''
+        try:
+            for each in self.compass_cal:
+                if self.transects[self.checked_transect_idx[0]].adcp.manufacturer == 'SonTek':
+                    idx = each.data.find('CAL_TIME')
+                    compass_text += each.data[idx:]
+                else:
+                    compass_text += each.data
+        except (IndexError, TypeError, AttributeError):
+            pass
+        try:
+            for each in self.compass_eval:
+                if self.transects[self.checked_transect_idx[0]].adcp.manufacturer == 'SonTek':
+                    idx = each.data.find('CAL_TIME')
+                    compass_text += each.data[idx:]
+                else:
+                    compass_text += each.data
+        except (IndexError, TypeError, AttributeError):
+            pass
+
+        if len(compass_text) > 0:
+            comp_cal = ETree.SubElement(qa, 'CompassCalibration')
+            ETree.SubElement(comp_cal, 'Text', type='char').text = compass_text
+
+        # (3) MovingBedTest Node
+        if self.mb_tests:
+            for each in self.mb_tests:
+                mbt = ETree.SubElement(qa, 'MovingBedTest')
+
+                # (4) Filename Node
+                ETree.SubElement(mbt, 'Filename', type='char').text = each.transect.file_name
+
+                # (4) TestType Node
+                ETree.SubElement(mbt, 'TestType', type='char').text = each.type
+
+                # (4) Duration Node
+                ETree.SubElement(mbt, 'Duration', type='double',
+                                 unitsCode='sec').text = '{:.2f}'.format(each.duration_sec)
+
+                # (4) PercentInvalidBT Node
+                ETree.SubElement(mbt, 'PercentInvalidBT', type='double').text = '{:.4f}'.format(each.percent_invalid_bt)
+
+                # (4) HeadingDifference Node
+                if each.compass_diff_deg:
+                    temp = '{:.2f}'.format(each.compass_diff_deg)
+                else:
+                    temp = ''
+                ETree.SubElement(mbt, 'HeadingDifference', type='double', unitsCode='deg').text = temp
+
+                # (4) MeanFlowDirection Node
+                if each.flow_dir:
+                    temp = '{:.2f}'.format(each.flow_dir)
+                else:
+                    temp = ''
+                ETree.SubElement(mbt, 'MeanFlowDirection', type='double', unitsCode='deg').text = temp
+
+                # (4) MovingBedDirection Node
+                if each.mb_dir:
+                    temp = '{:.2f}'.format(each.mb_dir)
+                else:
+                    temp = ''
+                ETree.SubElement(mbt, 'MovingBedDirection', type='double', unitsCode='deg').text = temp
+
+                # (4) DistanceUpstream Node
+                ETree.SubElement(mbt, 'DistanceUpstream', type='double', unitsCode='m').text = \
+                    '{:.4f}'.format(each.dist_us_m)
+
+                # (4) MeanFlowSpeed Node
+                ETree.SubElement(mbt, 'MeanFlowSpeed', type='double', unitsCode='mps').text = \
+                    '{:.4f}'.format(each.flow_spd_mps)
+
+                # (4) MovingBedSpeed Node
+                ETree.SubElement(mbt, 'MovingBedSpeed', type='double', unitsCode='mps').text = \
+                    '{:.4f}'.format(each.mb_spd_mps)
+
+                # (4) PercentMovingBed Node
+                ETree.SubElement(mbt, 'PercentMovingBed', type='double').text = '{:.2f}'.format(each.percent_mb)
+
+                # (4) TestQuality Node
+                ETree.SubElement(mbt, 'TestQuality', type='char').text = each.test_quality
+
+                # (4) MovingBedPresent Node
+                ETree.SubElement(mbt, 'MovingBedPresent', type='char').text = each.moving_bed
+
+                # (4) UseToCorrect Node
+                if each.use_2_correct:
+                    ETree.SubElement(mbt, 'UseToCorrect', type='char').text = 'Yes'
+                else:
+                    ETree.SubElement(mbt, 'UseToCorrect', type='char').text = 'No'
+
+                # (4) UserValid Node
+                if each.user_valid:
+                    ETree.SubElement(mbt, 'UserValid', type='char').text = 'Yes'
+                else:
+                    ETree.SubElement(mbt, 'UserValid', type='char').text = 'No'
+
+                # (4) Message Node
+                if len(each.messages) > 0:
+                    str_out = ''
+                    for message in each.messages:
+                        str_out = str_out + message + '; '
+                    ETree.SubElement(mbt, 'Message', type='char').text = str_out
+
+        # (3) TemperatureCheck Node
+        temp_check = ETree.SubElement(qa, 'TemperatureCheck')
+
+        # (4) VerificationTemperature Node
+        if not np.isnan(self.ext_temp_chk['user']):
+            ETree.SubElement(temp_check, 'VerificationTemperature', type='double', unitsCode='degC').text = \
+                '{:.2f}'.format(self.ext_temp_chk['user'])
+
+        # (4) InstrumentTemperature Node
+        if not np.isnan(self.ext_temp_chk['adcp']):
+            ETree.SubElement(temp_check, 'InstrumentTemperature', type='double',
+                             unitsCode='degC').text = '{:.2f}'.format(self.ext_temp_chk['adcp'])
+
+        # (4) TemperatureChange Node:
+        temp_all = np.array([np.nan])
+        for each in self.transects:
+            # Check for situation where user has entered a constant temperature
+            temperature_selected = getattr(each.sensors.temperature_deg_c, each.sensors.temperature_deg_c.selected)
+            temperature = temperature_selected.data
+            if each.sensors.temperature_deg_c.selected != 'user':
+                # Temperatures for ADCP.
+                temp_all = np.concatenate((temp_all, temperature))
+            else:
+                # User specified constant temperature.
+                # Concatenate a matrix of size of internal data with repeated user values.
+                user_arr = np.tile(each.sensors.temperature_deg_c.user.data,
+                                   (np.size(each.sensors.temperature_deg_c.internal.data)))
+                temp_all = np.concatenate((temp_all, user_arr))
+
+        t_range = np.nanmax(temp_all) - np.nanmin(temp_all)
+        ETree.SubElement(temp_check, 'TemperatureChange', type='double',
+                         unitsCode='degC').text = '{:.2f}'.format(t_range)
+
+        # (3) QRev_Message Node
+        qa_check_keys = ['bt_vel', 'compass', 'depths', 'edges', 'extrapolation', 'gga_vel', 'movingbed', 'system_tst',
+                         'temperature', 'transects', 'user', 'vtg_vel', 'w_vel']
+
+        # For each qa check retrieve messages
+        messages = []
+        for key in qa_check_keys:
+            qa_type = getattr(self.qa, key)
+            if qa_type['messages']:
+                for message in qa_type['messages']:
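+                    # Plain-string messages are assigned a severity code here: 1 (sorted
+                    # first) when the message begins with an upper-case prefix, else 2.
+                    # List-style messages already carry their severity code.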
+                    if isinstance(message, str):
+                        if message[:3].isupper():
+                            messages.append([message, 1])
+                        else:
+                            messages.append([message, 2])
+                    else:
+                        messages.append(message)
+
+        # Sort messages with warning at top
+        messages.sort(key=lambda x: x[1])
+
+        if len(messages) > 0:
+            temp = ''
+            for message in messages:
+                temp = temp + message[0]
+            ETree.SubElement(qa, 'QRev_Message', type='char').text = temp
+
+        # (2) Instrument Node
+        instrument = ETree.SubElement(channel, 'Instrument')
+
+        # (3) Manufacturer Node
+        ETree.SubElement(instrument, 'Manufacturer', type='char').text = \
+            self.transects[self.checked_transect_idx[0]].adcp.manufacturer
+
+        # (3) Model Node
+        ETree.SubElement(instrument, 'Model', type='char').text = \
+            self.transects[self.checked_transect_idx[0]].adcp.model
+
+        # (3) SerialNumber Node
+        sn = self.transects[self.checked_transect_idx[0]].adcp.serial_num
+        ETree.SubElement(instrument, 'SerialNumber', type='char').text = str(sn)
+
+        # (3) FirmwareVersion Node
+        ver = self.transects[self.checked_transect_idx[0]].adcp.firmware
+        ETree.SubElement(instrument, 'FirmwareVersion', type='char').text = str(ver)
+
+        # (3) Frequency Node
+        freq = self.transects[self.checked_transect_idx[0]].adcp.frequency_khz
+        if isinstance(freq, np.ndarray):
+            freq = "Multi"
+        ETree.SubElement(instrument, 'Frequency', type='char', unitsCode='kHz').text = str(freq)
+
+        # (3) BeamAngle Node
+        ang = self.transects[self.checked_transect_idx[0]].adcp.beam_angle_deg
+        ETree.SubElement(instrument, 'BeamAngle', type='double', unitsCode='deg').text = '{:.1f}'.format(ang)
+
+        # (3) BlankingDistance Node
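+        # Reports the larger of the mean blanking distance and the excluded distance,
+        # falling back to the excluded distance when blanking distance is not numeric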
+        w_vel = []
+        for each in self.transects:
+            w_vel.append(each.w_vel)
+        blank = []
+        for each in w_vel:
+            blank.append(each.blanking_distance_m)
+        if isinstance(blank[0], float):
+            temp = np.mean(blank)
+            if self.transects[self.checked_transect_idx[0]].w_vel.excluded_dist_m > temp:
+                temp = self.transects[self.checked_transect_idx[0]].w_vel.excluded_dist_m
+        else:
+            temp = self.transects[self.checked_transect_idx[0]].w_vel.excluded_dist_m
+        ETree.SubElement(instrument, 'BlankingDistance', type='double', unitsCode='m').text = '{:.4f}'.format(temp)
+
+        # (3) InstrumentConfiguration Node
+        commands = ''
+        if self.transects[self.checked_transect_idx[0]].adcp.configuration_commands is not None:
+            for each in self.transects[self.checked_transect_idx[0]].adcp.configuration_commands:
+                if isinstance(each, str):
+                    commands += each + '  '
+            ETree.SubElement(instrument, 'InstrumentConfiguration', type='char').text = commands
+
+        # (2) Processing Node
+        processing = ETree.SubElement(channel, 'Processing')
+
+        # (3) SoftwareVersion Node
+        ETree.SubElement(processing, 'SoftwareVersion', type='char').text = version
+
+        # (3) Type Node
+        ETree.SubElement(processing, 'Type', type='char').text = self.processing
+
+        # (3) AreaComputationMethod Node
+        ETree.SubElement(processing, 'AreaComputationMethod', type='char').text = 'Parallel'
+
+        # (3) Navigation Node
+        navigation = ETree.SubElement(processing, 'Navigation')
+
+        # (4) Reference Node
+        ETree.SubElement(navigation, 'Reference', type='char').text = \
+            self.transects[self.checked_transect_idx[0]].w_vel.nav_ref
+
+        # (4) CompositeTrack
+        ETree.SubElement(navigation, 'CompositeTrack', type='char').text = \
+            self.transects[self.checked_transect_idx[0]].boat_vel.composite
+
+        # (4) MagneticVariation Node
+        mag_var = self.transects[self.checked_transect_idx[0]].sensors.heading_deg.internal.mag_var_deg
+        ETree.SubElement(navigation, 'MagneticVariation', type='double',
+                         unitsCode='deg').text = '{:.2f}'.format(mag_var)
+
+        # (4) BeamFilter
+        nav_data = getattr(self.transects[self.checked_transect_idx[0]].boat_vel,
+                           self.transects[self.checked_transect_idx[0]].boat_vel.selected)
+        temp = nav_data.beam_filter
+        if temp < 0:
+            temp = 'Auto'
+        else:
+            temp = str(temp)
+        ETree.SubElement(navigation, 'BeamFilter', type='char').text = temp
+
+        # (4) ErrorVelocityFilter Node
+        evf = nav_data.d_filter
+        if evf == 'Manual':
+            evf = '{:.4f}'.format(nav_data.d_filter_thresholds)
+        ETree.SubElement(navigation, 'ErrorVelocityFilter', type='char', unitsCode='mps').text = evf
+
+        # (4) VerticalVelocityFilter Node
+        vvf = nav_data.w_filter
+        if vvf == 'Manual':
+            vvf = '{:.4f}'.format(nav_data.w_filter_threshold)
+        ETree.SubElement(navigation, 'VerticalVelocityFilter', type='char', unitsCode='mps').text = vvf
+
+        # (4) Use measurement thresholds
+        temp = nav_data.use_measurement_thresholds
+        if temp:
+            temp = 'Yes'
+        else:
+            temp = 'No'
+        ETree.SubElement(navigation, 'UseMeasurementThresholds', type='char').text = temp
+
+        # (4) OtherFilter Node
+        o_f = nav_data.smooth_filter
+        ETree.SubElement(navigation, 'OtherFilter', type='char').text = o_f
+
+        # (4) GPSDifferentialQualityFilter Node
+        temp = nav_data.gps_diff_qual_filter
+        if temp:
+            if isinstance(temp, (int, float)):
+                temp = str(temp)
+            ETree.SubElement(navigation, 'GPSDifferentialQualityFilter', type='char').text = temp
+
+        # (4) GPSAltitudeFilter Node
+        temp = nav_data.gps_altitude_filter
+        if temp:
+            if temp == 'Manual':
+                temp = nav_data.gps_altitude_filter_change
+            ETree.SubElement(navigation, 'GPSAltitudeFilter', type='char', unitsCode='m').text = str(temp)
+
+        # (4) HDOPChangeFilter
+        temp = nav_data.gps_HDOP_filter
+        if temp:
+            if temp == 'Manual':
+                temp = '{:.2f}'.format(nav_data.gps_HDOP_filter_change)
+            ETree.SubElement(navigation, 'HDOPChangeFilter', type='char').text = temp
+
+        # (4) HDOPThresholdFilter
+        temp = nav_data.gps_HDOP_filter
+        if temp:
+            if temp == 'Manual':
+                temp = '{:.2f}'.format(nav_data.gps_HDOP_filter_max)
+            ETree.SubElement(navigation, 'HDOPThresholdFilter', type='char').text = temp
+
+        # (4) InterpolationType Node
+        temp = nav_data.interpolate
+        ETree.SubElement(navigation, 'InterpolationType', type='char').text = temp
+
+        # (3) Depth Node
+        depth = ETree.SubElement(processing, 'Depth')
+
+        # (4) Reference Node
+        if self.transects[self.checked_transect_idx[0]].depths.selected == 'bt_depths':
+            temp = 'BT'
+        elif self.transects[self.checked_transect_idx[0]].depths.selected == 'vb_depths':
+            temp = 'VB'
+        elif self.transects[self.checked_transect_idx[0]].depths.selected == 'ds_depths':
+            temp = 'DS'
+        ETree.SubElement(depth, 'Reference', type='char').text = temp
+
+        # (4) CompositeDepth Node
+        ETree.SubElement(depth, 'CompositeDepth', type='char').text = \
+            self.transects[self.checked_transect_idx[0]].depths.composite
+
+        # (4) ADCPDepth Node
+        depth_data = getattr(self.transects[self.checked_transect_idx[0]].depths,
+                             self.transects[self.checked_transect_idx[0]].depths.selected)
+        temp = depth_data.draft_use_m
+        ETree.SubElement(depth, 'ADCPDepth', type='double', unitsCode='m').text = '{:.4f}'.format(temp)
+
+        # (4) ADCPDepthConsistent Node
+        drafts = []
+        for transect in self.transects:
+            if transect.checked:
+                transect_depth = getattr(transect.depths, transect.depths.selected)
+                drafts.append(transect_depth.draft_use_m)
+        unique_drafts = set(drafts)
+        num_drafts = len(unique_drafts)
+        if num_drafts > 1:
+            temp = 'No'
+        else:
+            temp = 'Yes'
+        ETree.SubElement(depth, 'ADCPDepthConsistent', type='boolean').text = temp
+
+        # (4) FilterType Node
+        temp = depth_data.filter_type
+        ETree.SubElement(depth, 'FilterType', type='char').text = temp
+
+        # (4) InterpolationType Node
+        temp = depth_data.interp_type
+        ETree.SubElement(depth, 'InterpolationType', type='char').text = temp
+
+        # (4) AveragingMethod Node
+        temp = depth_data.avg_method
+        ETree.SubElement(depth, 'AveragingMethod', type='char').text = temp
+
+        # (4) ValidDataMethod Node
+        temp = depth_data.valid_data_method
+        ETree.SubElement(depth, 'ValidDataMethod', type='char').text = temp
+
+        # (3) WaterTrack Node
+        water_track = ETree.SubElement(processing, 'WaterTrack')
+
+        # (4) ExcludedDistance Node
+        temp = self.transects[self.checked_transect_idx[0]].w_vel.excluded_dist_m
+        ETree.SubElement(water_track, 'ExcludedDistance', type='double', unitsCode='m').text = '{:.4f}'.format(temp)
+
+        # (4) BeamFilter Node
+        temp = self.transects[self.checked_transect_idx[0]].w_vel.beam_filter
+        if temp < 0:
+            temp = 'Auto'
+        else:
+            temp = str(temp)
+        ETree.SubElement(water_track, 'BeamFilter', type='char').text = temp
+
+        # (4) ErrorVelocityFilter Node
+        temp = self.transects[self.checked_transect_idx[0]].w_vel.d_filter
+        if temp == 'Manual':
+            temp = '{:.4f}'.format(self.transects[self.checked_transect_idx[0]].w_vel.d_filter_thresholds)
+        ETree.SubElement(water_track, 'ErrorVelocityFilter', type='char', unitsCode='mps').text = temp
+
+        # (4) VerticalVelocityFilter Node
+        temp = self.transects[self.checked_transect_idx[0]].w_vel.w_filter
+        if temp == 'Manual':
+            temp = '{:.4f}'.format(self.transects[self.checked_transect_idx[0]].w_vel.w_filter_thresholds)
+        ETree.SubElement(water_track, 'VerticalVelocityFilter', type='char', unitsCode='mps').text = temp
+
+        # (4) Use measurement thresholds
+        temp = self.transects[self.checked_transect_idx[0]].w_vel.use_measurement_thresholds
+        if temp:
+            temp = 'Yes'
+        else:
+            temp = 'No'
+        ETree.SubElement(water_track, 'UseMeasurementThresholds', type='char').text = temp
+
+        # (4) OtherFilter Node
+        temp = self.transects[self.checked_transect_idx[0]].w_vel.smooth_filter
+        ETree.SubElement(water_track, 'OtherFilter', type='char').text = temp
+
+        # (4) SNRFilter Node
+        temp = self.transects[self.checked_transect_idx[0]].w_vel.snr_filter
+        ETree.SubElement(water_track, 'SNRFilter', type='char').text = temp
+
+        # (4) CellInterpolation Node
+        temp = self.transects[self.checked_transect_idx[0]].w_vel.interpolate_cells
+        ETree.SubElement(water_track, 'CellInterpolation', type='char').text = temp
+
+        # (4) EnsembleInterpolation Node
+        temp = self.transects[self.checked_transect_idx[0]].w_vel.interpolate_ens
+        ETree.SubElement(water_track, 'EnsembleInterpolation', type='char').text = temp
+
+        # (3) Edge Node
+        edge = ETree.SubElement(processing, 'Edge')
+
+        # (4) RectangularEdgeMethod Node
+        temp = self.transects[self.checked_transect_idx[0]].edges.rec_edge_method
+        ETree.SubElement(edge, 'RectangularEdgeMethod', type='char').text = temp
+
+        # (4) VelocityMethod Node
+        temp = self.transects[self.checked_transect_idx[0]].edges.vel_method
+        ETree.SubElement(edge, 'VelocityMethod', type='char').text = temp
+
+        # (4) LeftType Node
+        typ = []
+        for transect in self.transects:
+            if transect.checked:
+                typ.append(transect.edges.left.type)
+        unique_type = set(typ)
+        num_types = len(unique_type)
+        if num_types > 1:
+            temp = 'Varies'
+        else:
+            temp = typ[0]
+        ETree.SubElement(edge, 'LeftType', type='char').text = temp
+
+        # LeftEdgeCoefficient
+        if temp == 'User Q':
+            temp = 'N/A'
+        elif temp == 'Varies':
+            temp = 'N/A'
+        else:
+            coef = []
+            for transect in self.transects:
+                if transect.checked:
+                    coef.append(QComp.edge_coef('left', transect))
+            num_coef = len(set(coef))
+            if num_coef > 1:
+                temp = 'Varies'
+            else:
+                temp = '{:.4f}'.format(coef[0])
+        ETree.SubElement(edge, 'LeftEdgeCoefficient', type='char').text = temp
+
+        # (4) RightType Node
+        typ = []
+        for transect in self.transects:
+            if transect.checked:
+                typ.append(transect.edges.right.type)
+        unique_type = set(typ)
+        num_types = len(unique_type)
+        if num_types > 1:
+            temp = 'Varies'
+        else:
+            temp = typ[0]
+        ETree.SubElement(edge, 'RightType', type='char').text = temp
+
+        # RightEdgeCoefficient
+        if temp == 'User Q':
+            temp = 'N/A'
+        elif temp == 'Varies':
+            temp = 'N/A'
+        else:
+            coef = []
+            for transect in self.transects:
+                if transect.checked:
+                    coef.append(QComp.edge_coef('right', transect))
+            num_coef = len(set(coef))
+            if num_coef > 1:
+                temp = 'Varies'
+            else:
+                temp = '{:.4f}'.format(coef[0])
+        ETree.SubElement(edge, 'RightEdgeCoefficient', type='char').text = temp
+
+        # (3) Extrapolation Node
+        extrap = ETree.SubElement(processing, 'Extrapolation')
+
+        # (4) TopMethod Node
+        temp = self.transects[self.checked_transect_idx[0]].extrap.top_method
+        ETree.SubElement(extrap, 'TopMethod', type='char').text = temp
+
+        # (4) BottomMethod Node
+        temp = self.transects[self.checked_transect_idx[0]].extrap.bot_method
+        ETree.SubElement(extrap, 'BottomMethod', type='char').text = temp
+
+        # (4) Exponent Node
+        temp = self.transects[self.checked_transect_idx[0]].extrap.exponent
+        ETree.SubElement(extrap, 'Exponent', type='double').text = '{:.4f}'.format(temp)
+
+        # (4) Discharge weighted medians
+        temp = self.extrap_fit.use_weighted
+        if temp:
+            temp = 'Yes'
+        else:
+            temp = 'No'
+        ETree.SubElement(extrap, 'UseWeighted', type='char').text = temp
+
+        # (3) Sensor Node
+        sensor = ETree.SubElement(processing, 'Sensor')
+
+        # (4) TemperatureSource Node
+        temp = []
+        for transect in self.transects:
+            if transect.checked:
+                temp.append(transect.sensors.temperature_deg_c.selected)
+        sources = len(set(temp))
+        if sources > 1:
+            temp = 'Varies'
+        else:
+            temp = temp[0]
+        ETree.SubElement(sensor, 'TemperatureSource', type='char').text = temp
+
+        # (4) Salinity
+        temp = np.array([])
+        for transect in self.transects:
+            if transect.checked:
+                sal_selected = getattr(transect.sensors.salinity_ppt, transect.sensors.salinity_ppt.selected)
+                temp = np.append(temp, sal_selected.data)
+        values = np.unique(temp)
+        if len(values) > 1:
+            temp = 'Varies'
+        else:
+            temp = '{:2.1f}'.format(values[0])
+        ETree.SubElement(sensor, 'Salinity', type='char', unitsCode='ppt').text = temp
+
+        # (4) SpeedofSound Node
+        temp = []
+        for transect in self.transects:
+            if transect.checked:
+                temp.append(transect.sensors.speed_of_sound_mps.selected)
+        sources = len(set(temp))
+        if sources > 1:
+            temp = 'Varies'
+        else:
+            temp = temp[0]
+        if temp == 'internal':
+            temp = 'ADCP'
+        ETree.SubElement(sensor, 'SpeedofSound', type='char', unitsCode='mps').text = temp
+
+        # (2) Transect Node
+        other_prop = Measurement.compute_measurement_properties(self)
+        for n in range(len(self.transects)):
+            if self.transects[n].checked:
+                transect = ETree.SubElement(channel, 'Transect')
+
+                # (3) Filename Node
+                temp = self.transects[n].file_name
+                ETree.SubElement(transect, 'Filename', type='char').text = temp
+
+                # (3) StartDateTime Node
+                temp = int(self.transects[n].date_time.start_serial_time)
+                temp = datetime.datetime.utcfromtimestamp(temp).strftime('%m/%d/%Y %H:%M:%S')
+                ETree.SubElement(transect, 'StartDateTime', type='char').text = temp
+
+                # (3) EndDateTime Node
+                temp = int(self.transects[n].date_time.end_serial_time)
+                temp = datetime.datetime.utcfromtimestamp(temp).strftime('%m/%d/%Y %H:%M:%S')
+                ETree.SubElement(transect, 'EndDateTime', type='char').text = temp
+
+                # (3) Discharge Node
+                t_q = ETree.SubElement(transect, 'Discharge')
+
+                # (4) Top Node
+                temp = self.discharge[n].top
+                ETree.SubElement(t_q, 'Top', type='double', unitsCode='cms').text = '{:.5f}'.format(temp)
+
+                # (4) Middle Node
+                temp = self.discharge[n].middle
+                ETree.SubElement(t_q, 'Middle', type='double', unitsCode='cms').text = '{:.5f}'.format(temp)
+
+                # (4) Bottom Node
+                temp = self.discharge[n].bottom
+                ETree.SubElement(t_q, 'Bottom', type='double', unitsCode='cms').text = '{:.5f}'.format(temp)
+
+                # (4) Left Node
+                temp = self.discharge[n].left
+                ETree.SubElement(t_q, 'Left', type='double', unitsCode='cms').text = '{:.5f}'.format(temp)
+
+                # (4) Right Node
+                temp = self.discharge[n].right
+                ETree.SubElement(t_q, 'Right', type='double', unitsCode='cms').text = '{:.5f}'.format(temp)
+
+                # (4) Total Node
+                temp = self.discharge[n].total
+                ETree.SubElement(t_q, 'Total', type='double', unitsCode='cms').text = '{:.5f}'.format(temp)
+
+                # (4) MovingBedPercentCorrection Node
+                temp = ((self.discharge[n].total / self.discharge[n].total_uncorrected) - 1) * 100
+                ETree.SubElement(t_q, 'MovingBedPercentCorrection', type='double').text = '{:.2f}'.format(temp)
+
+                # (3) Edge Node
+                t_edge = ETree.SubElement(transect, 'Edge')
+
+                # (4) StartEdge Node
+                temp = self.transects[n].start_edge
+                ETree.SubElement(t_edge, 'StartEdge', type='char').text = temp
+
+                # (4) RectangularEdgeMethod Node
+                temp = self.transects[n].edges.rec_edge_method
+                ETree.SubElement(t_edge, 'RectangularEdgeMethod', type='char').text = temp
+
+                # (4) VelocityMethod Node
+                temp = self.transects[n].edges.vel_method
+                ETree.SubElement(t_edge, 'VelocityMethod', type='char').text = temp
+
+                # (4) LeftType Node
+                temp = self.transects[n].edges.left.type
+                ETree.SubElement(t_edge, 'LeftType', type='char').text = temp
+
+                # (4) LeftEdgeCoefficient Node
+                if temp == 'User Q':
+                    temp = ''
+                else:
+                    temp = '{:.4f}'.format(QComp.edge_coef('left', self.transects[n]))
+                ETree.SubElement(t_edge, 'LeftEdgeCoefficient', type='double').text = temp
+
+                # (4) LeftDistance Node
+                temp = '{:.4f}'.format(self.transects[n].edges.left.distance_m)
+                ETree.SubElement(t_edge, 'LeftDistance', type='double', unitsCode='m').text = temp
+
+                # (4) LeftNumberEnsembles
+                temp = '{:.0f}'.format(self.transects[n].edges.left.number_ensembles)
+                ETree.SubElement(t_edge, 'LeftNumberEnsembles', type='double').text = temp
+
+                # (4) RightType Node
+                temp = self.transects[n].edges.right.type
+                ETree.SubElement(t_edge, 'RightType', type='char').text = temp
+
+                # (4) RightEdgeCoefficient Node
+                if temp == 'User Q':
+                    temp = ''
+                else:
+                    temp = '{:.4f}'.format(QComp.edge_coef('right', self.transects[n]))
+                ETree.SubElement(t_edge, 'RightEdgeCoefficient', type='double').text = temp
+
+                # (4) RightDistance Node
+                temp = '{:.4f}'.format(self.transects[n].edges.right.distance_m)
+                ETree.SubElement(t_edge, 'RightDistance', type='double', unitsCode='m').text = temp
+
+                # (4) RightNumberEnsembles Node
+                temp = '{:.0f}'.format(self.transects[n].edges.right.number_ensembles)
+                ETree.SubElement(t_edge, 'RightNumberEnsembles', type='double').text = temp
+
+                # (3) Sensor Node
+                t_sensor = ETree.SubElement(transect, 'Sensor')
+
+                # (4) TemperatureSource Node
+                temp = self.transects[n].sensors.temperature_deg_c.selected
+                ETree.SubElement(t_sensor, 'TemperatureSource', type='char').text = temp
+
+                # (4) MeanTemperature Node
+                dat = getattr(self.transects[n].sensors.temperature_deg_c,
+                              self.transects[n].sensors.temperature_deg_c.selected)
+                temp = np.nanmean(dat.data)
+                temp = '{:.2f}'.format(temp)
+                ETree.SubElement(t_sensor, 'MeanTemperature', type='double', unitsCode='degC').text = temp
+
+                # (4) MeanSalinity
+                sal_data = getattr(self.transects[n].sensors.salinity_ppt,
+                                   self.transects[n].sensors.salinity_ppt.selected)
+                temp = '{:.0f}'.format(np.nanmean(sal_data.data))
+                ETree.SubElement(t_sensor, 'MeanSalinity', type='double', unitsCode='ppt').text = temp
+
+                # (4) SpeedofSoundSource Node
+                sos_selected = getattr(self.transects[n].sensors.speed_of_sound_mps,
+                                       self.transects[n].sensors.speed_of_sound_mps.selected)
+                temp = sos_selected.source
+                ETree.SubElement(t_sensor, 'SpeedofSoundSource', type='char').text = temp
+
+                # (4) SpeedofSound
+                sos_data = getattr(self.transects[n].sensors.speed_of_sound_mps,
+                                   self.transects[n].sensors.speed_of_sound_mps.selected)
+                temp = '{:.4f}'.format(np.nanmean(sos_data.data))
+                ETree.SubElement(t_sensor, 'SpeedofSound', type='double', unitsCode='mps').text = temp
+
+                # (3) Other Node
+                t_other = ETree.SubElement(transect, 'Other')
+
+                # (4) Duration Node
+                temp = '{:.2f}'.format(self.transects[n].date_time.transect_duration_sec)
+                ETree.SubElement(t_other, 'Duration', type='double', unitsCode='sec').text = temp
+
+                # (4) Width
+                temp = other_prop['width'][n]
+                ETree.SubElement(t_other, 'Width', type='double', unitsCode='m').text = '{:.4f}'.format(temp)
+
+                # (4) Area
+                temp = other_prop['area'][n]
+                ETree.SubElement(t_other, 'Area', type='double', unitsCode='sqm').text = '{:.4f}'.format(temp)
+
+                # (4) MeanBoatSpeed
+                temp = other_prop['avg_boat_speed'][n]
+                ETree.SubElement(t_other, 'MeanBoatSpeed', type='double', unitsCode='mps').text = '{:.4f}'.format(temp)
+
+                # (4) QoverA
+                temp = other_prop['avg_water_speed'][n]
+                ETree.SubElement(t_other, 'QoverA', type='double', unitsCode='mps').text = '{:.4f}'.format(temp)
+
+                # (4) CourseMadeGood
+                temp = other_prop['avg_boat_course'][n]
+                ETree.SubElement(t_other, 'CourseMadeGood', type='double', unitsCode='deg').text = '{:.2f}'.format(temp)
+
+                # (4) MeanFlowDirection
+                temp = other_prop['avg_water_dir'][n]
+                ETree.SubElement(t_other, 'MeanFlowDirection', type='double',
+                                 unitsCode='deg').text = '{:.2f}'.format(temp)
+
+                # (4) NumberofEnsembles
+                temp = len(self.transects[n].boat_vel.bt_vel.u_processed_mps)
+                ETree.SubElement(t_other, 'NumberofEnsembles', type='integer').text = str(temp)
+
+                # (4) PercentInvalidBins
+                valid_ens, valid_cells = TransectData.raw_valid_data(self.transects[n])
+                temp = (1 - (np.nansum(np.nansum(valid_cells))
+                             / np.nansum(np.nansum(self.transects[n].w_vel.cells_above_sl)))) * 100
+                ETree.SubElement(t_other, 'PercentInvalidBins', type='double').text = '{:.2f}'.format(temp)
+
+                # (4) PercentInvalidEnsembles
+                temp = (1 - (np.nansum(valid_ens) / len(self.transects[n].boat_vel.bt_vel.u_processed_mps))) * 100
+                ETree.SubElement(t_other, 'PercentInvalidEns', type='double').text = '{:.2f}'.format(temp)
+
+                pitch_source_selected = getattr(self.transects[n].sensors.pitch_deg,
+                                                self.transects[n].sensors.pitch_deg.selected)
+                roll_source_selected = getattr(self.transects[n].sensors.roll_deg,
+                                               self.transects[n].sensors.roll_deg.selected)
+
+                # (4) MeanPitch
+                temp = np.nanmean(pitch_source_selected.data)
+                ETree.SubElement(t_other, 'MeanPitch', type='double', unitsCode='deg').text = '{:.2f}'.format(temp)
+
+                # (4) MeanRoll
+                temp = np.nanmean(roll_source_selected.data)
+                ETree.SubElement(t_other, 'MeanRoll', type='double', unitsCode='deg').text = '{:.2f}'.format(temp)
+
+                # (4) PitchStdDev
+                temp = np.nanstd(pitch_source_selected.data, ddof=1)
+                ETree.SubElement(t_other, 'PitchStdDev', type='double', unitsCode='deg').text = '{:.2f}'.format(temp)
+
+                # (4) RollStdDev
+                temp = np.nanstd(roll_source_selected.data, ddof=1)
+                ETree.SubElement(t_other, 'RollStdDev', type='double', unitsCode='deg').text = '{:.2f}'.format(temp)
+
+                # (4) ADCPDepth
+                depth_source_selected = getattr(self.transects[n].depths,
+                                                self.transects[n].depths.selected)
+                temp = depth_source_selected.draft_use_m
+                ETree.SubElement(t_other, 'ADCPDepth', type='double', unitsCode='m').text = '{:.4f}'.format(temp)
+
+        # (2) ChannelSummary Node
+        summary = ETree.SubElement(channel, 'ChannelSummary')
+
+        # (3) Discharge Node
+        s_q = ETree.SubElement(summary, 'Discharge')
+        discharge = Measurement.mean_discharges(self)
+
+        # (4) Top
+        temp = discharge['top_mean']
+        ETree.SubElement(s_q, 'Top', type='double', unitsCode='cms').text = '{:.5f}'.format(temp)
+
+        # (4) Middle
+        temp = discharge['mid_mean']
+        ETree.SubElement(s_q, 'Middle', type='double', unitsCode='cms').text = '{:.5f}'.format(temp)
+
+        # (4) Bottom
+        temp = discharge['bot_mean']
+        ETree.SubElement(s_q, 'Bottom', type='double', unitsCode='cms').text = '{:.5f}'.format(temp)
+
+        # (4) Left
+        temp = discharge['left_mean']
+        ETree.SubElement(s_q, 'Left', type='double', unitsCode='cms').text = '{:.5f}'.format(temp)
+
+        # (4) Right
+        temp = discharge['right_mean']
+        ETree.SubElement(s_q, 'Right', type='double', unitsCode='cms').text = '{:.5f}'.format(temp)
+
+        # (4) Total
+        temp = discharge['total_mean']
+        ETree.SubElement(s_q, 'Total', type='double', unitsCode='cms').text = '{:.5f}'.format(temp)
+
+        # (4) MovingBedPercentCorrection
+        temp = ((discharge['total_mean'] / discharge['uncorrected_mean']) - 1) * 100
+        ETree.SubElement(s_q, 'MovingBedPercentCorrection', type='double').text = '{:.2f}'.format(temp)
+
+        # (3) Uncertainty Node
+        s_u = ETree.SubElement(summary, 'Uncertainty')
+        if self.run_oursin:
+            u_total = self.oursin.u_measurement_user['total_95'][0]
+            u_model = 'OURSIN'
+        else:
+            u_total = self.uncertainty.total_95_user
+            u_model = 'QRevUA'
+
+        if not np.isnan(u_total):
+            ETree.SubElement(s_u, 'Total', type='double').text = '{:.1f}'.format(u_total)
+            ETree.SubElement(s_u, 'Model', type='char').text = u_model
+
+        # (3) QRev_UA Uncertainty Node
+        if self.uncertainty is not None:
+            s_qu = ETree.SubElement(summary, 'QRevUAUncertainty')
+            uncertainty = self.uncertainty
+
+            # (4) COV Node
+            temp = uncertainty.cov
+            if not np.isnan(temp):
+                ETree.SubElement(s_qu, 'COV', type='double').text = '{:.1f}'.format(temp)
+
+            # (4) AutoRandom Node
+            temp = uncertainty.cov_95
+            if not np.isnan(temp):
+                ETree.SubElement(s_qu, 'AutoRandom', type='double').text = '{:.1f}'.format(temp)
+
+            # (4) AutoInvalidData Node
+            temp = uncertainty.invalid_95
+            ETree.SubElement(s_qu, 'AutoInvalidData', type='double').text = '{:.1f}'.format(temp)
+
+            # (4) AutoEdge Node
+            temp = uncertainty.edges_95
+            ETree.SubElement(s_qu, 'AutoEdge', type='double').text = '{:.1f}'.format(temp)
+
+            # (4) AutoExtrapolation Node
+            temp = uncertainty.extrapolation_95
+            ETree.SubElement(s_qu, 'AutoExtrapolation', type='double').text = '{:.1f}'.format(temp)
+
+            # (4) AutoMovingBed
+            temp = uncertainty.moving_bed_95
+            ETree.SubElement(s_qu, 'AutoMovingBed', type='double').text = '{:.1f}'.format(temp)
+
+            # (4) AutoSystematic
+            temp = uncertainty.systematic
+            ETree.SubElement(s_qu, 'AutoSystematic', type='double').text = '{:.1f}'.format(temp)
+
+            # (4) AutoTotal
+            temp = uncertainty.total_95
+            if not np.isnan(temp):
+                ETree.SubElement(s_qu, 'AutoTotal', type='double').text = '{:.1f}'.format(temp)
+
+            # (4) UserRandom Node
+            user_random = uncertainty.cov_95_user
+            if user_random:
+                ETree.SubElement(s_qu, 'UserRandom', type='double').text = '{:.1f}'.format(user_random)
+
+            # (4) UserInvalidData Node
+            user_invalid = uncertainty.invalid_95_user
+            if user_invalid:
+                ETree.SubElement(s_qu, 'UserInvalidData', type='double').text = '{:.1f}'.format(user_invalid)
+
+            # (4) UserEdge
+            user_edge = uncertainty.edges_95_user
+            if user_edge:
+                ETree.SubElement(s_qu, 'UserEdge', type='double').text = '{:.1f}'.format(user_edge)
+
+            # (4) UserExtrapolation
+            user_extrap = uncertainty.extrapolation_95_user
+            if user_extrap:
+                ETree.SubElement(s_qu, 'UserExtrapolation', type='double').text = '{:.1f}'.format(user_extrap)
+
+            # (4) UserMovingBed
+            user_mb = uncertainty.moving_bed_95_user
+            if user_mb:
+                ETree.SubElement(s_qu, 'UserMovingBed', type='double').text = '{:.1f}'.format(user_mb)
+
+            # (4) UserSystematic
+            user_systematic = uncertainty.systematic_user
+            if user_systematic:
+                ETree.SubElement(s_qu, 'UserSystematic', type='double').text = '{:.1f}'.format(user_systematic)
+
+            # (4) UserTotal Node
+            temp = uncertainty.total_95_user
+            if not np.isnan(temp):
+                ETree.SubElement(s_qu, 'UserTotal', type='double').text = '{:.1f}'.format(temp)
+
+            # (4) Random
+            if user_random:
+                temp = user_random
+            else:
+                temp = uncertainty.cov_95
+            if not np.isnan(temp):
+                ETree.SubElement(s_qu, 'Random', type='double').text = '{:.1f}'.format(temp)
+
+            # (4) InvalidData
+            if user_invalid:
+                temp = user_invalid
+            else:
+                temp = uncertainty.invalid_95
+            ETree.SubElement(s_qu, 'InvalidData', type='double').text = '{:.1f}'.format(temp)
+
+            # (4) Edge
+            if user_edge:
+                temp = user_edge
+            else:
+                temp = uncertainty.edges_95
+            ETree.SubElement(s_qu, 'Edge', type='double').text = '{:.1f}'.format(temp)
+
+            # (4) Extrapolation
+            if user_extrap:
+                temp = user_extrap
+            else:
+                temp = uncertainty.extrapolation_95
+            ETree.SubElement(s_qu, 'Extrapolation', type='double').text = '{:.1f}'.format(temp)
+
+            # (4) MovingBed
+            if user_mb:
+                temp = user_mb
+            else:
+                temp = uncertainty.moving_bed_95
+            ETree.SubElement(s_qu, 'MovingBed', type='double').text = '{:.1f}'.format(temp)
+
+            # (4) Systematic
+            if user_systematic:
+                temp = user_systematic
+            else:
+                temp = uncertainty.systematic
+            ETree.SubElement(s_qu, 'Systematic', type='double').text = '{:.1f}'.format(temp)
+
+            # (4) Total Node
+            temp = uncertainty.total_95_user
+            if not np.isnan(temp):
+                ETree.SubElement(s_qu, 'Total', type='double').text = '{:.1f}'.format(temp)
+
+        # Oursin Uncertainty Node
+        if self.oursin is not None:
+            # (3) Uncertainty Node
+            s_ou = ETree.SubElement(summary, 'OursinUncertainty')
+            oursin = self.oursin
+
+            # (4) System Node
+            temp = oursin.u_measurement['u_syst'][0]
+            if not np.isnan(temp):
+                ETree.SubElement(s_ou, 'System', type='double').text = '{:.2f}'.format(temp)
+
+            # (4) Compass Node
+            temp = oursin.u_measurement['u_compass'][0]
+            if not np.isnan(temp):
+                ETree.SubElement(s_ou, 'Compass', type='double').text = '{:.2f}'.format(temp)
+
+            # (4) Moving-bed Node
+            temp = oursin.u_measurement['u_movbed'][0]
+            if not np.isnan(temp):
+                ETree.SubElement(s_ou, 'MovingBed', type='double').text = '{:.2f}'.format(temp)
+
+            # (4) Ensembles Node
+            temp = oursin.u_measurement['u_ens'][0]
+            if not np.isnan(temp):
+                ETree.SubElement(s_ou, 'Ensembles', type='double').text = '{:.2f}'.format(temp)
+
+            # (4) Measured Node
+            temp = oursin.u_measurement['u_meas'][0]
+            if not np.isnan(temp):
+                ETree.SubElement(s_ou, 'Measured', type='double').text = '{:.2f}'.format(temp)
+
+            # (4) Top Node
+            temp = oursin.u_measurement['u_top'][0]
+            if not np.isnan(temp):
+                ETree.SubElement(s_ou, 'Top', type='double').text = '{:.2f}'.format(temp)
+
+            # (4) Bottom Node
+            temp = oursin.u_measurement['u_bot'][0]
+            if not np.isnan(temp):
+                ETree.SubElement(s_ou, 'Bottom', type='double').text = '{:.2f}'.format(temp)
+
+            # (4) Left Node
+            temp = oursin.u_measurement['u_left'][0]
+            if not np.isnan(temp):
+                ETree.SubElement(s_ou, 'Left', type='double').text = '{:.2f}'.format(temp)
+
+            # (4) Right Node
+            temp = oursin.u_measurement['u_right'][0]
+            if not np.isnan(temp):
+                ETree.SubElement(s_ou, 'Right', type='double').text = '{:.2f}'.format(temp)
+
+            # (4) Invalid Boat Node
+            temp = oursin.u_measurement['u_boat'][0]
+            if not np.isnan(temp):
+                ETree.SubElement(s_ou, 'InvalidBoat', type='double').text = '{:.2f}'.format(temp)
+
+            # (4) Invalid Depth Node
+            temp = oursin.u_measurement['u_depth'][0]
+            if not np.isnan(temp):
+                ETree.SubElement(s_ou, 'InvalidDepth', type='double').text = '{:.2f}'.format(temp)
+
+            # (4) Invalid Water Node
+            temp = oursin.u_measurement['u_water'][0]
+            if not np.isnan(temp):
+                ETree.SubElement(s_ou, 'InvalidWater', type='double').text = '{:.2f}'.format(temp)
+
+            # (4) COV Node
+            temp = oursin.u_measurement['u_cov'][0]
+            if not np.isnan(temp):
+                ETree.SubElement(s_ou, 'COV', type='double').text = '{:.2f}'.format(temp)
+
+            # (4) Auto Total 95% Node
+            temp = oursin.u_measurement['total_95'][0]
+            if not np.isnan(temp):
+                ETree.SubElement(s_ou, 'AutoTotal95', type='double').text = '{:.2f}'.format(temp)
+
+            # (4) Extrapolation Power/Power Minimum
+            temp = oursin.default_advanced_settings['exp_pp_min']
+            if type(temp) is float:
+                ETree.SubElement(s_ou, 'ExtrapPPMin', type='double').text = '{:.2f}'.format(temp)
+            else:
+                ETree.SubElement(s_ou, 'ExtrapPPMin', type='char').text = temp
+
+            # (4) Extrapolation Power/Power Maximum
+            temp = oursin.default_advanced_settings['exp_pp_max']
+            if type(temp) is float:
+                ETree.SubElement(s_ou, 'ExtrapPPMax', type='double').text = '{:.2f}'.format(temp)
+            else:
+                ETree.SubElement(s_ou, 'ExtrapPPMax', type='char').text = temp
+
+            # (4) Extrapolation No Slip Minimum
+            temp = oursin.default_advanced_settings['exp_ns_min']
+            if type(temp) is float:
+                ETree.SubElement(s_ou, 'ExtrapNSMin', type='double').text = '{:.2f}'.format(temp)
+            else:
+                ETree.SubElement(s_ou, 'ExtrapNSMin', type='char').text = temp
+
+            # (4) Extrapolation No Slip Maximum
+            temp = oursin.default_advanced_settings['exp_ns_max']
+            if type(temp) is float:
+                ETree.SubElement(s_ou, 'ExtrapNSMax', type='double').text = '{:.2f}'.format(temp)
+            else:
+                ETree.SubElement(s_ou, 'ExtrapNSMax', type='char').text = temp
+
+            # (4) Draft error in m
+            temp = oursin.default_advanced_settings['draft_error_m']
+            if type(temp) is float:
+                ETree.SubElement(s_ou, 'DraftErrorm', type='double').text = '{:.2f}'.format(temp)
+            else:
+                ETree.SubElement(s_ou, 'DraftErrorm', type='char').text = temp
+
+            # (4) Bin size error in percent
+            temp = oursin.default_advanced_settings['dzi_prct']
+            if not np.isnan(temp):
+                ETree.SubElement(s_ou, 'BinErrorPer', type='double').text = '{:.2f}'.format(temp)
+
+            # (4) Right edge distance error in percent
+            temp = oursin.default_advanced_settings['right_edge_dist_prct']
+            if not np.isnan(temp):
+                ETree.SubElement(s_ou, 'REdgeDistErrorPer', type='double').text = '{:.2f}'.format(temp)
+
+            # (4) Left edge distance error in percent
+            temp = oursin.default_advanced_settings['left_edge_dist_prct']
+            if not np.isnan(temp):
+                ETree.SubElement(s_ou, 'LEdgeDistErrorPer', type='double').text = '{:.2f}'.format(temp)
+
+            # (4) GGA Boat Velocity Error in mps
+            temp = oursin.default_advanced_settings['gga_boat_mps']
+            if type(temp) is float:
+                ETree.SubElement(s_ou, 'GGABoatVelErrormps', type='double').text = '{:.2f}'.format(temp)
+            else:
+                ETree.SubElement(s_ou, 'GGABoatVelErrormps', type='char').text = temp
+
+            # (4) VTG Boat Velocity Error in mps
+            temp = oursin.default_advanced_settings['vtg_boat_mps']
+            if not np.isnan(temp):
+                ETree.SubElement(s_ou, 'VTGBoatVelErrormps', type='double').text = '{:.2f}'.format(temp)
+
+            # (4) Compass Error in deg
+            temp = oursin.default_advanced_settings['compass_error_deg']
+            if not np.isnan(temp):
+                ETree.SubElement(s_ou, 'CompassErrordeg', type='double').text = '{:.2f}'.format(temp)
+
+            # (4) Bayesian COV prior in percent
+            temp = oursin.default_advanced_settings['cov_prior']
+            if not np.isnan(temp):
+                ETree.SubElement(s_ou, 'BayesCOVPriorper', type='double').text = '{:.2f}'.format(temp)
+
+            # (4) Bayesian COV prior uncertainty in percent
+            temp = oursin.default_advanced_settings['cov_prior_u']
+            if not np.isnan(temp):
+                ETree.SubElement(s_ou, 'BayesCOVPriorUncertaintyper', type='double').text = '{:.2f}'.format(temp)
+
+            # User
+
+            # (4) System Node
+            temp = oursin.u_measurement_user['u_syst'][0]
+            if not np.isnan(temp):
+                ETree.SubElement(s_ou, 'SystemUser', type='double').text = '{:.2f}'.format(temp)
+
+            # (4) Compass Node
+            temp = oursin.u_measurement_user['u_compass'][0]
+            if not np.isnan(temp):
+                ETree.SubElement(s_ou, 'CompassUser', type='double').text = '{:.2f}'.format(temp)
+
+            # (4) Moving-bed Node
+            temp = oursin.u_measurement_user['u_movbed'][0]
+            if not np.isnan(temp):
+                ETree.SubElement(s_ou, 'MovingBedUser', type='double').text = '{:.2f}'.format(temp)
+
+            # (4) Ensembles Node
+            temp = oursin.u_measurement_user['u_ens'][0]
+            if not np.isnan(temp):
+                ETree.SubElement(s_ou, 'EnsemblesUser', type='double').text = '{:.2f}'.format(temp)
+
+            # (4) Measured Node
+            temp = oursin.u_measurement_user['u_meas'][0]
+            if not np.isnan(temp):
+                ETree.SubElement(s_ou, 'MeasuredUser', type='double').text = '{:.2f}'.format(temp)
+
+            # (4) Top Node
+            temp = oursin.u_measurement_user['u_top'][0]
+            if not np.isnan(temp):
+                ETree.SubElement(s_ou, 'TopUser', type='double').text = '{:.2f}'.format(temp)
+
+            # (4) Bottom Node
+            temp = oursin.u_measurement_user['u_bot'][0]
+            if not np.isnan(temp):
+                ETree.SubElement(s_ou, 'BottomUser', type='double').text = '{:.2f}'.format(temp)
+
+            # (4) Left Node
+            temp = oursin.u_measurement_user['u_left'][0]
+            if not np.isnan(temp):
+                ETree.SubElement(s_ou, 'LeftUser', type='double').text = '{:.2f}'.format(temp)
+
+            # (4) Right Node
+            temp = oursin.u_measurement_user['u_right'][0]
+            if not np.isnan(temp):
+                ETree.SubElement(s_ou, 'RightUser', type='double').text = '{:.2f}'.format(temp)
+
+            # (4) Invalid Boat Node
+            temp = oursin.u_measurement_user['u_boat'][0]
+            if not np.isnan(temp):
+                ETree.SubElement(s_ou, 'InvalidBoatUser', type='double').text = '{:.2f}'.format(temp)
+
+            # (4) Invalid Depth Node
+            temp = oursin.u_measurement_user['u_depth'][0]
+            if not np.isnan(temp):
+                ETree.SubElement(s_ou, 'InvalidDepthUser', type='double').text = '{:.2f}'.format(temp)
+
+            # (4) Invalid Water Node
+            temp = oursin.u_measurement_user['u_water'][0]
+            if not np.isnan(temp):
+                ETree.SubElement(s_ou, 'InvalidWaterUser', type='double').text = '{:.2f}'.format(temp)
+
+            # (4) Auto Total 95% Node
+            temp = oursin.u_measurement_user['total_95'][0]
+            if not np.isnan(temp):
+                ETree.SubElement(s_ou, 'AutoTotal95User', type='double').text = '{:.2f}'.format(temp)
+
+            # (4) Extrapolation Power/Power Minimum
+            temp = oursin.user_advanced_settings['exp_pp_min_user']
+            if not np.isnan(temp):
+                ETree.SubElement(s_ou, 'ExtrapPPMinUser', type='double').text = '{:.2f}'.format(temp)
+
+            # (4) Extrapolation Power/Power Maximum
+            temp = oursin.user_advanced_settings['exp_pp_max_user']
+            if not np.isnan(temp):
+                ETree.SubElement(s_ou, 'ExtrapPPMaxUser', type='double').text = '{:.2f}'.format(temp)
+
+            # (4) Extrapolation No Slip Minimum
+            temp = oursin.user_advanced_settings['exp_ns_min_user']
+            if not np.isnan(temp):
+                ETree.SubElement(s_ou, 'ExtrapNSMinUser', type='double').text = '{:.2f}'.format(temp)
+
+            # (4) Extrapolation No Slip Maximum
+            temp = oursin.user_advanced_settings['exp_ns_max_user']
+            if not np.isnan(temp):
+                ETree.SubElement(s_ou, 'ExtrapNSMaxUser', type='double').text = '{:.2f}'.format(temp)
+
+            # (4) Draft error in m
+            temp = oursin.user_advanced_settings['draft_error_m_user']
+            if not np.isnan(temp):
+                ETree.SubElement(s_ou, 'DraftErrormUser', type='double').text = '{:.2f}'.format(temp)
+
+            # (4) Bin size error in percent
+            temp = oursin.user_advanced_settings['dzi_prct_user']
+            if not np.isnan(temp):
+                ETree.SubElement(s_ou, 'BinErrorperUser', type='double').text = '{:.2f}'.format(temp)
+
+            # (4) Right edge distance error in percent
+            temp = oursin.user_advanced_settings['right_edge_dist_prct_user']
+            if not np.isnan(temp):
+                ETree.SubElement(s_ou, 'REdgeDistErrorperUser', type='double').text = '{:.2f}'.format(temp)
+
+            # (4) Left edge distance error in percent
+            temp = oursin.user_advanced_settings['left_edge_dist_prct_user']
+            if not np.isnan(temp):
+                ETree.SubElement(s_ou, 'LEdgeDistErrorperUser', type='double').text = '{:.2f}'.format(temp)
+
+            # (4) GGA Boat Velocity Error in mps
+            temp = oursin.user_advanced_settings['gga_boat_mps_user']
+            if not np.isnan(temp):
+                ETree.SubElement(s_ou, 'GGABoatVelErrormpsUser', type='double').text = '{:.2f}'.format(temp)
+
+            # (4) VTG Boat Velocity Error in mps
+            temp = oursin.user_advanced_settings['vtg_boat_mps_user']
+            if not np.isnan(temp):
+                ETree.SubElement(s_ou, 'VTGBoatVelErrormpsUser', type='double').text = '{:.2f}'.format(temp)
+
+            # (4) Compass Error in deg
+            temp = oursin.user_advanced_settings['compass_error_deg_user']
+            if not np.isnan(temp):
+                ETree.SubElement(s_ou, 'CompassErrordegUser', type='double').text = '{:.2f}'.format(temp)
+
+            # (4) Bayesian COV prior in percent
+            temp = oursin.user_advanced_settings['cov_prior_user']
+            if not np.isnan(temp):
+                ETree.SubElement(s_ou, 'BayesCOVPriorperUser', type='double').text = '{:.2f}'.format(temp)
+
+            # (4) Bayesian COV prior uncertainty in percent
+            temp = oursin.user_advanced_settings['cov_prior_u_user']
+            if not np.isnan(temp):
+                ETree.SubElement(s_ou, 'BayesCOVPriorUncertaintyperUser', type='double').text = '{:.2f}'.format(temp)
+
+        # (3) Other Node
+        s_o = ETree.SubElement(summary, 'Other')
+
+        # (4) MeanWidth
+        temp = other_prop['width'][-1]
+        ETree.SubElement(s_o, 'MeanWidth', type='double', unitsCode='m').text = '{:.4f}'.format(temp)
+
+        # (4) WidthCOV
+        temp = other_prop['width_cov'][-1]
+        if not np.isnan(temp):
+            ETree.SubElement(s_o, 'WidthCOV', type='double').text = '{:.4f}'.format(temp)
+
+        # (4) MeanArea
+        temp = other_prop['area'][-1]
+        ETree.SubElement(s_o, 'MeanArea', type='double', unitsCode='sqm').text = '{:.4f}'.format(temp)
+
+        # (4) AreaCOV
+        temp = other_prop['area_cov'][-1]
+        if not np.isnan(temp):
+            ETree.SubElement(s_o, 'AreaCOV', type='double').text = '{:.2f}'.format(temp)
+
+        # (4) MeanBoatSpeed
+        temp = other_prop['avg_boat_speed'][-1]
+        ETree.SubElement(s_o, 'MeanBoatSpeed', type='double', unitsCode='mps').text = '{:.4f}'.format(temp)
+
+        # (4) MeanQoverA
+        temp = other_prop['avg_water_speed'][-1]
+        ETree.SubElement(s_o, 'MeanQoverA', type='double', unitsCode='mps').text = '{:.4f}'.format(temp)
+
+        # (4) MeanCourseMadeGood
+        temp = other_prop['avg_boat_course'][-1]
+        ETree.SubElement(s_o, 'MeanCourseMadeGood', type='double', unitsCode='deg').text = '{:.2f}'.format(temp)
+
+        # (4) MeanFlowDirection
+        temp = other_prop['avg_water_dir'][-1]
+        ETree.SubElement(s_o, 'MeanFlowDirection', type='double', unitsCode='deg').text = '{:.2f}'.format(temp)
+
+        # (4) MeanDepth
+        temp = other_prop['avg_depth'][-1]
+        ETree.SubElement(s_o, 'MeanDepth', type='double', unitsCode='m').text = '{:.4f}'.format(temp)
+
+        # (4) MaximumDepth
+        temp = other_prop['max_depth'][-1]
+        ETree.SubElement(s_o, 'MaximumDepth', type='double', unitsCode='m').text = '{:.4f}'.format(temp)
+
+        # (4) MaximumWaterSpeed
+        temp = other_prop['max_water_speed'][-1]
+        ETree.SubElement(s_o, 'MaximumWaterSpeed', type='double', unitsCode='mps').text = '{:.4f}'.format(temp)
+
+        # (4) NumberofTransects
+        temp = len(self.checked_transects(self))
+        ETree.SubElement(s_o, 'NumberofTransects', type='integer').text = str(temp)
+
+        # (4) Duration
+        temp = self.measurement_duration(self)
+        ETree.SubElement(s_o, 'Duration', type='double', unitsCode='sec').text = '{:.2f}'.format(temp)
+
+        # (4) LeftQPer
+        temp = 100 * discharge['left_mean'] / discharge['total_mean']
+        ETree.SubElement(s_o, 'LeftQPer', type='double').text = '{:.2f}'.format(temp)
+
+        # (4) RightQPer
+        temp = 100 * discharge['right_mean'] / discharge['total_mean']
+        ETree.SubElement(s_o, 'RightQPer', type='double').text = '{:.2f}'.format(temp)
+
+        # (4) InvalidCellsQPer
+        temp = 100 * discharge['int_cells_mean'] / discharge['total_mean']
+        ETree.SubElement(s_o, 'InvalidCellsQPer', type='double').text = '{:.2f}'.format(temp)
+
+        # (4) InvalidEnsQPer
+        temp = 100 * discharge['int_ensembles_mean'] / discharge['total_mean']
+        ETree.SubElement(s_o, 'InvalidEnsQPer', type='double').text = '{:.2f}'.format(temp)
+
+        # (4) UserRating
+        if self.user_rating:
+            temp = self.user_rating
+        else:
+            temp = 'Not Rated'
+        ETree.SubElement(s_o, 'UserRating', type='char').text = temp
+
+        # (4) DischargePPDefault
+        temp = self.extrap_fit.q_sensitivity.q_pp_mean
+        ETree.SubElement(s_o, 'DischargePPDefault', type='double').text = '{:.2f}'.format(temp)
+
+        # (2) UserComment
+        if len(self.comments) > 1:
+            temp = ''
+            for comment in self.comments:
+                temp = temp + comment.replace('\n', ' |||') + ' |||'
+            ETree.SubElement(channel, 'UserComment', type='char').text = temp
+
+        # Create xml output file
+        with open(file_name, 'wb') as xml_file:
+            # Create binary coded output file
+            et = ETree.ElementTree(channel)
+            root = et.getroot()
+            xml_out = ETree.tostring(root)
+            # Add stylesheet instructions
+            xml_out = b'<?xml-stylesheet type= "text/xsl" href="QRevStylesheet.xsl"?>' + xml_out
+            # Add tabs to make output more readable and apply utf-8 encoding
+            xml_out = parseString(xml_out).toprettyxml(encoding='utf-8')
+            # Write file
+            xml_file.write(xml_out)
+
+    @staticmethod
+    def add_transect(mmt, filename, index, transect_type):
+        """Processes a pd0 file into a TransectData object.
+
+        Parameters
+        ----------
+        mmt: MMTtrdi
+            Object of MMTtrdi
+        filename: str
+            Pd0 filename to be processed
+        index: int
+            Index to file in the mmt
+        transect_type: str
+            Indicates type of transect discharge (Q), or moving_bed (MB)
+
+        Returns
+        -------
+        transect: TransectData
+            Object of TransectData
+        """
+        pd0_data = Pd0TRDI(filename)
+
+        if transect_type == 'MB':
+            mmt_transect = mmt.mbt_transects[index]
+        else:
+            mmt_transect = mmt.transects[index]
+
+        transect = TransectData()
+        transect.trdi(mmt=mmt,
+                      mmt_transect=mmt_transect,
+                      pd0_data=pd0_data)
+        return transect
+
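+    # Minimal usage sketch (hypothetical: the class name Measurement and the pd0
+    # filename are illustrative; mmt is a previously loaded MMTtrdi object):
+    #
+    #     transect = Measurement.add_transect(mmt, 'site_000.PD0', 0, 'Q')
+    #     if transect.w_vel is not None:
+    #         ...  # transect contains valid water velocity data
+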
+    def allocate_transects(self, mmt, transect_type='Q', checked=False):
+        """Method to load transect data. Changed from Matlab approach by Greg to allow possibility
+        of multi-thread approach.
+
+        Parameters
+        ----------
+        mmt: MMT_TRDI
+            Object of MMT_TRDI
+        transect_type: str
+            Type of transect (Q: discharge or MB: moving-bed test)
+        checked: bool
+            Determines if all files are loaded (False) or only checked files (True)
+        """
+
+        file_names = []
+        file_idx = []
+
+        # Setup processing for discharge or moving-bed transects
+        if transect_type == 'Q':
+            # Identify discharge transect files to load
+            if checked:
+                for idx, transect in enumerate(mmt.transects):
+                    if transect.Checked == 1:
+                        file_names.append(transect.Files[0])
+                        file_idx.append(idx)
+
+            else:
+                file_names = [transect.Files[0] for transect in mmt.transects]
+                file_idx = list(range(0, len(file_names)))
+
+        elif transect_type == 'MB':
+            file_names = [transect.Files[0] for transect in mmt.mbt_transects]
+            file_idx = list(range(0, len(file_names)))
+
+        # Determine if any files are missing
+        valid_files = []
+        valid_indices = []
+        for index, name in enumerate(file_names):
+            fullname = os.path.join(mmt.path, name)
+            if os.path.exists(fullname):
+                valid_files.append(fullname)
+                valid_indices.append(file_idx[index])
+
+        transects = []
+        num = len(valid_indices)
+
+        for k in range(num):
+            temp = self.add_transect(mmt, valid_files[k], valid_indices[k], transect_type)
+            if temp.w_vel is not None:
+                transects.append(temp)
+
+        return transects
+
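+    # Usage sketch (hypothetical call; mmt is a previously loaded MMT_TRDI object):
+    #
+    #     transects = self.allocate_transects(mmt, transect_type='Q', checked=True)
+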
+
+if __name__ == '__main__':
+    pass
diff --git a/Classes/MovingBedTests.py b/Classes/MovingBedTests.py
new file mode 100644
index 0000000000000000000000000000000000000000..117ab213f96cd9c0d5064dad98de424c4d6b2f67
--- /dev/null
+++ b/Classes/MovingBedTests.py
@@ -0,0 +1,1038 @@
+import copy
+import numpy as np
+from Classes.TransectData import adjusted_ensemble_duration
+from Classes.TransectData import TransectData
+from Classes.QComp import QComp
+from Classes.MatSonTek import MatSonTek
+from MiscLibs.common_functions import cart2pol, sind, pol2cart, rad2azdeg, nan_less, nan_greater
+
+
+class MovingBedTests(object):
+    """Stores and processes moving-bed tests.
+
+    Attributes
+    ----------
+    type: str
+        Loop or Stationary
+    transect: TransectData
+        Object of TransectData
+    duration_sec: float
+        Duration of test, in secs
+    percent_invalid_bt: float
+        Percent of invalid bottom track
+    compass_diff_deg: float
+        Difference in heading for out and back of loop
+    flow_dir: float
+        Mean flow direction from loop test
+    mb_dir: float
+        Moving bed or closure error direction
+    dist_us_m: float
+        Distance moved upstream, in m
+    flow_spd_mps: float
+        Magnitude of water velocity, in mps
+    mb_spd_mps: float
+        Magnitude of moving-bed velocity, in mps
+    percent_mb: float
+        Potential error due to moving bed, in percent
+    moving_bed: str
+        Moving-bed determined ("Yes" or "No")
+    user_valid: bool
+        Boolean to allow user to determine if test should be considered a valid test (True or False)
+    test_quality: str
+        Quality of test: 'Good', 'Warnings', or 'Errors'
+    use_2_correct: bool
+        Use this test to correct discharge (True or False)
+    selected: bool
+        Selected as valid moving-bed test to use for correction or determine moving-bed condition
+    messages: list
+        List of strings for warning and error messages based on data processing
+    near_bed_speed_mps: float
+        Mean near-bed water speed for test, in mps
+    stationary_us_track: np.array(float)
+        Upstream component of the bottom track referenced ship track
+    stationary_cs_track: np.array(float)
+        Cross-stream component of the bottom track referenced ship track
+    stationary_mb_vel: np.array(float)
+        Moving-bed velocity by ensemble, m/s
+    ref: str
+        Identifies reference used to compute moving bed
+    bt_percent_mb: float
+        Percent moving-bed using only BT
+    bt_dist_us_m: float
+        Distance upstream using only BT
+    bt_mb_dir: float
+        Moving-bed direction using only BT
+    bt_mb_spd_mps: float
+        Moving-bed speed using only BT
+    bt_flow_spd_mps: float
+        Corrected flow speed using only BT
+    gps_percent_mb: float
+        Percent moving-bed using BT and GPS
+    gps_dist_us_m: float
+        Distance upstream using BT and GPS
+    gps_mb_dir: float
+        Moving-bed direction using BT and GPS
+    gps_mb_spd_mps: float
+        Moving-bed speed using BT and GPS
+    gps_flow_spd_mps: float
+        Corrected flow speed using BT and GPS
+    """
+    
+    def __init__(self):
+        """Initialize class and instance variables."""
+
+        self.type = None  # Loop or Stationary
+        self.transect = None  # Object of TransectData
+        self.duration_sec = np.nan  # Duration of test in secs
+        self.percent_invalid_bt = np.nan  # Percent of invalid bottom track
+        self.compass_diff_deg = np.nan  # Difference in heading for out and back of loop
+        self.flow_dir = np.nan  # Mean flow direction from loop test
+        self.mb_dir = np.nan  # Moving bed or closure error direction
+        self.dist_us_m = np.nan  # Distance moved upstream in m
+        self.flow_spd_mps = np.nan  # Magnitude of water velocity in mps
+        self.mb_spd_mps = np.nan  # Magnitude of moving-bed velocity in mps
+        self.percent_mb = np.nan  # Potential error due to moving bed in percent
+        self.moving_bed = np.nan  # Moving-bed determined 'Yes' 'No'
+        self.user_valid = True  # Logical to allow user to determine if test should be considered a valid test
+        self.test_quality = None  # Quality of test 'Good' 'Warnings' 'Errors'
+        self.use_2_correct = None  # Use this test to correct discharge
+        self.selected = None  # Selected valid moving-bed test to use for correction or determine moving-bed condition
+        self.messages = None  # Cell array of warning and error messages based on data processing
+        self.near_bed_speed_mps = np.nan  # Mean near-bed water speed for test in mps
+        self.stationary_us_track = np.array([])  # Upstream component of the bottom track referenced ship track
+        self.stationary_cs_track = np.array([])  # Cross-stream component of the bottom track referenced ship track
+        self.stationary_mb_vel = np.array([])  # Moving-bed velocity by ensemble
+        self.ref = 'BT'
+        self.bt_percent_mb = np.nan
+        self.bt_dist_us_m = np.nan
+        self.bt_mb_dir = np.nan
+        self.bt_mb_spd_mps = np.nan
+        self.bt_flow_spd_mps = np.nan
+        self.gps_percent_mb = np.nan
+        self.gps_dist_us_m = np.nan
+        self.gps_mb_dir = np.nan
+        self.gps_mb_spd_mps = np.nan
+        self.gps_flow_spd_mps = np.nan
+        
+    def populate_data(self, source, file=None, test_type=None):
+        """Process and store moving-bed test data.
+
+        Parameters
+        ----------
+        source: str
+            Manufacturer of ADCP, SonTek or TRDI
+        file: TransectData or str
+            Object of TransectData for TRDI and str of filename for SonTek
+        test_type: str
+            Type of moving-bed test (Loop or Stationary)
+        """
+
+        if source == 'TRDI':
+            self.mb_trdi(file, test_type)
+        else:
+            self.mb_sontek(file, test_type)
+
+        self.process_mb_test(source)
+
+    def process_mb_test(self, source):
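+        """Coordinates the processing of a moving-bed test.
+
+        Parameters
+        ----------
+        source: str
+            Manufacturer of ADCP, SonTek or TRDI
+        """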
+        
+        # Convert to earth coordinates and set the navigation reference to BT
+        # for both boat and water data
+        # self.transect.boat_vel.bt_vel.apply_interpolation(transect=self.transect, interpolation_method='Linear')
+        self.transect.change_coord_sys(new_coord_sys='Earth')
+        self.transect.change_nav_reference(update=True, new_nav_ref='BT')
+            
+        # Adjust data for default manufacturer specific handling of invalid data
+        delta_t = adjusted_ensemble_duration(self.transect, 'mbt')
+        
+        if self.type == 'Loop':
+            if source == 'TRDI':
+                self.loop_test(delta_t)
+            else:
+                self.loop_test()
+        elif self.type == 'Stationary':
+            self.stationary_test()
+        else:
+            raise ValueError('Invalid moving-bed test identifier specified.')
+
+    @staticmethod
+    def qrev_mat_in(meas_struct):
+        """Processes the Matlab data structure to obtain a list of TransectData objects containing transect
+           data from the Matlab data structure.
+
+       Parameters
+       ----------
+       meas_struct: mat_struct
+           Matlab data structure obtained from sio.loadmat
+
+       Returns
+       -------
+       mb_tests: list
+           List of MovingBedTests objects
+       """
+
+        mb_tests = []
+        if hasattr(meas_struct, 'mbTests'):
+            try:
+                # If there are multiple tests, the Matlab structure will be an array
+                if type(meas_struct.mbTests) == np.ndarray:
+                    for test in meas_struct.mbTests:
+                        temp = MovingBedTests()
+                        temp.populate_from_qrev_mat(test)
+                        mb_tests.append(temp)
+                # If only one test, that test is not stored in an array
+                else:
+                    temp = MovingBedTests()
+                    temp.populate_from_qrev_mat(meas_struct.mbTests)
+                    mb_tests.append(temp)
+            except (TypeError, AttributeError):
+                pass
+        return mb_tests
+
+    def populate_from_qrev_mat(self, mat_data):
+        """Populates the object using data from previously saved QRev Matlab file.
+
+        Parameters
+        ----------
+        mat_data: mat_struct
+           Matlab data structure obtained from sio.loadmat
+        """
+
+        self.type = mat_data.type
+        self.transect = TransectData()
+        self.transect.populate_from_qrev_mat(mat_data.transect)
+
+        # QRev.mat may return an empty array instead of a float
+        self.duration_sec = self.return_float(mat_data.duration_sec)
+        self.percent_invalid_bt = self.return_float(mat_data.percentInvalidBT)
+        self.compass_diff_deg = self.return_float(mat_data.compassDiff_deg)
+        self.flow_dir = self.return_float(mat_data.flowDir_deg)
+        self.mb_dir = self.return_float(mat_data.mbDir_deg)
+        self.dist_us_m = self.return_float(mat_data.distUS_m)
+        self.flow_spd_mps = self.return_float(mat_data.flowSpd_mps)
+        self.mb_spd_mps = self.return_float(mat_data.mbSpd_mps)
+        self.percent_mb = self.return_float(mat_data.percentMB)
+        self.near_bed_speed_mps = self.return_float(mat_data.nearBedSpeed_mps)
+
+        self.moving_bed = mat_data.movingBed
+        self.user_valid = bool(mat_data.userValid)
+        self.test_quality = mat_data.testQuality
+        self.use_2_correct = bool(mat_data.use2Correct)
+        self.selected = bool(mat_data.selected)
+
+        # Handle situation for one or more messages
+        if type(mat_data.messages) == np.ndarray:
+            self.messages = mat_data.messages.tolist()
+        else:
+            self.messages = [mat_data.messages]
+
+        self.stationary_us_track = mat_data.stationaryUSTrack
+        self.stationary_cs_track = mat_data.stationaryCSTrack
+        self.stationary_mb_vel = mat_data.stationaryMBVel
+
+        # Feature that can use GPS for moving-bed tests
+        if hasattr(mat_data, 'bt_percent_mb'):
+            self.bt_percent_mb = self.return_float(mat_data.bt_percent_mb)
+            self.bt_dist_us_m = self.return_float(mat_data.bt_dist_us_m)
+            self.bt_mb_dir = self.return_float(mat_data.bt_mb_dir)
+            self.bt_mb_spd_mps = self.return_float(mat_data.bt_mb_spd_mps)
+            self.bt_flow_spd_mps = self.return_float(mat_data.bt_flow_spd_mps)
+            self.gps_percent_mb = self.return_float(mat_data.gps_percent_mb)
+            self.gps_dist_us_m = self.return_float(mat_data.gps_dist_us_m)
+            self.gps_mb_dir = self.return_float(mat_data.gps_mb_dir)
+            self.gps_mb_spd_mps = self.return_float(mat_data.gps_mb_spd_mps)
+        else:
+            self.bt_percent_mb = self.percent_mb
+            self.bt_dist_us_m = self.dist_us_m
+            self.bt_mb_dir = self.mb_dir
+            self.bt_mb_spd_mps = self.mb_spd_mps
+            self.bt_flow_spd_mps = self.flow_spd_mps
+            self.compute_mb_gps()
+
+    @staticmethod
+    def return_float(data):
+        """Handles situation where the Matlab file is an empty array
+
+        Parameters
+        ----------
+        data: float or np.ndarray
+            Any variable
+        """
+        if type(data) is float:
+            return data
+        elif type(data) is np.ndarray:
+            if data.size == 0:
+                return np.nan
+            else:
+                return MovingBedTests.make_list(data)
+        else:
+            return np.nan
+
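+    # Illustrative behavior (examples only, not executed by the processing flow):
+    #     return_float(3.2)          -> 3.2
+    #     return_float(np.array([])) -> np.nan
+    #     return_float(None)         -> np.nan
+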
+    @staticmethod
+    def make_list(array_in):
+        """Method to make list from several special cases that can occur in the Matlab data.
+
+        Parameters
+        ----------
+        array_in: np.ndarray
+            Input that needs to be convert to a list
+        """
+
+        # This traps messages with the associated codes
+        if array_in.size > 3:
+            list_out = array_in.tolist()
+        else:
+            # Create a list of lists
+            temp = array_in.tolist()
+            if len(temp) > 0:
+                internal_list = []
+                for item in temp:
+                    internal_list.append(item)
+                list_out = [internal_list]
+            else:
+                list_out = np.nan
+        return list_out
+
+    def mb_trdi(self, transect, test_type):
+        """Function to create object properties for TRDI moving-bed tests
+
+        Parameters
+        ----------
+        transect: TransectData
+            Object of TransectData
+        test_type: str
+            Type of moving-bed test."""
+        
+        self.transect = transect
+        self.user_valid = True
+        self.type = test_type
+
+    def mb_sontek(self, file_name, test_type):
+        """Function to create object properties for SonTek moving-bed tests
+
+        Parameters
+        ----------
+        file_name: str
+            Name of moving-bed test data file
+        test_type: str
+            Type of moving-bed test."""
+        self.type = test_type
+
+        # Read Matlab file for moving-bed test
+        rsdata = MatSonTek(file_name)
+
+        # Create transect objects for each discharge transect
+        self.transect = TransectData()
+        self.transect.sontek(rsdata, file_name)
+        
+    def loop_test(self, ens_duration=None, ref='BT'):
+        """Process loop moving bed test.
+
+        Parameters
+        ----------
+        ens_duration: np.array(float)
+            Duration of each ensemble, in sec
+        ref: str
+            Navigation reference ('BT' or 'GPS') used to report the moving-bed characteristics
+        """
+
+        # Assign data from transect to local variables
+        # self.transect.boat_interpolations(update=False, target='BT', method='Linear')
+        # self.transect.boat_interpolations(update=False, target='GPS', method='Linear')
+        trans_data = copy.deepcopy(self.transect)
+        in_transect_idx = trans_data.in_transect_idx
+        n_ensembles = len(in_transect_idx)
+        bt_valid = trans_data.boat_vel.bt_vel.valid_data[0, in_transect_idx]
+
+        # Set variables to defaults
+        self.messages = []
+        vel_criteria = 0.012  # Minimum moving-bed velocity criterion, in mps
+
+        # Check that there is some valid BT data
+        if np.nansum(bt_valid) > 1:
+            wt_u = trans_data.w_vel.u_processed_mps[:, in_transect_idx]
+            wt_v = trans_data.w_vel.v_processed_mps[:, in_transect_idx]
+            if ens_duration is None:
+                ens_duration = trans_data.date_time.ens_duration_sec[in_transect_idx]
+
+            bt_u = trans_data.boat_vel.bt_vel.u_processed_mps[in_transect_idx]
+            bt_v = trans_data.boat_vel.bt_vel.v_processed_mps[in_transect_idx]
+            bin_size = trans_data.depths.bt_depths.depth_cell_size_m[:, in_transect_idx]
+
+            # Compute closure distance and direction
+            bt_x = np.nancumsum(bt_u * ens_duration)
+            bt_y = np.nancumsum(bt_v * ens_duration)
+            direct, self.bt_dist_us_m = cart2pol(bt_x[-1], bt_y[-1])
+            self.bt_mb_dir = rad2azdeg(direct)
+
+            # Compute duration of test
+            self.duration_sec = np.nansum(ens_duration)
+
+            # Compute the moving-bed velocity
+            self.bt_mb_spd_mps = self.bt_dist_us_m / self.duration_sec
+
+            # Compute discharge weighted mean velocity components for the
+            # purpose of computing the mean flow direction
+            xprod = QComp.cross_product(transect=trans_data)
+            q = QComp.discharge_middle_cells(xprod, trans_data, ens_duration)
+            wght = np.abs(q)
+            se = np.nansum(np.nansum(wt_u * wght)) / np.nansum(np.nansum(wght))
+            sn = np.nansum(np.nansum(wt_v * wght)) / np.nansum(np.nansum(wght))
+            direct, flow_speed_q = cart2pol(se, sn)
+
+            # Compute flow speed and direction
+            self.flow_dir = rad2azdeg(direct)
+            
+            # Compute the area weighted mean velocity components for the
+            # purpose of computing the mean flow speed. Area weighting is used instead of discharge
+            # weighting so that the flow speed itself is not included in the weights used to compute its mean.
+            wght_area = np.multiply(np.multiply(np.sqrt(bt_u ** 2 + bt_v ** 2), bin_size), ens_duration)
+            idx = np.where(np.isnan(wt_u) == False)
+            se = np.nansum(np.nansum(wt_u[idx] * wght_area[idx])) / np.nansum(np.nansum(wght_area[idx]))
+            sn = np.nansum(np.nansum(wt_v[idx] * wght_area[idx])) / np.nansum(np.nansum(wght_area[idx]))
+            dir_a, self.bt_flow_spd_mps = cart2pol(se, sn)
+            self.bt_flow_spd_mps = self.bt_flow_spd_mps + self.bt_mb_spd_mps
+
+            # Compute potential error in BT referenced discharge
+            self.bt_percent_mb = (self.bt_mb_spd_mps / self.bt_flow_spd_mps) * 100
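+            # For example, a moving-bed speed of 0.05 mps with a corrected flow speed
+            # of 1.0 mps gives a potential error of 5 percent.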
+
+            # Compute test with GPS
+            self.compute_mb_gps()
+
+            # Store selected test characteristics
+            if ref == 'BT':
+                self.mb_spd_mps = self.bt_mb_spd_mps
+                self.dist_us_m = self.bt_dist_us_m
+                self.percent_mb = self.bt_percent_mb
+                self.mb_dir = self.bt_mb_dir
+                self.flow_spd_mps = self.bt_flow_spd_mps
+            elif not np.isnan(self.gps_percent_mb):
+                self.mb_spd_mps = self.gps_mb_spd_mps
+                self.dist_us_m = self.gps_dist_us_m
+                self.percent_mb = self.gps_percent_mb
+                self.mb_dir = self.gps_mb_dir
+                self.flow_spd_mps = self.bt_flow_spd_mps
+
+            # Assess invalid bottom track
+            # Compute percent invalid bottom track
+            self.percent_invalid_bt = (np.nansum(bt_valid == False) / len(bt_valid)) * 100
+
+            # Determine if more than 9 consecutive seconds of invalid BT occurred
+            consect_bt_time = np.zeros(n_ensembles)
+            for n in range(1, n_ensembles):
+                if bt_valid[n]:
+                    consect_bt_time[n] = 0
+                else:
+                    consect_bt_time[n] = consect_bt_time[n - 1] + ens_duration[n]
+
+            max_consect_bt_time = np.nanmax(consect_bt_time)
+
+            # Evaluate compass calibration based on flow direction
+
+            # Find apex of loop adapted from
+            # http://www.mathworks.de/matlabcentral/newsreader/view_thread/164048
+            loop_out = np.array([bt_x[0], bt_y[0], 0])
+            loop_return = np.array([bt_x[-1], bt_y[-1], 0])
+
+            distance = np.zeros(n_ensembles)
+            for n in range(n_ensembles):
+                p = np.array([bt_x[n], bt_y[n], 0])
+                distance[n] = np.linalg.norm(np.cross(loop_return - loop_out, p - loop_out))  \
+                    / np.linalg.norm(loop_return - loop_out)
+
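+            # distance[n] is the perpendicular distance from ensemble n to the chord
+            # joining the loop's start and end points; the apex of the loop is taken
+            # as the ensemble farthest from that chord.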
+            dmg_idx = np.where(distance == np.nanmax(distance))[0][0]
+
+            # Compute flow direction on outgoing part of loop
+            u_out = wt_u[:, :dmg_idx + 1]
+            v_out = wt_v[:, :dmg_idx + 1]
+            wght = np.abs(q[:, :dmg_idx+1])
+            se = np.nansum(u_out * wght) / np.nansum(wght)
+            sn = np.nansum(v_out * wght) / np.nansum(wght)
+            direct, _ = cart2pol(se, sn)
+            flow_dir1 = rad2azdeg(direct)
+
+            # Compute unweighted flow direction in each cell
+            direct, _ = cart2pol(u_out, v_out)
+            flow_dir_cell = rad2azdeg(direct)
+
+            # Compute difference from mean and correct to +/- 180
+            v_dir_corr = flow_dir_cell - flow_dir1
+            v_dir_idx = nan_greater(v_dir_corr, 180)
+            v_dir_corr[v_dir_idx] = 360-v_dir_corr[v_dir_idx]
+            v_dir_idx = nan_less(v_dir_corr, -180)
+            v_dir_corr[v_dir_idx] = 360 + v_dir_corr[v_dir_idx]
+
+            # Number of valid weights
+            idx2 = np.where(np.isnan(wght) == False)
+            nwght = len(idx2[0])
+
+            # Compute 95% uncertainty using weighted standard deviation
+            uncert1 = 2. * np.sqrt(np.nansum(np.nansum(wght * v_dir_corr**2))
+                                   / (((nwght - 1) * np.nansum(np.nansum(wght))) / nwght)) / np.sqrt(nwght)
+
+            # Compute flow direction on returning part of loop
+            u_ret = wt_u[:, dmg_idx + 1:]
+            v_ret = wt_v[:, dmg_idx + 1:]
+            wght = np.abs(q[:, dmg_idx+1:])
+            se = np.nansum(u_ret * wght) / np.nansum(wght)
+            sn = np.nansum(v_ret * wght) / np.nansum(wght)
+            direct, _ = cart2pol(se, sn)
+            flow_dir2 = rad2azdeg(direct)
+
+            # Compute unweighted flow direction in each cell
+            direct, _ = cart2pol(u_ret, v_ret)
+            flow_dir_cell = rad2azdeg(direct)
+
+            # Compute difference from mean and correct to +/- 180
+            v_dir_corr = flow_dir_cell - flow_dir2
+            v_dir_idx = nan_greater(v_dir_corr, 180)
+            v_dir_corr[v_dir_idx] = 360 - v_dir_corr[v_dir_idx]
+            v_dir_idx = nan_less(v_dir_corr, -180)
+            v_dir_corr[v_dir_idx] = 360 + v_dir_corr[v_dir_idx]
+
+            # Number of valid weights
+            idx2 = np.where(np.isnan(wght) == False)
+            nwght = len(idx2[0])
+
+            # Compute 95% uncertainty using weighted standard deviation
+            uncert2 = 2.*np.sqrt(np.nansum(np.nansum(wght * v_dir_corr**2))
+                                 / (((nwght-1)*np.nansum(np.nansum(wght))) / nwght)) / np.sqrt(nwght)
+
+            # Compute and report difference in flow direction
+            diff_dir = np.abs(flow_dir1 - flow_dir2)
+            if diff_dir > 180:
+                diff_dir = diff_dir - 360
+            self.compass_diff_deg = diff_dir
+            uncert = uncert1 + uncert2
+
+            # Compute potential compass error
+            idx = np.where(np.isnan(bt_x) == False)
+            if len(idx[0]) > 0:
+                idx = idx[0][-1]
+            width = np.sqrt((bt_x[dmg_idx] - bt_x[idx] / 2) ** 2 + (bt_y[dmg_idx] - bt_y[idx] / 2) ** 2)
+            compass_error = (2 * width * sind(diff_dir / 2) * 100) / (self.duration_sec * self.flow_spd_mps)
+
+            # Initialize message counter
+            self.test_quality = 'Good'
+
+            # Low water velocity
+            if self.flow_spd_mps < 0.25:
+                self.messages.append('WARNING: The water velocity is less than recommended minimum for '
+                                     + 'this test and could cause the loop method to be inaccurate. '
+                                     + 'CONSIDER USING A STATIONARY TEST TO CHECK MOVING-BED CONDITIONS')
+                self.test_quality = 'Warnings'
+
+            # Percent invalid bottom track
+            if self.percent_invalid_bt > 20:
+                self.messages.append('ERROR: Percent invalid bottom track exceeds 20 percent. '
+                                     + 'THE LOOP IS NOT ACCURATE. TRY A STATIONARY MOVING-BED TEST.')
+                self.test_quality = 'Errors'
+            elif self.percent_invalid_bt > 5:
+                self.messages.append('WARNING: Percent invalid bottom track exceeds 5 percent. '
+                                     + 'Loop may not be accurate. PLEASE REVIEW DATA.')
+                self.test_quality = 'Warnings'
+
+            # More than 9 consecutive seconds of invalid BT
+            if max_consect_bt_time > 9:
+                self.messages.append('ERROR: Bottom track is invalid for more than 9 consecutive seconds. '
+                                     + 'THE LOOP IS NOT ACCURATE. TRY A STATIONARY MOVING-BED TEST.')
+                self.test_quality = 'Errors'
+
+            if np.abs(compass_error) > 5 and np.abs(diff_dir) > 3 and np.abs(diff_dir) > uncert:
+                self.messages.append('ERROR: Difference in flow direction between out and back sections of '
+                                     + 'loop could result in a 5 percent or greater error in final discharge. '
+                                     + 'REPEAT LOOP AFTER COMPASS CAL. OR USE A STATIONARY MOVING-BED TEST.')
+                self.test_quality = 'Errors'
+
+        else:
+            self.messages.append('ERROR: Loop has no valid bottom track data. '
+                                 + 'REPEAT OR USE A STATIONARY MOVING-BED TEST.')
+            self.test_quality = 'Errors'
+
+        # If loop is valid then evaluate moving-bed condition
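+        # Three criteria are applied in order: the moving-bed speed must exceed the
+        # 0.012 mps minimum, the closure error must fall within 45 degrees of directly
+        # upstream, and the percent moving bed must exceed 1 percent before a
+        # correction is recommended.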
+        if self.test_quality != 'Errors':
+
+            # Check minimum moving-bed velocity criteria
+            if self.mb_spd_mps > vel_criteria:
+                # Check that closure error is in upstream direction
+                if 135 < np.abs(self.flow_dir - self.mb_dir) < 225:
+                    # Check if moving-bed is greater than 1% of the mean flow speed
+                    if self.percent_mb > 1:
+                        self.messages.append('Loop Indicates a Moving Bed -- Use GPS as reference. If GPS is '
+                                             + 'unavailable or invalid use the loop method to correct the '
+                                             + 'final discharge.')
+                        self.moving_bed = 'Yes'
+                    else:
+                        self.messages.append('Moving Bed Velocity < 1% of Mean Velocity -- No Correction Recommended')
+                        self.moving_bed = 'No'
+                else:
+                    self.messages.append('ERROR: Loop closure error not in upstream direction. '
+                                         + 'REPEAT LOOP or USE STATIONARY TEST')
+                    self.test_quality = 'Errors'
+                    self.moving_bed = 'Unknown'
+            else:
+                self.messages.append('Moving-bed velocity < Minimum moving-bed velocity criteria '
+                                     + '-- No correction recommended')
+                self.moving_bed = 'No'
+
+            # Notify of differences in results of test between BT and GPS
+            if not np.isnan(self.gps_percent_mb):
+                if np.abs(self.bt_percent_mb - self.gps_percent_mb) > 2:
+                    self.messages.append('WARNING - Bottom track and GPS results differ by more than 2%.')
+                    self.test_quality = 'Warnings'
+
+                if np.logical_xor(self.bt_percent_mb >= 1,  self.gps_percent_mb >= 1):
+                    self.messages.append('WARNING - Bottom track and GPS results do not agree.')
+                    self.test_quality = 'Warnings'
+
+        else:
+            self.messages.append('ERROR: Due to ERRORS noted above this loop is NOT VALID. '
+                                 + 'Please consider suggestions.')
+            self.moving_bed = 'Unknown'
+
+    def stationary_test(self, ref='BT'):
+        """Processed the stationary moving-bed tests.
+        """
+
+        # Assign data from transect to local variables
+        trans_data = copy.deepcopy(self.transect)
+        in_transect_idx = trans_data.in_transect_idx
+        bt_valid = trans_data.boat_vel.bt_vel.valid_data[0, in_transect_idx]
+
+        # Check to see that there is valid bottom track data
+        self.messages = []
+        if np.nansum(bt_valid) > 0:
+            # Assign data to local variables
+            wt_u = trans_data.w_vel.u_processed_mps[:, in_transect_idx]
+            wt_v = trans_data.w_vel.v_processed_mps[:, in_transect_idx]
+            ens_duration = trans_data.date_time.ens_duration_sec[in_transect_idx]
+            bt_u = trans_data.boat_vel.bt_vel.u_processed_mps[in_transect_idx]
+            bt_v = trans_data.boat_vel.bt_vel.v_processed_mps[in_transect_idx]
+
+            # Use only data with valid bottom track
+            valid_bt = trans_data.boat_vel.bt_vel.valid_data[0, in_transect_idx]
+            wt_u[:, valid_bt == False] = np.nan
+            wt_v[:, valid_bt == False] = np.nan
+            bt_u[valid_bt == False] = np.nan
+            bt_v[valid_bt == False] = np.nan
+
+            u_water = np.nanmean(wt_u)
+            v_water = np.nanmean(wt_v)
+            self.flow_dir = np.arctan2(u_water, v_water) * 180 / np.pi
+            if self.flow_dir < 0:
+                self.flow_dir = self.flow_dir + 360
+
+            bin_depth = trans_data.depths.bt_depths.depth_cell_depth_m[:, in_transect_idx]
+            trans_select = getattr(trans_data.depths, trans_data.depths.selected)
+            depth_ens = trans_select.depth_processed_m[in_transect_idx]
+
+            nb_u, nb_v, unit_nbu, unit_nbv = self.near_bed_velocity(wt_u, wt_v, depth_ens, bin_depth)
+            
+            # Compute bottom track parallel to water velocity
+            unit_nb_vel = np.vstack([unit_nbu, unit_nbv])
+            bt_vel = np.vstack([bt_u, bt_v])
+            bt_vel_up_strm = -1 * np.sum(bt_vel * unit_nb_vel, 0)
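+            # Summing the element-wise product over components is a dot product that
+            # projects the boat velocity onto the unit near-bed flow direction; the
+            # sign is flipped so that upstream motion is positive.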
+            bt_up_strm_dist = bt_vel_up_strm * ens_duration
+            bt_up_strm_dist_cum = np.nancumsum(bt_up_strm_dist)
+            self.bt_dist_us_m = bt_up_strm_dist_cum[-1]
+
+            # Compute bottom track perpendicular to water velocity
+            nb_vel_ang, _ = cart2pol(unit_nbu, unit_nbv)
+            nb_vel_unit_cs1, nb_vel_unit_cs2 = pol2cart(nb_vel_ang + np.pi / 2, 1)
+            nb_vel_unit_cs = np.vstack([nb_vel_unit_cs1, nb_vel_unit_cs2])
+            bt_vel_cs = np.sum(bt_vel * nb_vel_unit_cs, 0)
+            bt_cs_strm_dist = bt_vel_cs * ens_duration
+            bt_cs_strm_dist_cum = np.nancumsum(bt_cs_strm_dist)
+            
+            # Compute cumulative mean moving bed velocity
+            valid_bt_vel_up_strm = np.isnan(bt_vel_up_strm) == False
+
+            mb_vel = np.nancumsum(bt_vel_up_strm) / np.nancumsum(valid_bt_vel_up_strm)
+
+            # Compute the average ensemble velocities corrected for moving bed
+            if mb_vel[-1] > 0:
+                u_corrected = np.add(wt_u, (unit_nb_vel[0, :]) * bt_vel_up_strm)
+                v_corrected = np.add(wt_v, (unit_nb_vel[1, :]) * bt_vel_up_strm)
+            else:
+                u_corrected = wt_u
+                v_corrected = wt_v
+                
+            # Compute the mean of the ensemble magnitudes
+
+            # Mean is computed using magnitudes because if a StreamPro with no compass is the data source, the
+            # change in direction could be either a real change in water direction or an uncompensated turn of
+            # the floating platform. This approach is the best compromise when there is no compass or the compass
+            # is unreliable, which is often why the stationary method is used. A weighted average is used to
+            # account for the possible change in cell size within an ensemble for the RiverRay and RiverPro.
+
+            mag = np.sqrt(u_corrected**2 + v_corrected**2)
+            depth_cell_size = trans_data.depths.bt_depths.depth_cell_size_m[:, in_transect_idx]
+            depth_cell_size[np.isnan(mag)] = np.nan
+            mag_w = mag * depth_cell_size
+            self.bt_flow_spd_mps = np.nansum(mag_w) / np.nansum(depth_cell_size)
+            self.bt_mb_spd_mps = mb_vel[-1]
+            self.bt_percent_mb = (self.bt_mb_spd_mps / self.bt_flow_spd_mps) * 100
+            if self.bt_percent_mb < 0:
+                self.bt_percent_mb = 0
+
+            # Compute percent invalid bottom track
+            self.percent_invalid_bt = (np.nansum(bt_valid == False) / len(bt_valid)) * 100
+            self.duration_sec = np.nansum(ens_duration)
+
+            # Compute test using GPS
+            self.compute_mb_gps()
+
+            # Store selected test characteristics
+            if ref == 'BT':
+                self.mb_spd_mps = self.bt_mb_spd_mps
+                self.dist_us_m = self.bt_dist_us_m
+                self.percent_mb = self.bt_percent_mb
+                self.mb_dir = self.bt_mb_dir
+                self.flow_spd_mps = self.bt_flow_spd_mps
+            elif not np.isnan(self.gps_percent_mb):
+                self.mb_spd_mps = self.gps_mb_spd_mps
+                self.dist_us_m = self.gps_dist_us_m
+                self.percent_mb = self.gps_percent_mb
+                self.mb_dir = self.gps_mb_dir
+                self.flow_spd_mps = self.bt_flow_spd_mps
+
+            self.near_bed_speed_mps = np.sqrt(np.nanmean(nb_u)**2 + np.nanmean(nb_v)**2)
+            self.stationary_us_track = bt_up_strm_dist_cum
+            self.stationary_cs_track = bt_cs_strm_dist_cum
+            self.stationary_mb_vel = mb_vel
+
+            # Quality check
+            self.test_quality = 'Good'
+            # Check duration
+            if self.duration_sec < 299:
+                self.messages.append('WARNING - Duration of stationary test is less than 5 minutes')
+                self.test_quality = 'Warnings'
+                
+            # Check validity of mean moving-bed velocity
+            if self.duration_sec > 60:
+                mb_vel_std = np.nanstd(mb_vel[-30:], ddof=1)
+                cov = mb_vel_std / mb_vel[-1]
+                if cov > 0.25 and mb_vel_std > 0.03:
+                    self.messages.append('WARNING - Moving-bed velocity may not be consistent. '
+                                         + 'Average may be inaccurate.')
+                    self.test_quality = 'Warnings'
+                    
+            # Check the total duration of valid BT data and the percentage of invalid BT ensembles
+            if np.nansum(ens_duration[valid_bt_vel_up_strm]) <= 120:
+                self.messages.append('ERROR - Total duration of valid BT data is insufficient for a valid test.')
+                self.test_quality = 'Errors'
+                self.moving_bed = 'Unknown'
+            elif self.percent_invalid_bt > 10:
+                self.messages.append('WARNING - Number of ensembles with invalid bottom track exceeds 10%')
+                self.test_quality = 'Warnings'
+                
+            # Determine if the test indicates a moving bed
+            if self.test_quality != 'Errors':
+                if self.percent_mb >= 1:
+                    self.moving_bed = 'Yes'
+                else:
+                    self.moving_bed = 'No'
+
+            # Notify of differences in results of test between BT and GPS
+            if not np.isnan(self.gps_percent_mb):
+                if np.abs(self.bt_percent_mb - self.gps_percent_mb) > 2:
+                    self.messages.append('WARNING - Bottom track and GPS results differ by more than 2%.')
+                    self.test_quality = 'Warnings'
+
+                if np.logical_xor(self.bt_percent_mb >= 1,  self.gps_percent_mb >= 1):
+                    self.messages.append('WARNING - Bottom track and GPS results do not agree.')
+                    self.test_quality = 'Warnings'
+
+        else:
+            self.messages.append('ERROR - Stationary moving-bed test has no valid bottom track data.')
+            self.test_quality = 'Errors'
+            self.moving_bed = 'Unknown'
+            self.duration_sec = np.nansum(trans_data.date_time.ens_duration_sec[in_transect_idx])
+            self.percent_invalid_bt = 100
+
+    def compute_mb_gps(self):
+        """Computes moving-bed data using GPS.
+        """
+        if np.isnan(self.flow_dir):
+            u_water = np.nanmean(self.transect.w_vel.u_processed_mps[:, self.transect.in_transect_idx])
+            v_water = np.nanmean(self.transect.w_vel.v_processed_mps[:, self.transect.in_transect_idx])
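+            # arctan2(east, north) yields the geographic azimuth, measured clockwise from north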
+            self.flow_dir = np.arctan2(u_water, v_water) * 180 / np.pi
+            if self.flow_dir < 0:
+                self.flow_dir = self.flow_dir + 360
+
+        gps_bt = None
+        # Use GGA data if available; otherwise use VTG
+        if self.transect.boat_vel.gga_vel is not None:
+            gps_bt = TransectData.compute_gps_bt(self.transect, gps_ref='gga_vel')
+        elif self.transect.boat_vel.vtg_vel is not None:
+            gps_bt = TransectData.compute_gps_bt(self.transect, gps_ref='vtg_vel')
+        if gps_bt is not None and len(gps_bt) > 0:
+            self.gps_dist_us_m = gps_bt['mag']
+            self.gps_mb_dir = gps_bt['dir']
+            self.gps_mb_spd_mps = self.gps_dist_us_m / self.duration_sec
+            self.gps_flow_spd_mps = self.bt_flow_spd_mps - self.bt_mb_spd_mps + self.gps_mb_spd_mps
+            self.gps_percent_mb = (self.gps_mb_spd_mps / self.gps_flow_spd_mps) * 100
+
+    def magvar_change(self, magvar, old_magvar):
+        """Adjust moving-bed test for change in magvar.
+
+        Parameters
+        ----------
+        magvar: float
+            New magvar
+        old_magvar: float
+            Existing magvar
+        """
+
+        if self.transect.sensors.heading_deg.selected == 'internal':
+            magvar_change = magvar - old_magvar
+            self.bt_mb_dir = self.bt_mb_dir + magvar_change
+            self.flow_dir = self.flow_dir + magvar_change
+
+            # Recompute moving-bed tests with GPS and set results using existing reference
+            self.compute_mb_gps()
+            self.change_ref(self.ref)
+
+    def h_offset_change(self, h_offset, old_h_offset):
+        """Adjust moving-bed test for change in h_offset for external compass.
+
+        Parameters
+        ----------
+        h_offset: float
+            New h_offset
+        old_h_offset: float
+            Existing h_offset
+        """
+
+        if self.transect.sensors.heading_deg.selected == 'external':
+            h_offset_change = h_offset - old_h_offset
+            self.bt_mb_dir = self.bt_mb_dir + h_offset_change
+            self.flow_dir = self.flow_dir + h_offset_change
+
+            # Recompute moving-bed tests with GPS and set results using existing reference
+            self.compute_mb_gps()
+            self.change_ref(self.ref)
+
+    def change_ref(self, ref):
+        """Change moving-bed test fixed reference.
+
+        Parameters
+        ----------
+        ref: str
+            Defines specified reference (BT or GPS)
+        """
+
+        if ref == 'BT':
+            self.mb_spd_mps = self.bt_mb_spd_mps
+            self.dist_us_m = self.bt_dist_us_m
+            self.percent_mb = self.bt_percent_mb
+            self.mb_dir = self.bt_mb_dir
+            self.flow_spd_mps = self.bt_flow_spd_mps
+            self.ref = 'BT'
+            check_mb = True
+            if self.test_quality != 'Errors':
+                if self.type == 'Loop':
+                    if self.mb_spd_mps <= 0.012:
+                        check_mb = False
+                        self.moving_bed = 'No'
+                    else:
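+                        # A moving bed biases bottom track upstream, so the apparent
+                        # moving-bed direction should be roughly opposite the flow
+                        # direction (180 degrees +/- 45 degrees)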
+                        if 135 < np.abs(self.flow_dir - self.mb_dir) < 225:
+                            check_mb = True
+                        else:
+                            check_mb = False
+                            self.moving_bed = 'Unknown'
+                if check_mb:
+                    if self.percent_mb > 1:
+                        self.moving_bed = 'Yes'
+                    else:
+                        self.moving_bed = 'No'
+            else:
+                self.moving_bed = 'Unknown'
+        elif ref == 'GPS':
+            self.mb_spd_mps = self.gps_mb_spd_mps
+            self.dist_us_m = self.gps_dist_us_m
+            self.percent_mb = self.gps_percent_mb
+            self.mb_dir = self.gps_mb_dir
+            self.flow_spd_mps = self.gps_flow_spd_mps
+            self.ref = 'GPS'
+            check_mb = True
+            if self.test_quality != 'Errors':
+                if self.type == 'Loop':
+                    if self.mb_spd_mps <= 0.012:
+                        check_mb = False
+                        self.moving_bed = 'No'
+                    else:
+                        if 135 < np.abs(self.flow_dir - self.mb_dir) < 225:
+                            check_mb = True
+                        else:
+                            check_mb = False
+                            self.messages.append('ERROR: GPS Loop closure error not in upstream direction. '
+                                                 + 'REPEAT LOOP or USE STATIONARY TEST')
+                            self.moving_bed = 'Unknown'
+                if check_mb:
+                    if self.percent_mb > 1:
+                        self.moving_bed = 'Yes'
+                    else:
+                        self.moving_bed = 'No'
+            else:
+                self.moving_bed = 'Unknown'
+
+    @staticmethod
+    def near_bed_velocity(u, v, depth, bin_depth):
+        """Compute near bed velocities.
+
+        Parameters
+        ----------
+        u: np.array(float)
+            u water velocity component
+        v: np.array(float)
+            v water velocity component
+        depth: np.array(float)
+            Water depth for each ensemble
+        bin_depth: np.array(float)
+            Depth to centerline of each bin
+
+        Returns
+        -------
+        nb_u: np.array(float)
+            u near-bed velocity component
+        nb_v: np.array(float)
+            v near-bed velocity component
+        unit_nbu: np.array(float)
+            u component of the near-bed unit vector
+        unit_nbv: np.array(float)
+            v component of the near-bed unit vector
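+
+        Notes
+        -----
+        Sketch of the computation (as implemented below): the mean velocity of
+        up to the two valid cells closest to the bed, at height z_depth above
+        the bed, is rescaled by (z_near_bed / z_depth)**(1. / 6.), a 1/6th
+        power-law profile, to estimate the velocity at z_near_bed = 0.1 * depth.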
+        """
+
+        # Compute z near bed as 10% of depth
+        z_near_bed = depth * 0.1
+
+        # Initialize variables
+        n_ensembles = u.shape[1]
+        nb_u = np.tile(np.nan, n_ensembles)
+        nb_v = np.tile(np.nan, n_ensembles)
+        unit_nbu = np.tile(np.nan, n_ensembles)
+        unit_nbv = np.tile(np.nan, n_ensembles)
+        z_depth = np.tile(np.nan, n_ensembles)
+        u_mean = np.tile(np.nan, n_ensembles)
+        v_mean = np.tile(np.nan, n_ensembles)
+        speed_near_bed = np.tile(np.nan, n_ensembles)
+
+        # Compute near bed velocity for each ensemble
+        for n in range(n_ensembles):
+            idx = np.where(np.isnan(u[:, n]) == False)[0]
+            if len(idx) > 0:
+                # Use the last two valid cells when available, otherwise only the last valid cell
+                if len(idx) > 1:
+                    idx = idx[-2:]
+                else:
+                    idx = idx[-1:]
+                # Compute near-bed velocity
+                z_depth[n] = depth[n] - np.nanmean(bin_depth[idx, n], 0)
+                u_mean[n] = np.nanmean(u[idx, n], 0)
+                v_mean[n] = np.nanmean(v[idx, n], 0)
+                nb_u[n] = (u_mean[n] / z_depth[n] ** (1. / 6.)) * (z_near_bed[n] ** (1. / 6.))
+                nb_v[n] = (v_mean[n] / z_depth[n] ** (1. / 6.)) * (z_near_bed[n] ** (1. / 6.))
+                speed_near_bed[n] = np.sqrt(nb_u[n] ** 2 + nb_v[n] ** 2)
+                unit_nbu[n] = nb_u[n] / speed_near_bed[n]
+                unit_nbv[n] = nb_v[n] / speed_near_bed[n]
+
+        return nb_u, nb_v, unit_nbu, unit_nbv
+
+    @staticmethod
+    def auto_use_2_correct(moving_bed_tests, boat_ref=None):
+        """Apply logic to determine which moving-bed tests should be used
+        for correcting bottom track referenced discharges with moving-bed conditions.
+
+        Parameters
+        ----------
+        moving_bed_tests: list
+            List of MovingBedTests objects.
+        boat_ref: str
+            Boat velocity reference.
+
+        Returns
+        -------
+        moving_bed_tests: list
+            List of MovingBedTests objects.
+        """
+
+        if len(moving_bed_tests) != 0:
+            # Initialize variables
+            lidx_user = []
+            lidx_no_errors = []
+            test_type = []
+            lidx_stationary = []
+            lidx_loop = []
+            flow_speed = []
+            for test in moving_bed_tests:
+                test.use_2_correct = False
+                test.selected = False
+                # Valid test according to user
+                lidx_user.append(test.user_valid == True)
+                # Valid test according to quality assessment
+                lidx_no_errors.append(test.test_quality != 'Errors')
+                # Identify type of test
+                test_type.append(test.type)
+                lidx_stationary.append(test.type == 'Stationary')
+                lidx_loop.append(test.type == 'Loop')
+                flow_speed.append(test.flow_spd_mps)
+
+            # Combine
+            lidx_valid_loop = np.all(np.vstack((lidx_user, lidx_no_errors, lidx_loop)), 0)
+            lidx_valid_stationary = np.all(np.vstack((lidx_user, lidx_no_errors, lidx_stationary)), 0)
+
+            # Check flow speed
+            lidx_flow_speed = np.array(flow_speed) > 0.25
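+            # Loop tests with mean flow speeds of 0.25 m/s or less are considered too
+            # slow to provide a reliable moving-bed determination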
+
+            # Determine if there are valid loop tests
+            # This mirrors the MATLAB code, but it may not be correct: a valid loop should also have a valid
+            # flow speed; if it does not, a stationary test, if available, could be used.
+            lidx_loops_2_select = np.all(np.vstack((lidx_flow_speed, lidx_valid_loop)), 0)
+            if np.any(lidx_loops_2_select):
+                # Select last loop
+                idx_select = np.where(lidx_loops_2_select)[0][-1]
+                test_select = moving_bed_tests[idx_select]
+                test_select.selected = True
+
+                if test_select.moving_bed == 'Yes':
+                    test_select.use_2_correct = True
+
+            # If there are no valid loop tests, look for valid stationary tests
+            elif np.any(lidx_valid_stationary):
+                moving_bed = []
+                for n, lidx in enumerate(lidx_valid_stationary):
+                    if lidx:
+                        moving_bed_tests[n].selected = True
+                        # Determine if any stationary test resulted in a moving bed
+                        if moving_bed_tests[n].moving_bed == 'Yes':
+                            moving_bed.append(True)
+                        else:
+                            moving_bed.append(False)
+                # If any stationary test shows a moving bed, use all valid stationary tests to correct BT discharge
+                if any(moving_bed):
+                    for n, test in enumerate(moving_bed_tests):
+                        if lidx_valid_stationary[n]:
+                            test.use_2_correct = True
+
+            # If the flow speed is too low and there are no valid stationary tests, use the last loop test.
+            elif np.any(lidx_valid_loop):
+                # Select last loop
+                idx_select = np.where(lidx_valid_loop)[0][-1]
+                moving_bed_tests[idx_select].selected = True
+                if moving_bed_tests[idx_select].moving_bed == 'Yes':
+                    moving_bed_tests[idx_select].use_2_correct = True
+
+            # If the navigation reference for discharge computations is set to
+            # GPS, then none of the tests should be used for correction. The
+            # selected test should still be used to determine if there is a
+            # valid moving-bed condition.
+            if boat_ref is None:
+                ref = 'BT'
+            else:
+                ref = boat_ref
+
+            if ref != 'BT':
+                for test in moving_bed_tests:
+                    test.use_2_correct = False
+        return moving_bed_tests
diff --git a/Classes/MultiThread.py b/Classes/MultiThread.py
new file mode 100644
index 0000000000000000000000000000000000000000..51d4580b93f88df719e852dd8602b64f92f2443c
--- /dev/null
+++ b/Classes/MultiThread.py
@@ -0,0 +1,22 @@
+"""
+Created on Sep 28, 2017
+
+@author: gpetrochenkov
+"""
+import threading
+
+
+class MultiThread(threading.Thread):
+    
+    def __init__(self, thread_id, function, args=None):
+        threading.Thread.__init__(self)
+        self.thread_id = thread_id
+        self.function = function
+        self.args = args
+        
+    def run(self):
+        
+        if self.args is not None:
+            self.function(**self.args)
+        else:
+            self.function()
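+
+
+# Example usage (illustrative; the worker function and its argument are hypothetical):
+#
+#     def worker(transect=None):
+#         transect.process()
+#
+#     thread = MultiThread(thread_id=0, function=worker, args={'transect': transect})
+#     thread.start()  # start() invokes run(), which calls function(**args)
+#     thread.join()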
diff --git a/Classes/NormData.py b/Classes/NormData.py
new file mode 100644
index 0000000000000000000000000000000000000000..9a35a845c541819425c626a467a26aaaf921dff8
--- /dev/null
+++ b/Classes/NormData.py
@@ -0,0 +1,435 @@
+import warnings
+import numpy as np
+import scipy.stats as sp
+from MiscLibs.common_functions import cart2pol, pol2cart, nan_less, nan_less_equal, nan_greater
+
+
+class NormData(object):
+    """Class creates normalized depth and unit discharge or velocity.
+
+    The constructor method allows an object to be formed without any data.
+    The populate_data method creates normalized data for a single transect.
+    The create_composite method creates normalized data for all checked
+    transects. populate_data also allows only a portion of the data to be used
+    in the normalization process by specifying the data extent.
+
+    Attributes
+    ----------
+    file_name: str
+        Name of transect file
+    cell_depth_normalized: np.array(float)
+        Normalized depth of cell
+    unit_normalized: np.array(float)
+        Normalized discharge or velocity for all depth cells
+    unit_normalized_med: np.array(float)
+        Median of normalized data within 5% partitions
+    unit_normalized_no: np.array(int)
+        Number of data points in each median
+    unit_normalized_z: np.array(float)
+        Relative depth for each median (5% increments)
+    unit_normalized_25: np.array(float)
+        Value for which 25% of normalized values are smaller
+    unit_normalized_75: np.array(float)
+        Value for which 75% of normalized values are smaller
+    data_type: str
+        Type of data (v, q, V, or Q)
+    data_extent: list
+        Defines percent of data from start of transect to use, default [0, 100]
+    valid_data: np.array(int)
+        Index of median values with point count greater than threshold cutoff
+    weights: np.array(float)
+        Discharge based weights for computing a weighted median
+    use_weighted: bool
+        Specifies if discharge weighted medians are to be used in the extrapolation fit
+    sub_from_left: bool
+        Specifies if subsectioning should proceed from left to right.
+    use_q: bool
+        Specifies whether to use the discharge rather than the cross product when subsectioning
+    """
+    
+    def __init__(self):
+        """Creates object and initializes instance variables."""
+        self.file_name = None  # Name of transect file
+        self.cell_depth_normalized = None  # Normalized depth of cell
+        self.unit_normalized = None  # Normalized discharge or velocity for all depth cells
+        self.unit_normalized_med = None  # Median of normalized data within 5% partitions
+        self.unit_normalized_no = None  # Number of data points in each median
+        self.unit_normalized_z = None  # Relative depth for each median (5% increments)
+        self.unit_normalized_25 = None  # Value for which 25% of normalized values are smaller
+        self.unit_normalized_75 = None  # Value for which 75% of normalized values are smaller
+        self.data_type = 'q'  # Type of data (v, q, V, or Q)
+        self.data_extent = None  # Defines percent of data from start of transect to use, default [0, 100]
+        self.valid_data = np.array([])  # Index of median values with point count greater than threshold cutoff
+        self.weights = np.array([])
+        self.use_weighted = True
+        self.sub_from_left = False
+        self.use_q = False
+        
+    def populate_data(self, transect, data_type, threshold, data_extent=None, use_weighted=True,
+                      sub_from_left=True, use_q=True):
+        """Computes the normalized values for a single transect.
+
+        Parameters
+        ----------
+        transect: TransectData
+            Object of TransectData
+        data_type: str
+            Type of data (v, q, V, or Q)
+        threshold: int
+            Number of data points in an increment for the increment to be valid.
+        data_extent: list
+            Defines percent of data from start of transect to use, default [0, 100]
+        use_weighted: bool
+            Specifies if discharge weighted medians are to be used in the extrapolation fit
+        sub_from_left: bool
+            Specifies if subsectioning should proceed from left to right.
+        use_q: bool
+            Specifies whether to use the discharge rather than the cross product when subsectioning
+        """
+
+        # If the data extent is not defined, default to [0, 100] so that all data are used
+        if data_extent is None:
+            data_extent = [0, 100]
+
+        self.sub_from_left = sub_from_left
+        self.use_q = use_q
+            
+        # Get data copies to avoid changing original data
+        filename = transect.file_name
+        in_transect_idx = transect.in_transect_idx
+
+        depths_selected = getattr(transect.depths, transect.depths.selected)
+        cell_depth = np.copy(depths_selected.depth_cell_depth_m[:, in_transect_idx])
+        cells_above_sl = transect.w_vel.cells_above_sl[:, in_transect_idx]
+        cell_depth[cells_above_sl == False] = np.nan
+        depth_ens = np.copy(depths_selected.depth_processed_m[in_transect_idx])
+
+        w_vel_x = np.copy(transect.w_vel.u_processed_mps[:, in_transect_idx])
+        w_vel_y = np.copy(transect.w_vel.v_processed_mps[:, in_transect_idx])
+
+        invalid_data = np.logical_not(transect.w_vel.valid_data[0, :, in_transect_idx]).T
+        w_vel_x[invalid_data] = np.nan
+        w_vel_y[invalid_data] = np.nan
+
+        boat_select = getattr(transect.boat_vel, transect.boat_vel.selected)
+        if boat_select is not None:
+            bt_vel_x = np.copy(boat_select.u_processed_mps[in_transect_idx])
+            bt_vel_y = np.copy(boat_select.v_processed_mps[in_transect_idx])
+        else:
+            bt_vel_x = np.tile([np.nan], transect.boat_vel.bt_vel.u_processed_mps[in_transect_idx].shape)
+            bt_vel_y = np.tile([np.nan], transect.boat_vel.bt_vel.u_processed_mps[in_transect_idx].shape)
+
+        # Compute discharges
+        xprod = np.multiply(w_vel_x, bt_vel_y) - np.multiply(w_vel_y, bt_vel_x)
+        cell_size = depths_selected.depth_cell_size_m
+        delta_t = transect.date_time.ens_duration_sec[in_transect_idx]
+        # Note: xprod is already limited to the in-transect ensembles, so only cell_size needs slicing
+        q = np.multiply(xprod * cell_size[:, in_transect_idx], delta_t)
+        q_ens = np.nansum(q, 0)
+
+        # Ensure all elements of xprod can be used to compute q (i.e., have a delta_t); the first ensemble has none
+        idx_invalid = np.where(np.isnan(delta_t))[0]
+        xprod[:, idx_invalid] = np.nan
+
+        if np.nansum(np.abs(q_ens)) > 0:
+            # Compute ensemble weights
+            weight_ensemble = abs(q_ens) / np.nansum(abs(q_ens))
+
+            # Apply weights to cells
+            weights = np.tile(weight_ensemble, (cell_depth.shape[0], 1))
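+            # For example, ensembles with |q_ens| = [2, 1, 1] receive weights
+            # [0.5, 0.25, 0.25], repeated for every cell in the ensemble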
+        else:
+            weights = np.ones(w_vel_x.shape)
+
+        # Compute normalized cell depth by average depth in each ensemble
+        norm_cell_depth = np.divide(cell_depth, depth_ens)
+        norm_cell_depth[nan_less(norm_cell_depth, 0)] = np.nan
+
+        # If data type is discharge compute unit discharge for each cell
+        if data_type.lower() == 'q':
+            # Compute the cross product for each cell
+            unit = xprod
+        else:
+            w_vel_x = np.copy(transect.w_vel.u_processed_mps[:, in_transect_idx])
+            w_vel_y = np.copy(transect.w_vel.v_processed_mps[:, in_transect_idx])
+
+            invalid_data = np.logical_not(transect.w_vel.valid_data[0, :, in_transect_idx]).T
+            w_vel_x[invalid_data] = np.nan
+            w_vel_y[invalid_data] = np.nan
+
+            # Compute mean velocity components in each ensemble
+            w_vel_mean_1 = np.nanmean(w_vel_x, 0)
+            w_vel_mean_2 = np.nanmean(w_vel_y, 0)
+
+            # Compute a unit vector
+            direction, _ = cart2pol(w_vel_mean_1, w_vel_mean_2)
+            unit_vec_1, unit_vec_2 = pol2cart(direction, 1)
+            unit_vec = np.vstack([unit_vec_1, unit_vec_2])
+            
+            # Compute the velocity magnitude in the direction of the mean velocity of each
+            # ensemble using the dot product and unit vector
+            unit = np.tile([np.nan], w_vel_x.shape)
+            for i in range(w_vel_x.shape[0]):
+                unit[i, :] = np.sum(np.vstack([w_vel_x[i, :], w_vel_y[i, :]]) * unit_vec, 0)
+
+            # Discharge weighting of velocity data is not permitted
+            use_weighted = False
+
+        # Adjust to positive value
+        unit_total = np.nansum(unit)
+        if unit_total < 0:
+            unit *= -1
+            
+        # Compute normalized unit values
+        with warnings.catch_warnings():
+            warnings.simplefilter("ignore", category=RuntimeWarning)
+            unit_norm = np.divide(unit, np.abs(np.nanmean(unit, 0)))
+
+        # Apply extents if they have been specified
+        if data_extent[0] != 0 or data_extent[1] != 100:
+            if use_q:
+                # Adjust cumulative sum direction based on start bank so that cumsum is always from left to right
+                if transect.start_edge == 'Right' and sub_from_left:
+                    q_ens_flipped = np.flip(q_ens)
+                    q_cum = np.nancumsum(q_ens_flipped)
+                    q_max = q_cum[-1]
+                    q_cum = np.flip(q_cum)
+                else:
+                    q_cum = np.nancumsum(q_ens)
+                    q_max = q_cum[-1]
+            else:
+                unit_ens = np.nansum(unit, 0)
+                q_cum = np.nancumsum(unit_ens)
+                q_max = q_cum[-1]
+            # Adjust so total discharge is positive
+            if q_max < 0:
+                q_cum *= -1
+                q_max *= -1
+
+            # Apply extents
+            unit_left = q_max * data_extent[0] / 100
+            unit_right = q_max * data_extent[1] / 100
+            idx_extent = np.where(np.logical_and(np.greater(q_cum, unit_left),
+                                                 np.less(q_cum, unit_right)))[0]
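+            # For example, data_extent = [0, 50] keeps only the ensembles whose
+            # cumulative discharge from the left bank falls within the first
+            # half of the total discharge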
+            unit_norm = unit_norm[:, idx_extent]
+            norm_cell_depth = norm_cell_depth[:, idx_extent]
+            weights = weights[:, idx_extent]
+            
+        # If whole profile is negative make positive
+        idx_neg1 = np.tile([np.nan], [unit_norm.shape[1], 1])
+        idx_neg2 = np.tile([np.nan], [unit_norm.shape[1], 1])
+        for c in range(unit_norm.shape[1]):
+            idx_neg1[c] = len(np.where(nan_less(unit_norm[:, c], 0))[0])
+            idx_neg2[c] = len(np.where(np.isnan(unit_norm[:, c]) == False)[0])
+        idx_neg = np.squeeze(idx_neg1) == np.squeeze(idx_neg2)
+        unit_norm[:, idx_neg] = unit_norm[:, idx_neg] * -1
+
+        # Store results
+        self.file_name = filename
+        self.data_extent = data_extent
+        self.data_type = data_type
+        self.cell_depth_normalized = norm_cell_depth
+        self.unit_normalized = unit_norm
+        self.use_weighted = use_weighted
+        self.weights = weights
+        self.compute_stats(threshold)
+
+    @staticmethod
+    def qrev_mat_in(mat_data):
+        """Processes the Matlab data structure to obtain a list of NormData objects containing transect
+           data from the Matlab data structure.
+
+       Parameters
+       ----------
+       mat_data: mat_struct
+           Matlab data structure obtained from sio.loadmat
+
+       Returns
+       -------
+       norm_data: list
+           List of NormData objects
+       """
+        norm_data = []
+        if hasattr(mat_data, 'normData'):
+            for data in mat_data.normData:
+                temp = NormData()
+                temp.populate_from_qrev_mat(data)
+                norm_data.append(temp)
+        return norm_data
+
+    def populate_from_qrev_mat(self, mat_data):
+        """Populates the object using data from previously saved QRev Matlab file.
+
+        Parameters
+        ----------
+        mat_data: mat_struct
+           Matlab data structure obtained from sio.loadmat
+        """
+
+        self.file_name = mat_data.fileName
+        self.cell_depth_normalized = mat_data.cellDepthNormalized
+        self.unit_normalized = mat_data.unitNormalized
+        self.unit_normalized_med = mat_data.unitNormalizedMed
+        self.unit_normalized_no = mat_data.unitNormalizedNo
+        self.unit_normalized_z = mat_data.unitNormalizedz
+        self.unit_normalized_25 = mat_data.unitNormalized25
+        self.unit_normalized_75 = mat_data.unitNormalized75
+        self.data_type = mat_data.dataType
+        self.data_extent = mat_data.dataExtent
+        self.valid_data = mat_data.validData - 1
+        if hasattr(mat_data, 'use_weighted'):
+            self.use_weighted = mat_data.use_weighted
+            self.weights = mat_data.weights
+        else:
+            self.use_weighted = False
+            self.weights = None
+        if hasattr(mat_data, 'use_q'):
+            self.use_q = mat_data.use_q
+        if hasattr(mat_data, 'sub_from_left'):
+            self.sub_from_left = mat_data.sub_from_left
+
+    def compute_stats(self, threshold):
+        """Computes the statistics for the normalized data.
+
+        Parameters
+        ----------
+        threshold: int
+            Number of data points in an increment for the increment to be valid.
+        """
+
+        # Set averaging interval
+        avg_interval = np.arange(0, 1.05, .05)
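+        # np.arange(0, 1.05, .05) yields 21 edges that define 20 increments of
+        # width 0.05 spanning normalized depths from 0 to 1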
+
+        # Initialize variables to nan
+        unit_norm_med = np.tile([np.nan], len(avg_interval) - 1)
+        unit_norm_med_no = np.tile([np.nan], len(avg_interval) - 1)
+        unit_25 = np.tile([np.nan], len(avg_interval) - 1)
+        unit_75 = np.tile([np.nan], len(avg_interval) - 1)
+        avgz = np.tile([np.nan], len(avg_interval) - 1)
+
+        # Process each normalized increment
+        for i in range(len(avg_interval) - 1):
+            condition_1 = nan_greater(self.cell_depth_normalized, avg_interval[i])
+            condition_2 = nan_less_equal(self.cell_depth_normalized, avg_interval[i + 1])
+            condition_3 = np.logical_not(np.isnan(self.unit_normalized))
+            condition_all = np.logical_and(np.logical_and(condition_1, condition_2), condition_3)
+            if np.any(condition_all):
+                if self.data_type.lower() == 'q' and self.use_weighted:
+                    results = self.weighted_quantile(self.unit_normalized[condition_all],
+                                                     quantiles=[0.25, 0.5, 0.75],
+                                                     sample_weight=self.weights[condition_all])
+                    unit_25[i] = results[0]
+                    unit_norm_med[i] = results[1]
+                    unit_75[i] = results[2]
+                else:
+                    unit_25[i], unit_norm_med[i], unit_75[i] = sp.mstats.mquantiles(self.unit_normalized[condition_all],
+                                                                                    alphap=0.5, betap=0.5)
+
+                unit_norm_med_no[i] = np.sum(np.isnan(self.unit_normalized[condition_all]) == False)
+                avgz[i] = 1 - np.nanmean(self.cell_depth_normalized[condition_all])
+
+        # Mark increments invalid if they do not have sufficient data
+        cutoff = np.nanmedian(unit_norm_med_no[nan_greater(unit_norm_med_no, 0)]) * (threshold / 100)
+        self.valid_data = np.where(nan_greater(unit_norm_med_no, cutoff))[0]
+
+        self.unit_normalized_med = unit_norm_med
+        self.unit_normalized_no = unit_norm_med_no
+        self.unit_normalized_25 = unit_25
+        self.unit_normalized_75 = unit_75
+        self.unit_normalized_z = avgz
+
+    @staticmethod
+    def weighted_quantile(values, quantiles, sample_weight):
+        """ Very close to numpy.percentile, but supports weights.
+        NOTE: quantiles should be in [0, 1]!
+
+        Parameters
+        ----------
+        values: ndarray(float)
+            Array of normalized values
+        quantiles: list
+            List of quantiles to be computed
+        sample_weight: ndarray(float)
+            Weights for each value
+
+        Returns
+        -------
+        results: list
+            List of values at specified quantiles
+
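+        Example
+        -------
+        A minimal illustration (hypothetical values): values [1.0, 2.0, 3.0]
+        with equal weights [1.0, 1.0, 1.0] give weighted quantile positions
+        [1/6, 1/2, 5/6], so quantile 0.5 interpolates to 2.0, matching the
+        unweighted median.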
+        """
+
+        sorter = np.argsort(values)
+        values = values[sorter]
+        sample_weight = sample_weight[sorter]
+
+        weighted_quantiles = np.cumsum(sample_weight) - 0.5 * sample_weight
+        weighted_quantiles /= np.sum(sample_weight)
+
+        results = []
+        for quantile in quantiles:
+            results.append(np.interp(quantile, weighted_quantiles, values))
+
+        return results
+
+    def create_composite(self, transects, norm_data, threshold):
+        """Compute normalized data for measurement composite.
+
+        Parameters
+        ----------
+        transects: list
+            List of objects of TransectData
+        norm_data: list
+            List of objects of NormData
+        threshold: int
+            Number of data points in an increment for the increment to be valid.
+        """
+
+        # Initialize lists
+        n_cells = []
+        n_ens = [0]
+
+        # Determine number of cells and ensembles for each transect
+        for data in norm_data:
+            n_cells.append(data.unit_normalized.shape[0])
+            try:
+                n_ens.append(data.unit_normalized.shape[1])
+            except IndexError:
+                n_ens.append(1)
+        max_cells = max(n_cells)
+        sum_ens = np.cumsum(n_ens)
+
+        # Initialize normalized variables
+        self.unit_normalized = np.tile([np.nan], (max_cells, sum_ens[-1]))
+        self.cell_depth_normalized = np.tile([np.nan], (max_cells, sum_ens[-1]))
+        self.weights = np.tile([np.nan], (max_cells, sum_ens[-1]))
+
+        # Process each transect using data from only the checked transects
+        for n in range(len(transects)):
+            if transects[n].checked:
+                self.unit_normalized[:n_cells[n], np.arange(sum_ens[n], sum_ens[n + 1])] \
+                    = norm_data[n].unit_normalized
+                self.weights[:n_cells[n], np.arange(sum_ens[n], sum_ens[n + 1])] \
+                    = norm_data[n].weights
+                self.cell_depth_normalized[:n_cells[n], np.arange(sum_ens[n], sum_ens[n + 1])] \
+                    = norm_data[n].cell_depth_normalized
+                self.data_extent = norm_data[n].data_extent
+                self.data_type = norm_data[n].data_type
+                self.use_weighted = norm_data[n].use_weighted
+
+        # Store data
+        self.file_name = 'Measurement'
+        self.compute_stats(threshold)
diff --git a/Classes/Oursin.py b/Classes/Oursin.py
new file mode 100644
index 0000000000000000000000000000000000000000..263ed78502c90c49ffddbf3df36598ce8e23e44d
--- /dev/null
+++ b/Classes/Oursin.py
@@ -0,0 +1,2279 @@
+import time
+
+import pandas as pd
+import copy
+from Classes.QComp import QComp
+from scipy.stats import t
+import numpy as np
+import math
+import scipy.stats
+from profilehooks import profile
+from MiscLibs.common_functions import cosd, sind
+from MiscLibs.bayes_cov_compiled import bayes_cov
+
+
+class Oursin(object):
+    """Computes the uncertainty of a measurement using Oursin method.
+
+    Attributes
+    ----------
+    bot_meth: list
+        List that contains the method proposed by Extrap for each transect
+    exp_95ic_min: list
+        List that contains the minimum of the 95% interval when the power-power method is used for a transect
+    exp_95ic_max: list
+        List that contains the maximum of the 95% interval when the power-power method is used for a transect
+    pp_exp: list
+        List that contains the power-power exponent computed by Extrap for power-power transects only
+    ns_exp: list
+        List that contains the no-slip exponent computed by Extrap for no-slip transects only
+    exp_pp_min: list
+        Minimum power-power exponent used for simulating possible discharge
+    exp_pp_max: list
+        Maximum power-power exponent used for simulating possible discharge
+    exp_ns_min: list
+        Minimum no-slip exponent used for simulating possible discharge
+    exp_ns_max: list
+        Maximum no-slip exponent used for simulating possible discharge
+    d_right_error_min: list
+        List that contains the minimum right distance (in m) used for simulating the discharge for each transect
+    d_left_error_min: list
+        List that contains the minimum left distance (in m) used for simulating the discharge for each transect
+    d_right_error_max: list
+        List that contains the maximum right distance (in m) used for simulating the discharge for each transect
+    d_left_error_max: list
+        List that contains the maximum left distance (in m) used for simulating the discharge for each transect
+    draft_error_list: list
+        List that contains the draft (in cm) used for simulating the discharge for each transect
+    u_syst_list: list
+        List that contains the computed systematic uncertainty (68%) for each transect
+    u_compass_list: list
+        List that contains the computed uncertainty (68%) due to compass error for each transect
+    u_meas_list: list
+        List that contains the computed measured area uncertainty (68%) for each transect
+    u_ens_list: list
+        List that contains the computed uncertainty (68%) due to the limited number of ensembles for each transect
+    u_movbed_list: list
+        List that contains the estimated uncertainty (68%) due to moving bed for each transect
+    u_invalid_water_list: list
+        List that contains the computed uncertainty (68%) due to invalid water velocities for each transect
+    u_invalid_boat_list: list
+        List that contains the computed uncertainty (68%) due to invalid boat velocities for each transect
+    u_invalid_depth_list: list
+        List that contains the computed uncertainty (68%) due to invalid depths for each transect
+    u_top_list: list
+        List that contains the computed uncertainty (68%) due to top discharge extrapolation for each transect
+    u_bot_list: list
+        List that contains the computed uncertainty (68%) due to bottom discharge extrapolation for each transect
+    u_left_list: list
+        List that contains the computed uncertainty (68%) due to left discharge extrapolation for each transect
+    u_right_list: list
+        List that contains the computed uncertainty (68%) due to right discharge extrapolation for each transect
+    u_syst_mean_user_list: list
+        List that contains the user specified systematic uncertainty (68%) for each transect
+    u_compass_user_list: list
+        List that contains the user specified uncertainty (68%) due to compass error for each transect
+    u_meas_mean_user_list: list
+        List that contains the user specified measured area uncertainty (68%) for each transect
+    u_ens_user_list: list
+        List that contains the user specified uncertainty (68%) due to the limited number of ensembles for each transect
+    u_movbed_user_list: list
+        List that contains the user specified uncertainty (68%) due to moving bed for each transect
+    u_invalid_water_user_list: list
+        List that contains the user specified uncertainty (68%) due to invalid water velocities for each transect
+    u_invalid_boat_user_list: list
+        List that contains the user specified uncertainty (68%) due to invalid boat velocities for each transect
+    u_invalid_depth_user_list: list
+        List that contains the user specified uncertainty (68%) due to invalid depths for each transect
+    u_top_mean_user_list: list
+        List that contains the user specified uncertainty (68%) due to top discharge extrapolation for each transect
+    u_bot_mean_user_list: list
+        List that contains the user specified uncertainty (68%) due to bottom discharge extrapolation for each transect
+    u_left_mean_user_list: list
+        List that contains the user specified uncertainty (68%) due to left discharge extrapolation for each transect
+    u_right_mean_user_list: list
+        List that contains the user specified uncertainty (68%) due to right discharge extrapolation for each transect
+    cov_68: float
+       Computed uncertainty (68%) due to coefficient of variation
+    sim_original: DataFrame
+        Discharges (total, and subareas) computed for the processed discharge
+    sim_extrap_pp_16: DataFrame
+        Discharges (total, and subareas) computed using power fit with 1/6th exponent
+    sim_extrap_pp_min: DataFrame
+        Discharges (total, and subareas) computed using power fit with minimum exponent
+    sim_extrap_pp_max: DataFrame
+        Discharges (total, and subareas) computed using power fit with maximum exponent
+    sim_extrap_cns_16: DataFrame
+        Discharges (total, and subareas) computed using constant no slip with 1/6th exponent
+    sim_extrap_cns_min: DataFrame
+        Discharges (total, and subareas) computed using constant no slip with minimum exponent
+    sim_extrap_cns_max: DataFrame
+        Discharges (total, and subareas) computed using constant no slip with maximum exponent
+    sim_extrap_3pns_16: DataFrame
+        Discharges (total, and subareas) computed using 3pt no slip with 1/6th exponent
+    sim_extrap_3pns_opt: DataFrame
+        Discharges (total, and subareas) computed using 3pt no slip with optimized exponent
+    sim_edge_min: DataFrame
+        Discharges (total, and subareas) computed using minimum edge q
+    sim_edge_max: DataFrame
+        Discharges (total, and subareas) computed using maximum edge q
+    sim_draft_min: DataFrame
+        Discharges (total, and subareas) computed using minimum draft
+    sim_draft_max: DataFrame
+        Discharges (total, and subareas) computed using maximum draft
+    sim_cells_trdi: DataFrame
+        Discharges (total, and subareas) computed using TRDI method for invalid cells
+    sim_cells_above: DataFrame
+        Discharges (total, and subareas) computed using cells above for invalid cells
+    sim_cells_below: DataFrame
+        Discharges (total, and subareas) computed using cells below for invalid cells
+    sim_cells_before: DataFrame
+        Discharges (total, and subareas) computed using cells before for invalid cells
+    sim_cells_after: DataFrame
+        Discharges (total, and subareas) computed using cells after for invalid cells
+    nb_transects: float
+        Number of transects used
+    checked_idx: list
+        List of indices of checked transects
+    user_advanced_settings: dict
+        Dictionary of user specified advanced settings
+        exp_pp_min_user: float
+            User specified minimum exponent for power fit
+        exp_pp_max_user: float
+            User specified maximum exponent for power fit
+        exp_ns_min_user: float
+            User specified minimum exponent for no slip fit
+        exp_ns_max_user: float
+            User specified maximum exponent for no slip fit
+        draft_error_m_user: float
+            User specified draft error in m
+        dzi_prct_user: float
+            User specified percent error in depth cell size
+        right_edge_dist_prct_user: float
+            User specified percent error in right edge distance
+        left_edge_dist_prct_user: float
+            User specified percent error in left edge distance
+        gga_boat_mps_user: float
+            User specified standard deviation of boat velocities based on gga in m/s
+        vtg_boat_mps_user: float
+            User specified standard deviation of boat velocities based on vtg in m/s
+        compass_error_deg_user: float
+            User specified compass error in degrees
+        cov_prior_user: float
+            User specified prior for the coefficient of variation
+        cov_prior_u_user: float
+            User specified uncertainty of the prior coefficient of variation
+    default_advanced_settings: dict
+        Dictionary of default values for advanced settings
+        exp_pp_min: float
+            Default minimum exponent for power fit
+        exp_pp_max: float
+            Default maximum exponent for power fit
+        exp_ns_min: float
+            Default minimum exponent for no slip fit
+        exp_ns_max: float
+            Default maximum exponent for no slip fit
+        draft_error_m: float
+            Default draft error in m
+        dzi_prct: float
+            Default percent error in depth cell size
+        right_edge_dist_prct: float
+            Default percent error in right edge distance
+        left_edge_dist_prct: float
+            Default percent error in left edge distance
+        gga_boat_mps: float
+            Default standard deviation of boat velocities based on gga in m/s
+        vtg_boat_mps: float
+            Default standard deviation of boat velocities based on vtg in m/s
+        compass_error_deg: float
+            Default compass error in degrees
+        cov_prior: float
+            Default prior for the coefficient of variation
+        cov_prior_u: float
+            Default uncertainty of the prior coefficient of variation
+    user_specified_u: dict
+        Dictionary of user specified uncertainties as standard deviation in percent
+        u_syst_mean_user: float
+            User specified uncertainty (bias) due to the system, in percent
+        u_movbed_user: float
+            User specified uncertainty (bias) due to the moving-bed conditions, in percent
+        u_compass_user: float
+            User specified uncertainty (bias) due to the compass error, in percent
+        u_ens_user: float
+            User specified uncertainty (bias) due to the number of ensembles collected, in percent
+        u_meas_mean_user: float
+            User specified uncertainty (random) of the measured portion of the cross section, in percent
+        u_top_mean_user: float
+            User specified uncertainty (bias) due to the top extrapolation, in percent
+        u_bot_mean_user: float
+            User specified uncertainty (bias) due to the bottom extrapolation, in percent
+        u_right_mean_user: float
+            User specified uncertainty (bias) due to the right edge discharge estimate, in percent
+        u_left_mean_user: float
+            User specified uncertainty (bias) due to the left edge discharge estimate, in percent
+        u_invalid_boat_user: float
+            User specified uncertainty (bias) due to invalid boat velocities, in percent
+        u_invalid_depth_user: float
+            User specified uncertainty (bias) due to invalid depths, in percent
+        u_invalid_water_user: float
+            User specified uncertainty (bias) due to invalid water velocities, in percent
+    u: DataFrame
+        DataFrame containing standard deviations in percent for each transect: u_syst, u_compass, u_movbed, u_ens,
+        u_meas, u_top, u_bot, u_left, u_right, u_boat, u_depth, u_water, u_cov, total, and total_95
+    u_contribution_meas: DataFrame
+        DataFrame containing measured discharge uncertainty contribution from: boat, water, depth, and dzi
+    u_measurement: DataFrame
+        DataFrame containing standard deviations in percent for the whole measurement: u_syst, u_compass, u_movbed,
+        u_ens, u_meas, u_top, u_bot, u_left, u_right, u_boat, u_depth, u_water, u_cov, total, and total_95
+    u_contribution_measurement: DataFrame
+        DataFrame containing uncertainty contribution in percent from: u_syst, u_compass, u_movbed,
+        u_ens, u_meas, u_top, u_bot, u_left, u_right, u_boat, u_depth, u_water, u_cov, and total
+    u_user: DataFrame
+        DataFrame containing standard deviations in percent for each transect: u_syst, u_compass, u_movbed, u_ens,
+        u_meas, u_top, u_bot, u_left, u_right, u_boat, u_depth, u_water, u_cov, total, and total_95
+    u_measurement_user: DataFrame
+        DataFrame containing standard deviations in percent for the whole measurement: u_syst, u_compass, u_movbed,
+        u_ens, u_meas, u_top, u_bot, u_left, u_right, u_boat, u_depth, u_water, u_cov, total, and total_95
+    u_contribution_measurement_user: DataFrame
+        DataFrame containing uncertainty contribution in percent from: u_syst, u_compass, u_movbed,
+        u_ens, u_meas, u_top, u_bot, u_left, u_right, u_boat, u_depth, u_water, u_cov, and total
+    """
+
+    def __init__(self):
+        """Initialize class and instance variables."""
+
+        # User provided parameters
+        self.user_advanced_settings = {'exp_pp_min_user': np.nan,
+                                       'exp_pp_max_user': np.nan,
+                                       'exp_ns_min_user': np.nan,
+                                       'exp_ns_max_user': np.nan,
+                                       'draft_error_m_user': np.nan,
+                                       'dzi_prct_user': np.nan,
+                                       'right_edge_dist_prct_user': np.nan,
+                                       'left_edge_dist_prct_user': np.nan,
+                                       'gga_boat_mps_user': np.nan,
+                                       'vtg_boat_mps_user': np.nan,
+                                       'compass_error_deg_user': np.nan,
+                                       'cov_prior_user': np.nan,
+                                       'cov_prior_u_user': np.nan}
+
+        self.default_advanced_settings = {'exp_pp_min': 'computed',
+                                          'exp_pp_max': 'computed',
+                                          'exp_ns_min': 'computed',
+                                          'exp_ns_max': 'computed',
+                                          'draft_error_m': 'computed',
+                                          'dzi_prct': 0.5,
+                                          'right_edge_dist_prct': 20,
+                                          'left_edge_dist_prct': 20,
+                                          'gga_boat_mps': 'computed',
+                                          'vtg_boat_mps': 0.05,
+                                          'compass_error_deg': 1,
+                                          'cov_prior': 0.03,
+                                          'cov_prior_u': 0.20}
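+        # Settings left as 'computed' are presumably derived from the measurement
+        # data when the uncertainty is computed, rather than being fixed values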
+
+        self.user_specified_u = {'u_syst_mean_user': np.nan,
+                                 'u_movbed_user': np.nan,
+                                 'u_compass_user': np.nan,
+                                 'u_ens_user': np.nan,
+                                 'u_meas_mean_user': np.nan,
+                                 'u_top_mean_user': np.nan,
+                                 'u_bot_mean_user': np.nan,
+                                 'u_right_mean_user': np.nan,
+                                 'u_left_mean_user': np.nan,
+                                 'u_invalid_boat_user': np.nan,
+                                 'u_invalid_depth_user': np.nan,
+                                 'u_invalid_water_user': np.nan}
+
+        # Extrap results
+        self.bot_meth = []
+        self.exp_95ic_min = []
+        self.exp_95ic_max = []
+        self.pp_exp = []
+        self.ns_exp = []
+
+        # Parameters used for computing the uncertainty
+        self.exp_pp_min = np.nan
+        self.exp_pp_max = np.nan
+        self.exp_ns_min = np.nan
+        self.exp_ns_max = np.nan
+        self.d_right_error_min = []
+        self.d_left_error_min = []
+        self.d_right_error_max = []
+        self.d_left_error_max = []
+        self.draft_error_list = []
+
+        # Terms computed by transect (list at 68% level)
+        self.u_syst_list = []
+        self.u_compass_list = []
+        self.u_meas_list = []
+        self.u_ens_list = []
+        self.u_movbed_list = []
+        self.u_invalid_water_list = []
+        self.u_invalid_boat_list = []
+        self.u_invalid_depth_list = []
+        self.u_top_list = []
+        self.u_bot_list = []
+        self.u_left_list = []
+        self.u_right_list = []
+
+        self.u_syst_mean_user_list = []
+        self.u_compass_user_list = []
+        self.u_movbed_user_list = []
+        self.u_meas_mean_user_list = []
+        self.u_ens_user_list = []
+        self.u_top_mean_user_list = []
+        self.u_bot_mean_user_list = []
+        self.u_left_mean_user_list = []
+        self.u_right_mean_user_list = []
+        self.u_invalid_boat_user_list = []
+        self.u_invalid_depth_user_list = []
+        self.u_invalid_water_user_list = []
+
+        # Term computed for measurement
+        self.cov_68 = np.nan
+
+        self.nb_transects = np.nan
+        self.checked_idx = []
+
+        # --- Store results of all simulations in DataFrame
+        self.sim_original = pd.DataFrame(columns=['q_total', 'q_top', 'q_bot', 'q_left', 'q_right', 'q_middle'])
+        self.sim_extrap_pp_16 = pd.DataFrame(columns=['q_total', 'q_top', 'q_bot'])
+        self.sim_extrap_pp_opt = pd.DataFrame(columns=['q_total', 'q_top', 'q_bot'])
+        self.sim_extrap_pp_min = pd.DataFrame(columns=['q_total', 'q_top', 'q_bot'])
+        self.sim_extrap_pp_max = pd.DataFrame(columns=['q_total', 'q_top', 'q_bot'])
+        self.sim_extrap_cns_16 = pd.DataFrame(columns=['q_total', 'q_top', 'q_bot'])
+        self.sim_extrap_cns_opt = pd.DataFrame(columns=['q_total', 'q_top', 'q_bot'])
+        self.sim_extrap_cns_min = pd.DataFrame(columns=['q_total', 'q_top', 'q_bot'])
+        self.sim_extrap_cns_max = pd.DataFrame(columns=['q_total', 'q_top', 'q_bot'])
+        self.sim_extrap_3pns_16 = pd.DataFrame(columns=['q_total', 'q_top', 'q_bot'])
+        self.sim_extrap_3pns_opt = pd.DataFrame(columns=['q_total', 'q_top', 'q_bot'])
+        self.sim_edge_min = pd.DataFrame(columns=['q_total', 'q_left', 'q_right'])
+        self.sim_edge_max = pd.DataFrame(columns=['q_total', 'q_left', 'q_right'])
+        self.sim_draft_min = pd.DataFrame(columns=['q_total', 'q_top', 'q_left', 'q_right'])
+        self.sim_draft_max = pd.DataFrame(columns=['q_total', 'q_top', 'q_left', 'q_right'])
+        self.sim_cells_trdi = pd.DataFrame(columns=['q_total', 'q_middle'])
+        self.sim_cells_above = pd.DataFrame(columns=['q_total', 'q_middle'])
+        self.sim_cells_below = pd.DataFrame(columns=['q_total', 'q_middle'])
+        self.sim_cells_before = pd.DataFrame(columns=['q_total', 'q_middle'])
+        self.sim_cells_after = pd.DataFrame(columns=['q_total', 'q_middle'])
+        self.sim_shallow = pd.DataFrame(columns=['q_total', 'q_middle'])
+        self.sim_depth_hold = pd.DataFrame(columns=['q_total', 'q_middle'])
+        self.sim_depth_next = pd.DataFrame(columns=['q_total', 'q_middle'])
+        self.sim_boat_hold = pd.DataFrame(columns=['q_total', 'q_middle'])
+        self.sim_boat_next = pd.DataFrame(columns=['q_total', 'q_middle'])
+        self.u_contribution_meas = pd.DataFrame(columns=['boat', 'water', 'depth', 'dzi'])
+        self.u = pd.DataFrame(columns=['u_syst', 'u_compass', 'u_movbed', 'u_ens', 'u_meas', 'u_top', 'u_bot',
+                                       'u_left', 'u_right', 'u_boat', 'u_depth', 'u_water', 'u_cov', 'total',
+                                       'total_95'])
+        self.u_measurement = pd.DataFrame(columns=['u_syst', 'u_compass', 'u_movbed', 'u_ens', 'u_meas', 'u_top',
+                                                   'u_bot', 'u_left', 'u_right', 'u_boat', 'u_depth', 'u_water',
+                                                   'u_cov', 'total', 'total_95'])
+        self.u_contribution = pd.DataFrame(columns=['u_syst', 'u_compass', 'u_movbed', 'u_ens', 'u_meas', 'u_top',
+                                                    'u_bot', 'u_left', 'u_right', 'u_boat', 'u_depth', 'u_water',
+                                                    'u_cov', 'total'])
+        self.u_contribution_measurement = pd.DataFrame(columns=['u_syst', 'u_compass', 'u_movbed', 'u_ens', 'u_meas',
+                                                                'u_top', 'u_bot', 'u_left', 'u_right', 'u_boat',
+                                                                'u_depth', 'u_water', 'u_cov', 'total'])
+        self.u_user = pd.DataFrame(columns=['u_syst', 'u_compass', 'u_movbed', 'u_ens', 'u_meas', 'u_top', 'u_bot',
+                                            'u_left', 'u_right', 'u_boat', 'u_depth', 'u_water', 'u_cov', 'total',
+                                            'total_95'])
+        self.u_measurement_user = pd.DataFrame(columns=['u_syst', 'u_compass', 'u_movbed', 'u_ens', 'u_meas', 'u_top',
+                                                        'u_bot', 'u_left', 'u_right', 'u_boat', 'u_depth', 'u_water',
+                                                        'u_cov', 'total', 'total_95'])
+        self.u_contribution_user = pd.DataFrame(columns=['u_syst', 'u_compass', 'u_movbed', 'u_ens', 'u_meas', 'u_top',
+                                                         'u_bot', 'u_left', 'u_right', 'u_boat', 'u_depth', 'u_water',
+                                                         'u_cov', 'total'])
+        self.u_contribution_measurement_user = pd.DataFrame(columns=['u_syst', 'u_compass', 'u_movbed', 'u_ens',
+                                                                     'u_meas', 'u_top', 'u_bot', 'u_left', 'u_right',
+                                                                     'u_boat', 'u_depth', 'u_water', 'u_cov', 'total'])
+
+    def populate_from_qrev_mat(self, meas_struct):
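+        """Populates the Oursin object using data from a previously saved QRev Matlab file.
+
+        Parameters
+        ----------
+        meas_struct: mat_struct
+            Matlab data structure of the saved measurement, as loaded from the .mat file
+        """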
+
+        # User provided parameters
+        self.user_advanced_settings = {'exp_pp_min_user': meas_struct.oursin.user_advanced_settings.exp_pp_min_user,
+                                       'exp_pp_max_user': meas_struct.oursin.user_advanced_settings.exp_pp_max_user,
+                                       'exp_ns_min_user': meas_struct.oursin.user_advanced_settings.exp_ns_min_user,
+                                       'exp_ns_max_user': meas_struct.oursin.user_advanced_settings.exp_ns_max_user,
+                                       'draft_error_m_user':
+                                           meas_struct.oursin.user_advanced_settings.draft_error_m_user,
+                                       'dzi_prct_user': meas_struct.oursin.user_advanced_settings.dzi_prct_user,
+                                       'right_edge_dist_prct_user':
+                                           meas_struct.oursin.user_advanced_settings.right_edge_dist_prct_user,
+                                       'left_edge_dist_prct_user':
+                                           meas_struct.oursin.user_advanced_settings.left_edge_dist_prct_user,
+                                       'gga_boat_mps_user': meas_struct.oursin.user_advanced_settings.gga_boat_mps_user,
+                                       'vtg_boat_mps_user': meas_struct.oursin.user_advanced_settings.vtg_boat_mps_user,
+                                       'compass_error_deg_user':
+                                           meas_struct.oursin.user_advanced_settings.compass_error_deg_user,
+                                       'cov_prior_user': meas_struct.oursin.user_advanced_settings.cov_prior_user,
+                                       'cov_prior_u_user': meas_struct.oursin.user_advanced_settings.cov_prior_u_user}
+
+        self.user_specified_u = {'u_syst_mean_user': meas_struct.oursin.user_specified_u.u_syst_mean_user,
+                                 'u_movbed_user': meas_struct.oursin.user_specified_u.u_movbed_user,
+                                 'u_compass_user': meas_struct.oursin.user_specified_u.u_compass_user,
+                                 'u_ens_user': meas_struct.oursin.user_specified_u.u_ens_user,
+                                 'u_meas_mean_user': meas_struct.oursin.user_specified_u.u_meas_mean_user,
+                                 'u_top_mean_user': meas_struct.oursin.user_specified_u.u_top_mean_user,
+                                 'u_bot_mean_user': meas_struct.oursin.user_specified_u.u_bot_mean_user,
+                                 'u_right_mean_user': meas_struct.oursin.user_specified_u.u_right_mean_user,
+                                 'u_left_mean_user': meas_struct.oursin.user_specified_u.u_left_mean_user,
+                                 'u_invalid_boat_user': meas_struct.oursin.user_specified_u.u_invalid_boat_user,
+                                 'u_invalid_depth_user': meas_struct.oursin.user_specified_u.u_invalid_depth_user,
+                                 'u_invalid_water_user': meas_struct.oursin.user_specified_u.u_invalid_water_user}
+
+        # Extrap results
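+        # Matlab scalars load as plain Python floats or strings, while vectors load as
+        # np.ndarray, so single-transect values are wrapped in a list or converted with tolist()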
+        if type(meas_struct.oursin.bot_meth) is str:
+            self.bot_meth = [meas_struct.oursin.bot_meth]
+        else:
+            self.bot_meth = meas_struct.oursin.bot_meth.tolist()
+
+        if type(meas_struct.oursin.exp_95ic_min) is float:
+            self.exp_95ic_min = meas_struct.oursin.exp_95ic_min
+        else:
+            self.exp_95ic_min = meas_struct.oursin.exp_95ic_min.tolist()
+
+        if type(meas_struct.oursin.exp_95ic_max) is float:
+            self.exp_95ic_max = meas_struct.oursin.exp_95ic_max
+        else:
+            self.exp_95ic_max = meas_struct.oursin.exp_95ic_max.tolist()
+
+        if type(meas_struct.oursin.ppExponent) is float:
+            self.pp_exp = meas_struct.oursin.ppExponent
+        else:
+            self.pp_exp = meas_struct.oursin.ppExponent.tolist()
+
+        if type(meas_struct.oursin.nsExponent) is float:
+            self.ns_exp = meas_struct.oursin.nsExponent
+        else:
+            self.ns_exp = meas_struct.oursin.nsExponent.tolist()
+
+        # Parameters used for computing the uncertainty
+        self.exp_pp_min = meas_struct.oursin.exp_pp_min
+        self.exp_pp_max = meas_struct.oursin.exp_pp_max
+        self.exp_ns_min = meas_struct.oursin.exp_ns_min
+        self.exp_ns_max = meas_struct.oursin.exp_ns_max
+
+        if type(meas_struct.oursin.d_right_error_min) is float:
+            self.d_right_error_min = meas_struct.oursin.d_right_error_min
+            self.d_left_error_min = meas_struct.oursin.d_left_error_min
+            self.d_right_error_max = meas_struct.oursin.d_right_error_max
+            self.d_left_error_max = meas_struct.oursin.d_left_error_max
+            self.draft_error_list = meas_struct.oursin.draft_error_list
+        else:
+            self.d_right_error_min = meas_struct.oursin.d_right_error_min.tolist()
+            self.d_left_error_min = meas_struct.oursin.d_left_error_min.tolist()
+            self.d_right_error_max = meas_struct.oursin.d_right_error_max.tolist()
+            self.d_left_error_max = meas_struct.oursin.d_left_error_max.tolist()
+            self.draft_error_list = meas_struct.oursin.draft_error_list.tolist()
+
+        # Terms computed by transect (list at 68% level)
+        if type(meas_struct.oursin.u_syst_mean_user_list) is float:
+            self.u_syst_list = [meas_struct.oursin.u_syst_list]
+            self.u_compass_list = [meas_struct.oursin.u_compass_list]
+            self.u_meas_list = [meas_struct.oursin.u_meas_list]
+            self.u_ens_list = [meas_struct.oursin.u_ens_list]
+            self.u_movbed_list = [meas_struct.oursin.u_movbed_list]
+            self.u_invalid_water_list = [meas_struct.oursin.u_invalid_water_list]
+            self.u_invalid_boat_list = [meas_struct.oursin.u_invalid_boat_list]
+            self.u_invalid_depth_list = [meas_struct.oursin.u_invalid_depth_list]
+            self.u_top_list = [meas_struct.oursin.u_top_list]
+            self.u_bot_list = [meas_struct.oursin.u_bot_list]
+            self.u_left_list = [meas_struct.oursin.u_left_list]
+            self.u_right_list = [meas_struct.oursin.u_right_list]
+
+            self.u_syst_mean_user_list = [meas_struct.oursin.u_syst_mean_user_list]
+            self.u_compass_user_list = [meas_struct.oursin.u_compass_user_list]
+            self.u_movbed_user_list = [meas_struct.oursin.u_movbed_user_list]
+            self.u_meas_mean_user_list = [meas_struct.oursin.u_meas_mean_user_list]
+            self.u_ens_user_list = [meas_struct.oursin.u_ens_user_list]
+            self.u_top_mean_user_list = [meas_struct.oursin.u_top_mean_user_list]
+            self.u_bot_mean_user_list = [meas_struct.oursin.u_bot_mean_user_list]
+            self.u_left_mean_user_list = [meas_struct.oursin.u_left_mean_user_list]
+            self.u_right_mean_user_list = [meas_struct.oursin.u_right_mean_user_list]
+            self.u_invalid_boat_user_list = [meas_struct.oursin.u_invalid_boat_user_list]
+            self.u_invalid_depth_user_list = [meas_struct.oursin.u_invalid_depth_user_list]
+            self.u_invalid_water_user_list = [meas_struct.oursin.u_invalid_water_user_list]
+        else:
+            self.u_syst_list = meas_struct.oursin.u_syst_list.tolist()
+            self.u_compass_list = meas_struct.oursin.u_compass_list.tolist()
+            self.u_meas_list = meas_struct.oursin.u_meas_list.tolist()
+            self.u_ens_list = meas_struct.oursin.u_ens_list.tolist()
+            self.u_movbed_list = meas_struct.oursin.u_movbed_list.tolist()
+            self.u_invalid_water_list = meas_struct.oursin.u_invalid_water_list.tolist()
+            self.u_invalid_boat_list = meas_struct.oursin.u_invalid_boat_list.tolist()
+            self.u_invalid_depth_list = meas_struct.oursin.u_invalid_depth_list.tolist()
+            self.u_top_list = meas_struct.oursin.u_top_list.tolist()
+            self.u_bot_list = meas_struct.oursin.u_bot_list.tolist()
+            self.u_left_list = meas_struct.oursin.u_left_list.tolist()
+            self.u_right_list = meas_struct.oursin.u_right_list.tolist()
+
+            self.u_syst_mean_user_list = meas_struct.oursin.u_syst_mean_user_list.tolist()
+            self.u_compass_user_list = meas_struct.oursin.u_compass_user_list.tolist()
+            self.u_movbed_user_list = meas_struct.oursin.u_movbed_user_list.tolist()
+            self.u_meas_mean_user_list = meas_struct.oursin.u_meas_mean_user_list.tolist()
+            self.u_ens_user_list = meas_struct.oursin.u_ens_user_list.tolist()
+            self.u_top_mean_user_list = meas_struct.oursin.u_top_mean_user_list.tolist()
+            self.u_bot_mean_user_list = meas_struct.oursin.u_bot_mean_user_list.tolist()
+            self.u_left_mean_user_list = meas_struct.oursin.u_left_mean_user_list.tolist()
+            self.u_right_mean_user_list = meas_struct.oursin.u_right_mean_user_list.tolist()
+            self.u_invalid_boat_user_list = meas_struct.oursin.u_invalid_boat_user_list.tolist()
+            self.u_invalid_depth_user_list = meas_struct.oursin.u_invalid_depth_user_list.tolist()
+            self.u_invalid_water_user_list = meas_struct.oursin.u_invalid_water_user_list.tolist()
+
+        # COV
+        self.cov_68 = meas_struct.oursin.cov_68
+
+        self.nb_transects = meas_struct.oursin.nb_transects
+        self.checked_idx = meas_struct.oursin.checked_idx
+
+        # Reconstruct data frames from Matlab arrays
+        self.sim_original = pd.DataFrame(self.checkshape(meas_struct.oursin.sim_original),
+                                         columns=['q_total', 'q_top', 'q_bot', 'q_left', 'q_right', 'q_middle'])
+        self.sim_extrap_pp_16 = pd.DataFrame(self.checkshape(meas_struct.oursin.sim_extrap_pp_16),
+                                             columns=['q_total', 'q_top', 'q_bot'])
+        self.sim_extrap_pp_opt = pd.DataFrame(self.checkshape(meas_struct.oursin.sim_extrap_pp_opt),
+                                              columns=['q_total', 'q_top', 'q_bot'])
+        self.sim_extrap_pp_min = pd.DataFrame(self.checkshape(meas_struct.oursin.sim_extrap_pp_min),
+                                              columns=['q_total', 'q_top', 'q_bot'])
+        self.sim_extrap_pp_max = pd.DataFrame(self.checkshape(meas_struct.oursin.sim_extrap_pp_max),
+                                              columns=['q_total', 'q_top', 'q_bot'])
+        self.sim_extrap_cns_16 = pd.DataFrame(self.checkshape(meas_struct.oursin.sim_extrap_cns_16),
+                                              columns=['q_total', 'q_top', 'q_bot'])
+        self.sim_extrap_cns_opt = pd.DataFrame(self.checkshape(meas_struct.oursin.sim_extrap_cns_opt),
+                                               columns=['q_total', 'q_top', 'q_bot'])
+        self.sim_extrap_cns_min = pd.DataFrame(self.checkshape(meas_struct.oursin.sim_extrap_cns_min),
+                                               columns=['q_total', 'q_top', 'q_bot'])
+        self.sim_extrap_cns_max = pd.DataFrame(self.checkshape(meas_struct.oursin.sim_extrap_cns_max),
+                                               columns=['q_total', 'q_top', 'q_bot'])
+        self.sim_extrap_3pns_16 = pd.DataFrame(self.checkshape(meas_struct.oursin.sim_extrap_3pns_16),
+                                               columns=['q_total', 'q_top', 'q_bot'])
+        self.sim_extrap_3pns_opt = pd.DataFrame(self.checkshape(meas_struct.oursin.sim_extrap_3pns_opt),
+                                                columns=['q_total', 'q_top', 'q_bot'])
+        self.sim_edge_min = pd.DataFrame(self.checkshape(meas_struct.oursin.sim_edge_min),
+                                         columns=['q_total', 'q_left', 'q_right'])
+        self.sim_edge_max = pd.DataFrame(self.checkshape(meas_struct.oursin.sim_edge_max),
+                                         columns=['q_total', 'q_left', 'q_right'])
+        self.sim_draft_min = pd.DataFrame(self.checkshape(meas_struct.oursin.sim_draft_min),
+                                          columns=['q_total', 'q_top', 'q_left', 'q_right'])
+        self.sim_draft_max = pd.DataFrame(self.checkshape(meas_struct.oursin.sim_draft_max),
+                                          columns=['q_total', 'q_top', 'q_left', 'q_right'])
+        self.sim_cells_trdi = pd.DataFrame(self.checkshape(meas_struct.oursin.sim_cells_trdi),
+                                           columns=['q_total', 'q_middle'])
+        self.sim_cells_above = pd.DataFrame(self.checkshape(meas_struct.oursin.sim_cells_above),
+                                            columns=['q_total', 'q_middle'])
+        self.sim_cells_below = pd.DataFrame(self.checkshape(meas_struct.oursin.sim_cells_below),
+                                            columns=['q_total', 'q_middle'])
+        self.sim_cells_before = pd.DataFrame(self.checkshape(meas_struct.oursin.sim_cells_before),
+                                             columns=['q_total', 'q_middle'])
+        self.sim_cells_after = pd.DataFrame(self.checkshape(meas_struct.oursin.sim_cells_after),
+                                            columns=['q_total', 'q_middle'])
+        self.sim_shallow = pd.DataFrame(self.checkshape(meas_struct.oursin.sim_shallow),
+                                        columns=['q_total', 'q_middle'])
+        self.sim_depth_hold = pd.DataFrame(self.checkshape(meas_struct.oursin.sim_depth_hold),
+                                           columns=['q_total', 'q_middle'])
+        self.sim_depth_next = pd.DataFrame(self.checkshape(meas_struct.oursin.sim_depth_next),
+                                           columns=['q_total', 'q_middle'])
+        self.sim_boat_hold = pd.DataFrame(self.checkshape(meas_struct.oursin.sim_boat_hold),
+                                          columns=['q_total', 'q_middle'])
+        self.sim_boat_next = pd.DataFrame(self.checkshape(meas_struct.oursin.sim_boat_next),
+                                          columns=['q_total', 'q_middle'])
+        self.u_contribution_meas = pd.DataFrame(self.checkshape(meas_struct.oursin.u_contribution_meas),
+                                                columns=['boat', 'water', 'dzi'])
+        self.u = pd.DataFrame(self.checkshape(meas_struct.oursin.u),
+                              columns=['u_syst', 'u_compass', 'u_movbed', 'u_ens', 'u_meas', 'u_top', 'u_bot',
+                                       'u_left', 'u_right', 'u_boat', 'u_depth', 'u_water', 'u_cov', 'total',
+                                       'total_95'])
+        self.u_measurement = pd.DataFrame(self.checkshape(meas_struct.oursin.u_measurement),
+                                          columns=['u_syst', 'u_compass', 'u_movbed', 'u_ens', 'u_meas', 'u_top',
+                                                   'u_bot', 'u_left', 'u_right', 'u_boat', 'u_depth', 'u_water',
+                                                   'u_cov', 'total', 'total_95'])
+        self.u_contribution = pd.DataFrame(self.checkshape(meas_struct.oursin.u_contribution),
+                                           columns=['u_syst', 'u_compass', 'u_movbed', 'u_ens', 'u_meas', 'u_top',
+                                                    'u_bot', 'u_left', 'u_right', 'u_boat', 'u_depth', 'u_water',
+                                                    'u_cov', 'total'])
+        self.u_contribution_measurement = pd.DataFrame(self.checkshape(meas_struct.oursin.u_contribution_measurement),
+                                                       columns=['u_syst', 'u_compass', 'u_movbed', 'u_ens', 'u_meas',
+                                                                'u_top', 'u_bot', 'u_left', 'u_right', 'u_boat',
+                                                                'u_depth', 'u_water', 'u_cov', 'total'])
+        self.u_user = pd.DataFrame(self.checkshape(meas_struct.oursin.u_user),
+                                   columns=['u_syst', 'u_compass', 'u_movbed', 'u_ens', 'u_meas', 'u_top', 'u_bot',
+                                            'u_left', 'u_right', 'u_boat', 'u_depth', 'u_water', 'u_cov', 'total',
+                                            'total_95'])
+        self.u_measurement_user = pd.DataFrame(self.checkshape(meas_struct.oursin.u_measurement_user),
+                                               columns=['u_syst', 'u_compass', 'u_movbed', 'u_ens', 'u_meas', 'u_top',
+                                                        'u_bot', 'u_left', 'u_right', 'u_boat', 'u_depth', 'u_water',
+                                                        'u_cov', 'total', 'total_95'])
+        self.u_contribution_user = pd.DataFrame(self.checkshape(meas_struct.oursin.u_contribution_user),
+                                                columns=['u_syst', 'u_compass', 'u_movbed', 'u_ens', 'u_meas', 'u_top',
+                                                         'u_bot', 'u_left', 'u_right', 'u_boat', 'u_depth', 'u_water',
+                                                         'u_cov', 'total'])
+        self.u_contribution_measurement_user = pd.DataFrame(
+            self.checkshape(meas_struct.oursin.u_contribution_measurement_user),
+            columns=['u_syst', 'u_compass', 'u_movbed', 'u_ens',
+                     'u_meas', 'u_top', 'u_bot', 'u_left', 'u_right',
+                     'u_boat', 'u_depth', 'u_water', 'u_cov', 'total'])
+
+    @staticmethod
+    def checkshape(a):
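+        """Ensures the supplied array is at least 2-D, reshaping a 1-D array into a single row.
+
+        Parameters
+        ----------
+        a: np.ndarray
+            Array loaded from the Matlab file
+
+        Returns
+        -------
+        a: np.ndarray
+            Array with at least two dimensions
+        """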
+        if len(a.shape) < 2:
+            a = a.reshape(1, -1)
+        return a
+
+    def compute_oursin(self, meas, user_advanced_settings=None, u_measurement_user=None):
+        """Computes the uncertainty for the components of the discharge measurement
+        using measurement data or user provided values.
+
+        Parameters
+        ----------
+        meas: Measurement
+            Object of class Measurement
+        user_advanced_settings: dict
+            Dictionary of user input on advanced settings
+        u_measurement_user: dict
+            Dictionary of user estimates of uncertainty, standard deviation in percent, for each component
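+
+        Notes
+        -----
+        A minimal usage sketch, assuming `meas` is a fully processed Measurement
+        object (variable name hypothetical)::
+
+            oursin = Oursin()
+            oursin.compute_oursin(meas)
+            total_95 = oursin.u_measurement['total_95']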
+        """
+
+        if user_advanced_settings is not None:
+            self.user_advanced_settings = user_advanced_settings
+
+        if u_measurement_user is not None:
+            self.u_measurement_user = u_measurement_user
+
+        # Initialize lists
+        self.checked_idx = []
+        self.u_syst_list = []
+        self.u_meas_list = []
+        self.u_ens_list = []
+        self.u_movbed_list = []
+        self.u_invalid_water_list = []
+        self.u_invalid_boat_list = []
+        self.u_invalid_depth_list = []
+        self.u_top_list = []
+        self.u_bot_list = []
+        self.u_left_list = []
+        self.u_right_list = []
+
+        # Prep data for computations
+        self.data_prep(meas)
+        self.compute_measurement_cov(meas=meas, method='Bayes')
+
+        # 1. Systematic terms + correction terms (moving bed)
+        self.uncertainty_system()
+        self.uncertainty_moving_bed(meas=meas)
+        self.uncertainty_compass(meas=meas)
+
+        # 2. Measured uncertainty
+        self.uncertainty_measured_discharge(meas=meas)
+        self.uncertainty_number_ensembles(meas)
+
+        # 3. Run all the simulations to compute possible discharges
+        self.run_simulations(meas)
+
+        # 4. Compute uncertainty terms based on simulations and assuming a rectangular law
+        self.uncertainty_top_discharge()
+        self.uncertainty_bottom_discharge()
+        self.uncertainty_left_discharge()
+        self.uncertainty_right_discharge()
+        self.uncertainty_invalid_depth_data()
+        self.uncertainty_invalid_boat_data()
+        self.uncertainty_invalid_water_data()
+
+        # 5. Compute combined uncertainty
+        self.u, self.u_measurement, self.u_contribution, self.u_contribution_measurement = \
+            self.compute_combined_uncertainty(u_syst=self.u_syst_list,
+                                              u_compass=self.u_compass_list,
+                                              u_movbed=self.u_movbed_list,
+                                              u_meas=self.u_meas_list,
+                                              u_ens=self.u_ens_list,
+                                              u_top=self.u_top_list,
+                                              u_bot=self.u_bot_list,
+                                              u_left=self.u_left_list,
+                                              u_right=self.u_right_list,
+                                              u_boat=self.u_invalid_boat_list,
+                                              u_depth=self.u_invalid_depth_list,
+                                              u_water=self.u_invalid_water_list,
+                                              cov_68=self.cov_68)
+
+        self.u_user, self.u_measurement_user, self.u_contribution_user, self.u_contribution_measurement_user = \
+            self.compute_combined_uncertainty(u_syst=self.u_syst_mean_user_list,
+                                              u_compass=self.u_compass_user_list,
+                                              u_movbed=self.u_movbed_user_list,
+                                              u_meas=self.u_meas_mean_user_list,
+                                              u_ens=self.u_ens_user_list,
+                                              u_top=self.u_top_mean_user_list,
+                                              u_bot=self.u_bot_mean_user_list,
+                                              u_left=self.u_left_mean_user_list,
+                                              u_right=self.u_right_mean_user_list,
+                                              u_boat=self.u_invalid_boat_user_list,
+                                              u_depth=self.u_invalid_depth_user_list,
+                                              u_water=self.u_invalid_water_user_list,
+                                              cov_68=self.cov_68)
+
+    @staticmethod
+    def compute_combined_uncertainty(u_syst, u_compass, u_movbed, u_meas, u_ens, u_top, u_bot, u_left, u_right,
+                                     u_boat, u_depth, u_water, cov_68):
+        """Combined the uncertainty for each transect and for the measurement
+
+        Parameters
+        ----------
+        u_syst: list
+            List of system uncertainties for each transect
+        u_compass: list
+            List of uncertainties due to heading error
+        u_movbed: list
+            List of moving-bed uncertainties for each transect
+        u_meas: list
+            List of uncertainties for the measured portion for each transect
+        u_ens: list
+            List of uncertainties due to number of ensembles in each transect
+        u_top: list
+            List of uncertainties due to top extrapolation in each transect
+        u_bot: list
+            List of uncertainties due to the bottom extrapolation in each transect
+        u_left: list
+            List of uncertainties due to the left edge discharge in each transect
+        u_right: list
+            List of uncertainties due to the right edge discharge in each transect
+        u_boat: list
+            List of uncertainties due to invalid boat velocities
+        u_depth: list
+            List of uncertainties due to invalid depth data
+        u_water: list
+            List of uncertainties due to invalid water data in each transect
+        cov_68: float
+            Coefficient of variation for all transects
+
+        Returns
+        -------
+        u: DataFrame
+            DataFrame containing standard deviations in percent for each transect: u_syst, u_compass, u_movbed, u_ens,
+            u_meas, u_top, u_bot, u_left, u_right, u_boat, u_depth, u_water, u_cov, total, and total_95
+        u_measurement: DataFrame
+            DataFrame containing standard deviations in percent for the whole measurement: u_syst, u_compass, u_movbed,
+            u_ens, u_meas, u_top, u_bot, u_left, u_right, u_boat, u_depth, u_water, u_cov, total, and total_95
+        u_contribution: DataFrame
+            DataFrame containing the uncertainty contribution in percent for each transect from: u_syst, u_compass,
+            u_movbed, u_ens, u_meas, u_top, u_bot, u_left, u_right, u_boat, u_depth, u_water, u_cov, and total
+        u_contribution_measurement: DataFrame
+            DataFrame containing the uncertainty contribution in percent for the whole measurement from: u_syst,
+            u_compass, u_movbed, u_ens, u_meas, u_top, u_bot, u_left, u_right, u_boat, u_depth, u_water, u_cov,
+            and total
+        """
+
+        # Create a DataFrame with all computed uncertainties for each checked transect
+        u = pd.DataFrame(columns=['u_syst', 'u_compass', 'u_movbed', 'u_ens', 'u_meas', 'u_top', 'u_bot',
+                                  'u_left', 'u_right', 'u_boat', 'u_depth', 'u_water', 'u_cov'])
+        u['u_syst'] = u_syst
+        u['u_compass'] = u_compass
+        u['u_movbed'] = u_movbed
+        u['u_meas'] = u_meas
+        u['u_ens'] = u_ens
+        u['u_water'] = u_water
+        u['u_top'] = u_top
+        u['u_bot'] = u_bot
+        u['u_left'] = u_left
+        u['u_right'] = u_right
+        u['u_cov'] = cov_68
+        u['u_boat'] = u_boat
+        u['u_depth'] = u_depth
+
+        n_transects = len(u_ens)
+
+        # Convert uncertainty (68% level of confidence) into variance
+        # Note that only variance is additive
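+        # i.e., for each transect u_total = sqrt(sum of u_k ** 2) over all components,
+        # and the 95% value applies a coverage factor of 2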
+        u2 = u.pow(2)
+        u2_measurement = u2.mean(axis=0, skipna=False).to_frame().T
+
+        # Combined uncertainty by transect
+        # Sum of variance of each component, then sqrt, then multiply by 100 for percentage
+        u['total'] = (u2.sum(axis=1, skipna=False) ** 0.5)
+        u['total_95'] = u['total'] * 2
+        u = u.mul(100)
+
+        # Uncertainty for the measurement
+        # The random error is computed as a mean of the random error from the measured portion and the overall
+        # random error from the COV.
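+        # As combined below: U ** 2 = (1 / N) * (mean(u_meas ** 2) + mean(u_cov ** 2))
+        # + (sum of the mean bias variances), where N is the number of transects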
+        u2_random = u2['u_meas'].mean(skipna=False) + u2['u_cov'].mean(skipna=False)
+
+        # All other sources are systematic (mostly due to computation method and values from user)
+        u2_bias = u2_measurement.drop(['u_meas', 'u_cov'], axis=1).sum(axis=1, skipna=False)
+
+        # Combine all uncertainty sources
+        u2_measurement['total'] = (1 / n_transects) * u2_random + u2_bias[0]
+        u_measurement = u2_measurement ** 0.5
+        u_measurement['total_95'] = u_measurement['total'] * 2
+        u_measurement = u_measurement * 100
+
+        # Compute relative contributions from each source
+        u_contribution_measurement = u2_measurement.copy()
+
+        # Adjust contribution of u_meas and u_cov to account for number of transects
+        u_contribution_measurement['u_meas'] = u2_measurement['u_meas'] / n_transects
+        u_contribution_measurement['u_cov'] = u2_measurement['u_cov'] / n_transects
+        u_contribution_measurement = u_contribution_measurement.div(u_contribution_measurement['total'], axis=0)
+
+        # Make the same adjustment of u_meas and u_cov for the per-transect contributions
+        u_contribution = u2.copy()
+        u_contribution['u_meas'] = u2['u_meas'].div(n_transects, axis=0)
+        u_contribution['u_cov'] = u2['u_cov'].div(n_transects, axis=0)
+        u_contribution['total'] = u_contribution.sum(axis=1)
+        u_contribution = u_contribution.div(u_contribution['total'], axis=0)
+
+        return u, u_measurement, u_contribution, u_contribution_measurement
+
+    def data_prep(self, meas):
+        """Determine checked transects and max and min exponents for power and no slip extrapolation.
+
+        Parameters
+        ----------
+        meas: MeasurementData
+            Object of MeasurementData
+        """
+
+        # Use only checked transects
+        # Extract data that are used later on (PP and NS exponents)
+        self.checked_idx = []
+        self.bot_meth = []
+        self.exp_95ic_min = []
+        self.exp_95ic_max = []
+        self.pp_exp = []
+        self.ns_exp = []
+
+        for n in range(len(meas.transects)):
+            if meas.transects[n].checked:
+                self.checked_idx.append(n)
+
+                # Bottom method selected using data from each transect only
+                self.bot_meth.append(meas.extrap_fit.sel_fit[n].bot_method_auto)
+
+                # Store 95 percent bounds on power fit exponent for each transect if power selected
+                if meas.extrap_fit.sel_fit[n].bot_method_auto == "Power":
+                    try:
+                        self.exp_95ic_min.append(meas.extrap_fit.sel_fit[n].exponent_95_ci[0])
+                    except TypeError:
+                        self.exp_95ic_min.append(np.nan)
+                    try:
+                        self.exp_95ic_max.append(meas.extrap_fit.sel_fit[n].exponent_95_ci[1])
+                    except TypeError:
+                        self.exp_95ic_max.append(np.nan)
+
+                    self.pp_exp.append(meas.extrap_fit.sel_fit[n].pp_exponent)
+
+                # Store no slip exponent if no slip selected
+                elif meas.extrap_fit.sel_fit[n].bot_method_auto == "No Slip":
+                    self.ns_exp.append(meas.extrap_fit.sel_fit[n].ns_exponent)
+
+        self.nb_transects = len(self.checked_idx)
+
+    def run_simulations(self, meas):
+        """Compute discharges (top, bot, right, left, total, middle)  based on possible scenarios
+
+        Parameters
+        ----------
+        meas: Measurement
+            Object of class Measurement
+        """
+
+        # If the lists have not been saved, recompute q_sensitivity
+        if not hasattr(meas.extrap_fit.q_sensitivity, 'q_pp_list'):
+            meas.extrap_fit.q_sensitivity.populate_data(meas.transects, meas.extrap_fit.sel_fit)
+
+        # Simulation original
+        self.sim_orig(meas)
+
+        # Simulation power / power default 1/6
+        self.sim_extrap_pp_16['q_total'] = meas.extrap_fit.q_sensitivity.q_pp_list
+        self.sim_extrap_pp_16['q_top'] = meas.extrap_fit.q_sensitivity.q_top_pp_list
+        self.sim_extrap_pp_16['q_bot'] = meas.extrap_fit.q_sensitivity.q_bot_pp_list
+
+        # Simulations power / power optimized
+        self.sim_pp_min_max_opt(meas=meas)
+
+        # Simulation cns default 1/6
+        self.sim_extrap_cns_16['q_total'] = meas.extrap_fit.q_sensitivity.q_cns_list
+        self.sim_extrap_cns_16['q_top'] = meas.extrap_fit.q_sensitivity.q_top_cns_list
+        self.sim_extrap_cns_16['q_bot'] = meas.extrap_fit.q_sensitivity.q_bot_cns_list
+
+        # Simulation cns optimized
+        self.sim_cns_min_max_opt(meas=meas)
+
+        # Simulation 3pt no slip default 1/6
+        self.sim_extrap_3pns_16['q_total'] = meas.extrap_fit.q_sensitivity.q_3p_ns_list
+        self.sim_extrap_3pns_16['q_top'] = meas.extrap_fit.q_sensitivity.q_top_3p_ns_list
+        self.sim_extrap_3pns_16['q_bot'] = meas.extrap_fit.q_sensitivity.q_bot_3p_ns_list
+
+        # Simulation 3pt no slip optimized
+        self.sim_extrap_3pns_opt['q_total'] = meas.extrap_fit.q_sensitivity.q_3p_ns_opt_list
+        self.sim_extrap_3pns_opt['q_top'] = meas.extrap_fit.q_sensitivity.q_top_3p_ns_opt_list
+        self.sim_extrap_3pns_opt['q_bot'] = meas.extrap_fit.q_sensitivity.q_bot_3p_ns_opt_list
+
+        # Simulations edge min and max
+        self.sim_edge_min_max(meas=meas)
+
+        # Simulation draft min and max
+        self.sim_draft_max_min(meas=meas)
+
+        # Simulation of invalid cells and ensembles
+        self.sim_invalid_cells(meas=meas)
+
+        # Simulation of shallow no cells
+        self.sim_shallow_ens(meas=meas)
+
+        # Simulation of invalid boat velocity
+        self.sim_invalid_boat_velocity(meas=meas)
+
+        # Simulation of invalid depths
+        self.sim_invalid_depth(meas=meas)
+
+    def uncertainty_measured_discharge(self, meas):
+        """Compute the uncertainty related to the measured area.
+
+        Parameters
+        ----------
+        meas: Measurement
+            Object of class Measurement
+        """
+
+        self.u_contribution_meas = pd.DataFrame(columns=['boat', 'water', 'dzi'])
+
+        # Set uncertainty of cell size
+        if np.isnan(self.user_advanced_settings['dzi_prct_user']):
+            u_dzi = self.default_advanced_settings['dzi_prct'] * 0.01
+        else:
+            u_dzi = self.user_advanced_settings['dzi_prct_user'] * 0.01
+
+        # Compute the uncertainty due to the measured area
+        for transect_id in self.checked_idx:
+
+            # Relative standard deviation of error velocity (Water Track)
+            std_ev_wt_ens = self.water_std_by_error_velocity(meas.transects[transect_id])
+
+            u_boat = np.nan
+            if meas.transects[transect_id].boat_vel.selected == 'bt_vel':
+                # Relative standard deviation of error velocity (Bottom Track)
+                u_boat = self.boat_std_by_error_velocity(meas.transects[transect_id])
+
+            elif meas.transects[transect_id].boat_vel.selected == 'gga_vel':
+                boat_std = np.nan
+                if np.isnan(self.user_advanced_settings['gga_boat_mps_user']):
+                    if meas.transects[transect_id].gps.altitude_ens_m is not None:
+                        # Estimate the uncertainty in gga boat velocity as 1/3 of the standard deviation of
+                        # the elevation (estimate of horizontal position uncertainty) divided by time
+                        boat_std = (np.nanstd(meas.transects[transect_id].gps.altitude_ens_m, ddof=1) / 3) / \
+                                   np.nanmean(np.diff(meas.transects[transect_id].gps.gga_serial_time_ens))
+                else:
+                    boat_std = self.user_advanced_settings['gga_boat_mps_user']
+                if meas.transects[transect_id].boat_vel.gga_vel is not None:
+                    u = meas.transects[transect_id].boat_vel.gga_vel.u_processed_mps
+                    v = meas.transects[transect_id].boat_vel.gga_vel.v_processed_mps
+                    speed = np.sqrt(u ** 2 + v ** 2)
+                    u_boat = boat_std / speed
+
+            elif meas.transects[transect_id].boat_vel.selected == 'vtg_vel':
+                if np.isnan(self.user_advanced_settings['vtg_boat_mps_user']):
+                    boat_std = np.nan
+                    if meas.transects[transect_id].gps is not None:
+                        boat_std = self.default_advanced_settings['vtg_boat_mps']
+                else:
+                    boat_std = self.user_advanced_settings['vtg_boat_mps_user']
+                if meas.transects[transect_id].boat_vel.vtg_vel is not None:
+                    u = meas.transects[transect_id].boat_vel.vtg_vel.u_processed_mps
+                    v = meas.transects[transect_id].boat_vel.vtg_vel.v_processed_mps
+                    speed = np.sqrt(u ** 2 + v ** 2)
+                    u_boat = boat_std / speed
+
+            # Computation of u_meas
+            q_2_tran = meas.discharge[transect_id].total ** 2
+            q_2_ens = meas.discharge[transect_id].middle_ens ** 2
+            n_cell_ens = meas.transects[transect_id].w_vel.cells_above_sl.sum(axis=0)  # number of cells by ens
+            n_cell_ens = np.where(n_cell_ens == 0, np.nan, n_cell_ens)
+
+            # Variance for each ensemble
+            u_2_meas = q_2_ens * (u_boat ** 2 + (1 / n_cell_ens) * (std_ev_wt_ens ** 2 + u_dzi ** 2))
+
+            u_2_prct_meas = np.nansum(u_2_meas) / q_2_tran
+
+            # Standard deviation
+            u_prct_meas = u_2_prct_meas ** 0.5
+            self.u_meas_list.append(u_prct_meas)
+
+            # Compute the contribution of each term to u_meas (the contributions sum to 1)
+            u_contrib_boat = (np.nan_to_num(q_2_ens * (u_boat ** 2)).sum() / q_2_tran) / u_2_prct_meas
+            u_contrib_water = (np.nan_to_num(q_2_ens * ((1 / n_cell_ens) * (std_ev_wt_ens ** 2))).sum()
+                               / q_2_tran) / u_2_prct_meas
+            u_contrib_dzi = (np.nan_to_num(q_2_ens * ((1 / n_cell_ens) * (u_dzi ** 2))).sum()
+                             / q_2_tran) / u_2_prct_meas
+
+            self.u_contribution_meas.loc[len(self.u_contribution_meas)] = [u_contrib_boat,
+                                                                           u_contrib_water,
+                                                                           u_contrib_dzi]
+
+        # Apply user specified uncertainty
+        if np.isnan(self.user_specified_u['u_meas_mean_user']):
+            self.u_meas_mean_user_list = self.u_meas_list
+        else:
+            self.u_meas_mean_user_list = [0.01 * self.user_specified_u['u_meas_mean_user']] * self.nb_transects
+
+    def uncertainty_moving_bed(self, meas):
+        """Computes the moving-bed uncertainty
+
+        Parameters
+        ----------
+        meas: MeasurementData
+            Object of MeasurementData
+        """
+
+        # Compute moving-bed uncertainty
+        if len(self.checked_idx) and meas.transects[self.checked_idx[0]].boat_vel.selected == 'bt_vel':
+            # Boat velocity based on bottom track, moving-bed possible
+            if len(meas.mb_tests) > 0:
+                # Moving-bed tests recorded
+                user_valid = []
+                quality = []
+                moving_bed = []
+                used = []
+                for test in meas.mb_tests:
+                    user_valid.append(test.user_valid)
+                    if test.test_quality == 'Errors':
+                        quality.append(False)
+                    else:
+                        quality.append(True)
+                    moving_bed.append(test.moving_bed)
+                    used.append(test.use_2_correct)
+
+                # Check to see if there are any valid tests
+                if np.any(np.logical_and(np.asarray(quality), np.asarray(user_valid))):
+                    # Check to see if the valid tests indicate a moving bed
+                    moving_bed_bool = []
+                    for result in moving_bed:
+                        if result == 'Yes':
+                            moving_bed_bool.append(True)
+                        else:
+                            moving_bed_bool.append(False)
+                    valid_moving_bed = np.logical_and(quality, np.asarray(moving_bed_bool))
+                    if np.any(valid_moving_bed):
+                        # Check to see that a correction was used
+                        if np.any(np.logical_and(valid_moving_bed, np.asarray(used))):
+                            # Moving-bed exists and correction applied
+                            moving_bed_uncertainty = 1.5
+                        else:
+                            # Moving-bed exists and no correction applied
+                            moving_bed_uncertainty = 3
+                    else:
+                        # Valid tests indicated no moving bed
+                        moving_bed_uncertainty = 1
+                else:
+                    moving_bed_uncertainty = 3
+            elif meas.observed_no_moving_bed:
+                moving_bed_uncertainty = 1
+            else:
+                # No moving bed tests
+                moving_bed_uncertainty = 3
+        else:
+            # GPS used as boat velocity reference
+            moving_bed_uncertainty = 0
+
+        # Expand to list
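+        # (0.01 converts percent to a fraction; dividing by 2 presumably scales the
+        # estimated bound to a 68% standard uncertainty)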
+        self.u_movbed_list = [0.01 * moving_bed_uncertainty / 2] * self.nb_transects
+
+        # Apply user specified
+        if np.isnan(self.user_specified_u['u_movbed_user']):
+            self.u_movbed_user_list = self.u_movbed_list
+        else:
+            self.u_movbed_user_list = [self.user_specified_u['u_movbed_user'] * 0.01] * self.nb_transects
+
+    def uncertainty_system(self):
+        """Compute systematic uncertainty
+        """
+
+        # Assume 1.31% systematic bias at 68%
+        self.u_syst_list = [0.01 * 1.31] * self.nb_transects
+
+        # Override with user specification if available
+        if np.isnan(self.user_specified_u['u_syst_mean_user']):
+            self.u_syst_mean_user_list = self.u_syst_list
+        else:
+            self.u_syst_mean_user_list = [self.user_specified_u['u_syst_mean_user'] * 0.01] * self.nb_transects
+
+    def uncertainty_number_ensembles(self, meas):
+        """Computes the uncertainty due to the number of ensembles in a transect.
+
+        Parameters
+        ----------
+        meas: MeasurementData
+            Object of MeasurementData
+        """
+
+        for trans_id in self.checked_idx:
+            # Compute uncertainty due to limited number of ensembles (ISO 748; Le Coz et al., 2012)
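+            # e.g., a transect with 200 ensembles gives 0.01 * 32 * 200 ** -0.88, roughly 0.003 (0.3% at 68%)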
+            self.u_ens_list.append(0.01 * 32 * len(meas.discharge[trans_id].middle_ens) ** (-0.88))
+
+        if np.isnan(self.user_specified_u['u_ens_user']):
+            self.u_ens_user_list = self.u_ens_list
+        else:
+            self.u_ens_user_list = [0.01 * self.user_specified_u['u_ens_user']] * self.nb_transects
+
+    def uncertainty_compass(self, meas):
+        """Compute the potential bias in the measurement due to dynamic compass errors when using GPS as
+        the navigation reference. The method is based on Mueller (2018,
+        https://doi.org/10.1016/j.flowmeasinst.2018.10.004), equation 41.
+
+        Parameters
+        ----------
+        meas: MeasurementData
+            Object of MeasurementData
+        """
+
+        # No compass error component for bottom track referenced discharges
+        if meas.transects[self.checked_idx[0]].boat_vel.selected == 'bt_vel':
+            self.u_compass_list = [0] * self.nb_transects
+        else:
+            # Assume a default compass error unless one is provided by the user
+            if np.isnan(self.user_advanced_settings['compass_error_deg_user']):
+                compass_error = self.default_advanced_settings['compass_error_deg']
+            else:
+                compass_error = self.user_advanced_settings['compass_error_deg_user']
+
+            # Compute discharge bias based on compass error and boat speed
+            meas_stats = meas.compute_measurement_properties(meas)
+            speed_ratio = meas_stats['avg_boat_speed'][self.checked_idx] / \
+                meas_stats['avg_water_speed'][self.checked_idx]
+            self.u_compass_list = np.abs(1 - (cosd(compass_error) + 0.5 * speed_ratio * sind(compass_error)))
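+            # e.g., a 1 degree compass error with speed_ratio = 0.5 gives
+            # abs(1 - (cosd(1) + 0.25 * sind(1))) ≈ 0.0042, about 0.4%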
+
+        # Override if user provides uncertainty due to compass
+        if np.isnan(self.user_specified_u['u_compass_user']):
+            self.u_compass_user_list = self.u_compass_list
+        else:
+            self.u_compass_user_list = [self.user_specified_u['u_compass_user'] * 0.01] * self.nb_transects
+
+    def uncertainty_top_discharge(self):
+        """Computes the uncertainty in the top discharge using simulations and rectangular law.
+        """
+
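+        # apply_u_rect presumably returns the rectangular-law standard uncertainty,
+        # (max - min) / (2 * sqrt(3)), of col_name across the listed simulations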
+        self.u_top_list = list(Oursin.apply_u_rect(list_sims=[self.sim_original,
+                                                              self.sim_extrap_pp_opt,
+                                                              self.sim_extrap_pp_min,
+                                                              self.sim_extrap_pp_max,
+                                                              self.sim_extrap_cns_opt,
+                                                              self.sim_extrap_cns_min,
+                                                              self.sim_extrap_cns_max,
+                                                              self.sim_extrap_3pns_opt,
+                                                              self.sim_draft_max,
+                                                              self.sim_draft_min],
+                                                   col_name='q_top')
+                               / np.abs(self.sim_original['q_total']))
+
+        if np.isnan(self.user_specified_u['u_top_mean_user']):
+            self.u_top_mean_user_list = self.u_top_list
+        else:
+            self.u_top_mean_user_list = [0.01 * self.user_specified_u['u_top_mean_user']] * self.nb_transects
+
+    def uncertainty_bottom_discharge(self):
+        """Computes uncertainty of bottom discharge using simulations and rectangular law.
+        """
+
+        self.u_bot_list = list(Oursin.apply_u_rect(list_sims=[self.sim_original,
+                                                              self.sim_extrap_pp_opt,
+                                                              self.sim_extrap_pp_min,
+                                                              self.sim_extrap_pp_max,
+                                                              self.sim_extrap_cns_opt,
+                                                              self.sim_extrap_cns_min,
+                                                              self.sim_extrap_cns_max,
+                                                              self.sim_extrap_3pns_opt],
+                                                   col_name='q_bot')
+                               / np.abs(self.sim_original['q_total']))
+
+        if np.isnan(self.user_specified_u['u_bot_mean_user']):
+            self.u_bot_mean_user_list = self.u_bot_list
+        else:
+            self.u_bot_mean_user_list = [0.01 * self.user_specified_u['u_bot_mean_user']] * self.nb_transects
+
+    def uncertainty_left_discharge(self):
+        """Computes the uncertianty of the left edge discharge using simulations and the rectangular law.
+        """
+
+        self.u_left_list = list(Oursin.apply_u_rect(list_sims=[self.sim_original,
+                                                               self.sim_edge_min,
+                                                               self.sim_edge_max,
+                                                               self.sim_draft_min,
+                                                               self.sim_draft_max],
+                                                    col_name='q_left')
+                                / np.abs(self.sim_original['q_total']))
+
+        if np.isnan(self.user_specified_u['u_left_mean_user']):
+            self.u_left_mean_user_list = self.u_left_list
+        else:
+            self.u_left_mean_user_list = [0.01 * self.user_specified_u['u_left_mean_user']] * self.nb_transects
+
+    def uncertainty_right_discharge(self):
+        """Computes the uncertainty of the right edge discharge using simulations and the rectangular law.
+        """
+
+        self.u_right_list = list(Oursin.apply_u_rect(list_sims=[self.sim_original,
+                                                                self.sim_edge_min,
+                                                                self.sim_edge_max,
+                                                                self.sim_draft_min,
+                                                                self.sim_draft_max],
+                                                     col_name='q_right')
+                                 / np.abs(self.sim_original['q_total']))
+
+        if np.isnan(self.user_specified_u['u_right_mean_user']):
+            self.u_right_mean_user_list = self.u_right_list
+        else:
+            self.u_right_mean_user_list = [0.01 * self.user_specified_u['u_right_mean_user']] * self.nb_transects
+
+    def uncertainty_invalid_depth_data(self):
+        """Computes the uncertainty due to invalid depth data using simulations and the retangular law.
+        """
+
+        self.u_invalid_depth_list = list(Oursin.apply_u_rect(list_sims=[self.sim_original,
+                                                                        self.sim_depth_hold,
+                                                                        self.sim_depth_next],
+                                                             col_name='q_total')
+                                         / np.abs(self.sim_original['q_total']))
+
+        if np.isnan(self.user_specified_u['u_invalid_depth_user']):
+            self.u_invalid_depth_user_list = self.u_invalid_depth_list
+        else:
+            self.u_invalid_depth_user_list = [0.01 * self.user_specified_u[
+                'u_invalid_depth_user']] * self.nb_transects
+
+    def uncertainty_invalid_boat_data(self):
+        """Computes the uncertainty due to invalid boat data using simulations and the rectangular law.
+        """
+
+        self.u_invalid_boat_list = list(Oursin.apply_u_rect(list_sims=[self.sim_original,
+                                                                       self.sim_boat_hold,
+                                                                       self.sim_boat_next],
+                                                            col_name='q_total')
+                                        / np.abs(self.sim_original['q_total']))
+
+        if np.isnan(self.user_specified_u['u_invalid_boat_user']):
+            self.u_invalid_boat_user_list = self.u_invalid_boat_list
+        else:
+            self.u_invalid_boat_user_list = [0.01 * self.user_specified_u['u_invalid_boat_user']] * self.nb_transects
+
+    def uncertainty_invalid_water_data(self):
+        """Computes the uncertainty due to invalid water data assuming rectangular law.
+        """
+
+        # Uncertainty due to invalid cells and ensembles
+        self.u_invalid_water_list = list(Oursin.apply_u_rect(list_sims=[self.sim_original,
+                                                                        self.sim_cells_trdi,
+                                                                        self.sim_cells_above,
+                                                                        self.sim_cells_below,
+                                                                        self.sim_cells_before,
+                                                                        self.sim_cells_after,
+                                                                        self.sim_shallow],
+                                                             col_name='q_total')
+                                         / np.abs(self.sim_original['q_total']))
+
+        if np.isnan(self.user_specified_u['u_invalid_water_user']):
+            self.u_invalid_water_user_list = self.u_invalid_water_list
+        else:
+            self.u_invalid_water_user_list = [0.01 * self.user_specified_u['u_invalid_water_user']] \
+                                             * self.nb_transects
+
+    def compute_measurement_cov(self, meas, method='Bayes'):
+        """Compute the coefficient of variation of the total transect discharges used in the measurement.
+
+        Parameters
+        ----------
+        meas: MeasurementData
+            Object of MeasurementData
+        method: str
+            Determines method to use (Bayes or QRev)
+        """
+
+        self.cov_68 = np.nan
+
+        if method == 'QRev':
+
+            # Only compute for multiple transects
+            if self.nb_transects > 1:
+                total_q = []
+                for trans_id in self.checked_idx:
+                    total_q.append(meas.discharge[trans_id].total)
+
+                # Compute coefficient of variation
+                cov = np.abs(np.nanstd(total_q, ddof=1) / np.nanmean(total_q))
+
+                # Inflate the cov to the 95% value
+                if len(total_q) == 2:
+                    # For two transects, use an approximate method to reduce the otherwise high
+                    # coverage factor, accounting for prior knowledge from 720 second duration analyses
+                    cov_95 = cov * 3.3
+                    self.cov_68 = cov_95 / 2
+                else:
+                    # Use Student's t to inflate COV for n > 2
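+                    # e.g., n = 4: t(0.975, 3) ≈ 3.18, so cov_68 ≈ 3.18 * cov / (2 * sqrt(4)) ≈ 0.8 * cov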
+                    cov_95 = t.interval(0.95, len(total_q) - 1)[1] * cov / len(total_q) ** 0.5
+                    self.cov_68 = cov_95 / 2
+        elif method == 'Bayes':
+
+            # Set prior
+            if np.isnan(meas.oursin.user_advanced_settings['cov_prior_user']):
+                cov_prior = meas.oursin.default_advanced_settings['cov_prior']
+            else:
+                cov_prior = meas.oursin.user_advanced_settings['cov_prior_user']
+
+            if np.isnan(meas.oursin.user_advanced_settings['cov_prior_u_user']):
+                cov_prior_u = meas.oursin.default_advanced_settings['cov_prior_u']
+            else:
+                cov_prior_u = meas.oursin.user_advanced_settings['cov_prior_u_user']
+
+            # Create list of observations
+            transects_total_q = []
+            for idx in meas.checked_transect_idx:
+                transects_total_q.append(meas.discharge[idx].total)
+
+            # Compute COV
+            self.cov_68 = bayes_cov(np.array(transects_total_q), cov_prior, cov_prior_u, 20000)
+
+    def sim_orig(self, meas):
+        """Stores original measurement results in a data frame
+
+        Parameters
+        ----------
+        meas: MeasurementData
+            Object of MeasurementData
+        """
+        self.sim_original = self.sim_original.iloc[0:0]
+        transect_q = dict()
+        for trans_id in self.checked_idx:
+            transect_q['q_total'] = meas.discharge[trans_id].total
+            transect_q['q_top'] = meas.discharge[trans_id].top
+            transect_q['q_bot'] = meas.discharge[trans_id].bottom
+            transect_q['q_right'] = meas.discharge[trans_id].right
+            transect_q['q_left'] = meas.discharge[trans_id].left
+            transect_q['q_middle'] = meas.discharge[trans_id].middle
+            self.sim_original = self.sim_original.append(transect_q, ignore_index=True, sort=False)
+
+    def sim_cns_min_max_opt(self, meas):
+        """Computes simulations resulting in the the min and max discharges for a constant no slip extrapolation
+        fit.
+
+        Parameters
+        ----------
+        meas: MeasurementData
+            Object of MeasurementData
+        """
+
+        # Compute min-max no slip exponent
+        skip_ns_min_max, self.exp_ns_max, self.exp_ns_min = \
+            self.compute_ns_max_min(meas=meas,
+                                    ns_exp=self.ns_exp,
+                                    exp_ns_min_user=self.user_advanced_settings['exp_ns_min_user'],
+                                    exp_ns_max_user=self.user_advanced_settings['exp_ns_max_user'])
+
+        # Optimized
+        self.sim_extrap_cns_opt['q_total'] = meas.extrap_fit.q_sensitivity.q_cns_opt_list
+        self.sim_extrap_cns_opt['q_top'] = meas.extrap_fit.q_sensitivity.q_top_cns_opt_list
+        self.sim_extrap_cns_opt['q_bot'] = meas.extrap_fit.q_sensitivity.q_bot_cns_opt_list
+
+        # Max min
+        if skip_ns_min_max:
+            # If cns not used both max and min are equal to the optimized value
+            self.sim_extrap_cns_min['q_total'] = meas.extrap_fit.q_sensitivity.q_cns_opt_list
+            self.sim_extrap_cns_min['q_top'] = meas.extrap_fit.q_sensitivity.q_top_cns_opt_list
+            self.sim_extrap_cns_min['q_bot'] = meas.extrap_fit.q_sensitivity.q_bot_cns_opt_list
+            self.sim_extrap_cns_max['q_total'] = meas.extrap_fit.q_sensitivity.q_cns_opt_list
+            self.sim_extrap_cns_max['q_top'] = meas.extrap_fit.q_sensitivity.q_top_cns_opt_list
+            self.sim_extrap_cns_max['q_bot'] = meas.extrap_fit.q_sensitivity.q_bot_cns_opt_list
+        else:
+            # Compute q for min and max values
+            q = QComp()
+            self.sim_extrap_cns_min = pd.DataFrame(columns=self.sim_extrap_cns_min.columns)
+            self.sim_extrap_cns_max = pd.DataFrame(columns=self.sim_extrap_cns_max.columns)
+
+            for trans_id in self.checked_idx:
+                # Compute min values
+                q.populate_data(data_in=meas.transects[trans_id],
+                                top_method='Constant',
+                                bot_method='No Slip',
+                                exponent=self.exp_ns_min)
+                self.sim_extrap_cns_min.loc[len(self.sim_extrap_cns_min)] = [q.total, q.top, q.bottom]
+                # Compute max values
+                q.populate_data(data_in=meas.transects[trans_id],
+                                top_method='Constant',
+                                bot_method='No Slip',
+                                exponent=self.exp_ns_max)
+                self.sim_extrap_cns_max.loc[len(self.sim_extrap_cns_max)] = [q.total, q.top, q.bottom]
+
+    def sim_pp_min_max_opt(self, meas):
+        """Computes simulations resulting in the the min and max discharges for a power power extrapolation
+        fit.
+
+        Parameters
+        ----------
+        meas: MeasurementData
+            Object of MeasurementData
+        """
+
+        # A power fit is not applicable to bi-directional flow
+        mean_q = meas.mean_discharges(meas)
+        if np.sign(mean_q['top_mean']) != np.sign(mean_q['bot_mean']):
+            self.sim_extrap_pp_min = self.sim_original[['q_total', 'q_top', 'q_bot']]
+            self.sim_extrap_pp_max = self.sim_original[['q_total', 'q_top', 'q_bot']]
+            self.sim_extrap_pp_opt = self.sim_original[['q_total', 'q_top', 'q_bot']]
+
+        else:
+            # Compute min-max power exponent
+            skip_pp_min_max, self.exp_pp_max, self.exp_pp_min = \
+                self.compute_pp_max_min(meas=meas,
+                                        exp_95ic_min=self.exp_95ic_min,
+                                        exp_95ic_max=self.exp_95ic_max,
+                                        pp_exp=self.pp_exp,
+                                        exp_pp_min_user=self.user_advanced_settings['exp_pp_min_user'],
+                                        exp_pp_max_user=self.user_advanced_settings['exp_pp_max_user'])
+
+            # Optimized
+            self.sim_extrap_pp_opt['q_total'] = meas.extrap_fit.q_sensitivity.q_pp_opt_list
+            self.sim_extrap_pp_opt['q_top'] = meas.extrap_fit.q_sensitivity.q_top_pp_opt_list
+            self.sim_extrap_pp_opt['q_bot'] = meas.extrap_fit.q_sensitivity.q_bot_pp_opt_list
+
+            # Max min
+            if skip_pp_min_max:
+                self.sim_extrap_pp_min['q_total'] = meas.extrap_fit.q_sensitivity.q_pp_opt_list
+                self.sim_extrap_pp_min['q_top'] = meas.extrap_fit.q_sensitivity.q_top_pp_opt_list
+                self.sim_extrap_pp_min['q_bot'] = meas.extrap_fit.q_sensitivity.q_bot_pp_opt_list
+                self.sim_extrap_pp_max['q_total'] = meas.extrap_fit.q_sensitivity.q_pp_opt_list
+                self.sim_extrap_pp_max['q_top'] = meas.extrap_fit.q_sensitivity.q_top_pp_opt_list
+                self.sim_extrap_pp_max['q_bot'] = meas.extrap_fit.q_sensitivity.q_bot_pp_opt_list
+            else:
+                q = QComp()
+                self.sim_extrap_pp_min = pd.DataFrame(columns=self.sim_extrap_pp_min.columns)
+                self.sim_extrap_pp_max = pd.DataFrame(columns=self.sim_extrap_pp_max.columns)
+
+                for trans_id in self.checked_idx:
+                    q.populate_data(data_in=meas.transects[trans_id],
+                                    top_method='Power',
+                                    bot_method='Power',
+                                    exponent=self.exp_pp_min)
+                    self.sim_extrap_pp_min.loc[len(self.sim_extrap_pp_min)] = [q.total, q.top, q.bottom]
+
+                    q.populate_data(data_in=meas.transects[trans_id],
+                                    top_method='Power',
+                                    bot_method='Power',
+                                    exponent=self.exp_pp_max)
+                    self.sim_extrap_pp_max.loc[len(self.sim_extrap_pp_max)] = [q.total, q.top, q.bottom]
+
+    def sim_edge_min_max(self, meas):
+        """Computes simulations for the maximum and minimum edge discharges.
+
+        Parameters
+        ----------
+        meas: MeasurementData
+            Object of measurement data
+        """
+
+        # Clear variables
+        self.d_right_error_min = []
+        self.d_left_error_min = []
+        self.d_right_error_max = []
+        self.d_left_error_max = []
+        self.sim_edge_min = pd.DataFrame(columns=self.sim_edge_min.columns)
+        self.sim_edge_max = pd.DataFrame(columns=self.sim_edge_max.columns)
+
+        # Create measurement copy to allow changes without affecting original
+        meas_temp = copy.deepcopy(meas)
+
+        # Process each checked transect
+        for trans_id in self.checked_idx:
+            # Compute max and min edge distances
+            max_left_dist, max_right_dist, min_left_dist, min_right_dist = \
+                self.compute_edge_dist_max_min(transect=meas.transects[trans_id],
+                                               user_settings=self.user_advanced_settings,
+                                               default_settings=self.default_advanced_settings)
+
+            # Compute edge minimum
+            self.d_right_error_min.append(min_right_dist)
+            self.d_left_error_min.append(min_left_dist)
+            meas_temp.transects[trans_id].edges.left.distance_m = min_left_dist
+            meas_temp.transects[trans_id].edges.right.distance_m = min_right_dist
+            meas_temp.transects[trans_id].edges.left.type = 'Triangular'
+            meas_temp.transects[trans_id].edges.right.type = 'Triangular'
+            meas_temp.discharge[trans_id].populate_data(data_in=meas_temp.transects[trans_id],
+                                                        moving_bed_data=meas_temp.mb_tests)
+            self.sim_edge_min.loc[len(self.sim_edge_min)] = [meas_temp.discharge[trans_id].total,
+                                                             meas_temp.discharge[trans_id].left,
+                                                             meas_temp.discharge[trans_id].right]
+
+            # Compute edge maximum
+            self.d_right_error_max.append(max_right_dist)
+            self.d_left_error_max.append(max_left_dist)
+            meas_temp.transects[trans_id].edges.left.distance_m = max_left_dist
+            meas_temp.transects[trans_id].edges.right.distance_m = max_right_dist
+            meas_temp.transects[trans_id].edges.left.type = 'Rectangular'
+            meas_temp.transects[trans_id].edges.right.type = 'Rectangular'
+            meas_temp.discharge[trans_id].populate_data(data_in=meas_temp.transects[trans_id],
+                                                        moving_bed_data=meas_temp.mb_tests)
+            self.sim_edge_max.loc[len(self.sim_edge_max)] = [meas_temp.discharge[trans_id].total,
+                                                             meas_temp.discharge[trans_id].left,
+                                                             meas_temp.discharge[trans_id].right]
+
+    def sim_draft_max_min(self, meas):
+        """Compute the simulations for the max and min draft errror.
+
+        Parameters
+        ----------
+        meas: MeasurementData
+            Object of MeasurementData
+        """
+
+        # Reset variables
+        self.draft_error_list = []
+        self.sim_draft_min = pd.DataFrame(columns=self.sim_draft_min.columns)
+        self.sim_draft_max = pd.DataFrame(columns=self.sim_draft_max.columns)
+
+        # Create copy of meas to avoid changing original
+        meas_temp = copy.deepcopy(meas)
+
+        for trans_id in self.checked_idx:
+            # Compute max and min draft
+            draft_max, draft_min, draft_error = \
+                self.compute_draft_max_min(transect=meas.transects[trans_id],
+                                           draft_error_m_user=self.user_advanced_settings['draft_error_m_user'])
+            self.draft_error_list.append(draft_error)
+
+            # Compute discharge for draft min
+            meas_temp.transects[trans_id].change_draft(draft_min)
+            meas_temp.discharge[trans_id].populate_data(data_in=meas_temp.transects[trans_id],
+                                                        moving_bed_data=meas_temp.mb_tests)
+            self.sim_draft_min.loc[len(self.sim_draft_min)] = [meas_temp.discharge[trans_id].total,
+                                                               meas_temp.discharge[trans_id].top,
+                                                               meas_temp.discharge[trans_id].left,
+                                                               meas_temp.discharge[trans_id].right]
+            # Compute discharge for draft max
+            meas_temp.transects[trans_id].change_draft(draft_max)
+            meas_temp.discharge[trans_id].populate_data(data_in=meas_temp.transects[trans_id],
+                                                        moving_bed_data=meas_temp.mb_tests)
+            self.sim_draft_max.loc[len(self.sim_draft_max)] = [meas_temp.discharge[trans_id].total,
+                                                               meas_temp.discharge[trans_id].top,
+                                                               meas_temp.discharge[trans_id].left,
+                                                               meas_temp.discharge[trans_id].right]
+
+    def sim_invalid_cells(self, meas):
+        """Computes simulations using different methods to interpolate for invalid cells and ensembles.
+
+        Parameters
+        ----------
+        meas: MeasurementData
+            Object of MeasurementData
+        """
+
+        # Reset data frames
+        self.sim_cells_trdi = pd.DataFrame(columns=self.sim_cells_trdi.columns)
+        self.sim_cells_above = pd.DataFrame(columns=self.sim_cells_above.columns)
+        self.sim_cells_below = pd.DataFrame(columns=self.sim_cells_below.columns)
+        self.sim_cells_before = pd.DataFrame(columns=self.sim_cells_before.columns)
+        self.sim_cells_after = pd.DataFrame(columns=self.sim_cells_after.columns)
+
+        # Simulations for invalid cells and ensembles
+        meas_temp = copy.deepcopy(meas)
+        for trans_id in self.checked_idx:
+            # TRDI method
+            meas_temp.transects[trans_id].w_vel.interpolate_cells_trdi(meas_temp.transects[trans_id])
+            meas_temp.discharge[trans_id].populate_data(data_in=meas_temp.transects[trans_id],
+                                                        moving_bed_data=meas_temp.mb_tests)
+            self.sim_cells_trdi.loc[len(self.sim_cells_trdi)] = [meas_temp.discharge[trans_id].total,
+                                                                 meas_temp.discharge[trans_id].middle]
+
+            # Above only
+            meas_temp.transects[trans_id].w_vel.interpolate_abba(meas_temp.transects[trans_id], search_loc=['above'])
+            meas_temp.discharge[trans_id].populate_data(data_in=meas_temp.transects[trans_id],
+                                                        moving_bed_data=meas_temp.mb_tests)
+            self.sim_cells_above.loc[len(self.sim_cells_above)] = [meas_temp.discharge[trans_id].total,
+                                                                   meas_temp.discharge[trans_id].middle]
+            # Below only
+            meas_temp.transects[trans_id].w_vel.interpolate_abba(meas_temp.transects[trans_id], search_loc=['below'])
+            meas_temp.discharge[trans_id].populate_data(data_in=meas_temp.transects[trans_id],
+                                                        moving_bed_data=meas_temp.mb_tests)
+            self.sim_cells_below.loc[len(self.sim_cells_below)] = [meas_temp.discharge[trans_id].total,
+                                                                   meas_temp.discharge[trans_id].middle]
+            # Before only
+            meas_temp.transects[trans_id].w_vel.interpolate_abba(meas_temp.transects[trans_id], search_loc=['before'])
+            meas_temp.discharge[trans_id].populate_data(data_in=meas_temp.transects[trans_id],
+                                                        moving_bed_data=meas_temp.mb_tests)
+            self.sim_cells_before.loc[len(self.sim_cells_before)] = [meas_temp.discharge[trans_id].total,
+                                                                     meas_temp.discharge[trans_id].middle]
+            # After only
+            meas_temp.transects[trans_id].w_vel.interpolate_abba(meas_temp.transects[trans_id], search_loc=['after'])
+            meas_temp.discharge[trans_id].populate_data(data_in=meas_temp.transects[trans_id],
+                                                        moving_bed_data=meas_temp.mb_tests)
+            self.sim_cells_after.loc[len(self.sim_cells_after)] = [meas_temp.discharge[trans_id].total,
+                                                                   meas_temp.discharge[trans_id].middle]
+
+    def sim_shallow_ens(self, meas):
+        """Computes simulations assuming no interpolation of discharge for ensembles where depths are too shallow
+        for any valid cells.
+
+        Parameters
+        ----------
+        meas: MeasurementData
+            Object of MeasurementData
+        """
+
+        # Reset data frame
+        self.sim_shallow = pd.DataFrame(columns=self.sim_shallow.columns)
+
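+        # The difference between the ensemble discharges and the sum of the cell discharges
+        # is the portion of discharge that was estimated for ensembles with no valid cells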
+        for trans_id in self.checked_idx:
+            shallow_estimate = np.nansum(meas.discharge[trans_id].middle_ens) \
+                               - np.nansum(np.nansum(meas.discharge[trans_id].middle_cells))
+            if np.abs(shallow_estimate) > 0:
+                self.sim_shallow.loc[len(self.sim_shallow)] = [meas.discharge[trans_id].total - shallow_estimate,
+                                                               meas.discharge[trans_id].middle - shallow_estimate]
+            else:
+                self.sim_shallow.loc[len(self.sim_shallow)] = [meas.discharge[trans_id].total,
+                                                               meas.discharge[trans_id].middle]
+
+    def sim_invalid_depth(self, meas):
+        """Computes simulations using different methods to interpolate for invalid depths.
+
+        Parameters
+        ----------
+        meas: MeasurementData
+           Object of MeasurementData
+        """
+
+        # Reset dataframes
+        self.sim_depth_hold = pd.DataFrame(columns=self.sim_depth_hold.columns)
+        self.sim_depth_next = pd.DataFrame(columns=self.sim_depth_next.columns)
+
+        # Simulations for invalid depths
+        meas_temp = copy.deepcopy(meas)
+        for trans_id in self.checked_idx:
+            depths = getattr(meas_temp.transects[trans_id].depths, meas_temp.transects[trans_id].depths.selected)
+            # Hold last
+            depths.interpolate_hold_last()
+            meas_temp.transects[trans_id].w_vel.adjust_side_lobe(meas_temp.transects[trans_id])
+            meas_temp.discharge[trans_id].populate_data(data_in=meas_temp.transects[trans_id],
+                                                        moving_bed_data=meas_temp.mb_tests)
+            self.sim_depth_hold.loc[len(self.sim_depth_hold)] = [meas_temp.discharge[trans_id].total,
+                                                                 meas_temp.discharge[trans_id].middle]
+            # Fill with next
+            depths.interpolate_next()
+            meas_temp.transects[trans_id].w_vel.adjust_side_lobe(meas_temp.transects[trans_id])
+            meas_temp.discharge[trans_id].populate_data(data_in=meas_temp.transects[trans_id],
+                                                        moving_bed_data=meas_temp.mb_tests)
+            self.sim_depth_next.loc[len(self.sim_depth_next)] = [meas_temp.discharge[trans_id].total,
+                                                                 meas_temp.discharge[trans_id].middle]
+
+    def sim_invalid_boat_velocity(self, meas):
+        """Computes simulations using different methods to interpolate for invalid boat velocity.
+
+        Parameters
+        ----------
+        meas: MeasurementData
+           Object of MeasurementData
+        """
+
+        # Reset dataframes
+        self.sim_boat_hold = pd.DataFrame(columns=self.sim_boat_hold.columns)
+        self.sim_boat_next = pd.DataFrame(columns=self.sim_boat_next.columns)
+
+        # Simulations for invalid boat velocity
+        meas_temp = copy.deepcopy(meas)
+        for trans_id in self.checked_idx:
+            # Hold last
+            boat_data = getattr(meas_temp.transects[trans_id].boat_vel, meas_temp.transects[trans_id].boat_vel.selected)
+            if boat_data is not None:
+                boat_data.interpolate_hold_last()
+                meas_temp.discharge[trans_id].populate_data(data_in=meas_temp.transects[trans_id],
+                                                            moving_bed_data=meas_temp.mb_tests)
+                self.sim_boat_hold.loc[len(self.sim_boat_hold)] = [meas_temp.discharge[trans_id].total,
+                                                                   meas_temp.discharge[trans_id].middle]
+                # Fill with next
+                boat_data.interpolate_next()
+                meas_temp.discharge[trans_id].populate_data(data_in=meas_temp.transects[trans_id],
+                                                            moving_bed_data=meas_temp.mb_tests)
+                self.sim_boat_next.loc[len(self.sim_boat_next)] = [meas_temp.discharge[trans_id].total,
+                                                                   meas_temp.discharge[trans_id].middle]
+            else:
+                self.sim_boat_next.loc[len(self.sim_boat_next)] = [meas.discharge[trans_id].total,
+                                                                   meas_temp.discharge[trans_id].middle]
+                self.sim_boat_hold.loc[len(self.sim_boat_hold)] = [meas.discharge[trans_id].total,
+                                                                   meas_temp.discharge[trans_id].middle]
+
+    @staticmethod
+    def compute_draft_max_min(transect, draft_error_m_user=np.nan):
+        """Determine the max and min values of the ADCP draft.
+
+        Parameters
+        ----------
+        transect: TransectData
+            Object of transect data
+        draft_error_m_user: float
+            User specified draft error in m
+
+        Returns
+        -------
+        draft_max: float
+            Maximum draft in m for simulations
+        draft_min: float
+            Minimum draft in m for simulations
+        draft_error: float
+            Draft error in m
+        """
+        depths = transect.depths.bt_depths.depth_processed_m  # depth by ens
+        depth_90 = np.quantile(depths, q=0.9)  # quantile 90% to avoid spikes
+
+        # Determine draft error value
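+        # (0.02 m for shallow sites where the 90th percentile depth is < 2.5 m, 0.05 m otherwise,
+        # unless a user specified value is provided)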
+        if np.isnan(draft_error_m_user):
+            if depth_90 < 2.50:
+                draft_error = 0.02
+            else:
+                draft_error = 0.05
+        else:
+            draft_error = draft_error_m_user
+
+        # Compute draft max and min
+        draft_min = transect.depths.bt_depths.draft_orig_m - draft_error
+        draft_max = transect.depths.bt_depths.draft_orig_m + draft_error
+
+        if draft_min <= 0:
+            draft_min = 0.01
+
+        return draft_max, draft_min, draft_error
+
+    @staticmethod
+    def compute_edge_dist_max_min(transect, user_settings, default_settings):
+        """Compute the max and min edge distances.
+        """
+
+        init_dist_right = transect.edges.right.distance_m
+        init_dist_left = transect.edges.left.distance_m
+
+        # Select user percentage or default
+        if np.isnan(user_settings['right_edge_dist_prct_user']):
+            d_right_error_prct = default_settings['right_edge_dist_prct']
+        else:
+            d_right_error_prct = user_settings['right_edge_dist_prct_user']
+
+        if np.isnan(user_settings['left_edge_dist_prct_user']):
+            d_left_error_prct = default_settings['left_edge_dist_prct']
+        else:
+            d_left_error_prct = user_settings['left_edge_dist_prct_user']
+
+        # Compute min distance for both edges
+        min_left_dist = (1 - d_left_error_prct * 0.01) * init_dist_left
+        min_right_dist = (1 - d_right_error_prct * 0.01) * init_dist_right
+
+        if min_left_dist <= 0:
+            min_left_dist = 0.10
+        if min_right_dist <= 0:
+            min_right_dist = 0.10
+
+        # Compute max distance for both edges
+        max_left_dist = (1 + d_left_error_prct * 0.01) * init_dist_left
+        max_right_dist = (1 + d_right_error_prct * 0.01) * init_dist_right
+
+        return max_left_dist, max_right_dist, min_left_dist, min_right_dist
+
+    @staticmethod
+    def compute_pp_max_min(meas, exp_95ic_min, exp_95ic_max, pp_exp, exp_pp_min_user, exp_pp_max_user):
+        """Determine the max and min exponents for power fit.
+
+        Parameters
+        ----------
+        meas: MeasurementData
+            Object of MeasurementData
+        exp_95ic_min: list
+            Minimum power fit exponent from the 95% confidence interval for each transect
+        exp_95ic_max: list
+            Maximum power fit exponent from the 95% confidence interval for each transect
+        pp_exp: list
+            Optimized power fit exponent for each transect
+        exp_pp_min_user: float
+            User supplied minimum power fit exponent
+        exp_pp_max_user: float
+            User supplied maximum power fit exponent
+
+        Returns
+        -------
+        skip_pp_min_max: bool
+            Boolean to identify if power fit simulations should be skipped
+        exp_pp_max: float
+            Maximum power fit exponent to be used in simulations
+        exp_pp_min: float
+            Minimum power fit exponent to be used in simulations
+        """
+        skip_pp_min_max = False
+        if len(pp_exp) == 0:
+            skip_pp_min_max = True
+            min_pp = meas.extrap_fit.q_sensitivity.pp_exp
+            max_pp = meas.extrap_fit.q_sensitivity.pp_exp
+        else:
+            if np.isnan(pp_exp).any():
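+                # Fall back to (approximately) the theoretical 1/6 power-law exponent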
+                mean_pp = 0.16
+            else:
+                mean_pp = np.nanmean(pp_exp)
+
+            # If all transects have confidence intervals, use the mean of the confidence interval min/max
+            # Otherwise adjust average +/- 0.2
+            if np.isnan(exp_95ic_min).any():
+                min_pp = mean_pp - 0.2
+            else:
+                min_pp = np.nanmean(exp_95ic_min)
+
+            if np.isnan(exp_95ic_max).any():
+                max_pp = mean_pp + 0.2
+            else:
+                max_pp = np.nanmean(exp_95ic_max)
+
+            # Difference between mean PP exponent and min/max shouldn't exceed 0.2
+            if mean_pp - min_pp > 0.2:
+                min_pp = mean_pp - 0.2
+            if max_pp - mean_pp > 0.2:
+                max_pp = mean_pp + 0.2
+
+            # Check that 0 < exponent < 1
+            if min_pp <= 0:
+                min_pp = 0.01
+            if max_pp >= 1:
+                max_pp = 0.99
+
+        # Apply user overrides to the min-max exponents
+        if np.isnan(exp_pp_min_user):
+            exp_pp_min = min_pp
+        else:
+            exp_pp_min = exp_pp_min_user
+
+        if np.isnan(exp_pp_max_user):
+            exp_pp_max = max_pp
+        else:
+            exp_pp_max = exp_pp_max_user
+
+        return skip_pp_min_max, exp_pp_max, exp_pp_min
+
+    @staticmethod
+    def compute_ns_max_min(meas, ns_exp, exp_ns_min_user=np.nan, exp_ns_max_user=np.nan):
+        """Determine the max and min no slip exponents.
+
+        Parameters
+        ----------
+        meas: MeasurementData
+            Object of MeasurementData
+        ns_exp: list
+            List of maximum and minimum no slip exponents.
+        exp_ns_min_user: float
+            User supplied minimum no slip exponent
+        exp_ns_max_user: float
+            User supplied maximum no slip exponent
+
+        Returns
+        -------
+        skip_ns_min_max: bool
+            Boolean to identify if no slip simulations should be skipped
+        exp_ns_max: float
+            Maximum no slip exponent to be used in simulations
+        exp_ns_min: float
+            Minimum no slip exponent to be used in simulations
+        """
+        skip_ns_min_max = False
+        if len(ns_exp) == 0:
+            skip_ns_min_max = True
+            min_ns = meas.extrap_fit.q_sensitivity.ns_exp
+            max_ns = meas.extrap_fit.q_sensitivity.ns_exp
+        else:
+            mean_ns = np.nanmean(ns_exp)
+            if len(ns_exp) == 1:
+                min_ns = ns_exp[0]-0.05
+                max_ns = ns_exp[0]+0.05
+            else:
+                min_ns = np.nanmin(ns_exp)
+                max_ns = np.nanmax(ns_exp)
+
+            # Diff between mean NS exponent and min/max shouldn't be > 0.2
+            if mean_ns - min_ns > 0.2:
+                min_ns = mean_ns - 0.2
+            if max_ns - mean_ns > 0.2:
+                max_ns = mean_ns + 0.2
+
+            # Check that 0 < exponent < 1
+            if min_ns <= 0:
+                min_ns = 0.01
+            if max_ns >= 1:
+                max_ns = 0.99
+
+        # Apply user overrides
+        if np.isnan(exp_ns_min_user):
+            exp_ns_min = min_ns
+        else:
+            exp_ns_min = exp_ns_min_user
+
+        if np.isnan(exp_ns_max_user):
+            exp_ns_max = max_ns
+        else:
+            exp_ns_max = exp_ns_max_user
+
+        return skip_ns_min_max, exp_ns_max, exp_ns_min
+
+    @staticmethod
+    def depth_error_boat_motion(transect):
+        """Relative depth error due to vertical velocity of boat
+           the height [m] is vertical velocity times ensemble duration
+
+        Parameters
+        ----------
+        transect: TransectData
+            Object of TransectData
+
+        Returns
+        -------
+        relative_error_depth: np.array(float)
+            Random depth error by ensemble
+       """
+
+        d_ens = transect.depths.bt_depths.depth_processed_m
+        depth_vv = transect.boat_vel.bt_vel.w_mps * transect.date_time.ens_duration_sec
+        relative_error_depth = np.abs(depth_vv) / d_ens
+        relative_error_depth[np.isnan(relative_error_depth)] = 0.00
+        return relative_error_depth
+
+    @staticmethod
+    def water_std_by_error_velocity(transect):
+        """Compute the relative standard deviation of the water velocity using the fact that the error velocity is
+        scaled so that the standard deviation of the error velocity is the same as the standard deviation
+        of the horizontal water velocity.
+
+        Parameters
+        ----------
+        transect: TransectData
+            Object of TransectData
+
+        Returns
+        -------
+        std_ev_wt_ens: np.array(float)
+            Relative standard deviation of water track error velocity for each ensemble
+        """
+
+        # Compute water speed
+        u_water = transect.w_vel.u_processed_mps
+        v_water = transect.w_vel.v_processed_mps
+        v_wa_cell_abs = np.sqrt(u_water ** 2 + v_water ** 2)
+
+        # Use only valid error velocity data
+        d_vel_filtered = np.tile([np.nan], transect.w_vel.d_mps.shape)
+        d_vel_filtered[transect.w_vel.valid_data[0]] = transect.w_vel.d_mps[transect.w_vel.valid_data[0]]
+
+        # Compute relative standard deviation of error velocity
+        std_ev_wt = np.nanstd(d_vel_filtered) / np.abs(v_wa_cell_abs)
+        std_ev_wt_ens = np.nanmedian(std_ev_wt, axis=0)
+        # TODO consider substituting the overall std for nan rather than 0
+        # all_std_ev_WT = np.nanstd(d_vel_filtered[:])
+        # std_ev_wt_ens[np.isnan(std_ev_wt_ens)] = all_std_ev_WT
+        std_ev_wt_ens[np.isnan(std_ev_wt_ens)] = 0.00
+        return std_ev_wt_ens
+
+    @staticmethod
+    def boat_std_by_error_velocity(transect):
+        """Compute the relative standard deviation of the boat velocity using the fact that the error velocity is
+        scaled so that the standard deviation of the error velocity is the same as the standard deviation
+        of the horizontal boat velocity.
+
+        Parameters
+        ----------
+        transect: TransectData
+            Object of TransectData
+
+        Returns
+        -------
+        std_ev_bt: np.array(float)
+            Relative standard deviation of bottom track error velocity for each ensemble
+        """
+
+        # Compute boat speed
+        u_boat = transect.boat_vel.bt_vel.u_processed_mps
+        v_boat = transect.boat_vel.bt_vel.v_processed_mps
+        speed = np.sqrt(u_boat ** 2 + v_boat ** 2)
+
+        # Use only valid error velocity data
+        d_vel_filtered = np.tile([np.nan], transect.boat_vel.bt_vel.d_mps.shape)
+        d_vel_filtered[transect.boat_vel.bt_vel.valid_data[0]] = \
+            transect.boat_vel.bt_vel.d_mps[transect.boat_vel.bt_vel.valid_data[0]]
+
+        # Compute relative standard deviation of error velocity
+        all_std_ev_bt = np.nanstd(d_vel_filtered)
+        std_ev_bt = np.abs(all_std_ev_bt) / speed
+        # TODO Consider substituting the overall std for nan rather than 0
+        # std_ev_bt[np.isnan(std_ev_bt)] = all_std_ev_bt
+        std_ev_bt[np.isnan(std_ev_bt)] = 0.00
+
+        return std_ev_bt
+
+    @staticmethod
+    def apply_u_rect(list_sims, col_name):
+        """Compute the uncertainty using list of simulated discharges following a ranctangular law
+
+        Parameters
+        ----------
+        list_sims: list
+            List of simulation data frames to be used in the computation
+        col_name: str
+            Name of column in the data frames to be used in the computation
+
+        Returns
+        -------
+        u_rect: float
+            Result of rectangular law
+        """
+
+        # Combine data frames
+        vertical_stack = pd.concat(list_sims, axis=0, sort=True)
+
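+        # The standard uncertainty of a rectangular (uniform) distribution is its
+        # half-width divided by sqrt(3), i.e. (max - min) / (2 * sqrt(3))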
+        # Apply rectangular law
+        u_rect = (vertical_stack.groupby(vertical_stack.index)[col_name].max()
+                  - vertical_stack.groupby(vertical_stack.index)[col_name].min()) / (2 * (3 ** 0.5))
+
+        return u_rect
+
+    # Bayesian COV
+    # ============
+    @staticmethod
+    def bayes_cov(transects_total_q, cov_prior=0.03, cov_prior_u=0.2, nsim=20000):
+        """Computes the coefficient of variation using a Bayesian approach and an assumed posterior
+        log-normal distribution.
+
+        Parameters
+        ----------
+        transects_total_q: list
+            List of total discharge for each transect
+        cov_prior: float
+            Expected COV (68%) based on prior knowledge. Assumed to be 3% by default.
+        cov_prior_u: float
+            Uncertainty (68%) of cov_prior. Assumed to be 20%.
+        nsim: int
+            Number of simulations. 20000 was found to produce stable results.
+
+        Returns
+        -------
+        cov: float
+            Coefficient of variation
+        """
+
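+        # Usage sketch (hypothetical discharge values in m3/s):
+        #     cov = Oursin.bayes_cov(transects_total_q=[24.9, 25.3, 25.1, 24.8])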
+        theta_std = np.abs(np.array([np.mean(transects_total_q), cov_prior])) \
+            * cov_prior_u / np.sqrt(len(transects_total_q))
+        sav = Oursin.metropolis(theta0=[np.mean(transects_total_q), cov_prior],
+                                obs_data=transects_total_q,
+                                cov_prior=cov_prior,
+                                cov_prior_u=cov_prior_u,
+                                nsim=nsim,
+                                theta_std=theta_std)
+
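+        # Discard the first half of the chain as burn-in and average the remaining COV samples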
+        n_burn = int(nsim / 2)
+
+        cov = np.mean(sav['sam'][n_burn:nsim, 1])
+
+        return cov
+
+    @staticmethod
+    def metropolis(theta0, obs_data, cov_prior, cov_prior_u, nsim=1000, theta_std=np.nan):
+        """Implements the Metropolis_Hastings Markov chain Monte Carlo (MCMC) algorithm for sampling the
+        posterior distribution, assuming a log-normal posterior distribution.
+
+        Parameters
+        ----------
+        theta0: list
+            Starting value of parameters (mean and cov_prior)
+        obs_data: list
+            List of total discharge for each transect
+        cov_prior: float
+            Expected COV (68%) based on prior knowledge.
+        cov_prior_u: float
+            Uncertainty (68%) of cov_prior.
+        nsim: int
+            Number of simulations.
+        theta_std: np.array(float)
+            Standard deviations for the Gaussian jump distribution. If not specified, a default is computed.
+
+        Returns
+        -------
+        w: dict
+            Dictionary containing
+            sam: np.array(float)
+                Matrix containing the MCMC samples
+            obj_funk: np.array(float)
+                Vector containing the corresponding values of the objective function 
+                (i.e. of the unnormalized log-posterior)
+        """
+
+        # Initialize
+        npar = len(theta0)
+        sam = np.zeros((nsim + 1, npar))
+        obj_funk = np.zeros((nsim + 1, 1))
+
+        # Parameters - used for automatic computation of starting stds of the Gaussian Jump distribution
+        if np.any(np.isnan(theta_std)):
+            std_factor = 0.1
+            theta_std = std_factor * np.abs(theta0)
+
+        # Check if starting point is feasible - abandon otherwise
+        f_current = Oursin.log_post(param=theta0, measures=obs_data, cov_prior=cov_prior, cov_prior_u=cov_prior_u)
+
+        if not Oursin.is_feasible(f_current):
+            print('Metropolis:FATAL:unfeasible starting point')
+            w = {'sam': sam, 'obj_funk': obj_funk}
+            return w
+        else:
+            sam[0, :] = list(theta0)
+            obj_funk[0] = f_current
+
+            # MCMC loop
+            # candid = np.array([np.nan, np.nan])
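+            # A fixed seed makes the chain, and therefore the computed COV, reproducible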
+            np.random.seed(0)
+            for i in range(nsim):
+                current = sam[i, :]
+                f_current = obj_funk[i]
+                # Propose a new candidate
+                candid = np.random.normal(loc=current, scale=theta_std)
+                # Change for use in Numba
+                # candid[0] = np.random.normal(loc=current[0], scale=theta_std[0])
+                # candid[1] = np.random.normal(loc=current[1], scale=theta_std[1])
+                # Evaluate objective function at candidate
+                f_candid = Oursin.log_post(param=candid,
+                                           measures=obs_data,
+                                           cov_prior=cov_prior,
+                                           cov_prior_u=cov_prior_u)
+
+                if not Oursin.is_feasible(f_candid):
+                    sam[i + 1, :] = current
+                    obj_funk[i + 1] = f_current
+                else:
+                    # Generate deviate ~U[0,1]
+                    u = np.random.uniform(0, 1)
+
+                    # Compute Metropolis acceptance ratio
+                    # Changed for use in Numba
+                    ratio = math.exp(min(max(-100, f_candid - f_current), 0))
+                    # ratio = np.exp(min(((np.max(np.hstack((float(-100), f_candid - f_current))), float(0)))))
+
+                    # Apply acceptance rule
+                    if u <= ratio:
+                        sam[i + 1, :] = candid
+                        obj_funk[i + 1] = f_candid
+                    else:
+                        sam[i + 1, :] = current
+                        obj_funk[i + 1] = f_current
+
+            w = {'sam': sam, 'obj_funk': obj_funk}
+            return w
+
+    @staticmethod
+    def log_post(param, measures, cov_prior, cov_prior_u):
+        """Define function returning the posterior log-pdf using the model measures ~ N(true_value,cov*true_value),
+        with a flat prior on true_value and a log-normal prior for cov (= coefficient of variation)
+        
+        Parameters
+        ----------
+        param: np.array(float)
+            Array containing the true value and COV
+        measures: np.array(float)
+            Array of observations
+        cov_prior: float
+            Expected COV (68%) based on prior knowledge.
+        cov_prior_u: float
+            Uncertainty (68%) of cov_prior.
+            
+        Returns
+        -------
+        logp: float
+            Unnormalized log-posterior probability
+        """
+        # Check if any parameter is <= 0
+        # since both true_value and cov have to be positive - otherwise sigma = true_value*cov does not make sense
+        if any(item <= 0 for item in param):
+            return -math.inf
+
+        true_value = param[0]
+        cov = param[1]
+        sigma = cov * true_value  # standard deviation
+
+        # Compute log-likelihood under the model: measures ~ N(true_value,sigma)
+        # You can easily change this model (e.g. lognormal for a positive measurand?)
+        # OPTION 1 : the model follows a Normal distribution
+        log_likelihood = np.sum(scipy.stats.norm.logpdf(measures, loc=true_value, scale=sigma))
+        # Change for Numba
+        # log_likelihood = np.sum(np.log(np.exp(-(((measures - true_value) / sigma) ** 2) / 2)
+        #                                / (np.sqrt(2 * np.pi) * sigma)))
+
+        # Prior on true_value - flat prior used here but you may change this if you have prior knowledge
+        log_prior_1 = 0
+
+        # Lognormal prior
+        x = cov
+        mu = np.log(cov_prior)
+        scale = cov_prior_u
+        pdf = np.exp(-(np.log(x) - mu) ** 2 / (2 * scale ** 2)) / (x * scale * np.sqrt(2 * np.pi))
+        log_prior_2 = np.log(pdf)
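+        # The block above is the log-normal pdf with log-mean ln(cov_prior) and log-sd cov_prior_u,
+        # equivalent to scipy.stats.lognorm.logpdf(x, s=cov_prior_u, scale=cov_prior)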
+
+        # Joint prior (prior independence)
+        log_prior = log_prior_1 + log_prior_2
+
+        # Return (unnormalized) log-posterior
+        logp = log_likelihood + log_prior
+        if np.isnan(logp):
+            logp = -math.inf  # returns -Inf rather than NaN's (required by the MCMC sampler used subsequently)
+        return logp
+
+    @staticmethod
+    def is_feasible(value):
+        """Checks that a value is a real value (not infinity or nan)
+        
+        Parameters
+        ----------
+        value: float or int
+        
+        Returns
+        -------
+        bool
+        """
+        if np.isinf(value) or np.isnan(value):
+            return False
+        else:
+            return True
+
+    # Hening Huang proposed method for random uncertainty (not used)
+    # ---------------------------------------------------
+    @staticmethod
+    def hh_random_meas(meas):
+        """Implements the semi-empirical method for computing the random uncertainty of an ADCP discharge transect,
+        as presented in Hening Huang (2018) Estimating uncertainty of streamflow measurements with
+        moving-boat acoustic Doppler current profilers, Hydrological Sciences Journal, 63:3, 353-368,
+        DOI:10.1080/02626667.2018.1433833
+
+        Parameters
+        ----------
+        meas: Measurement
+            Object of class Measurement
+
+        Returns
+        -------
+            random_u: list
+                List of random uncertainty for each checked transect.
+        """
+        random_u = []
+        for idx in meas.checked_transect_idx:
+            # Get or compute base variables
+            q_m = meas.discharge[idx].middle
+            q_i = meas.discharge[idx].middle_ens
+            q_bar = np.nanmean(q_i[:])
+
+            # Compute lag-1 autocorrelation coefficient r1
+            r1_numerator = []
+            r1_denominator = []
+            for n in range(len(q_i) - 1):
+                r1_numerator.append((q_i[n] - q_bar) * (q_i[n+1] - q_bar))
+                r1_denominator.append((q_i[n] - q_bar)**2)
+            r1_denominator.append((q_i[-1] - q_bar)**2)
+            r1 = np.nansum(r1_numerator) / np.nansum(r1_denominator)
+
+            # Compute g(r1)
+            g_r1 = 0.6 + (0.1 * np.exp(r1)) + (0.01 * (1 - np.exp((r1**0.6)-1)**-0.08))
+            if g_r1 < 1:
+                g_r1 = 1.0
+
+            # Compute (delta qi)**2
+            alpha = 1. / 3.
+            c1 = 0.5 * (1 - alpha)
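+            # q_i_hat below is a centered 3-point weighted average with weights (c1, alpha, c1) summing to 1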
+            delta_list = []
+            for n in range(1, len(q_i) - 1):
+                q_i_hat = c1 * q_i[n - 1] + alpha * q_i[n] + c1 * q_i[n + 1]
+                delta_list.append((q_i[n] - q_i_hat)**2)
+
+            # Compute unbiased residual sum of squares
+            urss = ((2. / 3.) * (1 / (1 - alpha))**2) * np.nansum(delta_list)
+
+            # Compute random uncertainty
+            random_u.append(g_r1 * np.sqrt(urss) / q_m)
+
+        return random_u
diff --git a/Classes/Pd0TRDI.py b/Classes/Pd0TRDI.py
new file mode 100644
index 0000000000000000000000000000000000000000..05bb1b9e654870e95f1748f48a204f01da8a1633
--- /dev/null
+++ b/Classes/Pd0TRDI.py
@@ -0,0 +1,2463 @@
+import os
+import re
+import numpy as np
+import struct
+from MiscLibs.common_functions import pol2cart, valid_number, nans
+
+
+class Pd0TRDI(object):
+    """Class to read data from PD0 files
+
+    Attributes
+    ----------
+    file_name: str
+        Full name including path of pd0 file to be read
+    Hdr: Hdr
+        Object of Hdr for heading information
+    Inst: Inst
+        Object of Inst to hold instrument information
+    Cfg: Cfg
+        Object of Cfg to hold configuration information
+    Sensor: Sensor
+        Object of Sensor to hold sensor data
+    Wt: Wt
+        Object of Wt to hold water track data
+    Bt: Bt
+        Object of Bt to hold bottom track data
+    Gps: Gps
+        Object of Gps to hold GPS data from previous versions of WR
+    Gps2: Gps2
+        Object of Gps2 to hold GPS data from WR2
+    Surface: Surface
+        Object of Surface to hold surface cell data
+    AutoMode: AutoMode
+        Object of AutoMode to hold auto configuration settings
+    Nmea: Nmea
+        Object of Nmea to hold Nmea data
+    """
+    
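+    # Usage sketch (hypothetical file path; sub-object attribute names, e.g. Bt.vel_mps,
+    # are defined by the helper classes below):
+    #     pd0 = Pd0TRDI('transect_000.PD0')
+    #     bottom_track_velocities = pd0.Bt.vel_mps
+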
+    def __init__(self, file_name):
+        """Constructor initializing instance variables.
+
+        Parameters
+        ----------
+        file_name: str
+            Full name including path of pd0 file to be read
+        """
+        
+        self.file_name = file_name
+        self.Hdr = None
+        self.Inst = None
+        self.Cfg = None
+        self.Sensor = None
+        self.Wt = None
+        self.Bt = None
+        self.Gps = None
+        self.Gps2 = None
+        self.Surface = None
+        self.AutoMode = None
+        self.Nmea = None
+        
+        self.pd0_read(file_name)
+        
+    def create_objects(self, n_ensembles, n_types, n_bins, max_surface_bins, n_velocities, wr2=False):
+        """Create objects for instance variables.
+
+        Parameters
+        ----------
+        n_ensembles: int
+            Number of ensembles
+        n_types: int
+            Number of data types
+        n_bins: int
+            Number of bins or depth cells
+        max_surface_bins: int
+            Maximum number of surface cells
+        n_velocities: int
+            Number of velocities
+        wr2: bool
+            Whether WR2 processing of GPS data should be applied
+        """
+
+        self.Hdr = Hdr(n_ensembles, n_types)
+        self.Inst = Inst(n_ensembles)
+        self.Cfg = Cfg(n_ensembles)
+        self.Sensor = Sensor(n_ensembles)
+        self.Wt = Wt(n_bins, n_ensembles, n_velocities)
+        self.Bt = Bt(n_ensembles, n_velocities)
+        self.Gps = Gps(n_ensembles)
+        self.Gps2 = Gps2(n_ensembles, wr2)
+        self.Surface = Surface(n_ensembles, n_velocities, max_surface_bins)
+        self.AutoMode = AutoMode(n_ensembles)
+        self.Nmea = Nmea(n_ensembles)
+
+    def pd0_read(self, fullname, wr2=False):
+        """Reads the binary pd0 file and assigns values to object instance variables.
+
+        Parameters
+        ----------
+        fullname: str
+            Full file name including path
+        wr2: bool
+            Determines if WR2 processing should be applied to GPS data
+        """
+
+        # Assign default values
+        n_velocities = 4
+        max_surface_bins = 5
+
+        # Check to ensure file exists
+        if os.path.exists(fullname):
+            file_info = os.path.getsize(fullname)
+
+            if file_info > 0:
+                # Open file for processing
+                with open(fullname, 'rb') as f:
+
+                    # Read leader ID
+                    leader_id = hex(np.fromfile(f, np.uint16, count=1)[0])
+                    # Leader ID 7f7f marks beginning of ensemble
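+                    # If the read did not start on a boundary, back up one byte at a time so the
+                    # 2-byte reads slide through the stream until the 0x7f7f marker is found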
+                    if leader_id != '0x7f7f':
+                        while leader_id != '0x7f7f':
+                            f.seek(-1, 1)
+                            leader_id = hex(np.fromfile(f, np.uint16, count=1)[0])
+
+                    # Read header information
+                    initial_pos = f.tell()-2
+                    bytes_per_ens = np.fromfile(f, dtype=np.uint16, count=1)[0]
+                    f.seek(1, 1)
+                    n_types = np.fromfile(f, np.uint8, count=1)[0]
+                    offset = np.fromfile(f, np.uint16, count=1)[0]
+                    f.seek(initial_pos+offset+8, 0)
+                    n_beams = np.fromfile(f, np.uint8, count=1)[0]
+                    n_bins = np.fromfile(f, np.uint8, count=1)[0]
+
+                    # Determine number of ensembles in the file to allow pre-allocation of arrays
+                    n_ensembles = Pd0TRDI.number_of_ensembles(f, file_info)
+
+                    # Create objects and pre-allocate arrays
+                    self.create_objects(n_ensembles=n_ensembles,
+                                        n_types=n_types,
+                                        n_bins=n_bins,
+                                        max_surface_bins=max_surface_bins,
+                                        n_velocities=n_velocities)
+
+                    # Initialize counters and variables
+                    i_ens = -1
+                    end_file_check = 0
+                    end_file = file_info
+                    i_data_types = 0
+                    n_data_types = 1
+                    file_loc = 0
+                    i2022 = 0
+                    j100, j101, j102, j103 = -1, -1, -1, -1
+                    rr_bt_depth_correction = np.tile(np.nan, (n_beams, n_ensembles))
+
+                    # Reset position in file
+                    f.seek(initial_pos, 0)
+
+                    # Begin reading file
+                    while end_file_check < end_file:
+
+                        # Read leader ID
+                        leader_id = hex(np.fromfile(f, np.uint16, count=1)[0])
+                        if i_data_types >= n_data_types and leader_id != '0x7f7f':
+                            leader_id = '0x9999'
+
+                        # 7f7f marks the beginning of an ensemble
+                        if leader_id == '0x7f7f':
+                            i2022 = 0
+                            file_loc = f.tell() - 2
+
+                            # Check for last ensemble in file
+                            if file_loc+bytes_per_ens > end_file and i_ens >= n_ensembles:
+                                end_file_check = end_file+1
+
+                            else:
+                                # Process ensemble
+                                i_data_types = 0
+                                store_file_loc = f.tell()
+                                bytes_per_ens = np.fromfile(f, np.uint16, count=1)[0]
+
+                                # Check check_sum
+                                if self.check_sum(f, file_loc, bytes_per_ens):
+                                    f.seek(file_loc+5, 0)
+                                    n_data_types = np.fromfile(f, np.uint8, count=1)[0]
+                                    data_offsets = np.fromfile(f, np.uint16, count=n_data_types)
+
+                                    # Find variable leader ID
+                                    while i_data_types+1 <= n_data_types and leader_id != '0x80':
+                                        f.seek(data_offsets[i_data_types]+file_loc, 0)
+                                        leader_id = hex(np.fromfile(f, np.uint16, count=1)[0])
+                                        i_data_types += 1
+
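+            # The minimum simulation pairs the shortest edge distances with triangular edges and
+            # the maximum pairs the longest distances with rectangular edges, bracketing the edge discharge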
+                                    # Check for consecutive ensemble numbers
+                                    if i_ens > -1 and leader_id == '0x80':
+                                        ens_num = np.fromfile(f, np.uint16, count=1)[0]
+                                        ens_num_diff = ens_num - self.Sensor.num[i_ens]
+                                        if ens_num_diff > 1:
+                                            for nn in range(0, int(ens_num_diff-1)):
+                                                if i_ens < n_ensembles:
+                                                    self.Sensor.num[i_ens] = self.Sensor.num[i_ens-1]+1
+                                                    i_ens += 1
+                                        elif ens_num_diff < 1:
+                                            i_ens -= 1
+                                else:
+                                    self.bad_check_sum(f, file_loc)
+
+                                # Initialize variables
+                                f.seek(store_file_loc, 0)
+                                i_data_types = 0
+                                j100, j101, j102, j103 = -1, -1, -1, -1
+                                i_ens += 1
+
+                                # Read bytes in this ensemble
+                                self.Hdr.bytes_per_ens[i_ens] = np.fromfile(f, np.uint16, count=1)[0]
+
+                                # If checksum is valid read header data
+                                if self.check_sum(f, file_loc, int(self.Hdr.bytes_per_ens[i_ens])):
+
+                                    # Read number of data types
+                                    f.seek(file_loc+5, 0)
+                                    self.Hdr.n_data_types[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+
+                                    # Read data offsets
+                                    test = np.fromfile(f, np.uint16, count=int(self.Hdr.n_data_types[i_ens]))
+                                    if test.shape[0] > self.Hdr.data_offsets.shape[1]:
+                                        self.Hdr.data_offsets = np.resize(self.Hdr.data_offsets,
+                                                                          (n_ensembles, test.shape[0]))
+                                    self.Hdr.data_offsets[i_ens, 0:int(self.Hdr.n_data_types[i_ens])] = \
+                                        test[0:int(self.Hdr.n_data_types[i_ens])]
+
+                                    # Check for end of data types
+                                    self.end_reading(f, file_loc, i_data_types, i_ens, bytes_per_ens)
+                                else:
+                                    self.bad_check_sum(f, file_loc)
+                                    i_data_types = -1
+
+                        # Read binary fixed leader data
+                        elif leader_id == '0x0':
+                            # Update data types counter
+                            i_data_types += 1
+
+                            # Read and decode firmware version
+                            self.Inst.firm_ver[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+                            self.Inst.firm_ver[i_ens] = self.Inst.firm_ver[i_ens] + \
+                                np.fromfile(f, np.uint8, count=1)[0] / 100
+
+                            # Read and decode instrument characteristics
+                            bitls = np.fromfile(f, np.uint8, count=1)[0]
+                            bitls = "{0:08b}".format(bitls)
+
+                            val = int(bitls[5:], 2)
+                            if val == 0:
+                                self.Inst.freq[i_ens] = 75
+                            elif val == 1:
+                                self.Inst.freq[i_ens] = 150
+                            elif val == 2:
+                                self.Inst.freq[i_ens] = 300
+                            elif val == 3:
+                                self.Inst.freq[i_ens] = 600
+                            elif val == 4:
+                                self.Inst.freq[i_ens] = 1200
+                            elif val == 5:
+                                self.Inst.freq[i_ens] = 2400
+                            else:
+                                self.Inst.freq[i_ens] = np.nan
+
+                            val = int(bitls[4], 2)
+                            if val == 0:
+                                self.Inst.pat[i_ens] = 'Concave'
+                            elif val == 1:
+                                self.Inst.pat[i_ens] = 'Convex'
+                            else:
+                                self.Inst.pat[i_ens] = 'n/a'
+
+                            self.Inst.sensor_CFG[i_ens] = int(bitls[2:4], 2) + 1
+
+                            val = int(bitls[1], 2)
+                            if val == 0:
+                                self.Inst.xducer[i_ens] = 'Not Attached'
+                            elif val == 1:
+                                self.Inst.xducer[i_ens] = 'Attached'
+                            else:
+                                self.Inst.xducer[i_ens] = 'n/a'
+
+                            val = int(bitls[0], 2)
+                            if val == 0:
+                                self.Sensor.orient[i_ens] = 'Down'
+                            elif val == 1:
+                                self.Sensor.orient[i_ens] = 'Up'
+                            else:
+                                self.Sensor.orient[i_ens] = 'n/a'
+
+                            bitms = np.fromfile(f, np.uint8, count=1)[0]
+                            bitms = "{0:08b}".format(bitms)
+
+                            val = int(bitms[6:], 2)
+                            if val == 0:
+                                self.Inst.beam_ang[i_ens] = 15
+                            elif val == 1:
+                                self.Inst.beam_ang[i_ens] = 20
+                            elif val == 2:
+                                self.Inst.beam_ang[i_ens] = 30
+                            elif val == 3:
+                                self.Inst.beam_ang[i_ens] = np.nan
+                            else:
+                                self.Inst.beam_ang[i_ens] = np.nan
+
+                            val = int(bitms[:4], 2)
+                            if val == 4:
+                                self.Inst.beams[i_ens] = 4
+                            elif val == 5:
+                                self.Inst.beams[i_ens] = 5
+                                self.Inst.demod[i_ens] = 1
+                            elif val == 15:
+                                self.Inst.beams[i_ens] = 5
+                                self.Inst.demod[i_ens] = 2
+                            else:
+                                self.Inst.beams[i_ens] = np.nan
+                                self.Inst.demod[i_ens] = np.nan
+
+                            val = np.fromfile(f, np.uint8, count=1)[0]
+                            if val == 0:
+                                self.Inst.data_type[i_ens] = 'Real'
+                            else:
+                                self.Inst.data_type[i_ens] = 'Simu'
+
+                            # Position file pointer and read configuration information
+                            f.seek(1, 1)
+                            self.Cfg.n_beams[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+                            self.Cfg.wn[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+                            self.Cfg.wp[i_ens] = np.fromfile(f, np.uint16, count=1)[0]
+                            self.Cfg.ws_cm[i_ens] = np.fromfile(f, np.uint16, count=1)[0]
+                            self.Cfg.wf_cm[i_ens] = np.fromfile(f, np.uint16, count=1)[0]
+                            self.Cfg.wm[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+                            self.Cfg.wc[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+                            self.Cfg.code_reps[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+                            self.Cfg.wg_per[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+                            self.Cfg.we_mmps[i_ens] = np.fromfile(f, np.uint16, count=1)[0]
+                            self.Cfg.tp_sec[i_ens] = np.sum(np.fromfile(f, np.uint8, count=3) * np.array([60, 1, 0.01]))
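+                            # TP (time between pings) is stored as minutes, seconds, and hundredths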
+                            self.Cfg.ex[i_ens] = "{0:08b}".format(ord(f.read(1)))
+
+                            val = int(self.Cfg.ex[i_ens][3:5], 2)
+                            if val == 0:
+                                self.Cfg.coord_sys[i_ens] = 'Beam'
+                            elif val == 1:
+                                self.Cfg.coord_sys[i_ens] = 'Inst'
+                            elif val == 2:
+                                self.Cfg.coord_sys[i_ens] = 'Ship'
+                            elif val == 3:
+                                self.Cfg.coord_sys[i_ens] = 'Earth'
+                            else:
+                                self.Cfg.coord_sys[i_ens] = 'N/a'
+
+                            val = int(self.Cfg.ex[i_ens][5], 2)
+                            if val == 0:
+                                self.Cfg.use_pr = 'No'
+                            elif val == 1:
+                                self.Cfg.use_pr = 'Yes'
+                            else:
+                                self.Cfg.use_pr = 'N/a'
+
+                            val = int(self.Cfg.ex[i_ens][6], 2)
+                            if val == 0:
+                                self.Cfg.use_3beam[i_ens] = 'No'
+                            elif val == 1:
+                                self.Cfg.use_3beam[i_ens] = 'Yes'
+                            else:
+                                self.Cfg.use_3beam[i_ens] = 'N/a'
+
+                            val = int(self.Cfg.ex[i_ens][7], 2)
+                            if val == 0:
+                                self.Cfg.map_bins[i_ens] = 'No'
+                            elif val == 1:
+                                self.Cfg.map_bins[i_ens] = 'Yes'
+                            else:
+                                self.Cfg.map_bins[i_ens] = 'N/a'
+
+                            self.Cfg.ea_deg[i_ens] = np.fromfile(f, np.int16, count=1)[0] * 0.01
+                            self.Cfg.eb_deg[i_ens] = np.fromfile(f, np.uint16, count=1)[0] * 0.01
+                            self.Cfg.ez[i_ens] = "{0:08b}".format(np.fromfile(f, np.uint8, count=1)[0])
+
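+                            # Each field of the EZ byte selects the source (manual command or
+                            # sensor) for speed of sound, depth, heading, pitch, roll, salinity,
+                            # and temperature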
+                            val = int(self.Cfg.ez[i_ens][:2], 2)
+                            if val == 0:
+                                self.Cfg.sos_src[i_ens] = 'Manual EC'
+                            elif val == 1:
+                                self.Cfg.sos_src[i_ens] = 'Calculated'
+                            elif val == 3:
+                                self.Cfg.sos_src[i_ens] = 'SVSS Sensor'
+                            else:
+                                self.Cfg.sos_src[i_ens] = 'N/a'
+
+                            val = int(self.Cfg.ez[i_ens][2], 2)
+                            if val == 0:
+                                self.Cfg.xdcr_dep_srs[i_ens] = 'Manual ED'
+                            elif val == 1:
+                                self.Cfg.xdcr_dep_srs[i_ens] = 'Sensor'
+                            else:
+                                self.Cfg.xdcr_dep_srs[i_ens] = 'N/a'
+
+                            val = int(self.Cfg.ez[i_ens][3], 2)
+                            if val == 0:
+                                self.Cfg.head_src[i_ens] = 'Manual EH'
+                            elif val == 1:
+                                self.Cfg.head_src[i_ens] = 'Int. Sensor'
+                            else:
+                                self.Cfg.head_src[i_ens] = 'N/a'
+
+                            val = int(self.Cfg.ez[i_ens][4], 2)
+                            if val == 0:
+                                self.Cfg.pitch_src[i_ens] = 'Manual EP'
+                            elif val == 1:
+                                self.Cfg.pitch_src[i_ens] = 'Int. Sensor'
+                            else:
+                                self.Cfg.pitch_src[i_ens] = 'N/a'
+
+                            val = int(self.Cfg.ez[i_ens][5], 2)
+                            if val == 0:
+                                self.Cfg.roll_src[i_ens] = 'Manual ER'
+                            elif val == 1:
+                                self.Cfg.roll_src[i_ens] = 'Int. Sensor'
+                            else:
+                                self.Cfg.roll_src[i_ens] = 'N/a'
+
+                            val = int(self.Cfg.ez[i_ens][6], 2)
+                            if val == 0:
+                                self.Cfg.xdcr_dep_srs[i_ens] = 'Manual ES'
+                            elif val == 1:
+                                self.Cfg.xdcr_dep_srs[i_ens] = 'Int. Sensor'
+                            else:
+                                self.Cfg.xdcr_dep_srs[i_ens] = 'N/a'
+
+                            val = int(self.Cfg.ez[i_ens][7], 2)
+                            if val == 0:
+                                self.Cfg.temp_src[i_ens] = 'Manual ET'
+                            elif val == 1:
+                                self.Cfg.temp_src[i_ens] = 'Int. Sensor'
+                            else:
+                                self.Cfg.temp_src[i_ens] = 'N/a'
+
+                            self.Cfg.sensor_avail[i_ens] = "{0:08b}".format(np.fromfile(f, np.uint8, count=1)[0])
+                            self.Cfg.dist_bin1_cm[i_ens] = np.fromfile(f, np.uint16, count=1)[0]
+                            self.Cfg.xmit_pulse_cm[i_ens] = np.fromfile(f, np.uint16, count=1)[0]
+                            self.Cfg.ref_lay_str_cell[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+                            self.Cfg.ref_lay_end_cell[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+                            self.Cfg.wa[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+                            self.Cfg.cx[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+                            self.Cfg.lag_cm[i_ens] = np.fromfile(f, np.uint16, count=1)[0]
+                            self.Cfg.cpu_ser_no[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+                            self.Cfg.wb[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+                            self.Cfg.cq[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+
+                            # Check if more data types need to be read and position the pointer
+                            self.end_reading(f, file_loc, i_data_types, i_ens, bytes_per_ens)
+
+                        # Read variable leader data
+                        elif leader_id == '0x80':
+                            # Update the data types counter
+                            i_data_types += 1
+
+                            # Read instrument clock and sensor data
+                            self.Sensor.num[i_ens] = np.fromfile(f, np.uint16, count=1)[0]
+                            self.Sensor.date_not_y2k[i_ens, :] = np.fromfile(f, np.uint8, count=3)
+                            self.Sensor.time[i_ens, :] = np.fromfile(f, np.uint8, count=4)
+                            self.Sensor.num_fact[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
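+                            # The 16-bit ensemble counter rolls over, so num_fact holds the
+                            # number of rollovers used to compute the total ensemble number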
+                            self.Sensor.num_tot[i_ens] = self.Sensor.num[i_ens] + self.Sensor.num_fact[i_ens]*65536
+                            self.Sensor.bit_test[i_ens] = np.fromfile(f, np.uint16, count=1)[0]
+                            self.Sensor.sos_mps[i_ens] = np.fromfile(f, np.uint16, count=1)[0]
+                            self.Sensor.xdcr_depth_dm[i_ens] = np.fromfile(f, np.uint16, count=1)[0]
+                            self.Sensor.heading_deg[i_ens] = np.fromfile(f, np.uint16, count=1)[0] / 100.
+                            self.Sensor.pitch_deg[i_ens] = np.fromfile(f, np.int16, count=1)[0] / 100.
+                            self.Sensor.roll_deg[i_ens] = np.fromfile(f, np.int16, count=1)[0] / 100.
+                            self.Sensor.salinity_ppt[i_ens] = np.fromfile(f, np.uint16, count=1)[0]
+                            self.Sensor.temperature_deg_c[i_ens] = np.fromfile(f, np.int16, count=1)[0] / 100.
+                            self.Sensor.mpt_msc[i_ens, :] = np.fromfile(f, np.uint8, count=3)
+                            self.Sensor.heading_std_dev_deg[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+                            self.Sensor.pitch_std_dev_deg[i_ens] = np.fromfile(f, np.uint8, count=1)[0] / 10.
+                            self.Sensor.roll_std_dev_deg[i_ens] = np.fromfile(f, np.uint8, count=1)[0] / 10.
+                            self.Sensor.xmit_current[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+                            self.Sensor.xmit_voltage[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+                            self.Sensor.ambient_temp[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+                            self.Sensor.pressure_pos[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+                            self.Sensor.pressure_neg[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+                            self.Sensor.attitude_temp[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+                            self.Sensor.attitude[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+                            self.Sensor.contam_sensor[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+                            self.Sensor.error_status_word[i_ens] = ["{0:08b}".format(x)
+                                                                    for x in np.fromfile(f, np.uint8, count=4)]
+                            f.seek(2, 1)
+                            self.Sensor.pressure_pascal[i_ens] = np.fromfile(f, np.uint32, count=1)[0]
+                            self.Sensor.pressure_var_pascal[i_ens] = np.fromfile(f, np.uint32, count=1)[0]
+
+                            f.seek(1, 1)
+                            self.Sensor.date_y2k[i_ens, :] = np.fromfile(f, np.uint8, count=4)
+                            self.Sensor.time_y2k[i_ens, :] = np.fromfile(f, np.uint8, count=4)
+                            self.Sensor.date[i_ens, :] = self.Sensor.date_not_y2k[i_ens, :]
+                            self.Sensor.date[i_ens, 0] = self.Sensor.date_y2k[i_ens, 0] * 100 + \
+                                self.Sensor.date_y2k[i_ens, 1]
+                            self.Cfg.lag_near_bottom[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+
+                            # Check if more data types need to be read and position the pointer
+                            self.end_reading(f, file_loc, i_data_types, i_ens, bytes_per_ens)
+
+                        # Read water-tracking velocity data
+                        elif leader_id == '0x100':
+                            # Update the data types counter
+                            i_data_types += 1
+
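+                            # Grow the velocity array if this ensemble reports more depth
+                            # cells (WN) than currently allocated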
+                            if self.Cfg.wn[i_ens] > self.Wt.vel_mps.shape[1]:
+                                append = np.zeros([self.Wt.vel_mps.shape[0],
+                                                   int(self.Cfg.wn[i_ens] - self.Wt.vel_mps.shape[1]),
+                                                   self.Wt.vel_mps.shape[2]])
+                                self.Wt.vel_mps = np.hstack([self.Wt.vel_mps, append])
+
+                            dummy = np.fromfile(f, np.int16, count=int(self.Cfg.wn[i_ens]*4))
+                            dummy = np.reshape(dummy, [int(self.Cfg.wn[i_ens]), n_velocities])
+                            self.Wt.vel_mps[:n_velocities, :int(self.Cfg.wn[i_ens]), i_ens] = dummy.T
+
+                            # Check if more data types need to be read and position the pointer
+                            self.end_reading(f, file_loc, i_data_types, i_ens, bytes_per_ens)
+
+                        # Read correlation magnitude
+                        elif leader_id == '0x200':
+                            # Update the data types counter
+                            i_data_types += 1
+
+                            if self.Cfg.wn[i_ens] > self.Wt.corr.shape[1]:
+                                append = np.zeros([self.Wt.corr.shape[0],
+                                                   int(self.Cfg.wn[i_ens] - self.Wt.corr.shape[1]),
+                                                   self.Wt.corr.shape[2]])
+                                self.Wt.corr = np.hstack([self.Wt.corr, append])
+
+                            dummy = np.fromfile(f, np.uint8, count=int(self.Cfg.wn[i_ens]*4))
+                            dummy = np.reshape(dummy, [int(self.Cfg.wn[i_ens]), n_velocities])
+                            self.Wt.corr[:n_velocities, :int(self.Cfg.wn[i_ens]), i_ens] = dummy.T
+
+                            # Check if more data types need to be read and position the pointer
+                            self.end_reading(f, file_loc, i_data_types, i_ens, bytes_per_ens)
+
+                        # Read echo intensity
+                        elif leader_id == '0x300':
+                            # Update the data types counter
+                            i_data_types += 1
+
+                            if self.Cfg.wn[i_ens] > self.Wt.rssi.shape[1]:
+                                append = np.zeros([self.Wt.rssi.shape[0],
+                                                   int(self.Cfg.wn[i_ens] - self.Wt.rssi.shape[1]),
+                                                   self.Wt.rssi.shape[2]])
+                                self.Wt.rssi = np.hstack([self.Wt.rssi, append])
+
+                            dummy = np.fromfile(f, np.uint8, count=int(self.Cfg.wn[i_ens]*4))
+                            dummy = np.reshape(dummy, [int(self.Cfg.wn[i_ens]), n_velocities])
+                            self.Wt.rssi[:n_velocities, :int(self.Cfg.wn[i_ens]), i_ens] = dummy.T
+
+                            # Check if more data types need to be read and position the pointer
+                            self.end_reading(f, file_loc, i_data_types, i_ens, bytes_per_ens)
+
+                        # Read percent-good data
+                        elif leader_id == '0x400':
+                            # Update the data types counter
+                            i_data_types += 1
+
+                            if self.Cfg.wn[i_ens] > self.Wt.pergd.shape[1]:
+                                append = np.zeros([self.Wt.pergd.shape[0],
+                                                   int(self.Cfg.wn[i_ens] - self.Wt.pergd.shape[1]),
+                                                   self.Wt.pergd.shape[2]])
+                                self.Wt.pergd = np.hstack([self.Wt.pergd, append])
+                            dummy = np.fromfile(f, np.uint8, count=int(self.Cfg.wn[i_ens]*4))
+                            dummy = np.reshape(dummy, [int(self.Cfg.wn[i_ens]), n_velocities])
+                            self.Wt.pergd[:n_velocities, :int(self.Cfg.wn[i_ens]), i_ens] = dummy.T
+
+                            # Check if more data types need to be read and position the pointer
+                            self.end_reading(f, file_loc, i_data_types, i_ens, bytes_per_ens)
+
+                        # Read bottom track data
+                        elif leader_id == '0x600':
+                            # Update the data types counter
+                            i_data_types += 1
+
+                            # Read bottom track configuration data
+                            self.Cfg.bp[i_ens] = np.fromfile(f, np.uint16, count=1)[0]
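+                            # WinRiver 10.06 embeds GPS data in the bottom-track record; the
+                            # least significant word of longitude is read here and combined
+                            # with the most significant word read later in this record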
+                            long1 = np.fromfile(f, np.uint16, count=1)[0]
+                            self.Cfg.bc[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+                            self.Cfg.ba[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+                            self.Cfg.bg[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+                            self.Cfg.bm[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+                            self.Cfg.be_mmps[i_ens] = np.fromfile(f, np.uint16, count=1)[0]
+
+                            # Read WinRiver 10.06 format GPS data
+                            self.Gps.lat_deg[i_ens] = (np.fromfile(f, np.int32, count=1)[0]/2**31) * 180
+
+                            # Read the least significant words of the beam depths
+                            dummy = np.fromfile(f, np.uint16, count=4)
+                            self.Bt.depth_m[0:4, i_ens] = dummy.T
+
+                            # Read bottom-track velocities
+                            dummy = np.fromfile(f, np.int16, count=4)
+                            self.Bt.vel_mps[0:4, i_ens] = dummy.T
+
+                            # Read bottom-track correlations
+                            dummy = np.fromfile(f, np.uint8, count=4)
+                            self.Bt.corr[0:4, i_ens] = dummy.T
+
+                            # Read bottom-track evaluation amplitude
+                            dummy = np.fromfile(f, np.uint8, count=4)
+                            self.Bt.eval_amp[0:4, i_ens] = dummy.T
+
+                            # Read bottom-track percent good
+                            dummy = np.fromfile(f, np.uint8, count=4)
+                            self.Bt.pergd[0:4, i_ens] = dummy.T
+
+                            # Read WinRiver 10.06 format GPS data
+                            dummy = np.fromfile(f, np.uint16, count=1)[0]
+                            # 0x8000 (32768 when read as unsigned) flags invalid data
+                            if dummy != 32768:
+                                self.Gps.alt_m[i_ens] = (dummy-32768)/10
+                            else:
+                                self.Gps.alt_m[i_ens] = np.nan
+
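+                            # Combine the two 16-bit words into a 32-bit binary angle and
+                            # scale it to degrees of longitude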
+                            long2 = np.fromfile(f, np.uint16, count=1)[0]
+                            self.Gps.long_deg[i_ens] = ((long1+long2*2**16)/2**31)*180
+                            if self.Gps.long_deg[i_ens] > 180:
+                                self.Gps.long_deg[i_ens] -= 360
+
+                            self.Bt.ext_depth_cm[i_ens] = np.fromfile(f, np.int16, count=1)[0]
+                            dummy = np.fromfile(f, np.int16, count=1)[0]
+                            if dummy != -32768:
+                                self.Gps.gga_vel_e_mps[i_ens] = dummy * -1 / 1000
+                            else:
+                                self.Gps.gga_vel_e_mps[i_ens] = np.nan
+
+                            dummy = np.fromfile(f, np.int16, count=1)[0]
+                            if dummy != -32768:
+                                self.Gps.gga_vel_n_mps[i_ens] = dummy * -1 / 1000
+                            else:
+                                self.Gps.gga_vel_n_mps[i_ens] = np.nan
+
+                            dummy = np.fromfile(f, np.int16, count=1)[0]
+                            if dummy != -32768:
+                                self.Gps.vtg_vel_e_mps[i_ens] = dummy * -1 / 1000
+                            else:
+                                self.Gps.vtg_vel_e_mps[i_ens] = np.nan
+
+                            dummy = np.fromfile(f, np.int16, count=1)[0]
+                            if dummy != -32768:
+                                self.Gps.vtg_vel_n_mps[i_ens] = dummy * -1 / 1000
+                            else:
+                                self.Gps.vtg_vel_n_mps[i_ens] = np.nan
+
+                            dummy = np.fromfile(f, np.uint8, count=1)[0]
+                            if dummy != 0:
+                                self.Gps.gsa_v_dop[i_ens] = dummy
+                            dummy = np.fromfile(f, np.uint8, count=1)[0]
+                            if dummy != 0:
+                                self.Gps.gsa_p_dop[i_ens] = dummy
+                            dummy = np.fromfile(f, np.uint8, count=1)[0]
+                            if dummy != 0:
+                                self.Gps.gga_n_stats[i_ens] = dummy
+
+                            f.seek(1, 1)
+                            self.Gps.gsa_sat[i_ens, 4] = np.fromfile(f, np.uint8, count=1)[0]
+                            self.Gps.gsa_sat[i_ens, 5] = np.fromfile(f, np.uint8, count=1)[0]
+                            self.Gps.gga_diff[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+
+                            dummy = np.fromfile(f, np.uint8, count=1)[0]
+                            if dummy != 0:
+                                self.Gps.gga_hdop[i_ens] = dummy / 10
+
+                            self.Gps.gsa_sat[i_ens, 0] = np.fromfile(f, np.uint8, count=1)[0]
+                            self.Gps.gsa_sat[i_ens, 1] = np.fromfile(f, np.uint8, count=1)[0]
+                            self.Gps.gsa_sat[i_ens, 2] = np.fromfile(f, np.uint8, count=1)[0]
+                            self.Gps.gsa_sat[i_ens, 3] = np.fromfile(f, np.uint8, count=1)[0]
+
+                            # Read bx configuration setting
+                            self.Cfg.bx_dm[i_ens] = np.fromfile(f, np.uint16, count=1)[0]
+
+                            # Read bottom-tracking RSSI
+                            self.Bt.rssi[0, i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+                            self.Bt.rssi[1, i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+                            self.Bt.rssi[2, i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+                            self.Bt.rssi[3, i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+
+                            # Read wj configuration setting
+                            self.Cfg.wj[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+
+                            # Read most significant byte and compute beam depths
+                            dummy = np.fromfile(f, np.uint8, count=1)[0]
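+                            # The most significant byte adds dummy * 2**16 cm to each beam
+                            # depth; dividing by 100 converts the correction to meters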
+                            rr_bt_depth_correction[0:4, i_ens] = dummy.T * 2**16 / 100
+
+                            # Check if more data types need to be read and position the pointer
+                            self.end_reading(f, file_loc, i_data_types, i_ens, bytes_per_ens)
+
+                        # Read general NMEA structure
+                        # Data type '0x2022' contains sub data types that identify the specific
+                        # NMEA 0183 sentences to be decoded. There may be multiple values
+                        # for a single ensemble.
+                        elif leader_id == '0x2022':
+                            i2022 += 1
+                            # Update the data types counter
+                            i_data_types += 1
+
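+                            # Each sub-record starts with a specific ID, message size, and
+                            # delta time. IDs 100-107 store the sentence fields in binary
+                            # form; IDs in the 200s carry the raw ASCII sentence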
+                            specific_id = np.fromfile(f, np.int16, count=1)[0]
+                            msg_size = np.fromfile(f, np.int16, count=1)[0]
+                            delta_time = np.fromfile(f, np.double, count=1)[0]
+
+                            # GGA
+                            if specific_id == 100:
+                                j100 += 1
+                                # If the number of values exceeds 20 expand arrays
+                                if j100 > self.Gps2.gga_delta_time.shape[1] - 1:
+                                    self.Gps2.gga_expand(n_ensembles)
+
+                                self.Gps2.gga_delta_time[i_ens, j100] = delta_time
+
+                                self.Gps2.gga_header[i_ens][j100] = ''.join([chr(x) for x in f.read(10)])
+
+                                try:
+                                    temp = ''.join([chr(x) for x in f.read(10)])
+                                    self.Gps2.utc[i_ens, j100] = float(re.findall(r'^\d+\.\d+|\d+', temp)[0])
+                                except ValueError:
+                                    self.Gps2.utc[i_ens, j100] = np.nan
+
+                                self.Gps2.lat_deg[i_ens, j100] = np.fromfile(f, np.float64, count=1)[0]
+                                self.Gps2.lat_ref[i_ens][j100] = chr(f.read(1)[0])
+                                self.Gps2.lon_deg[i_ens, j100] = np.fromfile(f, np.float64, count=1)[0]
+                                self.Gps2.lon_ref[i_ens][j100] = chr(f.read(1)[0])
+                                self.Gps2.corr_qual[i_ens, j100] = np.fromfile(f, np.uint8, count=1)[0]
+                                self.Gps2.num_sats[i_ens, j100] = np.fromfile(f, np.uint8, count=1)[0]
+                                self.Gps2.hdop[i_ens, j100] = np.fromfile(f, np.float32, count=1)[0]
+                                self.Gps2.alt[i_ens, j100] = np.fromfile(f, np.float32, count=1)[0]
+                                self.Gps2.alt_unit[i_ens][j100] = chr(f.read(1)[0])
+                                self.Gps2.geoid[i_ens, j100] = np.fromfile(f, np.float32, count=1)[0]
+                                self.Gps2.geoid_unit[i_ens][j100] = chr(f.read(1)[0])
+                                self.Gps2.d_gps_age[i_ens, j100] = np.fromfile(f, np.float32, count=1)[0]
+                                self.Gps2.ref_stat_id[i_ens, j100] = np.fromfile(f, np.int16, count=1)[0]
+
+                            # VTG
+                            elif specific_id == 101:
+                                j101 += 1
+                                # If the number of values exceeds 20 expand arrays
+                                if j101 > self.Gps2.vtg_delta_time.shape[1] - 1:
+                                    self.Gps2.vtg_expand(n_ensembles)
+
+                                self.Gps2.vtg_delta_time[i_ens, j101] = delta_time
+                                self.Gps2.vtg_header[i_ens][j101] = ''.join([chr(x) for x in f.read(10)])
+                                self.Gps2.course_true[i_ens, j101] = np.fromfile(f, np.float32, count=1)[0]
+                                self.Gps2.true_indicator[i_ens][j101] = chr(f.read(1)[0])
+                                self.Gps2.course_mag[i_ens, j101] = np.fromfile(f, np.float32, count=1)[0]
+                                self.Gps2.mag_indicator[i_ens][j101] = chr(f.read(1)[0])
+                                self.Gps2.speed_knots[i_ens, j101] = np.fromfile(f, np.float32, count=1)[0]
+                                self.Gps2.knots_indicator[i_ens][j101] = chr(f.read(1)[0])
+                                self.Gps2.speed_kph[i_ens, j101] = np.fromfile(f, np.float32, count=1)[0]
+                                self.Gps2.kph_indicator[i_ens][j101] = chr(f.read(1)[0])
+                                self.Gps2.mode_indicator[i_ens][j101] = chr(f.read(1)[0])
+
+                            # Depth sounder
+                            elif specific_id == 102:
+                                j102 += 1
+
+                                if j102 > self.Gps2.dbt_delta_time.shape[1] - 1:
+                                    self.Gps2.dbt_expand(n_ensembles)
+
+                                self.Gps2.dbt_delta_time[i_ens, j102] = delta_time
+                                self.Gps2.dbt_header[i_ens][j102] = ''.join([chr(x) for x in f.read(10)])
+                                self.Gps2.depth_ft[i_ens, j102] = np.fromfile(f, np.float32, count=1)[0]
+                                self.Gps2.ft_indicator[i_ens][j102] = chr(f.read(1)[0])
+                                self.Gps2.depth_m[i_ens, j102] = np.fromfile(f, np.float32, count=1)[0]
+                                self.Gps2.m_indicator[i_ens][j102] = chr(f.read(1)[0])
+                                self.Gps2.depth_fath[i_ens, j102] = np.fromfile(f, np.float32, count=1)[0]
+                                self.Gps2.fath_indicator[i_ens][j102] = chr(f.read(1)[0])
+
+                            # External heading
+                            elif specific_id == 103:
+                                j103 += 1
+
+                                if j103 > self.Gps2.hdt_delta_time.shape[1] - 1:
+                                    self.Gps2.hdt_expand(n_ensembles)
+
+                                self.Gps2.hdt_delta_time[i_ens, j103] = delta_time
+                                self.Gps2.hdt_header[i_ens][j103] = ''.join([chr(x) for x in f.read(10)])
+                                self.Gps2.heading_deg[i_ens, j103] = np.fromfile(f, np.double, count=1)[0]
+                                self.Gps2.h_true_indicator[i_ens][j103] = chr(f.read(1)[0])
+
+                            # GGA
+                            elif specific_id == 104:
+                                j100 += 1
+
+                                if j100 > self.Gps2.gga_delta_time.shape[1] - 1:
+                                    self.Gps2.gga_expand(n_ensembles)
+
+                                self.Gps2.gga_delta_time[i_ens, j100] = delta_time
+                                try:
+                                    self.Gps2.gga_header[i_ens][j100] = ''.join([chr(x) for x in f.read(7)])
+                                except IndexError:
+                                    self.Gps2.gga_header[i_ens][j100] = '       '
+
+                                try:
+                                    temp = ''.join([chr(x) for x in f.read(10)])
+                                    self.Gps2.utc[i_ens, j100] = \
+                                        float(re.findall(r'^\d+\.\d+|\d+', temp)[0])
+                                except (ValueError, AttributeError, IndexError):
+                                    self.Gps2.utc[i_ens, j100] = np.nan
+
+                                self.Gps2.lat_deg[i_ens, j100] = np.fromfile(f, np.float64, count=1)[0]
+                                self.Gps2.lat_ref[i_ens][j100] = chr(f.read(1)[0])
+                                self.Gps2.lon_deg[i_ens, j100] = np.fromfile(f, np.float64, count=1)[0]
+                                self.Gps2.lon_ref[i_ens][j100] = chr(f.read(1)[0])
+                                self.Gps2.corr_qual[i_ens, j100] = np.fromfile(f, np.uint8, count=1)[0]
+                                self.Gps2.num_sats[i_ens, j100] = np.fromfile(f, np.uint8, count=1)[0]
+                                self.Gps2.hdop[i_ens, j100] = np.fromfile(f, np.float32, count=1)[0]
+                                self.Gps2.alt[i_ens, j100] = np.fromfile(f, np.float32, count=1)[0]
+                                self.Gps2.alt_unit[i_ens][j100] = chr(f.read(1)[0])
+                                self.Gps2.geoid[i_ens, j100] = np.fromfile(f, np.float32, count=1)[0]
+                                self.Gps2.geoid_unit[i_ens][j100] = chr(f.read(1)[0])
+                                self.Gps2.d_gps_age[i_ens, j100] = np.fromfile(f, np.float32, count=1)[0]
+                                self.Gps2.ref_stat_id[i_ens, j100] = np.fromfile(f, np.int16, count=1)[0]
+
+                            # VTG
+                            elif specific_id == 105:
+                                j101 += 1
+
+                                if j101 > self.Gps2.vtg_delta_time.shape[1] - 1:
+                                    self.Gps2.vtg_expand(n_ensembles)
+
+                                self.Gps2.vtg_delta_time[i_ens, j101] = delta_time
+                                self.Gps2.vtg_header[i_ens][j101] = ''.join([chr(x) for x in f.read(7)])
+                                self.Gps2.course_true[i_ens, j101] = np.fromfile(f, np.float32, count=1)[0]
+                                self.Gps2.true_indicator[i_ens][j101] = chr(f.read(1)[0])
+                                self.Gps2.course_mag[i_ens, j101] = np.fromfile(f, np.float32, count=1)[0]
+                                self.Gps2.mag_indicator[i_ens][j101] = chr(f.read(1)[0])
+                                self.Gps2.speed_knots[i_ens, j101] = np.fromfile(f, np.float32, count=1)[0]
+                                self.Gps2.knots_indicator[i_ens][j101] = chr(f.read(1)[0])
+                                self.Gps2.speed_kph[i_ens, j101] = np.fromfile(f, np.float32, count=1)[0]
+                                self.Gps2.kph_indicator[i_ens][j101] = chr(f.read(1)[0])
+                                self.Gps2.mode_indicator[i_ens][j101] = chr(f.read(1)[0])
+
+                            # Depth sounder
+                            elif specific_id == 106:
+                                j102 += 1
+
+                                if j102 > self.Gps2.dbt_delta_time.shape[1] - 1:
+                                    self.Gps2.dbt_expand(n_ensembles)
+
+                                self.Gps2.dbt_delta_time[i_ens, j102] = delta_time
+                                self.Gps2.dbt_header[i_ens][j102] = ''.join([chr(x) for x in f.read(7)])
+                                self.Gps2.depth_ft[i_ens, j102] = np.fromfile(f, np.float32, count=1)[0]
+                                self.Gps2.ft_indicator[i_ens][j102] = chr(f.read(1)[0])
+                                self.Gps2.depth_m[i_ens, j102] = np.fromfile(f, np.float32, count=1)[0]
+                                self.Gps2.m_indicator[i_ens][j102] = chr(f.read(1)[0])
+                                self.Gps2.depth_fath[i_ens, j102] = np.fromfile(f, np.float32, count=1)[0]
+                                self.Gps2.fath_indicator[i_ens][j102] = chr(f.read(1)[0])
+
+                            # External heading
+                            elif specific_id == 107:
+                                j103 += 1
+
+                                if j103 > self.Gps2.hdt_delta_time.shape[1] - 1:
+                                    self.Gps2.hdt_expand(n_ensembles)
+
+                                self.Gps2.hdt_delta_time[i_ens, j103] = delta_time
+                                self.Gps2.hdt_header[i_ens][j103] = ''.join([chr(x) for x in f.read(7)])
+                                self.Gps2.heading_deg[i_ens, j103] = np.fromfile(f, np.double, count=1)[0]
+                                self.Gps2.h_true_indicator[i_ens][j103] = chr(f.read(1)[0])
+
+                            # GGA
+                            elif specific_id == 204:
+                                j100 += 1
+
+                                if j100 > self.Gps2.gga_delta_time.shape[1] - 1:
+                                    self.Gps2.gga_expand(n_ensembles)
+
+                                temp = ''.join([chr(x) for x in f.read(msg_size)])
+                                self.Gps2.gga_sentence[i_ens][j100] = temp
+                                temp_array = np.array(temp.split(','))
+                                temp_array[temp_array == '999.9'] = ''
+
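+                                # Convert latitude and longitude from the NMEA ddmm.mmmm
+                                # format to decimal degrees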
+                                try:
+                                    self.Gps2.gga_delta_time[i_ens, j100] = delta_time
+                                    self.Gps2.gga_header[i_ens][j100] = temp_array[0]
+                                    self.Gps2.utc[i_ens, j100] = float(temp_array[1])
+                                    lat_str = temp_array[2]
+                                    lat_deg = float(lat_str[0:2])
+                                    lat_deg = lat_deg+float(lat_str[2:]) / 60
+                                    self.Gps2.lat_deg[i_ens, j100] = lat_deg
+                                    self.Gps2.lat_ref[i_ens][j100] = temp_array[3]
+                                    lon_str = temp_array[4]
+                                    lon_num = float(lon_str)
+                                    lon_deg = np.floor(lon_num / 100)
+                                    lon_deg = lon_deg + (((lon_num / 100.) - lon_deg) * 100.) / 60.
+                                    self.Gps2.lon_deg[i_ens, j100] = lon_deg
+                                    self.Gps2.lon_ref[i_ens][j100] = temp_array[5]
+                                    self.Gps2.corr_qual[i_ens, j100] = float(temp_array[6])
+                                    self.Gps2.num_sats[i_ens, j100] = float(temp_array[7])
+                                    self.Gps2.hdop[i_ens, j100] = float(temp_array[8])
+                                    self.Gps2.alt[i_ens, j100] = float(temp_array[9])
+                                    self.Gps2.alt_unit[i_ens][j100] = temp_array[10]
+                                    self.Gps2.geoid[i_ens, j100] = float(temp_array[11])
+                                    self.Gps2.geoid_unit[i_ens][j100] = temp_array[12]
+                                    self.Gps2.d_gps_age[i_ens, j100] = float(temp_array[13])
+                                    idx_star = temp_array[14].find('*')
+                                    self.Gps2.ref_stat_id[i_ens, j100] = float(temp_array[14][:idx_star])
+
+                                except (ValueError, EOFError, IndexError):
+                                    pass
+
+                            # VTG
+                            elif specific_id == 205:
+                                j101 += 1
+
+                                if j101 > self.Gps2.vtg_delta_time.shape[1] - 1:
+                                    self.Gps2.vtg_expand(n_ensembles)
+
+                                temp = ''.join([chr(x) for x in f.read(msg_size)])
+                                self.Gps2.vtg_sentence[i_ens][j101] = temp
+                                temp_array = np.array(temp.split(','))
+                                temp_array[temp_array == '999.9'] = ''
+
+                                try:
+                                    self.Gps2.vtg_delta_time[i_ens, j101] = delta_time
+                                    self.Gps2.vtg_header[i_ens][j101] = temp_array[0]
+                                    self.Gps2.course_true[i_ens, j101] = valid_number(temp_array[1])
+                                    self.Gps2.true_indicator[i_ens][j101] = temp_array[2]
+                                    self.Gps2.course_mag[i_ens, j101] = valid_number(temp_array[3])
+                                    self.Gps2.mag_indicator[i_ens][j101] = temp_array[4]
+                                    self.Gps2.speed_knots[i_ens, j101] = valid_number(temp_array[5])
+                                    self.Gps2.knots_indicator[i_ens][j101] = temp_array[6]
+                                    self.Gps2.speed_kph[i_ens, j101] = valid_number(temp_array[7])
+                                    self.Gps2.kph_indicator[i_ens][j101] = temp_array[8]
+                                    idx_star = temp_array[9].find('*')
+                                    self.Gps2.mode_indicator[i_ens][j101] = temp_array[9][:idx_star]
+
+                                except (ValueError, EOFError, IndexError):
+                                    pass
+
+                            # Depth sounder
+                            elif specific_id == 206:
+                                j102 += 1
+
+                                if j102 > self.Gps2.dbt_delta_time.shape[1] - 1:
+                                    self.Gps2.dbt_expand(n_ensembles)
+
+                                temp = ''.join([chr(x) for x in f.read(msg_size)])
+                                temp_array = np.array(temp.split(','))
+                                temp_array[temp_array == '999.9'] = ''
+
+                                try:
+                                    self.Gps2.dbt_delta_time[i_ens, j102] = delta_time
+                                    self.Gps2.dbt_header[i_ens][j102] = temp_array[0]
+                                    self.Gps2.depth_ft[i_ens, j102] = float(temp_array[1])
+                                    self.Gps2.ft_indicator[i_ens][j102] = temp_array[2]
+                                    self.Gps2.depth_m[i_ens, j102] = float(temp_array[3])
+                                    self.Gps2.m_indicator[i_ens][j102] = temp_array[4]
+                                    self.Gps2.depth_fath[i_ens, j102] = float(temp_array[5])
+                                    idx_star = temp_array[6].find('*')
+                                    self.Gps2.fath_indicator[i_ens][j102] = temp_array[6][:idx_star]
+
+                                except (ValueError, EOFError, IndexError):
+                                    pass
+
+                            # External heading
+                            elif specific_id == 207:
+                                j103 += 1
+
+                                if j103 > self.Gps2.hdt_delta_time.shape[1] - 1:
+                                    self.Gps2.hdt_expand(n_ensembles)
+
+                                temp = ''.join([chr(x) for x in f.read(msg_size)])
+                                temp_array = np.array(temp.split(','))
+                                temp_array[temp_array == '999.9'] = ''
+
+                                try:
+                                    self.Gps2.hdt_delta_time[i_ens, j103] = delta_time
+                                    self.Gps2.hdt_header[i_ens][j103] = temp_array[0]
+                                    self.Gps2.heading_deg[i_ens, j103] = float(temp_array[1])
+                                    idx_star = temp_array[2].find('*')
+                                    self.Gps2.h_true_indicator[i_ens][j103] = temp_array[2][:idx_star]
+
+                                except (ValueError, EOFError, IndexError):
+                                    pass
+
+                            # Check if more data types need to be read and position the pointer
+                            self.end_reading(f, file_loc, i_data_types, i_ens, bytes_per_ens)
+
+                        # Raw NMEA dbt sentence
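+                        # The sentence length is not stored explicitly, so it is inferred from
+                        # the data type offsets, or from the ensemble size for the last data
+                        # type. The gga, vtg, and gsa sentences below are handled the same way.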
+                        elif leader_id == '0x2100':
+
+                            # Update data types counter
+                            i_data_types += 1
+
+                            # Reposition file pointer
+                            f.seek(int(self.Hdr.data_offsets[i_ens, i_data_types-1])+file_loc+4, 0)
+
+                            # Determine the number of characters to read
+                            if i_data_types < self.Hdr.n_data_types[i_ens]:
+                                num_2_read = self.Hdr.data_offsets[i_ens, i_data_types] \
+                                             - self.Hdr.data_offsets[i_ens, i_data_types - 1] - 4
+                            else:
+                                num_2_read = bytes_per_ens - self.Hdr.data_offsets[i_ens, i_data_types-1] - 6
+
+                            # Read DBT sentence
+                            self.Nmea.dbt[i_ens] = ''.join([chr(x) for x in f.read(int(num_2_read))])
+
+                            # Check if more data types need to be read and position the pointer
+                            self.end_reading(f, file_loc, i_data_types, i_ens, bytes_per_ens)
+
+                        # Raw NMEA gga sentence
+                        elif leader_id == '0x2101':
+                            # Update data types counter
+                            i_data_types += 1
+
+                            # Reposition file pointer
+                            f.seek(int(self.Hdr.data_offsets[i_ens, i_data_types-1])+file_loc+4, 0)
+
+                            # Determine the number of characters to read
+                            if i_data_types < self.Hdr.n_data_types[i_ens]:
+                                num_2_read = self.Hdr.data_offsets[i_ens, i_data_types] \
+                                             - self.Hdr.data_offsets[i_ens, i_data_types - 1] - 4
+                            else:
+                                num_2_read = bytes_per_ens - self.Hdr.data_offsets[i_ens, i_data_types-1] - 6
+
+                            # Read GGA sentence
+                            self.Nmea.gga[i_ens] = ''.join([chr(x) for x in f.read(int(num_2_read))])
+
+                            # Check if more data types need to be read and position the pointer
+                            self.end_reading(f, file_loc, i_data_types, i_ens, bytes_per_ens)
+
+                        # Raw NMEA vtg sentence
+                        elif leader_id == '0x2102':
+                            # Update data types counter
+                            i_data_types += 1
+
+                            # Reposition file pointer
+                            f.seek(int(self.Hdr.data_offsets[i_ens, i_data_types-1])+file_loc+4, 0)
+
+                            # Determine the number of characters to read
+                            if i_data_types < self.Hdr.n_data_types[i_ens]:
+                                num_2_read = self.Hdr.data_offsets[i_ens, i_data_types] \
+                                             - self.Hdr.data_offsets[i_ens, i_data_types - 1] - 4
+                            else:
+                                num_2_read = bytes_per_ens - self.Hdr.data_offsets[i_ens, i_data_types-1] - 6
+
+                            # Read VTG sentence
+                            self.Nmea.vtg[i_ens] = ''.join([chr(x) for x in f.read(int(num_2_read))])
+
+                            # Check if more data types need to be read and position the pointer
+                            self.end_reading(f, file_loc, i_data_types, i_ens, bytes_per_ens)
+
+                        # Raw NMEA gsa sentence
+                        elif leader_id == '0x2103':
+                            # Update data types counter
+                            i_data_types += 1
+
+                            # Reposition file pointer
+                            f.seek(int(self.Hdr.data_offsets[i_ens, i_data_types-1])+file_loc+4, 0)
+
+                            # Determine the number of characters to read
+                            if i_data_types < self.Hdr.n_data_types[i_ens]:
+                                num_2_read = self.Hdr.data_offsets[i_ens, i_data_types] \
+                                             - self.Hdr.data_offsets[i_ens, i_data_types - 1] - 4
+                            else:
+                                num_2_read = bytes_per_ens - self.Hdr.data_offsets[i_ens, i_data_types-1] - 6
+
+                            # Read GSA sentence
+                            self.Nmea.gsa[i_ens] = ''.join([chr(x) for x in f.read(int(num_2_read))])
+
+                            # Check if more data types need to be read and position the pointer
+                            self.end_reading(f, file_loc, i_data_types, i_ens, bytes_per_ens)
+
+                        # Surface cells: cell data
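+                        # Surface cells (RiverRay data) are stored separately from the regular
+                        # profile with their own cell size and first-cell distance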
+                        elif leader_id == '0x10':
+                            # Update data types counter
+                            i_data_types += 1
+
+                            self.Surface.no_cells[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+                            self.Surface.cell_size_cm[i_ens] = np.fromfile(f, np.uint16, count=1)[0]
+                            self.Surface.dist_bin1_cm[i_ens] = np.fromfile(f, np.uint16, count=1)[0]
+
+                            # Check if more data types need to be read and position the pointer
+                            self.end_reading(f, file_loc, i_data_types, i_ens, bytes_per_ens)
+
+                        # Surface cells: velocity data
+                        elif leader_id == '0x110':
+                            # Update data types counter
+                            i_data_types += 1
+
+                            dummy = np.fromfile(f, np.int16, count=int((self.Surface.no_cells[i_ens]*4)))
+                            dummy = np.reshape(dummy, [int(self.Surface.no_cells[i_ens]), n_velocities])
+                            self.Surface.vel_mps[:n_velocities, :int(self.Surface.no_cells[i_ens]), i_ens] = dummy.T
+
+                            # Check if more data types need to be read and position the pointer
+                            self.end_reading(f, file_loc, i_data_types, i_ens, bytes_per_ens)
+
+                        # Surface cells: correlation magnitude
+                        elif leader_id == '0x210':
+                            # Update data types counter
+                            i_data_types += 1
+
+                            dummy = np.fromfile(f, np.uint8, count=int((self.Surface.no_cells[i_ens]*4)))
+                            dummy = np.reshape(dummy, [int(self.Surface.no_cells[i_ens]), n_velocities])
+                            self.Surface.corr[:n_velocities, :int(self.Surface.no_cells[i_ens]), i_ens] = dummy.T
+
+                            # Check if more data types need to be read and position the pointer
+                            self.end_reading(f, file_loc, i_data_types, i_ens, bytes_per_ens)
+
+                        # Surface cells: echo intensity
+                        elif leader_id == '0x310':
+                            # Update data types counter
+                            i_data_types += 1
+
+                            dummy = np.fromfile(f, np.uint8, count=int((self.Surface.no_cells[i_ens]*4)))
+                            dummy = np.reshape(dummy, [int(self.Surface.no_cells[i_ens]), n_velocities])
+                            self.Surface.rssi[:n_velocities, :int(self.Surface.no_cells[i_ens]), i_ens] = dummy.T
+
+                            # Check if more data types need to be read and position the pointer
+                            self.end_reading(f, file_loc, i_data_types, i_ens, bytes_per_ens)
+
+                        # Surface cells: percent good
+                        elif leader_id == '0x410':
+                            # Update data types counter
+                            i_data_types += 1
+
+                            dummy = np.fromfile(f, np.uint8, count=int((self.Surface.no_cells[i_ens]*4)))
+                            dummy = np.reshape(dummy, [int(self.Surface.no_cells[i_ens]), n_velocities])
+                            self.Surface.pergd[:n_velocities, :self.Surface.no_cells[i_ens], i_ens] = dummy.T
+
+                            # Check if more data types need to be read and position the pointer
+                            self.end_reading(f, file_loc, i_data_types, i_ens, bytes_per_ens)
+
+                        # Undefined data skipped
+                        elif leader_id == '0x510':
+                            # Update data types counter
+                            i_data_types += 1
+
+                            # Check if more data types need to be read and position the pointer
+                            self.end_reading(f, file_loc, i_data_types, i_ens, bytes_per_ens)
+
+                        # Automatic mode configuration
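+                        # The automatic mode record reports the ping configuration for each of
+                        # the four beams individually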
+                        elif leader_id == '0x4401':
+                            # Update data types counter
+                            i_data_types += 1
+
+                            self.AutoMode.beam_count[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+
+                            self.AutoMode.Beam1.mode[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+                            self.AutoMode.Beam1.depth_cm[i_ens] = np.fromfile(f, np.uint16, count=1)[0]
+                            self.AutoMode.Beam1.ping_count[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+                            self.AutoMode.Beam1.ping_type[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+                            self.AutoMode.Beam1.cell_count[i_ens] = np.fromfile(f, np.uint16, count=1)[0]
+                            self.AutoMode.Beam1.cell_size_cm[i_ens] = np.fromfile(f, np.uint16, count=1)[0]
+                            self.AutoMode.Beam1.cell_mid_cm[i_ens] = np.fromfile(f, np.uint16, count=1)[0]
+                            self.AutoMode.Beam1.code_repeat[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+                            self.AutoMode.Beam1.trans_length_cm[i_ens] = np.fromfile(f, np.uint16, count=1)[0]
+                            self.AutoMode.Beam1.lag_length_cm[i_ens] = np.fromfile(f, np.uint16, count=1)[0]
+                            self.AutoMode.Beam1.transmit_bw[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+                            self.AutoMode.Beam1.receive_bw[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+                            self.AutoMode.Beam1.ping_interval_ms[i_ens] = np.fromfile(f, np.uint16, count=1)[0]
+
+                            self.AutoMode.Beam2.mode[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+                            self.AutoMode.Beam2.depth_cm[i_ens] = np.fromfile(f, np.uint16, count=1)[0]
+                            self.AutoMode.Beam2.ping_count[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+                            self.AutoMode.Beam2.ping_type[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+                            self.AutoMode.Beam2.cell_count[i_ens] = np.fromfile(f, np.uint16, count=1)[0]
+                            self.AutoMode.Beam2.cell_size_cm[i_ens] = np.fromfile(f, np.uint16, count=1)[0]
+                            self.AutoMode.Beam2.cell_mid_cm[i_ens] = np.fromfile(f, np.uint16, count=1)[0]
+                            self.AutoMode.Beam2.code_repeat[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+                            self.AutoMode.Beam2.trans_length_cm[i_ens] = np.fromfile(f, np.uint16, count=1)[0]
+                            self.AutoMode.Beam2.lag_length_cm[i_ens] = np.fromfile(f, np.uint16, count=1)[0]
+                            self.AutoMode.Beam2.transmit_bw[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+                            self.AutoMode.Beam2.receive_bw[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+                            self.AutoMode.Beam2.ping_interval_ms[i_ens] = np.fromfile(f, np.uint16, count=1)[0]
+
+                            self.AutoMode.Beam3.mode[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+                            self.AutoMode.Beam3.depth_cm[i_ens] = np.fromfile(f, np.uint16, count=1)[0]
+                            self.AutoMode.Beam3.ping_count[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+                            self.AutoMode.Beam3.ping_type[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+                            self.AutoMode.Beam3.cell_count[i_ens] = np.fromfile(f, np.uint16, count=1)[0]
+                            self.AutoMode.Beam3.cell_size_cm[i_ens] = np.fromfile(f, np.uint16, count=1)[0]
+                            self.AutoMode.Beam3.cell_mid_cm[i_ens] = np.fromfile(f, np.uint16, count=1)[0]
+                            self.AutoMode.Beam3.code_repeat[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+                            self.AutoMode.Beam3.trans_length_cm[i_ens] = np.fromfile(f, np.uint16, count=1)[0]
+                            self.AutoMode.Beam3.lag_length_cm[i_ens] = np.fromfile(f, np.uint16, count=1)[0]
+                            self.AutoMode.Beam3.transmit_bw[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+                            self.AutoMode.Beam3.receive_bw[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+                            self.AutoMode.Beam3.ping_interval_ms[i_ens] = np.fromfile(f, np.uint16, count=1)[0]
+
+                            self.AutoMode.Beam4.mode[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+                            self.AutoMode.Beam4.depth_cm[i_ens] = np.fromfile(f, np.uint16, count=1)[0]
+                            self.AutoMode.Beam4.ping_count[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+                            self.AutoMode.Beam4.ping_type[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+                            self.AutoMode.Beam4.cell_count[i_ens] = np.fromfile(f, np.uint16, count=1)[0]
+                            self.AutoMode.Beam4.cell_size_cm[i_ens] = np.fromfile(f, np.uint16, count=1)[0]
+                            self.AutoMode.Beam4.cell_mid_cm[i_ens] = np.fromfile(f, np.uint16, count=1)[0]
+                            self.AutoMode.Beam4.code_repeat[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+                            self.AutoMode.Beam4.trans_length_cm[i_ens] = np.fromfile(f, np.uint16, count=1)[0]
+                            self.AutoMode.Beam4.lag_length_cm[i_ens] = np.fromfile(f, np.uint16, count=1)[0]
+                            self.AutoMode.Beam4.transmit_bw[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+                            self.AutoMode.Beam4.receive_bw[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+                            self.AutoMode.Beam4.ping_interval_ms[i_ens] = np.fromfile(f, np.uint16, count=1)[0]
+
+                            self.AutoMode.Reserved[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+
+                            # Check if more data types need to be read and position the pointer
+                            self.end_reading(f, file_loc, i_data_types, i_ens, bytes_per_ens)
+
+                        # Vertical beam
+                        elif leader_id == '0x4100':
+                            # Update data types counter
+                            i_data_types += 1
+
+                            self.Sensor.vert_beam_eval_amp[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+                            self.Sensor.vert_beam_RSSI_amp[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+                            self.Sensor.vert_beam_range_m[i_ens] = np.fromfile(f, np.uint32, count=1)[0] / 1000
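+                            # The status byte packs the vertical beam status in its two least
+                            # significant bits and the gain setting in the next bit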
+                            temp = "{0:08b}".format(np.fromfile(f, np.uint8, count=1)[0])
+                            self.Sensor.vert_beam_status[i_ens] = int(temp[6:], 2)
+                            if temp[5] == '0':
+                                self.Sensor.vert_beam_gain[i_ens] = 'L'
+                            else:
+                                self.Sensor.vert_beam_gain[i_ens] = 'H'
+
+                            # Check if more data types need to be read and position the pointer
+                            self.end_reading(f, file_loc, i_data_types, i_ens, bytes_per_ens)
+
+                        # Transformation matrix
+                        elif leader_id == '0x3200':
+                            # Update data types counter
+                            i_data_types += 1
+
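+                            # The instrument transformation matrix is stored row by row as
+                            # int16 values scaled by 0.0001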
+                            self.Inst.t_matrix[0, :] = np.fromfile(f, np.int16, count=4) * .0001
+                            self.Inst.t_matrix[1, :] = np.fromfile(f, np.int16, count=4) * .0001
+                            self.Inst.t_matrix[2, :] = np.fromfile(f, np.int16, count=4) * .0001
+                            self.Inst.t_matrix[3, :] = np.fromfile(f, np.int16, count=4) * .0001
+
+                            # Check if more data types need to be read and position the pointer
+                            self.end_reading(f, file_loc, i_data_types, i_ens, bytes_per_ens)
+
+                        else:
+
+                            # Unrecognized leader ID
+                            self.Hdr.invalid[i_ens] = leader_id
+                            i_data_types += 1
+
+                            # Find next leader ID
+                            if (i_data_types + 1) <= self.Hdr.n_data_types[i_ens]:
+                                # Reposition file pointer for next data type
+                                f.seek(int(self.Hdr.data_offsets[i_ens, i_data_types])+file_loc, 0)
+                            else:
+                                if f.tell() < end_file:
+                                    # Locate next ensemble if no more data types
+                                    if i_data_types + 1 > self.Hdr.n_data_types[i_ens] + 1:
+                                        current_loc = f.tell()
+                                        raw_bytes = f.read(end_file - current_loc)
+
+                                        # Search the remaining bytes for the 0x7f7f marker of the next ensemble
+                                        next_ens = raw_bytes.find(b'\x7f\x7f')
+                                        if next_ens > 0:
+                                            f.seek(current_loc + next_ens, 0)
+                                            i_data_types = 0
+                                        else:
+                                            end_file_check = end_file + 1
+
+                                    else:
+                                        f.seek(file_loc+bytes_per_ens-2, 0)
+
+                        # If all data types have been read, read last two bytes of ensemble
+                        if i_ens < len(self.Hdr.n_data_types):
+                            if i_data_types >= self.Hdr.n_data_types[i_ens] and f.tell() <= end_file:
+
+                                try:
+                                    self.Inst.res_RDI = np.fromfile(f, np.uint16, count=1)[0]
+                                    # Read checksum but not used
+                                    _ = np.fromfile(f, np.uint16, count=1)[0]
+                                except (ValueError, EOFError, IndexError):
+                                    pass
+                        else:
+                            end_file_check = end_file
+
+                        if end_file_check < end_file:
+                            end_file_check = f.tell()
+
+                    # Screen for bad data, and do the unit conversions
+                    self.Wt.vel_mps[self.Wt.vel_mps == -32768] = np.nan
+                    self.Wt.vel_mps = self.Wt.vel_mps / 1000
+                    self.Wt.corr[self.Wt.corr == -32768] = np.nan
+                    self.Wt.rssi[self.Wt.rssi == -32768] = np.nan
+                    self.Wt.pergd[self.Wt.pergd == -32768] = np.nan
+
+                    # Remove bad data, convert units
+                    self.Bt.depth_m[self.Bt.depth_m == -32768] = np.nan
+                    self.Bt.depth_m = self.Bt.depth_m / 100
+                    self.Bt.vel_mps[self.Bt.vel_mps == -32768] = np.nan
+                    self.Bt.vel_mps = self.Bt.vel_mps / 1000
+                    self.Bt.corr[self.Bt.corr == -32768] = np.nan
+                    self.Bt.eval_amp[self.Bt.eval_amp == -32768] = np.nan
+                    self.Bt.pergd[self.Bt.pergd == -32768] = np.nan
+
+                    # Correct Bt.depth_m for RiverRay data
+                    if not np.isnan(rr_bt_depth_correction).any():
+                        rr_bt_depth_correction[rr_bt_depth_correction == (-32768 * 2e16) / 100] = np.nan
+                        self.Bt.depth_m += rr_bt_depth_correction
+
+                    # Remove bad data from Surface structure (RR), convert where needed
+                    self.Surface.vel_mps[self.Surface.vel_mps == -32768] = np.nan
+                    self.Surface.vel_mps = self.Surface.vel_mps / 1000
+                    self.Surface.corr[self.Surface.corr == -32768] = np.nan
+                    self.Surface.rssi[self.Surface.rssi == -32768] = np.nan
+                    self.Surface.pergd[self.Surface.pergd == -32768] = np.nan
+
+                    # If requested compute WR2 compatible GPS-based boat velocities
+                    if wr2:
+
+                        # If vtg data are available compute north and east components
+                        if self.Gps2.vtg_header[0][0] == '$':
+
+                            # Find minimum of absolute value of delta time from raw data
+                            vtg_delta_time = np.abs(self.Gps2.vtg_delta_time)
+                            vtg_min = np.nanmin(vtg_delta_time, 1)
+
+                            # Compute the velocity components in m/s using the sentence
+                            # closest in time to the ping for each ensemble
+                            for i in range(len(vtg_delta_time)):
+                                idx = np.where(vtg_delta_time[i, :] == vtg_min[i])[0]
+                                if idx.size > 0:
+                                    # Convert course and speed (kph to m/s) to velocity components
+                                    self.Gps2.vtg_velE_mps[i], self.Gps2.vtg_velN_mps[i] = \
+                                        pol2cart((90 - self.Gps2.course_true[i, idx[0]]) * np.pi / 180,
+                                                 self.Gps2.speed_kph[i, idx[0]] * 0.2777778)
+
+                        if self.Gps2.gga_header[0][0] == '$':
+
+                            # Initialize constants: WGS84 semi-major axis (m) and flattening
+                            e_radius = 6378137
+                            coeff = e_radius * np.pi / 180
+                            ellip = 1 / 298.257223563
+
+                            # Find minimum of absolute value of delta time from raw data
+                            gga_delta_time = np.abs(self.Gps2.gga_delta_time)
+                            gga_min = np.nanmin(gga_delta_time, axis=1)
+
+                            # Process gga data
+                            for i in range(1, len(gga_delta_time)):
+                                # Use the sentence closest in time to the ping for this
+                                # ensemble and for the previous ensemble
+                                idx = np.where(gga_delta_time[i, :] == gga_min[i])[0]
+                                idx_prev = np.where(gga_delta_time[i - 1, :] == gga_min[i - 1])[0]
+                                if idx.size > 0 and idx_prev.size > 0:
+                                    lat_avg_deg = (self.Gps2.lat_deg[i, idx[0]]
+                                                   + self.Gps2.lat_deg[i - 1, idx_prev[0]]) / 2
+                                    sin_lat_avg = np.sin(np.deg2rad(lat_avg_deg))
+                                    # Radii of curvature expressed in meters per degree
+                                    r_e = coeff * (1 + ellip * sin_lat_avg * sin_lat_avg)
+                                    rn = coeff * (1 - 2 * ellip + 3 * ellip * sin_lat_avg * sin_lat_avg)
+                                    dx = r_e * (self.Gps2.lon_deg[i, idx[0]]
+                                                - self.Gps2.lon_deg[i - 1, idx_prev[0]]) * np.cos(np.deg2rad(lat_avg_deg))
+                                    dy = rn * (self.Gps2.lat_deg[i, idx[0]] - self.Gps2.lat_deg[i - 1, idx_prev[0]])
+                                    dt = self.Gps2.utc[i, idx[0]] - self.Gps2.utc[i - 1, idx_prev[0]]
+                                    self.Gps2.gga_velE_mps[i] = dx / dt
+                                    self.Gps2.gga_velN_mps[i] = dy / dt
+                                else:
+                                    self.Gps2.gga_velE_mps[i] = np.nan
+                                    self.Gps2.gga_velN_mps[i] = np.nan
+
+    @staticmethod
+    def number_of_ensembles(f, f_size):
+        """Determines the number of ensembles in the data file.
+
+        Parameters
+        ----------
+        f: BufferedReader
+            File object of pd0 file
+        f_size: int
+            File size in bytes
+
+        Returns
+        -------
+        n_ensembles: int
+            Number of ensembles
+        """
+
+        i = 0
+        leader_id = '0000'
+            
+        # Find the first ensemble
+        while leader_id != '0x7f7f' and i < f_size:
+            f.seek(i, 0)
+            i = i + 1
+            leader_id = hex(np.fromfile(f, np.uint16, count=1)[0])
+
+        # Call find_ens_no to get the first ensemble number
+        first_num = Pd0TRDI.find_ens_no(f)
+
+        # Find last ensemble
+        i = 0
+        leader_id = '0000'
+        last_num = -1
+        
+        while last_num < 0:
+            while leader_id != '0x7f7f' and i < f_size:
+                i = i + 1
+                f.seek(-i, 2)
+
+                try:
+                    leader_id = hex(np.fromfile(f, np.uint16, count=1)[0])
+                except (ValueError, EOFError, IndexError):
+                    continue
+
+            last_num = Pd0TRDI.find_ens_no(f)
+            if last_num is None or np.isnan(last_num):
+                last_num = -1
+            
+            leader_id = '0000'
+        n_ensembles = last_num-first_num+1
+
+        return n_ensembles
+
+    @staticmethod
+    def find_ens_no(f):
+        """This function assumes the current position of the file pointer is just
+            after '7F7F'. The function then reads the ensemble header and
+            works through the data offsets until the 00800 data type is found. The
+            ensemble number is then read.
+
+        Parameters
+        ----------
+        f: BufferedReader
+            File object
+
+        Returns
+        -------
+        ensemble_num: int
+            Ensemble number; -1 if the checksum fails, np.nan if it cannot be read
+        """
+
+        ensemble_num = np.nan
+        try:
+            fileloc = f.tell() - 2
+
+            # Verify the checksum
+            if Pd0TRDI.check_sum(f, fileloc):
+
+                # Read header information
+                f.seek(fileloc+5, 0)
+                n_data_types = np.fromfile(f, np.uint8, count=1)[0]
+                data_offsets = []
+                for x in range(n_data_types):
+                    data_offsets.append(np.fromfile(f, np.uint16, count=1)[0])
+
+                # Initialize variables
+                i = 0
+                leader_id = '0000'
+
+                # Search for 0x80
+                while leader_id != '0x80' and i < n_data_types:
+
+                    f.seek(data_offsets[i]+fileloc, 0)
+                    leader_id = hex(np.fromfile(f, np.uint16, count=1)[0])
+                    i = i + 1
+                    
+                # Read ensemble number from data type 0x80
+                if leader_id == '0x80':
+                    ensemble_num = np.fromfile(f, np.uint16, count=1)[0]
+
+            else:
+                ensemble_num = -1
+        except (EOFError, ValueError):
+            ensemble_num = np.nan
+
+        return ensemble_num
+
+    @staticmethod
+    def check_sum(f, fileloc, bytes_per_ens=None):
+        """Compute and verify checksum values.
+
+        Parameters
+        ----------
+        f: BufferedReader
+            File object
+        fileloc: int
+            Location within file
+        bytes_per_ens: int
+            Number of bytes in ensemble
+
+        Returns
+        -------
+        bool
+        """
+
+        try:
+
+            if bytes_per_ens is None:
+                bytes_per_ens = np.fromfile(f, np.uint16, count=1)[0]
+            # Go to file location from the beginning of file
+            f.seek(fileloc, 0)
+
+            # Sum all bytes in the ensemble; the PD0 checksum is this sum modulo 65536
+            computed_check_sum = sum(f.read(bytes_per_ens)) % 65536
+
+            # Read the stored checksum that follows the ensemble and compare
+            f.seek(fileloc + bytes_per_ens, 0)
+            stored_check_sum = np.fromfile(f, np.uint16, count=1)[0]
+            return computed_check_sum == stored_check_sum
+        except Exception:
+            return False
+
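+    # A worked example of the checksum rule (hypothetical bytes): an ensemble
+    # consisting of b'\x7f\x7f\x06\x00' would store the 16-bit checksum
+    # (0x7f + 0x7f + 0x06 + 0x00) % 65536 = 0x0104.
+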
+    @staticmethod
+    def bad_check_sum(f, file_loc):
+        """Searches for next ensemble.
+
+        Parameters
+        ----------
+        f: BufferedReader
+            File object
+        file_loc: int
+            Location in file
+        """
+
+        search_id = '    '
+        search_loc = file_loc+2
+        while search_id != '0x7f7f':
+            f.seek(search_loc, 0)
+            search_loc += 1
+            try:
+                search_id = hex(np.fromfile(f, np.uint16, count=1)[0])
+            except (ValueError, EOFError, IndexError):
+                # Stop searching if the end of the file is reached
+                break
+        f.seek(search_loc, 0)
+        
+    def end_reading(self, f, file_loc, i_data_types, i_ens, bytes_per_ens):
+        """Checks if more data types need to be read and position file pointer.
+
+        Parameters
+        ----------
+        f: BufferedReader
+            File object
+        file_loc: int
+            Location in file
+        i_data_types: int
+            Count of data types read so far
+        i_ens: int
+            Ensemble counter
+        bytes_per_ens: int
+            Number of bytes in the ensemble
+
+        """
+        if i_data_types + 1 <= self.Hdr.n_data_types[i_ens]:
+            f.seek(int(self.Hdr.data_offsets[i_ens, i_data_types])+file_loc, 0)
+        else:
+            f.seek(file_loc+bytes_per_ens-2, 0)
+
+
+class Hdr(object):
+    """Class to hold header variables.
+
+    Attributes
+    ----------
+    bytes_per_ens: int
+        Number of bytes in ensemble
+    data_offsets: int
+        File offset to start of ensemble
+    n_data_types: int
+        Number of data types in ensemble
+    data_ok: int
+
+    invalid: str
+        Leader ID that was not recognized
+    """
+
+    def __init__(self, n_ensembles, n_types):
+        """Initialize instance variables to empty arrays.
+
+        Parameters
+        ----------
+        n_ensembles: int
+            Number of ensembles
+        n_types: int
+            Number of data types
+        """
+        self.bytes_per_ens = nans(n_ensembles)
+        self.data_offsets = nans([n_ensembles, n_types])
+        self.n_data_types = nans(n_ensembles)
+        self.data_ok = nans(n_ensembles)
+        self.invalid = [''] * n_ensembles
+
+
+class Inst(object):
+    """Class to hold information about the instrument.
+
+    Attributes
+    ----------
+    beam_ang: np.array(int)
+        Angle of transducers in degrees
+    beams: np.array(int)
+        Number of beams used for velocity
+    data_type: list
+        Data type
+    firm_ver: np.array(str)
+        Firmware version
+    freq: np.array(int)
+        Frequency of ADCP in kHz
+    pat: list
+        Beam pattern
+    res_RDI: int
+        Reserved for TRDI
+    sensor_CFG: np.array(int)
+        Sensor configuration
+    xducer: list
+        Indicates if transducer is attached
+    t_matrix: np.array(float)
+        Transformation matrix
+    demod: np.array(int)
+        Demodulation code
+    """
+
+    def __init__(self, n_ensembles):
+        """Initialize instance variables.
+
+        Parameters
+        ----------
+        n_ensembles: int
+            Number of ensembles
+        """
+        self.beam_ang = nans(n_ensembles)
+        self.beams = nans(n_ensembles)
+        self.data_type = [''] * n_ensembles
+        self.firm_ver = nans(n_ensembles)
+        self.freq = nans(n_ensembles)
+        self.pat = [''] * n_ensembles
+        self.res_RDI = 0
+        self.sensor_CFG = nans(n_ensembles)
+        self.xducer = [''] * n_ensembles
+        self.t_matrix = np.tile([np.nan], [4, 4])
+        self.demod = nans(n_ensembles)
+
+
+class AutoMode(object):
+    """Class to hold auto configuration mode settings for each beam.
+
+    Attributes
+    ----------
+    beam_count: np.array(int)
+        Number of beams
+    Beam1: Beam
+        Object of class Beam
+    Beam2: Beam
+        Object of class Beam
+    Beam3: Beam
+        Object of class Beam
+    Beam4: Beam
+        Object of class Beam
+    Reserved: np.array
+    """
+
+    def __init__(self, n_ensembles):
+        """Initialize instance variables.
+
+        Parameters
+        ----------
+        n_ensembles: int
+            Number of ensembles
+        """
+        self.beam_count = nans(n_ensembles)
+        self.Beam1 = Beam(n_ensembles)
+        self.Beam2 = Beam(n_ensembles)
+        self.Beam3 = Beam(n_ensembles)
+        self.Beam4 = Beam(n_ensembles)
+        self.Reserved = nans(n_ensembles)
+
+
+class Beam(object):
+    """Class to hold auto configuration settings for a beam.
+
+    Attributes
+    ----------
+    mode: np.array(int)
+        Water mode
+    depth_cm: np.array(int)
+        Depth in cm
+    ping_count: np.array(int)
+        Number of pings
+    ping_type: np.array(int)
+        Type of pings
+    cell_count: np.array(int)
+        Number of cells
+    cell_size_cm: np.array(int)
+        Cell size in cm
+    cell_mid_cm: np.array(int)
+        Distance to center of cell 1 in cm
+    code_repeat: np.array(int)
+        Number of code repeats
+    trans_length_cm: np.array(int)
+        Transmit length in cm
+    lag_length_cm: np.array(int)
+        Lag length in cm
+    transmit_bw: np.array(int)
+        Transmit bandwidth
+    receive_bw: np.array(int)
+        Receive bandwidth
+    ping_interval_ms: np.array(int)
+        Time between pings in ms
+    """
+
+    def __init__(self, n_ensembles):
+        """Initialize instance variables.
+
+        Parameters
+        ----------
+        n_ensembles: int
+            Number of ensembles
+        """
+
+        self.mode = nans(n_ensembles)
+        self.depth_cm = nans(n_ensembles)
+        self.ping_count = nans(n_ensembles)
+        self.ping_type = nans(n_ensembles)
+        self.cell_count = nans(n_ensembles)
+        self.cell_size_cm = nans(n_ensembles)
+        self.cell_mid_cm = nans(n_ensembles)
+        self.code_repeat = nans(n_ensembles)
+        self.trans_length_cm = nans(n_ensembles)
+        self.lag_length_cm = nans(n_ensembles)
+        self.transmit_bw = nans(n_ensembles)
+        self.receive_bw = nans(n_ensembles)
+        self.ping_interval_ms = nans(n_ensembles)
+
+
+class Bt(object):
+    """Class to hold bottom track data.
+
+    Attributes
+    ----------
+    corr: np.array(int)
+        Correlation for each beam
+    depth_m: np.array(float)
+        Depth for each beam
+    eval_amp: np.array(int)
+        Return amplitude for each beam
+    ext_depth_cm: np.array(int)
+        External depth in cm
+    pergd: np.array(int)
+        Percent good
+    rssi: np.array(int)
+        Return signal strength indicator in counts for each beam
+    vel_mps: np.array(float)
+        Velocity in m/s, rows depend on coordinate system
+    """
+
+    def __init__(self, n_ensembles, n_velocities):
+        """Initialize instance variables.
+
+        Parameters
+        ----------
+        n_ensembles: int
+            Number of ensembles
+        n_velocities: int
+            Number of velocity beams
+        """
+
+        self.corr = nans([n_velocities, n_ensembles])
+        self.depth_m = nans([n_velocities, n_ensembles])
+        self.eval_amp = nans([n_velocities, n_ensembles])
+        self.ext_depth_cm = nans(n_ensembles)
+        self.pergd = nans([n_velocities, n_ensembles])
+        self.rssi = nans([n_velocities, n_ensembles])
+        self.vel_mps = nans([n_velocities, n_ensembles])
+
+
+class Cfg(object):
+    """Class to hold configuration settings.
+
+    Attributes
+    ----------
+    ba: np.array(int)
+        Bottom track amplitude threshold
+    bc: np.array(int)
+        Bottom track correlation threshold
+    be_mmps: np.array(int)
+        Bottom track error velocity threshold
+    bg: np.array(int)
+        Bottom track percent good threshold
+    bm: np.array(int)
+        Bottom mode
+    bp: np.array(int)
+        Number of bottom pings
+    bx_dm: np.array(int)
+        Maximum tracking depth in decimeters
+    code_reps: np.array(int)
+        Number of code repetitions
+    coord_sys: np.array(str)
+        Coordinate system
+    cpu_ser_no: np.array(int)
+        CPU serial number
+    cq: np.array(int)
+        Transmit power
+    cx: np.array(int)
+        Low latency trigger
+    dist_bin1_cm: np.array(int)
+        Distance to center of bin 1 from transducer
+    ea_deg: np.array(int)
+        Heading alignment
+    eb_deg: np.array(int)
+        Heading bias
+    sensor_avail: np.array(str)
+        Sensor availability codes
+    ex: np.array(str)
+        Coordinate transformation codes
+    ez: np.array(str)
+        Sensor codes
+    head_src: np.array(str)
+        Heading source
+    lag_cm: np.array(int)
+        Lag
+    map_bins: np.array(str)
+        Bin mapping
+    n_beams: np.array(int)
+        Number of velocity beams
+    pitch_src: np.array(str)
+        Source of pitch data
+    ref_lay_end_cell: np.array(int)
+        Reference layer end
+    ref_lay_str_cell: np.array(int)
+        Reference layer start
+    roll_src: np.array(str)
+        Roll source
+    sal_src: np.array(str)
+        Salinity source
+    wm: np.array(int)
+        Water mode
+    sos_src: np.array(str)
+        Speed of sound source
+    temp_src: np.array(str)
+        Temperature source
+    tp_sec: np.array(int)
+        Time between pings
+    use_3beam: np.array(str)
+        Setting on whether to use 3-beam solutions or not
+    use_pr: np.array(str)
+        Setting to use pitch and roll or not
+    wa: np.array(int)
+        Water track amplitude threshold
+    wb: np.array(int)
+        Water track bandwidth control
+    wc: np.array(int)
+        Water track correlation threshold
+    we_mmps: np.array(int)
+        Water track error velocity threshold
+    wf_cm: np.array(int)
+        Blank after transmit
+    wg_per: np.array(int)
+        Water track percent good threshold
+    wj: np.array(int)
+        Receiver gain setting
+    wn: np.array(int)
+        Number of depth cells (bins)
+    wp: np.array(int)
+        Number of water pings
+    ws_cm: np.array(int)
+        Bin size
+    xdcr_dep_srs: np.array(str)
+        Transducer depth source
+    xmit_pulse_cm: np.array(int)
+        Transmit pulse length
+    lag_near_bottom: np.array(int)
+        Lag near bottom setting
+    """
+
+    def __init__(self, n_ensembles):
+        """Initialize instance variables.
+
+        Parameters
+        ----------
+        n_ensembles: int
+            Number of ensembles
+        """
+
+        self.ba = nans(n_ensembles)
+        self.bc = nans(n_ensembles)
+        self.be_mmps = nans(n_ensembles)
+        self.bg = nans(n_ensembles)
+        self.bm = nans(n_ensembles)
+        self.bp = nans(n_ensembles)
+        self.bx_dm = nans(n_ensembles)
+        self.code_reps = nans(n_ensembles)
+        self.coord_sys = [''] * n_ensembles
+        self.cpu_ser_no = nans([n_ensembles, 8])
+        self.cq = nans(n_ensembles)
+        self.cx = nans(n_ensembles)
+        self.dist_bin1_cm = nans(n_ensembles)
+        self.ea_deg = nans(n_ensembles)
+        self.eb_deg = nans(n_ensembles)
+        self.sensor_avail = [''] * n_ensembles
+        self.ex = [''] * n_ensembles
+        self.ez = [''] * n_ensembles
+        self.head_src = [''] * n_ensembles
+        self.lag_cm = nans(n_ensembles)
+        self.map_bins = [''] * n_ensembles
+        self.n_beams = nans(n_ensembles)
+        self.pitch_src = [''] * n_ensembles
+        self.ref_lay_end_cell = nans(n_ensembles)
+        self.ref_lay_str_cell = nans(n_ensembles)
+        self.roll_src = [''] * n_ensembles
+        self.sal_src = [''] * n_ensembles
+        self.wm = nans(n_ensembles)
+        self.sos_src = [''] * n_ensembles
+        self.temp_src = [''] * n_ensembles
+        self.tp_sec = nans(n_ensembles)
+        self.use_3beam = [''] * n_ensembles
+        self.use_pr = [''] * n_ensembles
+        self.wa = nans(n_ensembles)
+        self.wb = nans(n_ensembles)
+        self.wc = nans(n_ensembles)
+        self.we_mmps = nans(n_ensembles)
+        self.wf_cm = nans(n_ensembles)
+        self.wg_per = nans(n_ensembles)
+        self.wj = nans(n_ensembles)
+        self.wn = nans(n_ensembles)
+        self.wp = nans(n_ensembles)
+        self.ws_cm = nans(n_ensembles)
+        self.xdcr_dep_srs = [''] * n_ensembles
+        self.xmit_pulse_cm = nans(n_ensembles)
+        self.lag_near_bottom = nans(n_ensembles)
+
+
+class Gps(object):
+    """Class to hold GPS data from WinRiver.
+
+    Attributes
+    ----------
+    alt_m: np.array(float)
+        Altitude in meters
+    gga_diff: np.array(int)
+        Differential correction indicator
+    gga_hdop: np.array(float)
+        Horizontal dilution of precision
+    gga_n_stats: np.array(int)
+        Number of satellites
+    gga_vel_e_mps: np.array(float)
+        Velocity in east direction from GGA data
+    gga_vel_n_mps: np.array(float)
+        Velocity in north direction from GGA data
+    gsa_p_dop: np.array(int)
+        Position dilution of precision
+    gsa_sat: np.array(int)
+        Satellites
+    gsa_v_dop: np.array(float)
+        Vertical dilution of precision
+    lat_deg: np.array(float)
+        Latitude in degrees
+    long_deg: np.array(float)
+        Longitude in degrees
+    vtg_vel_e_mps: np.array(float)
+        Velocity in east direction from VTG data
+    vtg_vel_n_mps: np.array(float)
+        Velocity in north direction from VTG data
+    """
+
+    def __init__(self, n_ensembles):
+        """Initialize instance variables.
+
+        Parameters
+        ----------
+        n_ensembles: int
+            Number of ensembles
+        """
+
+        self.alt_m = nans(n_ensembles)
+        self.gga_diff = nans(n_ensembles)
+        self.gga_hdop = nans(n_ensembles)
+        self.gga_n_stats = nans(n_ensembles)
+        self.gga_vel_e_mps = nans(n_ensembles)
+        self.gga_vel_n_mps = nans(n_ensembles)
+        self.gsa_p_dop = nans(n_ensembles)
+        self.gsa_sat = nans([n_ensembles, 6])
+        self.gsa_v_dop = nans(n_ensembles)
+        self.lat_deg = nans(n_ensembles)
+        self.long_deg = nans(n_ensembles)
+        self.vtg_vel_e_mps = nans(n_ensembles)
+        self.vtg_vel_n_mps = nans(n_ensembles)
+
+
+class Gps2(object):
+    """Class to hold GPS data for WinRiver II.
+
+    Attributes
+    ----------
+    gga_delta_time: np.array(float)
+        Time between ping and gga data
+    gga_header: list
+        GGA header
+    gga_sentence: list
+        GGA sentence
+    utc: np.array(float)
+        UTC time
+    lat_deg: np.array(float)
+        Latitude in degrees
+    lat_ref: list
+        Latitude reference
+    lon_deg: np.array(float)
+        Longitude in degrees
+    lon_ref: list
+        Longitude reference
+    corr_qual: np.array(float)
+        Differential quality indicator
+    num_sats: np.array(int)
+        Number of satellites
+    hdop: np.array(float)
+        Horizontal dilution of precision
+    alt: np.array(float)
+        Altitude
+    alt_unit: list
+        Units for altitude
+    geoid: np.array(float)
+        Geoid height
+    geoid_unit: list
+        Units for geoid height
+    d_gps_age: np.array(float)
+        Age of differential correction
+    ref_stat_id: np.array(float)
+        Reference station ID
+    vtg_delta_time: np.array(float)
+        Time between ping and VTG data
+    vtg_header: list
+        VTG header
+    vtg_sentence: list
+        VTG sentence
+    course_true: np.array(float)
+        Course relative to true north
+    true_indicator: list
+        True north indicator
+    course_mag: np.array(float)
+        Course relative to magnetic north
+    mag_indicator: list
+        Magnetic north indicator
+    speed_knots: np.array(float)
+        Speed in knots
+    knots_indicator: list
+        Knots indicator
+    speed_kph: np.array(float)
+        Speed in kilometers per hour
+    kph_indicator: list
+        Kilometers per hour indicator
+    mode_indicator: list
+        Mode indicator
+    dbt_delta_time: np.array(float)
+        Time between ping and echo sounder data
+    dbt_header: list
+        Echo sounder header
+    depth_ft: np.array(float)
+        Depth in ft from echo sounder
+    ft_indicator: list
+        Feet indicator
+    depth_m: np.array(float)
+        Depth in meters from echo sounder
+    m_indicator: list
+        Meters indicator
+    depth_fath: np.array(float)
+        Depth in fathoms from echo sounder
+    fath_indicator: list
+        Fathoms indicator
+    hdt_delta_time: np.array(float)
+        Time between ping and external heading data
+    hdt_header: list
+        External heading header
+    heading_deg: np.array(float)
+        Heading in degrees from external heading
+    h_true_indicator: list
+        Heading indicator to true north
+    gga_velE_mps: np.array(float)
+        Velocity in east direction in m/s from GGA for WR
+    gga_velN_mps: np.array(float)
+        Velocity in north direction in m/s from GGA for WR
+    vtg_velE_mps: np.array(float)
+        Velocity in east direction in m/s from VTG for WR
+    vtg_velN_mps: np.array(float)
+        Velocity in north direction in m/s from VTG for WR
+    """
+
+    def __init__(self, n_ensembles, wr2):
+        """Initialize instance variables.
+
+        Parameters
+        ----------
+        n_ensembles: int
+            Number of ensembles
+        wr2: bool
+            Setting of whether data is from WR or WR2
+        """
+
+        self.gga_delta_time = np.full([n_ensembles, 20], np.nan)
+        self.gga_header = [x[:] for x in [[''] * 20] * n_ensembles]
+        self.gga_sentence = [x[:] for x in [[''] * 20] * n_ensembles]
+        self.utc = np.full([n_ensembles, 20], np.nan)
+        self.lat_deg = np.zeros([n_ensembles, 20])
+        self.lat_ref = [x[:] for x in [[''] * 20] * n_ensembles]
+        self.lon_deg = np.zeros([n_ensembles, 20])
+        self.lon_ref = [x[:] for x in [[''] * 20] * n_ensembles]
+        self.corr_qual = np.full([n_ensembles, 20], np.nan)
+        self.num_sats = np.full([n_ensembles, 20], np.nan)
+        self.hdop = np.full([n_ensembles, 20], np.nan)
+        self.alt = np.full([n_ensembles, 20], np.nan)
+        self.alt_unit = [x[:] for x in [[''] * 20] * n_ensembles]
+        self.geoid = np.full([n_ensembles, 20], np.nan)
+        self.geoid_unit = [x[:] for x in [[''] * 20] * n_ensembles]
+        self.d_gps_age = np.full([n_ensembles, 20], np.nan)
+        self.ref_stat_id = np.full([n_ensembles, 20], np.nan)
+        self.vtg_delta_time = np.full([n_ensembles, 20], np.nan)
+        self.vtg_header = [x[:] for x in [[''] * 20] * n_ensembles]
+        self.vtg_sentence = [x[:] for x in [[''] * 20] * n_ensembles]
+        self.course_true = np.full([n_ensembles, 20], np.nan)
+        self.true_indicator = [x[:] for x in [[''] * 20] * n_ensembles]
+        self.course_mag = np.full([n_ensembles, 20], np.nan)
+        self.mag_indicator = [x[:] for x in [[''] * 20] * n_ensembles]
+        self.speed_knots = np.full([n_ensembles, 20], np.nan)
+        self.knots_indicator = [x[:] for x in [[''] * 20] * n_ensembles]
+        self.speed_kph = np.zeros([n_ensembles, 20])
+        self.kph_indicator = [x[:] for x in [[''] * 20] * n_ensembles]
+        self.mode_indicator = [x[:] for x in [[''] * 20] * n_ensembles]
+        self.dbt_delta_time = np.full([n_ensembles, 20], np.nan)
+        self.dbt_header = [x[:] for x in [[''] * 20] * n_ensembles]
+        self.depth_ft = np.full([n_ensembles, 20], np.nan)
+        self.ft_indicator = [x[:] for x in [[''] * 20] * n_ensembles]
+        self.depth_m = np.zeros([n_ensembles, 20])
+        self.m_indicator = [x[:] for x in [[''] * 20] * n_ensembles]
+        self.depth_fath = np.full([n_ensembles, 20], np.nan)
+        self.fath_indicator = [x[:] for x in [[''] * 20] * n_ensembles]
+        self.hdt_delta_time = np.full([n_ensembles, 20], np.nan)
+        self.hdt_header = [x[:] for x in [[''] * 20] * n_ensembles]
+        self.heading_deg = np.full([n_ensembles, 20], np.nan)
+        self.h_true_indicator = [x[:] for x in [[''] * 20] * n_ensembles]
+
+        # WR2-compatible GPS velocity arrays (populated only when WR2 processing is applied)
+        self.gga_velE_mps = nans(n_ensembles)
+        self.gga_velN_mps = nans(n_ensembles)
+        self.vtg_velE_mps = nans(n_ensembles)
+        self.vtg_velN_mps = nans(n_ensembles)
+
+    def gga_expand(self, n_ensembles):
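+        """Append one additional GGA sentence slot (column) for all ensembles.
+
+        Parameters
+        ----------
+        n_ensembles: int
+            Number of ensembles
+        """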
+        self.gga_delta_time = np.concatenate(
+            (self.gga_delta_time, np.tile(np.nan, (1, n_ensembles)).T), axis=1)
+        self.utc = np.concatenate(
+            (self.utc, np.tile(np.nan, (1, n_ensembles)).T), axis=1)
+        self.lat_deg = np.concatenate(
+            (self.lat_deg, np.tile(np.nan, (1, n_ensembles)).T), axis=1)
+        self.lon_deg = np.concatenate(
+            (self.lon_deg, np.tile(np.nan, (1, n_ensembles)).T), axis=1)
+        self.corr_qual = np.concatenate(
+            (self.corr_qual, np.tile(np.nan, (1, n_ensembles)).T), axis=1)
+        self.num_sats = np.concatenate(
+            (self.num_sats, np.tile(np.nan, (1, n_ensembles)).T), axis=1)
+        self.hdop = np.concatenate(
+            (self.hdop, np.tile(np.nan, (1, n_ensembles)).T), axis=1)
+        self.alt = np.concatenate(
+            (self.alt, np.tile(np.nan, (1, n_ensembles)).T), axis=1)
+        self.geoid = np.concatenate(
+            (self.geoid, np.tile(np.nan, (1, n_ensembles)).T), axis=1)
+        self.d_gps_age = np.concatenate(
+            (self.d_gps_age, np.tile(np.nan, (1, n_ensembles)).T), axis=1)
+        self.ref_stat_id = np.concatenate(
+            (self.ref_stat_id, np.tile(np.nan, (1, n_ensembles)).T), axis=1)
+        for ens in range(n_ensembles):
+            self.gga_header[ens].append('')
+            self.geoid_unit[ens].append('')
+            self.alt_unit[ens].append('')
+            self.lon_ref[ens].append('')
+            self.lat_ref[ens].append('')
+
+    def vtg_expand(self, n_ensembles):
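+        """Append one additional VTG sentence slot (column) for all ensembles.
+
+        Parameters
+        ----------
+        n_ensembles: int
+            Number of ensembles
+        """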
+        self.vtg_delta_time = np.concatenate(
+            (self.vtg_delta_time, np.tile(np.nan, (1, n_ensembles)).T), axis=1)
+        self.course_true = np.concatenate(
+            (self.course_true, np.tile(np.nan, (1, n_ensembles)).T), axis=1)
+        self.course_mag = np.concatenate(
+            (self.course_mag, np.tile(np.nan, (1, n_ensembles)).T), axis=1)
+        self.speed_knots = np.concatenate(
+            (self.speed_knots, np.tile(np.nan, (1, n_ensembles)).T), axis=1)
+        self.speed_kph = np.concatenate(
+            (self.speed_kph, np.tile(np.nan, (1, n_ensembles)).T), axis=1)
+        for ens in range(n_ensembles):
+            self.kph_indicator[ens].append('')
+            self.mode_indicator[ens].append('')
+            self.vtg_header[ens].append('')
+            self.true_indicator[ens].append('')
+            self.mag_indicator[ens].append('')
+            self.knots_indicator[ens].append('')
+
+    def dbt_expand(self, n_ensembles):
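+        """Append one additional DBT (echo sounder) sentence slot (column) for all ensembles.
+
+        Parameters
+        ----------
+        n_ensembles: int
+            Number of ensembles
+        """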
+        self.dbt_delta_time = np.concatenate(
+            (self.dbt_delta_time, np.tile(np.nan, (1, n_ensembles)).T), axis=1)
+        self.depth_ft = np.concatenate(
+            (self.depth_ft, np.tile(np.nan, (1, n_ensembles)).T), axis=1)
+        self.depth_m = np.concatenate(
+            (self.depth_m, np.tile(np.nan, (1, n_ensembles)).T), axis=1)
+        self.depth_fath = np.concatenate(
+            (self.depth_fath, np.tile(np.nan, (1, n_ensembles)).T), axis=1)
+        for ens in range(n_ensembles):
+            self.fath_indicator[ens].append('')
+            self.dbt_header[ens].append('')
+            self.ft_indicator[ens].append('')
+            self.m_indicator[ens].append('')
+
+    def hdt_expand(self, n_ensembles):
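+        """Append one additional HDT (external heading) sentence slot (column) for all ensembles.
+
+        Parameters
+        ----------
+        n_ensembles: int
+            Number of ensembles
+        """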
+        self.hdt_delta_time = np.concatenate(
+            (self.hdt_delta_time, np.tile(np.nan, (1, n_ensembles)).T), axis=1)
+        self.heading_deg = np.concatenate(
+            (self.heading_deg, np.tile(np.nan, (1, n_ensembles)).T), axis=1)
+        for ens in range(n_ensembles):
+            self.h_true_indicator[ens].append('')
+            self.hdt_header[ens].append('')
+
+
+class Nmea(object):
+    """Class to hold raw NMEA sentences.
+
+    Attributes
+    ----------
+    gga: list
+        List of GGA sentences
+    gsa: list
+        List of GSA sentences
+    vtg: list
+        List of VTG sentences
+    dbt: list
+        List of DBT sentences
+    """
+
+    def __init__(self, n_ensembles):
+        """Initialize instance variables.
+
+        Parameters
+        ----------
+        n_ensembles: int
+            Number of ensembles
+        """
+        self.gga = ['']*n_ensembles
+        self.gsa = ['']*n_ensembles
+        self.vtg = ['']*n_ensembles
+        # self.raw = ['']*n_ensembles DSM: not sure this was used
+        self.dbt = ['']*n_ensembles
+
+
+class Sensor(object):
+    """Class to hold sensor data.
+
+    Attributes
+    ----------
+    ambient_temp: np.array(int)
+        ADC ambient temperature
+    attitude_temp: np.array(int)
+        ADC attitude temperature
+    attitude: np.array(int)
+        ADC attitude
+    bit_test: np.array(int)
+        Bit test results
+    contam_sensor: np.array(int)
+        ADC contamination sensor
+    date: np.array(int)
+        Date
+    date_y2k: np.array(int)
+        Y2K compatible date
+    date_not_y2k: np.array(int)
+        Date not Y2K compatible
+    error_status_word: np.array(int)
+        Error status codes
+    heading_deg: np.array(float)
+        Heading to magnetic north in degrees
+    heading_std_dev_deg: np.array(float)
+        Standard deviation of headings for an ensemble
+    mpt_msc: np.array(int)
+        Minimum time prior to ping
+    num: np.array(int)
+        Ensemble number
+    num_fact: np.array(int)
+        Number fraction
+    num_tot: np.array(int)
+        Number total
+    orient: list
+        Orientation of ADCP
+    pitch_std_dev_deg: np.array(float)
+        Standard deviation of pitch for an ensemble
+    pitch_deg: np.array(float)
+        Pitch in degrees
+    pressure_neg: np.array(int)
+        ADC pressure negative
+    pressure_pos: np.array(int)
+        ADC pressure positive
+    pressure_pascal: np.array(int)
+        Pressure at transducer face in deca-pascals
+    pressure_var_pascal: np.array(int)
+        Pressure variance in deca-pascals
+    roll_std_dev_deg: np.array(float)
+        Standard deviation of roll for an ensemble
+    roll_deg: np.array(float)
+        Roll in degrees
+    salinity_ppt: np.array(int)
+        Salinity in parts per thousand
+    sos_mps: np.array(int)
+        Speed of sound in m/s
+    temperature_deg_c: np.array(float)
+        Water temperature in degrees C
+    time: np.array(int)
+        Time
+    time_y2k: np.array(int)
+        Y2K compatible time
+    xdcr_depth_dm: np.array(int)
+        Transducer depth in decimeters
+    xmit_current: np.array(int)
+        Transmit current
+    xmit_voltage: np.array(int)
+        Transmit voltage
+    vert_beam_eval_amp: np.array(int)
+        Vertical beam amplitude
+    vert_beam_RSSI_amp: np.array(int)
+        Vertical beam return signal strength indicator
+    vert_beam_range_m: np.array(float)
+        Vertical beam range in m
+    vert_beam_gain: list
+        Vertical beam gain setting
+    vert_beam_status: np.array(int)
+        Vertical beam status code
+    """
+
+    def __init__(self, n_ensembles):
+        """Initialize instance variables.
+
+        Parameters
+        ----------
+        n_ensembles: int
+            Number of ensembles
+        """
+
+        self.ambient_temp = nans(n_ensembles)
+        self.attitude_temp = nans(n_ensembles)
+        self.attitude = nans(n_ensembles)
+        self.bit_test = nans(n_ensembles)
+        self.contam_sensor = nans(n_ensembles)
+        self.date = nans([n_ensembles, 3])
+        self.date_y2k = nans([n_ensembles, 4])
+        self.date_not_y2k = nans([n_ensembles, 3])
+        self.error_status_word = [''] * n_ensembles
+        self.heading_deg = nans(n_ensembles)
+        self.heading_std_dev_deg = nans(n_ensembles)
+        self.mpt_msc = nans([n_ensembles, 3])
+        self.num = nans(n_ensembles)
+        self.num_fact = nans(n_ensembles)
+        self.num_tot = nans(n_ensembles)
+        self.orient = [''] * n_ensembles
+        self.pitch_std_dev_deg = nans(n_ensembles)
+        self.pitch_deg = nans(n_ensembles)
+        self.pressure_neg = nans(n_ensembles)
+        self.pressure_pos = nans(n_ensembles)
+        self.pressure_pascal = nans(n_ensembles)
+        self.pressure_var_pascal = nans(n_ensembles)
+        self.roll_std_dev_deg = nans(n_ensembles)
+        self.roll_deg = nans(n_ensembles)
+        self.salinity_ppt = nans(n_ensembles)
+        self.sos_mps = nans(n_ensembles)
+        self.temperature_deg_c = nans(n_ensembles)
+        self.time = nans([n_ensembles, 4])
+        self.time_y2k = nans([n_ensembles, 4])
+        self.xdcr_depth_dm = nans(n_ensembles)
+        self.xmit_current = nans(n_ensembles)
+        self.xmit_voltage = nans(n_ensembles)
+        self.vert_beam_eval_amp = nans(n_ensembles)
+        self.vert_beam_RSSI_amp = nans(n_ensembles)
+        self.vert_beam_range_m = nans(n_ensembles)
+        self.vert_beam_gain = [''] * n_ensembles
+        self.vert_beam_status = np.zeros(n_ensembles)
+
+
+class Surface(object):
+    """Class to hold surface cell data.
+
+    Attributes
+    ----------
+    no_cells: np.array(int)
+        Number of surface cells in the ensemble
+    cell_size_cm: np.array(int)
+        Cell size in cm
+    dist_bin1_cm: np.array(int)
+        Distance to center of cell 1 in cm
+    vel_mps: np.array(float)
+        3D array of velocity data in each cell and ensemble
+    corr: np.array(int)
+        3D array of correlation data for each beam, cell, and ensemble
+    pergd: np.array(int)
+        3D array of percent good for each beam, cell, and ensemble
+    rssi: np.array(int)
+        3D array of return signal strength indicator for each beam, cell, and ensemble
+    """
+
+    def __init__(self, n_ensembles, n_velocities, max_surface_bins):
+        """Initialize instance variables.
+
+        Parameters
+        ----------
+        n_ensembles: int
+            Number of ensembles
+        n_velocities: int
+            Number of velocity beams
+        max_surface_bins: int
+            Maximum number of surface bins in an ensemble in the transect
+        """
+
+        self.no_cells = np.zeros(n_ensembles)
+        self.cell_size_cm = nans(n_ensembles)
+        self.dist_bin1_cm = nans(n_ensembles)
+        self.vel_mps = np.tile([np.nan], [n_velocities, max_surface_bins, n_ensembles])
+        self.corr = nans([n_velocities, max_surface_bins, n_ensembles])
+        self.pergd = nans([n_velocities, max_surface_bins, n_ensembles])
+        self.rssi = nans([n_velocities, max_surface_bins, n_ensembles])
+
+
+class Wt(object):
+    """Class to hold water track data.
+
+    Attributes
+    ----------
+    vel_mps: np.array(float)
+        3D array of velocity data in each cell and ensemble
+    corr: np.array(int)
+        3D array of correlation data for each beam, cell, and ensemble
+    pergd: np.array(int)
+        3D array of percent good for each beam, cell, and ensemble
+    rssi: np.array(int)
+        3D array of return signal strength indicator for each beam, cell, and ensemble
+    """
+
+    def __init__(self, n_bins, n_ensembles, n_velocities):
+        """Initialize instance variables.
+
+        Parameters
+        ----------
+        n_ensembles: int
+            Number of ensembles
+        n_velocities: int
+            Number of velocity beams
+        n_bins: int
+            Maximum number of bins in an ensemble in the transect
+        """
+
+        self.corr = nans([n_velocities, n_bins, n_ensembles])
+        self.pergd = nans([n_velocities, n_bins, n_ensembles])
+        self.rssi = nans([n_velocities, n_bins, n_ensembles])
+        self.vel_mps = nans([n_velocities, n_bins, n_ensembles])
diff --git a/Classes/Pd0TRDI_2.py b/Classes/Pd0TRDI_2.py
new file mode 100644
index 0000000000000000000000000000000000000000..6a20448a59b9502b4d3b8f345be9b956319c1911
--- /dev/null
+++ b/Classes/Pd0TRDI_2.py
@@ -0,0 +1,3503 @@
+import os
+import re
+import numpy as np
+import struct
+from MiscLibs.common_functions import pol2cart, valid_number, nans
+
+
+class Pd0TRDI(object):
+    """Class to read data from PD0 files
+
+    Attributes
+    ----------
+    file_name: str
+        Full name including path of pd0 file to be read
+    Hdr: Hdr
+        Object of Hdr for heading information
+    Inst: Inst
+        Object of Inst to hold instrument information
+    Cfg: Cfg
+        Object of Cfg to hold configuration information
+    Sensor: Sensor
+        Object of Sensor to hold sensor data
+    Wt: Wt
+        Object of Wt to hold water track data
+    Bt: Bt
+        Object of Bt to hold bottom track data
+    Gps: Gps
+        Object of Gps to hold GPS data from previous versions of WR
+    Gps2: Gps2
+        Object of Gps2 to hold GPS data from WR2
+    Surface: Surface
+        Object of Surface to hold surface cell data
+    AutoMode: AutoMode
+        Object of AutoMode to hold auto configuration settings
+    Nmea: Nmea
+        Object of Nmea to hold Nmea data
+    """
+
+    def __init__(self, file_name):
+        """Constructor initializing instance variables.
+
+        Parameters
+        ----------
+        file_name: str
+            Full name including path of pd0 file to be read
+        """
+
+        self.file_name = file_name
+        self.Hdr = None
+        self.Inst = None
+        self.Cfg = None
+        self.Sensor = None
+        self.Wt = None
+        self.Bt = None
+        self.Gps = None
+        self.Gps2 = None
+        self.Surface = None
+        self.AutoMode = None
+        self.Nmea = None
+
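+        # Map each PD0 data type ID to a (name, decoder method) pair; the name
+        # is the key under which the decoded results are stored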
+        self.data_decoders = {
+            0x0000: ('fixed_leader', self.decode_fixed_leader),
+            0x0080: ('variable_leader', self.decode_variable_leader),
+            0x0100: ('velocity', self.decode_velocity),
+            0x0200: ('correlation', self.decode_correlation),
+            0x0300: ('echo_intensity', self.decode_echo_intensity),
+            0x0400: ('percent_good', self.decode_percent_good),
+            0x0500: ('status', self.decode_status),
+            0x0600: ('bottom_track', self.decode_bottom_track),
+            0x2022: ('nmea', self.decode_nmea),
+            0x2100: ('dbt_sentence', self.decode_dbt_sentence),
+            0x2101: ('gga_sentence', self.decode_gga_sentence),
+            0x2102: ('vtg_sentence', self.decode_vtg_sentence),
+            0x2103: ('gsa_sentence', self.decode_gsa_sentence),
+            0x0010: ('surface_leader', self.decode_surface_leader),
+            0x0110: ('surface_velocity', self.decode_surface_velocity),
+            0x0210: ('surface_correlation', self.decode_surface_correlation),
+            0x0310: ('surface_intensity', self.decode_surface_intensity),
+            0x0410: ('surface_percent_good', self.decode_surface_percent_good),
+            0x0510: ('surface_status', self.decode_surface_status),
+            0x4401: ('auto_configuration', self.decode_auto_config),
+            0x4100: ('vertical_beam', self.decode_vertical_beam),
+            0x3200: ('transformation_matrix', self.decode_transformation_matrix)
+        }
+
+        self.nmea_decoders = {100: ('gga', self.decode_gga_100),
+                              101: ('vtg', self.decode_vtg_101),
+                              102: ('ds', self.decode_ds_102),
+                              103: ('ext_heading', self.decode_ext_heading_103),
+                              104: ('gga', self.decode_gga_104),
+                              105: ('vtg', self.decode_vtg_105),
+                              106: ('ds', self.decode_ds_106),
+                              107: ('ext_heading', self.decode_ext_heading_107),
+                              204: ('gga', self.decode_gga_204),
+                              205: ('vtg', self.decode_vtg_205),
+                              206: ('ds', self.decode_ds_206),
+                              207: ('ext_heading', self.decode_ext_heading_207)}
+
+        self.n_velocities = 4
+        self.max_surface_bins = 5
+
+        self.pd0_read(file_name)
+
+    def create_objects(self, n_ensembles, n_types, n_bins, max_surface_bins, n_velocities, wr2=False):
+        """Create objects for instance variables.
+
+        Parameters
+        ----------
+        n_ensembles: int
+            Number of ensembles
+        n_types: int
+            Number of data types
+        n_bins: int
+            Number of bins or depth cells
+        max_surface_bins: int
+            Maximum number of surface cells
+        n_velocities: int
+            Number of velocities
+        wr2: bool
+            Whether WR2 processing of GPS data should be applied
+        """
+
+        self.Hdr = Hdr(n_ensembles, n_types)
+        self.Inst = Inst(n_ensembles)
+        self.Cfg = Cfg(n_ensembles)
+        self.Sensor = Sensor(n_ensembles)
+        self.Wt = Wt(n_bins, n_ensembles, n_velocities)
+        self.Bt = Bt(n_ensembles, n_velocities)
+        self.Gps = Gps(n_ensembles)
+        self.Gps2 = Gps2(n_ensembles, wr2)
+        self.Surface = Surface(n_ensembles, n_velocities, max_surface_bins)
+        self.AutoMode = AutoMode(n_ensembles)
+        self.Nmea = Nmea(n_ensembles)
+
+    def pd0_read(self, fullname, wr2=False):
+        """Reads the binary pd0 file and assigns values to object instance variables.
+
+        Parameters
+        ----------
+        fullname: str
+            Full file name including path
+        wr2: bool
+            Determines if WR2 processing should be applied to GPS data
+        """
+
+        # Check to ensure file exists
+        if os.path.exists(fullname):
+            file_info = os.path.getsize(fullname)
+
+            if file_info > 0:
+                # Open file for processing
+                with open(fullname, 'rb') as f:
+                    pd0 = f.read()
+                pd0_bytes = bytearray(pd0)
+
+                # Initialize classes and arrays
+                n_ensembles, max_types, max_beams, max_bins = self.number_of_ensembles(file_info, pd0_bytes)
+                self.create_objects(n_ensembles, max_types, max_bins, self.max_surface_bins, self.n_velocities, wr2)
+                self.decode_all(pd0_bytes, file_info)
+                self.screen_and_convert(wr2)
+
+    def screen_and_convert(self, wr2):
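+        """Screens data for the manufacturer's bad-value marker (-32768), converts
+        units, and optionally computes WR2-compatible GPS-based boat velocities.
+
+        Parameters
+        ----------
+        wr2: bool
+            Determines if WR2 processing should be applied to GPS data
+        """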
+
+        # Screen for bad data, and do the unit conversions
+        self.Wt.vel_mps[self.Wt.vel_mps == -32768] = np.nan
+        self.Wt.vel_mps = self.Wt.vel_mps / 1000
+        self.Wt.corr[self.Wt.corr == -32768] = np.nan
+        self.Wt.rssi[self.Wt.rssi == -32768] = np.nan
+        self.Wt.pergd[self.Wt.pergd == -32768] = np.nan
+
+        # Remove bad data, convert units
+        self.Bt.depth_m[self.Bt.depth_m == -32768] = np.nan
+        self.Bt.depth_m = self.Bt.depth_m / 100
+        self.Bt.vel_mps[self.Bt.vel_mps == -32768] = np.nan
+        self.Bt.vel_mps = self.Bt.vel_mps / 1000
+        self.Bt.corr[self.Bt.corr == -32768] = np.nan
+        self.Bt.eval_amp[self.Bt.eval_amp == -32768] = np.nan
+        self.Bt.pergd[self.Bt.pergd == -32768] = np.nan
+
+        # Remove bad data from Surface structure (RR), convert where needed
+        self.Surface.vel_mps[self.Surface.vel_mps == -32768] = np.nan
+        self.Surface.vel_mps = self.Surface.vel_mps / 1000
+        self.Surface.corr[self.Surface.corr == -32768] = np.nan
+        self.Surface.rssi[self.Surface.rssi == -32768] = np.nan
+        self.Surface.pergd[self.Surface.pergd == -32768] = np.nan
+
+        # If requested compute WR2 compatible GPS-based boat velocities
+        if wr2:
+
+            # If vtg data are available compute north and east components
+            if self.Gps2.vtg_header[0, 0] == '$':
+
+                # Find minimum of absolute value of delta time from raw data
+                vtg_delta_time = np.abs(self.Gps2.vtg_delta_time)
+                vtg_min = np.nanmin(vtg_delta_time, 1)
+
+                # Compute the velocity components in m/s using the sentence
+                # closest in time to the ping for each ensemble
+                for i in range(len(vtg_delta_time)):
+                    idx = np.where(vtg_delta_time[i, :] == vtg_min[i])[0]
+                    if idx.size > 0:
+                        # Convert course and speed (kph to m/s) to velocity components
+                        self.Gps2.vtg_velE_mps[i], self.Gps2.vtg_velN_mps[i] = \
+                            pol2cart((90 - self.Gps2.course_true[i, idx[0]]) * np.pi / 180,
+                                     self.Gps2.speed_kph[i, idx[0]] * 0.2777778)
+
+            if self.Gps2.gga_header[0, 0] == '$':
+
+                # Initialize constants: WGS84 semi-major axis (m) and flattening
+                e_radius = 6378137
+                coeff = e_radius * np.pi / 180
+                ellip = 1 / 298.257223563
+
+                # Find minimum of absolute value of delta time from raw data
+                gga_delta_time = np.abs(self.Gps2.gga_delta_time)
+                gga_min = np.nanmin(gga_delta_time, axis=1)
+
+                # Process gga data
+                for i in range(1, len(gga_delta_time)):
+                    # Use the sentence closest in time to the ping for this
+                    # ensemble and for the previous ensemble
+                    idx = np.where(gga_delta_time[i, :] == gga_min[i])[0]
+                    idx_prev = np.where(gga_delta_time[i - 1, :] == gga_min[i - 1])[0]
+                    if idx.size > 0 and idx_prev.size > 0:
+                        lat_avg_deg = (self.Gps2.lat_deg[i, idx[0]]
+                                       + self.Gps2.lat_deg[i - 1, idx_prev[0]]) / 2
+                        sin_lat_avg = np.sin(np.deg2rad(lat_avg_deg))
+                        # Radii of curvature expressed in meters per degree
+                        r_e = coeff * (1 + ellip * sin_lat_avg * sin_lat_avg)
+                        rn = coeff * (1 - 2 * ellip + 3 * ellip * sin_lat_avg * sin_lat_avg)
+                        dx = r_e * (self.Gps2.lon_deg[i, idx[0]]
+                                    - self.Gps2.lon_deg[i - 1, idx_prev[0]]) * np.cos(np.deg2rad(lat_avg_deg))
+                        dy = rn * (self.Gps2.lat_deg[i, idx[0]] - self.Gps2.lat_deg[i - 1, idx_prev[0]])
+                        dt = self.Gps2.utc[i, idx[0]] - self.Gps2.utc[i - 1, idx_prev[0]]
+                        self.Gps2.gga_velE_mps[i] = dx / dt
+                        self.Gps2.gga_velN_mps[i] = dy / dt
+                    else:
+                        self.Gps2.gga_velE_mps[i] = np.nan
+                        self.Gps2.gga_velN_mps[i] = np.nan
+
+    def decode_all(self, pd0_bytes, file_info):
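+        """Decodes every ensemble in the file, using the ensemble number from
+        the variable leader to account for lost ensembles.
+
+        Parameters
+        ----------
+        pd0_bytes: bytearray
+            Contents of pd0 file
+        file_info: int
+            File size in bytes
+        """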
+
+        start_byte = 0
+        n = 0
+        ensemble_number = 0
+        while start_byte < file_info:
+            data = self.decode_pd0_bytearray(self.data_decoders, pd0_bytes[start_byte:])
+            if data['checksum']:
+                # Adjust index for lost ensembles
+                if ensemble_number > 0:
+                    n = n + data['variable_leader']['ensemble_number'] - ensemble_number
+                try:
+                    self.Hdr.populate_data(n, data)
+                    self.Inst.populate_data(n, data)
+                    self.Cfg.populate_data(n, data)
+                    self.Sensor.populate_data(n, data)
+                    self.Wt.populate_data(n, data, self)
+                    self.Bt.populate_data(n, data)
+                    # self.Gps.populate_data(n, data)
+                    self.Gps2.populate_data(n, data)
+                    self.Surface.populate_data(n, data, self)
+                    self.AutoMode.populate_data(n, data)
+                    self.Nmea.populate_data(n, data)
+                    start_byte = start_byte + data['header']['number_of_bytes'] + 2
+                    ensemble_number = data['variable_leader']['ensemble_number']
+                except ValueError:
+                    start_byte = Pd0TRDI.find_next(pd0_bytes, start_byte, file_info)
+            else:
+                start_byte = Pd0TRDI.find_next(pd0_bytes, start_byte, file_info)
+
+    def number_of_ensembles(self, file_info, pd0_bytes):
+        """Determines the number of ensembles in the data file.
+
+        Parameters
+        ----------
+        file_info: int
+            File size in bytes
+        pd0_bytes: bytearray
+            Contents of pd0 file
+
+        Returns
+        -------
+        n_ensembles: int
+            Number of ensembles
+        max_data_types: int
+            Maximum number of data types in file
+        max_beams: int
+            Maximum number of beams
+        max_bins: int
+            Maximum number of regular bins
+        """
+
+        # Configure data decoders to be used
+        data_decoders = {0x0000: ('fixed_leader', self.preload_fixed_leader),
+                         0x0080: ('variable_leader', self.preload_variable_leader)}
+
+        # Initialize variables
+        start_byte = 0
+        n_beams = []
+        n_bins = []
+        n_data_types = []
+        ens_num = []
+
+        # Loop through entire file
+        while start_byte < file_info:
+
+            data = self.decode_pd0_bytearray(data_decoders, pd0_bytes[start_byte:])
+            if data['checksum']:
+                if 'number_of_bytes' in data['header'] and 'fixed_leader' in data and 'variable_leader' in data:
+                    n_data_types.append(data['header']['number_of_data_types'])
+                    n_beams.append(data['fixed_leader']['number_of_beams'])
+                    n_bins.append(data['fixed_leader']['number_of_cells'])
+                    ens_num.append(data['variable_leader']['ensemble_number'])
+                    start_byte = start_byte + data['header']['number_of_bytes'] + 2
+                else:
+                    start_byte = Pd0TRDI.find_next(pd0_bytes, start_byte, file_info)
+            else:
+                start_byte = Pd0TRDI.find_next(pd0_bytes, start_byte, file_info)
+
+        # Compute maximums
+        max_data_types = np.nanmax(n_data_types)
+        max_beams = np.nanmax(n_beams)
+        max_bins = np.nanmax(n_bins)
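+        # Using the first and last ensemble numbers reserves array slots for
+        # any ensembles lost during data collection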
+        n_ensembles = ens_num[-1] - ens_num[0] + 1
+
+        return n_ensembles, max_data_types, max_beams, max_bins
+
+    @staticmethod
+    def find_next(pd0_bytes, start_byte, file_info):
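+        """Finds the next ensemble header (0x7f7f) after a bad checksum or parse error.
+
+        Parameters
+        ----------
+        pd0_bytes: bytearray
+            Contents of pd0 file
+        start_byte: int
+            Byte index where the failed ensemble began
+        file_info: int
+            File size in bytes
+
+        Returns
+        -------
+        start_byte: int
+            Byte index of the next header, or file_info if no header is found
+        """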
+
+        try:
+            start_byte = start_byte + 1
+            skip_forward = pd0_bytes[start_byte:].index(b'\x7f\x7f')
+            start_byte = start_byte + skip_forward
+        except ValueError:
+            start_byte = file_info
+
+        return start_byte
+
+    @staticmethod
+    def preload_fixed_leader(pd0_bytes, offset, data):
+        """Parses the fixed leader for number of beams and number of cells.
+
+        Parameters
+        ----------
+        pd0_bytes: bytearray
+            Bytearray of all pd0 data
+        offset: int
+            Pointer into pd0_bytes
+        data: dict
+            Not used, included for compatibility with other decoders
+
+        Returns
+        -------
+        :dict
+            Dictionary containing number_of_beams and number_of_cells
+        """
+
+        fixed_leader_format = (('number_of_beams', 'B', 8), ('number_of_cells', 'B', 9))
+
+        return Pd0TRDI.unpack_bytes(pd0_bytes, fixed_leader_format, offset)
+
+    @staticmethod
+    def preload_variable_leader(pd0_bytes, offset, data):
+        """Decodes variable leader ensemble number
+
+        Parameters
+        ----------
+        pd0_bytes: bytearray
+            Bytearray of all pd0 data
+        offset: int
+            Pointer into pd0_bytes
+        data: dict
+            Dictionary containing previously decoded data
+
+        Returns
+        -------
+        :dict
+            Dictionary of decoded data
+        """
+
+        # Define format
+        variable_leader_format = (('ensemble_number', '<H', 2),)
+
+        return Pd0TRDI.unpack_bytes(pd0_bytes, variable_leader_format, offset)
+
+    @staticmethod
+    def decode_pd0_bytearray(data_decoders, pd0_bytes):
+        """Loops through data and calls appropriate parsing method for each header ID.
+
+        Parameters
+        ----------
+        data_decoders: dict
+            Dictionary associating a method with a leader ID
+        pd0_bytes: bytearray
+            Byte array of entire pd0 file
+
+        Returns
+        -------
+        data: dict
+            Dictionary of decoded data
+        """
+
+        data = {}
+
+        # Read in header
+        data['header'] = Pd0TRDI.decode_fixed_header(pd0_bytes)
+        data['checksum'] = False
+        if 'number_of_bytes' in data['header'] and data['header']['number_of_bytes'] > 0:
+            if 'number_of_data_types' in data['header']:
+                # If checksum is OK then decode address offsets to the data types
+                if Pd0TRDI.validate_checksum(pd0_bytes, data['header']['number_of_bytes']):
+                    data['checksum'] = True
+                    data['header']['address_offsets'] = Pd0TRDI.decode_address_offsets(pd0_bytes,
+                                                                                       data['header']['number_of_data_types'])
+                    data['header']['invalid'] = []
+                    # Loop to decode all data types for which a data decoder is provided
+                    for offset in data['header']['address_offsets']:
+                        if len(pd0_bytes) > offset + 2:
+                            header_id = struct.unpack('<H', pd0_bytes[offset: offset + 2])[0]
+                            if header_id in data_decoders:
+                                key = data_decoders[header_id][0]
+                                decoder = data_decoders[header_id][1]
+                                data[key] = decoder(pd0_bytes, offset, data)
+                            else:
+                                data['header']['invalid'].append(header_id)
+
+        return data
+
+    @staticmethod
+    def unpack_bytes(pd0_bytes, data_format_tuples, offset=0):
+        """Unpackes the data based on the supplied data format tuples and offset.
+
+        Parameters
+        ----------
+        pd0_bytes: bytearray
+            Bytearray of all pd0 data
+        data_format_tuples: tuple
+            A tuple of tuples providing the data name, format, and byte location
+        offset: int
+            Pointer into pd0_bytes
+
+        Returns
+        -------
+        data: dict
+            Dictionary of decoded data
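+
+        Examples
+        --------
+        A format tuple such as (('number_of_bytes', '<H', 2),) reads the two
+        bytes at offset + 2 as an unsigned little-endian short.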
+        """
+        data = {}
+
+        # Decode data for each format specified in the data format tuples and assign to the data dictionary
+        for fmt in data_format_tuples:
+            try:
+                struct_offset = offset + fmt[2]
+                size = struct.calcsize(fmt[1])
+                data[fmt[0]] = struct.unpack(fmt[1], pd0_bytes[struct_offset: struct_offset + size])[0]
+            except struct.error:
+                print('Error parsing %s with format %s at offset %d' % (fmt[0], fmt[1], struct_offset))
+
+        return data
+
+    @staticmethod
+    def validate_checksum(pd0_bytes, offset):
+        """Validates that the checksum is correct to ensure data integrity.
+
+        Parameters
+        ----------
+        pd0_bytes: bytearray
+            Bytearray of all pd0 data
+        offset: int
+            Pointer into pd0_bytes
+
+        Returns
+        -------
+        :bool
+            True if checksum is valid
+
+        """
+        if len(pd0_bytes) > offset + 1:
+            calc_checksum = sum(pd0_bytes[:offset]) & 0xFFFF
+            given_checksum = struct.unpack('<H', pd0_bytes[offset: offset + 2])[0]
+
+            return calc_checksum == given_checksum
+        return False
+
+    @staticmethod
+    def bin2str(bin_in):
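+        """Converts bytes to a UTF-8 string, returning '' if decoding fails.
+
+        Parameters
+        ----------
+        bin_in: bytes
+            Bytes to decode
+
+        Returns
+        -------
+        str_out: str
+            Decoded string, or an empty string on failure
+        """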
+
+        try:
+            str_out = bin_in.decode('utf-8')
+        except (UnicodeDecodeError, AttributeError):
+            str_out = ''
+        return str_out
+
+    @staticmethod
+    def decode_address_offsets(pd0_bytes, num_data_types, offset=6):
+        """Decodes the address offsets for each data type.
+
+        Parameters
+        ----------
+        pd0_bytes: bytearray
+            Bytearray of all pd0 data
+        num_data_types: int
+            Number of data types for which to find offsets
+        offset: int
+            Pointer into pd0_bytes
+
+        Returns
+        -------
+        address_data: list
+            List of offsets to each data type
+        """
+
+        address_data = []
+
+        # Loop through each data type
+        for bytes_start in range(offset, offset + (num_data_types * 2), 2):
+            data = struct.unpack_from('<H', pd0_bytes[bytes_start: bytes_start + 2])[0]
+            address_data.append(data)
+
+        return address_data
+
+    @staticmethod
+    def decode_fixed_header(pd0_bytes):
+        """Decodes fixed header
+
+        Parameters
+        ----------
+        pd0_bytes: bytearray
+            Bytearray of all pd0 data
+
+        Returns
+        -------
+        header: dict
+            Dictionary of header data
+        """
+
+        header_data_format = (('id', 'B', 0),
+                              ('data_source', 'B', 1),
+                              ('number_of_bytes', '<H', 2),
+                              ('spare', 'B', 4),
+                              ('number_of_data_types', 'B', 5))
+
+        header = Pd0TRDI.unpack_bytes(pd0_bytes, header_data_format)
+        return header
+
+    @staticmethod
+    def decode_fixed_leader(pd0_bytes, offset, data):
+        """Decodes fixed leader data
+
+        Parameters
+        ----------
+        pd0_bytes: bytearray
+            Bytearray of all pd0 data
+        offset: int
+            Pointer into pd0_bytes
+        data: dict
+            Dictionary containing previously decoded data
+
+        Returns
+        -------
+        :dict
+            Dictionary of decoded data
+        """
+
+        # Define format
+        fixed_leader_format = (
+            ('id', '<H', 0),
+            ('cpu_firmware_version', 'B', 2),
+            ('cpu_firmware_revision', 'B', 3),
+            ('system_configuration_ls', 'B', 4),
+            ('system_configuration_ms', 'B', 5),
+            ('simulation_data_flag', 'B', 6),
+            ('lag_length', 'B', 7),
+            ('number_of_beams', 'B', 8),
+            ('number_of_cells', 'B', 9),
+            ('number_of_water_pings', '<H', 10),
+            ('depth_cell_size', '<H', 12),
+            ('blank_after_transmit', '<H', 14),
+            ('water_mode', 'B', 16),
+            ('low_correlation_threshold', 'B', 17),
+            ('number_of_code_repetitions', 'B', 18),
+            ('minimum_percentage_water_profile_pings', 'B', 19),
+            ('error_velocity_threshold', '<H', 20),
+            ('minutes', 'B', 22),
+            ('seconds', 'B', 23),
+            ('hundredths', 'B', 24),
+            ('coordinate_transformation_process', 'B', 25),
+            ('heading_alignment', '<H', 26),
+            ('heading_bias', '<H', 28),
+            ('sensor_source', 'B', 30),
+            ('sensor_available', 'B', 31),
+            ('bin_1_distance', '<H', 32),
+            ('transmit_pulse_length', '<H', 34),
+            ('starting_depth_cell', 'B', 36),
+            ('ending_depth_cell', 'B', 37),
+            ('false_target_threshold', 'B', 38),
+            ('low_latency_trigger', 'B', 39),
+            ('transmit_lag_distance', '<H', 40),
+            ('cpu_board_serial_number', '<Q', 42),
+            ('system_bandwidth', '<H', 50),
+            ('system_power', 'B', 52),
+            ('spare', 'B', 53),
+            ('serial_number', '<I', 54),
+            ('beam_angle', 'B', 58)
+        )
+
+        return Pd0TRDI.unpack_bytes(pd0_bytes, fixed_leader_format, offset)
+
+    @staticmethod
+    def decode_variable_leader(pd0_bytes, offset, data):
+        """Decodes variabl leader data
+
+        Parameters
+        ----------
+        pd0_bytes: bytearray
+            Bytearray of all pd0 data
+        offset: int
+            Pointer into pd0_bytes
+        data: dict
+            Dictionary containing previously decoded data
+
+        Returns
+        -------
+        :dict
+            Dictionary of decoded data
+        """
+
+        # Define format
+        variable_leader_format = (
+            ('id', '<H', 0),
+            ('ensemble_number', '<H', 2),
+            ('rtc_year', 'B', 4),
+            ('rtc_month', 'B', 5),
+            ('rtc_day', 'B', 6),
+            ('rtc_hour', 'B', 7),
+            ('rtc_minutes', 'B', 8),
+            ('rtc_seconds', 'B', 9),
+            ('rtc_hundredths', 'B', 10),
+            ('ensemble_number_msb', 'B', 11),
+            ('bit_fault', 'B', 12),
+            ('bit_count', 'B', 13),
+            ('speed_of_sound', '<H', 14),
+            ('depth_of_transducer', '<H', 16),
+            ('heading', '<H', 18),
+            ('pitch', '<h', 20),
+            ('roll', '<h', 22),
+            ('salinity', '<H', 24),
+            ('temperature', '<h', 26),
+            ('mpt_minutes', 'B', 28),
+            ('mpt_seconds', 'B', 29),
+            ('mpt_hundredths', 'B', 30),
+            ('heading_standard_deviation', 'B', 31),
+            ('pitch_standard_deviation', 'B', 32),
+            ('roll_standard_deviation', 'B', 33),
+            ('transmit_current', 'B', 34),
+            ('transmit_voltage', 'B', 35),
+            ('ambient_temperature', 'B', 36),
+            ('pressure_positive', 'B', 37),
+            ('pressure_negative', 'B', 38),
+            ('attitude_temperature', 'B', 39),
+            ('attitude', 'B', 40),
+            ('contamination_sensor', 'B', 41),
+            ('error_status_word', '<I', 42),
+            ('reserved', '<H', 46),
+            ('pressure', '<I', 48),
+            ('pressure_variance', '<I', 52),
+            ('spare', 'B', 56),
+            ('rtc_y2k_century', 'B', 57),
+            ('rtc_y2k_year', 'B', 58),
+            ('rtc_y2k_month', 'B', 59),
+            ('rtc_y2k_day', 'B', 60),
+            ('rtc_y2k_hour', 'B', 61),
+            ('rtc_y2k_minutes', 'B', 62),
+            ('rtc_y2k_seconds', 'B', 63),
+            ('rtc_y2k_hundredths', 'B', 64),
+            ('lag_near_bottom', 'B', 65)
+        )
+
+        return Pd0TRDI.unpack_bytes(pd0_bytes, variable_leader_format, offset)
+
+    @staticmethod
+    def decode_per_cell_per_beam(pd0_bytes, offset, number_of_cells, number_of_beams, struct_format):
+        """Parses fields that are stored in serial cells and beams structures.
+        Returns an array of cell readings where each reading is an array containing the value at that beam.
+
+        Parameters
+        ----------
+        pd0_bytes: bytearray
+            Bytearray of all pd0 data
+        offset: int
+            Pointer into pd0_bytes
+        number_of_cells: int
+            Number of cells in data
+        number_of_beams: int
+            Number of beams in data
+        struct_format: str
+            A string identifying the type of data to decode
+
+        Returns
+        -------
+        data: list
+            A list of lists containing data for each cell and beam
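+
+        Notes
+        -----
+        Values are stored beam-fastest: all beams for cell 0, then all beams
+        for cell 1, and so on, so the result is indexed as data[cell][beam].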
+        """
+
+        data_size = struct.calcsize(struct_format)
+        data = []
+        # Loop through cells
+        for cell in range(0, number_of_cells):
+            cell_start = offset + cell * number_of_beams * data_size
+            cell_data = []
+            # Loop through beams in each cell
+            for field in range(0, number_of_beams):
+                field_start = cell_start + field * data_size
+                data_bytes = pd0_bytes[field_start: field_start + data_size]
+                field_data = struct.unpack(struct_format, data_bytes)[0]
+                cell_data.append(field_data)
+            data.append(cell_data)
+
+        return data
+
+    @staticmethod
+    def decode_velocity(pd0_bytes, offset, data):
+        """Decodes velocity data
+
+        Parameters
+        ----------
+        pd0_bytes: bytearray
+            Bytearray of all pd0 data
+        offset: int
+            Pointer into pd0_bytes
+        data: dict
+            Dictionary containing previously decoded data
+
+        Returns
+        -------
+        velocity_data:dict
+            Dictionary of decoded data
+        """
+
+        # Define format
+        velocity_format = (('id', '<h', 0),)
+
+        # Unpack data
+        velocity_data = Pd0TRDI.unpack_bytes(pd0_bytes, velocity_format, offset)
+        # Move past id field
+        offset += 2
+        # Arrange data in list of depth cells and beams or velocity components
+        velocity_data['data'] = Pd0TRDI.decode_per_cell_per_beam(pd0_bytes,
+                                                                 offset,
+                                                                 data['fixed_leader']['number_of_cells'],
+                                                                 data['fixed_leader']['number_of_beams'],
+                                                                 '<h')
+
+        return velocity_data
+
+    @staticmethod
+    def decode_correlation(pd0_bytes, offset, data):
+        """Decodes correlation data
+
+        Parameters
+        ----------
+        pd0_bytes: bytearray
+            Bytearray of all pd0 data
+        offset: int
+            Pointer into pd0_bytes
+        data: dict
+            Dictionary containing previously decoded data
+
+        Returns
+        -------
+        correlation_data:dict
+            Dictionary of decoded data
+        """
+
+        correlation_format = (('id', '<H', 0),)
+        # Unpack data
+        correlation_data = Pd0TRDI.unpack_bytes(pd0_bytes, correlation_format, offset)
+        # Move past id field
+        offset += 2
+        # Arrange data in list of depth cells and beams
+        correlation_data['data'] = Pd0TRDI.decode_per_cell_per_beam(pd0_bytes,
+                                                                    offset,
+                                                                    data['fixed_leader']['number_of_cells'],
+                                                                    data['fixed_leader']['number_of_beams'],
+                                                                    'B')
+
+        return correlation_data
+
+    @staticmethod
+    def decode_echo_intensity(pd0_bytes, offset, data):
+        """Decodes echo intensity data
+
+        Parameters
+        ----------
+        pd0_bytes: bytearray
+            Bytearray of all pd0 data
+        offset: int
+            Pointer into pd0_bytes
+        data: dict
+            Dictionary containing previously decoded data
+
+        Returns
+        -------
+        echo_intensity_data:dict
+            Dictionary of decoded data
+        """
+
+        echo_intensity_format = (('id', '<H', 0),)
+        # Unpack data
+        echo_intensity_data = Pd0TRDI.unpack_bytes(pd0_bytes, echo_intensity_format, offset)
+        # Move past id field
+        offset += 2
+        # Arrange data in list of depth cells and beams
+        echo_intensity_data['data'] = Pd0TRDI.decode_per_cell_per_beam(pd0_bytes,
+                                                                       offset,
+                                                                       data['fixed_leader']['number_of_cells'],
+                                                                       data['fixed_leader']['number_of_beams'],
+                                                                       'B')
+
+        return echo_intensity_data
+
+    @staticmethod
+    def decode_percent_good(pd0_bytes, offset, data):
+        """Decodes percent good data
+
+        Parameters
+        ----------
+        pd0_bytes: bytearray
+            Bytearray of all pd0 data
+        offset: int
+            Pointer into pd0_bytes
+        data: dict
+            Dictionary containing previously decoded data
+
+        Returns
+        -------
+        percent_good_data:dict
+            Dictionary of decoded data
+        """
+
+        percent_good_format = (('id', '<H', 0),)
+        # Unpack data
+        percent_good_data = Pd0TRDI.unpack_bytes(pd0_bytes, percent_good_format, offset)
+        # Move past id field
+        offset += 2
+        # Arrange data in list of depth cells and beams
+        percent_good_data['data'] = Pd0TRDI.decode_per_cell_per_beam(pd0_bytes,
+                                                                     offset,
+                                                                     data['fixed_leader']['number_of_cells'],
+                                                                     data['fixed_leader']['number_of_beams'],
+                                                                     'B')
+
+        return percent_good_data
+
+    @staticmethod
+    def decode_status(pd0_bytes, offset, data):
+        """Decodes percent good data
+
+        Parameters
+        ----------
+        pd0_bytes: bytearray
+            Bytearray of all pd0 data
+        offset: int
+            Pointer into pd0_bytes
+        data: dict
+            Dictionary containing previously decoded data
+
+        Returns
+        -------
+        status_data:dict
+            Dictionary of decoded data
+        """
+
+        status_format = (('id', '<H', 0),)
+        # Unpack data
+        status_data = Pd0TRDI.unpack_bytes(pd0_bytes, status_format, offset)
+        # Move past id field
+        offset += 2
+        # Arrange data in list of depth cells and beams
+        status_data['data'] = Pd0TRDI.decode_per_cell_per_beam(pd0_bytes,
+                                                               offset,
+                                                               data['fixed_leader']['number_of_cells'],
+                                                               data['fixed_leader']['number_of_beams'],
+                                                               'B')
+
+        return status_data
+
+    @staticmethod
+    def decode_bottom_track(pd0_bytes, offset, data):
+        """Decodes bottom track data
+
+        Parameters
+        ----------
+        pd0_bytes: bytearray
+            Bytearray of all pd0 data
+        offset: int
+            Pointer into pd0_bytes
+        data: dict
+            Dictionary containing previously decoded data (not used)
+
+        Returns
+        -------
+        bottom_track_data:dict
+            Dictionary of decoded data
+        """
+        bottom_track_format = (('id', '<H', 0),
+                               ('pings_per_ensemble_bp', '<H', 2),
+                               ('delay_before_reaquire', '<H', 4),
+                               ('correlation_magnitude_minimum_bc', 'B', 6),
+                               ('evaluation_amplitude_minimum_ba', 'B', 7),
+                               ('percent_good_minimum_bg', 'B', 8),
+                               ('bottom_track_mode_bm', 'B', 9),
+                               ('error_velocity_maximum_be', '<H', 10))
+
+        bottom_track_data = Pd0TRDI.unpack_bytes(pd0_bytes, bottom_track_format, offset)
+        bottom_track_data['range_lsb'] = Pd0TRDI.decode_per_cell_per_beam(pd0_bytes, offset + 16, 1, 4, '<H')
+        bottom_track_data['velocity'] = Pd0TRDI.decode_per_cell_per_beam(pd0_bytes, offset + 24, 1, 4, '<h')
+        bottom_track_data['correlation'] = Pd0TRDI.decode_per_cell_per_beam(pd0_bytes, offset + 32, 1, 4, 'B')
+        bottom_track_data['amplitude'] = Pd0TRDI.decode_per_cell_per_beam(pd0_bytes, offset + 36, 1, 4, 'B')
+        bottom_track_data['percent_good'] = Pd0TRDI.decode_per_cell_per_beam(pd0_bytes, offset + 40, 1, 4, 'B')
+        bottom_track_data['rssi'] = Pd0TRDI.decode_per_cell_per_beam(pd0_bytes, offset + 72, 1, 4, 'B')
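+        # range_msb holds the most-significant byte of each beam range; per the
+        # PD0 format the full range is reconstructed downstream as
+        # range_lsb + 65536 * range_msb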
+        bottom_track_data['range_msb'] = Pd0TRDI.decode_per_cell_per_beam(pd0_bytes, offset + 77, 1, 4, 'B')
+
+        return bottom_track_data
+
+    def decode_nmea(self, pd0_bytes, offset, data):
+        """Decodes nmea data
+
+        Parameters
+        ----------
+        pd0_bytes: bytearray
+            Bytearray of all pd0 data
+        offset: int
+            Pointer into pd0_bytes
+        data: dict
+            Dictionary containing previously decoded data
+
+        Returns
+        -------
+        nmea_data:dict
+            Dictionary of decoded data
+        """
+        nmea_leader_format = (('id', '<H', 0),
+                              ('msg_id', '<H', 2),
+                              ('msg_size', '<H', 4),
+                              ('delta_time', 'd', 6))
+
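+        # The specific-ID leader is 14 bytes: id (2), msg_id (2), msg_size (2),
+        # and an 8-byte double delta_time, so the sentence decoders start at
+        # offset + 14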
+        nmea_data = Pd0TRDI.unpack_bytes(pd0_bytes, nmea_leader_format, offset)
+        if nmea_data['msg_id'] in self.nmea_decoders:
+            key = self.nmea_decoders[nmea_data['msg_id']][0]
+            decoder = self.nmea_decoders[nmea_data['msg_id']][1]
+            if key in data:
+                data[key].append(decoder(pd0_bytes, offset + 14, nmea_data))
+            else:
+                data[key] = [decoder(pd0_bytes, offset + 14, nmea_data)]
+        return nmea_data
+
+    @staticmethod
+    def decode_gga_100(pd0_bytes, offset, data):
+        """Decodes gga data for WinRiver versions prior to 2.00
+
+        Parameters
+        ----------
+        pd0_bytes: bytearray
+            Bytearray of all pd0 data
+        offset: int
+            Pointer into pd0_bytes
+        data: dict
+            Dictionary containing previously decoded data
+
+        Returns
+        -------
+        decoded_data:dict
+            Dictionary of decoded data
+        """
+
+        # Define format
+        format = (('header', '10s', 0),
+                  ('utc', '10s', 10),
+                  ('lat_deg', 'd', 20),
+                  ('lat_ref', 'c', 28),
+                  ('lon_deg', 'd', 29),
+                  ('lon_ref', 'c', 37),
+                  ('corr_qual', 'B', 38),
+                  ('num_sats', 'B', 39),
+                  ('hdop', 'f', 40),
+                  ('alt', 'f', 44),
+                  ('alt_unit', 'c', 48),
+                  ('geoid', 'f', 49),
+                  ('geoid_unit', 'c', 53),
+                  ('d_gps_age', 'f', 54),
+                  ('ref_stat_id', '<H', 58))
+
+        # Decode data
+        decoded_data = Pd0TRDI.unpack_bytes(pd0_bytes, format, offset)
+        decoded_data['header'] = Pd0TRDI.bin2str(decoded_data['header']).rstrip('\x00')
+        try:
+            decoded_data['utc'] = float(re.findall(rb'^\d+\.\d+|\d+', decoded_data['utc'])[0])
+        except BaseException:
+            decoded_data['utc'] = np.nan
+        decoded_data['lat_ref'] = Pd0TRDI.bin2str(decoded_data['lat_ref'])
+        decoded_data['lon_ref'] = Pd0TRDI.bin2str(decoded_data['lon_ref'])
+        decoded_data['geoid_unit'] = Pd0TRDI.bin2str(decoded_data['geoid_unit'])
+        decoded_data['alt_unit'] = Pd0TRDI.bin2str(decoded_data['alt_unit'])
+        decoded_data['delta_time'] = data['delta_time']
+
+        return decoded_data
+
+    @staticmethod
+    def decode_vtg_101(pd0_bytes, offset, data):
+        """Decodes vtg data for WinRiver versions prior to 2.00
+
+        Parameters
+        ----------
+        pd0_bytes: bytearray
+            Bytearray of all pd0 data
+        offset: int
+            Pointer into pd0_bytes
+        data: dict
+            Dictionary containing previously decoded data
+
+        Returns
+        -------
+        decoded_data:dict
+            Dictionary of decoded data
+        """
+
+        # Define format
+        format = (('header', '10s', 0),
+                  ('course_true', 'f', 10),
+                  ('true_indicator', 'c', 14),
+                  ('course_mag', 'f', 15),
+                  ('mag_indicator', 'c', 19),
+                  ('speed_knots', 'f', 20),
+                  ('knots_indicator', 'c', 24),
+                  ('speed_kph', 'f', 25),
+                  ('kph_indicator', 'c', 29),
+                  ('mode_indicator', 'c', 30))
+
+        # Decode data
+        decoded_data = Pd0TRDI.unpack_bytes(pd0_bytes, format, offset)
+        decoded_data['header'] = Pd0TRDI.bin2str(decoded_data['header']).rstrip('\x00')
+        decoded_data['true_indicator'] = Pd0TRDI.bin2str(decoded_data['true_indicator'])
+        decoded_data['mag_indicator'] = Pd0TRDI.bin2str(decoded_data['mag_indicator'])
+        decoded_data['knots_indicator'] = Pd0TRDI.bin2str(decoded_data['knots_indicator'])
+        decoded_data['kph_indicator'] = Pd0TRDI.bin2str(decoded_data['kph_indicator'])
+        decoded_data['delta_time'] = data['delta_time']
+
+        return decoded_data
+
+    @staticmethod
+    def decode_ds_102(pd0_bytes, offset, data):
+        """Decodes depth sounder for WinRiver versions prior to 2.00
+
+        Parameters
+        ----------
+        pd0_bytes: bytearray
+            Bytearray of all pd0 data
+        offset: int
+            Pointer into pd0_bytes
+        data: dict
+            Dictionary containing previously decoded data
+
+        Returns
+        -------
+        decoded_data:dict
+            Dictionary of decoded data
+        """
+
+        # Define format
+        format = (('header', '10s', 0),
+                  ('depth_ft', 'f', 10),
+                  ('ft_indicator', 'c', 14),
+                  ('depth_m', 'f', 15),
+                  ('m_indicator', 'c', 19),
+                  ('depth_fath', 'f', 20),
+                  ('fath_indicator', 'c', 24))
+
+        # Decode data
+        decoded_data = Pd0TRDI.unpack_bytes(pd0_bytes, format, offset)
+        decoded_data['header'] = Pd0TRDI.bin2str(decoded_data['header']).rstrip('\x00')
+        decoded_data['ft_indicator'] = Pd0TRDI.bin2str(decoded_data['ft_indicator'])
+        decoded_data['m_indicator'] = Pd0TRDI.bin2str(decoded_data['m_indicator'])
+        decoded_data['fath_indicator'] = Pd0TRDI.bin2str(decoded_data['fath_indicator'])
+        decoded_data['delta_time'] = data['delta_time']
+
+        return decoded_data
+
+    @staticmethod
+    def decode_ext_heading_103(pd0_bytes, offset, data):
+        """Decodes external heading for WinRiver versions prior to 2.00
+
+        Parameters
+        ----------
+        pd0_bytes: bytearray
+            Bytearray of all pd0 data
+        offset: int
+            Pointer into pd0_bytes
+        data: dict
+            Dictionary containing previously decoded data
+
+        Returns
+        -------
+        decoded_data:dict
+            Dictionary of decoded data
+        """
+
+        # Define format
+        format = (('header', '10s', 0),
+                  ('heading_deg', 'd', 10),
+                  ('h_true_indicator', 'c', 14))
+
+        # Decode data
+        decoded_data = Pd0TRDI.unpack_bytes(pd0_bytes, format, offset)
+        decoded_data['header'] = Pd0TRDI.bin2str(decoded_data['header']).rstrip('\x00')
+        decoded_data['h_true_indicator'] = Pd0TRDI.bin2str(decoded_data['h_true_indicator'])
+        decoded_data['delta_time'] = data['delta_time']
+
+        return decoded_data
+
+    @staticmethod
+    def decode_gga_104(pd0_bytes, offset, data):
+        """Decodes gga data for WinRiver 2.00 and greater with ADCP's without integrated NMEA data
+
+        Parameters
+        ----------
+        pd0_bytes: bytearray
+            Bytearray of all pd0 data
+        offset: int
+            Pointer into pd0_bytes
+        data: dict
+            Dictionary containing previously decoded data
+
+        Returns
+        -------
+        decoded_data:dict
+            Dictionary of decoded data
+        """
+
+        # Define format
+        format = (('header', '7s', 0),
+                  ('utc', '10s', 7),
+                  ('lat_deg', 'd', 17),
+                  ('lat_ref', 'c', 25),
+                  ('lon_deg', 'd', 26),
+                  ('lon_ref', 'c', 34),
+                  ('corr_qual', 'B', 35),
+                  ('num_sats', 'B', 36),
+                  ('hdop', 'f', 37),
+                  ('alt', 'f', 41),
+                  ('alt_unit', 'c', 45),
+                  ('geoid', 'f', 46),
+                  ('geoid_unit', 'c', 50),
+                  ('d_gps_age', 'f', 51),
+                  ('ref_stat_id', '<H', 55))
+
+        # Decode data
+        decoded_data = Pd0TRDI.unpack_bytes(pd0_bytes, format, offset)
+        decoded_data['header'] = Pd0TRDI.bin2str(decoded_data['header']).rstrip('\x00')
+        try:
+            decoded_data['utc'] = float(re.findall(rb'^\d+\.\d+|\d+', decoded_data['utc'])[0])
+        except BaseException:
+            decoded_data['utc'] = np.nan
+        decoded_data['lat_ref'] = Pd0TRDI.bin2str(decoded_data['lat_ref'])
+        decoded_data['lon_ref'] = Pd0TRDI.bin2str(decoded_data['lon_ref'])
+        decoded_data['geoid_unit'] = Pd0TRDI.bin2str(decoded_data['geoid_unit'])
+        decoded_data['alt_unit'] = Pd0TRDI.bin2str(decoded_data['alt_unit'])
+        decoded_data['delta_time'] = data['delta_time']
+
+        return decoded_data
+
+    @staticmethod
+    def decode_vtg_105(pd0_bytes, offset, data):
+        """Decodes vtg data for WinRiver 2.00 and greater with ADCP's without integrated NMEA data
+
+        Parameters
+        ----------
+        pd0_bytes: bytearray
+            Bytearray of all pd0 data
+        offset: int
+            Pointer into pd0_bytes
+        data: dict
+            Dictionary containing previously decoded data
+
+        Returns
+        -------
+        decoded_data:dict
+            Dictionary of decoded data
+        """
+
+        # Define format
+        format = (('header', '7s', 0),
+                  ('course_true', 'f', 7),
+                  ('true_indicator', 'c', 11),
+                  ('course_mag', 'f', 12),
+                  ('mag_indicator', 'c', 16),
+                  ('speed_knots', 'f', 17),
+                  ('knots_indicator', 'c', 21),
+                  ('speed_kph', 'f', 22),
+                  ('kph_indicator', 'c', 26),
+                  ('mode_indicator', 'c', 27))
+
+        # Decode data
+        decoded_data = Pd0TRDI.unpack_bytes(pd0_bytes, format, offset)
+        decoded_data['header'] = Pd0TRDI.bin2str(decoded_data['header']).rstrip('\x00')
+        decoded_data['true_indicator'] = Pd0TRDI.bin2str(decoded_data['true_indicator'])
+        decoded_data['mag_indicator'] = Pd0TRDI.bin2str(decoded_data['mag_indicator'])
+        decoded_data['knots_indicator'] = Pd0TRDI.bin2str(decoded_data['knots_indicator'])
+        decoded_data['kph_indicator'] = Pd0TRDI.bin2str(decoded_data['kph_indicator'])
+        decoded_data['delta_time'] = data['delta_time']
+
+        return decoded_data
+
+    @staticmethod
+    def decode_ds_106(pd0_bytes, offset, data):
+        """Decodes depth sounder for WinRiver 2.00 and greater with ADCP's without integrated NMEA data
+
+        Parameters
+        ----------
+        pd0_bytes: bytearray
+            Bytearray of all pd0 data
+        offset: int
+            Pointer into pd0_bytes
+        data: dict
+            Dictionary containing previously decoded data
+
+        Returns
+        -------
+        decoded_data:dict
+            Dictionary of decoded data
+        """
+
+        # Define format
+        format = (('header', '7s', 0),
+                  ('depth_ft', 'f', 7),
+                  ('ft_indicator', 'c', 11),
+                  ('depth_m', 'f', 12),
+                  ('m_indicator', 'c', 16),
+                  ('depth_fath', 'f', 17),
+                  ('fath_indicator', 'c', 21))
+
+        # Decode data
+        decoded_data = Pd0TRDI.unpack_bytes(pd0_bytes, format, offset)
+        decoded_data['header'] = Pd0TRDI.bin2str(decoded_data['header']).rstrip('\x00')
+        decoded_data['ft_indicator'] = Pd0TRDI.bin2str(decoded_data['ft_indicator'])
+        decoded_data['m_indicator'] = Pd0TRDI.bin2str(decoded_data['m_indicator'])
+        decoded_data['fath_indicator'] = Pd0TRDI.bin2str(decoded_data['fath_indicator'])
+        decoded_data['delta_time'] = data['delta_time']
+
+        return decoded_data
+
+    @staticmethod
+    def decode_ext_heading_107(pd0_bytes, offset, data):
+        """Decodes external heading for WinRiver 2.00 and greater with ADCP's without integrated NMEA data
+
+        Parameters
+        ----------
+        pd0_bytes: bytearray
+            Bytearray of all pd0 data
+        offset: int
+            Pointer into pd0_bytes
+        data: dict
+            Dictionary containing previously decoded data
+
+        Returns
+        -------
+        decoded_data:dict
+            Dictionary of decoded data
+        """
+
+        # Define format
+        format = (('header', '7s', 0),
+                  ('heading_deg', 'd', 7),
+                  ('h_true_indicator', 'c', 15))
+
+        # Decode data
+        decoded_data = Pd0TRDI.unpack_bytes(pd0_bytes, format, offset)
+        decoded_data['header'] = Pd0TRDI.bin2str(decoded_data['header']).rstrip('\x00')
+        if abs(decoded_data['heading_deg']) < 360:
+            try:
+                decoded_data['h_true_indicator'] = Pd0TRDI.bin2str(decoded_data['h_true_indicator'])
+            except:
+                decoded_data['h_true_indicator'] = ''
+        else:
+            decoded_data['heading_deg'] = np.nan
+            decoded_data['h_true_indicator'] = ''
+        decoded_data['delta_time'] = data['delta_time']
+
+        return decoded_data
+
+    @staticmethod
+    def decode_gga_204(pd0_bytes, offset, data):
+        """Decodes gga data for ADCP's with integrated NMEA data
+
+        Parameters
+        ----------
+        pd0_bytes: bytearray
+            Bytearray of all pd0 data
+        offset: int
+            Pointer into pd0_bytes
+        data: dict
+            Dictionary containing previously decoded data
+
+        Returns
+        -------
+        decoded_data:dict
+            Dictionary of decoded data
+        """
+
+        # Initialize dictionary
+        decoded_data = {}
+        decoded_data['header'] = ''
+        decoded_data['utc'] = np.nan
+        decoded_data['lat_deg'] = np.nan
+        decoded_data['lat_ref'] = ''
+        decoded_data['lon_deg'] = np.nan
+        decoded_data['lon_ref'] = ''
+        decoded_data['corr_qual'] = np.nan
+        decoded_data['num_sats'] = np.nan
+        decoded_data['hdop'] = np.nan
+        decoded_data['alt'] = np.nan
+        decoded_data['alt_unit'] = ''
+        decoded_data['geoid'] = ''
+        decoded_data['geoid_unit'] = ''
+        decoded_data['d_gps_age'] = np.nan
+        decoded_data['ref_stat_id'] = np.nan
+        decoded_data['delta_time'] = np.nan
+
+        # Decode NMEA sentence and split into an array
+        format = str(data['msg_size']) + 'c'
+        sentence = Pd0TRDI.bin2str(b''.join(list(struct.unpack(format, pd0_bytes[offset: offset + data['msg_size']]))))
+        temp_array = np.array(sentence.split(','))
+        temp_array[temp_array == '999.9'] = ''
+
+        # Assign parts of array to dictionary
+        try:
+            decoded_data['delta_time'] = data['delta_time']
+            decoded_data['header'] = temp_array[0]
+            decoded_data['utc'] = valid_number(temp_array[1])
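+            # NMEA latitude/longitude are ddmm.mmmm / dddmm.mmmm; convert to
+            # decimal degrees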
+            lat_str = temp_array[2]
+            lat_deg = valid_number(lat_str[0:2])
+            decoded_data['lat_deg'] = lat_deg + valid_number(lat_str[2:]) / 60
+            decoded_data['lat_ref'] = temp_array[3]
+            lon_str = temp_array[4]
+            lon_num = valid_number(lon_str)
+            lon_deg = np.floor(lon_num / 100.)
+            decoded_data['lon_deg'] = lon_deg + (((lon_num / 100.) - lon_deg) * 100.) / 60.
+            decoded_data['lon_ref'] = temp_array[5]
+            decoded_data['corr_qual'] = valid_number(temp_array[6])
+            decoded_data['num_sats'] = valid_number(temp_array[7])
+            decoded_data['hdop'] = valid_number(temp_array[8])
+            decoded_data['alt'] = valid_number(temp_array[9])
+            decoded_data['alt_unit'] = temp_array[10]
+            decoded_data['geoid'] = temp_array[11]
+            decoded_data['geoid_unit'] = temp_array[12]
+            decoded_data['d_gps_age'] = valid_number(temp_array[13])
+            idx_star = temp_array[14].find('*')
+            decoded_data['ref_stat_id'] = valid_number(temp_array[14][:idx_star])
+
+        except (ValueError, EOFError, IndexError):
+            pass
+
+        return decoded_data
+
+    @staticmethod
+    def decode_vtg_205(pd0_bytes, offset, data):
+        """Decodes vtg data for ADCP's with integrated NMEA data
+
+        Parameters
+        ----------
+        pd0_bytes: bytearray
+            Bytearray of all pd0 data
+        offset: int
+            Pointer into pd0_bytes
+        data: dict
+            Dictionary containing previously decoded data
+
+        Returns
+        -------
+        decoded_data:dict
+            Dictionary of decoded data
+        """
+
+        # Initialize dictionary
+        decoded_data = {}
+        decoded_data['header'] = ''
+        decoded_data['course_true'] = np.nan
+        decoded_data['true_indicator'] = ''
+        decoded_data['course_mag'] = np.nan
+        decoded_data['mag_indicator'] = ''
+        decoded_data['speed_knots'] = np.nan
+        decoded_data['knots_indicator'] = ''
+        decoded_data['speed_kph'] = np.nan
+        decoded_data['kph_indicator'] = ''
+        decoded_data['mode_indicator'] = ''
+        decoded_data['delta_time'] = np.nan
+
+        # Decode NMEA sentence and split into an array
+        format = str(data['msg_size']) + 'c'
+        sentence = Pd0TRDI.bin2str(b''.join(list(struct.unpack(format, pd0_bytes[offset: offset + data['msg_size']]))))
+        temp_array = np.array(sentence.split(','))
+        temp_array[temp_array == '999.9'] = ''
+
+        # Assign parts of array to dictionary
+        try:
+            decoded_data['header'] = temp_array[0]
+            decoded_data['course_true'] = valid_number(temp_array[1])
+            decoded_data['true_indicator'] = temp_array[2]
+            decoded_data['course_mag'] = valid_number(temp_array[3])
+            decoded_data['mag_indicator'] = temp_array[4]
+            decoded_data['speed_knots'] = valid_number(temp_array[5])
+            decoded_data['knots_indicator'] = temp_array[6]
+            decoded_data['speed_kph'] = valid_number(temp_array[7])
+            decoded_data['kph_indicator'] = temp_array[8]
+            idx_star = temp_array[9].find('*')
+            decoded_data['mode_indicator'] = temp_array[9][:idx_star]
+            decoded_data['delta_time'] = data['delta_time']
+
+        except (ValueError, EOFError, IndexError):
+            pass
+
+        return decoded_data
+
+    @staticmethod
+    def decode_ds_206(pd0_bytes, offset, data):
+        """Decodes depth sounder for ADCP's with integrated NMEA data
+
+        Parameters
+        ----------
+        pd0_bytes: bytearray
+            Bytearray of all pd0 data
+        offset: int
+            Pointer into pd0_bytes
+        data: dict
+            Dictionary containing previously decoded data
+
+        Returns
+        -------
+        decoded_data:dict
+            Dictionary of decoded data
+        """
+
+        # Initialize dictionary
+        decoded_data = {}
+        decoded_data['header'] = ''
+        decoded_data['depth_ft'] = np.nan
+        decoded_data['ft_indicator'] = ''
+        decoded_data['depth_m'] = np.nan
+        decoded_data['m_indicator'] = ''
+        decoded_data['depth_fath'] = np.nan
+        decoded_data['fath_indicator'] = ''
+        decoded_data['delta_time'] = np.nan
+
+        # Decode NMEA sentence and split into an array
+        format = str(data['msg_size']) + 'c'
+        sentence = Pd0TRDI.bin2str(b''.join(list(struct.unpack(format, pd0_bytes[offset: offset + data['msg_size']]))))
+        temp_array = np.array(sentence.split(','))
+        temp_array[temp_array == '999.9'] = ''
+
+        # Assign parts of array to dictionary
+        try:
+            decoded_data['header'] = temp_array[0]
+            decoded_data['depth_ft'] = valid_number(temp_array[1])
+            decoded_data['ft_indicator'] = temp_array[2]
+            decoded_data['depth_m'] = valid_number(temp_array[3])
+            decoded_data['m_indicator'] = temp_array[4]
+            decoded_data['depth_fath'] = valid_number(temp_array[5])
+            idx_star = temp_array[6].find('*')
+            decoded_data['fath_indicator'] = temp_array[6][:idx_star]
+            decoded_data['delta_time'] = data['delta_time']
+
+        except (ValueError, EOFError, IndexError):
+            pass
+
+        return decoded_data
+
+    @staticmethod
+    def decode_ext_heading_207(pd0_bytes, offset, data):
+        """Decodes external heading for ADCP's with integrated NMEA data
+
+        Parameters
+        ----------
+        pd0_bytes: bytearray
+            Bytearray of all pd0 data
+        offset: int
+            Pointer into pd0_bytes
+        data: dict
+            Dictionary containing previously decoded data
+
+        Returns
+        -------
+        decoded_data:dict
+            Dictionary of decoded data
+        """
+
+        # Initialize dictionary
+        decoded_data = {}
+        decoded_data['header'] = ''
+        decoded_data['heading_deg'] = np.nan
+        decoded_data['h_true_indicator'] = ''
+        decoded_data['delta_time'] = np.nan
+
+        # Decode NMEA sentence and split into an array
+        format = str(data['msg_size']) + 'c'
+        sentence = Pd0TRDI.bin2str(b''.join(list(struct.unpack(format, pd0_bytes[offset: offset + data['msg_size']]))))
+        temp_array = np.array(sentence.split(','))
+        temp_array[temp_array == '999.9'] = ''
+
+        # Assign parts of array to dictionary
+        try:
+            decoded_data['header'] = temp_array[0]
+            decoded_data['heading_deg'] = valid_number(temp_array[1])
+            idx_star = temp_array[2].find('*')
+            decoded_data['h_true_indicator'] = temp_array[2][:idx_star]
+            decoded_data['delta_time'] = data['delta_time']
+
+        except (ValueError, EOFError, IndexError):
+            pass
+
+        return decoded_data
+
+    @staticmethod
+    def decode_dbt_sentence(pd0_bytes, offset, data):
+        """Stores dbt sentence
+
+        Parameters
+        ----------
+        pd0_bytes: bytearray
+            Bytearray of all pd0 data
+        offset: int
+            Pointer into pd0_bytes
+        data: dict
+            Dictionary containing previously decoded data
+
+        Returns
+        -------
+        decoded_data:dict
+            Dictionary of decoded data
+        """
+
+        return Pd0TRDI.decode_nmea_sentence(pd0_bytes, offset, data, 'dbt_sentence')
+
+    @staticmethod
+    def decode_gga_sentence(pd0_bytes, offset, data):
+        """Stores dbt sentence
+
+        Parameters
+        ----------
+        pd0_bytes: bytearray
+            Bytearray of all pd0 data
+        offset: int
+            Pointer into pd0_bytes
+        data: dict
+            Dictionary containing previously decoded data
+
+        Returns
+        -------
+        decoded_data:dict
+            Dictionary of decoded data
+        """
+
+        return Pd0TRDI.decode_nmea_sentence(pd0_bytes, offset, data, 'gga_sentence')
+
+    @staticmethod
+    def decode_vtg_sentence(pd0_bytes, offset, data):
+        """Stores dbt sentence
+
+        Parameters
+        ----------
+        pd0_bytes: bytearray
+            Bytearray of all pd0 data
+        offset: int
+            Pointer into pd0_bytes
+        data: dict
+            Dictionary containing previously decoded data
+
+        Returns
+        -------
+        decoded_data:dict
+            Dictionary of decoded data
+        """
+
+        return Pd0TRDI.decode_nmea_sentence(pd0_bytes, offset, data, 'vtg_sentence')
+
+    @staticmethod
+    def decode_gsa_sentence(pd0_bytes, offset, data):
+        """Stores dbt sentence
+
+        Parameters
+        ----------
+        pd0_bytes: bytearray
+            Bytearray of all pd0 data
+        offset: int
+            Pointer into pd0_bytes
+        data: dict
+            Dictionary containing previously decoded data
+
+        Returns
+        -------
+        decoded_data:dict
+            Dictionary of decoded data
+        """
+
+        return Pd0TRDI.decode_nmea_sentence(pd0_bytes, offset, data, 'gsa_sentence')
+
+    @staticmethod
+    def decode_nmea_sentence(pd0_bytes, offset, data, target):
+        """Decodes nmea sentence
+
+        Parameters
+        ----------
+        pd0_bytes: bytearray
+            Bytearray of all pd0 data
+        offset: int
+            Pointer into pd0_bytes
+        data: dict
+            Dictionary containing previously decoded data
+        target: str
+            Dictionary key for decoded data in data
+
+        Returns
+        -------
+        decoded_data:dict
+            Dictionary of decoded data
+        """
+
+        # Compute number of characters in the sentence
+        offset_idx = data['header']['address_offsets'].index(offset)
+
+        if offset_idx + 1 == data['header']['number_of_data_types']:
+            end_offset = data['header']['number_of_bytes']
+        else:
+            end_offset = data['header']['address_offsets'][offset_idx + 1]
+        number_of_characters = end_offset - data['header']['address_offsets'][offset_idx]
+
+        # Generate format string
+        format_str = str(number_of_characters - 4) + 'c'
+        offset = data['header']['address_offsets'][offset_idx]
+        # Decode data
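+        # The first 4 bytes of this data type are skipped to reach the raw
+        # sentence text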
+        sentence = struct.unpack(format_str, pd0_bytes[offset + 4: offset + number_of_characters])
+        try:
+            end_of_sentence = sentence.index(b'\n') + 1
+            sentence = b''.join(sentence[0:end_of_sentence]).decode('utf-8')
+        except ValueError:
+            sentence = ''
+        # Create or add to list of target sentences
+        if target in data:
+            decoded_data = data[target]
+            if isinstance(decoded_data, list):
+                decoded_data.append(sentence)
+            else:
+                decoded_data = [decoded_data, sentence]
+        else:
+            decoded_data = sentence
+
+        return decoded_data
+
+    @staticmethod
+    def decode_surface_leader(pd0_bytes, offset, data):
+        """Decodes surface velocity leader
+
+        Parameters
+        ----------
+        pd0_bytes: bytearray
+            Bytearray of all pd0 data
+        offset: int
+            Pointer into pd0_bytes
+        data: dict
+            Dictionary containing previously decoded data
+
+        Returns
+        -------
+        surface_leader_data:dict
+            Dictionary of decoded data
+        """
+        surface_leader_format = (('id', '<H', 0),
+                                 ('cell_count', 'B', 2),
+                                 ('cell_size', '<H', 3),
+                                 ('range_cell_1', '<H', 5))
+
+        surface_leader_data = Pd0TRDI.unpack_bytes(pd0_bytes, surface_leader_format, offset)
+        return surface_leader_data
+
+    @staticmethod
+    def decode_surface_velocity(pd0_bytes, offset, data):
+        """Decodes surface velocity data
+
+        Parameters
+        ----------
+        pd0_bytes: bytearray
+            Bytearray of all pd0 data
+        offset: int
+            Pointer into pd0_bytes
+        data: dict
+            Dictionary containing previously decoded data
+
+        Returns
+        -------
+        surface_velocity_data:dict
+            Dictionary of decoded data
+        """
+        surface_velocity_format = (('id', '<H', 0),)
+
+        surface_velocity_data = Pd0TRDI.unpack_bytes(pd0_bytes, surface_velocity_format, offset)
+        surface_velocity_data['velocity'] = Pd0TRDI.decode_per_cell_per_beam(pd0_bytes, offset + 2,
+                                                                             data['surface_leader']['cell_count'],
+                                                                             4, '<h')
+        return surface_velocity_data
+
+    @staticmethod
+    def decode_surface_correlation(pd0_bytes, offset, data):
+        """Decodes surface correlation data
+
+        Parameters
+        ----------
+        pd0_bytes: bytearray
+            Bytearray of all pd0 data
+        offset: int
+            Pointer into pd0_bytes
+        data: dict
+            Dictionary containing previously decoded data
+
+        Returns
+        -------
+        surface_velocity_data:dict
+            Dictionary of decoded data
+        """
+        surface_correlation_format = (('id', '<H', 0),)
+
+        surface_correlation_data = Pd0TRDI.unpack_bytes(pd0_bytes, surface_correlation_format, offset)
+        surface_correlation_data['correlation'] = Pd0TRDI.decode_per_cell_per_beam(pd0_bytes, offset + 2,
+                                                                                   data['surface_leader']['cell_count'],
+                                                                                   4, 'B')
+        return surface_correlation_data
+
+    @staticmethod
+    def decode_surface_intensity(pd0_bytes, offset, data):
+        """Decodes surface intensity data
+
+        Parameters
+        ----------
+        pd0_bytes: bytearray
+            Bytearray of all pd0 data
+        offset: int
+            Pointer into pd0_bytes
+        data: dict
+            Dictionary containing previously decoded data
+
+        Returns
+        -------
+        surface_rssi_data:dict
+            Dictionary of decoded data
+        """
+        surface_rssi_format = (('id', '<H', 0),)
+
+        surface_rssi_data = Pd0TRDI.unpack_bytes(pd0_bytes, surface_rssi_format, offset)
+        surface_rssi_data['rssi'] = Pd0TRDI.decode_per_cell_per_beam(pd0_bytes, offset + 2,
+                                                                     data['surface_leader']['cell_count'],
+                                                                     4, 'B')
+        return surface_rssi_data
+
+    @staticmethod
+    def decode_surface_percent_good(pd0_bytes, offset, data):
+        """Decodes surface percent good data
+
+        Parameters
+        ----------
+        pd0_bytes: bytearray
+            Bytearray of all pd0 data
+        offset: int
+            Pointer into pd0_bytes
+        data: dict
+            Dictionary containing previously decoded data
+
+        Returns
+        -------
+        surface_per_good_data:dict
+            Dictionary of decoded data
+        """
+        surface_per_good_format = (('id', '<H', 0),)
+
+        surface_per_good_data = Pd0TRDI.unpack_bytes(pd0_bytes, surface_per_good_format, offset)
+        surface_per_good_data['percent_good'] = Pd0TRDI.decode_per_cell_per_beam(pd0_bytes, offset + 2,
+                                                                                 data['surface_leader']['cell_count'],
+                                                                                 4, 'B')
+        return surface_per_good_data
+
+    @staticmethod
+    def decode_surface_status(pd0_bytes, offset, data):
+        """Decodes surface percent good data
+
+        Parameters
+        ----------
+        pd0_bytes: bytearray
+            Bytearray of all pd0 data
+        offset: int
+            Pointer into pd0_bytes
+        data: dict
+            Dictionary containing previously decoded data
+
+        Returns
+        -------
+        surface_status_data:dict
+            Dictionary of decoded data
+        """
+        surface_status_format = (('id', '<H', 0),)
+
+        surface_status_data = Pd0TRDI.unpack_bytes(pd0_bytes, surface_status_format, offset)
+        surface_status_data['percent_good'] = Pd0TRDI.decode_per_cell_per_beam(pd0_bytes, offset + 2,
+                                                                               data['surface_leader']['cell_count'],
+                                                                               4, 'B')
+        return surface_status_data
+
+    @staticmethod
+    def decode_auto_config(pd0_bytes, offset, data):
+        """Decodes auto configuration data
+
+        Parameters
+        ----------
+        pd0_bytes: bytearray
+            Bytearray of all pd0 data
+        offset: int
+            Pointer into pd0_bytes
+        data: dict
+            Dictionary of previously decoded data
+
+        Returns
+        -------
+        auto_config_data:dict
+            Dictionary of decoded data
+        """
+        auto_config_leader_format = (('id', '<H', 0), ('beam_count', 'B', 2))
+        auto_config_beam_format = (('setup', 'B', 0),
+                                   ('depth', '<H', 1),
+                                   ('ping_count', 'B', 3),
+                                   ('ping_type', 'B', 4),
+                                   ('cell_count', '<H', 5),
+                                   ('cell_size', '<H', 7),
+                                   ('bin_1_mid', '<H', 9),
+                                   ('code_reps', 'B', 11),
+                                   ('transmit_length', '<H', 12),
+                                   ('lag_length', '<H', 15),
+                                   ('transmit_bandwidth', 'B', 16),
+                                   ('receive_bandwidth', 'B', 17),
+                                   ('min_ping_interval', '<H', 18))
+        auto_config_data = {}
+        auto_config_data['leader'] = Pd0TRDI.unpack_bytes(pd0_bytes, auto_config_leader_format, offset)
+
+        for n in range(1, auto_config_data['leader']['beam_count'] + 1):
+            label = 'beam_' + str(n)
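+            # Leader is 3 bytes (2-byte ID + 1-byte beam count); each beam block is 20 bytes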
+            beam_offset = offset + 3 + (20 * (n - 1))
+            auto_config_data[label] = Pd0TRDI.unpack_bytes(pd0_bytes, auto_config_beam_format, beam_offset)
+
+        return auto_config_data
+
+    @staticmethod
+    def decode_vertical_beam(pd0_bytes, offset, data):
+        """Decodes vertical beam data
+
+        Parameters
+        ----------
+        pd0_bytes: bytearray
+            Bytearray of all pd0 data
+        offset: int
+            Pointer into pd0_bytes
+        data: dict
+            Dictionary containing fixed leader data
+
+        Returns
+        -------
+        vertical_beam_data: dict
+            Dictionary of decoded data
+        """
+        vertical_beam_format = (('id', '<H', 0),
+                                ('eval_amp', 'B', 2),
+                                ('rssi', 'B', 3),
+                                ('range', '<L', 4),  # explicit little-endian; native 'L' size varies by platform
+                                ('status', 'B', 8))
+
+        vertical_beam_data = Pd0TRDI.unpack_bytes(pd0_bytes, vertical_beam_format, offset)
+        return vertical_beam_data
+
+    @staticmethod
+    def decode_transformation_matrix(pd0_bytes, offset, data):
+        """Decodes transformation matrix
+
+        Parameters
+        ----------
+        pd0_bytes: bytearray
+            Bytearray of all pd0 data
+        offset: int
+            Pointer into pd0_bytes
+        data: dict
+            Dictionary containing fixed leader data
+
+        Returns
+        -------
+        matrix_data: dict
+            Dictionary of decoded data
+        """
+        matrix_id_format = (('id', '<H', 0),)
+        matrix_data_format = (('element', '<h', 0),)
+
+        matrix_data = Pd0TRDI.unpack_bytes(pd0_bytes, matrix_id_format, offset)
+        matrix = []
+        for row in range(4):
+            row_list = []
+            for col in range(4):
+                offset = offset + 2
+                row_list.append(Pd0TRDI.unpack_bytes(pd0_bytes, matrix_data_format, offset)['element'])
+            matrix.append(row_list)
+        matrix_data['matrix'] = matrix
+
+        return matrix_data
+
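+
+# Illustrative sketch only (not part of the QRev API): shows how the 4x4
+# transformation matrix decoded above is typically applied, after the
+# 1/10000 scaling performed in Inst.populate_data, to rotate beam
+# velocities into instrument coordinates.  The function name and example
+# usage are assumptions for demonstration.
+def _example_apply_t_matrix(t_matrix, beam_velocities):
+    """Rotate a 4-element beam velocity vector using the scaled matrix."""
+    return np.array(t_matrix).dot(np.array(beam_velocities))
+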
+
+class Hdr(object):
+    """Class to hold header variables.
+
+    Attributes
+    ----------
+    bytes_per_ens: int
+        Number of bytes in ensemble
+    data_offsets: int
+        File offset to start of ensemble
+    n_data_types: int
+        Number of data types in ensemble
+    data_ok: int
+        Flag for valid data (not populated by populate_data)
+    invalid: str
+        Leader ID that was not recognized
+    """
+
+    def __init__(self, n_ensembles, n_types):
+        """Initialize instance variables to empty arrays.
+
+        Parameters
+        ----------
+        n_ensembles: int
+            Number of ensembles
+        n_types: int
+            Number of data types
+        """
+        self.bytes_per_ens = nans(n_ensembles)
+        self.data_offsets = nans([n_ensembles, n_types])
+        self.n_data_types = nans(n_ensembles)
+        self.data_ok = nans(n_ensembles)
+        self.invalid = [''] * n_ensembles
+
+    def populate_data(self, n_ens, data):
+        """Populates the class with data for an ensemble.
+
+        Parameters
+        ----------
+        n_ens: int
+            Ensemble index
+        data: dict
+            Dictionary of all data for this ensemble
+        """
+
+        if 'header' in data:
+            self.bytes_per_ens[n_ens] = data['header']['number_of_bytes']
+            self.data_offsets[n_ens, :len(data['header']['address_offsets'])] = \
+                np.array(data['header']['address_offsets'])
+            self.n_data_types[n_ens] = data['header']['number_of_data_types']
+            self.invalid[n_ens] = data['header']['invalid']
+
+
+class Inst(object):
+    """Class to hold information about the instrument.
+
+    Attributes
+    ----------
+    beam_ang: np.array(int)
+        Angle of transducers in degrees
+    beams: np.array(int)
+        Number of beams used for velocity
+    data_type: list
+        Data type
+    firm_ver: np.array(str)
+        Firmware version
+    freq: np.array(int)
+        Frequency of ADCP in kHz
+    pat: list
+        Beam pattern
+    res_RDI: int
+        Reserved for TRDI
+    sensor_CFG: np.array(int)
+        Sensor configuration
+    xducer: list
+        Indicates if transducer is attached
+    t_matrix: np.array(float)
+        Transformation matrix
+    demod: np.array(int)
+        Demodulation code
+    serial_number: int
+        serial number of ADCP
+    """
+
+    def __init__(self, n_ensembles):
+        """Initialize instance variables.
+
+        Parameters
+        ----------
+        n_ensembles: int
+            Number of ensembles
+        """
+
+        # TODO change n_ensembles to (ensembles,)
+        self.beam_ang = nans(n_ensembles)
+        self.beams = nans(n_ensembles)
+        self.data_type = [''] * n_ensembles
+        self.firm_ver = nans(n_ensembles)
+        self.freq = nans(n_ensembles)
+        self.pat = [''] * n_ensembles
+        self.res_RDI = 0
+        self.sensor_CFG = nans(n_ensembles)
+        self.xducer = [''] * n_ensembles
+        self.t_matrix = np.tile([np.nan], [4, 4])
+        self.demod = nans(n_ensembles)
+        self.serial_number = np.nan
+
+    def populate_data(self, i_ens, data):
+        """Populates the class with data for an ensemble.
+
+        Parameters
+        ----------
+        i_ens: int
+            Ensemble index
+        data: dict
+            Dictionary of all data for this ensemble
+        """
+
+        if 'fixed_leader' in data:
+            self.firm_ver[i_ens] = data['fixed_leader']['cpu_firmware_version'] + \
+                                   (data['fixed_leader']['cpu_firmware_revision'] / 100)
+
+            # Convert system_configuration_ls to individual bits
+            bitls = "{0:08b}".format(data['fixed_leader']['system_configuration_ls'])
+            val = int(bitls[5:], 2)
+            if val == 0:
+                self.freq[i_ens] = 75
+            elif val == 1:
+                self.freq[i_ens] = 150
+            elif val == 2:
+                self.freq[i_ens] = 300
+            elif val == 3:
+                self.freq[i_ens] = 600
+            elif val == 4:
+                self.freq[i_ens] = 1200
+            elif val == 5:
+                self.freq[i_ens] = 2400
+            else:
+                self.freq[i_ens] = np.nan
+
+            val = int(bitls[4], 2)
+            if val == 0:
+                self.pat[i_ens] = 'Concave'
+            elif val == 1:
+                self.pat[i_ens] = 'Convex'
+            else:
+                self.pat[i_ens] = 'n/a'
+
+            # Bits 4-5 of the LSB define the sensor configuration
+            self.sensor_CFG[i_ens] = int(bitls[2:4], 2) + 1
+
+            val = int(bitls[1], 2)
+            if val == 0:
+                self.xducer[i_ens] = 'Not Attached'
+            elif val == 1:
+                self.xducer[i_ens] = 'Attached'
+            else:
+                self.xducer[i_ens] = 'n/a'
+
+            # Convert system_configuration_ms to individual bits
+            bitms = "{0:08b}".format(data['fixed_leader']['system_configuration_ms'])
+
+            val = int(bitms[6:], 2)
+            if val == 0:
+                self.beam_ang[i_ens] = 15
+            elif val == 1:
+                self.beam_ang[i_ens] = 20
+            elif val == 2:
+                self.beam_ang[i_ens] = 30
+            else:
+                self.beam_ang[i_ens] = np.nan
+
+            val = int(bitms[:4], 2)
+            if val == 4:
+                self.beams[i_ens] = 4
+            elif val == 5:
+                self.beams[i_ens] = 5
+                self.demod[i_ens] = 1
+            elif val == 15:
+                self.beams[i_ens] = 5
+                self.demod[i_ens] = 2
+            else:
+                self.beams[i_ens] = np.nan
+                self.demod[i_ens] = np.nan
+
+            if data['fixed_leader']['simulation_data_flag'] == 0:
+                self.data_type[i_ens] = 'Real'
+            else:
+                self.data_type[i_ens] = 'Simu'
+
+            self.serial_number = data['fixed_leader']['serial_number']
+
+        if 'transformation_matrix' in data:
+            self.res_RDI = 0
+            # Scale transformation matrix
+            self.t_matrix = np.array(data['transformation_matrix']['matrix']) / 10000
+
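+
+# Illustrative sketch only (not used by the classes above): decodes the PD0
+# system configuration least-significant byte the same way Inst.populate_data
+# does.  For example, 0b11000010 yields frequency code 2 (300 kHz), a concave
+# beam pattern, and an attached transducer.
+def _example_decode_sysconfig_ls(byte_value):
+    bits = "{0:08b}".format(byte_value)
+    frequency_code = int(bits[5:], 2)   # bits 0-2: frequency table index
+    beam_pattern = int(bits[4], 2)      # bit 3: 0 = concave, 1 = convex
+    xducer_attached = int(bits[1], 2)   # bit 6: transducer head attached
+    return frequency_code, beam_pattern, xducer_attached
+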
+
+class AutoMode(object):
+    """Class to hold auto configuration mode settings for each beam.
+
+    Attributes
+    ----------
+    beam_count: np.array(int)
+        Number of beams
+    Beam1: Beam
+        Object of class Beam
+    Beam2: Beam
+        Object of class Beam
+    Beam3: Beam
+        Object of class Beam
+    Beam4: Beam
+        Object of class Beam
+    Reserved: np.array
+    """
+
+    def __init__(self, n_ensembles):
+        """Initialize instance variables.
+
+        Parameters
+        ----------
+        n_ensembles: int
+            Number of ensembles
+        """
+        self.beam_count = nans(n_ensembles)
+        self.Beam1 = Beam(n_ensembles)
+        self.Beam2 = Beam(n_ensembles)
+        self.Beam3 = Beam(n_ensembles)
+        self.Beam4 = Beam(n_ensembles)
+        self.Reserved = nans(n_ensembles)
+
+    def populate_data(self, i_ens, data):
+        """Populates the class with data for an ensemble.
+
+        Parameters
+        ----------
+        i_ens: int
+            Ensemble index
+        data: dict
+            Dictionary of all data for this ensemble
+        """
+
+        if 'auto_configuration' in data:
+            self.beam_count[i_ens] = data['auto_configuration']['leader']['beam_count']
+            self.Beam1.populate_data(i_ens, data['auto_configuration']['beam_1'])
+            self.Beam2.populate_data(i_ens, data['auto_configuration']['beam_2'])
+            self.Beam3.populate_data(i_ens, data['auto_configuration']['beam_3'])
+            self.Beam4.populate_data(i_ens, data['auto_configuration']['beam_4'])
+
+
+class Beam(object):
+    """Class to hold auto configuration settings for a beam.
+
+    Attributes
+    ----------
+    mode: np.array(int)
+        Water mode
+    depth_cm: np.array(int)
+        Depth in cm
+    ping_count: np.array(int)
+        Number of pings
+    ping_type: np.array(int)
+        Type of pings
+    cell_count: np.array(int)
+        Number of cells
+    cell_size_cm: np.array(int)
+        Cell size in cm
+    cell_mid_cm: np.array(int)
+        Distance to center of cell 1 in cm
+    code_repeat: np.array(int)
+        Number of code repeats
+    trans_length_cm: np.array(int)
+        Transmit length in cm
+    lag_length_cm: np.array(int)
+        Lag length in cm
+    transmit_bw: np.array(int)
+        Transmit bandwidth
+    receive_bw: np.array(int)
+        Receive bandwidth
+    ping_interval_ms: np.array(int)
+        Time between pings in ms
+    """
+
+    def __init__(self, n_ensembles):
+        """Initialize instance variables.
+
+        Parameters
+        ----------
+        n_ensembles: int
+            Number of ensembles
+        """
+
+        self.mode = nans(n_ensembles)
+        self.depth_cm = nans(n_ensembles)
+        self.ping_count = nans(n_ensembles)
+        self.ping_type = nans(n_ensembles)
+        self.cell_count = nans(n_ensembles)
+        self.cell_size_cm = nans(n_ensembles)
+        self.cell_mid_cm = nans(n_ensembles)
+        self.code_repeat = nans(n_ensembles)
+        self.trans_length_cm = nans(n_ensembles)
+        self.lag_length_cm = nans(n_ensembles)
+        self.transmit_bw = nans(n_ensembles)
+        self.receive_bw = nans(n_ensembles)
+        self.ping_interval_ms = nans(n_ensembles)
+
+    def populate_data(self, i_ens, beam_data):
+        """Populates the class with data for an ensemble.
+
+        Parameters
+        ----------
+        i_ens: int
+            Ensemble index
+        beam_data: dict
+            Dictionary of auto configuration data for this beam
+        """
+
+        self.mode[i_ens] = beam_data['setup']
+        self.depth_cm[i_ens] = beam_data['depth']
+        self.ping_count[i_ens] = beam_data['ping_count']
+        self.ping_type[i_ens] = beam_data['ping_type']
+        self.cell_count[i_ens] = beam_data['cell_count']
+        self.cell_size_cm[i_ens] = beam_data['cell_size']
+        self.cell_mid_cm[i_ens] = beam_data['bin_1_mid']
+        self.code_repeat[i_ens] = beam_data['code_reps']
+        self.trans_length_cm[i_ens] = beam_data['transmit_length']
+        self.lag_length_cm[i_ens] = beam_data['lag_length']
+        self.transmit_bw[i_ens] = beam_data['transmit_bandwidth']
+        self.receive_bw[i_ens] = beam_data['receive_bandwidth']
+        self.ping_interval_ms[i_ens] = beam_data['min_ping_interval']
+
+
+class Bt(object):
+    """Class to hold bottom track data.
+
+    Attributes
+    ----------
+    corr: np.array(int)
+        Correlation for each beam
+    depth_m: np.array(float)
+        Depth for each beam
+    eval_amp: np.array(int)
+        Return amplitude for each beam
+    ext_depth_cm: np.array(int)
+        External depth in cm
+    pergd: np.array(int)
+        Percent good
+    rssi: np.array(int)
+        Return signal strength indicator in counts for each beam
+    vel_mps: np.array(float)
+        Velocity in m/s, rows depend on coordinate system
+    """
+
+    def __init__(self, n_ensembles, n_velocities):
+        """Initialize instance variables.
+
+        Parameters
+        ----------
+        n_ensembles: int
+            Number of ensembles
+        n_velocities: int
+            Number of velocity beams
+        """
+
+        self.corr = nans([n_velocities, n_ensembles])
+        self.depth_m = nans([n_velocities, n_ensembles])
+        self.eval_amp = nans([n_velocities, n_ensembles])
+        self.ext_depth_cm = nans(n_ensembles)
+        self.pergd = nans([n_velocities, n_ensembles])
+        self.rssi = nans([n_velocities, n_ensembles])
+        self.vel_mps = nans([n_velocities, n_ensembles])
+
+    def populate_data(self, i_ens, data):
+        """Populates the class with data for an ensemble.
+
+        Parameters
+        ----------
+        i_ens: int
+            Ensemble index
+        data: dict
+            Dictionary of all data for this ensemble
+        """
+
+        if 'bottom_track' in data:
+            # Combine LSB and MSB bytes (MSB * 2**16) and convert from cm to m
+            self.depth_m[0:4, i_ens] = (np.squeeze(np.array(data['bottom_track']['range_lsb']).T) +
+                                        np.squeeze(np.array(data['bottom_track']['range_msb']).T) * 2**16) / 100.
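+            # e.g. range_lsb 3000 with range_msb 1 -> (3000 + 65536) cm = 685.36 m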
+            self.vel_mps[0:4, i_ens] = np.squeeze(np.array(data['bottom_track']['velocity']).T)
+            self.corr[0:4, i_ens] = np.squeeze(np.array(data['bottom_track']['correlation']).T)
+            self.eval_amp[0:4, i_ens] = np.squeeze(np.array(data['bottom_track']['amplitude']).T)
+            self.pergd[0:4, i_ens] = np.squeeze(np.array(data['bottom_track']['percent_good']).T)
+            self.rssi[0:4, i_ens] = np.squeeze(np.array(data['bottom_track']['rssi']).T)
+
+
+class Cfg(object):
+    """Class to hold configuration settings.
+
+    Attributes
+    ----------
+    ba: np.array(int)
+        Bottom track amplitude threshold
+    bc: np.array(int)
+        Bottom track correlation threshold
+    be_mmps: np.array(int)
+        Bottom track error velocity threshold
+    bg: np.array(int)
+        Bottom track percent good threshold
+    bm: np.array(int)
+        Bottom mode
+    bp: np.array(int)
+        Number of bottom pings
+    bx_dm: np.array(int)
+        Maximum tracking depth in decimeters
+    code_reps: np.array(int)
+        Number of code repetitions
+    coord_sys: np.array(str)
+        Coordinate system
+    cpu_ser_no: np.array(int)
+        CPU serial number
+    cq: np.array(int)
+        Transmit power
+    cx: np.array(int)
+        Low latency trigger
+    dist_bin1_cm: np.array(int)
+        Distance to center of bin 1 from transducer
+    ea_deg: np.array(int)
+        Heading alignment
+    eb_deg: np.array(int)
+        Heading bias
+    sensor_avail: np.array(str)
+        Sensor availability codes
+    ex: np.array(str)
+        Coordinate transformation codes
+    ez: np.array(str)
+        Sensor codes
+    head_src: np.array(str)
+        Heading source
+    lag_cm: np.array(int)
+        Lag
+    map_bins: np.array(str)
+        Bin mapping
+    n_beams: np.array(int)
+        Number of velocity beams
+    pitch_src: np.array(str)
+        Source of pitch data
+    ref_lay_end_cell: np.array(int)
+        Reference layer end
+    ref_lay_str_cell: np.array(int)
+        Reference layer start
+    roll_src: np.array(str)
+        Roll source
+    sal_src: np.array(str)
+        Salinity source
+    wm: np.array(int)
+        Water mode
+    sos_src: np.array(str)
+        Speed of sound source
+    temp_src: np.array(str)
+        Temperature source
+    tp_sec: np.array(int)
+        Time between pings
+    use_3beam: np.array(str)
+        Setting on whether to use 3-beam solutions or not
+    use_pr: np.array(str)
+        Setting to use pitch and roll or not
+    wa: np.array(int)
+        Water track amplitude threshold
+    wb: np.array(int)
+        Water track bandwidth control
+    wc: np.array(int)
+        Water track correlation threshold
+    we_mmps: np.array(int)
+        Water track error velocity threshold
+    wf_cm: np.array(int)
+        Blank after transmit
+    wg_per: np.array(int)
+        Water track percent good threshold
+    wj: np.array(int)
+        Receiver gain setting
+    wn: np.array(int)
+        Number of depth cells (bins)
+    wp: np.array(int)
+        Number of water pings
+    ws_cm: np.array(int)
+        Bin size
+    xdcr_dep_srs: np.array(str)
+        Transducer depth source
+    xmit_pulse_cm: np.array(int)
+        Transmit pulse length
+    lag_near_bottom: np.array(int)
+        Lag near bottom setting
+    """
+
+    def __init__(self, n_ensembles):
+        """Initialize instance variables.
+
+        Parameters
+        ----------
+        n_ensembles: int
+            Number of ensembles
+        """
+
+        self.ba = nans(n_ensembles)
+        self.bc = nans(n_ensembles)
+        self.be_mmps = nans(n_ensembles)
+        self.bg = nans(n_ensembles)
+        self.bm = nans(n_ensembles)
+        self.bp = nans(n_ensembles)
+        self.bx_dm = nans(n_ensembles)
+        self.code_reps = nans(n_ensembles)
+        self.coord_sys = [''] * n_ensembles
+        self.cpu_ser_no = nans([n_ensembles, 8])
+        self.cq = nans(n_ensembles)
+        self.cx = nans(n_ensembles)
+        self.dist_bin1_cm = nans(n_ensembles)
+        self.ea_deg = nans(n_ensembles)
+        self.eb_deg = nans(n_ensembles)
+        self.sensor_avail = [''] * n_ensembles
+        self.ex = [''] * n_ensembles
+        self.ez = [''] * n_ensembles
+        self.head_src = [''] * n_ensembles
+        self.lag_cm = nans(n_ensembles)
+        self.map_bins = [''] * n_ensembles
+        self.n_beams = nans(n_ensembles)
+        self.pitch_src = [''] * n_ensembles
+        self.ref_lay_end_cell = nans(n_ensembles)
+        self.ref_lay_str_cell = nans(n_ensembles)
+        self.roll_src = [''] * n_ensembles
+        self.sal_src = [''] * n_ensembles
+        self.wm = nans(n_ensembles)
+        self.sos_src = [''] * n_ensembles
+        self.temp_src = [''] * n_ensembles
+        self.tp_sec = nans(n_ensembles)
+        self.use_3beam = [''] * n_ensembles
+        self.use_pr = [''] * n_ensembles
+        self.wa = nans(n_ensembles)
+        self.wb = nans(n_ensembles)
+        self.wc = nans(n_ensembles)
+        self.we_mmps = nans(n_ensembles)
+        self.wf_cm = nans(n_ensembles)
+        self.wg_per = nans(n_ensembles)
+        self.wj = nans(n_ensembles)
+        self.wn = nans(n_ensembles)
+        self.wp = nans(n_ensembles)
+        self.ws_cm = nans(n_ensembles)
+        self.xdcr_dep_srs = [''] * n_ensembles
+        self.xmit_pulse_cm = nans(n_ensembles)
+        self.lag_near_bottom = nans(n_ensembles)
+
+    def populate_data(self, i_ens, data):
+        """Populates the class with data for an ensemble.
+
+        Parameters
+        ----------
+        i_ens: int
+            Ensemble index
+        data: dict
+            Dictionary of all data for this ensemble
+        """
+
+        if 'fixed_leader' in data:
+            self.n_beams[i_ens] = data['fixed_leader']['number_of_beams']
+            self.wn[i_ens] = data['fixed_leader']['number_of_cells']
+            self.wp[i_ens] = data['fixed_leader']['number_of_water_pings']
+            self.ws_cm[i_ens] = data['fixed_leader']['depth_cell_size']
+            self.wf_cm[i_ens] = data['fixed_leader']['blank_after_transmit']
+            self.wm[i_ens] = data['fixed_leader']['water_mode']
+            self.wc[i_ens] = data['fixed_leader']['low_correlation_threshold']
+            self.code_reps[i_ens] = data['fixed_leader']['number_of_code_repetitions']
+            self.wg_per[i_ens] = data['fixed_leader']['minimum_percentage_water_profile_pings']
+            self.we_mmps[i_ens] = data['fixed_leader']['error_velocity_threshold']
+            self.tp_sec[i_ens] = data['fixed_leader']['minutes'] * 60. + \
+                                     data['fixed_leader']['seconds'] + \
+                                     data['fixed_leader']['hundredths'] * 0.01
+
+            # Convert coordinate_transformation_process to individual bits
+            self.ex[i_ens] = "{0:08b}".format(data['fixed_leader']['coordinate_transformation_process'])
+
+            val = int(self.ex[i_ens][3:5], 2)
+            if val == 0:
+                self.coord_sys[i_ens] = 'Beam'
+            elif val == 1:
+                self.coord_sys[i_ens] = 'Inst'
+            elif val == 2:
+                self.coord_sys[i_ens] = 'Ship'
+            elif val == 3:
+                self.coord_sys[i_ens] = 'Earth'
+            else:
+                self.coord_sys[i_ens] = "N/a"
+
+            val = int(self.ex[i_ens][5], 2)
+            if val == 0:
+                self.use_pr[i_ens] = 'No'
+            elif val == 1:
+                self.use_pr[i_ens] = 'Yes'
+            else:
+                self.use_pr[i_ens] = 'N/a'
+
+            val = int(self.ex[i_ens][6], 2)
+            if val == 0:
+                self.use_3beam[i_ens] = 'No'
+            elif val == 1:
+                self.use_3beam[i_ens] = 'Yes'
+            else:
+                self.use_3beam[i_ens] = 'N/a'
+
+            val = int(self.ex[i_ens][7], 2)
+            if val == 0:
+                self.map_bins[i_ens] = 'No'
+            elif val == 1:
+                self.map_bins[i_ens] = 'Yes'
+            else:
+                self.map_bins[i_ens] = 'N/a'
+
+            self.ea_deg[i_ens] = data['fixed_leader']['heading_alignment'] * 0.01
+            self.eb_deg[i_ens] = data['fixed_leader']['heading_bias'] * 0.01
+
+            # Convert sensor_source to individual bits
+            self.ez[i_ens] = "{0:08b}".format(data['fixed_leader']['sensor_source'])
+
+            val = int(self.ez[i_ens][:2], 2)
+            if val == 0:
+                self.sos_src[i_ens] = 'Manual EC'
+            elif val == 1:
+                self.sos_src[i_ens] = 'Calculated'
+            elif val == 3:
+                self.sos_src[i_ens] = 'SVSS Sensor'
+            else:
+                self.sos_src[i_ens] = 'N/a'
+
+            val = int(self.ez[i_ens][2], 2)
+            if val == 0:
+                self.xdcr_dep_srs[i_ens] = 'Manual ED'
+            elif val == 1:
+                self.xdcr_dep_srs[i_ens] = 'Sensor'
+            else:
+                self.xdcr_dep_srs[i_ens] = 'N/a'
+
+            val = int(self.ez[i_ens][3], 2)
+            if val == 0:
+                self.head_src[i_ens] = 'Manual EH'
+            elif val == 1:
+                self.head_src[i_ens] = 'Int. Sensor'
+            else:
+                self.head_src[i_ens] = 'N/a'
+
+            val = int(self.ez[i_ens][4], 2)
+            if val == 0:
+                self.pitch_src[i_ens] = 'Manual EP'
+            elif val == 1:
+                self.pitch_src[i_ens] = 'Int. Sensor'
+            else:
+                self.pitch_src[i_ens] = 'N/a'
+
+            val = int(self.ez[i_ens][5], 2)
+            if val == 0:
+                self.roll_src[i_ens] = 'Manual ER'
+            elif val == 1:
+                self.roll_src[i_ens] = 'Int. Sensor'
+            else:
+                self.roll_src[i_ens] = 'N/a'
+
+            # Bit 6 is the salinity source (ES command), stored in sal_src
+            val = int(self.ez[i_ens][6], 2)
+            if val == 0:
+                self.sal_src[i_ens] = 'Manual ES'
+            elif val == 1:
+                self.sal_src[i_ens] = 'Int. Sensor'
+            else:
+                self.sal_src[i_ens] = 'N/a'
+
+            val = int(self.ez[i_ens][7], 2)
+            if val == 0:
+                self.temp_src[i_ens] = 'Manual ET'
+            elif val == 1:
+                self.temp_src[i_ens] = 'Int. Sensor'
+            else:
+                self.temp_src[i_ens] = 'N/a'
+
+            self.sensor_avail[i_ens] = "{0:08b}".format(data['fixed_leader']['sensor_available'])
+            self.dist_bin1_cm[i_ens] = data['fixed_leader']['bin_1_distance']
+            self.xmit_pulse_cm[i_ens] = data['fixed_leader']['transmit_pulse_length']
+            self.ref_lay_str_cell[i_ens] = data['fixed_leader']['starting_depth_cell']
+            self.ref_lay_end_cell[i_ens] = data['fixed_leader']['ending_depth_cell']
+            self.wa[i_ens] = data['fixed_leader']['false_target_threshold']
+            self.cx[i_ens] = data['fixed_leader']['low_latency_trigger']
+            self.lag_cm[i_ens] = data['fixed_leader']['transmit_lag_distance']
+            self.cpu_ser_no[i_ens] = data['fixed_leader']['cpu_board_serial_number']
+            self.wb[i_ens] = data['fixed_leader']['system_bandwidth']
+            self.cq[i_ens] = data['fixed_leader']['system_power']
+
+        if 'variable_leader' in data:
+            self.lag_near_bottom[i_ens] = data['variable_leader']['lag_near_bottom']
+
+        if 'bottom_track' in data:
+            self.bp[i_ens] = data['bottom_track']['pings_per_ensemble_bp']
+            self.bc[i_ens] = data['bottom_track']['correlation_magnitude_minimum_bc']
+            self.ba[i_ens] = data['bottom_track']['evaluation_amplitude_minimum_ba']
+            self.bg[i_ens] = data['bottom_track']['percent_good_minimum_bg']
+            self.bm[i_ens] = data['bottom_track']['bottom_track_mode_bm']
+            self.be_mmps[i_ens] = data['bottom_track']['error_velocity_maximum_be']
+
+
+class Gps(object):
+    """Class to hold GPS data from WinRiver. CLASS NOT USED
+
+    Attributes
+    ----------
+    alt_m: np.array(float)
+        Altitude in meters
+    gga_diff: np.array(int)
+        Differential correction indicator
+    gga_hdop: np.array(float)
+        Horizontal dilution of precision
+    gga_n_stats: np.array(int)
+        Number of satellites
+    gga_vel_e_mps: np.array(float)
+        Velocity in east direction from GGA data
+    gga_vel_n_mps: np.array(float)
+        Velocity in north direction from GGA data
+    gsa_p_dop: np.array(int)
+        Position dilution of precision
+    gsa_sat: np.array(int)
+        Satellites
+    gsa_v_dop: np.array(float)
+        Vertical dilution of precision
+    lat_deg: np.array(float)
+        Latitude in degrees
+    long_deg: np.array(float)
+        Longitude in degrees
+    vtg_vel_e_mps: np.array(float)
+        Velocity in east direction from VTG data
+    vtg_vel_n_mps: np.array(float)
+        Velocity in north direction from VTG data
+    """
+
+    def __init__(self, n_ensembles):
+        """Initialize instance variables.
+
+        Parameters
+        ----------
+        n_ensembles: int
+            Number of ensembles
+        """
+
+        self.alt_m = nans(n_ensembles)
+        self.gga_diff = nans(n_ensembles)
+        self.gga_hdop = nans(n_ensembles)
+        self.gga_n_stats = nans(n_ensembles)
+        self.gga_vel_e_mps = nans(n_ensembles)
+        self.gga_vel_n_mps = nans(n_ensembles)
+        self.gsa_p_dop = nans(n_ensembles)
+        self.gsa_sat = nans([n_ensembles, 6])
+        self.gsa_v_dop = nans(n_ensembles)
+        self.lat_deg = nans(n_ensembles)
+        self.long_deg = nans(n_ensembles)
+        self.vtg_vel_e_mps = nans(n_ensembles)
+        self.vtg_vel_n_mps = nans(n_ensembles)
+
+
+class Gps2(object):
+    """Class to hold GPS data for WinRiver II.
+
+    Attributes
+    ----------
+    gga_delta_time: np.array(float)
+        Time between ping and gga data
+    gga_header: list
+        GGA header
+    gga_sentence: list
+        GGA sentence
+    utc: np.array(float)
+        UTC time
+    lat_deg: np.array(float)
+        Latitude in degrees
+    lat_ref: list
+        Latitude reference
+    lon_deg: np.array(float)
+        Longitude in degrees
+    lon_ref: list
+        Longitude reference
+    corr_qual: np.array(float)
+        Differential quality indicator
+    num_sats: np.array(int)
+        Number of satellites
+    hdop: np.array(float)
+        Horizontal dilution of precision
+    alt: np.array(float)
+        Altitude
+    alt_unit: list
+        Units for altitude
+    geoid: np.array(float)
+        Geoid height
+    geoid_unit: list
+        Units for geoid height
+    d_gps_age: np.array(float)
+        Age of differential correction
+    ref_stat_id: np.array(float)
+        Reference station ID
+    vtg_delta_time: np.array(float)
+        Time between ping and VTG data
+    vtg_header: list
+        VTG header
+    vtg_sentence: list
+        VTG sentence
+    course_true: np.array(float)
+        Course relative to true north
+    true_indicator: list
+        True north indicator
+    course_mag: np.array(float)
+        Course relative to magnetic north
+    mag_indicator: list
+        Magnetic north indicator
+    speed_knots: np.array(float)
+        Speed in knots
+    knots_indicator: list
+        Knots indicator
+    speed_kph: np.array(float)
+        Speed in kilometers per hour
+    kph_indicator: list
+        Kilometers per hour indicator
+    mode_indicator: list
+        Mode indicator
+    dbt_delta_time: np.array(float)
+        Time between ping and echo sounder data
+    dbt_header: list
+        Echo sounder header
+    depth_ft: np.array(float)
+        Depth in ft from echo sounder
+    ft_indicator: list
+        Feet indicator
+    depth_m: np.array(float)
+        Depth in meters from echo sounder
+    m_indicator: list
+        Meters indicator
+    depth_fath: np.array(float)
+        Depth in fathoms from echo sounder
+    fath_indicator: list
+        Fathoms indicator
+    hdt_delta_time: np.array(float)
+        Time between ping and external heading data
+    hdt_header: list
+        External heading header
+    heading_deg: np.array(float)
+        Heading in degrees from external heading
+    h_true_indicator: list
+        Heading indicator to true north
+    gga_velE_mps: np.array(float)
+        Velocity in east direction in m/s from GGA for WR
+    gga_velN_mps: np.array(float)
+        Velocity in north direction in m/s from GGA for WR
+    vtg_velE_mps: np.array(float)
+        Velocity in east direction in m/s from VTG for WR
+    vtg_velN_mps: np.array(float)
+        Velocity in north direction in m/s from VTG for WR
+    """
+
+    def __init__(self, n_ensembles, wr2):
+        """Initialize instance variables.
+
+        Parameters
+        ----------
+        n_ensembles: int
+            Number of ensembles
+        wr2: bool
+            Setting of whether data is from WR or WR2
+        """
+
+        self.gga_delta_time = np.full([n_ensembles, 20], np.nan)
+        self.gga_header = np.full([n_ensembles, 20], '      ')
+        self.gga_sentence = np.full([n_ensembles, 20], '')
+        self.utc = np.full([n_ensembles, 20], np.nan)
+        self.lat_deg = np.zeros([n_ensembles, 20])
+        self.lat_ref = np.full([n_ensembles, 20], '')
+        self.lon_deg = np.zeros([n_ensembles, 20])
+        self.lon_ref = np.full([n_ensembles, 20], '')
+        self.corr_qual = np.full([n_ensembles, 20], np.nan)
+        self.num_sats = np.full([n_ensembles, 20], np.nan)
+        self.hdop = np.full([n_ensembles, 20], np.nan)
+        self.alt = np.full([n_ensembles, 20], np.nan)
+        self.alt_unit = np.full([n_ensembles, 20], '')
+        self.geoid = np.full([n_ensembles, 20], np.nan)
+        self.geoid_unit = np.full([n_ensembles, 20], '')
+        self.d_gps_age = np.full([n_ensembles, 20], np.nan)
+        self.ref_stat_id = np.full([n_ensembles, 20], np.nan)
+        self.vtg_delta_time = np.full([n_ensembles, 20], np.nan)
+        self.vtg_header = np.full([n_ensembles, 20], '      ')
+        self.vtg_sentence = np.full([n_ensembles, 20], '')
+        self.course_true = np.full([n_ensembles, 20], np.nan)
+        self.true_indicator = np.full([n_ensembles, 20], '')
+        self.course_mag = np.full([n_ensembles, 20], np.nan)
+        self.mag_indicator = np.full([n_ensembles, 20], '')
+        self.speed_knots = np.full([n_ensembles, 20], np.nan)
+        self.knots_indicator = np.full([n_ensembles, 20], '')
+        self.speed_kph = np.zeros([n_ensembles, 20])
+        self.kph_indicator = np.full([n_ensembles, 20], '')
+        self.mode_indicator = np.full([n_ensembles, 20], '')
+        self.dbt_delta_time = np.full([n_ensembles, 20], np.nan)
+        self.dbt_header = np.full([n_ensembles, 20], '      ')
+        self.depth_ft = np.full([n_ensembles, 20], np.nan)
+        self.ft_indicator = np.full([n_ensembles, 20], '')
+        self.depth_m = np.zeros([n_ensembles, 20])
+        self.m_indicator = np.full([n_ensembles, 20], '')
+        self.depth_fath = np.full([n_ensembles, 20], np.nan)
+        self.fath_indicator = np.full([n_ensembles, 20], '')
+        self.hdt_delta_time = np.full([n_ensembles, 20], np.nan)
+        self.hdt_header = np.full([n_ensembles, 20], '      ')
+        self.heading_deg = np.full([n_ensembles, 20], np.nan)
+        self.h_true_indicator = np.full([n_ensembles, 20], '')
+
+        # Always allocated; the wr2 flag is currently unused
+        self.gga_velE_mps = nans(n_ensembles)
+        self.gga_velN_mps = nans(n_ensembles)
+        self.vtg_velE_mps = nans(n_ensembles)
+        self.vtg_velN_mps = nans(n_ensembles)
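+        # Twenty columns are preallocated per ensemble; the *_expand methods below
+        # grow these arrays when an ensemble carries more NMEA sentences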
+
+    def populate_data(self, i_ens, data):
+        """Populates the class with data for an ensemble.
+
+        Parameters
+        ----------
+        i_ens: int
+            Ensemble index
+        data: dict
+            Dictionary of all data for this ensemble
+        """
+
+        if 'gga' in data:
+
+            # Check size and expand if needed
+            if len(data['gga']) > self.gga_delta_time.shape[1]:
+                self.gga_expand(len(data['gga']))
+
+            for n, gga_data in enumerate(data['gga']):
+                # Try implemented because of occasional garbage in data stream.
+                # This prevents a crash and data after garbage are not used, but any data before garbage is saved
+                try:
+                    self.gga_delta_time[i_ens, n] = gga_data['delta_time']
+                    self.gga_header[i_ens, n] = gga_data['header']
+                    self.utc[i_ens, n] = gga_data['utc']
+                    self.lat_deg[i_ens, n] = gga_data['lat_deg']
+                    self.lat_ref[i_ens, n] = gga_data['lat_ref']
+                    self.lon_deg[i_ens, n] = gga_data['lon_deg']
+                    self.lon_ref[i_ens, n] = gga_data['lon_ref']
+                    self.corr_qual[i_ens, n] = gga_data['corr_qual']
+                    self.num_sats[i_ens, n] = gga_data['num_sats']
+                    self.hdop[i_ens, n] = gga_data['hdop']
+                    self.alt[i_ens, n] = gga_data['alt']
+                    self.alt_unit[i_ens, n] = gga_data['alt_unit']
+                    self.geoid[i_ens, n] = gga_data['geoid']
+                    self.geoid_unit[i_ens, n] = gga_data['geoid_unit']
+                    self.d_gps_age[i_ens, n] = gga_data['d_gps_age']
+                    self.ref_stat_id[i_ens, n] = gga_data['ref_stat_id']
+                except Exception:
+                    pass
+
+        if 'vtg' in data:
+
+            # Check size and expand if needed
+            if len(data['vtg']) > self.vtg_delta_time.shape[1]:
+                self.vtg_expand(len(data['vtg']))
+
+            for n, vtg_data in enumerate(data['vtg']):
+                # Try implemented because of occasional garbage in data stream.
+                # This prevents a crash and data after garbage are not used, but any data before garbage is saved
+                try:
+                    self.vtg_delta_time[i_ens, n] = vtg_data['delta_time']
+                    self.vtg_header[i_ens, n] = vtg_data['header']
+                    self.course_true[i_ens, n] = vtg_data['course_true']
+                    self.true_indicator[i_ens, n] = vtg_data['true_indicator']
+                    self.course_mag[i_ens, n] = vtg_data['course_mag']
+                    self.mag_indicator[i_ens, n] = vtg_data['mag_indicator']
+                    self.speed_knots[i_ens, n] = vtg_data['speed_knots']
+                    self.knots_indicator[i_ens, n] = vtg_data['knots_indicator']
+                    self.speed_kph[i_ens, n] = vtg_data['speed_kph']
+                    self.kph_indicator[i_ens, n] = vtg_data['kph_indicator']
+                    self.mode_indicator[i_ens, n] = vtg_data['mode_indicator']
+                except Exception:
+                    pass
+
+        if 'ds' in data:
+
+            # Check size and expand if needed
+            if len(data['ds']) > self.dbt_delta_time.shape[1]:
+                self.dbt_expand(len(data['ds']))
+
+            for n, dbt_data in enumerate(data['ds']):
+                # Try implemented because of occasional garbage in data stream.
+                # This prevents a crash and data after garbage are not used, but any data before garbage is saved
+                try:
+                    self.dbt_delta_time[i_ens, n] = dbt_data['delta_time']
+                    self.dbt_header[i_ens, n] = dbt_data['header']
+                    self.depth_ft[i_ens, n] = dbt_data['depth_ft']
+                    self.ft_indicator[i_ens, n] = dbt_data['ft_indicator']
+                    self.depth_m[i_ens, n] = dbt_data['depth_m']
+                    self.m_indicator[i_ens, n] = dbt_data['m_indicator']
+                    self.depth_fath[i_ens, n] = dbt_data['depth_fath']
+                    self.fath_indicator[i_ens, n] = dbt_data['fath_indicator']
+                except Exception:
+                    pass
+
+        if 'ext_heading' in data:
+
+            # Check size and expand if needed
+            if len(data['ext_heading']) > self.hdt_delta_time.shape[1]:
+                self.hdt_expand(len(data['ext_heading']))
+
+            for n, hdt_data in enumerate(data['ext_heading']):
+                # Try implemented because of occasional garbage in data stream.
+                # This prevents a crash and data after garbage are not used, but any data before garbage is saved
+                try:
+                    self.hdt_delta_time[i_ens, n] = hdt_data['delta_time']
+                    self.hdt_header[i_ens, n] = hdt_data['header']
+                    self.heading_deg[i_ens, n] = hdt_data['heading_deg']
+                    self.h_true_indicator[i_ens, n] = hdt_data['h_true_indicator']
+                except Exception:
+                    pass
+
+    def gga_expand(self, n_samples):
+        """Expand arrays.
+
+        Parameters
+        ----------
+        n_samples: int
+            Desired size of array
+        """
+
+        # Determine amount of required expansion
+        n_expansion = n_samples - self.gga_delta_time.shape[1]
+        n_ensembles = self.gga_delta_time.shape[0]
+
+        # Expand arrays
+        self.gga_delta_time = np.concatenate(
+            (self.gga_delta_time, np.tile(np.nan, (n_ensembles, n_expansion))), axis=1)
+        self.utc = np.concatenate(
+            (self.utc, np.tile(np.nan, (n_ensembles, n_expansion))), axis=1)
+        self.lat_deg = np.concatenate(
+            (self.lat_deg, np.tile(np.nan, (n_ensembles, n_expansion))), axis=1)
+        self.lon_deg = np.concatenate(
+            (self.lon_deg, np.tile(np.nan, (n_ensembles, n_expansion))), axis=1)
+        self.corr_qual = np.concatenate(
+            (self.corr_qual, np.tile(np.nan, (n_ensembles, n_expansion))), axis=1)
+        self.num_sats = np.concatenate(
+            (self.num_sats, np.tile(np.nan, (n_ensembles, n_expansion))), axis=1)
+        self.hdop = np.concatenate(
+            (self.hdop, np.tile(np.nan, (n_ensembles, n_expansion))), axis=1)
+        self.alt = np.concatenate(
+            (self.alt, np.tile(np.nan, (n_ensembles, n_expansion))), axis=1)
+        self.geoid = np.concatenate(
+            (self.geoid, np.tile(np.nan, (n_ensembles, n_expansion))), axis=1)
+        self.d_gps_age = np.concatenate(
+            (self.d_gps_age, np.tile(np.nan, (n_ensembles, n_expansion))), axis=1)
+        self.ref_stat_id = np.concatenate(
+            (self.ref_stat_id, np.tile(np.nan, (n_ensembles, n_expansion))), axis=1)
+
+        self.gga_header = np.concatenate(
+            (self.gga_header, np.tile('', (n_ensembles, n_expansion))), axis=1)
+        self.geoid_unit = np.concatenate(
+            (self.geoid_unit, np.tile('', (n_ensembles, n_expansion))), axis=1)
+        self.alt_unit = np.concatenate(
+            (self.alt_unit, np.tile('', (n_ensembles, n_expansion))), axis=1)
+        self.lon_ref = np.concatenate(
+            (self.lon_ref, np.tile('', (n_ensembles, n_expansion))), axis=1)
+        self.lat_ref = np.concatenate(
+            (self.lat_ref, np.tile('', (n_ensembles, n_expansion))), axis=1)
+
+    def vtg_expand(self, n_samples):
+        """Expand arrays.
+
+        Parameters
+        ----------
+        n_samples: int
+            Desired size of array
+        """
+
+        # Determine amount of required expansion
+        n_expansion = n_samples - self.vtg_delta_time.shape[1]
+        n_ensembles = self.vtg_delta_time.shape[0]
+
+        # Expand arrays
+        self.vtg_delta_time = np.concatenate(
+            (self.vtg_delta_time, np.tile(np.nan, (n_ensembles, n_expansion))), axis=1)
+        self.course_true = np.concatenate(
+            (self.course_true, np.tile(np.nan, (n_ensembles, n_expansion))), axis=1)
+        self.course_mag = np.concatenate(
+            (self.course_mag, np.tile(np.nan, (n_ensembles, n_expansion))), axis=1)
+        self.speed_knots = np.concatenate(
+            (self.speed_knots, np.tile(np.nan, (n_ensembles, n_expansion))), axis=1)
+        self.speed_kph = np.concatenate(
+            (self.speed_kph, np.tile(np.nan, (n_ensembles, n_expansion))), axis=1)
+
+        self.kph_indicator = np.concatenate(
+            (self.kph_indicator, np.tile('', (n_ensembles, n_expansion))), axis=1)
+        self.mode_indicator = np.concatenate(
+            (self.mode_indicator, np.tile('', (n_ensembles, n_expansion))), axis=1)
+        self.vtg_header = np.concatenate(
+            (self.vtg_header, np.tile('', (n_ensembles, n_expansion))), axis=1)
+        self.true_indicator = np.concatenate(
+            (self.true_indicator, np.tile('', (n_ensembles, n_expansion))), axis=1)
+        self.mag_indicator = np.concatenate(
+            (self.mag_indicator, np.tile('', (n_ensembles, n_expansion))), axis=1)
+        self.knots_indicator = np.concatenate(
+            (self.knots_indicator, np.tile('', (n_ensembles, n_expansion))), axis=1)
+
+    def dbt_expand(self, n_samples):
+        """Expand arrays.
+
+        Parameters
+        ----------
+        n_samples: int
+            Desired size of array
+        """
+
+        # Determine amount of required expansion
+        n_expansion = n_samples - self.dbt_delta_time.shape[1]
+        n_ensembles = self.dbt_delta_time.shape[0]
+
+        # Expand arrays
+        self.dbt_delta_time = np.concatenate(
+            (self.dbt_delta_time, np.tile(np.nan, (n_ensembles, n_expansion))), axis=1)
+        self.depth_ft = np.concatenate(
+            (self.depth_ft, np.tile(np.nan, (n_ensembles, n_expansion))), axis=1)
+        self.depth_m = np.concatenate(
+            (self.depth_m, np.tile(np.nan, (n_ensembles, n_expansion))), axis=1)
+        self.depth_fath = np.concatenate(
+            (self.depth_fath, np.tile(np.nan, (n_ensembles, n_expansion))), axis=1)
+
+        self.fath_indicator = np.concatenate(
+            (self.fath_indicator, np.tile('', (n_ensembles, n_expansion))), axis=1)
+        self.dbt_header = np.concatenate(
+            (self.dbt_header, np.tile('', (n_ensembles, n_expansion))), axis=1)
+        self.ft_indicator = np.concatenate(
+            (self.ft_indicator, np.tile('', (n_ensembles, n_expansion))), axis=1)
+        self.m_indicator = np.concatenate(
+            (self.m_indicator, np.tile('', (n_ensembles, n_expansion))), axis=1)
+
+    def hdt_expand(self, n_samples):
+        """Expand arrays.
+
+        Parameters
+        ----------
+        n_samples: int
+            Desired size of array
+        """
+
+        # Determine amount of required expansion
+        n_expansion = n_samples - self.hdt_delta_time.shape[1]
+        n_ensembles = self.hdt_delta_time.shape[0]
+
+        # Expand the arrays
+        self.hdt_delta_time = np.concatenate(
+            (self.hdt_delta_time, np.tile(np.nan, (n_ensembles, n_expansion))), axis=1)
+        self.heading_deg = np.concatenate(
+            (self.heading_deg, np.tile(np.nan, (n_ensembles, n_expansion))), axis=1)
+        self.h_true_indicator = np.concatenate(
+            (self.h_true_indicator, np.tile('', (n_ensembles, n_expansion))), axis=1)
+        self.hdt_header = np.concatenate(
+            (self.hdt_header, np.tile('', (n_ensembles, n_expansion))), axis=1)
+
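+
+# Illustrative sketch only (not part of QRev): the four Gps2.*_expand methods
+# above repeat one pattern -- pad each per-ensemble array with extra columns.
+# A generic helper like this, with the fill value chosen per dtype ('' for
+# strings, np.nan for floats), could replace the repetition.  The name is an
+# assumption for demonstration.
+def _example_expand_columns(array, n_samples, fill):
+    n_ensembles, n_current = array.shape
+    if n_samples <= n_current:
+        return array
+    pad = np.tile(fill, (n_ensembles, n_samples - n_current))
+    return np.concatenate((array, pad), axis=1)
+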
+
+class Nmea(object):
+    """Class to hold raw NMEA sentences.
+
+    Attributes
+    ----------
+    gga: list
+        List of GGA sentences
+    gsa: list
+        List of GSA sentences
+    vtg: list
+        List of VTG sentences
+    dbt: list
+        List of DBT sentences
+    """
+
+    def __init__(self, n_ensembles):
+        """Initialize instance variables.
+
+        Parameters
+        ----------
+        n_ensembles: int
+            Number of ensembles
+        """
+        self.gga = [''] * n_ensembles
+        self.gsa = [''] * n_ensembles
+        self.vtg = [''] * n_ensembles
+        # self.raw = ['']*n_ensembles DSM: not sure this was used
+        self.dbt = [''] * n_ensembles
+
+    def populate_data(self, i_ens, data):
+        """Populates the class with data for an ensemble.
+
+        Parameters
+        ----------
+        i_ens: int
+            Ensemble index
+        data: dict
+            Dictionary of all data for this ensemble
+        """
+
+        if 'gga_sentence' in data:
+            self.gga[i_ens] = data['gga_sentence']
+
+        if 'vtg_sentence' in data:
+            self.vtg[i_ens] = data['vtg_sentence']
+
+        if 'gsa_sentence' in data:
+            self.gsa[i_ens] = data['gsa_sentence']
+
+        if 'dbt_sentence' in data:
+            self.dbt[i_ens] = data['dbt_sentence']
+
+
+class Sensor(object):
+    """Class to hold sensor data.
+
+    Attributes
+    ----------
+    ambient_temp: np.array(int)
+        ADC ambient temperature
+    attitude_temp: np.array(int)
+        ADC attitude temperature
+    attitude: np.array(int)
+        ADC attitude
+    bit_test: np.array(int)
+        Bit test results
+    bit_test_count: np.array(int)
+        Number of fails for newer ADCPs, not used for Rio Grande
+    contam_sensor: np.array(int)
+        ADC contamination sensor
+    date: np.array(int)
+        Date
+    date_y2k: np.array(int)
+        Y2K compatible date
+    date_not_y2k: np.array(int)
+        Date not Y2K compatible
+    error_status_word: np.array(int)
+        Error status codes
+    heading_deg: np.array(float)
+        Heading to magnetic north in degrees
+    heading_std_dev_deg: np.array(float)
+        Standard deviation of headings for an ensemble
+    mpt_msc: np.array(int)
+        Minimum time prior to ping
+    num: np.array(int)
+        Ensemble number
+    num_fact: np.array(int)
+        Ensemble number most significant byte (rollover count)
+    num_tot: np.array(int)
+        Total ensemble number, including rollovers
+    orient: list
+        Orientation of ADCP
+    pitch_std_dev_deg: np.array(float)
+        Standard deviation of pitch for an ensemble
+    pitch_deg: np.array(float)
+        Pitch in degrees
+    pressure_neg: np.array(int)
+        ADC pressure negative
+    pressure_pos: np.array(int)
+        ADC pressure positive
+    pressure_pascal: np.array(int)
+        Pressure at transducer face in deca-pascals
+    pressure_var_pascal: np.array(int)
+        Pressure variance in deca-pascals
+    roll_std_dev_deg: np.array(float)
+        Standard deviation of roll for an ensemble
+    roll_deg: np.array(float)
+        Roll in degrees
+    salinity_ppt: np.array(int)
+        Salinity in parts per thousand
+    sos_mps: np.array(int)
+        Speed of sound in m/s
+    temperature_deg_c: np.array(float)
+        Water temperature in degrees C
+    time: np.array(int)
+        Time
+    time_y2k: np.array(int)
+        Y2K compatible time
+    xdcr_depth_dm: np.array(int)
+        Transducer depth in decimeters
+    xmit_current: np.array(int)
+        Transmit current
+    xmit_voltage: np.array(int)
+        Transmit voltage
+    vert_beam_eval_amp: np.array(int)
+        Vertical beam amplitude
+    vert_beam_RSSI_amp: np.array(int)
+        Vertical beam return signal strength indicator
+    vert_beam_range_m: np.array(float)
+        Vertical beam range in m
+    vert_beam_gain: list
+        Vertical beam gain setting
+    vert_beam_status: np.array(int)
+        Vertical beam status code
+    """
+
+    def __init__(self, n_ensembles):
+        """Initialize instance variables.
+
+        Parameters
+        ----------
+        n_ensembles: int
+            Number of ensembles
+        """
+
+        self.ambient_temp = nans(n_ensembles)
+        self.attitude_temp = nans(n_ensembles)
+        self.attitude = nans(n_ensembles)
+        self.bit_test = nans(n_ensembles)
+        self.bit_test_count = nans(n_ensembles)
+        self.contam_sensor = nans(n_ensembles)
+        self.date = nans([n_ensembles, 3])
+        self.date_y2k = nans([n_ensembles, 4])
+        self.date_not_y2k = nans([n_ensembles, 3])
+        self.error_status_word = [''] * n_ensembles
+        self.heading_deg = nans(n_ensembles)
+        self.heading_std_dev_deg = nans(n_ensembles)
+        self.mpt_msc = nans([n_ensembles, 3])
+        self.num = nans(n_ensembles)
+        self.num_fact = nans(n_ensembles)
+        self.num_tot = nans(n_ensembles)
+        self.orient = [''] * n_ensembles
+        self.pitch_std_dev_deg = nans(n_ensembles)
+        self.pitch_deg = nans(n_ensembles)
+        self.pressure_neg = nans(n_ensembles)
+        self.pressure_pos = nans(n_ensembles)
+        self.pressure_pascal = nans(n_ensembles)
+        self.pressure_var_pascal = nans(n_ensembles)
+        self.roll_std_dev_deg = nans(n_ensembles)
+        self.roll_deg = nans(n_ensembles)
+        self.salinity_ppt = nans(n_ensembles)
+        self.sos_mps = nans(n_ensembles)
+        self.temperature_deg_c = nans(n_ensembles)
+        self.time = nans([n_ensembles, 4])
+        self.time_y2k = nans([n_ensembles, 4])
+        self.xdcr_depth_dm = nans(n_ensembles)
+        self.xmit_current = nans(n_ensembles)
+        self.xmit_voltage = nans(n_ensembles)
+        self.vert_beam_eval_amp = nans(n_ensembles)
+        self.vert_beam_RSSI_amp = nans(n_ensembles)
+        self.vert_beam_range_m = nans(n_ensembles)
+        self.vert_beam_gain = [''] * n_ensembles
+        self.vert_beam_status = np.zeros(n_ensembles)
+
+    def populate_data(self, i_ens, data):
+        """Populates the class with data for an ensemble.
+
+        Parameters
+        ----------
+        i_ens: int
+            Ensemble index
+        data: dict
+            Dictionary of all data for this ensemble
+        """
+
+        if 'fixed_leader' in data and 'variable_leader' in data:
+            # Convert system_configuration_ls to 1s and 0s
+            bitls = "{0:08b}".format(data['fixed_leader']['system_configuration_ls'])
+
+            # Convert the first (orientation) bit to an integer
+            val = int(bitls[0], 2)
+            if val == 0:
+                self.orient[i_ens] = 'Down'
+            elif val == 1:
+                self.orient[i_ens] = 'Up'
+            else:
+                self.orient[i_ens] = 'n/a'
+
+            self.num[i_ens] = data['variable_leader']['ensemble_number']
+
+            # Store date and time as lists
+            self.date_not_y2k[i_ens, :] = [data['variable_leader']['rtc_year'],
+                                                  data['variable_leader']['rtc_month'],
+                                                  data['variable_leader']['rtc_day']]
+            self.time[i_ens, :] = [data['variable_leader']['rtc_hour'],
+                                          data['variable_leader']['rtc_minutes'],
+                                          data['variable_leader']['rtc_seconds'],
+                                          data['variable_leader']['rtc_hundredths']]
+
+            self.num_fact[i_ens] = data['variable_leader']['ensemble_number_msb']
+            self.num_tot[i_ens] = self.num[i_ens] + self.num_fact[i_ens] * 65536
+            self.bit_test[i_ens] = data['variable_leader']['bit_fault']
+            self.bit_test_count[i_ens] = data['variable_leader']['bit_count']
+            self.sos_mps[i_ens] = data['variable_leader']['speed_of_sound']
+            self.xdcr_depth_dm[i_ens] = data['variable_leader']['depth_of_transducer']
+            self.heading_deg[i_ens] = data['variable_leader']['heading'] / 100.
+            self.pitch_deg[i_ens] = data['variable_leader']['pitch'] / 100.
+            self.roll_deg[i_ens] = data['variable_leader']['roll'] / 100.
+            self.salinity_ppt[i_ens] = data['variable_leader']['salinity']
+            self.temperature_deg_c[i_ens] = data['variable_leader']['temperature'] / 100.
+            self.mpt_msc[i_ens, :] = [data['variable_leader']['mpt_minutes'],
+                                             data['variable_leader']['mpt_seconds'],
+                                             data['variable_leader']['mpt_hundredths']]
+            self.heading_std_dev_deg[i_ens] = data['variable_leader']['heading_standard_deviation']
+            self.pitch_std_dev_deg[i_ens] = data['variable_leader']['pitch_standard_deviation'] / 10.
+            self.roll_std_dev_deg[i_ens] = data['variable_leader']['roll_standard_deviation'] / 10.
+            self.xmit_current[i_ens] = data['variable_leader']['transmit_current']
+            self.xmit_voltage[i_ens] = data['variable_leader']['transmit_voltage']
+            self.ambient_temp[i_ens] = data['variable_leader']['ambient_temperature']
+            self.pressure_pos[i_ens] = data['variable_leader']['pressure_positive']
+            self.pressure_neg[i_ens] = data['variable_leader']['pressure_negative']
+            self.attitude_temp[i_ens] = data['variable_leader']['attitude_temperature']
+            self.attitude[i_ens] = data['variable_leader']['attitude']
+            self.contam_sensor[i_ens] = data['variable_leader']['contamination_sensor']
+            self.error_status_word[i_ens] = "{0:032b}".format(data['variable_leader']['error_status_word'])
+            self.pressure_pascal[i_ens] = data['variable_leader']['pressure']
+            self.pressure_var_pascal[i_ens] = data['variable_leader']['pressure_variance']
+
+            # Store Y2K date and time as list
+            self.date_y2k[i_ens, :] = [data['variable_leader']['rtc_y2k_century'],
+                                       data['variable_leader']['rtc_y2k_year'],
+                                       data['variable_leader']['rtc_y2k_month'],
+                                       data['variable_leader']['rtc_y2k_day']]
+            self.time_y2k[i_ens, :] = [data['variable_leader']['rtc_y2k_hour'],
+                                       data['variable_leader']['rtc_y2k_minutes'],
+                                       data['variable_leader']['rtc_y2k_seconds'],
+                                       data['variable_leader']['rtc_y2k_hundredths']]
+            self.date[i_ens, :] = self.date_not_y2k[i_ens, :]
+            self.date[i_ens, 0] = self.date_y2k[i_ens, 0] * 100 + self.date_y2k[i_ens, 1]
+
+            if 'vertical_beam' in data:
+                self.vert_beam_eval_amp[i_ens] = data['vertical_beam']['eval_amp']
+                self.vert_beam_RSSI_amp[i_ens] = data['vertical_beam']['rssi']
+                self.vert_beam_range_m[i_ens] = data['vertical_beam']['range'] / 1000
+
+                # Format status as 8 bits: the last 2 bits give the status and the 6th bit gives the gain
+                temp = "{0:08b}".format(data['vertical_beam']['status'])
+                self.vert_beam_status[i_ens] = int(temp[6:], 2)
+                if temp[5] == '0':
+                    self.vert_beam_gain[i_ens] = 'L'
+                else:
+                    self.vert_beam_gain[i_ens] = 'H'
+
+
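+# Illustrative sketch (not part of the original QRev code): the vertical beam
+# status decoding used in populate_data above, in isolation. The 8-bit status
+# byte carries the gain in its 6th bit and the status code in its last 2 bits.
+def _decode_vert_beam_status(status_byte):
+    """Return (status, gain) decoded from a vertical beam status byte.
+
+    Example: _decode_vert_beam_status(0b00000101) -> (1, 'H')
+    """
+    bits = "{0:08b}".format(status_byte)
+    status = int(bits[6:], 2)
+    gain = 'L' if bits[5] == '0' else 'H'
+    return status, gain
+
+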
+class Surface(object):
+    """Class to hold surface cell data.
+
+    Attributes
+    ----------
+    no_cells: np.array(int)
+        Number of surface cells in the ensemble
+    cell_size_cm: np.array(int)
+        Cell size in cm
+    dist_bin1_cm: np.array(int)
+        Distance to center of cell 1 in cm
+    vel_mps: np.array(float)
+        3D array of velocity data in each cell and ensemble
+    corr: np.array(int)
+        3D array of correlation data for each beam, cell, and ensemble
+    pergd: np.array(int)
+        3D array of percent good for each beam, cell, and ensemble
+    rssi: np.array(int)
+        3D array of return signal strength indicator for each beam, cell, and ensemble
+    """
+
+    def __init__(self, n_ensembles, n_velocities, max_surface_bins):
+        """Initialize instance variables.
+
+        Parameters
+        ----------
+        n_ensembles: int
+            Number of ensembles
+        n_velocities: int
+            Number of velocity beams
+        max_surface_bins: int
+            Maximum number of surface bins in an ensemble in the transect
+        """
+
+        self.no_cells = np.zeros(n_ensembles)
+        self.cell_size_cm = nans(n_ensembles)
+        self.dist_bin1_cm = nans(n_ensembles)
+        self.vel_mps = np.tile([np.nan], [n_velocities, max_surface_bins, n_ensembles])
+        self.corr = nans([n_velocities, max_surface_bins, n_ensembles])
+        self.pergd = nans([n_velocities, max_surface_bins, n_ensembles])
+        self.rssi = nans([n_velocities, max_surface_bins, n_ensembles])
+
+    def populate_data(self, i_ens, data, main_data):
+        """Populates the class with data for an ensemble.
+
+        Parameters
+        ----------
+        i_ens: int
+            Ensemble index
+        data: dict
+            Dictionary of all data for this ensemble
+        main_data: Pd0TRDI
+            Object of PD0TRDI
+        """
+
+        if 'surface_leader' in data:
+            self.no_cells[i_ens] = data['surface_leader']['cell_count']
+            self.cell_size_cm[i_ens] = data['surface_leader']['cell_size']
+            self.dist_bin1_cm[i_ens] = data['surface_leader']['range_cell_1']
+
+        if 'surface_velocity' in data:
+            self.vel_mps[:main_data.n_velocities, :len(data['surface_velocity']['velocity']), i_ens] = \
+                np.array(data['surface_velocity']['velocity']).T
+
+        if 'surface_correlation' in data:
+            self.corr[:main_data.n_velocities, :len(data['surface_correlation']['correlation']), i_ens] = \
+                np.array(data['surface_correlation']['correlation']).T
+
+        if 'surface_intensity' in data:
+            self.rssi[:main_data.n_velocities, :len(data['surface_intensity']['rssi']), i_ens] = \
+                np.array(data['surface_intensity']['rssi']).T
+
+        if 'surface_percent_good' in data:
+            self.pergd[:main_data.n_velocities, :len(data['surface_percent_good']['percent_good']), i_ens] = \
+                np.array(data['surface_percent_good']['percent_good']).T
+
+
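+# Minimal usage sketch (assumes numpy and the module-level nans helper used
+# above): Surface pre-allocates NaN-filled arrays ordered [beam, cell, ensemble].
+def _surface_shape_demo():
+    surface = Surface(n_ensembles=10, n_velocities=4, max_surface_bins=5)
+    assert surface.vel_mps.shape == (4, 5, 10)
+    assert np.all(np.isnan(surface.vel_mps))
+    return surface
+
+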
+class Wt(object):
+    """Class to hold water track data.
+
+    Attributes
+    ----------
+    vel_mps: np.array(float)
+        3D array of velocity data in each cell and ensemble
+    corr: np.array(int)
+        3D array of correlation data for each beam, cell, and ensemble
+    pergd: np.array(int)
+        3D array of percent good for each beam, cell, and ensemble
+    rssi: np.array(int)
+        3D array of return signal strength indicator for each beam, cell, and ensemble
+    """
+
+    def __init__(self, n_bins, n_ensembles, n_velocities):
+        """Initialize instance variables.
+
+        Parameters
+        ----------
+        n_bins: int
+            Maximum number of bins in an ensemble in the transect
+        n_ensembles: int
+            Number of ensembles
+        n_velocities: int
+            Number of velocity beams
+        """
+
+        self.corr = nans([n_velocities, n_bins, n_ensembles])
+        self.pergd = nans([n_velocities, n_bins, n_ensembles])
+        self.rssi = nans([n_velocities, n_bins, n_ensembles])
+        self.vel_mps = nans([n_velocities, n_bins, n_ensembles])
+
+    def populate_data(self, i_ens, data, main_data):
+        """Populates the class with data for an ensemble.
+
+        Parameters
+        ----------
+        i_ens: int
+            Ensemble index
+        data: dict
+            Dictionary of all data for this ensemble
+        main_data: Pd0TRDI
+            Object of PD0TRDI
+        """
+
+        if 'velocity' in data:
+            # Check size in case array needs to be expanded
+            if main_data.Cfg.wn[i_ens] > self.vel_mps.shape[1]:
+                append = np.zeros([self.vel_mps.shape[0],
+                                   int(main_data.Cfg.wn[i_ens] - self.vel_mps.shape[1]),
+                                   self.vel_mps.shape[2]])
+                self.vel_mps = np.hstack([self.vel_mps, append])
+                self.corr = np.hstack([self.corr, append])
+                self.rssi = np.hstack([self.rssi, append])
+                self.pergd = np.hstack([self.pergd, append])
+
+            # Reformat and assign data
+            self.vel_mps[:main_data.n_velocities, :int(main_data.Cfg.wn[i_ens]), i_ens] = \
+                np.array(data['velocity']['data']).T
+            if 'correlation' in data:
+                self.corr[:main_data.n_velocities, :int(main_data.Cfg.wn[i_ens]), i_ens] = \
+                    np.array(data['correlation']['data']).T
+            if 'echo_intensity' in data:
+                self.rssi[:main_data.n_velocities, :int(main_data.Cfg.wn[i_ens]), i_ens] = \
+                    np.array(data['echo_intensity']['data']).T
+            if 'percent_good' in data:
+                self.pergd[:main_data.n_velocities, :int(main_data.Cfg.wn[i_ens]), i_ens] = \
+                    np.array(data['percent_good']['data']).T
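+
+
+# Illustrative sketch (not part of the original QRev code): Wt.populate_data
+# grows its arrays along the bin axis (axis 1) with np.hstack when an ensemble
+# reports more bins (Cfg.wn) than were pre-allocated. The same pattern alone:
+def _expand_bins_demo():
+    vel = np.full((4, 30, 100), np.nan)   # beams x bins x ensembles
+    wn = 35                               # an ensemble reports 35 bins
+    if wn > vel.shape[1]:
+        append = np.zeros([vel.shape[0], wn - vel.shape[1], vel.shape[2]])
+        vel = np.hstack([vel, append])
+    assert vel.shape == (4, 35, 100)
+    return vel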
diff --git a/Classes/PreMeasurement.py b/Classes/PreMeasurement.py
new file mode 100644
index 0000000000000000000000000000000000000000..ed2b5c5d4fb3998880cb6cd06f8ecfa8019a6574
--- /dev/null
+++ b/Classes/PreMeasurement.py
@@ -0,0 +1,430 @@
+import re
+import copy
+import numpy as np
+
+
+class PreMeasurement(object):
+    """Stores tests, calibrations, and evaluations conducted prior ot measurement.
+
+    Attributes
+    ----------
+    time_stamp: str
+        Time and date of test
+    data: str
+        Raw data from test
+    result: dict
+        Dictionary of test results. Varies by test.
+    """
+    
+    def __init__(self):
+        """Initialize instance variables."""
+
+        self.time_stamp = None
+        self.data = None
+        self.result = {}
+        
+    def populate_data(self, time_stamp, data_in, data_type):
+        """Coordinates storing of test, calibration, and evaluation data.
+
+        Parameters
+        ----------
+        time_stamp: str
+            Time and date text.
+        data_in: str
+            Raw data from test
+        data_type: str
+            Type of data: compass calibration or evaluation (second character 'C'),
+            TST-TRDI system test, or SST-SonTek system test
+        """
+
+        # Store time stamp and data
+        self.time_stamp = time_stamp
+        self.data = data_in
+
+        # Process data depending on data type and store result
+        if data_type[1] == 'C':
+            self.compass_read()
+        elif data_type == 'TST':
+            self.sys_test_read()
+            self.pt3_data()
+        elif data_type == 'SST':
+            self.sys_test_read()
+
+    def compass_read(self):
+        """Method for getting compass evaluation data"""
+
+        # Match regex for compass evaluation error:
+        splits = re.split('(Total error:|Double Cycle Errors:|Error from calibration:)', self.data)
+        if len(splits) > 1:
+            error = float(re.search(r'\d+\.*\d*', splits[-1])[0])
+        else:
+            error = 'N/A'
+        self.result['compass'] = {'error': error}
+
+    @staticmethod
+    def cc_qrev_mat_in(meas_struct):
+        """Processes the Matlab data structure to obtain a list of Premeasurement objects containing compass calibration
+           data from the Matlab data structure.
+
+       Parameters
+       ----------
+       meas_struct: mat_struct
+           Matlab data structure obtained from sio.loadmat
+
+       Returns
+       -------
+       cc: list
+           List of Premeasurement data objects
+       """
+        cc = []
+        if hasattr(meas_struct, 'compassCal'):
+            if type(meas_struct.compassCal) is np.ndarray:
+                for cal in meas_struct.compassCal:
+                    pm = PreMeasurement()
+                    pm.compass_populate_from_qrev_mat(cal)
+                    cc.append(pm)
+            elif len(meas_struct.compassCal.data) > 0:
+                pm = PreMeasurement()
+                pm.compass_populate_from_qrev_mat(meas_struct.compassCal)
+                cc.append(pm)
+
+        return cc
+
+    @staticmethod
+    def ce_qrev_mat_in(meas_struct):
+        """Processes the Matlab data structure to obtain a list of Premeasurement objects containing compass evaluation
+           data from the Matlab data structure.
+
+       Parameters
+       ----------
+       meas_struct: mat_struct
+           Matlab data structure obtained from sio.loadmat
+
+       Returns
+       -------
+       ce: list
+           List of Premeasurement data objects
+       """
+        ce = []
+        if hasattr(meas_struct, 'compassEval'):
+            if type(meas_struct.compassEval) is np.ndarray:
+                for comp_eval in meas_struct.compassEval:
+                    pm = PreMeasurement()
+                    pm.compass_populate_from_qrev_mat(comp_eval)
+                    ce.append(pm)
+            elif len(meas_struct.compassEval.data) > 0:
+                pm = PreMeasurement()
+                pm.compass_populate_from_qrev_mat(meas_struct.compassEval)
+                ce.append(pm)
+        return ce
+
+    def compass_populate_from_qrev_mat(self, data_in):
+        """Populated Premeasurement instance variables with data from QRev Matlab file.
+
+        Parameters
+        ----------
+        data_in: mat_struct
+            mat_struct object containing compass cal/eval data
+        """
+        self.data = data_in.data
+        self.time_stamp = data_in.timeStamp
+        if hasattr(data_in, 'result'):
+            self.result = {'compass': {'error': data_in.result.compass.error}}
+        else:
+            # Match regex for compass evaluation error:
+            splits = re.split('(Total error:|Double Cycle Errors:|Error from calibration:)', self.data)
+            if len(splits) > 1:
+                error = float(re.search(r'\d+\.*\d*', splits[-1])[0])
+            else:
+                error = 'N/A'
+            self.result['compass'] = {'error': error}
+            
+    def sys_test_read(self):
+        """Method for reading the system test data"""
+        if self.data is not None:
+            # Match regex for number of tests and number of failures
+            num_tests = re.findall('(Fail|FAIL|F A I L|Pass|PASS|NOT DETECTED|P A S S)', self.data)
+            num_fails = re.findall('(Fail|FAIL|F A I L)', self.data)
+
+            # Store results
+            self.result = {'sysTest': {'n_tests': len(num_tests)}}
+            self.result['sysTest']['n_failed'] = len(num_fails)
+        else:
+            self.result = {'sysTest': {'n_tests': None}}
+            self.result['sysTest']['n_failed'] = None
+
+    @staticmethod
+    def sys_test_qrev_mat_in(meas_struct):
+        """Processes the Matlab data structure to obtain a list of Premeasurement objects containing system test data
+           from the Matlab data structure.
+
+           Parameters
+           ----------
+           meas_struct: mat_struct
+               Matlab data structure obtained from sio.loadmat
+
+           Returns
+           -------
+           system_tst: list
+               List of Premeasurement data objects
+           """
+        system_tst = []
+        if hasattr(meas_struct, 'sysTest'):
+            try:
+                if type(meas_struct.sysTest) == np.ndarray:
+                    for test in meas_struct.sysTest:
+                        tst = PreMeasurement()
+                        tst.sys_tst_populate_from_qrev_mat(test)
+                        system_tst.append(tst)
+                elif len(meas_struct.sysTest.data) > 0:
+                    tst = PreMeasurement()
+                    tst.sys_tst_populate_from_qrev_mat(meas_struct.sysTest)
+                    system_tst.append(tst)
+            except AttributeError:
+                pass
+        return system_tst
+
+    def sys_tst_populate_from_qrev_mat(self, test_in):
+        """Populated Premeasurement instance variables with data from QRev Matlab file.
+
+        Parameters
+        ----------
+        test_in: mat_struct
+            mat_struct object containing system test data
+        """
+        try:
+            self.data = test_in.data
+            self.time_stamp = test_in.timeStamp
+            self.result = {'sysTest': {'n_failed': test_in.result.sysTest.nFailed}}
+            self.result['sysTest']['n_tests'] = test_in.result.sysTest.nTests
+
+            if hasattr(test_in.result, 'pt3'):
+                data_types = {'corr_table': np.array([]), 'sdc': np.array([]), 'cdc': np.array([]),
+                              'noise_floor': np.array([])}
+                test_types = {'high_wide': data_types.copy(), 'high_narrow': data_types.copy(),
+                              'low_wide': data_types.copy(),
+                              'low_narrow': data_types.copy()}
+                pt3 = {'hard_limit': copy.deepcopy(test_types), 'linear': copy.deepcopy(test_types)}
+                if hasattr(test_in.result.pt3, 'hardLimit'):
+                    if hasattr(test_in.result.pt3.hardLimit, 'hw'):
+                        pt3['hard_limit']['high_wide']['corr_table'] = test_in.result.pt3.hardLimit.hw.corrTable
+                        pt3['hard_limit']['high_wide']['sdc'] = test_in.result.pt3.hardLimit.hw.sdc
+                        pt3['hard_limit']['high_wide']['cdc'] = test_in.result.pt3.hardLimit.hw.cdc
+                        pt3['hard_limit']['high_wide']['noise_floor'] = test_in.result.pt3.hardLimit.hw.noiseFloor
+                    if hasattr(test_in.result.pt3.hardLimit, 'lw'):
+                        pt3['hard_limit']['low_wide']['corr_table'] = test_in.result.pt3.hardLimit.lw.corrTable
+                        pt3['hard_limit']['low_wide']['sdc'] = test_in.result.pt3.hardLimit.lw.sdc
+                        pt3['hard_limit']['low_wide']['cdc'] = test_in.result.pt3.hardLimit.lw.cdc
+                        pt3['hard_limit']['low_wide']['noise_floor'] = test_in.result.pt3.hardLimit.lw.noiseFloor
+                    if hasattr(test_in.result.pt3.hardLimit, 'hn'):
+                        pt3['hard_limit']['high_narrow']['corr_table'] = test_in.result.pt3.hardLimit.hn.corrTable
+                        pt3['hard_limit']['high_narrow']['sdc'] = test_in.result.pt3.hardLimit.hn.sdc
+                        pt3['hard_limit']['high_narrow']['cdc'] = test_in.result.pt3.hardLimit.hn.cdc
+                        pt3['hard_limit']['high_narrow']['noise_floor'] = test_in.result.pt3.hardLimit.hn.noiseFloor
+                    if hasattr(test_in.result.pt3.hardLimit, 'ln'):
+                        pt3['hard_limit']['low_narrow']['corr_table'] = test_in.result.pt3.hardLimit.ln.corrTable
+                        pt3['hard_limit']['low_narrow']['sdc'] = test_in.result.pt3.hardLimit.ln.sdc
+                        pt3['hard_limit']['low_narrow']['cdc'] = test_in.result.pt3.hardLimit.ln.cdc
+                        pt3['hard_limit']['low_narrow']['noise_floor'] = test_in.result.pt3.hardLimit.ln.noiseFloor
+                if hasattr(test_in.result.pt3, 'linear'):
+                    if hasattr(test_in.result.pt3.linear, 'hw'):
+                        pt3['linear']['high_wide']['corr_table'] = test_in.result.pt3.linear.hw.corrTable
+                        pt3['linear']['high_wide']['noise_floor'] = test_in.result.pt3.linear.hw.noiseFloor
+                    if hasattr(test_in.result.pt3.linear, 'lw'):
+                        pt3['linear']['low_wide']['corr_table'] = test_in.result.pt3.linear.lw.corrTable
+                        pt3['linear']['low_wide']['noise_floor'] = test_in.result.pt3.linear.lw.noiseFloor
+                    if hasattr(test_in.result.pt3.linear, 'hn'):
+                        pt3['linear']['high_narrow']['corr_table'] = test_in.result.pt3.linear.hn.corrTable
+                        pt3['linear']['high_narrow']['noise_floor'] = test_in.result.pt3.linear.hn.noiseFloor
+                    if hasattr(test_in.result.pt3.linear, 'ln'):
+                        pt3['linear']['low_narrow']['corr_table'] = test_in.result.pt3.linear.ln.corrTable
+                        pt3['linear']['low_narrow']['noise_floor'] = test_in.result.pt3.linear.ln.noiseFloor
+
+                self.result['pt3'] = pt3
+        except AttributeError:
+            # Match regex for number of tests and number of failures
+            num_tests = re.findall('(Fail|FAIL|F A I L|Pass|PASS|NOT DETECTED|P A S S)', test_in.data)
+            num_fails = re.findall('(Fail|FAIL|F A I L)', test_in.data)
+
+            # Store results
+            self.result = {'sysTest': {'n_tests': len(num_tests)}}
+            self.result['sysTest']['n_failed'] = len(num_fails)
+
+    def pt3_data(self):
+        """Method for processing the data in the correlation matrices."""
+        try:
+            data_types = {'corr_table': np.array([]), 'sdc': np.array([]), 'cdc': np.array([]),
+                          'noise_floor': np.array([])}
+            test_types = {'high_wide': data_types.copy(), 'high_narrow': data_types.copy(),
+                          'low_wide': data_types.copy(),
+                          'low_narrow': data_types.copy()}
+            pt3 = {'hard_limit': copy.deepcopy(test_types), 'linear': copy.deepcopy(test_types)}
+
+            # Match regex for correlation tables
+            matches = re.findall('Lag.*?0', self.data, re.DOTALL)
+
+            # Count the number of correlation tables to process
+            correl_count = 0
+            for match in matches:
+                bm1_matches = re.findall('Bm1', match)
+                correl_count += len(bm1_matches)
+
+            # Correlation table match
+            lag_matches = re.findall(r'Lag.*?^\s*$', self.data, re.MULTILINE | re.DOTALL)
+
+            # Sin match
+            sin_match = re.findall(r'((Sin|SIN).*?^\s*$)', self.data, re.MULTILINE | re.DOTALL)[0][0]
+            sin_array = np.array(re.findall(r'\d+\.*\d*', sin_match), dtype=int)
+
+            # Cos match
+            cos_match = re.findall(r'((Cos|COS).*?^\s*$)', self.data, re.MULTILINE | re.DOTALL)[0][0]
+            cos_array = np.array(re.findall(r'\d+\.*\d*', cos_match), dtype=int)
+
+            # RSSI match
+            rssi_array = np.array([])
+            rssi_matches = re.findall(r'RSSI.*?^\s*$', self.data, re.MULTILINE | re.DOTALL)
+            for rssi_match in rssi_matches:
+                rssi_array = np.hstack((rssi_array, np.array(re.findall(r'\d+\.*\d*', rssi_match), dtype=int)))
+
+            # Process each set of correlation tables
+            for n, lag_match in enumerate(lag_matches):
+
+                # Count the Bm1 string to know how many tables to read
+                bm_count = len(re.findall('Bm1', lag_match))
+
+                # Extract the table into list
+                numbers = re.findall(r'\d+\.*\d*', lag_match)
+
+                # Create array from data in table
+                corr_data = np.array(numbers[(bm_count * 4):(bm_count * 44)],
+                                     dtype=int).reshape([8, (bm_count * 4) + 1])[:, 1::]
+
+                # Only one pt3 test. Typical of the Rio Grande and StreamPro.
+                if bm_count == 1:
+
+                    # Assign matrix slices to corresponding variables
+                    # corr_hlimit_hgain_wband = corr_data
+                    pt3['hard_limit']['high_wide']['corr_table'] = corr_data
+                    pt3['hard_limit']['high_wide']['sdc'] = sin_array[0:4]
+                    pt3['hard_limit']['high_wide']['cdc'] = cos_array[0:4]
+                    pt3['hard_limit']['high_wide']['noise_floor'] = rssi_array[0:4]
+
+                # 4 tests arranged in groups of 2. All data are hard limited.
+                elif bm_count == 2 and correl_count == 4:
+
+                    # Hard limited wide bandwidth (n=0)
+                    if n == 0:
+
+                        pt3['hard_limit']['high_wide']['corr_table'] = corr_data[:, 0:4]
+                        pt3['hard_limit']['high_wide']['sdc'] = sin_array[n * 4: (n + 1) * 4]
+                        pt3['hard_limit']['high_wide']['cdc'] = cos_array[n * 4: (n + 1) * 4]
+                        pt3['hard_limit']['high_wide']['noise_floor'] = rssi_array[n * 4: (n + 1) * 4]
+
+                        pt3['hard_limit']['low_wide']['corr_table'] = corr_data[:, 4::]
+                        pt3['hard_limit']['low_wide']['sdc'] = sin_array[(n + 1) * 4: (n + 2) * 4]
+                        pt3['hard_limit']['low_wide']['cdc'] = cos_array[(n + 1) * 4: (n + 2) * 4]
+                        pt3['hard_limit']['low_wide']['noise_floor'] = rssi_array[(n + 1) * 4: (n + 2) * 4]
+
+                    # Hard limited narrow bandwidth (n=1)
+                    elif n == 1:
+
+                        pt3['hard_limit']['high_narrow']['corr_table'] = corr_data[:, 0:4]
+                        pt3['hard_limit']['high_narrow']['sdc'] = sin_array[(n + 1) * 4: (n + 2) * 4]
+                        pt3['hard_limit']['high_narrow']['cdc'] = cos_array[(n + 1) * 4: (n + 2) * 4]
+                        pt3['hard_limit']['high_narrow']['noise_floor'] = rssi_array[(n + 1) * 4: (n + 2) * 4]
+
+                        pt3['hard_limit']['low_narrow']['corr_table'] = corr_data[:, 4::]
+                        pt3['hard_limit']['low_narrow']['sdc'] = sin_array[(n + 2) * 4: (n + 3) * 4]
+                        pt3['hard_limit']['low_narrow']['cdc'] = cos_array[(n + 2) * 4: (n + 3) * 4]
+                        pt3['hard_limit']['low_narrow']['noise_floor'] = rssi_array[(n + 2) * 4: (n + 3) * 4]
+
+                # 8 tests arranged in sets of 2. The linear is 1st followed by the hard limit.
+                elif bm_count == 2 and correl_count == 8:
+
+                    # Hard limit bandwidth (n=0)
+                    if n == 0:
+
+                        pt3['hard_limit']['high_wide']['corr_table'] = corr_data[:, 0:4]
+                        pt3['hard_limit']['high_wide']['sdc'] = sin_array[n * 4: (n + 1) * 4]
+                        pt3['hard_limit']['high_wide']['cdc'] = cos_array[n * 4: (n + 1) * 4]
+                        pt3['hard_limit']['high_wide']['noise_floor'] = rssi_array[n * 4: (n + 1) * 4]
+
+                        pt3['hard_limit']['low_wide']['corr_table'] = corr_data[:, 4::]
+                        pt3['hard_limit']['low_wide']['sdc'] = sin_array[(n + 1) * 4: (n + 2) * 4]
+                        pt3['hard_limit']['low_wide']['cdc'] = cos_array[(n + 1) * 4: (n + 2) * 4]
+                        pt3['hard_limit']['low_wide']['noise_floor'] = rssi_array[(n + 1) * 4: (n + 2) * 4]
+
+                    # Hard limit narrow bandwidth (n=1)
+                    elif n == 1:
+
+                        pt3['hard_limit']['high_narrow']['corr_table'] = corr_data[:, 0:4]
+                        pt3['hard_limit']['high_narrow']['sdc'] = sin_array[(n + 1) * 4: (n + 2) * 4]
+                        pt3['hard_limit']['high_narrow']['cdc'] = cos_array[(n + 1) * 4: (n + 2) * 4]
+                        pt3['hard_limit']['high_narrow']['noise_floor'] = rssi_array[(n + 1) * 4: (n + 2) * 4]
+
+                        pt3['hard_limit']['low_narrow']['corr_table'] = corr_data[:, 4::]
+                        pt3['hard_limit']['low_narrow']['sdc'] = sin_array[(n + 2) * 4: (n + 3) * 4]
+                        pt3['hard_limit']['low_narrow']['cdc'] = cos_array[(n + 2) * 4: (n + 3) * 4]
+                        pt3['hard_limit']['low_narrow']['noise_floor'] = rssi_array[(n + 2) * 4: (n + 3) * 4]
+
+                    # Linear wide bandwidth (n=2)
+                    elif n == 2:
+
+                        pt3['linear']['high_wide']['corr_table'] = corr_data[:, 0:4]
+                        pt3['linear']['high_wide']['noise_floor'] = rssi_array[(n + 2) * 4: (n + 3) * 4]
+
+                        pt3['linear']['low_wide']['corr_table'] = corr_data[:, 4::]
+                        pt3['linear']['low_wide']['noise_floor'] = rssi_array[(n + 3) * 4: (n + 4) * 4]
+
+                    # Linear narrow bandwidth (n=3)
+                    elif n == 3:
+
+                        pt3['linear']['high_narrow']['corr_table'] = corr_data[:, 0:4]
+                        pt3['linear']['high_narrow']['noise_floor'] = rssi_array[(n + 3) * 4: (n + 4) * 4]
+
+                        pt3['linear']['low_narrow']['corr_table'] = corr_data[:, 4::]
+                        pt3['linear']['low_narrow']['noise_floor'] = rssi_array[(n + 4) * 4: (n + 5) * 4]
+
+                # 8 tests in groups of 4. Hard limit is the first group then the linear.
+                elif bm_count == 4:
+
+                    # Hard limit data (n=0)
+                    if n == 0:
+
+                        pt3['hard_limit']['high_wide']['corr_table'] = corr_data[:, 0:4]
+                        pt3['hard_limit']['high_wide']['sdc'] = sin_array[n * 4: (n + 1) * 4]
+                        pt3['hard_limit']['high_wide']['cdc'] = cos_array[n * 4: (n + 1) * 4]
+                        pt3['hard_limit']['high_wide']['noise_floor'] = rssi_array[n * 4: (n + 1) * 4]
+
+                        pt3['hard_limit']['low_wide']['corr_table'] = corr_data[:, 4:8]
+                        pt3['hard_limit']['low_wide']['sdc'] = sin_array[(n + 1) * 4: (n + 2) * 4]
+                        pt3['hard_limit']['low_wide']['cdc'] = cos_array[(n + 1) * 4: (n + 2) * 4]
+                        pt3['hard_limit']['low_wide']['noise_floor'] = rssi_array[(n + 1) * 4: (n + 2) * 4]
+
+                        pt3['hard_limit']['high_narrow']['corr_table'] = corr_data[:, 8:12]
+                        pt3['hard_limit']['high_narrow']['sdc'] = sin_array[(n + 2) * 4: (n + 3) * 4]
+                        pt3['hard_limit']['high_narrow']['cdc'] = cos_array[(n + 2) * 4: (n + 3) * 4]
+                        pt3['hard_limit']['high_narrow']['noise_floor'] = rssi_array[(n + 2) * 4: (n + 3) * 4]
+
+                        pt3['hard_limit']['low_narrow']['corr_table'] = corr_data[:, 12::]
+                        pt3['hard_limit']['low_narrow']['sdc'] = sin_array[(n + 3) * 4: (n + 4) * 4]
+                        pt3['hard_limit']['low_narrow']['cdc'] = cos_array[(n + 3) * 4: (n + 4) * 4]
+                        pt3['hard_limit']['low_narrow']['noise_floor'] = rssi_array[(n + 3) * 4: (n + 4) * 4]
+
+                    # Linear data (n=1)
+                    else:
+                        pt3['linear']['high_wide']['corr_table'] = corr_data[:, 0:4]
+                        pt3['linear']['high_wide']['noise_floor'] = rssi_array[(n + 3) * 4: (n + 4) * 4]
+
+                        pt3['linear']['low_wide']['corr_table'] = corr_data[:, 4:8]
+                        pt3['linear']['low_wide']['noise_floor'] = rssi_array[(n + 4) * 4: (n + 5) * 4]
+
+                        pt3['linear']['high_narrow']['corr_table'] = corr_data[:, 8:12]
+                        pt3['linear']['high_narrow']['noise_floor'] = rssi_array[(n + 5) * 4: (n + 6) * 4]
+
+                        pt3['linear']['low_narrow']['corr_table'] = corr_data[:, 12::]
+                        pt3['linear']['low_narrow']['noise_floor'] = rssi_array[(n + 6) * 4: (n + 7) * 4]
+            self.result['pt3'] = pt3
+        except Exception:
+            # If the PT3 output cannot be parsed, no 'pt3' entry is added to the results
+            pass
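+
+
+# Minimal usage sketch with synthetic data (not part of the original QRev code):
+# sys_test_read counts pass/fail keywords in the raw test text, and compass_read
+# extracts the trailing error value from a compass evaluation report.
+def _premeasurement_demo():
+    tst = PreMeasurement()
+    tst.populate_data('10/15/2020 10:30', 'Test 1 PASS\nTest 2 FAIL\nTest 3 PASS', 'TST')
+    assert tst.result['sysTest']['n_tests'] == 3
+    assert tst.result['sysTest']['n_failed'] == 1
+
+    comp = PreMeasurement()
+    comp.time_stamp = '10/15/2020 10:35'
+    comp.data = 'Compass evaluation\nTotal error: 0.2 deg'
+    comp.compass_read()
+    assert comp.result['compass']['error'] == 0.2
+    return tst, comp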
diff --git a/Classes/Python2Matlab.py b/Classes/Python2Matlab.py
new file mode 100644
index 0000000000000000000000000000000000000000..13d957081c6f6c251ce56748f87321e4a296dba6
--- /dev/null
+++ b/Classes/Python2Matlab.py
@@ -0,0 +1,725 @@
+import numpy as np
+import pandas as pd
+import scipy.io as sio
+import copy as copy
+from Classes.PreMeasurement import PreMeasurement
+
+
+class Python2Matlab(object):
+    """Converts python meas class to QRev for Matlab structure.
+
+    Attributes
+    ----------
+    matlab_dict: dict
+        Dictionary of Matlab structures
+    """
+
+    def __init__(self, meas, checked):
+        """Initialize dictionaries and convert Python data to Matlab structures.
+
+        Parameters
+        ----------
+        meas: Measurement
+            Object of class Measurement
+        checked: list
+            Identifies which transects should be saved
+        """
+
+        # Create Python to Matlab variable name conversion dictionary
+        py_2_mat_dict = self.create_py_2_mat_dict()
+
+        # Initialize Matlab dictionary
+        self.matlab_dict = dict()
+
+        # Apply conversion of Python data to be compatible with Matlab conventions
+        meas_mat = self.data2matlab(meas)
+
+        checked_idx = np.array(checked)
+        checked_idx_meas = np.copy(checked_idx)
+        checked_idx_meas = np.append(checked_idx_meas, len(meas_mat.extrap_fit.sel_fit) - 1)
+
+        # Convert Python data structure to Matlab
+        self.matlab_dict['stationName'] = meas_mat.station_name
+        if self.matlab_dict['stationName'] is None:
+            self.matlab_dict['stationName'] = ''
+        self.matlab_dict['stationNumber'] = meas_mat.station_number
+        if self.matlab_dict['stationNumber'] is None:
+            self.matlab_dict['stationNumber'] = ''
+        self.matlab_dict['persons'] = meas_mat.persons
+        self.matlab_dict['meas_number'] = meas_mat.meas_number
+        self.matlab_dict['stage_start_m'] = meas_mat.stage_start_m
+        self.matlab_dict['stage_end_m'] = meas_mat.stage_end_m
+        self.matlab_dict['stage_meas_m'] = meas_mat.stage_meas_m
+        self.matlab_dict['processing'] = meas_mat.processing
+        self.matlab_dict['extTempChk'] = meas_mat.ext_temp_chk
+        self.matlab_dict['userRating'] = meas_mat.user_rating
+        self.matlab_dict['initialSettings'] = meas_mat.initial_settings
+        self.matlab_dict['comments'] = self.comment2struct(meas_mat.comments)
+        self.matlab_dict['compassCal'] = self.listobj2struct(meas_mat.compass_cal, py_2_mat_dict)
+        self.matlab_dict['compassEval'] = self.listobj2struct(meas_mat.compass_eval, py_2_mat_dict)
+        self.matlab_dict['sysTest'] = self.listobj2struct(meas_mat.system_tst, py_2_mat_dict)
+        discharge = np.copy(meas_mat.discharge)
+        discharge_sel = [discharge[i] for i in checked_idx]
+        self.matlab_dict['discharge'] = self.listobj2struct(discharge_sel, py_2_mat_dict)
+        transects = np.copy(meas_mat.transects)
+        transects_sel = [transects[i] for i in checked_idx]
+        self.matlab_dict['transects'] = self.listobj2struct(transects_sel, py_2_mat_dict)
+        extrap = copy.deepcopy(meas_mat.extrap_fit)
+        self.matlab_dict['extrapFit'] = self.listobj2struct([extrap], py_2_mat_dict)
+        # Check for multiple moving-bed tests
+        if type(meas_mat.mb_tests) == list:
+            mb_tests = self.listobj2struct(meas_mat.mb_tests, py_2_mat_dict)
+        else:
+            mb_tests = self.obj2dict(meas_mat.mb_tests, py_2_mat_dict)
+        if len(mb_tests) == 0:
+            mb_tests = np.array([])
+
+        self.matlab_dict['mbTests'] = mb_tests
+
+        self.matlab_dict['observed_no_moving_bed'] = meas_mat.observed_no_moving_bed
+
+        self.matlab_dict['uncertainty'] = self.listobj2struct([meas_mat.uncertainty], py_2_mat_dict)
+        self.matlab_dict['qa'] = self.listobj2struct([meas_mat.qa], py_2_mat_dict)
+        self.matlab_dict['run_oursin'] = meas_mat.run_oursin
+        if meas_mat.oursin is not None:
+            self.matlab_dict['oursin'] = self.listobj2struct([meas_mat.oursin], py_2_mat_dict)
+
+    @staticmethod
+    def listobj2struct(list_in, new_key_dict=None):
+        """Converts a list of objects to a structured array.
+
+        Parameters
+        ----------
+        list_in: list
+            List of objects
+        new_key_dict: dict
+            Dictionary to translate python variable names to Matlab variable names
+
+        Returns
+        -------
+        struct: np.array
+            Structured array
+        """
+
+        # Verify that list_in exists
+        if list_in:
+
+            # Create data type for each variable in object
+            keys = list(vars(list_in[0]).keys())
+            data_type = []
+            for key in keys:
+                if new_key_dict is not None and key in new_key_dict:
+                    if new_key_dict[key] is None:
+                        # A missing Matlab name falls back to the Python name
+                        data_type.append((key, list))
+                    else:
+                        data_type.append((new_key_dict[key], list))
+                else:
+                    data_type.append((key, list))
+
+            # Create structured array based on data type and length of list
+            dt = np.dtype(data_type)
+            struct = np.zeros((len(list_in),), dt)
+
+            # Populate the structure with data from the objects
+            for n, item in enumerate(list_in):
+
+                if type(item) is list:
+                    # If item is a list apply recursion
+                    struct = Python2Matlab.listobj2struct(item, new_key_dict)
+                else:
+                    # If item is not a list convert it to a dictionary
+                    new_dict = Python2Matlab.obj2dict(item, new_key_dict)
+                    # Change name for consistency with Matlab if necessary
+                    for key in new_dict:
+                        if new_key_dict is not None and key in new_key_dict:
+                            struct[new_key_dict[key]][n] = new_dict[key]
+                        else:
+                            struct[key][n] = new_dict[key]
+        else:
+            struct = np.array([np.nan])
+
+        return struct
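+
+    # Illustrative sketch (not part of the original QRev code): the structured-
+    # array pattern used by listobj2struct above, in isolation. Each (name, list)
+    # tuple becomes an object field that can hold arbitrary Python data.
+    @staticmethod
+    def _struct_array_demo():
+        dt = np.dtype([('fileName', list), ('navRef', list)])
+        struct = np.zeros((2,), dt)
+        struct['fileName'][0] = 'transect_1.mmt'
+        struct['navRef'][0] = 'BT'
+        return struct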
+
+    @staticmethod
+    def change_dict_keys(dict_in, new_key_dict):
+        """Recursively changes the name of dictionary keys and checks for str data types and converts them to arrays.
+
+        Parameters
+        ----------
+        dict_in: dict
+            Dictionary with keys that need a name change
+        new_key_dict: dict
+            Dictionary to cross reference existing key to new key names
+        """
+
+        dict_out = dict()
+
+        for key in dict_in:
+            # Iterate on nested dictionaries
+            if type(dict_in[key]) is dict:
+                dict_in[key] = Python2Matlab.change_dict_keys(dict_in[key], new_key_dict)
+
+            # If a list contains str variables, such as messages, convert each string to an array
+            if type(dict_in[key]) is list:
+                for line in range(len(dict_in[key])):
+                    if type(dict_in[key][line]) is list:
+                        for col in range(len(dict_in[key][line])):
+                            if type(dict_in[key][line][col]) is str:
+                                dict_in[key][line][col] = np.array([list(dict_in[key][line][col])])
+
+            # Change key if needed
+            if new_key_dict is not None and key in new_key_dict:
+                dict_out[new_key_dict[key]] = dict_in[key]
+            else:
+                dict_out[key] = dict_in[key]
+
+        return dict_out
+
+    @staticmethod
+    def obj2dict(obj, new_key_dict=None):
+        """Converts object variables to dictionaries. Works recursively to all levels of objects.
+
+        Parameters
+        ----------
+        obj: object
+            Object of some class
+        new_key_dict: dict
+            Dictionary to translate python variable names to Matlab variable names
+
+        Returns
+        -------
+        obj_dict: dict
+            Dictionary of all object variables
+        """
+        obj_dict = vars(obj)
+        new_dict = dict()
+        for key in obj_dict:
+
+            # If variable is an object from the Classes package, convert it recursively
+            # (str(type(...)) reads "<class 'Classes...", so characters 8:13 are 'Class')
+            if str(type(obj_dict[key]))[8:13] == 'Class':
+                obj_dict[key] = Python2Matlab.obj2dict(obj_dict[key], new_key_dict)
+
+            # If variable is a list of objects convert to dictionary
+            elif type(obj_dict[key]) is list and len(obj_dict[key]) > 0 \
+                    and str(type(obj_dict[key][0]))[8:13] == 'Class':
+                obj_dict[key] = Python2Matlab.listobj2struct(obj_dict[key], new_key_dict)
+
+            elif type(obj_dict[key]) is dict:
+                obj_dict[key] = Python2Matlab.change_dict_keys(obj_dict[key], new_key_dict)
+
+            elif type(obj_dict[key]) is pd.DataFrame:
+                obj_dict[key] = obj_dict[key].to_numpy()
+
+            # If variable is None rename as necessary and convert None to empty list
+            if obj_dict[key] is None:
+                if new_key_dict is not None and key in new_key_dict:
+                    new_dict[new_key_dict[key]] = []
+                else:
+                    new_dict[key] = []
+            # If variable is not None rename as necessary
+            elif new_key_dict is not None and key in new_key_dict:
+                new_dict[new_key_dict[key]] = obj_dict[key]
+            else:
+                new_dict[key] = obj_dict[key]
+
+        return new_dict
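+
+    # Minimal sketch with a hypothetical class (not part of the original QRev
+    # code): obj2dict walks an object's attributes via vars(), renames keys from
+    # the Python-to-Matlab map, and converts None values to empty lists, so this
+    # returns {'navRef': 'BT', 'station_name': []}.
+    @staticmethod
+    def _obj2dict_demo():
+        class _Demo:
+            def __init__(self):
+                self.nav_ref = 'BT'
+                self.station_name = None
+        return Python2Matlab.obj2dict(_Demo(), {'nav_ref': 'navRef'})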
+
+    @staticmethod
+    def comment2struct(comments):
+        """Convert comments to a structure.
+
+        Parameters
+        ----------
+        comments: list
+            List of comments
+
+        Returns
+        -------
+        struct: np.ndarray
+            Array of comments
+
+        """
+        struct = np.zeros((len(comments),), dtype=object)
+        cell = np.zeros((1,), dtype=object)
+        for n, line in enumerate(comments):
+            cell[0] = line
+            struct[n] = np.copy(cell)
+        return struct
+
+    @staticmethod
+    def listobj2dict(list_in, new_key_dict=None):
+        """Converts list of objects to list of dictionaries. Works recursively to all levels of objects.
+
+        Parameters
+        ----------
+        list_in: list
+            List of objects of some class
+        new_key_dict: dict
+            Dictionary to translate python variable names to Matlab variable names
+
+        Returns
+        -------
+        new_list: list
+            List of dictionaries
+        """
+        new_list = []
+        for obj in list_in:
+            new_list.append(Python2Matlab.obj2dict(obj, new_key_dict))
+        return new_list
+
+    @staticmethod
+    def create_py_2_mat_dict():
+        """Creates a dictionary to cross reference Python names with Matlab names
+
+        Returns
+        -------
+        py_2_mat_dict: dict
+            Dictionary of python key to Matlab variable
+        """
+
+        py_2_mat_dict = {'Python': 'Matlab',
+                         'align_correction_deg': 'alignCorrection_deg',
+                         'altitude_ens_m': 'altitudeEns_m',
+                         'avg_method': 'avgMethod',
+                         'beam_angle_deg': 'beamAngle_deg',
+                         'beam_filter': 'beamFilter',
+                         'beam_pattern': 'beamPattern',
+                         'blanking_distance_m': 'blankingDistance_m',
+                         'boat_vel': 'boatVel',
+                         'bot_diff': 'botdiff',
+                         'bot_method': 'botMethod',
+                         'bot_method_auto': 'botMethodAuto',
+                         'bot_method_orig': 'botMethodOrig',
+                         'bot_r2': 'botrsqr',
+                         'bottom_ens': 'bottomEns',
+                         'bottom_mode': 'bottomMode',
+                         'bt_depths': 'btDepths',
+                         'bt_vel': 'btVel',
+                         'cell_depth_normalized': 'cellDepthNormalized',
+                         'cells_above_sl': 'cellsAboveSL',
+                         'cells_above_sl_bt': 'cellsAboveSLbt',
+                         'compass_cal': 'compassCal',
+                         'compass_diff_deg': 'compassDiff_deg',
+                         'compass_eval': 'compassEval',
+                         'configuration_commands': 'configurationCommands',
+                         'coord_sys': 'coordSys',
+                         'corr_table': 'corrTable',
+                         'correction_factor': 'correctionFactor',
+                         'cov_95': 'cov95',
+                         'cov_95_user': 'cov95User',
+                         'cust_coef': 'custCoef',
+                         'd_filter': 'dFilter',
+                         'd_filter_thresholds': 'dFilterThreshold',
+                         'data_extent': 'dataExtent',
+                         'data_orig': 'dataOrig',
+                         'data_type': 'dataType',
+                         'date_time': 'dateTime',
+                         'depth_beams_m': 'depthBeams_m',
+                         'depth_cell_depth_m': 'depthCellDepth_m',
+                         'depth_cell_depth_orig_m': 'depthCellDepthOrig_m',
+                         'depth_cell_size_m': 'depthCellSize_m',
+                         'depth_cell_size_orig_m': 'depthCellSizeOrig_m',
+                         'depth_depth_m': 'depthCellDepth_m',
+                         'depth_source_ens': 'depthSourceEns',
+                         'depth_freq_kHz': 'depthFreq_Hz',
+                         'depth_invalid_index': 'depthInvalidIndex',
+                         'depth_orig_m': 'depthOrig_m',
+                         'depth_processed_m': 'depthProcessed_m',
+                         'depth_source': 'depthSource',
+                         'depths': 'depths',
+                         'diff_qual_ens': 'diffQualEns',
+                         'dist_us_m': 'distUS_m',
+                         'distance_m': 'dist_m',
+                         'draft_orig_m': 'draftOrig_m',
+                         'draft_use_m': 'draftUse_m',
+                         'ds_depths': 'dsDepths',
+                         'edges_95': 'edges95',
+                         'edges_95_user': 'edges95User',
+                         'end_serial_time': 'endSerialTime',
+                         'ens_duration_sec': 'ensDuration_sec',
+                         'excluded_dist_m': 'excludedDist',
+                         'exp_method': 'expMethod',
+                         'exponent_95_ci': 'exponent95confint',
+                         'exponent_auto': 'exponentAuto',
+                         'exponent_orig': 'exponentOrig',
+                         'ext_gga_altitude_m': 'extGGAAltitude_m',
+                         'ext_gga_differential': 'extGGADifferential',
+                         'ext_gga_hdop': 'extGGAHDOP',
+                         'ext_gga_lat_deg': 'extGGALat_deg',
+                         'ext_gga_lon_deg': 'extGGALon_deg',
+                         'ext_gga_num_sats': 'extGGANumSats',
+                         'ext_gga_serial_time': 'extGGASerialTime',
+                         'ext_gga_utc': 'extGGAUTC',
+                         'ext_temp_chk': 'extTempChk',
+                         'ext_vtg_course_deg': 'extVTGCourse_deg',
+                         'ext_vtg_speed_mps': 'extVTGSpeed_mps',
+                         'extrap_fit': 'extrapFit',
+                         'extrapolation_95': 'extrapolation95',
+                         'extrapolation_95_user': 'extrapolation95User',
+                         'file_name': 'fileName',
+                         'filter_type': 'filterType',
+                         'fit_method': 'fitMethod',
+                         'fit_r2': 'fitrsqr',
+                         'flow_dir_deg': 'flowDir_deg',
+                         'flow_dir': 'flowDir_deg',
+                         'flow_spd_mps': 'flowSpd_mps',
+                         'frequency_khz': 'frequency_hz',
+                         'gga_lat_ens_deg': 'ggaLatEns_deg',
+                         'gga_lon_ens_deg': 'ggaLonEns_deg',
+                         'gga_position_method': 'ggaPositionMethod',
+                         'gga_serial_time_ens': 'ggaSerialTimeEns',
+                         'gga_vel': 'ggaVel',
+                         'gga_velocity_ens_mps': 'ggaVelocityEns_mps',
+                         'gga_velocity_method': 'ggaVelocityMethod',
+                         'gps_HDOP_filter': 'gpsHDOPFilter',
+                         'gps_HDOP_filter_change': 'gpsHDOPFilterChange',
+                         'gps_HDOP_filter_max': 'gpsHDOPFilterMax',
+                         'gps_altitude_filter': 'gpsAltitudeFilter',
+                         'gps_altitude_filter_change': 'gpsAltitudeFilterChange',
+                         'gps_diff_qual_filter': 'gpsDiffQualFilter',
+                         'hard_limit': 'hardLimit',
+                         'hdop_ens': 'hdopEns',
+                         'high_narrow': 'hn',
+                         'high_wide': 'hw',
+                         'in_transect_idx': 'inTransectIdx',
+                         'initial_settings': 'initialSettings',
+                         'int_cells': 'intCells',
+                         'int_ens': 'intEns',
+                         'interp_type': 'interpType',
+                         'interpolate_cells': 'interpolateCells',
+                         'interpolate_ens': 'interpolateEns',
+                         'invalid_95': 'invalid95',
+                         'invalid_index': 'invalidIndex',
+                         'invalid_95_user': 'invalid95User',
+                         'left_idx': 'leftidx',
+                         'low_narrow': 'ln',
+                         'low_wide': 'lw',
+                         'mag_error': 'magError',
+                         'mag_var_orig_deg': 'magVarOrig_deg',
+                         'mag_var_deg': 'magVar_deg',
+                         'man_bot': 'manBot',
+                         'man_exp': 'manExp',
+                         'man_top': 'manTop',
+                         'mb_dir': 'mbDir_deg',
+                         'mb_spd_mps': 'mbSpd_mps',
+                         'mb_tests': 'mbTests',
+                         'meas': 'meas_struct',
+                         'middle_cells': 'middleCells',
+                         'middle_ens': 'middleEns',
+                         'moving_bed': 'movingBed',
+                         'moving_bed_95': 'movingBed95',
+                         'moving_bed_95_user': 'movingBed95User',
+                         'n_failed': 'nFailed',
+                         'n_tests': 'nTests',
+                         'nav_ref': 'navRef',
+                         'near_bed_speed_mps': 'nearBedSpeed_mps',
+                         'noise_floor': 'noiseFloor',
+                         'norm_data': 'normData',
+                         'ns_exp': 'nsExponent',
+                         'ns_exponent': 'nsexponent',
+                         'num_invalid': 'numInvalid',
+                         'num_sats_ens': 'numSatsEns',
+                         'number_ensembles': 'numEns2Avg',
+                         'orig_coord_sys': 'origCoordSys',
+                         'orig_ref': 'origNavRef',
+                         'orig_nav_ref': 'origNavRef',
+                         'orig_sys': 'origCoordSys',
+                         'original_data': 'originalData',
+                         'per_good_ens': 'perGoodEns',
+                         'percent_invalid_bt': 'percentInvalidBT',
+                         'percent_mb': 'percentMB',
+                         'pitch_limit': 'pitchLimit',
+                         'pp_exp': 'ppExponent',
+                         'pp_exponent': 'ppexponent',
+                         'processed_source': 'processedSource',
+                         'q_cns_mean': 'qCNSmean',
+                         'q_cns_opt_mean': 'qCNSoptmean',
+                         'q_cns_opt_per_diff': 'qCNSoptperdiff',
+                         'q_cns_per_diff': 'qCNSperdiff',
+                         'q_man_mean': 'qManmean',
+                         'q_man_per_diff': 'qManperdiff',
+                         'q_3p_ns_mean': 'q3pNSmean',
+                         'q_3p_ns_opt_mean': 'q3pNSoptmean',
+                         'q_3p_ns_opt_per_diff': 'q3pNSoptperdiff',
+                         'q_3p_ns_per_diff': 'q3pNSperdiff',
+                         'q_pp_mean': 'qPPmean',
+                         'q_pp_opt_mean': 'qPPoptmean',
+                         'q_pp_opt_per_diff': 'qPPoptperdiff',
+                         'q_pp_per_diff': 'qPPperdiff',
+                         'q_run_threshold_caution': 'qRunThresholdCaution',
+                         'q_run_threshold_warning': 'qRunThresholdWarning',
+                         'q_sensitivity': 'qSensitivity',
+                         'q_total_threshold_caution': 'qTotalThresholdWarning',
+                         'q_total_threshold_warning': 'qTotalThresholdCaution',
+                         'raw_gga_altitude_m': 'rawGGAAltitude_m',
+                         'raw_gga_delta_time': 'rawGGADeltaTime',
+                         'raw_gga_differential': 'rawGGADifferential',
+                         'raw_gga_hdop': 'rawGGAHDOP',
+                         'raw_gga_lat_deg': 'rawGGALat_deg',
+                         'raw_gga_lon_deg': 'rawGGALon_deg',
+                         'raw_gga_serial_time': 'rawGGASerialTime',
+                         'raw_gga_utc': 'rawGGAUTC',
+                         'raw_gga_num_sats': 'rawGGANumSats',
+                         'raw_vel_mps': 'rawVel_mps',
+                         'raw_vtg_course_deg': 'rawVTGCourse_deg',
+                         'raw_vtg_delta_time': 'rawVTGDeltaTime',
+                         'raw_vtg_mode_indicator': 'rawVTGModeIndicator',
+                         'raw_vtg_speed_mps': 'rawVTGSpeed_mps',
+                         'rec_edge_method': 'recEdgeMethod',
+                         'right_idx': 'rightidx',
+                         'roll_limit': 'rollLimit',
+                         'rssi_units': 'rssiUnits',
+                         'sel_fit': 'selFit',
+                         'serial_num': 'serialNum',
+                         'sl_lag_effect_m': 'slLagEffect_m',
+                         'sl_cutoff_number': 'slCutoffNum',
+                         'sl_cutoff_percent': 'slCutoffPer',
+                         'sl_cutoff_type': 'slCutoffType',
+                         'sl_cutoff_m': 'slCutoff_m',
+                         'smooth_depth': 'smoothDepth',
+                         'smooth_filter': 'smoothFilter',
+                         'smooth_lower_limit': 'smoothLowerLimit',
+                         'smooth_speed': 'smoothSpeed',
+                         'smooth_upper_limit': 'smoothUpperLimit',
+                         'snr_filter': 'snrFilter',
+                         'speed_of_sound_mps': 'speedOfSound_mps',
+                         'snr_rng': 'snrRng',
+                         'start_edge': 'startEdge',
+                         'start_serial_time': 'startSerialTime',
+                         'station_name': 'stationName',
+                         'station_number': 'stationNumber',
+                         'stationary_cs_track': 'stationaryCSTrack',
+                         'stationary_mb_vel': 'stationaryMBVel',
+                         'stationary_us_track': 'stationaryUSTrack',
+                         'system_test': 'sysTest',
+                         'system_tst': 'systemTest',
+                         'systematic_user': 'systematicUser',
+                         't_matrix': 'tMatrix',
+                         'temperature': 'temperature',
+                         'temperature_deg_c': 'temperature_degC',
+                         'test_quality': 'testQuality',
+                         'time_stamp': 'timeStamp',
+                         'top_ens': 'topEns',
+                         'top_fit_r2': 'topfitr2',
+                         'top_max_diff': 'topmaxdiff',
+                         'top_method': 'topMethod',
+                         'top_method_auto': 'topMethodAuto',
+                         'top_method_orig': 'topMethodOrig',
+                         'top_r2': 'topr2',
+                         'total_95': 'total95',
+                         'total_uncorrected': 'totalUncorrected',
+                         'total_95_user': 'total95User',
+                         'transect_duration_sec': 'transectDuration_sec',
+                         'u_auto': 'uAuto',
+                         'u_processed_mps': 'uProcessed_mps',
+                         'u_earth_no_ref_mps': 'uEarthNoRef_mps',
+                         'unit_normalized_z': 'unitNormalizedz',
+                         'unit_normalized': 'unitNormalized',
+                         'unit_normalized_25': 'unitNormalized25',
+                         'unit_normalized_75': 'unitNormalized75',
+                         'unit_normalized_med': 'unitNormalizedMed',
+                         'unit_normalized_no': 'unitNormalizedNo',
+                         'use_2_correct': 'use2Correct',
+                         'user_discharge_cms': 'userQ_cms',
+                         'user_rating': 'userRating',
+                         'user_valid': 'userValid',
+                         'utm_ens_m': 'UTMEns_m',
+                         'v_processed_mps': 'vProcessed_mps',
+                         'v_earth_no_ref_mps': 'vEarthNoRef_mps',
+                         'valid_beams': 'validBeams',
+                         'valid_data': 'validData',
+                         'valid_data_method': 'validDataMethod',
+                         'vb_depths': 'vbDepths',
+                         'vel_method': 'velMethod',
+                         'vtg_vel': 'vtgVel',
+                         'vtg_velocity_ens_mps': 'vtgVelocityEns_mps',
+                         'vtg_velocity_method': 'vtgVelocityMethod',
+                         'w_filter': 'wFilter',
+                         'w_filter_thresholds': 'wFilterThreshold',
+                         'w_vel': 'wVel',
+                         'water_mode': 'waterMode',
+                         'wt_depth_filter': 'wtDepthFilter',
+                         'z_auto': 'zAuto',
+                         'all_invalid': 'allInvalid',
+                         'q_max_run': 'qMaxRun',
+                         'q_max_run_caution': 'qRunCaution',
+                         'q_max_run_warning': 'qRunWarning',
+                         'q_total': 'qTotal',
+                         'q_total_caution': 'qTotalCaution',
+                         'q_total_warning': 'qTotalWarning',
+                         'sta_name': 'staName',
+                         'sta_number': 'staNumber',
+                         'left_q': 'leftQ',
+                         'left_q_idx': 'leftQIdx',
+                         'right_q': 'rightQ',
+                         'right_q_idx': 'rightQIdx',
+                         'left_sign': 'leftSign',
+                         'right_sign': 'rightSign',
+                         'right_dist_moved_idx': 'rightDistMovedIdx',
+                         'left_dist_moved_idx': 'leftDistMovedIdx',
+                         'left_zero': 'leftzero',
+                         'left_zero_idx': 'leftZeroIdx',
+                         'right_zero': 'rightzero',
+                         'right_zero_idx': 'rightZeroIdx',
+                         'left_type': 'leftType',
+                         'right_type': 'rightType',
+                         'pitch_mean_warning_idx': 'pitchMeanWarningIdx',
+                         'pitch_mean_caution_idx': 'pitchMeanCautionIdx',
+                         'pitch_std_caution_idx': 'pitchStdCautionIdx',
+                         'roll_mean_warning_idx': 'rollMeanWarningIdx',
+                         'roll_mean_caution_idx': 'rollMeanCautionIdx',
+                         'roll_std_caution_idx': 'rollStdCautionIdx',
+                         'magvar_idx': 'magvarIdx',
+                         'mag_error_idx': 'magErrorIdx',
+                         'invalid_transect_left_idx': 'invalidTransLeftIdx',
+                         'invalid_transect_right_idx': 'invalidTransRightIdx',
+                         }
+        return py_2_mat_dict
+
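+    # Lookup sketch (illustration only): when converting attribute names, the Matlab
+    # field name is taken from the dictionary above, falling back to the Python name
+    # when no entry exists ('py_name' is a hypothetical variable):
+    #
+    #     mat_name = py_2_mat_dict.get(py_name, py_name)
+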
+    @staticmethod
+    def save_matlab_file(meas, file_name, version, checked=None):
+        """Saves the measurement class and all data into a Matlab file using the variable names and structure
+        from the QRev Matlab version.
+
+        Parameters
+        ----------
+        meas: Measurement
+            Object of class Measurement
+        file_name: str
+            File name of saved Matlab file
+        version: str
+            QRev version
+        checked: list
+            Identifies which transects should be saved.
+        """
+
+        if checked is None:
+            checked = list(range(len(meas.transects)))
+
+        # Convert Python objects to Matlab structure
+        mat_struct = {'meas_struct': Python2Matlab(meas, checked).matlab_dict, 'version': version}
+        sio.savemat(file_name=file_name,
+                    mdict=mat_struct,
+                    appendmat=True,
+                    format='5',
+                    long_field_names=True,
+                    do_compression=True,
+                    oned_as='row')
+
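+    # Usage sketch (hypothetical file name; 'meas' is a populated Measurement):
+    #
+    #     Python2Matlab.save_matlab_file(meas, 'site_0001_QRev.mat', version='QRevPy')
+    #
+    # Only the transects whose indices are listed in 'checked' are written; by
+    # default all transects in the measurement are included.
+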
+    @staticmethod
+    def data2matlab(meas):
+        """Apply changes to the Python data to replicate QRev for Matlab conventions.
+
+        Parameters
+        ----------
+        meas: Measurement
+            object of class Measurement
+
+        Returns
+        -------
+        meas_mat: Measurement
+            Deepcopy of meas with changes to replicate QRev for Matlab conventions
+        """
+
+        # Make copy to prevent changing Python meas data
+        meas_mat = copy.deepcopy(meas)
+
+        # Process changes for each transect (reconfigure_transect mutates the transect in place)
+        for transect in meas_mat.transects:
+            Python2Matlab.reconfigure_transect(transect)
+
+        # Process changes for each moving-bed test transect
+        if len(meas.mb_tests) > 0:
+            for test in meas_mat.mb_tests:
+                test.transect = Python2Matlab.reconfigure_transect(test.transect)
+
+        # Reshape 1-D extrapolation arrays to 2-D column vectors (n x 1) for Matlab
+        for fit in meas_mat.extrap_fit.sel_fit:
+            if fit.u is None:
+                fit.u = np.nan
+                fit.z = np.nan
+            else:
+                fit.u = fit.u.reshape(-1, 1)
+                fit.u_auto = fit.u_auto.reshape(-1, 1)
+                fit.z = fit.z.reshape(-1, 1)
+                fit.z_auto = fit.z_auto.reshape(-1, 1)
+
+        # Adjust norm_data indices from 0 base to 1 base
+        for dat in meas_mat.extrap_fit.norm_data:
+            dat.valid_data = dat.valid_data + 1
+
+        # If system tests, compass calibrations, or compass evaluations don't exist, create empty objects
+        if len(meas_mat.system_tst) == 0:
+            meas_mat.system_tst = [PreMeasurement()]
+        if len(meas_mat.compass_eval) == 0:
+            meas_mat.compass_eval = [PreMeasurement()]
+        if len(meas_mat.compass_cal) == 0:
+            meas_mat.compass_cal = [PreMeasurement()]
+
+        # If there is only one moving-bed test, change from list to MovingBedTest object
+        if len(meas_mat.mb_tests) == 1:
+            meas_mat.mb_tests = meas_mat.mb_tests[0]
+            # Convert message to cell array for Matlab
+            if len(meas_mat.mb_tests.messages) > 0:
+                meas_mat.mb_tests.messages = np.array(meas_mat.mb_tests.messages).astype(object)
+
+        # Fix user and adcp temperature for QRev Matlab
+        if np.isnan(meas_mat.ext_temp_chk['user']):
+            meas_mat.ext_temp_chk['user'] = ''
+        if np.isnan(meas_mat.ext_temp_chk['adcp']):
+            meas_mat.ext_temp_chk['adcp'] = ''
+
+        return meas_mat
+
+    @staticmethod
+    def reconfigure_transect(transect):
+        """Changes variable names, rearranges arrays, and adjusts time for consistency with original QRev Matlab output.
+
+        Parameters
+        ----------
+        transect: TransectData
+            Object of TransectData
+
+        Returns
+        -------
+        transect: TransectData
+            Revised object of TransectData
+        """
+
+        # Change selected boat velocity identification
+        if transect.boat_vel.selected == 'bt_vel':
+            transect.boat_vel.selected = 'btVel'
+        elif transect.boat_vel.selected == 'gga_vel':
+            transect.boat_vel.selected = 'ggaVel'
+        elif transect.boat_vel.selected == 'vtg_vel':
+            transect.boat_vel.selected = 'vtgVel'
+
+        # Change selected depth identification
+        if transect.depths.selected == 'bt_depths':
+            transect.depths.selected = 'btDepths'
+        elif transect.depths.selected == 'vb_depths':
+            transect.depths.selected = 'vbDepths'
+        elif transect.depths.selected == 'ds_depths':
+            transect.depths.selected = 'dsDepths'
+
+        # Adjust in-transect index from 0-based (Python) to 1-based (Matlab)
+        transect.in_transect_idx = transect.in_transect_idx + 1
+
+        # Adjust arrangement of 3-D arrays for consistency with Matlab
+        transect.w_vel.raw_vel_mps = np.moveaxis(transect.w_vel.raw_vel_mps, 0, 2)
+        transect.w_vel.corr = np.moveaxis(transect.w_vel.corr, 0, 2)
+        transect.w_vel.rssi = np.moveaxis(transect.w_vel.rssi, 0, 2)
+        transect.w_vel.valid_data = np.moveaxis(transect.w_vel.valid_data, 0, 2)
+        if len(transect.adcp.t_matrix.matrix.shape) == 3:
+            transect.adcp.t_matrix.matrix = np.moveaxis(transect.adcp.t_matrix.matrix, 2, 0)
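+        # Shape sketch: np.moveaxis(a, 0, 2) turns an array of shape (i, j, k) into
+        # (j, k, i); e.g. a (4, n_cells, n_ens) water velocity array becomes
+        # (n_cells, n_ens, 4) to match the Matlab arrangement.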
+
+        # Reshape configuration commands to a 2-D column vector (n x 1) for Matlab
+        if transect.adcp.configuration_commands is not None:
+            transect.adcp.configuration_commands = transect.adcp.configuration_commands.reshape(-1, 1)
+
+        # Adjust serial time to Matlab convention (datenum days rather than Unix seconds)
+        seconds_day = 86400
+        time_correction = 719529.0000000003
+        transect.date_time.start_serial_time = (transect.date_time.start_serial_time / seconds_day) \
+            + time_correction
+        transect.date_time.end_serial_time = (transect.date_time.end_serial_time / seconds_day) + time_correction
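+        # Worked example: a Unix time of 0 s (1970-01-01 00:00 UTC) maps to
+        # 0 / 86400 + 719529 = 719529, the Matlab datenum for 01-Jan-1970, and
+        # one day later (86400 s) maps to 719530.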
+        return transect
diff --git a/Classes/QAData.py b/Classes/QAData.py
new file mode 100644
index 0000000000000000000000000000000000000000..05c49d727c4b921d6baad0ef36f844c87351cf29
--- /dev/null
+++ b/Classes/QAData.py
@@ -0,0 +1,2525 @@
+import copy
+import numpy as np
+from Classes.Uncertainty import Uncertainty
+from Classes.QComp import QComp
+from Classes.MovingBedTests import MovingBedTests
+from Classes.TransectData import TransectData
+
+
+class QAData(object):
+    """Evaluates and stores quality assurance characteristics and messages.
+
+    Attributes
+    ----------
+    q_run_threshold_caution: int
+        Caution threshold for interpolated discharge for a run of invalid ensembles, in percent.
+    q_run_threshold_warning: int
+        Warning threshold for interpolated discharge for a run of invalid ensembles, in percent.
+    q_total_threshold_caution: int
+        Caution threshold for total interpolated discharge for invalid ensembles, in percent.
+    q_total_threshold_warning: int
+        Warning threshold for total interpolated discharge for invalid ensembles, in percent.
+    transects: dict
+        Dictionary of quality assurance checks for transects
+    system_tst: dict
+        Dictionary of quality assurance checks on the system test(s)
+    compass: dict
+        Dictionary of quality assurance checks on compass calibration and evaluations
+    temperature: dict
+        Dictionary of quality assurance checks on temperature comparisons and variation
+    movingbed: dict
+        Dictionary of quality assurance checks on moving-bed tests
+    user: dict
+        Dictionary of quality assurance checks on user input data
+    boat: dict
+        Dictionary of quality assurance checks on boat velocities
+    bt_vel: dict
+        Dictionary of quality assurance checks on bottom track velocities
+    gga_vel: dict
+        Dictionary of quality assurance checks on gga boat velocities
+    vtg_vel: dict
+        Dictionary of quality assurance checks on vtg boat velocities
+    w_vel: dict
+        Dictionary of quality assurance checks on water track velocities
+    extrapolation: dict
+        Dictionary of quality assurance checks on extrapolations
+    edges: dict
+        Dictionary of quality assurance checks on edges
+    depths: dict
+        Dictionary of quality assurance checks on depths
+    settings_dict: dict
+        Dictionary of settings status for each of the settings tabs
+    """
+
+    def __init__(self, meas, mat_struct=None, compute=True):
+        """Checks the measurement for all quality assurance issues.
+
+        Parameters
+        ----------
+        meas: Measurement
+            Object of class Measurement
+        mat_struct: mat_struct
+            Matlab data structure obtained from sio.loadmat, used only when compute is False
+        compute: bool
+            Determines if the QA checks are computed (True) or populated from mat_struct (False)
+        """
+
+        # Set default thresholds
+        self.q_run_threshold_caution = 3
+        self.q_run_threshold_warning = 5
+        self.q_total_threshold_caution = 10
+        self.q_total_threshold_warning = 25
+
+        # Initialize instance variables
+        self.transects = dict()
+        self.system_tst = dict()
+        self.compass = dict()
+        self.temperature = dict()
+        self.movingbed = dict()
+        self.user = dict()
+        self.depths = dict()
+        self.boat = dict()
+        self.bt_vel = dict()
+        self.gga_vel = dict()
+        self.vtg_vel = dict()
+        self.w_vel = dict()
+        self.extrapolation = dict()
+        self.edges = dict()
+        self.settings_dict = dict()
+        self.settings_dict['tab_compass'] = 'Default'
+        self.settings_dict['tab_tempsal'] = 'Default'
+        self.settings_dict['tab_mbt'] = 'Default'
+        self.settings_dict['tab_bt'] = 'Default'
+        self.settings_dict['tab_gps'] = 'Default'
+        self.settings_dict['tab_depth'] = 'Default'
+        self.settings_dict['tab_wt'] = 'Default'
+        self.settings_dict['tab_extrap'] = 'Default'
+        self.settings_dict['tab_edges'] = 'Default'
+
+        if compute:
+            # Apply QA checks
+            self.transects_qa(meas)
+            self.system_tst_qa(meas)
+            self.compass_qa(meas)
+            self.temperature_qa(meas)
+            self.moving_bed_qa(meas)
+            self.user_qa(meas)
+            self.depths_qa(meas)
+            self.boat_qa(meas)
+            self.water_qa(meas)
+            self.extrapolation_qa(meas)
+            self.edges_qa(meas)
+            self.check_bt_setting(meas)
+            self.check_wt_settings(meas)
+            self.check_depth_settings(meas)
+            self.check_gps_settings(meas)
+            self.check_edge_settings(meas)
+            self.check_extrap_settings(meas)
+            self.check_tempsal_settings(meas)
+            self.check_mbt_settings(meas)
+            self.check_compass_settings(meas)
+            if meas.oursin is not None:
+                self.check_oursin(meas)
+        else:
+            self.populate_from_qrev_mat(meas, mat_struct)
+
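+    # Construction sketch: QAData(meas) computes every check from the measurement;
+    # QAData(meas, mat_struct, compute=False) instead repopulates the stored
+    # results from a previously saved QRev Matlab structure.
+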
+    def populate_from_qrev_mat(self, meas, meas_struct):
+        """Populates the object using data from previously saved QRev Matlab file.
+
+        Parameters
+        ----------
+        meas: Measurement
+            Object of Measurement
+        meas_struct: mat_struct
+           Matlab data structure obtained from sio.loadmat
+        """
+
+        # Generate a new QA object from the measurement data using the current QA code.
+        # When checks computed by the current code are not available in an old QRev
+        # file, the new results are used to supplement the old file's data.
+        new_qa = QAData(meas)
+        if hasattr(meas_struct, 'qa'):
+            # Set thresholds from the Matlab data
+            self.q_run_threshold_caution = meas_struct.qa.qRunThresholdCaution
+            self.q_run_threshold_warning = meas_struct.qa.qRunThresholdWarning
+            if hasattr(meas_struct.qa, 'qTotalThresholdCaution'):
+                self.q_total_threshold_caution = meas_struct.qa.qTotalThresholdCaution
+            else:
+                self.q_total_threshold_caution = 10
+            self.q_total_threshold_warning = meas_struct.qa.qTotalThresholdWarning
+
+            # Initialize instance variables
+            self.transects = dict()
+            self.transects['duration'] = meas_struct.qa.transects.duration
+            self.transects['messages'] = self.make_list(meas_struct.qa.transects.messages)
+            self.transects['number'] = meas_struct.qa.transects.number
+            self.transects['recip'] = meas_struct.qa.transects.recip
+            self.transects['sign'] = meas_struct.qa.transects.sign
+            self.transects['status'] = meas_struct.qa.transects.status
+            self.transects['uncertainty'] = meas_struct.qa.transects.uncertainty
+            self.system_tst = dict()
+            self.system_tst['messages'] = self.make_list(meas_struct.qa.systemTest.messages)
+            self.system_tst['status'] = meas_struct.qa.systemTest.status
+            self.compass = dict()
+            self.compass['messages'] = self.make_list(meas_struct.qa.compass.messages)
+            self.compass['status'] = meas_struct.qa.compass.status
+            if hasattr(meas_struct.qa.compass, 'status1'):
+                self.compass['status1'] = meas_struct.qa.compass.status1
+                self.compass['status2'] = meas_struct.qa.compass.status2
+            else:
+                self.compass['status1'] = 'good'
+                self.compass['status2'] = 'good'
+
+            # If QA check not available, get check from new QA
+            if hasattr(meas_struct.qa.compass, 'magvar'):
+                self.compass['magvar'] = meas_struct.qa.compass.magvar
+            else:
+                self.compass['magvar'] = new_qa.compass['magvar']
+                self.compass['status'] = new_qa.compass['status']
+
+            # If QA check not available, get check from new QA
+            if hasattr(meas_struct.qa.compass, 'magvarIdx'):
+                self.compass['magvar_idx'] = self.make_array(meas_struct.qa.compass.magvarIdx)
+            else:
+                self.compass['magvar_idx'] = new_qa.compass['magvar_idx']
+                self.compass['status'] = new_qa.compass['status']
+
+            # Changed mag_error_idx from bool to int array in QRevPy
+            self.compass['mag_error_idx'] = new_qa.compass['mag_error_idx']
+            self.compass['status'] = new_qa.compass['status']
+
+            # If QA check not available, get check from new QA
+            if hasattr(meas_struct.qa.compass, 'pitchMeanWarningIdx'):
+                self.compass['pitch_mean_warning_idx'] = self.make_array(meas_struct.qa.compass.pitchMeanWarningIdx)
+            else:
+                self.compass['pitch_mean_warning_idx'] = new_qa.compass['pitch_mean_warning_idx']
+                self.compass['status'] = new_qa.compass['status']
+
+            # If QA check not available, get check from new QA
+            if hasattr(meas_struct.qa.compass, 'rollMeanWarningIdx'):
+                self.compass['roll_mean_warning_idx'] = self.make_array(meas_struct.qa.compass.rollMeanWarningIdx)
+            else:
+                self.compass['roll_mean_warning_idx'] = new_qa.compass['roll_mean_warning_idx']
+                self.compass['status'] = new_qa.compass['status']
+
+            # If QA check not available, get check from new QA
+            if hasattr(meas_struct.qa.compass, 'pitchMeanCautionIdx'):
+                self.compass['pitch_mean_caution_idx'] = self.make_array(meas_struct.qa.compass.pitchMeanCautionIdx)
+            else:
+                self.compass['pitch_mean_caution_idx'] = new_qa.compass['pitch_mean_caution_idx']
+                self.compass['status'] = new_qa.compass['status']
+
+            # If QA check not available, get check from new QA
+            if hasattr(meas_struct.qa.compass, 'rollMeanCautionIdx'):
+                self.compass['roll_mean_caution_idx'] = self.make_array(meas_struct.qa.compass.rollMeanCautionIdx)
+            else:
+                self.compass['roll_mean_caution_idx'] = new_qa.compass['roll_mean_caution_idx']
+                self.compass['status'] = new_qa.compass['status']
+
+            # If QA check not available, get check from new QA
+            if hasattr(meas_struct.qa.compass, 'pitchStdCautionIdx'):
+                self.compass['pitch_std_caution_idx'] = self.make_array(meas_struct.qa.compass.pitchStdCautionIdx)
+            else:
+                self.compass['pitch_std_caution_idx'] = new_qa.compass['pitch_std_caution_idx']
+                self.compass['status'] = new_qa.compass['status']
+
+            # If QA check not available, get check from new QA
+            if hasattr(meas_struct.qa.compass, 'rollStdCautionIdx'):
+                self.compass['roll_std_caution_idx'] = self.make_array(meas_struct.qa.compass.rollStdCautionIdx)
+            else:
+                self.compass['roll_std_caution_idx'] = new_qa.compass['roll_std_caution_idx']
+                self.compass['status'] = new_qa.compass['status']
+
+            self.temperature = dict()
+            self.temperature['messages'] = self.make_list(meas_struct.qa.temperature.messages)
+            self.temperature['status'] = meas_struct.qa.temperature.status
+            self.movingbed = dict()
+            self.movingbed['messages'] = self.make_list(meas_struct.qa.movingbed.messages)
+            self.movingbed['status'] = meas_struct.qa.movingbed.status
+            self.movingbed['code'] = meas_struct.qa.movingbed.code
+            self.user = dict()
+            self.user['messages'] = self.make_list(meas_struct.qa.user.messages)
+            self.user['sta_name'] = bool(meas_struct.qa.user.staName)
+            self.user['sta_number'] = bool(meas_struct.qa.user.staNumber)
+            self.user['status'] = meas_struct.qa.user.status
+
+            # If QA check not available, get check from new QA
+            self.depths = self.create_qa_dict(self, meas_struct.qa.depths)
+            if 'draft' not in self.depths:
+                self.depths['draft'] = new_qa.depths['draft']
+                self.depths['status'] = new_qa.depths['status']
+
+            if 'all_invalid' not in self.depths:
+                self.depths['all_invalid'] = new_qa.depths['all_invalid']
+                self.depths['status'] = new_qa.depths['status']
+
+            # If QA check not available, get check from new QA
+            self.bt_vel = self.create_qa_dict(self, meas_struct.qa.btVel, ndim=2)
+            if 'all_invalid' not in self.bt_vel:
+                self.bt_vel['all_invalid'] = new_qa.bt_vel['all_invalid']
+                self.bt_vel['status'] = new_qa.bt_vel['status']
+
+            # If QA check not available, get check from new QA
+            self.gga_vel = self.create_qa_dict(self, meas_struct.qa.ggaVel, ndim=2)
+            if 'all_invalid' not in self.gga_vel:
+                self.gga_vel['all_invalid'] = new_qa.gga_vel['all_invalid']
+            if 'lag_status' not in self.gga_vel:
+                self.gga_vel['lag_status'] = new_qa.gga_vel['lag_status']
+                self.gga_vel['status'] = new_qa.gga_vel['status']
+
+            # If QA check not available, get check from new QA
+            self.vtg_vel = self.create_qa_dict(self, meas_struct.qa.vtgVel, ndim=2)
+            if 'all_invalid' not in self.vtg_vel:
+                self.vtg_vel['all_invalid'] = new_qa.vtg_vel['all_invalid']
+            if 'lag_status' not in self.vtg_vel:
+                self.vtg_vel['lag_status'] = new_qa.vtg_vel['lag_status']
+                self.vtg_vel['status'] = new_qa.vtg_vel['status']
+
+            # If QA check not available, get check from new QA
+            self.w_vel = self.create_qa_dict(self, meas_struct.qa.wVel, ndim=2)
+            if 'all_invalid' not in self.w_vel:
+                self.w_vel['all_invalid'] = new_qa.w_vel['all_invalid']
+                self.w_vel['status'] = new_qa.w_vel['status']
+
+            self.extrapolation = dict()
+            self.extrapolation['messages'] = self.make_list(meas_struct.qa.extrapolation.messages)
+            self.extrapolation['status'] = meas_struct.qa.extrapolation.status
+            self.edges = dict()
+            self.edges['messages'] = self.make_list(meas_struct.qa.edges.messages)
+            self.edges['status'] = meas_struct.qa.edges.status
+            self.edges['left_q'] = meas_struct.qa.edges.leftQ
+            self.edges['right_q'] = meas_struct.qa.edges.rightQ
+            self.edges['left_sign'] = meas_struct.qa.edges.leftSign
+            self.edges['right_sign'] = meas_struct.qa.edges.rightSign
+            self.edges['left_zero'] = meas_struct.qa.edges.leftzero
+            self.edges['right_zero'] = meas_struct.qa.edges.rightzero
+            self.edges['left_type'] = meas_struct.qa.edges.leftType
+            self.edges['right_type'] = meas_struct.qa.edges.rightType
+
+            # If QA check not available, get check from new QA
+            if hasattr(meas_struct.qa.edges, 'rightDistMovedIdx'):
+                self.edges['right_dist_moved_idx'] = self.make_array(meas_struct.qa.edges.rightDistMovedIdx)
+            else:
+                self.edges['right_dist_moved_idx'] = new_qa.edges['right_dist_moved_idx']
+                self.edges['status'] = new_qa.edges['status']
+
+            # If QA check not available, get check from new QA
+            if hasattr(meas_struct.qa.edges, 'leftDistMovedIdx'):
+                self.edges['left_dist_moved_idx'] = self.make_array(meas_struct.qa.edges.leftDistMovedIdx)
+            else:
+                self.edges['left_dist_moved_idx'] = new_qa.edges['left_dist_moved_idx']
+                self.edges['status'] = new_qa.edges['status']
+
+            # If QA check not available, get check from new QA
+            if hasattr(meas_struct.qa.edges, 'leftQIdx'):
+                self.edges['left_q_idx'] = self.make_array(meas_struct.qa.edges.leftQIdx)
+            else:
+                self.edges['left_q_idx'] = new_qa.edges['left_q_idx']
+                self.edges['status'] = new_qa.edges['status']
+
+            # If QA check not available, get check from new QA
+            if hasattr(meas_struct.qa.edges, 'rightQIdx'):
+                self.edges['right_q_idx'] = self.make_array(meas_struct.qa.edges.rightQIdx)
+            else:
+                self.edges['right_q_idx'] = new_qa.edges['right_q_idx']
+                self.edges['status'] = new_qa.edges['status']
+
+            # If QA check not available, get check from new QA
+            if hasattr(meas_struct.qa.edges, 'leftZeroIdx'):
+                self.edges['left_zero_idx'] = self.make_array(meas_struct.qa.edges.leftZeroIdx)
+            else:
+                self.edges['left_zero_idx'] = new_qa.edges['left_zero_idx']
+                self.edges['status'] = new_qa.edges['status']
+
+            # If QA check not available, get check from new QA
+            if hasattr(meas_struct.qa.edges, 'rightZeroIdx'):
+                self.edges['right_zero_idx'] = self.make_array(meas_struct.qa.edges.rightZeroIdx)
+            else:
+                self.edges['right_zero_idx'] = new_qa.edges['right_zero_idx']
+                self.edges['status'] = new_qa.edges['status']
+
+            # If QA check not available, get check from new QA
+            if hasattr(meas_struct.qa.edges, 'invalid_transect_left_idx'):
+                self.edges['invalid_transect_left_idx'] = \
+                    self.make_array(meas_struct.qa.edges.invalid_transect_left_idx)
+            elif hasattr(meas_struct.qa.edges, 'invalidTransLeftIdx'):
+                self.edges['invalid_transect_left_idx'] = \
+                    self.make_array(meas_struct.qa.edges.invalidTransLeftIdx)
+            else:
+                self.edges['invalid_transect_left_idx'] = new_qa.edges['invalid_transect_left_idx']
+                self.edges['status'] = new_qa.edges['status']
+
+            # If QA check not available, get check from new QA
+            if hasattr(meas_struct.qa.edges, 'invalid_transect_right_idx'):
+                self.edges['invalid_transect_right_idx'] = \
+                    self.make_array(meas_struct.qa.edges.invalid_transect_right_idx)
+            elif hasattr(meas_struct.qa.edges, 'invalidTransRightIdx'):
+                self.edges['invalid_transect_right_idx'] = \
+                    self.make_array(meas_struct.qa.edges.invalidTransRightIdx)
+            else:
+                self.edges['invalid_transect_right_idx'] = new_qa.edges['invalid_transect_right_idx']
+                self.edges['status'] = new_qa.edges['status']
+
+            if hasattr(meas_struct.qa, 'settings_dict'):
+                self.settings_dict = dict()
+                # Copy each tab's status from the Matlab structure, falling back to
+                # the newly computed QA when a tab is not present in the old file
+                for tab in ('tab_compass', 'tab_tempsal', 'tab_mbt', 'tab_bt', 'tab_gps',
+                            'tab_depth', 'tab_wt', 'tab_extrap', 'tab_edges'):
+                    try:
+                        self.settings_dict[tab] = getattr(meas_struct.qa.settings_dict, tab)
+                    except AttributeError:
+                        self.settings_dict[tab] = new_qa.settings_dict[tab]
+
+    @staticmethod
+    def create_qa_dict(self, mat_data, ndim=1):
+        """Creates the dictionary used to store QA checks associated with the percent of discharge estimated
+        by interpolation. This dictionary is used by BT, GPS, Depth, and WT.
+
+        Parameters
+        ----------
+        self: QAData
+            Object of QAData, passed explicitly because this is a static method
+        mat_data: mat_struct
+            Matlab data from QRev file
+        ndim: int
+            Number of dimensions in data
+
+        Returns
+        -------
+        qa_dict: dict
+            Dictionary of QA check results populated from the Matlab data
+        """
+
+        # Initialize dictionary
+        qa_dict = dict()
+
+        # Populate dictionary from Matlab data
+        qa_dict['messages'] = QAData.make_list(mat_data.messages)
+
+        # allInvalid not available in older QRev data
+        if hasattr(mat_data, 'allInvalid'):
+            qa_dict['all_invalid'] = self.make_array(mat_data.allInvalid, 1).astype(bool)
+
+        qa_dict['q_max_run_caution'] = self.make_array(mat_data.qRunCaution, ndim).astype(bool)
+        qa_dict['q_max_run_warning'] = self.make_array(mat_data.qRunWarning, ndim).astype(bool)
+        if hasattr(mat_data, 'qTotalCaution'):
+            qa_dict['q_total_caution'] = self.make_array(mat_data.qTotalCaution, ndim).astype(bool)
+        else:
+            qa_dict['q_total_caution'] = self.make_array(mat_data.qTotalWarning, ndim).astype(bool)
+        qa_dict['q_total_warning'] = self.make_array(mat_data.qTotalWarning, ndim).astype(bool)
+        qa_dict['status'] = mat_data.status
+
+        # q_max_run and q_total not available in older QRev data
+        try:
+            qa_dict['q_max_run'] = self.make_array(mat_data.qMaxRun, ndim)
+            qa_dict['q_total'] = self.make_array(mat_data.qTotal, ndim)
+        except AttributeError:
+            qa_dict['q_max_run'] = np.tile(np.nan, (len(mat_data.qRunCaution), 6))
+            qa_dict['q_total'] = np.tile(np.nan, (len(mat_data.qRunCaution), 6))
+        return qa_dict
+
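+    # Usage sketch: scalar checks use the default ndim=1 (e.g. depths), while
+    # velocity checks are stored as 2-D arrays, e.g.
+    #
+    #     self.bt_vel = self.create_qa_dict(self, meas_struct.qa.btVel, ndim=2)
+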
+    @staticmethod
+    def make_array(num_in, ndim=1):
+        """Ensures that num_in is a numpy array, converting it if necessary.
+
+        Parameters
+        ----------
+        num_in: any
+            Any value or array
+        ndim: int
+            Number of dimensions expected in the returned array
+
+        Returns
+        -------
+        num_in: np.ndarray
+            num_in as an array, reshaped to 2-D when ndim > 1 and the input is 1-D
+        """
+
+        if type(num_in) is np.ndarray:
+            if len(num_in.shape) < 2 and ndim > 1:
+                num_in = np.reshape(num_in, (1, num_in.shape[0]))
+                return num_in
+            else:
+                return num_in
+        else:
+            return np.array([num_in])
+
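+    # Examples: make_array(5.0) returns array([5.0]); make_array(np.zeros(3), ndim=2)
+    # returns a (1, 3) array; a 2-D array is returned unchanged.
+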
+    @staticmethod
+    def make_list(array_in):
+        """Converts a string or array to a list.
+
+        Parameters
+        ----------
+        array_in: any
+            Data to be converted to list.
+
+        Returns
+        -------
+        list_out: list
+            List of array_in data
+        """
+
+        list_out = []
+        # Convert string to list
+        if type(array_in) is str:
+            list_out = [array_in]
+        else:
+            # Empty array
+            if array_in.size == 0:
+                list_out = []
+            # Single message stored as [text, code, tab] rather than a list of messages
+            elif array_in.size == 3:
+                if type(array_in[1]) is int or len(array_in[1].strip()) == 1:
+                    # Wrap the single message so the result is a list of message lists
+                    list_out = [array_in.tolist()]
+                else:
+                    # Three separate messages without integer codes
+                    list_out = array_in.tolist()
+            # Either multiple messages with or without integer codes
+            else:
+                list_out = array_in.tolist()
+
+        return list_out
+
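+    # Examples: make_list('No system test;') returns ['No system test;'];
+    # a single stored message such as np.array(['text;', 1, 0], dtype=object) is
+    # wrapped as [['text;', 1, 0]] so that one message and many messages have the
+    # same nesting; larger arrays are returned directly via tolist().
+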
+    def transects_qa(self, meas):
+        """Apply quality checks to transects
+
+        Parameters
+        ----------
+        meas: Measurement
+            Object of class Measurement
+        """
+
+        # Assume good results
+        self.transects['status'] = 'good'
+
+        # Initialize keys
+        self.transects['messages'] = []
+        self.transects['recip'] = 0
+        self.transects['sign'] = 0
+        self.transects['duration'] = 0
+        self.transects['number'] = 0
+        self.transects['uncertainty'] = 0
+
+        # Initialize lists
+        checked = []
+        discharges = []
+        start_edge = []
+
+        # Populate lists
+        for n in range(len(meas.transects)):
+            checked.append(meas.transects[n].checked)
+            if meas.transects[n].checked:
+                discharges.append(meas.discharge[n])
+                start_edge.append(meas.transects[n].start_edge)
+
+        num_checked = np.nansum(np.asarray(checked))
+
+        # Check duration
+        total_duration = 0
+        if num_checked >= 1:
+            for transect in meas.transects:
+                if transect.checked:
+                    total_duration += transect.date_time.transect_duration_sec
+
+        # Check duration against USGS policy
+        if total_duration < meas.min_duration:
+            self.transects['status'] = 'caution'
+            text = 'Transects: Duration of selected transects is less than ' + str(meas.min_duration) + ' seconds;'
+            self.transects['messages'].append([text, 2, 0])
+            self.transects['duration'] = 1
+
+        # Check transects for missing ensembles
+        for transect in meas.transects:
+            if transect.checked:
+
+                # Determine number of missing ensembles
+                if transect.adcp.manufacturer == 'SonTek':
+                    # Determine number of missing ensembles for SonTek data
+                    idx_missing = np.where(transect.date_time.ens_duration_sec > 1.5)[0]
+                    if len(idx_missing) > 0:
+                        average_ensemble_duration = (np.nansum(transect.date_time.ens_duration_sec)
+                                                     - np.nansum(transect.date_time.ens_duration_sec[idx_missing])) \
+                                                     / (len(transect.date_time.ens_duration_sec) - len(idx_missing))
+                        num_missing = np.round(np.nansum(transect.date_time.ens_duration_sec[idx_missing])
+                                               / average_ensemble_duration) - len(idx_missing)
+                    else:
+                        num_missing = 0
+                else:
+                    # Determine number of lost ensembles for TRDI data. The count of NaN
+                    # durations is reduced by one because the first ensemble has no
+                    # preceding ensemble and therefore always has a NaN duration.
+                    idx_missing = np.where(np.isnan(transect.date_time.ens_duration_sec))[0]
+                    num_missing = len(idx_missing) - 1
+
+                # Save caution message
+                if num_missing > 0:
+                    self.transects['messages'].append(['Transects: ' + str(transect.file_name) + ' is missing '
+                                                       + str(int(num_missing)) + ' ensembles;', 2, 0])
+                    self.transects['status'] = 'caution'
+
+        # Check number of transects checked
+        if num_checked == 0:
+            # No transects selected
+            self.transects['status'] = 'warning'
+            self.transects['messages'].append(['TRANSECTS: No transects selected;', 1, 0])
+            self.transects['number'] = 2
+        elif num_checked == 1:
+            # Only one transect selected
+            self.transects['status'] = 'caution'
+            self.transects['messages'].append(['Transects: Only one transect selected;', 2, 0])
+            self.transects['number'] = 2
+        else:
+            self.transects['number'] = num_checked
+            if num_checked == 2:
+                # Only 2 transects selected
+                cov, _ = Uncertainty.uncertainty_q_random(discharges, 'total')
+                # Check uncertainty
+                if cov > 2:
+                    self.transects['status'] = 'caution'
+                    self.transects['messages'].append(
+                        ['Transects: Uncertainty would be reduced by additional transects;', 2, 0])
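+                # Numerical sketch: two discharges of 10.0 and 10.5 m3/s have a
+                # sample coefficient of variation of about 3.4 percent, which would
+                # trigger this caution (assuming cov is returned in percent).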
+
+            if num_checked < meas.min_transects:
+                self.transects['status'] = 'caution'
+                text = 'Transects: Number of transects is below the required minimum of ' \
+                       + str(meas.min_transects) + ';'
+                self.transects['messages'].append([text, 2, 0])
+
+            # Check for consistent sign
+            q_positive = []
+            for q in discharges:
+                if q.total >= 0:
+                    q_positive.append(True)
+                else:
+                    q_positive.append(False)
+            if len(np.unique(q_positive)) > 1:
+                self.transects['status'] = 'warning'
+                self.transects['messages'].append(
+                    ['TRANSECTS: Sign of total Q is not consistent. One or more start banks may be incorrect;', 1, 0])
+
+            # Check for reciprocal transects
+            num_left = start_edge.count('Left')
+            num_right = start_edge.count('Right')
+
+            if not num_left == num_right:
+                self.transects['status'] = 'warning'
+                self.transects['messages'].append(['TRANSECTS: Transects selected are not reciprocal transects;', 1, 0])
+
+        # Check for zero discharge transects
+        q_zero = False
+        for q in discharges:
+            if q.total == 0:
+                q_zero = True
+        if q_zero:
+            self.transects['status'] = 'warning'
+            self.transects['messages'].append(['TRANSECTS: One or more transects have zero Q;', 1, 0])
+
+    def system_tst_qa(self, meas):
+        """Apply QA checks to system test.
+
+        Parameters
+        ----------
+        meas: Measurement
+            Object of class Measurement
+        """
+
+        self.system_tst['messages'] = []
+        self.system_tst['status'] = 'good'
+
+        # Determine if a system test was recorded
+        if not meas.system_tst:
+            # No system test data recorded
+            self.system_tst['status'] = 'warning'
+            self.system_tst['messages'].append(['SYSTEM TEST: No system test;', 1, 3])
+        else:
+
+            pt3_fail = False
+            num_tests_with_failure = 0
+
+            for test in meas.system_tst:
+                if hasattr(test, 'result'):
+
+                    # Check for presence of pt3 test
+                    if 'pt3' in test.result and test.result['pt3'] is not None:
+
+                        # Check hard_limit, high gain, wide bandwidth
+                        if 'hard_limit' in test.result['pt3']:
+                            if 'high_wide' in test.result['pt3']['hard_limit']:
+                                corr_table = test.result['pt3']['hard_limit']['high_wide']['corr_table']
+                                if len(corr_table) > 0:
+                                    # All lags past lag 2 should be less than 50% of lag 0
+                                    qa_threshold = corr_table[0, :] * 0.5
+                                    all_lag_check = np.greater(corr_table[3::, :], qa_threshold)
+
+                                    # Lag 7 should be less than 25% of lag 0
+                                    lag_7_check = np.greater(corr_table[7, :], corr_table[0, :] * 0.25)
+
+                                    # If more than one exceedance occurs across all beams, the test fails
+                                    if np.sum(np.sum(all_lag_check)) + np.sum(lag_7_check) > 1:
+                                        pt3_fail = True
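+                                    # Numerical sketch: with a lag-0 correlation of
+                                    # 128 counts, any lag-3+ value above 64 or a
+                                    # lag-7 value above 32 counts toward the failure
+                                    # tally evaluated above.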
+
+                    if test.result['sysTest']['n_failed'] is not None and test.result['sysTest']['n_failed'] > 0:
+                        num_tests_with_failure += 1
+
+            # pt3 test failure message
+            if pt3_fail:
+                self.system_tst['status'] = 'caution'
+                self.system_tst['messages'].append(
+                    ['System Test: One or more PT3 tests in the system test indicate potential EMI;', 2, 3])
+
+            # Check for failed tests
+            if num_tests_with_failure == len(meas.system_tst):
+                # All tests had a failure
+                self.system_tst['status'] = 'warning'
+                self.system_tst['messages'].append(
+                    ['SYSTEM TEST: All system test sets have at least one test that failed;', 1, 3])
+            elif num_tests_with_failure > 0:
+                self.system_tst['status'] = 'caution'
+                self.system_tst['messages'].append(
+                    ['System Test: One or more system test sets have at least one test that failed;', 2, 3])
+
+    def compass_qa(self, meas):
+        """Apply QA checks to compass calibration and evaluation.
+
+        Parameters
+        ----------
+        meas: Measurement
+            Object of class Measurement
+        """
+
+        self.compass['messages'] = []
+
+        checked = []
+        for transect in meas.transects:
+            checked.append(transect.checked)
+
+        if np.any(checked):
+            heading = np.unique(meas.transects[checked.index(True)].sensors.heading_deg.internal.data)
+        else:
+            heading = np.array([0])
+
+        # Initialize variable as if ADCP has no compass
+        self.compass['status'] = 'inactive'
+        self.compass['status1'] = 'good'
+        self.compass['status2'] = 'good'
+        self.compass['magvar'] = 0
+        self.compass['magvar_idx'] = []
+        self.compass['mag_error_idx'] = []
+        self.compass['pitch_mean_warning_idx'] = []
+        self.compass['pitch_mean_caution_idx'] = []
+        self.compass['pitch_std_caution_idx'] = []
+        self.compass['roll_mean_warning_idx'] = []
+        self.compass['roll_mean_caution_idx'] = []
+        self.compass['roll_std_caution_idx'] = []
+
+        if len(heading) > 1 and np.any(np.not_equal(heading, 0)):
+            # ADCP has a compass
+            # A compass calibration is required if a loop test or GPS data are used
+
+            # Check for loop test
+            loop = False
+            for test in meas.mb_tests:
+                if test.type == 'Loop':
+                    loop = True
+
+            # Check for GPS data
+            gps = False
+            if meas.transects[checked.index(True)].boat_vel.gga_vel is not None or \
+                    meas.transects[checked.index(True)].boat_vel.vtg_vel is not None:
+                gps = True
+
+            if gps or loop:
+                # Compass calibration is required
+
+                # Determine the ADCP manufacturer
+                if meas.transects[checked.index(True)].adcp.manufacturer == 'SonTek':
+                    # SonTek ADCP
+                    if len(meas.compass_cal) == 0:
+                        # No compass calibration
+                        self.compass['status1'] = 'warning'
+                        self.compass['messages'].append(['COMPASS: No compass calibration;', 1, 4])
+                    elif meas.compass_cal[-1].result['compass']['error'] == 'N/A':
+                        # If the error cannot be decoded from the calibration, assume the calibration is good
+                        self.compass['status1'] = 'good'
+                    else:
+                        if meas.compass_cal[-1].result['compass']['error'] <= 0.2:
+                            self.compass['status1'] = 'good'
+                        else:
+                            self.compass['status1'] = 'caution'
+                            self.compass['messages'].append(['Compass: Calibration result > 0.2 deg;', 2, 4])
+
+                elif meas.transects[checked.index(True)].adcp.manufacturer == 'TRDI':
+                    # TRDI ADCP
+                    if len(meas.compass_cal) == 0:
+                        # No compass calibration
+                        if len(meas.compass_eval) == 0:
+                            # No calibration or evaluation
+                            self.compass['status1'] = 'warning'
+                            self.compass['messages'].append(['COMPASS: No compass calibration or evaluation;', 1, 4])
+                        else:
+                            # No calibration but an evaluation was completed
+                            self.compass['status1'] = 'caution'
+                            self.compass['messages'].append(['Compass: No compass calibration;', 2, 4])
+                    else:
+                        # Compass was calibrated
+                        if len(meas.compass_eval) == 0:
+                            # No compass evaluation
+                            self.compass['status1'] = 'caution'
+                            self.compass['messages'].append(['Compass: No compass evaluation;', 2, 4])
+                        else:
+                            # Check results of evaluation
+                            try:
+                                if float(meas.compass_eval[-1].result['compass']['error']) <= 1:
+                                    self.compass['status1'] = 'good'
+                                else:
+                                    self.compass['status1'] = 'caution'
+                                    self.compass['messages'].append(['Compass: Evaluation result > 1 deg;', 2, 4])
+                            except ValueError:
+                                self.compass['status1'] = 'good'
+            else:
+                # Compass not required
+                if len(meas.compass_cal) == 0 and len(meas.compass_eval) == 0:
+                    # No compass calibration or evaluation
+                    self.compass['status1'] = 'default'
+                else:
+                    # Compass was calibrated and evaluated
+                    self.compass['status1'] = 'good'
+
+            # Check for consistent magvar and pitch and roll mean and variation
+            magvar = []
+            align = []
+            mag_error_exceeded = []
+            pitch_mean = []
+            pitch_std = []
+            pitch_exceeded = []
+            roll_mean = []
+            roll_std = []
+            roll_exceeded = []
+            transect_idx = []
+            for n, transect in enumerate(meas.transects):
+                if transect.checked:
+                    transect_idx.append(n)
+                    heading_source_selected = getattr(
+                        transect.sensors.heading_deg, transect.sensors.heading_deg.selected)
+                    pitch_source_selected = getattr(transect.sensors.pitch_deg, transect.sensors.pitch_deg.selected)
+                    roll_source_selected = getattr(transect.sensors.roll_deg, transect.sensors.roll_deg.selected)
+
+                    magvar.append(transect.sensors.heading_deg.internal.mag_var_deg)
+                    if transect.sensors.heading_deg.external is not None:
+                        align.append(transect.sensors.heading_deg.external.align_correction_deg)
+
+                    pitch_mean.append(np.nanmean(pitch_source_selected.data))
+                    pitch_std.append(np.nanstd(pitch_source_selected.data, ddof=1))
+                    roll_mean.append(np.nanmean(roll_source_selected.data))
+                    roll_std.append(np.nanstd(roll_source_selected.data, ddof=1))
+
+                    # SonTek G3 compass provides pitch, roll, and magnetic error parameters that can be checked
+                    if transect.adcp.manufacturer == 'SonTek':
+                        if heading_source_selected.pitch_limit is not None:
+                            # Work around a bug in SonTek data where pitch and roll are n x 3; use the first column (n x 1)
+                            if len(pitch_source_selected.data.shape) == 1:
+                                pitch_data = pitch_source_selected.data
+                            else:
+                                pitch_data = pitch_source_selected.data[:, 0]
+                            idx_max = np.where(pitch_data > heading_source_selected.pitch_limit[0])[0]
+                            idx_min = np.where(pitch_data < heading_source_selected.pitch_limit[1])[0]
+                            if len(idx_max) > 0 or len(idx_min) > 0:
+                                pitch_exceeded.append(True)
+                            else:
+                                pitch_exceeded.append(False)
+
+                        if heading_source_selected.roll_limit is not None:
+                            if len(roll_source_selected.data.shape) == 1:
+                                roll_data = roll_source_selected.data
+                            else:
+                                roll_data = roll_source_selected.data[:, 0]
+                            idx_max = np.where(roll_data > heading_source_selected.roll_limit[0])[0]
+                            idx_min = np.where(roll_data < heading_source_selected.roll_limit[1])[0]
+                            if len(idx_max) > 0 or len(idx_min) > 0:
+                                roll_exceeded.append(True)
+                            else:
+                                roll_exceeded.append(False)
+
+                        if heading_source_selected.mag_error is not None:
+                            idx_max = np.where(heading_source_selected.mag_error > 2)[0]
+                            if len(idx_max) > 0:
+                                mag_error_exceeded.append(n)
+            # Check magvar consistency
+            if len(np.unique(magvar)) > 1:
+                self.compass['status2'] = 'caution'
+                self.compass['messages'].append(
+                    ['Compass: Magnetic variation is not consistent among transects;', 2, 4])
+                self.compass['magvar'] = 1
+
+            # Check heading offset (alignment) consistency
+            if len(np.unique(align)) > 1:
+                self.compass['status2'] = 'caution'
+                self.compass['messages'].append(
+                    ['Compass: Heading offset is not consistent among transects;', 2, 4])
+                self.compass['align'] = 1
+
+            # Check that magvar was set if GPS data are available
+            if gps:
+                if 0 in magvar:
+                    self.compass['status2'] = 'warning'
+                    self.compass['messages'].append(
+                        ['COMPASS: Magnetic variation is 0 and GPS data are present;', 1, 4])
+                    self.compass['magvar'] = 2
+                    self.compass['magvar_idx'] = np.where(np.array(magvar) == 0)[0].tolist()
+
+            # Check pitch mean
+            if np.any(np.asarray(np.abs(pitch_mean)) > 8):
+                self.compass['status2'] = 'warning'
+                self.compass['messages'].append(['PITCH: One or more transects have a mean pitch > 8 deg;', 1, 4])
+                temp = np.where(np.abs(pitch_mean) > 8)[0]
+                if len(temp) > 0:
+                    self.compass['pitch_mean_warning_idx'] = np.array(transect_idx)[temp]
+                else:
+                    self.compass['pitch_mean_warning_idx'] = []
+
+            elif np.any(np.asarray(np.abs(pitch_mean)) > 4):
+                if self.compass['status2'] == 'good':
+                    self.compass['status2'] = 'caution'
+                self.compass['messages'].append(['Pitch: One or more transects have a mean pitch > 4 deg;', 2, 4])
+                temp = np.where(np.abs(pitch_mean) > 4)[0]
+                if len(temp) > 0:
+                    self.compass['pitch_mean_caution_idx'] = np.array(transect_idx)[temp]
+                else:
+                    self.compass['pitch_mean_caution_idx'] = []
+
+            # Check roll mean
+            if np.any(np.asarray(np.abs(roll_mean)) > 8):
+                self.compass['status2'] = 'warning'
+                self.compass['messages'].append(['ROLL: One or more transects have a mean roll > 8 deg;', 1, 4])
+                temp = np.where(np.abs(roll_mean) > 8)[0]
+                if len(temp) > 0:
+                    self.compass['roll_mean_warning_idx'] = np.array(transect_idx)[temp]
+                else:
+                    self.compass['roll_mean_warning_idx'] = []
+
+            elif np.any(np.asarray(np.abs(roll_mean)) > 4):
+                if self.compass['status2'] == 'good':
+                    self.compass['status2'] = 'caution'
+                self.compass['messages'].append(['Roll: One or more transects have a mean roll > 4 deg;', 2, 4])
+                temp = np.where(np.abs(roll_mean) > 4)[0]
+                if len(temp) > 0:
+                    self.compass['roll_mean_caution_idx'] = np.array(transect_idx)[temp]
+                else:
+                    self.compass['roll_mean_caution_idx'] = []
+
+            # Check pitch standard deviation
+            if np.any(np.asarray(pitch_std) > 5):
+                if self.compass['status2'] == 'good':
+                    self.compass['status2'] = 'caution'
+                self.compass['messages'].append(['Pitch: One or more transects have a pitch std dev > 5 deg;', 2, 4])
+                temp = np.where(np.abs(pitch_std) > 5)[0]
+                if len(temp) > 0:
+                    self.compass['pitch_std_caution_idx'] = np.array(transect_idx)[temp]
+                else:
+                    self.compass['pitch_std_caution_idx'] = []
+
+            # Check roll standard deviation
+            if np.any(np.asarray(roll_std) > 5):
+                if self.compass['status2'] == 'good':
+                    self.compass['status2'] = 'caution'
+                self.compass['messages'].append(['Roll: One or more transects have a roll std dev > 5 deg;', 2, 4])
+                temp = np.where(np.abs(roll_std) > 5)[0]
+                if len(temp) > 0:
+                    self.compass['roll_std_caution_idx'] = np.array(transect_idx)[temp]
+                else:
+                    self.compass['roll_std_caution_idx'] = []
+
+            # Additional checks for SonTek G3 compass
+            if meas.transects[checked.index(True)].adcp.manufacturer == 'SonTek':
+                # Check if pitch limits were exceeded
+                if any(pitch_exceeded):
+                    if self.compass['status2'] == 'good':
+                        self.compass['status2'] = 'caution'
+                    self.compass['messages'].append(
+                        ['Compass: One or more transects have pitch exceeding calibration limits;', 2, 4])
+
+                # Check if roll limits were exceeded
+                if any(roll_exceeded):
+                    if self.compass['status2'] == 'good':
+                        self.compass['status2'] = 'caution'
+                    self.compass['messages'].append(
+                        ['Compass: One or more transects have roll exceeding calibration limits;', 2, 4])
+
+                # Check if magnetic error was exceeded
+                self.compass['mag_error_idx'] = []
+                if len(mag_error_exceeded) > 0:
+                    self.compass['mag_error_idx'] = np.array(mag_error_exceeded)
+                    if self.compass['status2'] == 'good':
+                        self.compass['status2'] = 'caution'
+                    self.compass['messages'].append(
+                        ['Compass: One or more transects have a change in mag field exceeding 2%;', 2, 4])
+
+            if self.compass['status1'] == 'warning' or self.compass['status2'] == 'warning':
+                self.compass['status'] = 'warning'
+            elif self.compass['status1'] == 'caution' or self.compass['status2'] == 'caution':
+                self.compass['status'] = 'caution'
+            else:
+                self.compass['status'] = 'good'
+
+    def temperature_qa(self, meas):
+        """Apply QA checks to temperature.
+
+        Parameters
+        ----------
+        meas: Measurement
+            Object of class Measurement
+        """
+
+        self.temperature['messages'] = []
+        check = [0, 0]
+
+        # Create array of all temperatures
+        temp = np.array([])
+        checked = []
+        for transect in meas.transects:
+            if transect.checked:
+                checked.append(transect.checked)
+                temp_selected = getattr(transect.sensors.temperature_deg_c,
+                                        transect.sensors.temperature_deg_c.selected)
+                if len(temp) == 0:
+                    temp = temp_selected.data
+                else:
+                    temp = np.hstack((temp, temp_selected.data))
+
+        # Check temperature range
+        if np.any(checked):
+            temp_range = np.nanmax(temp) - np.nanmin(temp)
+        else:
+            temp_range = 0
+
+        if temp_range > 2:
+            check[0] = 3
+            self.temperature['messages'].append(['TEMPERATURE: Temperature range is '
+                                                 + '{:3.1f}'.format(temp_range)
+                                                 + ' degrees C which is greater than 2 degrees;', 1, 5])
+        elif temp_range > 1:
+            check[0] = 2
+            self.temperature['messages'].append(['Temperature: Temperature range is '
+                                                 + '{:3.1f}'.format(temp_range)
+                                                 + ' degrees C which is greater than 1 degree;', 2, 5])
+        else:
+            check[0] = 1
+
+        # Check for independent temperature reading
+        if 'user' in meas.ext_temp_chk:
+            try:
+                user = float(meas.ext_temp_chk['user'])
+            except (ValueError, TypeError):
+                user = None
+            if user is None or np.isnan(user):
+                # No independent temperature reading
+                check[1] = 2
+                self.temperature['messages'].append(['Temperature: No independent temperature reading;', 2, 5])
+            elif not np.isnan(meas.ext_temp_chk['adcp']):
+                # Compare user to manually entered ADCP temperature
+                diff = np.abs(user - meas.ext_temp_chk['adcp'])
+                if diff < 2:
+                    check[1] = 1
+                else:
+                    check[1] = 3
+                    self.temperature['messages'].append(
+                        ['TEMPERATURE: The difference between ADCP and reference is > 2:  '
+                         + '{:3.1f}'.format(diff) + ' C;', 1, 5])
+            else:
+                # Compare user to mean of all temperature data
+                diff = np.abs(user - np.nanmean(temp))
+                if diff < 2:
+                    check[1] = 1
+                else:
+                    check[1] = 3
+                    self.temperature['messages'].append(
+                        ['TEMPERATURE: The difference between ADCP and reference is > 2:  '
+                         + '{:3.1f}'.format(diff) + ' C;', 1, 5])
+
+        # Assign temperature status
+        max_check = max(check)
+        if max_check == 1:
+            self.temperature['status'] = 'good'
+        elif max_check == 2:
+            self.temperature['status'] = 'caution'
+        elif max_check == 3:
+            self.temperature['status'] = 'warning'
+
+    def moving_bed_qa(self, meas):
+        """Applies quality checks to moving-bed tests.
+
+        Parameters
+        ----------
+        meas: Measurement
+            Object of class Measurement
+        """
+
+        self.movingbed['messages'] = []
+        self.movingbed['code'] = 0
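+        # movingbed['code'] summarizes the result: 0 = not evaluated, 1 = good,
+        # 2 = caution, 3 = warning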
+
+        # Are there moving-bed tests?
+        if len(meas.mb_tests) < 1:
+            if meas.observed_no_moving_bed:
+                self.movingbed['messages'].append(['Moving-Bed Test: Visually observed no moving bed;', 2, 6])
+                self.movingbed['status'] = 'caution'
+                self.movingbed['code'] = 2
+            else:
+                # No moving-bed test
+                self.movingbed['messages'].append(['MOVING-BED TEST: No moving bed test;', 1, 6])
+                self.movingbed['status'] = 'warning'
+                self.movingbed['code'] = 3
+
+        else:
+            # Moving-bed tests available
+            mb_data = meas.mb_tests
+
+            user_valid_test = []
+            file_names = []
+            idx_selected = []
+            test_quality = []
+            mb_tests = []
+            mb = []
+            mb_test_type = []
+            loop = []
+            use_2_correct = []
+            gps_diff1 = False
+            gps_diff2 = False
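+            # gps_diff1 flags BT and GPS disagreeing on whether a moving bed exists
+            # (only one of the two reports >= 1% moving bed); gps_diff2 flags BT and
+            # GPS percentages that differ by more than 2%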
+
+            for n, test in enumerate(mb_data):
+                # Are tests valid according to the user
+                if test.user_valid:
+                    user_valid_test.append(True)
+                    file_names.append(test.transect.file_name)
+                    if test.type == 'Loop' and not test.test_quality == 'Errors':
+                        loop.append(test.moving_bed)
+                    if not np.isnan(test.gps_percent_mb):
+                        if np.abs(test.bt_percent_mb - test.gps_percent_mb) > 2:
+                            gps_diff2 = True
+                        if np.logical_xor(test.bt_percent_mb >= 1, test.gps_percent_mb >= 1):
+                            gps_diff1 = True
+                    # Selected test
+                    if test.selected:
+                        idx_selected.append(n)
+                        test_quality.append(test.test_quality)
+                        mb_tests.append(test)
+                        mb.append(test.moving_bed)
+                        mb_test_type.append(test.type)
+                        use_2_correct.append(test.use_2_correct)
+                else:
+                    user_valid_test.append(False)
+
+            if not any(user_valid_test):
+                # No valid test according to user
+                self.movingbed['messages'].append(['MOVING-BED TEST: No valid moving-bed test based on user input;',
+                                                   1, 6])
+                self.movingbed['status'] = 'warning'
+                self.movingbed['code'] = 3
+            else:
+                # Check for duplicate valid moving-bed tests
+                if len(np.unique(file_names)) < len(file_names):
+                    self.movingbed['messages'].append([
+                        'MOVING-BED TEST: Duplicate moving-bed test files marked valid;', 1, 6])
+                    self.movingbed['status'] = 'warning'
+                    self.movingbed['code'] = 3
+
+            if self.movingbed['code'] == 0:
+                # Check test quality
+                if len(test_quality) > 0 and sum(np.array(test_quality) == 'Good') > 0:
+                    self.movingbed['status'] = 'good'
+                    self.movingbed['code'] = 1
+
+                    # Check if there is a moving-bed
+                    if 'Yes' in mb:
+
+                        # Moving-bed present
+                        self.movingbed['messages'].append(
+                            ['Moving-Bed Test: A moving-bed is present.', 2, 6])
+                        self.movingbed['code'] = 2
+                        self.movingbed['status'] = 'caution'
+                        if meas.transects[meas.checked_transect_idx[0]].boat_vel.composite == 'On':
+                            self.movingbed['messages'].append(
+                                ['Moving-Bed: Use of composite tracks could cause inaccurate results.', 2, 6])
+
+                        if meas.transects[meas.checked_transect_idx[0]].boat_vel.selected == 'bt_vel':
+                            if any(use_2_correct):
+                                self.movingbed['messages'].append(
+                                    ['Moving-Bed: BT based moving-bed correction applied.', 2, 6])
+                            else:
+                                self.movingbed['messages'].append(
+                                    ['MOVING-BED: Moving-bed present and BT used, but no correction applied.', 1, 6])
+                                self.movingbed['code'] = 3
+                                self.movingbed['status'] = 'warning'
+                        elif meas.transects[meas.checked_transect_idx[0]].boat_vel.selected == 'gga_vel':
+                            self.movingbed['messages'].append(
+                                ['Moving-Bed: GGA used.', 2, 6])
+                        elif meas.transects[meas.checked_transect_idx[0]].boat_vel.selected == 'vtg_vel':
+                            self.movingbed['messages'].append(
+                                ['Moving-Bed: VTG used.', 2, 6])
+
+                        # Check for test type
+                        if sum(np.array(mb_test_type) == 'Stationary') > 0:
+                            # Check for GPS or 3 stationary tests
+                            if len(mb_tests) < 3:
+                                gps = []
+                                for transect in meas.transects:
+                                    if transect.checked:
+                                        if transect.gps is None:
+                                            gps.append(False)
+                                        else:
+                                            gps.append(True)
+                                if not all(gps):
+                                    # GPS not available for all selected transects
+                                    self.movingbed['messages'].append([
+                                        'Moving-Bed Test: '
+                                        + 'Less than 3 stationary tests available for moving-bed correction;',
+                                        2, 6])
+
+                elif len(test_quality) > 0 and sum(np.array(test_quality) == 'Warnings') > 0:
+                    # Quality check has warnings
+                    self.movingbed['messages'].append(['Moving-Bed Test: The moving-bed test(s) has warnings, '
+                                                       + 'please review tests to determine validity;', 2, 6])
+                    self.movingbed['status'] = 'caution'
+                    self.movingbed['code'] = 2
+
+                elif len(test_quality) > 0 and sum(np.array(test_quality) == 'Manual') > 0:
+                    # Manual override used
+                    self.movingbed['messages'].append(['MOVING-BED TEST: '
+                                                       + 'The user has manually forced the use of some tests;', 1, 6])
+                    self.movingbed['status'] = 'warning'
+                    self.movingbed['code'] = 3
+
+                else:
+                    # Test has critical errors
+                    self.movingbed['messages'].append(['MOVING-BED TEST: The moving-bed test(s) have critical errors '
+                                                       + 'and will not be used;', 1, 6])
+                    self.movingbed['status'] = 'warning'
+                    self.movingbed['code'] = 3
+
+                # Check multiple loops for consistency
+                if len(np.unique(loop)) > 1:
+                    self.movingbed['messages'].append(['Moving-Bed Test: Results of valid loops are not consistent, '
+                                                       + 'review moving-bed tests;', 2, 6])
+                    if self.movingbed['code'] < 3:
+                        self.movingbed['code'] = 2
+                        self.movingbed['status'] = 'caution'
+
+                # Notify of differences in results of test between BT and GPS
+                if gps_diff2:
+                    self.movingbed['messages'].append(['Moving-Bed Test: Bottom track and '
+                                                      'GPS results differ by more than 2%.', 2, 6])
+                    if self.movingbed['code'] < 3:
+                        self.movingbed['code'] = 2
+                        self.movingbed['status'] = 'caution'
+
+                if gps_diff1:
+                    self.movingbed['messages'].append(['Moving-Bed Test: Bottom track and GPS results do not agree.',
+                                                      2, 6])
+                    if self.movingbed['code'] < 3:
+                        self.movingbed['code'] = 2
+                        self.movingbed['status'] = 'caution'
+
+        self.check_mbt_settings(meas)
+
+    def user_qa(self, meas):
+        """Apply quality checks to user input data.
+
+        Parameters
+        ----------
+        meas: Measurement
+            Object of class Measurement
+        """
+
+        self.user['messages'] = []
+        self.user['status'] = 'good'
+
+        # Check for Station Name
+        self.user['sta_name'] = False
+        if meas.station_name is None or len(meas.station_name.strip()) < 1:
+            self.user['messages'].append(['Site Info: Station name not entered;', 2, 2])
+            self.user['status'] = 'caution'
+            self.user['sta_name'] = True
+
+        # Check for Station Number
+        self.user['sta_number'] = False
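+        # station_number may be None or stored as a number; strip() then raises
+        # AttributeError and the station number is treated as not entered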
+        try:
+            if meas.station_number is None or len(meas.station_number.strip()) < 1:
+                self.user['messages'].append(['Site Info: Station number not entered;', 2, 2])
+                self.user['status'] = 'caution'
+                self.user['sta_number'] = True
+        except AttributeError:
+            self.user['messages'].append(['Site Info: Station number not entered;', 2, 2])
+            self.user['status'] = 'caution'
+            self.user['sta_number'] = True
+
+    def depths_qa(self, meas):
+        """Apply quality checks to depth data.
+
+        Parameters
+        ----------
+        meas: Measurement
+            Object of class Measurement
+        """
+
+        # Initialize variables
+        n_transects = len(meas.transects)
+        self.depths['q_total'] = np.tile(np.nan, n_transects)
+        self.depths['q_max_run'] = np.tile(np.nan, n_transects)
+        self.depths['q_total_caution'] = np.tile(False, n_transects)
+        self.depths['q_max_run_caution'] = np.tile(False, n_transects)
+        self.depths['q_total_warning'] = np.tile(False, n_transects)
+        self.depths['q_max_run_warning'] = np.tile(False, n_transects)
+        self.depths['all_invalid'] = np.tile(False, n_transects)
+        self.depths['messages'] = []
+        self.depths['status'] = 'good'
+        self.depths['draft'] = 0
+        checked = []
+        drafts = []
+        for n, transect in enumerate(meas.transects):
+            checked.append(transect.checked)
+            if transect.checked:
+                in_transect_idx = transect.in_transect_idx
+
+                depths_selected = getattr(transect.depths, transect.depths.selected)
+                drafts.append(depths_selected.draft_use_m)
+
+                # Determine valid measured depths
+                if transect.depths.composite:
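+                    # 'NA' marks ensembles with no valid depth and 'IN' marks
+                    # interpolated depths; neither counts as a measured depth here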
+                    depth_na = depths_selected.depth_source_ens[in_transect_idx] != 'NA'
+                    depth_in = depths_selected.depth_source_ens[in_transect_idx] != 'IN'
+                    depth_valid = np.all(np.vstack((depth_na, depth_in)), 0)
+                else:
+                    depth_valid_temp = depths_selected.valid_data[in_transect_idx]
+                    depth_nan = np.logical_not(np.isnan(depths_selected.depth_processed_m[in_transect_idx]))
+                    depth_valid = np.all(np.vstack((depth_nan, depth_valid_temp)), 0)
+
+                if not np.any(depth_valid):
+                    self.depths['all_invalid'][n] = True
+
+                # Compute QA characteristics
+                q_total, q_max_run, number_invalid_ensembles = QAData.invalid_qa(depth_valid, meas.discharge[n])
+                self.depths['q_total'][n] = q_total
+                self.depths['q_max_run'][n] = q_max_run
+
+                # Compute percentage compared to total
+                if meas.discharge[n].total == 0.0:
+                    q_total_percent = np.nan
+                    q_max_run_percent = np.nan
+                else:
+                    q_total_percent = np.abs((q_total / meas.discharge[n].total) * 100)
+                    q_max_run_percent = np.abs((q_max_run / meas.discharge[n].total) * 100)
+
+                # Apply total interpolated discharge threshold
+                if q_total_percent > self.q_total_threshold_warning:
+                    self.depths['q_total_warning'][n] = True
+                elif q_total_percent > self.q_total_threshold_caution:
+                    self.depths['q_total_caution'][n] = True
+
+                # Apply interpolated discharge run thresholds
+                if q_max_run_percent > self.q_run_threshold_warning:
+                    self.depths['q_max_run_warning'][n] = True
+                elif q_max_run_percent > self.q_run_threshold_caution:
+                    self.depths['q_max_run_caution'][n] = True
+
+        if any(checked):
+
+            # Create array of all unique draft values
+            draft_check = np.unique(np.round(drafts, 2))
+
+            # Check draft consistency
+            if len(draft_check) > 1:
+                self.depths['status'] = 'caution'
+                self.depths['draft'] = 1
+                self.depths['messages'].append(['Depth: Transducer depth is not consistent among transects;', 2, 10])
+
+            # Check for zero draft
+            if np.any(np.less(draft_check, 0.01)):
+                self.depths['status'] = 'warning'
+                self.depths['draft'] = 2
+                self.depths['messages'].append(['DEPTH: Transducer depth is too shallow, likely 0;', 1, 10])
+
+            # Check consecutive interpolated discharge criteria
+            if np.any(self.depths['q_max_run_warning']):
+                self.depths['messages'].append(['DEPTH: Int. Q for consecutive invalid ensembles exceeds '
+                                                + '%2.0f' % self.q_run_threshold_warning + '%;', 1, 10])
+                self.depths['status'] = 'warning'
+            elif np.any(self.depths['q_max_run_caution']):
+                self.depths['messages'].append(['Depth: Int. Q for consecutive invalid ensembles exceeds '
+                                                + '%2.0f' % self.q_run_threshold_caution + '%;', 2, 10])
+                self.depths['status'] = 'caution'
+
+            # Check total interpolated discharge criteria
+            if np.any(self.depths['q_total_warning']):
+                self.depths['messages'].append(['DEPTH: Int. Q for invalid ensembles in a transect exceeds '
+                                                + '%2.0f' % self.q_total_threshold_warning + '%;', 1, 10])
+                self.depths['status'] = 'warning'
+            elif np.any(self.depths['q_total_caution']):
+                self.depths['messages'].append(['Depth: Int. Q for invalid ensembles in a transect exceeds '
+                                                + '%2.0f' % self.q_total_threshold_caution + '%;', 2, 10])
+                self.depths['status'] = 'caution'
+
+            # Check if all depths are invalid
+            if np.any(self.depths['all_invalid']):
+                self.depths['messages'].append(['DEPTH: There are no valid depths for one or more transects.', 1, 10])
+                self.depths['status'] = 'warning'
+
+        else:
+            self.depths['status'] = 'inactive'
+
+    def boat_qa(self, meas):
+        """Apply quality checks to boat data.
+
+        Parameters
+        ----------
+        meas: Measurement
+            Object of class Measurement
+        """
+
+        # Initialize variables
+        n_transects = len(meas.transects)
+        data_type = {'BT': {'class': 'bt_vel', 'warning': 'BT-', 'caution': 'bt-',
+                            'filter': [('All: ', 0), ('Original: ', 1), ('ErrorVel: ', 2),
+                                       ('VertVel: ', 3), ('Other: ', 4), ('3Beams: ', 5)]},
+                     'GGA': {'class': 'gga_vel', 'warning': 'GGA-', 'caution': 'gga-',
+                             'filter': [('All: ', 0), ('Original: ', 1), ('DGPS: ', 2),
+                                        ('Altitude: ', 3), ('Other: ', 4), ('HDOP: ', 5)]},
+                     'VTG': {'class': 'vtg_vel', 'warning': 'VTG-', 'caution': 'vtg-',
+                             'filter': [('All: ', 0), ('Original: ', 1), ('Other: ', 4), ('HDOP: ', 5)]}}
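+        # Each entry maps a navigation reference to its QAData attribute name, the
+        # message prefixes for warnings and cautions, and the (label, valid_data row)
+        # pairs for each filter to check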
+        self.boat['messages'] = []
+
+        for dt_key, dt_value in data_type.items():
+            boat = getattr(self, dt_value['class'])
+
+            # Module code used in messages: 7 for BT, 8 for the GPS references
+            if dt_key == 'BT':
+                module_code = 7
+            else:
+                module_code = 8
+
+            # Initialize dictionaries for each data type
+            boat['q_total_caution'] = np.tile(False, (n_transects, 6))
+            boat['q_max_run_caution'] = np.tile(False, (n_transects, 6))
+            boat['q_total_warning'] = np.tile(False, (n_transects, 6))
+            boat['q_max_run_warning'] = np.tile(False, (n_transects, 6))
+            boat['all_invalid'] = np.tile(False, n_transects)
+            boat['q_total'] = np.tile(np.nan, (n_transects, 6))
+            boat['q_max_run'] = np.tile(np.nan, (n_transects, 6))
+            boat['messages'] = []
+            status_switch = 0
+            avg_speed_check = 0
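+            # status_switch escalates the final status: 0 = good, 1 = caution, 2 = warning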
+
+            # Check the results of each filter
+            for dt_filter in dt_value['filter']:
+                boat['status'] = 'inactive'
+
+                # Quality check each transect
+                for n, transect in enumerate(meas.transects):
+
+                    # Evaluate on transects used in the discharge computation
+                    if transect.checked:
+
+                        in_transect_idx = transect.in_transect_idx
+
+                        # Check to see if data are available for the data_type
+                        if getattr(transect.boat_vel, dt_value['class']) is not None:
+                            boat['status'] = 'good'
+
+                            # Compute quality characteristics
+                            valid = getattr(transect.boat_vel, dt_value['class']).valid_data[dt_filter[1],
+                                                                                             in_transect_idx]
+                            q_total, q_max_run, number_invalid_ens = QAData.invalid_qa(valid, meas.discharge[n])
+                            boat['q_total'][n, dt_filter[1]] = q_total
+                            boat['q_max_run'][n, dt_filter[1]] = q_max_run
+
+                            # Compute percentage compared to total
+                            if meas.discharge[n].total == 0.0:
+                                q_total_percent = np.nan
+                                q_max_run_percent = np.nan
+                            else:
+                                q_total_percent = np.abs((q_total / meas.discharge[n].total) * 100)
+                                q_max_run_percent = np.abs((q_max_run / meas.discharge[n].total) * 100)
+
+                            # Check if all invalid
+                            if dt_filter[1] == 0 and not np.any(valid):
+                                boat['all_invalid'][n] = True
+
+                            # Apply total interpolated discharge threshold
+                            if q_total_percent > self.q_total_threshold_warning:
+                                boat['q_total_warning'][n, dt_filter[1]] = True
+                            elif q_total_percent > self.q_total_threshold_caution:
+                                boat['q_total_caution'][n, dt_filter[1]] = True
+
+                            # Apply interpolated discharge run thresholds
+                            if q_max_run_percent > self.q_run_threshold_warning:
+                                boat['q_max_run_warning'][n, dt_filter[1]] = True
+                            elif q_max_run_percent > self.q_run_threshold_caution:
+                                boat['q_max_run_caution'][n, dt_filter[1]] = True
+
+                            # Check boat velocity for vtg data
+                            if dt_key == 'VTG' and transect.boat_vel.selected == 'vtg_vel' and avg_speed_check == 0:
+                                if transect.boat_vel.vtg_vel.u_mps is not None:
+                                    avg_speed = np.nanmean((transect.boat_vel.vtg_vel.u_mps ** 2
+                                                            + transect.boat_vel.vtg_vel.v_mps ** 2) ** 0.5)
+                                    if avg_speed < 0.24:
+                                        boat['q_total_caution'][n, 2] = True
+                                        if status_switch < 1:
+                                            status_switch = 1
+                                        boat['messages'].append(
+                                            ['vtg-AvgSpeed: VTG data may not be accurate for average boat speed '
+                                             'less than 0.24 m/s (0.8 ft/s);', 2, 8])
+                                        avg_speed_check = 1
+
+                # Create message for consecutive invalid discharge
+                if boat['q_max_run_warning'][:, dt_filter[1]].any():
+                    boat['messages'].append(
+                        [dt_value['warning'] + dt_filter[0] +
+                         'Int. Q for consecutive invalid ensembles exceeds ' +
+                         '%3.1f' % self.q_run_threshold_warning + '%;', 1, module_code])
+                    status_switch = 2
+                elif boat['q_max_run_caution'][:, dt_filter[1]].any():
+                    boat['messages'].append(
+                        [dt_value['caution'] + dt_filter[0] +
+                         'Int. Q for consecutive invalid ensembles exceeds ' +
+                         '%3.1f' % self.q_run_threshold_caution + '%;', 2, module_code])
+                    if status_switch < 1:
+                        status_switch = 1
+
+                # Create message for total invalid discharge
+                if boat['q_total_warning'][:, dt_filter[1]].any():
+                    boat['messages'].append(
+                        [dt_value['warning'] + dt_filter[0] +
+                         'Int. Q for invalid ensembles in a transect exceeds ' +
+                         '%3.1f' % self.q_total_threshold_warning + '%;', 1, module_code])
+                    status_switch = 2
+                elif boat['q_total_caution'][:, dt_filter[1]].any():
+                    boat['messages'].append(
+                        [dt_value['caution'] + dt_filter[0] +
+                         'Int. Q for invalid ensembles in a transect exceeds ' +
+                         '%3.1f' % self.q_total_threshold_caution + '%;', 2, module_code])
+                    if status_switch < 1:
+                        status_switch = 1
+
+            # Create message for all data invalid
+            if boat['all_invalid'].any():
+                boat['status'] = 'warning'
+                boat['messages'].append(
+                    [dt_value['warning'] + dt_value['filter'][0][0] +
+                     'There are no valid data for one or more transects.;', 1, module_code])
+
+            # Set status
+            if status_switch == 2:
+                boat['status'] = 'warning'
+            elif status_switch == 1:
+                boat['status'] = 'caution'
+
+            setattr(self, dt_value['class'], boat)
+
+        lag_gga = []
+        lag_vtg = []
+        self.gga_vel['lag_status'] = 'good'
+        self.vtg_vel['lag_status'] = 'good'
+        for transect in meas.transects:
+            gga, vtg = TransectData.compute_gps_lag(transect)
+            if gga is not None:
+                lag_gga.append(gga)
+            if vtg is not None:
+                lag_vtg.append(vtg)
+        if len(lag_gga) > 0:
+            if np.mean(np.abs(lag_gga)) > 10:
+                self.gga_vel['messages'].append(['GGA: BT and GGA do not appear to be synchronized', 1, 8])
+                if self.gga_vel['status'] != 'warning':
+                    self.gga_vel['status'] = 'warning'
+                    self.gga_vel['lag_status'] = 'warning'
+            elif np.mean(np.abs(lag_gga)) > 2:
+                self.gga_vel['messages'].append(['gga: Lag between BT and GGA > 2 sec', 2, 8])
+                if self.gga_vel['status'] != 'warning':
+                    self.gga_vel['status'] = 'caution'
+                    self.gga_vel['lag_status'] = 'caution'
+        if len(lag_vtg) > 0:
+            if np.mean(np.abs(lag_vtg)) > 10:
+                self.vtg_vel['messages'].append(['VTG: BT and VTG do not appear to be synchronized', 1, 8])
+                if self.vtg_vel['status'] != 'warning':
+                    self.vtg_vel['status'] = 'warning'
+                    self.vtg_vel['lag_status'] = 'warning'
+            elif np.mean(np.abs(lag_vtg)) > 2:
+                self.vtg_vel['messages'].append(['vtg: Lag between BT and VTG > 2 sec', 2, 8])
+                if self.vtg_vel['status'] != 'warning':
+                    self.vtg_vel['status'] = 'caution'
+                    self.vtg_vel['lag_status'] = 'caution'
+
+    def water_qa(self, meas):
+        """Apply quality checks to water data.
+
+        Parameters
+        ----------
+        meas: Measurement
+            Object of class Measurement
+        """
+
+        # Initialize filter labels and indices
+        prefix = ['All: ', 'Original: ', 'ErrorVel: ', 'VertVel: ', 'Other: ', '3Beams: ', 'SNR: ']
+        if meas.transects[0].adcp.manufacturer == 'TRDI':
+            filter_index = [0, 1, 2, 3, 4, 5]
+        else:
+            filter_index = [0, 1, 2, 3, 4, 5, 7]
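+        # filter_index values are rows of w_vel.valid_data; SonTek data add the SNR
+        # filter in row 7 (row 6 is skipped here)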
+
+        n_transects = len(meas.transects)
+        n_filters = len(filter_index) + 1
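+        # The +1 sizes these arrays so columns can be indexed directly by valid_data
+        # row number, leaving room for the SNR row (7) when present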
+        # Initialize dictionaries for each data type
+        self.w_vel['q_total_caution'] = np.tile(False, (n_transects, n_filters))
+        self.w_vel['q_max_run_caution'] = np.tile(False, (n_transects, n_filters))
+        self.w_vel['q_total_warning'] = np.tile(False, (n_transects, n_filters))
+        self.w_vel['q_max_run_warning'] = np.tile(False, (n_transects, n_filters))
+        self.w_vel['all_invalid'] = np.tile(False, n_transects)
+        self.w_vel['q_total'] = np.tile(np.nan, (n_transects, n_filters))
+        self.w_vel['q_max_run'] = np.tile(np.nan, (n_transects, n_filters))
+        self.w_vel['messages'] = []
+        status_switch = 0
+
+        # TODO if meas had a property checked as list it would save creating that list multiple times
+        checked = []
+        for transect in meas.transects:
+            checked.append(transect.checked)
+
+        # At least one transect is being used to compute discharge
+        if any(checked):
+            # Loop through filters
+            for prefix_idx, filter_idx in enumerate(filter_index):
+                # Loop through transects
+                for n, transect in enumerate(meas.transects):
+                    if transect.checked:
+                        valid_original = np.any(transect.w_vel.valid_data[1, :, transect.in_transect_idx].T, 0)
+
+                        # Determine what data each filter have marked invalid. Original invalid data are excluded
+                        valid = np.any(transect.w_vel.valid_data[filter_idx, :, transect.in_transect_idx].T, 0)
+                        if filter_idx > 1:
+                            valid_int = valid.astype(int) - valid_original.astype(int)
+                            valid = valid_int != -1
+
+                        # Check if all data are invalid
+                        if filter_idx == 0:
+                            if np.nansum(valid.astype(int)) < 1:
+                                self.w_vel['all_invalid'][n] = True
+                        # TODO seems like the rest of this should be under else of all invalid or multiple messages
+                        # generated.
+
+                        # Compute characteristics
+                        q_total, q_max_run, number_invalid_ens = QAData.invalid_qa(valid, meas.discharge[n])
+                        self.w_vel['q_total'][n, filter_idx] = q_total
+                        self.w_vel['q_max_run'][n, filter_idx] = q_max_run
+
+                        # Compute percentage compared to total
+                        if meas.discharge[n].total == 0.0:
+                            q_total_percent = np.nan
+                            q_max_run_percent = np.nan
+                        else:
+                            q_total_percent = np.abs((q_total / meas.discharge[n].total) * 100)
+                            q_max_run_percent = np.abs((q_max_run / meas.discharge[n].total) * 100)
+
+                        # Check total invalid discharge in ensembles for warning
+                        if q_total_percent > self.q_total_threshold_warning:
+                            self.w_vel['q_total_warning'][n, filter_idx] = True
+
+                        # Apply run or cluster thresholds
+                        if q_max_run_percent > self.q_run_threshold_warning:
+                            self.w_vel['q_max_run_warning'][n, filter_idx] = True
+                        elif q_max_run_percent > self.q_run_threshold_caution:
+                            self.w_vel['q_max_run_caution'][n, filter_idx] = True
+
+                        # Compute percent discharge interpolated for both cells and ensembles
+                        # This approach doesn't exclude original data
+                        valid_cells = transect.w_vel.valid_data[filter_idx, :, transect.in_transect_idx].T
+                        q_invalid_total = np.nansum(meas.discharge[n].middle_cells[np.logical_not(valid_cells)]) \
+                            + np.nansum(meas.discharge[n].top_ens[np.logical_not(valid)]) \
+                            + np.nansum(meas.discharge[n].bottom_ens[np.logical_not(valid)])
+                        if meas.discharge[n].total == 0.0:
+                            q_invalid_total_percent = np.nan
+                        else:
+                            q_invalid_total_percent = (q_invalid_total / meas.discharge[n].total) * 100
+
+                        if q_invalid_total_percent > self.q_total_threshold_caution:
+                            self.w_vel['q_total_caution'][n, filter_idx] = True
+
+                # Generate messages for ensemble run or clusters
+                if np.any(self.w_vel['q_max_run_warning'][:, filter_idx]):
+                    self.w_vel['messages'].append(['WT-' + prefix[prefix_idx]
+                                                   + 'Int. Q for consecutive invalid ensembles exceeds '
+                                                   + '%3.0f' % self.q_run_threshold_warning
+                                                   + '%;', 1, 11])
+                    status_switch = 2
+                elif np.any(self.w_vel['q_max_run_caution'][:, filter_idx]):
+                    self.w_vel['messages'].append(['wt-' + prefix[prefix_idx]
+                                                   + 'Int. Q for consecutive invalid ensembles exceeds '
+                                                   + '%3.0f' % self.q_run_threshold_caution
+                                                   + '%;', 2, 11])
+                    if status_switch < 1:
+                        status_switch = 1
+
+                # Generate message for total_invalid Q
+                if np.any(self.w_vel['q_total_warning'][:, filter_idx]):
+                    self.w_vel['messages'].append(['WT-' + prefix[prefix_idx]
+                                                   + 'Int. Q for invalid cells and ensembles in a transect exceeds '
+                                                   + '%3.0f' % self.q_total_threshold_warning
+                                                   + '%;', 1, 11])
+                    status_switch = 2
+                elif np.any(self.w_vel['q_total_caution'][:, filter_idx]):
+                    self.w_vel['messages'].append(['wt-' + prefix[prefix_idx]
+                                                   + 'Int. Q for invalid cells and ensembles in a transect exceeds '
+                                                   + '%3.0f' % self.q_total_threshold_caution
+                                                   + '%;', 2, 11])
+                    if status_switch < 1:
+                        status_switch = 1
+
+            # Generate message for all invalid
+            if np.any(self.w_vel['all_invalid']):
+                self.w_vel['messages'].append(['WT-' + prefix[0] + 'There are no valid data for one or more transects.',
+                                               1, 11])
+                status_switch = 2
+
+            # Set status
+            self.w_vel['status'] = 'good'
+            if status_switch == 2:
+                self.w_vel['status'] = 'warning'
+            elif status_switch == 1:
+                self.w_vel['status'] = 'caution'
+        else:
+            self.w_vel['status'] = 'inactive'
+
+    def extrapolation_qa(self, meas):
+        """Apply quality checks to extrapolation methods
+
+        Parameters
+        ----------
+        meas: Measurement
+            Object of class Measurement
+        """
+
+        self.extrapolation['messages'] = []
+
+        checked = []
+        discharges = []
+        for n, transect in enumerate(meas.transects):
+            checked.append(transect.checked)
+            if transect.checked:
+                discharges.append(meas.discharge[n])
+
+        if any(checked):
+            self.extrapolation['status'] = 'good'
+            extrap_uncertainty = Uncertainty.uncertainty_extrapolation(meas, discharges)
+
+            if np.abs(extrap_uncertainty) > 2:
+                self.extrapolation['messages'].append(['Extrapolation: The extrapolation uncertainty is more than '
+                                                       + '2 percent;', 2, 12])
+                self.extrapolation['messages'].append(['    Carefully review the extrapolation;', 2, 12])
+                self.extrapolation['status'] = 'caution'
+        else:
+            self.extrapolation['status'] = 'inactive'
+
+    def edges_qa(self, meas):
+        """Apply quality checks to edge estimates
+
+        Parameters
+        ----------
+        meas: Measurement
+            Object of class Measurement
+        """
+
+        # Initialize variables
+        self.edges['messages'] = []
+        checked = []
+        left_q = []
+        right_q = []
+        total_q = []
+        edge_dist_left = []
+        edge_dist_right = []
+        dist_moved_left = []
+        dist_moved_right = []
+        dist_made_good = []
+        left_type = []
+        right_type = []
+        transect_idx = []
+
+        for n, transect in enumerate(meas.transects):
+            checked.append(transect.checked)
+
+            if transect.checked:
+                left_q.append(meas.discharge[n].left)
+                right_q.append(meas.discharge[n].right)
+                total_q.append(meas.discharge[n].total)
+                dmr, dml, dmg = QAData.edge_distance_moved(transect)
+                dist_moved_right.append(dmr)
+                dist_moved_left.append(dml)
+                dist_made_good.append(dmg)
+                edge_dist_left.append(transect.edges.left.distance_m)
+                edge_dist_right.append(transect.edges.right.distance_m)
+                left_type.append(transect.edges.left.type)
+                right_type.append(transect.edges.right.type)
+                transect_idx.append(n)
+
+        if any(checked):
+            # Set default status to good
+            self.edges['status'] = 'good'
+
+            mean_total_q = np.nanmean(total_q)
+
+            # Check left edge q > 5%
+            self.edges['left_q'] = 0
+
+            left_q_percent = (np.nanmean(left_q) / mean_total_q) * 100
+            temp_idx = np.where(left_q / mean_total_q > 0.05)[0]
+            if len(temp_idx) > 0:
+                self.edges['left_q_idx'] = np.array(transect_idx)[temp_idx]
+            else:
+                self.edges['left_q_idx'] = []
+            if np.abs(left_q_percent) > 5:
+                self.edges['status'] = 'caution'
+                self.edges['messages'].append(['Edges: Left edge Q is greater than 5%;', 2, 13])
+                self.edges['left_q'] = 1
+            elif len(self.edges['left_q_idx']) > 0:
+                self.edges['status'] = 'caution'
+                self.edges['messages'].append(
+                    ['Edges: One or more transects have a left edge Q greater than 5%;', 2, 13])
+                self.edges['left_q'] = 1
+
+            # Check right edge q > 5%
+            self.edges['right_q'] = 0
+            right_q_percent = (np.nanmean(right_q) / mean_total_q) * 100
+            temp_idx = np.where(right_q / mean_total_q > 0.05)[0]
+            if len(temp_idx) > 0:
+                self.edges['right_q_idx'] = np.array(transect_idx)[temp_idx]
+            else:
+                self.edges['right_q_idx'] = []
+            if np.abs(right_q_percent) > 5:
+                self.edges['status'] = 'caution'
+                self.edges['messages'].append(['Edges: Right edge Q is greater than 5%;', 2, 13])
+                self.edges['right_q'] = 1
+            elif len(self.edges['right_q_idx']) > 0:
+                self.edges['status'] = 'caution'
+                self.edges['messages'].append(
+                    ['Edges: One or more transects have a right edge Q greater than 5%;', 2, 13])
+                self.edges['right_q'] = 1
+
+            # Check for consistent sign
+            q_positive = []
+            self.edges['left_sign'] = 0
+            for q in left_q:
+                if q >= 0:
+                    q_positive.append(True)
+                else:
+                    q_positive.append(False)
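+            # Flag an inconsistent sign only when the mean left edge Q exceeds 0.5%
+            # of the total Q; the same rule is applied to the right edge below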
+            if len(np.unique(q_positive)) > 1 and left_q_percent > 0.5:
+                self.edges['status'] = 'caution'
+                self.edges['messages'].append(['Edges: Sign of left edge Q is not consistent;', 2, 13])
+                self.edges['left_sign'] = 1
+
+            q_positive = []
+            self.edges['right_sign'] = 0
+            for q in right_q:
+                if q >= 0:
+                    q_positive.append(True)
+                else:
+                    q_positive.append(False)
+            if len(np.unique(q_positive)) > 1 and right_q_percent > 0.5:
+                self.edges['status'] = 'caution'
+                self.edges['messages'].append(['Edges: Sign of right edge Q is not consistent;', 2, 13])
+                self.edges['right_sign'] = 1
+
+            # Check distance moved
+            dmg_5_percent = 0.05 * np.nanmean(dist_made_good)
+            avg_right_edge_dist = np.nanmean(edge_dist_right)
+            right_threshold = np.nanmin([dmg_5_percent, avg_right_edge_dist])
+            temp_idx = np.where(dist_moved_right > right_threshold)[0]
+            if len(temp_idx) > 0:
+                self.edges['right_dist_moved_idx'] = np.array(transect_idx)[temp_idx]
+                self.edges['status'] = 'caution'
+                self.edges['messages'].append(['Edges: Excessive boat movement in right edge ensembles;', 2, 13])
+            else:
+                self.edges['right_dist_moved_idx'] = []
+
+            avg_left_edge_dist = np.nanmean(edge_dist_left)
+            left_threshold = np.nanmin([dmg_5_percent, avg_left_edge_dist])
+            temp_idx = np.where(dist_moved_left > left_threshold)[0]
+            if len(temp_idx) > 0:
+                self.edges['left_dist_moved_idx'] = np.array(transect_idx)[temp_idx]
+                self.edges['status'] = 'caution'
+                self.edges['messages'].append(['Edges: Excessive boat movement in left edge ensembles;', 2, 13])
+            else:
+                self.edges['left_dist_moved_idx'] = []
+
+            # Check for edge ensembles marked invalid due to excluded distance
+            self.edges['invalid_transect_left_idx'] = []
+            self.edges['invalid_transect_right_idx'] = []
+            for n, transect in enumerate(meas.transects):
+                if transect.checked:
+                    ens_invalid = np.nansum(transect.w_vel.valid_data[0, :, :], 0) > 0
+                    ens_cells_above_sl = np.nansum(transect.w_vel.cells_above_sl, 0) > 0
+                    ens_invalid = np.logical_not(np.logical_and(ens_invalid, ens_cells_above_sl))
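+                    # Treat an ensemble as invalid unless it has at least one valid
+                    # water-velocity cell and at least one cell above the side lobe cutoff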
+                    if np.any(ens_invalid):
+                        if transect.start_edge == 'Left':
+                            invalid_left = ens_invalid[0:int(transect.edges.left.number_ensembles)]
+                            invalid_right = ens_invalid[-int(transect.edges.right.number_ensembles):]
+                        else:
+                            invalid_right = ens_invalid[0:int(transect.edges.right.number_ensembles)]
+                            invalid_left = ens_invalid[-int(transect.edges.left.number_ensembles):]
+                        if len(invalid_left) > 0:
+                            left_invalid_percent = sum(invalid_left) / len(invalid_left)
+                        else:
+                            left_invalid_percent = 0
+                        if len(invalid_right) > 0:
+                            right_invalid_percent = sum(invalid_right) / len(invalid_right)
+                        else:
+                            right_invalid_percent = 0
+                        max_invalid_percent = max([left_invalid_percent, right_invalid_percent]) * 100
+                        if max_invalid_percent > 25:
+                            self.edges['status'] = 'caution'
+                            if np.any(invalid_left):
+                                self.edges['invalid_transect_left_idx'].append(n)
+                            if np.any(invalid_right):
+                                self.edges['invalid_transect_right_idx'].append(n)
+
+            if len(self.edges['invalid_transect_left_idx']) > 0 or len(self.edges['invalid_transect_right_idx']) > 0:
+                self.edges['messages'].append(['Edges: The percent of invalid ensembles exceeds 25% in' +
+                                               ' one or more transects.', 2, 13])
+
+            # Check edges for zero discharge
+            self.edges['left_zero'] = 0
+            temp_idx = np.where(np.round(left_q, 4) == 0)[0]
+            if len(temp_idx) > 0:
+                self.edges['left_zero_idx'] = np.array(transect_idx)[temp_idx]
+                self.edges['status'] = 'warning'
+                self.edges['messages'].append(['EDGES: Left edge has zero Q;', 1, 13])
+                self.edges['left_zero'] = 2
+            else:
+                self.edges['left_zero_idx'] = []
+
+            self.edges['right_zero'] = 0
+            temp_idx = np.where(np.round(right_q, 4) == 0)[0]
+            if len(temp_idx) > 0:
+                self.edges['right_zero_idx'] = np.array(transect_idx)[temp_idx]
+                self.edges['status'] = 'warning'
+                self.edges['messages'].append(['EDGES: Right edge has zero Q;', 1, 13])
+                self.edges['right_zero'] = 2
+            else:
+                self.edges['right_zero_idx'] = []
+
+            # Check consistent edge type
+            self.edges['left_type'] = 0
+            if len(np.unique(left_type)) > 1:
+                self.edges['status'] = 'warning'
+                self.edges['messages'].append(['EDGES: Left edge type is not consistent;', 1, 13])
+                self.edges['left_type'] = 2
+
+            self.edges['right_type'] = 0
+            if len(np.unique(right_type)) > 1:
+                self.edges['status'] = 'warning'
+                self.edges['messages'].append(['EDGES: Right edge type is not consistent;', 1, 13])
+                self.edges['right_type'] = 2
+        else:
+            self.edges['status'] = 'inactive'
+
+    @staticmethod
+    def invalid_qa(valid, discharge):
+        """Computes the total invalid discharge in ensembles that have invalid data. The function also computes
+        the maximum run or cluster of ensembles with the maximum interpolated discharge.
+
+        Parameters
+        ----------
+        valid: np.array(bool)
+            Array identifying valid and invalid ensembles.
+        discharge: QComp
+            Object of class QComp
+
+        Returns
+        -------
+        q_invalid_total: float
+            Total interpolated discharge in invalid ensembles
+        q_invalid_max_run: float
+            Maximum interpolated discharge in a run or cluster of invalid ensembles
+        ens_invalid: int
+            Total number of invalid ensembles
+        """
+
+        # Create bool for invalid data
+        invalid = np.logical_not(valid)
+        q_invalid_total = np.nansum(discharge.middle_ens[invalid]) + np.nansum(discharge.top_ens[invalid]) \
+            + np.nansum(discharge.bottom_ens[invalid])
+
+        # Compute total number of invalid ensembles
+        ens_invalid = np.sum(invalid)
+
+        # Locate runs of invalid ensembles by bracketing the valid array with
+        # sentinel values (-1) and finding where consecutive values change
+        valid_int = np.insert(valid.astype(int), 0, -1)
+        valid_int = np.append(valid_int, -1)
+        valid_run = np.where(np.diff(valid_int) != 0)[0]
+        run_length = np.diff(valid_run)
+
+        # Keep only the lengths of the invalid runs: if the first ensemble is valid
+        # the invalid runs are at the odd positions, otherwise at the even positions
+        run_length0 = run_length[(valid[0] == 1)::2]
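+        # Worked example (hypothetical values): valid = [T, F, F, T, F] gives
+        # valid_int = [-1, 1, 0, 0, 1, 0, -1], valid_run = [0, 1, 3, 4, 5], and
+        # run_length = [1, 2, 1, 1]; valid[0] is True, so
+        # run_length0 = run_length[1::2] = [2, 1], i.e. invalid runs of 2 and 1 ensembles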
+
+        n_runs = len(run_length0)
+
+        if valid[0]:
+            n_start = 1
+        else:
+            n_start = 0
+
+        n_end = len(valid_run) - 1
+
+        if n_runs > 0:
+            q_invalid_run = []
+            for n in range(n_start, n_end, 2):
+                idx_start = valid_run[n]
+                idx_end = valid_run[n + 1]
+                q_invalid_run.append(np.nansum(discharge.middle_ens[idx_start:idx_end])
+                                     + np.nansum(discharge.top_ens[idx_start:idx_end])
+                                     + np.nansum(discharge.bottom_ens[idx_start:idx_end]))
+
+            # Determine the maximum discharge in a single run
+            q_invalid_max_run = np.nanmax(np.abs(q_invalid_run))
+
+        else:
+            q_invalid_max_run = 0.0
+
+        return q_invalid_total, q_invalid_max_run, ens_invalid
+
+    @staticmethod
+    def edge_distance_moved(transect):
+        """Computes the boat movement during edge ensemble collection.
+
+        Parameters
+        ----------
+        transect: Transect
+            Object of class Transect
+
+        Returns
+        -------
+        right_dist_moved: float
+            Distance in m moved during collection of right edge samples
+        left_dist_moved: float
+            Distance in m moved during collection of left edge samples
+        dmg: float
+            Distance made good for the entire transect
+        """
+
+        boat_selected = getattr(transect.boat_vel, transect.boat_vel.selected)
+        ens_duration = transect.date_time.ens_duration_sec
+
+        # Get boat velocities
+        if boat_selected is not None:
+            u_processed = boat_selected.u_processed_mps
+            v_processed = boat_selected.v_processed_mps
+        else:
+            u_processed = np.tile(np.nan, transect.boat_vel.bt_vel.u_processed_mps.shape)
+            v_processed = np.tile(np.nan, transect.boat_vel.bt_vel.v_processed_mps.shape)
+
+        # Compute boat coordinates
+        x_processed = np.nancumsum(u_processed * ens_duration)
+        y_processed = np.nancumsum(v_processed * ens_duration)
+        dmg = (x_processed[-1] ** 2 + y_processed[-1] ** 2) ** 0.5
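+        # dmg is the straight-line distance between the start and end of the track
+        # (distance made good), used by edges_qa as the scale for edge movement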
+
+        # Compute left distance moved
+        # TODO should be a dist moved function
+        left_edge_idx = QComp.edge_ensembles('left', transect)
+        if len(left_edge_idx) > 0:
+            boat_x = x_processed[left_edge_idx[-1]] - x_processed[left_edge_idx[0]]
+            boat_y = y_processed[left_edge_idx[-1]] - y_processed[left_edge_idx[0]]
+            left_dist_moved = (boat_x ** 2 + boat_y ** 2) ** 0.5
+        else:
+            left_dist_moved = np.nan
+
+        # Compute right distance moved
+        right_edge_idx = QComp.edge_ensembles('right', transect)
+        if len(right_edge_idx) > 0:
+            boat_x = x_processed[right_edge_idx[-1]] - x_processed[right_edge_idx[0]]
+            boat_y = y_processed[right_edge_idx[-1]] - y_processed[right_edge_idx[0]]
+            right_dist_moved = (boat_x ** 2 + boat_y ** 2) ** 0.5
+        else:
+            right_dist_moved = np.nan
+
+        return right_dist_moved, left_dist_moved, dmg
+
+    # check for user changes
+    def check_bt_setting(self, meas):
+        """Checks the bt settings to see if they are still on the default
+                        settings.
+
+        Parameters
+        ----------
+        meas: Measurement
+            Object of class Measurement
+        """
+
+        self.settings_dict['tab_bt'] = 'Default'
+
+        s = meas.current_settings()
+        d = meas.qrev_default_settings()
+
+        if s['BTbeamFilter'] != d['BTbeamFilter']:
+            self.bt_vel['messages'].append(['BT: User modified default beam setting.', 3, 8])
+            self.settings_dict['tab_bt'] = 'Custom'
+
+        if s['BTdFilter'] != d['BTdFilter']:
+            self.bt_vel['messages'].append(['BT: User modified default error velocity filter.', 3, 8])
+            self.settings_dict['tab_bt'] = 'Custom'
+
+        if s['BTwFilter'] != d['BTwFilter']:
+            self.bt_vel['messages'].append(['BT: User modified default vertical velocity filter.', 3, 8])
+            self.settings_dict['tab_bt'] = 'Custom'
+
+        if s['BTsmoothFilter'] != d['BTsmoothFilter']:
+            self.bt_vel['messages'].append(['BT: User modified default smooth filter.', 3, 8])
+            self.settings_dict['tab_bt'] = 'Custom'
+
+    def check_wt_settings(self, meas):
+        """Checks the wt settings to see if they are still on the default
+                settings.
+
+        Parameters
+        ----------
+        meas: Measurement
+            Object of class Measurement
+        """
+
+        self.settings_dict['tab_wt'] = 'Default'
+
+        s = meas.current_settings()
+        d = meas.qrev_default_settings()
+
+        if round(s['WTExcludedDistance'], 2) != round(d['WTExcludedDistance'], 2):
+            self.w_vel['messages'].append(['WT: User modified excluded distance.', 3, 11])
+            self.settings_dict['tab_wt'] = 'Custom'
+
+        if s['WTbeamFilter'] != d['WTbeamFilter']:
+            self.w_vel['messages'].append(['WT: User modified default beam setting.', 3, 11])
+            self.settings_dict['tab_wt'] = 'Custom'
+
+        if s['WTdFilter'] != d['WTdFilter']:
+            self.w_vel['messages'].append(['WT: User modified default error velocity filter.', 3, 11])
+            self.settings_dict['tab_wt'] = 'Custom'
+
+        if s['WTwFilter'] != d['WTwFilter']:
+            self.w_vel['messages'].append(['WT: User modified default vertical velocity filter.', 3, 11])
+            self.settings_dict['tab_wt'] = 'Custom'
+
+        if s['WTsnrFilter'] != d['WTsnrFilter']:
+            self.w_vel['messages'].append(['WT: User modified default SNR filter.', 3, 11])
+            self.settings_dict['tab_wt'] = 'Custom'
+
+    def check_extrap_settings(self, meas):
+        """Checks the extrap to see if they are still on the default
+        settings.
+
+        Parameters
+        ----------
+        meas: Measurement
+            Object of class Measurement
+        """
+
+        self.settings_dict['tab_extrap'] = 'Default'
+
+        # Check fit parameters
+        if meas.extrap_fit.sel_fit[0].fit_method != 'Automatic':
+            self.settings_dict['tab_extrap'] = 'Custom'
+            self.extrapolation['messages'].append(['Extrapolation: User modified default automatic setting.', 3, 12])
+
+        # Check data parameters
+        if meas.extrap_fit.sel_fit[-1].data_type.lower() != 'q':
+            self.settings_dict['tab_extrap'] = 'Custom'
+            self.extrapolation['messages'].append(['Extrapolation: User modified data type.', 3, 12])
+
+        if meas.extrap_fit.threshold != 20:
+            self.settings_dict['tab_extrap'] = 'Custom'
+            self.extrapolation['messages'].append(['Extrapolation: User modified default threshold.', 3, 12])
+
+        if meas.extrap_fit.subsection[0] != 0 or meas.extrap_fit.subsection[1] != 100:
+            self.settings_dict['tab_extrap'] = 'Custom'
+            self.extrapolation['messages'].append(['Extrapolation: User modified subsectioning.', 3, 12])
+
+    def check_tempsal_settings(self, meas):
+        """Checks the temp and salinity settings to see if they are still on
+        the default settings.
+
+        Parameters
+        ----------
+        meas: Measurement
+            Object of class Measurement
+        """
+
+        self.settings_dict['tab_tempsal'] = 'Default'
+
+        t_source_change = False
+        salinity_change = False
+        s_sound_change = False
+        t_user_change = False
+        t_adcp_change = False
+
+        if not all(np.isnan([meas.ext_temp_chk['user'], meas.ext_temp_chk['user_orig']])):
+            if meas.ext_temp_chk['user'] != meas.ext_temp_chk['user_orig']:
+                t_user_change = True
+
+        if not all(np.isnan([meas.ext_temp_chk['adcp'], meas.ext_temp_chk['adcp_orig']])):
+            if meas.ext_temp_chk['adcp'] != meas.ext_temp_chk['adcp_orig']:
+                t_adcp_change = True
+
+        # Check each checked transect
+        for idx in meas.checked_transect_idx:
+            transect = meas.transects[idx]
+
+            # Temperature source
+            if transect.sensors.temperature_deg_c.selected != 'internal':
+                t_source_change = True
+
+            # Salinity source (do not reset a change found in an earlier transect)
+            if transect.sensors.salinity_ppt.selected != 'internal':
+                sal = getattr(transect.sensors.salinity_ppt, transect.sensors.salinity_ppt.selected)
+                if not np.all(np.equal(sal.data, transect.sensors.salinity_ppt.internal.data)):
+                    salinity_change = True
+
+            # Speed of Sound
+            if transect.sensors.speed_of_sound_mps.selected != 'internal':
+                s_sound_change = True
+
+        # Report condition and messages
+        if any([t_source_change, salinity_change, s_sound_change, t_adcp_change, t_user_change]):
+            self.settings_dict['tab_tempsal'] = 'Custom'
+
+            if t_source_change:
+                self.temperature['messages'].append(['Temperature: User modified temperature source.', 3, 5])
+
+            if s_sound_change:
+                self.temperature['messages'].append(['Temperature: User modified speed of sound source.', 3, 5])
+
+            if t_user_change:
+                self.temperature['messages'].append(['Temperature: User modified independent temperature.', 3, 5])
+
+            if t_adcp_change:
+                self.temperature['messages'].append(['Temperature: User modified ADCP temperature.', 3, 5])
+
+    def check_gps_settings(self, meas):
+        """Checks the gps settings to see if they are still on the default
+        settings.
+
+        Parameters
+        ----------
+        meas: Measurement
+            Object of class Measurement
+        """
+
+        gps = False
+        self.settings_dict['tab_gps'] = 'Default'
+
+        # Check for transects with gga or vtg data
+        for idx in meas.checked_transect_idx:
+            transect = meas.transects[idx]
+            if transect.boat_vel.gga_vel is not None or transect.boat_vel.vtg_vel is not None:
+                gps = True
+                break
+
+        # If gga or vtg data exist check settings
+        if gps:
+
+            s = meas.current_settings()
+            d = meas.qrev_default_settings()
+
+            if s['ggaDiffQualFilter'] != d['ggaDiffQualFilter']:
+                self.gga_vel['messages'].append(['GPS: User modified default quality setting.', 3, 8])
+                self.settings_dict['tab_gps'] = 'Custom'
+
+            if s['ggaAltitudeFilter'] != d['ggaAltitudeFilter']:
+                self.gga_vel['messages'].append(['GPS: User modified default altitude filter.', 3, 8])
+                self.settings_dict['tab_gps'] = 'Custom'
+
+            if s['GPSHDOPFilter'] != d['GPSHDOPFilter']:
+                self.gga_vel['messages'].append(['GPS: User modified default HDOP filter.', 3, 8])
+                self.settings_dict['tab_gps'] = 'Custom'
+
+            if s['GPSSmoothFilter'] != d['GPSSmoothFilter']:
+                self.gga_vel['messages'].append(['GPS: User modified default smooth filter.', 3, 8])
+                self.settings_dict['tab_gps'] = 'Custom'
+
+    def check_depth_settings(self, meas):
+        """Checks the depth settings to see if they are still on the default
+                settings.
+
+        Parameters
+        ----------
+        meas: Measurement
+            Object of class Measurement
+        """
+
+        self.settings_dict['tab_depth'] = 'Default'
+
+        s = meas.current_settings()
+        d = meas.qrev_default_settings()
+
+        if s['depthReference'] != d['depthReference']:
+            self.depths['messages'].append(['Depths: User modified '
+                                            'depth reference.', 3, 10])
+            self.settings_dict['tab_depth'] = 'Custom'
+
+        if s['depthComposite'] != d['depthComposite']:
+            self.depths['messages'].append(['Depths: User modified '
+                                            'use of composite depths.', 3, 10])
+            self.settings_dict['tab_depth'] = 'Custom'
+
+        if s['depthAvgMethod'] != d['depthAvgMethod']:
+            self.depths['messages'].append(['Depths: User modified '
+                                            'averaging method.', 3, 10])
+            self.settings_dict['tab_depth'] = 'Custom'
+
+        if s['depthFilterType'] != d['depthFilterType']:
+            self.depths['messages'].append(['Depths: User modified '
+                                            'filter type.', 3, 10])
+            self.settings_dict['tab_depth'] = 'Custom'
+
+        for idx in meas.checked_transect_idx:
+            transect = meas.transects[idx]
+            if transect.depths.bt_depths.draft_orig_m != transect.depths.bt_depths.draft_use_m:
+                self.depths['messages'].append(['Depths: User modified '
+                                                'draft.', 3, 10])
+                self.settings_dict['tab_depth'] = 'Custom'
+                break
+
+    def check_edge_settings(self, meas):
+        """Checks the edge settings to see if they are still on the original
+                settings.
+
+        Parameters
+        ----------
+        meas: Measurement
+            Object of class Measurement
+        """
+
+        start_edge_change = False
+        left_edge_type_change = False
+        left_edge_dist_change = False
+        left_edge_ens_change = False
+        left_edge_q_change = False
+        left_edge_coef_change = False
+        right_edge_type_change = False
+        right_edge_dist_change = False
+        right_edge_ens_change = False
+        right_edge_q_change = False
+        right_edge_coef_change = False
+
+        for idx in meas.checked_transect_idx:
+            transect = meas.transects[idx]
+
+            if transect.start_edge != transect.orig_start_edge:
+                start_edge_change = True
+
+            if transect.edges.left.type != transect.edges.left.orig_type:
+                left_edge_type_change = True
+
+            if transect.edges.left.distance_m != transect.edges.left.orig_distance_m:
+                left_edge_dist_change = True
+
+            if transect.edges.left.number_ensembles != transect.edges.left.orig_number_ensembles:
+                left_edge_ens_change = True
+
+            if transect.edges.left.user_discharge_cms != transect.edges.left.orig_user_discharge_cms:
+                left_edge_q_change = True
+
+            if transect.edges.left.cust_coef != transect.edges.left.orig_cust_coef:
+                left_edge_coef_change = True
+
+            if transect.edges.right.type != transect.edges.right.orig_type:
+                right_edge_type_change = True
+
+            if transect.edges.right.distance_m != transect.edges.right.orig_distance_m:
+                right_edge_dist_change = True
+
+            if transect.edges.right.number_ensembles != transect.edges.right.orig_number_ensembles:
+                right_edge_ens_change = True
+
+            if transect.edges.right.user_discharge_cms != transect.edges.right.orig_user_discharge_cms:
+                right_edge_q_change = True
+
+            if transect.edges.right.cust_coef != transect.edges.right.orig_cust_coef:
+                right_edge_coef_change = True
+
+        if any([start_edge_change, left_edge_type_change, left_edge_dist_change, left_edge_ens_change,
+                left_edge_q_change, left_edge_coef_change, right_edge_type_change, right_edge_dist_change,
+                right_edge_ens_change, right_edge_q_change, right_edge_coef_change]):
+            self.settings_dict['tab_edges'] = 'Custom'
+
+            if start_edge_change:
+                self.edges['messages'].append(['Edges: User modified start edge.', 3, 10])
+            if left_edge_type_change:
+                self.edges['messages'].append(['Edges: User modified left edge type.', 3, 10])
+            if left_edge_dist_change:
+                self.edges['messages'].append(['Edges: User modified left edge distance.', 3, 10])
+            if left_edge_ens_change:
+                self.edges['messages'].append(['Edges: User modified left number of ensembles.', 3, 10])
+            if left_edge_q_change:
+                self.edges['messages'].append(['Edges: User modified left user discharge.', 3, 10])
+            if left_edge_coef_change:
+                self.edges['messages'].append(['Edges: User modified left custom coefficient.', 3, 10])
+            if right_edge_type_change:
+                self.edges['messages'].append(['Edges: User modified right edge type.', 3, 10])
+            if right_edge_dist_change:
+                self.edges['messages'].append(['Edges: User modified right edge distance.', 3, 10])
+            if right_edge_ens_change:
+                self.edges['messages'].append(['Edges: User modified right number of ensembles.', 3, 10])
+            if right_edge_q_change:
+                self.edges['messages'].append(['Edges: User modified right user discharge.', 3, 10])
+            if right_edge_coef_change:
+                self.edges['messages'].append(['Edges: User modified right custom coefficient.', 3, 10])
+        else:
+            self.settings_dict['tab_edges'] = 'Default'
+
+    def check_mbt_settings(self, meas):
+        """Checks the mbt settings to see if they are still on the original
+                settings.
+
+        Parameters
+        ----------
+        meas: Measurement
+            Object of class Measurement
+        """
+
+        # Set the default, then check for user changes if there are moving-bed tests
+        self.settings_dict['tab_mbt'] = 'Default'
+
+        if len(meas.mb_tests) >= 1:
+            mbt = meas.mb_tests
+
+            mb_user_valid = []
+            mb_used = []
+
+            auto = copy.deepcopy(mbt)
+            auto = MovingBedTests.auto_use_2_correct(auto)
+
+            for n in range(len(mbt)):
+
+                # Flag tests whose validity the user has overridden
+                mb_user_valid.append(not mbt[n].user_valid)
+
+                if mbt[n].use_2_correct != auto[n].use_2_correct and \
+                        meas.transects[meas.checked_transect_idx[0]].boat_vel.selected == 'bt_vel':
+                    mb_used.append(True)
+                else:
+                    mb_used.append(False)
+
+            if any(mb_user_valid):
+                self.settings_dict['tab_mbt'] = 'Custom'
+                self.movingbed['messages'].append(['Moving-Bed Test: '
+                                                   'User modified '
+                                                   'valid test settings.', 3, 6])
+            if any(mb_used):
+                self.settings_dict['tab_mbt'] = 'Custom'
+                self.movingbed['messages'].append(['Moving-Bed Test: '
+                                                   'User modified '
+                                                   'use to correct settings.', 3, 6])
+
+        if meas.observed_no_moving_bed:
+            self.settings_dict['tab_mbt'] = 'Custom'
+
+    def check_compass_settings(self, meas):
+        """Checks the compass settings for changes.
+
+        Parameters
+        ----------
+        meas: Measurement
+            Object of class Measurement
+        """
+
+        self.settings_dict['tab_compass'] = 'Default'
+
+        magvar_change = False
+        align_change = False
+
+        # Check each checked transect
+        for idx in meas.checked_transect_idx:
+            transect = meas.transects[idx]
+
+            # Magvar
+            if transect.sensors.heading_deg.internal.mag_var_deg != \
+                    transect.sensors.heading_deg.internal.mag_var_orig_deg:
+                magvar_change = True
+
+            # Heading offset
+            if transect.sensors.heading_deg.external is not None:
+                if transect.sensors.heading_deg.external.align_correction_deg != \
+                        transect.sensors.heading_deg.external.align_correction_orig_deg:
+                    align_change = True
+
+        # Report condition and messages
+        if any([magvar_change, align_change]):
+            self.settings_dict['tab_compass'] = 'Custom'
+
+            if magvar_change:
+                self.compass['messages'].append(['Compass: User modified magnetic variation.', 3, 4])
+
+            if align_change:
+                self.compass['messages'].append(['Compass: User modified heading offset.', 3, 4])
+
+    def check_oursin(self, meas):
+        """Checks the compass settings for changes.
+
+        Parameters
+        ----------
+        meas: Measurement
+            Object of class Measurement
+        """
+
+        self.settings_dict['tab_uncertainty_2_advanced'] = 'Default'
+        self.settings_dict['tab_uncertainty'] = 'Default'
+
+        for key in meas.oursin.user_advanced_settings.keys():
+            if not np.isnan(meas.oursin.user_advanced_settings[key]):
+                self.settings_dict['tab_uncertainty_2_advanced'] = 'Custom'
+                self.settings_dict['tab_uncertainty'] = 'Custom'
+                break
+
+        for key in meas.oursin.user_specified_u.keys():
+            if not np.isnan(meas.oursin.user_specified_u[key]):
+                self.settings_dict['tab_uncertainty'] = 'Custom'
+                break
diff --git a/Classes/QComp.py b/Classes/QComp.py
new file mode 100644
index 0000000000000000000000000000000000000000..f227e5e1bc2073ade6272a0ea7a3877d350dd214
--- /dev/null
+++ b/Classes/QComp.py
@@ -0,0 +1,1925 @@
+import numpy as np
+from Classes.TransectData import TransectData
+from Classes.BoatStructure import BoatStructure
+from MiscLibs.common_functions import cart2pol, pol2cart, nan_less, nan_greater
+# from profilehooks import profile
+from DischargeFunctions.top_discharge_extrapolation import extrapolate_top
+from DischargeFunctions.bottom_discharge_extrapolation import extrapolate_bot
+
+
+class QComp(object):
+    """Computes the discharge for each transect.
+
+    Attributes
+    ----------
+    top: float
+        Transect total extrapolated top discharge
+    middle: float
+        Transect total measured middle discharge including interpolations
+    bottom: float
+        Transect total extrapolated bottom discharge
+    top_ens: np.array(float)
+        Extrapolated top discharge by ensemble
+    middle_cells: np.array(float)
+        Measured middle discharge including interpolation by cell
+    middle_ens: np.array(float)
+        Measured middle discharge including interpolation by ensemble
+    bottom_ens: np.array(float)
+        Extrapolated bottom discharge by ensemble
+    left: float
+        Left edge discharge
+    left_idx: list
+        Ensembles used for left edge
+    right: float
+        Right edge discharge
+    right_idx: list
+        Ensembles used for right edge
+    total_uncorrected: float
+        Total discharge for transect uncorrected for moving-bed, if required
+    total: float
+        Total discharge with moving-bed correction applied if necessary
+    correction_factor: float
+        Moving-bed correction factor, if required
+    int_cells: float
+        Total discharge computed for invalid depth cells excluding invalid ensembles
+    int_ens: float
+        Total discharge computed for invalid ensembles
+    """
+    
+    def __init__(self):
+        """Initialize class and instance variables."""
+
+        self.top = None  # Transect total extrapolated top discharge
+        self.middle = None  # Transect total measured middle discharge including interpolations
+        self.bottom = None  # Transect total extrapolated bottom discharge
+        self.top_ens = None  # Extrapolated top discharge by ensemble
+        self.middle_cells = None  # Measured middle discharge including interpolation by cell
+        self.middle_ens = None  # Measured middle discharge including interpolation by ensemble
+        self.bottom_ens = None  # Extrapolated bottom discharge by ensemble
+        self.left = None  # Left edge discharge
+        self.left_idx = []  # Ensembles used for left edge
+        self.right = None  # Right edge discharge
+        self.right_idx = []  # Ensembles used for right edge
+        self.total_uncorrected = None  # Total discharge for transect uncorrected for moving-bed, if required
+        self.total = None  # Total discharge with moving-bed correction applied if necessary
+        self.correction_factor = 1  # Moving-bed correction factor, if required
+        self.int_cells = None  # Total discharge computed for invalid depth cells excluding invalid ensembles
+        self.int_ens = None  # Total discharge computed for invalid ensembles
+
+    # @profile
+    def populate_data(self, data_in, moving_bed_data=None, top_method=None, bot_method=None, exponent=None):
+        """Discharge is computed using the data provided to the method.
+        Water data provided are assumed to be corrected for the navigation reference.
+        If a moving-bed correction is to be applied it is computed and applied.
+        The TRDI method using expanded delta time is applied if the processing method is WR2.
+        
+        Parameters
+        ----------
+        data_in: TransectData
+            Object TransectData
+        moving_bed_data: list
+            List of MovingBedTests objects
+        top_method: str
+            Top extrapolation method
+        bot_method: str
+            Bottom extrapolation method
+        exponent: float
+            Extrapolation exponent
+        """
+
+        # Use bottom track interpolation settings to determine the appropriate algorithms to apply
+        if data_in.boat_vel.bt_vel.interpolate == 'None':
+            processing = 'WR2'
+        elif data_in.boat_vel.bt_vel.interpolate == 'Linear':
+            processing = 'QRev'
+        else:
+            processing = 'RSL'
+
+        # Compute cross product
+        x_prod = QComp.cross_product(data_in)
+        
+        # Get index of ensembles in moving-boat portion of transect
+        in_transect_idx = data_in.in_transect_idx
+        
+        if processing == 'WR2':
+            # TRDI uses expanded delta time to handle invalid ensembles, which can be caused by invalid BT,
+            # WT, or depth. QRev by default handles invalid data through linear interpolation of the
+            # invalid data type. This if statement and associated code is required to maintain
+            # compatibility with WinRiver II discharge computations.
+            
+            # Determine valid ensembles
+            # Determine valid ensembles (any valid cell in the ensemble); the axis argument
+            # is required so np.any returns a value per ensemble rather than a scalar
+            valid_ens = np.any(np.logical_not(np.isnan(x_prod)), 0)
+            valid_ens = valid_ens[in_transect_idx]
+            
+            # Compute the ensemble duration using TRDI approach of expanding delta time to compensate
+            # for invalid ensembles
+            n_ens = len(valid_ens)
+            ens_dur = data_in.date_time.ens_duration_sec[in_transect_idx]
+            delta_t = np.tile([np.nan], n_ens)
+            cum_dur = 0
+            for j in range(1, n_ens):
+                cum_dur = np.nansum(np.hstack([cum_dur, ens_dur[j]]))
+                if valid_ens[j]:
+                    delta_t[j] = cum_dur
+                    cum_dur = 0
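+
+            # Illustrative trace (hypothetical values): with ens_dur = [1, 1, 1, 1] s and
+            # valid_ens = [True, False, False, True], the loop above yields
+            # delta_t = [nan, nan, nan, 3], i.e. the valid ensemble at index 3 absorbs the
+            # duration of the two invalid ensembles that precede it.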
+                    
+        else:
+            # For non-WR2 processing use actual ensemble duration
+            delta_t = data_in.date_time.ens_duration_sec[in_transect_idx]
+            
+        # Compute measured or middle discharge
+        self.middle_cells = QComp.discharge_middle_cells(x_prod, data_in, delta_t)
+        self.middle_ens = np.nansum(self.middle_cells, 0)
+        self.middle = np.nansum(self.middle_ens)
+        
+        # Compute the top discharge
+        trans_select = getattr(data_in.depths, data_in.depths.selected)
+        num_top_method = {'Power': 0, 'Constant': 1, '3-Point': 2, None: -1}
+        self.top_ens = extrapolate_top(x_prod, data_in.w_vel.valid_data[0, :, :],
+                                       num_top_method[data_in.extrap.top_method],
+                                       data_in.extrap.exponent, data_in.in_transect_idx, trans_select.depth_cell_size_m,
+                                       trans_select.depth_cell_depth_m, trans_select.depth_processed_m, delta_t,
+                                       num_top_method[top_method], exponent)
+        self.top = np.nansum(self.top_ens)
+
+        # Compute the bottom discharge
+        num_bot_method = {'Power': 0, 'No Slip': 1, None: -1}
+        self.bottom_ens = extrapolate_bot(x_prod,
+                                          data_in.w_vel.valid_data[0, :, :],
+                                          num_bot_method[data_in.extrap.bot_method],
+                                          data_in.extrap.exponent,
+                                          data_in.in_transect_idx,
+                                          trans_select.depth_cell_size_m,
+                                          trans_select.depth_cell_depth_m,
+                                          trans_select.depth_processed_m, delta_t,
+                                          num_bot_method[bot_method],
+                                          exponent)
+        self.bottom = np.nansum(self.bottom_ens)
+
+        # Compute interpolated cell and ensemble discharge from computed
+        # measured discharge
+        self.interpolate_no_cells(data_in)
+        self.middle = np.nansum(self.middle_ens)
+        self.int_cells, self.int_ens = QComp.discharge_interpolated(self.top_ens, self.middle_cells,
+                                                                    self.bottom_ens, data_in)
+        
+        # Compute right edge discharge
+        if data_in.edges.right.type != 'User Q':
+            self.right, self.right_idx = QComp.discharge_edge('right', data_in, top_method, bot_method, exponent)
+        else:
+            self.right = data_in.edges.right.user_discharge_cms
+            self.right_idx = []
+
+        # Compute left edge discharge
+        if data_in.edges.left.type != 'User Q':
+            self.left, self.left_idx = QComp.discharge_edge('left', data_in, top_method, bot_method, exponent)
+        else:
+            self.left = data_in.edges.left.user_discharge_cms
+            self.left_idx = []
+            
+        # Compute moving-bed correction, if applicable.  Two checks are used to account for the
+        # way the meas object is created.
+
+        # Moving-bed corrections are only applied to bottom track referenced computations
+        mb_type = None
+        if data_in.boat_vel.selected == 'bt_vel':
+            if moving_bed_data is not None:
+
+                # Determine if a moving-bed test is to be used for correction
+                use_2_correct = []
+                for mb_idx, test in enumerate(moving_bed_data):
+                    use_2_correct.append(test.use_2_correct)
+                    if test.use_2_correct:
+                        mb_type = test.type
+
+                if any(use_2_correct):
+
+                    # Make sure composite tracks are turned off
+                    if data_in.boat_vel.composite == 'Off':
+                        # Apply appropriate moving-bed test correction method
+                        if mb_type == 'Stationary':
+                            self.correction_factor = self.stationary_correction_factor(self.top, self.middle,
+                                                                                       self.bottom, data_in,
+                                                                                       moving_bed_data, delta_t)
+                        else:
+                            self.correction_factor = \
+                                self.loop_correction_factor(self.top, self.middle,
+                                                            self.bottom, data_in,
+                                                            moving_bed_data[use_2_correct.index(True)],
+                                                            delta_t)
+
+        self.total_uncorrected = self.left + self.right + self.middle + self.bottom + self.top
+
+        # Compute final discharge using correction if applicable
+        if self.correction_factor is None or self.correction_factor == 1 or np.isnan(self.correction_factor):
+            self.total = self.total_uncorrected
+        else:
+            self.total = self.left + self.right + (self.middle + self.bottom + self.top) * self.correction_factor
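+            # Hypothetical worked example: with left = 0.5, right = 0.4, top = 2.0,
+            # middle = 15.0, and bottom = 2.1 m^3/s and correction_factor = 1.02,
+            # total = 0.5 + 0.4 + (15.0 + 2.1 + 2.0) * 1.02 = 20.382 m^3/s; note that
+            # the edges are not scaled by the moving-bed correction.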
+
+    @staticmethod
+    def qrev_mat_in(meas_struct):
+        """Processes the Matlab data structure to obtain a list of QComp objects containing the discharge data from the
+        Matlab data structure.
+
+        Parameters
+        ----------
+        meas_struct: mat_struct
+            Matlab data structure obtained from sio.loadmat
+
+        Returns
+        -------
+        discharge: list
+            List of QComp data objects
+        """
+
+        discharge = []
+        if hasattr(meas_struct.discharge, 'bottom'):
+            # Measurement has discharge data from only one transect
+            q = QComp()
+            q.populate_from_qrev_mat(meas_struct.discharge)
+            discharge.append(q)
+        else:
+            # Measurement has discharge data from multiple transects
+            for q_data in meas_struct.discharge:
+                q = QComp()
+                q.populate_from_qrev_mat(q_data)
+                discharge.append(q)
+        return discharge
+
+    def populate_from_qrev_mat(self, q_in):
+        """Populated QComp instance variables with data from QRev Matlab file.
+
+        Parameters
+        ----------
+        q_in: mat_struct
+            mat_struct_object containing QComp class data
+        """
+
+        self.top = q_in.top
+        self.middle = q_in.middle
+        self.bottom = q_in.bottom
+
+        if type(q_in.topEns) is not np.ndarray:
+            self.top_ens = np.array([q_in.topEns])
+            self.middle_ens = np.array([q_in.middleEns])
+            self.bottom_ens = np.array([q_in.bottomEns])
+        else:
+            self.top_ens = q_in.topEns
+            self.middle_ens = q_in.middleEns
+            self.bottom_ens = q_in.bottomEns
+
+        self.middle_cells = q_in.middleCells
+        # Handle special case for 1 ensemble or 1 cell
+        if len(self.middle_cells.shape) < 2:
+            if self.middle_ens.size > 1:
+                # Multiple ensembles, one cell
+                self.middle_cells = self.middle_cells[np.newaxis, :]
+            else:
+                # One ensemble, multiple cells
+                self.middle_cells = self.middle_cells[:, np.newaxis]
+
+        self.left = q_in.left
+        # If only one value, it will be read in as int but needs to be an array of len 1
+        if type(q_in.leftidx) is int:
+            self.left_idx = np.array([q_in.leftidx])
+        else:
+            self.left_idx = q_in.leftidx
+        self.right = q_in.right
+        # If only one value, it will be read in as int but needs to be an array of len 1
+        if type(q_in.rightidx) is int:
+            self.right_idx = np.array([q_in.rightidx])
+        else:
+            self.right_idx = q_in.rightidx
+        self.total_uncorrected = q_in.totalUncorrected
+        self.total = q_in.total
+        self.correction_factor = q_in.correctionFactor
+        if type(self.correction_factor) is np.ndarray:
+            if len(self.correction_factor) == 0:
+                self.correction_factor = 1
+            else:
+                self.correction_factor = self.correction_factor[0]
+        self.int_cells = q_in.intCells
+        self.int_ens = q_in.intEns
+
+    def interpolate_no_cells(self, transect_data):
+        """Computes discharge for ensembles where the depth is too
+           shallow for any valid depth cells. The computation is done
+           using interpolation of unit discharge defined as the ensemble
+           discharge divided by the depth of the ensemble and the
+           duration of the ensemble. The independent variable for the
+           interpolation is the track distance. After interpolation the
+           discharge for the interpolated ensembles is computed by
+           multiplying the interpolated value by the depth and duration
+           of those ensembles to achieve discharge for those ensembles.
+
+           Parameters
+           ----------
+           transect_data: TransectData
+                Object of TransectData
+        """
+
+        # Compute the discharge in each ensemble
+        q_ensemble = self.top_ens + self.middle_ens + self.bottom_ens
+        valid_ens = np.where(np.logical_not(np.isnan(q_ensemble)))[0]
+        if len(valid_ens) > 1:
+            idx = np.where(np.isnan(q_ensemble))[0]
+
+            if len(idx) > 0:
+
+                # Compute the unit discharge by depth for each ensemble
+                depth_selected = getattr(transect_data.depths, transect_data.depths.selected)
+                unit_q_depth = (q_ensemble / depth_selected.depth_processed_m[transect_data.in_transect_idx]) \
+                    / transect_data.date_time.ens_duration_sec[transect_data.in_transect_idx]
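+                # Hypothetical worked example: q_ensemble = 0.24 m^3/s over a processed
+                # depth of 2.0 m and an ensemble duration of 1.2 s gives
+                # unit_q_depth = 0.24 / 2.0 / 1.2 = 0.1; multiplying the interpolated
+                # value by depth and duration below recovers a discharge in m^3/s.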
+
+                # Compute boat track
+                boat_track = BoatStructure.compute_boat_track(transect_data, transect_data.boat_vel.selected)
+
+                # Create strict monotonic vector for 1-D interpolation
+                q_mono = unit_q_depth
+                x_mono = boat_track['distance_m'][transect_data.in_transect_idx]
+
+                # Identify duplicate values, and replace with an average
+                dups = self.group_consecutives(x_mono)
+                if len(dups):
+                    for dup in dups:
+                        q_avg = np.nanmean(q_mono[np.array(dup)])
+                        q_mono[dup[0]] = q_avg
+                        q_mono[dup[1::]] = np.nan
+                        x_mono[dup[1::]] = np.nan
+
+                valid_q_mono = np.logical_not(np.isnan(q_mono))
+                valid_x_mono = np.logical_not(np.isnan(x_mono))
+                valid = np.all(np.vstack([valid_q_mono, valid_x_mono]), 0)
+
+                # Interpolate unit q
+                if np.any(valid):
+                    unit_q_int = np.interp(boat_track['distance_m'][transect_data.in_transect_idx], x_mono[valid],
+                                           q_mono[valid], left=np.nan, right=np.nan)
+                else:
+                    unit_q_int = 0
+
+                # Compute the discharge in each ensemble based on interpolated data
+                q_int = unit_q_int * depth_selected.depth_processed_m[transect_data.in_transect_idx] \
+                    * transect_data.date_time.ens_duration_sec[transect_data.in_transect_idx]
+                self.middle_ens[idx] = q_int[idx]
+
+    @staticmethod
+    def group_consecutives(vals):
+        """Return list of consecutive lists of numbers from vals (number list).
+        """
+
+        run = []
+        result = []
+        expect = vals[0]
+        j = 0
+        for n in range(1, len(vals)):
+            if vals[n] == expect:
+                j += 1
+                if j > 1:
+                    run.append(n)
+                elif j > 0:
+                    run.append(n-1)
+                    run.append(n)
+            elif j > 0:
+                result.append(run)
+                run = []
+                j = 0
+            expect = vals[n]
+        # Flush a run of duplicates that extends to the end of vals
+        if j > 0:
+            result.append(run)
+        return result
+
+    @staticmethod
+    def cross_product(transect=None, w_vel_x=None, w_vel_y=None, b_vel_x=None, b_vel_y=None, start_edge=None):
+        """Computes the cross product of the water and boat velocity.
+
+        Input data can be a transect or component vectors for the water and boat velocities with the start edge.
+
+        Parameters
+        ----------
+        transect: TransectData
+            Object of TransectData
+        w_vel_x: np.array(float)
+            Array of water velocity in the x direction
+        w_vel_y: np.array(float)
+            Array of water velocity in the y direction
+        b_vel_x: np.array(float)
+            Vector of navigation velocity in x-direction
+        b_vel_y: np.array(float)
+            Vector of navigation velocity in y-direction
+        start_edge: str
+            Starting edge of transect (Left or Right)
+
+        Returns
+        -------
+        xprod: np.array(float)
+            Cross product values
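+
+        Example
+        -------
+        A minimal doctest-style sketch using hypothetical component vectors
+        rather than a transect:
+
+        >>> import numpy as np
+        >>> QComp.cross_product(w_vel_x=np.array([[1.0]]), w_vel_y=np.array([[0.0]]),
+        ...                     b_vel_x=np.array([0.0]), b_vel_y=np.array([0.5]),
+        ...                     start_edge='Right')
+        array([[0.5]])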
+        """
+
+        if transect is not None:
+            # Prepare water track data
+            cells_above_sl = np.array(transect.w_vel.cells_above_sl).astype(float)
+            cells_above_sl[cells_above_sl < 0.5] = np.nan
+            w_vel_x = transect.w_vel.u_processed_mps * cells_above_sl
+            w_vel_y = transect.w_vel.v_processed_mps * cells_above_sl
+
+            # Get navigation data from object properties
+            trans_select = getattr(transect.boat_vel, transect.boat_vel.selected)
+            if trans_select is not None:
+                b_vel_x = trans_select.u_processed_mps
+                b_vel_y = trans_select.v_processed_mps
+            else:
+                b_vel_x = np.tile([np.nan], transect.boat_vel.bt_vel.u_processed_mps.shape)
+                b_vel_y = np.tile([np.nan], transect.boat_vel.bt_vel.v_processed_mps.shape)
+
+            start_edge = transect.start_edge
+
+        # Compute the cross product
+        xprod = np.multiply(w_vel_x, b_vel_y) - np.multiply(w_vel_y, b_vel_x)
+
+        # Correct the sign of the cross product based on the start edge
+        if start_edge == 'Right':
+            direction = 1
+        else:
+            direction = -1
+        xprod = xprod * direction
+
+        return xprod
+
+    @staticmethod
+    def discharge_middle_cells(xprod, transect, delta_t):
+        """Computes the discharge in the measured or middle portion of the cross section.
+
+        Parameters
+        ----------
+        xprod: np.array(float)
+            Cross product computed from the cross product method
+        transect: TransectData
+            Object of TransectData
+        delta_t: np.array(float)
+            Duration of each ensemble computed from QComp
+
+        Returns
+        -------
+        q_mid_cells: np.array(float)
+            Discharge in each bin or depth cell
+        """
+
+        # Assign properties from transect object to local variables
+        in_transect_idx = transect.in_transect_idx
+        trans_select = getattr(transect.depths, transect.depths.selected)
+        cell_size = trans_select.depth_cell_size_m
+
+        # Determine if xprod contains edge data and process appropriately
+        q_mid_cells = np.multiply(xprod[:, in_transect_idx] * cell_size[:, in_transect_idx], delta_t)
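+        # Hypothetical worked example: a 0.25 m cell with xprod = 0.5 m^2/s^2 observed
+        # for delta_t = 1.2 s contributes 0.5 * 0.25 * 1.2 = 0.15 m^3/s to the middle
+        # discharge of that ensemble.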
+
+        return q_mid_cells
+
+    @staticmethod
+    def discharge_edge(edge_loc, transect, top_method=None, bot_method=None, exponent=None):
+        """Computes edge discharge.
+
+        Parameters
+        ----------
+        edge_loc: str
+            Edge location (left or right)
+        transect: TransectData
+            Object of TransectData
+        top_method: str
+            Top extrapolation method
+        bot_method: str
+            Bottom extrapolation method
+        exponent: float
+            Exponent
+
+        Returns
+        -------
+        edge_q: float
+            Computed edge discharge
+        edge_idx: list
+            List of valid edge ensembles
+        """
+
+        # Determine what ensembles to use for edge computation.
+        # The method of determining varies by manufacturer
+        edge_idx = QComp.edge_ensembles(edge_loc, transect)
+
+        # Average depth for the edge ensembles
+        trans_select = getattr(transect.depths, transect.depths.selected)
+        depth = trans_select.depth_processed_m[edge_idx]
+        depth_avg = np.nanmean(depth)
+
+        # Edge distance
+        edge_selected = getattr(transect.edges, edge_loc)
+        edge_dist = edge_selected.distance_m
+
+        # Compute edge velocity and sign
+        edge_vel_sign, edge_vel_mag = QComp.edge_velocity(edge_idx, transect, top_method, bot_method, exponent)
+
+        # Compute edge coefficient
+        coef = QComp.edge_coef(edge_loc, transect)
+
+        # Compute edge discharge
+        edge_q = coef * depth_avg * edge_vel_mag * edge_dist * edge_vel_sign
+        if np.isnan(edge_q):
+            edge_q = 0
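+
+        # Hypothetical worked example: an edge coefficient of 0.3535 with
+        # depth_avg = 1.2 m, edge_vel_mag = 0.4 m/s, and edge_dist = 5 m gives
+        # edge_q = 0.3535 * 1.2 * 0.4 * 5 = 0.85 m^3/s, signed by edge_vel_sign.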
+
+        return edge_q, edge_idx
+
+    @staticmethod
+    def edge_ensembles(edge_loc, transect):
+        """This function computes the starting and ending ensemble numbers for an edge.
+
+         This method uses either the TRDI approach, which uses the specified number of valid ensembles,
+         or the SonTek approach, which uses the specified number of ensembles prior to screening for valid data.
+
+        Parameters
+        ----------
+        edge_loc: str
+            Edge location (left or right)
+        transect: TransectData
+            Object of TransectData
+
+        Returns
+        -------
+        edge_idx: np.array
+            Indices of ensembles used to compute edge discharge
+        """
+
+        # Assign number of ensembles in edge to local variable
+        edge_select = getattr(transect.edges, edge_loc)
+        num_edge_ens = int(edge_select.number_ensembles)
+
+        # TRDI method
+        if transect.adcp.manufacturer == 'TRDI':
+            # Determine the indices of the edge ensembles which contain
+            # the specified number of valid ensembles
+            # noinspection PyTypeChecker
+            valid_ens = QComp.valid_edge_ens(transect)
+            if num_edge_ens > len(valid_ens):
+                num_edge_ens = len(valid_ens)
+            if edge_loc.lower() == transect.start_edge.lower():
+                edge_idx = np.where(valid_ens)[0][0:num_edge_ens]
+            else:
+                edge_idx = np.where(valid_ens)[0][-num_edge_ens::]
+
+        # Sontek Method
+        else:
+            # Determine the indices of the edge ensembles as collected by RiverSurveyor.  There
+            # is no check as to whether the ensembles contain valid data
+            trans_select = getattr(transect.depths, transect.depths.selected)
+            n_ensembles = len(trans_select.depth_processed_m)
+            if num_edge_ens > n_ensembles:
+                num_edge_ens = n_ensembles
+            if edge_loc.lower() == transect.start_edge.lower():
+                edge_idx = np.arange(0, num_edge_ens)
+            else:
+                edge_idx = np.arange(n_ensembles - num_edge_ens, n_ensembles)
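+
+        # Illustrative sketch (hypothetical numbers): for a transect that starts on the
+        # left with num_edge_ens = 10, the left edge uses the first 10 valid ensembles
+        # (TRDI) or simply the first 10 recorded ensembles (SonTek); the right edge uses
+        # the corresponding ensembles at the end of the transect.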
+
+        return edge_idx
+
+    @staticmethod
+    def edge_velocity(edge_idx, transect, top_method=None, bot_method=None, exponent=None):
+        """Computes the edge velocity.
+
+        Different methods may be used depending on settings in transect.
+
+        Parameters
+        ----------
+        edge_idx: np.array
+            Indices of ensembles used to compute edge discharge
+        transect: TransectData
+            Object of TransectData
+        top_method: str
+            Top extrapolation method
+        bot_method: str
+            Bottom extrapolation method
+        exponent: float
+            Exponent
+
+        Returns
+        -------
+        edge_vel_sign: int
+            Sign of edge velocity (discharge)
+        edge_vel_mag: float
+            Magnitude of edge velocity
+        """
+
+        # Set default return
+        edge_vel_sign = 1
+        edge_vel_mag = 0
+
+        # Check to make sure there is edge data
+        if len(edge_idx) > 0:
+
+            # Compute edge velocity using specified method
+            # Used by TRDI
+            if transect.edges.vel_method == 'MeasMag':
+                edge_vel_mag, edge_vel_sign = QComp.edge_velocity_trdi(edge_idx, transect)
+
+            # Used by Sontek
+            elif transect.edges.vel_method == 'VectorProf':
+                edge_vel_mag, edge_vel_sign = QComp.edge_velocity_sontek(edge_idx, transect, top_method,
+                                                                         bot_method, exponent)
+
+            # USGS proposed method
+            elif transect.edges.vel_method == 'Profile':
+                edge_vel_mag, edge_vel_sign = QComp.edge_velocity_profile(edge_idx, transect)
+
+        return edge_vel_sign, edge_vel_mag
+
+    @staticmethod
+    def edge_velocity_trdi(edge_idx, transect):
+        """Computes edge velocity magnitude and sign using TRDI's method.
+
+         This method uses only the measured data and no extrapolation
+
+        Parameters
+        ----------
+        edge_idx: np.array
+            Indices of ensembles used to compute edge discharge
+        transect: TransectData
+            Object of TransectData
+
+        Returns
+        -------
+        edge_vel_mag: float
+            Magnitude of edge velocity
+        edge_vel_sign: int
+            Sign of edge velocity (discharge)
+        """
+
+        # Assign water velocity to local variables
+        x_vel = transect.w_vel.u_processed_mps[:, edge_idx]
+        y_vel = transect.w_vel.v_processed_mps[:, edge_idx]
+
+        # Use only valid data
+        valid = np.copy(transect.w_vel.valid_data[0, :, edge_idx].T)
+        x_vel[np.logical_not(valid)] = np.nan
+        y_vel[np.logical_not(valid)] = np.nan
+
+        # Compute the mean velocity components
+        x_vel_avg = np.nanmean(np.nanmean(x_vel, 0))
+        y_vel_avg = np.nanmean(np.nanmean(y_vel, 0))
+
+        # Compute magnitude and direction
+        edge_dir, edge_vel_mag = cart2pol(x_vel_avg, y_vel_avg)
+
+        # Compute unit vector to help determine sign
+        unit_water_x, unit_water_y = pol2cart(edge_dir, 1)
+        if transect.start_edge == 'Right':
+            dir_sign = 1
+        else:
+            dir_sign = -1
+
+        # Compute unit boat vector to help determine sign
+        ens_delta_time = transect.date_time.ens_duration_sec
+        in_transect_idx = transect.in_transect_idx
+        trans_selected = getattr(transect.boat_vel, transect.boat_vel.selected)
+        if trans_selected is not None:
+            b_vel_x = trans_selected.u_processed_mps
+            b_vel_y = trans_selected.v_processed_mps
+        else:
+            b_vel_x = np.tile([np.nan], transect.boat_vel.bt_vel.u_processed_mps.shape)
+            b_vel_y = np.tile([np.nan], transect.boat_vel.bt_vel.v_processed_mps.shape)
+
+        track_x = np.nancumsum(b_vel_x[in_transect_idx] * ens_delta_time[in_transect_idx])
+        track_y = np.nancumsum(b_vel_y[in_transect_idx] * ens_delta_time[in_transect_idx])
+        boat_dir, boat_mag = cart2pol(track_x[-1], track_y[-1])
+        unit_track_x, unit_track_y = pol2cart(boat_dir, 1)
+        unit_x_prod = (unit_water_x * unit_track_y - unit_water_y * unit_track_x) * dir_sign
+        edge_vel_sign = np.sign(unit_x_prod)
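+        # Illustrative sketch (hypothetical vectors): for a right start edge, a boat
+        # track toward (1, 0) and mean edge water velocity toward (0, -1) give
+        # unit_x_prod = (0 * 0 - (-1) * 1) * 1 = 1, so edge_vel_sign is positive.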
+
+        return edge_vel_mag, edge_vel_sign
+
+    @staticmethod
+    def edge_velocity_sontek(edge_idx, transect, top_method=None, bot_method=None, exponent=None):
+        """Computes the edge velocity using SonTek's method.
+
+        SonTek's method uses the profile extrapolation to estimate the velocities in the
+        unmeasured top and bottom and then projects the velocity perpendicular to the
+        course made good.
+
+        Parameters
+        ----------
+        edge_idx: np.array
+            Indices of ensembles used to compute edge discharge
+        transect: TransectData
+            Object of TransectData
+        top_method: str
+            Top extrapolation method
+        bot_method: str
+            Bottom extrapolation method
+        exponent: float
+            Exponent
+
+        Returns
+        -------
+        edge_vel_mag: float
+            Magnitude of edge velocity
+        edge_vel_sign: int
+            Sign of edge velocity (discharge)
+        """
+
+        if top_method is None:
+            top_method = transect.extrap.top_method
+            bot_method = transect.extrap.bot_method
+            exponent = transect.extrap.exponent
+
+        # Compute boat track excluding the start edge ensembles but
+        # including the end edge ensembles. This is the way SonTek does it
+        # as of version 3.7.
+        ens_delta_time = transect.date_time.ens_duration_sec
+        in_transect_idx = transect.in_transect_idx
+        trans_selected = getattr(transect.boat_vel, transect.boat_vel.selected)
+
+        if trans_selected is not None:
+            b_vel_x = trans_selected.u_processed_mps
+            b_vel_y = trans_selected.v_processed_mps
+        else:
+            b_vel_x = np.tile([np.nan], transect.boat_vel.bt_vel.u_processed_mps.shape)
+            b_vel_y = np.tile([np.nan], transect.boat_vel.bt_vel.v_processed_mps.shape)
+
+        track_x = np.nancumsum(b_vel_x[in_transect_idx] * ens_delta_time[in_transect_idx])
+        track_y = np.nancumsum(b_vel_y[in_transect_idx] * ens_delta_time[in_transect_idx])
+
+        # Compute the unit vector for the boat track
+        boat_dir, boat_mag = cart2pol(track_x[-1], track_y[-1])
+        unit_track_x, unit_track_y = pol2cart(boat_dir, 1)
+
+        # Assign water velocity to local variables
+        x_vel = transect.w_vel.u_processed_mps[:, edge_idx]
+        y_vel = transect.w_vel.v_processed_mps[:, edge_idx]
+        # Count valid cells per edge ensemble; the axis argument is required so
+        # np.nansum returns a value per ensemble rather than a scalar
+        valid_vel_ens = np.nansum(transect.w_vel.valid_data[0, :, edge_idx], 1)
+
+        # Filter edge data
+        # According to SonTek the RSL code does recognize that edge samples
+        # can vary in their cell size.  It deals with this issue by
+        # remembering the cell size and cell start for the first edge sample.
+        # Any subsequent edge sample is included in the average only if it
+        # has the same cell size and cell start as the first sample.
+        transect_depths_select = getattr(transect.depths, transect.depths.selected)
+        cell_size = transect_depths_select.depth_cell_size_m[:, edge_idx]
+        cell_depth = transect_depths_select.depth_cell_depth_m[:, edge_idx]
+
+        # Find first valid edge ensemble
+        idx = np.where(valid_vel_ens > 0)[0]
+        if len(idx) > 0:
+            idx_first_valid_ensemble = idx[0]
+            ref_cell_size = cell_size[0, idx_first_valid_ensemble]
+            ref_cell_depth = cell_depth[0, idx_first_valid_ensemble]
+            valid = np.tile(True, edge_idx.shape)
+            valid[np.not_equal(cell_size[0, :], ref_cell_size)] = False
+            valid[np.not_equal(cell_depth[0, :], ref_cell_depth)] = False
+
+            # Compute profile components
+            x_profile = np.nanmean(x_vel[:, valid], 1)
+            y_profile = np.nanmean(y_vel[:, valid], 1)
+
+            # Find first valid cell in profile
+            idx = np.where(np.logical_not(np.isnan(x_profile)))[0]
+            if len(idx) > 0:
+                idx_first_valid_cell = idx[0]
+
+                # Compute cell size and depth for mean profile
+                cell_size[np.isnan(x_vel)] = np.nan
+                cell_size[:, np.logical_not(valid)] = np.nan
+                cell_size_edge = np.nanmean(cell_size, 1)
+                cell_depth[np.isnan(x_vel)] = np.nan
+                cell_depth[:, np.logical_not(valid)] = np.nan
+                cell_depth_edge = np.nanmean(cell_depth, 1)
+
+                # SonTek cuts off the mean profile based on the side lobe cutoff of
+                # the mean of the shallowest beams in the edge ensembles.
+
+                # Determine valid original beam and cell depths
+                depth_bt_beam_orig = transect.depths.bt_depths.depth_orig_m[:, edge_idx]
+                depth_bt_beam_orig[:, np.logical_not(valid)] = np.nan
+                draft_bt_beam_orig = transect.depths.bt_depths.draft_orig_m
+                depth_cell_depth_orig = transect.depths.bt_depths.depth_cell_depth_orig_m[:, edge_idx]
+                depth_cell_depth_orig[:, np.logical_not(valid)] = np.nan
+
+                # Compute minimum mean depth
+                min_raw_depths = np.nanmin(depth_bt_beam_orig)
+                min_depth = np.nanmin(min_raw_depths)
+                min_depth = min_depth - draft_bt_beam_orig
+
+                # Compute last valid cell by computing the side lobe cutoff based
+                # on the mean of the minimum beam depths of the valid edge
+                # ensembles
+                if transect.w_vel.sl_cutoff_type == 'Percent':
+                    sl_depth = min_depth - ((transect.w_vel.sl_cutoff_percent / 100.) * min_depth)
+                else:
+                    sl_depth = min_depth - ((transect.w_vel.sl_cutoff_percent / 100.) * min_depth) \
+                        - (transect.w_vel.sl_cutoff_number * cell_size[0, 0])
+
+                # Adjust side lobe depth for draft
+                sl_depth = sl_depth + draft_bt_beam_orig
+                above_sl = cell_depth < (sl_depth + np.nanmax(cell_size))
+                above_sl_profile = np.nansum(above_sl, 1)
+                # TODO this line doesn't make sense to me
+                valid_idx = np.logical_and(np.less(above_sl_profile, np.nanmax(above_sl_profile)+1),
+                                           np.greater(above_sl_profile, 0))
+
+                # Compute the number of cells above the side lobe cutoff
+                # remaining_depth = sl_depth - cell_depth_edge[idx_first_valid_cell]
+                idx = np.where(np.logical_not(np.isnan(cell_size)))[0]
+                # TODO this is not consistent with Matlab code
+                n_cells = 0
+                if len(idx) > 0:
+                    n_cells = idx
+                    n_cells[n_cells > 0] = 0
+
+                # Determine index of bottom most valid cells
+                idx_last_valid_cell = idx_first_valid_cell + n_cells
+                # TODO need to work and test this logic.
+                if np.greater(idx_last_valid_cell, len(x_profile)):
+                    x_profile[np.logical_not(valid_idx)] = np.nan
+                    y_profile[np.logical_not(valid_idx)] = np.nan
+                else:
+                    idx_last_valid_cell = np.where(np.logical_not(np.isnan(x_profile[:idx_last_valid_cell])))[0][0]
+                    # Mark the cells in the profile below the sidelobe invalid
+                    x_profile[(idx_last_valid_cell+1):] = np.nan
+                    y_profile[(idx_last_valid_cell + 1):] = np.nan
+
+                # Find the top most 3 valid cells
+                idx_first_3_valid_cells = np.where(np.logical_not(np.isnan(x_profile)))[0][:3]
+
+                # Compute the mean measured velocity components for the edge profile
+                x_profile_mean = np.nanmean(x_profile)
+                y_profile_mean = np.nanmean(y_profile)
+
+                # Compute average depth of edge
+                depth_ens = transect_depths_select.depth_processed_m[edge_idx]
+                depth_ens[np.logical_not(valid)] = np.nan
+                depth_avg = np.nanmean(depth_ens)
+
+                # Determine top, mid, bottom range for the profile
+                top_rng_edge = cell_depth_edge[idx_first_valid_cell] - 0.5 * ref_cell_size
+                if idx_last_valid_cell > len(x_profile):
+                    mid_rng_edge = np.nansum(cell_size_edge[valid_idx])
+                else:
+                    mid_rng_edge = np.nansum(cell_size_edge[idx_first_valid_cell:idx_last_valid_cell+1])
+
+                # Compute z
+                z_edge = depth_avg - cell_depth_edge
+                z_edge[idx_last_valid_cell+1:] = np.nan
+                # Invalidate cells below the streambed (negative z)
+                z_edge[z_edge < 0] = np.nan
+                idx_last_valid_cell = np.where(np.logical_not(np.isnan(z_edge)))[0][-1]
+                bot_rng_edge = depth_avg - cell_depth_edge[idx_last_valid_cell] - 0.5 * \
+                    cell_size_edge[idx_last_valid_cell]
+
+                # Compute the top extrapolation for x-component
+                top_vel_x = QComp.discharge_top(top_method=top_method,
+                                                exponent=exponent,
+                                                idx_top=idx_first_valid_cell,
+                                                idx_top_3=idx_first_3_valid_cells,
+                                                top_rng=top_rng_edge,
+                                                component=x_profile,
+                                                cell_size=cell_size_edge,
+                                                cell_depth=cell_depth_edge,
+                                                depth_ens=depth_avg,
+                                                delta_t=1,
+                                                z=z_edge)
+                top_vel_x = top_vel_x / top_rng_edge
+
+                # Compute the bottom extrapolation for x-component
+                bot_vel_x = QComp.discharge_bot(bot_method=bot_method,
+                                                exponent=exponent,
+                                                idx_bot=idx_last_valid_cell,
+                                                bot_rng=bot_rng_edge,
+                                                component=x_profile,
+                                                cell_size=cell_size_edge,
+                                                cell_depth=cell_depth_edge,
+                                                depth_ens=depth_avg,
+                                                delta_t=1,
+                                                z=z_edge)
+                bot_vel_x = bot_vel_x / bot_rng_edge
+
+                # Compute the top extrapolation for the y-component
+                top_vel_y = QComp.discharge_top(top_method=top_method,
+                                                exponent=exponent,
+                                                idx_top=idx_first_valid_cell,
+                                                idx_top_3=idx_first_3_valid_cells,
+                                                top_rng=top_rng_edge,
+                                                component=y_profile,
+                                                cell_size=cell_size_edge,
+                                                cell_depth=cell_depth_edge,
+                                                depth_ens=depth_avg,
+                                                delta_t=1,
+                                                z=z_edge)
+                top_vel_y = top_vel_y / top_rng_edge
+
+                # Compute the bottom extrapolation for y-component
+                bot_vel_y = QComp.discharge_bot(bot_method=bot_method,
+                                                exponent=exponent,
+                                                idx_bot=idx_last_valid_cell,
+                                                bot_rng=bot_rng_edge,
+                                                component=y_profile,
+                                                cell_size=cell_size_edge,
+                                                cell_depth=cell_depth_edge,
+                                                depth_ens=depth_avg,
+                                                delta_t=1,
+                                                z=z_edge)
+                bot_vel_y = bot_vel_y / bot_rng_edge
+
+                # Compute edge velocity vector including extrapolated velocities
+                v_edge_x = ((top_vel_x * top_rng_edge) + (x_profile_mean * mid_rng_edge)
+                            + (bot_vel_x * bot_rng_edge)) / depth_avg
+                v_edge_y = ((top_vel_y * top_rng_edge) + (y_profile_mean * mid_rng_edge)
+                            + (bot_vel_y * bot_rng_edge)) / depth_avg
+
+                # Compute magnitude of edge velocity perpendicular to course made good
+                edge_vel_mag = (v_edge_x * -1 * unit_track_y) + (v_edge_y * unit_track_x)
+
+                # Determine edge sign
+                if transect.start_edge == 'Right':
+                    edge_vel_sign = -1
+                else:
+                    edge_vel_sign = 1
+            else:
+                edge_vel_mag = 0
+                edge_vel_sign = 1
+        else:
+            edge_vel_mag = 0
+            edge_vel_sign = 1
+
+        return edge_vel_mag, edge_vel_sign
+
+    @staticmethod
+    def edge_velocity_profile(edge_idx, transect):
+        """Compute edge velocity magnitude using the mean velocity of each ensemble.
+
+        The mean velocity of each ensemble is computed by first
+        computing the mean direction of the velocities in the ensemble,
+        then projecting the velocity in each cell in that direction and
+        fitting the 1/6th power curve to the projected profile. The mean
+        velocity magnitude from each ensemble is then averaged.
+
+        The sign of the velocity magnitude is computed using the same
+        approach used in WinRiver II. The cross product of the unit
+        vector of the ship track and the unit vector of the edge water
+        samples computed from the mean u and v velocities is used to
+        determine the sign of the velocity magnitude.
+
+        Parameters
+        ----------
+        edge_idx: np.array
+            Indices of ensembles used to compute edge discharge
+        transect: TransectData
+            Object of TransectData
+
+        Returns
+        -------
+        edge_vel_mag: float
+            Magnitude of edge velocity
+        edge_vel_sign: int
+            Sign of edge velocity (discharge)"""
+
+        # Assign water velocity to local variables
+        x_vel = transect.w_vel.u_processed_mps[:, edge_idx]
+        y_vel = transect.w_vel.v_processed_mps[:, edge_idx]
+
+        # Use only valid data
+        valid = transect.w_vel.valid_data[0, :, edge_idx].astype(float)
+        valid[valid == 0] = np.nan
+        x_vel = x_vel * valid
+        y_vel = y_vel * valid
+
+        # Initialize local variables
+        n_ensembles = len(edge_idx)
+        vel_ensembles = np.tile(np.nan, n_ensembles)
+        u = np.tile(np.nan, n_ensembles)
+        v = np.tile(np.nan, n_ensembles)
+        v_unit = np.array([np.nan, np.nan])
+
+        # Process each ensemble
+        for n in range(n_ensembles):
+
+            # Use ensembles that have valid data
+            selected_ensemble = edge_idx[n]
+            valid_ensemble = np.nansum(np.logical_not(np.isnan(x_vel[:, n])))
+
+            if valid_ensemble > 0:
+
+                # Setup variables
+                v_x = x_vel[:, n]
+                v_y = y_vel[:, n]
+                depth_cell_size = transect.depths.bt_depths.depth_cell_size_m[:, selected_ensemble]
+                depth_cell_depth = transect.depths.bt_depths.depth_cell_depth_m[:, selected_ensemble]
+                depth = transect.depths.bt_depths.depth_processed_m[selected_ensemble]
+                depth_cell_size[np.isnan(v_x)] = np.nan
+                depth_cell_depth[np.isnan(v_x)] = np.nan
+
+                # Compute projected velocity profile for an ensemble
+                v_x_avg = np.nansum(v_x * depth_cell_size) / np.nansum(depth_cell_size)
+                v_y_avg = np.nansum(v_y * depth_cell_size) / np.nansum(depth_cell_size)
+                ens_dir, _ = cart2pol(v_x_avg, v_y_avg)
+                v_unit[0], v_unit[1] = pol2cart(ens_dir, 1)
+                v_projected_mag = np.dot(np.hstack([v_x[:, np.newaxis], v_y[:, np.newaxis]]), v_unit)
+
+                # Compute z value for each cell
+                z = (depth - depth_cell_depth)
+                z[np.isnan(v_projected_mag)] = np.nan
+
+                # Compute coefficient for 1/6th power curve
+                b = 1.0 / 6.0
+                a = (b + 1) * (np.nansum((v_projected_mag * depth_cell_size))
+                               / (np.nansum(((z + 0.5 * depth_cell_size)**(b + 1))
+                                  - ((z - 0.5 * depth_cell_size)**(b + 1)))))
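+                # A sketch of the derivation: assuming a power profile u(z') = a * z'^b, integrating
+                # over each cell of size h centered at height z above the bed and matching the
+                # measured cells gives
+                #     sum(u * h) = (a / (b + 1)) * sum((z + h / 2)**(b + 1) - (z - h / 2)**(b + 1))
+                # which is solved for the coefficient a above.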
+
+                # Compute mean water speed by integrating power curve
+                vel_ensembles[n] = ((a / (b + 1)) * (depth**(b + 1))) / depth
+
+                # Compute the mean velocity components from the mean water speed and direction
+                u[n], v[n] = pol2cart(ens_dir, vel_ensembles[n])
+
+            else:
+
+                # No valid data in ensemble
+                vel_ensembles[n] = np.nan
+                u[n] = np.nan
+                v[n] = np.nan
+
+        # Compute the mean velocity components of the edge velocity as the mean of the mean ensemble components
+        u_avg = np.nanmean(u)
+        v_avg = np.nanmean(v)
+
+        # Compute the edge velocity magnitude
+        edge_vel_dir, edge_vel_mag = cart2pol(u_avg, v_avg)
+
+        # TODO this is the same as for TRDI need to put in separate method
+        # Compute unit vector to help determine sign
+        unit_water_x, unit_water_y = pol2cart(edge_vel_dir, 1)
+
+        # Account for direction of boat travel
+        if transect.start_edge == 'Right':
+            dir_sign = 1
+        else:
+            dir_sign = -1
+
+        # Compute unit boat vector to help determine sign
+        ens_delta_time = transect.date_time.ens_duration_sec
+        in_transect_idx = transect.in_transect_idx
+        trans_selected = getattr(transect.boat_vel, transect.boat_vel.selected)
+        if trans_selected is not None:
+            b_vel_x = trans_selected.u_processed_mps
+            b_vel_y = trans_selected.v_processed_mps
+        else:
+            b_vel_x = np.tile([np.nan], transect.boat_vel.bt_vel.u_processed_mps.shape)
+            b_vel_y = np.tile([np.nan], transect.boat_vel.bt_vel.v_processed_mps.shape)
+
+        track_x = np.nancumsum(b_vel_x[in_transect_idx] * ens_delta_time[in_transect_idx])
+        track_y = np.nancumsum(b_vel_y[in_transect_idx] * ens_delta_time[in_transect_idx])
+        boat_dir, boat_mag = cart2pol(track_x[-1], track_y[-1])
+        unit_track_x, unit_track_y = pol2cart(boat_dir, 1)
+
+        # Compute cross product from unit vectors
+        unit_x_prod = (unit_water_x * unit_track_y - unit_water_y * unit_track_x) * dir_sign
+
+        # Determine sign
+        edge_vel_sign = np.sign(unit_x_prod)
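+        # For example, with the boat tracking east (unit_track = (1, 0)) and the edge flow to the
+        # north (unit_water = (0, 1)), unit_x_prod = (0 * 0 - 1 * 1) * dir_sign, giving a positive
+        # sign for a transect starting on the left edge (dir_sign = -1).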
+
+        return edge_vel_mag, edge_vel_sign
+
+    @staticmethod
+    def edge_coef(edge_loc, transect):
+        """Returns the edge coefficient based on the edge settings and transect object.
+
+        Parameters
+        ----------
+        edge_loc: str
+            Edge location (left or right)
+        transect: TransectData
+            Object of TransectData
+
+        Returns
+        -------
+        coef: float
+            Edge coefficient for accounting for velocity distribution and edge shape
+        """
+
+        # Process appropriate edge type
+        edge_select = getattr(transect.edges, edge_loc)
+        if edge_select.type == 'Triangular':
+            coef = 0.3535
+
+        elif edge_select.type == 'Rectangular':
+            # Rectangular edge coefficient depends on the rec_edge_method.
+            # 'Fixed' is compatible with the method used by TRDI.
+            # 'Variable' is compatible with the method used by SonTek.
+
+            if transect.edges.rec_edge_method == 'Fixed':
+                # Fixed Method
+                coef = 0.91
+
+            else:
+                # Variable method
+                # Get edge distance
+                dist = edge_select.dist_m
+
+                # Get edge ensembles to use
+                edge_idx = QComp.edge_ensembles(edge_loc, transect)
+
+                # Compute the mean depth for edge
+                trans_select = getattr(transect.depths, transect.depths.selected)
+                depth_edge = np.nanmean(trans_select.depth_processed_m[edge_idx])
+
+                # Compute coefficient using equation 34 from Principles of River Discharge Measurement, SonTek, 2003
+                coef = (1 - ((0.35 / 4) * (depth_edge / dist) * (1 - np.exp(-4 * (dist / depth_edge))))) / \
+                    (1 - 0.35 * np.exp(-4 * (dist / depth_edge)))
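+                # For example, depth_edge = 1 m and dist = 2 m give
+                # coef = (1 - 0.0875 * 0.5 * (1 - exp(-8))) / (1 - 0.35 * exp(-8)), about 0.96;
+                # the coefficient approaches 1 as dist becomes large relative to depth_edge.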
+
+        elif edge_select.type == 'Custom':
+            # Custom user supplied coefficient
+            coef = edge_select.cust_coef
+
+        else:
+            coef = []
+
+        return coef
+
+    @staticmethod
+    def loop_correction_factor(top_q, middle_q, bottom_q, trans_data, mb_data, delta_t):
+        """Computes the discharge correction factor from loop moving-bed tests
+
+        Parameters
+        ----------
+        top_q: float
+            Top discharge from extrapolation
+        middle_q: float
+            Computed middle discharge
+        bottom_q: float
+            Bottom discharge from extrapolation
+        trans_data: TransectData
+            Object of TransectData
+        mb_data: MovingBedTests
+            Object of MovingBedTests
+        delta_t: np.array(float)
+            Duration of each ensemble, computed in QComp
+
+        Returns
+        -------
+        correction_factor: float
+            Correction factor to be applied to the discharge to correct for moving-bed effects
+        """
+
+        # Assign object properties to local variables
+        moving_bed_speed = mb_data.mb_spd_mps
+        in_transect_idx = trans_data.in_transect_idx
+        cells_above_sl = trans_data.w_vel.cells_above_sl[:, in_transect_idx]
+        u = trans_data.w_vel.u_processed_mps[:, in_transect_idx] * cells_above_sl
+        v = trans_data.w_vel.v_processed_mps[:, in_transect_idx] * cells_above_sl
+        depths_select = getattr(trans_data.depths, trans_data.depths.selected)
+        depth_cell_depth = depths_select.depth_cell_depth_m[:, in_transect_idx]
+        depth = depths_select.depth_processed_m[in_transect_idx]
+        bt_u = trans_data.boat_vel.bt_vel.u_processed_mps[in_transect_idx]
+        bt_v = trans_data.boat_vel.bt_vel.v_processed_mps[in_transect_idx]
+
+        # Compute uncorrected discharge excluding the edges
+        q_orig = top_q + middle_q + bottom_q
+
+        if q_orig != 0:
+            # Compute near-bed velocities
+            nb_u, nb_v, unit_nb_u, unit_nb_v = QComp.near_bed_velocity(u, v, depth, depth_cell_depth)
+            nb_speed = np.sqrt(nb_u**2 + nb_v**2)
+            nb_u_mean = np.nanmean(nb_u)
+            nb_v_mean = np.nanmean(nb_v)
+            nb_speed_mean = np.sqrt(nb_u_mean**2 + nb_v_mean**2)
+            moving_bed_speed_ens = moving_bed_speed * (nb_speed / nb_speed_mean)
+            u_mb = moving_bed_speed_ens * unit_nb_u
+            v_mb = moving_bed_speed_ens * unit_nb_v
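+            # The single moving-bed speed from the test is distributed among ensembles in
+            # proportion to each ensemble's near-bed speed relative to the mean near-bed speed
+            # and is oriented along each ensemble's near-bed unit vector.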
+
+            # Correct water velocities
+            u_adj = u + u_mb
+            v_adj = v + v_mb
+
+            bt_u_adj = bt_u + u_mb
+            bt_v_adj = bt_v + v_mb
+
+            # Compute corrected cross product
+            xprod = QComp.cross_product(transect=trans_data)
+            xprod_in = QComp.cross_product(w_vel_x=u_adj,
+                                           w_vel_y=v_adj,
+                                           b_vel_x=bt_u_adj,
+                                           b_vel_y=bt_v_adj,
+                                           start_edge=trans_data.start_edge)
+            xprod[:, in_transect_idx] = xprod_in
+
+            # Compute corrected discharges
+            q_middle_cells = QComp.discharge_middle_cells(xprod=xprod, transect=trans_data, delta_t=delta_t)
+            trans_select = getattr(trans_data.depths, trans_data.depths.selected)
+            num_top_method = {'Power': 0, 'Constant': 1, '3-Point': 2, None: -1}
+            q_top = extrapolate_top(xprod, trans_data.w_vel.valid_data[0, :, :],
+                                    num_top_method[trans_data.extrap.top_method],
+                                    trans_data.extrap.exponent, trans_data.in_transect_idx,
+                                    trans_select.depth_cell_size_m,
+                                    trans_select.depth_cell_depth_m, trans_select.depth_processed_m, delta_t,
+                                    -1, 0.1667)
+            num_bot_method = {'Power': 0, 'No Slip': 1, None: -1}
+            q_bot = extrapolate_bot(xprod, trans_data.w_vel.valid_data[0, :, :],
+                                    num_bot_method[trans_data.extrap.bot_method],
+                                    trans_data.extrap.exponent, trans_data.in_transect_idx,
+                                    trans_select.depth_cell_size_m,
+                                    trans_select.depth_cell_depth_m, trans_select.depth_processed_m, delta_t,
+                                    -1, 0.1667)
+            q_adj = np.nansum(np.nansum(q_middle_cells)) + np.nansum(q_top) + np.nansum(q_bot)
+
+            # Compute correction factor
+            correction_factor = q_adj / q_orig
+        else:
+            correction_factor = 1.0
+
+        return correction_factor
+        
+    @staticmethod
+    def stationary_correction_factor(top_q, middle_q, bottom_q, trans_data, mb_data, delta_t):
+        """Computes the discharge correction factor from stationary moving-bed tests.
+
+        Parameters
+        ----------
+        top_q: float
+            Top discharge from extrapolation
+        middle_q: float
+            Computed middle discharge
+        bottom_q: float
+            Bottom discharge from extrapolation
+        trans_data: TransectData
+            Object of TransectData
+        mb_data: MovingBedTests
+            Object of MovingBedTests
+        delta_t: np.array(float)
+            Duration of each ensemble, computed in QComp
+
+        Returns
+        -------
+        correction_factor: float
+            Correction factor to be applied to the discharge to correct for moving-bed effects
+        """
+                
+        n_mb_tests = len(mb_data)
+        n_sta_tests = 0
+        mb_speed = np.array([0])
+        near_bed_speed = np.array([0])
+        for n in range(n_mb_tests):
+            if (mb_data[n].type == 'Stationary') and mb_data[n].use_2_correct:
+                n_sta_tests += 1
+                mb_speed = np.append(mb_speed, mb_data[n].mb_spd_mps)
+                near_bed_speed = np.append(near_bed_speed, mb_data[n].near_bed_speed_mps)
+
+        if n_sta_tests > 0:
+
+            # Compute linear regression coefficient forcing through zero to relate
+            # near-bed velocity to moving-bed velocity
+            x = np.vstack(near_bed_speed)
+            corr_coef = np.linalg.lstsq(x, mb_speed, rcond=None)[0]
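+            # With x as a single column, lstsq fits mb_speed = m * near_bed_speed through the
+            # origin. For example, near_bed_speed = [0.5, 1.0] and mb_speed = [0.05, 0.10]
+            # yield a corr_coef of about 0.1.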
+
+            # Assign object properties to local variables
+            in_transect_idx = trans_data.in_transect_idx
+            cells_above_sl = trans_data.w_vel.cells_above_sl[:, in_transect_idx]
+            u = trans_data.w_vel.u_processed_mps[:, in_transect_idx] * cells_above_sl
+            v = trans_data.w_vel.v_processed_mps[:, in_transect_idx] * cells_above_sl
+            depths_select = getattr(trans_data.depths, trans_data.depths.selected)
+            depth_cell_depth = depths_select.depth_cell_depth_m[:, in_transect_idx]
+            depth = depths_select.depth_processed_m[in_transect_idx]
+            bt_u = trans_data.boat_vel.bt_vel.u_processed_mps[in_transect_idx]
+            bt_v = trans_data.boat_vel.bt_vel.v_processed_mps[in_transect_idx]
+
+            # Compute near-bed velocities
+            nb_u, nb_v, unit_nb_u, unit_nb_v = QComp.near_bed_velocity(u, v, depth, depth_cell_depth)
+
+            # Compute moving-bed vector for each ensemble
+            mb_u = corr_coef * nb_u
+            mb_v = corr_coef * nb_v
+
+            # Compute adjusted water and boat velocities
+            u_adj = u + mb_u
+            v_adj = v + mb_v
+            bt_u_adj = bt_u + mb_u
+            bt_v_adj = bt_v + mb_v
+
+            # Compute uncorrected discharge excluding the edges
+            q_orig = top_q + middle_q + bottom_q
+            if q_orig != 0:
+                # Compute corrected discharge excluding edges
+                # Compute corrected cross product
+                xprod = QComp.cross_product(transect=trans_data)
+                xprod_in = QComp.cross_product(w_vel_x=u_adj,
+                                               w_vel_y=v_adj,
+                                               b_vel_x=bt_u_adj,
+                                               b_vel_y=bt_v_adj,
+                                               start_edge=trans_data.start_edge)
+                xprod[:, in_transect_idx] = xprod_in
+
+                # Compute corrected discharges
+                q_middle_cells = QComp.discharge_middle_cells(xprod=xprod, transect=trans_data, delta_t=delta_t)
+                trans_select = getattr(trans_data.depths, trans_data.depths.selected)
+                num_top_method = {'Power': 0, 'Constant': 1, '3-Point': 2, None: -1}
+                q_top = extrapolate_top(xprod,
+                                        trans_data.w_vel.valid_data[0, :, :],
+                                        num_top_method[trans_data.extrap.top_method],
+                                        trans_data.extrap.exponent, trans_data.in_transect_idx,
+                                        trans_select.depth_cell_size_m,
+                                        trans_select.depth_cell_depth_m, trans_select.depth_processed_m, delta_t,
+                                        -1, 0.1667)
+                num_bot_method = {'Power': 0, 'No Slip': 1, None: -1}
+                q_bot = extrapolate_bot(xprod,
+                                        trans_data.w_vel.valid_data[0, :, :],
+                                        num_bot_method[trans_data.extrap.bot_method],
+                                        trans_data.extrap.exponent, trans_data.in_transect_idx,
+                                        trans_select.depth_cell_size_m,
+                                        trans_select.depth_cell_depth_m, trans_select.depth_processed_m, delta_t,
+                                        -1, 0.1667)
+                q_adj = np.nansum(np.nansum(q_middle_cells)) + np.nansum(q_top) + np.nansum(q_bot)
+
+                # Compute correction factor
+                correction_factor = q_adj / q_orig
+            else:
+                correction_factor = 1.0
+        else:
+            # No stationary tests are selected for correction, so apply no correction
+            correction_factor = 1.0
+
+        return correction_factor
+
+    @staticmethod
+    def near_bed_velocity(u, v, depth, bin_depth):
+        """Compute near bed velocities.
+
+        Parameters
+        ----------
+        u: np.array(float)
+            Velocity in the x-direction, in m/s
+        v: np.array(float)
+            Velocity in the y-direction, in m/s
+        depth: np.array(float)
+            Depth for each ensemble, in m
+        bin_depth: np.array(float)
+            Depth cell depth for each depth cell, in m
+
+        Returns
+        -------
+        nb_u: np.array(float)
+            Near-bed velocity in the x-direction, in m/s.
+        nb_v: np.array(float)
+            Near-bed velocity in the y-direction, in m/s.
+        unit_nbu: np.array(float)
+            Unit vector component of near-bed velocity in x-direction.
+        unit_nbv: np.array(float)
+            Unit vector component of near-bed velocity in y-direction.
+        """
+
+        # Compute z near bed as 10% of depth
+        z_near_bed = depth * 0.1
+
+        # Begin computing near-bed velocities
+        n_ensembles = u.shape[1]
+        nb_u = np.tile([np.nan], n_ensembles)
+        nb_v = np.tile([np.nan], n_ensembles)
+        unit_nbu = np.tile([np.nan], n_ensembles)
+        unit_nbv = np.tile([np.nan], n_ensembles)
+        z_depth = np.tile([np.nan], n_ensembles)
+        u_mean = np.tile([np.nan], n_ensembles)
+        v_mean = np.tile([np.nan], n_ensembles)
+        speed_near_bed = np.tile([np.nan], n_ensembles)
+        for n in range(n_ensembles):
+            idx = np.where(np.logical_not(np.isnan(u[:, n])))[0]
+            if len(idx) > 0:
+                idx = idx[-2:]
+
+                # Compute near-bed velocity
+                z_depth[n] = depth[n] - np.nanmean(bin_depth[idx, n], 0)
+                u_mean[n] = np.nanmean(u[idx, n], 0)
+                v_mean[n] = np.nanmean(v[idx, n], 0)
+                nb_u[n] = (u_mean[n] / z_depth[n] ** (1. / 6.)) * (z_near_bed[n] ** (1. / 6.))
+                nb_v[n] = (v_mean[n] / z_depth[n] ** (1. / 6.)) * (z_near_bed[n] ** (1. / 6.))
+                speed_near_bed[n] = np.sqrt(nb_u[n] ** 2 + nb_v[n] ** 2)
+                unit_nbu[n] = nb_u[n] / speed_near_bed[n]
+                unit_nbv[n] = nb_v[n] / speed_near_bed[n]
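+                # The mean of the bottom two cells is translated to the near-bed location along a
+                # 1/6th power profile, u_nb = u_mean * (z_near_bed / z_depth)**(1 / 6). For example,
+                # u_mean = 1.0 m/s at z_depth = 0.3 m gives about 0.93 m/s at z_near_bed = 0.2 m.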
+
+        return nb_u, nb_v, unit_nbu, unit_nbv
+
+    @staticmethod
+    def valid_edge_ens(trans_data):
+        """Determines which ensembles contain sufficient valid data to allow computation of discharge.
+        
+        Allows interpolated depth and boat velocity but requires valid
+        non-interpolated water velocity.
+        
+        Parameters
+        ----------
+        trans_data: TransectData
+            Object of TransectData
+        
+        Returns
+        -------
+        valid_ens: np.array(bool)
+            Boolean vector identifying ensembles with valid depth, navigation, and water data
+        """
+
+        # Get index of ensembles in moving-boat portion of transect
+        in_transect_idx = trans_data.in_transect_idx
+
+        # Get selected navigation reference
+        boat_vel_selected = getattr(trans_data.boat_vel, trans_data.boat_vel.selected)
+
+        # Depending on type of interpolation determine the valid navigation ensembles
+        if boat_vel_selected is not None and len(boat_vel_selected.u_processed_mps) > 0:
+            if boat_vel_selected.interpolate == 'TRDI':
+                nav_valid = boat_vel_selected.valid_data[0, in_transect_idx]
+            else:
+                nav_valid = np.logical_not(np.isnan(boat_vel_selected.u_processed_mps[in_transect_idx]))
+        else:
+            nav_valid = np.tile(False, len(in_transect_idx))
+
+        # Depending on type of interpolation determine the valid water track ensembles
+        if len(in_transect_idx) > 1:
+            water_valid = np.any(trans_data.w_vel.valid_data[0, :, in_transect_idx], 1)
+        else:
+            water_valid = np.any(trans_data.w_vel.valid_data[0, :, in_transect_idx])
+
+        # Determine the ensembles with valid depth
+        depths_select = getattr(trans_data.depths, trans_data.depths.selected)
+        if depths_select is not None:
+            depth_valid = np.logical_not(np.isnan(depths_select.depth_processed_m[in_transect_idx]))
+
+            # Determine the ensembles with valid depth, navigation, and water data
+            valid_ens = np.all(np.vstack((nav_valid, water_valid, depth_valid)), 0)
+        else:
+            valid_ens = []
+
+        return valid_ens
+
+    @staticmethod
+    def discharge_interpolated(q_top_ens, q_mid_cells, q_bot_ens, transect):
+        """Determines the amount of discharge in interpolated cells and ensembles.
+
+        Parameters
+        ----------
+        q_top_ens: np.array(float)
+            Top extrapolated discharge in each ensemble
+        q_mid_cells: np.array(float)
+            Measured middle discharge in each cell of each ensemble
+        q_bot_ens: np.array(float)
+            Bottom extrapolated discharge in each ensemble
+        transect: TransectData
+            Object of TransectData
+
+        Returns
+        -------
+        q_int_cells: float
+            Discharge in interpolated cells
+        q_int_ens: float
+            Discharge in interpolated ensembles
+        """
+        valid_ens, valid_wt = TransectData.raw_valid_data(transect)
+
+        # Compute interpolated cell discharge
+        q_int_cells = np.nansum(np.nansum(q_mid_cells[np.logical_not(valid_wt)]))
+
+        # The method used to compute the discharge in invalid ensembles depends on whether
+        # navigation data are interpolated (QRev) or whether an expanded delta time is used
+        # to compute the discharge for invalid ensembles (TRDI)
+        if transect.boat_vel.bt_vel.interpolate == 'None':
+            # Compute discharge in invalid ensembles for expanded delta time situation
+            # Find index of invalid ensembles followed by a valid ensemble
+            idx_next_valid = np.where(np.diff(np.hstack((-2, valid_ens))) == 1)[0]
+            if len(idx_next_valid) == 0:
+                q_int_ens = 0
+            else:
+                # Increase index to reference valid ensembles
+                idx_next_valid += 1
+
+                # Sum discharge in valid ensembles following invalid ensemble
+                q_int_ens = np.nansum(q_mid_cells[:, idx_next_valid], 0) \
+                    + q_bot_ens[idx_next_valid] + q_top_ens[idx_next_valid]
+
+                # Determine number of invalid ensembles preceding valid ensemble
+                run_length_false, _ = QComp.compute_run_length(valid_ens)
+
+                # Adjust run_length_false for situation where the transect ends with invalid ensembles
+                if len(run_length_false) > len(q_int_ens):
+                    run_length_false = run_length_false[:-1]
+
+                # Adjust discharge to remove the discharge that would have been measured in the valid ensemble
+                q_int_ens = np.nansum(q_int_ens * (run_length_false / (run_length_false+1)))
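+                # For example, if 2 invalid ensembles precede a valid ensemble, the expanded delta
+                # time of the valid ensemble spans 3 ensembles, so 2/3 of its discharge is
+                # attributed to the invalid (interpolated) ensembles.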
+
+        else:
+            # Compute discharge in invalid ensembles where all data were interpolated
+            q_int_ens = np.nansum(np.nansum(q_mid_cells[:, np.logical_not(valid_ens)])) \
+                        + np.nansum(q_top_ens[np.logical_not(valid_ens)]) \
+                        + np.nansum(q_bot_ens[np.logical_not(valid_ens)])
+
+        return q_int_cells, q_int_ens
+
+    @staticmethod
+    def compute_run_length(bool_vector):
+        """Compute how many false or true consecutive values are in every run of true or false in the
+        provided boolean vector.
+
+        Parameters
+        ----------
+        bool_vector: np.array(bool)
+           Boolean vector.
+
+        Returns
+        -------
+        run_length_false: np.array(int)
+            Vector with lengths of false runs.
+        run_length_true: np.array(int)
+            Vector with lengths of true runs.
+        """
+
+        # Compute the indices of where changes occur
+        valid_run = np.where(np.diff(np.hstack((-1, bool_vector, -1))) != 0)[0]
+        # Determine length of each run
+        run_length = np.diff(valid_run)
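+        # For example, bool_vector = [True, True, False, True] gives run_length = [2, 1, 1];
+        # the first run is true, so run_length_true = [2, 1] and run_length_false = [1].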
+
+        # Determine which runs are true and which are false based on the first value
+        if bool_vector[0]:
+            true_start = 0
+            false_start = 1
+        else:
+            true_start = 1
+            false_start = 0
+        run_length_false = run_length[false_start::2]
+        run_length_true = run_length[true_start::2]
+
+        return run_length_false, run_length_true
+
+    # ============================================================================================
+    # The methods below are not being used in the discharge computations.
+    # The methods for extrapolating the top and bottom discharge have been moved to separate files
+    # and compiled using Numba AOT. The methods below are included here for historical purposes
+    # and may provide an easier approach to adding new features/algorithms prior to recoding
+    # them in a manner that can be compiled using Numba AOT.
+    # =============================================================================================
+
+    @staticmethod
+    def extrapolate_top(xprod, transect, delta_t, top_method=None, exponent=None):
+        """Computes the extrapolated top discharge.
+
+        Parameters
+        ----------
+        xprod: np.array(float)
+            Cross product computed from the cross product method
+        transect: TransectData
+            Object of TransectData
+        delta_t: np.array(float)
+            Duration of each ensemble computed from QComp
+        top_method: str
+            Specifies method to use for top extrapolation
+        exponent: float
+            Exponent to use for power extrapolation
+
+        Returns
+        -------
+        q_top: np.array(float)
+            Top extrapolated discharge for each ensemble
+        """
+
+        if top_method is None:
+            top_method = transect.extrap.top_method
+            exponent = transect.extrap.exponent
+
+        # Get index for ensembles in moving-boat portion of transect
+        in_transect_idx = transect.in_transect_idx
+
+        # Compute top variables
+        idx_top, idx_top3, top_rng = QComp.top_variables(xprod, transect)
+        idx_top = idx_top[in_transect_idx]
+        idx_top3 = idx_top3[:, in_transect_idx]
+        top_rng = top_rng[in_transect_idx]
+
+        # Get data from transect object
+        trans_select = getattr(transect.depths, transect.depths.selected)
+        cell_size = trans_select.depth_cell_size_m[:, in_transect_idx]
+        cell_depth = trans_select.depth_cell_depth_m[:, in_transect_idx]
+        depth_ens = trans_select.depth_processed_m[in_transect_idx]
+
+        # Compute z
+        z = np.subtract(depth_ens, cell_depth)
+
+        # Use only valid data
+        valid_data = np.logical_not(np.isnan(xprod[:, in_transect_idx]))
+        z[np.logical_not(valid_data)] = np.nan
+        cell_size[np.logical_not(valid_data)] = np.nan
+        cell_depth[np.logical_not(valid_data)] = np.nan
+
+        # Compute top discharge
+        q_top = QComp.discharge_top(top_method, exponent, idx_top, idx_top3, top_rng,
+                                    xprod[:, in_transect_idx], cell_size, cell_depth,
+                                    depth_ens, delta_t, z)
+
+        return q_top
+
+    @staticmethod
+    def discharge_top(top_method, exponent, idx_top, idx_top_3, top_rng, component, cell_size, cell_depth,
+                      depth_ens, delta_t, z):
+        """Computes the top extrapolated value of the provided component.
+
+        Parameters
+        ----------
+        top_method: str
+            Top extrapolation method (Power, Constant, 3-Point)
+        exponent: float
+            Exponent for the power extrapolation method
+        idx_top: np.array(int)
+            Index to the topmost valid depth cell in each ensemble
+        idx_top_3: np.array(int)
+            Index to the top 3 valid depth cells in each ensemble
+        top_rng: np.array(float)
+            Range from the water surface to the top of the topmost cell
+        component: np.array(float)
+            The variable to be extrapolated (xprod, u-velocity, v-velocity)
+        cell_size: np.array(float)
+            Array of cellsizes (n cells x n ensembles)
+        cell_depth: np.array(float)
+            Depth of each cell (n cells x n ensembles)
+        depth_ens: np.array(float)
+            Bottom depth for each ensemble
+        delta_t: np.array(float)
+            Duration of each ensemble compute by QComp
+        z: np.array(float)
+            Relative depth from the bottom of each depth cell computed in discharge top method
+
+        Returns
+        -------
+        top_value: np.array(float)
+            Total for the specified component integrated over the top range for each ensemble
+        """
+
+        # Initialize return
+        top_value = 0
+
+        # Top power extrapolation
+        if top_method == 'Power':
+            numerator = ((exponent + 1) * np.nansum(component * cell_size, 0))
+            denominator = np.nansum(((z + 0.5 * cell_size)**(exponent+1)) - ((z - 0.5 * cell_size)**(exponent+1)), 0)
+            coef = np.divide(numerator, denominator, where=denominator != 0)
+            coef[denominator == 0] = np.nan
+            top_value = delta_t * (coef / (exponent + 1)) * \
+                (depth_ens**(exponent + 1) - (depth_ens-top_rng)**(exponent + 1))
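+            # This is the analytic integral of the fitted power curve coef * z**exponent from
+            # z = depth_ens - top_rng (top of the topmost valid cell) to z = depth_ens (the water
+            # surface), multiplied by the ensemble duration.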
+
+        # Top constant extrapolation
+        elif top_method == 'Constant':
+            n_ensembles = len(delta_t)
+            top_value = np.tile([np.nan], n_ensembles)
+            for j in range(n_ensembles):
+                if idx_top[j] >= 0:
+                    top_value[j] = delta_t[j] * component[idx_top[j], j] * top_rng[j]
+
+        # Top 3-point extrapolation
+        elif top_method == '3-Point':
+            # Determine number of bins available in each profile
+            valid_data = np.logical_not(np.isnan(component))
+            n_bins = np.nansum(valid_data, 0)
+            # Determine number of ensembles
+            n_ensembles = len(delta_t)
+            # Preallocate qtop vector
+            top_value = np.tile([np.nan], n_ensembles)
+
+            for j in range(n_ensembles):
+
+                if (n_bins[j] < 6) and (n_bins[j] > 0) and (idx_top[j] >= 0):
+                    top_value[j] = delta_t[j] * component[idx_top[j], j] * top_rng[j]
+
+                # If 6 or more bins use 3-pt at top
+                if n_bins[j] > 5:
+                    sumd = np.nansum(cell_depth[idx_top_3[0:3, j], j])
+                    sumd2 = np.nansum(cell_depth[idx_top_3[0:3, j], j]**2)
+                    sumq = np.nansum(component[idx_top_3[0:3, j], j])
+                    sumqd = np.nansum(component[idx_top_3[0:3, j], j] * cell_depth[idx_top_3[0:3, j], j])
+                    delta = 3 * sumd2 - sumd**2
+                    a = (3 * sumqd - sumq * sumd) / delta
+                    b = (sumq * sumd2 - sumqd * sumd) / delta
+                    # Compute discharge for 3-pt fit
+                    qo = (a * top_rng[j]**2) / 2 + b * top_rng[j]
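+                    # qo is the analytic integral of the linear fit a * d + b over depth from the
+                    # surface (d = 0) to the top of the topmost cell (d = top_rng)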
+                    top_value[j] = delta_t[j] * qo
+
+        return top_value
+
+    @staticmethod
+    def top_variables(xprod, transect):
+        """Computes the index to the top and top three valid cells in each ensemble and
+        the range from the water surface to the top of the topmost cell.
+
+        Parameters
+        ----------
+        xprod: np.array(float)
+            Cross product computed from the cross product method
+        transect: TransectData
+            Object of TransectData
+
+        Returns
+        -------
+        idx_top: np.array
+            Index to the topmost valid depth cell in each ensemble
+        idx_top_3: np.array
+            Index to the top 3 valid depth cell in each ensemble
+        top_rng: np.array(float)
+            Range from the water surface to the top of the topmost cell
+        """
+
+        # Get data from transect object
+        valid_data1 = np.copy(transect.w_vel.valid_data[0, :, :])
+        valid_data2 = np.logical_not(np.isnan(xprod))
+        valid_data = valid_data1 * valid_data2
+        trans_select = getattr(transect.depths, transect.depths.selected)
+        cell_size = trans_select.depth_cell_size_m
+        cell_depth = trans_select.depth_cell_depth_m
+
+        # Preallocate variables
+        n_ensembles = valid_data.shape[1]
+        idx_top = np.tile(-1, valid_data.shape[1]).astype(int)
+        idx_top_3 = np.tile(-1, (3, valid_data.shape[1])).astype(int)
+        top_rng = np.tile([np.nan], n_ensembles)
+
+        # Loop through ensembles
+        for n in range(n_ensembles):
+            # Identify topmost 1 and 3 valid cells
+            idx_temp = np.where(np.logical_not(np.isnan(xprod[:, n])))[0]
+            if len(idx_temp) > 0:
+                idx_top[n] = idx_temp[0]
+                if len(idx_temp) > 2:
+                    idx_top_3[:, n] = idx_temp[0:3]
+                # Compute top range
+                top_rng[n] = cell_depth[idx_top[n], n] - 0.5 * cell_size[idx_top[n], n]
+            else:
+                top_rng[n] = 0
+                idx_top[n] = 0
+
+        return idx_top, idx_top_3, top_rng
+
+    @staticmethod
+    def extrapolate_bot(xprod, transect, delta_t, bot_method=None, exponent=None):
+        """Computes the extrapolated bottom discharge
+
+        Parameters
+        ----------
+        xprod: np.array(float)
+            Cross product of the water and boat velocities
+        transect: TransectData
+            Object of TransectData
+        delta_t: np.array(float)
+            Duration of each ensemble
+        bot_method: str
+            Bottom extrapolation method
+        exponent: float
+            Bottom extrapolation exponent
+
+        Returns
+        -------
+        q_bot: np.array(float)
+            Bottom extrapolated discharge for each ensemble
+        """
+
+        # Determine extrapolation methods and exponent
+        if bot_method is None:
+            bot_method = transect.extrap.bot_method
+            exponent = transect.extrap.exponent
+
+        # Get index for ensembles in moving-boat portion of transect
+        in_transect_idx = transect.in_transect_idx
+        xprod = xprod[:, in_transect_idx]
+
+        # Compute bottom variables
+        idx_bot, bot_rng = QComp.bot_variables(xprod, transect)
+
+        # Get data from transect properties
+        trans_select = getattr(transect.depths, transect.depths.selected)
+        cell_size = trans_select.depth_cell_size_m[:, in_transect_idx]
+        cell_depth = trans_select.depth_cell_depth_m[:, in_transect_idx]
+        depth_ens = trans_select.depth_processed_m[in_transect_idx]
+
+        # Compute z
+        z = np.subtract(depth_ens, cell_depth)
+        valid_data = np.logical_not(np.isnan(xprod))
+        z[np.logical_not(valid_data)] = np.nan
+        z[nan_less(z, 0)] = np.nan
+        cell_size[np.logical_not(valid_data)] = np.nan
+        cell_depth[np.logical_not(valid_data)] = np.nan
+        # Compute bottom discharge
+        q_bot = QComp.discharge_bot(bot_method, exponent, idx_bot, bot_rng, xprod,
+                                    cell_size, cell_depth, depth_ens, delta_t, z)
+
+        return q_bot
+
+    @staticmethod
+    def discharge_bot(bot_method, exponent, idx_bot, bot_rng, component,
+                      cell_size, cell_depth, depth_ens, delta_t, z):
+        """Computes the bottom extrapolated value of the provided component.
+
+        Parameters
+        ----------
+        bot_method: str
+            Bottom extrapolation method (Power, No Slip)
+        exponent: float
+            Exponent for power and no slip
+        idx_bot: np.array(int)
+            Index to the bottom most valid depth cell in each ensemble
+        bot_rng: np.array(float)
+            Range from the streambed to the bottom of the bottom most cell
+        component: np.array(float)
+            The variable to be extrapolated
+        cell_size: np.array(float)
+            Array of cell sizes (n cells x n ensembles)
+        cell_depth: np.array(float)
+            Depth of each cell (n cells x n ensembles)
+        depth_ens: np.array(float)
+            Bottom depth for each ensemble
+        delta_t: np.array(float)
+            Duration of each ensemble computed by QComp
+        z: np.array(float)
+            Relative depth from the bottom to each depth cell
+
+        Returns
+        -------
+        bot_value: np.array(float)
+            Total for the specified component integrated over the bottom range for each ensemble
+        """
+
+        # Initialize
+        coef = 0
+
+        # Bottom power extrapolation
+        if bot_method == 'Power':
+            numerator = ((exponent+1) * np.nansum(component * cell_size, 0))
+            denominator = np.nansum(((z + 0.5 * cell_size)**(exponent + 1)) - (z - 0.5 * cell_size)**(exponent + 1), 0)
+            coef = np.divide(numerator, denominator, where=denominator != 0)
+            coef[denominator == 0] = np.nan
+
+        # Bottom no slip extrapolation
+        elif bot_method == 'No Slip':
+            # Valid data in the lower 20% of the water column or
+            # the last valid depth cell are used to compute the no slip power fit
+            cutoff_depth = 0.8 * depth_ens
+            depth_ok = (nan_greater(cell_depth, np.tile(cutoff_depth, (cell_depth.shape[0], 1))))
+            component_ok = np.logical_not(np.isnan(component))
+            use_ns = depth_ok * component_ok
+            for j in range(len(delta_t)):
+                if idx_bot[j] >= 0:
+                    use_ns[idx_bot[j], j] = 1
+
+            # Create cross product and z arrays for the data to be used in
+            # no slip computations
+            component_ns = np.copy(component)
+            component_ns[np.logical_not(use_ns)] = np.nan
+            z_ns = np.copy(z)
+            z_ns[np.logical_not(use_ns)] = np.nan
+            numerator = ((exponent + 1) * np.nansum(component_ns * cell_size, 0))
+            denominator = np.nansum(((z_ns + 0.5 * cell_size) ** (exponent + 1))
+                                    - ((z_ns - 0.5 * cell_size) ** (exponent + 1)), 0)
+            coef = np.divide(numerator, denominator, where=denominator != 0)
+            coef[denominator == 0] = np.nan
+
+        # Compute the bottom discharge of each profile
+        bot_value = delta_t * (coef / (exponent + 1)) * (bot_rng**(exponent + 1))
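+        # This is the analytic integral of the fitted power curve coef * z**exponent from the
+        # streambed (z = 0) to the bottom of the lowest valid cell (z = bot_rng), multiplied
+        # by the ensemble duration.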
+
+        return bot_value
+
+    @staticmethod
+    def bot_variables(x_prod, transect):
+        """Computes the index to the bottom most valid cell in each ensemble and the range from
+        the bottom to the bottom of the bottom most cell.
+
+        Parameters
+        ----------
+        x_prod: np.array(float)
+            Cross product computed from the cross product method
+        transect: TransectData
+            Object of TransectData
+
+        Returns
+        -------
+        idx_bot: np.array
+            Index to the bottom most valid depth cell in each ensemble
+        bot_rng: np.array(float)
+            Range from the streambed to the bottom of the bottom most cell
+        """
+
+        # Identify valid data
+        in_transect_idx = transect.in_transect_idx
+        valid_data1 = np.copy(transect.w_vel.valid_data[0, :, in_transect_idx].T)
+        valid_data2 = np.logical_not(np.isnan(x_prod))
+        valid_data = valid_data1 * valid_data2
+
+        # Assign transect properties to local variables
+        trans_selected = getattr(transect.depths, transect.depths.selected)
+        cell_size = trans_selected.depth_cell_size_m[:, in_transect_idx]
+        cell_depth = trans_selected.depth_cell_depth_m[:, in_transect_idx]
+        depth_ens = trans_selected.depth_processed_m[in_transect_idx]
+
+        # Preallocate variables
+        n_ensembles = valid_data.shape[1]
+        idx_bot = np.tile(-1, (valid_data.shape[1])).astype(int)
+        bot_rng = np.tile([np.nan], n_ensembles)
+
+        for n in range(n_ensembles):
+            # Identifying bottom most valid cell
+            idx_temp = np.where(np.logical_not(np.isnan(x_prod[:, n])))[0]
+            if len(idx_temp) > 0:
+                idx_temp = idx_temp[-1]
+                idx_bot[n] = idx_temp
+                # Compute bottom range
+                bot_rng[n] = depth_ens[n] - cell_depth[idx_bot[n], n] - 0.5 * cell_size[idx_bot[n], n]
+            else:
+                bot_rng[n] = 0
+
+        return idx_bot, bot_rng
diff --git a/Classes/SelectFit.py b/Classes/SelectFit.py
new file mode 100644
index 0000000000000000000000000000000000000000..bb771a06e2eaefb92119177e739f2d2a5c934b63
--- /dev/null
+++ b/Classes/SelectFit.py
@@ -0,0 +1,381 @@
+import numpy as np
+from Classes.FitData import FitData
+
+
+class SelectFit(object):
+    """Class automates the extrapolation method selection information.
+
+    Attributes
+    ----------
+    fit_method: str
+        User selected method Automatic or Manual
+    top_method: str
+        Top extrapolation method
+    bot_method: str
+        Bottom extrapolation method
+    exponent: float
+        Power fit exponent
+    exp_method: str
+        Method to determine exponent (default, optimize, or manual)
+    u: np.array(float)
+        Fit values of the variable
+    u_auto: np.array(float)
+        Fit values from automatic fit
+    z: np.array(float)
+        Distance from the streambed for fit variable
+    z_auto: np.array(float)
+        z values for automatic fit
+    residuals: np.array(float)
+        Residuals from fit
+    coef: float
+        Power fit coefficient
+    bot_method_auto: str
+        Selected extrapolation method for the bottom
+    top_method_auto: str
+        Selected extrapolation method for the top
+    exponent_auto: float
+        Selected exponent
+    top_fit_r2: float
+        Top fit custom r^2
+    top_max_diff: float
+        Maximum difference between power and 3-pt at top
+    bot_diff: float
+        Difference between power and no slip at z = 0.1
+    bot_r2: float
+        Bottom fit r^2
+    fit_r2: float
+        r^2 of the selected power/no slip fit
+    ns_exponent: float
+        No slip optimized exponent
+    pp_exponent: float
+        Power Power optimized exponent
+    top_r2: float
+        r^2 for linear fit of top 4 cells
+    rsqr: float
+        Adjusted r^2 for optimized exponent
+    exponent_95_ci: np.array(float)
+        95% confidence intervals for optimized exponent
+    data_type: str
+        Type of data (v, q, V, or Q)
+    """
+
+    def __init__(self):
+        """Intialize object and instance variables."""
+
+        self.fit_method = 'Automatic'  # User selected method Automatic or Manual
+        self.top_method = 'Power'
+        self.bot_method = 'Power'
+        self.exponent = 0.1667
+        self.exp_method = None
+        self.u = None
+        self.u_auto = None
+        self.z = None
+        self.z_auto = None
+        self.residuals = np.array([])
+        self.coef = 0
+        self.bot_method_auto = 'Power'  # Selected extrapolation for bottom
+        self.top_method_auto = 'Power'  # Selected extrapolation for top
+        self.exponent_auto = 0.1667  # Selected exponent
+        self.top_fit_r2 = 0  # Top fit custom r^2
+        self.top_max_diff = 0  # Maximum difference between power and 3-pt at top
+        self.bot_diff = 0  # Difference between power and no slip at z = 0.1
+        self.bot_r2 = 0  # Bottom fit r^2
+        self.fit_r2 = 0  # r^2 of the selected power/no slip fit
+        self.ns_exponent = 0.1667  # No slip optimized exponent
+        self.pp_exponent = 0.1667  # Power Power optimized exponent
+        self.top_r2 = 0
+        self.rsqr = 0
+        self.exponent_95_ci = 0
+        self.data_type = 'q'
+
+    def populate_data(self, normalized, fit_method, top=None, bot=None, exponent=None):
+        """Determine selected fit.
+
+        Parameters
+        ----------
+        normalized: NormData
+            Object of NormData
+        fit_method: str
+            Fit method (Automatic or Manual)
+        top: str
+            Top extrapolation method
+        bot: str
+            Bottom extrapolation method
+        exponent: float
+            Exponent for extrapolation method
+        """
+
+        valid_data = np.squeeze(normalized.valid_data)
+
+        # Store data in properties to object
+        self.fit_method = fit_method
+        self.data_type = normalized.data_type
+
+        update_fd = FitData()
+
+        if fit_method == 'Automatic':
+            # Compute power fit with optimized exponent as reference to determine
+            # if constant no slip will be more appropriate
+            ppobj = FitData()
+            ppobj.populate_data(norm_data=normalized,
+                                top='Power',
+                                bot='Power',
+                                method='optimize')
+
+            # Store results in object
+            self.pp_exponent = ppobj.exponent
+            self.residuals = ppobj.residuals
+            self.rsqr = ppobj.r_squared
+            self.exponent_95_ci = ppobj.exponent_95_ci
+
+            # Begin automatic fit
+
+            # More than 6 cells are required to compute an optimized fit.  For fewer
+            # than 7 cells the default power/power fit is selected due to lack of sufficient
+            # data for a good analysis
+            if len(self.residuals) > 6:
+                # DSM (6/4/2021) the top and bottom were mislabeled (even in Matlab). I corrected. The computations
+                # are unaffected as the top2 and bot2 are only used in the c_shape_condition equation
+                # c_shape_condition = (np.sign(bot2) * np.sign(top2) == np.sign(mid2) and np.abs(bot2 + top2) > 0.1)
+                # Compute the difference between the bottom two cells of data and the optimized power fit
+                bot2 = np.nansum(normalized.unit_normalized_med[valid_data[-2:]]
+                                 - ppobj.coef * normalized.unit_normalized_z[valid_data[-2:]] ** ppobj.exponent)
+
+                # Compute the difference between the top two cells of data and the optimized power fit
+                top2 = np.nansum(normalized.unit_normalized_med[valid_data[:2]]
+                                 - ppobj.coef * normalized.unit_normalized_z[valid_data[:2]] ** ppobj.exponent)
+
+                # Compute the difference between the middle two cells of data and the optimized power fit
+                mid1 = int(np.floor(len(np.isnan(valid_data) == False) / 2)) - 1
+
+                mid2 = np.nansum(normalized.unit_normalized_med[valid_data[mid1:mid1 + 2]]
+                                 - ppobj.coef * normalized.unit_normalized_z[valid_data[mid1:mid1 + 2]]
+                                 ** ppobj.exponent)
+
+                self.top_method_auto = 'Power'
+                self.bot_method_auto = 'Power'
+
+                # Evaluate difference in data and power fit at water surface using a linear fit through the top 4
+                # median cells and save results
+                y = normalized.unit_normalized_med[valid_data[:4]]
+                x = normalized.unit_normalized_z[valid_data[:4]]
+
+                coeffs = np.polyfit(x, y, 1)
+                resid = y - (coeffs[0]*x + coeffs[1])
+                corr = np.corrcoef(x, y)[0, 1]
+                self.top_fit_r2 = 1 - (np.sum(resid ** 2) / np.mean(np.abs(resid)))
+                self.top_r2 = corr**2
+
+                # Evaluate overall fit
+                # If the optimized power fit does not have an r^2 better than 0.8, or if the default
+                # exponent of 0.1667 falls within the 95% confidence interval of the optimized
+                # exponent, there is insufficient justification to change the exponent from 0.1667
+                if (ppobj.r_squared < 0.8) or ((0.1667 > self.exponent_95_ci[0]) and (0.1667 < self.exponent_95_ci[1])):
+                    # If an optimized exponent cannot be justified the linear fit is used to determine if a constant
+                    # fit at the top is a better alternative than a power fit.  If the power fit is the better
+                    # alternative the exponent is set to the default 0.1667 and the data is refit
+                    if (self.top_fit_r2 < 0.8) or (self.top_r2 < 0.9):
+                        ppobj = FitData()
+                        ppobj.populate_data(norm_data=normalized,
+                                            top='Power',
+                                            bot='Power',
+                                            method='Manual',
+                                            exponent=0.1667)
+
+                # Evaluate fit of top and bottom portions of the profile
+                # Set save selected exponent and associated fit statistics
+                self.exponent_auto = ppobj.exponent
+                self.fit_r2 = ppobj.r_squared
+
+                # Compute the difference at the water surface between a linear fit of the top 4 measured cells
+                # and the best selected power fit of the whole profile
+                self.top_max_diff = ppobj.u[-1] - np.sum(coeffs)
+
+                # Evaluate the difference at the bottom between power using the whole profile and power using
+                # only the bottom third
+                ns_fd = FitData()
+                ns_fd.populate_data(normalized, 'Constant', 'No Slip', 'Optimize')
+                self.ns_exponent = ns_fd.exponent
+                self.bot_r2 = ns_fd.r_squared
+                self.bot_diff = ppobj.u[np.round(ppobj.z, 2) == 0.1][0] \
+                    - ns_fd.u[np.round(ns_fd.z, 2) == 0.1][0]
+
+                # Begin automatic selection logic
+                # -----------------------------------
+
+                # A constant no slip fit condition is selected if:
+                #
+                # 1)The top of the power fit doesn't fit the data well.
+                # This is determined to be the situation when
+                # (a) the difference at the water surface between the
+                # linear fit and the power fit is greater than 10% and
+                # (b) the difference is either positive or the difference
+                # of the top measured cell differs from the best
+                # selected power fit by more than 5%.
+                top_condition = (np.abs(self.top_max_diff) > 0.1 and ((self.top_max_diff > 0)
+                                 or np.abs(normalized.unit_normalized_med[valid_data[0]] - ppobj.u[-1]) > 0.05))
+
+                # OR
+
+                # 2) The bottom of the power fit doesn't fit the data
+                # well. This is determined to be the situation when (a)
+                # the difference between an optimized no slip fit
+                # and the selected best power fit of the whole profile
+                # is greater than 10% and (b) the optimized no slip fit has
+                # an r^2 greater than 0.6.
+                bottom_condition = ((np.abs(self.bot_diff) > 0.1) and self.bot_r2 > 0.6)
+
+                # OR
+
+                # 3) Flow is bidirectional. The sign of the top of the
+                # profile is different from the sign of the bottom of
+                # the profile.
+                bidirectional_condition = (np.sign(normalized.unit_normalized_med[valid_data[0]])
+                                           != np.sign(normalized.unit_normalized_med[valid_data[-1]]))
+                # OR
+
+                # 4) The profile is C-shaped. This is determined by
+                # (a) the sign of the top and bottom difference from
+                # the best selected power fit being different than the
+                # sign of the middle difference from the best selected
+                # power fit and (b) the combined difference of the top
+                # and bottom difference from the best selected power
+                # fit being greater than 10%.
+                c_shape_condition = (np.sign(bot2) * np.sign(top2) == np.sign(mid2) and np.abs(bot2 + top2) > 0.1)
+
+                if top_condition or bottom_condition or bidirectional_condition or c_shape_condition:
+
+                    # Set the bottom to no slip
+                    self.bot_method_auto = 'No Slip'
+                    # If the no slip fit with an optimized exponent does not have r^2 better than 0.8 use
+                    # the default 0.1667 for the no slip exponent
+                    if ns_fd.r_squared > 0.8:
+                        self.exponent_auto = ns_fd.exponent
+                        self.fit_r2 = ns_fd.r_squared
+                    else:
+                        self.exponent_auto = 0.1667
+                        self.fit_r2 = np.nan
+
+                    # Use the no slip 95% confidence intervals if they are available
+                    if ns_fd.exponent_95_ci is not None and np.all(
+                            np.isnan(ns_fd.exponent_95_ci) == False):
+                        self.exponent_95_ci[0] = ns_fd.exponent_95_ci[0]
+                        self.exponent_95_ci[1] = ns_fd.exponent_95_ci[1]
+                    else:
+                        self.exponent_95_ci[0] = np.nan
+                        self.exponent_95_ci[1] = np.nan
+
+                    # Set the top method to constant
+                    self.top_method_auto = 'Constant'
+
+                else:
+
+                    # Leave fit power/power and set the best selected optimized exponent as the automatic fit exponent
+                    self.exponent_auto = ppobj.exponent
+
+            else:
+
+                # If the data are insufficient for a valid analysis use the power/power fit
+                # with the default 0.1667 exponent
+                self.top_method_auto = 'Power'
+                self.bot_method_auto = 'Power'
+                self.exponent_auto = 0.1667
+                self.ns_exponent = 0.1667
+
+            # Update the fit using the automatically selected methods
+            update_fd.populate_data(norm_data=normalized,
+                                    top=self.top_method_auto,
+                                    bot=self.bot_method_auto,
+                                    method='Manual',
+                                    exponent=self.exponent_auto)
+            self.u = update_fd.u
+            self.u_auto = update_fd.u
+            self.z_auto = update_fd.z
+            self.z = update_fd.z
+
+        elif fit_method == 'Manual':
+
+            # Identify changes in fit settings
+            if top is None:
+                top = self.top_method
+            if bot is None:
+                bot = self.bot_method
+            if exponent is None:
+                exponent = self.exponent
+
+            # Update fit with manual settings
+            update_fd.populate_data(norm_data=normalized,
+                                    top=top,
+                                    bot=bot,
+                                    method=fit_method,
+                                    exponent=exponent)
+            self.u = update_fd.u
+            self.z = update_fd.z
+
+        # Store fit data in object
+        self.top_method = update_fd.top_method
+        self.bot_method = update_fd.bot_method
+        self.exponent = update_fd.exponent
+        self.coef = update_fd.coef
+        self.exp_method = update_fd.exp_method
+        self.residuals = update_fd.residuals
+
+    @staticmethod
+    def qrev_mat_in(mat_data):
+        """Processes the Matlab data structure to obtain a list of NormData objects containing transect
+           data from the Matlab data structure.
+
+       Parameters
+       ----------
+       mat_data: mat_struct
+           Matlab data structure obtained from sio.loadmat
+
+       Returns
+       -------
+       norm_data: list
+           List of NormData objects
+       """
+        fit_data = []
+        if hasattr(mat_data, 'selFit'):
+            for n, data in enumerate(mat_data.selFit):
+                temp = SelectFit()
+                temp.populate_from_qrev_mat(data, mat_data.normData[n])
+                fit_data.append(temp)
+        return fit_data
+
+    def populate_from_qrev_mat(self, mat_data, norm_data):
+        """Populates the object using data from previously saved QRev Matlab file.
+
+        Parameters
+        ----------
+        mat_data: mat_struct
+           Matlab data structure obtained from sio.loadmat
+        norm_data: NormData
+            Object of NormData
+        """
+
+        self.fit_method = mat_data.fitMethod
+        self.top_method = mat_data.topMethod
+        self.bot_method = mat_data.botMethod
+        self.exponent = mat_data.exponent
+        self.exp_method = mat_data.expMethod
+        self.u = mat_data.u
+        self.u_auto = mat_data.uAuto
+        self.z = mat_data.z
+        self.z_auto = mat_data.zAuto
+        self.residuals = mat_data.residuals
+        self.coef = mat_data.coef
+        self.bot_method_auto = mat_data.botMethodAuto
+        self.top_method_auto = mat_data.topMethodAuto
+        self.exponent_auto = mat_data.exponentAuto
+        self.top_fit_r2 = mat_data.topfitr2
+        self.top_max_diff = mat_data.topmaxdiff
+        self.bot_diff = mat_data.botdiff
+        self.bot_r2 = mat_data.botrsqr
+        self.fit_r2 = mat_data.fitrsqr
+        self.ns_exponent = mat_data.nsexponent
+        self.pp_exponent = mat_data.ppexponent
+        self.top_r2 = mat_data.topr2
+        self.rsqr = mat_data.rsqr
+        self.exponent_95_ci = mat_data.exponent95confint
+        self.data_type = norm_data.dataType
diff --git a/Classes/SensorData.py b/Classes/SensorData.py
new file mode 100644
index 0000000000000000000000000000000000000000..0c8f8697729f6370e319613fe9919e2f29ad3903
--- /dev/null
+++ b/Classes/SensorData.py
@@ -0,0 +1,82 @@
+import numpy as np
+
+
+class SensorData(object):
+    """Class stores data for pitch, roll, temperature, salinity, and speed of sound and its source/
+
+    Attributes
+    ----------
+    data: np.array(float)
+        Data to be used in computations.
+    data_orig: np.array(float)
+        Original data loaded from raw data file.
+    source: str
+        Source of data, examples Int. Sensor, Ext. Sensor, User
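+
+    Example
+    -------
+    A minimal usage sketch (values here are hypothetical)::
+
+        sensor = SensorData()
+        sensor.populate_data(data_in=np.array([20.1, 20.2]), source_in='Int. Sensor')
+        sensor.change_data(np.array([20.0, 20.0]))
+        sensor.set_source('User')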
+    """
+    
+    def __init__(self):
+        """Initializes class and variables."""
+
+        self.data = None
+        self.data_orig = None
+        self.source = None
+        
+    def populate_data(self, data_in, source_in):
+        """Store data in class.
+
+        Parameters
+        ----------
+        data_in: np.array(float)
+            Data to be stored.
+        source_in: str
+            Source of data to be stored.
+        """
+
+        self.data = data_in
+        self.data_orig = data_in
+        self.source = source_in
+
+    def populate_from_qrev_mat(self, mat_data):
+        """Populates the object using data from previously saved QRev Matlab file.
+
+        Parameters
+        ----------
+        mat_data: mat_struct
+           Matlab data structure obtained from sio.loadmat
+        """
+
+        if np.isnan(mat_data.data).all():
+            self.data = np.array([])
+        else:
+            if type(mat_data.data) is np.ndarray:
+                self.data = mat_data.data.astype(float)
+            else:
+                self.data = np.array([float(mat_data.data)])
+        if np.isnan(mat_data.dataOrig).all():
+            self.data_orig = np.array([])
+        else:
+            if type(mat_data.dataOrig) is np.ndarray:
+                self.data_orig = mat_data.dataOrig.astype(float)
+            else:
+                self.data_orig = np.array([float(mat_data.dataOrig)])
+        self.source = mat_data.source
+        
+    def change_data(self, data_in):
+        """Change data to be applied in computations.
+
+        Parameters
+        ----------
+        data_in: np.array(float)
+        """
+
+        self.data = data_in
+        
+    def set_source(self, source_in):
+        """Change source of data.
+
+        Parameters
+        ----------
+        source_in: str
+            Source of data.
+        """
+        self.source = source_in
diff --git a/Classes/SensorStructure.py b/Classes/SensorStructure.py
new file mode 100644
index 0000000000000000000000000000000000000000..20a7cf79557c05e5704997b1e28df4425bf83547
--- /dev/null
+++ b/Classes/SensorStructure.py
@@ -0,0 +1,73 @@
+import numpy as np
+from Classes.HeadingData import HeadingData
+from Classes.SensorData import SensorData
+
+
+class SensorStructure(object):
+    """Class to store sensor data from various sources.
+
+    Attributes
+    ----------
+    selected: str
+        The selected sensor reference name ('internal', 'external', 'user').
+    internal: SensorData
+        Contains the data from the internal sensor, object of SensorData
+    external: SensorData
+        Contains the data from an external sensor, object of SensorData
+    user: SensorData
+        Contains user supplied value, object of SensorData
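+
+    Example
+    -------
+    A minimal usage sketch (values here are hypothetical)::
+
+        sensors = SensorStructure()
+        sensors.internal = SensorData()
+        sensors.internal.populate_data(data_in=np.array([21.5]), source_in='Int. Sensor')
+        sensors.set_selected('internal')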
+    """
+    
+    def __init__(self):
+        """Initialize class and set variable to None."""
+
+        self.selected = None  # The selected sensor reference name ('internal', 'external', 'user')
+        self.internal = None  # Contains the data from the internal sensor
+        self.external = None  # Contains the data from an external sensor
+        self.user = None  # Contains user supplied value
+
+    def populate_from_qrev_mat(self, mat_data, heading=False):
+        """Populates the object using data from previously saved QRev Matlab file.
+
+        Parameters
+        ----------
+        mat_data: mat_struct
+           Matlab data structure obtained from sio.loadmat
+        heading: bool
+            Determines if mat_data is heading data
+        """
+
+        if not heading:
+            # Non-heading sensors
+            if not type(mat_data.external) is np.ndarray:
+                self.external = SensorData()
+                self.external.populate_from_qrev_mat(mat_data.external)
+            if not type(mat_data.internal) is np.ndarray:
+                self.internal = SensorData()
+                self.internal.populate_from_qrev_mat(mat_data.internal)
+            if not type(mat_data.user) is np.ndarray:
+                self.user = SensorData()
+                self.user.populate_from_qrev_mat(mat_data.user)
+            self.selected = mat_data.selected
+        else:
+            # Heading data
+            if not type(mat_data.external) is np.ndarray:
+                self.external = HeadingData()
+                self.external.populate_from_qrev_mat(mat_data.external)
+            if not type(mat_data.internal) is np.ndarray:
+                self.internal = HeadingData()
+                self.internal.populate_from_qrev_mat(mat_data.internal)
+            if not type(mat_data.user) is np.ndarray:
+                self.user = HeadingData()
+                self.user.populate_from_qrev_mat(mat_data.user)
+            self.selected = mat_data.selected
+        
+    def set_selected(self, selected_name):
+        """Set the selected source for the specified object
+
+        Parameters
+        ----------
+        selected_name: str
+            Name of the selected source (internal, external, user).
+        """
+        self.selected = selected_name
diff --git a/Classes/Sensors.py b/Classes/Sensors.py
new file mode 100644
index 0000000000000000000000000000000000000000..5734ff7b06795946870e5668ab877ed6b0768d24
--- /dev/null
+++ b/Classes/Sensors.py
@@ -0,0 +1,122 @@
+import numpy as np
+from Classes.SensorStructure import SensorStructure
+
+
+class Sensors(object):
+    """Class to store data from ADCP sensors.
+
+    Attributes
+    ----------
+    heading_deg: HeadingData
+        Object of HeadingData.
+    pitch_deg: SensorStructure
+        Pitch data, object of SensorStructure
+    roll_deg: SensorStructure
+        Roll data, object of SensorStructure
+    temperature_deg_c: SensorStructure
+        Temperature data, object of SensorStructure
+    salinity_ppt: SensorStructure
+        Salinity data, object of SensorStructure
+    speed_of_sound_mps: SensorStructure
+        Speed of sound, object of SensorStructure
+    """
+
+    def __init__(self):
+        """Initialize class and create variable objects"""
+
+        self.heading_deg = SensorStructure()  # Object of HeadingData
+        self.pitch_deg = SensorStructure()  # Pitch data, object of SensorStructure
+        self.roll_deg = SensorStructure()  # Roll data, object of SensorStructure
+        self.temperature_deg_c = SensorStructure()  # Temperature data, object of SensorStructure
+        self.salinity_ppt = SensorStructure()  # Salinity data, object of SensorStructure
+        self.speed_of_sound_mps = SensorStructure()  # Speed of sound, object of SensorStructure
+
+    def populate_from_qrev_mat(self, transect):
+        """Populates the object using data from previously saved QRev Matlab file.
+
+        Parameters
+        ----------
+        transect: mat_struct
+           Matlab data structure obtained from sio.loadmat
+        """
+        if hasattr(transect, 'sensors'):
+            if hasattr(transect.sensors, 'heading_deg'):
+                self.heading_deg.populate_from_qrev_mat(transect.sensors.heading_deg, heading=True)
+            if hasattr(transect.sensors, 'pitch_deg'):
+                self.pitch_deg.populate_from_qrev_mat(transect.sensors.pitch_deg)
+            if hasattr(transect.sensors, 'roll_deg'):
+                self.roll_deg.populate_from_qrev_mat(transect.sensors.roll_deg)
+            if hasattr(transect.sensors, 'salinity_ppt'):
+                self.salinity_ppt.populate_from_qrev_mat(transect.sensors.salinity_ppt)
+            if hasattr(transect.sensors, 'speedOfSound_mps'):
+                self.speed_of_sound_mps.populate_from_qrev_mat(transect.sensors.speedOfSound_mps)
+            if hasattr(transect.sensors, 'temperature_degC'):
+                self.temperature_deg_c.populate_from_qrev_mat(transect.sensors.temperature_degC)
+
+    @staticmethod
+    def speed_of_sound(temperature, salinity):
+        """Computes speed of sound from temperature and salinity.
+
+        Parameters
+        ----------
+        temperature: float or np.array(float)
+            Water temperature at transducer face, in degrees C.
+        salinity: float or np.array(float)
+            Water salinity at transducer face, in ppt.
+        """
+
+        # Not provided in the RS Matlab file; computed from the equation used in TRDI BBSS,
+        # from Urick (1983). May not be the same equation as used by SonTek.
+        sos = 1449.2 + 4.6 * temperature - 0.055 * temperature**2 + 0.00029 * temperature**3 \
+            + (1.34 - 0.01 * temperature) * (salinity - 35.0)
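+        # Sanity check (hypothetical values): temperature=20 C and salinity=0 ppt
+        # gives a sos of approximately 1481.6 m/s.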
+
+        return sos
+
+    @staticmethod
+    def unesco_speed_of_sound(t, s, p=10):
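+        """Computes speed of sound using the UNESCO equation.
+
+        The parameter descriptions below are inferred from how this method is
+        called within QRev.
+
+        Parameters
+        ----------
+        t: float or np.array(float)
+            Water temperature, in degrees C.
+        s: float or np.array(float)
+            Salinity, in ppt.
+        p: float
+            Pressure, assumed in decibars (converted to bars below); default 10.
+
+        Returns
+        -------
+        sos: float or np.array(float)
+            Speed of sound, in m/s.
+        """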
+
+        p = p / 10
+        sr = np.sqrt(np.abs(s))
+
+        # S ** 2 TERM
+        d = 1.727E-3 - 7.9836E-6 * p
+
+        # S ** 3 / 2  TERM
+        b1 = 7.3637E-5 + 1.7945E-7 * t
+        b0 = -1.922E-2 - 4.42E-5 * t
+        b = b0 + b1 * p
+
+        # S ** 1 TERM
+        a3 = (-3.389E-13 * t + 6.649E-12) * t + 1.100E-10
+        a2 = ((7.988E-12 * t - 1.6002E-10) * t + 9.1041E-9) * t - 3.9064E-7
+        a1 = (((-2.0122E-10 * t + 1.0507E-8) * t - 6.4885E-8) * t - 1.2580E-5) * t + 9.4742E-5
+        a0 = (((-3.21E-8 * t + 2.006E-6) * t + 7.164E-5) * t - 1.262E-2) * t + 1.389
+        a = ((a3 * p + a2) * p + a1) * p + a0
+
+        # S ** 0 TERM
+        c3 = (-2.3643E-12 * t + 3.8504E-10) * t - 9.7729E-9
+        c2 = (((1.0405E-12 * t - 2.5335E-10) * t + 2.5974E-8) * t - 1.7107E-6) * t + 3.1260E-5
+        c1 = (((-6.1185E-10 * t + 1.3621E-7) * t - 8.1788E-6) * t + 6.8982E-4) * t + 0.153563
+        c0 = ((((3.1464E-9 * t - 1.47800E-6) * t + 3.3420E-4) * t - 5.80852E-2) * t + 5.03711) * t + 1402.388
+        c = ((c3 * p + c2) * p + c1) * p + c0
+
+        # SOUND  SPEED
+        sos = c + (a + b * sr + d * s) * s
+
+        return sos
+
+    @staticmethod
+    def avg_temperature(transects):
+        """Compute mean temperature from temperature data from all transects.
+
+        Parameters
+        ----------
+        transects: list
+            List of TransectData objects
+        """
+
+        temps = np.array([])
+        for transect in transects:
+            if transect.checked:
+                temps = np.append(temps, transect.sensors.temperature_deg_c.internal.data)
+        return np.nanmean(temps)
diff --git a/Classes/TransectData.py b/Classes/TransectData.py
new file mode 100644
index 0000000000000000000000000000000000000000..1d7a2ee5073c03b839b482cb1ea98a93603df53e
--- /dev/null
+++ b/Classes/TransectData.py
@@ -0,0 +1,2498 @@
+import os
+import time
+import warnings
+import concurrent.futures
+import numpy as np
+from datetime import datetime
+from datetime import timezone
+from scipy import signal, fftpack
+# from Classes.Pd0TRDI import Pd0TRDI
+from Classes.Pd0TRDI_2 import Pd0TRDI
+from Classes.DepthStructure import DepthStructure
+from Classes.WaterData import WaterData
+from Classes.BoatStructure import BoatStructure
+from Classes.GPSData import GPSData
+from Classes.Edges import Edges
+from Classes.ExtrapData import ExtrapData
+from Classes.Sensors import Sensors
+from Classes.SensorData import SensorData
+from Classes.HeadingData import HeadingData
+from Classes.DateTime import DateTime
+from Classes.InstrumentData import InstrumentData
+from Classes.MultiThread import MultiThread
+from Classes.CoordError import CoordError
+from MiscLibs.common_functions import nandiff, cosd, arctand, tand, nans, cart2pol, rad2azdeg, nan_less
+
+
+class TransectData(object):
+    """Class to hold Transect properties.
+
+    Attributes
+    ----------
+    adcp: InstrumentData
+        Object of InstrumentData
+    file_name: str
+        Filename of transect data file
+    w_vel: WaterData
+        Object of WaterData
+    boat_vel: BoatStructure
+        Object of BoatStructure containing objects of BoatData for BT, GGA, and VTG
+    gps: GPSData
+        Object of GPSData
+    sensors: Sensors
+        Object of Sensors
+    depths: DepthStructure
+        Object of DepthStructure containing objects of Depth data for bt_depths, vb_depths, ds_depths)
+    edges: Edges
+        Object of Edges (left and right object of clsEdgeData)
+    extrap: ExtrapData
+        Object of ExtrapData
+    start_edge: str
+        Starting edge of transect looking downstream (Left or Right)
+    orig_start_edge: str
+        Original starting edge of transect looking downstream (Left or Right)
+    date_time: DateTime
+        Object of DateTime
+    checked: bool
+        Flag indicating if the transect was checked for use in the mmt file; assumed checked for SonTek
+    in_transect_idx: np.array(int)
+        Index of ensemble data associated with the moving-boat portion of the transect
+    """
+
+    def __init__(self):
+        self.adcp = None  # object of clsInstrument
+        self.file_name = None  # filename of transect data file
+        self.w_vel = None  # object of clsWaterData
+        self.boat_vel = None  # class for various boat velocity references (btVel, ggaVel, vtgVel)
+        self.gps = None  # object of clsGPSData
+        self.sensors = None  # object of clsSensorData
+        self.depths = None  # object of clsDepthStructure for depth data including cell depths & ref depths
+        self.edges = None  # object of clsEdges(left and right object of clsEdgeData)
+        self.extrap = None  # object of clsExtrapData
+        self.start_edge = None  # starting edge of transect looking downstream (Left or Right)
+        self.orig_start_edge = None
+        self.date_time = None  # object of DateTime
+        self.checked = None  # transect was checked for use in mmt file; assumed checked for SonTek
+        self.in_transect_idx = None  # index of ensemble data associated with the moving-boat portion of the transect
+
+    def trdi(self, mmt_transect, pd0_data, mmt):
+        """Create object, lists, and instance variables for TRDI data.
+
+        Parameters
+        ----------
+        mmt_transect: MMT_Transect
+            Object of Transect (from mmt)
+        pd0_data: Pd0TRDI
+            Object of Pd0TRDI
+        mmt: MMT_TRDI
+            Object of MMT_TRDI
+        """
+
+        # Get file name of the pd0 file, which is the first file in the list of files associated with the transect
+        self.file_name = mmt_transect.Files[0]
+
+        # Get the active configuration data for the transect
+        mmt_config = getattr(mmt_transect, 'active_config')
+
+        # If the pd0 file has water track data, process all of the data
+        if pd0_data.Wt is not None:
+
+            # Ensemble times
+            # Compute time for each ensemble in seconds
+            ens_time_sec = pd0_data.Sensor.time[:, 0] * 3600 \
+                           + pd0_data.Sensor.time[:, 1] * 60 \
+                           + pd0_data.Sensor.time[:, 2] \
+                           + pd0_data.Sensor.time[:, 3] / 100
+
+            # Compute the duration of each ensemble in seconds adjusting for lost data
+            ens_delta_time = np.tile([np.nan], ens_time_sec.shape)
+            idx_time = np.where(np.isnan(ens_time_sec) == False)[0]
+            ens_delta_time[idx_time[1:]] = nandiff(ens_time_sec[idx_time])
+
+            # Adjust for transects that extend past midnight
+            idx_24hr = np.where(nan_less(ens_delta_time, 0))[0]
+            ens_delta_time[idx_24hr] = 24 * 3600 + ens_delta_time[idx_24hr]
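+            # e.g., an ensemble logged at 23:59:59 followed by one at 00:00:01
+            # yields a delta of -86398 s, corrected to +2 s by adding 86400 s.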
+            ens_delta_time = ens_delta_time.T
+
+            # Start date and time
+            idx = np.where(np.isnan(pd0_data.Sensor.time[:, 0]) == False)[0][0]
+            start_year = int(pd0_data.Sensor.date[idx, 0])
+
+            # StreamPro doesn't include y2k dates
+            if start_year < 100:
+                start_year = 2000 + int(pd0_data.Sensor.date_not_y2k[idx, 0])
+
+            start_month = int(pd0_data.Sensor.date[idx, 1])
+            start_day = int(pd0_data.Sensor.date[idx, 2])
+            start_hour = int(pd0_data.Sensor.time[idx, 0])
+            start_min = int(pd0_data.Sensor.time[idx, 1])
+            start_sec = int(pd0_data.Sensor.time[idx, 2] + pd0_data.Sensor.time[idx, 3] / 100)
+            start_micro = int(
+                ((pd0_data.Sensor.time[idx, 2] + pd0_data.Sensor.time[idx, 3] / 100) - start_sec) * 10 ** 6)
+
+            start_dt = datetime(start_year, start_month, start_day, start_hour, start_min, start_sec, start_micro,
+                                tzinfo=timezone.utc)
+            start_serial_time = start_dt.timestamp()
+            start_date = datetime.strftime(datetime.utcfromtimestamp(start_serial_time), '%m/%d/%Y')
+
+            # End date and time
+            idx = np.where(np.isnan(pd0_data.Sensor.time[:, 0]) == False)[0][-1]
+            end_year = int(pd0_data.Sensor.date[idx, 0])
+            # StreamPro does not include Y2K dates
+            if end_year < 100:
+                end_year = 2000 + int(pd0_data.Sensor.date_not_y2k[idx, 0])
+
+            end_month = int(pd0_data.Sensor.date[idx, 1])
+            end_day = int(pd0_data.Sensor.date[idx, 2])
+            end_hour = int(pd0_data.Sensor.time[idx, 0])
+            end_min = int(pd0_data.Sensor.time[idx, 1])
+            end_sec = int(pd0_data.Sensor.time[idx, 2] + pd0_data.Sensor.time[idx, 3] / 100)
+            end_micro = int(((pd0_data.Sensor.time[idx, 2] + pd0_data.Sensor.time[idx, 3] / 100) - end_sec) * 10 ** 6)
+
+            end_dt = datetime(end_year, end_month, end_day, end_hour, end_min, end_sec, end_micro, tzinfo=timezone.utc)
+            end_serial_time = end_dt.timestamp()
+
+            # Create date/time object
+            self.date_time = DateTime()
+            self.date_time.populate_data(date_in=start_date,
+                                         start_in=start_serial_time,
+                                         end_in=end_serial_time,
+                                         ens_dur_in=ens_delta_time)
+
+            # Transect checked for use in discharge computation
+            self.checked = mmt_transect.Checked
+
+            # Create class for adcp information
+            self.adcp = InstrumentData()
+            self.adcp.populate_data(manufacturer='TRDI', raw_data=pd0_data, mmt_transect=mmt_transect, mmt=mmt)
+
+            # Create valid frequency time series
+            freq_ts = self.valid_frequencies(pd0_data.Inst.freq)
+
+            # Initialize boat vel
+            self.boat_vel = BoatStructure()
+            # Apply 3-beam setting from mmt file
+            if mmt_config['Proc_Use_3_Beam_BT'] < 0.5:
+                min_beams = 4
+            else:
+                min_beams = 3
+            self.boat_vel.add_boat_object(source='TRDI',
+                                          vel_in=pd0_data.Bt.vel_mps,
+                                          freq_in=freq_ts,
+                                          coord_sys_in=pd0_data.Cfg.coord_sys[0],
+                                          nav_ref_in='BT',
+                                          min_beams=min_beams,
+                                          bottom_mode=pd0_data.Cfg.bm[0],
+                                          corr_in=pd0_data.Bt.corr,
+                                          rssi_in=pd0_data.Bt.rssi)
+
+            self.boat_vel.set_nav_reference('BT')
+
+            # Compute velocities from GPS Data
+            # ------------------------------------
+            # Raw Data
+            raw_gga_utc = pd0_data.Gps2.utc
+            raw_gga_lat = pd0_data.Gps2.lat_deg
+            raw_gga_lon = pd0_data.Gps2.lon_deg
+
+            # Determine correct sign for latitude
+            for n, lat_ref in enumerate(pd0_data.Gps2.lat_ref):
+                idx = np.nonzero(np.array(lat_ref) == 'S')
+                raw_gga_lat[n, idx] = raw_gga_lat[n, idx] * -1
+
+            # Determine correct sign for longitude
+            for n, lon_ref in enumerate(pd0_data.Gps2.lon_ref):
+                idx = np.nonzero(np.array(lon_ref) == 'W')
+                raw_gga_lon[n, idx] = raw_gga_lon[n, idx] * -1
+
+            # Assign data to local variables
+            raw_gga_alt = pd0_data.Gps2.alt
+            raw_gga_diff = pd0_data.Gps2.corr_qual
+            raw_gga_hdop = pd0_data.Gps2.hdop
+            raw_gga_num_sats = pd0_data.Gps2.num_sats
+            raw_vtg_course = pd0_data.Gps2.course_true
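+            # Convert VTG speed from km/h to m/s (1 km/h = 0.2777778 m/s)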
+            raw_vtg_speed = pd0_data.Gps2.speed_kph * 0.2777778
+            raw_vtg_delta_time = pd0_data.Gps2.vtg_delta_time
+            raw_vtg_mode_indicator = pd0_data.Gps2.mode_indicator
+            raw_gga_delta_time = pd0_data.Gps2.gga_delta_time
+
+            # RSL provided ensemble values are not supported for TRDI data
+            ext_gga_utc = []
+            ext_gga_lat = []
+            ext_gga_lon = []
+            ext_gga_alt = []
+            ext_gga_diff = []
+            ext_gga_hdop = []
+            ext_gga_num_sats = []
+            ext_vtg_course = []
+            ext_vtg_speed = []
+
+            # QRev GPS processing methods
+            gga_p_method = 'Mindt'
+            gga_v_method = 'Mindt'
+            vtg_method = 'Mindt'
+
+            # If valid gps data exist, process the data
+            if (np.nansum(np.nansum(np.abs(raw_gga_lat))) > 0) \
+                    or (np.nansum(np.nansum(np.abs(raw_vtg_speed))) > 0):
+
+                # Process raw GPS data
+                self.gps = GPSData()
+                self.gps.populate_data(raw_gga_utc=raw_gga_utc,
+                                       raw_gga_lat=raw_gga_lat,
+                                       raw_gga_lon=raw_gga_lon,
+                                       raw_gga_alt=raw_gga_alt,
+                                       raw_gga_diff=raw_gga_diff,
+                                       raw_gga_hdop=raw_gga_hdop,
+                                       raw_gga_num_sats=raw_gga_num_sats,
+                                       raw_gga_delta_time=raw_gga_delta_time,
+                                       raw_vtg_course=raw_vtg_course,
+                                       raw_vtg_speed=raw_vtg_speed,
+                                       raw_vtg_delta_time=raw_vtg_delta_time,
+                                       raw_vtg_mode_indicator=raw_vtg_mode_indicator,
+                                       ext_gga_utc=ext_gga_utc,
+                                       ext_gga_lat=ext_gga_lat,
+                                       ext_gga_lon=ext_gga_lon,
+                                       ext_gga_alt=ext_gga_alt,
+                                       ext_gga_diff=ext_gga_diff,
+                                       ext_gga_hdop=ext_gga_hdop,
+                                       ext_gga_num_sats=ext_gga_num_sats,
+                                       ext_vtg_course=ext_vtg_course,
+                                       ext_vtg_speed=ext_vtg_speed,
+                                       gga_p_method=gga_p_method,
+                                       gga_v_method=gga_v_method,
+                                       vtg_method=vtg_method)
+
+                # If valid gga data exists create gga boat velocity object
+                if np.nansum(np.nansum(np.abs(raw_gga_lat))) > 0:
+                    self.boat_vel.add_boat_object(source='TRDI',
+                                                  vel_in=self.gps.gga_velocity_ens_mps,
+                                                  coord_sys_in='Earth',
+                                                  nav_ref_in='GGA')
+
+                # If valid vtg data exist create vtg boat velocity object
+                if np.nansum(np.nansum(np.abs(raw_vtg_speed))) > 0:
+                    self.boat_vel.add_boat_object(source='TRDI',
+                                                  vel_in=self.gps.vtg_velocity_ens_mps,
+                                                  coord_sys_in='Earth',
+                                                  nav_ref_in='VTG')
+
+            # Get and compute ensemble beam depths
+            temp_depth_bt = np.array(pd0_data.Bt.depth_m)
+
+            # Screen out invalid depths
+            temp_depth_bt[temp_depth_bt < 0.01] = np.nan
+
+            # Add draft
+            temp_depth_bt += mmt_config['Offsets_Transducer_Depth']
+
+            # Get instrument cell data
+            cell_size_all_m, cell_depth_m, sl_cutoff_per, sl_lag_effect_m = \
+                TransectData.compute_cell_data(pd0_data)
+
+            # Adjust cell depth for draft
+            cell_depth_m = np.add(mmt_config['Offsets_Transducer_Depth'], cell_depth_m)
+
+            # Create depth data object for BT
+            self.depths = DepthStructure()
+            self.depths.add_depth_object(depth_in=temp_depth_bt,
+                                         source_in='BT',
+                                         freq_in=freq_ts,
+                                         draft_in=mmt_config['Offsets_Transducer_Depth'],
+                                         cell_depth_in=cell_depth_m,
+                                         cell_size_in=cell_size_all_m)
+
+            # Compute cells above side lobe
+            cells_above_sl, sl_cutoff_m = \
+                TransectData.side_lobe_cutoff(depths=self.depths.bt_depths.depth_orig_m,
+                                              draft=self.depths.bt_depths.draft_orig_m,
+                                              cell_depth=self.depths.bt_depths.depth_cell_depth_m,
+                                              sl_lag_effect=sl_lag_effect_m,
+                                              slc_type='Percent',
+                                              value=1 - sl_cutoff_per / 100)
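+            # e.g., a side-lobe cutoff of 10 percent passes value=0.9, so cells
+            # deeper than roughly 90 percent of the beam depth are excluded.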
+
+            # Check for the presence of vertical beam data
+            if np.nanmax(np.nanmax(pd0_data.Sensor.vert_beam_status)) > 0:
+                temp_depth_vb = np.tile(np.nan, (1, cell_depth_m.shape[1]))
+                temp_depth_vb[0, :] = pd0_data.Sensor.vert_beam_range_m
+
+                # Screen out invalid depths
+                temp_depth_vb[temp_depth_vb < 0.01] = np.nan
+
+                # Add draft
+                temp_depth_vb = temp_depth_vb + mmt_config['Offsets_Transducer_Depth']
+
+                # Create depth data object for vertical beam
+                self.depths.add_depth_object(depth_in=temp_depth_vb,
+                                             source_in='VB',
+                                             freq_in=freq_ts,
+                                             draft_in=mmt_config['Offsets_Transducer_Depth'],
+                                             cell_depth_in=cell_depth_m,
+                                             cell_size_in=cell_size_all_m)
+
+            # Check for the presence of depth sounder
+            if np.nansum(np.nansum(pd0_data.Gps2.depth_m)) > 1e-5:
+                temp_depth_ds = pd0_data.Gps2.depth_m
+
+                # Screen out invalid data
+                temp_depth_ds[temp_depth_ds < 0.01] = np.nan
+
+                # Use the last valid depth for each ensemble
+                last_depth_col_idx = np.sum(np.isnan(temp_depth_ds) == False, axis=1) - 1
+                last_depth_col_idx[last_depth_col_idx == -1] = 0
+                row_index = np.arange(len(temp_depth_ds))
+                last_depth = nans(row_index.size)
+                for row in row_index:
+                    last_depth[row] = temp_depth_ds[row, last_depth_col_idx[row]]
+
+                # Determine if mmt file has a scale factor and offset for the depth sounder
+                if mmt_config['DS_Cor_Spd_Sound'] == 0:
+                    scale_factor = mmt_config['DS_Scale_Factor']
+                else:
+                    scale_factor = pd0_data.Sensor.sos_mps / 1500.
+
+                # Apply scale factor, offset, and draft
+                # Note: Only the ADCP draft is stored.  The transducer
+                # draft or scaling for depth sounder data cannot be changed in QRev
+                ds_depth = np.tile(np.nan, (1, cell_depth_m.shape[1]))
+                ds_depth[0, :] = (last_depth * scale_factor) \
+                                 + mmt_config['DS_Transducer_Depth'] \
+                                 + mmt_config['DS_Transducer_Offset']
+
+                self.depths.add_depth_object(depth_in=ds_depth,
+                                             source_in='DS',
+                                             freq_in=np.tile(np.nan, pd0_data.Inst.freq.shape),
+                                             draft_in=mmt_config['Offsets_Transducer_Depth'],
+                                             cell_depth_in=cell_depth_m,
+                                             cell_size_in=cell_size_all_m)
+
+            # Set depth reference to value from mmt file
+            if 'Proc_River_Depth_Source' in mmt_config:
+                if mmt_config['Proc_River_Depth_Source'] == 0:
+                    self.depths.selected = 'bt_depths'
+                    self.depths.composite_depths(transect=self, setting='Off')
+
+                elif mmt_config['Proc_River_Depth_Source'] == 1:
+                    if self.depths.ds_depths is not None:
+                        self.depths.selected = 'ds_depths'
+                    else:
+                        self.depths.selected = 'bt_depths'
+                    self.depths.composite_depths(transect=self, setting='Off')
+
+                elif mmt_config['Proc_River_Depth_Source'] == 2:
+                    if self.depths.vb_depths is not None:
+                        self.depths.selected = 'vb_depths'
+                    else:
+                        self.depths.selected = 'bt_depths'
+                    self.depths.composite_depths(transect=self, setting='Off')
+
+                elif mmt_config['Proc_River_Depth_Source'] == 3:
+                    if self.depths.vb_depths is None:
+                        self.depths.selected = 'bt_depths'
+                        self.depths.composite_depths(transect=self, setting='Off')
+                    else:
+                        self.depths.selected = 'vb_depths'
+                        self.depths.composite_depths(transect=self, setting='On')
+
+                elif mmt_config['Proc_River_Depth_Source'] == 4:
+                    if self.depths.bt_depths is not None:
+                        self.depths.selected = 'bt_depths'
+                        if self.depths.vb_depths is not None or self.depths.ds_depths is not None:
+                            self.depths.composite_depths(transect=self, setting='On')
+                        else:
+                            self.depths.composite_depths(transect=self, setting='Off')
+                    elif self.depths.vb_depths is not None:
+                        self.depths.selected = 'vb_depths'
+                        self.depths.composite_depths(transect=self, setting='On')
+                    elif self.depths.ds_depths is not None:
+                        self.depths.selected = 'ds_depths'
+                        self.depths.composite_depths(transect=self, setting='On')
+                else:
+                    self.depths.selected = 'bt_depths'
+                    self.depths.composite_depths(transect=self, setting='Off')
+            else:
+                if mmt_config['DS_Use_Process'] > 0:
+                    if self.depths.ds_depths is not None:
+                        self.depths.selected = 'ds_depths'
+                    else:
+                        self.depths.selected = 'bt_depths'
+                else:
+                    self.depths.selected = 'bt_depths'
+                self.depths.composite_depths(transect=self, setting='Off')
+
+            # Create water_data object
+            # ------------------------
+
+            ensemble_ping_type = self.trdi_ping_type(pd0_data)
+
+            # Check for RiverRay and RiverPro data
+            firmware = str(pd0_data.Inst.firm_ver[0])
+            excluded_dist = 0
+            if (firmware[:2] == '56') and (np.nanmax(pd0_data.Sensor.vert_beam_status) < 0.9):
+                excluded_dist = 0.25
+
+            if (firmware[:2] == '44') or (firmware[:2] == '56'):
+                # Process water velocities for RiverRay and RiverPro
+                self.w_vel = WaterData()
+                self.w_vel.populate_data(vel_in=pd0_data.Wt.vel_mps,
+                                         freq_in=freq_ts,
+                                         coord_sys_in=pd0_data.Cfg.coord_sys,
+                                         nav_ref_in='None',
+                                         rssi_in=pd0_data.Wt.rssi,
+                                         rssi_units_in='Counts',
+                                         excluded_dist_in=excluded_dist,
+                                         cells_above_sl_in=cells_above_sl,
+                                         sl_cutoff_per_in=sl_cutoff_per,
+                                         sl_cutoff_num_in=0,
+                                         sl_cutoff_type_in='Percent',
+                                         sl_lag_effect_in=sl_lag_effect_m,
+                                         sl_cutoff_m=sl_cutoff_m,
+                                         wm_in=pd0_data.Cfg.wm[0],
+                                         blank_in=pd0_data.Cfg.wf_cm[0] / 100,
+                                         corr_in=pd0_data.Wt.corr,
+                                         surface_vel_in=pd0_data.Surface.vel_mps,
+                                         surface_rssi_in=pd0_data.Surface.rssi,
+                                         surface_corr_in=pd0_data.Surface.corr,
+                                         surface_num_cells_in=pd0_data.Surface.no_cells,
+                                         ping_type=ensemble_ping_type)
+
+            else:
+                # Process water velocities for non-RiverRay ADCPs
+                self.w_vel = WaterData()
+                self.w_vel.populate_data(vel_in=pd0_data.Wt.vel_mps,
+                                         freq_in=freq_ts,
+                                         coord_sys_in=pd0_data.Cfg.coord_sys[0],
+                                         nav_ref_in='None',
+                                         rssi_in=pd0_data.Wt.rssi,
+                                         rssi_units_in='Counts',
+                                         excluded_dist_in=excluded_dist,
+                                         cells_above_sl_in=cells_above_sl,
+                                         sl_cutoff_per_in=sl_cutoff_per,
+                                         sl_cutoff_num_in=0,
+                                         sl_cutoff_type_in='Percent',
+                                         sl_lag_effect_in=sl_lag_effect_m,
+                                         sl_cutoff_m=sl_cutoff_m,
+                                         wm_in=pd0_data.Cfg.wm[0],
+                                         blank_in=pd0_data.Cfg.wf_cm[0] / 100,
+                                         corr_in=pd0_data.Wt.corr,
+                                         ping_type=ensemble_ping_type)
+
+            # Create Edges Object
+            self.edges = Edges()
+            self.edges.populate_data(rec_edge_method='Fixed', vel_method='MeasMag')
+
+            # Determine number of ensembles to average
+            n_ens_left = mmt_config['Q_Shore_Pings_Avg']
+            # TRDI uses same number on left and right edges
+            n_ens_right = n_ens_left
+
+            # Set indices for ensembles in the moving-boat portion of the transect
+            self.in_transect_idx = np.arange(0, pd0_data.Bt.vel_mps.shape[1])
+
+            # Determine left and right edge distances
+            if mmt_config['Edge_Begin_Left_Bank']:
+                dist_left = float(mmt_config['Edge_Begin_Shore_Distance'])
+                dist_right = float(mmt_config['Edge_End_Shore_Distance'])
+                if 'Edge_End_Manual_Discharge' in mmt_config:
+                    user_discharge_left = float(mmt_config['Edge_Begin_Manual_Discharge'])
+                    user_discharge_right = float(mmt_config['Edge_End_Manual_Discharge'])
+                    edge_method_left = mmt_config['Edge_Begin_Method_Distance']
+                    edge_method_right = mmt_config['Edge_End_Method_Distance']
+                else:
+                    user_discharge_left = None
+                    user_discharge_right = None
+                    edge_method_left = 'Yes'
+                    edge_method_right = 'Yes'
+                self.start_edge = 'Left'
+                self.orig_start_edge = 'Left'
+            else:
+                dist_left = float(mmt_config['Edge_End_Shore_Distance'])
+                dist_right = float(mmt_config['Edge_Begin_Shore_Distance'])
+                if 'Edge_End_Manual_Discharge' in mmt_config:
+                    user_discharge_left = float(mmt_config['Edge_End_Manual_Discharge'])
+                    user_discharge_right = float(mmt_config['Edge_Begin_Manual_Discharge'])
+                    edge_method_left = mmt_config['Edge_End_Method_Distance']
+                    edge_method_right = mmt_config['Edge_Begin_Method_Distance']
+                else:
+                    user_discharge_left = None
+                    user_discharge_right = None
+                    edge_method_left = 'Yes'
+                    edge_method_right = 'Yes'
+                self.start_edge = 'Right'
+                self.orig_start_edge = 'Right'
+
+            # Create left edge
+            if edge_method_left == 'NO':
+                self.edges.left.populate_data(edge_type='User Q',
+                                              distance=dist_left,
+                                              number_ensembles=n_ens_left,
+                                              user_discharge=user_discharge_left)
+
+            elif mmt_config['Q_Left_Edge_Type'] == 0:
+                self.edges.left.populate_data(edge_type='Triangular',
+                                              distance=dist_left,
+                                              number_ensembles=n_ens_left,
+                                              user_discharge=user_discharge_left)
+
+            elif mmt_config['Q_Left_Edge_Type'] == 1:
+                self.edges.left.populate_data(edge_type='Rectangular',
+                                              distance=dist_left,
+                                              number_ensembles=n_ens_left,
+                                              user_discharge=user_discharge_left)
+
+            elif mmt_config['Q_Left_Edge_Type'] == 2:
+                self.edges.left.populate_data(edge_type='Custom',
+                                              distance=dist_left,
+                                              number_ensembles=n_ens_left,
+                                              coefficient=mmt_config['Q_Left_Edge_Coeff'],
+                                              user_discharge=user_discharge_left)
+
+            # Create right edge
+            if edge_method_right == 'NO':
+                self.edges.right.populate_data(edge_type='User Q',
+                                               distance=dist_right,
+                                               number_ensembles=n_ens_right,
+                                               user_discharge=user_discharge_right)
+            elif mmt_config['Q_Right_Edge_Type'] == 0:
+                self.edges.right.populate_data(edge_type='Triangular',
+                                               distance=dist_right,
+                                               number_ensembles=n_ens_right,
+                                               user_discharge=user_discharge_right)
+
+            elif mmt_config['Q_Right_Edge_Type'] == 1:
+                self.edges.right.populate_data(edge_type='Rectangular',
+                                               distance=dist_right,
+                                               number_ensembles=n_ens_right,
+                                               user_discharge=user_discharge_right)
+
+            elif mmt_config['Q_Right_Edge_Type'] == 2:
+                self.edges.right.populate_data(edge_type='Custom',
+                                               distance=dist_right,
+                                               number_ensembles=n_ens_right,
+                                               coefficient=mmt_config['Q_Right_Edge_Coeff'],
+                                               user_discharge=user_discharge_right)
+
+            # Create extrap object
+            # --------------------
+            # Determine top method
+            top = 'Power'
+            if mmt_config['Q_Top_Method'] == 1:
+                top = 'Constant'
+            elif mmt_config['Q_Top_Method'] == 2:
+                top = '3-Point'
+
+            # Determine bottom method
+            bot = 'Power'
+            if mmt_config['Q_Bottom_Method'] == 2:
+                bot = 'No Slip'
+
+            self.extrap = ExtrapData()
+            self.extrap.populate_data(top=top, bot=bot, exp=mmt_config['Q_Power_Curve_Coeff'])
+
+            # Sensor Data
+            self.sensors = Sensors()
+
+            # Heading
+
+            # Internal Heading
+            self.sensors.heading_deg.internal = HeadingData()
+            self.sensors.heading_deg.internal.populate_data(data_in=pd0_data.Sensor.heading_deg.T,
+                                                            source_in='internal',
+                                                            magvar=mmt_config['Offsets_Magnetic_Variation'],
+                                                            align=mmt_config['Ext_Heading_Offset'])
+
+            # External Heading
+            ext_heading_check = np.where(np.isnan(pd0_data.Gps2.heading_deg) == False)
+            if len(ext_heading_check[0]) <= 0:
+                self.sensors.heading_deg.selected = 'internal'
+            else:
+                # Determine external heading for each ensemble
+                # Using the minimum time difference
+                d_time = np.abs(pd0_data.Gps2.hdt_delta_time)
+                d_time_min = np.nanmin(d_time.T, 0).T
+                use = np.tile([np.nan], d_time.shape)
+                for nd_time in range(len(d_time_min)):
+                    use[nd_time, :] = np.abs(d_time[nd_time, :]) == d_time_min[nd_time]
+
+                ext_heading_deg = np.tile([np.nan], (len(d_time_min)))
+                for nh in range(len(d_time_min)):
+                    idx = np.where(use[nh, :])[0]
+                    if len(idx) > 0:
+                        idx = idx[0]
+                        ext_heading_deg[nh] = pd0_data.Gps2.heading_deg[nh, idx]
+
+                # Create external heading sensor
+                self.sensors.heading_deg.external = HeadingData()
+                self.sensors.heading_deg.external.populate_data(data_in=ext_heading_deg,
+                                                                source_in='external',
+                                                                magvar=mmt_config['Offsets_Magnetic_Variation'],
+                                                                align=mmt_config['Ext_Heading_Offset'])
+
+                # Determine heading source to use from mmt setting
+                source_used = mmt_config['Ext_Heading_Use']
+                if source_used:
+                    self.sensors.heading_deg.selected = 'external'
+                else:
+                    self.sensors.heading_deg.selected = 'internal'
+
+            # Pitch
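+            # TRDI instruments report gimbal pitch; the true pitch is computed
+            # below as arctan(tan(gimbal_pitch) * cos(roll)).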
+            pitch = arctand(tand(pd0_data.Sensor.pitch_deg) * cosd(pd0_data.Sensor.roll_deg))
+            pitch_src = pd0_data.Cfg.pitch_src[0]
+
+            # Create pitch sensor
+            self.sensors.pitch_deg.internal = SensorData()
+            self.sensors.pitch_deg.internal.populate_data(data_in=pitch, source_in=pitch_src)
+            self.sensors.pitch_deg.selected = 'internal'
+
+            # Roll
+            roll = pd0_data.Sensor.roll_deg.T
+            roll_src = pd0_data.Cfg.roll_src[0]
+
+            # Create Roll sensor
+            self.sensors.roll_deg.internal = SensorData()
+            self.sensors.roll_deg.internal.populate_data(data_in=roll, source_in=roll_src)
+            self.sensors.roll_deg.selected = 'internal'
+
+            # Temperature
+            temperature = pd0_data.Sensor.temperature_deg_c.T
+            temperature_src = pd0_data.Cfg.temp_src[0]
+
+            # Create temperature sensor
+            self.sensors.temperature_deg_c.internal = SensorData()
+            self.sensors.temperature_deg_c.internal.populate_data(data_in=temperature, source_in=temperature_src)
+            self.sensors.temperature_deg_c.selected = 'internal'
+
+            # Salinity
+            pd0_salinity = pd0_data.Sensor.salinity_ppt.T
+            pd0_salinity_src = pd0_data.Cfg.sal_src[0]
+
+            # Create salinity sensor from pd0 data
+            self.sensors.salinity_ppt.internal = SensorData()
+            self.sensors.salinity_ppt.internal.populate_data(data_in=pd0_salinity, source_in=pd0_salinity_src)
+
+            # Create salinity sensor from mmt data
+            mmt_salinity = mmt_config['Proc_Salinity']
+            mmt_salinity = np.tile(mmt_salinity, pd0_salinity.shape)
+            self.sensors.salinity_ppt.user = SensorData()
+            self.sensors.salinity_ppt.user.populate_data(data_in=mmt_salinity, source_in='mmt')
+
+            # Set selected salinity
+            self.sensors.salinity_ppt.selected = 'internal'
+
+            # Speed of Sound
+            speed_of_sound = pd0_data.Sensor.sos_mps.T
+            speed_of_sound_src = pd0_data.Cfg.sos_src[0]
+            self.sensors.speed_of_sound_mps.internal = SensorData()
+            self.sensors.speed_of_sound_mps.internal.populate_data(data_in=speed_of_sound, source_in=speed_of_sound_src)
+
+            # The raw data are referenced to the internal SOS
+            self.sensors.speed_of_sound_mps.selected = 'internal'
+
+    @staticmethod
+    def trdi_ping_type(pd0_data):
+        """Determines if the ping is coherent on incoherent based on the lag near bottom. A coherent ping will have
+        the lag near the bottom.
+
+        Parameters
+        ----------
+        pd0_data: Pd0TRDI
+            Raw data from pd0 file.
+
+        Returns
+        -------
+        ping_type: np.array(str)
+            Ping type for each ensemble: C - coherent, I - incoherent, U - unknown
+        """
+        ping_type = np.array([])
+
+        firmware = str(pd0_data.Inst.firm_ver[0])
+        # RiverRay, RiverPro, and RioPro
+        if (firmware[:2] == '44') or (firmware[:2] == '56'):
+            if hasattr(pd0_data.Cfg, 'lag_near_bottom'):
+                ping_temp = pd0_data.Cfg.lag_near_bottom > 0
+                ping_type = np.tile(['U'], ping_temp.shape)
+                ping_type[ping_temp == 0] = 'I'
+                ping_type[ping_temp == 1] = 'C'
+
+        # StreamPro
+        elif firmware[:2] == '31':
+            if pd0_data.Cfg.wm[0] == 12:
+                ping_type = np.tile(['I'], pd0_data.Wt.vel_mps.shape[2])
+            elif pd0_data.Cfg.wm[0] == 13:
+                ping_type = np.tile(['C'], pd0_data.Wt.vel_mps.shape[2])
+            else:
+                ping_type = np.tile(['U'], pd0_data.Wt.vel_mps.shape[2])
+
+        # Rio Grande
+        elif firmware[:2] == '10':
+            if pd0_data.Cfg.wm[0] == 1 or pd0_data.Cfg.wm[0] == 12:
+                ping_type = np.tile(['I'], pd0_data.Wt.vel_mps.shape[2])
+            elif pd0_data.Cfg.wm[0] == 5 or pd0_data.Cfg.wm[0] == 8:
+                ping_type = np.tile(['C'], pd0_data.Wt.vel_mps.shape[2])
+            else:
+                ping_type = np.tile(['U'], pd0_data.Wt.vel_mps.shape[2])
+        else:
+            ping_type = np.tile(['U'], pd0_data.Wt.vel_mps.shape[2])
+        return ping_type
+
+    def sontek(self, rsdata, file_name):
+        """Reads Matlab file produced by RiverSurveyor Live and populates the transect instance variables.
+
+        Parameters
+        ----------
+        rsdata: MatSonTek
+            Object of Matlab data from SonTek Matlab files
+        file_name: str
+            Name of SonTek Matlab file not including path.
+        """
+
+        self.file_name = os.path.basename(file_name)
+
+        # ADCP instrument information
+        # ---------------------------
+        self.adcp = InstrumentData()
+        if hasattr(rsdata.System, 'InstrumentModel'):
+            self.adcp.populate_data(manufacturer='Nortek', raw_data=rsdata)
+        else:
+            self.adcp.populate_data(manufacturer='SonTek', raw_data=rsdata)
+
+        # Ensemble times
+        ensemble_delta_time = np.append([0], np.diff(rsdata.System.Time))
+        # TODO potentially add popup message when there are missing ensembles. Matlab did that.
+
+        # idx_missing = np.where(ensemble_delta_time > 1.5)
+        # if len(idx_missing[0]) > 0:
+        #     number_missing = np.sum(ensemble_delta_time[idx_missing]) - len(idx_missing)
+        #     error_str = self.file_name + ' is missing ' + str(number_missing) + ' samples'
+
+        start_serial_time = rsdata.System.Time[0] + ((30 * 365) + 7) * 24 * 60 * 60
+        end_serial_time = rsdata.System.Time[-1] + ((30 * 365) + 7) * 24 * 60 * 60
+        meas_date = datetime.strftime(datetime.fromtimestamp(start_serial_time), '%m/%d/%Y')
+        self.date_time = DateTime()
+        self.date_time.populate_data(date_in=meas_date,
+                                     start_in=start_serial_time,
+                                     end_in=end_serial_time,
+                                     ens_dur_in=ensemble_delta_time)
+
+        # Transect checked for use in discharge computations
+        self.checked = True
+
+        # Coordinate system
+        ref_coord = None
+
+        # The initial coordinate system must be set to earth for early versions of RiverSurveyor firmware.
+        # This implementation forces all versions to use the earth coordinate system.
+        if rsdata.Setup.coordinateSystem == 0:
+            # ref_coord = 'Beam'
+            raise CoordError('Beam Coordinates are not supported for all RiverSurveyor firmware releases, ' +
+                             'use Earth coordinates.')
+        elif rsdata.Setup.coordinateSystem == 1:
+            # ref_coord = 'Inst'
+            raise CoordError('Instrument Coordinates are not supported for all RiverSurveyor firmware releases, ' +
+                             'use Earth coordinates.')
+        elif rsdata.Setup.coordinateSystem == 2:
+            ref_coord = 'Earth'
+
+        # Speed of Sound Parameters
+        # -------------------------
+        # In SonTek's Matlab file the BT velocity, VB Depth, and WT Velocity are not reported as raw data but rather
+        # are reported as processed values based on manual settings of temperature, salinity, and speed of sound.
+        # Note: the 4 beam depths are raw data and are not adjusted.
+        # QRev expects raw data to be independent of user settings. Therefore, manual settings must be identified
+        # and the Matlab data adjusted to reflect the raw data before creating the data classes in QRev.
+        # The manual values will then be applied during processing.
+
+        self.sensors = Sensors()
+
+        # Temperature
+        if rsdata.System.Units.Temperature.find('C') >= 0:
+            temperature = rsdata.System.Temperature
+        else:
+            temperature = (5. / 9.) * (rsdata.System.Temperature - 32)
+        self.sensors.temperature_deg_c.internal = SensorData()
+        self.sensors.temperature_deg_c.internal.populate_data(data_in=temperature, source_in='internal')
+        self.sensors.temperature_deg_c.selected = 'internal'
+
+        if hasattr(rsdata.Setup, 'userTemperature'):
+            if rsdata.Setup.useMeasuredTemperature == 0:
+                if rsdata.Setup.Units.userTemperature.find('C') >= 0:
+                    temperature = rsdata.Setup.userTemperature
+                else:
+                    temperature = (5. / 9.) * (rsdata.Setup.userTemperature - 32)
+                self.sensors.temperature_deg_c.user = SensorData()
+                self.sensors.temperature_deg_c.user.populate_data(data_in=temperature, source_in='Manual')
+                self.sensors.temperature_deg_c.selected = 'user'
+
+        # Salinity
+        # Create internal salinity using a zero value since salinity can only be applied in RSL and not in the raw data
+        self.sensors.salinity_ppt.internal = SensorData()
+        self.sensors.salinity_ppt.internal.populate_data(data_in=0, source_in='QRev')
+        self.sensors.salinity_ppt.user = SensorData()
+        self.sensors.salinity_ppt.user.populate_data(data_in=rsdata.Setup.userSalinity, source_in='Manual')
+
+        # Set salinity source
+        if rsdata.Setup.userSalinity > 0:
+            self.sensors.salinity_ppt.selected = 'user'
+        else:
+            self.sensors.salinity_ppt.selected = 'internal'
+
+        # Speed of sound
+        # Internal sos provided in SonTek data but is computed from equation.
+        temperature = self.sensors.temperature_deg_c.internal.data
+        salinity = self.sensors.salinity_ppt.internal.data
+        speed_of_sound = Sensors.unesco_speed_of_sound(t=temperature, s=salinity)
+        self.sensors.speed_of_sound_mps.internal = SensorData()
+        self.sensors.speed_of_sound_mps.internal.populate_data(data_in=speed_of_sound, source_in='QRev')
+        self.sensors.speed_of_sound_mps.selected = 'internal'
+
+        if hasattr(rsdata.Setup, 'useFixedSoundSpeed'):
+            if rsdata.Setup.useFixedSoundSpeed > 0:
+                self.sensors.speed_of_sound_mps.user = SensorData()
+                user_sos = rsdata.Setup.fixedSoundSpeed
+                self.sensors.speed_of_sound_mps.user.populate_data(data_in=user_sos, source_in='Manual')
+                self.sensors.speed_of_sound_mps.selected = 'user'
+
+        # Speed of sound correction to obtain raw data
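+        # (e.g., if the internal sos is 1500 m/s and the user-based sos is 1450 m/s, the correction ratio is
+        # 1500 / 1450 ~ 1.034; multiplying the reported velocities and cell sizes by this ratio backs out the
+        # manual settings to recover the raw values)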
+        sos_correction = None
+        if self.sensors.speed_of_sound_mps.selected == 'user':
+            sos_correction = self.sensors.speed_of_sound_mps.internal.data / self.sensors.speed_of_sound_mps.user.data
+
+        elif self.sensors.salinity_ppt.selected == 'user' or self.sensors.temperature_deg_c.selected == 'user':
+            selected_temperature = getattr(self.sensors.temperature_deg_c, self.sensors.temperature_deg_c.selected)
+            temperature = selected_temperature.data
+            selected_salinity = getattr(self.sensors.salinity_ppt, self.sensors.salinity_ppt.selected)
+            salinity = selected_salinity.data
+            sos_user = Sensors.unesco_speed_of_sound(t=temperature, s=salinity)
+            sos_correction = self.sensors.speed_of_sound_mps.internal.data / sos_user
+
+        # Bottom Track
+        # ------------
+
+        # Convert frequency to kHz
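+        # Heuristic: mean values above 10000 are assumed to be reported in Hz and values below 100 in MHz;
+        # anything in between is taken to already be in kHz (e.g., 3000000 Hz -> 3000 kHz, 3 MHz -> 3000 kHz).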
+        if np.nanmean(rsdata.BottomTrack.BT_Frequency) > 10000:
+            freq = rsdata.BottomTrack.BT_Frequency / 1000
+        elif np.nanmean(rsdata.BottomTrack.BT_Frequency) < 100:
+            freq = rsdata.BottomTrack.BT_Frequency * 1000
+        else:
+            freq = rsdata.BottomTrack.BT_Frequency
+
+        # Create valid frequency time series
+        freq_ts = self.valid_frequencies(freq)
+
+        bt_vel = np.swapaxes(rsdata.BottomTrack.BT_Vel, 1, 0)
+
+        # Apply correction for manual sos parameters to obtain raw values
+        if sos_correction is not None:
+            bt_vel = np.around(bt_vel * sos_correction, 3)
+
+        self.boat_vel = BoatStructure()
+        self.boat_vel.add_boat_object(source='SonTek',
+                                      vel_in=bt_vel,
+                                      freq_in=freq_ts,
+                                      coord_sys_in=ref_coord,
+                                      nav_ref_in='BT')
+
+        # GPS Data
+        # --------
+        self.gps = GPSData()
+        if np.nansum(rsdata.GPS.GPS_Quality) > 0:
+
+            if len(rsdata.RawGPSData.GgaLatitude.shape) > 1:
+
+                self.gps.populate_data(raw_gga_utc=rsdata.RawGPSData.GgaUTC,
+                                       raw_gga_lat=rsdata.RawGPSData.GgaLatitude,
+                                       raw_gga_lon=rsdata.RawGPSData.GgaLongitude,
+                                       raw_gga_alt=rsdata.RawGPSData.GgaAltitude,
+                                       raw_gga_diff=rsdata.RawGPSData.GgaQuality,
+                                       raw_gga_hdop=np.swapaxes(np.tile(rsdata.GPS.HDOP,
+                                                                        (rsdata.RawGPSData.GgaLatitude.shape[1],
+                                                                         1)), 1, 0),
+                                       raw_gga_num_sats=np.swapaxes(np.tile(rsdata.GPS.Satellites,
+                                                                            (rsdata.RawGPSData.GgaLatitude.shape[1],
+                                                                             1)), 1, 0),
+                                       raw_gga_delta_time=None,
+                                       raw_vtg_course=rsdata.RawGPSData.VtgTmgTrue,
+                                       raw_vtg_speed=rsdata.RawGPSData.VtgSogMPS,
+                                       raw_vtg_delta_time=None,
+                                       raw_vtg_mode_indicator=rsdata.RawGPSData.VtgMode,
+                                       ext_gga_utc=rsdata.GPS.Utc,
+                                       ext_gga_lat=rsdata.GPS.Latitude,
+                                       ext_gga_lon=rsdata.GPS.Longitude,
+                                       ext_gga_alt=rsdata.GPS.Altitude,
+                                       ext_gga_diff=rsdata.GPS.GPS_Quality,
+                                       ext_gga_hdop=rsdata.GPS.HDOP,
+                                       ext_gga_num_sats=rsdata.GPS.Satellites,
+                                       ext_vtg_course=np.tile(np.nan, rsdata.GPS.Latitude.shape),
+                                       ext_vtg_speed=np.tile(np.nan, rsdata.GPS.Latitude.shape),
+                                       gga_p_method='End',
+                                       gga_v_method='End',
+                                       vtg_method='Average')
+            else:
+                # Nortek data
+                rows = rsdata.RawGPSData.GgaLatitude.shape[0]
+                self.gps.populate_data(raw_gga_utc=rsdata.GPS.Utc.reshape(rows, 1),
+                                       raw_gga_lat=rsdata.GPS.Latitude.reshape(rows, 1),
+                                       raw_gga_lon=rsdata.GPS.Longitude.reshape(rows, 1),
+                                       raw_gga_alt=rsdata.GPS.Altitude.reshape(rows, 1),
+                                       raw_gga_diff=rsdata.GPS.GPS_Quality.reshape(rows, 1),
+                                       raw_gga_hdop=rsdata.GPS.HDOP.reshape(rows, 1),
+                                       raw_gga_num_sats=rsdata.GPS.Satellites.reshape(rows, 1),
+                                       raw_gga_delta_time=None,
+                                       raw_vtg_course=rsdata.RawGPSData.VtgTmgTrue.reshape(rows, 1),
+                                       raw_vtg_speed=rsdata.RawGPSData.VtgSogMPS.reshape(rows, 1),
+                                       raw_vtg_delta_time=None,
+                                       raw_vtg_mode_indicator=rsdata.RawGPSData.VtgMode.reshape(rows, 1),
+                                       ext_gga_utc=rsdata.GPS.Utc,
+                                       ext_gga_lat=rsdata.GPS.Latitude,
+                                       ext_gga_lon=rsdata.GPS.Longitude,
+                                       ext_gga_alt=rsdata.GPS.Altitude,
+                                       ext_gga_diff=rsdata.GPS.GPS_Quality,
+                                       ext_gga_hdop=rsdata.GPS.HDOP,
+                                       ext_gga_num_sats=rsdata.GPS.Satellites,
+                                       ext_vtg_course=np.tile(np.nan, rsdata.GPS.Latitude.shape),
+                                       ext_vtg_speed=np.tile(np.nan, rsdata.GPS.Latitude.shape),
+                                       gga_p_method='End',
+                                       gga_v_method='End',
+                                       vtg_method='Average')
+
+            self.boat_vel.add_boat_object(source='SonTek',
+                                          vel_in=self.gps.gga_velocity_ens_mps,
+                                          freq_in=None,
+                                          coord_sys_in='Earth',
+                                          nav_ref_in='GGA')
+
+            self.boat_vel.add_boat_object(source='SonTek',
+                                          vel_in=self.gps.vtg_velocity_ens_mps,
+                                          freq_in=None,
+                                          coord_sys_in='Earth',
+                                          nav_ref_in='VTG')
+        ref = 'BT'
+        if rsdata.Setup.trackReference == 1:
+            ref = 'BT'
+        elif rsdata.Setup.trackReference == 2:
+            ref = 'GGA'
+        elif rsdata.Setup.trackReference == 3:
+            ref = 'VTG'
+        self.boat_vel.set_nav_reference(ref)
+
+        # Depth
+        # -----
+
+        # Initialize depth data structure
+        self.depths = DepthStructure()
+
+        # Determine array rows and cols
+        max_cells = rsdata.WaterTrack.Velocity.shape[0]
+        num_ens = rsdata.WaterTrack.Velocity.shape[2]
+
+        # Compute cell sizes and depths
+        cell_size = rsdata.System.Cell_Size.reshape(1, num_ens)
+        cell_size_all = np.tile(cell_size, (max_cells, 1))
+        top_of_cells = rsdata.System.Cell_Start.reshape(1, num_ens)
+        cell_depth = ((np.tile(np.arange(1, max_cells + 1, 1).reshape(max_cells, 1), (1, num_ens)) - 0.5)
+                      * cell_size_all) + np.tile(top_of_cells, (max_cells, 1))
+
+        # Adjust cell size and depth for user supplied temp, sal, or sos
+        if sos_correction is not None:
+            cell_size_all = np.around(cell_size_all * sos_correction, 6)
+            cell_depth = \
+                np.around(((cell_depth - rsdata.Setup.sensorDepth) * sos_correction) + rsdata.Setup.sensorDepth, 6)
+
+        # Prepare bottom track depth variable
+        depth = rsdata.BottomTrack.BT_Beam_Depth.T
+        depth[depth == 0] = np.nan
+
+        # Convert frequency to kHz
+        if np.nanmean(rsdata.BottomTrack.BT_Frequency) > 10000:
+            freq = rsdata.BottomTrack.BT_Frequency / 1000
+        else:
+            freq = rsdata.BottomTrack.BT_Frequency
+
+        # Create depth object for bottom track beams
+        self.depths.add_depth_object(depth_in=depth,
+                                     source_in='BT',
+                                     freq_in=freq_ts,
+                                     draft_in=rsdata.Setup.sensorDepth,
+                                     cell_depth_in=cell_depth,
+                                     cell_size_in=cell_size_all)
+
+        # Prepare vertical beam depth variable
+        depth_vb = np.tile(np.nan, (1, cell_depth.shape[1]))
+        depth_vb[0, :] = rsdata.BottomTrack.VB_Depth
+        depth_vb[depth_vb == 0] = np.nan
+
+        # Apply correction for manual sos parameters to obtain raw values
+        if sos_correction is not None:
+            depth_vb = np.around(((depth_vb - rsdata.Setup.sensorDepth) * sos_correction) + rsdata.Setup.sensorDepth, 5)
+
+        # Create depth object for vertical beam
+        self.depths.add_depth_object(depth_in=depth_vb,
+                                     source_in='VB',
+                                     freq_in=np.array([rsdata.Transformation_Matrices.Frequency[1]] * depth.shape[-1]),
+                                     draft_in=rsdata.Setup.sensorDepth,
+                                     cell_depth_in=cell_depth,
+                                     cell_size_in=cell_size_all)
+
+        # Set depth reference
+        if rsdata.Setup.depthReference < 0.5:
+            self.depths.selected = 'vb_depths'
+        else:
+            self.depths.selected = 'bt_depths'
+
+        # Water Velocity
+        # --------------
+
+        # Convert frequency to kHz
+        if np.nanmean(rsdata.WaterTrack.WT_Frequency) > 10000:
+            freq = rsdata.WaterTrack.WT_Frequency / 1000
+        else:
+            freq = rsdata.WaterTrack.WT_Frequency
+
+        # Create valid frequency time series
+        freq_ts = self.valid_frequencies(freq)
+
+        # Rearrange arrays for consistency with WaterData class
+        vel = np.swapaxes(rsdata.WaterTrack.Velocity, 1, 0)
+
+        # Apply correction for manual sos parameters to obtain raw values
+        if sos_correction is not None:
+            vel = np.around(vel * sos_correction, 3)
+        snr = np.swapaxes(rsdata.System.SNR, 1, 0)
+        if hasattr(rsdata.WaterTrack, 'Correlation'):
+            corr = np.swapaxes(rsdata.WaterTrack.Correlation, 1, 0)
+        else:
+            corr = np.array([])
+
+        # Correct SonTek difference velocity for error in earlier transformation matrices.
+        if abs(rsdata.Transformation_Matrices.Matrix[3, 0, 0]) < 0.5:
+            vel[3, :, :] = vel[3, :, :] * 2
+
+        # Apply TRDI scaling to SonTek difference velocity to convert to a TRDI compatible error velocity
+        vel[3, :, :] = vel[3, :, :] / ((2 ** 0.5) * np.tan(np.deg2rad(25)))
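+        # (SonTek reports the unscaled difference between the velocities computed from the two beam pairs;
+        # dividing by sqrt(2) * tan(beam angle), with a 25 deg beam angle, matches TRDI's error velocity
+        # convention, whose magnitude is directly comparable to the horizontal velocity components.)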
+
+        # Convert velocity reference from what was used in RiverSurveyor Live to None by adding the boat velocity
+        # to the reported water velocity
+        boat_vel = np.swapaxes(rsdata.Summary.Boat_Vel, 1, 0)
+        vel[0, :, :] = vel[0, :, :] + boat_vel[0, :]
+        vel[1, :, :] = vel[1, :, :] + boat_vel[1, :]
+
+        ref_water = 'None'
+
+        # Compute side lobe cutoff using Transmit Length information if available; if not, it is assumed to be
+        # equal to 1/2 depth_cell_size_m. The percent method is used for the side lobe cutoff computation.
+        sl_cutoff_percent = rsdata.Setup.extrapolation_dDiscardPercent
+        sl_cutoff_number = rsdata.Setup.extrapolation_nDiscardCells
+        if hasattr(rsdata.Summary, 'Transmit_Length'):
+            sl_lag_effect_m = (rsdata.Summary.Transmit_Length
+                               + self.depths.bt_depths.depth_cell_size_m[0, :]) / 2.0
+        else:
+            sl_lag_effect_m = np.copy(self.depths.bt_depths.depth_cell_size_m[0, :])
+        sl_cutoff_type = 'Percent'
+        cells_above_sl, sl_cutoff_m = TransectData.side_lobe_cutoff(depths=self.depths.bt_depths.depth_orig_m,
+                                                                    draft=self.depths.bt_depths.draft_orig_m,
+                                                                    cell_depth=self.depths.bt_depths.depth_cell_depth_m,
+                                                                    sl_lag_effect=sl_lag_effect_m,
+                                                                    slc_type=sl_cutoff_type,
+                                                                    value=1 - sl_cutoff_percent / 100)
+        # Determine water mode
+        if len(corr) > 0:
+            corr_nan = np.isnan(corr)
+            number_of_nan = np.count_nonzero(corr_nan)
+            if number_of_nan == 0:
+                wm = 'HD'
+            elif corr_nan.size == number_of_nan:
+                wm = 'IC'
+            else:
+                wm = 'Variable'
+        else:
+            wm = 'Unknown'
+
+        # Determine excluded distance (Similar to SonTek's screening distance)
+        excluded_distance = rsdata.Setup.screeningDistance - rsdata.Setup.sensorDepth
+        if excluded_distance < 0:
+            excluded_distance = 0
+
+        if hasattr(rsdata.WaterTrack, 'Vel_Expected_StdDev'):
+            # RS5
+            ping_type = self.sontek_ping_type(corr=corr, freq=rsdata.WaterTrack.WT_Frequency,
+                                              expected_std=rsdata.WaterTrack.Vel_Expected_StdDev)
+        else:
+            # M9 or S5
+            ping_type = self.sontek_ping_type(corr=corr, freq=rsdata.WaterTrack.WT_Frequency)
+
+        # Create water velocity object
+        self.w_vel = WaterData()
+        self.w_vel.populate_data(vel_in=vel,
+                                 freq_in=freq_ts,
+                                 coord_sys_in=ref_coord,
+                                 nav_ref_in=ref_water,
+                                 rssi_in=snr,
+                                 rssi_units_in='SNR',
+                                 excluded_dist_in=excluded_distance,
+                                 cells_above_sl_in=cells_above_sl,
+                                 sl_cutoff_per_in=sl_cutoff_percent,
+                                 sl_cutoff_num_in=sl_cutoff_number,
+                                 sl_cutoff_type_in=sl_cutoff_type,
+                                 sl_lag_effect_in=sl_lag_effect_m,
+                                 sl_cutoff_m=sl_cutoff_m,
+                                 wm_in=wm,
+                                 blank_in=excluded_distance,
+                                 corr_in=corr,
+                                 ping_type=ping_type)
+
+        # Edges
+        # -----
+        # Create edge object
+        self.edges = Edges()
+        self.edges.populate_data(rec_edge_method='Variable',
+                                 vel_method='VectorProf')
+
+        # Determine number of ensembles for each edge
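+        # rsdata.System.Step appears to encode the collection state: values of 2 and 4 mark the edge
+        # ensembles (which edge they belong to depends on the starting edge) and 3 marks the moving-boat
+        # (in-transect) ensembles.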
+        if rsdata.Setup.startEdge > 0.1:
+            ensembles_right = np.nansum(rsdata.System.Step == 2)
+            ensembles_left = np.nansum(rsdata.System.Step == 4)
+            self.start_edge = 'Right'
+            self.orig_start_edge = 'Right'
+        else:
+            ensembles_right = np.nansum(rsdata.System.Step == 4)
+            ensembles_left = np.nansum(rsdata.System.Step == 2)
+            self.start_edge = 'Left'
+            self.orig_start_edge = 'Left'
+        self.in_transect_idx = np.where(rsdata.System.Step == 3)[0]
+
+        # Create left edge object
+        edge_type = None
+        if rsdata.Setup.Edges_0__Method == 2:
+            edge_type = 'Triangular'
+        elif rsdata.Setup.Edges_0__Method == 1:
+            edge_type = 'Rectangular'
+        elif rsdata.Setup.Edges_0__Method == 0:
+            edge_type = 'User Q'
+        if np.isnan(rsdata.Setup.Edges_0__EstimatedQ):
+            user_discharge = None
+        else:
+            user_discharge = rsdata.Setup.Edges_0__EstimatedQ
+        self.edges.left.populate_data(edge_type=edge_type,
+                                      distance=rsdata.Setup.Edges_0__DistanceToBank,
+                                      number_ensembles=ensembles_left,
+                                      coefficient=None,
+                                      user_discharge=user_discharge)
+
+        # Create right edge object
+        if rsdata.Setup.Edges_1__Method == 2:
+            edge_type = 'Triangular'
+        elif rsdata.Setup.Edges_1__Method == 1:
+            edge_type = 'Rectangular'
+        elif rsdata.Setup.Edges_1__Method == 0:
+            edge_type = 'User Q'
+        if np.isnan(rsdata.Setup.Edges_1__EstimatedQ):
+            user_discharge = None
+        else:
+            user_discharge = rsdata.Setup.Edges_1__EstimatedQ
+        self.edges.right.populate_data(edge_type=edge_type,
+                                       distance=rsdata.Setup.Edges_1__DistanceToBank,
+                                       number_ensembles=ensembles_right,
+                                       coefficient=None,
+                                       user_discharge=user_discharge)
+
+        # Extrapolation
+        # -------------
+        top = ''
+        bottom = ''
+
+        # Top extrapolation
+        if rsdata.Setup.extrapolation_Top_nFitType == 0:
+            top = 'Constant'
+        elif rsdata.Setup.extrapolation_Top_nFitType == 1:
+            top = 'Power'
+        elif rsdata.Setup.extrapolation_Top_nFitType == 2:
+            top = '3-Point'
+
+        # Bottom extrapolation
+        if rsdata.Setup.extrapolation_Bottom_nFitType == 0:
+            bottom = 'Constant'
+        elif rsdata.Setup.extrapolation_Bottom_nFitType == 1:
+            if rsdata.Setup.extrapolation_Bottom_nEntirePro > 1.1:
+                bottom = 'No Slip'
+            else:
+                bottom = 'Power'
+
+        # Create extrapolation object
+        self.extrap = ExtrapData()
+        self.extrap.populate_data(top=top,
+                                  bot=bottom,
+                                  exp=rsdata.Setup.extrapolation_Bottom_dExponent)
+
+        # Sensor data
+        # -----------
+
+        # Internal heading
+        self.sensors.heading_deg.internal = HeadingData()
+
+        # Check for firmware supporting G3 compass and associated data
+        if hasattr(rsdata, 'Compass'):
+            # TODO need to find older file that had 3 columns in Magnetic error to test and modify code
+            mag_error = rsdata.Compass.Magnetic_error
+            pitch_limit = np.array((rsdata.Compass.Maximum_Pitch, rsdata.Compass.Minimum_Pitch)).T
+            roll_limit = np.array((rsdata.Compass.Maximum_Roll, rsdata.Compass.Minimum_Roll)).T
+            if np.any(np.greater_equal(np.abs(pitch_limit), 90)) or np.any(np.greater_equal(np.abs(roll_limit), 90)):
+                pitch_limit = None
+                roll_limit = None
+        else:
+            mag_error = None
+            pitch_limit = None
+            roll_limit = None
+        self.sensors.heading_deg.internal.populate_data(data_in=rsdata.System.Heading,
+                                                        source_in='internal',
+                                                        magvar=rsdata.Setup.magneticDeclination,
+                                                        mag_error=mag_error,
+                                                        pitch_limit=pitch_limit,
+                                                        roll_limit=roll_limit)
+
+        # External heading
+        ext_heading = rsdata.System.GPS_Compass_Heading
+        if np.nansum(np.abs(np.diff(ext_heading))) > 0:
+            self.sensors.heading_deg.external = HeadingData()
+            self.sensors.heading_deg.external.populate_data(data_in=ext_heading,
+                                                            source_in='external',
+                                                            magvar=rsdata.Setup.magneticDeclination,
+                                                            align=rsdata.Setup.hdtHeadingCorrection)
+
+        # Set selected reference
+        if rsdata.Setup.headingSource > 1.1:
+            self.sensors.heading_deg.selected = 'external'
+        else:
+            self.sensors.heading_deg.selected = 'internal'
+
+        # Pitch and roll
+        pitch = None
+        roll = None
+        if hasattr(rsdata, 'Compass'):
+            pitch = rsdata.Compass.Pitch
+            roll = rsdata.Compass.Roll
+        elif hasattr(rsdata.System, 'Pitch'):
+            pitch = rsdata.System.Pitch
+            roll = rsdata.System.Roll
+        self.sensors.pitch_deg.internal = SensorData()
+        self.sensors.pitch_deg.internal.populate_data(data_in=pitch, source_in='internal')
+        self.sensors.pitch_deg.selected = 'internal'
+        self.sensors.roll_deg.internal = SensorData()
+        self.sensors.roll_deg.internal.populate_data(data_in=roll, source_in='internal')
+        self.sensors.roll_deg.selected = 'internal'
+
+        # Set composite depths as this is the only option in RiverSurveyor Live
+        self.depths.composite_depths(transect=self, setting="On")
+
+    @staticmethod
+    def sontek_ping_type(corr, freq, expected_std=None):
+        """Determines ping type based on the fact that HD has correlation but incoherent does not.
+
+        Parameters
+        ----------
+        corr: np.ndarray(int)
+            Water track correlation
+        freq: np.ndarray(float)
+            Frequency of ping in kHz
+        expected_std: np.ndarray(float)
+            Expected standard deviation of the velocity, used to identify RS5 ping types (None for M9 or S5)
+
+        Returns
+        -------
+        ping_type: np.array(str)
+            Ping type for each ensemble: '1I' / '1C' (1 MHz incoherent / HD), '3I' / '3C' (3 MHz
+            incoherent / HD), or 'PC' / 'BB' / 'PC/BB' for the RS5
+        """
+        # Determine ping type
+
+        if expected_std is None:
+            # M9 or S5
+            if corr.size > 0:
+                corr_exists = np.nansum(np.nansum(corr, axis=1), axis=0)
+                coherent = corr_exists > 0
+            else:
+                coherent = np.tile([False], freq.size)
+            ping_type = []
+            for n in range(len(coherent)):
+                if coherent[n]:
+                    if freq[n] == 3000:
+                        ping_type.append('3C')
+                    else:
+                        ping_type.append('1C')
+                else:
+                    if freq[n] == 3000:
+                        ping_type.append('3I')
+                    else:
+                        ping_type.append('1I')
+            ping_type = np.array(ping_type)
+        else:
+            # RS5
+            ves = []
+            for n in range(4):
+                ves.append(np.nanmean(expected_std[:, n, :], axis=0))
+
+            ves = np.array(ves)
+
+            ves_avg = np.nanmean(ves, axis=0)
+
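+            # Classify pings by the beam-averaged expected velocity standard deviation (assumed to be in
+            # m/s): low values indicate pulse-coherent (PC) processing, high values broadband (BB), and
+            # intermediate values are flagged as mixed (PC/BB).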
+            ping_type = np.tile(['PC/BB'], ves_avg.size)
+            ping_type[ves_avg < 0.01] = 'PC'
+            ping_type[ves_avg > 0.025] = 'BB'
+
+        return ping_type
+
+    @staticmethod
+    def qrev_mat_in(meas_struct):
+        """Processes the Matlab data structure to obtain a list of TransectData objects containing transect
+           data from the Matlab data structure.
+
+        Parameters
+        ----------
+        meas_struct: mat_struct
+            Matlab data structure obtained from sio.loadmat
+
+        Returns
+        -------
+        transects: list
+            List of TransectData objects
+        """
+
+        transects = []
+        if hasattr(meas_struct, 'transects'):
+            # If only one transect the data are not a list or array of transects
+            try:
+                if len(meas_struct.transects) > 0:
+                    for transect in meas_struct.transects:
+                        trans = TransectData()
+                        trans.populate_from_qrev_mat(transect)
+                        transects.append(trans)
+            except TypeError:
+                trans = TransectData()
+                trans.populate_from_qrev_mat(meas_struct.transects)
+                transects.append(trans)
+
+        return transects
+
+    def populate_from_qrev_mat(self, transect):
+        """Populates the object using data from previously saved QRev Matlab file.
+
+        Parameters
+        ----------
+        transect: mat_struct
+           Matlab data structure obtained from sio.loadmat
+        """
+
+        self.adcp = InstrumentData()
+        self.adcp.populate_from_qrev_mat(transect)
+        self.file_name = os.path.basename(transect.fileName)
+        self.w_vel = WaterData()
+        self.w_vel.populate_from_qrev_mat(transect)
+        self.boat_vel = BoatStructure()
+        self.boat_vel.populate_from_qrev_mat(transect)
+        self.gps = GPSData()
+        self.gps.populate_from_qrev_mat(transect)
+        self.sensors = Sensors()
+        self.sensors.populate_from_qrev_mat(transect)
+        self.depths = DepthStructure()
+        self.depths.populate_from_qrev_mat(transect)
+        self.edges = Edges()
+        self.edges.populate_from_qrev_mat(transect)
+        self.extrap = ExtrapData()
+        self.extrap.populate_from_qrev_mat(transect)
+        self.start_edge = transect.startEdge
+        if hasattr(transect, 'orig_start_edge'):
+            self.orig_start_edge = transect.orig_start_edge
+        else:
+            self.orig_start_edge = transect.startEdge
+        self.date_time = DateTime()
+        self.date_time.populate_from_qrev_mat(transect)
+        self.checked = bool(transect.checked)
+        if type(transect.inTransectIdx) is int:
+            self.in_transect_idx = np.array([transect.inTransectIdx - 1])
+        else:
+            self.in_transect_idx = transect.inTransectIdx.astype(int) - 1
+
+    @staticmethod
+    def valid_frequencies(frequency_in):
+        """Create frequency time series for BT and WT with all valid frequencies.
+
+        Parameters
+        ----------
+        frequency_in: np.array
+            Frequency time series from raw data
+
+        Returns
+        -------
+        frequency_out: np.array
+            Frequency time series with np.nan replaced by valid frequencies
+        """
+
+        # Initialize output
+        frequency_out = np.copy(frequency_in)
+
+        # Check for any invalid data
+        invalid_freq = np.isnan(frequency_in)
+        if np.any(invalid_freq):
+            # Identify the first valid frequency
+            valid = frequency_in[np.logical_not(invalid_freq)][0]
+            # Forward fill for invalid frequencies beyond first valid, backfill until 1st valid
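+            # (e.g., [nan, 3000, nan, 1000, nan] -> [3000, 3000, 3000, 1000, 1000])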
+            for n in range(frequency_in.size):
+                if invalid_freq[n]:
+                    frequency_out[n] = valid
+                else:
+                    valid = frequency_in[n]
+
+        return frequency_out
+
+    @staticmethod
+    def compute_cell_data(pd0):
+        """Computes cell sizes and cell depths from raw TRDI pd0 data, accounting for surface cells
+        (RiverRay and RiverPro), and computes the side lobe cutoff percent and lag effect.
+
+        Parameters
+        ----------
+        pd0: Pd0TRDI
+            Object containing raw TRDI pd0 data
+
+        Returns
+        -------
+        cell_size_all: np.array(float)
+            Cell size for each depth cell and ensemble, in m
+        cell_depth: np.array(float)
+            Depth to the centerline of each depth cell, in m
+        sl_cutoff_per: float
+            Side lobe cutoff, in percent, based on the beam angle
+        sl_lag_effect_m: np.array(float)
+            Lag effect for each ensemble, in m
+        """
+
+        # Number of ensembles
+        num_ens = np.array(pd0.Wt.vel_mps).shape[-1]
+
+        # Retrieve and compute cell information
+        reg_cell_size = pd0.Cfg.ws_cm / 100
+        reg_cell_size[reg_cell_size == 0] = np.nan
+        dist_cell_1_m = pd0.Cfg.dist_bin1_cm / 100
+        num_reg_cells = pd0.Wt.vel_mps.shape[1]
+
+        # Surface cell data accommodate RiverRay and RiverPro. pd0_read sets these
+        # values to nan when reading Rio Grande or StreamPro data
+        no_surf_cells = pd0.Surface.no_cells
+        no_surf_cells[np.isnan(no_surf_cells)] = 0
+        max_surf_cells = np.nanmax(no_surf_cells)
+        surf_cell_size = pd0.Surface.cell_size_cm / 100
+        surf_cell_dist = pd0.Surface.dist_bin1_cm / 100
+
+        # Compute maximum number of cells
+        max_cells = int(max_surf_cells + num_reg_cells)
+
+        # Combine cell size and cell range from transducer for both
+        # surface and regular cells
+        cell_depth = np.tile(np.nan, (max_cells, num_ens))
+        cell_size_all = np.tile(np.nan, (max_cells, num_ens))
+        for i in range(num_ens):
+            # Determine number of cells to be treated as regular cells
+            if np.nanmax(no_surf_cells) > 0:
+
+                num_reg_cells = max_cells - no_surf_cells[i]
+            else:
+                num_reg_cells = max_cells
+
+            # Compute cell depth
+            if no_surf_cells[i] > 1e-5:
+                cell_depth[:int(no_surf_cells[i]), i] = surf_cell_dist[i] + \
+                                                        np.arange(0, (no_surf_cells[i] - 1) * surf_cell_size[i] + 0.001,
+                                                                  surf_cell_size[i])
+                cell_depth[int(no_surf_cells[i]):, i] = cell_depth[int(no_surf_cells[i] - 1), i] \
+                                                        + (.5 * surf_cell_size[i] + 0.5 * reg_cell_size[i]) \
+                                                        + np.arange(0, (num_reg_cells - 1) * reg_cell_size[i] + 0.001,
+                                                                    reg_cell_size[i])
+                cell_size_all[0:int(no_surf_cells[i]), i] = np.repeat(surf_cell_size[i], int(no_surf_cells[i]))
+                cell_size_all[int(no_surf_cells[i]):, i] = np.repeat(reg_cell_size[i], int(num_reg_cells))
+            else:
+                cell_depth[:int(num_reg_cells), i] = dist_cell_1_m[i] + \
+                                                     np.linspace(0, int(num_reg_cells) - 1,
+                                                                 int(num_reg_cells)) * reg_cell_size[i]
+                cell_size_all[:, i] = np.repeat(reg_cell_size[i], num_reg_cells)
+
+        # Firmware is used to ID RiverRay data with variable modes and lags
+        firmware = str(pd0.Inst.firm_ver[0])
+
+        # Compute sl_lag_effect
+        lag = pd0.Cfg.lag_cm / 100
+        if firmware[0:2] == '44' or firmware[0:2] == '56':
+            lag_near_bottom = np.array(pd0.Cfg.lag_near_bottom)
+            lag_near_bottom[np.isnan(lag_near_bottom)] = 0
+            lag[lag_near_bottom != 0] = 0
+
+        pulse_len = pd0.Cfg.xmit_pulse_cm / 100
+        sl_lag_effect_m = (lag + pulse_len + reg_cell_size) / 2
+        sl_cutoff_per = (1 - (cosd(pd0.Inst.beam_ang[0]))) * 100
+
+        return cell_size_all, cell_depth, sl_cutoff_per, sl_lag_effect_m
+
+    def change_q_ensembles(self, proc_method):
+        """Sets in_transect_idx to all ensembles, except in the case of SonTek data
+        where RSL processing is applied.
+
+        Parameters
+        ----------
+        proc_method: str
+            Processing method (WR2, RSL, QRev)
+        """
+
+        if proc_method == 'RSL':
+            num_ens = self.boat_vel.bt_vel.u_processed_mps.shape[0]
+            # Determine number of ensembles for each edge
+            if self.start_edge == 'Right':
+                self.in_transect_idx = np.arange(self.edges.right.num_ens_2_avg,
+                                                 num_ens - self.edges.left.num_ens_2_avg)
+            else:
+                self.in_transect_idx = np.arange(self.edges.left.num_ens_2_avg,
+                                                 num_ens - self.edges.right.num_ens_2_avg)
+        else:
+            self.in_transect_idx = np.arange(0, self.boat_vel.bt_vel.u_processed_mps.shape[0])
+
+    def change_coord_sys(self, new_coord_sys):
+        """Changes the coordinate system of the water and boat data.
+
+        Current implementation only allows changes for original to higher order coordinate
+        systems: Beam - Inst - Ship - Earth.
+
+        Parameters
+        ----------
+        new_coord_sys: str
+            Name of new coordinate system (Beam, Inst, Ship, Earth)
+        """
+        self.w_vel.change_coord_sys(new_coord_sys, self.sensors, self.adcp)
+        self.boat_vel.change_coord_sys(new_coord_sys, self.sensors, self.adcp)
+
+    def change_nav_reference(self, update, new_nav_ref):
+        """Method to set the navigation reference for the water data.
+
+        Parameters
+        ----------
+        update: bool
+            Setting to determine if water data should be updated.
+        new_nav_ref: str
+            New navigation reference (bt_vel, gga_vel, vtg_vel)
+        """
+
+        self.boat_vel.change_nav_reference(reference=new_nav_ref, transect=self)
+
+        if update:
+            self.update_water()
+
+    def change_mag_var(self, magvar):
+        """Change magnetic variation.
+
+        Parameters
+        ----------
+        magvar: float
+            Magnetic variation in degrees.
+        """
+
+        # Update object
+        if self.sensors.heading_deg.external is not None:
+            self.sensors.heading_deg.external.set_mag_var(magvar, 'external')
+
+        if self.sensors.heading_deg.selected == 'internal':
+            heading_selected = getattr(self.sensors.heading_deg, self.sensors.heading_deg.selected)
+            old_magvar = heading_selected.mag_var_deg
+            magvar_change = magvar - old_magvar
+            heading_selected.set_mag_var(magvar, 'internal')
+            self.boat_vel.bt_vel.change_heading(magvar_change)
+            self.w_vel.change_heading(self.boat_vel, magvar_change)
+        else:
+            self.sensors.heading_deg.internal.set_mag_var(magvar, 'internal')
+
+        # self.update_water()
+
+    def change_offset(self, h_offset):
+        """Change the heading offset (alignment correction). Only affects external heading.
+
+        Parameters
+        ----------
+        h_offset: float
+            Heading offset in degrees
+        """
+        self.sensors.heading_deg.internal.set_align_correction(h_offset, 'internal')
+
+        if self.sensors.heading_deg.selected == 'external':
+            old = getattr(self.sensors.heading_deg, self.sensors.heading_deg.selected)
+            old_offset = old.align_correction_deg
+            offset_change = h_offset - old_offset
+            self.boat_vel.bt_vel.change_heading(offset_change)
+            self.w_vel.change_heading(self.boat_vel, offset_change)
+
+        if self.sensors.heading_deg.external is not None:
+            self.sensors.heading_deg.external.set_align_correction(h_offset, 'external')
+
+        self.update_water()
+
+    def change_heading_source(self, h_source):
+        """Changes heading source (internal or external).
+
+        Parameters
+        ----------
+        h_source: str
+            Heading source (internal or external or user)
+        """
+
+        # If source is user, check to see if it was created, if not create it
+        if h_source == 'user':
+            if self.sensors.heading_deg.user is None:
+                self.sensors.heading_deg.user = HeadingData()
+                self.sensors.heading_deg.user.populate_data(data_in=np.zeros(
+                    self.boat_vel.bt_vel.u_processed_mps.shape),
+                    source_in='user',
+                    magvar=0,
+                    align=0)
+
+        # Get new heading object
+        new_heading_selection = getattr(self.sensors.heading_deg, h_source)
+
+        # Change source to that requested
+        if h_source is not None:
+            old_heading_selection = getattr(self.sensors.heading_deg, self.sensors.heading_deg.selected)
+            old_heading = old_heading_selection.data
+            new_heading = new_heading_selection.data
+            heading_change = new_heading - old_heading
+            self.sensors.heading_deg.set_selected(h_source)
+            self.boat_vel.bt_vel.change_heading(heading_change)
+            self.w_vel.change_heading(self.boat_vel, heading_change)
+
+        self.update_water()
+
+    def update_water(self):
+        """Method called from set_nav_reference, boat_interpolation and boat filters
+        to ensure that changes in boatvel are reflected in the water data"""
+
+        self.w_vel.set_nav_reference(self.boat_vel)
+
+        # Reapply water filters and interpolations
+        # Note wt_filters calls apply_filter which automatically calls
+        # apply_interpolation so both filters and interpolations
+        # are applied with this one call
+
+        self.w_vel.apply_filter(transect=self)
+        self.w_vel.apply_interpolation(transect=self)
+
+    @staticmethod
+    def side_lobe_cutoff(depths, draft, cell_depth, sl_lag_effect, slc_type='Percent', value=None):
+        """Computes side lobe cutoff.
+
+        The side lobe cutoff is based on the beam angle and is computed to
+        ensure that the bin and any lag beyond the actual bin cutoff is
+        above the side lobe cutoff.
+
+        Parameters
+        ----------
+        depths: np.array
+            Bottom track (all 4 beams) and vertical beam depths for each ensemble, in m.
+        draft: float
+            Draft of transducers, in m.
+        cell_depth: np.array
+            Depth to the centerline of each depth cell, in m.
+        sl_lag_effect: np.array
+            The extra depth below the last depth cell that must be above the side lobe cutoff, in m.
+        slc_type: str
+            Method used for side lobe cutoff computation.
+        value: float
+            Value used in specified method to use for side lobe cutoff computation.
+
+        Returns
+        -------
+        cells_above_sl: np.array(bool)
+            Boolean array identifying depth cells above the side lobe cutoff.
+        cutoff: np.array(float)
+            Depth of the side lobe cutoff for each ensemble, in m.
+        """
+
+        # Compute minimum depths for each ensemble
+        with warnings.catch_warnings():
+            warnings.simplefilter("ignore", category=RuntimeWarning)
+            min_depths = np.nanmin(depths, 0)
+
+        # Compute range from transducer
+        range_from_xducer = min_depths - draft
+
+        # Adjust for transducer angle
+        coeff = None
+        if slc_type == 'Percent':
+            coeff = value
+        elif slc_type == 'Angle':
+            coeff = np.cos(np.deg2rad(value))
+
+        # Compute sidelobe cutoff to centerline
+        cutoff = np.array(range_from_xducer * coeff - sl_lag_effect + draft)
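+        # (e.g., for the Percent method with a 20 deg beam angle the caller passes value = cos(20 deg) ~ 0.94,
+        # so an ensemble with a 10 m range from the transducer keeps cells whose centerline is shallower than
+        # 0.94 * 10 m, less the lag allowance, plus the draft)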
+
+        # Compute boolean side lobe cutoff matrix
+        cells_above_sl = nan_less(cell_depth, cutoff)
+        return cells_above_sl, cutoff
+
+    def boat_interpolations(self, update, target, method=None):
+        """Coordinates boat velocity interpolations.
+
+        Parameters
+        ----------
+        update: bool
+            Setting to control if water data are updated.
+        target: str
+            Boat velocity reference (BT or GPS)
+        method: str
+            Type of interpolation
+        """
+
+        # Interpolate bottom track data
+        if target == 'BT':
+            self.boat_vel.bt_vel.apply_interpolation(transect=self, interpolation_method=method)
+
+        if target == 'GPS':
+            # Interpolate GGA data
+            if self.boat_vel.gga_vel is not None:
+                self.boat_vel.gga_vel.apply_interpolation(transect=self, interpolation_method=method)
+            # Interpolate VTG data
+            if self.boat_vel.vtg_vel is not None:
+                self.boat_vel.vtg_vel.apply_interpolation(transect=self, interpolation_method=method)
+
+        # Apply composite tracks setting
+        self.composite_tracks(update=False)
+
+        # Update water to reflect changes in boat_vel
+        if update:
+            self.update_water()
+
+    def composite_tracks(self, update, setting=None):
+        """Coordinate application of composite tracks.
+
+        Parameters
+        ----------
+        update: bool
+            Setting to control if water data are updated.
+        setting: str
+            Sets composite tracks ("On" or "Off").
+        """
+
+        # Determine if setting is specified
+        if setting is None:
+            # Process transect using saved setting
+            self.boat_vel.composite_tracks(transect=self)
+        else:
+            # Process transect using new setting
+            self.boat_vel.composite_tracks(transect=self, setting=setting)
+
+        # Update water data to reflect changes in boatvel
+        if update:
+            self.update_water()
+
+    def boat_filters(self, update, **kwargs):
+        """Coordinates application of boat filters to bottom track data
+
+        Parameters
+        ----------
+        update: bool
+            Setting to control if water data are updated.
+        **kwargs: dict
+            beam: int
+                Setting for beam filter (3, 4, -1)
+            difference: str
+                Setting for difference velocity filter (Auto, Manual, Off)
+            difference_threshold: float
+                Threshold for manual setting
+            vertical: str
+                Setting for vertical velocity filter (Auto, Manual, Off)
+            vertical_threshold: float
+                Threshold for manual setting
+            other: bool
+                Setting for other filter
+        """
+
+        # Apply filter to transect
+        self.boat_vel.bt_vel.apply_filter(self, **kwargs)
+
+        if self.boat_vel.selected == 'bt_vel' and update:
+            self.update_water()
+
+    def gps_filters(self, update, **kwargs):
+        """Coordinate filters for GPS based boat velocities
+
+        Parameters
+        ----------
+        update: bool
+            Setting to control if water data are updated.
+        **kwargs: dict
+            differential: str
+                Differential filter setting (1, 2, 4)
+            altitude: str
+                New setting for altitude filter (Off, Manual, Auto)
+            altitude_threshold: float
+                Threshold provide by user for manual altitude setting
+            hdop: str
+                Filter setting (On, off, Auto)
+            hdop_max_threshold: float
+                Maximum HDOP threshold
+            hdop_change_threshold: float
+                HDOP change threshold
+            other: bool
+                Other filter, typically a smooth.
+        """
+
+        if self.boat_vel.gga_vel is not None:
+            self.boat_vel.gga_vel.apply_gps_filter(self, **kwargs)
+        if self.boat_vel.vtg_vel is not None:
+            self.boat_vel.vtg_vel.apply_gps_filter(self, **kwargs)
+
+        if (self.boat_vel.selected == 'vtg_vel' or self.boat_vel.selected == 'gga_vel') and update:
+            self.update_water()
+
+    def set_depth_reference(self, update, setting):
+        """Coordinates setting the depth reference.
+
+        Parameters
+        ----------
+        update: bool
+            Determines if associated data should be updated
+        setting: str
+            Depth reference (bt_depths, vb_depths, ds_depths)
+        """
+
+        self.depths.selected = setting
+
+        if update:
+            self.process_depths(update)
+            self.w_vel.adjust_side_lobe(self)
+
+    def apply_averaging_method(self, setting):
+        """Method to apply the selected averaging method to the BT beam depths to achieve a single
+        average depth. It is only applicable to the multiple beams used for BT, not VB or DS.
+
+        Parameters
+        ----------
+        setting: str
+            Averaging method (IDW, Simple)
+        """
+
+        self.depths.bt_depths.compute_avg_bt_depth(setting)
+
+        self.process_depths(update=False)
+
+    def process_depths(self, update=False, filter_method=None, interpolation_method=None, composite_setting=None,
+                       avg_method=None, valid_method=None):
+        """Method applies filter, composite, and interpolation settings to  depth objects
+        so that all are updated using the same filter and interpolation settings.
+
+        Parameters
+        ----------
+        update: bool
+            Determines if water data should be updated.
+        filter_method: str
+            Filter method to be used (None, Smooth, TRDI).
+        interpolation_method: str
+            Interpolation method to be used (None, HoldLast, Smooth, Linear).
+        composite_setting: str
+            Specifies use of composite depths ("On" or "Off").
+        avg_method: str
+            Defines averaging method: "Simple", "IDW", only applicable to bottom track.
+        valid_method:
+            Defines method to determine if depth is valid (QRev or TRDI).
+        """
+
+        # Get current settings
+        depth_data = getattr(self.depths, self.depths.selected)
+        if filter_method is None:
+            filter_method = depth_data.filter_type
+
+        if interpolation_method is None:
+            interpolation_method = depth_data.interp_type
+
+        if composite_setting is None:
+            composite_setting = self.depths.composite
+
+        if avg_method is None:
+            avg_method = self.depths.bt_depths.avg_method
+
+        if valid_method is None:
+            valid_method = self.depths.bt_depths.valid_data_method
+
+        self.depths.bt_depths.valid_data_method = valid_method
+        self.depths.bt_depths.avg_method = avg_method
+        self.depths.depth_filter(transect=self, filter_method=filter_method)
+        self.depths.depth_interpolation(transect=self, method=interpolation_method)
+        self.depths.composite_depths(transect=self, setting=composite_setting)
+        self.w_vel.adjust_side_lobe(transect=self)
+
+        if update:
+            self.update_water()
+
+    def change_draft(self, draft_in):
+        """Changes the draft for the specified transects and selected depth.
+
+        Parameters
+        ----------
+        draft_in: float
+            New draft value in m
+        """
+
+        if self.depths.vb_depths is not None:
+            self.depths.vb_depths.change_draft(draft_in)
+        if self.depths.bt_depths is not None:
+            self.depths.bt_depths.change_draft(draft_in)
+
+    def change_sos(self, parameter=None, salinity=None, temperature=None, selected=None, speed=None):
+        """Coordinates changing the speed of sound.
+
+        Parameters
+        ----------
+        parameter: str
+            Speed of sound parameter to be changed ('temperatureSrc', 'temperature', 'salinity', 'sosSrc')
+        salinity: float
+            Salinity in ppt
+        temperature: float
+            Temperature in deg C
+        selected: str
+            Selected speed of sound ('internal', 'computed', 'user') or temperature ('internal', 'user')
+        speed: float
+            Manually supplied speed of sound for 'user' source
+        """
+
+        if parameter == 'temperatureSrc':
+
+            temperature_internal = getattr(self.sensors.temperature_deg_c, 'internal')
+            if selected == 'user':
+                if self.sensors.temperature_deg_c.user is None:
+                    self.sensors.temperature_deg_c.user = SensorData()
+                ens_temperature = np.tile(temperature, temperature_internal.data.shape)
+
+                self.sensors.temperature_deg_c.user.change_data(data_in=ens_temperature)
+                self.sensors.temperature_deg_c.user.set_source(source_in='Manual Input')
+
+            # Set the temperature data to the selected source
+            self.sensors.temperature_deg_c.set_selected(selected_name=selected)
+            # Update the speed of sound
+            self.update_sos()
+
+        elif parameter == 'temperature':
+            adcp_temp = self.sensors.temperature_deg_c.internal.data
+            new_user_temperature = np.tile(temperature, adcp_temp.shape)
+            self.sensors.temperature_deg_c.user.change_data(data_in=new_user_temperature)
+            self.sensors.temperature_deg_c.user.set_source(source_in='Manual Input')
+            # Set the temperature data to the selected source
+            self.sensors.temperature_deg_c.set_selected(selected_name='user')
+            # Update the speed of sound
+            self.update_sos()
+
+        elif parameter == 'salinity':
+            if salinity is not None:
+                self.sensors.salinity_ppt.user.change_data(data_in=salinity)
+                salinity_internal = self.sensors.salinity_ppt.internal.data
+                if np.all(np.equal(self.sensors.salinity_ppt.user.data, salinity_internal)):
+                    self.sensors.salinity_ppt.set_selected(selected_name='internal')
+                else:
+                    self.sensors.salinity_ppt.set_selected(selected_name='user')
+                self.update_sos()
+
+        elif parameter == 'sosSrc':
+            if selected == 'internal':
+                self.update_sos()
+            elif selected == 'user':
+                self.update_sos(speed=speed, selected='user', source='Manual Input')
+
+    def update_sos(self, selected=None, source=None, speed=None):
+        """Sets a new specified speed of sound.
+
+        Parameters
+        ----------
+        self: obj
+            Object of TransectData
+        selected: str
+             Selected speed of sound ('internal', 'computed', 'user')
+        source: str
+            Source of speed of sound (Computed, Calculated, Manual Input)
+        speed: float
+            Manually supplied speed of sound for 'user' source
+        """
+
+        # Get current speed of sound
+        sos_selected = getattr(self.sensors.speed_of_sound_mps, self.sensors.speed_of_sound_mps.selected)
+        old_sos = sos_selected.data
+        new_sos = None
+
+        # Manual input for speed of sound
+        if selected == 'user' and source == 'Manual Input':
+            self.sensors.speed_of_sound_mps.set_selected(selected_name=selected)
+            self.sensors.speed_of_sound_mps.user = SensorData()
+            self.sensors.speed_of_sound_mps.user.populate_data(speed, source)
+
+        # If called with no input set source to internal and determine whether computed or calculated based on
+        # availability of user supplied temperature or salinity
+        elif selected is None and source is None:
+            self.sensors.speed_of_sound_mps.set_selected('internal')
+            # If temperature or salinity is set by the user the speed of sound is computed; otherwise it is
+            # considered calculated by the ADCP.
+            if (self.sensors.temperature_deg_c.selected == 'user') or (self.sensors.salinity_ppt.selected == 'user'):
+                self.sensors.speed_of_sound_mps.internal.set_source('Computed')
+            else:
+                self.sensors.speed_of_sound_mps.internal.set_source('Calculated')
+
+        # Determine new speed of sound
+        if self.sensors.speed_of_sound_mps.selected == 'internal':
+
+            if self.sensors.speed_of_sound_mps.internal.source == 'Calculated':
+                # Internal: Calculated
+                new_sos = self.sensors.speed_of_sound_mps.internal.data_orig
+                self.sensors.speed_of_sound_mps.internal.change_data(data_in=new_sos)
+                # Change temperature and salinity selected to internal
+                self.sensors.temperature_deg_c.set_selected(selected_name='internal')
+                self.sensors.salinity_ppt.set_selected(selected_name='internal')
+            else:
+                # Internal: Computed
+                temperature_selected = getattr(self.sensors.temperature_deg_c, self.sensors.temperature_deg_c.selected)
+                temperature = temperature_selected.data
+                salinity_selected = getattr(self.sensors.salinity_ppt, self.sensors.salinity_ppt.selected)
+                salinity = salinity_selected.data
+                new_sos = Sensors.speed_of_sound(temperature=temperature, salinity=salinity)
+                self.sensors.speed_of_sound_mps.internal.change_data(data_in=new_sos)
+        else:
+            if speed is not None:
+                new_sos = np.tile(speed, len(self.sensors.speed_of_sound_mps.internal.data_orig))
+                self.sensors.speed_of_sound_mps.user.change_data(data_in=new_sos)
+
+        self.apply_sos_change(old_sos=old_sos, new_sos=new_sos)
+
+    def apply_sos_change(self, old_sos, new_sos):
+        """Computes the ratio and calls methods in WaterData and BoatData to apply change.
+
+        Parameters
+        ----------
+        old_sos: float
+            Speed of sound on which the current data are based, in m/s
+        new_sos: float
+            Speed of sound on which the data need to be based, in m/s
+        """
+
+        ratio = new_sos / old_sos
+
+        # RiverRay horizontal velocities are not affected by changes in speed of sound
+        if self.adcp.model != 'RiverRay':
+            # Apply speed of sound change to water and boat data
+            self.w_vel.sos_correction(ratio=ratio)
+            self.boat_vel.bt_vel.sos_correction(ratio=ratio)
+        # Correct depths
+        self.depths.sos_correction(ratio=ratio)
+
+    @staticmethod
+    def raw_valid_data(transect):
+        """Determines ensembles and cells with no interpolated water or boat data.
+
+        For valid water track cells both non-interpolated valid water data and
+        boat velocity data must be available. Interpolated depths are allowed.
+
+        For valid ensembles water, boat, and depth data must all be non-interpolated.
+
+        Parameters
+        ----------
+        transect: TransectData
+            Object of TransectData
+
+        Returns
+        -------
+        raw_valid_ens: np.array(bool)
+            Boolean array identifying raw valid ensembles.
+        raw_valid_depth_cells: np.array(bool)
+            Boolean array identifying raw valid depth cells.
+        """
+
+        in_transect_idx = transect.in_transect_idx
+
+        # Determine valid water track ensembles based on water track and navigation data.
+        boat_vel_select = getattr(transect.boat_vel, transect.boat_vel.selected)
+        if boat_vel_select is not None and np.nansum(np.logical_not(np.isnan(boat_vel_select.u_processed_mps))) > 0:
+            valid_nav = boat_vel_select.valid_data[0, in_transect_idx]
+        else:
+            valid_nav = np.tile(False, in_transect_idx.shape[0])
+
+        valid_wt = np.copy(transect.w_vel.valid_data[0, :, in_transect_idx])
+        valid_wt_ens = np.any(valid_wt, 1)
+
+        # Determine valid depths
+        depths_select = getattr(transect.depths, transect.depths.selected)
+        if transect.depths.composite:
+            valid_depth = np.tile(True, (depths_select.depth_source_ens.shape[0]))
+            idx_na = np.where(depths_select.depth_source_ens[in_transect_idx] == 'NA')[0]
+            if len(idx_na) > 0:
+                valid_depth[idx_na] = False
+            interpolated_depth_idx = np.where(depths_select.depth_source_ens[in_transect_idx] == 'IN')[0]
+            if len(interpolated_depth_idx) > 0:
+                valid_depth[interpolated_depth_idx] = False
+        else:
+            valid_depth = depths_select.valid_data[in_transect_idx]
+            idx = np.where(np.isnan(depths_select.depth_processed_m[in_transect_idx]))[0]
+            if len(idx) > 0:
+                valid_depth[idx] = False
+
+        # Determine valid ensembles based on all data
+        valid_ens = np.all(np.vstack((valid_nav, valid_wt_ens, valid_depth)), 0)
+
+        return valid_ens, valid_wt.T
+
+    @staticmethod
+    def compute_gps_lag(transect):
+        """Computes the lag between bottom track and GGA and/or VTG using an autocorrelation method.
+
+        Parameters
+        ----------
+        transect: TransectData
+            Object of TransectData
+
+        Returns
+        -------
+        lag_gga: float
+            Lag in seconds between bottom track and gga
+        lag_vtg: float
+            Lag in seconds between bottom track and vtg
+        """
+
+        # Initialize returns
+        lag_gga = None
+        lag_vtg = None
+
+        bt_speed = np.sqrt(transect.boat_vel.bt_vel.u_processed_mps ** 2
+                           + transect.boat_vel.bt_vel.v_processed_mps ** 2)
+
+        avg_ens_dur = np.nanmean(transect.date_time.ens_duration_sec)
+
+        # Compute lag for gga, if available
+        if transect.boat_vel.gga_vel is not None:
+            gga_speed = np.sqrt(transect.boat_vel.gga_vel.u_processed_mps ** 2
+                                + transect.boat_vel.gga_vel.v_processed_mps ** 2)
+
+            # Compute lag if both bottom track and gga have valid data
+            valid_data = np.all(np.logical_not(np.isnan(np.vstack((bt_speed, gga_speed)))), axis=0)
+            if np.any(valid_data):
+                # Compute lag
+                lag_gga = (np.count_nonzero(valid_data)
+                           - np.argmax(signal.correlate(bt_speed[valid_data], gga_speed[valid_data])) - 1) * avg_ens_dur
+            else:
+                lag_gga = None
+
+        # Compute lag for vtg, if available
+        if transect.boat_vel.vtg_vel is not None:
+            vtg_speed = np.sqrt(transect.boat_vel.vtg_vel.u_processed_mps ** 2
+                                + transect.boat_vel.vtg_vel.v_processed_mps ** 2)
+
+            # Compute lag if both bottom track and vtg have valid data
+            valid_data = np.all(np.logical_not(np.isnan(np.vstack((bt_speed, vtg_speed)))), axis=0)
+            if np.any(valid_data):
+                # Compute lag
+                lag_vtg = (np.count_nonzero(valid_data)
+                           - np.argmax(signal.correlate(bt_speed[valid_data], vtg_speed[valid_data])) - 1) * avg_ens_dur
+            else:
+                lag_vtg = None
+
+        return lag_gga, lag_vtg
+
+    @staticmethod
+    def compute_gps_lag_fft(transect):
+        """Computes the lag between bottom track and GGA and/or VTG using fft method.
+
+        Parameters
+        ----------
+        transect: TransectData
+            Object of TransectData
+
+        Returns
+        -------
+        lag_gga: float
+            Lag in ensembles between bottom track and gga
+        lag_vtg: float
+            Lag in ensembles between bottom track and vtg
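+
+        Notes
+        -----
+        The method relies on the identity that circular cross-correlation is
+        ifft(conj(fft(a)) * fft(b)). A minimal sketch with illustrative values:
+
+        >>> import numpy as np
+        >>> from scipy import fftpack
+        >>> a = np.array([1., 0., 0., 0.])
+        >>> b = np.roll(a, 1)  # b lags a by one sample
+        >>> int(np.argmax(np.abs(fftpack.ifft(fftpack.fft(a).conjugate() * fftpack.fft(b)))))
+        1
+
+        Because the correlation is circular, a lag near the series length is
+        indistinguishable from a small negative lag.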
+        """
+        lag_gga = None
+        lag_vtg = None
+
+        bt_speed = np.sqrt(transect.boat_vel.bt_vel.u_processed_mps ** 2
+                           + transect.boat_vel.bt_vel.v_processed_mps ** 2)
+
+        avg_ens_dur = np.nanmean(transect.date_time.ens_duration_sec)
+        if transect.boat_vel.gga_vel is not None:
+            gga_speed = np.sqrt(transect.boat_vel.gga_vel.u_processed_mps ** 2
+                                + transect.boat_vel.gga_vel.v_processed_mps ** 2)
+            valid_data = np.all(np.logical_not(np.isnan(np.vstack((bt_speed, gga_speed)))), axis=0)
+            b = fftpack.fft(bt_speed[valid_data])
+            g = fftpack.fft(gga_speed[valid_data])
+            br = b.conjugate()
+            lag_gga = np.argmax(np.abs(fftpack.ifft(br * g)))
+
+        if transect.boat_vel.vtg_vel is not None:
+            vtg_speed = np.sqrt(transect.boat_vel.vtg_vel.u_processed_mps ** 2
+                                + transect.boat_vel.vtg_vel.v_processed_mps ** 2)
+            valid_data = np.all(np.logical_not(np.isnan(np.vstack((bt_speed, vtg_speed)))), axis=0)
+            b = fftpack.fft(bt_speed[valid_data])
+            g = fftpack.fft(vtg_speed[valid_data])
+            br = b.conjugate()
+            lag_vtg = np.argmax(np.abs(fftpack.ifft(br * g)))
+
+        return lag_gga, lag_vtg
+
+    @staticmethod
+    def compute_gps_bt(transect, gps_ref='gga_vel'):
+        """Computes properties describing the difference between bottom track and the specified GPS reference.
+
+        Parameters
+        ----------
+        transect: TransectData
+            Object of TransectData
+        gps_ref: str
+            GPS reference to be used in the computation (gga_vel or vtg_vel)
+
+        Returns
+        -------
+        gps_bt: dict
+            course: float
+                Difference in course computed from gps and bt, in degrees
+            ratio: float
+                Ratio of final distance made good for bt and gps (bt dmg / gps dmg)
+            dir: float
+                Direction of vector from end of GPS track to end of bottom track
+            mag: float
+                Length of vector from end of GPS track to end of bottom track
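+
+        Notes
+        -----
+        The course difference is normalized to the range [0, 360). For example,
+        a GPS course of 10 degrees and a bottom track course of 350 degrees
+        gives 10 - 350 = -340, which is reported as 20 degrees.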
+        """
+
+        gps_bt = dict()
+        gps_vel = getattr(transect.boat_vel, gps_ref)
+        if gps_vel is not None and \
+                1 < np.sum(np.logical_not(np.isnan(gps_vel.u_processed_mps))) and \
+                1 < np.sum(np.logical_not(np.isnan(transect.boat_vel.bt_vel.u_processed_mps))):
+            # Data prep
+            bt_track = BoatStructure.compute_boat_track(transect, ref='bt_vel')
+
+            try:
+                bt_course, _ = cart2pol(bt_track['track_x_m'][-1], bt_track['track_y_m'][-1])
+                bt_course = rad2azdeg(bt_course)
+            except TypeError:
+                bt_course = np.nan
+
+            gps_track = BoatStructure.compute_boat_track(transect, ref=gps_ref)
+            gps_course, _ = cart2pol(gps_track['track_x_m'][-1], gps_track['track_y_m'][-1])
+            gps_course = rad2azdeg(gps_course)
+
+            # Compute course
+            gps_bt['course'] = gps_course - bt_course
+            if gps_bt['course'] < 0:
+                gps_bt['course'] = gps_bt['course'] + 360
+
+            # Compute ratio
+            try:
+                gps_bt['ratio'] = bt_track['dmg_m'][-1] / gps_track['dmg_m'][-1]
+            except TypeError:
+                gps_bt['ratio'] = np.nan
+
+            # Compute closure vector
+            try:
+                x_diff = bt_track['track_x_m'][-1] - gps_track['track_x_m'][-1]
+            except TypeError:
+                x_diff = np.nan
+
+            try:
+                y_diff = bt_track['track_y_m'][-1] - gps_track['track_y_m'][-1]
+            except TypeError:
+                y_diff = np.nan
+
+            try:
+                gps_bt['dir'], gps_bt['mag'] = cart2pol(x_diff, y_diff)
+                gps_bt['dir'] = rad2azdeg(gps_bt['dir'])
+            except TypeError:
+                gps_bt['dir'] = np.nan
+                gps_bt['mag'] = np.nan
+
+        return gps_bt
+
+
+# ========================================================================
+# Begin multithread function included in module but not TransectData class
+# Currently this is coded only for TRDI data
+# ========================================================================
+
+
+# DSM changed 1/23/2018 def allocate_transects(source, mmt, kargs)
+# TODO: This needs a complete rewrite of Greg's approach; however, it works without multi-threading for now.
+
+# def add_transect(mmt, filename, index, type):
+#     pd0_data = Pd0TRDI(filename)
+#
+#     if type == 'MB':
+#         mmt_transect = mmt.mbt_transects[index]
+#     else:
+#         mmt_transect = mmt.transects[index]
+#
+#     transect = TransectData()
+#     transect.trdi(mmt=mmt,
+#                   mmt_transect=mmt_transect,
+#                   pd0_data=pd0_data)
+#     return transect
+#
+#
+# def allocate_transects(mmt, transect_type='Q', checked=False):
+#     """Method to load transect data. Changed from Matlab approach by Greg to allow possibility
+#     of multi-thread approach.
+#
+#     Parameters
+#     ----------
+#     mmt: MMT_TRDI
+#         Object of MMT_TRDI
+#     transect_type: str
+#         Type of transect (Q: discharge or MB: moving-bed test)
+#     checked: bool
+#         Determines if all files are loaded (False) or only checked files (True)
+#     """
+#
+#     # DEBUG, set threaded to false to get manual serial commands
+#     multi_threaded = False
+#
+#     file_names = []
+#     file_idx = []
+#
+#     # Setup processing for discharge or moving-bed transects
+#     if transect_type == 'Q':
+#         # Identify discharge transect files to load
+#         if checked:
+#             for idx, transect in enumerate(mmt.transects):
+#                 if transect.Checked == 1:
+#                     file_names.append(transect.Files[0])
+#                     file_idx.append(idx)
+#             # file_names = [transect.Files[0] for transect in mmt.transects if transect.Checked == 1]
+#         else:
+#             file_names = [transect.Files[0] for transect in mmt.transects]
+#             file_idx = list(range(0, len(file_names)))
+#     elif transect_type == 'MB':
+#         file_names = [transect.Files[0] for transect in mmt.mbt_transects]
+#         file_idx = list(range(0, len(file_names)))
+#
+#     # Determine if any files are missing
+#     valid_files = []
+#     valid_indices = []
+#     for index, name in enumerate(file_names):
+#         fullname = os.path.join(mmt.path, name)
+#         if os.path.exists(fullname):
+#             valid_files.append(fullname)
+#             valid_indices.append(file_idx[index])
+#
+#
+#     start = time.perf_counter()
+#     transects = []
+#     num = len(valid_indices)
+#     # num = 1
+#     multi_process = True
+#     if multi_process:
+#         with concurrent.futures.ProcessPoolExecutor() as executor:
+#             results = [executor.submit(add_transect, mmt, valid_files[k], valid_indices[k], transect_type) for k in range(num)]
+#
+#         for f in concurrent.futures.as_completed(results):
+#             transects.append(f.result())
+#     else:
+#         for k in range(num):
+#             transects.append(add_transect(mmt, valid_files[k], valid_indices[k], transect_type))
+#
+#     # # Multi-thread for Pd0 files
+#     # # -------------------------
+#     # # Seems like this section belongs in Pd0TRDI.py
+#     # # Initialize thread variables
+#     # pd0_data = []
+#     # pd0_threads = []
+#     # thread_id = 0
+#     #
+#     # # DSM 1/24/2018 could this be moved to Pd0TRDI.py as a method
+#     # def add_pd0(file_name):
+#     #     pd0_data.append(Pd0TRDI(file_name))
+#     #
+#     # if multi_threaded:
+#     #     # TODO this belongs in the pd0 class
+#     #     for file in valid_files:
+#     #         pd0_thread = MultiThread(thread_id=thread_id, function=add_pd0, args={'file_name': file})
+#     #         thread_id += 1
+#     #         pd0_thread.start()
+#     #         pd0_threads.append(pd0_thread)
+#     # else:
+#     #     for file in valid_files:
+#     #         pd0_data.append(Pd0TRDI(file))
+#     #
+#     # for thrd in pd0_threads:
+#     #     thrd.join()
+#     #
+#     # # Multi-thread for transect data
+#     #
+#     # # Initialize thread variables
+#     # processed_transects = []
+#     # transect_threads = []
+#     # thread_id = 0
+#     #
+#     # # DSM 1/24/2018 couldn't this be added to the TransectData class
+#     # def add_transect(transect_data, mmt_transect, mt_pd0_data, mt_mmt):
+#     #     transect_data.trdi(mmt=mt_mmt,
+#     #                        mmt_transect=mmt_transect,
+#     #                        pd0_data=mt_pd0_data)
+#     #     processed_transects.append(transect_data)
+#     #
+#     # # Process each transect
+#     # for k in range(len(pd0_data)):
+#     #     transect = TransectData()
+#     #     if pd0_data[k].Wt is not None:
+#     #         if transect_type == 'MB':
+#     #             # Process moving-bed transect
+#     #             if multi_threaded:
+#     #                 t_thread = MultiThread(thread_id=thread_id,
+#     #                                        function=add_transect,
+#     #                                        args={'transect': transect,
+#     #                                              'mmt_transect': mmt.mbt_transects[valid_indices[k]],
+#     #                                              'mt_pd0_data': pd0_data[k],
+#     #                                              'mt_mmt': mmt})
+#     #                 t_thread.start()
+#     #                 transect_threads.append(t_thread)
+#     #
+#     #             else:
+#     #                 transect = TransectData()
+#     #                 add_transect(transect_data=transect,
+#     #                              mmt_transect=mmt.mbt_transects[valid_indices[k]],
+#     #                              mt_pd0_data=pd0_data[k],
+#     #                              mt_mmt=mmt)
+#     #
+#     #         else:
+#     #             # Process discharge transects
+#     #             if multi_threaded:
+#     #                 t_thread = MultiThread(thread_id=thread_id,
+#     #                                        function=add_transect,
+#     #                                        args={'transect': transect,
+#     #                                              'mmt_transect': mmt.transects[valid_indices[k]],
+#     #                                              'mt_pd0_data': pd0_data[k],
+#     #                                              'mt_mmt': mmt})
+#     #                 t_thread.start()
+#     #                 transect_threads.append(t_thread)
+#     #
+#     #             else:
+#     #                 add_transect(transect_data=transect,
+#     #                              mmt_transect=mmt.transects[valid_indices[k]],
+#     #                              mt_pd0_data=pd0_data[k],
+#     #                              mt_mmt=mmt)
+#     #
+#     # if multi_threaded:
+#     #     for x in transect_threads:
+#     #         x.join()
+#     finish = time.perf_counter()
+#     print(f'Finished in {finish - start}')
+#     return processed_transects
+
+
+def adjusted_ensemble_duration(transect, trans_type=None):
+    """Applies the TRDI method of expanding the ensemble time when data are invalid.
+
+    Parameters
+    ----------
+    transect: TransectData
+        Object of TransectData
+    trans_type: str
+        Transect type. If mbt then bottom track is used.
+
+    Returns
+    -------
+    delta_t: np.array(float)
+        Array of delta time in seconds for each ensemble.
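+
+    Notes
+    -----
+    Illustration with made-up values: for ens_duration_sec = [1, 1, 1, 1] and
+    validity [True, False, False, True], the durations of the two invalid
+    ensembles accumulate into the next valid one, giving
+    delta_t = [1, nan, nan, 3].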
+    """
+
+    if transect.adcp.manufacturer == 'TRDI':
+        if trans_type is None:
+            # Determine valid data from water track
+            valid = np.logical_not(np.isnan(transect.w_vel.u_processed_mps))
+            # Sum over cells to get the number of valid cells per ensemble
+            valid_sum = np.sum(valid, axis=0)
+        else:
+            # Determine valid data from bottom track
+            valid_sum = np.logical_not(np.isnan(transect.boat_vel.bt_vel.u_processed_mps))
+
+        valid_ens = valid_sum > 0
+        n_ens = len(valid_ens)
+        ens_dur = transect.date_time.ens_duration_sec
+        delta_t = np.tile([np.nan], n_ens)
+        cum_dur = 0
+        for j in range(n_ens):
+            cum_dur = np.nansum(np.hstack([cum_dur, ens_dur[j]]))
+            if valid_ens[j]:
+                delta_t[j] = cum_dur
+                cum_dur = 0
+    else:
+        delta_t = transect.date_time.ens_duration_sec
+
+    return delta_t
diff --git a/Classes/TransformationMatrix.py b/Classes/TransformationMatrix.py
new file mode 100644
index 0000000000000000000000000000000000000000..4bd3986a95a9d528e6f47c3eca9e5fcfd584e889
--- /dev/null
+++ b/Classes/TransformationMatrix.py
@@ -0,0 +1,206 @@
+import numpy as np
+
+
+class TransformationMatrix(object):
+    """Determines the transformation matrix and source for the specified ADCP model from the data provided.
+
+    Attributes
+    ----------
+    source: str
+        Source of transformation matrix, either Nominal or ADCP
+    matrix: np.array
+        One or more 4x4 transformation matrices.
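+
+    Notes
+    -----
+    A minimal sketch of how such a matrix is typically applied (assuming the
+    usual beam-to-instrument convention; the beam velocities are illustrative):
+
+    >>> import numpy as np
+    >>> t_matrix = np.array([[1.4619, -1.4619, 0, 0],
+    ...                      [0, 0, -1.4619, 1.4619],
+    ...                      [0.2661, 0.2661, 0.2661, 0.2661],
+    ...                      [1.0337, 1.0337, -1.0337, -1.0337]])
+    >>> beam_vel = np.array([0.10, -0.10, 0.05, -0.05])
+    >>> inst_vel = t_matrix.dot(beam_vel)  # x, y, z, and error velocities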
+     """
+
+    def __init__(self):
+        """Constructor initializes variable to None"""
+        self.source = None
+        self.matrix = None
+        
+    def populate_data(self, manufacturer, model=None, data_in=None):
+        """Uses the manufacturer and model to determine how to parse the transformation matrix.
+
+        Parameters
+        ----------
+        manufacturer: str
+            Name of manufacturer (TRDI, SonTek)
+        model: str
+            Model of ADCP
+        data_in:
+            System test data or 'Nominal'
+        """
+        
+        if manufacturer == 'TRDI':
+            self.trdi(model, data_in)
+        elif manufacturer == 'SonTek':
+            self.sontek(data_in)
+
+    def trdi(self, model=None, data_in=None):
+        """Processes the data to store the transformation matrix for TRDI ADCPs.
+        If no transformation matrix information is available a nominal transformation
+        matrix for that model is assumed.
+
+        Parameters
+        ----------
+        model: str
+            Model of ADCP
+        data_in:
+            System test data or 'Nominal'
+        """
+
+        adcp_model = model
+        # Set nominal matrix based on model
+        self.matrix = [[1.4619, -1.4619, 0, 0],
+                       [0, 0, -1.4619, 1.4619],
+                       [0.2661, 0.2661, 0.2661, 0.2661],
+                       [1.0337, 1.0337, -1.0337, -1.0337]]
+
+        if adcp_model == 'RiverRay':
+            self.matrix = [[1, -1, 0, 0],
+                           [0, 0, -1, 1],
+                           [0.2887, 0.2887, 0.2887, 0.2887],
+                           [0.7071, 0.7071, -0.7071, -0.7071]]
+
+        # Overwrite nominal transformation matrix with custom matrix from test data, if available
+        self.source = 'Nominal'
+        if data_in == 'Nominal':
+            self.source = 'Nominal'
+        elif adcp_model == 'Rio Grande':
+            self.riogrande(data_in)
+        elif adcp_model == 'StreamPro':
+            self.streampro(data_in)
+        elif adcp_model == 'RiverRay':
+            self.riverray(data_in)
+        elif adcp_model == 'RiverPro':
+            self.riverpro(data_in)
+        elif adcp_model == 'RioPro':
+            self.riopro(data_in)
+        elif adcp_model == 'pd0':
+            self.matrix = data_in.Inst.t_matrix
+
+        if np.array(self.matrix).size < 16:
+            self.trdi(model=model, data_in=None)
+
+        # Save matrix as np array
+        self.matrix = np.array(self.matrix)[0:4, 0:4]
+
+    def riogrande(self, data_in):
+        """Process Rio Grande test data for transformation matrix.
+
+        Parameters
+        ----------
+        data_in:
+            System test data
+        """
+        if data_in is not None:
+            idx = data_in.find('Instrument Transformation Matrix (Down):')
+            if idx != -1:
+                cell_matrix = np.fromstring(data_in[idx + 50:idx + 356], dtype=np.float64, sep=' ')
+                try:
+                    self.matrix = np.reshape(cell_matrix, (-1, 8))[:, 0:4]
+
+                    self.source = 'ADCP'
+                except ValueError:
+                    pass
+
+    def streampro(self, data_in):
+        """Process StreamPro test data for transformation matrix.
+
+        Parameters
+        ----------
+        data_in:
+            System test data
+        """
+
+        if data_in is not None:
+            idx = data_in.find('>PS3')
+            if idx != -1:
+                temp_str = data_in[idx + 5:idx + 138]
+                temp_str = temp_str.replace('-', ' -')
+                temp_str = temp_str[:temp_str.find('>')]
+                cell_matrix = np.fromstring(temp_str, dtype=np.float64, sep=' ')
+                try:
+                    self.matrix = cell_matrix.reshape(4, 4)
+                    self.source = 'ADCP'
+                except ValueError:
+                    pass
+
+    def riverray(self, data_in):
+        """Process RiverRay test data for transformation matrix.
+
+        Parameters
+        ----------
+        data_in: str
+            System test data
+        """
+        if data_in is not None:
+            idx = data_in.find('Instrument Transformation Matrix')
+            if idx != -1:
+                idx2 = data_in[idx:].find(':')
+                idx3 = idx + idx2
+                if idx2 != -1:
+                    idx4 = data_in[idx3:].find('>')
+                    idx5 = idx3 + idx4 - 2
+                    if idx4 != -1:
+                        try:
+                            # float() cannot parse a block of matrix text; parse the
+                            # space-delimited values after the colon and reshape to 4x4
+                            cell_matrix = np.fromstring(data_in[idx3 + 1:idx5], dtype=np.float64, sep=' ')
+                            self.matrix = cell_matrix.reshape(4, 4)
+                            self.source = 'ADCP'
+                        except ValueError:
+                            pass
+
+    def riverpro(self, data_in):
+        """Process RiverPro test data for transformation matrix.
+
+        Parameters
+        ----------
+        data_in: str
+            System test data
+        """
+        if data_in is not None:
+            idx = data_in.find('Instrument Transformation Matrix')
+            if idx != -1:
+                idx2 = data_in[idx:].find(':')
+                idx3 = idx + idx2
+                if idx2 != -1:
+                    idx4 = data_in[idx3:].find('Has V-Beam')
+                    idx5 = idx3 + idx4 - 2
+                    if idx4 != -1:
+                        try:
+                            # float() cannot parse a block of matrix text; parse the
+                            # space-delimited values after the colon and reshape to 4x4
+                            cell_matrix = np.fromstring(data_in[idx3 + 1:idx5], dtype=np.float64, sep=' ')
+                            self.matrix = cell_matrix.reshape(4, 4)
+                            self.source = 'ADCP'
+                        except ValueError:
+                            pass
+
+    def riopro(self, data_in):
+        """Process RioPro test data for transformation matrix.
+
+        Parameters
+        ----------
+        data_in:
+            System test data
+        """
+
+        if data_in is not None:
+            idx = data_in.find('Instrument Transformation Matrix')
+            if idx != -1:
+                idx2 = data_in[idx:].find(':')
+                idx3 = idx + idx2
+                if idx2 != -1:
+                    idx4 = data_in[idx3:].find('Has V-Beam')
+                    idx5 = idx3 + idx4 - 2
+                    if idx4 != -1:
+                        try:
+                            # float() cannot parse a block of matrix text; parse the
+                            # space-delimited values after the colon and reshape to 4x4
+                            cell_matrix = np.fromstring(data_in[idx3 + 1:idx5], dtype=np.float64, sep=' ')
+                            self.matrix = cell_matrix.reshape(4, 4)
+                            self.source = 'ADCP'
+                        except ValueError:
+                            pass
+
+    def sontek(self, data_in):
+        """Store SonTek transformation matrix data.
+
+        Parameters
+        ----------
+        data_in:
+            System test data
+        """
+
+        if data_in is not None:
+            self.source = 'ADCP'
+            # Note: for M9 this is a 4x4x3 matrix (300,500,1000)
+            # Note: for S5 this is a 4x4x2 matrix (3000,1000)
+            self.matrix = data_in
+
+    def populate_from_qrev_mat(self, tmatrix):
+        """Populates the matrix and source from a previously saved QRev Matlab file."""
+        self.matrix = tmatrix.matrix
+        self.source = tmatrix.source
diff --git a/Classes/Uncertainty.py b/Classes/Uncertainty.py
new file mode 100644
index 0000000000000000000000000000000000000000..0af2deb425b8770d17689fda23aa1f402dd37be9
--- /dev/null
+++ b/Classes/Uncertainty.py
@@ -0,0 +1,418 @@
+import numpy as np
+from scipy.stats import t
+
+
+class Uncertainty(object):
+    """Computes the uncertainty of a measurement.
+
+    Attributes
+    ----------
+    cov: float
+        Coefficient of variation for all transects used in discharge computation
+    cov_95: float
+        Coefficient of variation inflated to the 95 percent coverage
+    invalid_95: float
+        Estimated 95% uncertainty for discharge in invalid bins and ensembles
+    edges_95: float
+        Estimated 95% uncertainty for the computed edge discharges
+    extrapolation_95: float
+        Estimated 95% uncertainty in discharge due to top and bottom extrapolations
+    moving_bed_95: float
+        Estimated 95% uncertainty due to moving-bed tests and conditions
+    systematic: float
+        Systematic error estimated at 1.5% at 1 sigma
+    total_95: float
+        Estimated 95% uncertainty in discharge using automated values
+    cov_95_user: float
+        User provided estimate of coefficient of variation inflated to the 95 percent coverage
+    invalid_95_user: float
+        User provided estimate of 95% uncertainty for discharge in invalid bins and ensembles
+    edges_95_user: float
+        User provided estimate of 95% uncertainty for the computed edge discharges
+    extrapolation_95_user: float
+        User provided estimate of 95% uncertainty in discharge due to top and bottom extrapolations
+    moving_bed_95_user: float
+        User provided estimate of 95% uncertainty due to moving-bed tests and conditions
+    systematic_user: float
+        User provided estimate of systematic error estimated at 1.5% at 1 sigma
+    total_95_user: float
+        Estimated 95% uncertainty in discharge using user-provided values to override automated values
+    """
+
+    def __init__(self):
+        """Initializes the instance variables."""
+        self.cov = None
+        self.cov_95 = None
+        self.invalid_95 = None
+        self.edges_95 = None
+        self.extrapolation_95 = None
+        self.moving_bed_95 = None
+        self.systematic = None
+        self.total_95 = None
+        self.cov_95_user = None
+        self.invalid_95_user = None
+        self.edges_95_user = None
+        self.extrapolation_95_user = None
+        self.moving_bed_95_user = None
+        self.systematic_user = None
+        self.total_95_user = None
+
+    def compute_uncertainty(self, meas, cov_95_user=None, invalid_95_user=None, edges_95_user=None,
+                            extrapolation_95_user=None, moving_bed_95_user=None, systematic_user=None):
+        """Computes the uncertainty for the components of the discharge measurement
+        using measurement data or user provided values.
+
+        Parameters
+        ----------
+        meas: Measurement
+            Object of class Measurement
+        cov_95_user: float
+            User provided estimate of coefficient of variation inflated to the 95 percent coverage
+        invalid_95_user: float
+            User provided estimate of 95% uncertainty for discharge in invalid bins and ensembles
+        edges_95_user: float
+            User provided estimate of 95% uncertainty for the computed edge discharges
+        extrapolation_95_user: float
+            User provided estimate of 95% uncertainty in discharge due to top and bottom extrapolations
+        moving_bed_95_user: float
+            User provided estimate of 95% uncertainty due to moving-bed tests and conditions
+        systematic_user: float
+            User provided estimate of systematic error estimated at 1.5% at 1 sigma
+        """
+
+        # Use only checked discharges
+        checked = []
+        discharges = []
+        for n in range(len(meas.transects)):
+            checked.append(meas.transects[n].checked)
+            if meas.transects[n].checked:
+                discharges.append(meas.discharge[n])
+
+        # Compute uncertainties from the data
+        self.cov, self.cov_95 = self.uncertainty_q_random(discharges, 'total')
+        self.invalid_95 = self.uncertainty_invalid_data(discharges)
+        self.edges_95 = self.uncertainty_edges(discharges)
+        self.extrapolation_95 = self.uncertainty_extrapolation(meas, discharges)
+        self.moving_bed_95 = self.uncertainty_moving_bed(meas, checked)
+        self.systematic = 1.5
+
+        # Get user uncertainty estimates
+        self.cov_95_user = cov_95_user
+        self.invalid_95_user = invalid_95_user
+        self.edges_95_user = edges_95_user
+        self.extrapolation_95_user = extrapolation_95_user
+        self.moving_bed_95_user = moving_bed_95_user
+        self.systematic_user = systematic_user
+
+        # Estimate the total measurement uncertainty
+        self.estimate_total_uncertainty()
+
+    def populate_from_qrev_mat(self, meas_struct):
+        """Populates the object using data from previously saved QRev Matlab file.
+
+        Parameters
+        ----------
+        meas_struct: mat_struct
+           Matlab data structure obtained from sio.loadmat
+        """
+        if hasattr(meas_struct, 'uncertainty'):
+            self.cov = meas_struct.uncertainty.cov
+            self.cov_95 = meas_struct.uncertainty.cov95
+            self.invalid_95 = meas_struct.uncertainty.invalid95
+            self.edges_95 = meas_struct.uncertainty.edges95
+            self.extrapolation_95 = meas_struct.uncertainty.extrapolation95
+            self.moving_bed_95 = meas_struct.uncertainty.movingBed95
+            self.systematic = meas_struct.uncertainty.systematic
+            self.total_95 = meas_struct.uncertainty.total95
+            if not isinstance(meas_struct.uncertainty.cov95User, np.ndarray):
+                self.cov_95_user = meas_struct.uncertainty.cov95User
+            if not isinstance(meas_struct.uncertainty.invalid95User, np.ndarray):
+                self.invalid_95_user = meas_struct.uncertainty.invalid95User
+            if not isinstance(meas_struct.uncertainty.edges95User, np.ndarray):
+                self.edges_95_user = meas_struct.uncertainty.edges95User
+            if not isinstance(meas_struct.uncertainty.extrapolation95User, np.ndarray):
+                self.extrapolation_95_user = meas_struct.uncertainty.extrapolation95User
+            if not isinstance(meas_struct.uncertainty.movingBed95User, np.ndarray):
+                self.moving_bed_95_user = meas_struct.uncertainty.movingBed95User
+            if not isinstance(meas_struct.uncertainty.systematicUser, np.ndarray):
+                self.systematic_user = meas_struct.uncertainty.systematicUser
+            self.total_95_user = meas_struct.uncertainty.total95User
+
+    def estimate_total_uncertainty(self):
+        """Compute the uncertainty of the measurement using the automatically computed uncertainties and
+        user overrides.
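+
+        Notes
+        -----
+        The components are combined in quadrature: each 95% term is halved to
+        an approximate 1-sigma value, combined with the 1-sigma systematic
+        term, and the result is doubled back to 95% coverage:
+
+            total_95 = 2 * sqrt((cov_95 / 2)**2 + (invalid_95 / 2)**2
+                                + (edges_95 / 2)**2 + (extrapolation_95 / 2)**2
+                                + (moving_bed_95 / 2)**2 + systematic**2)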
+        """
+
+        self.total_95 = 2.0 * ((self.cov_95 / 2)**2
+                               + (self.invalid_95 / 2)**2
+                               + (self.edges_95 / 2)**2
+                               + (self.extrapolation_95 / 2)**2
+                               + (self.moving_bed_95 / 2)**2
+                               + self.systematic**2
+                               )**0.5
+
+        if self.cov_95_user is None:
+            cov_95_user = self.cov_95
+        else:
+            cov_95_user = self.cov_95_user
+
+        if self.invalid_95_user is None:
+            invalid_95_user = self.invalid_95
+        else:
+            invalid_95_user = self.invalid_95_user
+
+        if self.edges_95_user is None:
+            edges_95_user = self.edges_95
+        else:
+            edges_95_user = self.edges_95_user
+
+        if self.extrapolation_95_user is None:
+            extrapolation_95_user = self.extrapolation_95
+        else:
+            extrapolation_95_user = self.extrapolation_95_user
+
+        if self.moving_bed_95_user is None:
+            moving_bed_95_user = self.moving_bed_95
+        else:
+            moving_bed_95_user = self.moving_bed_95_user
+
+        if self.systematic_user is None:
+            systematic_user = self.systematic
+        else:
+            systematic_user = self.systematic_user
+
+        self.total_95_user = 2.0 * ((cov_95_user / 2)**2
+                                    + (invalid_95_user / 2)**2
+                                    + (edges_95_user / 2)**2
+                                    + (extrapolation_95_user / 2)**2
+                                    + (moving_bed_95_user / 2)**2
+                                    + systematic_user**2
+                                    )**0.5
+
+    @staticmethod
+    def get_array_attr(list_in, prop):
+        """Create an array of the requested attribute from a list of objects containing the requested attribute.
+
+        Parameters
+        ----------
+        list_in: list
+            List of objects
+        prop: str
+            Attribute requested
+
+        Returns
+        -------
+        data: np.ndarray
+            Array of the requested attributes from each object in list_in
+
+        """
+        # Create array of specified attribute
+        data = []
+        for item in list_in:
+            data.append(getattr(item, prop))
+        data = np.asarray(data)
+        return data
+
+    @staticmethod
+    def uncertainty_q_random(discharges, prop):
+        """Compute 95% random uncertainty for property of discharge.
+        Uses simplified method for 2 transects.
+
+        Parameters
+        ----------
+        discharges: list
+            List of Discharge objects
+        prop: str
+            Attribute of Discharge objects
+
+        Returns
+        -------
+        cov: float
+            Coefficient of variation
+        cov_95: float
+            Coefficient of variation inflated to 95% value
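+
+        Notes
+        -----
+        For two transects a fixed coverage factor of 3.3 is used; otherwise
+        the Student's t value is used. For example (illustrative), with four
+        transects the COV is inflated by about 1.591:
+
+        >>> from scipy.stats import t
+        >>> round(float(t.interval(0.95, 3)[1] / 4 ** 0.5), 3)
+        1.591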
+        """
+        n_max = len(discharges)
+        if n_max > 0:
+            # Create array of specified attribute
+            data = Uncertainty.get_array_attr(discharges, prop)
+
+            # Compute coefficient of variation
+            cov = np.abs(np.nanstd(data, ddof=1) / np.nanmean(data)) * 100
+
+            # Inflate the cov to the 95% value
+            if n_max == 2:
+                # Use an approximate method to reduce the otherwise high coverage factor for 2 transects
+                # and account for prior knowledge related to 720 second duration analysis
+                cov_95 = cov * 3.3
+            else:
+                # Use Student's t to inflate COV for n > 2
+                cov_95 = t.interval(0.95, n_max-1)[1] * cov / n_max**0.5
+        else:
+            cov = np.nan
+            cov_95 = np.nan
+
+        return cov, cov_95
+
+    @staticmethod
+    def uncertainty_edges(discharges):
+        """Compute uncertainty of edge discharge. Currently assuming random plus bias
+        is within 30% of actual value.
+
+        Parameters
+        ----------
+        discharges: list
+            List of Discharge objects
+
+        Returns
+        -------
+        edge_uncertainty: float
+            95% uncertainty in discharge due to edge estimates
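+
+        Notes
+        -----
+        For example (illustrative values), mean left and right edge discharges
+        of 2 and 3 m3/s with a mean total of 100 m3/s give percent_edge = 5,
+        so the reported uncertainty is 5 * 0.3 = 1.5 percent.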
+        """
+
+        # Compute mean discharge values for total, left, and right
+        mean_q = np.nanmean(Uncertainty.get_array_attr(discharges, 'total'))
+        mean_left = np.nanmean(Uncertainty.get_array_attr(discharges, 'left'))
+        mean_right = np.nanmean(Uncertainty.get_array_attr(discharges, 'right'))
+
+        # Compute combined edge uncertainty
+        percent_edge = ((np.abs(mean_left) + np.abs(mean_right)) / mean_q) * 100
+        edge_uncertainty = percent_edge * 0.3
+
+        return edge_uncertainty
+
+    @staticmethod
+    def uncertainty_extrapolation(meas, discharges):
+        """Compute the uncertainty of the top and bottom extrapolations.
+
+        Parameters
+        ----------
+        meas: Measurement
+            Object of class Measurement
+        discharges: list
+            List of Discharge objects
+
+        Returns
+        -------
+        extrapolation_uncertainty: float
+            95% uncertainty due to top and bottom extrapolation estimates
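+
+        Notes
+        -----
+        The selected extrapolation is typically among the six candidates, so
+        the smallest difference is near zero; averaging percent_diff[1:5]
+        excludes it and uses the four next-smallest differences.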
+        """
+
+        # Compute mean total uncorrected discharge
+        q_selected = np.nanmean(Uncertainty.get_array_attr(discharges, 'total_uncorrected'))
+
+        # Create array of discharges from the various extrapolation methods
+        q_possible = np.array([meas.extrap_fit.q_sensitivity.q_pp_mean,
+                               meas.extrap_fit.q_sensitivity.q_pp_opt_mean,
+                               meas.extrap_fit.q_sensitivity.q_cns_mean,
+                               meas.extrap_fit.q_sensitivity.q_cns_opt_mean,
+                               meas.extrap_fit.q_sensitivity.q_3p_ns_mean,
+                               meas.extrap_fit.q_sensitivity.q_3p_ns_opt_mean])
+
+        # Compute difference in discharges from the selected method
+        q_diff = np.abs(q_possible - q_selected)
+
+        # Sort differences
+        percent_diff = np.sort(q_diff) / q_selected
+
+        # Estimate the uncertainty as the average of the 4 smallest differences,
+        # excluding the smallest (the selected method itself, which differs by about zero)
+        extrapolation_uncertainty = np.nanmean(percent_diff[1:5]) * 100
+
+        return extrapolation_uncertainty
+
+    @staticmethod
+    def uncertainty_invalid_data(discharges):
+        """Computes an estimate of the uncertainty for the discharge computed for invalid bins and ensembles.
+
+        Parameters
+        ----------
+        discharges: list
+            List of Discharge objects
+
+        Returns
+        -------
+        invalid_data_uncertainty: float
+            95% uncertainty due to estimating invalid data
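+
+        Notes
+        -----
+        For example (illustrative values), if interpolated cells and ensembles
+        contribute 2% and 3% of the mean discharge, the estimate is
+        (2 + 3) * 0.2 = 1.0 percent.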
+        """
+
+        # Compute mean discharges
+        q_mean = np.nanmean(Uncertainty.get_array_attr(discharges, 'total'))
+        q_cells = np.nanmean(Uncertainty.get_array_attr(discharges, 'int_cells'))
+        q_ensembles = np.nanmean(Uncertainty.get_array_attr(discharges, 'int_ens'))
+
+        # Compute percentages
+        percent_cells = (q_cells / q_mean) * 100
+        percent_ensembles = (q_ensembles / q_mean) * 100
+
+        # Compute uncertainty for combined invalid cells and ensembles
+        invalid_data_uncertainty = (np.abs(percent_cells) + np.abs(percent_ensembles)) * 0.2
+
+        return invalid_data_uncertainty
+
+    @staticmethod
+    def uncertainty_moving_bed(meas, checked):
+        """Estimates the 95% uncertainty of the discharge due to a moving-bed and navigation reference.
+
+        Parameters
+        ----------
+        meas: Measurement
+            Object of class Measurement
+        checked: list
+            Logical list of transects used to compute final discharge
+
+        Returns
+        -------
+        moving_bed_uncertainty: float
+            95% uncertainty associated with moving-bed conditions
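+
+        Notes
+        -----
+        Summary of the values assigned below: 1.5 if a valid test indicates a
+        moving bed and a correction was applied; 3 if a moving bed is indicated
+        but uncorrected, or no usable tests exist; 1 if valid tests indicate no
+        moving bed or none was observed; 0 if a GPS reference is used.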
+        """
+
+        if np.any(checked) and meas.transects[checked.index(1)].boat_vel.selected == 'bt_vel':
+            # Boat velocity based on bottom track, moving-bed possible
+            if len(meas.mb_tests) > 0:
+                # Moving_bed tests recorded
+                user_valid = []
+                quality = []
+                moving_bed = []
+                used = []
+                for test in meas.mb_tests:
+                    user_valid.append(test.user_valid)
+                    if test.test_quality == 'Errors':
+                        quality.append(False)
+                    else:
+                        quality.append(True)
+                    moving_bed.append(test.moving_bed)
+                    used.append(test.use_2_correct)
+
+                # Check to see if there are any valid tests
+                if np.any(np.logical_and(np.asarray(quality), np.asarray(user_valid))):
+                    # Check to see if the valid tests indicate a moving bed
+                    moving_bed_bool = []
+                    for result in moving_bed:
+                        if result == 'Yes':
+                            moving_bed_bool.append(True)
+                        else:
+                            moving_bed_bool.append(False)
+                    valid_moving_bed = np.logical_and(quality, np.asarray(moving_bed_bool))
+                    if np.any(valid_moving_bed):
+                        # Check to see that a correction was used
+                        if np.any(np.logical_and(valid_moving_bed, np.asarray(used))):
+                            # Moving-bed exists and correction applied
+                            moving_bed_uncertainty = 1.5
+                        else:
+                            # Moving-bed exists and no correction applied
+                            moving_bed_uncertainty = 3
+                    else:
+                        # Valid tests indicated no moving bed
+                        moving_bed_uncertainty = 1
+                else:
+                    moving_bed_uncertainty = 3
+            elif meas.observed_no_moving_bed:
+                moving_bed_uncertainty = 1
+            else:
+                # No moving bed tests
+                moving_bed_uncertainty = 3
+        else:
+            # GPS used as boat velocity reference
+            moving_bed_uncertainty = 0
+
+        return moving_bed_uncertainty
diff --git a/Classes/WaterData.py b/Classes/WaterData.py
new file mode 100644
index 0000000000000000000000000000000000000000..56e1b68df3d76968b197f29ad7f0cdac26ca2a36
--- /dev/null
+++ b/Classes/WaterData.py
@@ -0,0 +1,2445 @@
+import copy
+import numpy as np
+from numpy.matlib import repmat
+from scipy import interpolate
+from Classes.BoatData import BoatData
+from MiscLibs.common_functions import cart2pol, pol2cart, iqr, nan_greater, nan_less
+from MiscLibs.robust_loess import rloess
+from MiscLibs.abba_2d_interpolation import abba_idw_interpolation
+
+
+class WaterData(object):
+    """Class to process and store water velocity data.
+
+    Attributes
+    ----------
+    Original data provided to the class:
+        raw_vel_mps: np.array(float)
+            Contains the raw unfiltered velocity in m/s. First index corresponds to beams
+            1,2,3,4 for beam coordinates or u,v,w,d otherwise.
+        frequency: np.array(float)
+            Defines ADCP frequency used for velocity measurement, in kHz.
+        orig_coord_sys: str
+            Defines the original raw velocity coordinate system "Beam", "Inst", "Ship", "Earth".
+        orig_nav_ref: str
+            Defines the original raw data navigation reference: "None", "BT", "GGA", "VTG".
+        corr: np.array(float)
+            Correlation values for WT, if available.
+        rssi: np.array(float)
+            Returned acoustic signal strength.
+        rssi_units: str
+            Units for returned acoustic signal strength: "Counts" "dB", "SNR".
+        water_mode: str
+            WaterMode for TRDI or 'Variable' for SonTek.
+        blanking_distance_m: float
+            Distance below transducer where data are marked invalid due to potential ringing.
+        cells_above_sl: np.array(bool)
+            Logical array of depth cells above sidelobe cutoff based on selected depth reference.
+        cells_above_sl_bt: np.array(bool)
+            Logical array of depth cells above the sidelobe cutoff based on BT
+        sl_lag_effect_m: np.array(float)
+            Side lobe distance due to lag and transmit length
+
+    Data computed in this class:
+        u_earth_no_ref_mps: np.array(float)
+            Horizontal velocity in x-direction with no boat reference applied, in m/s.
+        v_earth_no_ref_mps: np.array(float)
+            Horizontal velocity in y-direction with no boat reference applied, in m/s.
+        u_mps: np.array(float)
+            Horizontal velocity in x-direction, earth coord, nav referenced, in m/s.
+        v_mps: np.array(float)
+            Horizontal velocity in y-direction, earth coord, nav referenced, in m/s.
+        u_processed_mps: np.array(float)
+            Horizontal velocity in x-direction, earth coord, nav ref, filtered, and interpolated.
+        v_processed_mps: np.array(float)
+            Horizontal velocity in y-direction, earth coord, nav ref, filtered, and interpolated.
+        w_mps: np.array(float)
+            Vertical velocity (+ up), in m/s.
+        d_mps: np.array(float)
+            Difference in vertical velocities compute from opposing beam pairs, in m/s.
+        invalid_index: np.array(bool)
+            Index of ensembles with no valid raw velocity data.
+        num_invalid: float
+            Estimated number of depth cells in ensembles with no valid raw velocity data.
+        valid_data: np.array(float)
+            3-D logical array of valid data
+                Dim1 0 - composite
+                Dim1 1 - original, cells above side lobe
+                Dim1 2 - dfilter
+                Dim1 3 - wfilter
+                Dim1 4 - smoothFilter
+                Dim1 5 - beamFilter
+                Dim1 6 - excluded
+                Dim1 7 - snrFilter
+                Dim1 8 - validDepthFilter
+
+    Processing settings:
+        beam_filter: int
+            Set 3 for 3-beam solutions, 4 for 4-beam solutions.
+        d_filter: str
+            Set difference velocity filter "On", "Off".
+        d_filter_thresholds: float, dict, tuple
+            Threshold(s) for difference velocity filter.
+        w_filter: str
+            Set vertical velocity filter "On", "Off".
+        w_filter_thresholds: float, dict, tuple
+            Threshold(s) for vertical velocity filter.
+        excluded_dist_m: float
+            Distance below transducer for which data are excluded or marked invalid, in m.
+        orig_excluded_dist_m: float
+            Original distance below transducer for which data are excluded or marked invalid, in m.
+        smooth_filter: str
+            Set filter based on smoothing function "On", "Off".
+        smooth_speed: np.array(float)
+            Smoothed mean water speed, in m/s.
+        smooth_upper_limit: np.array(float)
+            Smooth function upper limit of window, in m/s.
+        smooth_lower_limit: np.array(float)
+            Smooth function lower limit of window, in m/s.
+        snr_filter: str
+            Set SNR filter for SonTek data "On", "Off".
+        snr_rng: np.array(float)
+            Range of beam averaged SNR
+        wt_depth_filter: np.array(bool)
+            WT in ensembles with invalid depths are marked invalid.
+        interpolate_ens: str
+            Type of interpolation: "None", "TRDI", "Linear", 'abba'.
+        interpolate_cells: str
+            Type of cell interpolation: "None", "TRDI", "Linear", 'abba'
+        coord_sys: str
+            Defines the velocity coordinate system "Beam", "Inst", "Ship", "Earth"
+        nav_ref: str
+            Defines the navigation reference: "None", "BT", "GGA", "VTG"
+        sl_cutoff_percent: float
+            Percent cutoff defined by cos(angle)
+        sl_cutoff_number: float
+            User specified number of cells to cutoff from SonTek, not implemented, undefined
+        sl_cutoff_type: str
+            Type of cutoff method "Percent" or "Number".
+        ping_type: np.array(int)
+            Indicates type of ping for each cell: 0-incoherent, 1-coherent, 2-surface
+        d_meas_thresholds: dict
+            Dictionary of difference velocity thresholds computed using the whole measurement by ping type
+        w_meas_thresholds: dict
+            Dictionary of vertical velocity thresholds computed using the whole measurement by ping type
+        use_measurement_thresholds: bool
+            Indicates if the measurement based thresholds should be used
+
+    """
+
+    def __init__(self):
+        """Initialize instance variables.
+        """
+
+        # Data input to this class
+        self.raw_vel_mps = None
+        self.frequency = None
+        self.orig_coord_sys = None
+        self.orig_nav_ref = None
+        self.corr = None
+        self.rssi = None
+        self.rssi_units = None
+        self.water_mode = None
+        self.blanking_distance_m = None
+        self.cells_above_sl = None
+        self.cells_above_sl_bt = None
+        self.sl_lag_effect_m = None
+        
+        # Data computed in this class
+        self.u_earth_no_ref_mps = None
+        self.v_earth_no_ref_mps = None
+        self.u_mps = None
+        self.v_mps = None
+        self.u_processed_mps = None
+        self.v_processed_mps = None
+        self.w_mps = None
+        self.d_mps = None
+        self.invalid_index = None
+        self.num_invalid = []
+        self.valid_data = None
+                                
+        # Settings
+        self.beam_filter = None
+        self.d_filter = None
+        self.d_filter_thresholds = {}
+        self.w_filter = None
+        self.w_filter_thresholds = {}
+        self.excluded_dist_m = None
+        self.orig_excluded_dist_m = None
+        self.smooth_filter = None
+        self.smooth_speed = None
+        self.smooth_upper_limit = None
+        self.smooth_lower_limit = None
+        self.snr_filter = 'Off'
+        self.snr_rng = []
+        self.wt_depth_filter = True
+        self.interpolate_ens = None
+        self.interpolate_cells = None
+        self.coord_sys = None
+        self.nav_ref = None
+        self.sl_cutoff_percent = None
+        self.sl_cutoff_number = None
+        self.sl_cutoff_type = None
+        self.sl_cutoff_m = None
+        self.ping_type = np.array([])
+
+        # Filter settings populated from Measurement.create_filter_composites
+        self.d_meas_thresholds = {}
+        self.w_meas_thresholds = {}
+
+        self.use_measurement_thresholds = False
+
+    def populate_data(self, vel_in, freq_in, coord_sys_in, nav_ref_in, rssi_in, rssi_units_in,
+                      excluded_dist_in, cells_above_sl_in, sl_cutoff_per_in, sl_cutoff_num_in,
+                      sl_cutoff_type_in, sl_lag_effect_in, wm_in, blank_in, corr_in=None,
+                      surface_vel_in=None, surface_rssi_in=None, surface_corr_in=None, sl_cutoff_m=None,
+                      surface_num_cells_in=0, ping_type='U', use_measurement_thresholds=False):
+        
+        """Populates the variables with input, computed, or default values.
+
+        Parameters
+        ----------
+        vel_in: np.array(float)
+            Contains the raw unfiltered velocity data in m/s.
+            Rows 1-4 are beams 1,2,3,4 for beam coordinates or u,v,w,d otherwise.
+        freq_in: np.array(float)
+            Defines ADCP frequency used for velocity measurement.
+        coord_sys_in: str
+            Defines the original raw velocity coordinate system "Beam", "Inst", "Ship", "Earth".
+        nav_ref_in: str
+            Defines the original raw data navigation reference: "None", "BT", "GGA", "VTG".
+        rssi_in: np.array(float)
+            Returned acoustic signal strength.
+        rssi_units_in: str
+            Units for returned acoustic signal strength: "Counts", "dB", "SNR".
+        excluded_dist_in: float
+            Distance below transducer for which data are excluded or marked invalid.
+        cells_above_sl_in: np.array(bool)
+            Bool array of depth cells above the sidelobe cutoff based on selected depth reference.
+        sl_cutoff_per_in: float
+            Percent cutoff defined by cos(angle).
+        sl_cutoff_num_in: float
+            User specified number of cells to cutoff above sl_cutoff.
+        sl_cutoff_type_in: str
+            Method used to compute cutoff "Percent" or "Number".
+        sl_lag_effect_in: np.array(float)
+            Lag effect for each ensemble, in m.
+        wm_in: str
+            Watermode for TRDI or 'Variable' for SonTek.
+        blank_in: float
+            Blanking distance, in m.
+        corr_in: np.array(float)
+            Correlation values for water track. Optional.
+        surface_vel_in: np.array(float)
+            Surface velocity data for RiverRay, RiverPro, RioPro. Optional.
+        surface_rssi_in: np.array(float)
+            Returned acoustic signal strength for RiverRay, RiverPro, RioPro. Optional.
+        surface_corr_in: np.array(float)
+            Surface velocity correlations for RiverRay, RiverPro, RioPro. Optional.
+        surface_num_cells_in: np.array(float)
+            Number of surface cells in each ensemble for RiverRay, RiverPro, RioPro. Optional.
+        sl_cutoff_m: np.array(float)
+            Depth in meters of side lobe cutoff to center of cells.
+        ping_type: np.array(str)
+            Indicates type of ping used for water tracking
+        """
+
+        # Set object properties from input data standard for all ADCPs
+        self.frequency = freq_in
+        self.orig_coord_sys = coord_sys_in
+        self.coord_sys = coord_sys_in
+        self.orig_nav_ref = nav_ref_in
+        self.nav_ref = nav_ref_in
+        self.water_mode = wm_in
+        self.excluded_dist_m = excluded_dist_in
+        self.rssi_units = rssi_units_in
+        max_cells = cells_above_sl_in.shape[0]
+        self.ping_type = np.tile(np.array([ping_type]), (max_cells, 1))
+        self.use_measurement_thresholds = use_measurement_thresholds
+
+        # Set object properties that depend on the presence or absence of surface cells
+        if np.sum(surface_num_cells_in) > 0:
+            surface_num_cells_in[np.isnan(surface_num_cells_in)] = 0
+
+            num_ens = cells_above_sl_in.shape[1]
+            num_reg_cells = vel_in.shape[1]
+            max_surf_cells = max_cells - num_reg_cells
+
+            # Combine surface velocity bins and regular velocity bins into one matrix
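+            # Illustrative layout (assumption): with k surface cells in an ensemble
+            # and n regular cells, that ensemble's column becomes
+            # [s_1..s_k, r_1..r_n, nan, ...], so surface bins sit above regular bins.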
+            self.raw_vel_mps = np.tile([np.nan], [4, max_cells, num_ens])
+            self.rssi = np.tile([np.nan], [4, max_cells, num_ens])
+            self.corr = np.tile([np.nan], [4, max_cells, num_ens])
+
+            if max_surf_cells > 0:
+                self.raw_vel_mps[:, :max_surf_cells, :] = surface_vel_in[:, :max_surf_cells, :]
+                self.rssi[:, :max_surf_cells, :] = surface_rssi_in[:, :max_surf_cells, :]
+                self.corr[:, :max_surf_cells, :] = surface_corr_in[:, :max_surf_cells, :]
+
+            for i_ens in range(num_ens):
+                self.raw_vel_mps[:,
+                                 int(surface_num_cells_in[i_ens]):int(surface_num_cells_in[i_ens])
+                                 + num_reg_cells, i_ens] = vel_in[:, :num_reg_cells, i_ens]
+                self.rssi[:,
+                          int(surface_num_cells_in[i_ens]):int(surface_num_cells_in[i_ens])
+                          + num_reg_cells, i_ens] = rssi_in[:, :num_reg_cells, i_ens]
+                self.corr[:,
+                          int(surface_num_cells_in[i_ens]):int(surface_num_cells_in[i_ens])
+                          + num_reg_cells, i_ens] = corr_in[:, :num_reg_cells, i_ens]
+                self.ping_type[:int(surface_num_cells_in[i_ens]), i_ens] = 'S'
+        else:
+            # No surface cells
+            self.raw_vel_mps = vel_in
+            self.rssi = rssi_in
+            if corr_in is not None and np.any(corr_in):
+                self.corr = corr_in
+            else:
+                # No correlations input
+                self.corr = np.tile(np.nan, rssi_in.shape)
+
+        # TODO: This assumes u, v, w, d ordering; if the raw data are in beam coordinates it is not correct.
+        self.u_mps = np.copy(self.raw_vel_mps)[0, :, :]
+        self.v_mps = np.copy(self.raw_vel_mps)[1, :, :]
+        self.w_mps = np.copy(self.raw_vel_mps)[2, :, :]
+        self.d_mps = np.copy(self.raw_vel_mps)[3, :, :]
+
+        self.water_mode = wm_in
+        self.excluded_dist_m = excluded_dist_in
+        self.orig_excluded_dist_m = excluded_dist_in
+
+        # In some rare situations the blank is empty so it is set to the excluded_dist_in
+        try:
+            blank_in = float(blank_in)
+            self.blanking_distance_m = blank_in
+        except ValueError:
+            self.blanking_distance_m = excluded_dist_in
+            
+        self.cells_above_sl = cells_above_sl_in
+        self.cells_above_sl_bt = cells_above_sl_in
+        self.sl_cutoff_percent = sl_cutoff_per_in
+        self.sl_cutoff_number = sl_cutoff_num_in
+        self.sl_cutoff_type = sl_cutoff_type_in
+        self.sl_lag_effect_m = sl_lag_effect_in
+        self.sl_cutoff_m = sl_cutoff_m
+        
+        # Set filter defaults to no filtering and no interruption
+        self.beam_filter = 3
+        self.d_filter = 'Off'
+        self.d_filter_thresholds = {}
+        self.w_filter = 'Off'
+        self.w_filter_thresholds = {}
+        self.smooth_filter = False
+        self.interpolate_ens = 'None'
+        self.interpolate_cells = 'None'
+        
+        # Determine original valid data
+
+        # Initialize valid data property
+        self.valid_data = np.tile(self.cells_above_sl, [9, 1, 1])
+        
+        # Find invalid raw data
+        valid_vel = np.tile(self.cells_above_sl, [4, 1, 1])
+        valid_vel[np.isnan(self.raw_vel_mps)] = False
+            
+        # Identify invalid velocity data (less than 3 valid beams)
+        valid_vel_sum = np.sum(valid_vel, axis=0)
+        valid_data2 = np.copy(self.cells_above_sl)
+        valid_data2[valid_vel_sum < 3] = False
+        
+        # Set valid_data property for original data
+        self.valid_data[1, :, :] = valid_data2
+        
+        # Combine all filter data to composite valid data
+        self.all_valid_data()
+        
+        # Estimate the number of cells in invalid ensembles using
+        # Adjacent valid ensembles
+        valid_data_2_sum = np.nansum(self.valid_data[1], 0)
+        self.invalid_index = np.where(valid_data_2_sum == 0)[0]
+        n_invalid = len(self.invalid_index)
+        for n in range(n_invalid):
+            # Find last valid ensemble before the invalid ensemble
+            idx1 = np.where(valid_data_2_sum[:self.invalid_index[n]] > 0)[0]
+            if len(idx1) > 0:
+                idx1 = idx1[-1]
+            else:
+                idx1 = self.invalid_index[n]
+
+            # Find first valid ensemble after the invalid ensemble
+            idx2 = np.where(valid_data_2_sum[self.invalid_index[n]:] > 0)[0]
+            if len(idx2) > 0:
+                idx2 = idx2[0] + self.invalid_index[n]
+            else:
+                idx2 = self.invalid_index[n]
+                
+            # Estimate number of cells in invalid ensemble
+            self.num_invalid.append(np.floor((valid_data_2_sum[idx1]+valid_data_2_sum[idx2]) / 2))
+            
+        # Set processed data to non-interpolated valid data
+        self.u_processed_mps = np.copy(self.u_mps)
+        self.v_processed_mps = np.copy(self.v_mps)
+        self.u_processed_mps[np.logical_not(self.valid_data[0])] = np.nan
+        self.v_processed_mps[np.logical_not(self.valid_data[0])] = np.nan
+        
+        # Compute SNR range if SNR data is provided
+        if rssi_units_in == 'SNR':
+            self.compute_snr_rng()
+
+    def populate_from_qrev_mat(self, transect):
+        """Populates the object using data from previously saved QRev Matlab file.
+
+        Parameters
+        ----------
+        transect: mat_struct
+            Matlab data structure obtained from sio.loadmat
+        """
+
+        # Data requiring manipulation (special case for 1 ensemble or 1 cell)
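+        # (Matlab files loaded through sio.loadmat typically collapse singleton
+        # dimensions, so a transect with a single ensemble or a single cell
+        # arrives as a 2D array and must be reshaped back to 3D.)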
+        if len(transect.wVel.rawVel_mps.shape) == 2:
+            if len(transect.boatVel.btVel.rawVel_mps.shape) > 1:
+                # Multiple ensembles with one cell
+                self.raw_vel_mps = np.moveaxis(transect.wVel.rawVel_mps, 1, 0)
+                self.raw_vel_mps = self.raw_vel_mps.reshape(self.raw_vel_mps.shape[0], 1, self.raw_vel_mps.shape[1])
+                self.corr = np.moveaxis(transect.wVel.corr, 1, 0)
+                self.corr = self.corr.reshape(self.corr.shape[0], 1, self.corr.shape[1])
+                self.rssi = np.moveaxis(transect.wVel.rssi, 1, 0)
+                self.rssi = self.rssi.reshape(self.rssi.shape[0], 1, self.rssi.shape[1])
+                self.valid_data = np.moveaxis(transect.wVel.validData, 1, 0)
+                self.valid_data = self.valid_data.reshape(self.valid_data.shape[0], 1, self.valid_data.shape[1])
+                self.u_earth_no_ref_mps = transect.wVel.uEarthNoRef_mps
+                self.u_earth_no_ref_mps = self.u_earth_no_ref_mps.reshape(1, self.u_earth_no_ref_mps.shape[0])
+                self.v_earth_no_ref_mps = transect.wVel.vEarthNoRef_mps
+                self.v_earth_no_ref_mps = self.v_earth_no_ref_mps.reshape(1, self.v_earth_no_ref_mps.shape[0])
+                self.u_mps = transect.wVel.u_mps
+                self.u_mps = self.u_mps.reshape(1, self.u_mps.shape[0])
+                self.v_mps = transect.wVel.v_mps
+                self.v_mps = self.v_mps.reshape(1, self.v_mps.shape[0])
+                self.u_processed_mps = transect.wVel.uProcessed_mps
+                self.u_processed_mps = self.u_processed_mps.reshape(1, self.u_processed_mps.shape[0])
+                self.v_processed_mps = transect.wVel.vProcessed_mps
+                self.v_processed_mps = self.v_processed_mps.reshape(1, self.v_processed_mps.shape[0])
+                self.w_mps = transect.wVel.w_mps
+                self.w_mps = self.w_mps.reshape(1, self.w_mps.shape[0])
+                self.d_mps = transect.wVel.d_mps
+                self.d_mps = self.d_mps.reshape(1, self.d_mps.shape[0])
+                self.snr_rng = transect.wVel.snrRng
+                self.snr_rng = self.snr_rng.reshape(1, self.snr_rng.shape[0])
+                self.cells_above_sl = transect.wVel.cellsAboveSL.astype(bool)
+                self.cells_above_sl = self.cells_above_sl.reshape(1, self.cells_above_sl.shape[0])
+                self.cells_above_sl_bt = transect.wVel.cellsAboveSLbt.astype(bool)
+                self.cells_above_sl_bt = self.cells_above_sl_bt.reshape(1, self.cells_above_sl_bt.shape[0])
+                self.sl_lag_effect_m = np.array([transect.wVel.slLagEffect_m])
+                # Ping type
+                if hasattr(transect.wVel, 'ping_type'):
+                    if type(transect.wVel.ping_type) == str:
+                        self.ping_type = np.tile(transect.wVel.ping_type, self.d_mps.shape)
+                    else:
+                        self.ping_type = transect.wVel.ping_type[np.newaxis, :]
+                else:
+                    self.ping_type = np.tile('U', self.d_mps.shape)
+            else:
+                # One ensemble with multiple cells
+                self.raw_vel_mps = np.moveaxis(transect.wVel.rawVel_mps, 1, 0)
+                self.raw_vel_mps = self.raw_vel_mps.reshape(self.raw_vel_mps.shape[0], self.raw_vel_mps.shape[1], 1)
+                self.corr = np.moveaxis(transect.wVel.corr, 1, 0)
+                self.corr = self.corr.reshape(self.corr.shape[0], self.corr.shape[1], 1)
+                self.rssi = np.moveaxis(transect.wVel.rssi, 1, 0)
+                self.rssi = self.rssi.reshape(self.rssi.shape[0], self.rssi.shape[1], 1)
+                self.valid_data = np.moveaxis(transect.wVel.validData, 1, 0)
+                self.valid_data = self.valid_data.reshape(self.valid_data.shape[0], self.valid_data.shape[1], 1)
+                self.u_earth_no_ref_mps = transect.wVel.uEarthNoRef_mps
+                self.u_earth_no_ref_mps = self.u_earth_no_ref_mps.reshape(self.u_earth_no_ref_mps.shape[0], 1)
+                self.v_earth_no_ref_mps = transect.wVel.vEarthNoRef_mps
+                self.v_earth_no_ref_mps = self.v_earth_no_ref_mps.reshape(self.v_earth_no_ref_mps.shape[0], 1)
+                self.u_mps = transect.wVel.u_mps
+                self.u_mps = self.u_mps.reshape(self.u_mps.shape[0], 1)
+                self.v_mps = transect.wVel.v_mps
+                self.v_mps = self.v_mps.reshape(self.v_mps.shape[0], 1)
+                self.u_processed_mps = transect.wVel.uProcessed_mps
+                self.u_processed_mps = self.u_processed_mps.reshape(self.u_processed_mps.shape[0], 1)
+                self.v_processed_mps = transect.wVel.vProcessed_mps
+                self.v_processed_mps = self.v_processed_mps.reshape(self.v_processed_mps.shape[0], 1)
+                self.w_mps = transect.wVel.w_mps
+                self.w_mps = self.w_mps.reshape(self.w_mps.shape[0], 1)
+                self.d_mps = transect.wVel.d_mps
+                self.d_mps = self.d_mps.reshape(self.d_mps.shape[0], 1)
+                self.snr_rng = transect.wVel.snrRng
+                self.snr_rng = self.snr_rng.reshape(self.snr_rng.shape[0], 1)
+                self.cells_above_sl = transect.wVel.cellsAboveSL.astype(bool)
+                self.cells_above_sl = self.cells_above_sl.reshape(self.cells_above_sl.shape[0], 1)
+                self.cells_above_sl_bt = transect.wVel.cellsAboveSLbt.astype(bool)
+                self.cells_above_sl_bt = self.cells_above_sl_bt.reshape(self.cells_above_sl_bt.shape[0], 1)
+                self.sl_lag_effect_m = np.array([transect.wVel.slLagEffect_m])
+                # Ping type
+                if hasattr(transect.wVel, 'ping_type'):
+                    if type(transect.wVel.ping_type) == str:
+                        self.ping_type = np.tile(transect.wVel.ping_type, self.d_mps.shape)
+                    else:
+                        self.ping_type = transect.wVel.ping_type[:, np.newaxis]
+                else:
+                    self.ping_type = np.tile('U', self.d_mps.shape)
+
+        else:
+            n_ensembles = transect.wVel.u_mps.shape[1]
+            n_cells = transect.wVel.u_mps.shape[0]
+            if transect.wVel.rawVel_mps.shape[2] != n_ensembles or transect.wVel.rawVel_mps.shape[1] != n_cells:
+                self.raw_vel_mps = np.moveaxis(transect.wVel.rawVel_mps, 2, 0)
+            else:
+                self.raw_vel_mps = transect.wVel.rawVel_mps
+
+            if transect.wVel.corr.shape[2] != n_ensembles or transect.wVel.corr.shape[1] != n_cells:
+                self.corr = np.moveaxis(transect.wVel.corr, 2, 0)
+            else:
+                self.corr = transect.wVel.corr
+
+            if transect.wVel.rssi.shape[2] != n_ensembles or transect.wVel.rssi.shape[1] != n_cells:
+                self.rssi = np.moveaxis(transect.wVel.rssi, 2, 0)
+            else:
+                self.rssi = transect.wVel.rssi
+
+            if transect.wVel.validData.shape[2] != n_ensembles or transect.wVel.validData.shape[1] != n_cells:
+                self.valid_data = np.moveaxis(transect.wVel.validData, 2, 0)
+            else:
+                self.valid_data = transect.wVel.validData
+            self.u_earth_no_ref_mps = transect.wVel.uEarthNoRef_mps
+            self.v_earth_no_ref_mps = transect.wVel.vEarthNoRef_mps
+            self.u_mps = transect.wVel.u_mps
+            self.v_mps = transect.wVel.v_mps
+            self.u_processed_mps = transect.wVel.uProcessed_mps
+            self.v_processed_mps = transect.wVel.vProcessed_mps
+            self.w_mps = transect.wVel.w_mps
+            self.d_mps = transect.wVel.d_mps
+            self.snr_rng = transect.wVel.snrRng
+            self.cells_above_sl = transect.wVel.cellsAboveSL.astype(bool)
+            self.cells_above_sl_bt = transect.wVel.cellsAboveSLbt.astype(bool)
+            self.sl_lag_effect_m = transect.wVel.slLagEffect_m
+            # Ping type
+            if hasattr(transect.wVel, 'ping_type'):
+                if type(transect.wVel.ping_type) == str:
+                    self.ping_type = np.tile(transect.wVel.ping_type, self.d_mps.shape)
+                else:
+                    self.ping_type = transect.wVel.ping_type
+            else:
+                self.ping_type = np.tile('U', self.d_mps.shape)
+
+        self.valid_data = self.valid_data.astype(bool)
+        # Fix for moving-bed transects that did not have 3D array indices adjusted properly when saved
+        # if self.valid_data.shape[0] == self.u_processed_mps.shape[1]:
+        #     self.valid_data = np.moveaxis(self.valid_data, 0, 2)
+        #     self.raw_vel_mps = np.moveaxis(self.raw_vel_mps, 0, 2)
+        #     self.corr = np.moveaxis(self.corr, 0, 2)
+        #     self.rssi = np.moveaxis(self.rssi, 0, 2)
+        self.frequency = transect.wVel.frequency
+        self.orig_coord_sys = transect.wVel.origCoordSys
+        self.orig_nav_ref = transect.wVel.origNavRef
+        self.rssi_units = transect.wVel.rssiUnits
+        self.water_mode = transect.wVel.waterMode
+        self.blanking_distance_m = transect.wVel.blankingDistance_m
+        self.invalid_index = transect.wVel.invalidIndex
+        if type(transect.wVel.numInvalid) is np.ndarray:
+            self.num_invalid = transect.wVel.numInvalid.tolist()
+        else:
+            self.num_invalid = transect.wVel.numInvalid
+
+        # Settings
+        self.beam_filter = transect.wVel.beamFilter
+        self.d_filter = transect.wVel.dFilter
+        self.d_filter_thresholds = self.struct_to_dict(transect.wVel.dFilterThreshold)
+        self.w_filter = transect.wVel.wFilter
+        self.w_filter_thresholds = self.struct_to_dict(transect.wVel.wFilterThreshold)
+        self.excluded_dist_m = transect.wVel.excludedDist
+        if hasattr(transect.wVel, 'orig_excludedDist'):
+            self.orig_excluded_dist_m = transect.wVel.orig_excludedDist
+        else:
+            self.orig_excluded_dist_m = transect.wVel.excludedDist
+        self.smooth_filter = transect.wVel.smoothFilter
+        self.smooth_speed = transect.wVel.smoothSpeed
+        self.smooth_upper_limit = transect.wVel.smoothUpperLimit
+        self.smooth_lower_limit = transect.wVel.smoothLowerLimit
+        self.snr_filter = transect.wVel.snrFilter
+        self.wt_depth_filter = transect.wVel.wtDepthFilter
+        self.interpolate_ens = transect.wVel.interpolateEns
+        self.interpolate_cells = transect.wVel.interpolateCells
+        self.coord_sys = transect.wVel.coordSys
+        self.nav_ref = transect.wVel.navRef
+        self.sl_cutoff_percent = transect.wVel.slCutoffPer
+        self.sl_cutoff_number = transect.wVel.slCutoffNum
+        self.sl_cutoff_type = transect.wVel.slCutoffType
+
+        # Use measurement for filter
+        if hasattr(transect.wVel, 'use_measurement_thresholds'):
+            self.use_measurement_thresholds = self.struct_to_dict(transect.wVel.use_measurement_thresholds)
+            self.d_meas_thresholds = self.struct_to_dict(transect.wVel.d_meas_thresholds)
+            self.w_meas_thresholds = self.struct_to_dict(transect.wVel.w_meas_thresholds)
+        else:
+            self.use_measurement_thresholds = False
+            self.d_meas_thresholds = {}
+            self.w_meas_thresholds = {}
+
+    @staticmethod
+    def struct_to_dict(struct):
+        """If input is a mat structure it converts it into a dictionary.
+
+        Parameters
+        ----------
+        struct: mat.struct or other
+            Data to be converted
+
+        Returns
+        -------
+        result: dict or other
+            Result of conversion
+        """
+
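+        # scipy.io mat_struct objects expose their field names through the
+        # private _fieldnames attribute; inputs without that attribute raise
+        # AttributeError and are returned unchanged.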
+        try:
+            keys = struct._fieldnames
+            result = {}
+            for key in keys:
+                result[key] = struct.__dict__[key]
+        except AttributeError:
+            result = struct
+        return result
+
+    def change_coord_sys(self, new_coord_sys, sensors, adcp):
+        """This function allows the coordinate system to be changed.
+
+        The current implementation only allows a change to a higher order
+        coordinate system: Beam -> Inst -> Ship -> Earth.
+
+        Parameters
+        ----------
+        new_coord_sys: str
+            New coordinate system (Beam, Inst, Ship, Earth)
+        sensors: Sensors
+            Object of Sensors
+        adcp: InstrumentData
+            Object of instrument data
+        """
+        if type(self.orig_coord_sys) is list or type(self.orig_coord_sys) is np.ndarray:
+            o_coord_sys = self.orig_coord_sys[0].strip()
+        else:
+            o_coord_sys = self.orig_coord_sys.strip()
+
+        orig_sys = None
+        new_sys = None
+
+        if o_coord_sys != new_coord_sys:
+            
+            # Assign the transformation matrix and retrieve the sensor data
+            t_matrix = copy.deepcopy(adcp.t_matrix.matrix)
+            t_matrix_freq = copy.deepcopy(adcp.frequency_khz)
+
+            p = getattr(sensors.pitch_deg, sensors.pitch_deg.selected).data
+            r = getattr(sensors.roll_deg, sensors.roll_deg.selected).data
+            h = getattr(sensors.heading_deg, sensors.heading_deg.selected).data
+            
+            # Modify the transformation matrix and heading, pitch,
+            # and roll values based on the original coordinate
+            # system so that only the needed values are used in
+            # computing the new coordinate system.
+            if o_coord_sys.strip() == 'Beam':
+                orig_sys = 1
+            elif o_coord_sys.strip() == 'Inst':
+                orig_sys = 2
+            elif o_coord_sys.strip() == 'Ship':
+                orig_sys = 3
+                p = np.zeros(h.shape)
+                r = np.zeros(h.shape)
+                t_matrix = np.eye(len(t_matrix))
+            elif o_coord_sys.strip() == 'Earth':
+                orig_sys = 4
+
+            # Assign a value to the new coordinate system
+            if new_coord_sys.strip() == 'Beam':
+                new_sys = 1
+            elif new_coord_sys.strip() == 'Inst':
+                new_sys = 2
+            elif new_coord_sys.strip() == 'Ship':
+                new_sys = 3
+            elif new_coord_sys.strip() == 'Earth':
+                new_sys = 4
+                
+            # Check to ensure the new coordinate system is a higher order than the original system
+            if new_sys - orig_sys > 0:
+                
+                # Compute trig functions for heading, pitch, and roll
+                ch = np.cos(np.deg2rad(h))
+                sh = np.sin(np.deg2rad(h))
+                cp = np.cos(np.deg2rad(p))
+                sp = np.sin(np.deg2rad(p))
+                cr = np.cos(np.deg2rad(r))
+                sr = np.sin(np.deg2rad(r))
+
+                n_ens = self.raw_vel_mps.shape[2]
+                
+                for ii in range(n_ens):
+                    
+                    # Compute matrix for heading, pitch, and roll
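+                    # (hpr_matrix is the combined heading-pitch-roll rotation
+                    # used to rotate instrument or ship coordinates into earth
+                    # coordinates.)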
+                    hpr_matrix = np.array([[((ch[ii] * cr[ii]) + (sh[ii]*sp[ii] * sr[ii])),
+                                            (sh[ii] * cp[ii]),
+                                            ((ch[ii] * sr[ii]) - sh[ii]*sp[ii] * cr[ii])],
+                                           [(-1 * sh[ii] * cr[ii]) + (ch[ii] * sp[ii] * sr[ii]),
+                                            ch[ii] * cp[ii],
+                                            (-1 * sh[ii] * sr[ii])-(ch[ii] * sp[ii] * cr[ii])],
+                                           [(-1.*cp[ii] * sr[ii]),
+                                            sp[ii],
+                                            cp[ii] * cr[ii]]])
+                    
+                    # Transform beam coordinates
+                    if o_coord_sys == 'Beam':
+                        
+                        # Determine frequency index for transformation
+                        if len(t_matrix.shape) > 2:
+                            idx_freq = np.where(t_matrix_freq == self.frequency[ii])
+                            t_mult = np.copy(t_matrix[:, :, idx_freq])
+                        else:
+                            t_mult = np.copy(t_matrix)
+                            
+                        # Get velocity data
+                        vel_beams = np.copy(self.raw_vel_mps[:, :, ii])
+                        
+                        # Apply transformation matrix for 4 beam solutions
+                        temp_t = t_mult.dot(vel_beams)
+                        
+                        # Apply hpr_matrix
+                        temp_thpr = hpr_matrix.dot(temp_t[:3])
+                        temp_thpr = np.vstack([temp_thpr, temp_t[3]])
+                        
+                        # Check for invalid beams
+                        invalid_idx = np.isnan(vel_beams)
+                        
+                        # Identify rows requiring 3 beam solutions
+                        n_invalid_col = np.sum(invalid_idx, axis=0)
+                        col_idx = np.where(n_invalid_col == 1)[0]
+                        
+                        # Compute 3 beam solution, if necessary
+                        if len(col_idx) > 0:
+                            for i3 in range(len(col_idx)):
+                                
+                                # Id invalid beam
+                                vel_3_beam = vel_beams[:, col_idx[i3]]
+                                idx_3_beam = np.where(np.isnan(vel_3_beam))[0]
+                        
+                                # 3 beam solution for non-RiverRay
+                                vel_3_beam_zero = vel_3_beam
+                                vel_3_beam_zero[np.isnan(vel_3_beam)] = 0
+                                vel_error = t_mult[3, :].dot(vel_3_beam_zero)
+                                vel_3_beam[idx_3_beam] = -1 * vel_error / t_mult[3, idx_3_beam]
+                                temp_t = t_mult.dot(vel_3_beam)
+                                
+                                # Apply transformation matrix for 3
+                                # beam solutions
+                                temp_thpr[0:3, col_idx[i3]] = hpr_matrix.dot(temp_t[:3])
+                                temp_thpr[3, col_idx[i3]] = np.nan
+                            
+                    else:
+                        # Get velocity data
+                        vel_raw = np.copy(np.squeeze(self.raw_vel_mps[:, :, ii]))
+                        temp_thpr = np.array(hpr_matrix).dot(vel_raw[:3, :])
+                        temp_thpr = np.vstack([temp_thpr, vel_raw[3, :]])
+                        
+                    # Update object
+                    temp_thpr = temp_thpr.T
+                    self.u_mps[:, ii] = temp_thpr[:, 0]
+                    self.v_mps[:, ii] = temp_thpr[:, 1]
+                    self.w_mps[:, ii] = temp_thpr[:, 2]
+                    self.d_mps[:, ii] = temp_thpr[:, 3]
+
+                # Because RiverRay (RR) has a variable number of bins, the raw
+                # data arrays may be padded with zeros. The next 4 statements
+                # change those zeros to nan.
+                self.u_mps[self.u_mps == 0] = np.nan
+                self.v_mps[self.v_mps == 0] = np.nan
+                self.w_mps[self.w_mps == 0] = np.nan
+                self.d_mps[self.d_mps == 0] = np.nan
+                
+                # Assign processed object properties
+                self.u_processed_mps = np.copy(self.u_mps)
+                self.v_processed_mps = np.copy(self.v_mps)
+                
+                # Assign coordinate system and reference properties
+                self.coord_sys = new_coord_sys
+                self.nav_ref = self.orig_nav_ref
+                    
+            else:
+                
+                # Reset velocity properties to raw values
+                self.u_mps = np.copy(self.raw_vel_mps[0])
+                self.v_mps = np.copy(self.raw_vel_mps[1])
+                self.w_mps = np.copy(self.raw_vel_mps[2])
+                self.d_mps = np.copy(self.raw_vel_mps[3])
+                
+                if adcp.manufacturer == 'TRDI':
+                    self.u_mps[self.u_mps == 0] = np.nan
+                    self.v_mps[self.v_mps == 0] = np.nan
+                    self.w_mps[self.w_mps == 0] = np.nan
+                    self.d_mps[self.d_mps == 0] = np.nan
+                    
+                # Assign processed properties
+                self.u_processed_mps = np.copy(self.u_mps)
+                self.v_processed_mps = np.copy(self.v_mps)
+                
+        else:
+            
+            # Reset velocity properties to raw values
+            self.u_mps = np.copy(self.raw_vel_mps[0])
+            self.v_mps = np.copy(self.raw_vel_mps[1])
+            self.w_mps = np.copy(self.raw_vel_mps[2])
+            self.d_mps = np.copy(self.raw_vel_mps[3])
+            
+            if adcp.manufacturer == 'TRDI':
+                self.u_mps[self.u_mps == 0] = np.nan
+                self.v_mps[self.v_mps == 0] = np.nan
+                self.w_mps[self.w_mps == 0] = np.nan
+                self.d_mps[self.d_mps == 0] = np.nan
+                
+            # Assign processed properties
+            self.u_processed_mps = np.copy(self.u_mps)
+            self.v_processed_mps = np.copy(self.v_mps)
+            
+        if new_coord_sys == 'Earth':
+            self.u_earth_no_ref_mps = np.copy(self.u_mps)
+            self.v_earth_no_ref_mps = np.copy(self.v_mps)
+                
+    def set_nav_reference(self, boat_vel):           
+        """This function sets the navigation reference.
+
+        The current reference is first removed from the velocity and then the
+        selected reference is applied.
+
+        Parameters
+        ----------
+        boat_vel: BoatStructure
+            Object of BoatStructure
+        """
+        
+        # Apply selected navigation reference
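+        # The referenced water velocity is obtained by adding the selected
+        # boat velocity to the unreferenced earth-coordinate water velocity.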
+        boat_select = getattr(boat_vel, boat_vel.selected)
+        if boat_select is not None:
+            self.u_mps = np.add(self.u_earth_no_ref_mps, boat_select.u_processed_mps)
+            self.v_mps = np.add(self.v_earth_no_ref_mps, boat_select.v_processed_mps)
+            self.nav_ref = boat_select.nav_ref
+        else:
+            self.u_mps = repmat([np.nan],
+                                self.u_earth_no_ref_mps.shape[0],
+                                self.u_earth_no_ref_mps.shape[1])
+            self.v_mps = repmat([np.nan],
+                                self.v_earth_no_ref_mps.shape[0],
+                                self.v_earth_no_ref_mps.shape[1])
+            if boat_vel.selected == 'bt_vel':
+                self.nav_ref = 'BT'
+            elif boat_vel.selected == 'gga_vel':
+                self.nav_ref = 'GGA'
+            elif boat_vel.selected == 'vtg_vel':
+                self.nav_ref = 'VTG'
+        
+        valid_data2 = np.copy(self.cells_above_sl)
+        valid_data2[np.isnan(self.u_mps)] = False
+        self.valid_data[1] = valid_data2
+        
+        # Duplicate original to other filters that have yet to be applied
+        self.valid_data[2:] = np.tile(self.valid_data[1], [7, 1, 1])
+        
+        # Combine all filter data and update processed properties
+        self.all_valid_data()
+        
+    def change_heading(self, boat_vel, heading_chng):
+        """Adjusts the velocity vectors for a change in heading due change in
+        magnetic variation or heading offset.
+
+        Parameters
+        ----------
+        boat_vel: BoatData
+            Object of BoatData
+        heading_chng: float
+            Heading change due to change in magvar or offset, in degrees.
+        """
+        u_nr = self.u_earth_no_ref_mps
+        v_nr = self.v_earth_no_ref_mps
+        direction, mag = cart2pol(u_nr, v_nr)
+        u_nr_rotated, v_nr_rotated = pol2cart(direction - np.deg2rad(heading_chng), mag)
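+        # The rotation preserves speed (mag) and shifts only the direction of
+        # each velocity vector by the heading change.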
+        self.u_earth_no_ref_mps = u_nr_rotated
+        self.v_earth_no_ref_mps = v_nr_rotated
+
+        # Reprocess water data to get navigation reference corrected velocities
+        self.set_nav_reference(boat_vel)
+        
+    def change_heading_source(self, boat_vel, heading):
+        """Applies changes to water velocity when the heading source is changed.
+
+        Typically called when the heading source is changed between external and internal.
+
+        Parameters
+        ----------
+        boat_vel: BoatData
+            Object of BoatData
+        heading: np.array(float)
+            New heading data, in degrees
+        """
+        u_nr = self.u_earth_no_ref_mps
+        v_nr = self.v_earth_no_ref_mps
+        direction, mag = cart2pol(u_nr, v_nr)
+        u_nr_rotated, v_nr_rotated = pol2cart(direction
+                                              - np.deg2rad(repmat(heading, len(mag), 1)), mag)
+        self.u_earth_no_ref_mps = u_nr_rotated
+        self.v_earth_no_ref_mps = v_nr_rotated
+
+        self.set_nav_reference(boat_vel)
+            
+    def apply_interpolation(self, transect, ens_interp='None', cells_interp='None'):
+        """Coordinates the application of water velocity interpolation.
+
+        Parameters
+        ----------
+        transect: TransectData
+            Object of TransectData
+        ens_interp: str
+            Specifies type of interpolation for ensembles
+        cells_interp: str
+            Specifies type of interpolation for cells
+        """
+
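+        # Example usage (hypothetical variable names): reapply stored settings
+        # with water.apply_interpolation(transect), or override both methods
+        # with water.apply_interpolation(transect, ens_interp='Linear',
+        # cells_interp='Linear').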
+        self.u_processed_mps = np.tile([np.nan], self.u_mps.shape)
+        self.v_processed_mps = np.tile([np.nan], self.v_mps.shape)
+        self.u_processed_mps[self.valid_data[0]] = self.u_mps[self.valid_data[0]]
+        self.v_processed_mps[self.valid_data[0]] = self.v_mps[self.valid_data[0]]
+        
+        # Determine interpolation methods to apply
+        if ens_interp == 'None':
+            ens_interp = self.interpolate_ens
+        else:
+            self.interpolate_ens = ens_interp
+
+        if cells_interp == 'None':
+            cells_interp = self.interpolate_cells
+        else:
+            self.interpolate_cells = cells_interp
+
+        if ens_interp == 'abba' or cells_interp == 'abba':
+            self.interpolate_ens = 'abba'
+            self.interpolate_cells = 'abba'
+            self.interpolate_abba(transect)
+        else:
+            if ens_interp == 'None':
+                # Sets invalid data to nan with no interpolation
+                self.interpolate_ens_none()
+            elif ens_interp == 'ExpandedT':
+                # The actual ExpandedT interpolation is performed in class QComp;
+                # interpolate_ens_next provides the placeholder values
+                self.interpolate_ens_next()
+            elif ens_interp == 'Hold9':
+                # Interpolates using SonTek's method of holding last valid for up to 9 samples
+                self.interpolate_ens_hold_last_9()
+            elif ens_interp == 'Hold':
+                # Interpolates by holding last valid indefinitely
+                self.interpolate_ens_hold_last()
+            elif ens_interp == 'Linear':
+                # Interpolates using linear interpolation
+                self.interpolate_ens_linear(transect)
+            elif ens_interp == 'TRDI':
+                # TRDI is applied in discharge
+                self.interpolate_ens_none()
+                self.interpolate_ens = ens_interp
+
+            # Apply specified cell interpolation method
+            if cells_interp == 'None':
+                # Sets invalid data to nan with no interpolation
+                self.interpolate_cells_none()
+            elif cells_interp == 'TRDI':
+                # Use TRDI method to interpolate invalid interior cells
+                self.interpolate_cells_trdi(transect)
+            elif cells_interp == 'Linear':
+                # Uses linear interpolation to interpolate velocity for all
+                # invalid bins including those in invalid ensembles
+                # up to 9 samples
+                self.interpolate_cells_linear(transect)
+        
+    def apply_filter(self, transect, beam=None, difference=None, difference_threshold=None, vertical=None,
+                     vertical_threshold=None, other=None, excluded=None, snr=None, wt_depth=None):
+        """Coordinates application of specified filters and subsequent interpolation.
+        Parameters
+        ----------
+        transect: TransectData
+            Object of TransectData
+        beam: int
+            Setting for beam filter (3, 4, or -1)
+        difference: str
+            Setting for difference filter (Auto, Off, Manual)
+        difference_threshold: float
+            Threshold value for Manual setting.
+        vertical: str
+            Setting for vertical filter (Auto, Off, Manual)
+        vertical_threshold: float
+            Threshold value for Manual setting.
+        other:
+            Setting for other filters (Off, Auto)
+        excluded:
+            Excluded distance below the transducer, in m
+        snr: str
+            SNR filter setting (Auto, Off)
+        wt_depth: bool
+            Setting for marking water data invalid if no available depth
+        """
+
+        # Determine filters to apply
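+        # The set below collapses duplicates, so it has more than one element
+        # only if at least one filter argument was specified (unspecified
+        # arguments are all None).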
+        if len({beam, difference, difference_threshold, vertical, vertical_threshold, other, excluded, snr,
+                wt_depth}) > 1:
+
+            if difference is not None:
+                if difference == 'Manual':
+                    self.filter_diff_vel(setting=difference, threshold=difference_threshold)
+                else:
+                    self.filter_diff_vel(setting=difference)
+            if vertical is not None:
+                if vertical == 'Manual':
+                    self.filter_vert_vel(setting=vertical, threshold=vertical_threshold)
+                else:
+                    self.filter_vert_vel(setting=vertical)
+            if other is not None:
+                self.filter_smooth(transect=transect, setting=other)
+            if excluded is not None:
+                self.filter_excluded(transect=transect, setting=excluded)
+            if snr is not None:
+                self.filter_snr(setting=snr)
+            if wt_depth is not None:
+                self.filter_wt_depth(transect=transect, setting=wt_depth)
+            if beam is not None:
+                self.filter_beam(setting=beam, transect=transect)
+        else:
+            self.filter_diff_vel(setting=self.d_filter, threshold=self.d_filter_thresholds)
+            self.filter_vert_vel(setting=self.w_filter, threshold=self.w_filter_thresholds)
+            self.filter_smooth(transect=transect, setting=self.smooth_filter)
+            self.filter_excluded(transect=transect, setting=self.excluded_dist_m)
+            self.filter_snr(setting=self.snr_filter)
+            self.filter_beam(setting=self.beam_filter, transect=transect)
+
+        # After filters have been applied, interpolate to estimate values for invalid data.
+        # self.apply_interpolation(transect=transect)
+        
+    def sos_correction(self, ratio):
+        """Corrects water velocities for a change in speed of sound.
+
+        Parameters
+        ----------
+        ratio: float
+            Ratio of new speed of sound to old speed of sound
+        """
+
+        # Correct water velocities
+        self.u_mps = self.u_mps * ratio
+        self.v_mps = self.v_mps * ratio
+        self.w_mps = self.w_mps * ratio
+        self.u_earth_no_ref_mps = self.u_earth_no_ref_mps * ratio
+        self.v_earth_no_ref_mps = self.v_earth_no_ref_mps * ratio
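+        # For example, if the speed of sound changes from 1500 m/s to 1485 m/s,
+        # ratio = 1485 / 1500 = 0.99 and all velocities are reduced by 1%.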
+
+    def adjust_side_lobe(self, transect):
+        """Adjust the side lobe cutoff for vertical beam and interpolated depths.
+
+        Parameters
+        ----------
+        transect: TransectData
+            Object of TransectData
+        """
+
+        selected = transect.depths.selected
+        depth_selected = getattr(transect.depths, transect.depths.selected)
+        cells_above_slbt = np.copy(self.cells_above_sl_bt)
+        
+        # Compute cutoff for vertical beam depths
+        if selected == 'vb_depths':
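+            # Project the depth below the transducer onto the extent
+            # unaffected by side lobes (cos of the beam angle), subtract the
+            # lag effect, and reference to the water surface by adding back
+            # the draft.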
+            sl_cutoff_vb = (depth_selected.depth_processed_m - depth_selected.draft_use_m) \
+                 * np.cos(np.deg2rad(transect.adcp.beam_angle_deg)) \
+                - self.sl_lag_effect_m + depth_selected.draft_use_m
+            cells_above_slvb = np.round(depth_selected.depth_cell_depth_m, 2) < np.round(sl_cutoff_vb, 2)
+            idx = np.where(np.logical_not(transect.depths.bt_depths.valid_data))
+            cells_above_slbt[:, idx] = cells_above_slvb[:, idx]
+            cells_above_sl = np.logical_and(cells_above_slbt, cells_above_slvb)
+        else:
+            cells_above_sl = cells_above_slbt
+
+        # Compute cutoff from interpolated depths
+        n_valid_beams = np.nansum(depth_selected.valid_beams, 0)
+
+        # Find ensembles with no valid beam depths
+        idx = np.where(n_valid_beams == 0)[0]
+
+        # Determine side lobe cutoff for ensembles with no valid beam depths
+        if len(idx) > 0:
+            if len(self.sl_lag_effect_m) > 1:
+                sl_lag_effect_m = self.sl_lag_effect_m[idx]
+            else:
+                sl_lag_effect_m = self.sl_lag_effect_m
+                
+            sl_cutoff_int = (depth_selected.depth_processed_m[idx] - depth_selected.draft_use_m) \
+                * np.cos(np.deg2rad(transect.adcp.beam_angle_deg)) - sl_lag_effect_m + \
+                depth_selected.draft_use_m
+            for i in range(len(idx)):
+                cells_above_sl[:, idx[i]] = np.less(depth_selected.depth_cell_depth_m[:, idx[i]], sl_cutoff_int[i])
+            
+        # Find ensembles with at least 1 invalid beam depth
+        idx = np.where(np.logical_and(n_valid_beams < 4, n_valid_beams > 0))[0]
+        if len(idx) > 0:
+            if len(self.sl_lag_effect_m) > 1:
+                sl_lag_effect_m = self.sl_lag_effect_m[idx]
+            else:
+                sl_lag_effect_m = self.sl_lag_effect_m
+                
+            sl_cutoff_int = (depth_selected.depth_processed_m[idx] - depth_selected.draft_use_m)\
+                * np.cos(np.deg2rad(transect.adcp.beam_angle_deg)) \
+                - sl_lag_effect_m + depth_selected.draft_use_m
+            cells_above_sl_int = np.tile(True, cells_above_sl.shape)
+
+            for i in range(len(idx)):
+                cells_above_sl_int[:, idx[i]] = np.less(depth_selected.depth_cell_depth_m[:, idx[i]], sl_cutoff_int[i])
+            
+            cells_above_sl[cells_above_sl_int == 0] = 0
+        
+        self.cells_above_sl = np.copy(cells_above_sl)
+        valid_vel = np.logical_not(np.isnan(self.u_mps))
+        self.valid_data[1, :, :] = self.cells_above_sl * valid_vel
+        self.all_valid_data()
+        self.compute_snr_rng()
+        self.apply_filter(transect)
+        # self.apply_interpolation(transect)
+
+    def all_valid_data(self):
+        """Combines the results of all filters to determine a final set of valid data"""
+
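+        # A cell is valid overall only when every individual filter (rows 1-8
+        # of valid_data) marks it valid, i.e. the mean of the filter rows is 1.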
+        n_filters = len(self.valid_data[1:, 0, 0])
+        sum_filters = np.nansum(self.valid_data[1:, :, :], 0) / n_filters
+        valid = np.tile([True], self.cells_above_sl.shape)
+        valid[sum_filters < 1] = False
+        self.valid_data[0] = valid
+        
+    def filter_beam(self, setting, transect=None):
+        """Applies beam filter to water velocity data.
+
+        The determination of invalid data depends on whether
+        3-beam or 4-beam solutions are acceptable. This function can be applied by
+        specifying 3 or 4 beam solutions, or by setting self.beam_filter to -1,
+        which triggers an automatic mode. The automatic mode finds all 3-beam
+        solutions and then compares the velocity of each 3-beam solution to the
+        nearest 4-beam solutions. If the 3-beam solution is within 50% of the
+        average of the neighboring 4-beam solutions the data are deemed valid;
+        if not, they are marked invalid. Thus, in automatic mode only those 3-beam
+        solutions that are sufficiently different from the 4-beam solutions are
+        marked invalid. If the number of beams is specified manually, it is applied
+        uniformly for the whole transect.
+
+        Parameters
+        ----------
+        setting: int
+            Setting for beam filter (3, 4, or -1)
+        transect: TransectData
+            Object of TransectData
+        """
+        
+        self.beam_filter = setting
+        
+        # In manual mode (3 or 4) determine number of raw invalid and number of 2 beam solutions
+        if self.beam_filter > 0:
+            
+            # Find invalid raw data
+            valid_vel = np.array([self.cells_above_sl] * 4)
+            valid_vel[np.isnan(self.raw_vel_mps)] = 0
+            
+            # Determine how many beams or transformed coordinates are valid
+            valid_vel_sum = np.sum(valid_vel, 0)
+            valid = copy.deepcopy(self.cells_above_sl)
+            
+            # Compare number of valid beams or velocity coordinates to filter value
+            valid[np.logical_and((valid_vel_sum < self.beam_filter), (valid_vel_sum > 2))] = False
+            
+            # Save logical of valid data to object
+            self.valid_data[5, :, :] = valid
+
+            # Combine all filter data and update processed properties
+            self.all_valid_data()
+
+        else:
+
+            # Apply automatic filter
+            self.automatic_beam_filter_abba_interpolation(transect)
+
+    def automatic_beam_filter_abba_interpolation(self, transect):
+        """Applies abba interpolation to allow comparison of interpolated and 3-beam solutions.
+
+        Parameters
+        ----------
+        transect: TransectData
+            Object of TransectData
+        """
+
+        # Create array indicating which cells do not have 4-beam solutions and all cells below side lobe are nan
+        temp = copy.deepcopy(self)
+        temp.filter_beam(4)
+        valid_bool = temp.valid_data[5, :, :]
+        valid = valid_bool.astype(float)
+        valid[np.logical_not(temp.cells_above_sl)] = np.nan
+
+        # Initialize processed velocity data variables
+        temp.u_processed_mps = copy.deepcopy(temp.u_mps)
+        temp.v_processed_mps = copy.deepcopy(temp.v_mps)
+
+        # Set invalid data to nan in processed velocity data variables
+        temp.u_processed_mps[np.logical_not(valid)] = np.nan
+        temp.v_processed_mps[np.logical_not(valid)] = np.nan
+
+        # Find indices of cells with 3 beams solutions
+        rows_3b, cols_3b = np.where(np.abs(valid) == 0)
+
+        # Check for presence of 3-beam solutions
+        if len(rows_3b) > 0:
+            # Initialize velocity data variables
+            u = copy.deepcopy(self.u_mps)
+            v = copy.deepcopy(self.v_mps)
+
+            u = u[:, transect.in_transect_idx]
+            v = v[:, transect.in_transect_idx]
+
+            u[np.logical_not(temp.valid_data[5, :, :])] = np.nan
+            v[np.logical_not(temp.valid_data[5, :, :])] = np.nan
+            interpolated_data = self.compute_abba_interpolation(wt_data=temp,
+                                                                data_list=[u, v],
+                                                                valid=temp.valid_data[5, :, :],
+                                                                transect=transect)
+
+            if interpolated_data is not None:
+                # Compute interpolated to measured ratios and apply filter criteria
+                for n in range(len(interpolated_data[0])):
+                    u_ratio = (temp.u_mps[interpolated_data[0][n][0]] / interpolated_data[0][n][1]) - 1
+                    v_ratio = (temp.v_mps[interpolated_data[1][n][0]] / interpolated_data[1][n][1]) - 1
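+                    # Per the 50% criterion, the 3-beam solution is retained
+                    # only if both velocity components are within 50% of the
+                    # abba-interpolated estimates.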
+                    if np.abs(u_ratio) < 0.5 and np.abs(v_ratio) < 0.5:
+                        valid_bool[interpolated_data[0][n][0]] = True
+                    else:
+                        valid_bool[interpolated_data[0][n][0]] = False
+
+                # Update object with filter results
+                self.valid_data[5, :, :] = valid_bool
+            else:
+                self.valid_data[5, :, :] = temp.valid_data[5, :, :]
+        else:
+            self.valid_data[5, :, :] = temp.valid_data[5, :, :]
+
+        # Combine all filter data and update processed properties
+        self.all_valid_data()
+
+    def filter_diff_vel(self, setting, threshold=None):
+        """Applies filter to difference velocity.
+
+        Applies either manual or automatic filtering of the difference (error)
+        velocity. The automatic mode assumes that the water error velocity
+        should follow a Gaussian distribution, so thresholds several times the
+        spread of the data should encompass all valid data. The implementation
+        uses a robust spread estimate: limits of median +/- multiplier * IQR
+        are recomputed in an iterative process until filtering out additional
+        data does not change the computed IQR.
+
+        Parameters
+        ----------
+        setting: str
+            Filter setting (Auto, Off, Manual)
+        threshold: float
+            Threshold value for Manual setting.
+        """
+
+        # Set difference filter properties
+        self.d_filter = setting
+        if threshold is not None:
+            self.d_filter_thresholds = threshold
+
+        # Get difference data from object
+        d_vel = copy.deepcopy(self.d_mps)
+
+        # NOTE: Versions prior to 1.01 did not apply this step to remove data below the side lobe cutoff
+        d_vel[np.logical_not(self.cells_above_sl)] = np.nan
+
+        d_vel_min_ref = None
+        d_vel_max_ref = None
+
+        bad_idx_rows = np.array([]).astype(int)
+        bad_idx_cols = np.array([]).astype(int)
+
+        # Apply selected method
+        if self.d_filter == 'Manual':
+            d_vel_max_ref = np.abs(self.d_filter_thresholds)
+            d_vel_min_ref = -1 * d_vel_max_ref
+            # Set valid data row 2 for difference velocity filter results
+            bad_idx_rows, bad_idx_cols = np.where(np.logical_or(nan_greater(d_vel, d_vel_max_ref),
+                                                                nan_less(d_vel, d_vel_min_ref)))
+        elif self.d_filter == 'Off':
+            d_vel_max_ref = np.nanmax(np.nanmax(d_vel)) + 1
+            d_vel_min_ref = np.nanmin(np.nanmin(d_vel)) - 1
+            # Set valid data row 2 for difference velocity filter results
+            bad_idx_rows, bad_idx_cols = np.where(np.logical_or(nan_greater(d_vel, d_vel_max_ref),
+                                                                nan_less(d_vel, d_vel_min_ref)))
+            self.d_filter_thresholds = d_vel_max_ref
+
+        elif self.d_filter == 'Auto':
+            # Apply threshold from entire measurement processing to each transect
+            if self.use_measurement_thresholds:
+                self.d_filter_thresholds = self.d_meas_thresholds
+                for p_type in self.d_meas_thresholds.keys():
+                    data_max_ref = self.d_meas_thresholds[p_type][0]
+                    data_min_ref = self.d_meas_thresholds[p_type][1]
+                    data = np.copy(self.d_mps)
+                    data[self.ping_type != p_type] = np.nan
+                    idx_invalid_rows, idx_invalid_cols = np.where(np.logical_or(np.greater(data, data_max_ref),
+                                                                                np.less(data, data_min_ref)))
+                    if len(idx_invalid_rows) > 0:
+                        if len(bad_idx_rows) > 0:
+                            bad_idx_rows = np.hstack((bad_idx_rows, idx_invalid_rows))
+                            bad_idx_cols = np.hstack((bad_idx_cols, idx_invalid_cols))
+                        else:
+                            bad_idx_rows = idx_invalid_rows
+                            bad_idx_cols = idx_invalid_cols
+
+            # Compute unique threshold for each transect using ping types
+            elif self.ping_type.size > 1:
+
+                # Identify the ping types used in the transect
+                p_types = np.unique(self.ping_type)
+
+                thresholds = {}
+
+                # Apply the filter to each ping type
+                for p_type in p_types:
+                    # Copy of difference velocities
+                    vel_2_filter = copy.deepcopy(d_vel)
+                    # Remove data not associated with the specified ping type
+                    vel_2_filter[self.ping_type != p_type] = np.nan
+                    # Apply filter to data of a single ping type
+                    idx_invalid_rows, idx_invalid_cols, threshold = self.iqr_filter(vel_2_filter)
+                    # Combine indices of invalid data for all ping types
+                    if len(idx_invalid_rows) > 0:
+                        if len(bad_idx_rows) > 0:
+                            bad_idx_rows = np.hstack((bad_idx_rows, idx_invalid_rows))
+                            bad_idx_cols = np.hstack((bad_idx_cols, idx_invalid_cols))
+                        else:
+                            bad_idx_rows = idx_invalid_rows
+                            bad_idx_cols = idx_invalid_cols
+                    thresholds[p_type] = threshold
+                self.d_filter_thresholds = thresholds
+
+            # Compute unique threshold for each transect when no ping types are available
+            else:
+                self.ping_type = np.array(['U'])
+                bad_idx_rows, bad_idx_cols, threshold = self.iqr_filter(d_vel)
+                self.d_filter_thresholds = {'U': threshold}
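+                # (The thresholds dict maps ping type to a [max_ref, min_ref]
+                # pair, e.g. {'U': [0.45, -0.45]}; the numbers shown are
+                # illustrative only.)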
+
+        valid = copy.deepcopy(self.cells_above_sl)
+        if len(bad_idx_rows) > 0:
+            valid[bad_idx_rows, bad_idx_cols] = False
+        # TODO Seems like if the difference velocity doesn't exist due to a 3-beam solution it shouldn't be
+        #  flagged as invalid however this is the way it was in Matlab. May change this in future.
+        # valid[np.isnan(self.d_mps)] = True
+        self.valid_data[2, :, :] = valid
+
+        # Combine all filter data and update processed properties
+        self.all_valid_data()
+
+    @staticmethod
+    def iqr_filter(data, multiplier=5):
+        """Apply the iqr filter to wt data.
+
+        Parameters
+        ----------
+        data: np.ndarray(float)
+            Array of difference or vertical velocity data
+        multiplier: int
+            Number of IQR's to use to set the threshold
+
+        Returns
+        -------
+        bad_idx_rows: np.ndarray(int)
+            Row indices of invalid data
+        bad_idx_cols: np.ndarray(int)
+            Column indices of invalid data
+        threshold: float
+            Maximum threshold
+        """
+
+        data_max_ref = np.nan
+        data_min_ref = np.nan
+
+        # Check to make sure there are data to process
+        if data.size > 0 and np.any(np.logical_not(np.isnan(data))):
+
+            # Initialize variables
+            data_orig = np.copy(data)
+
+            iqr_diff = 1
+            i = -1
+            # Loop until no additional data are removed
+            while iqr_diff != 0 and i < 1000:
+                i = i + 1
+
+                # Compute the interquartile range (IQR)
+                data_iqr = iqr(data)
+
+                # Compute maximum and minimum thresholds
+                data_max_ref = np.nanmedian(data) + multiplier * data_iqr
+                data_min_ref = np.nanmedian(data) - multiplier * data_iqr
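+                # (With the default multiplier of 5 these limits span
+                # median +/- 5 * IQR, a robust analog of mean +/- k * std dev.)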
+
+                # Identify valid and invalid data
+                data_bad_rows, data_bad_cols = np.where(np.logical_or(nan_greater(data, data_max_ref),
+                                                                      nan_less(data, data_min_ref)))
+                # Update filtered data array
+                data[data_bad_rows, data_bad_cols] = np.nan
+
+                # Determine differences due to last filter iteration
+                if len(data) > 0:
+                    data_iqr2 = iqr(data)
+                    iqr_diff = data_iqr2 - data_iqr
+                else:
+                    iqr_diff = 0
+
+            # Determine row and column index of invalid cells with invalid data
+            bad_idx_rows, bad_idx_cols = np.where(np.logical_or(nan_greater(data_orig, data_max_ref),
+                                                                nan_less(data_orig, data_min_ref)))
+        else:
+            # No valid data to process; comparisons with nan are always False,
+            # so the returned index arrays are empty
+            bad_idx_rows, bad_idx_cols = np.where(np.logical_or(np.greater(data, -1),
+                                                                np.less(data, 1)))
+
+        threshold = [data_max_ref, data_min_ref]
+
+        return bad_idx_rows, bad_idx_cols, threshold
+
+    @staticmethod
+    def meas_iqr_filter(data, multiplier=5):
+        """Apply the iqr filter to wt data.
+
+        Parameters
+        ----------
+        data: np.ndarray(float)
+            Array of difference or vertical velocity data
+        multiplier: int
+            Number of IQR's to use to set the threshold
+
+        Returns
+        -------
+        thresholds: tuple
+            Maximum and minimum thresholds for filter
+        """
+
+        # Initialize variables
+        data_max_ref = np.nan
+        data_min_ref = np.nan
+
+        # Check to make sure there are data to process
+        if data.size > 0 and np.any(np.logical_not(np.isnan(data))):
+            iqr_diff = 1
+            i = -1
+            # Loop until no additional data are removed
+            while iqr_diff != 0 and i < 1000:
+                i = i + 1
+
+                # Compute the interquartile range (IQR)
+                data_iqr = iqr(data)
+
+                # Compute maximum and minimum thresholds
+                data_max_ref = np.nanmedian(data) + multiplier * data_iqr
+                data_min_ref = np.nanmedian(data) - multiplier * data_iqr
+
+                # Identify valid and invalid data
+                bad_idx = np.where(np.logical_or(nan_greater(data, data_max_ref),
+                                                 nan_less(data, data_min_ref)))
+                # Update filtered data array
+                data[bad_idx] = np.nan
+
+                # Determine differences due to last filter iteration
+                if len(data) > 0:
+                    data_iqr2 = iqr(data)
+                    iqr_diff = data_iqr2 - data_iqr
+                else:
+                    iqr_diff = 0
+
+        thresholds = [data_max_ref, data_min_ref]
+
+        return thresholds
+
+    def filter_vert_vel(self, setting, threshold=None):
+        """Applies filter to vertical velocity.
+
+        Applies either manual or automatic filter of the difference (error) velocity.  The automatic
+        mode is based on the following: This filter is based on the assumption that the water error
+        velocity should follow a gaussian distribution.  Therefore, 4 standard deviations should
+        encompass all of the valid data.  The standard deviation and limits (multplier * standard deviation)
+        are computed in an iterative process until filtering out additional data does not change
+        the computed standard deviation.
+
+        Parameters
+        ---------
+        setting: str
+            Filter setting (Auto, Off, Manual)
+        threshold: float
+            Threshold value for Manual setting."""
+        
+        # Set vertical velocity filter properties
+        self.w_filter = setting
+        if threshold is not None:
+            self.w_filter_thresholds = threshold
+
+        # Get vertical velocity data from object
+        w_vel = copy.deepcopy(self.w_mps)
+
+        # NOTE: Versions prior to 1.01 did not apply this step to remove data below the side lobe cutoff
+        w_vel[np.logical_not(self.cells_above_sl)] = np.nan
+
+        w_vel_min_ref = None
+        w_vel_max_ref = None
+
+        bad_idx_rows = np.array([]).astype(int)
+        bad_idx_cols = np.array([]).astype(int)
+
+        # Apply selected method
+        if self.w_filter == 'Manual':
+            w_vel_max_ref = np.abs(self.w_filter_thresholds)
+            w_vel_min_ref = -1 * w_vel_max_ref
+            # Identify valid and invalid data
+            bad_idx_rows, bad_idx_cols = np.where(np.logical_or(nan_greater(w_vel, w_vel_max_ref),
+                                                                nan_less(w_vel, w_vel_min_ref)))
+        elif self.w_filter == 'Off':
+            w_vel_max_ref = np.nanmax(np.nanmax(w_vel)) + 1
+            w_vel_min_ref = np.nanmin(np.nanmin(w_vel)) - 1
+            # Identify valid and invalid data
+            bad_idx_rows, bad_idx_cols = np.where(np.logical_or(nan_greater(w_vel, w_vel_max_ref),
+                                                                nan_less(w_vel, w_vel_min_ref)))
+            self.w_filter_thresholds = w_vel_max_ref
+
+        elif self.w_filter == 'Auto':
+            # Apply threshold from entire measurement processing to each transect
+            if self.use_measurement_thresholds:
+                self.w_filter_thresholds = self.w_meas_thresholds
+                for p_type in self.w_meas_thresholds.keys():
+                    data_max_ref = self.w_meas_thresholds[p_type][0]
+                    data_min_ref = self.w_meas_thresholds[p_type][1]
+                    data = np.copy(self.w_mps)
+                    data[self.ping_type != p_type] = np.nan
+                    idx_invalid_rows, idx_invalid_cols = np.where(np.logical_or(np.greater(data, data_max_ref),
+                                                                                np.less(data, data_min_ref)))
+                    if len(idx_invalid_rows) > 0:
+                        if len(bad_idx_rows) > 0:
+                            bad_idx_rows = np.hstack((bad_idx_rows, idx_invalid_rows))
+                            bad_idx_cols = np.hstack((bad_idx_cols, idx_invalid_cols))
+                        else:
+                            bad_idx_rows = idx_invalid_rows
+                            bad_idx_cols = idx_invalid_cols
+
+            # Compute unique threshold for each transect using ping types
+            elif self.ping_type.size > 1:
+                # Initialize variables
+                # Identify the ping types used in the transect
+                p_types = np.unique(self.ping_type)
+
+                thresholds = {}
+
+                # Apply the filter to each ping type
+                for p_type in p_types:
+                    # Copy of vertical velocities
+                    vel_2_filter = copy.deepcopy(w_vel)
+                    # Remove data not associated with the specified ping type
+                    vel_2_filter[self.ping_type != p_type] = np.nan
+                    # Apply filter to data of a single ping type
+                    idx_invalid_rows, idx_invalid_cols, threshold = self.iqr_filter(vel_2_filter)
+                    # Combine indices of invalid data for all ping types
+                    if len(idx_invalid_rows) > 0:
+                        if len(bad_idx_rows) > 0:
+                            bad_idx_rows = np.hstack((bad_idx_rows, idx_invalid_rows))
+                            bad_idx_cols = np.hstack((bad_idx_cols, idx_invalid_cols))
+                        else:
+                            bad_idx_rows = idx_invalid_rows
+                            bad_idx_cols = idx_invalid_cols
+                    thresholds[p_type] = threshold
+                self.w_filter_thresholds = thresholds
+            # Compute unique threshold for each transect when no ping types are available
+            else:
+                self.ping_type = np.array(['U'])
+                bad_idx_rows, bad_idx_cols, threshold = self.iqr_filter(w_vel)
+                self.w_filter_thresholds = {'U': threshold}
+
+        valid = copy.deepcopy(self.cells_above_sl)
+        if len(bad_idx_rows) > 0:
+            valid[bad_idx_rows, bad_idx_cols] = False
+        self.valid_data[3, :, :] = valid
+
+        # Set threshold property
+        if np.ma.is_masked(w_vel_max_ref):
+            self.w_filter_thresholds = np.nan
+
+        # Combine all filter data and update processed properties
+        self.all_valid_data()
+                
+    def filter_smooth(self, transect, setting):
+        """Filter water speed using a smooth filter.
+
+        Running Standard Deviation filter for water speed
+        This filter employs a running trimmed standard deviation filter to
+        identify and mark spikes in the water speed. First a robust Loess
+        smooth is fitted to the water speed time series and residuals between
+        the raw data and the smoothed line are computed. The trimmed standard
+        deviation is computed by selecting the number of residuals specified by
+        "halfwidth" before the target point and after the target point, but not
+        including the target point. These values are then sorted, and the points
+        with the highest and lowest values are removed from the subset, and the
+        standard deviation of the trimmed subset is computed. The filter
+        criteria are determined by multiplying the standard deviation by a user
+        specified multiplier. These criteria define the maximum and minimum
+        acceptable residuals. Data falling outside the criteria are set to nan.
+          
+        Recommended filter settings are:
+        filter_width = 10
+        half_width = 10
+        multiplier = 9
+
+        Parameters
+        ----------
+        transect: TransectData
+            Object of TransectData
+        setting: str
+            Set filter (Auto, Off)
+        """
+        
+        self.smooth_filter = setting
+        upper_limit = None
+        lower_limit = None
+        wt_bad_idx = None
+        
+        # Compute ens_time
+        ens_time = np.nancumsum(transect.date_time.ens_duration_sec)
+        
+        # Determine if smooth filter should be applied
+        if self.smooth_filter == 'Auto':
+            
+            # Water velocity components
+            w_vele = self.u_mps
+            w_veln = self.v_mps
+            
+            # Set filter parameters
+            filter_width = 10
+            half_width = 10
+            multiplier = 9
+            cycles = 3
+
+            # Compute mean speed and direction of water
+            w_vele_avg = np.nanmean(w_vele, 0)
+            w_veln_avg = np.nanmean(w_veln, 0)
+            _, speed = cart2pol(w_vele_avg, w_veln_avg)
+            
+            # Compute residuals from a robust Loess smooth
+            speed_smooth = rloess(ens_time, speed, filter_width)
+            speed_res = speed - speed_smooth
+            
+            # Apply a trimmed standard deviation filter multiple times
+            for i in range(cycles):
+                fill_array = BoatData.run_std_trim(half_width, speed_res.T)
+                
+                # Compute filter bounds
+                upper_limit = speed_smooth + multiplier * fill_array
+                lower_limit = speed_smooth - multiplier * fill_array
+                
+                # Apply filter to residuals
+                wt_bad_idx = np.where(np.logical_or(speed > upper_limit, speed < lower_limit))[0]
+                speed_res[wt_bad_idx] = np.nan
+            
+            valid = np.copy(self.cells_above_sl)
+            
+            valid[:, wt_bad_idx] = False
+            self.valid_data[4, :, :] = valid
+            self.smooth_upper_limit = upper_limit
+            self.smooth_lower_limit = lower_limit
+            self.smooth_speed = speed_smooth
+        
+        else:
+            # No filter applied
+            self.valid_data[4, :, :] = np.copy(self.cells_above_sl)
+            self.smooth_upper_limit = np.nan
+            self.smooth_lower_limit = np.nan
+            self.smooth_speed = np.nan
+            
+        self.all_valid_data()
+     
+    def filter_snr(self, setting):
+        """Filters SonTek data based on SNR.
+
+        Computes the average SNR for all cells above the side lobe cutoff for each beam in
+        each ensemble. If the range in average SNR in an ensemble is greater than 12 dB the
+        water velocity in that ensemble is considered invalid.
+
+        Parameters
+        ----------
+        setting: str
+            Setting for filter (Auto, Off)
+        """
+
+        self.snr_filter = setting  
+        
+        if setting == 'Auto':
+            if self.snr_rng is not None:
+                bad_snr_idx = np.greater(self.snr_rng, 12)
+                valid = np.copy(self.cells_above_sl)
+                
+                bad_snr_array = np.tile(bad_snr_idx, (valid.shape[0], 1))
+                valid[bad_snr_array] = False
+                self.valid_data[7, :, :] = valid
+
+                # Combine all filter data and update processed properties
+                self.all_valid_data()
+        else:
+            self.valid_data[7, :, :] = np.copy(self.cells_above_sl)
+            self.all_valid_data()
+        
+    def filter_wt_depth(self, transect, setting):
+        """Marks water velocity data invalid if there is no valid or interpolated average depth.
+
+        Parameters
+        ----------
+        transect: TransectData
+            Object of TransectData
+        setting: bool
+            Setting for filter (True, False)
+        """
+        self.wt_depth_filter = setting
+        valid = np.copy(self.cells_above_sl)
+        
+        if setting:
+            trans_select = getattr(transect.depths, transect.depths.selected)
+            valid[:, np.isnan(trans_select.depth_processed_m)] = False
+        self.valid_data[8, :, :] = valid
+        
+        self.all_valid_data()
+        
+    def filter_excluded(self, transect, setting):
+        """Marks all data invalid that are closer to the transducer than the setting.
+
+        Parameters
+        ----------
+        transect: TransectData
+            Object of TransectData
+        setting: float
+            Range from the transducer, in m
+        """
+
+        # Initialize variables
+        trans_select = getattr(transect.depths, transect.depths.selected)
+        cell_depth = trans_select.depth_cell_depth_m
+        cell_size = trans_select.depth_cell_size_m
+        draft = trans_select.draft_use_m
+        top_cell_depth = cell_depth - 0.5 * cell_size
+        threshold = np.round((setting+draft), 3)
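+        # For example (hypothetical values), with draft_use_m = 0.16 and
+        # setting = 0.25 the threshold is 0.41 m, and any cell whose top edge
+        # is at or shallower than 0.41 m is marked invalid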
+
+        # Apply filter
+        exclude = np.round(top_cell_depth, 3) <= threshold
+        valid = np.copy(self.cells_above_sl)
+        valid[exclude] = False
+        self.valid_data[6, :, :] = valid
+        
+        # Set threshold property
+        self.excluded_dist_m = setting
+        
+        self.all_valid_data()
+
+    def interpolate_abba(self, transect, search_loc=['above', 'below', 'before', 'after']):
+        """" Interpolates all data marked invalid using the abba interpolation algorithm.
+
+        Parameters
+        ----------
+        transect: TransectData
+            Object of TransectData
+        """
+
+        # Set properties
+        self.interpolate_cells = 'abba'
+        self.interpolate_ens = 'abba'
+
+        # Get valid data based on all filters applied
+        valid = self.valid_data[0, :, :]
+        valid = valid[:, transect.in_transect_idx]
+
+        # Initialize velocity data variables
+        u = copy.deepcopy(self.u_mps)
+        v = copy.deepcopy(self.v_mps)
+
+        u = u[:, transect.in_transect_idx]
+        v = v[:, transect.in_transect_idx]
+
+        # Set invalid data to nan in processed velocity data variables
+        u[np.logical_not(valid)] = np.nan
+        v[np.logical_not(valid)] = np.nan
+
+        interpolated_data = self.compute_abba_interpolation(wt_data=self,
+                                                            data_list=[u, v],
+                                                            valid=valid,
+                                                            transect=transect,
+                                                            search_loc=search_loc)
+
+        if interpolated_data is not None:
+            # Incorporate interpolated values
+            for n in range(len(interpolated_data[0])):
+                u[interpolated_data[0][n][0]] = interpolated_data[0][n][1]
+                v[interpolated_data[1][n][0]] = interpolated_data[1][n][1]
+
+        # Save interpolated data, retaining all ensembles including those that
+        # are not in the in_transect_idx array
+        self.u_processed_mps[:, :] = np.nan
+        self.v_processed_mps[:, :] = np.nan
+        self.u_processed_mps[:, transect.in_transect_idx] = u
+        self.v_processed_mps[:, transect.in_transect_idx] = v
+
+    @staticmethod
+    def compute_abba_interpolation(wt_data, data_list, valid, transect, search_loc=['above', 'below', 'before', 'after']):
+        """Computes the interpolated values for invalid cells using the abba method.
+
+        Parameters
+        ----------
+        wt_data: WaterData
+            Object of WaterData
+        data_list: list
+            List of np.array(float) data to be used for interpolation
+        valid: np.ndarray(bool)
+            Array indicating valid data to be used for interpolation
+        transect: TransectData
+            Object of TransectData
+        search_loc: list
+            Neighbor locations to search for valid data ('above', 'below', 'before', 'after')
+
+        Returns
+        -------
+        interpolated_data: list or None
+            Interpolated values with their locations, or None if interpolation could not be performed
+        """
+        # Find cells with invalid data
+        valid_cells = wt_data.valid_data[0, :, transect.in_transect_idx]
+        boat_selected = getattr(transect.boat_vel, transect.boat_vel.selected)
+        if boat_selected is not None:
+            boat_valid = boat_selected.valid_data[0, transect.in_transect_idx]
+        else:
+            boat_valid = 0
+
+        if not np.all(valid_cells) and np.nansum(boat_valid) > 1:
+            # Compute distance along shiptrack to be used in interpolation
+            distance_along_shiptrack = transect.boat_vel.compute_boat_track(transect)['distance_m']
+
+            # Where there is invalid boat speed at the beginning or end of the transect, mark the distance nan
+            # to avoid interpolating velocities that won't be used for discharge
+            if type(distance_along_shiptrack) is np.ndarray:
+                distance_along_shiptrack[0:np.argmax(boat_valid == True)] = np.nan
+                end_nan = np.argmax(np.flip(boat_valid) == True)
+                if end_nan > 0:
+                    distance_along_shiptrack[-1 * end_nan:] = np.nan
+                depth_selected = getattr(transect.depths, transect.depths.selected)
+                cells_above_sl = wt_data.valid_data[6, :, :]
+                cells_above_sl = cells_above_sl[:, transect.in_transect_idx]
+
+                # Interpolate values for invalid cells from neighboring valid data
+                interpolated_data = abba_idw_interpolation(data_list=data_list,
+                                                           valid_data=valid,
+                                                           cells_above_sl=cells_above_sl,
+                                                           y_centers=
+                                                           depth_selected.depth_cell_depth_m[:, transect.in_transect_idx],
+                                                           y_cell_size=
+                                                           depth_selected.depth_cell_size_m[:, transect.in_transect_idx],
+                                                           y_depth=
+                                                           depth_selected.depth_processed_m[transect.in_transect_idx],
+                                                           x_shiptrack=distance_along_shiptrack,
+                                                           search_loc=search_loc,
+                                                           normalize=True)
+                return interpolated_data
+            else:
+                return None
+        else:
+            return None
+
+    def interpolate_ens_next(self):
+        """Applies data from the next valid ensemble for ensembles with invalid water velocities.
+        """
+
+        # Set interpolation property for ensembles
+        self.interpolate_ens = 'ExpandedT'
+        
+        # Set processed data to nan for all invalid data
+        valid = self.valid_data[0]
+        self.u_processed_mps = np.copy(self.u_mps)
+        self.v_processed_mps = np.copy(self.v_mps)
+        self.u_processed_mps[valid == False] = np.nan
+        self.v_processed_mps[valid == False] = np.nan
+        
+        # Identify ensembles with valid data
+        valid_ens = np.any(valid, axis=0)
+        n_ens = len(valid_ens)
+        
+        # Set the invalid ensembles to the data in the next valid ensemble
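+        # Iterating in reverse lets a run of consecutive invalid ensembles all
+        # receive the data from the next valid ensemble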
+        for n in np.arange(0, n_ens-1)[::-1]:
+            if not valid_ens[n]:
+                self.u_processed_mps[:, n] = self.u_processed_mps[:, n+1]
+                self.v_processed_mps[:, n] = self.v_processed_mps[:, n+1]
+                
+    def interpolate_ens_hold_last(self):
+        """Interpolates velocity data for invalid ensembles by repeating the
+        last valid data until new valid data are found.
+        """
+        
+        self.interpolate_ens = 'HoldLast'
+        
+        valid = self.valid_data[0]
+        
+        # Initialize processed velocity data variables
+        self.u_processed_mps = np.copy(self.u_mps)
+        self.v_processed_mps = np.copy(self.v_mps)
+        
+        # Set invalid data to nan in processed velocity data variables
+        self.u_processed_mps[valid == False] = np.nan
+        self.v_processed_mps[valid == False] = np.nan
+        
+        # Determine ensembles with valid data
+        valid_ens = np.any(valid, axis=0)
+        
+        # Process each ensemble beginning with the second ensemble
+        n_ens = len(valid_ens)
+        
+        for n in np.arange(1, n_ens):
+            # If ensemble is invalid fill in with previous ensemble
+            if not valid_ens[n]:
+                self.u_processed_mps[:, n] = self.u_processed_mps[:, n-1]
+                self.v_processed_mps[:, n] = self.v_processed_mps[:, n-1]
+
+    def interpolate_ens_hold_last_9(self):
+        """Apply SonTek's approach to invalid data.
+
+        Interpolates velocity data for invalid ensembles by repeating the
+        last valid data for up to 9 ensembles or until new valid data is
+        found. If more than 9 consecutive ensembles are invalid, the
+        ensembles beyond the 9th remain invalid. This is for
+        compatibility with SonTek RiverSurveyor Live.
+        """
+        
+        self.interpolate_ens = 'Hold9'
+        
+        valid = self.valid_data[0]
+        
+        # Initialize processed velocity data variables
+        self.u_processed_mps = np.copy(self.u_mps)
+        self.v_processed_mps = np.copy(self.v_mps)
+        
+        # Set invalid data to nan in processed velocity data variables
+        self.u_processed_mps[valid == False] = np.nan
+        self.v_processed_mps[valid == False] = np.nan
+        
+        # Determine ensembles with valid data
+        valid_ens = np.any(valid, axis=0)
+        
+        # Process each ensemble beginning with the second ensemble
+        n_ens = len(valid_ens)
+        n_invalid = 0
+        
+        for n in np.arange(1, n_ens):
+            # Fill an invalid ensemble from the previous ensemble for up to 9
+            # consecutive invalid ensembles; beyond that the copied value is
+            # nan, so additional invalid ensembles remain invalid
+            if not valid_ens[n] and n_invalid < 9:
+                n_invalid += 1
+                self.u_processed_mps[:, n] = self.u_processed_mps[:, n-1]
+                self.v_processed_mps[:, n] = self.v_processed_mps[:, n-1]
+            else:
+                n_invalid = 0
+
+    def interpolate_ens_none(self):
+        """Applies no interpolation for invalid ensembles."""
+        
+        self.interpolate_ens = 'None'
+        
+        valid = self.valid_data[0]
+        
+        # Initialize processed velocity data variables
+        self.u_processed_mps = np.copy(self.u_mps)
+        self.v_processed_mps = np.copy(self.v_mps)
+        
+        # Set invalid data to nan in processed velocity data variables
+        self.u_processed_mps[valid == False] = np.nan
+        self.v_processed_mps[valid == False] = np.nan
+
+    def interpolate_cells_none(self):
+        """Applies no interpolation for invalid cells that are not part of
+        an invalid ensemble."""
+
+        self.interpolate_cells = 'None'
+        
+        valid = self.valid_data[0]
+
+        # Determine ensembles with valid data
+        valid_ens = np.any(valid, axis=0)
+
+        # Determine the number of ensembles
+        n_ens = len(valid_ens)
+
+        # Initialize processed velocity data variables
+        self.u_processed_mps = np.copy(self.u_mps)
+        self.v_processed_mps = np.copy(self.v_mps)
+        
+        for n in range(n_ens):
+            # For ensembles with some valid data, set any invalid cells to nan
+            if valid_ens[n]:
+                invalid_cells = np.logical_not(valid[:, n])
+                self.u_processed_mps[invalid_cells, n] = np.nan
+                self.v_processed_mps[invalid_cells, n] = np.nan
+        
+    def interpolate_ens_linear(self, transect):
+        """Uses 2D linear interpolation to estimate values for invalid ensembles.
+
+        Use linear interpolation as computed by scipy's interpolation
+        function to interpolate velocity data for ensembles with no valid velocities.
+
+        Parameters
+        ----------
+        transect: TransectData
+            Object of TransectData
+        """
+
+        self.interpolate_ens = 'Linear'
+         
+        valid = self.valid_data[0, :, :]
+
+        # Initialize processed velocity data variables
+        self.u_processed_mps = np.copy(self.u_mps)
+        self.v_processed_mps = np.copy(self.v_mps)
+
+        # Determine ensembles with valid data
+        valid_ens = np.any(valid, 0)
+        
+        if np.sum(valid_ens) > 1:
+            
+            trans_select = getattr(transect.depths, transect.depths.selected)
+            # Compute z
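+            # z is the fraction of the flow depth above the streambed for each
+            # cell (near 1 at the surface, near 0 at the bed)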
+            z = np.divide(np.subtract(trans_select.depth_processed_m, trans_select.depth_cell_depth_m),
+                          trans_select.depth_processed_m)
+            
+            # Create position array
+            boat_select = getattr(transect.boat_vel, transect.boat_vel.selected)
+            if boat_select is not None:
+                if np.nansum(boat_select.valid_data[0]) > 0:
+                    boat_vel_x = boat_select.u_processed_mps
+                    boat_vel_y = boat_select.v_processed_mps
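+                    # Integrate boat speed over the ensemble durations to get the
+                    # cumulative distance along the ship track for each ensemble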
+                    track_x = boat_vel_x * transect.date_time.ens_duration_sec
+                    track_y = boat_vel_y * transect.date_time.ens_duration_sec
+                    track = np.nancumsum(np.sqrt(track_x**2 + track_y**2))
+                    track_array = np.tile(track, (self.u_processed_mps.shape[0], 1))
+                    
+                    # Determine index of all valid data
+                    valid_z = np.isnan(z) == False
+                    valid_combined = np.logical_and(valid, valid_z)
+
+                    u = interpolate.griddata(np.vstack((z[valid_combined], track_array[valid_combined])).T,
+                                             self.u_processed_mps[valid_combined],
+                                             (z, track_array))
+                    
+                    v = interpolate.griddata(np.vstack((z[valid_combined], track_array[valid_combined])).T,
+                                             self.v_processed_mps[valid_combined],
+                                             (z, track_array))
+
+                    self.u_processed_mps = np.tile(np.nan, self.u_mps.shape)
+                    self.v_processed_mps = np.tile(np.nan, self.v_mps.shape)
+                    processed_valid_cells = self.estimate_processed_valid_cells(transect)
+                    self.u_processed_mps[processed_valid_cells] = u[processed_valid_cells]
+                    self.v_processed_mps[processed_valid_cells] = v[processed_valid_cells]
+
+    def interpolate_cells_linear(self, transect):
+        """Uses 2D linear interpolation to estimate values for invalid cells.
+
+        Use linear interpolation as computed by scipy's interpolation
+        function to interpolate velocity data for cells with no valid velocities.
+
+        Parameters
+        ----------
+        transect: TransectData
+            Object of TransectData
+        """
+
+        self.interpolate_cells = 'Linear'
+
+        valid = self.valid_data[0, :, :]
+
+        # Initialize processed velocity data variables
+        self.u_processed_mps = np.copy(self.u_mps)
+        self.v_processed_mps = np.copy(self.v_mps)
+
+        trans_select = getattr(transect.depths, transect.depths.selected)
+
+        # Compute z
+        z = np.divide(np.subtract(trans_select.depth_processed_m, trans_select.depth_cell_depth_m),
+                      trans_select.depth_processed_m)
+
+        # Create position array
+        boat_select = getattr(transect.boat_vel, transect.boat_vel.selected)
+        if boat_select is not None:
+            if np.nansum(boat_select.valid_data[0]) > 0:
+                boat_vel_x = boat_select.u_processed_mps
+                boat_vel_y = boat_select.v_processed_mps
+                track_x = boat_vel_x * transect.date_time.ens_duration_sec
+                track_y = boat_vel_y * transect.date_time.ens_duration_sec
+                track = np.nancumsum(np.sqrt(track_x ** 2 + track_y ** 2))
+                track_array = np.tile(track, (self.u_processed_mps.shape[0], 1))
+
+                # Determine index of all valid data
+                valid_z = np.isnan(z) == False
+                valid_combined = np.logical_and(valid, valid_z)
+
+                u = interpolate.griddata(np.array([z[valid_combined].ravel(),
+                                                   track_array[valid_combined].ravel()]).T,
+                                         self.u_processed_mps[valid_combined].ravel(),
+                                         (z, track_array))
+
+                v = interpolate.griddata(np.array([z[valid_combined].ravel(),
+                                                   track_array[valid_combined].ravel()]).T,
+                                         self.v_processed_mps[valid_combined].ravel(),
+                                         (z, track_array))
+
+                self.u_processed_mps = np.tile(np.nan, self.u_mps.shape)
+                self.v_processed_mps = np.tile(np.nan, self.v_mps.shape)
+                processed_valid_cells = self.estimate_processed_valid_cells(transect)
+                self.u_processed_mps[processed_valid_cells] = u[processed_valid_cells]
+                self.v_processed_mps[processed_valid_cells] = v[processed_valid_cells]
+
+    def interpolate_cells_trdi(self, transect):
+        """Interpolates values for invalid cells using methods similar to WinRiver II.
+
+        This function computes the velocity for the invalid cells using
+        the methods in WinRiver II, but applied to velocity components.
+        Although WinRiver II applies the methods to discharge, which is
+        theoretically more correct, mathematically applying them to discharge
+        or to velocity components is identical. By applying them to velocity
+        components the user can see the interpolated velocity data.
+        Power fit uses the power fit equation and no slip uses linear interpolation.
+
+        Parameters
+        ----------
+        transect: TransectData
+            Object of TransectData
+        """
+
+        # Set property
+        self.interpolate_cells = 'TRDI'
+
+        # Construct variables
+        depths = getattr(transect.depths, transect.depths.selected)
+        valid = self.valid_data[0]
+        cell_depth = depths.depth_cell_depth_m
+        z_all = np.subtract(depths.depth_processed_m, cell_depth)
+        z = np.copy(z_all)
+        z[np.isnan(self.u_processed_mps)] = np.nan
+        z_adj = np.tile(np.nan, z.shape)
+        n_cells, n_ens = self.u_processed_mps.shape
+        cell_size = depths.depth_cell_size_m
+        exponent = transect.extrap.exponent
+        bot_method = transect.extrap.bot_method
+
+        for n in range(n_ens):
+
+            # Identify first and last valid depth cell
+            idx = np.where(valid[:, n] == True)[0]
+            if len(idx) > 0:
+                idx_first = idx[0]
+                idx_last = idx[-1]
+                idx_middle = np.where(valid[idx_first:idx_last + 1, n] == False)[0]
+
+                # For invalid middle depth cells perform interpolation based on bottom method
+                if len(idx_middle) > 0:
+                    idx_middle = idx_middle + idx_first
+                    z_adj[idx_middle, n] = z_all[idx_middle, n]
+
+                    # Interpolate velocities using power fit
+                    if bot_method == 'Power':
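+                        # The power fit scales u(z) = coef * z**exponent so that its
+                        # integral over the measured cells matches the measured sum of
+                        # velocity times cell size; z2 is the distance from the streambed
+                        # to the bottom of each cell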
+                        # Compute interpolated u-velocities
+                        z2 = z[:, n] - (0.5 * cell_size[:, n])
+                        z2[nan_less(z2, 0)] = np.nan
+                        coef = ((exponent + 1) * np.nansum(self.u_processed_mps[:, n] * cell_size[:, n], 0)) / \
+                            np.nansum(((z[:, n] + 0.5 * cell_size[:, n]) ** (exponent + 1)) - (z2 ** (exponent + 1)), 0)
+
+                        temp = coef * z_adj[:, n] ** exponent
+                        self.u_processed_mps[idx_middle, n] = temp[idx_middle]
+                        # Compute interpolated v-Velocities
+                        coef = ((exponent + 1) * np.nansum(self.v_processed_mps[:, n] * cell_size[:, n])) / \
+                            np.nansum(((z[:, n] + 0.5 * cell_size[:, n]) ** (exponent + 1)) - (z2 ** (exponent + 1)))
+                        temp = coef * z_adj[:, n] ** exponent
+                        self.v_processed_mps[idx_middle, n] = temp[idx_middle]
+
+                    # Interpolate velocities using linear interpolation
+                    elif bot_method == 'No Slip':
+                        self.u_processed_mps[idx_middle, n] = np.interp(x=cell_depth[idx_middle, n],
+                                                                        xp=cell_depth[valid[:, n], n],
+                                                                        fp=self.u_processed_mps[valid[:, n], n])
+                        self.v_processed_mps[idx_middle, n] = np.interp(x=cell_depth[idx_middle, n],
+                                                                        xp=cell_depth[valid[:, n], n],
+                                                                        fp=self.v_processed_mps[valid[:, n], n])
+
+    def estimate_processed_valid_cells(self, transect):
+        """Estimate the number of valid cells for invalid ensembles.
+
+        Parameters
+        ----------
+        transect: TransectData
+            Object of TransectData
+
+        Returns
+        -------
+        processed_valid_cells: np.ndarray(bool)
+           Estimated valid cells
+        """
+
+        processed_valid_cells = np.copy(self.valid_data[0])
+        valid_data_sum = np.nansum(processed_valid_cells, 0)
+        invalid_ens_idx = np.where(valid_data_sum == 0)[0]
+        n_invalid = len(invalid_ens_idx)
+        depth_cell_depth = transect.depths.bt_depths.depth_cell_depth_m
+        for n in range(n_invalid):
+
+            # Find nearest valid ensembles on either side of invalid ensemble
+            idx1 = np.where(valid_data_sum[:invalid_ens_idx[n]] > 0)[0]
+            if len(idx1) > 0:
+                idx1 = idx1[-1]
+                # Find the last cell in the neighboring valid ensembles
+                idx1_cell = np.where(processed_valid_cells[:, idx1] == True)[0][-1]
+                # Determine valid cells for invalid ensemble
+                idx1_cell_depth = depth_cell_depth[idx1_cell, idx1]
+            else:
+                idx1_cell_depth = 0
+
+            idx2 = np.where(valid_data_sum[invalid_ens_idx[n]:] > 0)[0]
+            if len(idx2) > 0:
+                idx2 = idx2[0]
+                idx2 = invalid_ens_idx[n] + idx2
+                # Find the last cell in the neighboring valid ensembles
+                idx2_cell = np.where(processed_valid_cells[:, idx2] == True)[0][-1]
+                # Determine valid cells for invalid ensemble
+                idx2_cell_depth = depth_cell_depth[idx2_cell, idx2]
+            else:
+                idx2_cell_depth = 0
+
+            cutoff = np.nanmax([idx1_cell_depth, idx2_cell_depth])
+            processed_valid_cells[depth_cell_depth[:, invalid_ens_idx[n]] < cutoff, invalid_ens_idx[n]] = True
+
+        # Apply excluded distance to all ensembles after the loop completes
+        processed_valid_cells = np.logical_and(processed_valid_cells, self.valid_data[6, :, :])
+
+        return processed_valid_cells
+
+    def compute_snr_rng(self):
+        """Computes the range between the average snr for all beams.
+        The average is computed using only data above the side lobe cutoff.
+        """
+        if self.rssi_units == 'SNR':
+            cells_above_sl = np.copy(self.cells_above_sl.astype(float))
+            cells_above_sl[cells_above_sl < 0.5] = np.nan
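+            # Casting the mask to float and setting cells below the side lobe to
+            # nan excludes those cells from the nanmean that follows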
+            snr_adjusted = self.rssi * cells_above_sl
+            snr_average = np.nanmean(snr_adjusted, 1)
+            self.snr_rng = np.nanmax(snr_average, 0) - np.nanmin(snr_average, 0)
+
+    def automated_beam_filter_old(self):
+        """Older version of automatic beam filter. Not currently used.
+        """
+
+        # Create array indicating which cells do not have 4-beam solutions; cells below the side lobe are set to nan
+        temp = copy.deepcopy(self)
+        temp.filter_beam(4)
+        valid_bool = temp.valid_data[5, :, :]
+        valid = valid_bool.astype(float)
+        valid[temp.cells_above_sl == False] = np.nan
+
+        # Find cells with 3 beams solutions
+        rows_3b, cols_3b = np.where(np.abs(valid) == 0)
+        if len(rows_3b) > 0:
+            # Find cells with 4 beams solutions
+            valid_rows, valid_cols = np.where(valid == 1)
+
+            valid_u = temp.u_mps[valid == 1]
+            valid_v = temp.v_mps[valid == 1]
+            # Interpolate water velocity for cells with 3-beam solutions
+
+            # The following code duplicates Matlab scatteredInterpolant which seems to only estimate along columns
+            # as long as there is data in the ensemble above and below the value being estimated.
+            row_numbers = np.linspace(0, valid.shape[0] - 1, valid.shape[0])
+            n = 0
+            for col in cols_3b:
+                # If the cell has valid data above and below it linearly interpolate using data in that ensemble.
+                # If not, use other means of interpolation.
+                if np.any(valid_bool[rows_3b[n] + 1::, col]) and np.any(valid_bool[0:rows_3b[n], col]):
+                    est_u = np.interp(x=rows_3b[n],
+                                      xp=row_numbers[valid_bool[:, col]],
+                                      fp=temp.u_mps[valid_bool[:, col], col])
+
+                    est_v = np.interp(x=rows_3b[n],
+                                      xp=row_numbers[valid_bool[:, col]],
+                                      fp=temp.v_mps[valid_bool[:, col], col])
+                else:
+                    est_u = interpolate.griddata(np.array((valid_cols, valid_rows)).T, valid_u, (col, rows_3b[n]))
+                    est_v = interpolate.griddata(np.array((valid_cols, valid_rows)).T, valid_v, (col, rows_3b[n]))
+
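+                # Accept the 3-beam solution when the measured velocity is within
+                # 50 percent of the value estimated from neighboring valid data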
+                u_ratio = (temp.u_mps[rows_3b[n], col] / est_u) - 1
+                v_ratio = (temp.v_mps[rows_3b[n], col] / est_v) - 1
+                if np.abs(u_ratio) < 0.5 or np.abs(v_ratio) < 0.5:
+                    valid_bool[rows_3b[n], col] = True
+                else:
+                    valid_bool[rows_3b[n], col] = False
+                n += 1
+            self.valid_data[5, :, :] = valid_bool
+        else:
+            self.valid_data[5, :, :] = temp.valid_data[5, :, :]
+
+        # Combine all filter data and update processed properties
+
+        self.all_valid_data()
+
+# Code from Aurelien
+    def interpolate_cells_above(self, transect):
+        """Interpolates values for invalid cells using below valid cell
+        Written by Aurelien Despax
+        Modified by dsm
+
+        Parameters
+        ----------
+        transect: TransectData
+            Object of TransectData
+        """
+
+        # Set property
+        self.interpolate_cells = 'Above'
+
+        # Construct variables
+
+        valid = self.valid_data[0]
+        n_cells, n_ens = self.u_processed_mps.shape
+
+        for n in range(n_ens):
+
+            # Identify first and last valid depth cell
+            idx = np.where(valid[:, n] == True)[0]
+            if len(idx) > 0:
+                idx_first = idx[0]
+                idx_last = idx[-1]
+                idx_middle = np.where(valid[idx_first:idx_last + 1, n] == False)[0]
+
+                # For invalid middle depth cells assign value of shallower valid depth cell
+                # TODO this assigns the value of the shallowest depth cell not the next valid depth cell
+                if len(idx_middle) > 0:
+                    idx_middle = idx_middle + idx_first
+                    self.u_processed_mps[idx_middle, n] = self.u_processed_mps[idx_first, n]
+                    self.v_processed_mps[idx_middle, n] = self.v_processed_mps[idx_first, n]
+
+    def interpolate_cells_below(self, transect):
+        """Interpolates values for invalid cells using above valid cell
+        Written by Aurelien Despax
+        Modified by dsm
+
+        Parameters
+        ----------
+        transect: TransectData
+            Object of TransectData
+        """
+
+        # Set property
+        self.interpolate_cells = 'Below'
+
+        # Construct variables
+        valid = self.valid_data[0]
+        n_cells, n_ens = self.u_processed_mps.shape
+
+        for n in range(n_ens):
+
+            # Identify first and last valid depth cell
+            idx = np.where(valid[:, n] == True)[0]
+            if len(idx) > 0:
+                idx_first = idx[0]
+                idx_last = idx[-1]
+                idx_middle = np.where(valid[idx_first:idx_last + 1, n] == False)[0]
+
+                # For invalid middle depth cells assign the value of the deepest valid depth cell
+                # TODO this assigns the value of the deepest valid depth cell not the next valid depth cell
+                if len(idx_middle) > 0:
+                    idx_middle = idx_middle + idx_first
+                    self.u_processed_mps[idx_middle, n] = self.u_processed_mps[idx_last, n]
+                    self.v_processed_mps[idx_middle, n] = self.v_processed_mps[idx_last, n]
+
+    def interpolate_cells_before(self, transect):
+        """Interpolates values for invalid cells using above valid cell
+        Written by Aurelien Despax
+
+        Parameters
+        ----------
+        transect: TransectData
+            Object of TransectData
+        """
+
+        # Set property
+        self.interpolate_cells = 'Before'
+
+        # Construct variables
+        depths = getattr(transect.depths, transect.depths.selected)
+        valid = self.valid_data[0]
+        cell_depth = depths.depth_cell_depth_m
+        z_all = np.subtract(depths.depth_processed_m, cell_depth)
+        z = np.copy(z_all)
+        z[np.isnan(self.u_processed_mps)] = np.nan
+        z_adj = np.tile(np.nan, z.shape)
+        n_cells, n_ens = self.u_processed_mps.shape
+
+        for n in range(n_ens):
+
+            # Identify first and last valid depth cell
+            idx = np.where(valid[:, n] == True)[0]
+            if len(idx) > 0:
+                idx_first = idx[0]
+                idx_last = idx[-1]
+                idx_middle = np.where(valid[idx_first:idx_last + 1, n] == False)[0]
+
+                # For invalid middle depth cells fill from the previous ensemble
+                if len(idx_middle) > 0:
+                    idx_middle = idx_middle + idx_first
+                    z_adj[idx_middle, n] = z_all[idx_middle, n]
+
+                    # Fill invalid cells from the previous ensemble
+                    if n > 0:
+                        self.u_processed_mps[idx_middle, n] = self.u_processed_mps[idx_middle, n - 1]
+                        self.v_processed_mps[idx_middle, n] = self.v_processed_mps[idx_middle, n - 1]
+
+    def interpolate_cells_after(self, transect):
+        """Interpolates values for invalid cells using above valid cell
+        Written by Aurelien Despax
+
+        Parameters
+        ----------
+        transect: TransectData
+            Object of TransectData
+        """
+
+        # Set property
+        self.interpolate_cells = 'After'
+
+        # Construct variables
+        depths = getattr(transect.depths, transect.depths.selected)
+        valid = self.valid_data[0]
+        cell_depth = depths.depth_cell_depth_m
+        z_all = np.subtract(depths.depth_processed_m, cell_depth)
+        z = np.copy(z_all)
+        z[np.isnan(self.u_processed_mps)] = np.nan
+        z_adj = np.tile(np.nan, z.shape)
+        n_cells, n_ens = self.u_processed_mps.shape
+
+        for n in reversed(range(n_ens)):
+
+            # Identify first and last valid depth cell
+            idx = np.where(valid[:, n] == True)[0]
+            if len(idx) > 0:
+                idx_first = idx[0]
+                idx_last = idx[-1]
+                idx_middle = np.where(valid[idx_first:idx_last + 1, n] == False)[0]
+
+                # For invalid middle depth cells fill from the next ensemble
+                if len(idx_middle) > 0:
+                    idx_middle = idx_middle + idx_first
+                    z_adj[idx_middle, n] = z_all[idx_middle, n]
+
+                    # Fill invalid cells from the next ensemble
+                    if n + 1 < n_ens:
+                        self.u_processed_mps[idx_middle, n] = self.u_processed_mps[idx_middle, n + 1]
+                        self.v_processed_mps[idx_middle, n] = self.v_processed_mps[idx_middle, n + 1]
\ No newline at end of file
diff --git a/Classes/__init__.py b/Classes/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/Classes/stickysettings.py b/Classes/stickysettings.py
new file mode 100644
index 0000000000000000000000000000000000000000..890046731ce998c908abfac9071371adc6148351
--- /dev/null
+++ b/Classes/stickysettings.py
@@ -0,0 +1,123 @@
+import os
+import json
+
+
+class StickySettings(object):
+    """Provides methods to quickly store and retrieve settings to and from disk.
+
+    This class is intended to be used to store simple settings that need to be retained between sessions of the
+    subject application, such as the last folder opened or the units setting. Any setting that the application needs
+    to know when it is run again can be stored using the methods in this class.
+
+    Data are stored in a dictionary which is then written to a json file having the filename provided by the user and
+    stored in the folder defined by the APPDATA environment variable.
+
+    Note
+    ----
+    The os and json packages are required for this class.
+
+    Attributes
+    ----------
+    settings_file : str
+        Filename of json file used to store data between sessions.
+    settings: dict
+        Dictionary used to store user defined settings.
+
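+    Example
+    -------
+    A minimal usage sketch ('demo_app' is a hypothetical filename; the APPDATA
+    environment variable must be defined):
+
+    settings = StickySettings('demo_app')
+    settings.new('last_folder', 'C:/data')
+    settings.set('last_folder', 'C:/data/new')
+    folder = settings.get('last_folder')
+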
+    """
+
+    def __init__(self, arg):
+        """Constructor method which establishes the json file.
+
+        If the filename (arg) provided by the user cannot be found, a new file is created. If the filename (arg)
+        provided by the user is found, the file is opened and all keys and values are read and stored in settings for
+        quick modification by the calling application.
+
+        Parameters
+        ----------
+        arg : str
+            User supplied filename excluding the suffix. Example 'myFile' but not 'myFile.json'
+
+        """
+        # Construct filename from user input.
+        self.settings_file = os.path.join(os.getenv('APPDATA'), arg + '.json')
+        if os.path.isfile(self.settings_file):
+            # Read json into dictionary
+            with open(self.settings_file, 'r') as f:
+                self.settings = json.load(f)
+        else:
+            # Create json file with default dictionary
+            self.settings = {}
+            with open(self.settings_file, 'w') as f:
+                json.dump(self.settings, f)
+
+    def new(self, key, value):
+        """Create new key value pair in settings.
+
+        Method checks to see if key exists. If it exists an error is raised. If the key does not exist it is created.
+
+        Parameters
+        ----------
+        key : str
+            Label for value in dictionary.
+        value : any value that can be stored in a dictionary
+
+        Raises
+        ------
+        KeyError
+            If the key already exists in settings.
+
+        """
+
+        if key in self.settings:
+            raise KeyError('Key already exists in settings')
+        else:
+            self.settings[key] = value
+            with open(self.settings_file, 'w') as f:
+                json.dump(self.settings, f)
+
+    def set(self, key, value):
+        """Set value of existing key.
+
+        Method checks to ensure the key exists and sets the value of the key to value. If the key does not exist an
+        error is raised.
+
+        Parameters
+        ----------
+        key : str
+            Label for value in dictionary.
+        value : any value that can be stored in a dictionary
+
+        Raises
+        ------
+        KeyError
+            If the key does not exist in settings.
+
+        """
+        if key in self.settings:
+            self.settings[key] = value
+            with open(self.settings_file, 'w') as f:
+                json.dump(self.settings, f)
+        else:
+            raise KeyError('Key does not exist in settings')
+
+    def get(self, item):
+        """Get value of item for settings.
+
+        Returns the value of item from settings dictionary.
+
+        Parameters
+        ----------
+        item : str
+            Key for settings dictionary.
+
+        Returns
+        -------
+        value
+            Data type stored in key 'item'.
+
+        """
+
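+        # Reload the file so the most recently stored values are returned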
+        with open(self.settings_file, 'r') as f:
+            self.settings = json.load(f)
+
+        return self.settings[item]
diff --git a/Classes/test_stickysettings.py b/Classes/test_stickysettings.py
new file mode 100644
index 0000000000000000000000000000000000000000..2c3b6ec7565fee4f4534dc4fed186390cc280f75
--- /dev/null
+++ b/Classes/test_stickysettings.py
@@ -0,0 +1,68 @@
+import pytest
+import os
+from Classes.stickysettings import StickySettings as ss
+
+
+def create_filename():
+    """Create a filename that does not already exist for use in testing"""
+    file_exists = True
+    n = 0
+    testfile = ''
+    while file_exists:
+        n += 1
+        testfile = os.path.join(os.getenv('APPDATA'), 'xyz123' + str(n))
+        if not os.path.isfile(testfile + '.json'):
+            file_exists = False
+        elif n > 10:
+            file_exists = False
+    return testfile
+
+
+def test_file_creation():
+    """Test initialization of StickySettings and file creation"""
+    testfile = create_filename()
+    _ = ss(testfile)
+    assert os.path.isfile(testfile + '.json')
+    os.remove(testfile + '.json')
+
+
+def test_store_value():
+    """Test creating a file and key value pair and getting the value"""
+    testfile = create_filename()
+    test_user = ss(testfile)
+    test_user.new('test', True)
+    assert test_user.get('test')
+    os.remove(testfile + '.json')
+
+
+def test_set_value():
+    """Test setting a value of an existing key"""
+    testfile = create_filename()
+    test_user = ss(testfile)
+    test_user.new('test', False)
+    test_user.set('test', True)
+    assert test_user.get('test')
+    os.remove(testfile + '.json')
+
+
+def test_set_value_failure():
+    """Test failure when setting a value for a key that does not exist"""
+    testfile = create_filename()
+    test_user = ss(testfile)
+    test_user.new('test', True)
+    with pytest.raises(KeyError):
+        test_user.set('Folder', 'AnyFolder')
+    os.remove(testfile + '.json')
+
+
+def test_get_value_failure():
+    """Test failure when requesting a value for a key that does not exist"""
+    testfile = create_filename()
+    test_user = ss(testfile)
+    test_user.new('test', True)
+    with pytest.raises(KeyError):
+        test_user.get('Folder')
+    os.remove(testfile + '.json')
diff --git a/DischargeFunctions/bottom_discharge_extrapolation.cp39-win_amd64.pyd b/DischargeFunctions/bottom_discharge_extrapolation.cp39-win_amd64.pyd
new file mode 100644
index 0000000000000000000000000000000000000000..046f35e897397e7efc156ce63b9ea8ceeba837b3
Binary files /dev/null and b/DischargeFunctions/bottom_discharge_extrapolation.cp39-win_amd64.pyd differ
diff --git a/DischargeFunctions/bottom_discharge_extrapolation.py b/DischargeFunctions/bottom_discharge_extrapolation.py
new file mode 100644
index 0000000000000000000000000000000000000000..71e5a5b1e136088a1ba9939e8666f4537f0a4d80
--- /dev/null
+++ b/DischargeFunctions/bottom_discharge_extrapolation.py
@@ -0,0 +1,304 @@
+"""bottom_discharge_extrapolation
+Computes the extrapolated discharge in the bottom unmeasured portion of an ADCP transect. Methods are consistent with
+equations used by TRDI and SonTek.
+
+Example
+-------
+
+from DischargeFunctions.bottom_discharge_extrapolation import extrapolate_bot
+
+    trans_select = getattr(data_in.depths, data_in.depths.selected)
+    num_bot_method = {'Power': 0, 'No Slip': 1, None: -1}
+    self.bot_ens = extrapolate_bot(x_prod, data_in.w_vel.valid_data[0, :, :],
+                                   num_bot_method[data_in.extrap.bot_method],
+                                   data_in.extrap.exponent, data_in.in_transect_idx, trans_select.depth_cell_size_m,
+                                   trans_select.depth_cell_depth_m, trans_select.depth_processed_m, delta_t,
+                                   num_bot_method[bot_method], exponent)
+"""
+
+import numpy as np
+from numba.pycc import CC
+from numba import njit
+
+cc = CC('bottom_discharge_extrapolation')
+
+
+# Bottom Discharge Extrapolation with Numba
+# =========================================
+@cc.export('extrapolate_bot', 'f8[:](f8[:, :], b1[:, :], i8, f8, i4[:], f8[:, :], f8[:, :], f8[:], f8[:], '
+                              'optional(i8), optional(f8))')
+def extrapolate_bot(xprod,
+                    w_valid_data,
+                    transect_bot_method,
+                    transect_exponent,
+                    in_transect_idx,
+                    depth_cell_size_m,
+                    depth_cell_depth_m,
+                    depth_processed_m,
+                    delta_t,
+                    bot_method=-1,
+                    exponent=0.1667):
+    """Computes the extrapolated bottom discharge
+
+    Parameters
+    ----------
+    xprod: np.array(float)
+        Cross product computed from the cross product method
+    w_valid_data: np.array(bool)
+        Valid water data
+    transect_bot_method: int
+        Stored bottom method (power = 0, no slip = 1)
+    transect_exponent: float
+        Exponent for power fit
+    in_transect_idx: np.array(int)
+        Indices of ensembles in transect to be used for discharge
+    depth_cell_size_m: np.array(float)
+        Size of each depth cell in m
+    depth_cell_depth_m: np.array(float)
+        Depth of each depth cell in m
+    depth_processed_m: np.array(float)
+        Depth for each ensemble in m
+    delta_t: np.array(float)
+        Duration of each ensemble computed from QComp
+    bot_method: int
+        Specifies method to use for bottom extrapolation
+    exponent: float
+        Exponent to use for power extrapolation
+
+    Returns
+    -------
+    q_bot: np.array(float)
+        Bottom extrapolated discharge for each ensemble
+    """
+
+    # Determine extrapolation methods and exponent
+    if bot_method == -1:
+        bot_method = transect_bot_method
+        exponent = transect_exponent
+
+    # Use only data in transect
+    w_valid_data = w_valid_data[:, in_transect_idx]
+    xprod = xprod[:, in_transect_idx]
+    cell_size = depth_cell_size_m[:, in_transect_idx]
+    cell_depth = depth_cell_depth_m[:, in_transect_idx]
+    depth_ens = depth_processed_m[in_transect_idx]
+    delta_t = delta_t[in_transect_idx]
+
+    # Compute bottom variables
+    bot_rng = bot_variables(xprod, w_valid_data, cell_size, cell_depth, depth_ens)
+
+    # Compute z
+    z = np.subtract(depth_ens, cell_depth)
+
+    # Use only valid data
+    valid_data = np.logical_not(np.isnan(xprod))
+    for row in range(valid_data.shape[0]):
+        for col in range(valid_data.shape[1]):
+            if valid_data[row, col] == False:
+                z[row, col] = np.nan
+                cell_size[row, col] = np.nan
+                cell_depth[row, col] = np.nan
+
+    # Compute bottom discharge
+    q_bot = discharge_bot(bot_method, exponent, bot_rng, xprod,
+                          cell_size, cell_depth, depth_ens, delta_t, z)
+
+    return q_bot
+
+
+@njit
+@cc.export('discharge_bot', 'f8[:](i8, f8, f8[:], f8[:, :], f8[:, :], f8[:, :], f8[:], f8[:], f8[:, :])')
+def discharge_bot(bot_method, exponent, bot_rng, component,
+                  cell_size, cell_depth, depth_ens, delta_t, z):
+    """Computes the bottom extrapolated value of the provided component.
+
+    Parameters
+    ----------
+    bot_method: int
+        Bottom extrapolation method (Power, No Slip)
+    exponent: float
+        Exponent for power and no slip
+    bot_rng: np.array(float)
+        Range from the streambed to the bottom of the bottom most cell
+    component: np.array(float)
+        The variable to be extrapolated
+    cell_size: np.array(float)
+        Array of cell sizes (n cells x n ensembles)
+    cell_depth: np.array(float)
+        Depth of each cell (n cells x n ensembles)
+    depth_ens: np.array(float)
+        Bottom depth for each ensemble
+    delta_t: np.array(float)
+        Duration of each ensemble computed by QComp
+    z: np.array(float)
+        Relative depth from the bottom to each depth cell
+
+    Returns
+    -------
+    bot_value: np.array(float)
+        Total for the specified component integrated over the bottom range for each ensemble
+    """
+
+    # Initialize
+    coef = np.repeat(np.nan, int(component.shape[1]))
+
+    # Bottom power extrapolation
+    if bot_method == 0:
+        # Compute the coefficient for each ensemble
+        # Loops are used for Numba compile purposes
+
+        # Loop through ensembles
+        for col in range(component.shape[1]):
+            numerator = 0.0
+            numerator_valid = False
+            denominator_valid = False
+            denominator = 0.0
+
+            # Loop through depth cells in an ensemble
+            for row in range(component.shape[0]):
+
+                # Compute the numerator
+                numerator_temp = component[row, col] * cell_size[row, col]
+                if np.logical_not(np.isnan(numerator_temp)):
+                    numerator_valid = True
+                    numerator = numerator + numerator_temp
+
+                # Compute the denominator
+                denominator_temp = ((z[row, col] + 0.5 * cell_size[row, col]) ** (exponent + 1)) \
+                                   - ((z[row, col] - 0.5 * cell_size[row, col]) ** (exponent + 1))
+                if np.logical_not(np.isnan(denominator_temp)) and denominator_temp != 0:
+                    denominator_valid = True
+                    denominator = denominator + denominator_temp
+
+            # If both numerator and denominator are valid compute the coefficient
+            if numerator_valid and denominator_valid:
+                coef[col] = (numerator * (1 + exponent)) / denominator
+
+    # Bottom no slip extrapolation
+    elif bot_method == 1:
+        # Valid data in the lower 20% of the water column or
+        # the last valid depth cell are used to compute the no slip power fit
+        cutoff_depth = 0.8 * depth_ens
+
+        # Loop through the ensembles
+        for col in range(cell_depth.shape[1]):
+            numerator = 0.0
+            denominator = 0.0
+            numerator_valid = False
+            denominator_valid = False
+            cells_below_cutoff = False
+            last_cell_depth = np.nan
+            last_cell_size = np.nan
+            last_z = np.nan
+            last_component = np.nan
+
+            # Verify there are valid depth cutoffs
+            if np.any(np.logical_not(np.isnan(cutoff_depth))):
+
+                # Loop through depth cells
+                for row in range(cell_depth.shape[0]):
+
+                    # Identify last valid cell by end of loop
+                    if np.logical_not(np.isnan(cell_depth[row, col])):
+                        last_cell_depth = cell_depth[row, col]
+                        last_cell_size = cell_size[row, col]
+                        last_z = z[row, col]
+                        last_component = component[row, col]
+
+                        # Use all depth cells below the cutoff (1 per loop)
+                        if (cell_depth[row, col] - cutoff_depth[col]) >= 0:
+                            cells_below_cutoff = True
+
+                            # Compute numerator
+                            numerator_temp = component[row, col] * cell_size[row, col]
+                            if np.logical_not(np.isnan(numerator_temp)):
+                                numerator_valid = True
+                                numerator = numerator + numerator_temp
+
+                                # If numerator computed, compute denominator
+                                denominator_temp = ((z[row, col] + 0.5 * cell_size[row, col]) ** (exponent + 1)) \
+                                                   - ((z[row, col] - 0.5 * cell_size[row, col]) ** (exponent + 1))
+                                if np.logical_not(np.isnan(denominator_temp)) and denominator_temp != 0:
+                                    denominator_valid = True
+                                    denominator = denominator + denominator_temp
+
+                # If there are no cells below the cutoff, use the last valid depth cell
+                if np.logical_not(cells_below_cutoff):
+                    if np.logical_not(np.isnan(last_cell_depth)):
+                        # Compute numerator
+                        numerator_temp = last_component * last_cell_size
+                        if np.logical_not(np.isnan(numerator_temp)):
+                            numerator_valid = True
+                            numerator = numerator + numerator_temp
+
+                            # If numerator computed, compute denominator
+                            denominator_temp = ((last_z + 0.5 * last_cell_size) ** (exponent + 1)) \
+                                               - ((last_z - 0.5 * last_cell_size) ** (exponent + 1))
+                            if np.logical_not(np.isnan(denominator_temp)) and denominator_temp != 0:
+                                denominator_valid = True
+                                denominator = denominator + denominator_temp
+
+                # If both numerator and denominator are valid compute the coefficient
+                if numerator_valid and denominator_valid:
+                    coef[col] = (numerator * (1 + exponent)) / denominator
+
+    # Compute the bottom discharge of each profile
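+    # Integrates the power curve coef * z**exponent from the streambed (z = 0)
+    # to bot_rng, giving coef * bot_rng**(exponent + 1) / (exponent + 1), then
+    # multiplies by the ensemble duration delta_t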
+    bot_value = delta_t * (coef / (exponent + 1)) * (bot_rng**(exponent + 1))
+
+    return bot_value
+
+
+@njit
+@cc.export('bot_variables', 'f8[:](f8[:, :], b1[:, :], f8[:, :], f8[:, :], f8[:])')
+def bot_variables(x_prod, w_valid_data, cell_size, cell_depth, depth_ens):
+    """Computes the index to the bottom most valid cell in each ensemble and the range from
+    the bottom to the bottom of the bottom most cell.
+
+    Parameters
+    ----------
+    x_prod: np.array(float)
+        Cross product computed from the cross product method
+    w_valid_data: np.array(bool)
+        Valid water data
+    cell_size: np.array(float)
+        Size of each depth cell in m
+    cell_depth: np.array(float)
+        Depth of each depth cell in m
+    depth_ens: np.array(float)
+        Processed depth for each ensemble
+
+    Returns
+    -------
+    bot_rng: np.array(float)
+        Range from the streambed to the bottom of the bottom-most valid cell in each ensemble
+    """
+
+    # Identify valid data
+    valid_data1 = np.copy(w_valid_data)
+    valid_data2 = np.logical_not(np.isnan(x_prod))
+    valid_data = np.logical_and(valid_data1, valid_data2)
+
+    # Preallocate variables
+    n_ensembles = int(valid_data.shape[1])
+    bot_rng = np.repeat(np.nan, n_ensembles)
+
+    # Loop through each ensemble
+    for n in range(n_ensembles):
+
+        # Identify the bottom-most valid cell
+        idx_temp = np.where(np.logical_not(np.isnan(x_prod[:, n])))[0]
+        if len(idx_temp) > 0:
+            idx_bot = idx_temp[-1]
+            # Compute bottom range
+            bot_rng[n] = depth_ens[n] - cell_depth[idx_bot, n] - 0.5 * cell_size[idx_bot, n]
+        else:
+            bot_rng[n] = 0
+
+    return bot_rng
+
+
+if __name__ == '__main__':
+    # Used to compile code
+    cc.compile()
\ No newline at end of file
diff --git a/DischargeFunctions/top_discharge_extrapolation.cp39-win_amd64.pyd b/DischargeFunctions/top_discharge_extrapolation.cp39-win_amd64.pyd
new file mode 100644
index 0000000000000000000000000000000000000000..04600e7b2c4a54a2a5bc83211b22464e4e339763
Binary files /dev/null and b/DischargeFunctions/top_discharge_extrapolation.cp39-win_amd64.pyd differ
diff --git a/DischargeFunctions/top_discharge_extrapolation.py b/DischargeFunctions/top_discharge_extrapolation.py
new file mode 100644
index 0000000000000000000000000000000000000000..e6b19ad57295ee38f39b7f7c92293eb374e64117
--- /dev/null
+++ b/DischargeFunctions/top_discharge_extrapolation.py
@@ -0,0 +1,299 @@
+"""top_discharge_extrapolation
+Computes the extrapolated discharge in the top unmeasured portion of an ADCP transect. Methods are consistent with
+equations used by TRDI and SonTek.
+
+Example
+-------
+
+from DischargeFunctions.top_discharge_extrapolation import extrapolate_top
+
+    trans_select = getattr(data_in.depths, data_in.depths.selected)
+    num_top_method = {'Power': 0, 'Constant': 1, '3-Point': 2, None: -1}
+    self.top_ens = extrapolate_top(x_prod, data_in.w_vel.valid_data[0, :, :],
+                                    num_top_method[data_in.extrap.top_method],
+                                    data_in.extrap.exponent, data_in.in_transect_idx, trans_select.depth_cell_size_m,
+                                    trans_select.depth_cell_depth_m, trans_select.depth_processed_m, delta_t,
+                                    num_top_method[top_method], exponent)
+"""
+
+import numpy as np
+from numba.pycc import CC
+from numba import njit
+
+cc = CC('top_discharge_extrapolation')
+
+
+# Top Discharge Extrapolation with Numba
+# ======================================
+@cc.export('extrapolate_top', 'f8[:](f8[:, :], b1[:, :], i8, f8, i4[:], f8[:, :], f8[:, :], f8[:], f8[:], '
+                              'optional(i8), optional(f8))')
+def extrapolate_top(xprod,
+                    w_valid_data,
+                    transect_top_method,
+                    transect_exponent,
+                    in_transect_idx,
+                    depth_cell_size_m,
+                    depth_cell_depth_m,
+                    depth_processed_m,
+                    delta_t,
+                    top_method=-1,
+                    exponent=0.1667):
+    """Computes the extrapolated top discharge.
+
+    Parameters
+    ----------
+    xprod: np.array(float)
+        Cross product computed from the cross product method
+    w_valid_data: np.array(bool)
+        Valid water data
+    transect_top_method: int
+        Stored top method (power = 0, constant = 1, 3-point = 2)
+    transect_exponent: float
+        Exponent for power fit
+    in_transect_idx: np.array(int)
+        Indices of ensembles in transect to be used for discharge
+    depth_cell_size_m: np.array(float)
+        Size of each depth cell in m
+    depth_cell_depth_m: np.array(float)
+        Depth of each depth cell in m
+    depth_processed_m: np.array(float)
+        Depth for each ensemble in m
+    delta_t: np.array(float)
+        Duration of each ensemble computed from QComp
+    top_method: int
+        Specifies method to use for top extrapolation
+    exponent: float
+        Exponent to use for power extrapolation
+
+    Returns
+    -------
+    q_top: np.array(float)
+        Top extrapolated discharge for each ensemble
+    """
+
+    if top_method == -1:
+        top_method = transect_top_method
+        exponent = transect_exponent
+
+    # Compute top variables
+    idx_top, idx_top3, top_rng = top_variables(xprod, w_valid_data, depth_cell_size_m, depth_cell_depth_m)
+    idx_top = idx_top[in_transect_idx]
+    idx_top3 = idx_top3[:, in_transect_idx]
+    top_rng = top_rng[in_transect_idx]
+
+    # Get data from transect object
+    cell_size = depth_cell_size_m[:, in_transect_idx]
+    cell_depth = depth_cell_depth_m[:, in_transect_idx]
+    depth_ens = depth_processed_m[in_transect_idx]
+
+    # Compute z, the height of each cell center above the streambed
+    z = np.subtract(depth_ens, cell_depth)
+
+    # Use only valid data
+    valid_data = np.logical_not(np.isnan(xprod[:, in_transect_idx]))
+    for row in range(valid_data.shape[0]):
+        for col in range(valid_data.shape[1]):
+            if not valid_data[row, col]:
+                z[row, col] = np.nan
+                cell_size[row, col] = np.nan
+                cell_depth[row, col] = np.nan
+
+    # Compute top discharge
+    q_top = discharge_top(top_method, exponent, idx_top, idx_top3, top_rng,
+                          xprod[:, in_transect_idx], cell_size, cell_depth,
+                          depth_ens, delta_t, z)
+
+    return q_top
+
+
+@njit
+@cc.export('discharge_top', 'f8[:](i8, f8, i4[:], i4[:, :], f8[:], f8[:, :], f8[:, :], f8[:, :], f8[:], '
+                            'f8[:], f8[:, :])')
+def discharge_top(top_method, exponent, idx_top, idx_top_3, top_rng, component, cell_size, cell_depth,
+                  depth_ens, delta_t, z):
+    """Computes the top extrapolated value of the provided component.
+
+    Parameters
+    ----------
+    top_method: int
+        Top extrapolation method (Power = 0, Constant = 1, 3-Point = 2)
+    exponent: float
+        Exponent for the power extrapolation method
+    idx_top: np.array(int)
+        Index to the topmost valid depth cell in each ensemble
+    idx_top_3: np.array(int)
+        Index to the top 3 valid depth cells in each ensemble
+    top_rng: np.array(float)
+        Range from the water surface to the top of the topmost cell
+    component: np.array(float)
+        The variable to be extrapolated (xprod, u-velocity, v-velocity)
+    cell_size: np.array(float)
+        Array of cell sizes (n cells x n ensembles)
+    cell_depth: np.array(float)
+        Depth of each cell (n cells x n ensembles)
+    depth_ens: np.array(float)
+        Bottom depth for each ensemble
+    delta_t: np.array(float)
+        Duration of each ensemble computed by QComp
+    z: np.array(float)
+        Relative depth from the bottom of each depth cell computed in discharge top method
+
+    Returns
+    -------
+    top_value: np.array(float)
+        Total of the specified component integrated over the top range
+    """
+
+    # Initialize return
+    top_value = np.array([0.0])
+
+    # Top power extrapolation
+    if top_method == 0:
+        coef = np.repeat(np.nan, int(component.shape[1]))
+
+        # Compute the coefficient for each ensemble
+        # Loops are used so the code can be compiled with Numba
+
+        # Loop through ensembles
+        for col in range(component.shape[1]):
+            # Initialize variables
+            numerator = 0.0
+            numerator_valid = False
+            denominator_valid = False
+            denominator = 0.0
+
+            # Loop through depth cells in an ensemble
+            for row in range(component.shape[0]):
+
+                # Compute the numerator
+                numerator_temp = component[row, col] * cell_size[row, col]
+                if np.logical_not(np.isnan(numerator_temp)):
+                    numerator_valid = True
+                    numerator = numerator + numerator_temp
+
+                # Compute the denominator
+                denominator_temp = ((z[row, col] + 0.5 * cell_size[row, col]) ** (exponent + 1)) \
+                                   - ((z[row, col] - 0.5 * cell_size[row, col]) ** (exponent + 1))
+                if np.logical_not(np.isnan(denominator_temp)) and denominator_temp != 0:
+                    denominator_valid = True
+                    denominator = denominator + denominator_temp
+
+            # If both numerator and denominator are valid compute the coefficient
+            if numerator_valid and denominator_valid:
+                coef[col] = (numerator * (1 + exponent)) / denominator
+
+        # Compute the top discharge for each ensemble
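+        # Integrating the power-law profile coef * z**exponent from the top of the
+        # top-most cell (z = depth - top_rng) to the water surface (z = depth) gives
+        # the unmeasured top discharge for each ensemble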
+        top_value = delta_t * (coef / (exponent + 1)) * \
+            (depth_ens**(exponent + 1) - (depth_ens-top_rng)**(exponent + 1))
+
+    # Top constant extrapolation
+    elif top_method == 1:
+        n_ensembles = len(delta_t)
+        top_value = np.repeat(np.nan, n_ensembles)
+        for j in range(n_ensembles):
+            if idx_top[j] >= 0:
+                top_value[j] = delta_t[j] * component[idx_top[j], j] * top_rng[j]
+
+    # Top 3-point extrapolation
+    elif top_method == 2:
+        # Determine number of bins available in each profile
+        valid_data = np.logical_not(np.isnan(component))
+        n_bins = np.sum(valid_data, axis=0)
+        # Determine number of ensembles
+        n_ensembles = len(delta_t)
+        # Preallocate qtop vector
+        top_value = np.repeat(np.nan, n_ensembles)
+
+        # Loop through ensembles
+        for j in range(n_ensembles):
+
+            # Set default to constant
+            if (n_bins[j] < 6) and (n_bins[j] > 0) and (idx_top[j] >= 0):
+                top_value[j] = delta_t[j] * component[idx_top[j], j] * top_rng[j]
+
+            # If 6 or more bins use 3-pt at top
+            if n_bins[j] > 5:
+                sumd = 0.0
+                sumd2 = 0.0
+                sumq = 0.0
+                sumqd = 0.0
+
+                # Use loop to sum data from top 3 cells
+                for k in range(3):
+                    if not np.isnan(cell_depth[idx_top_3[k, j], j]):
+                        sumd = sumd + cell_depth[idx_top_3[k, j], j]
+                        sumd2 = sumd2 + cell_depth[idx_top_3[k, j], j]**2
+                        sumq = sumq + component[idx_top_3[k, j], j]
+                        sumqd = sumqd + (component[idx_top_3[k, j], j] * cell_depth[idx_top_3[k, j], j])
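+                # Solve the 2x2 normal equations for the least-squares line
+                # component = a * depth + b through the top three cells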
+                delta = 3 * sumd2 - sumd**2
+                a = (3 * sumqd - sumq * sumd) / delta
+                b = (sumq * sumd2 - sumqd * sumd) / delta
+
+                # Compute discharge for 3-pt fit
+                qo = (a * top_rng[j]**2) / 2 + b * top_rng[j]
+                top_value[j] = delta_t[j] * qo
+
+    return top_value
+
+
+@njit
+@cc.export('top_variables', '(f8[:, :], b1[:, :], f8[:, :], f8[:, :])')
+def top_variables(xprod, w_valid_data, depth_cell_size_m, depth_cell_depth_m):
+    """Computes the index to the top and top three valid cells in each ensemble and
+    the range from the water surface to the top of the topmost cell.
+
+    Parameters
+    ----------
+    xprod: np.array(float)
+        Cross product computed from the cross product method
+    w_valid_data: np.array(bool)
+        Valid water data
+    depth_cell_size_m: np.array(float)
+        Size of each depth cell in m
+    depth_cell_depth_m: np.array(float)
+        Depth of each depth cell in m
+
+    Returns
+    -------
+    idx_top: np.array(int)
+        Index to the topmost valid depth cell in each ensemble
+    idx_top_3: np.array(int)
+        Index to the top 3 valid depth cells in each ensemble
+    top_rng: np.array(float)
+        Range from the water surface to the top of the topmost cell
+    """
+
+    # Get data from transect object
+    valid_data1 = np.copy(w_valid_data)
+    valid_data2 = np.logical_not(np.isnan(xprod))
+    valid_data = np.logical_and(valid_data1, valid_data2)
+
+    # Preallocate variables
+    # NOTE: Numba does not support np.tile
+    n_ensembles = int(valid_data.shape[1])
+    idx_top = np.repeat(-1, int(valid_data.shape[1]))
+    idx_top_3 = np.ones((3, int(valid_data.shape[1])), dtype=np.int32)
+    idx_top_3[:] = int(-1)
+    top_rng = np.repeat(np.nan, n_ensembles)
+
+    # Loop through ensembles
+    for n in range(n_ensembles):
+        # Identify topmost 1 and 3 valid cells
+        idx_temp = np.where(np.logical_not(np.isnan(xprod[:, n])))[0]
+        if len(idx_temp) > 0:
+            idx_top[n] = idx_temp[0]
+            if len(idx_temp) > 2:
+                for k in range(3):
+                    idx_top_3[k, n] = idx_temp[k]
+            # Compute top range
+            top_rng[n] = depth_cell_depth_m[idx_top[n], n] - 0.5 * depth_cell_size_m[idx_top[n], n]
+        else:
+            top_rng[n] = 0
+            idx_top[n] = 0
+
+    return idx_top, idx_top_3, top_rng
+
+
+if __name__ == '__main__':
+    # Used to compile code
+    cc.compile()
diff --git a/MiscLibs/__init__.py b/MiscLibs/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/MiscLibs/abba_2d_interpolation.py b/MiscLibs/abba_2d_interpolation.py
new file mode 100644
index 0000000000000000000000000000000000000000..296806d4598c5489768eb2abe2c513896783a9eb
--- /dev/null
+++ b/MiscLibs/abba_2d_interpolation.py
@@ -0,0 +1,413 @@
+"""abba_2d_interpolation
+
+This module performs 2-D interpolation on data that is assumed to be arranged in row-column format rather
+than in a random pattern. The rows represent vertical location or y-coordinate of each cell
+in the data array. The columns represent a horizontal location or x-coordinate of the data.
+The cell size and thus the y-coordinate of a cell can change from cell to cell or ensemble to ensemble.
+The interpolation algorithm searches for all valid cells above, below, before, and after
+that touch the cell to be interpolated. Bathymetry is honored by checking to see if the depth of the streambed
+of the cell before or after is greater than the bottom of the target cell. When searching before or after, if the
+streambed is encountered before a valid cell then no valid cell is used in that direction.
+
+The methods provide the flexibility to determine neighbors based on either a raw vertical location
+or a normalized location. To use a normalized location set normalize to True.
+
+For efficiency the data_list can contain multiple types of data that lie on the same x-y locations.
+This allows multiple interpolations without having to recompute neighbors and distances.
+
+Example
+-------
+
+For interpolating water velocities collected by an ADCP
+
+interpolated_data = abba_idw_interpolation(data_list=[u_processed_mps, v_processed_mps],
+                                           valid_data=valid_data,
+                                           cells_above_sl=cells_above_sl,
+                                           y_centers=depth_cell_depth_m,
+                                           y_cell_size=depth_cell_size_m,
+                                           y_depth=depth_processed_m,
+                                           x_shiptrack=distance_along_shiptrack,
+                                           normalize=True)
+
+interpolated_u_values = interpolated_data[0]
+interpolated_v_values = interpolated_data[1]
+
+"""
+import numpy as np
+
+
+def find_neighbors(valid_data, cells_above_sl, y_cell_centers, y_cell_size, y_depth, search_loc, normalize=False):
+    """ Finds the nearest valid cells above, below, before, and after each invalid cell. The before and after
+    Cells must have data in the same y range as the invalid cell.
+
+    Parameters
+    ----------
+    valid_data: np.array(logical)
+        Logical array indicating whether each cell is valid (true) or invalid (false)
+    cells_above_sl: np.array(logical)
+        Logical array indicating which cells are above the side lobe cutoff (true)
+    y_cell_centers: np.array(float)
+        Y coordinate corresponding to the center of the data cells
+    y_cell_size: np.array(float)
+        Size of each cell in the y-direction
+    y_depth: np.array(float)
+        1-D array containing values that will be used to normalize the data and specifying the lower boundary for
+        identifying neighbors
+    search_loc: list
+        Identifies location to search (above, below, before, after)
+    normalize: bool
+        Boolean indicating if normalized data should be used
+
+    Returns
+    -------
+    neighbors: list
+        List of dictionaries providing the indices of the above, below, before, and after valid cells.
+    """
+
+    # Compute cell extents
+    y_top = y_cell_centers - 0.5 * y_cell_size
+    y_bottom = y_cell_centers + 0.5 * y_cell_size
+    y_bottom_actual = y_cell_centers + 0.5 * y_cell_size
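+    # Retain the un-normalized cell bottoms for the bathymetry checks in
+    # find_before and find_after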
+
+    if normalize:
+        y_top = np.round(y_top / y_depth, 3)
+        y_bottom = np.round(y_bottom / y_depth, 3)
+
+    # ID cells above side lobe with invalid data
+    valid_data_float = valid_data.astype(float)
+    valid_data_float[np.logical_not(cells_above_sl)] = np.nan
+    invalid_cell_index = np.where(valid_data_float == 0)
+
+    # Initialize output list
+    neighbors = []
+
+    # Process each index
+    for cell, ens in zip(invalid_cell_index[0], invalid_cell_index[1]):
+        points = []
+        target = (cell, ens)
+
+        if 'above' in search_loc:
+            # Identify indices of cells above and below target
+            above = find_above(target, valid_data)
+            if above is not None:
+                points.append(above)
+
+        if 'below' in search_loc:
+            below = find_below(target, valid_data)
+            if below is not None:
+                points.append(below)
+
+        # Find all cells in ensembles before or after the target ensemble that overlap the target cell
+        # This is a change implemented on 2/27/2020 - dsm
+        y_match = np.logical_and(y_top[target] <= y_bottom, y_bottom[target] >= y_top)
+        y_match = np.logical_and(y_match, valid_data)
+
+        if 'before' in search_loc:
+            # Identify indices of cells before and after target
+            before = find_before(target, y_match, y_depth, y_bottom_actual)
+            if before:
+                points = points + before
+
+        if 'after' in search_loc:
+            after = find_after(target, y_match, y_depth, y_bottom_actual)
+            if after:
+                points = points + after
+
+        neighbors.append({'target': target, 'neighbors': points})
+
+    return neighbors
+
+
+def find_above(target, valid_data):
+    """ Finds the nearest valid cell above the target.
+
+    Parameters
+    ----------
+    target: tuple
+        Indices of target cell
+    valid_data: np.array(logical)
+        Logical array indicating whether each cell is valid (true) or invalid (false)
+
+    Returns
+    -------
+    above_idx: tuple
+        Indices of valid cell immediately above target
+    """
+
+    # Initialize cell counter
+    above_idx = target[0] - 1
+
+    # Find nearest valid cell above target
+    while above_idx >= 0 and not valid_data[above_idx, target[1]]:
+        above_idx = above_idx - 1
+    if above_idx >= 0:
+        above_idx = (above_idx, target[1])
+    else:
+        above_idx = None
+
+    return above_idx
+
+
+def find_below(target, valid_data):
+    """ Finds the nearest valid cell below the target.
+
+    Parameters
+    ----------
+    target: tuple
+        Indices of target cell
+    valid_data: np.array(logical)
+        Logical array indicating whether each cell is valid (true) or invalid (false)
+
+    Returns
+    -------
+    below_idx: tuple
+        Indices of valid cell immediately below target
+    """
+
+    # Initialize cell counter
+    below_idx = target[0] + 1
+
+    # Determine cell row index limit
+    n_cells = len(valid_data[:, target[1]])-1
+
+    # Find nearest valid cell below target
+    while below_idx <= n_cells and not valid_data[below_idx, target[1]]:
+        below_idx = below_idx + 1
+    if below_idx <= n_cells:
+        below_idx = (below_idx, target[1])
+    else:
+        below_idx = None
+
+    return below_idx
+
+
+def find_before(target, y_match, y_depth, y_bottom):
+    """ Finds the nearest ensemble before the target that has valid cells within the vertical range of the target
+
+    Parameters
+    ----------
+    target: tuple
+        Indices of target cell
+    y_match: np.array(logical)
+        2-D array of all cells that are within the vertical range of the target cell
+    y_depth: np.array(float)
+        1-D array containing values that will be used to normalize the data and specifying the lower boundary for
+        identifying neighbors
+    y_bottom: np.array(float)
+        Bottom depth of each cell
+
+    Returns
+    -------
+    before_idx: list
+        List of tuples of indices of all cells in the nearest ensemble before the target that are within
+        the vertical range of the target cell
+    """
+
+    # Initialize ensemble counter
+    before_ens = target[1] - 1
+
+    # Loop until an ensemble is found that has valid data within the vertical range of the target while honoring
+    # the bathymetry. If the streambed is encountered while searching for a previously valid ensemble then
+    # it is determined that there is no available valid data before the target that can be used.
+    found = False
+
+    while (before_ens >= 0) and not found:
+        if y_bottom[target] < y_depth[before_ens] and np.any(y_match[:, before_ens]):
+            found = True
+        elif y_bottom[target] > y_depth[before_ens]:
+            before_ens = -999
+            found = True
+        else:
+            before_ens = before_ens - 1
+
+    # Find and store the indices of all cells from the identified ensemble
+    # that are within the vertical range of the target
+    if before_ens >= 0:
+        rows = np.where(y_match[:, before_ens])[0]
+        before_idx = []
+        for row in rows:
+            before_idx.append((row, before_ens))
+    else:
+        before_idx = []
+
+    return before_idx
+
+
+def find_after(target, y_match, y_depth, y_bottom):
+    """ Finds the nearest ensemble after the target that has valid cells within the vertical range of the target
+
+    Parameters
+    ----------
+    target: tuple
+        Indices of target cell
+    y_match: np.array(logical)
+        2-D array of all cells that are within the vertical range of the target cell
+    y_depth: np.array(float)
+        1-D array containing values that will be used to normalize the data and specifying the lower boundary for
+        identifying neighbors
+    y_bottom: np.array(float)
+        Bottom depth of each cell
+
+    Returns
+    -------
+    after_idx: list
+        List of tuples of indices of all cells in the nearest ensemble after the target that are within
+        the vertical range of the target cell
+    """
+
+    # Initialize ensemble counter
+    after_ens = target[1] + 1
+
+    # Loop until an ensemble is found that has valid data within the vertical range of the target while honoring
+    # the bathymetry. If the streambed is encountered while searching for a next valid ensemble then
+    # it is determined that there is no available valid data after the target that can be used.
+    found = False
+
+    while (after_ens <= y_match.shape[1] - 1) and not found:
+        if (y_bottom[target] < y_depth[after_ens]) and np.any(y_match[:, after_ens]):
+            found = True
+        elif y_bottom[target] > y_depth[after_ens]:
+            after_ens = -999
+            found = True
+        else:
+            after_ens = after_ens + 1
+
+    # Find and store the indices of all cells from the identified ensemble
+    # that are within the vertical range of the target
+    if (after_ens <= y_match.shape[1]-1) and (after_ens > 0):
+        rows = np.where(y_match[:, after_ens])[0]
+        after_idx = []
+        for row in rows:
+            after_idx.append((row, after_ens))
+    else:
+        after_idx = []
+
+    return after_idx
+
+
+def compute_distances(target, neighbors, x, y):
+    """ Computes distances between the target and neighbors.
+
+    Parameters
+    ----------
+    target: tuple
+        Indices of target cell
+    neighbors: list
+        List of indices of target's neighboring cells
+    x: np.array(float)
+        1-D array of distances between ensembles
+    y: np.array(float)
+        2-D array of vertical distances of cells for each ensemble
+
+    Returns
+    -------
+    distances: list
+        List of distances from target to each neighbor
+    """
+
+    # Initialize target location
+    target_y = y[target]
+    target_x = x[target[1]]
+
+    # Compute distance from target cell to each neighbor
+    distances = []
+    for neighbor in neighbors:
+        distances.append(np.sqrt((y[neighbor] - target_y) ** 2 + (x[neighbor[1]] - target_x) ** 2))
+
+    return distances
+
+
+def idw_interpolation(data, neighbor_indices, distances):
+    """ Interpolate value using neighbors and inverse distance weighting.
+
+    Parameters
+    ----------
+    data: np.array(float)
+        2-D array containing data to interpolate
+    neighbor_indices: list
+        List of tuples defining the indices of the target's neighbors
+    distances: list
+        List of distances from target to each neighbor
+
+    Returns
+    -------
+    interpolated_value: float
+        Value of target cell interpolated from neighbors
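+
+    Example
+    -------
+    Two neighbors with values 2.0 and 4.0 at distances 1.0 and 3.0 give
+    (2.0 / 1.0 + 4.0 / 3.0) / (1 / 1.0 + 1 / 3.0) = 2.5.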
+    """
+
+    # Compute weighted sum of neighbor values
+    sum_of_weights = 0
+    weighted_sum = 0
+    for n, index in enumerate(neighbor_indices):
+        sum_of_weights = sum_of_weights + (1/distances[n])
+        weighted_sum = weighted_sum + data[index] * (1/distances[n])
+
+    # Compute interpolated value
+    if sum_of_weights > 0:
+        interpolated_value = weighted_sum / sum_of_weights
+    else:
+        interpolated_value = np.nan
+
+    return interpolated_value
+
+
+def abba_idw_interpolation(data_list, valid_data, cells_above_sl, y_centers, y_cell_size, y_depth,
+                           x_shiptrack, normalize, search_loc=('above', 'below', 'before', 'after')):
+    """ Interpolates values for invalid cells using the neighboring cells above, below, before, and after and
+    and inverse distance averaging.
+
+    Parameters
+    ----------
+    data_list: list
+        List of np.array(float) data to be used for interpolation
+    valid_data: np.array(logical)
+        Logical array of valid data
+    cells_above_sl: np.array(logical)
+        Logical array of all valid cells above the side lobe cutoff
+    y_centers: np.array(float)
+        Y coordinate corresponding to the center of the data cells
+    y_cell_size: np.array(float)
+        Size of each cell in the y-direction
+    y_depth: np.array(float)
+        1-D array containing values that will be used to normalize the data and specifying the lower boundary for
+        identifying neighbors
+    x_shiptrack: np.array(float)
+        X coordinate of cumulative shiptrack
+    normalize: bool
+        Boolean value specifying whether data should be normalized or not.
+    search_loc: list or tuple
+        Identifies location to search (above, below, before, after)
+
+    Returns
+    -------
+    interpolated_data: list
+        Indices and interpolated values for invalid cells, corresponding to the arrays in data_list.
+    """
+
+    # Initialize output list
+    interpolated_data = [[] for _ in range(len(data_list))]
+
+    valid_cells = np.logical_and(cells_above_sl, valid_data)
+    if not np.all(valid_cells):
+        # Find neighbors associated with each target
+        interpolation_points = find_neighbors(valid_data=valid_data,
+                                              cells_above_sl=cells_above_sl,
+                                              y_cell_centers=y_centers,
+                                              y_cell_size=y_cell_size,
+                                              y_depth=y_depth,
+                                              search_loc=search_loc,
+                                              normalize=normalize)
+
+        # Process each target
+        for point in interpolation_points:
+            # Compute distance from target to neighbors
+            distances = compute_distances(target=point['target'],
+                                          neighbors=point['neighbors'],
+                                          x=x_shiptrack,
+                                          y=y_centers)
+
+            # Interpolate target for each data set in data_list
+            for n, data in enumerate(data_list):
+                interpolated_value = idw_interpolation(data=data,
+                                                       neighbor_indices=point['neighbors'],
+                                                       distances=distances)
+                interpolated_data[n].append([point['target'], interpolated_value])
+
+    return interpolated_data
diff --git a/MiscLibs/bayes_cov_compiled.cp39-win_amd64.pyd b/MiscLibs/bayes_cov_compiled.cp39-win_amd64.pyd
new file mode 100644
index 0000000000000000000000000000000000000000..0f8fe8e893cbccb7a46623aeecc6897ec6038cf8
Binary files /dev/null and b/MiscLibs/bayes_cov_compiled.cp39-win_amd64.pyd differ
diff --git a/MiscLibs/bayes_cov_compiled.py b/MiscLibs/bayes_cov_compiled.py
new file mode 100644
index 0000000000000000000000000000000000000000..9f627e89eee6bc5172fc274ba95708f8b0bcc4a0
--- /dev/null
+++ b/MiscLibs/bayes_cov_compiled.py
@@ -0,0 +1,236 @@
+"""bayes_cov_compiled
+Computes the coefficient of variation using a Bayesian approach and an assumed posterior
+log-normal distribution.
+
+Example
+-------
+
+from MiscLibs.bayes_cov_compiled import bayes_cov
+
+cov_68 = bayes_cov(transects, cov_prior, cov_prior_u, nsim)
+"""
+
+import numpy as np
+from numba.pycc import CC
+from numba import njit
+
+cc = CC('bayes_cov_compiled')
+
+
+# Bayesian COV
+# ============
+@cc.export('bayes_cov', 'f8(f8[::1], f8, f8, i4)')
+def bayes_cov(transects_total_q, cov_prior=0.03, cov_prior_u=0.2, nsim=20000):
+    """Computes the coefficient of variation using a Bayesian approach and an assumed posterior
+    log-normal distribution.
+
+    Parameters
+    ----------
+    transects_total_q: np.array(float)
+        List of total discharge for each transect
+    cov_prior: float
+        Expected COV (68%) based on prior knowledge. Assumed to be 3% by default.
+    cov_prior_u: float
+        Uncertainty (68%) of cov_prior. Assumed to be 20%.
+    nsim: int
+        Number of simulations. 20000 was found to produce stable results.
+
+    Returns
+    -------
+    cov: float
+        Coefficient of variation
+    """
+
+    theta_std = np.abs(np.array([np.mean(transects_total_q), cov_prior])) * cov_prior_u \
+        / np.sqrt(len(transects_total_q))
+
+    # Modified for compatibility with Numba
+    sam, obj_funk = metropolis(theta0=np.array([np.mean(transects_total_q), cov_prior]),
+                               obs_data=transects_total_q,
+                               cov_prior=cov_prior,
+                               cov_prior_u=cov_prior_u,
+                               nsim=nsim,
+                               theta_std=theta_std)
+
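+    # Discard the first half of the chain as burn-in and average the retained COV samples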
+    n_burn = int(nsim / 2)
+
+    cov = np.mean(sam[n_burn:nsim, 1])
+
+    return cov
+
+
+@njit
+@cc.export('metropolis', '(f8[:], f8[:], f8, f8, i4, f8[:])')
+def metropolis(theta0, obs_data, cov_prior, cov_prior_u, nsim, theta_std):
+    """Implements the Metropolis_Hastings Markov chain Monte Carlo (MCMC) algorithm for sampling the
+    posterior distribution, assuming a log-normal posterior distribution.
+
+    Parameters
+    ----------
+    theta0: np.array(float)
+        Starting value of parameters (mean and cov_prior)
+    obs_data: np.array(float)
+        1D array of total discharge for each transect
+    cov_prior: float
+        Expected COV (68%) based on prior knowledge.
+    cov_prior_u: float
+        Uncertainty (68%) of cov_prior.
+    nsim: int
+        Number of simulations.
+    theta_std: np.array(float)
+        Standard deviations for the Gaussian jump distribution. If nan, default values are computed.
+
+    Returns
+    -------
+    sam: np.array(float)
+        Matrix containing the MCMC samples
+    obj_funk: np.array(float)
+        Vector containing the corresponding values of the objective function
+        (i.e. of the unnormalized log-posterior)
+    """
+
+    # Initialize
+    npar = len(theta0)
+    sam = np.zeros((nsim + 1, npar))
+    obj_funk = np.zeros((nsim + 1, 1))
+
+    # Parameters - used for automatic computation of starting stds of the Gaussian Jump distribution
+    if np.any(np.isnan(theta_std)):
+        std_factor = 0.1
+        theta_std = std_factor * np.abs(theta0)
+
+    # Check if starting point is feasible - abandon otherwise
+    f_current = log_post(param=theta0, measures=obs_data, cov_prior=cov_prior, cov_prior_u=cov_prior_u)
+
+    if not is_feasible(f_current):
+        print('Metropolis:FATAL:unfeasible starting point')
+        return sam, obj_funk
+    else:
+        sam[0, :] = list(theta0)
+        obj_funk[0] = f_current
+
+        # MCMC loop
+        np.random.seed(0)
+        candid = np.array([np.nan, np.nan])
+        for i in range(nsim):
+            current = sam[i, :]
+            f_current = obj_funk[i]
+
+            # Propose a new candidate
+            # Under Numba np.random.normal will not accept arrays as input
+            candid[0] = np.random.normal(loc=current[0], scale=theta_std[0])
+            candid[1] = np.random.normal(loc=current[1], scale=theta_std[1])
+
+            # Evaluate objective function at candidate
+            f_candid = log_post(param=candid,
+                                measures=obs_data,
+                                cov_prior=cov_prior,
+                                cov_prior_u=cov_prior_u)
+
+            if not is_feasible(f_candid):
+                sam[i + 1, :] = current
+                obj_funk[i + 1] = f_current
+            else:
+                # Generate deviate ~U[0,1]
+                u = np.random.uniform(0, 1)
+
+                # Compute Metropolis acceptance ratio
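+                # The log ratio is clipped to [-100, 0] before exponentiation to avoid overflow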
+                ratio = np.exp(min((np.max(np.hstack((np.array([float(-100)]), f_candid - f_current))), float(0))))
+
+                # Apply acceptance rule
+                if u <= ratio:
+                    sam[i + 1, :] = candid
+                    obj_funk[i + 1] = f_candid
+                else:
+                    sam[i + 1, :] = current
+                    obj_funk[i + 1] = f_current
+
+        # Modified return for Numba, eliminating need for dictionary
+        return sam, obj_funk
+
+
+@njit
+@cc.export('log_post', 'f8(f8[:], f8[:], f8, f8)')
+def log_post(param, measures, cov_prior, cov_prior_u):
+    """Define function returning the posterior log-pdf using the model measures ~ N(true_value,cov*true_value),
+    with a flat prior on true_value and a log-normal prior for cov (= coefficient of variation)
+
+    Parameters
+    ----------
+    param: np.array(float)
+        Array containing the true value and COV
+    measures: np.array(float)
+        Array of observations
+    cov_prior: float
+        Expected COV (68%) based on prior knowledge.
+    cov_prior_u: float
+        Uncertainty (68%) of cov_prior.
+
+    Returns
+    -------
+    logp: float
+        Unnormalized log-posterior
+    """
+    # Check if any parameter is <=0
+    # since  both true_value and cov have to be positive - otherwise sigma = true_value*cov does not make sense
+    # if any(item <= 0 for item in param):
+    #     return -math.inf
+    # Changed for compatibility with Numba
+    if np.any(np.less_equal(param, 0)):
+        return np.NINF
+
+    true_value = param[0]
+    cov = param[1]
+    sigma = cov * true_value  # standard deviation
+
+    # Compute log-likelihood under the model: measures ~ N(true_value,sigma)
+    # You can easily change this model (e.g. lognormal for a positive measurand?)
+    # OPTION 1 : the model follows a Normal distribution
+    # This equation is used for compatibility with Numba, instead of call to scipy.stats.norm.logpdf
+    log_likelihood = np.sum(np.log(np.exp(-(((measures - true_value) / sigma) ** 2) / 2)
+                                   / (np.sqrt(2 * np.pi) * sigma)))
+
+    # Prior on true_value - flat prior used here but you may change this if you have prior knowledge
+    log_prior_1 = 0
+
+    # Lognormal prior
+    x = cov
+    mu = np.log(cov_prior)
+    scale = cov_prior_u
+    pdf = np.exp(-(np.log(x) - mu) ** 2 / (2 * scale ** 2)) / (x * scale * np.sqrt(2 * np.pi))
+    log_prior_2 = np.log(pdf)
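+    # Equivalent to scipy.stats.lognorm.logpdf(x, s=cov_prior_u, scale=cov_prior),
+    # written out explicitly for Numba compatibility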
+
+    # Joint prior (prior independence)
+    log_prior = log_prior_1 + log_prior_2
+
+    # Return (unnormalized) log-posterior
+    logp = log_likelihood + log_prior
+    if np.isnan(logp):
+        # Used np to eliminate the need for math package
+        logp = np.NINF  # returns -Inf rather than NaN's (required by the MCMC sampler used subsequently)
+    return logp
+
+
+@njit
+@cc.export('is_feasible', 'b1(f8)')
+def is_feasible(value):
+    """Checks that a value is a real value (not infinity or nan)
+
+    Parameters
+    ----------
+    value: float
+
+    Returns
+    -------
+    bool
+    """
+    if np.isinf(value) or np.isnan(value):
+        return False
+    else:
+        return True
+
+
+if __name__ == '__main__':
+    # Used to compile code
+    cc.compile()
diff --git a/MiscLibs/common_functions.py b/MiscLibs/common_functions.py
new file mode 100644
index 0000000000000000000000000000000000000000..8a3393fc957f66d2c20a0040d6e839cb0ccff6cb
--- /dev/null
+++ b/MiscLibs/common_functions.py
@@ -0,0 +1,466 @@
+import numpy as np
+import scipy.stats as sp
+
+
+def cosd(angle):
+    """Compute cosine of angle in degrees.
+
+    Parameters
+    ----------
+    angle: float
+        Angle in degrees
+    """
+    
+    return np.cos(np.pi * angle/180)
+
+
+def sind(angle):
+    """Compute sine of angle in degrees.
+
+        Parameters
+        ----------
+        angle: float
+            Angle in degrees
+        """
+    
+    return np.sin(np.pi * angle/180)
+
+
+def tand(angle):
+    """Compute tangent of angle in degrees.
+
+        Parameters
+        ----------
+        angle: float
+            Angle in degrees
+        """
+    
+    return np.tan(np.pi * angle/180)
+
+
+def arctand(angle):
+    """Compute arctangent of angle in degrees.
+
+        Parameters
+        ----------
+        angle: float
+            Angle in degrees
+        """
+    
+    return np.arctan(angle) * 180/np.pi
+
+
+def cart2pol(x, y):
+    """Convert cartesian coordinates to polar coordinates.
+
+    Parameters
+    ----------
+    x: np.array(float)
+        x coordinate
+    y: np.array(float)
+        y coordinate
+
+    Returns
+    -------
+    phi: float
+        Angle in radians
+    rho: float
+        Magnitude
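+
+    Example
+    -------
+    cart2pol(1.0, 1.0) returns (np.pi / 4, np.sqrt(2)).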
+    """
+    
+    rho = np.sqrt(x**2 + y**2)
+    phi = np.arctan2(y, x)
+    
+    return phi, rho
+
+
+def pol2cart(phi, rho):
+    """Convert polar coordinates to cartesian coordinates.
+
+        Parameters
+        ----------
+        phi: np.array(float)
+            Angle in radians
+        rho: np.array(float)
+            Magnitude
+
+        Returns
+        -------
+        x: float
+            x coordinate
+        y: float
+            y coordinate
+
+        """
+    
+    x = rho * np.cos(phi)
+    y = rho * np.sin(phi)
+    
+    return x, y
+
+
+def iqr(data):
+    """This function computes the iqr consistent with Matlab
+
+    Parameters
+    ----------
+    data: np.ndarray
+        Data for which the statistic is required
+
+    Returns
+    -------
+    sp_iqr: float
+        Interquartile range
+
+    """
+
+    # If 2-D array flatten to 1-D array
+    if len(data.shape) > 1:
+        data_1d = data.flatten()
+    else:
+        data_1d = data
+
+    # Remove nan elements
+    idx = np.where(np.logical_not(np.isnan(data_1d)))[0]
+    data_1d = data_1d[idx]
+
+    # Compute statistics
+    q25, q50, q75 = sp.mstats.mquantiles(data_1d, alphap=0.5, betap=0.5)
+    sp_iqr = q75 - q25
+
+    return sp_iqr
+
+
+def iqr_2d(data):
+    """This function computes the iqr consistent with Matlab
+
+    Parameters
+    ----------
+    data: np.ndarray
+        Data for which the statistic is required
+
+    Returns
+    -------
+    sp_iqr: float
+        Interquartile range
+
+    """
+
+    # Remove nan elements
+    data = np.array(data)
+    idx = np.where(np.logical_not(np.isnan(data)))[0]
+    data = data[idx]
+
+    # Compute statistics
+    q25, q50, q75 = sp.mstats.mquantiles(data, alphap=0.5, betap=0.5)
+    sp_iqr = q75 - q25
+    return sp_iqr
+
+
+def azdeg2rad(angle):
+    """Converts an azimuth angle in degrees to radians.
+
+    Parameters
+    ----------
+    angle: float, np.ndarray(float)
+        Azimuth angle in degrees
+
+    Returns
+    -------
+    direction: float, np.ndarray(float)
+        Angle in radians
+    """
+
+    # Convert to radians
+    direction = np.deg2rad(90-angle)
+
+    # Create positive angle for both scalar and array inputs
+    if np.ndim(direction) == 0:
+        if direction < 0:
+            direction = direction + 2 * np.pi
+    else:
+        idx = np.where(direction < 0)[0]
+        direction[idx] = direction[idx] + 2 * np.pi
+        
+    return direction
+
+
+def rad2azdeg(angle):
+    """Converts an angle in radians to an azimuth in degrees.
+
+    Parameters
+    ----------
+    angle: float, np.ndarray(float)
+        Angle in radians
+
+    Returns
+    -------
+    deg: float, np.ndarray(float)
+        Azimuth in degrees
+    """
+
+    if isinstance(angle, float):
+        deg = np.rad2deg(angle)
+        deg = 90 - deg
+        if deg < 0:
+            deg += 360
+            
+        return deg
+    else:
+        # Multiple values
+        deg = np.rad2deg(angle)
+        deg = 90 - deg
+        sub_zero = np.where(nan_less(deg, 0))
+        deg[sub_zero] = deg[sub_zero] + 360
+        
+        return deg
+
+
+def nandiff(values):
+    """Computes difference in consecutive values with handling of nans.
+
+    Parameters
+    ----------
+    values: np.ndarray()
+        1-D array of numbers
+
+    Returns
+    -------
+    final_values: np.ndarray()
+        1-D array of differences of consecutive non nan numbers
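+
+    Example
+    -------
+    nandiff(np.array([1.0, np.nan, 4.0])) returns array([3., nan]).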
+    """
+    
+    final_values = []
+    for n in range(len(values) - 1):
+        # Check for nan and add nan to final values
+        if np.isnan(values[n]):
+            final_values.append(np.nan)
+        else:
+            # Search for next non nan number and compute difference
+            i = n + 1
+            while np.isnan(values[i]) and i < len(values) - 1:
+                i += 1
+            
+            final_values.append(values[i] - values[n])
+        
+    return np.array(final_values)
+
+
+def valid_number(data_in) -> float:
+    """Check to see if data_in can be converted to float.
+
+    Parameters
+    ----------
+    data_in: str
+        String to be converted to float
+
+    Returns
+    -------
+    data_out: float
+        Returns a float of data_in or nan if conversion is not possible
+    """
+
+    try:
+        data_out = float(data_in)
+    except ValueError:
+        data_out = np.nan
+    return data_out
+
+
+def nans(shape, dtype=float):
+    """Create array of nans.
+
+    Parameters
+    ----------
+    shape: tuple
+        Shape of array to be filled with nans
+    dtype: type
+        Type of array
+
+    Returns
+    -------
+    a: np.ndarray(float)
+        Array of nan
+    """
+    a = np.empty(shape, dtype)
+    a.fill(np.nan)
+    return a
+
+
+def checked_idx(transects):
+    """Create list of transect indices of all checked transects.
+
+    Parameters
+    ----------
+    transects: list
+        List of TransectData objects
+
+    Returns
+    -------
+    checked: list
+        List of indices
+
+    """
+    checked = []
+    for n, transect in enumerate(transects):
+        if transect.checked:
+            checked.append(n)
+
+    return checked
+
+
+def units_conversion(units_id='SI'):
+    """Computes the units conversion from SI units used internally to the
+    desired display units.
+
+    Parameters
+    ----------
+    units_id: str
+        String variable identifying units (English, SI) SI is the default.
+
+    Returns
+    -------
+    units: dict
+        dictionary of unit conversion and labels
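+
+    Example
+    -------
+    With a hypothetical length in meters (length_m):
+
+    units = units_conversion(units_id='English')
+    length_ft = length_m * units['L']  # 1 / 0.3048 converts m to ft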
+    """
+
+    if units_id == 'SI':
+        units = {'L': 1,
+                 'Q': 1,
+                 'A': 1,
+                 'V': 1,
+                 'label_L': '(m)',
+                 'label_Q': '(m3/s)',
+                 'label_A': '(m2)',
+                 'label_V': '(m/s)',
+                 'ID': 'SI'}
+
+    else:
+        units = {'L': 1.0 / 0.3048,
+                 'Q': (1.0 / 0.3048)**3,
+                 'A': (1.0 / 0.3048)**2,
+                 'V': 1.0 / 0.3048,
+                 'label_L': '(ft)',
+                 'label_Q': '(ft3/s)',
+                 'label_A': '(ft2)',
+                 'label_V': '(ft/s)',
+                 'ID': 'English'}
+
+    return units
+
+
+def convert_temperature(temp_in, units_in, units_out):
+    """Converts temperature from F to C or C to F.
+
+    Parameters
+    ----------
+    temp_in: np.array
+        temperature in units_in
+    units_in: str
+        C for Celsius or F for Fahrenheit
+    units_out: str
+        C for Celsius or F for Fahrenheit
+
+    Returns
+    -------
+    temp_out: np.array
+        temperature in units_out
+    """
+
+    temp_out = None
+    if units_in == 'F':
+        if units_out == 'C':
+            temp_out = (temp_in - 32) * (5./9.)
+        else:
+            temp_out = temp_in
+
+    elif units_in == 'C':
+        if units_out == 'C':
+            temp_out = temp_in
+        else:
+            temp_out = (temp_in * (9./5.)) + 32
+
+    return temp_out
+
+
+def nan_less_equal(data1, data2):
+    """Computes data1 <= data2 and sets all np.nan comparisons to False.
+
+    Parameters
+    ----------
+    data1: np.array()
+        Data array.
+    data2: np.array()
+        Data array.
+
+    Returns
+    -------
+    np.array(bool)
+        Result of the elementwise comparison.
+    """
+
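+    # A nan in either operand makes the difference nan; substituting -999 forces
+    # those comparisons to evaluate False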
+    d3 = data2 - data1
+    d3[np.isnan(d3)] = -999.
+    return d3 >= 0
+
+
+def nan_less(data1, data2):
+    """Computes data1 < data2 and sets all np.nan comparisons to False.
+
+    Parameters
+    ----------
+    data1: np.array()
+        Data array.
+    data2: np.array()
+        Data array.
+
+    Returns
+    -------
+    np.array(bool)
+        Result of the elementwise comparison.
+    """
+
+    d3 = data2 - data1
+    d3[np.isnan(d3)] = -999.
+    return d3 > 0
+
+
+def nan_greater_equal(data1, data2):
+    """Computes data1 >= data2 and sets all np.nan comparisons to False.
+
+    Parameters
+    ----------
+    data1: np.array()
+        Data array.
+    data2: np.array()
+        Data array.
+
+    Returns
+    -------
+    np.array(bool)
+        Result of the elementwise comparison.
+    """
+
+    d3 = data1 - data2
+    d3[np.isnan(d3)] = -999.
+    return d3 >= 0
+
+
+def nan_greater(data1, data2):
+    """Computes data1 > data2 and sets all np.nan comparisons to False.
+
+    Parameters
+    ----------
+    data1: np.array()
+        Data array.
+    data2: np.array()
+        Data array.
+
+    Returns
+    -------
+    np.array(bool)
+        Result of the elementwise comparison.
+    """
+
+    d3 = data1 - data2
+    d3[np.isnan(d3)] = -999.
+    return d3 > 0
diff --git a/MiscLibs/non_uniform_savgol.py b/MiscLibs/non_uniform_savgol.py
new file mode 100644
index 0000000000000000000000000000000000000000..5f52b8abf357b284ee086c88b8e683a2769e24bf
--- /dev/null
+++ b/MiscLibs/non_uniform_savgol.py
@@ -0,0 +1,120 @@
+import numpy as np
+
+
+def non_uniform_savgol(x, y, window, polynom):
+    """
+    Applies a Savitzky-Golay filter to y with non-uniform spacing
+    as defined in x
+
+    This is based on https://dsp.stackexchange.com/questions/1676/savitzky-golay-smoothing-filter-for-not-equally-spaced-data
+    The borders are interpolated like scipy.signal.savgol_filter would do
+
+    Parameters
+    ----------
+    x : array_like
+        List of floats representing the x values of the data
+    y : array_like
+        List of floats representing the y values. Must have same length
+        as x
+    window : int (odd)
+        Window length in data points. Must be odd and smaller than the size of x
+    polynom : int
+        The order of the polynomial used. Must be smaller than the window size
+
+    Returns
+    -------
+    np.array of float
+        The smoothed y values
+    """
+    if len(x) != len(y):
+        raise ValueError('"x" and "y" must be of the same size')
+
+    if len(x) < window:
+        raise ValueError('The data size must be larger than the window size')
+
+    if type(window) is not int:
+        raise TypeError('"window" must be an integer')
+
+    if window % 2 == 0:
+        raise ValueError('The "window" must be an odd integer')
+
+    if type(polynom) is not int:
+        raise TypeError('"polynom" must be an integer')
+
+    if polynom >= window:
+        raise ValueError('"polynom" must be less than "window"')
+
+    half_window = window // 2
+    polynom += 1
+
+    # Initialize variables
+    A = np.empty((window, polynom))     # Matrix
+    tA = np.empty((polynom, window))    # Transposed matrix
+    t = np.empty(window)                # Local x variables
+    y_smoothed = np.full(len(y), np.nan)
+
+    # Start smoothing
+    for i in range(half_window, len(x) - half_window, 1):
+        # Center a window of x values on x[i]
+        for j in range(0, window, 1):
+            t[j] = x[i + j - half_window] - x[i]
+
+        # Create the initial matrix A and its transposed form tA
+        for j in range(0, window, 1):
+            r = 1.0
+            for k in range(0, polynom, 1):
+                A[j, k] = r
+                tA[k, j] = r
+                r *= t[j]
+        try:
+            # Multiply the two matrices
+            tAA = np.matmul(tA, A)
+
+            # Invert the product of the matrices
+            tAA = np.linalg.inv(tAA)
+
+            # Calculate the pseudoinverse of the design matrix
+            coeffs = np.matmul(tAA, tA)
+
+            # Calculate c0 which is also the y value for y[i]
+            y_smoothed[i] = 0
+            for j in range(0, window, 1):
+                y_smoothed[i] += coeffs[0, j] * y[i + j - half_window]
+
+            # If at the end or beginning, store all coefficients for the polynomial
+            if i == half_window:
+                first_coeffs = np.zeros(polynom)
+                for j in range(0, window, 1):
+                    for k in range(polynom):
+                        first_coeffs[k] += coeffs[k, j] * y[j]
+            elif i == len(x) - half_window - 1:
+                last_coeffs = np.zeros(polynom)
+                for j in range(0, window, 1):
+                    for k in range(polynom):
+                        last_coeffs[k] += coeffs[k, j] * y[len(y) - window + j]
+        except BaseException:
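+            # A singular window matrix leaves y_smoothed[i] as nan for this point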
+            pass
+
+    # Interpolate the result at the left border
+    for i in range(0, half_window, 1):
+        y_smoothed[i] = 0
+        x_i = 1
+        try:
+            for j in range(0, polynom, 1):
+                y_smoothed[i] += first_coeffs[j] * x_i
+                x_i *= x[i] - x[half_window]
+        except BaseException:
+            y_smoothed[i] = y[i]
+
+    # Interpolate the result at the right border
+    for i in range(len(x) - half_window, len(x), 1):
+        y_smoothed[i] = 0
+        x_i = 1
+        try:
+            for j in range(0, polynom, 1):
+                y_smoothed[i] += last_coeffs[j] * x_i
+                x_i *= x[i] - x[-half_window - 1]
+        except BaseException:
+            y_smoothed[i] = y[i]
+
+    return y_smoothed
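+
+
+# Example usage (illustrative, hypothetical data): smooth unevenly spaced samples
+# with a 5-point window and a quadratic polynomial:
+#     x = np.array([0.0, 0.9, 2.1, 2.9, 4.2, 5.0, 6.1])
+#     y = np.array([0.1, 0.8, 4.0, 9.2, 17.5, 25.3, 37.0])
+#     y_smoothed = non_uniform_savgol(x, y, window=5, polynom=2)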
diff --git a/MiscLibs/robust_loess.py b/MiscLibs/robust_loess.py
new file mode 100644
index 0000000000000000000000000000000000000000..805a8482802f19b2516f974929f6785ca70f14bf
--- /dev/null
+++ b/MiscLibs/robust_loess.py
@@ -0,0 +1,280 @@
+"""robust_loess
+This module computes a robust loess smooth using a quadratic model as defined by
+W.S.Cleveland, (1979) "Robust Locally Weighted Regression and Smoothing Scatterplots",
+Journal of the American Statistical Association, Vol 74, No. 368, pp. 829-836.
+Both x and y values are required and are assumed to be 1D arrays (n,).
+
+Example
+-------
+
+from MiscLibs.robust_loess import rloess
+
+smooth_fit = rloess(x, y, span)
+"""
+import numpy as np
+from numba import jit, njit
+
+# Set constants used in multiple functions
+eps = np.finfo('float').eps
+seps = np.sqrt(eps)
+
+# @jit(cache=True, nopython=True)
+@njit
+def nearest_neighbors(num_neighbors, idx, x, valid_x):
+    """Find the nearest k neighbors to x[i] that are not nan.
+
+    Parameters
+    ----------
+    num_neighbors: int
+        Number of neighbors to find
+    idx: int
+        Index for the target x value
+    x: np.array
+        1D array of the independent variable
+    valid_x: bool
+        Boolean array indicating valid x data.
+
+    Returns
+    -------
+    neighbors_idx: int
+        Indices for neighbors in x array
+    """
+
+    # Find neighbors
+    if np.nansum(valid_x) <= num_neighbors:
+        # If there are k points or fewer, then they are all neighbors
+        neighbors_idx = np.where(np.equal(valid_x, np.repeat(True, len(valid_x))))[0]
+    else:
+        # Find the distance to the k closest points
+        distance = np.abs(x - x[idx])
+        distance_sorted = np.sort(distance[valid_x])
+        distance_neighbors = distance_sorted[num_neighbors - 1]
+
+        # Find all points that are as close as or closer than the num_neighbors closest points
+        # close = np.array(distance <= distance_neighbors)
+        close = np.less_equal(distance, distance_neighbors)
+
+        # Find the indices of x that are both close and valid
+        neighbors_idx = np.where(np.equal(np.logical_and(close, valid_x), np.repeat(True, len(valid_x))))[0]
+
+    return neighbors_idx
+
+# @jit(cache=True, nopython=True)
+@njit
+def tricube_weights(distance):
+    """ Convert distances into weights using tri-cubic weight function.
+    Note for Matlab: This function returns the square-root of the weights.
+
+    Parameters
+    ----------
+    distance: np.array
+        1D array of distances
+
+    Returns
+    -------
+    weights: np.array
+        1D array of weights
+    """
+
+    max_distance = np.max(distance)
+    if max_distance > 0:
+        distance = distance / max_distance
+    weights = (1 - distance ** 3) ** 1.5
+    return weights
+
+# @jit(cache=True, nopython=True)
+@njit
+def bisquare(data):
+    """Bisqure weight function which for values greater than are equal to 1 are set to zero.
+
+    Parameters
+    ----------
+    data: np.array
+        1D array of data used to compute weight
+
+    Returns
+    -------
+    weights: np.array
+        Computed weight
+
+    """
+    weights = np.zeros(data.shape)
+    d3 = 1 - np.abs(data)
+    d3[np.isnan(d3)] = -999.
+    idx = d3 > 0
+    # idx = nan_less(np.abs(data), 1)
+    weights[idx] = np.abs(1 - data[idx] ** 2)
+    return weights
+
+# @jit(cache=True, nopython=True)
+@njit
+def robust_weights(residuals, max_eps):
+    """Compute robust weights using residuals.
+
+    Parameters
+    ----------
+    residuals: np.array
+        1D array of residuals from previous fit
+    max_eps: float
+        Smallest value to be represented
+
+    Returns
+    -------
+    weights: np.array
+        1D array of computed weights
+    """
+
+    # Compute median using only valid data
+    s = np.nanmax([1e8 * max_eps, np.nanmedian(np.abs(residuals))])
+
+    # Compute weights
+    weights = bisquare(residuals / (6 * s))
+    weights[np.isnan(residuals)] = 0
+
+    return weights
+
+# @jit(cache=True, nopython=True)
+@njit
+def compute_loess(x, y, neighbors_idx, idx, r_weights=None):
+    """Computes the loess smooth for the specified point x[i]. If robust weights are specified the computed weights
+    are adjusted by the robust weights.
+
+    Parameters
+    ----------
+    x: np.array(float)
+        1D array of independent variable
+    y: np.array(float)
+        1D array of dependent variable
+    neighbors_idx: np.array(int)
+        1D array of indices of x defining neighbors
+    idx: int
+        Index of x defining target
+    r_weights: np.array(float)
+        1D array of robust weights
+
+    Returns
+    -------
+    smoothed_value: float
+        Computed smoothed value for target
+    """
+
+    if len(neighbors_idx) > 0:
+        # Center around current point to improve conditioning
+        distances = x[neighbors_idx] - x[idx]
+        distances_abs = np.abs(distances)
+        neighbors_y = y[neighbors_idx]
+
+        weights = tricube_weights(distances_abs)
+
+        # If all weights are 0, skip weighting
+        if np.all(weights < seps):
+            weights[:] = 1
+
+        if r_weights is not None:
+            weights = weights * r_weights[neighbors_idx]
+
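+        # Weighted quadratic fit: the design matrix rows (after the transpose
+        # below) are [1, d, d**2] for each neighbor distance d, and both the
+        # matrix and the observations are scaled by the (optionally robust)
+        # weights before least squares.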
+        weighted_x_matrix = np.vstack((np.ones(distances.shape), distances))
+        weighted_x_matrix = np.vstack((weighted_x_matrix, np.expand_dims(distances * distances, axis=0)))
+        weighted_x_matrix = weights.repeat(weighted_x_matrix.shape[0]).reshape(-1, 3).T * weighted_x_matrix
+        neighbors_y = weights * neighbors_y
+
+        # Solve the weighted least squares problem
+        smoothed_values, _, _, _ = np.linalg.lstsq(weighted_x_matrix.T,
+                                                   neighbors_y.T)
+        smoothed_value = smoothed_values[0]
+    else:
+        smoothed_value = np.nan
+    return smoothed_value
+
+@njit
+def rloess(x, y, span):
+    """This function computes a robust loess smooth using a quadratic model as defined by
+    W.S.Cleveland, (1979) "Robust Locally Weighted Regression and Smoothing Scatterplots",
+    Journal of the American Statistical Association, Vol 74, No. 368, pp. 829-836.
+    Both x and y values are required and are assumed to be 1D arrays (n,).
+
+    Parameters
+    ----------
+    x: np.array
+        1D array of independent variable
+    y: np.array
+        1D array of dependent variable
+    span: int
+        Number of neighbors to use in the regression
+    """
+
+    # Number of cycles of the robust fit
+    cycles = 5
+
+    n_points = len(y)
+    smoothed_values = np.copy(y)
+
+    if span > 1:
+
+        diff_x = np.diff(x)
+
+        # Assumes non-uniform x
+        y_nan = np.isnan(y)
+        any_nans = np.any(y_nan[:])
+        the_diffs = np.concatenate((np.array([1]), diff_x, np.array([1])), axis=0)
+
+        # Pre-allocate space for lower and upper indices for each fit
+        lower_bound = np.repeat(0, n_points)
+        upper_bound = np.repeat(0, n_points)
+
+        # Compute the non-robust smooth
+        for n in range(n_points):
+
+            # if x[i] and x[i-1] are equal just use previous fit
+            if the_diffs[n] == 0:
+
+                smoothed_values[n] = smoothed_values[n-1]
+                lower_bound[n] = int(lower_bound[n-1])
+                upper_bound[n] = int(upper_bound[n-1])
+
+            else:
+
+                # Find nearest neighbors
+                neighbors_idx = nearest_neighbors(span, n, x, np.logical_not(y_nan))
+                # Store neighbors for robust loop
+                lower_bound[n] = int(np.min(neighbors_idx))
+                upper_bound[n] = int(np.max(neighbors_idx))
+
+                if len(neighbors_idx) < 1:
+                    smoothed_values[n] = np.nan
+                else:
+                    smoothed_values[n] = compute_loess(x, y, neighbors_idx, n)
+        # Non-robust fit complete
+
+        # Compute residual and apply robust fit
+        max_absy_eps = np.max(np.abs(y)) * eps
+        for cycle in range(cycles - 1):
+            residuals = y - smoothed_values
+
+            # Compute robust weights
+            r_weights = robust_weights(residuals, max_absy_eps)
+
+            # Find new value for each point
+            for n in range(n_points):
+                if n > 0 and x[n] == x[n-1]:
+                    smoothed_values[n] = smoothed_values[n-1]
+                else:
+                    if not np.isnan(smoothed_values[n]):
+                        neighbors_idx = np.array(list(range(lower_bound[n], upper_bound[n] + 1)))
+
+                        if any_nans:
+                            neighbors_idx = neighbors_idx[np.logical_not(y_nan[neighbors_idx])]
+
+                        if np.any(r_weights[neighbors_idx] <= 0):
+                            neighbors_idx = nearest_neighbors(span, n, x, (r_weights > 0))
+
+                        smoothed_values[n] = compute_loess(x, y, neighbors_idx, n, r_weights)
+    return smoothed_values
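+
+
+if __name__ == '__main__':
+    # Minimal smoke test (illustrative only; values are arbitrary): smooth a
+    # noisy sine containing one gross outlier. The robust weights should pull
+    # the fitted value at the outlier back toward the underlying curve.
+    x_demo = np.linspace(0.0, 10.0, 50)
+    y_demo = np.sin(x_demo) + 0.1 * np.random.randn(50)
+    y_demo[25] = 10.0  # gross outlier
+    print(rloess(x_demo, y_demo, 10)[25])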
diff --git a/MiscLibs/robust_loess_compiled.cp39-win_amd64.pyd b/MiscLibs/robust_loess_compiled.cp39-win_amd64.pyd
new file mode 100644
index 0000000000000000000000000000000000000000..08116c4f307289f31802a023b62e0e224158b185
Binary files /dev/null and b/MiscLibs/robust_loess_compiled.cp39-win_amd64.pyd differ
diff --git a/MiscLibs/robust_loess_compiled.py b/MiscLibs/robust_loess_compiled.py
new file mode 100644
index 0000000000000000000000000000000000000000..55e24056c5e9f5331c357599cf8a5ab4d1ff102e
--- /dev/null
+++ b/MiscLibs/robust_loess_compiled.py
@@ -0,0 +1,284 @@
+"""robust_loess_compiled
+This module computes a robust loess smooth using a quadratic model as defined by
+W.S.Cleveland, (1979) "Robust Locally Weighted Regression and Smoothing Scatterplots",
+Journal of the American Statistical Association, Vol 74, No. 368, pp. 829-836.
+Both x and y values are required and are assumed to be 1D arrays (n,).
+
+Example
+-------
+
+from MiscLibs.robust_loess_compiled import rloess
+
+smooth_fit = rloess(x, y, span)
+"""
+import numpy as np
+from numba.pycc import CC
+from numba import njit
+
+cc = CC('robust_loess_compiled')
+
+# Set constants
+eps = np.finfo('float').eps
+seps = np.sqrt(eps)
+
+@njit
+@cc.export('nearest_neighbors', 'i8[:](i4, i4, f8[:], b1[:])')
+def nearest_neighbors(num_neighbors, idx, x, valid_x):
+    """Find the nearest k neighbors to x[i] that are not nan.
+
+    Parameters
+    ----------
+    num_neighbors: int
+        Number of neighbors to find
+    idx: int
+        Index for the target x value
+    x: np.array(float)
+        1D array of the independent variable
+    valid_x: np.array(bool)
+        Boolean array indicating valid x data.
+
+    Returns
+    -------
+    neighbors_idx: np.array(int)
+        Indices for neighbors in x array
+    """
+
+    # Find neighbors
+    if np.nansum(valid_x) <= num_neighbors:
+        # If there are k points or fewer, then they are all neighbors
+        neighbors_idx = np.where(np.equal(valid_x, np.repeat(True, len(valid_x))))[0]
+    else:
+        # Find the distance to the k closest points
+        distance = np.abs(x - x[idx])
+        distance_sorted = np.sort(distance[valid_x])
+        distance_neighbors = distance_sorted[num_neighbors - 1]
+
+        # Find all points that are as close as or closer than the num_neighbors closest points
+        close = np.less_equal(distance, distance_neighbors)
+
+        # Find the indices of x that are both close and valid
+        neighbors_idx = np.where(np.equal(np.logical_and(close, valid_x), np.repeat(True, len(valid_x))))[0]
+
+    return neighbors_idx
+
+@njit
+@cc.export('tricube_weights', 'f8[:](f8[:])')
+def tricube_weights(distance):
+    """ Convert distances into weights using tri-cubic weight function.
+    Note for Matlab: This function returns the square-root of the weights.
+
+    Parameters
+    ----------
+    distance: np.array(float)
+        1D array of distances
+
+    Returns
+    -------
+    weights: np.array(float)
+        1D array of weights
+    """
+
+    max_distance = np.max(distance)
+    if max_distance > 0:
+        distance = distance / max_distance
+    weights = (1 - distance ** 3) ** 1.5
+    return weights
+
+@njit
+@cc.export('bisquare', 'f8[:](f8[:])')
+def bisquare(data):
+    """Bisqure weight function which for values greater than are equal to 1 are set to zero.
+
+    Parameters
+    ----------
+    data: np.array(float)
+        1D array of data used to compute weight
+
+    Returns
+    -------
+    weights: np.array(float)
+        Computed weight
+
+    """
+    weights = np.zeros(data.shape)
+
+    # Compute abs(data) < 1 while avoiding NaN runtime warnings
+    d3 = 1 - np.abs(data)
+    d3[np.isnan(d3)] = -999.
+    idx = d3 > 0
+
+    weights[idx] = np.abs(1 - data[idx] ** 2)
+    return weights
+
+@njit
+@cc.export('robust_weights', 'f8[:](f8[:], f8)')
+def robust_weights(residuals, max_eps):
+    """Compute robust weights using residuals.
+
+    Parameters
+    ----------
+    residuals: np.array(float)
+        1D array of residuals from previous fit
+    max_eps: float
+        Smallest value to be represented
+
+    Returns
+    -------
+    weights: np.array(float)
+        1D array of computed weights
+    """
+
+    # Compute median using only valid data
+    s = np.nanmax([1e8 * max_eps, np.nanmedian(np.abs(residuals))])
+
+    # Compute weights
+    weights = bisquare(residuals / (6 * s))
+    weights[np.isnan(residuals)] = 0
+
+    return weights
+
+@njit
+@cc.export('compute_loess', 'f8(f8[:], f8[:], i8[:], i4, optional(f8[:]))')
+def compute_loess(x, y, neighbors_idx, idx, r_weights=None):
+    """Computes the loess smooth for the specified point x[i]. If robust weights are specified the computed weights
+    are adjusted by the robust weights.
+
+    Parameters
+    ----------
+    x: np.array(float)
+        1D array of independent variable
+    y: np.array(float)
+        1D array of dependent variable
+    neighbors_idx: np.array(int)
+        1D array of indices of x defining neighbors
+    idx: int
+        Index of x defining target
+    r_weights: np.array(float)
+        1D array of robust weights
+
+    Returns
+    -------
+    smoothed_value: float
+        Computed smoothed value for target
+    """
+
+    if len(neighbors_idx) > 0:
+        # Center around current point to improve conditioning
+        distances = x[neighbors_idx] - x[idx]
+        distances_abs = np.abs(distances)
+        neighbors_y = y[neighbors_idx]
+
+        weights = tricube_weights(distances_abs)
+
+        # If all weights are 0, skip weighting
+        if np.all(weights < seps):
+            weights[:] = 1
+
+        if r_weights is not None:
+            weights = weights * r_weights[neighbors_idx]
+
+        weighted_x_matrix = np.vstack((np.ones(distances.shape), distances))
+        weighted_x_matrix = np.vstack((weighted_x_matrix, np.expand_dims(distances * distances, axis=0)))
+        weighted_x_matrix = weights.repeat(weighted_x_matrix.shape[0]).reshape(-1, 3).T * weighted_x_matrix
+        neighbors_y = weights * neighbors_y
+
+        # Solve the weighted least squares problem
+        smoothed_values, _, _, _ = np.linalg.lstsq(weighted_x_matrix.T,
+                                                   neighbors_y.T)
+        smoothed_value = smoothed_values[0]
+    else:
+        smoothed_value = np.nan
+    return smoothed_value
+
+@cc.export('rloess', 'f8[:](f8[::1], f8[::1], i4)')
+def rloess(x, y, span):
+    """This function computes a robust loess smooth using a quadratic model as defined by
+    W.S.Cleveland, (1979) "Robust Locally Weighted Regression and Smoothing Scatterplots",
+    Journal of the American Statistical Association, Vol 74, No. 368, pp. 829-836.
+    Both x and y values are required and are assumed to be 1D arrays (n,).
+
+    Parameters
+    ----------
+    x: np.array(float)
+        1D array of independent variable
+    y: np.array(float)
+        1D array of dependent variable
+    span: int
+        Number of neighbors to use in the regression
+    """
+
+    # Number of cycles of the robust fit
+    cycles = 5
+
+    n_points = len(y)
+    smoothed_values = np.copy(y)
+
+    if span > 1:
+
+        diff_x = np.diff(x)
+
+        # Assumes non-uniform x
+        y_nan = np.isnan(y)
+        any_nans = np.any(y_nan[:])
+        the_diffs = np.concatenate((np.array([1]), diff_x, np.array([1])), axis=0)
+
+        # Pre-allocate space for lower and upper indices for each fit
+        lower_bound = np.repeat(0, n_points)
+        upper_bound = np.repeat(0, n_points)
+
+        # Compute the non-robust smooth
+        for n in range(n_points):
+
+            # if x[i] and x[i-1] are equal just use previous fit
+            if the_diffs[n] == 0:
+
+                smoothed_values[n] = smoothed_values[n-1]
+                lower_bound[n] = int(lower_bound[n-1])
+                upper_bound[n] = int(upper_bound[n-1])
+
+            else:
+
+                # Find nearest neighbors
+                neighbors_idx = nearest_neighbors(span, n, x, np.logical_not(y_nan))
+                # Store neighbors for robust loop
+                lower_bound[n] = int(np.min(neighbors_idx))
+                upper_bound[n] = int(np.max(neighbors_idx))
+
+                if len(neighbors_idx) < 1:
+                    smoothed_values[n] = np.nan
+                else:
+                    smoothed_values[n] = compute_loess(x, y, neighbors_idx, n)
+        # Non-robust fit complete
+
+        # Compute residual and apply robust fit
+        max_absy_eps = np.max(np.abs(y)) * eps
+        for cycle in range(cycles - 1):
+            residuals = y - smoothed_values
+
+            # Compute robust weights
+            r_weights = robust_weights(residuals, max_absy_eps)
+
+            # Find new value for each point
+            for n in range(n_points):
+                if n > 0 and x[n] == x[n-1]:
+                    smoothed_values[n] = smoothed_values[n-1]
+                else:
+                    if not np.isnan(smoothed_values[n]):
+                        neighbors_idx = np.array(list(range(lower_bound[n], upper_bound[n] + 1)))
+
+                        if any_nans:
+                            neighbors_idx = neighbors_idx[np.logical_not(y_nan[neighbors_idx])]
+
+                        if np.any(r_weights[neighbors_idx] <= 0):
+                            neighbors_idx = nearest_neighbors(span, n, x, (r_weights > 0))
+
+                        smoothed_values[n] = compute_loess(x, y, neighbors_idx, n, r_weights)
+    return smoothed_values
+
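+# Note: running this module directly invokes numba's ahead-of-time compiler,
+# producing a platform-specific extension (e.g. the cp39 Windows .pyd shipped
+# alongside this source) that can be imported with no JIT warm-up at run time.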
+if __name__ == '__main__':
+    cc.compile()
\ No newline at end of file
diff --git a/MiscLibs/run_iqr.cp39-win_amd64.pyd b/MiscLibs/run_iqr.cp39-win_amd64.pyd
new file mode 100644
index 0000000000000000000000000000000000000000..cb919a42c9407054e036f49c7f05488f19e29365
Binary files /dev/null and b/MiscLibs/run_iqr.cp39-win_amd64.pyd differ
diff --git a/MiscLibs/run_iqr.py b/MiscLibs/run_iqr.py
new file mode 100644
index 0000000000000000000000000000000000000000..541d9a8005c62a7b51ff2073a998587fbfc66bf3
--- /dev/null
+++ b/MiscLibs/run_iqr.py
@@ -0,0 +1,102 @@
+import numpy as np
+from numba.pycc import CC
+from numba import njit
+
+cc = CC('run_iqr')
+
+@cc.export('run_iqr', 'f8[:](i4, f8[::1])')
+def run_iqr(half_width, data):
+    """Computes a running Innerquartile Range
+    The routine accepts a column vector as input.  "halfWidth" number of data
+    points for computing the Innerquartile Range are selected before and
+    after the target data point, but no including the target data point.
+    Near the ends of the series the number of points before or after are reduced.
+    Nan in the data are counted as points.  The IQR is computed on the selected
+    subset of points.  The process occurs for each point in the provided column vector.
+    A column vector with the computed IQR at each point is returned.
+
+    Parameters
+    ----------
+    half_width: int
+        Number of ensembles before and after current ensemble which are used to compute the IQR
+    data: np.array(float)
+        Data for which the IQR is computed
+    """
+    npts = len(data)
+    half_width = int(half_width)
+
+    if npts < 20:
+        half_width = int(np.floor(npts / 2))
+
+    iqr_array = []
+
+    # Compute IQR for each point
+    for n in range(npts):
+
+        # Sample selection for 1st point
+        if n == 0:
+            sample = data[1:1 + half_width]
+
+        # Sample selection at end of data set
+        elif n + half_width > npts:
+            sample = np.hstack((data[n - half_width - 1:n - 1], data[n:npts]))
+
+        # Sample selection at beginning of data set
+        elif half_width >= n + 1:
+            sample = np.hstack((data[0:n], data[n + 1:n + half_width + 1]))
+
+        # Sample selection in body of data set
+        else:
+            sample = np.hstack((data[n - half_width:n], data[n + 1:n + half_width + 1]))
+
+        iqr_array.append(iqr(sample))
+
+    return np.array(iqr_array)
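+
+# Illustrative windowing for run_iqr (hypothetical 100-point series, so
+# half_width is not reduced): with half_width=10, the sample for point 50 is
+# points 40-49 and 51-60; the target point itself is excluded.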
+
+@njit
+@cc.export('iqr', 'f8(f8[::1])')
+def iqr(data_1d):
+    """This function computes the iqr consistent with Matlab
+
+    Parameters
+    ----------
+    data: np.ndarray
+        Data for which the statistic is required
+
+    Returns
+    -------
+    sp_iqr: float
+        Inner quartile range
+
+    """
+
+    # Remove nan elements
+    idx = np.where(np.logical_not(np.isnan(data_1d)))[0]
+    data_1d = data_1d[idx]
+    if len(data_1d) < 2:
+        sp_iqr = np.nan
+    else:
+        # Compute statistics
+        q25 = compute_quantile(data_1d, 0.25)
+        q75 = compute_quantile(data_1d, 0.75)
+        sp_iqr = q75 - q25
+
+    return sp_iqr
+
+@njit
+@cc.export('compute_quantile', 'f8(f8[::1], f8)')
+def compute_quantile(data_1d, q):
+    """Computes the q-th quantile using the same midpoint convention as Matlab.
+    For example, compute_quantile(np.array([1., 2., 3., 4.]), 0.25) returns 1.5.
+
+    Parameters
+    ----------
+    data_1d: np.array(float)
+        1D array of data
+    q: float
+        Quantile to compute (0-1)
+
+    Returns
+    -------
+    result: float
+        Computed quantile
+    """
+    sorted_data = np.sort(data_1d)
+    n_samples = len(sorted_data)
+    sample_idx = q * (n_samples) - 0.5
+    x1 = int(np.floor(sample_idx))
+    x2 = int(np.ceil(sample_idx))
+    if x1 != x2:
+        result = (sample_idx - x1) * (sorted_data[x2] - sorted_data[x1]) + sorted_data[x1]
+    else:
+        result = sorted_data[x1]
+    return result
+
+if __name__ == '__main__':
+    cc.compile()
\ No newline at end of file
diff --git a/main.py b/main.py
new file mode 100644
index 0000000000000000000000000000000000000000..58140bd4779276d64719597f2813d0668b5baf1b
--- /dev/null
+++ b/main.py
@@ -0,0 +1,353 @@
+import os
+import pandas as pd
+import open_functions as of
+from Classes.Measurement import Measurement
+import tkinter as tk
+from tkinter import ttk
+import tkinter.messagebox
+from threading import Thread
+import datetime
+
+
+class App:
+    def __init__(self):
+        self.choice_list = []
+        self.root = tk.Tk()
+        self.root.geometry('250x400')
+        self.root.resizable(0, 0)
+        self.root.title('MAQIE')
+        self.root.wm_iconbitmap(r'ressources/10.ico')
+        self.root.bind("<Return>", lambda event: Thread(target=self.run).start())
+
+        self.choice1 = tk.BooleanVar(value=True)
+        self.choice2 = tk.BooleanVar(value=True)
+        self.choice3 = tk.BooleanVar(value=True)
+        default_text3 = "Code station HYDRO"
+        self.text3 = tk.StringVar(value=default_text3)
+        self.nav_menu4 = tk.StringVar(self.root)
+        self.extrap_menu5 = tk.StringVar(self.root)
+        self.choice5 = tk.BooleanVar(value=False)
+        self.text5 = tk.StringVar(value="0.1667")
+
+        self.choice6 = tk.BooleanVar(value=False)
+
+        # .dat data with entry box for Bareme station's name
+        def some_callback(event):
+            if self.text3.get() == default_text3:
+                self.Entry_3.delete(0, "end")
+            return None
+
+        # Entry text
+        self.Entry_3 = tk.Entry(self.root, textvariable=self.text3)
+        self.Entry_3.bind("<Button-1>", some_callback)
+        # Create checkbutton and connect it to entry
+        self.Checkbutton_3 = tk.Checkbutton(self.root, text='Save as .dat (Barème)', variable=self.choice3,
+                                            command=lambda e=self.Entry_3, v=self.choice3: self.naccheck(e, v))
+        # Position checkbutton and entrybox
+        self.Checkbutton_3.pack(side=tk.TOP, anchor=tk.W, ipadx=15)
+        self.Entry_3.pack(anchor=tk.W, padx=35, ipadx=35)
+
+        # .csv data
+        self.Checkbutton_1 = tk.Checkbutton(self.root, text='Save as .csv (BaRatinAGE)', variable=self.choice1)
+        self.Checkbutton_1.pack(side=tk.TOP, anchor=tk.W, ipadx=15)
+        # .BAT data
+        self.Checkbutton_2 = tk.Checkbutton(self.root, text='Save as .BAD (BaRatinAGE)', variable=self.choice2)
+        self.Checkbutton_2.pack(side=tk.TOP, anchor=tk.W, ipadx=15)
+
+        # Choose navigation reference
+        self.label4 = tk.Label(self.root, text="Navigation reference")
+        self.label4.pack(side=tk.TOP, anchor=tk.W, padx=15)
+        self.nav_menu4.set("Default")  # default value
+        self.Option_nav4 = tk.OptionMenu(self.root, self.nav_menu4, "Default", "Bottom track")
+        self.Option_nav4.pack(anchor=tk.W, padx=35, ipadx=35)
+
+        # Choose extrapolation law
+        self.label5 = tk.Label(self.root, text="Extrapolation law")
+        self.label5.pack(side=tk.TOP, anchor=tk.W, padx=15)
+
+        self.extrap_menu5.set("Default")  # default value
+        self.Entry_5 = tk.Entry(self.root, textvariable=self.text5)
+        self.Checkbutton_5 = tk.Checkbutton(self.root, text='Customize exp.', variable=self.choice5,
+                                            command=lambda e=self.Entry_5, v=self.choice5: self.naccheck(e, v))
+
+        self.Option_extrap5 = tk.OptionMenu(self.root, self.extrap_menu5, "Default", "Power", "CNS", "3-points",
+                                            command=lambda e=self.Checkbutton_5, v=self.extrap_menu5: self.nanmenu(e,
+                                                                                                                   v))
+        self.Option_extrap5.pack(anchor=tk.W, padx=35, ipadx=35)
+
+        self.Checkbutton_5.pack(side=tk.TOP, anchor=tk.W, ipadx=15)
+        self.Entry_5.pack(anchor=tk.W, padx=35, ipadx=35)
+        # Lock by default
+        self.Checkbutton_5["state"] = "disabled"
+        self.Entry_5.configure(state='disabled')
+
+        # No moving-bed detected
+        self.Checkbutton_6 = tk.Checkbutton(self.root, text='No moving-bed detected', variable=self.choice6)
+        self.Checkbutton_6.pack(side=tk.TOP, anchor=tk.W, ipadx=15)
+
+        # Progress bar
+        self.pb = ttk.Progressbar(self.root, orient='horizontal', length=100)
+        self.pb.pack(side=tk.TOP, anchor=tk.W, padx=10, ipadx=200)
+        self.value_label = ttk.Label(self.root, text=self.update_progress_label())
+        self.value_label.pack()
+
+        # Run Button
+        self.button = tk.Button(self.root, text='Run [Return]', command=lambda: Thread(target=self.run).start(),
+                                padx=50, pady=10, height=1, width=12)
+        self.button.pack()
+
+        # Close button
+        self.close_button = tk.Button(self.root, text='Close', command=self.close, padx=50, pady=10,
+                                      height=1, width=12)
+        self.close_button.pack()
+
+        self.root.protocol('WM_DELETE_WINDOW', self.close)
+        self.root.mainloop()
+
+    def close(self):
+        self.root.destroy()
+
+    def progress(self, value):
+        self.pb['value'] = value
+        self.value_label['text'] = self.update_progress_label()
+
+    def update_progress_label(self, run=True):
+        if run:
+            return f"Current Progress: {self.pb['value']}%"
+        else:
+            return f"Extract over"
+
+    def naccheck(self, entry, var):
+        if var.get() == 0:
+            entry.configure(state='disabled')
+        else:
+            entry.configure(state='normal')
+
+    def nanmenu(self, entry, var):
+        if var.get() == "Default":
+            self.Checkbutton_5["state"] = "disabled"
+            self.Entry_5.configure(state='disabled')
+        else:
+            self.Checkbutton_5["state"] = "normal"
+            self.naccheck(self.Entry_5, self.choice5)
+
+    def switch_all(self, way=True):
+        # Able/Disable button
+        if way:
+            self.Checkbutton_3["state"] = "disabled"
+            self.Entry_3.configure(state='disabled')
+            self.Checkbutton_1["state"] = "disabled"
+            self.Checkbutton_2["state"] = "disabled"
+            self.Option_nav4["state"] = "disabled"
+            self.Option_extrap5["state"] = "disabled"
+            self.Checkbutton_5["state"] = "disabled"
+            self.Entry_5.configure(state='disabled')
+            self.Checkbutton_6["state"] = "disabled"
+            self.button["state"] = "disabled"
+            self.close_button["state"] = "disabled"
+        else:
+            self.Checkbutton_3["state"] = "normal"
+            self.naccheck(self.Entry_3, self.choice3)
+            self.Checkbutton_1["state"] = "normal"
+            self.Checkbutton_2["state"] = "normal"
+            self.Option_nav4["state"] = "normal"
+            self.Option_extrap5["state"] = "normal"
+            self.nanmenu(self.Checkbutton_5, self.extrap_menu5)
+            self.Checkbutton_6["state"] = "normal"
+            self.button["state"] = "normal"
+            self.close_button["state"] = "normal"
+
+    def run(self):
+        self.switch_all(way=True)
+
+        nav_ref = None
+        fit_method = 'Automatic'
+        top = None
+        bot = None
+        exponent = None
+
+        save_as_dat = self.choice3.get()
+        code_station = self.text3.get()
+        save_as_csv = self.choice1.get()
+        save_as_bad = self.choice2.get()
+        input_nav = self.nav_menu4.get()
+        input_extrap_type = self.extrap_menu5.get()
+        input_extrap_mode = self.choice5.get()
+        input_extrap_exp = self.text5.get()
+        no_bt = self.choice6.get()
+
+        if input_nav == 'Bottom track':
+            nav_ref = 'BT'
+        if input_extrap_type != 'Default':
+            if input_extrap_type == 'Power':
+                top = 'Power'
+                bot = 'Power'
+                fit_method = 'Manual'
+            elif input_extrap_type == 'CNS':
+                top = 'Constant'
+                bot = 'No Slip'
+                fit_method = 'Manual'
+            elif input_extrap_type == '3-points':
+                top = '3-Point'
+                bot = 'No Slip'
+                fit_method = 'Manual'
+
+        if input_extrap_mode:
+            exponent = input_extrap_exp
+
+        # Check if Code station is correct
+        if save_as_dat and code_station in ['Code station HYDRO', '']:
+            tk.messagebox.showerror(title='Invalid code station', message='Please enter a valid station code.')
+            self.switch_all(way=False)
+            return
+
+        # Check if exponent is correct
+        if exponent is not None:
+            exponent = (exponent.replace(',', '.'))
+            try:
+                exponent = float(exponent)
+            except ValueError:
+                tk.messagebox.showerror(title='Invalid exponent', message='Please enter a valid exponent.')
+                self.switch_all(way=False)
+                return
+            if exponent > 1 or exponent < 0:
+                tk.messagebox.showerror(title='Invalid exponent',
+                                        message='Please enter an exponent value between 0 and 1.')
+                self.switch_all(way=False)
+                return
+
+        try:
+            with open(os.getcwd() + '\\path_file.txt') as f:
+                path = f.readlines()
+            if len(path) == 0:
+                path = [None]
+            path_folder, path_meas, type_meas, name_meas = of.select_directory(path[0])
+
+            self.pb['value'] = 0
+            self.pb.update_idletasks()
+            self.value_label['text'] = self.update_progress_label()
+
+            with open(os.getcwd() + '\\path_file.txt', 'w') as f:
+                f.write('\\'.join(path_folder.split('\\')[:-1]))
+
+
+            if save_as_csv or save_as_bad or save_as_dat:
+                empty_list = []
+                uh_list = []
+                q_list = []
+                uq_list = []
+                date_list = []
+                date_day_list = []
+                date_hour_start = []
+                date_hour_end = []
+
+                width_list = []
+                area_list = []
+
+                for id_meas in range(len(path_meas)):
+                    self.pb['value'] = 100 * (id_meas + 1) / len(path_meas)
+                    self.pb.update_idletasks()
+                    self.value_label['text'] = self.update_progress_label()
+
+                    # Open measurement
+                    meas, checked_transect, navigation_reference = of.open_measurement(path_meas[id_meas],
+                                                                                       type_meas[id_meas],
+                                                                                       use_weighted=True,
+                                                                                       navigation_reference=nav_ref,
+                                                                                       run_oursin=True)
+                    # Change extrapolation method
+                    if fit_method == 'Manual':
+                        meas.extrap_fit.change_fit_method(meas.transects, 'Manual', len(meas.transects), top, bot,
+                                                          exponent)
+                        print(f'Top : {top}, bot : {bot}, exp : {exponent}')
+                        settings = meas.current_settings()
+                        meas.apply_settings(settings)
+                    # No moving-bed observed
+                    if no_bt and meas.current_settings()['NavRef'] == 'bt_vel':
+                        meas.observed_no_moving_bed = True
+                        meas.oursin.compute_oursin(meas)
+
+                    # Date data
+                    date_value = meas.transects[checked_transect[0]].date_time.start_serial_time
+                    date_list.append(datetime.datetime.fromtimestamp(date_value).strftime('%Y-%m-%d %H:%M'))
+                    date_day_list.append(datetime.datetime.fromtimestamp(date_value).strftime('%Y%m%d'))
+                    date_hour_start.append(datetime.datetime.fromtimestamp(date_value).strftime('%H:%M'))
+
+                    date_hour_end.append(datetime.datetime.fromtimestamp(meas.transects[checked_transect[-1]].date_time.
+                                                                         end_serial_time).strftime('%H:%M'))
+
+                    # Cross-section width and area
+                    trans_prop = Measurement.compute_measurement_properties(meas)
+                    n_transects = len(meas.transects)
+                    width_list.append(trans_prop['width'][n_transects])
+                    area_list.append(trans_prop['area'][n_transects])
+
+                    empty_list.append('')
+                    uh_list.append(0)
+                    q_list.append(meas.mean_discharges(meas)['total_mean'])
+                    uq_list.append(meas.oursin.u_measurement['total_95'][0])
+
+                # Name of the parent folder
+                if save_as_dat:
+                    name_folder = code_station
+                else:
+                    name_folder = path_folder.split('\\')[-1]
+
+                if save_as_csv:
+                    df_csv = pd.DataFrame({'H': empty_list, 'uH': uh_list, 'Q': q_list, 'uQ': uq_list, 'Active': 1,
+                                           'date': date_list})
+                    df_csv.to_csv(path_folder + '\\' + name_folder + '.csv', sep=';', index=False)
+
+                if save_as_bad:
+                    bad_uq = []
+                    for i in range(len(uq_list)):
+                        bad_uq.append(q_list[i] * uq_list[i] / 200)
+
+                    df_bad = pd.DataFrame(
+                        {'H': empty_list, 'uH': uh_list, 'Q': q_list, 'uQ': bad_uq, 'date': date_list})
+                    df_bad.to_csv(path_folder + '\\' + name_folder + '.BAD', sep=' ', index=False)
+
+                if save_as_dat:
+                    print('save as dat')
+                    length = len(q_list)
+                    df = pd.DataFrame(
+                        {'C': ['C'] * length, 'JGG': ['JGG'] * length, 'station_name': [code_station] * length,
+                         'date': date_day_list, 'date_start': date_hour_start, 'date_end': date_hour_end,
+                         'cote': [-999] * length, 'cote_start': [''] * length, 'cote_end': [''] * length,
+                         'code_station': [''] * length, 'debit': q_list, 'incertitude': uq_list,
+                         'distance_station': [''] * length, 'sect_mouillee': area_list,
+                         'peri_mouille': [''] * length, 'larg_miroir': width_list,
+                         'vitesse_moyenne': [''] * length, 'vitesse_maxi': [''] * length,
+                         'deniv': [''] * length, 'commentaire': [''] * length, 'mode': ['PC'] * length
+                         })
+
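+                    # Render the frame as semicolon-separated text without a
+                    # header, then rebuild it line by line so the Bareme
+                    # DEC/DEB header and FIN footer can wrap the records.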
+                    df = df.to_csv(header=None, index=False, sep=';').strip('\r\n').split('\n')
+                    df_str = [i.strip('\r') for i in df]
+                    df_bareme = '\n'.join(df_str)
+                    print(path_folder + '\\' + name_folder + '.dat')
+                    with open(path_folder + '\\' + name_folder + '.dat', "w") as myfile:
+                        myfile.write("DEC;  6 13 \nDEB;BA-HYDRO;;;;;;;\n")
+
+                    with open(path_folder + '\\' + name_folder + '.dat', "a") as myfile:
+                        myfile.write(df_bareme)
+                        myfile.write('\nFIN;BA-HYDRO;33;')
+                else:
+                    print('not saving as .dat')
+
+            self.value_label['text'] = self.update_progress_label(run=False)
+            self.switch_all(way=False)
+
+        except Exception:
+            # Re-enable the interface if anything fails during extraction
+            self.switch_all(way=False)
+
+
+if __name__ == '__main__':
+    App()
diff --git a/open_functions.py b/open_functions.py
new file mode 100644
index 0000000000000000000000000000000000000000..f9d23cf1313552865d0b3817034456f35e221256
--- /dev/null
+++ b/open_functions.py
@@ -0,0 +1,189 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Fri Feb 18 14:28:25 2022
+
+@author: blais
+"""
+# ========================================
+# External imports
+# ========================================
+import os
+import glob
+import tkinter as tk
+import tkinter.filedialog
+import numpy as np
+import scipy.io as sio
+import warnings
+import sys
+
+# ========================================
+# Internal imports
+# ========================================
+from Classes.Measurement import Measurement
+
+
+# =============================================================================
+# Functions
+# =============================================================================
+def select_file(path_window=None):
+    if not path_window or not os.path.isfile(path_window):
+        path_window = os.getcwd()
+    # Open a window to select a measurement
+    root = tk.Tk()
+    root.withdraw()
+    root.attributes("-topmost", True)
+    path_meas = tk.filedialog.askopenfilenames(parent=root, initialdir=path_window)
+    if path_meas:
+        keys = [elem.split('.')[-1] for elem in path_meas]
+        name_folders = path_meas[0].split('/')[-2].split('.')[0]
+
+        if 'mmt' in keys:
+            type_meas = 'TRDI'
+            fileName = path_meas[0]
+        elif 'mat' in keys:
+            type_meas = 'SonTek'
+            ind_meas = [i for i, s in enumerate(keys) if "mat" in s]
+            fileName = [path_meas[x] for x in ind_meas]
+        else:
+            warnings.warn('Selected files are neither TRDI (.mmt) nor SonTek (.mat)')
+            return None, None, None
+
+        return fileName, type_meas, name_folders
+    else:
+        warnings.warn('No file selected')
+        return None, None, None
+
+
+def select_directory(path_window=None):
+    if not path_window or not os.path.isdir(path_window):
+        path_window = os.getcwd()
+    # Open a window to select a folder which contains measurements
+    root = tk.Tk()
+    root.withdraw()
+    root.attributes("-topmost", True)
+    path_folder = tk.filedialog.askdirectory(parent=root, initialdir=path_window,
+                                             title='Select folder')
+
+    if path_folder:
+        # ADCP folders path
+        path_folder = '\\'.join(path_folder.split('/'))
+        path_folders = np.array(glob.glob(path_folder + "/*"))
+        # Load their name
+        name_folders = np.array([os.path.basename(x) for x in path_folders])
+        # Exclude files
+        excluded_folders = [s.find('.') == -1 for s in name_folders]
+        path_folders = path_folders[excluded_folders]
+        name_folders = name_folders[excluded_folders]
+
+        # Open measurement
+        type_meas = list()
+        path_meas = list()
+        name_meas = list()
+        for id_meas in range(len(path_folders)):
+            list_files = os.listdir(path_folders[id_meas])
+            exte_files = [i.split('.', 1)[-1] for i in list_files]
+            if 'mmt' in exte_files or 'mat' in exte_files:
+                if 'mat' in exte_files:
+                    loca_meas = [i for i, s in enumerate(exte_files) if "mat" in s]  # transects index
+                    fileNameRaw = [(list_files[x]) for x in loca_meas]
+                    qrev_data = False
+                    for name in fileNameRaw:
+                        path = os.path.join(path_folders[id_meas], name)
+                        mat_data = sio.loadmat(path, struct_as_record=False, squeeze_me=True)
+                        if 'version' in mat_data:
+                            type_meas.append('QRev')
+                            name_meas.append(name_folders[id_meas])
+                            path_meas.append(mat_data)
+                            qrev_data = True
+                            print('QRev detected')
+                            break
+                    if not qrev_data:
+                        type_meas.append('SonTek')
+                        fileName = [s for i, s in enumerate(fileNameRaw)
+                                    if "QRev.mat" not in s]
+                        name_meas.append(name_folders[id_meas])
+                        path_meas.append(
+                            [os.path.join(path_folders[id_meas], fileName[x]) for x in
+                             range(len(fileName))])
+
+                elif 'mmt' in exte_files:
+                    type_meas.append('TRDI')
+                    loca_meas = exte_files.index("mmt")  # index of the measurement file
+                    fileName = list_files[loca_meas]
+                    path_meas.append(os.path.join(path_folders[id_meas], fileName))
+                    name_meas.append(name_folders[id_meas])
+        return path_folder, path_meas, type_meas, name_meas
+    else:
+        warnings.warn('No folder selected - end')
+        return None, None, None, None
+
+
+def open_measurement(path_meas, type_meas, apply_settings=False, navigation_reference=None,
+                     checked_transect=None, extrap_velocity=False, run_oursin=False, use_weighted=True,
+                     use_measurement_thresholds=False):
+    # Open measurement
+    meas = Measurement(in_file=path_meas, source=type_meas, proc_type='QRev', run_oursin=run_oursin,
+                       use_weighted=use_weighted, use_measurement_thresholds=use_measurement_thresholds)
+
+    if apply_settings:
+        if navigation_reference == 'GPS':
+            if not meas.transects[meas.checked_transect_idx[0]].gps:
+                print('No GPS available : switch to BT')
+                navigation_reference = 'BT'
+        meas, checked_transect, navigation_reference = new_settings(meas,
+                                                                    navigation_reference,
+                                                                    checked_transect,
+                                                                    extrap_velocity)
+    else:
+        if meas.current_settings()['NavRef'] == 'bt_vel':
+            navigation_reference = 'BT'
+        elif meas.current_settings()['NavRef'] == 'gga_vel':
+            navigation_reference = 'GPS'
+        checked_transect = meas.checked_transect_idx
+
+    return meas, checked_transect, navigation_reference
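+
+# Typical usage (illustrative; path_meas/type_meas as returned by select_directory):
+# meas, transects, nav_ref = open_measurement(path_meas[0], type_meas[0],
+#                                             apply_settings=True,
+#                                             navigation_reference='BT')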
+
+
+def new_settings(meas, navigation_reference_user=None, checked_transect_user=None, extrap_velocity=False):
+    # Apply settings
+    settings = meas.current_settings()
+
+    settings_change = False
+
+    # Default Navigation reference
+    if navigation_reference_user is None:
+        if meas.current_settings()['NavRef'] == 'bt_vel':
+            navigation_reference = 'BT'
+        elif meas.current_settings()['NavRef'] == 'gga_vel' or meas.current_settings()['NavRef'] == 'vtg_vel':
+            navigation_reference = 'GPS'
+
+    # Change Navigation reference
+    else:
+        navigation_reference = navigation_reference_user
+        if navigation_reference_user == 'BT' and meas.current_settings()['NavRef'] != 'bt_vel':
+            settings['NavRef'] = 'BT'
+            settings_change = True
+        elif navigation_reference_user == 'GPS' and meas.current_settings()['NavRef'] != 'gga_vel':
+            settings['NavRef'] = 'GGA'
+            settings_change = True
+
+    # Change checked transects
+    if not checked_transect_user or checked_transect_user == meas.checked_transect_idx:
+        checked_transect_idx = meas.checked_transect_idx
+    else:
+        checked_transect_idx = checked_transect_user
+        meas.checked_transect_idx = []
+        for n in range(len(meas.transects)):
+            if n in checked_transect_idx:
+                meas.transects[n].checked = True
+                meas.checked_transect_idx.append(n)
+            else:
+                meas.transects[n].checked = False
+        meas.selected_transects_changed(checked_transect_user)
+        # selected_transects_changed already contains apply_settings
+
+    # Apply Extrapolation on velocities
+    if extrap_velocity:
+        meas.extrap_fit.change_data_type(meas.transects, 'v')
+        settings_change = True
+
+    if settings_change:
+        meas.apply_settings(settings)
+
+    return meas, checked_transect_idx, navigation_reference
diff --git a/path_file.txt b/path_file.txt
new file mode 100644
index 0000000000000000000000000000000000000000..a4ae75019f316567726a3be3d0ec4ac1cd79e833
--- /dev/null
+++ b/path_file.txt
@@ -0,0 +1 @@
+C:\Users\blaise.calmel\Documents\2_QRev_updates\2_Intercomparaison\intercomp_files
\ No newline at end of file
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..18c022c4a1eb67ca5ff500c1b2c6d06912e827c5
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,12 @@
+llvmlite==0.39.1
+numba==0.56.4
+numpy==1.23.5
+pandas==1.5.2
+profilehooks==1.12.0
+python-dateutil==2.8.2
+pytz==2022.6
+scipy==1.9.3
+six==1.16.0
+tk==0.1.0
+utm==0.7.0
+xmltodict==0.13.0
diff --git a/ressources/10.ico b/ressources/10.ico
new file mode 100644
index 0000000000000000000000000000000000000000..16c2dbc783121b7f0cea71e6c0d185640b606f2b
Binary files /dev/null and b/ressources/10.ico differ
diff --git a/ressources/11.png b/ressources/11.png
new file mode 100644
index 0000000000000000000000000000000000000000..98308ce0531909c470622a29744e7fc03a3266e5
Binary files /dev/null and b/ressources/11.png differ