From ef9da494df0a100ffc643d5c938832aaf05a969e Mon Sep 17 00:00:00 2001
From: "blaise.calmel" <blaise.calmel@inrae.fr>
Date: Mon, 5 Dec 2022 10:34:31 +0100
Subject: [PATCH] Add files

---
 Classes/BoatData.py                           | 1767 ++++++++
 Classes/BoatStructure.py                      |  437 ++
 Classes/CompassCal.py                         |   41 +
 Classes/ComputeExtrap.py                      |  330 ++
 Classes/CoordError.py                         |    4 +
 Classes/DateTime.py                           |   77 +
 Classes/DepthData.py                          | 1009 +++++
 Classes/DepthStructure.py                     |  368 ++
 Classes/EdgeData.py                           |  125 +
 Classes/Edges.py                              |   77 +
 Classes/ExtrapData.py                         |   91 +
 Classes/ExtrapQSensitivity.py                 |  372 ++
 Classes/FitData.py                            |  284 ++
 Classes/GPSData.py                            |  731 +++
 Classes/HeadingData.py                        |  224 +
 Classes/InstrumentData.py                     |  268 ++
 Classes/MMT_TRDI.py                           |  539 +++
 Classes/MatSonTek.py                          |   64 +
 Classes/Measurement.py                        | 3961 +++++++++++++++++
 Classes/MovingBedTests.py                     | 1038 +++++
 Classes/MultiThread.py                        |   22 +
 Classes/NormData.py                           |  435 ++
 Classes/Oursin.py                             | 2279 ++++++++++
 Classes/Pd0TRDI.py                            | 2463 ++++++++++
 Classes/Pd0TRDI_2.py                          | 3503 +++++++++++++++
 Classes/PreMeasurement.py                     |  430 ++
 Classes/Python2Matlab.py                      |  725 +++
 Classes/QAData.py                             | 2525 +++++++++++
 Classes/QComp.py                              | 1925 ++++++++
 Classes/SelectFit.py                          |  381 ++
 Classes/SensorData.py                         |   82 +
 Classes/SensorStructure.py                    |   73 +
 Classes/Sensors.py                            |  122 +
 Classes/TransectData.py                       | 2498 +++++++++++
 Classes/TransformationMatrix.py               |  206 +
 Classes/Uncertainty.py                        |  418 ++
 Classes/WaterData.py                          | 2445 ++++++++++
 Classes/__init__.py                           |    0
 Classes/stickysettings.py                     |  123 +
 Classes/test_stickysettings.py                |   68 +
 ...discharge_extrapolation.cp39-win_amd64.pyd |  Bin 0 -> 109056 bytes
 .../bottom_discharge_extrapolation.py         |  304 ++
 ...discharge_extrapolation.cp39-win_amd64.pyd |  Bin 0 -> 135168 bytes
 .../top_discharge_extrapolation.py            |  299 ++
 MiscLibs/__init__.py                          |    0
 MiscLibs/abba_2d_interpolation.py             |  413 ++
 .../bayes_cov_compiled.cp39-win_amd64.pyd     |  Bin 0 -> 94208 bytes
 MiscLibs/bayes_cov_compiled.py                |  236 +
 MiscLibs/common_functions.py                  |  466 ++
 MiscLibs/non_uniform_savgol.py                |  120 +
 MiscLibs/robust_loess.py                      |  280 ++
 .../robust_loess_compiled.cp39-win_amd64.pyd  |  Bin 0 -> 222720 bytes
 MiscLibs/robust_loess_compiled.py             |  284 ++
 MiscLibs/run_iqr.cp39-win_amd64.pyd           |  Bin 0 -> 69120 bytes
 MiscLibs/run_iqr.py                           |  102 +
 main.py                                       |  353 ++
 open_functions.py                             |  189 +
 path_file.txt                                 |    1 +
 requirements.txt                              |   12 +
 ressources/10.ico                             |  Bin 0 -> 2462 bytes
 ressources/11.png                             |  Bin 0 -> 1046 bytes
 61 files changed, 35589 insertions(+)
 create mode 100644 Classes/BoatData.py
 create mode 100644 Classes/BoatStructure.py
 create mode 100644 Classes/CompassCal.py
 create mode 100644 Classes/ComputeExtrap.py
 create mode 100644 Classes/CoordError.py
 create mode 100644 Classes/DateTime.py
 create mode 100644 Classes/DepthData.py
 create mode 100644 Classes/DepthStructure.py
 create mode 100644 Classes/EdgeData.py
 create mode 100644 Classes/Edges.py
 create mode 100644 Classes/ExtrapData.py
 create mode 100644 Classes/ExtrapQSensitivity.py
 create mode 100644 Classes/FitData.py
 create mode 100644 Classes/GPSData.py
 create mode 100644 Classes/HeadingData.py
 create mode 100644 Classes/InstrumentData.py
 create mode 100644 Classes/MMT_TRDI.py
 create mode 100644 Classes/MatSonTek.py
 create mode 100644 Classes/Measurement.py
 create mode 100644 Classes/MovingBedTests.py
 create mode 100644 Classes/MultiThread.py
 create mode 100644 Classes/NormData.py
 create mode 100644 Classes/Oursin.py
 create mode 100644 Classes/Pd0TRDI.py
 create mode 100644 Classes/Pd0TRDI_2.py
 create mode 100644 Classes/PreMeasurement.py
 create mode 100644 Classes/Python2Matlab.py
 create mode 100644 Classes/QAData.py
 create mode 100644 Classes/QComp.py
 create mode 100644 Classes/SelectFit.py
 create mode 100644 Classes/SensorData.py
 create mode 100644 Classes/SensorStructure.py
 create mode 100644 Classes/Sensors.py
 create mode 100644 Classes/TransectData.py
 create mode 100644 Classes/TransformationMatrix.py
 create mode 100644 Classes/Uncertainty.py
 create mode 100644 Classes/WaterData.py
 create mode 100644 Classes/__init__.py
 create mode 100644 Classes/stickysettings.py
 create mode 100644 Classes/test_stickysettings.py
 create mode 100644 DischargeFunctions/bottom_discharge_extrapolation.cp39-win_amd64.pyd
 create mode 100644 DischargeFunctions/bottom_discharge_extrapolation.py
 create mode 100644 DischargeFunctions/top_discharge_extrapolation.cp39-win_amd64.pyd
 create mode 100644 DischargeFunctions/top_discharge_extrapolation.py
 create mode 100644 MiscLibs/__init__.py
 create mode 100644 MiscLibs/abba_2d_interpolation.py
 create mode 100644 MiscLibs/bayes_cov_compiled.cp39-win_amd64.pyd
 create mode 100644 MiscLibs/bayes_cov_compiled.py
 create mode 100644 MiscLibs/common_functions.py
 create mode 100644 MiscLibs/non_uniform_savgol.py
 create mode 100644 MiscLibs/robust_loess.py
 create mode 100644 MiscLibs/robust_loess_compiled.cp39-win_amd64.pyd
 create mode 100644 MiscLibs/robust_loess_compiled.py
 create mode 100644 MiscLibs/run_iqr.cp39-win_amd64.pyd
 create mode 100644 MiscLibs/run_iqr.py
 create mode 100644 main.py
 create mode 100644 open_functions.py
 create mode 100644 path_file.txt
 create mode 100644 requirements.txt
 create mode 100644 ressources/10.ico
 create mode 100644 ressources/11.png

diff --git a/Classes/BoatData.py b/Classes/BoatData.py
new file mode 100644
index 0000000..b748f17
--- /dev/null
+++ b/Classes/BoatData.py
@@ -0,0 +1,1767 @@
+import copy
+import numpy as np
+from numpy.matlib import repmat
+from MiscLibs.common_functions import cosd, sind, cart2pol, iqr, pol2cart, nan_less_equal, \
+    nan_greater_equal, nan_greater, nan_less
+from MiscLibs.robust_loess import rloess
+
+
+class BoatData(object):
+    """Class to process and store boat velocity data.
+
+    Attributes
+    ----------
+
+    Original data provided to the class
+        raw_vel_mps: np.array
+            Contains the raw unfiltered velocity data in m/s.
+            First index (0-3) corresponds to beams 1,2,3,4 for beam coordinates or u,v,w,d otherwise.
+        frequency_khz: np.array or float
+            Defines the ADCP frequency used for the velocity measurement.
+        orig_coord_sys: str
+            Defines the original raw data velocity coordinate system: "Beam", "Inst", "Ship", "Earth".
+        nav_ref: str
+            Defines the original raw data navigation reference: "None", "BT", "GGA", "VTG".
+        corr: np.array
+            Correlation values for bottom track
+        rssi: np.array
+            Returned signal strength for bottom track
+
+    Coordinate transformed data
+        coord_sys: str
+            Defines the current coordinate system "Beam", "Inst", "Ship", "Earth" used to compute u, v, w, and d.
+        u_mps: np.array(float)
+            Horizontal velocity in x-direction, in m/s.
+        v_mps: np.array(float)
+            Horizontal velocity in y-direction, in m/s.
+        w_mps: np.array(float)
+            Vertical velocity (+ up), m/s.
+        d_mps: np.array(float)
+            Difference in vertical velocities computed from opposing beam pairs, in m/s.
+        num_invalid: float
+            Number of ensembles with invalid velocity data.
+        bottom_mode: str
+            BT mode for TRDI, 'Variable' for SonTek.
+
+    Processed data
+        u_processed_mps: np.array(float)
+            Horizontal velocity in x-direction filtered and interpolated.
+        v_processed_mps: np.array(float)
+            Horizontal velocity in y-direction filtered and interpolated.
+        processed_source: np.array(object)
+            Source of velocity: BT, VTG, GGA, INT.
+
+    Settings variables
+        d_filter: str
+            Difference velocity filter "Manual", "Off", "Auto".
+        d_filter_thresholds: dict or float
+            Threshold(s) for difference velocity filter; a dict keyed by frequency in auto mode,
+            a float in manual mode.
+        w_filter: str
+            Vertical velocity filter "Manual", "Off", "Auto".
+        w_filter_thresholds: dict or float
+            Threshold(s) for vertical velocity filter; a dict keyed by frequency in auto mode,
+            a float in manual mode.
+        gps_diff_qual_filter: integer
+            Differential correction quality (1,2,4).
+        gps_altitude_filter: str
+            Change in altitude filter "Auto", "Manual", "Off".
+        gps_altitude_filter_change: float
+            Threshold from mean for altitude filter.
+        gps_HDOP_filter: str
+            HDOP filter "Auto", "Manual", "Off".
+        gps_HDOP_filter_max: float
+            Max acceptable value for HDOP.
+        gps_HDOP_filter_change: float
+            Maximum change allowed from mean.
+        smooth_filter: str
+            Setting to use filter based on smoothing function ("On", "Off")
+        smooth_speed: np.array(float)
+            Smoothed boat speed.
+        smooth_upper_limit: np.array(float)
+            Smooth function upper limit of window.
+        smooth_lower_limit: np.array(float)
+            Smooth function lower limit of window.
+        interpolate: str
+            Type of interpolation: "None", "Linear", "Smooth" etc.
+        beam_filter: integer
+            Minimum number of beams for valid data, 3 for 3-beam solutions, 4 for 4-beam.
+        valid_data: np.array(bool)
+            Logical array identifying valid and invalid data for each filter applied.
+                Row 1 [0] - composite
+                Row 2 [1] - original
+                Row 3 [2] - d_filter or diff_qual
+                Row 4 [3] - w_filter or altitude
+                Row 5 [4] - smooth_filter
+                Row 6 [5] - beam_filter or HDOP
+
+        d_meas_thresholds: dict
+            Dictionary of difference velocity thresholds computed using the whole measurement
+        w_meas_thresholds: dict
+            Dictionary of vertical velocity thresholds computed using the whole measurement
+        use_measurement_thresholds: bool
+            Indicates if the measurement based thresholds should be used
+    """
+
+    def __init__(self):
+        """Initialize instance variables."""
+
+        # Variables passed to the constructor
+        self.raw_vel_mps = None  # contains the raw unfiltered velocity data in m/s.
+        self.frequency_khz = None  # Defines ADCP frequency used for velocity measurement
+        self.orig_coord_sys = None  # Defines the original raw data velocity coordinate system
+        self.nav_ref = None  # Defines the original raw data navigation reference
+        self.corr = np.array([])
+        self.rssi = np.array([])
+
+        # Coordinate transformed data
+        self.coord_sys = None  # Defines the current coordinate system "Beam", "Inst", "Ship", "Earth"
+        self.u_mps = None  # Horizontal velocity in x-direction, in m/s
+        self.v_mps = None  # Horizontal velocity in y-direction, in m/s
+        self.w_mps = None  # Vertical velocity (+ up), m/s
+        self.d_mps = None  # Difference in vertical velocities computed from opposing beam pairs in m/s
+        self.num_invalid = None  # Number of ensembles with invalid velocity data
+        self.bottom_mode = None  # BT mode for TRDI, 'Variable' for SonTek
+
+        # Processed data
+        self.u_processed_mps = None  # Horizontal velocity in x-direction filtered and interpolated
+        self.v_processed_mps = None  # Horizontal velocity in y-direction filtered and interpolated
+        self.processed_source = None  # Source of data, BT, GGA, VTG, INT
+
+        # Filter and interpolation properties
+        self.d_filter = None  # Difference velocity filter "Manual", "Off", "Auto"
+        self.d_filter_thresholds = {}  # Threshold for difference velocity filter
+        self.w_filter = None  # Vertical velocity filter "Manual", "Off", "Auto"
+        self.w_filter_thresholds = {}  # Threshold for vertical velocity filter
+        self.gps_diff_qual_filter = None  # Differential correction quality (1,2,4)
+        self.gps_altitude_filter = None  # Change in altitude filter "Auto", "Manual", "Off"
+        self.gps_altitude_filter_change = None  # Threshold from mean for altitude filter
+        self.gps_HDOP_filter = None  # HDOP filter "Auto", "Manual", "Off"
+        self.gps_HDOP_filter_max = None  # Max acceptable value for HDOP
+        self.gps_HDOP_filter_change = None  # Maximum change allowed from mean
+        self.smooth_filter = None  # Filter based on smoothing function
+        self.smooth_speed = None  # Smoothed boat speed
+        self.smooth_upper_limit = None  # Smooth function upper limit of window
+        self.smooth_lower_limit = None  # Smooth function lower limit of window
+        self.interpolate = None  # Type of interpolation: "None", "Linear", "Smooth" etc.
+        self.beam_filter = None  # Minimum beams for valid data: 3 for 3-beam solutions, 4 for 4-beam solutions
+        self.valid_data = None  # Logical array identifying valid and invalid data for each filter applied
+
+        # Filter settings populated from Measurement.create_filter_composites
+        self.d_meas_thresholds = {}
+        self.w_meas_thresholds = {}
+
+        self.use_measurement_thresholds = False
+
+    def populate_data(self, source, vel_in, freq_in, coord_sys_in, nav_ref_in, beam_filter_in=3,
+                      bottom_mode_in='Variable', corr_in=None, rssi_in=None):
+        """Assigns data to instance variables.
+
+        Parameters
+        ----------
+        source: str
+            Manufacturer (TRDI, SonTek)
+        vel_in: np.array(float)
+            Boat velocity array
+        freq_in: np.array(float)
+            Acoustic frequency of the boat velocity measurements
+        coord_sys_in: str
+            Coordinate system of boat velocity
+        nav_ref_in: str
+            Source of boat velocity (BT, GGA, VTG)
+        beam_filter_in: int
+            Minimum number of valid beams for valid data.
+        bottom_mode_in: str
+            Bottom mode for TRDI ADCP
+        corr_in: np.array
+            Correlation values for bottom track
+        rssi_in: np.array
+            Returned signal strength for bottom track
+        """
+
+        # Identify invalid ensembles for SonTek data.
+        if source == 'SonTek':
+            vel_in = BoatData.filter_sontek(vel_in)
+
+        # Store input data
+        self.raw_vel_mps = vel_in
+        self.frequency_khz = freq_in
+        self.coord_sys = coord_sys_in
+        self.orig_coord_sys = coord_sys_in
+        self.nav_ref = nav_ref_in
+        self.beam_filter = beam_filter_in
+        self.bottom_mode = bottom_mode_in
+        if corr_in is not None:
+            self.corr = corr_in
+        if rssi_in is not None:
+            self.rssi = rssi_in
+
+        if nav_ref_in == 'BT':
+
+            # Boat velocities are referenced to the ADCP, not the streambed, and thus must be reversed
+            self.u_mps = np.copy(-1 * vel_in[0, :])
+            self.v_mps = np.copy(-1 * vel_in[1, :])
+            self.w_mps = np.copy(vel_in[2, :])
+            self.d_mps = np.copy(vel_in[3, :])
+
+            # Default filtering applied during initial construction of object
+            self.d_filter = 'Off'
+            self.d_filter_thresholds = {}
+            self.w_filter = 'Off'
+            self.w_filter_thresholds = {}
+            self.smooth_filter = 'Off'
+            self.interpolate = 'None'
+
+        else:
+
+            # GPS referenced boat velocity
+            self.u_mps = np.copy(vel_in[0, :])
+            self.v_mps = np.copy(vel_in[1, :])
+            self.w_mps = np.nan
+            self.d_mps = np.nan
+
+            # Default filtering
+            self.gps_diff_qual_filter = 2
+            self.gps_altitude_filter = 'Off'
+            self.gps_altitude_filter_change = 3
+            self.gps_HDOP_filter = 'Off'
+            self.gps_HDOP_filter_max = 2.5
+            self.gps_HDOP_filter_change = 1
+            self.smooth_filter = 'Off'
+            self.interpolate = 'None'
+
+        # Assign data to processed property
+        self.u_processed_mps = np.copy(self.u_mps)
+        self.v_processed_mps = np.copy(self.v_mps)
+
+        # Preallocate arrays
+        n_ensembles = vel_in.shape[1]
+        self.valid_data = repmat([True], 6, n_ensembles)
+        self.smooth_speed = np.nan
+        self.smooth_upper_limit = np.nan
+        self.smooth_lower_limit = np.nan
+
+        # Determine number of raw invalid
+        # --------------------------------
+        # Find invalid raw data
+        valid_vel = np.tile([True], self.raw_vel_mps.shape)
+        valid_vel[np.isnan(self.raw_vel_mps)] = False
+
+        # Identify invalid ensembles
+        if nav_ref_in == 'BT':
+            self.valid_data[1, np.sum(valid_vel, 0) < 3] = False
+        else:
+            self.valid_data[1, np.sum(valid_vel, 0) < 2] = False
+
+        # Combine all filter data to composite valid data
+        self.valid_data[0, :] = np.all(self.valid_data[1:, :], 0)
+        self.num_invalid = np.sum(self.valid_data[0, :] == False)
+        self.processed_source = np.array([''] * self.u_mps.shape[0], dtype=object)
+        self.processed_source[np.where(self.valid_data[0, :] == True)] = nav_ref_in
+        self.processed_source[np.where(self.valid_data[0, :] == False)] = "INT"
+
+    def populate_from_qrev_mat(self, mat_data):
+        """Populates the object using data from previously saved QRev Matlab file.
+
+        Parameters
+        ----------
+        mat_data: mat_struct
+           Matlab data structure obtained from sio.loadmat
+        """
+
+        # Variables passed to the constructor
+
+        if type(mat_data.frequency_hz) is np.ndarray:
+            self.frequency_khz = mat_data.frequency_hz
+        elif np.isnan(mat_data.frequency_hz):
+            self.frequency_khz = None
+        else:
+            self.frequency_khz = np.array([mat_data.frequency_hz])
+        self.orig_coord_sys = mat_data.origCoordSys
+        self.nav_ref = mat_data.navRef
+
+        # Data requiring manipulation if only 1 ensemble
+        if type(mat_data.u_mps) is float:
+            self.raw_vel_mps = mat_data.rawVel_mps.reshape(mat_data.rawVel_mps.shape[0], 1)
+            # Coordinate transformed data
+            self.coord_sys = np.array([mat_data.coordSys])
+            self.u_mps = np.array([mat_data.u_mps])
+            self.v_mps = np.array([mat_data.v_mps])
+            self.w_mps = np.array([mat_data.w_mps])
+            self.d_mps = np.array([mat_data.d_mps])
+            if hasattr(mat_data, 'corr'):
+                self.corr = mat_data.corr.reshape(mat_data.corr.shape[0], 1)
+            if hasattr(mat_data, 'rssi'):
+                self.rssi = mat_data.rssi.reshape(mat_data.rssi.shape[0], 1)
+
+            # self.bottom_mode = np.array([mat_data.bottomMode])
+
+            # Processed data
+            self.u_processed_mps = np.array([mat_data.uProcessed_mps])
+            self.v_processed_mps = np.array([mat_data.vProcessed_mps])
+            self.processed_source = np.array([mat_data.processedSource])
+            self.valid_data = np.array([mat_data.validData]).astype(bool)
+            if self.valid_data.shape[1] > 1:
+                self.valid_data = self.valid_data.reshape(-1, 1)
+            self.smooth_speed = np.array([mat_data.smoothSpeed])
+            self.smooth_upper_limit = np.array([mat_data.smoothUpperLimit])
+            self.smooth_lower_limit = np.array([mat_data.smoothLowerLimit])
+        else:
+            self.raw_vel_mps = mat_data.rawVel_mps
+            # Coordinate transformed data
+            self.coord_sys = mat_data.coordSys
+            self.u_mps = mat_data.u_mps
+            self.v_mps = mat_data.v_mps
+            self.w_mps = mat_data.w_mps
+            self.d_mps = mat_data.d_mps
+
+            if hasattr(mat_data, 'corr'):
+                self.corr = mat_data.corr
+            if hasattr(mat_data, 'rssi'):
+                self.rssi = mat_data.rssi
+
+            # self.bottom_mode = mat_data.bottomMode
+
+            # Processed data
+            self.u_processed_mps = mat_data.uProcessed_mps
+            self.v_processed_mps = mat_data.vProcessed_mps
+            self.processed_source = mat_data.processedSource
+            self.valid_data = mat_data.validData.astype(bool)
+            self.smooth_speed = mat_data.smoothSpeed
+            self.smooth_upper_limit = mat_data.smoothUpperLimit
+            self.smooth_lower_limit = mat_data.smoothLowerLimit
+
+        self.bottom_mode = mat_data.bottomMode
+        self.num_invalid = mat_data.numInvalid
+        # Error velocity filter
+        if type(mat_data.dFilter) is np.ndarray:
+            self.d_filter = None
+        else:
+            self.d_filter = mat_data.dFilter
+
+        # Error velocity threshold
+        if type(mat_data.dFilterThreshold) is np.ndarray:
+            self.d_filter_thresholds = {}
+        else:
+            self.d_filter_thresholds = self.struct_to_dict(mat_data.dFilterThreshold)
+
+        # Vertical velocity filter
+        if type(mat_data.wFilter) is np.ndarray:
+            self.w_filter = None
+        else:
+            self.w_filter = self.struct_to_dict(mat_data.wFilter)
+
+        # Vertical velocity threshold
+        if type(mat_data.wFilterThreshold) is np.ndarray:
+            self.w_filter_thresholds = {}
+        else:
+            self.w_filter_thresholds = self.struct_to_dict(mat_data.wFilterThreshold)
+
+        # GPS quality filter
+        if type(mat_data.gpsDiffQualFilter) is np.ndarray:
+            self.gps_diff_qual_filter = None
+        else:
+            self.gps_diff_qual_filter = mat_data.gpsDiffQualFilter
+
+        # GPS altitude filter
+        if type(mat_data.gpsAltitudeFilter) is np.ndarray:
+            self.gps_altitude_filter = None
+        else:
+            self.gps_altitude_filter = mat_data.gpsAltitudeFilter
+
+        # GPS altitude threshold
+        if type(mat_data.gpsAltitudeFilterChange) is np.ndarray:
+            self.gps_altitude_filter_change = None
+        else:
+            self.gps_altitude_filter_change = mat_data.gpsAltitudeFilterChange
+
+        # HDOP filter
+        if type(mat_data.gpsHDOPFilter) is np.ndarray:
+            self.gps_HDOP_filter = None
+        else:
+            self.gps_HDOP_filter = mat_data.gpsHDOPFilter
+
+        # HDOP max threshold
+        if type(mat_data.gpsHDOPFilterMax) is np.ndarray:
+            self.gps_HDOP_filter_max = None
+        else:
+            self.gps_HDOP_filter_max = mat_data.gpsHDOPFilterMax
+
+        # HDOP change threshold
+        if type(mat_data.gpsHDOPFilterChange) is np.ndarray:
+            self.gps_HDOP_filter_change = None
+        else:
+            self.gps_HDOP_filter_change = mat_data.gpsHDOPFilterChange
+
+        # Other filters
+        self.smooth_filter = mat_data.smoothFilter
+        self.interpolate = mat_data.interpolate
+        self.beam_filter = mat_data.beamFilter
+
+        # Use measurement for filter
+        if hasattr(mat_data, 'use_measurement_thresholds'):
+            self.use_measurement_thresholds = mat_data.use_measurement_thresholds
+            self.d_meas_thresholds = self.struct_to_dict(mat_data.d_meas_thresholds)
+            self.w_meas_thresholds = self.struct_to_dict(mat_data.w_meas_thresholds)
+        else:
+            self.use_measurement_thresholds = False
+            self.d_meas_thresholds = {}
+            self.w_meas_thresholds = {}
+
+    @staticmethod
+    def struct_to_dict(struct):
+        """If input is a mat structure it converts it into a dictionary.
+
+        Parameters
+        ----------
+        struct: mat.struct or other
+            Data to be converted
+
+        Returns
+        -------
+        result: dict or other
+            Result of conversion
+        """
+
+        try:
+            keys = struct._fieldnames
+            result = {}
+            for key in keys:
+                result[key] = struct.__dict__[key]
+        except AttributeError:
+            result = struct
+        return result
+
+    def change_coord_sys(self, new_coord_sys, sensors, adcp):
+        """This function allows the coordinate system to be changed.
+
+        The current implementation only allows a change to a higher order coordinate system (Beam < Inst < Ship < Earth).
+
+        Parameters
+        ----------
+        new_coord_sys: str
+            New coordinate_sys (Beam, Inst, Ship, Earth)
+        sensors: Sensors
+            Object of Sensors
+        adcp: InstrumentData
+            Object of InstrumentData
+        """
+
+        # Remove any trailing spaces
+        if isinstance(self.orig_coord_sys, str):
+            o_coord_sys = self.orig_coord_sys.strip()
+        else:
+            # Handle a coordinate system stored as an array of strings
+            o_coord_sys = self.orig_coord_sys[0].strip()
+
+        # Initialize variables
+        orig_sys = 0
+        new_sys = 0
+        temp_t = None
+
+        if o_coord_sys != new_coord_sys.strip():
+            # Assign the transformation matrix and retrieve the sensor data
+            t_matrix = copy.deepcopy(adcp.t_matrix.matrix)
+            t_matrix_freq = copy.deepcopy(adcp.frequency_khz)
+            p = getattr(sensors.pitch_deg, sensors.pitch_deg.selected).data
+            r = getattr(sensors.roll_deg, sensors.roll_deg.selected).data
+            h = getattr(sensors.heading_deg, sensors.heading_deg.selected).data
+
+            # Modify the transformation matrix and heading, pitch, and roll values based on
+            # the original coordinate system so that only the needed values are used in
+            # computing the new coordinate system
+            if o_coord_sys == 'Beam':
+                orig_sys = 1
+            elif o_coord_sys == 'Inst':
+                orig_sys = 2
+                t_matrix[:] = np.eye(t_matrix.shape[0])
+            elif o_coord_sys == 'Ship':
+                orig_sys = 3
+                p = np.zeros(h.shape)
+                r = np.zeros(h.shape)
+                t_matrix[:] = np.eye(t_matrix.shape[0])
+            elif o_coord_sys == 'Earth':
+                orig_sys = 4
+
+            # Assign a value to the new coordinate system
+            if new_coord_sys == 'Beam':
+                new_sys = 1
+            elif new_coord_sys == 'Inst':
+                new_sys = 2
+            elif new_coord_sys == 'Ship':
+                new_sys = 3
+            elif new_coord_sys == 'Earth':
+                new_sys = 4
+
+            # Check to ensure the new coordinate system is a higher order than the original system
+            if new_sys - orig_sys > 0:
+
+                # Compute trig functions for heading, pitch, and roll
+                ch = cosd(h)
+                sh = sind(h)
+                cp = cosd(p)
+                sp = sind(p)
+                cr = cosd(r)
+                sr = sind(r)
+
+                vel_changed = np.tile([np.nan], self.raw_vel_mps.shape)
+                n_ens = self.raw_vel_mps.shape[1]
+
+                for ii in range(n_ens):
+
+                    # Compute matrix for heading, pitch, and roll
+                    hpr_matrix = [[((ch[ii] * cr[ii]) + (sh[ii]*sp[ii]*sr[ii])),
+                                   (sh[ii] * cp[ii]),
+                                   ((ch[ii] * sr[ii]) - sh[ii]*sp[ii]*cr[ii])],
+                                  [(-1 * sh[ii] * cr[ii])+(ch[ii] * sp[ii] * sr[ii]),
+                                   ch[ii] * cp[ii],
+                                   (-1 * sh[ii] * sr[ii])-(ch[ii] * sp[ii] * cr[ii])],
+                                  [(-1.*cp[ii] * sr[ii]),
+                                   sp[ii],
+                                   cp[ii] * cr[ii]]]
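+
+                    # hpr_matrix is the combined rotation for heading (about the
+                    # vertical axis), pitch, and roll that maps instrument/ship
+                    # velocities into the earth coordinate frame.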
+
+                    # Transform beam coordinates
+                    if o_coord_sys == 'Beam':
+
+                        # Determine frequency index for transformation matrix
+                        if len(t_matrix.shape) > 2:
+                            idx_freq = np.where(t_matrix_freq == self.frequency_khz[ii])
+                            t_mult = np.copy(t_matrix[idx_freq])
+                        else:
+                            t_mult = np.copy(t_matrix)
+
+                        # Get velocity data
+                        vel = np.copy(np.squeeze(self.raw_vel_mps[:, ii]))
+
+                        # Check for invalid beams
+                        idx_3_beam = np.where(np.isnan(vel))
+
+                        # 3-beam solution
+                        if len(idx_3_beam[0]) == 1:
+
+                            # Special processing for RiverRay
+                            if adcp.model == 'RiverRay':
+
+                                # Set beam pairing
+                                beam_pair_1a = 0
+                                beam_pair_1b = 1
+                                beam_pair_2a = 2
+                                beam_pair_2b = 3
+
+                                # Set speed of sound correction variables. Note: Currently (2013-09-06)
+                                # WinRiver II does not use a variable correction and assumes the speed
+                                # of sound and the reference speed of sound are the same.
+                                # sos = sensors.speed_of_sound_mps.selected.data[ii]
+                                # sos_reference = 1536
+                                # sos_correction = np.sqrt(((2 * sos_reference) / sos) ** 2 - 1)
+
+                                sos_correction = np.sqrt(3)
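+                                # With sos equal to sos_reference, the formula above
+                                # reduces to sqrt(2 ** 2 - 1) = sqrt(3).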
+
+                                # Reconfigure transformation matrix based on which beam is invalid
+
+                                # Beam 1 invalid
+                                if idx_3_beam[0][0] == beam_pair_1a:
+
+                                    # Double valid beam in invalid pair
+                                    t_mult[0:2, beam_pair_1b] *= 2
+
+                                    # Eliminate invalid pair from vertical velocity computations
+                                    t_mult[2, :] = [0, 0, 1/sos_correction, 1/sos_correction]
+
+                                    # Reconstruct beam velocity matrix to use only valid beams
+                                    t_mult = t_mult[0:3, [beam_pair_1b, beam_pair_2a, beam_pair_2b]]
+
+                                    # Reconstruct beam velocity matrix to use only valid beams
+                                    vel = vel[[beam_pair_1b, beam_pair_2a, beam_pair_2b]]
+
+                                    # Apply transformation matrix
+                                    temp_t = t_mult.dot(vel)
+
+                                    # Correct horizontal velocity for invalid pair with the vertical velocity
+                                    # and speed of sound correction
+                                    temp_t[0] = temp_t[0] + temp_t[2] * sos_correction
+
+                                # Beam 2 invalid
+                                if idx_3_beam[0][0] == beam_pair_1b:
+
+                                    # Double valid beam in invalid pair
+                                    t_mult[0:2, beam_pair_1a] = t_mult[0:2, beam_pair_1a] * 2
+
+                                    # Eliminate invalid pair from vertical velocity computations
+                                    t_mult[2, :] = [0, 0, 1/sos_correction, 1/sos_correction]
+
+                                    # Reconstruct transformation matrix as a 3x3 matrix
+                                    t_mult = t_mult[0:3, [beam_pair_1a, beam_pair_2a, beam_pair_2b]]
+
+                                    # Reconstruct beam velocity matrix to use only valid beams
+                                    vel = vel[[beam_pair_1a, beam_pair_2a, beam_pair_2b]]
+
+                                    # Apply transformation matrix
+                                    temp_t = t_mult.dot(vel)
+
+                                    # Correct horizontal velocity for invalid pair with the vertical
+                                    # velocity and speed of sound correction
+                                    temp_t[0] = temp_t[0] - temp_t[2] * sos_correction
+
+                                # Beam 3 invalid
+                                if idx_3_beam[0][0] == beam_pair_2a:
+
+                                    # Double valid beam in invalid pair
+                                    t_mult[0:2, beam_pair_2b] = t_mult[:2, beam_pair_2b] * 2
+
+                                    # Eliminate invalid pair from vertical velocity computations
+                                    t_mult[2, :] = [1/sos_correction, 1/sos_correction, 0, 0]
+
+                                    # Reconstruct transformation matrix as a 3x3 matrix
+                                    t_mult = t_mult[:3, [beam_pair_1a, beam_pair_1b, beam_pair_2b]]
+
+                                    # Reconstruct beam velocity matrix to use only valid beams
+                                    vel = vel[[beam_pair_1a, beam_pair_1b, beam_pair_2b]]
+
+                                    # Apply transformation matrix
+                                    temp_t = t_mult.dot(vel)
+
+                                    # Correct horizontal velocity for invalid pair with the vertical
+                                    # velocity and speed of sound correction
+                                    temp_t[1] = temp_t[1] - temp_t[2] * sos_correction
+
+                                # Beam 4 invalid
+                                if idx_3_beam[0][0] == beam_pair_2b:
+
+                                    # Double valid beam in invalid pair
+                                    t_mult[:2, beam_pair_2a] *= 2
+
+                                    # Eliminate invalid pair from vertical velocity computations
+                                    t_mult[2, :] = [1/sos_correction, 1/sos_correction, 0, 0]
+
+                                    # Reconstruct transformation matrix as a 3x3 matrix
+                                    t_mult = t_mult[:3, [beam_pair_1a, beam_pair_1b, beam_pair_2a]]
+
+                                    # Reconstruct beam velocity matrix to use only valid beams
+                                    vel = vel[[beam_pair_1a, beam_pair_1b, beam_pair_2a]]
+
+                                    # Apply transformation matrix
+                                    temp_t = t_mult.dot(vel)
+
+                                    # Correct horizontal velocity for invalid pair with the vertical
+                                    # velocity and speed of sound correction
+                                    temp_t[1] = temp_t[1] + temp_t[2] * sos_correction
+
+                            else:
+
+                                # 3 Beam solution for non-RiverRay
+                                vel_3_beam_zero = vel
+                                vel_3_beam_zero[np.isnan(vel)] = 0
+                                vel_error = np.matmul(t_mult[3, :], vel_3_beam_zero)
+                                vel[idx_3_beam] = -1 * vel_error / np.squeeze(t_mult[3, idx_3_beam])
+                                temp_t = t_mult.dot(vel)
+
+                            # Apply transformation matrix for 3 beam solutions
+                            temp_thpr = np.array(hpr_matrix).dot(temp_t[:3])
+                            temp_thpr = np.hstack([temp_thpr, np.nan])
+
+                        else:
+
+                            # Apply transformation matrix for 4 beam solutions
+                            temp_t = t_mult.dot(np.squeeze(self.raw_vel_mps[:, ii]))
+
+                            # Apply hpr_matrix
+                            temp_thpr = np.array(hpr_matrix).dot(temp_t[:3])
+                            temp_thpr = np.hstack([temp_thpr, temp_t[3]])
+
+                    else:
+
+                        # Get velocity data
+                        vel = np.copy(np.squeeze(self.raw_vel_mps[:, ii]))
+
+                        # Apply heading pitch roll for inst and ship coordinate data
+                        temp_thpr = np.array(hpr_matrix).dot(vel[:3])
+                        temp_thpr = np.hstack([temp_thpr, vel[3]])
+
+                    vel_changed[:, ii] = temp_thpr.T
+
+                # Assign results to object
+                self.u_mps = -1 * vel_changed[0, :]
+                self.v_mps = -1 * vel_changed[1, :]
+                self.w_mps = vel_changed[2, :]
+                self.d_mps = vel_changed[3, :]
+                self.coord_sys = new_coord_sys
+                self.u_processed_mps = np.copy(self.u_mps)
+                self.v_processed_mps = np.copy(self.v_mps)
+
+    def change_heading(self, heading_change):
+        """Rotates the boat velocities for a change in heading due to a change in
+        magnetic variation, heading offset, or heading source.
+
+        Parameters
+        ----------
+        heading_change: float
+            Change in the magnetic variation in degrees
+        """
+
+        # Apply change to processed data
+        direction, mag = cart2pol(self.u_processed_mps, self.v_processed_mps)
+        self.u_processed_mps, self.v_processed_mps = pol2cart(direction - np.deg2rad(heading_change), mag)
+
+        # Apply change to unprocessed data
+        direction, mag = cart2pol(self.u_mps, self.v_mps)
+        self.u_mps, self.v_mps = pol2cart(direction - np.deg2rad(heading_change), mag)
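+
+        # Worked example (illustrative, using the cart2pol/pol2cart conventions above):
+        # cart2pol(1.0, 0.0) returns direction 0.0 rad and magnitude 1.0; a +90 degree
+        # heading change then gives pol2cart(-pi/2, 1.0) = (0.0, -1.0).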
+
+    def apply_interpolation(self, transect, interpolation_method=None):
+        """Function to apply interpolations to navigation data.
+
+        Parameters
+        ----------
+        transect: TransectData
+            Object of TransectData
+        interpolation_method: str
+            Specified interpolation method if different from that in self
+        """
+
+        # Reset processed data
+        if self.u_mps is not None:
+            self.u_processed_mps = np.copy(self.u_mps)
+            self.v_processed_mps = np.copy(self.v_mps)
+            self.u_processed_mps[self.valid_data[0, :] == False] = np.nan
+            self.v_processed_mps[self.valid_data[0, :] == False] = np.nan
+
+            # Determine interpolation methods to apply
+            if interpolation_method is None:
+                interpolation_method = self.interpolate
+            else:
+                self.interpolate = interpolation_method
+
+            # Apply specified interpolation method
+
+            if interpolation_method == 'None':
+                # Sets invalid data to nan with no interpolation
+                self.interpolate_none()
+
+            elif interpolation_method == 'ExpandedT':
+                # Back fill with the next valid value; remaining interpolation is done on discharge in QComp
+                self.interpolate_next()
+
+            elif interpolation_method == 'Hold9':
+                # Interpolates using SonTek method of holding last valid for up to 9 samples
+                self.interpolate_hold_9()
+
+            elif interpolation_method == 'HoldLast':
+                # Interpolates by holding last valid indefinitely
+                self.interpolate_hold_last()
+
+            elif interpolation_method == 'Linear':
+                # Interpolates using linear interpolation
+                self.interpolate_linear(transect)
+
+            elif interpolation_method == 'Smooth':
+                # Interpolates using smooth interpolation
+                self.interpolate_smooth(transect)
+
+            elif interpolation_method == 'TRDI':
+                # For TRDI, interpolation is performed on discharge in QComp, not on velocities
+                self.interpolate_none()
+
+    def apply_composite(self, u_composite, v_composite, composite_source):
+        """Stores composite velocities and sources.
+
+        Parameters
+        ----------
+        u_composite: np.array(float)
+            Composite u-velocity component, in m/s
+        v_composite: np.array(float)
+            Composite v-velocity component, in m/s
+        composite_source: np.array(int)
+            Integer code identifying the velocity reference used for each ensemble.
+        """
+
+        self.u_processed_mps = u_composite
+        self.v_processed_mps = v_composite
+        self.processed_source[composite_source == 1] = 'BT'
+        self.processed_source[composite_source == 2] = 'GGA'
+        self.processed_source[composite_source == 3] = 'VTG'
+        self.processed_source[composite_source == 0] = 'INT'
+        self.processed_source[composite_source == -1] = 'INV'
+
+    def sos_correction(self, ratio):
+        """Correct boat velocity for a change in speed of sound.
+
+        Parameters
+        ----------
+        ratio: float
+            Ratio of new and old speed of sound
+        """
+
+        # Correct velocities
+        self.u_mps = self.u_mps * ratio
+        self.v_mps = self.v_mps * ratio
+        self.w_mps = self.w_mps * ratio
+
+    def interpolate_hold_9(self):
+        """This function applies Sontek's approach to maintaining the last valid boat speed for up to 9 invalid samples.
+        """
+
+        # Initialize variables
+        n_ensembles = self.u_mps.shape[0]
+
+        # Get data from object
+        self.u_processed_mps = np.copy(self.u_mps)
+        self.v_processed_mps = np.copy(self.v_mps)
+        self.u_processed_mps[self.valid_data[0, :] == False] = np.nan
+        self.v_processed_mps[self.valid_data[0, :] == False] = np.nan
+
+        n_invalid = 0
+        # Process data by ensembles
+        for n in range(n_ensembles):
+            # Check if ensemble is invalid and number of consecutive invalids is less than 9
+            if self.valid_data[0, n] == False and n_invalid < 9:
+                self.u_processed_mps[n] = self.u_processed_mps[n - 1]
+                self.v_processed_mps[n] = self.v_processed_mps[n - 1]
+                n_invalid += 1
+            else:
+                n_invalid = 0
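+
+        # Example (illustrative): with valid pattern [True, False, False, True],
+        # both invalid ensembles repeat the value of the first (valid) ensemble.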
+
+    def interpolate_none(self):
+        """This function removes any interpolation from the data and sets filtered data to nan."""
+
+        # Reset processed data
+        self.u_processed_mps = np.copy(self.u_mps)
+        self.v_processed_mps = np.copy(self.v_mps)
+        self.u_processed_mps[self.valid_data[0, :] == False] = np.nan
+        self.v_processed_mps[self.valid_data[0, :] == False] = np.nan
+
+    def interpolate_hold_last(self):
+        """This function holds the last valid value until the next valid data point."""
+
+        if self.u_mps is not None:
+            # Initialize variables
+            n_ensembles = len(self.u_mps)
+
+            # Get data from object
+            self.u_processed_mps = np.copy(self.u_mps)
+            self.v_processed_mps = np.copy(self.v_mps)
+            self.u_processed_mps[self.valid_data[0, :] == False] = np.nan
+            self.v_processed_mps[self.valid_data[0, :] == False] = np.nan
+
+            # Process data by ensembles
+            for n in range(1, n_ensembles):
+                # Hold the last valid value for any invalid ensemble
+                if self.valid_data[0, n] == False:
+                    self.u_processed_mps[n] = self.u_processed_mps[n - 1]
+                    self.v_processed_mps[n] = self.v_processed_mps[n - 1]
+
+    def interpolate_next(self):
+        """This function uses the next valid data to back fill for invalid"""
+
+        # Get valid ensembles
+        valid_ens = self.valid_data[0, :]
+
+        # Process ensembles
+        n_ens = len(valid_ens)
+
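+        # Iterate backward so each invalid ensemble is back filled with the
+        # nearest following valid (or already back filled) value.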
+        for n in np.arange(0, n_ens-1)[::-1]:
+            if not valid_ens[n]:
+                self.u_processed_mps[n] = self.u_processed_mps[n+1]
+                self.v_processed_mps[n] = self.v_processed_mps[n+1]
+
+    def interpolate_smooth(self, transect):
+        """This function interpolates data flagged invalid using the smooth function.
+
+        Parameters
+        ----------
+        transect: TransectData
+            Object of TransectData
+        """
+
+        # Get data from object
+
+        u = np.copy(self.u_mps)
+        v = np.copy(self.v_mps)
+        u[self.valid_data[0, :] == False] = np.nan
+        v[self.valid_data[0, :] == False] = np.nan
+
+        # Compute ens_time
+        ens_time = np.nancumsum(transect.date_time.ens_duration_sec)
+
+        # Apply smooth to each component
+        u_smooth = rloess(ens_time, u, 10)
+        v_smooth = rloess(ens_time, v, 10)
+
+        # Save data in object
+        self.u_processed_mps = u
+        self.v_processed_mps = v
+        self.u_processed_mps[np.isnan(u)] = u_smooth[np.isnan(u)]
+        self.v_processed_mps[np.isnan(v)] = v_smooth[np.isnan(v)]
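+
+        # Note: only ensembles flagged invalid (NaN above) receive smoothed
+        # values; valid ensembles keep their measured velocities.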
+
+    def interpolate_linear(self, transect):
+        """This function interpolates data flagged invalid using linear interpolation.
+
+        Parameters
+        ----------
+        transect: TransectData
+            Object of TransectData
+        """
+
+        u = np.copy(self.u_mps)
+        v = np.copy(self.v_mps)
+
+        valid = np.isnan(u) == False
+
+        # Check for valid data
+        if sum(valid) > 1 and sum(self.valid_data[0, :]) > 1:
+
+            # Compute ens_time
+            ens_time = np.nancumsum(transect.date_time.ens_duration_sec)
+
+            # Apply linear interpolation
+            self.u_processed_mps = np.interp(x=ens_time,
+                                             xp=ens_time[self.valid_data[0, :]],
+                                             fp=u[self.valid_data[0, :]],
+                                             left=np.nan,
+                                             right=np.nan)
+            # Apply linear interpolation
+            self.v_processed_mps = np.interp(x=ens_time,
+                                             xp=ens_time[self.valid_data[0, :]],
+                                             fp=v[self.valid_data[0, :]],
+                                             left=np.nan,
+                                             right=np.nan)
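+
+            # Note: left=np.nan and right=np.nan prevent extrapolation, leaving
+            # ensembles outside the range of valid data as NaN.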
+
+    def interpolate_composite(self, transect):
+        """This function interpolates processed data flagged invalid using linear interpolation.
+
+        Parameters
+        ----------
+        transect: TransectData
+            Object of TransectData
+        """
+
+        u = np.copy(self.u_processed_mps)
+        v = np.copy(self.v_processed_mps)
+
+        valid = np.isnan(u) == False
+
+        # Check for valid data
+        if np.sum(valid) > 1:
+
+            # Compute ens_time
+            ens_time = np.nancumsum(transect.date_time.ens_duration_sec)
+
+            # Ensure monotonic input
+            diff_time = np.diff(ens_time[valid])
+            idx = np.where(diff_time == 0)[0]
+            mono_array = np.vstack([ens_time[valid], u[valid], v[valid]])
+            # Replace non-monotonic times with average values
+            for i in idx[::-1]:
+                mono_array[1, i] = np.nanmean(mono_array[1, i:i+2])
+                mono_array[2, i] = np.nanmean(mono_array[2, i:i + 2])
+                mono_array = np.delete(mono_array, i+1, 1)
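+            # Example (illustrative): for times [0, 1, 1, 2] the duplicate at
+            # index 1 is replaced by the mean of samples 1 and 2 and the extra
+            # sample is deleted, giving the monotonically increasing abscissa
+            # that np.interp expects.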
+            # Apply linear interpolation
+            self.u_processed_mps = np.interp(ens_time,
+                                             mono_array[0, :],
+                                             mono_array[1, :])
+            # Apply linear interpolation
+            self.v_processed_mps = np.interp(ens_time,
+                                             mono_array[0, :],
+                                             mono_array[2, :])
+
+    def apply_filter(self, transect, beam=None, difference=None, difference_threshold=None, vertical=None,
+                     vertical_threshold=None, other=None):
+        """Function to apply filters to navigation data.
+
+        More than one filter can be applied during a single call.
+
+        Parameters
+        ----------
+        transect: TransectData
+            Object of TransectData
+        beam: int
+            Setting for beam filter (3, 4, -1)
+        difference: str
+            Setting for difference velocity filter (Auto, Manual, Off)
+        difference_threshold: float
+            Threshold for manual setting
+        vertical: str
+            Setting for vertical velocity filter (Auto, Manual, Off)
+        vertical_threshold: float
+            Threshold for manual setting
+        other: bool
+            Setting for the smooth filter
+        """
+
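+        # If every argument is None, the set below collapses to {None} (length 1)
+        # and the previously stored filter settings are reapplied instead.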
+        if len({beam, difference, difference_threshold, vertical, vertical_threshold, other}) > 1:
+
+            # Filter based on number of valid beams
+            if beam is not None:
+                self.filter_beam(setting=beam)
+
+            # Filter based on difference velocity
+            if difference is not None:
+                if difference == 'Manual':
+                    self.filter_diff_vel(setting=difference, threshold=difference_threshold)
+                else:
+                    self.filter_diff_vel(setting=difference)
+
+            # Filter based on vertical velocity
+            if vertical is not None:
+                if vertical == 'Manual':
+                    self.filter_vert_vel(setting=vertical, threshold=vertical_threshold)
+                else:
+                    self.filter_vert_vel(setting=vertical)
+
+            # Filter based on robust loess smooth
+            if other is not None:
+                self.filter_smooth(setting=other, transect=transect)
+
+        else:
+            self.filter_beam(setting=self.beam_filter)
+            self.filter_diff_vel(setting=self.d_filter, threshold=self.d_filter_thresholds)
+            self.filter_vert_vel(setting=self.w_filter, threshold=self.w_filter_thresholds)
+            self.filter_smooth(setting=self.smooth_filter, transect=transect)
+
+        # Apply previously specified interpolation method
+        self.apply_interpolation(transect)
+
+    def filter_beam(self, setting):
+        """Applies beam filter.
+
+        The determination of invalid data depends on whether
+        3-beam or 4-beam solutions are acceptable. This function can be
+        applied by specifying 3 or 4 beam solutions, or by setting
+        beam_filter to -1 which will trigger an automatic mode. The
+        automatic mode will find all 3 beam solutions and then compare
+        the velocity of each 3 beam solution to the nearest 4 beam solutions
+        before and after it. If the 3 beam solution is
+        within 50% of the average of the neighboring 4 beam solutions the
+        data are deemed valid; otherwise they are invalid. Thus in automatic mode only
+        those data from 3 beam solutions that differ sufficiently
+        from the 4 beam solutions are marked invalid. The process happens
+        for each ensemble. If the number of beams is specified manually
+        it is applied uniformly for the whole transect.
+
+        Parameters
+        ----------
+        setting: int
+            Setting for beam filter (3, 4, -1)
+        """
+
+        self.beam_filter = setting
+
+        # In manual mode determine number of raw invalid and number of 3 beam solutions
+        # 3 beam solutions if selected
+        if self.beam_filter > 0:
+
+            # Find invalid raw data
+            valid_vel = np.ones(self.raw_vel_mps.shape)
+            valid_vel[np.isnan(self.raw_vel_mps)] = 0
+
+            # Determine how many beams transformed coordinates are valid
+            valid_vel_sum = np.sum(valid_vel, 0)
+            valid = np.ones(valid_vel_sum.shape)
+
+            # Compare number of valid beams or coordinates to filter value
+            valid[valid_vel_sum < self.beam_filter] = False
+
+            # Save logical of valid data to object
+            self.valid_data[5, :] = valid
+
+        else:
+
+            # Apply automatic filter
+            # ----------------------
+            # Find all 3 beam solutions
+            self.filter_beam(3)
+            beam_3_valid_data = copy.deepcopy(self.valid_data)
+            self.filter_beam(4)
+            valid_3_beams = np.logical_xor(beam_3_valid_data[5, :], self.valid_data[5, :])
+            n_ens = len(self.valid_data[5, :])
+            idx = np.where(valid_3_beams == True)[0]
+
+            # If 3 beam solutions exist, evaluate their validity
+            if len(idx) > 0:
+
+                # Identify 3 beam solutions that appear to be invalid
+                n3_beam_ens = len(idx)
+
+                # Check each three beam solution for validity
+                for m in range(n3_beam_ens):
+
+                    # Use before and after values to check 3-beam solution
+                    # but make sure the ensemble is not the first or last.
+                    if (idx[m] > 1) and (idx[m] < n_ens):
+
+                        # Find nearest 4 beam solutions before and after
+                        # 3 beam solution
+                        ref_idx_before = np.where(self.valid_data[5, :idx[m]] == True)[0]
+                        if len(ref_idx_before) > 0:
+                            ref_idx_before = ref_idx_before[-1]
+                        else:
+                            ref_idx_before = None
+
+                        ref_idx_after = np.where(self.valid_data[5, idx[m]:] == True)[0]
+                        if len(ref_idx_after) > 0:
+                            ref_idx_after = idx[m] + ref_idx_after[0]
+                        else:
+                            ref_idx_after = None
+
+                        if (ref_idx_after is not None) and (ref_idx_before is not None):
+                            u_ratio = (self.u_mps[idx[m]]) / ((self.u_mps[ref_idx_before]
+                                                               + self.u_mps[ref_idx_after]) / 2.) - 1
+                            v_ratio = (self.v_mps[idx[m]]) / ((self.v_mps[ref_idx_before]
+                                                               + self.v_mps[ref_idx_after]) / 2.) - 1
+                        else:
+                            u_ratio = 1
+                            v_ratio = 1
+
+                        # If both components of the 3-beam solution differ from the neighboring 4-beam average by more than 50%, mark it invalid
+                        if (np.abs(u_ratio) > 0.5) and (np.abs(v_ratio) > 0.5):
+                            self.valid_data[5, idx[m]] = False
+                        else:
+                            self.valid_data[5, idx[m]] = True
+
+            self.beam_filter = -1
+
+        # Combine all filter data to composite valid data
+        self.valid_data[0, :] = np.all(self.valid_data[1:, :], 0)
+        self.num_invalid = np.sum(self.valid_data[0, :] == False)
+
+    def filter_diff_vel(self, setting, threshold=None):
+        """Applies either manual or automatic filtering of the difference
+        (error) velocity.
+
+        The automatic mode is based on the following:
+        This filter is based on the assumption that the error (difference) velocity
+        should follow a Gaussian distribution. Therefore, 5 iqr
+        should encompass all of the valid data. The iqr and
+        limits (multiplier * iqr) are computed in an iterative
+        process until filtering out additional data does not change the computed
+        iqr.
+
+        Parameters
+        ----------
+        setting: str
+            Difference velocity setting (Off, Manual, Auto)
+        threshold: float
+            If manual, the user specified threshold
+        """
+
+        self.d_filter = setting
+        if setting == 'Manual':
+            self.d_filter_thresholds = threshold
+
+        # Apply selected method
+        if self.d_filter == 'Manual':
+            d_vel_max_ref = np.abs(self.d_filter_thresholds)
+            d_vel_min_ref = -1 * d_vel_max_ref
+            invalid_idx = np.where(np.logical_or(nan_greater(self.d_mps, d_vel_max_ref),
+                                                 nan_less(self.d_mps, d_vel_min_ref)))[0]
+        elif self.d_filter == 'Off':
+            invalid_idx = np.array([])
+
+        elif self.d_filter == 'Auto':
+            if self.use_measurement_thresholds:
+                freq_ensembles = self.frequency_khz.astype(int).astype(str)
+                invalid_idx = np.array([])
+                for freq in self.d_meas_thresholds.keys():
+                    filter_data = np.copy(self.d_mps)
+                    filter_data[freq_ensembles != freq] = np.nan
+                    idx = np.where(np.logical_or(np.greater(filter_data, self.d_meas_thresholds[freq][0]),
+                                                 np.less(filter_data, self.d_meas_thresholds[freq][1])))[0]
+                    if idx.size > 0:
+                        if invalid_idx.size > 0:
+                            invalid_idx = np.hstack((invalid_idx, idx))
+                        else:
+                            invalid_idx = idx
+            else:
+                freq_used = np.unique(self.frequency_khz).astype(int).astype(str)
+                freq_ensembles = self.frequency_khz.astype(int).astype(str)
+                self.d_filter_thresholds = {}
+                invalid_idx = np.array([])
+                for freq in freq_used:
+                    filter_data = np.copy(self.d_mps)
+                    filter_data[freq_ensembles != freq] = np.nan
+                    d_vel_max_ref, d_vel_min_ref = self.iqr_filter(filter_data)
+                    self.d_filter_thresholds[freq] = [d_vel_max_ref, d_vel_min_ref]
+                    idx = np.where(np.logical_or(nan_greater(filter_data, d_vel_max_ref),
+                                                 nan_less(filter_data, d_vel_min_ref)))[0]
+                    if idx.size > 0:
+                        if invalid_idx.size > 0:
+                            invalid_idx = np.hstack((invalid_idx, idx))
+                        else:
+                            invalid_idx = idx
+        else:
+            invalid_idx = np.array([])
+
+        # Set valid data row 3 for difference velocity filter results
+        self.valid_data[2, :] = True
+        if invalid_idx.size > 0:
+            self.valid_data[2, invalid_idx] = False
+
+        # Combine all filter data to composite filter data
+        self.valid_data[0, :] = np.all(self.valid_data[1:, :], 0)
+        self.num_invalid = np.sum(self.valid_data[0, :] == False)
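+
+    # Hedged usage sketch (illustrative only; assumes a populated BoatData
+    # instance `bt` with difference and vertical velocities loaded):
+    #
+    #     bt.filter_diff_vel(setting='Auto')   # iterative IQR-based thresholds
+    #     bt.filter_vert_vel(setting='Auto')   # same approach, vertical velocity
+    #     print(bt.num_invalid)                # ensembles failing any filter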
+
+    def filter_vert_vel(self, setting, threshold=None):
+        """Applies either manual or automatic filtering of the vertical
+        velocity.  Uses same assumptions as difference filter.
+
+        Parameters
+        ----------
+        setting: str
+            Filter setting (Off, Manual, Auto)
+        threshold: float
+            If setting is manual, the user specified threshold
+        """
+
+        # Set vertical velocity filter properties
+        self.w_filter = setting
+        if setting == 'Manual':
+            self.w_filter_thresholds = threshold
+
+        # Apply selected method
+        if self.w_filter == 'Manual':
+            w_vel_max_ref = np.abs(self.w_filter_thresholds)
+            w_vel_min_ref = -1 * w_vel_max_ref
+            invalid_idx = np.where(np.logical_or(nan_greater(self.w_mps, w_vel_max_ref),
+                                                 nan_less(self.w_mps, w_vel_min_ref)))[0]
+
+        elif self.w_filter == 'Off':
+            invalid_idx = np.array([])
+
+        elif self.w_filter == 'Auto':
+            if self.use_measurement_thresholds:
+                freq_ensembles = self.frequency_khz.astype(int).astype(str)
+                invalid_idx = np.array([])
+                for freq in self.w_meas_thresholds.keys():
+                    filter_data = np.copy(self.w_mps.astype(float))
+                    filter_data[freq_ensembles != freq] = np.nan
+                    idx = np.where(np.logical_or(np.greater(filter_data, self.w_meas_thresholds[freq][0]),
+                                                 np.less(filter_data, self.w_meas_thresholds[freq][1])))[0]
+                    if idx.size > 0:
+                        if invalid_idx.size > 0:
+                            invalid_idx = np.hstack((invalid_idx, idx))
+                        else:
+                            invalid_idx = idx
+            else:
+                freq_used = np.unique(self.frequency_khz).astype(int).astype(str)
+                freq_ensembles = self.frequency_khz.astype(int).astype(str)
+                self.w_filter_thresholds = {}
+                invalid_idx = np.array([])
+                for freq in freq_used:
+                    filter_data = np.copy(self.w_mps)
+                    filter_data[freq_ensembles != freq] = np.nan
+                    w_vel_max_ref, w_vel_min_ref = self.iqr_filter(filter_data)
+                    self.w_filter_thresholds[freq] = [w_vel_max_ref, w_vel_min_ref]
+                    idx = np.where(np.logical_or(nan_greater(filter_data, w_vel_max_ref),
+                                                 nan_less(filter_data, w_vel_min_ref)))[0]
+                    if idx.size > 0:
+                        if invalid_idx.size > 0:
+                            invalid_idx = np.hstack((invalid_idx, idx))
+                        else:
+                            invalid_idx = idx
+        else:
+            invalid_idx = np.array([])
+
+        # Set valid data row 4 for vertical velocity filter results
+        self.valid_data[3, :] = True
+        if invalid_idx.size > 0:
+            self.valid_data[3, invalid_idx] = False
+
+        # Combine all filter data to composite valid data
+        self.valid_data[0, :] = np.all(self.valid_data[1:, :], 0)
+        self.num_invalid = np.sum(self.valid_data[0, :] == False)
+
+    @staticmethod
+    def iqr_filter(data, multiplier=5, minimum_window=0.01):
+        """Applies an iterative IQR filter to difference or vertical velocity data.
+
+        Parameters
+        ----------
+        data: np.ndarray(float)
+            Array of difference or vertical velocity data
+        multiplier: int
+            Number of IQR's to use to set the threshold
+        minimum_window: float
+            Minimum allowable threshold
+
+        Returns
+        -------
+        data_max_ref: float
+            Maximum threshold
+        data_min_ref: float
+            Minimum threshold
+        """
+        data_max_ref = np.nan
+        data_min_ref = np.nan
+
+        # Check to make sure there are data to process
+        if data.size > 0 and np.any(np.logical_not(np.isnan(data))):
+            # Initialize loop controllers
+            k = 0
+            iqr_diff = 1
+
+            # Loop until no additional data are removed
+            while iqr_diff != 0 and k < 1000:
+                k += 1
+
+                # Compute the interquartile range (IQR)
+                data_iqr = iqr(data)
+                threshold_window = multiplier * data_iqr
+                if threshold_window < minimum_window:
+                    threshold_window = minimum_window
+
+                # Compute maximum and minimum thresholds
+                data_max_ref = np.nanmedian(data) + threshold_window
+                data_min_ref = np.nanmedian(data) - threshold_window
+
+                # Identify valid and invalid data
+                data_less_idx = np.where(nan_less_equal(data, data_max_ref))[0]
+                data_greater_idx = np.where(nan_greater_equal(data, data_min_ref))[0]
+                data_good_idx = list(np.intersect1d(data_less_idx, data_greater_idx))
+
+                # Update filtered data array
+                data = copy.deepcopy(data[data_good_idx])
+
+                # Determine differences due to last filter iteration
+                if len(data) > 0:
+                    data_iqr2 = iqr(data)
+                    iqr_diff = data_iqr2 - data_iqr
+                else:
+                    iqr_diff = 0
+
+        return data_max_ref, data_min_ref
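+
+    # Illustrative sketch (hypothetical data): a single large spike is rejected
+    # because it falls outside the multiplier * IQR window about the median.
+    #
+    #     data = np.array([0.01, -0.02, 0.00, 0.03, 2.00])  # 2.00 m/s spike
+    #     d_max, d_min = BoatData.iqr_filter(data, multiplier=5)
+    #     # 2.00 > d_max, so the spike would be marked invalid by the caller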
+
+    def filter_smooth(self, transect, setting):
+        """This filter employs a running trimmed standard deviation filter to
+        identify and mark spikes in the boat speed.
+
+        First a robust Loess smooth is fitted to the boat speed time series and
+        residuals between the raw data and the smoothed line are computed. The
+        trimmed standard deviation is computed by selecting the number of residuals
+        specified by "halfwidth" before the target point and after the target point,
+        but not including the target point. These values are then sorted, and the points
+        with the highest and lowest values are removed from the subset, and the
+        standard deviation of the trimmed subset is computed. The filter
+        criteria are determined by multiplying the standard deviation by a
+        user-specified multiplier. These criteria define the maximum and
+        minimum acceptable residuals. Data falling outside the criteria are
+        set to nan.
+
+        Recommended filter settings are:
+        filterWidth=10;
+        halfWidth=10;
+        multiplier=9;
+
+        David S. Mueller, USGS, OSW
+        9/8/2005
+
+        Parameters
+        ----------
+        transect: TransectData
+            Object of TransectData
+        setting: str
+            Filter setting ('On' or 'Off')
+        """
+
+        # Set property
+        self.smooth_filter = setting
+
+        # Compute ens_time
+        ens_time = np.nancumsum(transect.date_time.ens_duration_sec)
+        n_ensembles = len(ens_time)
+        # Determine if smooth filter should be applied
+        if self.smooth_filter == 'On':
+            # Initialize arrays
+            self.smooth_speed = repmat([np.nan], 1, n_ensembles)
+            self.smooth_upper_limit = repmat([np.nan], 1, n_ensembles)
+            self.smooth_lower_limit = repmat([np.nan], 1, n_ensembles)
+
+            # Boat velocity components
+            b_vele = np.copy(self.u_mps)
+            b_veln = np.copy(self.v_mps)
+
+            # Set filter parameters
+            filter_width = 10
+            half_width = 10
+            multiplier = 9
+            cycles = 3
+
+            # Initialize variables
+            bt_bad_idx = []
+            upper_limit = 0
+            lower_limit = 0
+
+            # Compute speed and direction of boat
+            direct, speed = cart2pol(b_vele, b_veln)
+
+            # Compute residuals from a robust Loess smooth
+            speed_smooth = rloess(ens_time, speed, filter_width)
+            speed_res = speed - speed_smooth
+
+            # Apply a trimmed standard deviation filter multiple times
+            for i in range(cycles):
+                filter_array = BoatData.run_std_trim(half_width, speed_res.T)
+
+                # Compute filter bounds
+                upper_limit = speed_smooth + multiplier * filter_array
+                lower_limit = speed_smooth - multiplier * filter_array
+
+                # Apply filter to residuals
+                bt_bad_idx = np.where(np.logical_or(np.greater(speed, upper_limit), np.less(speed, lower_limit)))[0]
+                speed_res[bt_bad_idx] = np.nan
+
+            # Update valid_data property
+            self.valid_data[4, :] = True
+            self.valid_data[4, bt_bad_idx] = False
+            self.valid_data[4, self.valid_data[1, :] == False] = True
+            self.smooth_upper_limit = upper_limit
+            self.smooth_lower_limit = lower_limit
+            self.smooth_speed = speed_smooth
+
+        else:
+
+            # No filter applied; all data assumed valid
+            self.valid_data[4, :] = True
+            self.smooth_upper_limit = np.nan
+            self.smooth_lower_limit = np.nan
+            self.smooth_speed = np.nan
+
+        # Combine all filter data to composite valid data
+        self.valid_data[0, :] = np.all(self.valid_data[1:, :], 0)
+        self.num_invalid = np.sum(self.valid_data[0, :] == False, 0)
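+
+    # Hedged usage sketch (illustrative; `transect` is assumed to be a populated
+    # TransectData object and `bt` its bottom track BoatData):
+    #
+    #     bt.filter_smooth(transect=transect, setting='On')
+    #     # bt.smooth_speed, bt.smooth_upper_limit, and bt.smooth_lower_limit now
+    #     # hold the Loess smooth and the +/- multiplier * trimmed-std bounds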
+
+    def apply_gps_filter(self, transect, differential=None, altitude=None, altitude_threshold=None,
+                         hdop=None, hdop_max_threshold=None, hdop_change_threshold=None, other=None):
+        """Applies filters to GPS referenced boat velocity data.
+
+        Parameters
+        ----------
+        transect: TransectData
+            Object of TransectData
+        differential: str
+            Differential filter setting (1, 2, 4)
+        altitude: str
+            New setting for altitude filter (Off, Manual, Auto)
+        altitude_threshold: float
+            Threshold provided by user for manual altitude setting
+        hdop: str
+            Filter setting (Off, Manual, Auto)
+        hdop_max_threshold: float
+            Maximum HDOP threshold
+        hdop_change_threshold: float
+            HDOP change threshold
+        other: str
+            Setting for other filters, typically the smooth filter ('On' or 'Off').
+        """
+
+        if any(setting is not None for setting in
+               (differential, altitude, altitude_threshold, hdop,
+                hdop_max_threshold, hdop_change_threshold, other)):
+            # Differential filter only applies to GGA data, defaults to 1 for VTG
+            if differential is not None:
+                if self.nav_ref == 'GGA':
+                    self.filter_diff_qual(gps_data=transect.gps, setting=int(differential))
+                else:
+                    self.filter_diff_qual(gps_data=transect.gps, setting=1)
+
+            # Altitude filter only applies to GGA data
+            if altitude is not None:
+                if (altitude == 'Manual') and (self.nav_ref == 'GGA'):
+                    self.filter_altitude(gps_data=transect.gps, setting=altitude, threshold=altitude_threshold)
+                elif self.nav_ref == 'GGA':
+                    self.filter_altitude(gps_data=transect.gps, setting=altitude)
+
+            if hdop is not None:
+                if hdop == 'Manual':
+                    self.filter_hdop(gps_data=transect.gps, setting=hdop, max_threshold=hdop_max_threshold,
+                                     change_threshold=hdop_change_threshold)
+                else:
+                    self.filter_hdop(gps_data=transect.gps, setting=hdop)
+
+            if other is not None:
+                self.filter_smooth(transect=transect, setting=other)
+        else:
+            self.filter_diff_qual(gps_data=transect.gps)
+            self.filter_altitude(gps_data=transect.gps)
+            self.filter_hdop(gps_data=transect.gps)
+            self.filter_smooth(transect=transect, setting=self.smooth_filter)
+
+        # Apply previously specified interpolation method
+        self.apply_interpolation(transect=transect)
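+
+    # Hedged usage sketch (illustrative; `gga` is assumed to be the GGA-referenced
+    # BoatData object of a transect with GPS data):
+    #
+    #     gga.apply_gps_filter(transect=transect, differential='2',
+    #                          altitude='Auto', hdop='Auto', other='Off')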
+
+    def filter_diff_qual(self, gps_data, setting=None):
+        """Filters GPS data based on the minimum acceptable differential correction quality.
+
+        Parameters
+        ----------
+        gps_data: GPSData
+            Object of GPSData
+        setting: int
+            Filter setting (1, 2, 4).
+        """
+
+        # New filter setting if provided
+        if setting is not None:
+            self.gps_diff_qual_filter = setting
+
+        # Reset valid_data property
+        self.valid_data[2, :] = True
+        self.valid_data[5, :] = True
+
+        # Determine and apply appropriate filter type
+        if gps_data.diff_qual_ens is not None:
+            self.valid_data[2, np.isnan(gps_data.diff_qual_ens)] = False
+            if self.gps_diff_qual_filter is not None:
+                # Autonomous
+                if self.gps_diff_qual_filter == 1:
+                    self.valid_data[2, gps_data.diff_qual_ens < 1] = False
+                # Differential correction
+                elif self.gps_diff_qual_filter == 2:
+                    self.valid_data[2, gps_data.diff_qual_ens < 2] = False
+                # RTK
+                elif self.gps_diff_qual_filter == 4:
+                    self.valid_data[2, gps_data.diff_qual_ens < 4] = False
+
+                # If there is no indication of the quality, assume 1 for VTG
+                if self.nav_ref == 'VTG':
+                    self.valid_data[2, np.isnan(gps_data.diff_qual_ens)] = True
+            else:
+                self.valid_data[2, :] = False
+
+        # Combine all filter data to composite valid data
+        self.valid_data[0, :] = np.all(self.valid_data[1:, :], 0)
+        self.num_invalid = np.sum(self.valid_data[0, :] == False)
+
+    def filter_altitude(self, gps_data, setting=None, threshold=None):
+        """Filter GPS data based on a change in altitude.
+
+        Assuming the data are collected on the river the altitude should not
+        change substantially during the transect. Since vertical resolution is
+        about 3 x worse than horizontal resolution, the automatic filter
+        threshold is set to 3 m, which should ensure submeter horizontal
+        accuracy.
+
+        Parameters
+        ----------
+        gps_data: GPSData
+            Object of GPSData
+        setting: str
+            New setting for filter (Off, Manual, Auto)
+        threshold: float
+            Threshold provided by user for manual setting
+        """
+
+        # New filter settings if provided
+        if setting is not None:
+            self.gps_altitude_filter = setting
+            if setting == 'Manual':
+                self.gps_altitude_filter_change = threshold
+
+        # Set threshold for Auto
+        if self.gps_altitude_filter == 'Auto':
+            self.gps_altitude_filter_change = 3
+
+        # Set all data to valid
+        self.valid_data[3, :] = True
+        # self.valid_data[5, :] = True
+
+        # Manual or Auto is selected, apply filter
+        if not self.gps_altitude_filter == 'Off':
+            # Initialize variables
+            num_valid_old = np.sum(self.valid_data[3, :])
+            k = 0
+            change = 1
+            # Loop until no change in the number of valid ensembles
+            while k < 100 and change > 0.1:
+                # Compute mean using valid ensembles
+                if self.valid_data.shape[1] == 1:
+                    if self.valid_data[1, 0]:
+                        alt_mean = gps_data.altitude_ens_m
+                    else:
+                        alt_mean = np.nan
+                else:
+                    alt_mean = np.nanmean(gps_data.altitude_ens_m[self.valid_data[1, :]])
+
+                # Compute difference for each ensemble
+                diff = np.abs(gps_data.altitude_ens_m - alt_mean)
+
+                # Mark invalid those ensembles with differences greater than the change threshold
+                self.valid_data[3, diff > self.gps_altitude_filter_change] = False
+                k += 1
+                num_valid = np.sum(self.valid_data[3, :])
+                change = num_valid_old - num_valid
+                num_valid_old = num_valid
+
+        # Combine all filter data to composite valid data
+        self.valid_data[0, :] = np.all(self.valid_data[1:, :], 0)
+        self.num_invalid = np.sum(self.valid_data[0, :] == False)
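+
+    # Illustrative sketch: with ensemble altitudes near 102 m and one ensemble at
+    # 120 m, the Auto setting (3 m threshold) marks the 120 m ensemble invalid
+    # because it differs from the mean altitude of the valid ensembles by more
+    # than 3 m.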
+
+    def filter_hdop(self, gps_data, setting=None, max_threshold=None, change_threshold=None):
+        """Filter GPS data based on both a maximum HDOP and a change in HDOP
+        over the transect.
+
+        Parameters
+        ----------
+        gps_data: GPSData
+            Object of GPSData
+        setting: str
+            Filter setting (Off, Manual, Auto)
+        max_threshold: float
+            Maximum threshold
+        change_threshold: float
+            Change threshold
+        """
+
+        if gps_data.hdop_ens is None or gps_data.hdop_ens.size == 0:
+            self.valid_data[5, :self.valid_data.shape[1]] = True
+        else:
+            # New settings if provided
+            if setting is not None:
+                self.gps_HDOP_filter = setting
+                if self.gps_HDOP_filter == 'Manual':
+                    self.gps_HDOP_filter_max = max_threshold
+                    self.gps_HDOP_filter_change = change_threshold
+
+            # Settings for auto mode
+            if self.gps_HDOP_filter == 'Auto':
+                self.gps_HDOP_filter_change = 3
+                self.gps_HDOP_filter_max = 4
+
+            # Set all ensembles to valid
+            self.valid_data[5, :] = True
+
+            # Apply filter for manual or auto
+            if not self.gps_HDOP_filter == 'Off':
+
+                # Initialize variables
+                num_valid_old = np.sum(self.valid_data[5, :])
+                k = 0
+                change = 1
+
+                # Apply max filter
+                self.valid_data[5, np.greater(gps_data.hdop_ens, self.gps_HDOP_filter_max)] = False
+
+                # Loop until the number of valid ensembles does not change
+                while k < 100 and change > 0.1:
+
+                    # Compute mean HDOP for all valid ensembles
+                    if self.valid_data.shape[1] == 1:
+                        if self.valid_data[5, 0]:
+                            hdop_mean = gps_data.hdop_ens
+                        else:
+                            hdop_mean = np.nan
+                    else:
+                        hdop_mean = np.nanmean(gps_data.hdop_ens[self.valid_data[5, :]])
+
+                    # Compute the difference in HDOP and the mean for all ensembles
+                    diff = np.abs(gps_data.hdop_ens - hdop_mean)
+
+                    # If the change in HDOP from the mean is greater than the
+                    # change threshold, mark the data invalid
+                    self.valid_data[5, np.greater(diff, self.gps_HDOP_filter_change)] = False
+
+                    k += 1
+                    num_valid = np.sum(self.valid_data[5, :])
+                    change = num_valid_old - num_valid
+                    num_valid_old = num_valid
+
+        # Combine all filter data to composite data
+        self.valid_data[0, :] = np.all(self.valid_data[1:, :], 0)
+        self.num_invalid = np.sum(self.valid_data[0, :] == False)
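+
+    # Illustrative sketch: with the Auto settings (max HDOP 4, change threshold 3),
+    # an ensemble with HDOP 6 is marked invalid by the maximum filter, and any
+    # ensemble whose HDOP differs from the mean of the valid ensembles by more
+    # than 3 is marked invalid by the change filter.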
+
+    @staticmethod
+    def filter_sontek(vel_in):
+        """Determines invalid raw bottom track samples for SonTek data.
+
+        Invalid data are those that are zero or where the velocity doesn't change between ensembles.
+
+        Parameters
+        ----------
+        vel_in: np.array(float)
+            Bottom track velocity data, in m/s.
+
+        Returns
+        -------
+        vel_out: np.array(float)
+            Filtered bottom track velocity data with all invalid data set to np.nan.
+        """
+
+        # Identify all samples where the velocity did not change
+        test1 = np.abs(np.diff(vel_in, 1, 1)) < 0.00001
+
+        # Identify all samples with all zero values
+        test2 = np.nansum(np.abs(vel_in), 0) < 0.00001
+        test2 = test2[1:] * 4  # using 1: makes the array dimension consistent with test1 as diff results in 1 less.
+
+        # Combine criteria
+        test_sum = np.sum(test1, 0) + test2
+
+        # Develop logical vector of invalid ensembles
+        invalid_bool = np.full(test_sum.size, False)
+        invalid_bool[test_sum > 3] = True
+        # Handle first ensemble
+        invalid_bool = np.concatenate((np.array([False]), invalid_bool), 0)
+        if np.nansum(vel_in[:, 0]) == 0:
+            invalid_bool[0] = True
+
+        # Set invalid ensembles to nan
+        vel_out = np.copy(vel_in)
+        vel_out[:, invalid_bool] = np.nan
+
+        return vel_out
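+
+    # Illustrative behavior sketch: an ensemble is set to nan when every beam
+    # velocity repeats the previous ensemble (|diff| < 1e-5 in all beams) or when
+    # the ensemble is all zeros; the first ensemble is invalid only if its beam
+    # velocities sum to zero.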
+
+    @staticmethod
+    def run_std_trim(half_width, my_data):
+        """Computes a standard deviation over +/- halfwidth of points.
+
+        The routine accepts a column vector as input. "halfWidth" number of data
+        points for computing the standard deviation are selected before and
+        after the target data point, but not including the target data point.
+        Near the ends of the series the number of points before or after are
+        reduced. nan in the data are counted as points. The selected subset of
+        points are sorted and the points with the highest and lowest values are
+        removed from the subset and the standard deviation computed on the
+        remaining points in the subset. The process occurs for each point in the
+        provided column vector. A column vector with the computed standard
+        deviation at each point is returned.
+
+        Parameters
+        ----------
+        half_width: int
+             Number of ensembles on each side of the target ensemble to use
+             for computing trimmed standard deviation
+        my_data: np.array(float)
+             Data to be processed
+
+        Returns
+        -------
+        filter_array: np.array(float)
+             Vector of the computed standard deviation at each point
+        """
+
+        # Determine number of points to process
+        n_pts = my_data.shape[0]
+        if n_pts < 20:
+            half_width = int(np.floor(n_pts / 2.))
+
+        filter_array = []
+        # Compute standard deviation for each point
+        for n in range(n_pts):
+
+            # Sample selection for 1st point
+            if n == 0:
+                sample = my_data[1:1 + half_width]
+
+            # Sample selection at end of data set
+            elif n + half_width > n_pts:
+                sample = np.hstack((my_data[n - half_width - 1:n - 1], my_data[n:n_pts]))
+
+            # Sample selection at beginning of data set
+            elif half_width >= n + 1:
+                sample = np.hstack((my_data[0:n], my_data[n + 1:n + half_width + 1]))
+
+            # Samples selection in body of data set
+            else:
+                sample = np.hstack((my_data[n - half_width:n], my_data[n + 1:n + half_width + 1]))
+
+            # Sort and compute trimmed standard deviation
+            sample = np.sort(sample)
+            filter_array.append(np.nanstd(sample[1:sample.shape[0] - 1], ddof=1))
+
+        return np.array(filter_array)
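+
+    # Illustrative sketch: for half_width=10, the trimmed standard deviation at
+    # point n uses up to 10 residuals on each side of n (excluding n), sorts
+    # them, drops the single highest and lowest values, and computes the sample
+    # standard deviation (ddof=1) of the remainder.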
diff --git a/Classes/BoatStructure.py b/Classes/BoatStructure.py
new file mode 100644
index 0000000..f009c7b
--- /dev/null
+++ b/Classes/BoatStructure.py
@@ -0,0 +1,437 @@
+import numpy as np
+from Classes.BoatData import BoatData
+
+
+class BoatStructure(object):
+    """This class organizes the various sources for boat velocity into
+    a single structured class and establishes a selected property that
+    contains the select source for velocity and discharge computations.
+
+    Attributes
+    ----------
+    selected: str
+        Name of BoatData object to be used for discharge computations.
+    bt_vel: BoatData
+        BoatData object for bottom track velocity
+    gga_vel: BoatData
+        BoatData object for gga velocity
+    vtg_vel: BoatData
+        BoatData object for vtg velocity
+    composite: str
+        Setting to use ("On") or not ("Off") composite tracks.
+    """
+
+    def __init__(self):
+
+        self.selected = None  # Name of BoatData object to be used for discharge computations
+        self.bt_vel = None  # BoatData object for bottom track velocity
+        self.gga_vel = None  # BoatData object for gga velocity
+        self.vtg_vel = None  # BoatData object for vtg velocity
+
+        # Composite track information is not currently provided by the manufacturers.
+        # Future versions may try to determine this setting from SonTek data
+        self.composite = 'Off'  # Setting for composite tracks
+
+    def add_boat_object(self, source, vel_in, freq_in=None, coord_sys_in=None, nav_ref_in=None,
+                        min_beams=3, bottom_mode='Variable', corr_in=None, rssi_in=None):
+        """Adds a BoatData object to the appropriate property
+
+        Parameters
+        ----------
+        source: str
+            Name of manufacturer.
+        vel_in: np.array
+            Boat velocity array.
+        freq_in: np.array or float
+            Acoustic frequency
+        coord_sys_in: str
+            Coordinate system of velocity data.
+        nav_ref_in: str
+            Source of boat velocity data
+        min_beams: int
+            Setting to allow 3 beam solutions or require 4 beam solutions or set to Auto (-1)
+        bottom_mode: str
+            Bottom mode used
+        corr_in: np.array
+            Correlation data for bottom track
+        rssi_in: np.array
+            Signal strength (RSSI) data for bottom track
+        """
+
+        if nav_ref_in == 'BT':
+            self.bt_vel = BoatData()
+            self.bt_vel.populate_data(source, vel_in, freq_in, coord_sys_in, nav_ref_in, min_beams, bottom_mode,
+                                      corr_in, rssi_in)
+        if nav_ref_in == 'GGA':
+            self.gga_vel = BoatData()
+            self.gga_vel.populate_data(source, vel_in, freq_in, coord_sys_in, nav_ref_in)
+        if nav_ref_in == 'VTG':
+            self.vtg_vel = BoatData()
+            self.vtg_vel.populate_data(source, vel_in, freq_in, coord_sys_in, nav_ref_in)
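+
+    # Hedged usage sketch (illustrative; `vel`, the frequency, and the coordinate
+    # system shown are hypothetical):
+    #
+    #     boat = BoatStructure()
+    #     boat.add_boat_object(source='TRDI', vel_in=vel, freq_in=600,
+    #                          coord_sys_in='Earth', nav_ref_in='BT')
+    #     boat.set_nav_reference('BT')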
+
+    def set_nav_reference(self, reference):
+        """This function will set the navigation reference property to the specified object reference.
+
+        Parameters
+        ----------
+        reference: str
+            Navigation reference, BT, GGA, or VTG
+        """
+
+        if reference == 'BT':
+            self.selected = 'bt_vel'
+        elif reference == 'GGA':
+            self.selected = 'gga_vel'
+        elif reference == 'VTG':
+            self.selected = 'vtg_vel'
+
+    def change_nav_reference(self, reference, transect):
+        """This function changes the navigation reference to the specified object reference and recomputes
+        the composite tracks, if necessary.
+
+        Parameters
+        ----------
+        reference: str
+            New navigation reference, BT, GGA, or VTG.
+        transect: TransectData
+            Object of TransectData.
+        """
+
+        if reference == 'BT':
+            self.selected = 'bt_vel'
+        elif reference == 'GGA':
+            self.selected = 'gga_vel'
+        elif reference == 'VTG':
+            self.selected = 'vtg_vel'
+        elif reference == 'bt_vel':
+            self.selected = 'bt_vel'
+        elif reference == 'gga_vel':
+            self.selected = 'gga_vel'
+        elif reference == 'vtg_vel':
+            self.selected = 'vtg_vel'
+
+        self.composite_tracks(transect)
+
+    def change_coord_sys(self, new_coord_sys, sensors, adcp):
+        """This function will change the coordinate system of the boat velocity reference.
+        
+        Parameters
+        ----------
+        new_coord_sys: str
+            Specified new coordinate system.
+        sensors: Sensors
+            Object of Sensors.
+        adcp: InstrumentData
+            Object of InstrumentData.
+        """
+
+        # Change coordinate system for all available boat velocity sources
+        if self.bt_vel is not None:
+            self.bt_vel.change_coord_sys(new_coord_sys, sensors, adcp)
+        if self.gga_vel is not None:
+            self.gga_vel.change_coord_sys(new_coord_sys, sensors, adcp)
+        if self.vtg_vel is not None:
+            self.vtg_vel.change_coord_sys(new_coord_sys, sensors, adcp)
+
+    def composite_tracks(self, transect, setting=None):
+        """If new composite setting is provided it is used, if not the setting saved in the object is used
+        
+        Parameters
+        ----------
+        transect: TransectData
+            Object of TransectData.
+        setting: str
+            New setting for composite tracks ('On' or 'Off')
+        """
+
+        if setting is None:
+            setting = self.composite
+        else:
+            # New Setting
+            self.composite = setting
+
+        # Composite depths turned on
+        if setting == 'On':
+            # Initialize variables
+            u_bt = np.array([])
+            v_bt = np.array([])
+            u_gga = np.array([])
+            v_gga = np.array([])
+            u_vtg = np.array([])
+            v_vtg = np.array([])
+
+            # Prepare bt data
+            if self.bt_vel is not None:
+                u_bt = self.bt_vel.u_processed_mps
+                v_bt = self.bt_vel.v_processed_mps
+                # Set to invalid all interpolated velocities
+                valid_bt = self.bt_vel.valid_data[0, :]
+                u_bt[valid_bt == False] = np.nan
+                v_bt[valid_bt == False] = np.nan
+
+            # Prepare gga data
+            if self.gga_vel is not None:
+                # Get gga velocities
+                u_gga = self.gga_vel.u_processed_mps
+                v_gga = self.gga_vel.v_processed_mps
+                # Set to invalid all interpolated velocities
+                valid_gga = self.gga_vel.valid_data[0, :]
+                u_gga[valid_gga == False] = np.nan
+                v_gga[valid_gga == False] = np.nan
+            elif self.bt_vel is not None:
+                u_gga = np.tile([np.nan], u_bt.shape)
+                v_gga = np.tile([np.nan], v_bt.shape)
+
+            # Prepare vtg data
+            if self.vtg_vel is not None:
+                # Get vtg velocities
+                u_vtg = self.vtg_vel.u_processed_mps
+                v_vtg = self.vtg_vel.v_processed_mps
+                # Set to invalid all interpolated velocities
+                valid_vtg = self.vtg_vel.valid_data[0, :]
+                u_vtg[valid_vtg == False] = np.nan
+                v_vtg[valid_vtg == False] = np.nan
+            elif self.bt_vel is not None:
+                u_vtg = np.tile([np.nan], u_bt.shape)
+                v_vtg = np.tile([np.nan], v_bt.shape)
+
+            # Process bt as primary
+            if self.selected == 'bt_vel':
+                # Initialize composite source
+                comp_source = np.tile(np.nan, u_bt.shape)
+
+                # Process u velocity component
+                u_comp = u_bt
+                comp_source[np.isnan(u_comp) == False] = 1
+
+                # If BT data are not valid try VTG and set composite source (BUG HERE DSM)
+                u_comp[np.isnan(u_comp)] = u_vtg[np.isnan(u_comp)]
+                comp_source[np.logical_and(np.isnan(u_comp) == False, np.isnan(comp_source))] = 3
+
+                # If there are still invalid boat velocities, try GGA and set composite source
+                u_comp[np.isnan(u_comp)] = u_gga[np.isnan(u_comp)]
+                comp_source[np.logical_and(np.isnan(u_comp) == False, np.isnan(comp_source))] = 2
+
+                # If there are still invalid boat velocities, use interpolated
+                # values if present and set composite source
+                comp_source[np.logical_and(np.isnan(u_comp) == False, np.isnan(comp_source))] = 0
+
+                # Set composite source to invalid for all remaining invalid boat velocity data
+                comp_source[np.isnan(comp_source)] = -1
+
+                # Process v velocity component.  Assume that the composite source is the same
+                # as the u component
+                v_comp = v_bt
+                v_comp[np.isnan(v_comp)] = v_vtg[np.isnan(v_comp)]
+                v_comp[np.isnan(v_comp)] = v_gga[np.isnan(v_comp)]
+                v_comp[np.isnan(v_comp)] = self.bt_vel.v_processed_mps[np.isnan(v_comp)]
+
+                # Apply the composite settings to the bottom track Boatdata objects
+                self.bt_vel.apply_composite(u_comp, v_comp, comp_source)
+                self.bt_vel.interpolate_composite(transect)
+
+            # Process gga as primary
+            elif self.selected == 'gga_vel':
+                # Initialize the composite source
+                comp_source = np.tile([np.nan], u_bt.shape)
+
+                # Process the u velocity component
+                u_comp = u_gga
+                comp_source[np.isnan(u_comp) == False] = 2
+
+                # If GGA data are not valid try VTG and set composite source
+                u_comp[np.isnan(u_comp)] = u_vtg[np.isnan(u_comp)]
+                comp_source[np.logical_and(np.isnan(u_comp) == False, np.isnan(comp_source))] = 3
+
+                # If there are still invalid boat velocities, try BT and set composite source
+                u_comp[np.isnan(u_comp)] = u_bt[np.isnan(u_comp)]
+                comp_source[np.logical_and(np.isnan(u_comp) == False, np.isnan(comp_source))] = 1
+
+                # If there are still invalid boat velocities, use interpolated values,
+                # if present and set composite source
+                comp_source[np.logical_and(np.isnan(u_comp) == False, np.isnan(comp_source))] = 0
+
+                # Set composite source to invalid for all remaining invalid boat velocity data
+                comp_source[np.isnan(comp_source)] = -1
+
+                # Process v velocity component.  Assume that the composite source is the
+                # same as the u component
+                v_comp = v_gga
+                v_comp[np.isnan(v_comp)] = v_vtg[np.isnan(v_comp)]
+                v_comp[np.isnan(v_comp)] = v_bt[np.isnan(v_comp)]
+                # v_comp[np.isnan(v_comp)] = self.gga_vel.v_processed_mps[np.isnan(v_comp)]
+
+                # Apply the composite settings to the gga BoatData object
+                # For the situation where the transect has no GGA data but other transects do and composite tracks
+                # has been turned on, create the gga_vel object and populate only the u and v processed, comp_source,
+                # and valid_data attributes.
+                if self.gga_vel is None:
+                    self.gga_vel = BoatData()
+                    self.gga_vel.processed_source = np.array([''] * comp_source.shape[0], dtype=object)
+                    self.gga_vel.valid_data = np.full((6, comp_source.shape[0]), False)
+                self.gga_vel.apply_composite(u_comp, v_comp, comp_source)
+                self.gga_vel.interpolate_composite(transect)
+
+            # Process vtg as primary
+            elif self.selected == 'vtg_vel':
+                # Initialize the composite source
+                comp_source = np.tile([np.nan], u_bt.shape)
+
+                # Process the u velocity component
+                u_comp = u_vtg
+                comp_source[np.isnan(u_comp) == False] = 3
+
+                # If VTG data are not valid try GGA and set composite source
+                u_comp[np.isnan(u_comp)] = u_gga[np.isnan(u_comp)]
+                comp_source[np.logical_and(np.isnan(u_comp) == False, np.isnan(comp_source))] = 2
+
+                # If there are still invalid boat velocities, try BT and set composite source
+                u_comp[np.isnan(u_comp)] = u_bt[np.isnan(u_comp)]
+                comp_source[np.logical_and(np.isnan(u_comp) == False, np.isnan(comp_source))] = 1
+
+                # If there are still invalid boat velocities, use interpolated values,
+                # if present and set composite source
+                comp_source[np.logical_and(np.isnan(u_comp) == False, np.isnan(comp_source))] = 0
+
+                # Set composite source to invalid for all remaining invalid boat velocity data
+                comp_source[np.isnan(comp_source)] = -1
+
+                # Process v velocity component.  Assume that the composite source is the
+                # same as the u component
+                v_comp = v_vtg
+                # DSM wrong in Matlab version 1/29/2018 v_comp[np.isnan(v_comp)] = v_vtg[np.isnan(v_comp)]
+                v_comp[np.isnan(v_comp)] = v_gga[np.isnan(v_comp)]
+                v_comp[np.isnan(v_comp)] = v_bt[np.isnan(v_comp)]
+                # v_comp[np.isnan(v_comp)] = self.vtg_vel.v_processed_mps[np.isnan(v_comp)]
+
+                # Apply the composite settings to the vtg BoatData object
+                # For the situation where the transect has no VTG data but other transects do and composite tracks
+                # has been turned on, create the vtg_vel object and populate only the u and v processed, comp_source,
+                # and valid_data attributes.
+                if self.vtg_vel is None:
+                    self.vtg_vel = BoatData()
+                    self.vtg_vel.processed_source = np.array([''] * comp_source.shape[0], dtype=object)
+                    self.vtg_vel.valid_data = np.full((6, comp_source.shape[0]), False)
+                self.vtg_vel.apply_composite(u_comp, v_comp, comp_source)
+                self.vtg_vel.interpolate_composite(transect)
+        else:
+            # Composite tracks off
+
+            # Use only interpolations for bt
+            if self.bt_vel is not None:
+                self.bt_vel.apply_interpolation(transect=transect,
+                                                interpolation_method=transect.boat_vel.bt_vel.interpolate)
+                comp_source = np.tile(np.nan, self.bt_vel.u_processed_mps.shape)
+                comp_source[self.bt_vel.valid_data[0, :]] = 1
+                comp_source[np.logical_and(np.isnan(comp_source),
+                                           (np.isnan(self.bt_vel.u_processed_mps) == False))] = 0
+                comp_source[np.isnan(comp_source)] = -1
+                self.bt_vel.apply_composite(u_composite=self.bt_vel.u_processed_mps,
+                                            v_composite=self.bt_vel.v_processed_mps,
+                                            composite_source=comp_source)
+
+            # Use only interpolations for gga
+            if self.gga_vel is not None:
+                # This if statement handles the situation where there is no GPS data for a transect but there is GPS
+                # data for other transects and the user has turned on / off composite tracks.
+                if self.gga_vel.u_mps is not None:
+                    self.gga_vel.apply_interpolation(transect=transect,
+                                                     interpolation_method=transect.boat_vel.gga_vel.interpolate)
+                    comp_source = np.tile(np.nan, self.gga_vel.u_processed_mps.shape)
+                    comp_source[self.gga_vel.valid_data[0, :]] = 2
+                    comp_source[np.logical_and(np.isnan(comp_source),
+                                               (np.isnan(self.gga_vel.u_processed_mps) == False))] = 0
+                    comp_source[np.isnan(comp_source)] = -1
+                    self.gga_vel.apply_composite(u_composite=self.gga_vel.u_processed_mps,
+                                                 v_composite=self.gga_vel.v_processed_mps,
+                                                 composite_source=comp_source)
+                else:
+                    self.gga_vel = None
+
+            # Use only interpolations for vtg
+            if self.vtg_vel is not None:
+                # This if statement handles the situation where there is no GPS data for a transect but there is GPS
+                # data for other transects and the user has turned on / off composite tracks.
+                if self.vtg_vel.u_mps is not None:
+                    self.vtg_vel.apply_interpolation(transect=transect,
+                                                     interpolation_method=transect.boat_vel.vtg_vel.interpolate)
+                    comp_source = np.tile(np.nan, self.vtg_vel.u_processed_mps.shape)
+                    comp_source[self.vtg_vel.valid_data[0, :]] = 3
+                    comp_source[np.logical_and(np.isnan(comp_source),
+                                               (np.isnan(self.vtg_vel.u_processed_mps) == False))] = 0
+                    comp_source[np.isnan(comp_source)] = -1
+                    self.vtg_vel.apply_composite(u_composite=self.vtg_vel.u_processed_mps,
+                                                 v_composite=self.vtg_vel.v_processed_mps,
+                                                 composite_source=comp_source)
+                else:
+                    self.vtg_vel = None
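+
+    # Note on the comp_source codes assigned above: 1 = bottom track, 2 = GGA,
+    # 3 = VTG, 0 = interpolated, -1 = invalid.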
+
+    @staticmethod
+    def compute_boat_track(transect, ref=None):
+        """Computes the shiptrack coordinates, along track distance, and distance made
+        good for the selected boat reference.
+
+        Parameters
+        ----------
+        transect: TransectData
+            Object of TransectData
+        ref: str
+            Setting to determine which navigation reference should be used. If None, the selected reference is used.
+
+        Returns
+        -------
+        boat_track: dict
+            Dictionary containing shiptrack coordinates (track_x_m, track_y_m), along track distance (distance_m),
+            and distance made good (dmg_m)
+        """
+
+        # Initialize dictionary
+        boat_track = {'track_x_m': np.nan, 'track_y_m': np.nan, 'distance_m': np.nan, 'dmg_m': np.nan}
+
+        # Compute incremental track coordinates
+        if ref is None:
+            boat_vel_selected = getattr(transect.boat_vel, transect.boat_vel.selected)
+        else:
+            boat_vel_selected = getattr(transect.boat_vel, ref)
+
+        if boat_vel_selected is None:
+            boat_vel_selected = getattr(transect.boat_vel, 'bt_vel')
+        track_x = boat_vel_selected.u_processed_mps[transect.in_transect_idx] * \
+            transect.date_time.ens_duration_sec[transect.in_transect_idx]
+        track_y = boat_vel_selected.v_processed_mps[transect.in_transect_idx] * \
+            transect.date_time.ens_duration_sec[transect.in_transect_idx]
+
+        # Check for any valid data
+        idx = np.where(np.logical_not(np.isnan(track_x)))
+        if idx[0].size > 1:
+            # Compute variables
+            boat_track['distance_m'] = np.nancumsum(np.sqrt(track_x ** 2 + track_y ** 2))
+            boat_track['track_x_m'] = np.nancumsum(track_x)
+            boat_track['track_y_m'] = np.nancumsum(track_y)
+            boat_track['dmg_m'] = np.sqrt(boat_track['track_x_m'] ** 2 + boat_track['track_y_m'] ** 2)
+
+        return boat_track
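+
+    # Hedged usage sketch (illustrative; `transect` is assumed populated):
+    #
+    #     track = BoatStructure.compute_boat_track(transect)
+    #     track['dmg_m'][-1]   # distance made good at the last ensemble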
+
+    def populate_from_qrev_mat(self, transect):
+        """Populates the object using data from previously saved QRev Matlab file.
+
+        Parameters
+        ----------
+        transect: mat_struct
+           Matlab data structure obtained from sio.loadmat
+        """
+
+        if hasattr(transect, 'boatVel'):
+            if hasattr(transect.boatVel, 'btVel'):
+                if hasattr(transect.boatVel.btVel, 'u_mps'):
+                    self.bt_vel = BoatData()
+                    self.bt_vel.populate_from_qrev_mat(transect.boatVel.btVel)
+            if hasattr(transect.boatVel, 'ggaVel'):
+                if hasattr(transect.boatVel.ggaVel, 'u_mps'):
+                    self.gga_vel = BoatData()
+                    self.gga_vel.populate_from_qrev_mat(transect.boatVel.ggaVel)
+            if hasattr(transect.boatVel, 'vtgVel'):
+                if hasattr(transect.boatVel.vtgVel, 'u_mps'):
+                    self.vtg_vel = BoatData()
+                    self.vtg_vel.populate_from_qrev_mat(transect.boatVel.vtgVel)
+            nav_dict = {'btVel': 'bt_vel', 'bt_vel': 'bt_vel',
+                        'ggaVel': 'gga_vel', 'gga_vel': 'gga_vel',
+                        'vtgVel': 'vtg_vel', 'vtg_vel': 'vtg_vel'}
+            self.selected = nav_dict[transect.boatVel.selected]
+
diff --git a/Classes/CompassCal.py b/Classes/CompassCal.py
new file mode 100644
index 0000000..2b279e9
--- /dev/null
+++ b/Classes/CompassCal.py
@@ -0,0 +1,41 @@
+import re
+
+
+class CompassCal(object):
+    """Class stores compass calibration or evaluation data and parses the compass error from the raw data.
+
+    Attributes
+    ----------
+    time_stamp: str
+        Time of calibration or evaluation (mm/dd/yyyy).
+    data: str
+        All calibration or evaluation data provided by the manufacturer.
+    error: str
+        Remaining compass error after calibration or from evaluation, in degrees
+        (parsed as a string; 'N/A' if not found).
+    """
+
+    def __init__(self):
+        """Initialize class and instance variables."""
+
+        self.time_stamp = None
+        self.data = None
+        self.error = None
+
+    def populate_data(self, time_stamp, data_in):
+        """Store data and parse compass error from compass data.
+
+        Parameters
+        ----------
+        time_stamp: str
+            Time of calibration or evaluation (mm/dd/yyyy).
+        data_in: str
+            All calibration or evaluation data provided by the manufacturer.
+        """
+        self.time_stamp = time_stamp
+        self.data = data_in
+
+        splits = re.split('(Total error:|Double Cycle Errors:|Error from calibration:)', data_in)
+        if len(splits) > 1:
+            self.error = re.search(r'\d+\.*\d*', splits[2])[0]
+        else:
+            self.error = 'N/A'
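+
+    # Illustrative sketch (hypothetical calibration text): for data_in containing
+    # "Total error: 0.2 deg", the split isolates the text after the marker and
+    # self.error is parsed as the string '0.2'; if no marker is found, self.error
+    # is set to 'N/A'.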
diff --git a/Classes/ComputeExtrap.py b/Classes/ComputeExtrap.py
new file mode 100644
index 0000000..fd272d1
--- /dev/null
+++ b/Classes/ComputeExtrap.py
@@ -0,0 +1,330 @@
+import numpy as np
+from Classes.SelectFit import SelectFit
+from Classes.ExtrapQSensitivity import ExtrapQSensitivity
+from Classes.NormData import NormData
+
+
+class ComputeExtrap(object):
+    """Class to compute the optimized or manually specified extrapolation methods
+
+    Attributes
+    ----------
+    threshold: float
+        Threshold as a percent for determining if a median is valid
+    subsection: list
+        Percent of discharge
+    fit_method: str
+        Method used to determine fit.  Automatic or manual
+    norm_data: NormData
+        Object of class NormData
+    sel_fit: SelectFit
+        Object of class SelectFit
+    q_sensitivity: ExtrapQSensitivity
+        Object of class ExtrapQSensitivity
+    messages: list
+        List of warning messages
+    use_weighted: bool
+        Specifies if discharge weighted medians are used in extrapolations
+    sub_from_left: bool
+        Specifies whether subsectioning starts from the left bank (left to right)
+    use_q: bool
+        Specifies whether to use discharge rather than the cross product when subsectioning
+
+    """
+    
+    def __init__(self):
+        """Initialize instance variables."""
+
+        self.threshold = None  # Threshold as a percent for determining if a median is valid
+        self.subsection = None  # Percent of discharge, does not account for transect direction
+        self.fit_method = None  # Method used to determine fit.  Automatic or manual
+        self.norm_data = []  # Object of class norm data
+        self.sel_fit = []  # Object of class SelectFit
+        self.q_sensitivity = None  # Object of class ExtrapQSensitivity
+        self.messages = []  # List of warning messages
+        self.use_weighted = False
+        self.use_q = False
+        self.sub_from_left = False
+        
+    def populate_data(self, transects, compute_sensitivity=True, use_weighted=False, use_q=True, sub_from_left=True):
+        """Store data in instance variables.
+
+        Parameters
+        ----------
+        transects: list
+            List of transects of TransectData
+        compute_sensitivity: bool
+            Determines if sensitivity should be computed.
+        use_weighted: bool
+            Specifies if discharge weighted medians are used in extrapolations
+        use_q: bool
+            Specifies whether to use discharge rather than the cross product when subsectioning
+        sub_from_left: bool
+            Specifies whether subsectioning starts from the left bank (left to right)
+        """
+
+        self.threshold = 20
+        self.subsection = [0, 100]
+        self.fit_method = 'Automatic'
+        self.use_weighted = use_weighted
+        self.use_q = use_q
+        self.sub_from_left = sub_from_left
+        self.process_profiles(transects=transects, data_type='q', use_weighted=use_weighted,
+                              use_q=use_q, sub_from_left=sub_from_left)
+
+        # Compute the sensitivity of the final discharge to changes in extrapolation methods
+        if compute_sensitivity:
+            self.q_sensitivity = ExtrapQSensitivity()
+            self.q_sensitivity.populate_data(transects=transects, extrap_fits=self.sel_fit)
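+
+    # Hedged usage sketch (illustrative; `transects` is assumed to be a list of
+    # populated, checked TransectData objects):
+    #
+    #     extrap = ComputeExtrap()
+    #     extrap.populate_data(transects=transects, use_weighted=True)
+    #     extrap.sel_fit[-1].top_fit_r2   # fit diagnostic for the composite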
+
+    def populate_from_qrev_mat(self, meas_struct):
+        """Populates the object using data from previously saved QRev Matlab file.
+
+        Parameters
+        ----------
+        meas_struct: mat_struct
+           Matlab data structure obtained from sio.loadmat
+        """
+
+        if hasattr(meas_struct, 'extrapFit'):
+            self.threshold = meas_struct.extrapFit.threshold
+            self.subsection = meas_struct.extrapFit.subsection
+            self.fit_method = meas_struct.extrapFit.fitMethod
+
+            # Check for consistency between transects and norm_data. If only checked transects were saved, the
+            # normData and selfit will also include unchecked transects which must be removed prior to
+            # continuing to process.
+
+            # If only a single transect the meas_struct.transects will be structure not an array, so the len method
+            # won't work.
+            try:
+                n_transects = len(meas_struct.transects)
+            except TypeError:
+                n_transects = 1
+
+            try:
+                n_data = len(meas_struct.extrapFit.normData) - 1
+            except TypeError:
+                n_data = 1
+
+            if n_transects != n_data:
+                # normData needs adjustment to match transects
+                file_names = []
+                valid_norm_data = []
+                valid_sel_fit = []
+                # Create list of transect filenames
+                if n_transects == 1:
+                    file_names.append(meas_struct.transects.fileName)
+                else:
+                    for transect in meas_struct.transects:
+                        file_names.append(transect.fileName)
+                # Create a list of norm_data and sel_fit objects that match the filenames in transects
+                for n in range(len(meas_struct.extrapFit.normData) - 1):
+                    if meas_struct.extrapFit.normData[n].fileName in file_names:
+                        valid_norm_data.append(meas_struct.extrapFit.normData[n])
+                        valid_sel_fit.append(meas_struct.extrapFit.selFit[n])
+                # Append the whole measurement objects
+                valid_norm_data.append(meas_struct.extrapFit.normData[-1])
+                valid_sel_fit.append(meas_struct.extrapFit.selFit[-1])
+                # Update meas_struct so normData and selFit match transects
+                meas_struct.extrapFit.normData = np.array(valid_norm_data)
+                meas_struct.extrapFit.selFit = np.array(valid_sel_fit)
+
+            self.norm_data = NormData.qrev_mat_in(meas_struct.extrapFit)
+            self.sel_fit = SelectFit.qrev_mat_in(meas_struct.extrapFit)
+            self.q_sensitivity = ExtrapQSensitivity()
+            self.q_sensitivity.populate_from_qrev_mat(meas_struct.extrapFit)
+            if hasattr(meas_struct.extrapFit, 'use_weighted'):
+                self.use_weighted = meas_struct.extrapFit.use_weighted
+            else:
+                self.use_weighted = False
+
+            if hasattr(meas_struct.extrapFit, 'use_q'):
+                self.use_q = meas_struct.extrapFit.use_q
+            else:
+                self.use_q = False
+
+            if hasattr(meas_struct.extrapFit, 'sub_from_left'):
+                self.sub_from_left = meas_struct.extrapFit.sub_from_left
+            else:
+                self.sub_from_left = False
+
+            if type(meas_struct.extrapFit.messages) is str:
+                self.messages = [meas_struct.extrapFit.messages]
+            elif type(meas_struct.extrapFit.messages) is np.ndarray:
+                self.messages = meas_struct.extrapFit.messages.tolist()
+
+    def process_profiles(self, transects, data_type, use_weighted=None, use_q=True, sub_from_left=True):
+        """Function that coordinates the fitting process.
+
+        Parameters
+        ----------
+        transects: list
+            List of TransectData objects
+        data_type: str
+            Type of data processing (q or v)
+        use_weighted: bool
+            Specifies if discharge weighted medians are used in extrapolations
+        use_q: bool
+            Specifies whether to use discharge rather than the cross product when subsectioning
+        sub_from_left: bool
+            Specifies whether subsectioning starts from the left bank (left to right)
+        """
+        if use_weighted is not None:
+            self.use_weighted = use_weighted
+        else:
+            self.use_weighted = self.norm_data[-1].use_weighted
+
+        self.use_q = use_q
+        self.sub_from_left = sub_from_left
+
+        # Compute normalized data for each transect
+        self.norm_data = []
+        for transect in transects:
+            norm_data = NormData()
+            norm_data.populate_data(transect=transect,
+                                    data_type=data_type,
+                                    threshold=self.threshold,
+                                    data_extent=self.subsection,
+                                    use_weighted=self.use_weighted,
+                                    use_q=self.use_q,
+                                    sub_from_left=self.sub_from_left)
+            self.norm_data.append(norm_data)
+
+        # Compute composite normalized data
+        comp_data = NormData()
+        comp_data.use_q = self.norm_data[-1].use_q
+        comp_data.sub_from_left = self.norm_data[-1].sub_from_left
+        comp_data.create_composite(transects=transects, norm_data=self.norm_data, threshold=self.threshold)
+        self.norm_data.append(comp_data)
+
+        # Compute the fit for the selected method
+        if self.fit_method == 'Manual':
+            for n in range(len(transects)):
+                self.sel_fit[n].populate_data(normalized=self.norm_data[n],
+                                              fit_method=self.fit_method,
+                                              top=transects[n].extrap.top_method,
+                                              bot=transects[n].extrap.bot_method,
+                                              exponent=transects[n].extrap.exponent)
+        else:
+            self.sel_fit = []
+            for n in range(len(self.norm_data)):
+                sel_fit = SelectFit()
+                sel_fit.populate_data(self.norm_data[n], self.fit_method)
+                self.sel_fit.append(sel_fit)
+
+        if self.sel_fit[-1].top_fit_r2 is not None:
+            # Evaluate if there is a potential that a 3-point top method may be appropriate
+            if (self.sel_fit[-1].top_fit_r2 > 0.9 or self.sel_fit[-1].top_r2 > 0.9) \
+                    and np.abs(self.sel_fit[-1].top_max_diff) > 0.2:
+                self.messages.append('The measurement profile may warrant a 3-point fit at the top')
+                
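+    # Minimal calling sketch (hypothetical `measurement` object; names are
+    # illustrative only, not part of this class):
+    #     extrap_fit.process_profiles(transects=measurement.transects, data_type='q')
+    #     extrap_fit.update_q_sensitivity(measurement.transects)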
+    def update_q_sensitivity(self, transects):
+        """Updates the discharge sensitivity values.
+
+        Parameters
+        ----------
+        transects: list
+            List of TransectData objects
+        """
+        self.q_sensitivity = ExtrapQSensitivity()
+        self.q_sensitivity.populate_data(transects, self.sel_fit)
+        
+    def change_fit_method(self, transects, new_fit_method, idx, top=None, bot=None, exponent=None, compute_qsens=True):
+        """Function to change the extrapolation method.
+
+        Parameters
+        ----------
+        transects: list
+            List of TransectData objects
+        new_fit_method: str
+            Identifies fit method automatic or manual
+        idx: int
+            Index to the specified transect or measurement in NormData
+        top: str
+            Specifies top fit
+        bot: str
+            Specifies bottom fit
+        exponent: float
+            Specifies exponent for power or no slip fits
+        compute_qsens: bool
+            Specifies if the discharge sensitivities should be recomputed
+        """
+        self.fit_method = new_fit_method
+
+        self.sel_fit[idx].populate_data(self.norm_data[idx], new_fit_method, top=top, bot=bot, exponent=exponent)
+        # Note: "and" is required here; the original bitwise "&" bound tighter than "=="
+        if compute_qsens and idx == len(self.norm_data) - 1:
+            self.q_sensitivity = ExtrapQSensitivity()
+            self.q_sensitivity.populate_data(transects, self.sel_fit)
+        
+    def change_threshold(self, transects, data_type, threshold):
+        """Function to change the threshold for accepting the increment median as valid.  The threshold
+        is in percent of the median number of points in all increments.
+
+        Parameters
+        ----------
+        transects: list
+            List of TransectData objects
+        data_type: str
+            Specifies the data type (discharge or velocity)
+        threshold: float
+            Percent of data that must be in a median to include the median in the fit algorithm
+        """
+        
+        self.threshold = threshold
+        self.process_profiles(transects=transects, data_type=data_type)
+        self.q_sensitivity = ExtrapQSensitivity()
+        self.q_sensitivity.populate_data(transects=transects, extrap_fits=self.sel_fit)
+        
+    def change_extents(self, transects, data_type, extents, use_q, sub_from_left):
+        """Function allows the data to be subsection by specifying the percent cumulative discharge
+        for the start and end points.  Currently this function does not consider transect direction.
+
+        Parameters
+        ----------
+        transects: list
+            List of TransectData objects
+        data_type: str
+            Specifies the data type (discharge or velocity)
+        extents: list
+            List containing two values, the minimum and maximum discharge percentages used for subsectioning
+        use_q: bool
+            Specifies whether to use the discharge rather than the cross product when subsectioning
+        sub_from_left: bool
+            Specifies whether subsectioning should proceed from left to right.
+        """
+        
+        self.subsection = extents
+        self.use_q = use_q
+        self.sub_from_left = sub_from_left
+        self.process_profiles(transects=transects, data_type=data_type)
+        self.q_sensitivity = ExtrapQSensitivity()
+        self.q_sensitivity.populate_data(transects=transects, extrap_fits=self.sel_fit)
+        
+    def change_data_type(self, transects, data_type):
+        """Changes the data type to be processed in extrap.
+
+        Parameters
+        ----------
+        transects: list
+            List of TransectData objects
+        data_type: str
+            Specifies the data type (discharge or velocity)
+        """
+        if data_type.lower() == 'q':
+            use_weighted = self.use_weighted
+        else:
+            use_weighted = False
+
+        self.process_profiles(transects=transects, data_type=data_type, use_weighted=use_weighted)
+        self.q_sensitivity = ExtrapQSensitivity()
+        self.q_sensitivity.populate_data(transects=transects, extrap_fits=self.sel_fit)
+
+    def change_data_auto(self, transects):
+        """Changes the data selection settings to automatic.
+
+        Parameters
+        ----------
+        transects: list
+            List of TransectData objects
+        """
+        self.threshold = 20
+        self.subsection = [0, 100]
+        self.process_profiles(transects=transects, data_type='q', use_weighted=self.use_weighted)
+
+        # Compute the sensitivity of the final discharge to changes in extrapolation methods
+        self.q_sensitivity = ExtrapQSensitivity()
+        self.q_sensitivity.populate_data(transects=transects, extrap_fits=self.sel_fit)
diff --git a/Classes/CoordError.py b/Classes/CoordError.py
new file mode 100644
index 0000000..1411a30
--- /dev/null
+++ b/Classes/CoordError.py
@@ -0,0 +1,4 @@
+class CoordError(Exception):
+
+    def __init__(self, text):
+        self.text = text
+        super().__init__(text)
\ No newline at end of file
diff --git a/Classes/DateTime.py b/Classes/DateTime.py
new file mode 100644
index 0000000..9f7e3d3
--- /dev/null
+++ b/Classes/DateTime.py
@@ -0,0 +1,77 @@
+import numpy as np
+
+class DateTime(object):
+    """This stores the date and time data in Python compatible format.
+
+    Attributes
+    ----------
+    date: str
+        Measurement date as mm/dd/yyyy
+    start_serial_time: float
+        Python serial time for start of transect (seconds since 1/1/1970), timestamp
+    end_serial_time: float
+        Python serial time for end of transect (seconds since 1/1/1970), timestamp
+    transect_duration_sec: float
+        Duration of transect, in seconds.
+    ens_duration_sec: np.array(float)
+        Duration of each ensemble, in seconds.
+    """
+    
+    def __init__(self):
+        """Initialize class and instance variables."""
+
+        self.date = None  # Measurement date mm/dd/yyyy
+        self.start_serial_time = None  # Python serial time for start of transect, timestamp
+        self.end_serial_time = None  # Python serial time for end of transect, timestamp
+        self.transect_duration_sec = None  # Duration of transect in seconds
+        self.ens_duration_sec = None  # Duration of each ensemble in seconds
+        
+    def populate_data(self, date_in, start_in, end_in, ens_dur_in):
+        """Populate data in object.
+
+        Parameters
+        ----------
+        date_in: str
+            Measurement date as mm/dd/yyyy
+        start_in: float
+            Python serial time for start of transect.
+        end_in: float
+            Python serial time for end of transect.
+        ens_dur_in: np.array(float)
+            Duration of each ensemble, in seconds.
+        """
+        
+        self.date = date_in
+        self.start_serial_time = start_in
+        self.end_serial_time = end_in
+        self.transect_duration_sec = float(end_in - start_in)
+        self.ens_duration_sec = ens_dur_in.astype(float)
+
+    def populate_from_qrev_mat(self, transect):
+        """Populates the object using data from previously saved QRev Matlab file.
+
+        Parameters
+        ----------
+        transect: mat_struct
+           Matlab data structure obtained from sio.loadmat
+        """
+
+        if hasattr(transect, 'dateTime'):
+            seconds_day = 86400
+            time_correction = 719529.0000000003
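+            # 719529 is the Matlab serial date number (datenum) for 1 Jan 1970,
+            # so (datenum - 719529) * 86400 converts a Matlab datenum in days to
+            # a Unix timestamp in seconds; the tiny fractional offset guards
+            # against floating-point round-off in values saved by Matlab.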
+
+            self.date = transect.dateTime.date
+            self.start_serial_time = (transect.dateTime.startSerialTime - time_correction) * seconds_day
+            self.end_serial_time = (transect.dateTime.endSerialTime - time_correction) * seconds_day
+            self.transect_duration_sec = float(transect.dateTime.transectDuration_sec)
+            try:
+                self.ens_duration_sec = transect.dateTime.ensDuration_sec.astype(float)
+            except AttributeError:
+                self.ens_duration_sec = np.array([np.nan])
+
diff --git a/Classes/DepthData.py b/Classes/DepthData.py
new file mode 100644
index 0000000..9469088
--- /dev/null
+++ b/Classes/DepthData.py
@@ -0,0 +1,1009 @@
+import copy
+import concurrent.futures
+import numpy as np
+import itertools as it
+from numpy.matlib import repmat
+from MiscLibs.common_functions import iqr, nan_less, nan_greater
+from MiscLibs.robust_loess_compiled import rloess
+from MiscLibs.non_uniform_savgol import non_uniform_savgol
+from MiscLibs.run_iqr import run_iqr, compute_quantile
+
+
+class DepthData(object):
+    """Process and store depth data.
+    Supported sources include bottom track
+    vertical beam, and external depth sounder.
+
+    Attributes
+    ----------
+        depth_orig_m: np.array
+            Original multi-beam depth data from transect file (includes draft_orig) in meters.
+        depth_beams_m: np.array
+            Depth data from transect file adjusted for any draft changes, in meters.
+        depth_processed_m: np.array
+            Depth data filtered and interpolated.
+        depth_freq_kHz: float
+            Defines acoustic frequency used to measure depth.
+        depth_invalid_index: np.array
+            Index of depths marked invalid.
+        depth_source: str
+            Source of depth data ("BT", "VB", "DS").
+        depth_source_ens: np.array(object)
+            Source of each depth value ("BT", "VB", "DS", "IN").
+        draft_orig_m: float
+            Original draft from data files, in meters.
+        draft_use_m: float
+            Draft used in computation of depth_beams_m and depth_cell_depth_m.
+        depth_cell_depth_orig_m: np.array
+            Depth to centerline of depth cells in raw data, in meters.
+        depth_cell_depth_m: np.array
+            Depth to centerline of depth cells adjusted for draft or speed of sound changes, in meters.
+        depth_cell_size_orig_m: np.array
+            Size of depth cells in meters from raw data, in meters.
+        depth_cell_size_m: np.array
+            Size of depth cells adjusted for draft or speed of sound changes, in meters.
+        smooth_depth: np.array
+            Smoothed beam depth, in meters.
+        smooth_upper_limit: np.array
+            Smooth function upper limit of window, in meters.
+        smooth_lower_limit: np.array
+            Smooth function lower limit of window, in meters.
+        avg_method: str
+            Defines averaging method: "Simple", "IDW", only applicable to bottom track.
+        filter_type: str
+            Type of filter: "None", "TRDI", "Smooth".
+        interp_type: str
+            Type of interpolation: "None", "Linear", "Smooth".
+        valid_data_method: str
+            QRev or TRDI.
+        valid_data: np.array
+            Logical array of valid mean depth for each ensemble.
+        valid_beams: np.array
+            Logical array, 1 row for each beam identifying valid data.
+    """
+    
+    def __init__(self):
+        """Initialize attributes.
+        """
+
+        self.depth_orig_m = None  # Original multi-beam depth data from transect file (includes draft_orig) in meters
+        self.depth_beams_m = None  # Depth data from transect file adjusted for any draft changes, in meters
+        self.depth_processed_m = None  # Depth data filtered and interpolated
+        self.depth_freq_kHz = None  # Defines acoustic frequency in kHz used to measure depth
+        self.depth_invalid_index = None  # Index of depths marked invalid
+        self.depth_source = None  # Source of depth data ("BT", "VB", "DS")
+        self.depth_source_ens = None  # Source of each depth value ("BT", "VB", "DS", "IN")
+        self.draft_orig_m = None  # Original draft from data files, in meters
+        self.draft_use_m = None  # Draft used in computation of depth_beams_m and depth_cell_depth_m
+        self.depth_cell_depth_orig_m = None  # Depth cell range from the transducer, in meters
+        self.depth_cell_depth_m = None  # Depth to centerline of depth cells, in meters
+        self.depth_cell_size_orig_m = None  # Size of depth cells in meters from raw data
+        self.depth_cell_size_m = None  # Size of depth cells in meters
+        self.smooth_depth = None  # Smoothed beam depth
+        self.smooth_upper_limit = None  # Smooth function upper limit of window
+        self.smooth_lower_limit = None  # Smooth function lower limit of window
+        self.avg_method = None  # Defines averaging method: "Simple", "IDW"
+        self.filter_type = None  # Type of filter: "None", "TRDI", "Smooth"
+        self.interp_type = None  # Type of interpolation: "None", "Linear", "Smooth"
+        self.valid_data_method = None  # QRev or TRDI
+        self.valid_data = None  # Logical array of valid mean depth for each ensemble
+        self.valid_beams = None  # Logical array, 1 row for each beam identifying valid data
+        
+    def populate_data(self, depth_in, source_in, freq_in, draft_in, cell_depth_in, cell_size_in):
+        """Stores data in DepthData.
+
+        Parameters
+        ----------
+        depth_in: np.array
+            Raw depth data, in meters.
+        source_in: str
+            Source of raw depth data.
+        freq_in: float
+            Acoustic frequency used to measure depths, in kHz.
+        draft_in: float
+            Draft of transducer used to measure depths, in meters.
+        cell_depth_in: np.array
+            Depth to centerline of each depth cell, in meters. If source does not have depth cells the depth cell depth
+            from bottom track should be used.
+        cell_size_in: np.array
+            Size of each depth cell, in meters. If source does not have depth cells the depth cell size
+            from bottom track should be used.
+        """
+
+        self.depth_orig_m = depth_in
+        self.depth_beams_m = depth_in
+        self.depth_source = source_in
+        self.depth_source_ens = np.array([source_in] * depth_in.shape[-1], dtype=object)
+        self.depth_freq_kHz = freq_in
+        self.draft_orig_m = draft_in
+        self.draft_use_m = draft_in
+        self.filter_type = 'None'
+        self.interp_type = 'None'
+        self.valid_data_method = 'QRev'
+        
+        # For BT data set method to average multiple beam depths
+        if source_in == 'BT':
+            self.avg_method = 'IDW'
+        else:
+            self.avg_method = 'None'
+
+        # Store cell data
+        self.depth_cell_depth_orig_m = cell_depth_in
+        self.depth_cell_size_orig_m = cell_size_in
+        self.depth_cell_size_m = cell_size_in
+        self.depth_cell_depth_m = cell_depth_in
+
+        # Remove all filters to initialize data
+        self.apply_filter('dummy', filter_type='Off')
+
+    def populate_from_qrev_mat(self, mat_data):
+        """Populates the object using data from previously saved QRev Matlab file.
+
+        Parameters
+        ----------
+        mat_data: mat_struct
+           Matlab data structure obtained from sio.loadmat
+        """
+
+        self.depth_processed_m = mat_data.depthProcessed_m
+        self.depth_freq_kHz = mat_data.depthFreq_Hz
+
+        # Support for older files that may not have had invalid index
+        if len(mat_data.depthInvalidIndex) > 0:
+            self.depth_invalid_index = mat_data.depthInvalidIndex
+        else:
+            self.depth_invalid_index = None
+
+        self.depth_source = mat_data.depthSource
+        self.depth_source_ens = mat_data.depthSourceEns
+        self.draft_orig_m = mat_data.draftOrig_m
+        self.draft_use_m = mat_data.draftUse_m
+        self.depth_cell_depth_orig_m = mat_data.depthCellDepthOrig_m
+        self.depth_cell_depth_m = mat_data.depthCellDepth_m
+        if hasattr(mat_data, "depthCellSizeOrig_m"):
+            self.depth_cell_size_orig_m = mat_data.depthCellSizeOrig_m
+        else:
+            self.depth_cell_size_orig_m = mat_data.depthCellSize_m
+        self.depth_cell_size_m = mat_data.depthCellSize_m
+
+        # Configure arrays properly for VB and DS
+        if mat_data.depthSource == 'BT':
+            self.depth_beams_m = mat_data.depthBeams_m
+            self.depth_orig_m = mat_data.depthOrig_m
+            self.smooth_depth = mat_data.smoothDepth
+            self.smooth_upper_limit = mat_data.smoothUpperLimit
+            self.smooth_lower_limit = mat_data.smoothLowerLimit
+        else:
+            self.depth_beams_m = mat_data.depthBeams_m.reshape(1, -1)
+            self.depth_orig_m = mat_data.depthOrig_m.reshape(1, -1)
+            self.smooth_depth = mat_data.smoothDepth.reshape(1, -1)
+            self.smooth_upper_limit = mat_data.smoothUpperLimit.reshape(1, -1)
+            self.smooth_lower_limit = mat_data.smoothLowerLimit.reshape(1, -1)
+
+        self.avg_method = mat_data.avgMethod
+        self.filter_type = mat_data.filterType
+        self.interp_type = mat_data.interpType
+        self.valid_data_method = mat_data.validDataMethod
+        if isinstance(mat_data.validData, int):
+            self.valid_data = np.array([mat_data.validData]).astype(bool)
+        else:
+            self.valid_data = mat_data.validData.astype(bool)
+
+        # Reshape array for vertical beam and depth sounder
+        if len(mat_data.validBeams.shape) < 2:
+            self.valid_beams = mat_data.validBeams.reshape(1, -1)
+        else:
+            self.valid_beams = mat_data.validBeams
+
+        self.valid_beams = self.valid_beams.astype(bool)
+
+        # Handle data with one ensemble and multiple cells or one cell and multiple ensembles
+        if len(self.depth_beams_m.shape) == 1:
+            # One ensemble multiple cells
+            self.depth_beams_m = self.depth_beams_m.reshape(self.depth_beams_m.shape[0], 1)
+            self.depth_cell_depth_m = self.depth_cell_depth_m.reshape(self.depth_cell_depth_m.shape[0], 1)
+            self.depth_cell_depth_orig_m = self.depth_cell_depth_orig_m.reshape(
+                self.depth_cell_depth_orig_m.shape[0], 1)
+            self.depth_cell_size_m = self.depth_cell_size_m.reshape(self.depth_cell_size_m.shape[0], 1)
+            self.depth_cell_size_orig_m = self.depth_cell_size_orig_m.reshape(self.depth_cell_size_orig_m.shape[0], 1)
+            self.depth_orig_m = self.depth_orig_m.reshape(self.depth_orig_m.shape[0], 1)
+            self.depth_processed_m = np.array([self.depth_processed_m])
+            self.smooth_depth = self.smooth_depth.reshape(self.smooth_depth.shape[0], 1)
+            self.smooth_lower_limit = self.smooth_lower_limit.reshape(self.smooth_lower_limit.shape[0], 1)
+            self.smooth_upper_limit = self.smooth_upper_limit.reshape(self.smooth_upper_limit.shape[0], 1)
+            self.valid_data = np.array([self.valid_data])
+            self.depth_source_ens = np.array([mat_data.depthSourceEns])
+        elif len(self.depth_cell_depth_m.shape) == 1:
+            # One cell, multiple ensembles
+            self.depth_cell_depth_m = self.depth_cell_depth_m.reshape(1, self.depth_cell_depth_m.shape[0])
+            self.depth_cell_depth_orig_m = self.depth_cell_depth_orig_m.reshape(1,
+                                                                                self.depth_cell_depth_orig_m.shape[0])
+            self.depth_cell_size_m = self.depth_cell_size_m.reshape(1, self.depth_cell_size_m.shape[0])
+            self.depth_cell_size_orig_m = self.depth_cell_size_orig_m.reshape(1, self.depth_cell_size_orig_m.shape[0])
+
+    def change_draft(self, draft):
+        """Changes the draft for object
+        
+        draft: new draft for object
+        """
+        # Compute draft change
+        draft_change = draft - self.draft_use_m
+        self.draft_use_m = draft
+        
+        # Apply draft to ensemble depths if BT or VB
+        if self.depth_source != 'DS':
+            self.depth_beams_m = self.depth_beams_m + draft_change
+            self.depth_processed_m = self.depth_processed_m + draft_change 
+            
+        # Apply draft to depth cell locations
+        if len(self.depth_cell_depth_m) > 0:
+            self.depth_cell_depth_m = self.depth_cell_depth_m + draft_change
+
+    def add_cell_data(self, bt_depths):
+        """Adds cell data to depth objects with no cell data
+        such as the vertical beam and depth sounder.  This allows
+        a single object to contain all the required depth data
+
+        Parameters
+        ----------
+        bt_depths: DepthData
+            Object of DepthData with bottom track depths
+        """
+        
+        self.depth_cell_depth_orig_m = bt_depths.depth_cell_depth_orig_m
+        self.depth_cell_size_m = bt_depths.depth_cell_size_m
+        self.depth_cell_depth_m = bt_depths.depth_cell_depth_m
+
+    def compute_avg_bt_depth(self, method=None):
+        """Computes average depth for BT_Depths
+
+        Parameters
+        ----------
+        method: str
+            Averaging method (Simple or IDW)
+        """
+
+        if method is not None:
+            self.avg_method = method
+
+        # Get valid depths
+        depth = np.copy(self.depth_beams_m)
+        depth[np.logical_not(self.valid_beams)] = np.nan
+
+        # Compute average depths
+        self.depth_processed_m = DepthData.average_depth(depth, self.draft_use_m, self.avg_method)
+
+        # Set average depth to nan for ensembles that do not have valid beam depths
+        self.depth_processed_m[np.equal(self.valid_data, False)] = np.nan
+
+    def apply_filter(self, transect, filter_type=None):
+        """Coordinate the application of depth filters.
+
+        Parameters
+        ----------
+        transect: TransectData
+            Object of transect data.
+        filter_type: str
+            Type of filter to apply (None, Smooth, TRDI).
+        """
+
+        # Compute selected filter
+        if filter_type == 'Off' or filter_type is None:
+            # No filter
+            self.filter_none()
+        elif filter_type == 'SavGol':
+            # Savitzky-Golay filter
+            self.filter_savgol(transect)
+        elif filter_type == 'Smooth':
+            # Smooth filter
+            self.filter_smooth(transect)
+        elif filter_type == 'TRDI' and self.depth_source == 'BT':
+            # TRDI filter for multiple returns
+            self.filter_trdi()
+            self.filter_type = 'TRDI'
+            
+        self.valid_mean_data()
+
+        # Update processed depth with filtered results
+        if self.depth_source == 'BT':
+            # Multiple beams require averaging to obtain 1-D array
+            self.compute_avg_bt_depth()
+        else:
+            # Single beam (VB or DS) saved to 1-D array
+            self.depth_processed_m = np.array(self.depth_beams_m[0, :])
+            self.depth_processed_m[np.squeeze(np.equal(self.valid_data, 0))] = np.nan
+            
+    def apply_interpolation(self, transect, method=None):
+        """Coordinates application of interpolations
+
+        Parameters
+        ----------
+        transect: TransectData
+            Object of TransectData
+        method: str
+            Type of interpolation to apply (None, HoldLast, Smooth, Linear)
+        """
+        
+        # Determine interpolation to apply
+        if method is None:
+            method = self.interp_type
+            
+        # Apply selected interpolation
+        self.interp_type = method
+        # No filtering
+        if method == 'None':
+            self.interpolate_none()
+
+        # Hold last valid depth indefinitely
+        elif method == 'HoldLast':
+            self.interpolate_hold_last()
+
+        # Use values from a loess smooth
+        elif method == 'Smooth':
+            self.interpolate_smooth()
+
+        # Linear interpolation
+        else:
+            self.interpolate_linear(transect=transect)
+            
+        # Identify ensembles with interpolated depths
+        idx = np.where(np.logical_not(self.valid_data[:]))
+        if len(idx[0]) > 0:
+            idx = idx[0]
+            idx2 = np.where(np.logical_not(np.isnan(self.depth_processed_m[idx])))
+            if len(idx2[0]) > 0:
+                idx2 = idx2[0]
+                self.depth_source_ens[idx[idx2]] = 'IN'
+        
+    def apply_composite(self, comp_depth, comp_source):
+        """Applies the data from CompDepth computed in DepthStructure
+        to DepthData object
+
+        Parameters
+        ----------
+        comp_depth: np.array(float)
+            Composite depth computed in DepthStructure
+        comp_source: str
+            Source of composite depth (BT, VB, DS)
+        """
+        
+        # Assign composite depth to property
+        self.depth_processed_m = comp_depth
+        
+        # Assign appropriate composite source for each ensemble
+        self.depth_source_ens[comp_source == 1] = 'BT'
+        self.depth_source_ens[comp_source == 2] = 'VB'
+        self.depth_source_ens[comp_source == 3] = 'DS'
+        self.depth_source_ens[comp_source == 4] = 'IN'
+        self.depth_source_ens[comp_source == 0] = 'NA'
+        
+    def sos_correction(self, ratio):
+        """Correct depth for new speed of sound setting
+
+        Parameters
+        ----------
+        ratio: float
+            Ratio of new to old speed of sound value
+        """
+        
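+        # Worked example: if depths were computed with 1500 m/s but the correct
+        # speed of sound is 1485 m/s, ratio = 1485/1500 = 0.99, and a 10 m depth
+        # with a 0.5 m draft becomes 0.5 + (10 - 0.5) * 0.99 = 9.905 m. Only the
+        # distance below the transducer scales; the draft itself is unchanged.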
+        # Correct unprocessed depths
+        self.depth_beams_m = self.draft_use_m+np.multiply(self.depth_beams_m-self.draft_use_m, ratio)
+        
+        # Correct processed depths
+        self.depth_processed_m = self.draft_use_m+np.multiply(self.depth_processed_m-self.draft_use_m, ratio)
+        
+        # Correct cell size and location
+        self.depth_cell_size_m = np.multiply(self.depth_cell_size_m, ratio)
+        self.depth_cell_depth_m = self.draft_use_m + np.multiply(self.depth_cell_depth_m - self.draft_use_m, ratio)
+        
+    def valid_mean_data(self):
+        """Determines if raw data are sufficient to compute a valid depth without interpolation.
+        """
+        
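+        # For BT depths the ensemble mean is valid with at least 2 valid beams
+        # (QRev convention) or at least 3 (TRDI convention); single-beam
+        # sources (VB, DS) are valid wherever the beam itself is valid.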
+        if self.depth_source == 'BT':
+            self.valid_data = np.tile(True, self.valid_beams.shape[1])
+            nvalid = np.sum(self.valid_beams, axis=0)
+            
+            if self.valid_data_method == 'TRDI':
+                self.valid_data[nvalid < 3] = False
+            else:
+                self.valid_data[nvalid < 2] = False
+        else:
+            self.valid_data = self.valid_beams[0, :]
+            
+    def filter_none(self):
+        """Applies no filter to depth data. Removes filter if one was applied.
+        """
+        
+        # Set all ensembles to have valid data
+        if len(self.depth_beams_m.shape) > 1:
+            self.valid_beams = np.tile(True, self.depth_beams_m.shape)
+        else:
+            self.valid_beams = np.tile(True, (1, self.depth_beams_m.shape[0]))
+        
+        # Set ensembles with no depth data to invalid
+        self.valid_beams[self.depth_beams_m == 0] = False
+        self.valid_beams[np.isnan(self.depth_beams_m)] = False
+        
+        self.filter_type = 'None'
+        
+    def filter_smooth(self, transect):
+        """This filter uses a moving InterQuartile Range filter on residuals from a
+        robust Loess smooth of the depths in each beam to identify unnatural spikes in the depth
+        measurements from each beam.  Each beam is filtered independently.  The filter
+        criteria are set to be the maximum of the IQR filter, 5% of the measured depth, or 0.1 meter
+
+        Parameters
+        ----------
+        transect: TransectData
+            Object of TransectData
+
+        Notes
+        -----
+        half_width - number of points to each side of target point used in computing IQR.
+            This is the nominal number of points; the actual number used may be less if some are invalid.
+
+        multiplier - factor applied to the IQR to determine the filter criteria
+        
+        """
+
+        # If the smoothed depth has not been computed
+        if self.smooth_depth is None or len(self.smooth_depth) == 0:
+            
+            # Set filter characteristics
+            self.filter_type = 'Smooth'
+            
+            # Determine number of beams
+            if len(self.depth_orig_m.shape) > 1:
+                n_beams, n_ensembles = self.depth_orig_m.shape[0], self.depth_orig_m.shape[1]
+                depth_raw = np.copy(self.depth_orig_m)
+            else:
+                n_beams = 1
+                n_ensembles = self.depth_orig_m.shape[0]
+                depth_raw = np.copy(np.reshape(self.depth_orig_m, (1, n_ensembles)))
+
+            # Set bad depths to nan
+            depth = repmat([np.nan], n_beams, n_ensembles)
+
+            # Arrays initialized
+            depth_smooth = repmat([np.nan], n_beams, n_ensembles)
+            upper_limit = repmat([np.nan], n_beams, n_ensembles)
+            lower_limit = repmat([np.nan], n_beams, n_ensembles)
+            depth_filtered = depth
+            depth[nan_greater(depth_raw, 0)] = depth_raw[nan_greater(depth_raw, 0)]
+
+            # Create position array
+            boat_vel_selected = getattr(transect.boat_vel, transect.boat_vel.selected)
+            if boat_vel_selected is not None:
+                track_x = boat_vel_selected.u_processed_mps * transect.date_time.ens_duration_sec
+                track_y = boat_vel_selected.v_processed_mps * transect.date_time.ens_duration_sec
+            else:
+                track_x = np.nan
+                track_y = np.nan
+
+            idx = np.where(np.isnan(track_x))
+            if len(idx[0]) < 2:
+                x = np.nancumsum(np.sqrt(track_x**2+track_y**2))
+            else:
+                x = np.nancumsum(transect.date_time.ens_duration_sec)
+
+            multi_processing = False
+            if multi_processing:
+                with concurrent.futures.ProcessPoolExecutor() as executor:
+                    results = executor.map(self.compute_smooth, depth, depth_filtered, it.repeat(x))
+
+                for j, result in enumerate(results):
+                    depth_smooth[j] = result[0]
+                    upper_limit[j] = result[1]
+                    lower_limit[j] = result[2]
+            else:
+                # Loop for each beam, smooth is applied to each beam
+                for j in range(n_beams):
+                    depth_smooth[j], upper_limit[j], lower_limit[j] = self.compute_smooth(depth[j],
+                                                                                          depth_filtered[j],
+                                                                                          x)
+
+            # Save smooth results to avoid recomputing them if needed later
+            self.smooth_depth = depth_smooth
+            self.smooth_upper_limit = upper_limit
+            self.smooth_lower_limit = lower_limit
+
+        # Reset valid data
+        self.filter_none()
+        
+        # Set filter type
+        self.filter_type = 'Smooth'
+        
+        # Determine number of beams
+        if len(self.depth_orig_m.shape) > 1:
+            n_beams, n_ensembles = self.depth_orig_m.shape[0], self.depth_orig_m.shape[1]
+            depth_raw = np.copy(self.depth_orig_m)
+        else:
+            n_beams = 1
+            n_ensembles = self.depth_orig_m.shape[0]
+            depth_raw = np.reshape(self.depth_orig_m, (1, n_ensembles))
+
+        depth_res = repmat([np.nan], n_beams, n_ensembles)
+
+        # Set bad depths to nan
+        depth = repmat(np.nan, depth_raw.shape[0], depth_raw.shape[1])
+        depth[nan_greater(depth_raw, 0)] = depth_raw[nan_greater(depth_raw, 0)]
+        
+        # Apply filter
+        for j in range(n_beams):
+            if np.nansum(self.smooth_upper_limit[j, :]) > 0:
+                bad_idx = np.where(
+                    np.logical_or(nan_greater(depth[j], self.smooth_upper_limit[j]),
+                                  nan_less(depth[j], self.smooth_lower_limit[j])))[0]
+                # Update depth matrix
+                depth_res[j, bad_idx] = np.nan
+
+            else:
+                bad_idx = np.isnan(depth[j])
+
+            # Update valid data matrix
+            self.valid_beams[j, bad_idx] = False
+
+    @staticmethod
+    def compute_smooth(depth, depth_filtered, x):
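+        """Computes a robust loess smooth of a single beam's depths and
+        iteratively refined upper and lower filter limits.
+
+        Parameters
+        ----------
+        depth: np.array(float)
+            Beam depths with invalid values set to nan
+        depth_filtered: np.array(float)
+            Working copy of beam depths that is progressively masked
+        x: np.array(float)
+            Along-track distance, or accumulated time when track data are invalid
+
+        Returns
+        -------
+        depth_smooth: np.array(float)
+            Loess smoothed depths
+        upper_limit: np.array(float)
+            Upper filter limit for each ensemble
+        lower_limit: np.array(float)
+            Lower filter limit for each ensemble
+        """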
+        cycles = 3
+        half_width = 10
+        multiplier = 15
+
+        upper_limit = np.nan
+        lower_limit = np.nan
+
+        # At least 50% of the data in a beam must be valid to apply the smooth
+        # if np.nansum((np.isnan(depth_filtered) == False) / len(depth_filtered)) > .5:
+        # Compute residuals based on robust loess smooth
+        if len(x) > 1:
+            # Fit smooth
+            try:
+                smooth_fit = rloess(x, depth_filtered, 20)
+                depth_smooth = smooth_fit
+            except ValueError:
+                depth_smooth = depth_filtered
+        else:
+            depth_smooth = depth_filtered
+
+        depth_res = depth - depth_smooth
+
+        # Run the filter multiple times
+        for n in range(cycles - 1):
+            max_upper_limit = 9999
+            idx = np.where(np.logical_not(np.isnan(depth_filtered)))[0]
+            if len(idx) > 0:
+                max_upper_limit = compute_quantile(depth_filtered[idx], 0.90) * 3
+
+            # Compute inner quartile range
+            fill_array = run_iqr(half_width, depth_res)
+
+            # Compute filter criteria and enforce a minimum of 5% of depth or 0.1 m, whichever is greater
+            criteria = multiplier * fill_array
+            idx = np.where(nan_less(criteria, np.max(np.vstack((depth * .05,
+                                                                np.ones(depth.shape) / 10)), 0)))[0]
+            if len(idx) > 0:
+                criteria[idx] = np.max(np.vstack((depth[idx] * .05, np.ones(idx.shape) / 10)), 0)
+
+            # Compute limits
+            upper_limit = depth_smooth + criteria
+            idx = np.where(np.logical_or(np.greater(upper_limit, max_upper_limit), np.isnan(upper_limit)))[0]
+            if len(idx) > 0:
+                upper_limit[idx] = max_upper_limit
+            lower_limit = depth_smooth - criteria
+            idx = np.where(np.less(lower_limit, 0))[0]
+            lower_limit[idx] = 0
+
+            bad_idx = np.where(
+                np.logical_or(nan_greater(depth, upper_limit), nan_less(depth, lower_limit)))[0]
+            if len(bad_idx) == 0:
+                break
+            else:
+                depth_filtered[bad_idx] = np.nan
+                # Fit smooth
+                try:
+                    smooth_fit = rloess(x, depth_filtered, 20)
+                    depth_smooth = smooth_fit
+                except ValueError:
+                    depth_smooth = depth_filtered
+
+                depth_res = depth - depth_smooth
+
+        return depth_smooth, upper_limit, lower_limit
+
+    def filter_savgol(self, transect):
+        """This filter uses a moving InterQuartile Range filter on residuals from a
+        a Savitzky-Golay filter on y with non-uniform spaced x
+        of the depths in each beam to identify unnatural spikes in the depth
+        measurements from each beam.  Each beam is filtered independently.  The filter
+        criteria are set to be the maximum of the IQR filter, 5% of the measured depth, or 0.1 meter
+
+        Parameters
+        ----------
+        transect: TransectData
+            Object of TransectData
+
+        Notes
+        -----
+        half_width - number of points to each side of target point used in computing IQR.
+            This is the nominal number of points; the actual number used may be less if some are invalid.
+
+        multiplier - factor applied to the IQR to determine the filter criteria
+
+        """
+
+        # Determine number of beams
+        if len(self.depth_orig_m.shape) > 1:
+            # For slant beams
+            n_beams, n_ensembles = self.depth_orig_m.shape[0], self.depth_orig_m.shape[1]
+            depth_raw = np.copy(self.depth_orig_m)
+        else:
+            # For vertical beam or depth sounder
+            n_beams = 1
+            n_ensembles = self.depth_orig_m.shape[0]
+            depth_raw = np.copy(np.reshape(self.depth_orig_m, (1, n_ensembles)))
+
+        # Set bad depths to nan
+        depth = repmat([np.nan], n_beams, n_ensembles)
+        depth[depth_raw > 0] = depth_raw[depth_raw > 0]
+
+        # If the smoothed depth has not been computed
+        if self.smooth_depth is None:
+
+            # Set filter characteristics
+            self.filter_type = 'SavGol'
+            cycles = 3
+            half_width = 10
+            multiplier = 15
+
+            # Arrays initialized
+            depth_smooth = repmat([np.nan], n_beams, n_ensembles)
+            depth_res = repmat([np.nan], n_beams, n_ensembles)
+            upper_limit = repmat([np.nan], n_beams, n_ensembles)
+            lower_limit = repmat([np.nan], n_beams, n_ensembles)
+
+            # Create position array. If there are insufficient track data use elapsed time
+            boat_vel_selected = getattr(transect.boat_vel, transect.boat_vel.selected)
+            if boat_vel_selected is not None and \
+                    np.nansum(np.isnan(boat_vel_selected.u_processed_mps)) < 2:
+                track_x = boat_vel_selected.u_processed_mps * transect.date_time.ens_duration_sec
+                track_y = boat_vel_selected.v_processed_mps * transect.date_time.ens_duration_sec
+                x = np.nancumsum(np.sqrt(track_x ** 2 + track_y ** 2))
+            else:
+                x = np.nancumsum(transect.date_time.ens_duration_sec)
+
+            # Loop for each beam, smooth is applied to each beam
+            for j in range(n_beams):
+                # At least 50% of the data in a beam must be valid to apply the smooth
+                # if np.nansum((np.isnan(depth[j, :]) == False) / depth.shape[0]) > .5:
+
+                # Compute residuals based on non-uniform Savitzky-Golay
+                try:
+                    valid_depth_idx = np.logical_not(np.isnan(depth[j, :]))
+                    x_fit = x[valid_depth_idx]
+                    y_fit = depth[j, valid_depth_idx]
+                    smooth_fit = non_uniform_savgol(x_fit, y_fit, 15, 3)
+                    depth_smooth[j, valid_depth_idx] = smooth_fit
+                except ValueError:
+                    depth_smooth[j, :] = depth[j, :]
+
+                depth_res[j, :] = depth[j, :] - depth_smooth[j, :]
+
+                # Run the filter multiple times
+                for n in range(cycles - 1):
+
+                    # Compute inner quartile range
+                    fill_array = run_iqr(half_width, depth_res[j, :])
+                    # Compute filter criteria
+                    criteria = multiplier * fill_array
+
+                    # Adjust criteria so that it is never less than 5% of depth or 0.1 m, whichever is greater
+                    idx = np.where(criteria < np.max(np.vstack((depth[j, :] * .05,
+                                                                np.ones(depth.shape) / 10)), 0))[0]
+                    if len(idx) > 0:
+                        criteria[idx] = np.max(np.vstack((depth[j, idx] * .05, np.ones(idx.shape) / 10)), 0)
+
+                    # Compute limits
+                    upper_limit[j] = depth_smooth[j, :] + criteria
+                    lower_limit[j] = depth_smooth[j, :] - criteria
+
+                    bad_idx = np.where(np.logical_or(np.greater(depth[j], upper_limit[j]),
+                                                     np.less(depth[j], lower_limit[j])))[0]
+                    # Update residual matrix
+                    depth_res[j, bad_idx] = np.nan
+
+            # Save smooth results to avoid recomputing them if needed later
+            self.smooth_depth = depth_smooth
+            self.smooth_upper_limit = upper_limit
+            self.smooth_lower_limit = lower_limit
+
+        # Reset valid data
+        self.filter_none()
+
+        # Set filter type
+        self.filter_type = 'SavGol'
+
+        # Apply filter
+        for j in range(n_beams):
+            if np.nansum(self.smooth_upper_limit[j]) > 0:
+                bad_idx = np.where(
+                    np.logical_or(np.greater(depth[j], self.smooth_upper_limit[j]),
+                                  np.less(depth[j], self.smooth_lower_limit[j])))[0]
+            else:
+                bad_idx = np.isnan(depth[j])
+
+            # Update valid data matrix
+            self.valid_beams[j, bad_idx] = False
+
+    def interpolate_none(self):
+        """Applies no interpolation.
+        """
+        
+        # Compute processed depth without interpolation
+        if self.depth_source == 'BT':
+            # Bottom track methods
+            self.compute_avg_bt_depth()
+        else:
+            # Vertical beam or depth sounder depths
+            self.depth_processed_m = self.depth_beams_m[0, :]
+            
+        self.depth_processed_m[np.squeeze(np.equal(self.valid_data, False))] = np.nan
+        
+        # Set interpolation type
+        self.interp_type = 'None'
+        
+    def interpolate_hold_last(self):
+        """This function holds the last valid value until the next valid data point.
+        """
+        
+        # Get number of ensembles
+        n_ensembles = len(self.depth_processed_m)
+        
+        # Process data by ensemble
+        for n in range(1, n_ensembles):
+            
+            # If current ensemble's depth is invalid assign depth from previous ensemble
+            if not self.valid_data[n]:
+                self.depth_processed_m[n] = self.depth_processed_m[n-1]
+
+    def interpolate_next(self):
+        """This function back fills with the next valid value.
+        """
+
+        # Get number of ensembles
+        n_ens = len(self.depth_processed_m)
+
+        # Process data by ensemble
+        for n in np.arange(0, n_ens-1)[::-1]:
+
+            # If current ensemble's depth is invalid assign depth from next ensemble
+            if not self.valid_data[n]:
+                self.depth_processed_m[n] = self.depth_processed_m[n + 1]
+
+    def interpolate_smooth(self):
+        """Apply interpolation based on the robust loess smooth
+        """
+        
+        self.interp_type = 'Smooth'
+        
+        # Get depth data from object
+        depth_new = np.copy(self.depth_beams_m)
+
+        # Update depth data with interpolated depths; np.logical_not is required
+        # because Python's "not" cannot be applied to a numpy array
+        invalid = np.logical_not(self.valid_beams)
+        depth_new[invalid] = self.smooth_depth[invalid]
+        
+        # Compute processed depths with interpolated values
+        if self.depth_source == 'BT':
+            # Temporarily change self.depth_beams_m to compute average
+            # for bottom track based depths
+            temp_save = copy.deepcopy(self.depth_beams_m)
+            self.depth_beams_m = depth_new
+            self.compute_avg_bt_depth()
+            self.depth_beams_m = temp_save
+
+        else:
+            # Assignment for VB or DS
+            self.depth_processed_m = depth_new[0, :]
+            
+    def interpolate_linear(self, transect):
+        """Apply linear interpolation
+        """
+        
+        # Set interpolation type
+        self.interp_type = 'Linear'
+
+        # Create position array
+        select = getattr(transect.boat_vel, transect.boat_vel.selected)
+        if select is not None:
+            boat_vel_x = select.u_processed_mps
+            boat_vel_y = select.v_processed_mps
+            track_x = boat_vel_x * transect.date_time.ens_duration_sec
+            track_y = boat_vel_y * transect.date_time.ens_duration_sec
+        else:
+            select = getattr(transect.boat_vel, 'bt_vel')
+            track_x = np.tile(np.nan, select.u_processed_mps.shape)
+            track_y = np.tile(np.nan, select.v_processed_mps.shape)
+              
+        idx = np.where(np.isnan(track_x[1:]))
+        
+        # If the navigation reference has no gaps use it for interpolation, if not use time
+        if len(idx[0]) < 1:
+            x = np.nancumsum(np.sqrt(track_x**2 + track_y**2))
+        else:
+            # Compute accumulated time
+            x = np.nancumsum(transect.date_time.ens_duration_sec)
+            
+        # Determine number of beams
+        n_beams = self.depth_beams_m.shape[0]
+        depth_mono = copy.deepcopy(self.depth_beams_m)
+        depth_new = copy.deepcopy(self.depth_beams_m)
+        
+        # Create strictly monotonic arrays for depth and track by identifying
+        # duplicate track values.  The first track value is used and the
+        # remaining duplicates are set to nan.  The depth assigned to that first
+        # track value is the average of all duplicates.  The depths for the
+        # duplicates are then set to nan.  Only valid, strictly monotonic track
+        # and depth data are used as input to the linear interpolation.  Only
+        # the interpolated data for invalid depths are added to the valid depth
+        # data to create depth_new.
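+        # e.g. if x = [0, 1, 1, 1, 2], the duplicate track values at indices
+        # 1-3 collapse onto index 1, which receives the mean depth of those
+        # columns, while indices 2 and 3 become nan in both x and depth_mono.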
+
+        x_mono = x
+        
+        idx0 = np.where(np.diff(x) == 0)[0]
+        if len(idx0) > 0:
+            if len(idx0) > 1:
+                # Split array into subarrays in proper sequence e.g [[2,3,4],[7,8,9]] etc.
+                idx1 = np.add(np.where(np.diff(idx0) != 1)[0], 1)
+                group = np.split(idx0, idx1)
+
+            else:
+                # Group of only 1 point
+                group = np.array([idx0])
+
+            # Replace repeated values with mean
+            n_group = len(group)
+            for k in range(n_group):
+                indices = group[k]
+                indices = np.append(indices, indices[-1] + 1)
+                depth_avg = np.nanmean(depth_mono[:, indices], axis=1)
+                depth_mono[:, indices[0]] = depth_avg
+                depth_mono[:, indices[1:]] = np.nan
+                x[indices[1:]] = np.nan
+                
+        # Interpolate each beam
+
+        for n in range(n_beams):
+            # Determine ensembles with valid depth data
+            valid_depth_mono = np.logical_not(np.isnan(depth_mono[n]))
+            valid_x_mono = np.logical_not(np.isnan(x_mono))
+            valid_data = copy.deepcopy(self.valid_beams[n])
+            valid = np.vstack([valid_depth_mono, valid_x_mono, valid_data])
+            valid = np.all(valid, 0)
+
+            if np.sum(valid) > 1:
+                # Compute interpolation function from all valid data
+                depth_int = np.interp(x_mono, x_mono[valid], depth_mono[n, valid], left=np.nan, right=np.nan)
+                # Fill in invalid data with interpolated data
+                depth_new[n, np.logical_not(self.valid_beams[n])] = depth_int[np.logical_not(self.valid_beams[n])]
+
+        if self.depth_source == 'BT':
+            # Bottom track depths
+            self.depth_processed_m = self.average_depth(depth_new, self.draft_use_m, self.avg_method)
+        else:
+            # Vertical beam or depth sounder depths
+            self.depth_processed_m = np.copy(depth_new[0, :])
+
+    @staticmethod
+    def average_depth(depth, draft, method):
+        """Compute average depth from bottom track beam depths.
+
+        Parameters
+        ----------
+        depth: np.array(float)
+            Individual beam depths for each beam in each ensemble including the draft
+        draft: float
+            Draft of ADCP
+        method: str
+            Averaging method (Simple, IDW)
+        
+        Returns
+        -------
+        avg_depth: np.array(float)
+            Average depth for each ensemble
+        
+        """
+        if method == 'Simple':
+            avg_depth = np.nanmean(depth, 0)
+        else:
+            # Compute inverse weighted mean depth
+            rng = depth - draft
+            w = 1 - np.divide(rng, np.nansum(rng, 0))
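+            # Each beam's weight is 1 minus its share of the summed range, so
+            # shorter (shallower) ranges receive larger weights; the where=
+            # guard below avoids division by zero when no beam is valid.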
+            avg_depth = draft+np.nansum(np.divide((rng * w), np.nansum(w, 0), where=np.nansum(w, 0) != 0), 0)
+            avg_depth[avg_depth == draft] = np.nan
+
+        return avg_depth
+
+    def filter_trdi(self):
+        """Filter used by TRDI to filter out multiple reflections that get digitized as depth.
+        """
+
+        # Assign raw depth data to local variable
+        depth_raw = np.copy(self.depth_orig_m)
+
+        # Determine number of beams
+        n_beams = depth_raw.shape[0]
+
+        # Reset filters to none
+        self.filter_none()
+
+        # Set filter type to TRDI
+        self.filter_type = 'TRDI'
+
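+        # A beam is flagged invalid in an ensemble when its depth exceeds 1.75
+        # times the depth reported by any other beam in that ensemble, which is
+        # characteristic of a multiple-reflection return.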
+        for n in range(n_beams):
+            depth_ratio = depth_raw[n, :] / depth_raw
+            exceeded = depth_ratio > 1.75
+            exceeded_ens = np.nansum(exceeded, 0)
+            self.valid_beams[n, exceeded_ens > 0] = False
+
+    # ============================================================================================
+    # The methods below are not being used.
+    # The methods have been moved to separate files and compiled using Numba AOT.
+    # The methods below are included here for historical purposes
+    # and may provide an easier approach to adding new features/algorithms prior to recoding
+    # them in a manner that can be compiled using Numba AOT.
+    # =============================================================================================
+    @staticmethod
+    def run_iqr(half_width, data):
+        """Computes a running Innerquartile Range
+        The routine accepts a column vector as input.  "halfWidth" number of data
+        points for computing the Innerquartile Range are selected before and
+        after the target data point, but no including the target data point.
+        Near the ends of the series the number of points before or after are reduced.
+        Nan in the data are counted as points.  The IQR is computed on the slected
+        subset of points.  The process occurs for each point in the provided column vector.
+        A column vector with the computed IQR at each point is returned.
+
+        Parameters
+        ----------
+        half_width: int
+            Number of ensembles before and after current ensemble which are used to compute the IQR
+        data: np.array(float)
+            Data for which the IQR is computed
+        """
+        npts = len(data)
+        half_width = int(half_width)
+
+        if npts < 20:
+            half_width = int(np.floor(npts / 2))
+
+        iqr_array = []
+
+        # Compute IQR for each point
+        for n in range(npts):
+
+            # Sample selection for 1st point
+            if n == 0:
+                sample = data[1:1 + half_width]
+
+            # Sample selection at end of data set
+            elif n + half_width > npts:
+                sample = np.hstack([data[n - half_width - 1:n - 1], data[n:npts]])
+
+            # Sample selection at beginning of data set
+            elif half_width >= n + 1:
+                sample = np.hstack([data[0:n], data[n + 1:n + half_width + 1]])
+
+            # Sample selection in body of data set
+            else:
+                sample = np.hstack([data[n - half_width:n], data[n + 1:n + half_width + 1]])
+
+            iqr_array.append(iqr(sample))
+
+        return np.array(iqr_array)
diff --git a/Classes/DepthStructure.py b/Classes/DepthStructure.py
new file mode 100644
index 0000000..4dcb2c6
--- /dev/null
+++ b/Classes/DepthStructure.py
@@ -0,0 +1,368 @@
+import numpy as np
+from Classes.DepthData import DepthData
+
+
+class DepthStructure(object):
+    """This class creates the data structure used store depths from different sources
+
+    Attributes
+    ----------
+    selected: str
+        Name of object DepthData that contains depth data.
+    bt_depths: DepthData
+        Object of DepthData for bottom track based depths.
+    vb_depths: DepthData
+        Object of DepthData for vertical beam based depths.
+    ds_depths: DepthData
+        Object of DepthData for depth sounder based depths.
+    composite: str
+        Indicates use of composite depths ("On" or "Off").
+    """
+    
+    def __init__(self):
+        """Creates object and initializes variables to None"""
+
+        self.selected = None  # name of object DepthData that contains the depth data for q computation
+        self.bt_depths = None  # object of DepthData for bottom track depth data
+        self.vb_depths = None  # object of DepthData for vertical beam depth data
+        self.ds_depths = None  # object of DepthData for depth sounder depth data
+        self.composite = "On"  # Turn composite depths "on" or "off"
+
+    def add_depth_object(self, depth_in, source_in, freq_in, draft_in, cell_depth_in, cell_size_in):
+        """Adds a DepthData object to the depth structure for the specified type of depths.
+
+        Parameters
+        ----------
+        depth_in: np.array
+            Depth data in meters.
+        source_in: str
+            Specifies source of depth data: bottom track (BT), vertical beam (VB), or depth sounder (DS)
+        freq_in: np.array
+            Acoustic frequency in kHz of beams used to determine depth.
+        draft_in: float
+            Draft of transducer (in meters) used to measure depths.
+        cell_depth_in: np.array
+            Depth of each cell in the profile. If the referenced depth does not have depth cells the depth cell
+            values from the bottom track (BT) depths should be used.
+        cell_size_in: np.array
+            Size of each depth cell. If the referenced depth does not have depth cells the cell size from
+            the bottom track (BT) depths should be used.
+        """
+
+        if source_in == 'BT':
+            self.bt_depths = DepthData()
+            self.bt_depths.populate_data(depth_in, source_in, freq_in, draft_in, cell_depth_in, cell_size_in)
+        elif source_in == 'VB':
+            self.vb_depths = DepthData()
+            self.vb_depths.populate_data(depth_in, source_in, freq_in, draft_in, cell_depth_in, cell_size_in)
+        elif source_in == 'DS':
+            self.ds_depths = DepthData()
+            self.ds_depths.populate_data(depth_in, source_in, freq_in, draft_in, cell_depth_in, cell_size_in)
+
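+    # Usage sketch (hypothetical arrays; names are illustrative only):
+    #     depths = DepthStructure()
+    #     depths.add_depth_object(depth_in=beam_depths, source_in='BT',
+    #                             freq_in=freq_khz, draft_in=0.2,
+    #                             cell_depth_in=cell_depth, cell_size_in=cell_size)
+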
+    def populate_from_qrev_mat(self, transect):
+        """Populates the object using data from previously saved QRev Matlab file.
+
+        Parameters
+        ----------
+        transect: mat_struct
+           Matlab data structure obtained from sio.loadmat
+        """
+        if hasattr(transect, 'depths'):
+
+            self.bt_depths = DepthData()
+            self.bt_depths.populate_from_qrev_mat(transect.depths.btDepths)
+
+            try:
+                self.vb_depths = DepthData()
+                self.vb_depths.populate_from_qrev_mat(transect.depths.vbDepths)
+            except AttributeError:
+                self.vb_depths = None
+
+            try:
+                self.ds_depths = DepthData()
+                self.ds_depths.populate_from_qrev_mat(transect.depths.dsDepths)
+            except AttributeError:
+                self.ds_depths = None
+
+            if transect.depths.selected == 'btDepths':
+                self.selected = 'bt_depths'
+            elif transect.depths.selected == 'vbDepths':
+                self.selected = 'vb_depths'
+            elif transect.depths.selected == 'dsDepths':
+                self.selected = 'ds_depths'
+            self.composite = transect.depths.composite
+            if self.vb_depths is None and self.ds_depths is None:
+                self.composite = 'Off'
+
+    def composite_depths(self, transect, setting=None):
+        """Depth composite is based on the following assumptions
+        
+        1. If a depth sounder is available the user must have assumed the ADCP beams
+        (BT or vertical) might have problems and it will be the second alternative if 
+        not selected as the preferred source
+        
+        2. For 4-beam BT depths, if 3 beams are valid the average is considered valid.
+        It may be based on interpolation of the invalid beam.  However, if only 2 beams
+        are valid even though the other two beams may be interpolated and included in the average the
+        average will be replaced by an alternative if available.  If no alternative is 
+        available the multi-beam average based on available beams and interpolation will
+        be used.
+
+        Parameters
+        ----------
+        transect: TransectData
+            Transect object containing all data.
+        setting: str
+            Setting to use ("On") or not use ("Off") composite depths. If None, the stored composite setting is used.
+        """
+        
+        if setting is None:
+            setting = self.composite
+        else:
+            self.composite = setting
+            
+        # The primary depth reference is the selected reference
+        ref = self.selected
+        comp_depth = np.array([])
+
+        if setting == 'On':
+            # Prepare vector of valid BT averages, which are defined as having at least 2 valid beams
+            bt_valid = self.bt_depths.valid_data
+            n_ensembles = bt_valid.shape[-1]
+            bt_filtered = np.copy(self.bt_depths.depth_processed_m)
+            bt_filtered[np.logical_not(bt_valid)] = np.nan
+            
+            # Prepare vertical beam data, using only data prior to interpolation
+            if self.vb_depths is not None:
+                vb_filtered = np.copy(self.vb_depths.depth_processed_m)
+                vb_filtered[np.squeeze(np.equal(self.vb_depths.valid_data, False))] = np.nan
+            else:
+                vb_filtered = np.tile(np.nan, n_ensembles)
+                  
+            # Prepare depth sounder data, using only data prior to interpolation
+            if self.ds_depths is not None:
+                ds_filtered = np.copy(self.ds_depths.depth_processed_m)
+                ds_filtered[np.squeeze(np.equal(self.ds_depths.valid_data, False))] = np.nan
+            else:
+                ds_filtered = np.tile(np.nan, n_ensembles)
+
+            comp_source = np.tile(np.nan, bt_filtered.shape)
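+            # comp_source codes used below: 1 = BT, 2 = VB, 3 = DS, 4 = interpolated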
+
+            # Apply composite depths
+            if ref == 'bt_depths':
+                comp_depth = np.copy(bt_filtered)
+                comp_source[~np.isnan(comp_depth)] = 1
+                comp_depth[np.isnan(comp_depth)] = np.squeeze(ds_filtered[np.isnan(comp_depth)])
+                comp_source[np.logical_and(~np.isnan(comp_depth), np.isnan(comp_source))] = 3
+                comp_depth[np.isnan(comp_depth)] = vb_filtered[np.isnan(comp_depth)]
+                comp_source[np.logical_and(~np.isnan(comp_depth), np.isnan(comp_source))] = 2
+                comp_depth = self.interpolate_composite(transect=transect, composite_depth=comp_depth)
+                comp_source[np.logical_and(~np.isnan(comp_depth), np.isnan(comp_source))] = 4
+
+            elif ref == 'vb_depths':
+                comp_depth = np.copy(vb_filtered)
+                comp_source[~np.isnan(comp_depth)] = 2
+                comp_depth[np.isnan(comp_depth)] = np.squeeze(ds_filtered[np.isnan(comp_depth)])
+                comp_source[np.logical_and(~np.isnan(comp_depth), np.isnan(comp_source))] = 3
+                comp_depth[np.isnan(comp_depth)] = np.squeeze(bt_filtered[np.isnan(comp_depth)])
+                comp_source[np.logical_and(~np.isnan(comp_depth), np.isnan(comp_source))] = 1
+                comp_depth = self.interpolate_composite(transect=transect, composite_depth=comp_depth)
+                comp_source[np.logical_and(~np.isnan(comp_depth), np.isnan(comp_source))] = 4
+
+            elif ref == 'ds_depths':
+                comp_depth = np.copy(ds_filtered)
+                comp_source[~np.isnan(comp_depth)] = 3
+                comp_depth[np.isnan(comp_depth)] = np.squeeze(vb_filtered[np.isnan(comp_depth)])
+                comp_source[np.logical_and(~np.isnan(comp_depth), np.isnan(comp_source))] = 2
+                comp_depth[np.isnan(comp_depth)] = np.squeeze(bt_filtered[np.isnan(comp_depth)])
+                comp_source[np.logical_and(~np.isnan(comp_depth), np.isnan(comp_source))] = 1
+                comp_depth = self.interpolate_composite(transect=transect, composite_depth=comp_depth)
+                comp_source[np.logical_and(~np.isnan(comp_depth), np.isnan(comp_source))] = 4
+
+            # Save composite depth to depth_processed of selected primary reference
+            selected_data = getattr(self, ref)
+            selected_data.apply_composite(comp_depth, comp_source.astype(int))
+                
+        else:
+            selected_data = getattr(self, ref)
+            comp_source = np.zeros(selected_data.depth_processed_m.shape)
+            
+            if ref == 'bt_depths':
+                selected_data.valid_data[np.isnan(selected_data.valid_data)] = False
+                comp_source[np.squeeze(selected_data.valid_data)] = 1
+            elif ref == 'vb_depths':
+                comp_source[np.squeeze(selected_data.valid_data)] = 2
+            elif ref == 'ds_depths':
+                comp_source[np.squeeze(selected_data.valid_data)] = 3
+
+            selected_data.apply_interpolation(transect)
+            comp_depth = selected_data.depth_processed_m
+            selected_data.apply_composite(comp_depth, comp_source.astype(int))
+
+    def set_draft(self, target, draft):
+        """This function will change the ref_depth draft.
+
+        Parameters
+        ----------
+        target: str
+            Source of depth data.
+        draft: float
+            New draft.
+        """
+        
+        if target == 'ADCP':
+            self.bt_depths.change_draft(draft)
+            self.vb_depths.change_draft(draft)
+        else:
+            self.ds_depths.change_draft(draft)    
+            
+    def depth_filter(self, transect, filter_method):
+        """Method to apply filter to all available depth sources, so that
+        all sources have the same filter applied.
+
+        Parameters
+        ----------
+        transect: TransectData
+            Object of TransectData
+        filter_method: str
+            Method to use to filter data (Smooth, TRDI, None).
+        """
+        
+        if self.bt_depths is not None:
+            self.bt_depths.apply_filter(transect, filter_method)
+        if self.vb_depths is not None:
+            self.vb_depths.apply_filter(transect, filter_method)
+        if self.ds_depths is not None:
+            self.ds_depths.apply_filter(transect, filter_method)
+            
+    def depth_interpolation(self, transect, method=None):
+        """Method to apply interpolation to all available depth sources, so
+        that all sources have the same interpolation applied.
+
+        Parameters
+        ----------
+        transect: TransectData
+            Object of TransectData
+        method: str
+            Interpolation method (None, HoldLast, Smooth, Linear)
+        """
+        
+        if self.bt_depths is not None:
+            self.bt_depths.apply_interpolation(transect, method)
+        if self.vb_depths is not None:
+            self.vb_depths.apply_interpolation(transect, method)
+        if self.ds_depths is not None:
+            self.ds_depths.apply_interpolation(transect, method)
+            
+    def sos_correction(self, ratio):
+        """Correct depths for change in speed of sound.
+
+        Parameters
+        ----------
+        ratio: float
+            Ratio of new to old speed of sound.
+        """
+        
+        # Bottom Track Depths
+        if self.bt_depths is not None:
+            self.bt_depths.sos_correction(ratio)
+            
+        # Vertical beam depths
+        if self.vb_depths is not None:
+            self.vb_depths.sos_correction(ratio)
+
+    @staticmethod
+    def interpolate_composite(transect, composite_depth):
+        """Apply linear interpolation to composite depths.
+
+        Parameters
+        ----------
+        transect: TransectData
+            Transect being processed
+        composite_depth: np.array(float)
+            Array of composite depths
+
+        Returns
+        -------
+        depth_new: np.array(float)
+            Array of composite depths with interpolated values
+        """
+
+        # Create position array
+        select = getattr(transect.boat_vel, transect.boat_vel.selected)
+        if select is not None:
+            boat_vel_x = select.u_processed_mps
+            boat_vel_y = select.v_processed_mps
+            track_x = boat_vel_x * transect.date_time.ens_duration_sec
+            track_y = boat_vel_y * transect.date_time.ens_duration_sec
+        else:
+            select = getattr(transect.boat_vel, 'bt_vel')
+            track_x = np.tile(np.nan, select.u_processed_mps.shape)
+            track_y = np.tile(np.nan, select.v_processed_mps.shape)
+
+        idx = np.where(np.isnan(track_x[1:]))
+
+        # If the navigation reference has no gaps use it for interpolation, if not use time
+        if len(idx[0]) < 1:
+            x = np.nancumsum(np.sqrt(track_x ** 2 + track_y ** 2))
+        else:
+            # Compute accumulated time
+            x = np.nancumsum(transect.date_time.ens_duration_sec)
+
+        depth_mono = np.copy(composite_depth)
+        depth_new = np.copy(composite_depth)
+
+        # Create strictly monotonic arrays for depth and track by identifying duplicate
+        # track values. The first track value is kept and the remaining duplicates are
+        # set to nan. The depth assigned to that first track value is the average of
+        # all duplicates, and the depths of the duplicates are then set to nan. Only
+        # valid, strictly monotonic track and depth data are used as input to the
+        # linear interpolation. Only the interpolated values for invalid depths are
+        # added to the valid depth data to create depth_new.
+
+        x_mono = x
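+        # Note: x_mono aliases x (no copy), so nan values assigned to x for duplicate
+        # track positions below are also excluded from x_mono when interpolating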
+
+        idx0 = np.where(np.diff(x) == 0)[0]
+        if len(idx0) > 0:
+            if len(idx0) > 1:
+                # Split array into subarrays in proper sequence e.g [[2,3,4],[7,8,9]] etc.
+                idx1 = np.add(np.where(np.diff(idx0) != 1)[0], 1)
+                group = np.split(idx0, idx1)
+
+            else:
+                # Group of only 1 point
+                group = np.array([idx0])
+
+            # Replace repeated values with mean
+            n_group = len(group)
+            for k in range(n_group):
+                indices = group[k]
+                indices = np.append(indices, indices[-1] + 1)
+                depth_avg = np.nanmean(depth_mono[indices])
+                depth_mono[indices[0]] = depth_avg
+                depth_mono[indices[1:]] = np.nan
+                x[indices[1:]] = np.nan
+
+        # Determine ensembles with valid depth data
+        valid_depth_mono = np.logical_not(np.isnan(depth_mono))
+        valid_x_mono = np.logical_not(np.isnan(x_mono))
+        valid = np.vstack([valid_depth_mono, valid_x_mono])
+        valid = np.all(valid, 0)
+
+        if np.sum(valid) > 1:
+            # Compute interpolation function from all valid data
+            depth_int = np.interp(x_mono, x_mono[valid], depth_mono[valid], left=np.nan, right=np.nan)
+            # Fill in invalid data with interpolated data
+            depth_new[np.logical_not(valid_depth_mono)] = depth_int[np.logical_not(valid_depth_mono)]
+
+        return depth_new
diff --git a/Classes/EdgeData.py b/Classes/EdgeData.py
new file mode 100644
index 0000000..a4fddc5
--- /dev/null
+++ b/Classes/EdgeData.py
@@ -0,0 +1,125 @@
+import numpy as np
+
+
+class EdgeData(object):
+    """Class used to store edge settings.
+
+    Attributes
+    ----------
+    type: str
+        Shape of edge: 'Triangular', 'Rectangular', 'Custom', 'User Q'
+    distance_m: float
+        Distance to shore, in m.
+    cust_coef: float
+        Custom coefficient provided by user.
+    number_ensembles: int
+        Number of ensembles to average for depth and velocities.
+    user_discharge_cms: float
+        Original user supplied discharge for edge, in cms.
+    orig_type: str
+        Original shape of edge: 'Triangular', 'Rectangular', 'Custom', 'User Q'
+    orig_distance_m: float
+        Original distance to shore, in m.
+    orig_cust_coef: float
+        Original custom coefficient provided by user.
+    orig_number_ensembles: int
+        Original number of ensembles to average for depth and velocities.
+    orig_user_discharge_cms: float
+        Original user supplied discharge for edge, in cms.
+    """
+    
+    def __init__(self):
+        """Initialize EdgeData.
+        """
+        
+        self.type = None  # Shape of edge: 'Triangular', 'Rectangular', 'Custom', 'User Q'
+        self.distance_m = None  # Distance to shore
+        self.cust_coef = None  # Custom coefficient provided by user
+        self.number_ensembles = None  # Number of ensembles to average for depth and velocities
+        self.user_discharge_cms = None  # User supplied edge discharge
+
+        self.orig_type = None  # Original shape of edge: 'Triangular', 'Rectangular', 'Custom', 'User Q'
+        self.orig_distance_m = None  # Original distance to shore
+        self.orig_cust_coef = None  # Original custom coefficient provided by user
+        self.orig_number_ensembles = None  # Original number of ensembles to average for depth and velocities
+        self.orig_user_discharge_cms = None  # Original user supplied edge discharge
+
+    def populate_data(self, edge_type, distance=None, number_ensembles=10, coefficient=None, user_discharge=None):
+        """Construct left or right edge object from provided inputs
+        
+        Parameters
+        ----------
+        edge_type: str
+            Type of edge (Triangular, Rectangular, Custom, User Q)
+        distance: float
+            Distance to shore, in m.
+        number_ensembles: int
+            Number of edge ensembles for all types but User Q
+        coefficient: float
+            User supplied custom edge coefficient.
+        user_discharge: float
+            User supplied edge discharge, in cms.
+        """
+
+        # Set properties for custom coefficient
+        self.type = edge_type
+        self.distance_m = distance
+        self.number_ensembles = number_ensembles
+        self.user_discharge_cms = user_discharge
+        self.cust_coef = coefficient
+
+        if self.orig_type is None:
+            self.orig_type = edge_type
+            self.orig_distance_m = distance
+            self.orig_number_ensembles = number_ensembles
+            self.orig_user_discharge_cms = user_discharge
+            self.orig_cust_coef = coefficient
+
+    def populate_from_qrev_mat(self, mat_data):
+        """Populates the object using data from previously saved QRev Matlab file.
+
+        Parameters
+        ----------
+        mat_data: mat_struct
+           Matlab data structure obtained from sio.loadmat
+        """
+
+        self.type = mat_data.type
+        self.distance_m = mat_data.dist_m
+        self.number_ensembles = mat_data.numEns2Avg
+        if type(mat_data.userQ_cms) is float:
+            if not np.isnan(mat_data.userQ_cms):
+                self.user_discharge_cms = mat_data.userQ_cms
+        if type(mat_data.custCoef) is float:
+            self.cust_coef = mat_data.custCoef
+        if hasattr(mat_data, 'orig_type'):
+            self.orig_type = mat_data.orig_type
+            self.orig_distance_m = mat_data.orig_distance_m
+            self.orig_number_ensembles = mat_data.orig_number_ensembles
+            if type(mat_data.orig_user_discharge_cms) is float:
+                if not np.isnan(mat_data.orig_user_discharge_cms):
+                    self.orig_user_discharge_cms = mat_data.orig_user_discharge_cms
+            if type(mat_data.orig_cust_coef) is float:
+                self.orig_cust_coef = mat_data.orig_cust_coef
+        else:
+            self.orig_type = mat_data.type
+            self.orig_distance_m = mat_data.dist_m
+            self.orig_number_ensembles = mat_data.numEns2Avg
+            if type(mat_data.userQ_cms) is float:
+                if not np.isnan(mat_data.userQ_cms):
+                    self.orig_user_discharge_cms = mat_data.userQ_cms
+            if type(mat_data.custCoef) is float:
+                self.orig_cust_coef = mat_data.custCoef
+
+    def change_property(self, prop, setting):
+        """Change edge data property
+
+        Parameters
+        ----------
+        prop: str
+            Property to change.
+        setting:
+            New setting for property.
+        """
+        setattr(self, prop, setting)
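+
+
+# Illustrative usage sketch (assumed values, not part of the processing workflow):
+#     edge = EdgeData()
+#     edge.populate_data(edge_type='Triangular', distance=5.0, number_ensembles=10)
+#     edge.change_property('distance_m', 6.5)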
diff --git a/Classes/Edges.py b/Classes/Edges.py
new file mode 100644
index 0000000..436cec2
--- /dev/null
+++ b/Classes/Edges.py
@@ -0,0 +1,77 @@
+from Classes.EdgeData import EdgeData
+
+
+class Edges(object):
+    """Class to store and process edge data.
+
+    Attributes
+    ----------
+    rec_edge_method: str
+        Method used to determine the coefficient for a rectangular edge ('Fixed' or 'Variable').
+    vel_method: str
+        Method used to compute the edge velocity ('MeasMag' or 'VectorProf').
+    left: EdgeData
+        Object of EdgeData for left edge.
+    right: EdgeData
+        Object of EdgeData for right edge.
+    """
+    
+    def __init__(self):
+        """Initialize Edges.
+        """
+
+        self.rec_edge_method = None
+        self.vel_method = None
+        self.left = EdgeData()
+        self.right = EdgeData()
+        
+    def populate_data(self, rec_edge_method, vel_method):
+        """Store the general methods used for edge data.
+
+        Parameters
+        ----------
+        rec_edge_method: str
+            Method used to determine the coefficient for a rectangular edge ('Fixed' or 'Variable').
+        vel_method: str
+            Method used to compute the edge velocity ('MeasMag' or 'VectorProf').
+        """
+        self.rec_edge_method = rec_edge_method
+        self.vel_method = vel_method
+
+    def populate_from_qrev_mat(self, transect):
+        """Populates the object using data from previously saved QRev Matlab file.
+
+        Parameters
+        ----------
+        transect: mat_struct
+           Matlab data structure obtained from sio.loadmat
+       """
+
+        if hasattr(transect, 'edges'):
+            if hasattr(transect.edges, 'left'):
+                self.left = EdgeData()
+                self.left.populate_from_qrev_mat(transect.edges.left)
+            if hasattr(transect.edges, 'right'):
+                self.right = EdgeData()
+                self.right.populate_from_qrev_mat(transect.edges.right)
+            self.rec_edge_method = transect.edges.recEdgeMethod
+            self.vel_method = transect.edges.velMethod
+
+    def change_property(self, prop, setting, edge=None):
+        """Change edge property
+        
+        Parameters
+        ----------
+        prop: str
+            Name of property.
+        setting:
+            New property setting.
+        edge: str
+            Edge to change (left, right)
+        """
+        
+        if edge is None:
+            setattr(self, prop, setting)
+        else:
+            temp = getattr(self, edge)
+            temp.change_property(prop, setting)
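+
+
+# Illustrative usage sketch (assumed values, not part of the processing workflow):
+#     edges = Edges()
+#     edges.populate_data(rec_edge_method='Fixed', vel_method='MeasMag')
+#     edges.change_property('distance_m', 4.2, edge='left')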
diff --git a/Classes/ExtrapData.py b/Classes/ExtrapData.py
new file mode 100644
index 0000000..2a6e1d4
--- /dev/null
+++ b/Classes/ExtrapData.py
@@ -0,0 +1,91 @@
+class ExtrapData(object):
+    """Class to store both original and modified extrapolation settings.
+
+    Attributes
+    ----------
+    top_method_orig: str
+        Original extrapolation method for top of profile: Power, Constant, 3-Point.
+    bot_method_orig: str
+        Original extrapolation method for bottom of profile: Power, No Slip.
+    exponent_orig: float
+        Original exponent for power or no slip methods.
+    top_method: str
+        Applied extrapolation method for top of profile: Power, Constant, 3-Point.
+    bot_method: str
+        Applied extrapolation method for bottom of profile: Power, No Slip.
+    exponent: float
+        Applied exponent for power or no slip methods.
+    """
+    
+    def __init__(self):
+        """Initialize class and set defaults."""
+        self.top_method_orig = None  # Extrapolation method for top of profile: Power, Constant, 3-Point
+        self.bot_method_orig = None  # Extrapolation method for bottom of profile: Power, No Slip
+        self.exponent_orig = None  # Exponent for power or no slip methods
+        self.top_method = None  # Extrapolation method for top of profile: Power, Constant, 3-Point
+        self.bot_method = None  # Extrapolation method for bottom of profile: Power, No Slip
+        self.exponent = None  # Exponent for power or no slip methods
+        
+    def populate_data(self, top, bot, exp):
+        """Store data in class variables.
+
+        Parameters
+        ----------
+        top: str
+            Original top method.
+        bot: str
+            Original bottom method.
+        exp: float
+            Original exponent.
+        """
+        self.top_method_orig = top
+        self.bot_method_orig = bot
+        self.top_method = top
+        self.bot_method = bot
+        self.exponent_orig = float(exp)
+        self.exponent = float(exp)
+
+    def populate_from_qrev_mat(self, transect):
+        """Populates the object using data from previously saved QRev Matlab file.
+
+        Parameters
+        ----------
+        transect: mat_struct
+           Matlab data structure obtained from sio.loadmat
+        """
+
+        if hasattr(transect, 'extrap'):
+            self.top_method_orig = transect.extrap.topMethodOrig
+            self.bot_method_orig = transect.extrap.botMethodOrig
+            self.exponent_orig = transect.extrap.exponentOrig
+            self.top_method = transect.extrap.topMethod
+            self.bot_method = transect.extrap.botMethod
+            self.exponent = transect.extrap.exponent
+
+    def set_extrap_data(self, top, bot, exp):
+        """Store new extrapolation settings
+
+        Parameters
+        ----------
+        top: str
+            New top extrapolation method.
+        bot: str
+            New bottom extrapolation method.
+        exp: float
+            New exponent.
+        """
+        self.top_method = top
+        self.bot_method = bot
+        self.exponent = exp
+        
+    def set_property(self, prop, setting):
+        """Allows setting any property.
+
+        Parameters
+        ----------
+        prop: str
+            Name of property.
+        setting:
+            New setting.
+        """
+        setattr(self, prop, setting)
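+
+
+# Illustrative usage sketch (assumed values, not part of the processing workflow):
+#     extrap = ExtrapData()
+#     extrap.populate_data(top='Power', bot='Power', exp=0.1667)
+#     extrap.set_extrap_data(top='Constant', bot='No Slip', exp=0.2)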
diff --git a/Classes/ExtrapQSensitivity.py b/Classes/ExtrapQSensitivity.py
new file mode 100644
index 0000000..75be866
--- /dev/null
+++ b/Classes/ExtrapQSensitivity.py
@@ -0,0 +1,372 @@
+import numpy as np
+from Classes.QComp import QComp
+
+
+class ExtrapQSensitivity(object):
+    """Class to compute the sensitivity of the discharge to various extrapolation methods.
+
+    Attributes
+    ----------
+    q_pp_mean: float
+        Discharge power power 1/6
+    q_pp_opt_mean: float
+        Discharge power power optimized
+    q_cns_mean: float
+        Discharge constant no slip
+    q_cns_opt_mean: float
+        Discharge constant optimized no slip
+    q_3p_ns_mean: float
+        Discharge 3-pt no slip
+    q_3p_ns_opt_mean: float
+        Discharge 3-pt optimized no slip
+    q_pp_per_diff: float
+        Power power 1/6 percent difference from reference
+    q_pp_opt_per_diff: float
+        Power power optimized percent difference from reference
+    q_cns_per_diff: float
+        Constant no slip percent difference from reference
+    q_cns_opt_per_diff: float
+        Constant optimized no slip percent difference from reference
+    q_3p_ns_per_diff: float
+        3-point no slip percent difference from reference
+    q_3p_ns_opt_per_diff: float
+        3-point optimized no slip percent difference from reference
+    pp_exp: float
+        Optimized power power exponent
+    ns_exp: float
+        Optimized no slip exponent
+    man_top: str
+        Manually specified top method
+    man_bot: str
+        Manually specified bottom method
+    man_exp: float
+        Manually specified exponent
+    q_man_mean: float
+        Mean discharge for manually specified extrapolations
+    q_man_per_diff: float
+        Manually specified extrapolations percent difference from reference
+    q_pp_list: list
+        List of single transect discharges based on default 1/6 power-power law
+    q_pp_opt_list: list
+        List of single transect discharges based on optimized power-power law
+    q_cns_list: list
+        List of single transect discharges based on default 1/6 constant no slip law
+    q_cns_opt_list: list
+        List of single transect discharges based on optimized constant no slip law
+    q_3p_ns_list: list
+        List of single transect discharges based on default 3pt no slip
+    q_3p_ns_opt_list: list
+        List of single transect discharges based on optimized 3pt no slip
+    q_top_pp_list: list
+        List of single transect top discharges based on default 1/6 power-power law
+    q_top_pp_opt_list: list
+        List of single transect top discharges based on optimized power-power law
+    q_top_cns_list: list
+        List of single transect top discharges based on default 1/6 constant no slip law
+    q_top_cns_opt_list: list
+        List of single transect top discharges based on optimized constant no slip law
+    q_top_3p_ns_list: list
+        List of single transect top discharges based on default 3pt no slip
+    q_top_3p_ns_opt_list: list
+        List of single transect top discharges based on optimized 3pt no slip
+    q_bot_pp_list: list
+        List of single transect bottom discharges based on default 1/6 power-power law
+    q_bot_pp_opt_list: list
+        List of single transect bottom discharges based on optimized power-power law
+    q_bot_cns_list: list
+        List of single transect bottom discharges based on default 1/6 constant no slip law
+    q_bot_cns_opt_list: list
+        List of single transect bottom discharges based on optimized constant no slip law
+    q_bot_3p_ns_list: list
+        List of single transect bottom discharges based on default 3pt no slip
+    q_bot_3p_ns_opt_list: list
+        List of single transect bottom discharges based on optimized 3pt no slip
+    """
+    
+    def __init__(self):
+        """Initialize object and instance variables."""
+
+        self.q_pp_mean = None  # Discharge power power 1/6
+        self.q_pp_opt_mean = None  # Discharge power power optimized
+        self.q_cns_mean = None  # Discharge constant no slip
+        self.q_cns_opt_mean = None  # Discharge constant optimized no slip
+        self.q_3p_ns_mean = None  # Discharge 3-pt no slip
+        self.q_3p_ns_opt_mean = None  # Discharge 3-pt optimized no slip
+        self.q_pp_per_diff = None  # Power power 1/6 percent difference from reference
+        self.q_pp_opt_per_diff = None  # Power power optimized percent difference from reference
+        self.q_cns_per_diff = None  # Constant no slip percent difference from reference
+        self.q_cns_opt_per_diff = None  # Constant optimized no slip percent difference from reference
+        self.q_3p_ns_per_diff = None  # 3-point no slip percent difference from reference
+        self.q_3p_ns_opt_per_diff = None  # 3-point optimized no slip percent difference from reference
+        self.pp_exp = None  # Optimized power power exponent
+        self.ns_exp = None  # Optimized no slip exponent
+        self.man_top = None  # Manually specified top method
+        self.man_bot = None  # Manually specified bottom method
+        self.man_exp = None  # Manually specified exponent
+        self.q_man_mean = None  # Mean discharge for manually specified extrapolations
+        self.q_man_per_diff = None  # Manually specified extrapolations percent difference from reference
+        self.q_pp_list = []  # List of single transect discharges based on default 1/6 power-power law
+        self.q_pp_opt_list = []  # List of single transect discharges based on optimized power-power law
+        self.q_cns_list = []  # List of single transect discharges based on default 1/6 constant no slip law
+        self.q_cns_opt_list = []  # List of single transect discharges based on optimized constant no slip law
+        self.q_3p_ns_list = []  # List of single transect discharges based on default 3pt no slip
+        self.q_3p_ns_opt_list = []  # List of single transect discharges based on optimized 3pt no slip
+        self.q_top_pp_list = []  # List of single transect top discharges based on default 1/6 power-power law
+        self.q_top_pp_opt_list = []  # List of single transect top discharges based on optimized power-power law
+        self.q_top_cns_list = []  # List of single transect top discharges based on default 1/6 constant no slip law
+        self.q_top_cns_opt_list = []  # List of single transect top discharges based on optimized constant no slip law
+        self.q_top_3p_ns_list = []  # List of single transect top discharges based on default 3pt no slip
+        self.q_top_3p_ns_opt_list = []  # List of single transect top discharges based on optimized 3pt no slip
+        self.q_bot_pp_list = []  # List of single transect bottom discharges based on default 1/6 power-power law
+        self.q_bot_pp_opt_list = []  # List of single transect bottom discharges based on optimized power-power law
+        self.q_bot_cns_list = []  # List of single transect bottom discharges based on default 1/6 constant no slip law
+        self.q_bot_cns_opt_list = []  # List of single transect bottom discharges based on optimized constant no slip law
+        self.q_bot_3p_ns_list = []  # List of single transect bottom discharges based on default 3pt no slip
+        self.q_bot_3p_ns_opt_list = []  # List of single transect bottom discharges based on optimized 3pt no slip
+        
+    def populate_data(self, transects, extrap_fits):
+        """Compute means and percent differences.
+
+        Parameters
+        ----------
+        transects: list
+            List of objects of TransectData
+        extrap_fits: SelectFit
+            Object of SelectFit
+        """
+        q_pp = []
+        q_pp_opt = []
+        q_cns = []
+        q_cns_opt = []
+        q_3p_ns = []
+        q_3p_ns_opt = []
+        self.pp_exp = extrap_fits[-1].pp_exponent
+        self.ns_exp = extrap_fits[-1].ns_exponent
+
+        # Store top discharges
+        q_pp_top = []
+        q_pp_opt_top = []
+        q_cns_top = []
+        q_cns_opt_top = []
+        q_3p_ns_top = []
+        q_3p_ns_opt_top = []
+
+        # Store bottom discharges
+        q_pp_bot = []
+        q_pp_opt_bot = []
+        q_cns_bot = []
+        q_cns_opt_bot = []
+        q_3p_ns_bot = []
+        q_3p_ns_opt_bot = []
+
+        # Compute discharges for each transect for possible extrapolation combinations
+        for transect in transects:
+            if transect.checked:
+                q = QComp()
+
+                q.populate_data(data_in=transect, top_method='Power', bot_method='Power', exponent=0.1667)
+                q_pp.append(q.total)
+                q_pp_top.append(q.top)
+                q_pp_bot.append(q.bottom)
+
+                q.populate_data(data_in=transect, top_method='Power', bot_method='Power', exponent=self.pp_exp)
+                q_pp_opt.append(q.total)
+                q_pp_opt_top.append(q.top)
+                q_pp_opt_bot.append(q.bottom)
+
+                q.populate_data(data_in=transect, top_method='Constant', bot_method='No Slip', exponent=0.1667)
+                q_cns.append(q.total)
+                q_cns_top.append(q.top)
+                q_cns_bot.append(q.bottom)
+
+                q.populate_data(data_in=transect, top_method='Constant', bot_method='No Slip', exponent=self.ns_exp)
+                q_cns_opt.append(q.total)
+                q_cns_opt_top.append(q.top)
+                q_cns_opt_bot.append(q.bottom)
+
+                q.populate_data(data_in=transect, top_method='3-Point', bot_method='No Slip', exponent=0.1667)
+                q_3p_ns.append(q.total)
+                q_3p_ns_top.append(q.top)
+                q_3p_ns_bot.append(q.bottom)
+
+                q.populate_data(data_in=transect, top_method='3-Point', bot_method='No Slip', exponent=self.ns_exp)
+                q_3p_ns_opt.append(q.total)
+                q_3p_ns_opt_top.append(q.top)
+                q_3p_ns_opt_bot.append(q.bottom)
+
+        # Compute mean discharge for each combination
+        self.q_pp_mean = np.nanmean(q_pp)
+        self.q_pp_opt_mean = np.nanmean(q_pp_opt)
+        self.q_cns_mean = np.nanmean(q_cns)
+        self.q_cns_opt_mean = np.nanmean(q_cns_opt)
+        self.q_3p_ns_mean = np.nanmean(q_3p_ns)
+        self.q_3p_ns_opt_mean = np.nanmean(q_3p_ns_opt)
+
+        # Save all single-transect discharges
+        self.q_pp_list = q_pp
+        self.q_pp_opt_list = q_pp_opt
+        self.q_cns_list = q_cns
+        self.q_cns_opt_list = q_cns_opt
+        self.q_3p_ns_list = q_3p_ns
+        self.q_3p_ns_opt_list = q_3p_ns_opt
+
+        # Save all single-transect top discharges
+        self.q_top_pp_list = q_pp_top
+        self.q_top_pp_opt_list = q_pp_opt_top
+        self.q_top_cns_list = q_cns_top
+        self.q_top_cns_opt_list = q_cns_opt_top
+        self.q_top_3p_ns_list = q_3p_ns_top
+        self.q_top_3p_ns_opt_list = q_3p_ns_opt_top
+
+        # Save all single-transect bottom discharges
+        self.q_bot_pp_list = q_pp_bot
+        self.q_bot_pp_opt_list = q_pp_opt_bot
+        self.q_bot_cns_list = q_cns_bot
+        self.q_bot_cns_opt_list = q_cns_opt_bot
+        self.q_bot_3p_ns_list = q_3p_ns_bot
+        self.q_bot_3p_ns_opt_list = q_3p_ns_opt_bot
+
+        self.compute_percent_diff(extrap_fits=extrap_fits, transects=transects)
+
+    def populate_from_qrev_mat(self, mat_data):
+        """Populates the object using data from previously saved QRev Matlab file.
+
+        Parameters
+        ----------
+        mat_data: mat_struct
+           Matlab data structure obtained from sio.loadmat
+        """
+
+        if hasattr(mat_data, 'qSensitivity'):
+            self.q_pp_mean = mat_data.qSensitivity.qPPmean
+            self.q_pp_opt_mean = mat_data.qSensitivity.qPPoptmean
+            self.q_cns_mean = mat_data.qSensitivity.qCNSmean
+            self.q_cns_opt_mean = mat_data.qSensitivity.qCNSoptmean
+            self.q_3p_ns_mean = mat_data.qSensitivity.q3pNSmean
+            self.q_3p_ns_opt_mean = mat_data.qSensitivity.q3pNSoptmean
+
+            # For compatibility with older QRev.mat files
+            if hasattr(mat_data.qSensitivity, 'qPPperdiff'):
+                self.q_pp_per_diff = mat_data.qSensitivity.qPPperdiff
+            else:
+                self.q_pp_per_diff = np.nan
+
+            self.q_pp_opt_per_diff = mat_data.qSensitivity.qPPoptperdiff
+            self.q_cns_per_diff = mat_data.qSensitivity.qCNSperdiff
+            self.q_cns_opt_per_diff = mat_data.qSensitivity.qCNSoptperdiff
+            self.q_3p_ns_per_diff = mat_data.qSensitivity.q3pNSperdiff
+            self.q_3p_ns_opt_per_diff = mat_data.qSensitivity.q3pNSoptperdiff
+            self.pp_exp = mat_data.qSensitivity.ppExponent
+            self.ns_exp = mat_data.qSensitivity.nsExponent
+
+            # If a manual fit was used
+            if len(mat_data.qSensitivity.manTop) > 0:
+                self.man_top = mat_data.qSensitivity.manTop
+                self.man_bot = mat_data.qSensitivity.manBot
+                self.man_exp = mat_data.qSensitivity.manExp
+                self.q_man_mean = mat_data.qSensitivity.qManmean
+                self.q_man_per_diff = mat_data.qSensitivity.qManperdiff
+
+            # Add compatibility for Oursin uncertainty model
+            if hasattr(mat_data.qSensitivity, 'q_pp_list'):
+                self.q_pp_list = mat_data.qSensitivity.q_pp_list
+                self.q_pp_opt_list = mat_data.qSensitivity.q_pp_opt_list
+                self.q_cns_list = mat_data.qSensitivity.q_cns_list
+                self.q_cns_opt_list = mat_data.qSensitivity.q_cns_opt_list
+                self.q_3p_ns_list = mat_data.qSensitivity.q_3p_ns_list
+                self.q_3p_ns_opt_list = mat_data.qSensitivity.q_3p_ns_opt_list
+                self.q_top_pp_list = mat_data.qSensitivity.q_top_pp_list
+                self.q_top_pp_opt_list = mat_data.qSensitivity.q_top_pp_opt_list
+                self.q_top_cns_list = mat_data.qSensitivity.q_top_cns_list
+                self.q_top_cns_opt_list = mat_data.qSensitivity.q_top_cns_opt_list
+                self.q_top_3p_ns_list = mat_data.qSensitivity.q_top_3p_ns_list
+                self.q_top_3p_ns_opt_list = mat_data.qSensitivity.q_top_3p_ns_opt_list
+                self.q_bot_pp_list = mat_data.qSensitivity.q_bot_pp_list
+                self.q_bot_pp_opt_list = mat_data.qSensitivity.q_bot_pp_opt_list
+                self.q_bot_cns_list = mat_data.qSensitivity.q_bot_cns_list
+                self.q_bot_cns_opt_list = mat_data.qSensitivity.q_bot_cns_opt_list
+                self.q_bot_3p_ns_list = mat_data.qSensitivity.q_bot_3p_ns_list
+                self.q_bot_3p_ns_opt_list = mat_data.qSensitivity.q_bot_3p_ns_opt_list
+            else:
+                self.q_pp_list = []
+                self.q_pp_opt_list = []
+                self.q_cns_list = []
+                self.q_cns_opt_list = []
+                self.q_3p_ns_list = []
+                self.q_3p_ns_opt_list = []
+                self.q_top_pp_list = []
+                self.q_top_pp_opt_list = []
+                self.q_top_cns_list = []
+                self.q_top_cns_opt_list = []
+                self.q_top_3p_ns_list = []
+                self.q_top_3p_ns_opt_list = []
+                self.q_bot_pp_list = []
+                self.q_bot_pp_opt_list = []
+                self.q_bot_cns_list = []
+                self.q_bot_cns_opt_list = []
+                self.q_bot_3p_ns_list = []
+                self.q_bot_3p_ns_opt_list = []
+
+    def compute_percent_diff(self, extrap_fits, transects=None):
+        """Computes the percent difference for each of the extrapolation options as compared to selected method.
+
+        Parameters
+        ----------
+        extrap_fits: SelectFit
+            Object of SelectFit
+        transects: list
+            List of TransectData objects
+        """
+        # Determine which mean is the reference
+        if extrap_fits[-1].fit_method == 'Manual':
+            self.man_top = extrap_fits[-1].top_method
+            self.man_bot = extrap_fits[-1].bot_method
+            self.man_exp = extrap_fits[-1].exponent
+
+            if transects is not None:
+                q_man = []
+                checked = []
+                # Compute discharge for each transect
+                for transect in transects:
+                    q = QComp()
+                    checked.append(transect.checked)
+
+                    q.populate_data(data_in=transect,
+                                    top_method=self.man_top,
+                                    bot_method=self.man_bot,
+                                    exponent=self.man_exp)
+                    q_man.append(q)
+                container = []
+                for index, item in enumerate(q_man):
+                    if checked[index]:
+                        container.append(item.total)
+                self.q_man_mean = np.nanmean(container)
+            reference_mean = self.q_man_mean
+
+        else:
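+            # The automatically selected fit provides the reference; an exponent within
+            # 1e-4 of 1/6 (0.1667) is treated as the default rather than an optimized fit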
+            if extrap_fits[-1].top_method_auto == 'Power':
+                if np.abs(extrap_fits[-1].exponent_auto - 0.1667) < 0.0001:
+                    reference_mean = self.q_pp_mean
+                else:
+                    reference_mean = self.q_pp_opt_mean
+            elif extrap_fits[-1].top_method_auto == 'Constant':
+                if np.abs(extrap_fits[-1].exponent_auto - 0.1667) < 0.0001:
+                    reference_mean = self.q_cns_mean
+                else:
+                    reference_mean = self.q_cns_opt_mean
+            else:
+                if np.abs(extrap_fits[-1].exponent_auto - 0.1667) < 0.0001:
+                    reference_mean = self.q_3p_ns_mean
+                else:
+                    reference_mean = self.q_3p_ns_opt_mean
+
+        # Compute percent difference from reference
+        self.q_pp_per_diff = ((self.q_pp_mean - reference_mean) / reference_mean) * 100
+        self.q_pp_opt_per_diff = ((self.q_pp_opt_mean - reference_mean) / reference_mean) * 100
+        self.q_cns_per_diff = ((self.q_cns_mean - reference_mean) / reference_mean) * 100
+        self.q_cns_opt_per_diff = ((self.q_cns_opt_mean - reference_mean) / reference_mean) * 100
+        self.q_3p_ns_per_diff = ((self.q_3p_ns_mean - reference_mean) / reference_mean) * 100
+        self.q_3p_ns_opt_per_diff = ((self.q_3p_ns_opt_mean - reference_mean) / reference_mean) * 100
+
+        if extrap_fits[-1].fit_method == 'Manual':
+            self.q_man_per_diff = ((self.q_man_mean - reference_mean) / reference_mean) * 100
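+
+
+# Illustrative usage sketch (attribute names on `measurement` are assumptions from the
+# broader code base, not defined in this file):
+#     sensitivity = ExtrapQSensitivity()
+#     sensitivity.populate_data(transects=measurement.transects,
+#                               extrap_fits=measurement.extrap_fit.sel_fit)
+#     print(sensitivity.q_pp_per_diff)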
diff --git a/Classes/FitData.py b/Classes/FitData.py
new file mode 100644
index 0000000..da61b86
--- /dev/null
+++ b/Classes/FitData.py
@@ -0,0 +1,284 @@
+import numpy as np
+from scipy.optimize import curve_fit
+from scipy.stats import t
+
+
+class FitData(object):
+    """Class to compute top and bottom extrapolation methods and associated statistics.
+
+     Data required for the constructor method include data of class
+     NormData, threshold for the minimum number of points for a valid
+     median, top extrapolation method, bottom extrapolation method, type
+     of fit, and if a manual fit, the exponent.
+
+    Attributes
+    ----------
+    file_name: str
+        Name of transect file
+    top_method: str
+        Top extrapolation method
+    bot_method: str
+        Bottom extrapolation method
+    coef: float
+        Power fit coefficient
+    exponent: float
+        Power fit exponent
+    u: np.array(float)
+        Fit values of the variable
+    u_auto: np.array(float)
+        Fit values from automatic fit
+    z_auto: np.array(float)
+        z values for automatic fit
+    z: np.array(float)
+        Distance from the streambed for fit variable
+    exp_method: str
+        Method to determine exponent (default, optimize, or manual)
+    data_type: str
+        Type of data (v, q, V, or Q)
+    exponent_95_ci: np.array(float)
+        95% confidence intervals for optimized exponent
+    residuals: np.array(float)
+        Residuals from fit
+    r_squared: float
+        R squared of model
+    """
+
+    def __init__(self):
+        """Initialize object and instance variables."""
+
+        self.file_name = None  # Name of transect file
+        self.top_method = 'Power'  # Top extrapolation method
+        self.bot_method = 'Power'  # Bottom extrapolation method
+        self.coef = 0  # Power fit coefficient
+        self.exponent = 0.1667  # Power fit exponent
+        self.u = None  # Fit values of the variable
+        self.u_auto = None  # Fit values from automatic fit
+        self.z_auto = None  # z values for automatic fit
+        self.z = None  # Distance from the streambed for fit variable
+        self.exp_method = 'Power'  # Method to determine exponent (default, optimize, or manual)
+        self.data_type = None  # Type of data (velocity or unit discharge)
+        self.exponent_95_ci = 0  # 95% confidence intervals for optimized exponent
+        self.residuals = np.array([])  # Residuals from fit
+        self.r_squared = 0  # R squared of model
+
+    def populate_data(self, norm_data, top, bot, method, exponent=None):
+        """Computes fit and stores associated data.
+
+        Parameters
+        ----------
+        norm_data: NormData
+            Object of NormData
+        top: str
+            Top extrapolation method
+        bot: str
+            Bottom extrapolation method
+        method: str
+            Method used to define the exponent (default, optimize, or manual); the default exponent is 1/6.
+        exponent: float
+            Exponent for power or no slip fit methods.
+        """
+
+        avg_z = norm_data.unit_normalized_z
+        y = norm_data.unit_normalized_med
+        idxz = norm_data.valid_data
+        zc = np.nan
+
+        lower_bound = [-np.inf, 0.01]
+        upper_bound = [np.inf, 1]
+        bounds = None
+        p0 = None
+        uc = np.nan
+
+        # Process data if available
+        if len(idxz) > 0:
+            idx_power = idxz
+
+            # Create arrays for data fitting
+            # Select median values to use in the selected extrapolation methods and
+            # create the fit output data arrays
+
+            # If bottom is No Slip, Power at top is not allowed
+            if bot == 'No Slip':
+                if top == 'Power':
+                    top = 'Constant'
+
+            fit_combo = ''.join([top, bot])
+            if fit_combo == 'PowerPower':
+                self.z = np.arange(0, 1.01, 0.01)
+                zc = np.nan
+                uc = np.nan
+            elif fit_combo == 'ConstantPower':
+                self.z = np.arange(0, np.max(avg_z[idxz]), 0.01)
+                self.z = np.hstack([self.z, np.nan])
+                zc = np.arange(np.max(avg_z[idxz]) + 0.01, 1.0, 0.01)
+                uc = np.tile(y[idxz[0]], zc.shape)
+            elif fit_combo == '3-PointPower':
+                self.z = np.arange(0, np.max(avg_z[idxz]), 0.01)
+                self.z = np.hstack([self.z, np.nan])
+                # If less than 6 bins use constant at the top
+                if len(idxz) < 6:
+                    zc = np.arange(np.max(avg_z[idxz]) + 0.01, 1.0, 0.01)
+                    uc = np.tile(y[idxz[0]], zc.shape)
+                else:
+                    p = np.polyfit(avg_z[idxz[0:3]], y[idxz[0:3]], 1)
+                    zc = np.arange(np.max(avg_z[idxz]) + 0.01, 1.0, 0.01)
+                    uc = zc * p[0] + p[1]
+
+            elif fit_combo == 'ConstantNo Slip':
+                # Optimize constant / no slip if sufficient cells are available
+                if method.lower() == 'optimize':
+                    idx = idxz[int(1+len(idxz) - np.floor(len(avg_z[idxz]) / 3) - 1)::]
+                    if len(idx) < 4:
+                        method = 'default'
+
+                # Compute Constant / No Slip using WinRiver II and
+                # RiverSurveyor Live default cells
+                else:
+                    idx = np.where(avg_z[idxz] <= .2)[0]
+                    if len(idx) < 1:
+                        idx = idxz[-1]
+                    else:
+                        idx = idxz[idx]
+
+                # Configures u and z arrays
+                idxns = np.array([idx]).T
+                self.z = np.arange(0, avg_z[idxns[0]], 0.01)
+                self.z = np.hstack([self.z, [np.nan]])
+                idx_power = idx
+                zc = np.arange(np.max(avg_z[idxz]) + 0.01, 1.00, 0.01)
+                uc = np.tile(y[idxz[0]], zc.shape)
+
+            elif fit_combo == '3-PointNo Slip':
+                # Optimize 3-Point / no slip if sufficient cells are available
+                if method.lower() == 'optimize':
+                    idx = idxz[int(1 + len(idxz) - np.floor(len(avg_z[idxz]) / 3) - 1)::]
+                    if len(idx) < 4:
+                        method = 'default'
+
+                # Compute 3-Point / No Slip using WinRiver II and
+                # RiverSurveyor Live default cells
+                else:
+                    idx = np.where(avg_z[idxz] <= .2)[0]
+                    if len(idx) < 1:
+                        idx = idxz[-1]
+                    else:
+                        idx = idxz[idx]
+
+                # Configures u and z arrays
+                idxns = np.array([idx]).T
+                self.z = np.arange(0, avg_z[idxns[0]], 0.01)
+                self.z = np.hstack([self.z, [np.nan]])
+                idx_power = idx
+                # If less than 6 bins use constant at the top
+                if len(idxz) < 6:
+                    zc = np.arange(np.max(avg_z[idxz]) + 0.01, 1.0, 0.01)
+                    uc = np.tile(y[idxz[0]], zc.shape)
+                else:
+                    p = np.polyfit(avg_z[idxz[0:3]], y[idxz[0:3]], 1)
+                    zc = np.arange(np.max(avg_z[idxz]) + 0.01, 1.0, 0.01)
+                    uc = zc * p[0] + p[1]
+
+            # Compute exponent
+            zfit = avg_z[idx_power]
+            yfit = y[idx_power]
+
+            # Check data validity
+            ok_ = np.logical_and(np.isfinite(zfit), np.isfinite(yfit))
+
+            self.exponent = np.nan
+            self.exponent_95_ci = np.nan
+            self.r_squared = np.nan
+            fit_func = 'linear'
+
+            lower_method = method.lower()
+
+            if lower_method == 'manual':
+                fit_func = 'linear'
+                self.exponent = exponent
+                bounds = None
+                p0 = None
+
+            elif lower_method == 'default':
+                fit_func = 'linear'
+                self.exponent = 1./6.
+                bounds = None
+                p0 = None
+
+            elif lower_method == 'optimize':
+                fit_func = 'power'
+                bounds = [lower_bound, upper_bound]
+                strt = yfit[ok_]
+                p0 = [strt[-1], 1./6]
+
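+            # 'linear' fits only the coefficient a with the exponent held fixed;
+            # 'power' fits both the coefficient a and the exponent b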
+            fit_funcs = {
+                'linear': lambda x, a: a * x**self.exponent,
+                'power': lambda x, a, b: a * x**b
+            }
+
+            if ok_.size > 1:
+                if bounds is not None:
+                    popt, pcov = curve_fit(fit_funcs[fit_func],
+                                           zfit, yfit, p0=p0, bounds=bounds)
+                else:
+                    popt, pcov = curve_fit(fit_funcs[fit_func],
+                                           zfit, yfit, p0=p0)
+
+                # Extract exponent and confidence intervals from fit
+                if lower_method == 'optimize':
+                    self.exponent = popt[1]
+                    if self.exponent is None or self.exponent < 0.05:
+                        self.exponent = 0.05
+
+                if len(zfit[ok_]) > 2:
+
+                    n = len(zfit)    # number of data points
+
+                    t_val = t.ppf(.975, n-2)
+
+                    # Get 95% confidence intervals
+                    lower = (popt[-1] - t_val * np.sqrt(np.diag(pcov)[-1]))
+                    upper = (popt[-1] + t_val * np.sqrt(np.diag(pcov)[-1]))
+                    self.exponent_95_ci = np.hstack([lower, upper])
+
+                    # Get the rsquared for the model
+                    ss_tot = np.sum((y[idx_power] - np.mean(yfit))**2)
+                    ss_res = np.sum((y[idx_power] - fit_funcs[fit_func](zfit, *popt))**2)
+                    self.r_squared = 1 - (ss_res/ss_tot)
+                else:
+                    self.exponent_95_ci = [np.nan, np.nan]
+                    self.r_squared = np.nan
+
+            # Fit power curve to appropriate data
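+            # The coefficient scales the power law so that the integral of
+            # coef * z**exponent over the 0.05-wide normalized-depth bins equals
+            # 0.05 times the sum of the median values in those bins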
+            self.coef = ((self.exponent + 1) * 0.05 * np.nansum(y[idx_power])) / \
+                np.nansum(((avg_z[idx_power] + (0.5 * 0.05))**(self.exponent + 1)
+                           - ((avg_z[idx_power] - (0.5 * 0.05))**(self.exponent + 1))))
+
+            # Compute residuals
+            self.residuals = y[idx_power] - self.coef * avg_z[idx_power]**self.exponent
+            if self.residuals is None:
+                self.residuals = np.array([])
+
+            # Compute values (velocity or discharge) based on exponent and compute coefficient
+            self.u = self.coef * self.z**self.exponent
+            if isinstance(zc, np.ndarray):
+                self.u = np.append(self.u, uc)
+                self.z = np.append(self.z, zc)
+
+            # Assign variables to object properties
+            self.file_name = norm_data.file_name
+            self.top_method = top
+            self.bot_method = bot
+            self.exp_method = method
+            self.data_type = norm_data.data_type
+
+        else:
+            # If no data are valid, simply store the methods and metadata
+            self.exponent = np.nan
+            self.exponent_95_ci = [np.nan, np.nan]
+            self.r_squared = np.nan
+            self.file_name = norm_data.file_name
+            self.top_method = top
+            self.bot_method = bot
+            self.exp_method = method
+            self.data_type = norm_data.data_type
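+
+
+# Illustrative usage sketch (assumed inputs, not part of the processing workflow):
+#     fit = FitData()
+#     fit.populate_data(norm_data, top='Power', bot='Power', method='optimize')
+#     print(fit.exponent, fit.r_squared)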
diff --git a/Classes/GPSData.py b/Classes/GPSData.py
new file mode 100644
index 0000000..1ca09b5
--- /dev/null
+++ b/Classes/GPSData.py
@@ -0,0 +1,731 @@
+import utm
+import numpy as np
+from MiscLibs.common_functions import azdeg2rad, pol2cart, nans, nan_less
+
+
+class GPSData(object):
+    """Class containing the raw GPS data and algorithms to convert 
+    that raw data to boat velocity.
+
+    Attributes
+    ----------
+    # Raw properties:
+        raw_gga_lat_deg: np.array(float)
+            Raw latitude in degrees, [ensemble,n]
+        raw_gga_lon_deg: np.array(float)
+            Raw longitude in degrees, [ensemble,n]
+        raw_gga_altitude_m: np.array(float)
+            Raw altitude in meters, [ensemble,n]
+        raw_gga_differential: np.array(float)
+            Raw differential correction indicator, [ensemble,n]
+        raw_gga_hdop: np.array(float)
+            Raw horizontal dilution of precision, [ensemble,n]
+        raw_gga_utc: np.array(float)
+            Raw UTC time, hhmmss.ss, [ensemble,n]
+        raw_gga_serial_time: np.array(float)
+            Raw UTC time of gga data in seconds past midnight, [ensemble,n]
+        raw_gga_num_sats: np.array(float)
+            Raw number of satellites reported in gga sentence, [ensemble,n]
+        raw_vtg_course_deg: np.array(float)
+            Raw course in degrees, [ensemble,n]
+        raw_vtg_speed_mps: np.array(float)
+            Raw speed in m/s, [ensemble,n]
+        raw_vtg_delta_time: np.array(float)
+            Raw vtg delta time (sec), [ensemble,n]
+        raw_vtg_mode_indicator: np.array(float)
+            Raw vtg mode indicator, [ensemble,n]
+        raw_gga_delta_time: np.array(float)
+            Raw gga delta time (sec), [ensemble,n]
+
+    # Manufacturer assigned ensemble values:
+        ext_gga_lat_deg: np.array(float)
+            Latitude for each ensemble, in degrees [ensemble]
+        ext_gga_lon_deg: np.array(float)
+            Longitude for each ensemble, in degrees [ensemble]
+        ext_gga_altitude_m: np.array(float)
+            Altitude for each ensemble, in meters [ensemble]
+        ext_gga_differential: np.array(float)
+            Differential correction indicator for each ensemble [ensemble]
+        ext_gga_hdop: np.array(float)
+            Horizontal dilution of precision for each ensemble [ensemble]
+        ext_gga_utc: np.array(float)
+            UTC time, hhmmss.ss for each ensemble [ensemble]
+        ext_gga_serial_time: np.array(float)
+            UTC time of gga data in seconds past midnight for each ensemble [ensemble]
+        ext_gga_num_sats: np.array(float)
+            Number of satellites for each ensemble [ensemble]
+        ext_vtg_course_deg: np.array(float)
+            Course for each ensemble, in degrees [ensemble]
+        ext_vtg_speed_mps: np.array(float)
+            Speed for each ensemble, in m/s [ensemble]
+
+    # User specifications:
+        gga_position_method: str
+            Method used to process gga data for position ('End', 'Average', 'External')
+        gga_velocity_method: str
+            Method used to process gga data for velocity ('End', 'Average', 'External')
+        vtg_velocity_method: str
+            Method used to process vtg data for velocity ('Average', 'External')
+
+    # Computed properties:
+        gga_lat_ens_deg: np.array(float)
+            Processed latitude, in degrees [ensemble]
+        gga_lon_ens_deg: np.array(float)
+            Processed longitude, in degrees [ensemble]
+        utm_ens_m: np.array(float)
+            UTM position from processed gga data, in m [2,ensemble]
+        gga_velocity_ens_mps: np.array(float)
+            Boat velocity components computed from gga data, in m/s [2,ensemble]
+        gga_serial_time_ens: np.array(float)
+            UTC time of gga data, in seconds past midnight [ensemble]
+        vtg_velocity_ens_mps: np.array(float)
+            Boat velocity components computed from vtg data, in m/s [2,ensemble]
+        per_good_ens: np.array(float)
+            Percentage of available data used to compute ensemble value [ensemble]
+        hdop_ens: np.array(float)
+            HDOP for each ensemble using velocity method [ensemble]
+        num_sats_ens: np.array(float)
+            Number of satellites for each ensemble, using velocity method [ensemble]
+        altitude_ens_m: np.array(float)
+            Altitude for each ensemble, using velocity method [ensemble]
+        diff_qual_ens: np.array(float)
+            Differential quality for each ensemble, using velocity method [ensemble]
+    """
+    
+    def __init__(self):
+        """Initialize instance variables.
+        """
+        
+        # Raw properties
+        self.raw_gga_lat_deg = None         # Raw latitude, in degrees [ensemble,n]
+        self.raw_gga_lon_deg = None         # Raw longitude, in degrees [ensemble,n]
+        self.raw_gga_altitude_m = None      # Raw altitude, in meters [ensemble,n]
+        self.raw_gga_differential = None    # Differential correction indicator [ensemble,n]
+        self.raw_gga_hdop = None            # Horizontal dilution of precision [ensemble,n]
+        self.raw_gga_utc = None             # UTC time, in hhmmss.ss [ensemble,n]
+        self.raw_gga_serial_time = None     # UTC time of gga data, in seconds past midnight [ensemble,n]
+        self.raw_gga_num_sats = None        # Number of satellites reported in gga sentence [ensemble,n]
+        self.raw_vtg_course_deg = None      # Course, in degrees [ensemble,n]
+        self.raw_vtg_speed_mps = None       # Speed, in m/s [ensemble,n]
+        self.raw_vtg_delta_time = None      # vtg delta time, in sec [ensemble,n]
+        self.raw_vtg_mode_indicator = None  # vtg mode indicator [ensemble,n]
+        self.raw_gga_delta_time = None      # gga delta time, in sec [ensemble,n]
+        
+        # Manufacturer assigned ensemble values
+        self.ext_gga_lat_deg = None         # Raw latitude, in degrees [1,ensemble]
+        self.ext_gga_lon_deg = None         # Raw longitude, in degrees [1,ensemble]
+        self.ext_gga_altitude_m = None      # Raw altitude, in meters [1,ensemble]
+        self.ext_gga_differential = None    # Differential correction indicator [1,ensemble]
+        self.ext_gga_hdop = None            # Horizontal dilution of precision [1,ensemble]
+        self.ext_gga_utc = None             # UTC time, in hhmmss.ss [1, ensemble]
+        self.ext_gga_serial_time = None     # UTC time of gga data, in seconds past midnight [1,ensemble]
+        self.ext_gga_num_sats = None        # Number of satellites reported by software [1,ensemble]
+        self.ext_vtg_course_deg = None      # Course, in degrees [1, ensemble]
+        self.ext_vtg_speed_mps = None       # Speed, in m/s [1, ensemble]
+       
+        # User specification
+        self.gga_position_method = None     # Method used to process gga data for position ('End', 'Average', 'External')
+        self.gga_velocity_method = None     # Method used to process gga data for velocity ('End', 'Average', 'External')
+        self.vtg_velocity_method = None     # Method used to process vtg data for velocity ('Average', 'External')
+        
+        # Computed properties for ensembles
+        self.gga_lat_ens_deg = None         # Processed latitude in degrees, [ensemble]
+        self.gga_lon_ens_deg = None         # Processed longitude in degrees, [ensemble]
+        self.utm_ens_m = None               # UTM position from processed gga data, [2,ensemble]
+        self.gga_velocity_ens_mps = None    # Boat velocity computed from gga data [2,ensemble]
+        self.gga_serial_time_ens = None     # UTC time of gga data in seconds past midnight, [ensemble]
+        self.vtg_velocity_ens_mps = None    # Boat velocity computed from vtg data [2,ensemble]
+        self.per_good_ens = None            # Percentage of available data used to compute ensemble value [ensemble]
+        self.hdop_ens = None                # HDOP for each ensemble using velocity method [ensemble]
+        self.num_sats_ens = None            # Number of satellites for each ensemble, using velocity method [ensemble]
+        self.altitude_ens_m = None          # Altitude for each ensemble, using velocity method [ensemble]
+        self.diff_qual_ens = None           # Differential quality for each ensemble, using velocity method [ensemble]
+        
+    def populate_data(self, raw_gga_utc, raw_gga_lat, raw_gga_lon, raw_gga_alt, raw_gga_diff,
+                      raw_gga_hdop, raw_gga_num_sats, raw_gga_delta_time, raw_vtg_course, raw_vtg_speed,
+                      raw_vtg_delta_time, raw_vtg_mode_indicator, ext_gga_utc, ext_gga_lat, ext_gga_lon, ext_gga_alt,
+                      ext_gga_diff, ext_gga_hdop, ext_gga_num_sats, ext_vtg_course, ext_vtg_speed,
+                      gga_p_method, gga_v_method, vtg_method):
+        """Store and process provided data in GPSData class.
+
+        Parameters
+        ----------
+        raw_gga_utc: np.array(float)
+            Raw UTC time, hhmmss.ss, [ensemble,n]
+        raw_gga_lat: np.array(float)
+            Raw latitude in degrees, [ensemble,n]
+        raw_gga_lon: np.array(float)
+            Raw longitude in degrees, [ensemble,n]
+        raw_gga_alt: np.array(float)
+            Raw altitude in meters, [ensemble,n]
+        raw_gga_diff: np.array(float)
+            Raw differential correction indicator, [ensemble,n]
+        raw_gga_hdop: np.array(float)
+            Raw horizontal dilution of precision, [ensemble,n]
+        raw_gga_num_sats: np.array(float)
+            Raw number of satellites reported in gga sentence, [ensemble,n]
+        raw_gga_delta_time: np.array(float)
+            Raw gga delta time (sec), [ensemble,n]
+        raw_vtg_course: np.array(float)
+            Raw course in degrees, [ensemble,n]
+        raw_vtg_speed: np.array(float)
+            Raw speed in m/s, [ensemble,n]
+        raw_vtg_delta_time: np.array(float)
+            Raw vtg delta time (sec), [ensemble,n]
+        raw_vtg_mode_indicator: np.array(float)
+            Raw vtg mode indicator, [ensemble,n]
+        ext_gga_utc: np.array(float)
+            UTC time, hhmmss.ss for each ensemble [ensemble]
+        ext_gga_lat: np.array(float)
+            Latitude for each ensemble, in degrees [ensemble]
+        ext_gga_lon: np.array(float)
+            Longitude for each ensemble, in degrees [ensemble]
+        ext_gga_alt: np.array(float)
+            Altitude for each ensemble, in meters [ensemble]
+        ext_gga_diff: np.array(float)
+            Differential correction indicator for each ensemble [ensemble]
+        ext_gga_hdop: np.array(float)
+            Horizontal dilution of precision for each ensemble [ensemble]
+        ext_gga_num_sats: np.array(float)
+            Number of satellites for each ensemble [ensemble]
+        ext_vtg_course: np.array(float)
+            Course for each ensemble, in degrees [ensemble]
+        ext_vtg_speed: np.array(float)
+            Speed for each ensemble, in m/s [ensemble]
+        gga_p_method: str
+            Method used to process gga data for position ('End', 'Average', 'External')
+        gga_v_method: str
+            Method used to process gga data for velocity ('End', 'Average', 'External')
+        vtg_method: str
+            Method used to process vtg data for velocity ('Average', 'External')
+        """
+
+        # Assign input to raw properties
+        if raw_gga_utc is None:
+            self.raw_gga_utc = np.tile([np.nan], raw_gga_lat.shape)
+            self.raw_gga_serial_time = np.tile([np.nan], raw_gga_lat.shape)
+        else:
+            self.raw_gga_utc = raw_gga_utc
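+            # Convert hhmmss.ss to seconds past midnight,
+            # e.g., 125530.25 (12:55:30.25) -> 12 * 3600 + 55 * 60 + 30.25 = 46530.25 s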
+            self.raw_gga_serial_time = np.floor(raw_gga_utc / 10000) * 3600 \
+                + np.floor(np.mod(raw_gga_utc, 10000, where=~np.isnan(raw_gga_utc)) / 100) \
+                                       * 60 + np.mod(raw_gga_utc, 100, where=~np.isnan(raw_gga_utc))
+
+        self.raw_gga_lat_deg = raw_gga_lat
+        self.raw_gga_lon_deg = raw_gga_lon
+        self.raw_gga_lat_deg[np.where(np.logical_and((self.raw_gga_lat_deg == 0),
+                                                     (self.raw_gga_lon_deg == 0)))] = np.nan
+        self.raw_gga_lat_deg[nan_less(raw_gga_diff, 1)] = np.nan
+        self.raw_gga_lon_deg[np.isnan(self.raw_gga_lat_deg)] = np.nan
+        self.raw_gga_altitude_m = raw_gga_alt
+        self.raw_gga_altitude_m[np.isnan(self.raw_gga_lat_deg)] = np.nan
+        self.raw_gga_differential = raw_gga_diff.astype('float')
+        self.raw_gga_differential[np.isnan(self.raw_gga_lat_deg)] = np.nan
+        self.raw_gga_hdop = raw_gga_hdop.astype('float')
+        self.raw_gga_hdop[np.isnan(self.raw_gga_lat_deg)] = np.nan
+        self.raw_gga_num_sats = raw_gga_num_sats.astype('float')
+        self.raw_gga_num_sats[np.isnan(self.raw_gga_lat_deg)] = np.nan
+        self.raw_gga_serial_time[np.isnan(self.raw_gga_lat_deg)] = np.nan
+
+        # Delta time is a TRDI only variable
+        if raw_gga_delta_time is None:
+            self.raw_gga_delta_time = np.tile(np.nan, raw_gga_lat.shape)
+        else:
+            self.raw_gga_delta_time = raw_gga_delta_time
+
+        self.raw_vtg_course_deg = raw_vtg_course
+        self.raw_vtg_speed_mps = raw_vtg_speed
+        self.raw_vtg_course_deg[np.where(np.logical_and((self.raw_vtg_course_deg == 0),
+                                                        (self.raw_vtg_speed_mps == 0)))] = np.nan
+        self.raw_vtg_speed_mps[np.isnan(self.raw_vtg_course_deg)] = np.nan
+
+        # Delta time is a TRDI only variable
+        if raw_vtg_delta_time is None:
+            self.raw_vtg_delta_time = np.tile(np.nan, raw_gga_lat.shape)
+        else:
+            self.raw_vtg_delta_time = raw_vtg_delta_time
+
+        self.raw_vtg_mode_indicator = np.array(raw_vtg_mode_indicator)
+        
+        # Assign input data to ensemble values computed by other software
+        self.ext_gga_utc = ext_gga_utc
+        self.ext_gga_lat_deg = ext_gga_lat
+        self.ext_gga_lon_deg = ext_gga_lon
+        self.ext_gga_altitude_m = ext_gga_alt
+        self.ext_gga_differential = ext_gga_diff
+        self.ext_gga_hdop = ext_gga_hdop
+        self.ext_gga_num_sats = ext_gga_num_sats
+        self.ext_gga_serial_time = np.floor(np.array(ext_gga_utc) / 10000) * 3600 + \
+            np.floor(np.mod(ext_gga_utc, 10000) / 100) * 60 + np.mod(ext_gga_utc, 100)
+        self.ext_vtg_course_deg = ext_vtg_course
+        self.ext_vtg_speed_mps = ext_vtg_speed
+        
+        # Assign input data to method properties
+        self.gga_position_method = gga_p_method
+        self.gga_velocity_method = gga_v_method
+        self.vtg_velocity_method = vtg_method
+        
+        # If gga data exist compute position and velocity
+        if np.any(np.logical_not(np.isnan(raw_gga_lat))):
+            self.process_gga()
+        
+        # If vtg data exist compute velocity
+        if np.any(np.logical_not(np.isnan(raw_vtg_speed))):
+            self.process_vtg()
+
+    def populate_from_qrev_mat(self, transect):
+        """Populates the object using data from previously saved QRev Matlab file.
+
+        Parameters
+        ----------
+        transect: mat_struct
+           Matlab data structure obtained from sio.loadmat
+        """
+
+        if hasattr(transect, 'gps'):
+            if hasattr(transect.gps, 'diffQualEns'):
+
+                # Raw properties
+                self.raw_gga_lat_deg = transect.gps.rawGGALat_deg
+                self.raw_gga_lon_deg = transect.gps.rawGGALon_deg
+                self.raw_gga_altitude_m = transect.gps.rawGGAAltitude_m
+                self.raw_gga_differential = transect.gps.rawGGADifferential
+                self.raw_gga_hdop = transect.gps.rawGGAHDOP
+                self.raw_gga_utc = transect.gps.rawGGAUTC
+                self.raw_gga_serial_time = transect.gps.rawGGASerialTime
+                self.raw_gga_num_sats = transect.gps.rawGGANumSats
+                self.raw_vtg_course_deg = transect.gps.rawVTGCourse_deg
+                self.raw_vtg_speed_mps = transect.gps.rawVTGSpeed_mps
+                self.raw_vtg_delta_time = transect.gps.rawVTGDeltaTime
+
+                # Older versions of QRev Matlab files represented the VTG mode differently.
+                try:
+                    if transect.gps.rawVTGModeIndicator.ndim == 2 and \
+                            type(transect.gps.rawVTGModeIndicator[0][0]) is np.float64:
+                        indicator = []
+                        for row in transect.gps.rawVTGModeIndicator.astype(int):
+                            row_indicator = []
+                            for value in row:
+                                if 127 > value > 0:
+                                    row_indicator.append(chr(value))
+                                else:
+                                    row_indicator.append('')
+                            indicator.append(row_indicator)
+                        self.raw_vtg_mode_indicator = np.array(indicator)
+                    else:
+                        raw_vtg_mode_indicator = transect.gps.rawVTGModeIndicator.tolist()
+                        new_list = []
+                        for row in raw_vtg_mode_indicator:
+                            new_list.append(list(row))
+                        self.raw_vtg_mode_indicator = np.array(new_list)
+                except AttributeError:
+                    self.raw_vtg_mode_indicator = transect.gps.rawVTGModeIndicator
+
+                self.raw_gga_delta_time = transect.gps.rawGGADeltaTime
+
+                # Manufacturer assigned ensemble values
+                self.ext_gga_lat_deg = transect.gps.extGGALat_deg
+                self.ext_gga_lon_deg = transect.gps.extGGALon_deg
+                self.ext_gga_altitude_m = transect.gps.extGGAAltitude_m
+                self.ext_gga_differential = transect.gps.extGGADifferential
+                self.ext_gga_hdop = transect.gps.extGGAHDOP
+                self.ext_gga_utc = transect.gps.extGGAUTC
+                self.ext_gga_serial_time = transect.gps.extGGASerialTime
+                self.ext_gga_num_sats = transect.gps.extGGANumSats
+                self.ext_vtg_course_deg = transect.gps.extVTGCourse_deg
+                self.ext_vtg_speed_mps = transect.gps.extVTGSpeed_mps
+
+                # User specification
+                self.gga_position_method = transect.gps.ggaPositionMethod
+                self.gga_velocity_method = transect.gps.ggaVelocityMethod
+                self.vtg_velocity_method = transect.gps.vtgVelocityMethod
+
+                # Computed properties for ensembles
+                self.gga_lat_ens_deg = transect.gps.ggaLatEns_deg
+                self.gga_lon_ens_deg = transect.gps.ggaLonEns_deg
+                self.utm_ens_m = transect.gps.UTMEns_m
+                self.gga_velocity_ens_mps = transect.gps.ggaVelocityEns_mps
+                self.gga_serial_time_ens = transect.gps.ggaSerialTimeEns
+                self.vtg_velocity_ens_mps = transect.gps.vtgVelocityEns_mps
+                if len(transect.gps.perGoodEns) > 0:
+                    self.per_good_ens = transect.gps.perGoodEns
+                else:
+                    self.per_good_ens = None
+                if type(transect.gps.hdopEns) is np.ndarray:
+                    self.hdop_ens = transect.gps.hdopEns
+                else:
+                    self.hdop_ens = np.array([transect.gps.hdopEns])
+                self.num_sats_ens = transect.gps.numSatsEns
+                self.altitude_ens_m = transect.gps.altitudeEns_m
+                self.diff_qual_ens = transect.gps.diffQualEns
+
+    def process_gga(self, p_setting=None, v_setting=None):
+        """Computes boat velocity from gga data.
+
+        Parameters
+        ----------
+        p_setting: str
+            Specifies method to use for computing positions from gga data (External, End, First, Average, Mindt).
+        v_setting: str
+            Specifies method to use for computing velocity from gga data (External, End, First, Average, Mindt).
+        """
+
+        if p_setting is None:
+            p_setting = self.gga_position_method
+
+        if v_setting is None:
+            v_setting = self.gga_velocity_method
+            
+        # Use only valid gga data
+        valid = np.logical_not(np.isnan(self.raw_gga_lat_deg))
+        gga_lat_deg = np.copy(self.raw_gga_lat_deg)
+        gga_lat_deg[valid == False] = np.nan
+        gga_lon_deg = np.copy(self.raw_gga_lon_deg)
+        gga_lon_deg[valid == False] = np.nan
+        gga_serial_time = np.copy(self.raw_gga_serial_time)
+        gga_serial_time[valid == False] = np.nan
+        gga_delta_time = np.copy(self.raw_gga_delta_time)
+        gga_delta_time[valid == False] = np.nan
+        gga_hdop = np.copy(self.raw_gga_hdop)
+        gga_hdop[valid == False] = np.nan
+        gga_num_sats = np.copy(self.raw_gga_num_sats)
+        gga_num_sats[valid == False] = np.nan
+        gga_altitude_m = np.copy(self.raw_gga_altitude_m)
+        gga_altitude_m[valid == False] = np.nan
+        gga_differential = np.copy(self.raw_gga_differential)
+        gga_differential[valid == False] = np.nan
+        n_ensembles = gga_lat_deg.shape[0]
+
+        # Apply method for computing position of ensemble
+
+        # Use ensemble data from other software
+        if p_setting == 'External':
+            self.gga_lat_ens_deg = self.ext_gga_lat_deg
+            self.gga_lon_ens_deg = self.ext_gga_lon_deg
+
+        # Uses last valid data for each ensemble
+        elif p_setting == 'End':
+            self.gga_lat_ens_deg = np.tile(np.nan, gga_lat_deg.shape[0])
+            self.gga_lon_ens_deg = np.tile(np.nan, gga_lon_deg.shape[0])
+            for n in range(n_ensembles):
+                idx = np.argwhere(~np.isnan(gga_lat_deg[n, :]))
+                if idx.size < 1:
+                    idx = 0
+                else:
+                    idx = idx[-1][0]
+                self.gga_lat_ens_deg[n] = gga_lat_deg[n, idx]
+                self.gga_lon_ens_deg[n] = gga_lon_deg[n, idx]
+
+        # Use first valid data for each ensemble
+        elif p_setting == 'First':
+            self.gga_lat_ens_deg = np.tile(np.nan, gga_lat_deg.shape[0])
+            self.gga_lon_ens_deg = np.tile(np.nan, gga_lon_deg.shape[0])
+            for n in range(n_ensembles):
+                idx = 0
+                self.gga_lat_ens_deg[n] = gga_lat_deg[n, idx]
+                self.gga_lon_ens_deg[n] = gga_lon_deg[n, idx]
+
+        # Use minimum delta time
+        elif p_setting == 'Mindt':
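+            # Delta time is the offset between each GPS fix and the ensemble, so the
+            # fix with the minimum absolute delta time is the closest in time to the
+            # ensemble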
+            self.gga_lat_ens_deg = np.tile(np.nan, gga_lat_deg.shape[0])
+            self.gga_lon_ens_deg = np.tile(np.nan, gga_lon_deg.shape[0])
+            d_time = np.abs(gga_delta_time)
+            d_time_min = np.nanmin(d_time, 1)
+
+            use = []
+            for n in range(len(d_time_min)):
+                use.append(np.abs(d_time[n, :]) == d_time_min[n])
+
+            use = np.array(use)
+            for n in range(len(d_time_min)):
+                idx = np.where(use[n, :] == True)[0]
+                if len(idx) > 0:
+                    idx = idx[0]
+                    self.gga_lat_ens_deg[n] = gga_lat_deg[n, idx]
+                    self.gga_lon_ens_deg[n] = gga_lon_deg[n, idx]
+                    
+        y_utm, x_utm = self.compute_utm(self.gga_lat_ens_deg, self.gga_lon_ens_deg)
+        self.utm_ens_m = (x_utm, y_utm)
+
+        # Prepare variables for velocity computations
+        lat = np.tile([np.nan], n_ensembles)
+        lon = np.tile([np.nan], n_ensembles)
+        self.gga_serial_time_ens = np.tile([np.nan], n_ensembles)
+        self.altitude_ens_m = np.tile([np.nan], n_ensembles)
+        self.diff_qual_ens = np.tile([np.nan], n_ensembles)
+        self.hdop_ens = np.tile([np.nan], n_ensembles)
+        self.num_sats_ens = np.tile([np.nan], n_ensembles)
+        
+        # Apply method for computing velocity of ensemble
+        if v_setting == 'External':
+            lat = self.ext_gga_lat_deg
+            lon = self.ext_gga_lon_deg
+            self.gga_serial_time_ens = self.ext_gga_serial_time
+            self.hdop_ens = self.ext_gga_hdop
+            self.num_sats_ens = self.ext_gga_num_sats
+            self.altitude_ens_m = self.ext_gga_altitude_m
+            self.diff_qual_ens = self.ext_gga_differential
+            
+        # Average all position during an ensemble
+        elif v_setting == 'Average':
+            lat = np.nanmean(gga_lat_deg, 1)
+            lon = np.nanmean(gga_lon_deg, 1)
+            self.gga_serial_time_ens = np.nanmean(gga_serial_time, 1)
+            self.hdop_ens = np.nanmean(gga_hdop, 1)
+            self.num_sats_ens = np.floor(np.nanmean(gga_num_sats, 1))
+            self.altitude_ens_m = np.nanmean(self.raw_gga_altitude_m, 1)
+            self.diff_qual_ens = np.floor(np.nanmean(self.raw_gga_differential, 1))
+            
+        # Use the last valid data in an ensemble
+        elif v_setting == 'End':
+
+            for n in range(n_ensembles):
+                idx = np.where(np.isnan(gga_lat_deg[n, :]) == False)[0]
+                if len(idx) > 0:
+                    idx = idx[-1]
+                    lat[n] = gga_lat_deg[n, idx]
+                    lon[n] = gga_lon_deg[n, idx]
+                    self.gga_serial_time_ens[n] = gga_serial_time[n, idx]
+                    self.altitude_ens_m[n] = gga_altitude_m[n, idx]
+                    self.diff_qual_ens[n] = gga_differential[n, idx]
+
+                    if idx < len(gga_hdop[n]):
+                        self.hdop_ens[n] = gga_hdop[n, idx]
+
+                    if idx < len(gga_num_sats[n]):
+                        self.num_sats_ens[n] = gga_num_sats[n, idx]
+
+        # Use the first valid data in an ensemble
+        elif v_setting == 'First':
+            for n in range(n_ensembles):
+                idx = 0
+                lat[n] = gga_lat_deg[n, idx]
+                lon[n] = gga_lon_deg[n, idx]
+                self.gga_serial_time_ens[n] = gga_serial_time[n, idx]
+                self.altitude_ens_m[n] = gga_altitude_m[n, idx]
+                self.diff_qual_ens[n] = gga_differential[n, idx]
+                self.hdop_ens[n] = gga_hdop[n, idx]
+                self.num_sats_ens[n] = gga_num_sats[n, idx]
+
+        # Use the minimum delta time to assign data to an ensemble
+        elif v_setting == 'Mindt':
+            d_time = np.abs(gga_delta_time)
+            d_time_min = np.nanmin(d_time, 1)
+            use = []
+            for n in range(len(d_time_min)):
+                use.append(np.abs(d_time[n, :]) == d_time_min[n])
+            use = np.array(use)  
+            for n in range(len(d_time_min)):
+                idx = np.where(use[n, :] == True)[0]
+                if len(idx) > 0:
+                    idx = idx[0]
+                    lat[n] = gga_lat_deg[n, idx]
+                    lon[n] = gga_lon_deg[n, idx]
+                    self.gga_serial_time_ens[n] = gga_serial_time[n, idx]
+                    self.altitude_ens_m[n] = gga_altitude_m[n, idx]
+                    self.diff_qual_ens[n] = gga_differential[n, idx]
+
+                    if idx < len(gga_hdop[n]):
+                        self.hdop_ens[n] = gga_hdop[n, idx]
+
+                    if idx < len(gga_num_sats[n]):
+                        self.num_sats_ens[n] = gga_num_sats[n, idx]
+                    
+        # Identify valid values
+        idx_values = np.where(np.isnan(x_utm) == False)[0]
+        if len(idx_values) > 1:
+            u, v = self.gga2_vel_trdi(lat, lon, self.gga_serial_time_ens, idx_values)
+            self.gga_velocity_ens_mps = np.tile([np.nan], (2, len(lat)))
+            self.gga_velocity_ens_mps[0, idx_values[1:]] = u[idx_values[1:]]
+            self.gga_velocity_ens_mps[1, idx_values[1:]] = v[idx_values[1:]]
+        else:
+            self.gga_velocity_ens_mps = np.tile([np.nan], (2, len(lat)))
+
+    def process_vtg(self, v_setting=None):
+        """Processes raw vtg data to achieve a velocity for each ensemble containing data.
+
+        Parameters
+        ----------
+        v_setting: str
+            Method used to compute ensemble velocity.
+        """
+        
+        # Determine method used to compute ensemble velocity
+        if v_setting is None:
+            v_setting = self.vtg_velocity_method
+
+        # Use only valid data
+        vtg_speed_mps = np.copy(self.raw_vtg_speed_mps)
+        vtg_course_deg = np.copy(self.raw_vtg_course_deg)
+        vtg_delta_time = np.copy(self.raw_vtg_delta_time)
+
+        # Use mode indicator to identify invalid original data
+        idx = np.where(self.raw_vtg_mode_indicator == 'N')
+        vtg_speed_mps[idx] = np.nan
+        vtg_course_deg[idx] = np.nan
+        vtg_delta_time[idx] = np.nan
+
+        # Use average velocity for ensemble velocity
+        if v_setting == 'Average':
+            # Compute vtg velocity in x y coordinates from speed and course
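+            # azdeg2rad (MiscLibs.common_functions) is assumed to convert the compass
+            # azimuth (degrees clockwise from north) to a math-convention angle so
+            # that pol2cart yields east (vx) and north (vy) components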
+            direction = azdeg2rad(vtg_course_deg)
+            vx, vy = pol2cart(direction, vtg_speed_mps)
+            vx[np.logical_and(vx == 0, vy == 0)] = np.nan
+            vy[np.isnan(vx)] = np.nan
+            vx_mean = np.nanmean(vx, 1)
+            vy_mean = np.nanmean(vy, 1)
+            self.vtg_velocity_ens_mps = np.vstack([vx_mean.T, vy_mean.T])
+
+        # Use last velocity for ensemble velocity
+        elif v_setting == 'End':
+            n_ensembles = vtg_speed_mps.shape[0]
+            vtg_vel = nans(n_ensembles)
+            vtg_dir = nans(n_ensembles)
+            
+            for n in range(n_ensembles):
+                idx = np.where(~np.isnan(vtg_speed_mps[n, :]))[0]
+                if len(idx) > 0:
+                    idx = idx[-1]
+                else:
+                    idx = 0
+                vtg_vel[n] = vtg_speed_mps[n, idx]
+                vtg_dir[n] = vtg_course_deg[n, idx]
+                
+            direction = azdeg2rad(vtg_dir)
+            vx, vy = pol2cart(direction, vtg_vel)
+            vx[np.logical_and(vx == 0, vy == 0)] = np.nan
+            vy[np.isnan(vx)] = np.nan
+            self.vtg_velocity_ens_mps = np.vstack([vx, vy])
+
+        # Use first velocity for ensemble velocity
+        elif v_setting == 'First':
+            n_ensembles = vtg_speed_mps.shape[0]
+            vtg_vel = nans(n_ensembles)
+            vtg_dir = nans(n_ensembles)
+            
+            for n in range(n_ensembles):
+                idx = 0
+                vtg_vel[n] = vtg_speed_mps[n, idx]
+                vtg_dir[n] = vtg_course_deg[n, idx]
+            direction = azdeg2rad(vtg_dir)
+            vx, vy = pol2cart(direction, vtg_vel)
+            vx[np.logical_and(vx == 0, vy == 0)] = np.nan
+            vy[np.isnan(vx)] = np.nan
+            self.vtg_velocity_ens_mps = np.vstack([vx, vy])
+
+        # Use the velocity with the minimum delta time for the ensemble velocity
+        elif v_setting == 'Mindt':
+            d_time = np.abs(vtg_delta_time)
+            # d_time[d_time==0] = np.nan
+            d_time_min = np.nanmin(d_time.T, 0).T
+            
+            use = []
+            vtg_speed = []
+            vtg_dir = []
+            
+            for n in range(len(d_time_min)):
+                use.append(np.abs(d_time[n, :]) == d_time_min[n])
+                
+            use = np.array(use)
+            for n in range(len(d_time_min)):
+                idx = np.where(use[n, :] == True)[0]
+                if len(idx) > 0:
+                    idx = idx[0]
+                    vtg_speed.append(vtg_speed_mps[n, idx])
+                    vtg_dir.append(vtg_course_deg[n, idx])
+                else:
+                    vtg_speed.append(np.nan)
+                    vtg_dir.append(np.nan)
+
+            # Convert to velocity components once after all ensembles are gathered
+            direction = azdeg2rad(np.array(vtg_dir))
+            vx, vy = pol2cart(direction, np.array(vtg_speed))
+            self.vtg_velocity_ens_mps = np.vstack([vx, vy])
+
+        # Use velocity selected by external algorithm for ensemble velocity
+        elif v_setting == 'External':
+            direction = azdeg2rad(self.ext_vtg_course_deg)
+            vx, vy = pol2cart(direction, self.ext_vtg_speed_mps)
+            self.vtg_velocity_ens_mps = np.vstack([vx.T, vy.T])
+
+    @staticmethod
+    def compute_utm(lat_in, lon_in):
+        """Compute UTM coordinates from latitude and longitude.
+
+        Parameters
+        ----------
+        lat_in: np.array(float)
+            Latitude in degrees.
+        lon_in: np.array(float)
+            Longitude in degrees.
+        """
+
+        # Set invalid data to nan
+        lat_in[lat_in == 0] = np.nan
+        lon_in[lon_in == 0] = np.nan
+
+        y = np.tile([np.nan], lat_in.shape)
+        x = np.tile([np.nan], lon_in.shape)
+        idx = np.where(np.logical_and((np.isnan(lat_in) == False), (np.isnan(lon_in) == False)))
+        # utm.from_latlon expects latitude and longitude in degrees
+        for ind in idx[0]:
+            y[ind], x[ind], _, _ = utm.from_latlon(lat_in[ind], lon_in[ind])
+        x_utm = x.reshape(lon_in.shape)
+        y_utm = y.reshape(lat_in.shape)
+        
+        return y_utm, x_utm
+
+    @staticmethod
+    def gga2_vel_trdi(lat, lon, t, idx_values):
+        """Computes velocity from gga data using approach from TRDI WinRiver II.
+
+        Parameters
+        ----------
+        lat: np.array(float)
+            Latitude for each ensemble used for velocity computations, in degrees.
+        lon: np.array(float)
+            Longitude for each ensemble used for velocity computations, in degrees.
+        t: np.array(float)
+            GGA time associated with the latitude and longitude selected for velocity computations.
+        idx_values: np.array(int)
+            Indices of valid lat-lon data.
+        """
+        
+        u = np.zeros(lat.shape)
+        v = np.zeros(lat.shape)
+        
+        for n in range(1, len(idx_values)):
+            lat1 = lat[idx_values[n-1]]
+            lat2 = lat[idx_values[n]]
+            lon1 = lon[idx_values[n-1]]
+            lon2 = lon[idx_values[n]]
+            t1 = t[idx_values[n-1]]
+            t2 = t[idx_values[n]]
+
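+            # Approximate per-degree transverse (re) and meridional (rn) radii of
+            # curvature for the WGS84 ellipsoid (semi-major axis 6378137 m,
+            # flattening 1/298.257223563) at the mean latitude, so that latitude and
+            # longitude differences in degrees convert directly to meters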
+            lat_avg_rad = ((lat1 + lat2) / 2) * np.pi / 180
+            sin_lat_avg_rad = np.sin(lat_avg_rad)
+            coefficient = 6378137 * np.pi / 180
+            ellipticity = 1 / 298.257223563
+            re = coefficient * (1 + ellipticity * sin_lat_avg_rad ** 2)
+            rn = coefficient * (1 - 2 * ellipticity + 3 * ellipticity * sin_lat_avg_rad ** 2)
+            delta_x = re * (lon2 - lon1) * np.cos(lat_avg_rad)
+            delta_y = rn * (lat2 - lat1)
+            delta_time = t2 - t1
+            if delta_time > 0.0001:
+                u[idx_values[n]] = delta_x / delta_time
+                v[idx_values[n]] = delta_y / delta_time
+            else:
+                u[idx_values[n]] = np.nan
+                v[idx_values[n]] = np.nan
+            
+        return u, v
+
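+
+# Minimal usage sketch (hypothetical variable names; in QRev the raw arrays come
+# from the Pd0TRDI or MatSonTek readers):
+#
+#     gps = GPSData()
+#     gps.populate_data(raw_gga_utc, raw_gga_lat, raw_gga_lon, raw_gga_alt,
+#                       raw_gga_diff, raw_gga_hdop, raw_gga_num_sats,
+#                       raw_gga_delta_time, raw_vtg_course, raw_vtg_speed,
+#                       raw_vtg_delta_time, raw_vtg_mode_indicator,
+#                       ext_gga_utc, ext_gga_lat, ext_gga_lon, ext_gga_alt,
+#                       ext_gga_diff, ext_gga_hdop, ext_gga_num_sats,
+#                       ext_vtg_course, ext_vtg_speed,
+#                       gga_p_method='End', gga_v_method='End', vtg_method='Average')
+#     boat_velocity_mps = gps.gga_velocity_ens_mps  # [2, ensemble], east/north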
diff --git a/Classes/HeadingData.py b/Classes/HeadingData.py
new file mode 100644
index 0000000..aa660d5
--- /dev/null
+++ b/Classes/HeadingData.py
@@ -0,0 +1,224 @@
+import numpy as np
+from operator import xor
+
+
+class HeadingData(object):
+    """This class stores and manipulates heading and associated data.
+
+    Attributes
+    ----------
+    data: np.array(float)
+        Corrected heading data, in degrees.
+    original_data: np.array(float)
+        Original uncorrected heading data, in degrees.
+    source: str
+        Source of heading data (internal, external).
+    mag_var_deg: float
+        Magnetic variation applied to get corrected data, in degrees (East +, West -).
+    mag_var_orig_deg: float
+        Original magnetic variation, in degrees (East +, West -).
+    align_correction_deg: float
+        Alignment correction to align compass with instrument (used for external heading), in degrees CW.
+    align_correction_orig_deg: float
+        Alignment correction to align compass with instrument (used for external heading), in degrees CW.
+    mag_error: np.array(float)
+        Percent change in mean magnetic field from calibration (SonTek only).
+    pitch_limit: np.array(float)
+        Pitch limit of compass calibration (SonTek only), in degrees.
+    roll_limit: np.array(float)
+        Roll limit of compass calibration (SonTek only), in degrees.
+    """
+    
+    def __init__(self):
+        """Initialize class and set variables to None."""
+
+        self.data = None  # Corrected heading data
+        self.original_data = None  # Original uncorrected heading data
+        self.source = None  # Source of heading data (internal, external)
+        self.mag_var_deg = None  # Magnetic variation applied to the heading data
+        self.mag_var_orig_deg = None  # Original magnetic variation
+        self.align_correction_deg = None  # Alignment correction to align compass with instrument
+        self.align_correction_orig_deg = None  # Original alignment correction
+        self.mag_error = None  # Percent change in mean magnetic field from calibration
+        self.pitch_limit = None  # Pitch limit of compass calibration (SonTek only), in degrees
+        self.roll_limit = None  # Roll limit of compass calibration (SonTek only), in degrees
+        
+    def populate_data(self, data_in, source_in, magvar=0, align=0, mag_error=None, pitch_limit=None, roll_limit=None):
+        """Assigns values to instance variables.
+
+        Parameters
+        ----------
+        data_in: np.array(float)
+            Heading, in degrees.
+        source_in: str
+            Source of heading data (internal, external).
+        magvar: float
+            Magnetic variation, in degrees (East +, West -).
+        align: float
+            Alignment correction to align compass with instrument, in degrees
+        mag_error: np.array(float)
+            Percent change in magnetic field (SonTek only)
+        pitch_limit: np.array(float)
+            Pitch limit of compass calibration (SonTek only)
+        roll_limit: np.array(float)
+            Roll limit of compass calibration (SonTek only)
+        """
+
+        self.original_data = data_in
+        self.source = source_in
+        self.mag_var_deg = float(magvar)
+        self.mag_var_orig_deg = float(magvar)
+        self.align_correction_deg = float(align)
+        self.align_correction_orig_deg = float(align)
+        self.mag_error = mag_error
+
+        if pitch_limit is not None and len(pitch_limit.shape) > 1:
+            self.pitch_limit = pitch_limit[0, :]
+        else:
+            self.pitch_limit = pitch_limit
+
+        if roll_limit is not None and len(roll_limit.shape) > 1:
+            self.roll_limit = roll_limit[0, :]
+        else:
+            self.roll_limit = roll_limit
+
+        # Correct the original data for the magvar and alignment
+        if source_in == 'internal':
+            self.data = self.original_data + self.mag_var_deg
+        else:
+            self.data = self.original_data + self.align_correction_deg
+        self.fix_upper_limit()
+        self.interp_heading()
+
+    def populate_from_qrev_mat(self, mat_data):
+        """Populates the object using data from previously saved QRev Matlab file.
+
+        Parameters
+        ----------
+        mat_data: mat_struct
+           Matlab data structure obtained from sio.loadmat
+        """
+
+        self.data = mat_data.data
+        self.original_data = mat_data.originalData
+        self.source = mat_data.source
+        self.mag_var_deg = float(mat_data.magVar_deg)
+        self.mag_var_orig_deg = float(mat_data.magVarOrig_deg)
+        self.align_correction_deg = mat_data.alignCorrection_deg
+        if hasattr(mat_data, 'align_correction_orig_deg'):
+            self.align_correction_orig_deg = mat_data.align_correction_orig_deg
+        else:
+            self.align_correction_orig_deg = mat_data.alignCorrection_deg
+
+        # Only available for SonTek G3 compass
+        try:
+            if len(mat_data.magError) > 0:
+                self.mag_error = mat_data.magError
+
+            # Only available for SonTek G3 compass
+            if mat_data.pitchLimit.size > 0:
+                if mat_data.pitchLimit.size > 2:
+                    self.pitch_limit = mat_data.pitchLimit[0]
+                else:
+                    self.pitch_limit = mat_data.pitchLimit
+
+            # Only available for SonTek G3 compass
+            if mat_data.rollLimit.size > 0:
+                if mat_data.rollLimit.size > 2:
+                    self.roll_limit = mat_data.rollLimit[0]
+                else:
+                    self.roll_limit = mat_data.rollLimit
+        except AttributeError:
+            self.mag_error = None
+            self.pitch_limit = None
+            self.roll_limit = None
+
+    def set_mag_var(self, mag_var, h_source):
+        """Applies a new magvar to the object data.
+
+        Parameters
+        ----------
+        mag_var: float
+            Magnetic variation, in degrees
+        h_source: str
+            Heading source (internal or external)
+        """
+
+        self.mag_var_deg = mag_var
+        if h_source == 'internal':
+            self.data = self.original_data + self.mag_var_deg
+            self.fix_upper_limit()
+            
+    def set_align_correction(self, align_correction, h_source):
+        """Applies a new alignment correction to the object data.
+
+        Parameters
+        ----------
+        align_correction: float
+            Alignment correction, in degrees
+        h_source: str
+            Heading source (internal or external)
+        """
+
+        self.align_correction_deg = align_correction
+        if h_source == 'external':
+            self.data = self.original_data + self.align_correction_deg
+            self.fix_upper_limit()
+
+    def fix_upper_limit(self):
+        """Fixes heading when magvar and or alignment are applied resulting in heading greater than 360 degrees.
+        """
+
+        idx = np.where(self.data > 360)[0]
+        if len(idx) > 0:
+            self.data[idx] = self.data[idx] - 360   
+            
+    def interp_heading(self):
+        """Interpolate invalid headings. Use linear interpolation if there are
+        valid values on either side of the invalid heading. If the invalid heading
+        occurs at the beginning of the time series, back fill using the 1st valid.
+        If the invalid heading occurs at the end of the time series, forward fill
+        with the last valid self.data.
+        """
+        
+        idx_invalid = np.where(np.isnan(self.data))[0]
+        
+        if len(idx_invalid) > 0:
+            
+            first_valid_idx = np.where(np.isnan(self.data) == False)[0][0]
+            last_valid_idx = np.where(np.isnan(self.data) == False)[0][-1]
+        
+            # Process each invalid heading
+            for n in range(len(idx_invalid)):
+                before_idx = np.where(np.isnan(self.data[0:idx_invalid[n] + 1]) == False)[0]
+                after_idx = np.where(np.isnan(self.data[idx_invalid[n]:]) == False)[0]
+                
+                # If the invalid heading is at the beginning, back fill
+                if len(before_idx) < 1:
+                    self.data[idx_invalid[n]] = self.data[first_valid_idx]
+
+                # If the invalid heading is at the end, forward fill
+                elif len(after_idx) < 1:
+                    self.data[idx_invalid[n]] = self.data[last_valid_idx]
+
+                # If the invalid heading is in the middle, interpolate
+                else:
+                    before_idx = before_idx[-1]
+                    after_idx = after_idx[0] + idx_invalid[n]
+                    
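+                    # Handle the 0/360-degree wrap: if only one neighbor is above
+                    # 180 degrees, offset by +/-360 so the interpolation passes
+                    # through north (e.g., 350 and 10 interpolate through 360/0
+                    # rather than 180)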
+                    test1 = self.data[before_idx] > 180
+                    test2 = self.data[after_idx] > 180
+                    c = None
+                    if not xor(test1, test2):
+                        c = 0
+                    elif test1:
+                        c = 360
+                    elif test2:
+                        c = -360
+                    self.data[idx_invalid[n]] = (((self.data[after_idx] - self.data[before_idx] + c) /
+                                                  (before_idx - after_idx)) *
+                                                 (before_idx - idx_invalid[n])) + self.data[before_idx]
+                    if self.data[idx_invalid[n]] > 360:
+                        self.data[idx_invalid[n]] = self.data[idx_invalid[n]] - 360
+                    elif self.data[idx_invalid[n]] < 0:
+                        self.data[idx_invalid[n]] = self.data[idx_invalid[n]] + 360
diff --git a/Classes/InstrumentData.py b/Classes/InstrumentData.py
new file mode 100644
index 0000000..cffa41b
--- /dev/null
+++ b/Classes/InstrumentData.py
@@ -0,0 +1,268 @@
+import numpy as np
+from Classes.TransformationMatrix import TransformationMatrix
+
+
+class InstrumentData(object):
+    """Container for characteristics of the ADCP used to make the measurement
+
+    Attributes
+    ----------
+    serial_num: str
+        Serial number of ADCP.
+    manufacturer: str
+        Name of manufacturer.
+    model: str
+        Model name of ADCP.
+    firmware: str
+        Firmware version in the ADCP.
+    frequency_khz: float or str
+        Frequency or frequencies used by ADCP (may be 'Multi').
+    beam_angle_deg: float
+        Angle of the beams from vertical in degrees.
+    beam_pattern: str
+        Pattern of the beam angles, concave or convex.
+    t_matrix: TransformationMatrix
+        Object of TransformationMatrix.
+    configuration_commands: np.array(str)
+        Commands used to configure the instrument.
+    """
+     
+    def __init__(self):
+        """Constructor initializes the variables to None.
+        """
+
+        self.serial_num = None  # Serial number of ADCP
+        self.manufacturer = None  # manufacturer of ADCP (SonTek, TRDI)
+        self.model = None  # model of ADCP (Rio Grande, StreamPro, RiverRay, M9, S5)
+        self.firmware = None  # firmware version
+        self.frequency_khz = None  # frequency of ADCP (could be "Multi")
+        self.beam_angle_deg = None  # angle of beam from vertical
+        self.beam_pattern = None  # pattern of beams
+        self.t_matrix = None  # object of TransformationMatrix
+        self.configuration_commands = np.array([])  # configuration commands sent to ADCP
+        
+    def populate_data(self, manufacturer, raw_data, mmt_transect=None, mmt=None):
+        """Manages method calls for different manufacturers.
+
+        Parameters
+        ----------
+        manufacturer: str
+            Name of manufacturer.
+        raw_data: object
+            Object of Pd0TRDI for TRDI or object of MatSonTek for SonTek/Nortek
+        mmt_transect: MMT_Transect
+            Object of Transect (mmt object)
+        mmt: MMTtrdi
+            Object of MMTtrdi
+        """
+
+        # Process based on manufacturer
+        if manufacturer == 'TRDI':
+            self.manufacturer = manufacturer
+            self.trdi(pd0=raw_data, mmt_transect=mmt_transect, mmt=mmt)
+        elif manufacturer == 'SonTek':
+            self.manufacturer = manufacturer
+            self.sontek(rs=raw_data)
+        elif manufacturer == 'Nortek':
+            self.manufacturer = manufacturer
+            self.nortek(rs=raw_data)
+
+    def trdi(self, pd0, mmt_transect, mmt):
+        """Populates the variables with data from TRDI ADCPs.
+
+        Parameters
+        ----------
+        pd0: Pd0TRDI
+            Object of Pd0TRDI
+        mmt_transect: MMT_Transect
+            Object of MMT_Transect
+        mmt: MMTtrdi
+            Object of MMTtrdi
+        """
+
+        # Instrument frequency
+        self.frequency_khz = pd0.Inst.freq[0]
+
+        # Firmware
+        self.firmware = pd0.Inst.firm_ver[0]
+
+        # Instrument beam angle and pattern
+        self.beam_angle_deg = pd0.Inst.beam_ang[0]
+        self.beam_pattern = pd0.Inst.pat[0]
+
+        # Instrument characteristics
+        mmt_site = getattr(mmt, 'site_info')
+        mmt_config = getattr(mmt_transect, 'active_config')
+
+        self.serial_num = mmt_site['ADCPSerialNmb']
+
+        # Determine TRDI model
+        num = float(self.firmware)
+        model_switch = np.floor(num)
+
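+        # The integer part of the firmware version identifies the TRDI model:
+        # 10 = Rio Grande, 31 = StreamPro, 44 = RiverRay, 56 = RiverPro/RioPro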
+        if model_switch == 10:
+            self.model = 'Rio Grande'
+            if 'Fixed_Commands' in mmt_config.keys():
+                self.configuration_commands = np.append(self.configuration_commands, 'Fixed')
+                self.configuration_commands = np.append(self.configuration_commands, mmt_config['Fixed_Commands'])
+
+        elif model_switch == 31:
+            self.model = 'StreamPro'
+            self.frequency_khz = 2000
+            if 'Fixed_Commands_StreamPro' in mmt_config.keys():
+                self.configuration_commands = np.append(self.configuration_commands, 'Fixed')
+                self.configuration_commands = np.append(self.configuration_commands,
+                                                        mmt_config['Fixed_Commands_StreamPro'])
+
+        elif model_switch == 44:
+            self.model = 'RiverRay'
+            if 'Fixed_Commands_RiverRay' in mmt_config.keys():
+                self.configuration_commands = np.append(self.configuration_commands, 'Fixed')
+                self.configuration_commands = np.append(self.configuration_commands,
+                                                        mmt_config['Fixed_Commands_RiverRay'])
+
+        elif model_switch == 56:
+            self.model = 'RiverPro'
+            if pd0.Cfg.n_beams[0] < 5:
+                if 'RG_Test' in mmt.qaqc.keys():
+                    idx = mmt.qaqc['RG_Test'][0].find('RioPro')
+                    if idx != -1:
+                        self.model = 'RioPro'
+
+            if 'Fixed_Commands_RiverPro' in mmt_config.keys():
+                self.configuration_commands = np.append(self.configuration_commands, 'Fixed')
+                self.configuration_commands = np.append(self.configuration_commands,
+                                                        mmt_config['Fixed_Commands_RiverPro'])
+            else:
+                self.configuration_commands = np.append(self.configuration_commands, 'Fixed')
+                self.configuration_commands = np.append(self.configuration_commands, ' ')
+
+        else:
+            self.model = 'Unknown'
+            if 'Fixed_Commands' in mmt_config.keys():
+                self.configuration_commands = np.append(self.configuration_commands, 'Fixed')
+                self.configuration_commands = np.append(self.configuration_commands, mmt_config['Fixed_Commands'])
+
+        if 'Wizard_Commands' in mmt_config.keys():
+            self.configuration_commands = np.append(self.configuration_commands, ['Wizard'])
+            self.configuration_commands = np.append(self.configuration_commands,
+                                                    mmt_config['Wizard_Commands'])
+
+        if 'User_Commands' in mmt_config.keys():
+            self.configuration_commands = np.append(self.configuration_commands, ['User'])
+            self.configuration_commands = np.append(self.configuration_commands,
+                                                    mmt_config['User_Commands'])
+
+        # Obtain transformation matrix from one of the available sources
+        if not np.isnan(pd0.Inst.t_matrix[0, 0]):
+            self.t_matrix = TransformationMatrix()
+            self.t_matrix.populate_data(manufacturer='TRDI', model='pd0', data_in=pd0)
+        elif self.model == 'RiverRay':
+            self.t_matrix = TransformationMatrix()
+            self.t_matrix.populate_data(manufacturer='TRDI', model=self.model, data_in='Nominal')
+        else:
+            if isinstance(mmt.qaqc, dict) and len(mmt.qaqc) > 0:
+                if 'RG_Test' in mmt.qaqc.keys():
+
+                    self.t_matrix = TransformationMatrix()
+                    self.t_matrix.populate_data(manufacturer='TRDI', model=self.model, data_in=mmt.qaqc['RG_Test'][0])
+
+                elif 'Compass_Calibration' in mmt.qaqc.keys():
+
+                    self.t_matrix = TransformationMatrix()
+                    self.t_matrix.populate_data(manufacturer='TRDI',
+                                                model=self.model,
+                                                data_in=mmt.qaqc['Compass_Calibration'][0])
+
+                elif 'Compass_Evaluation' in mmt.qaqc.keys():
+
+                    self.t_matrix = TransformationMatrix()
+                    self.t_matrix.populate_data(manufacturer='TRDI',
+                                                model=self.model,
+                                                data_in=mmt.qaqc['Compass_Evaluation'][0])
+
+                else:
+                    self.t_matrix = TransformationMatrix()
+                    self.t_matrix.populate_data(manufacturer='TRDI',
+                                                model=self.model,
+                                                data_in='Nominal')
+            else:
+                self.t_matrix = TransformationMatrix()
+                self.t_matrix.populate_data(manufacturer='TRDI',
+                                            model=self.model,
+                                            data_in='Nominal')
+
+    def sontek(self, rs):
+        """Populates the variables with data from SonTek ADCPs.
+
+        Parameters
+        ----------
+        rs: MatSonTek
+        """
+
+        self.serial_num = rs.System.SerialNumber
+        self.frequency_khz = rs.Transformation_Matrices.Frequency
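+        # Model inference (assumed from the data layout): only the M9 reports a
+        # third system frequency, and only the RS5 reports Vel_Expected_StdDev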
+        if self.frequency_khz[2] > 0:
+            self.model = 'M9'
+        elif hasattr(rs.WaterTrack, 'Vel_Expected_StdDev'):
+            self.model = 'RS5'
+        else:
+            self.model = 'S5'
+        if hasattr(rs, 'SystemHW'):
+            revision = str(rs.SystemHW.FirmwareRevision)
+            if len(revision) < 2:
+                revision = '0' + revision
+            self.firmware = str(rs.SystemHW.FirmwareVersion) + '.' + revision
+        else:
+            self.firmware = ''
+        self.beam_angle_deg = 25
+        self.beam_pattern = 'Convex'
+        self.t_matrix = TransformationMatrix()
+        self.t_matrix.populate_data('SonTek', data_in=rs.Transformation_Matrices.Matrix)
+        self.configuration_commands = None
+
+    def nortek(self, rs):
+        """Populates the variables with data from Nortek ADCPs.
+
+        Parameters
+        ----------
+        rs: MatSonTek
+        """
+        self.serial_num = rs.System.SerialNumber
+        self.frequency_khz = rs.Transformation_Matrices.Frequency
+        self.model = rs.System.InstrumentModel
+        if hasattr(rs, 'SystemHW'):
+            revision = str(rs.SystemHW.FirmwareRevision)
+            if len(revision) < 2:
+                revision = '0' + revision
+            self.firmware = str(rs.SystemHW.FirmwareVersion) + '.' + revision
+        else:
+            self.firmware = ''
+        self.beam_angle_deg = 25
+        self.beam_pattern = 'Convex'
+        self.t_matrix = TransformationMatrix()
+        self.t_matrix.populate_data('SonTek', data_in=rs.Transformation_Matrices.Matrix)
+        self.configuration_commands = None
+
+    def populate_from_qrev_mat(self, transect):
+        """Populates the object using data from previously saved QRev Matlab file.
+
+        Parameters
+        ----------
+        transect: mat_struct
+            Matlab data structure obtained from sio.loadmat
+        """
+
+        self.serial_num = str(transect.adcp.serialNum)
+        self.manufacturer = transect.adcp.manufacturer
+        self.model = transect.adcp.model
+        self.firmware = transect.adcp.firmware
+        self.frequency_khz = transect.adcp.frequency_hz
+        self.beam_angle_deg = transect.adcp.beamAngle_deg
+        self.beam_pattern = transect.adcp.beamPattern
+        self.t_matrix = TransformationMatrix()
+        self.t_matrix.populate_from_qrev_mat(transect.adcp.tMatrix)
+
+        if len(transect.adcp.configurationCommands) > 0:
+            self.configuration_commands = []
+            for command in transect.adcp.configurationCommands:
+                if isinstance(command, str):
+                    self.configuration_commands.append(command)
+            self.configuration_commands = np.array(self.configuration_commands)
+
+        else:
+            self.configuration_commands = None
diff --git a/Classes/MMT_TRDI.py b/Classes/MMT_TRDI.py
new file mode 100644
index 0000000..10b9676
--- /dev/null
+++ b/Classes/MMT_TRDI.py
@@ -0,0 +1,539 @@
+import os
+import re
+import xmltodict
+import numpy as np
+
+
+class MMTtrdi(object):
+    """Class to read and store data from a WinRiver 2 mmt file.
+
+    Attributes
+    ----------
+    project: dict
+        Dictionary of measurement information
+    site_info: dict
+        Dictionary of site information
+    transects: list
+        List of Transect objects containing information for each discharge transect
+    summary: dict
+        Dictionary of measurement summary for each available boat velocity reference
+    qaqc: dict
+        Dictionary of premeasurement tests, calibrations, and evaluations
+    mbt_transects: list
+        List of Transect objects containing information for each moving-bed test transect
+    path: str
+        Path for mmt file and associated files
+    """
+
+    def __init__(self, mmt_file):
+        """Initialize instance variables and reads mmt file.
+
+        Parameters
+        ----------
+        mmt_file: str
+            Full filename including path of mmt file.
+        """
+
+        # Initialize instance variables
+        self.project = {}
+        self.site_info = {}
+        self.transects = []
+        self.summary = {}
+        self.qaqc = {}
+        self.mbt_transects = []
+        self.path = None
+
+        # Process mmt file
+        self.process_mmt(mmt_file)
+
+    def process_mmt(self, mmt_file):
+        """Method to read and process the mmt file.
+
+        Parameters
+        ----------
+        mmt_file: str
+            Full filename including path of mmt file.
+        """
+
+        # Open the file and convert to an ordered dictionary tree
+        with open(mmt_file, 'r', encoding='utf-8') as fd:
+            xml_data = fd.read()
+
+            # Strip control characters and '%', which would otherwise break XML parsing
+            remove_re = re.compile(u'[\x00-\x08\x0B-\x0C\x0E-\x1F\x7F%]')
+            clean_xml_data = remove_re.sub('', xml_data)
+
+            win_river = xmltodict.parse(clean_xml_data)
+
+        win_river = win_river['WinRiver']
+
+        self.path = os.path.split(mmt_file)[0]
+
+        # Process project settings
+        self.project['Name'] = win_river['Project']['@Name']
+        self.project['Version'] = win_river['Project']['@Version']
+        if 'Locked' in win_river['Project'].keys():
+            self.project['Locked'] = win_river['Project']['Locked']
+        else:
+            self.project['Locked'] = None
+
+        # Process site information
+        siteinfo_keys = win_river['Project']['Site_Information'].keys()
+
+        # Iterate through all of the keys and values of site info
+        for x in siteinfo_keys:
+            site_data = win_river['Project']['Site_Information'][x]
+            if site_data is not None:
+                # Remove @ symbol from properties
+                if '@' in x:
+                    x = x[1:]
+                if x == 'Water_Temperature':
+                    self.site_info[x] = float(site_data)
+                    # -32768 used to denote no data
+                    if self.site_info[x] < -100:
+                        self.site_info[x] = ''
+                else:
+                    self.site_info[x] = site_data
+            else:
+                self.site_info[x] = ''
+        if 'Transect' in win_river['Project']['Site_Discharge'].keys():
+            trans = win_river['Project']['Site_Discharge']['Transect']
+
+            # Create a Transect class for each transect found under Site_Discharge
+            if type(trans) == list:
+                for i in range(len(trans)):
+                    if 'File' in trans[i]:
+                        self.transects.append(MMTtransect(trans[i]))
+            else:
+                self.transects = [MMTtransect(trans)]
+
+            # Discharge Summary
+            if 'Discharge_Summary' in win_river['Project']['Site_Discharge'].keys():
+                discharge_summary = win_river['Project']['Site_Discharge']['Discharge_Summary']
+
+                self.summary['NONE'] = self.mmtqsum(discharge_summary['None'])
+                self.summary['BT'] = self.mmtqsum(discharge_summary['BottomTrack'])
+                self.summary['GGA'] = self.mmtqsum(discharge_summary['GGA'])
+                self.summary['VTG'] = self.mmtqsum(discharge_summary['VTG'])
+
+        # QA_QC
+        if 'QA_QC' in win_river['Project'].keys():
+            qaqc = win_river['Project']['QA_QC']
+            for qaqc_type, data in qaqc.items():
+                # Parse qaqc data from dictionary if the type is a test, cal, or eval
+                if qaqc_type in ['RG_Test', 'Compass_Calibration', 'Compass_Evaluation']:
+                    # There could be multiple tests of the same type so they are stored in a list
+                    time_stamp = qaqc_type + '_TimeStamp'
+                    if not isinstance(data['TestResult'], list):
+                        self.qaqc[qaqc_type] = [data['TestResult']['Text']]
+                        self.qaqc[time_stamp] = [data['TestResult']['TimeStamp']]
+                    else:
+                        self.qaqc[qaqc_type] = []
+                        self.qaqc[time_stamp] = []
+                        for result in data['TestResult']:
+                            self.qaqc[qaqc_type].append(result['Text'])
+                            self.qaqc[time_stamp].append(result['TimeStamp'])
+
+                if qaqc_type == 'Moving_Bed_Test':
+                    if 'Transect' in data.keys():
+                        self.moving_bed_test(data)
+
+    def moving_bed_test(self, mb_data):
+        """Method to parse data from moving-bed test dictionary.
+
+        Parameters
+        ----------
+        mb_data: dict
+            Dictionary containing moving-bed test information
+        """
+
+        transects = mb_data['Transect']
+
+        # If only one transect make it a list
+        if not isinstance(transects, list):
+            transects = [transects]
+
+        # Process each transect dictionary
+        for tsect in transects:
+            transect = MMTtransect(tsect)
+
+            # Determine type of moving-bed test
+            if '@MBTType' in tsect:
+                if tsect['@MBTType'] == '0':
+                    transect.moving_bed_type = 'Loop'
+                elif tsect['@MBTType'] == '1':
+                    transect.moving_bed_type = 'Stationary'
+            else:
+                # Use the file name to determine the moving-bed test type
+                file_name = transect.Files[0]
+                fidx = file_name.rfind('.')
+                if file_name[fidx-3:fidx] == 'SBT':
+                    transect.moving_bed_type = 'Stationary'
+                elif file_name[fidx-3:fidx] == 'LBT':
+                    transect.moving_bed_type = 'Loop'
+                else:
+                    # If type can't be determined process as stationary
+                    transect.moving_bed_type = 'Stationary'
+
+            self.mbt_transects.append(transect)
+
+    @staticmethod
+    def mmtqsum(data):
+        """Method to parse the MMT Q summary data.
+
+        Parameters
+        ----------
+        data: dict
+            A summary dictionary from mmt file.
+
+        Returns
+        -------
+        sum_dict: dict
+            Dictionary of summary with a couple of key names changed.
+        """
+        
+        sum_dict = {
+            'Use': [],
+            'Begin_Left': [],
+            'FileName': [],
+            'LeftEdgeSlopeCoeff': [],
+            'RightEdgeSlopeCoeff': []
+            }
+
+        # Iterate through each transect
+        for transect in data.values():
+            # Iterate through each key and val in the transect summary
+            for key2, val2 in transect.items():
+                # Append value from transect to appropriate key
+                if key2 == 'UseInSummary':
+                    sum_dict['Use'].append(float(val2))
+                elif key2 == "BeginLeft":
+                    sum_dict['Begin_Left'].append(float(val2))
+                elif key2 == 'FileName':
+                    sum_dict['FileName'].append(val2)
+                elif key2 == 'LeftEdgeSlopeCoeff':
+                    sum_dict['LeftEdgeSlopeCoeff'].append(float(val2))
+                elif key2 == 'RightEdgeSlopeCoeff':
+                    sum_dict['RightEdgeSlopeCoeff'].append(float(val2))
+                else:
+                    # If the key has not been specified use key from transect summary
+                    if key2 not in sum_dict:
+                        sum_dict[key2] = []
+                    try:
+                        sum_dict[key2].append(float(val2))
+                    except ValueError:
+                        sum_dict[key2].append(np.nan)
+        return sum_dict
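+
+    # Illustrative mmtqsum result for a two-transect summary (values are
+    # made up for this sketch):
+    #   {'Use': [1.0, 1.0], 'Begin_Left': [1.0, 0.0],
+    #    'FileName': ['x_000.PD0', 'x_001.PD0'],
+    #    'LeftEdgeSlopeCoeff': [0.3535, 0.3535],
+    #    'RightEdgeSlopeCoeff': [0.3535, 0.3535], ...}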
+
+
+class MMTtransect(object):
+    """Class to hold properties of MMT transect dictionary attributes.
+
+    Attributes
+    ----------
+    Checked: int
+    Files: list
+    Notes: list
+    """
+
+    def __init__(self, trans):
+        """Constructor immediately begins extraction of data"""
+
+        self.Checked = int(trans['@Checked'])
+        self.Files = []
+        self.Notes = []
+        self.field_config = None
+        self.active_config = None
+        self.moving_bed_type = None
+
+        files = trans['File']
+
+        # Store the filename for each file associated with the transect
+        if type(files) is list:
+            for file in files:
+                self.Files.append(file['#text'])
+        else:
+            self.Files.append(files['#text'])
+
+        # Create note dictionaries for each note associated with the transect
+        if 'Note' in trans.keys():
+            note = trans['Note']
+            if type(note) is list:
+                for n in note:
+                    if type(trans['File']) is list:
+                        self.Notes.append(self.note_dict(n, trans['File'][0]['@TransectNmb']))
+                    else:
+                        self.Notes.append(self.note_dict(n, trans['File']['@TransectNmb']))
+            else:
+                if type(trans['File']) is list:
+                    self.Notes.append(self.note_dict(note, trans['File'][0]['@TransectNmb']))
+                else:
+                    self.Notes.append(self.note_dict(note, trans['File']['@TransectNmb']))
+
+        # Create configuration dictionaries for each config attribute
+        if type(trans['Configuration']) is list:
+            for config in trans['Configuration']:
+                if int(config['@Checked']) == 0:
+                    self.field_config = self.parse_config(config)
+                if int(config['@Checked']) == 1:
+                    self.active_config = self.parse_config(config)
+        else:
+            if int(trans['Configuration']['@Checked']) == 0:
+                self.field_config = self.parse_config(trans['Configuration'])
+            if int(trans['Configuration']['@Checked']) == 1:
+                self.active_config = self.parse_config(trans['Configuration'])
+
+        # Assign active config to field config if there is no field config
+        if self.field_config is None:
+            self.field_config = self.active_config
+
+    def set_moving_bed_type(self, mvb_type):
+        """Setter for moving bed type in the case of MBT Transects
+
+        Parameters
+        ----------
+        mvb_type: str
+            Type of moving-bed test.
+        """
+
+        self.moving_bed_type = mvb_type
+
+    @staticmethod
+    def parse_config(config):
+        """Method to parse configuration file from mmt xml.
+
+        Parameters
+        ----------
+        config: dict
+            Dictionary of configuration settings
+
+        Returns
+        -------
+        config_dict: dict
+            Processed dictionary of configuration settings
+        """
+
+        # Initialize dictionary for configuration
+        config_dict = {}
+
+        # Store all instrument commands
+        command_groups = config['Commands']
+        for group in command_groups.keys():
+            config_dict[group] = []
+            for key, command in command_groups[group].items():
+                if key != '@Status':
+                    config_dict[group].append(command)
+
+        # Depth sounder configuration
+        if 'Use_Depth_Sounder_In_Processing' in config['Depth_Sounder'].keys():
+            if config['Depth_Sounder']['Use_Depth_Sounder_In_Processing']['#text'] == "YES":
+                config_dict['DS_Use_Process'] = 1
+            else:
+                config_dict['DS_Use_Process'] = 0
+        else:
+            config_dict['DS_Use_Process'] = -1
+
+        config_dict['DS_Transducer_Depth'] = float(config['Depth_Sounder']['Depth_Sounder_Transducer_Depth']['#text'])
+        config_dict['DS_Transducer_Offset'] = float(config['Depth_Sounder']['Depth_Sounder_Transducer_Offset']['#text'])
+
+        if config['Depth_Sounder']['Depth_Sounder_Correct_Speed_of_Sound']['#text'] == 'YES':
+            config_dict['DS_Cor_Spd_Sound'] = 1
+        else:
+            config_dict['DS_Cor_Spd_Sound'] = 0
+
+        config_dict['DS_Scale_Factor'] = float(config['Depth_Sounder']['Depth_Sounder_Scale_Factor']['#text'])
+
+        # External heading configuration
+        config_dict['Ext_Heading_Offset'] = float(config['Ext_Heading']['Offset']['#text'])
+
+        if 'Use_Ext_Heading' in config['Ext_Heading'].keys():
+            if config['Ext_Heading']['Use_Ext_Heading']['#text'] == 'NO':
+                config_dict['Ext_Heading_Use'] = False
+            else:
+                config_dict['Ext_Heading_Use'] = True
+        else:
+            config_dict['Ext_Heading_Use'] = False
+
+        # GPS configuration
+        if 'GPS' in config.keys():
+            config_dict['GPS_Time_Delay'] = config['GPS']['Time_Delay']['#text']
+
+        # Discharge settings
+        config_dict['Q_Top_Method'] = float(config['Discharge']['Top_Discharge_Estimate']['#text'])
+        config_dict['Q_Bottom_Method'] = float(config['Discharge']['Bottom_Discharge_Estimate']['#text'])
+        config_dict['Q_Power_Curve_Coeff'] = float(config['Discharge']['Power_Curve_Coef']['#text'])
+        config_dict['Q_Cut_Top_Bins'] = float(config['Discharge']['Cut_Top_Bins']['#text'])
+        config_dict['Q_Bins_Above_Sidelobe'] = float(config['Discharge']['Cut_Bins_Above_Sidelobe']['#text'])
+        config_dict['Q_Left_Edge_Type'] = float(config['Discharge']['River_Left_Edge_Type']['#text'])
+        config_dict['Q_Left_Edge_Coeff'] = float(config['Discharge']['Left_Edge_Slope_Coeff']['#text'])
+        config_dict['Q_Right_Edge_Type'] = float(config['Discharge']['River_Right_Edge_Type']['#text'])
+        config_dict['Q_Right_Edge_Coeff'] = float(config['Discharge']['Right_Edge_Slope_Coeff']['#text'])
+        config_dict['Q_Shore_Pings_Avg'] = float(config['Discharge']['Shore_Pings_Avg']['#text'])
+
+        # Edge estimate settings
+        config_dict['Edge_Begin_Shore_Distance'] = config['Edge_Estimates']['Begin_Shore_Distance']['#text']
+        config_dict['Edge_End_Shore_Distance'] = float(config['Edge_Estimates']['End_Shore_Distance']['#text'])
+        if config['Edge_Estimates']['Begin_Left_Bank']['#text'] == 'YES':
+            config_dict['Edge_Begin_Left_Bank'] = 1
+        else:
+            config_dict['Edge_Begin_Left_Bank'] = 0
+
+        # Check for user discharge feature in mmt file
+        if 'Begin_Manual_Discharge' in config['Edge_Estimates']:
+            config_dict['Edge_Begin_Manual_Discharge'] = float(config['Edge_Estimates']['Begin_Manual_Discharge']['#text'])
+            config_dict['Edge_Begin_Method_Distance'] = \
+                config['Edge_Estimates']['Begin_Edge_Discharge_Method_Distance']['#text']
+            config_dict['Edge_End_Manual_Discharge'] = float(config['Edge_Estimates']['End_Manual_Discharge']['#text'])
+            config_dict['Edge_End_Method_Distance'] = \
+                config['Edge_Estimates']['End_Edge_Discharge_Method_Distance']['#text']
+
+        # Offsets
+        for key in config['Offsets'].keys():
+            if key == 'ADCP_Transducer_Depth':
+                child = "Offsets_Transducer_Depth"
+            else:
+                child = "Offsets_" + key
+
+            config_dict[child] = float(config['Offsets'][key]['#text'])
+
+        # Processing settings
+        for key in config['Processing'].keys():
+            if key == 'Use_3_Beam_Solution_For_BT':
+                child = 'Proc_Use_3_Beam_BT'
+            elif key == 'Use_3_Beam_Solution_For_WT':
+                child = 'Proc_Use_3_Beam_WT'
+            elif key == 'BT_Error_Velocity_Threshold':
+                child = 'Proc_BT_Error_Vel_Threshold'
+            elif key == 'WT_Error_Velocity_Threshold':
+                child = 'Proc_WT_Error_Velocity_Threshold'
+            elif key == 'BT_Up_Velocity_Threshold':
+                child = 'Proc_BT_Up_Vel_Threshold'
+            elif key == 'WT_Up_Velocity_Threshold':
+                child = 'Proc_WT_Up_Vel_Threshold'
+            elif key == 'Fixed_Speed_Of_Sound':
+                child = 'Proc_Fixed_Speed_Of_Sound'
+            elif key == 'Mark_Below_Bottom_Bad':
+                child = 'Proc_Mark_Below_Bottom_Bad'
+            elif key == 'Use_Weighted_Mean':
+                child = 'Proc_Use_Weighted_Mean'
+            elif key == 'Absorption':
+                child = 'Proc_Absorption'
+            else:
+                child = 'Proc_' + key
+
+            # Try to cast to float otherwise assign 1 or 0 based on string value
+            try:
+                config_dict[child] = float(config['Processing'][key]['#text'])
+            except ValueError:
+                if config['Processing'][key]['#text'] == 'YES':
+                    config_dict[child] = 1
+                else:
+                    config_dict[child] = 0
+
+        # Recording
+        config_dict['Rec_Filename_Prefix'] = config['Recording']['Filename_Prefix']['#text']
+        config_dict['Rec_Output_Directory'] = config['Recording']['Output_Directory']['#text']
+
+        if 'Root_Directory' in config['Recording'].keys():
+            if '#text' in config['Recording']['Root_Directory']:
+                config_dict['Rec_Root_Directory'] = config['Recording']['Root_Directory']['#text']
+            else:
+                config_dict['Rec_Root_Directory'] = None
+        else:
+            config_dict['Rec_Root_Directory'] = None
+
+        # 'MeasurmentNmb' (sic) matches the key spelling used in the mmt file
+        config_dict['Rec_MeasNmb'] = config['Recording']['MeasurmentNmb']
+        config_dict['Rec_GPS'] = config['Recording']['GPS_Recording']['#text']
+        config_dict['Rec_DS'] = config['Recording']['DS_Recording']['#text']
+        config_dict['Rec_EH'] = config['Recording']['EH_Recording']['#text']
+        config_dict['Rec_ASCII_Output'] = config['Recording']['ASCII_Output_Recording']['#text']
+        config_dict['Rec_Max_File_Size'] = float(config['Recording']['Maximum_File_Size']['#text'])
+        config_dict['Rec_Next_Transect_Number'] = float(config['Recording']['Next_Transect_Number']['#text'])
+        config_dict['Rec_Add_Date_Time'] = float(config['Recording']['Add_Date_Time']['#text'])
+        config_dict['Rec_Use_Delimiter'] = config['Recording']['Use_Delimiter']['#text']
+        config_dict['Rec_Delimiter'] = config['Recording']['Custom_Delimiter']['#text']
+        config_dict['Rec_Prefix'] = config['Recording']['Use_Prefix']['#text']
+        config_dict['Rec_Use_MeasNmb'] = config['Recording']['Use_MeasurementNmb']['#text']
+        config_dict['Rec_Use_TransectNmb'] = config['Recording']['Use_TransectNmb']['#text']
+        config_dict['Rec_Use_SequenceNmb'] = config['Recording']['Use_SequenceNmb']['#text']
+
+        # Wizard settings
+        config_dict['Wiz_ADCP_Type'] = float(config['Wizard_Info']['ADCP_Type'])
+        config_dict['Wiz_Firmware'] = float(config['Wizard_Info']['ADCP_FW_Version'])
+        config_dict['Wiz_Use_Ext_Heading'] = config['Wizard_Info']['Use_Ext_Heading']
+        config_dict['Wiz_Use_GPS'] = config['Wizard_Info']['Use_GPS']
+        config_dict['Wiz_Use_DS'] = config['Wizard_Info']['Use_Depth_Sounder']
+        config_dict['Wiz_Max_Water_Depth'] = float(config['Wizard_Info']['Max_Water_Depth'])
+        config_dict['Wiz_Max_Water_Speed'] = float(config['Wizard_Info']['Max_Water_Speed'])
+        config_dict['Wiz_Max_Boat_Space'] = float(config['Wizard_Info']['Max_Boat_Speed'])
+        config_dict['Wiz_Material'] = float(config['Wizard_Info']['Material'])
+        config_dict['Wiz_Water_Mode'] = float(config['Wizard_Info']['Water_Mode'])
+        config_dict['Wiz_Bottom_Mode'] = float(config['Wizard_Info']['Bottom_Mode'])
+        config_dict['Wiz_Beam_Angle'] = float(config['Wizard_Info']['Beam_Angle'])
+        config_dict['Wiz_Pressure_Sensor'] = config['Wizard_Info']['Pressure_Sensor']
+        config_dict['Wiz_Water_Mode_13'] = float(config['Wizard_Info']['Water_Mode_13_Avail'])
+        config_dict['Wiz_StreamPro_Default'] = float(config['Wizard_Info']['Use_StreamPro_Def_Cfg'])
+        config_dict['Wiz_StreamPro_Bin_Size'] = float(config['Wizard_Info']['StreamPro_Bin_Size'])
+        config_dict['Wiz_StreamPro_Bin_Number'] = float(config['Wizard_Info']['StreamPro_Bin_Num'])
+
+        if 'Use_GPS_Internal' in config['Wizard_Info'].keys():
+            config_dict['Wiz_Use_GPS_Internal'] = config['Wizard_Info']['Use_GPS_Internal']
+        if 'Internal_GPS_Baud_Rate_Index' in config['Wizard_Info'].keys():
+            config_dict['Wiz_Internal_GPS_Baud_Rate_Index'] = float(config['Wizard_Info']
+                                                                    ['Internal_GPS_Baud_Rate_Index'])
+
+        return config_dict
+
+    @staticmethod
+    def file_dict(file):
+        """Create dictionary for file information.
+
+        Parameters
+        ----------
+        file: dict
+            Dictionary for file from mmt
+
+        Returns
+        -------
+        transect_file: dict
+            Dictionary of transect file information
+                Path: str
+                    Full filename of transect including path
+                File: str
+                    Filename of transect
+                Number: str
+                    Transect number assigned in WinRiver 2
+        """
+
+        transect_file = {'Path': file['@PathName'], 'File': file['#text'], 'Number': file['@TransectNmb']}
+        return transect_file
+
+    @staticmethod
+    def note_dict(note, number):
+        """Create dictionary for notes.
+
+        Parameters
+        ----------
+        note: dict
+            Dictionary from mmt for notes
+        number: str
+            Transect number
+
+        Returns
+        -------
+        note_dict_out: dict
+            Dictionary for note information
+                NoteFileNo: str
+                    Transect number associated with the note
+                NoteDate: str
+                    Date note was entered
+                NoteText: str
+                    Text of note
+        """
+
+        note_dict_out = {'NoteFileNo': number, 'NoteDate': note['@TimeStamp'], 'NoteText': note['@Text']}
+        return note_dict_out
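+
+
+# Minimal usage sketch, not part of the processing chain; the mmt file name
+# below is hypothetical.
+if __name__ == '__main__':
+    mmt = MMTtrdi('example.mmt')
+    print('Project:', mmt.project['Name'])
+    for transect in mmt.transects:
+        # Each MMTtransect records its raw data files and checked status
+        print(transect.Files, 'checked' if transect.Checked else 'unchecked')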
diff --git a/Classes/MatSonTek.py b/Classes/MatSonTek.py
new file mode 100644
index 0000000..e78c346
--- /dev/null
+++ b/Classes/MatSonTek.py
@@ -0,0 +1,64 @@
+import scipy.io as sio
+import numpy as np
+
+class MatSonTek(object):
+    """Read SonTek Matlab files and returns a dictionary of mat_struct.
+     Any data in English units are converted to SI units.
+    """
+
+    def __init__(self, fullname):
+        """Initializes the object, reads the Matlab file, and converts all English units to metric.
+
+        Parameters
+        ----------
+        fullname: str
+            String containing both the path and filename.
+        """
+
+        # Read Matlab file
+        mat_data = sio.loadmat(fullname, struct_as_record=False, squeeze_me=True)
+
+        if 'BottomTrack' in mat_data:
+            # Convert data to SI units if in English units
+            if mat_data['BottomTrack'].Units.BT_Depth == 'ft':
+                self.convert2metric(mat_data)
+
+            if 'RawGPSData' in mat_data and hasattr(mat_data['RawGPSData'], 'VtgMode'):
+                mat_data['RawGPSData'].VtgMode[np.isnan(mat_data['RawGPSData'].VtgMode)] = 0
+                mat_data['RawGPSData'].VtgMode = \
+                    np.array([chr(x) for x in range(127)])[mat_data['RawGPSData'].VtgMode.astype(int)]
+
+        # Create structure from dictionary
+        vars(self).update(mat_data)
+
+    @staticmethod
+    def convert2metric(mat_data):
+        """Converts all data in English units to metric units.
+
+        Parameters
+        ----------
+        mat_data: dict
+            Dictionary of data from Matlab file
+        """
+
+        data2correct = ['BottomTrack', 'GPS', 'Setup', 'Summary', 'System', 'WaterTrack']
+        for item in data2correct:
+            data = mat_data[item]
+            units = data.Units
+            names = units._fieldnames
+            for name in names:
+                if getattr(units, name) == 'ft':
+                    setattr(data, name, getattr(data, name) * 0.3048)
+                    setattr(units, name, 'm')
+                elif getattr(units, name) == 'ft/s':
+                    setattr(data, name, getattr(data, name) * 0.3048)
+                    setattr(units, name, 'm/s')
+                elif getattr(units, name) == 'degF':
+                    setattr(data, name, (getattr(data, name)-32) * (5.0/9.0))
+                    setattr(units, name, 'degC')
+                elif getattr(units, name) == 'cfs':
+                    setattr(data, name, getattr(data, name) * (0.3048**3))
+                    setattr(units, name, 'm3/s')
+                elif getattr(units, name) == 'ft2':
+                    setattr(data, name, getattr(data, name) * (0.3048 ** 2))
+                    setattr(units, name, 'm2')
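+
+
+# Minimal usage sketch; the .mat file name is hypothetical. After loading,
+# the Matlab structures are attributes and English units have been converted
+# to SI (e.g., depths in m rather than ft).
+if __name__ == '__main__':
+    rs = MatSonTek('example_transect.mat')
+    if hasattr(rs, 'BottomTrack'):
+        print(rs.BottomTrack.Units.BT_Depth)  # 'm' after conversion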
diff --git a/Classes/Measurement.py b/Classes/Measurement.py
new file mode 100644
index 0000000..ebcba8f
--- /dev/null
+++ b/Classes/Measurement.py
@@ -0,0 +1,3961 @@
+import os
+import datetime
+import numpy as np
+import xml.etree.ElementTree as ETree
+from xml.dom.minidom import parseString
+from Classes.MMT_TRDI import MMTtrdi
+from Classes.TransectData import TransectData
+from Classes.PreMeasurement import PreMeasurement
+from Classes.MovingBedTests import MovingBedTests
+from Classes.QComp import QComp
+from Classes.MatSonTek import MatSonTek
+from Classes.ComputeExtrap import ComputeExtrap
+from Classes.ExtrapQSensitivity import ExtrapQSensitivity
+from Classes.Uncertainty import Uncertainty
+from Classes.QAData import QAData
+from Classes.BoatStructure import BoatStructure
+from Classes.BoatData import BoatData
+from Classes.WaterData import WaterData
+from Classes.Oursin import Oursin
+from Classes.Pd0TRDI_2 import Pd0TRDI
+from MiscLibs.common_functions import cart2pol, pol2cart, rad2azdeg, nans, azdeg2rad
+# from profilehooks import profile, timecall
+
+
+class Measurement(object):
+    """Class to hold all measurement details.
+
+    Attributes
+    ----------
+    station_name: str
+        Station name
+    station_number: str
+        Station number
+    meas_number: str
+        Measurement number
+    persons: str
+        Persons collecting and/or processing the measurement
+    transects: list
+        List of transect objects of TransectData
+    mb_tests: list
+        List of moving-bed test objects of MovingBedTests
+    system_tst: list
+        List of system test objects of PreMeasurement
+    compass_cal: list
+        List of compass calibration objects of PreMeasurement
+    compass_eval: list
+        List of compass evaluation objects of PreMeasurement
+    extrap_fit: ComputeExtrap
+        Object of ComputeExtrap
+    processing: str
+        Type of processing, default QRev
+    discharge: list
+        List of discharge objects of QComp
+    uncertainty: Uncertainty
+        Object of Uncertainty
+    initial_settings: dict
+        Dictionary of all initial processing settings
+    qa: QAData
+        Object of QAData
+    user_rating: str
+        Optional user rating
+    comments: list
+        List of all user supplied comments
+    ext_temp_chk: dict
+        Dictionary of external temperature readings
+    use_weighted: bool
+        Indicates the setting for use_weighted to be used for reprocessing
+    use_ping_type: bool
+        Indicates if ping types should be used in BT and WT filters
+    use_measurement_thresholds: bool
+        Indicates if the entire measurement should be used to set filter thresholds
+    stage_start_m: float
+        Stage at start of measurement
+    stage_end_m: float
+        Stage at end of measurement
+    stage_meas_m: float
+        Stage assigned to measurement
+    """
+
+    # @profile
+    def __init__(self, in_file, source, proc_type='QRev', checked=False, run_oursin=False, use_weighted=False,
+                 use_measurement_thresholds=False, use_ping_type=True, min_transects=2, min_duration=720):
+        """Initialize instance variables and initiate processing of measurement
+        data.
+
+        Parameters
+        ----------
+        in_file: str or list or dict
+            String containing fullname of mmt file for TRDI data, dict for
+            QRev data, or list of files for SonTek
+        source: str
+            Source of data. TRDI, SonTek, QRev
+        proc_type: str
+            Type of processing. QRev, None, Original
+        checked: bool
+            Boolean to determine if only checked transects should be loaded
+            for TRDI data.
+        run_oursin: bool
+            Determines if the Oursin uncertainty model should be run
+        use_weighted: bool
+            Specifies if discharge weighted medians are used for extrapolation
+        use_measurement_thresholds: bool
+            Specifies if filters are based on a transect or whole measurement
+        use_ping_type: bool
+            Specifies if filters are based on ping type and frequency
+        min_transects: int
+            Minimum number of transects required to pass QA
+        min_duration: float
+            Minimum duration in seconds of all transects to pass QA
+        """
+
+        self.use_ping_type = use_ping_type
+        self.use_measurement_thresholds = use_measurement_thresholds
+        self.run_oursin = run_oursin
+        self.min_transects = min_transects
+        self.min_duration = min_duration
+        self.station_name = None
+        self.station_number = None
+        self.persons = ''
+        self.meas_number = ''
+        self.transects = []
+        self.mb_tests = []
+        self.system_tst = []
+        self.compass_cal = []
+        self.compass_eval = []
+        self.extrap_fit = None
+        self.processing = None
+        self.discharge = []
+        self.uncertainty = None
+        self.initial_settings = None
+        self.qa = None
+        self.user_rating = 'Not Rated'
+        self.comments = []
+        self.ext_temp_chk = {'user': np.nan, 'units': 'C', 'adcp': np.nan, 'user_orig': np.nan, 'adcp_orig': np.nan}
+        self.checked_transect_idx = []
+        self.oursin = None
+        self.use_weighted = use_weighted
+        self.observed_no_moving_bed = False
+        self.stage_meas_m = 0
+        self.stage_end_m = 0
+        self.stage_start_m = 0
+
+        # Load data from selected source
+        if source == 'QRev':
+            self.load_qrev_mat(mat_data=in_file)
+            if proc_type == 'QRev':
+                # Apply QRev default settings
+                self.run_oursin = run_oursin
+                self.use_weighted = use_weighted
+                self.use_measurement_thresholds = use_measurement_thresholds
+                settings = self.current_settings()
+                settings['WTEnsInterpolation'] = 'abba'
+                settings['WTCellInterpolation'] = 'abba'
+                settings['Processing'] = 'QRev'
+                settings['UseMeasurementThresholds'] = use_measurement_thresholds
+                self.apply_settings(settings)
+
+        else:
+            if source == 'TRDI':
+                self.load_trdi(in_file, checked=checked)
+
+            elif source in ('SonTek', 'Nortek'):
+                # Nortek data are read with the SonTek loader
+                self.load_sontek(in_file)
+
+            # Process data
+            if len(self.transects) > 0:
+
+                # Save initial settings
+                self.initial_settings = self.current_settings()
+
+                # Process moving-bed tests
+                if len(self.mb_tests) > 0:
+                    # Get navigation reference
+                    select = self.initial_settings['NavRef']
+                    ref = None
+                    if select == 'bt_vel':
+                        ref = 'BT'
+                    elif select == 'gga_vel':
+                        ref = 'GGA'
+                    elif select == 'vtg_vel':
+                        ref = 'VTG'
+                    self.mb_tests = MovingBedTests.auto_use_2_correct(
+                        moving_bed_tests=self.mb_tests, boat_ref=ref)
+
+                # Set processing type
+                if proc_type == 'QRev':
+                    # Apply QRev default settings
+                    settings = self.qrev_default_settings(check_user_excluded_dist=True, use_weighted=use_weighted)
+                    settings['Processing'] = 'QRev'
+                    settings['UseMeasurementThresholds'] = use_measurement_thresholds
+                    settings['UsePingType'] = self.use_ping_type
+                    self.apply_settings(settings)
+
+                elif proc_type == 'None':
+                    # Processing with no filters and interpolation
+                    settings = self.no_filter_interp_settings(self)
+                    settings['Processing'] = 'None'
+                    self.apply_settings(settings)
+
+                elif proc_type == 'Original':
+                    # Processing for original settings
+                    # from manufacturer software
+                    for transect in self.transects:
+                        q = QComp()
+                        q.populate_data(data_in=transect,
+                                        moving_bed_data=self.mb_tests)
+                        self.discharge.append(q)
+                self.uncertainty = Uncertainty()
+                self.uncertainty.compute_uncertainty(self)
+
+                self.qa = QAData(self)
+
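+    # Illustrative construction (file names are hypothetical):
+    #   meas = Measurement(in_file='site.mmt', source='TRDI',
+    #                      proc_type='QRev', checked=True)
+    # For SonTek/Nortek data, in_file is instead a list of transect .mat
+    # files; for QRev data it is a dict such as returned by sio.loadmat.
+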
+    def load_trdi(self, mmt_file, transect_type='Q', checked=False):
+        """Method to load TRDI data.
+
+        Parameters
+        ----------
+        mmt_file: str
+            Full pathname to mmt file.
+        transect_type: str
+            Type of data (Q: discharge, MB: moving-bed test)
+        checked: bool
+            Determines if all files are loaded (False) or only checked (True)
+        """
+
+        # Read mmt file
+        mmt = MMTtrdi(mmt_file)
+
+        # Get properties if they exist, otherwise set them as blank strings
+        self.station_name = str(mmt.site_info['Name'])
+        self.station_number = str(mmt.site_info['Number'])
+        self.persons = str(mmt.site_info['Party'])
+        self.meas_number = str(mmt.site_info['MeasurementNmb'])
+
+        # Get stage readings, if available. Note: mmt stage is always in m.
+        if mmt.site_info['Use_Inside_Gage_Height'] == '1':
+            stage = float(mmt.site_info['Inside_Gage_Height'])
+        else:
+            stage = float(mmt.site_info['Outside_Gage_Height'])
+
+        self.stage_start_m = stage
+        change = float(mmt.site_info['Gage_Height_Change'])
+        self.stage_end_m = stage + change
+        self.stage_meas_m = (self.stage_start_m + self.stage_end_m) / 2.
+
+        # Initialize processing variable
+        self.processing = 'WR2'
+
+        if len(mmt.transects) > 0:
+            # Create transect objects for TRDI data
+            self.transects = self.allocate_transects(mmt=mmt,
+                                                     transect_type=transect_type,
+                                                     checked=checked)
+
+            self.checked_transect_idx = self.checked_transects(self)
+
+            # Create object for pre-measurement tests
+            if isinstance(mmt.qaqc, dict) or isinstance(mmt.mbt_transects, list):
+                self.qaqc_trdi(mmt)
+
+            # Save comments from mmt file in comments
+            self.comments.append('MMT Remarks: ' + mmt.site_info['Remarks'])
+
+            for t in range(len(self.transects)):
+                notes = getattr(mmt.transects[t], 'Notes')
+                for note in notes:
+                    note_text = ' File: ' + note['NoteFileNo'] + ' ' \
+                                + note['NoteDate'] + ': ' + note['NoteText']
+                    self.comments.append(note_text)
+
+            # Get external temperature
+            if type(mmt.site_info['Water_Temperature']) is float:
+                self.ext_temp_chk['user'] = mmt.site_info['Water_Temperature']
+                self.ext_temp_chk['units'] = 'C'
+                self.ext_temp_chk['user_orig'] = mmt.site_info['Water_Temperature']
+
+            # Initialize thresholds settings dictionary
+            threshold_settings = dict()
+            threshold_settings['wt_settings'] = {}
+            threshold_settings['bt_settings'] = {}
+            threshold_settings['depth_settings'] = {}
+
+            # Select the reference transect: the first checked transect, or the first transect if none are checked
+            if len(self.checked_transect_idx) > 0:
+                ref_transect = self.checked_transect_idx[0]
+            else:
+                ref_transect = 0
+
+            # Water track filter threshold settings
+            threshold_settings['wt_settings']['beam'] = \
+                self.set_num_beam_wt_threshold_trdi(mmt.transects[ref_transect])
+            threshold_settings['wt_settings']['difference'] = 'Manual'
+            threshold_settings['wt_settings']['difference_threshold'] = \
+                mmt.transects[ref_transect].active_config['Proc_WT_Error_Velocity_Threshold']
+            threshold_settings['wt_settings']['vertical'] = 'Manual'
+            threshold_settings['wt_settings']['vertical_threshold'] = \
+                mmt.transects[ref_transect].active_config['Proc_WT_Up_Vel_Threshold']
+
+            # Bottom track filter threshold settings
+            threshold_settings['bt_settings']['beam'] = \
+                self.set_num_beam_bt_threshold_trdi(mmt.transects[ref_transect])
+            threshold_settings['bt_settings']['difference'] = 'Manual'
+            threshold_settings['bt_settings']['difference_threshold'] = \
+                mmt.transects[ref_transect].active_config['Proc_BT_Error_Vel_Threshold']
+            threshold_settings['bt_settings']['vertical'] = 'Manual'
+            threshold_settings['bt_settings']['vertical_threshold'] = \
+                mmt.transects[ref_transect].active_config['Proc_BT_Up_Vel_Threshold']
+
+            # Depth filter and averaging settings
+            threshold_settings['depth_settings']['depth_weighting'] = \
+                self.set_depth_weighting_trdi(mmt.transects[ref_transect])
+            threshold_settings['depth_settings']['depth_valid_method'] = 'TRDI'
+            threshold_settings['depth_settings']['depth_screening'] = \
+                self.set_depth_screening_trdi(mmt.transects[ref_transect])
+
+            # Determine reference used in WR2 if available
+            reference = 'BT'
+            if 'Reference' in mmt.site_info.keys():
+                reference = mmt.site_info['Reference']
+                if reference == 'BT':
+                    target = 'bt_vel'
+                elif reference == 'GGA':
+                    target = 'gga_vel'
+                elif reference == 'VTG':
+                    target = 'vtg_vel'
+                else:
+                    target = 'bt_vel'
+
+                for transect in self.transects:
+                    if getattr(transect.boat_vel, target) is None:
+                        reference = 'BT'
+
+            # Convert to earth coordinates
+            for transect_idx, transect in enumerate(self.transects):
+                # Convert to earth coordinates
+                transect.change_coord_sys(new_coord_sys='Earth')
+
+                # Set navigation reference
+                transect.change_nav_reference(update=False, new_nav_ref=reference)
+
+                # Apply WR2 thresholds
+                self.thresholds_trdi(transect, threshold_settings)
+
+                # Apply boat interpolations
+                transect.boat_interpolations(update=False,
+                                             target='BT',
+                                             method='None')
+                if transect.gps is not None:
+                    transect.boat_interpolations(update=False,
+                                                 target='GPS',
+                                                 method='HoldLast')
+
+                # Update water data for changes in boat velocity
+                transect.update_water()
+
+                # Filter water data
+                transect.w_vel.apply_filter(transect=transect, wt_depth=True)
+
+                # Interpolate water data
+                transect.w_vel.apply_interpolation(transect=transect,
+                                                   ens_interp='None',
+                                                   cells_interp='None')
+
+                # Apply speed of sound computations as required
+                mmt_sos_method = mmt.transects[transect_idx].active_config[
+                    'Proc_Speed_of_Sound_Correction']
+
+                # Speed of sound computed based on user supplied values
+                if mmt_sos_method == 1:
+                    salinity = mmt.transects[transect_idx].active_config['Proc_Salinity']
+                    transect.change_sos(parameter='salinity', selected='user', salinity=salinity)
+                elif mmt_sos_method == 2:
+                    # Speed of sound set by user
+                    speed = mmt.transects[transect_idx].active_config[
+                        'Proc_Fixed_Speed_Of_Sound']
+                    transect.change_sos(parameter='sosSrc',
+                                        selected='user',
+                                        speed=speed)
+
+    def qaqc_trdi(self, mmt):
+        """Processes qaqc test, calibrations, and evaluations
+        
+        Parameters
+        ----------
+        mmt: MMTtrdi
+            Object of MMT_TRDI
+        """
+
+        # ADCP Test
+        if 'RG_Test' in mmt.qaqc:
+            for n in range(len(mmt.qaqc['RG_Test'])):
+                p_m = PreMeasurement()
+                p_m.populate_data(mmt.qaqc['RG_Test_TimeStamp'][n],
+                                  mmt.qaqc['RG_Test'][n], 'TST')
+                self.system_tst.append(p_m)
+
+        # Compass calibration
+        if 'Compass_Calibration' in mmt.qaqc:
+            for n in range(len(mmt.qaqc['Compass_Calibration'])):
+                cc = PreMeasurement()
+                cc.populate_data(mmt.qaqc['Compass_Calibration_TimeStamp'][n],
+                                 mmt.qaqc['Compass_Calibration'][n], 'TCC')
+                self.compass_cal.append(cc)
+            
+        # Compass evaluation
+        if 'Compass_Evaluation' in mmt.qaqc:
+            for n in range(len(mmt.qaqc['Compass_Evaluation'])):
+                ce = PreMeasurement()
+                ce.populate_data(mmt.qaqc['Compass_Evaluation_TimeStamp'][n],
+                                 mmt.qaqc['Compass_Evaluation'][n], 'TCC')
+                self.compass_eval.append(ce)
+
+        # Check for moving-bed tests
+        if len(mmt.mbt_transects) > 0:
+            
+            # Create transect objects
+            transects = self.allocate_transects(mmt, transect_type='MB')
+
+            # Process moving-bed tests
+            if len(transects) > 0:
+                self.mb_tests = []
+                for n in range(len(transects)):
+
+                    # Create moving-bed test object
+                    mb_test = MovingBedTests()
+                    mb_test.populate_data('TRDI', transects[n],
+                                          mmt.mbt_transects[n].moving_bed_type)
+                    
+                    # Save notes from mmt files in comments
+                    notes = getattr(mmt.mbt_transects[n], 'Notes')
+                    for note in notes:
+                        note_text = ' File: ' + note['NoteFileNo'] + ' ' \
+                                    + note['NoteDate'] + ': ' + note['NoteText']
+                        self.comments.append(note_text)
+
+                    self.mb_tests.append(mb_test)
+
+    @staticmethod
+    def thresholds_trdi(transect, settings):
+        """Retrieve and apply manual filter settings from mmt file
+
+        Parameters
+        ----------
+        transect: TransectData
+            Object of TransectData
+        settings: dict
+            Threshold settings computed before processing
+        """
+
+        # Apply WT settings
+        transect.w_vel.apply_filter(transect, **settings['wt_settings'])
+
+        # Apply BT settings
+        transect.boat_vel.bt_vel.apply_filter(transect, **settings[
+            'bt_settings'])
+
+        # Apply depth settings
+        transect.depths.bt_depths.valid_data_method = settings[
+            'depth_settings']['depth_valid_method']
+        transect.depths.depth_filter(transect=transect, filter_method=settings[
+            'depth_settings']['depth_screening'])
+        transect.depths.bt_depths.compute_avg_bt_depth(method=settings[
+            'depth_settings']['depth_weighting'])
+
+        # Apply composite depths as per setting stored in transect
+        # from TransectData
+        transect.depths.composite_depths(transect)
+
+    def load_sontek(self, fullnames):
+        """Coordinates reading of all SonTek data files.
+
+        Parameters
+        ----------
+        fullnames: list
+            File names including path for all discharge transects converted
+            to Matlab files.
+        """
+
+        # Initialize variables
+        rsdata = None
+        pathname = None
+
+        for file in fullnames:
+            # Read data file
+            rsdata = MatSonTek(file)
+            pathname, file_name = os.path.split(file)
+
+            if hasattr(rsdata, 'BottomTrack'):
+                # Create transect objects for each discharge transect
+                self.transects.append(TransectData())
+                self.transects[-1].sontek(rsdata, file_name)
+            else:
+                self.comments.append(file + ' is incomplete and is not included in measurement processing')
+
+        # Identify checked transects
+        self.checked_transect_idx = self.checked_transects(self)
+
+        # Site information pulled from last file
+        if hasattr(rsdata, 'SiteInfo'):
+            if hasattr(rsdata.SiteInfo, 'Site_Name'):
+                if len(rsdata.SiteInfo.Site_Name) > 0:
+                    self.station_name = rsdata.SiteInfo.Site_Name
+                else:
+                    self.station_name = ''
+            if hasattr(rsdata.SiteInfo, 'Station_Number'):
+                if len(rsdata.SiteInfo.Station_Number) > 0:
+                    self.station_number = rsdata.SiteInfo.Station_Number
+                else:
+                    self.station_number = ''
+            if hasattr(rsdata.SiteInfo, 'Meas_Number'):
+                if len(rsdata.SiteInfo.Meas_Number) > 0:
+                    self.meas_number = rsdata.SiteInfo.Meas_Number
+            if hasattr(rsdata.SiteInfo, 'Party'):
+                if len(rsdata.SiteInfo.Party) > 0:
+                    self.persons = rsdata.SiteInfo.Party
+
+            if hasattr(rsdata.SiteInfo, 'Comments'):
+                if len(rsdata.SiteInfo.Comments) > 0:
+                    self.comments.append('RS Comments: ' + rsdata.SiteInfo.Comments)
+
+            # Although the units imply meters, the values are stored as
+            # ten-thousandths of a meter (e.g., 12500 -> 1.25 m)
+            if hasattr(rsdata.Setup, 'startGaugeHeight'):
+                self.stage_start_m = rsdata.Setup.startGaugeHeight / 10000.
+
+            if hasattr(rsdata.Setup, 'endGaugeHeight'):
+                self.stage_end_m = rsdata.Setup.endGaugeHeight / 10000.
+
+            self.stage_meas_m = (self.stage_start_m + self.stage_end_m) / 2.
+
+        self.qaqc_sontek(pathname)
+
+        for transect in self.transects:
+            transect.change_coord_sys(new_coord_sys='Earth')
+            transect.change_nav_reference(
+                update=False,
+                new_nav_ref=self.transects[self.checked_transect_idx[0]].boat_vel.selected)
+            transect.boat_interpolations(update=False,
+                                         target='BT',
+                                         method='Hold9')
+            transect.boat_interpolations(update=False,
+                                         target='GPS',
+                                         method='None')
+            transect.apply_averaging_method(setting='Simple')
+            transect.process_depths(update=False,
+                                    interpolation_method='HoldLast')
+            transect.update_water()
+
+            # Filter water data
+            transect.w_vel.apply_filter(transect=transect, wt_depth=True)
+
+            # Interpolate water data
+            transect.w_vel.apply_interpolation(transect=transect,
+                                               ens_interp='None',
+                                               cells_interp='None')
+            transect.w_vel.apply_interpolation(transect=transect,
+                                               ens_interp='None',
+                                               cells_interp='TRDI')
+
+            if transect.sensors.speed_of_sound_mps.selected == 'user':
+                transect.sensors.speed_of_sound_mps.selected = 'internal'
+                transect.change_sos(parameter='sosSrc',
+                                    selected='user',
+                                    speed=transect.sensors.speed_of_sound_mps.user.data)
+            elif transect.sensors.salinity_ppt.selected == 'user':
+                transect.change_sos(parameter='salinity',
+                                    selected='user',
+                                    salinity=transect.sensors.salinity_ppt.user.data)
+            elif transect.sensors.temperature_deg_c.selected == 'user':
+                transect.change_sos(parameter='temperature',
+                                    selected='user',
+                                    temperature=transect.sensors.temperature_deg_c.user.data)
+
+    def qaqc_sontek(self, pathname):
+        """Reads and stores system tests, compass calibrations,
+        and moving-bed tests.
+
+        Parameters
+        ----------
+        pathname: str
+            Path to discharge transect files.
+        """
+
+        # Compass Calibration
+        compass_cal_folder = os.path.join(pathname, 'CompassCal')
+        time_stamp = None
+        if os.path.isdir(compass_cal_folder):
+            for file in os.listdir(compass_cal_folder):
+                valid_file = False
+                # G3 compasses
+                if file.endswith('.ccal'):
+                    time_stamp = file.split('_')
+                    time_stamp = time_stamp[0] + '_' + time_stamp[1]
+                    valid_file = True
+
+                # G2 compasses
+                elif file.endswith('.txt'):
+                    prefix, _ = os.path.splitext(file)
+                    time_stamp = prefix.split('l')[1]
+                    valid_file = True
+
+                if valid_file:
+                    with open(os.path.join(compass_cal_folder, file)) as f:
+                        cal_data = f.read()
+                        cal = PreMeasurement()
+                        cal.populate_data(time_stamp, cal_data, 'SCC')
+                        self.compass_cal.append(cal)
+
+        # System Test
+        system_test_folder = os.path.join(pathname, 'SystemTest')
+        if os.path.isdir(system_test_folder):
+            for file in os.listdir(system_test_folder):
+                # Find system test files.
+                if file.startswith('SystemTest'):
+                    with open(os.path.join(system_test_folder, file)) as f:
+                        test_data = f.read()
+                        test_data = test_data.replace('\x00', '')
+                    time_stamp = file[10:24]
+                    sys_test = PreMeasurement()
+                    sys_test.populate_data(time_stamp=time_stamp,
+                                           data_in=test_data,
+                                           data_type='SST')
+                    self.system_tst.append(sys_test)
+
+        # Moving-bed tests
+        self.sontek_moving_bed_tests(pathname)
+
+    def sontek_moving_bed_tests(self, pathname):
+        """Locates and processes SonTek moving-bed tests.
+
+        Searches the pathname for Matlab files that start with Loop or SMBA.
+        Processes these files as moving bed tests.
+
+        Parameters
+        ----------
+        pathname: str
+            Path to discharge transect files.
+        """
+        for file in os.listdir(pathname):
+            # Find moving-bed test files.
+            if file.endswith('.mat'):
+                # Process Loop test
+                if file.lower().startswith('loop'):
+                    self.mb_tests.append(MovingBedTests())
+                    self.mb_tests[-1].populate_data(source='SonTek',
+                                                    file=os.path.join(pathname, file),
+                                                    test_type='Loop')
+                # Process Stationary test
+                elif file.lower().startswith('smba'):
+                    self.mb_tests.append(MovingBedTests())
+                    self.mb_tests[-1].populate_data(source='SonTek',
+                                                    file=os.path.join(pathname, file),
+                                                    test_type='Stationary')
+
+    def load_qrev_mat(self, mat_data):
+        """Loads and coordinates the mapping of existing QRev Matlab files
+        into Python instance variables.
+
+        Parameters
+        ----------
+        mat_data: dict
+            Dictionary containing Matlab data.
+        """
+
+        meas_struct = mat_data['meas_struct']
+
+        # Assign data from meas_struct to associated instance variables
+        # in Measurement and associated objects.
+        if len(meas_struct.stationName) > 0:
+            self.station_name = meas_struct.stationName
+        if len(meas_struct.stationNumber) > 0:
+            self.station_number = meas_struct.stationNumber
+        if hasattr(meas_struct, 'meas_number'):
+            if len(meas_struct.meas_number) == 0:
+                self.meas_number = ''
+            else:
+                self.meas_number = meas_struct.meas_number
+        if hasattr(meas_struct, 'persons'):
+            if len(meas_struct.persons) == 0:
+                self.persons = ''
+            else:
+                self.persons = meas_struct.persons
+        if hasattr(meas_struct, 'stage_start_m'):
+            self.stage_start_m = meas_struct.stage_start_m
+        if hasattr(meas_struct, 'stage_end_m'):
+            self.stage_end_m = meas_struct.stage_end_m
+        if hasattr(meas_struct, 'stage_meas_m'):
+            self.stage_meas_m = meas_struct.stage_meas_m
+        self.processing = meas_struct.processing
+        if type(meas_struct.comments) == np.ndarray:
+            self.comments = meas_struct.comments.tolist()
+
+            # Needed to handle comments with blank lines
+            for n, comment in enumerate(self.comments):
+                if type(comment) is not str:
+                    new_comment = ''
+                    for item in comment:
+                        if len(item.strip()) > 0:
+                            new_comment = new_comment + item
+                        else:
+                            new_comment = new_comment + '\n'
+                    self.comments[n] = new_comment
+        else:
+            self.comments = [meas_struct.comments]
+
+        # Check to make sure all comments are str
+        for n, comment in enumerate(self.comments):
+            if type(comment) is np.ndarray:
+                # Assign through the list index so the conversion persists
+                self.comments[n] = np.array2string(comment)
+
+        if hasattr(meas_struct, 'userRating'):
+            self.user_rating = meas_struct.userRating
+        else:
+            self.user_rating = ''
+
+        self.initial_settings = vars(meas_struct.initialSettings)
+
+        # Update initial settings to agree with Python definitions
+        nav_dict = {'btVel': 'bt_vel', 'ggaVel': 'gga_vel', 'vtgVel': 'vtg_vel',
+                    'bt_vel': 'bt_vel', 'gga_vel': 'gga_vel', 'vtg_vel': 'vtg_vel'}
+        self.initial_settings['NavRef'] = nav_dict[self.initial_settings['NavRef']]
+
+        on_off_dict = {'Off': False, 'On': True, 0: False, 1: True}
+        self.initial_settings['WTwtDepthFilter'] = on_off_dict[self.initial_settings['WTwtDepthFilter']]
+
+        if type(self.initial_settings['WTsnrFilter']) is np.ndarray:
+            self.initial_settings['WTsnrFilter'] = 'Off'
+
+        nav_dict = {'btDepths': 'bt_depths', 'vbDepths': 'vb_depths', 'dsDepths': 'ds_depths',
+                    'bt_depths': 'bt_depths', 'vb_depths': 'vb_depths', 'ds_depths': 'ds_depths'}
+        self.initial_settings['depthReference'] = nav_dict[self.initial_settings['depthReference']]
+
+        self.ext_temp_chk = {'user': meas_struct.extTempChk.user,
+                             'units': meas_struct.extTempChk.units,
+                             'adcp': meas_struct.extTempChk.adcp}
+
+        if hasattr(meas_struct.extTempChk, 'user_orig'):
+            self.ext_temp_chk['user_orig'] = meas_struct.extTempChk.user_orig
+        else:
+            self.ext_temp_chk['user_orig'] = meas_struct.extTempChk.user
+
+        if hasattr(meas_struct.extTempChk, 'adcp_orig'):
+            self.ext_temp_chk['adcp_orig'] = meas_struct.extTempChk.adcp_orig
+        else:
+            self.ext_temp_chk['adcp_orig'] = meas_struct.extTempChk.adcp
+
+        # Replace any non-numeric temperature values with nan
+        for key in ['user', 'adcp', 'user_orig', 'adcp_orig']:
+            if isinstance(self.ext_temp_chk[key], (str, np.ndarray)):
+                self.ext_temp_chk[key] = np.nan
+
+        self.system_tst = PreMeasurement.sys_test_qrev_mat_in(meas_struct)
+
+        # If there is no compass calibration, compassCal is a mat_struct with len(data) = 0
+        try:
+            self.compass_cal = PreMeasurement.cc_qrev_mat_in(meas_struct)
+        except AttributeError:
+            self.compass_cal = []
+
+        try:
+            self.compass_eval = PreMeasurement.ce_qrev_mat_in(meas_struct)
+        except AttributeError:
+            self.compass_eval = []
+
+        self.transects = TransectData.qrev_mat_in(meas_struct)
+        self.mb_tests = MovingBedTests.qrev_mat_in(meas_struct)
+        self.extrap_fit = ComputeExtrap()
+        self.extrap_fit.populate_from_qrev_mat(meas_struct)
+
+        self.discharge = QComp.qrev_mat_in(meas_struct)
+
+        # For compatibility with older QRev.mat files that didn't have this feature
+        for n in range(len(self.transects)):
+            if len(self.discharge[n].left_idx) == 0:
+                self.discharge[n].left_idx = self.discharge[n].edge_ensembles(edge_loc='left',
+                                                                              transect=self.transects[n])
+
+            if len(self.discharge[n].right_idx) == 0:
+                self.discharge[n].right_idx = self.discharge[n].edge_ensembles(edge_loc='right',
+                                                                               transect=self.transects[n])
+
+            if type(self.discharge[n].correction_factor) is list:
+                self.discharge[n].correction_factor = self.discharge[n].total / self.discharge[n].total_uncorrected
+
+        # Identify checked transects
+        self.checked_transect_idx = self.checked_transects(self)
+
+        if hasattr(meas_struct, 'observed_no_moving_bed'):
+            self.observed_no_moving_bed = meas_struct.observed_no_moving_bed
+        else:
+            self.observed_no_moving_bed = False
+
+        self.uncertainty = Uncertainty()
+        self.uncertainty.populate_from_qrev_mat(meas_struct)
+        self.qa = QAData(self, mat_struct=meas_struct, compute=False)
+        if hasattr(meas_struct, 'run_oursin'):
+            self.run_oursin = meas_struct.run_oursin
+        else:
+            self.run_oursin = False
+        if hasattr(meas_struct, 'oursin'):
+            self.oursin = Oursin()
+            self.oursin.populate_from_qrev_mat(meas_struct=meas_struct)
+        else:
+            self.oursin = None
+
+        self.use_weighted = self.extrap_fit.use_weighted
+        self.use_measurement_thresholds = \
+            self.transects[self.checked_transect_idx[0]].boat_vel.bt_vel.use_measurement_thresholds
+
+    def create_filter_composites(self):
+        """Create composite for water and bottom track difference and vertical velocities and compute the thresholds
+        using these composites.
+        """
+
+        # Initialize dictionaries
+        wt_d = {}
+        wt_w = {}
+        bt_d = {}
+        bt_w = {}
+
+        # Create composite arrays for all checked transects
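+        # Bottom-track composites are keyed by frequency (kHz); water-track composites are keyed by ping type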
+        for transect in self.transects:
+            if transect.checked:
+                bt_freq = transect.boat_vel.bt_vel.frequency_khz.astype(int).astype(str)
+                freq = np.unique(bt_freq)
+                for f in freq:
+                    if f in bt_d:
+                        bt_d[f] = np.hstack((bt_d[f], transect.boat_vel.bt_vel.d_mps[bt_freq == f]))
+                        bt_w[f] = np.hstack((bt_w[f], transect.boat_vel.bt_vel.w_mps[bt_freq == f]))
+                    else:
+                        bt_d[f] = transect.boat_vel.bt_vel.d_mps[bt_freq == f]
+                        bt_w[f] = transect.boat_vel.bt_vel.w_mps[bt_freq == f]
+
+                if transect.w_vel.ping_type.size > 0:
+                    # Identify the ping types used in the transect
+                    p_types = np.unique(transect.w_vel.ping_type)
+                    # Composite for each ping type
+                    for p_type in p_types:
+                        if p_type in wt_d:
+                            wt_d[p_type] = np.hstack(
+                                (wt_d[p_type], transect.w_vel.d_mps[np.logical_and(transect.w_vel.ping_type == p_type,
+                                                                                   transect.w_vel.cells_above_sl)]))
+                            wt_w[p_type] = np.hstack(
+                                (wt_w[p_type], transect.w_vel.w_mps[np.logical_and(transect.w_vel.ping_type == p_type,
+                                                                                   transect.w_vel.cells_above_sl)]))
+                        else:
+                            wt_d[p_type] = transect.w_vel.d_mps[np.logical_and(transect.w_vel.ping_type == p_type,
+                                                                               transect.w_vel.cells_above_sl)]
+                            wt_w[p_type] = transect.w_vel.w_mps[np.logical_and(transect.w_vel.ping_type == p_type,
+                                                                               transect.w_vel.cells_above_sl)]
+                else:
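+                    # No ping types recorded; group everything under a single 'U' (unknown) key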
+                    p_types = np.array(['U'])
+                    for p_type in p_types:
+                        if p_type in wt_d:
+                            wt_d[p_type] = np.hstack((wt_d[p_type],
+                                                      transect.w_vel.d_mps[transect.w_vel.cells_above_sl]))
+                            wt_w[p_type] = np.hstack((wt_w[p_type],
+                                                      transect.w_vel.w_mps[transect.w_vel.cells_above_sl]))
+                        else:
+                            wt_d[p_type] = transect.w_vel.d_mps[transect.w_vel.cells_above_sl]
+                            wt_w[p_type] = transect.w_vel.w_mps[transect.w_vel.cells_above_sl]
+
+        # Compute thresholds based on composite arrays
+
+        # Water track
+        wt_d_meas_thresholds = {}
+        wt_w_meas_thresholds = {}
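+        # Thresholds are derived from the interquartile range of each composite, using a multiplier of 5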
+        for p_type in wt_d.keys():
+            wt_d_meas_thresholds[p_type] = WaterData.meas_iqr_filter(wt_d[p_type], multiplier=5)
+            wt_w_meas_thresholds[p_type] = WaterData.meas_iqr_filter(wt_w[p_type], multiplier=5)
+
+        # Bottom track
+        bt_d_meas_thresholds = {}
+        bt_w_meas_thresholds = {}
+        for freq in bt_d.keys():
+            bt_d_meas_thresholds[freq] = BoatData.iqr_filter(bt_d[freq])
+            bt_w_meas_thresholds[freq] = BoatData.iqr_filter(bt_w[freq])
+
+        # Assign threshold to each transect
+        for transect in self.transects:
+            transect.w_vel.d_meas_thresholds = wt_d_meas_thresholds
+            transect.w_vel.w_meas_thresholds = wt_w_meas_thresholds
+            transect.boat_vel.bt_vel.d_meas_thresholds = bt_d_meas_thresholds
+            transect.boat_vel.bt_vel.w_meas_thresholds = bt_w_meas_thresholds
+
+        if len(self.mb_tests) > 0:
+            for test in self.mb_tests:
+                transect = test.transect
+                transect.w_vel.d_meas_thresholds = wt_d_meas_thresholds
+                transect.w_vel.w_meas_thresholds = wt_w_meas_thresholds
+                transect.boat_vel.bt_vel.d_meas_thresholds = bt_d_meas_thresholds
+                transect.boat_vel.bt_vel.w_meas_thresholds = bt_w_meas_thresholds
+
+    @staticmethod
+    def set_num_beam_wt_threshold_trdi(mmt_transect):
+        """Get number of beams to use in processing for WT from mmt file
+        
+        Parameters
+        ----------
+        mmt_transect: MMT_Transect
+            Object of MMT_Transect
+        
+        Returns
+        -------
+        num_beam_wt_out: int
+            Number of beams (3 or 4) to use in water-track processing
+        """
+
+        use_3_beam_wt = mmt_transect.active_config['Proc_Use_3_Beam_WT']
+        if use_3_beam_wt == 0:
+            num_beam_wt_out = 4
+        else:
+            num_beam_wt_out = 3
+            
+        return num_beam_wt_out
+
+    @staticmethod
+    def set_num_beam_bt_threshold_trdi(mmt_transect):
+        """Get number of beams to use in processing for BT from mmt file
+
+        Parameters
+        ----------
+        mmt_transect: MMT_Transect
+            Object of MMT_Transect
+
+        Returns
+        -------
+        num_beam_bt_out: int
+            Number of beams (3 or 4) to use in bottom-track processing
+        """
+
+        use_3_beam_bt = mmt_transect.active_config['Proc_Use_3_Beam_BT']
+        if use_3_beam_bt == 0:
+            num_beam_bt_out = 4
+        else:
+            num_beam_bt_out = 3
+
+        return num_beam_bt_out
+
+    @staticmethod
+    def set_depth_weighting_trdi(mmt_transect):
+        """Get the average depth method from mmt
+        
+        Parameters
+        ----------
+        mmt_transect: MMT_Transect
+            Object of MMT_Transect
+        
+        Returns
+        -------
+        depth_weighting_setting: str
+            Method to compute mean depth
+        """
+
+        depth_weighting = mmt_transect.active_config['Proc_Use_Weighted_Mean_Depth']
+        
+        if depth_weighting == 0:
+            depth_weighting_setting = 'Simple'
+        else:
+            depth_weighting_setting = 'IDW'
+
+        return depth_weighting_setting
+
+    @staticmethod
+    def set_depth_screening_trdi(mmt_transect):
+        """Get the depth screening setting from mmt
+        
+        Parameters
+        ----------
+        mmt_transect: MMT_Transect
+            Object of MMT_Transect
+        
+        Returns
+        -------
+        depth_screening_setting: str
+            Type of depth screening to use
+        """
+
+        depth_screen = mmt_transect.active_config['Proc_Screen_Depth']
+        if depth_screen == 0:
+            depth_screening_setting = 'None'
+        else:
+            depth_screening_setting = 'TRDI'
+        
+        return depth_screening_setting
+        
+    def change_sos(self, transect_idx=None, parameter=None, salinity=None,
+                   temperature=None, selected=None, speed=None):
+        """Applies a change in speed of sound to one or all transects
+        and updates the discharge and uncertainty computations.
+        
+        Parameters
+        ----------
+        transect_idx: int
+            Index of transect to change
+        parameter: str
+            Speed of sound parameter to be changed ('temperatureSrc', 'temperature', 'salinity', 'sosSrc')
+        salinity: float
+            Salinity in ppt
+        temperature: float
+            Temperature in deg C
+        selected: str
+            Selected speed of sound ('internal', 'computed', 'user') or temperature ('internal', 'user')
+        speed: float
+            Manually supplied speed of sound for 'user' source
+        """
+        
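+        # Example (illustrative): meas.change_sos(parameter='salinity', salinity=0.5) applies a new
+        # salinity to all transects and triggers reprocessing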
+        s = self.current_settings()
+        if transect_idx is None:
+            # Apply to all transects
+            for transect in self.transects:
+                transect.change_sos(parameter=parameter,
+                                    salinity=salinity,
+                                    temperature=temperature,
+                                    selected=selected,
+                                    speed=speed)
+        else:
+            # Apply to a single transect
+            self.transects[transect_idx].change_sos(parameter=parameter,
+                                                    salinity=salinity,
+                                                    temperature=temperature,
+                                                    selected=selected,
+                                                    speed=speed)
+        # Reapply settings to newly adjusted data
+        self.apply_settings(s)
+
+    def change_magvar(self, magvar, transect_idx=None):
+        """Coordinates changing the magnetic variation.
+
+        Parameters
+        ----------
+        magvar: float
+            Magnetic variation
+        transect_idx: int
+            Index of transect to which the change is applied. None is all transects.
+        """
+
+        # Get current settings
+        s = self.current_settings()
+
+        # Initialize variables
+        n_transects = len(self.transects)
+        recompute = False
+        n = 0
+
+        # If the internal compass is used, a recompute is necessary
+        while n < n_transects and recompute is False:
+            if self.transects[n].sensors.heading_deg.selected == 'internal':
+                recompute = True
+            n += 1
+
+        # Apply change
+        if transect_idx is None:
+            # Apply change to all transects
+            for transect in self.transects:
+                transect.change_mag_var(magvar)
+
+            # Apply change to moving-bed tests
+            if len(self.mb_tests) > 0:
+                for test in self.mb_tests:
+                    old_magvar = test.transect.sensors.heading_deg.internal.mag_var_deg
+                    test.transect.change_mag_var(magvar)
+                    test.magvar_change(magvar, old_magvar)
+        else:
+            self.transects[transect_idx].change_mag_var(magvar)
+
+        # Recompute if specified
+        if recompute:
+            self.apply_settings(s)
+        else:
+            self.qa.compass_qa(self)
+            self.qa.check_compass_settings(self)
+
+    def change_h_offset(self, h_offset, transect_idx=None):
+        """Coordinates changing the heading offset for external heading.
+
+        Parameters
+        ----------
+        h_offset: float
+            Heading offset
+        transect_idx: int
+            Index of transect to which the change is applied. None is all transects.
+        """
+
+        # Get current settings
+        s = self.current_settings()
+
+        # Initialize variables
+        n_transects = len(self.transects)
+        recompute = False
+        n = 0
+
+        # If an external compass is used, a recompute is necessary
+        while n < n_transects and recompute is False:
+            if self.transects[n].sensors.heading_deg.selected == 'external':
+                recompute = True
+            n += 1
+
+        # Apply change
+        if transect_idx is None:
+            for transect in self.transects:
+                transect.change_offset(h_offset)
+
+            # Apply change to moving-bed tests
+            if len(self.mb_tests) > 0:
+                for test in self.mb_tests:
+                    old_h_offset = test.transect.sensors.heading_deg.external.align_correction_deg
+                    test.transect.change_offset(h_offset)
+                    test.h_offset_change(h_offset, old_h_offset)
+        else:
+            self.transects[transect_idx].change_offset(h_offset)
+
+        # Recompute if specified
+        if recompute:
+            self.apply_settings(s)
+        else:
+            self.qa.compass_qa(self)
+            self.qa.check_compass_settings(self)
+
+    def change_h_source(self, h_source, transect_idx=None):
+        """Coordinates changing the heading source.
+
+        Parameters
+        ----------
+        h_source: str
+            Heading source (internal or external)
+        transect_idx: int
+            Index of transect to which the change is applied. None is all transects.
+        """
+
+        # Get current settings
+        s = self.current_settings()
+
+        # Apply change
+        if transect_idx is None:
+            for transect in self.transects:
+                transect.change_heading_source(h_source)
+
+            # Apply change to moving-bed tests
+            if len(self.mb_tests) > 0:
+                for test in self.mb_tests:
+                    test.transect.change_heading_source(h_source)
+                    test.process_mb_test(source=test.transect.adcp.manufacturer)
+                settings = self.current_settings()
+                select = settings['NavRef']
+                ref = None
+                if select == 'bt_vel':
+                    ref = 'BT'
+                elif select == 'gga_vel':
+                    ref = 'GGA'
+                elif select == 'vtg_vel':
+                    ref = 'VTG'
+                self.mb_tests = MovingBedTests.auto_use_2_correct(
+                    moving_bed_tests=self.mb_tests, boat_ref=ref)
+
+        else:
+            self.transects[transect_idx].change_heading_source(h_source)
+
+        self.apply_settings(s)
+
+    def change_draft(self, draft, transect_idx=None):
+        """Coordinates changing the ADCP draft.
+
+        Parameters
+        ----------
+        draft: float
+            Draft of ADCP in m
+        transect_idx: int
+            Index of transect to which the change is applied. None is all transects.
+        """
+
+        # Get current settings
+        s = self.current_settings()
+
+        # Apply change
+        if transect_idx is None:
+            for transect in self.transects:
+                transect.change_draft(draft)
+        else:
+            self.transects[transect_idx].change_draft(draft)
+
+        self.apply_settings(s)
+
+    @staticmethod
+    def h_external_valid(meas):
+        """Determine if valid external heading data is included in the measurement.
+
+        Parameters
+        ----------
+        meas: Measurement
+            Object of Measurement
+        """
+
+        external = False
+        for transect in meas.transects:
+            if transect.sensors.heading_deg.external is not None:
+                external = True
+                break
+        return external
+
+    # @profile
+    def apply_settings(self, settings, force_abba=True):
+        """Applies reference, filter, and interpolation settings.
+        
+        Parameters
+        ----------
+        settings: dict
+            Dictionary of reference, filter, and interpolation settings
+        force_abba: bool
+            Allows the above, below, before, after interpolation to be applied even when the data use another approach.
+        """
+
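+        # The settings dict is typically produced by current_settings() or qrev_default_settings(),
+        # e.g. settings['NavRef'] = 'bt_vel', settings['WTdFilter'] = 'Auto', settings['depthReference'] = 'bt_depths'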
+        self.use_ping_type = settings['UsePingType']
+
+        # If SonTek data does not have ping type identified, determine ping types
+        if self.transects[0].w_vel.ping_type.size == 1 and self.transects[0].adcp.manufacturer == 'SonTek':
+            for transect in self.transects:
+                ping_type = TransectData.sontek_ping_type(transect.w_vel.corr, transect.w_vel.frequency)
+                transect.w_vel.ping_type = np.tile(np.array([ping_type]), (transect.w_vel.corr.shape[1], 1))
+
+        # If the measurement thresholds have not been computed, compute them
+        if not self.transects[0].w_vel.d_meas_thresholds:
+            self.create_filter_composites()
+
+        # Apply settings to moving-bed tests:
+        if len(self.mb_tests) > 0:
+            self.apply_settings_to_movingbed(settings, force_abba=force_abba)
+
+        # Apply settings to discharge transects
+        for transect in self.transects:
+
+            if not settings['UsePingType']:
+                transect.w_vel.ping_type = np.tile('U', transect.w_vel.ping_type.shape)
+                transect.boat_vel.bt_vel.frequency_khz = np.tile(0, transect.boat_vel.bt_vel.frequency_khz.shape)
+
+            # Moving-boat ensembles
+            if 'Processing' in settings.keys():
+                transect.change_q_ensembles(proc_method=settings['Processing'])
+                self.processing = settings['Processing']
+
+            # Navigation reference
+            if transect.boat_vel.selected != settings['NavRef']:
+                transect.change_nav_reference(update=False, new_nav_ref=settings['NavRef'])
+                if len(self.mb_tests) > 0:
+                    self.mb_tests = MovingBedTests.auto_use_2_correct(
+                        moving_bed_tests=self.mb_tests,
+                        boat_ref=settings['NavRef'])
+
+            # Changing the nav reference applies the current setting for
+            # Composite tracks, check to see if a change is needed
+            if transect.boat_vel.composite != settings['CompTracks']:
+                transect.composite_tracks(update=False, setting=settings['CompTracks'])
+
+            # Set difference velocity BT filter
+            bt_kwargs = {}
+            if settings['BTdFilter'] == 'Manual':
+                bt_kwargs['difference'] = settings['BTdFilter']
+                bt_kwargs['difference_threshold'] = settings['BTdFilterThreshold']
+            else:
+                bt_kwargs['difference'] = settings['BTdFilter']
+
+            # Set vertical velocity BT filter
+            if settings['BTwFilter'] == 'Manual':
+                bt_kwargs['vertical'] = settings['BTwFilter']
+                bt_kwargs['vertical_threshold'] = settings['BTwFilterThreshold']
+            else:
+                bt_kwargs['vertical'] = settings['BTwFilter']
+
+            # Apply beam filter
+            bt_kwargs['beam'] = settings['BTbeamFilter']
+
+            # Apply smooth filter
+            bt_kwargs['other'] = settings['BTsmoothFilter']
+
+            transect.boat_vel.bt_vel.use_measurement_thresholds = settings['UseMeasurementThresholds']
+
+            # Apply BT settings
+            transect.boat_filters(update=False, **bt_kwargs)
+
+            # BT Interpolation
+            transect.boat_interpolations(update=False,
+                                         target='BT',
+                                         method=settings['BTInterpolation'])
+
+            # GPS filter settings
+            if transect.gps is not None:
+                gga_kwargs = {}
+                if transect.boat_vel.gga_vel is not None:
+                    # GGA
+                    gga_kwargs['differential'] = settings['ggaDiffQualFilter']
+                    if settings['ggaAltitudeFilter'] == 'Manual':
+                        gga_kwargs['altitude'] = settings['ggaAltitudeFilter']
+                        gga_kwargs['altitude_threshold'] = settings['ggaAltitudeFilterChange']
+                    else:
+                        gga_kwargs['altitude'] = settings['ggaAltitudeFilter']
+
+                    # Set GGA HDOP Filter
+                    if settings['GPSHDOPFilter'] == 'Manual':
+                        gga_kwargs['hdop'] = settings['GPSHDOPFilter']
+                        gga_kwargs['hdop_max_threshold'] = settings['GPSHDOPFilterMax']
+                        gga_kwargs['hdop_change_threshold'] = settings['GPSHDOPFilterChange']
+                    else:
+                        gga_kwargs['hdop'] = settings['GPSHDOPFilter']
+
+                    gga_kwargs['other'] = settings['GPSSmoothFilter']
+                    # Apply GGA filters
+                    transect.gps_filters(update=False, **gga_kwargs)
+
+                if transect.boat_vel.vtg_vel is not None:
+                    vtg_kwargs = {}
+                    if settings['GPSHDOPFilter'] == 'Manual':
+                        vtg_kwargs['hdop'] = settings['GPSHDOPFilter']
+                        vtg_kwargs['hdop_max_threshold'] = settings['GPSHDOPFilterMax']
+                        vtg_kwargs['hdop_change_threshold'] = settings['GPSHDOPFilterChange']
+                        vtg_kwargs['other'] = settings['GPSSmoothFilter']
+                    else:
+                        vtg_kwargs['hdop'] = settings['GPSHDOPFilter']
+                        vtg_kwargs['other'] = settings['GPSSmoothFilter']
+
+                    # Apply VTG filters
+                    transect.gps_filters(update=False, **vtg_kwargs)
+
+                transect.boat_interpolations(update=False,
+                                             target='GPS',
+                                             method=settings['GPSInterpolation'])
+
+            # Set depth reference
+            transect.set_depth_reference(update=False, setting=settings['depthReference'])
+
+            transect.process_depths(update=True,
+                                    filter_method=settings['depthFilterType'],
+                                    interpolation_method=settings['depthInterpolation'],
+                                    composite_setting=settings['depthComposite'],
+                                    avg_method=settings['depthAvgMethod'],
+                                    valid_method=settings['depthValidMethod'])
+
+            # Set WT difference velocity filter
+            wt_kwargs = {}
+            if settings['WTdFilter'] == 'Manual':
+                wt_kwargs['difference'] = settings['WTdFilter']
+                wt_kwargs['difference_threshold'] = settings['WTdFilterThreshold']
+            else:
+                wt_kwargs['difference'] = settings['WTdFilter']
+
+            # Set WT vertical velocity filter
+            if settings['WTwFilter'] == 'Manual':
+                wt_kwargs['vertical'] = settings['WTwFilter']
+                wt_kwargs['vertical_threshold'] = settings['WTwFilterThreshold']
+            else:
+                wt_kwargs['vertical'] = settings['WTwFilter']
+
+            wt_kwargs['beam'] = settings['WTbeamFilter']
+            wt_kwargs['other'] = settings['WTsmoothFilter']
+            wt_kwargs['snr'] = settings['WTsnrFilter']
+            wt_kwargs['wt_depth'] = settings['WTwtDepthFilter']
+            wt_kwargs['excluded'] = settings['WTExcludedDistance']
+
+            # Data loaded from old QRev.mat files will be set to use this new interpolation method. When reprocessing
+            # any data the interpolation method should be 'abba'
+            if force_abba:
+                transect.w_vel.interpolate_cells = 'abba'
+                transect.w_vel.interpolate_ens = 'abba'
+                settings['WTEnsInterpolation'] = 'abba'
+                settings['WTCellInterpolation'] = 'abba'
+
+            transect.w_vel.use_measurement_thresholds = settings['UseMeasurementThresholds']
+            if transect.w_vel.ping_type.size == 0 and transect.adcp.manufacturer == 'SonTek':
+                # Correlation and frequency can be used to determine ping type
+                transect.w_vel.ping_type = TransectData.sontek_ping_type(corr=transect.w_vel.corr,
+                                                                         freq=transect.w_vel.frequency)
+
+            transect.w_vel.apply_filter(transect=transect, **wt_kwargs)
+
+            # Edge methods
+            transect.edges.rec_edge_method = settings['edgeRecEdgeMethod']
+            transect.edges.vel_method = settings['edgeVelMethod']
+
+        if settings['UseWeighted'] and not self.use_weighted:
+            if self.extrap_fit.norm_data[-1].weights is None:
+                # Compute normalized data for each transect to obtain the weights
+                self.extrap_fit.process_profiles(self.transects, self.extrap_fit.norm_data[-1].data_type,
+                                                 use_weighted=settings['UseWeighted'])
+
+        self.use_weighted = settings['UseWeighted']
+
+        if len(self.checked_transect_idx) > 0:
+            ref_transect = self.checked_transect_idx[0]
+        else:
+            ref_transect = 0
+
+        if self.transects[ref_transect].w_vel.interpolate_cells == 'TRDI':
+            if self.extrap_fit is None:
+                self.extrap_fit = ComputeExtrap()
+                self.extrap_fit.populate_data(transects=self.transects, compute_sensitivity=False,
+                                              use_weighted=settings['UseWeighted'])
+                self.change_extrapolation(self.extrap_fit.fit_method, compute_q=False,
+                                          use_weighted=settings['UseWeighted'])
+            elif self.extrap_fit.fit_method == 'Automatic':
+                self.change_extrapolation(self.extrap_fit.fit_method, compute_q=False,
+                                          use_weighted=settings['UseWeighted'])
+            else:
+                if 'extrapTop' not in settings.keys():
+                    settings['extrapTop'] = self.extrap_fit.sel_fit[-1].top_method
+                    settings['extrapBot'] = self.extrap_fit.sel_fit[-1].bot_method
+                    settings['extrapExp'] = self.extrap_fit.sel_fit[-1].exponent
+
+            self.change_extrapolation(self.extrap_fit.fit_method,
+                                      top=settings['extrapTop'],
+                                      bot=settings['extrapBot'],
+                                      exp=settings['extrapExp'],
+                                      compute_q=False,
+                                      use_weighted=settings['UseWeighted'])
+
+        for transect in self.transects:
+
+            # Water track interpolations
+            transect.w_vel.apply_interpolation(transect=transect,
+                                               ens_interp=settings['WTEnsInterpolation'],
+                                               cells_interp=settings['WTCellInterpolation'])
+
+        if self.extrap_fit is None:
+            self.extrap_fit = ComputeExtrap()
+            self.extrap_fit.populate_data(transects=self.transects, compute_sensitivity=False,
+                                          use_weighted=settings['UseWeighted'])
+            self.change_extrapolation(self.extrap_fit.fit_method, compute_q=False,
+                                      use_weighted=settings['UseWeighted'])
+        elif self.extrap_fit.fit_method == 'Automatic':
+            self.change_extrapolation(self.extrap_fit.fit_method, compute_q=False,
+                                      use_weighted=settings['UseWeighted'])
+        else:
+            if 'extrapTop' not in settings.keys():
+                settings['extrapTop'] = self.extrap_fit.sel_fit[-1].top_method
+                settings['extrapBot'] = self.extrap_fit.sel_fit[-1].bot_method
+                settings['extrapExp'] = self.extrap_fit.sel_fit[-1].exponent
+
+        self.change_extrapolation(self.extrap_fit.fit_method,
+                                  top=settings['extrapTop'],
+                                  bot=settings['extrapBot'],
+                                  exp=settings['extrapExp'],
+                                  compute_q=False,
+                                  use_weighted=settings['UseWeighted'])
+
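+        # Recompute the sensitivity of the discharge to alternative extrapolation fits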
+        self.extrap_fit.q_sensitivity = ExtrapQSensitivity()
+        self.extrap_fit.q_sensitivity.populate_data(transects=self.transects,
+                                                    extrap_fits=self.extrap_fit.sel_fit)
+
+        self.compute_discharge()
+
+        self.compute_uncertainty()
+
+    def apply_settings_to_movingbed(self, settings, force_abba=True):
+        """Applies reference, filter, and interpolation settings.
+
+        Parameters
+        ----------
+        settings: dict
+            Dictionary of reference, filter, and interpolation settings
+        force_abba: bool
+            Allows the above, below, before, after interpolation to be applied even when the data use another approach.
+        """
+
+        self.use_ping_type = settings['UsePingType']
+        # If SonTek data does not have ping type identified, determine ping types
+        if self.mb_tests[0].transect.w_vel.ping_type.size == 1 and self.transects[0].adcp.manufacturer == 'SonTek':
+            for test in self.mb_tests:
+                transect = test.transect
+                ping_type = TransectData.sontek_ping_type(transect.w_vel.corr, transect.w_vel.frequency)
+                transect.w_vel.ping_type = np.tile(np.array([ping_type]), (transect.w_vel.corr.shape[1], 1))
+
+        for test in self.mb_tests:
+            transect = test.transect
+
+            if not settings['UsePingType']:
+                transect.w_vel.ping_type = np.tile('U', transect.w_vel.ping_type.shape)
+                transect.boat_vel.bt_vel.frequency_khz = np.tile(0, transect.boat_vel.bt_vel.frequency_khz.shape)
+
+            # Moving-boat ensembles
+            if 'Processing' in settings.keys():
+                transect.change_q_ensembles(proc_method=settings['Processing'])
+                self.processing = settings['Processing']
+
+            # Set difference velocity BT filter
+            bt_kwargs = {}
+            if settings['BTdFilter'] == 'Manual':
+                bt_kwargs['difference'] = settings['BTdFilter']
+                bt_kwargs['difference_threshold'] = settings['BTdFilterThreshold']
+            else:
+                bt_kwargs['difference'] = settings['BTdFilter']
+
+            # Set vertical velocity BT filter
+            if settings['BTwFilter'] == 'Manual':
+                bt_kwargs['vertical'] = settings['BTwFilter']
+                bt_kwargs['vertical_threshold'] = settings['BTwFilterThreshold']
+            else:
+                bt_kwargs['vertical'] = settings['BTwFilter']
+
+            # Apply beam filter
+            bt_kwargs['beam'] = settings['BTbeamFilter']
+
+            # Apply smooth filter
+            bt_kwargs['other'] = settings['BTsmoothFilter']
+
+            transect.boat_vel.bt_vel.use_measurement_thresholds = settings['UseMeasurementThresholds']
+
+            # Apply BT settings
+            transect.boat_filters(update=False, **bt_kwargs)
+
+            # Don't interpolate for stationary tests
+            if test.type == 'Loop':
+                # BT Interpolation
+                transect.boat_interpolations(update=False,
+                                             target='BT',
+                                             method=settings['BTInterpolation'])
+
+            # GPS filter settings
+            if transect.gps is not None:
+                gga_kwargs = {}
+                if transect.boat_vel.gga_vel is not None:
+                    # GGA
+                    gga_kwargs['differential'] = settings['ggaDiffQualFilter']
+                    if settings['ggaAltitudeFilter'] == 'Manual':
+                        gga_kwargs['altitude'] = settings['ggaAltitudeFilter']
+                        gga_kwargs['altitude_threshold'] = settings['ggaAltitudeFilterChange']
+                    else:
+                        gga_kwargs['altitude'] = settings['ggaAltitudeFilter']
+
+                    # Set GGA HDOP Filter
+                    if settings['GPSHDOPFilter'] == 'Manual':
+                        gga_kwargs['hdop'] = settings['GPSHDOPFilter']
+                        gga_kwargs['hdop_max_threshold'] = settings['GPSHDOPFilterMax']
+                        gga_kwargs['hdop_change_threshold'] = settings['GPSHDOPFilterChange']
+                    else:
+                        gga_kwargs['hdop'] = settings['GPSHDOPFilter']
+
+                    gga_kwargs['other'] = settings['GPSSmoothFilter']
+                    # Apply GGA filters
+                    transect.gps_filters(update=False, **gga_kwargs)
+
+                if transect.boat_vel.vtg_vel is not None:
+                    vtg_kwargs = {}
+                    if settings['GPSHDOPFilter'] == 'Manual':
+                        vtg_kwargs['hdop'] = settings['GPSHDOPFilter']
+                        vtg_kwargs['hdop_max_threshold'] = settings['GPSHDOPFilterMax']
+                        vtg_kwargs['hdop_change_threshold'] = settings['GPSHDOPFilterChange']
+                        vtg_kwargs['other'] = settings['GPSSmoothFilter']
+                    else:
+                        vtg_kwargs['hdop'] = settings['GPSHDOPFilter']
+                        vtg_kwargs['other'] = settings['GPSSmoothFilter']
+
+                    # Apply VTG filters
+                    transect.gps_filters(update=False, **vtg_kwargs)
+
+                # Don't interpolate for stationary tests
+                if test.type == 'Loop':
+                    transect.boat_interpolations(update=False,
+                                                 target='GPS',
+                                                 method=settings['GPSInterpolation'])
+
+            # Set depth reference
+            transect.set_depth_reference(update=False, setting=settings['depthReference'])
+            transect.process_depths(update=False,
+                                    filter_method=settings['depthFilterType'],
+                                    interpolation_method=settings['depthInterpolation'],
+                                    composite_setting=settings['depthComposite'],
+                                    avg_method=settings['depthAvgMethod'],
+                                    valid_method=settings['depthValidMethod'])
+
+            # Set WT difference velocity filter
+            wt_kwargs = {}
+            if settings['WTdFilter'] == 'Manual':
+                wt_kwargs['difference'] = settings['WTdFilter']
+                wt_kwargs['difference_threshold'] = settings['WTdFilterThreshold']
+            else:
+                wt_kwargs['difference'] = settings['WTdFilter']
+
+            # Set WT vertical velocity filter
+            if settings['WTwFilter'] == 'Manual':
+                wt_kwargs['vertical'] = settings['WTwFilter']
+                wt_kwargs['vertical_threshold'] = settings['WTwFilterThreshold']
+            else:
+                wt_kwargs['vertical'] = settings['WTwFilter']
+
+            wt_kwargs['beam'] = settings['WTbeamFilter']
+            wt_kwargs['other'] = settings['WTsmoothFilter']
+            wt_kwargs['snr'] = settings['WTsnrFilter']
+            wt_kwargs['wt_depth'] = settings['WTwtDepthFilter']
+            wt_kwargs['excluded'] = settings['WTExcludedDistance']
+
+            # Data loaded from old QRev.mat files will be set to use this new interpolation method. When reprocessing
+            # any data the interpolation method should be 'abba'
+            if force_abba:
+                transect.w_vel.interpolate_cells = 'abba'
+                transect.w_vel.interpolate_ens = 'abba'
+                settings['WTEnsInterpolation'] = 'abba'
+                settings['WTCellInterpolation'] = 'abba'
+
+            transect.w_vel.use_measurement_thresholds = settings['UseMeasurementThresholds']
+            if transect.w_vel.ping_type.size == 0 and transect.adcp.manufacturer == 'SonTek':
+                # Correlation and frequency can be used to determine ping type
+                transect.w_vel.ping_type = TransectData.sontek_ping_type(corr=transect.w_vel.corr,
+                                                                         freq=transect.w_vel.frequency)
+
+            transect.w_vel.apply_filter(transect=transect, **wt_kwargs)
+
+            transect.w_vel.apply_interpolation(transect=transect,
+                                               ens_interp=settings['WTEnsInterpolation'],
+                                               cells_interp=settings['WTCellInterpolation'])
+
+            test.process_mb_test(source=self.transects[0].adcp.manufacturer)
+
+    def current_settings(self):
+        """Saves the current settings for a measurement. Since all settings
+        in QRev are consistent among all transects in a measurement only the
+        settings from the first transect are saved
+        """
+
+        settings = {}
+
+        if len(self.checked_transect_idx) > 0:
+            ref_transect = self.checked_transect_idx[0]
+        else:
+            ref_transect = 0
+        transect = self.transects[ref_transect]
+        
+        # Navigation reference
+        settings['NavRef'] = transect.boat_vel.selected
+        
+        # Composite tracks
+        settings['CompTracks'] = transect.boat_vel.composite
+        
+        # Water track settings
+        settings['WTbeamFilter'] = transect.w_vel.beam_filter
+        settings['WTdFilter'] = transect.w_vel.d_filter
+        settings['WTdFilterThreshold'] = transect.w_vel.d_filter_thresholds
+        settings['WTwFilter'] = transect.w_vel.w_filter
+        settings['WTwFilterThreshold'] = transect.w_vel.w_filter_thresholds
+        settings['WTsmoothFilter'] = transect.w_vel.smooth_filter
+        settings['WTsnrFilter'] = transect.w_vel.snr_filter
+        settings['WTwtDepthFilter'] = transect.w_vel.wt_depth_filter
+        settings['WTEnsInterpolation'] = transect.w_vel.interpolate_ens
+        settings['WTCellInterpolation'] = transect.w_vel.interpolate_cells
+        settings['WTExcludedDistance'] = transect.w_vel.excluded_dist_m
+        
+        # Bottom track settings
+        settings['BTbeamFilter'] = transect.boat_vel.bt_vel.beam_filter
+        settings['BTdFilter'] = transect.boat_vel.bt_vel.d_filter
+        settings['BTdFilterThreshold'] = transect.boat_vel.bt_vel.d_filter_thresholds
+        settings['BTwFilter'] = transect.boat_vel.bt_vel.w_filter
+        settings['BTwFilterThreshold'] = transect.boat_vel.bt_vel.w_filter_thresholds
+        settings['BTsmoothFilter'] = transect.boat_vel.bt_vel.smooth_filter
+        settings['BTInterpolation'] = transect.boat_vel.bt_vel.interpolate
+        
+        # GPS settings
+
+        gga_present = False
+        for idx in self.checked_transect_idx:
+            if self.transects[idx].boat_vel.gga_vel is not None:
+                gga_present = True
+                transect = self.transects[idx]
+                break
+
+        # GGA settings
+        if gga_present:
+            settings['ggaDiffQualFilter'] = transect.boat_vel.gga_vel.gps_diff_qual_filter
+            settings['ggaAltitudeFilter'] = transect.boat_vel.gga_vel.gps_altitude_filter
+            settings['ggaAltitudeFilterChange'] = \
+                transect.boat_vel.gga_vel.gps_altitude_filter_change
+            settings['GPSHDOPFilter'] = transect.boat_vel.gga_vel.gps_HDOP_filter
+            settings['GPSHDOPFilterMax'] = transect.boat_vel.gga_vel.gps_HDOP_filter_max
+            settings['GPSHDOPFilterChange'] = transect.boat_vel.gga_vel.gps_HDOP_filter_change
+            settings['GPSSmoothFilter'] = transect.boat_vel.gga_vel.smooth_filter
+            settings['GPSInterpolation'] = transect.boat_vel.gga_vel.interpolate
+        else:
+            settings['ggaDiffQualFilter'] = 1
+            settings['ggaAltitudeFilter'] = 'Off'
+            settings['ggaAltitudeFilterChange'] = []
+
+            settings['ggaSmoothFilter'] = 'Off'
+            if 'GPSInterpolation' not in settings.keys():
+                settings['GPSInterpolation'] = 'None'
+            if 'GPSHDOPFilter' not in settings.keys():
+                settings['GPSHDOPFilter'] = 'Off'
+                settings['GPSHDOPFilterMax'] = []
+                settings['GPSHDOPFilterChange'] = []
+            if 'GPSSmoothFilter' not in settings.keys():
+                settings['GPSSmoothFilter'] = 'Off'
+
+        # VTG settings
+        vtg_present = False
+        for idx in self.checked_transect_idx:
+            if self.transects[idx].boat_vel.vtg_vel is not None:
+                vtg_present = True
+                transect = self.transects[idx]
+                break
+
+        if vtg_present:
+            settings['GPSHDOPFilter'] = transect.boat_vel.vtg_vel.gps_HDOP_filter
+            settings['GPSHDOPFilterMax'] = transect.boat_vel.vtg_vel.gps_HDOP_filter_max
+            settings['GPSHDOPFilterChange'] = transect.boat_vel.vtg_vel.gps_HDOP_filter_change
+            settings['GPSSmoothFilter'] = transect.boat_vel.vtg_vel.smooth_filter
+            settings['GPSInterpolation'] = transect.boat_vel.vtg_vel.interpolate
+
+        # Depth Settings
+        settings['depthAvgMethod'] = transect.depths.bt_depths.avg_method
+        settings['depthValidMethod'] = transect.depths.bt_depths.valid_data_method
+        
+        # Depth settings are always applied to all available depth sources.
+        # Only those saved in the bt_depths are used here but are applied to all sources
+        settings['depthFilterType'] = transect.depths.bt_depths.filter_type
+        settings['depthReference'] = transect.depths.selected
+        settings['depthComposite'] = transect.depths.composite
+        select = getattr(transect.depths, transect.depths.selected)
+        settings['depthInterpolation'] = select.interp_type
+        
+        # Extrap Settings
+        if self.extrap_fit is None:
+            settings['extrapTop'] = transect.extrap.top_method
+            settings['extrapBot'] = transect.extrap.bot_method
+            settings['extrapExp'] = transect.extrap.exponent
+        else:
+            settings['extrapTop'] = self.extrap_fit.sel_fit[-1].top_method
+            settings['extrapBot'] = self.extrap_fit.sel_fit[-1].bot_method
+            settings['extrapExp'] = self.extrap_fit.sel_fit[-1].exponent
+
+        # Using self.use_weighted allows a QRev mat file to be loaded and initially processed with the settings
+        # from the QRev file, while on reprocessing self.use_weighted reflects the user's use_weighted option
+        settings['UseWeighted'] = self.use_weighted
+
+        # Edge Settings
+        settings['edgeVelMethod'] = transect.edges.vel_method
+        settings['edgeRecEdgeMethod'] = transect.edges.rec_edge_method
+
+        settings['UseMeasurementThresholds'] = transect.w_vel.use_measurement_thresholds
+        settings['UsePingType'] = self.use_ping_type
+
+        return settings
+
+    def qrev_default_settings(self, check_user_excluded_dist=False, use_weighted=False):
+        """QRev default and filter settings for a measurement.
+        """
+
+        settings = dict()
+
+        if len(self.checked_transect_idx) > 0:
+            ref_transect = self.checked_transect_idx[0]
+        else:
+            ref_transect = 0
+
+        # Navigation reference
+        settings['NavRef'] = self.transects[ref_transect].boat_vel.selected
+
+        # Composite tracks
+        settings['CompTracks'] = 'Off'
+
+        # Water track filter settings
+        settings['WTbeamFilter'] = -1
+        settings['WTdFilter'] = 'Auto'
+        settings['WTdFilterThreshold'] = np.nan
+        settings['WTwFilter'] = 'Auto'
+        settings['WTwFilterThreshold'] = np.nan
+        settings['WTsmoothFilter'] = 'Off'
+
+        if self.transects[ref_transect].adcp.manufacturer == 'TRDI':
+            settings['WTsnrFilter'] = 'Off'
+        else:
+            settings['WTsnrFilter'] = 'Auto'
+
+        if check_user_excluded_dist:
+            temp = [x.w_vel for x in self.transects]
+            excluded_dist = np.nanmin([x.excluded_dist_m for x in temp])
+        else:
+            excluded_dist = 0
+        if excluded_dist < 0.158 and self.transects[ref_transect].adcp.model == 'M9':
+            settings['WTExcludedDistance'] = 0.16
+        elif excluded_dist < 0.248 and self.transects[ref_transect].adcp.model == 'RioPro':
+            settings['WTExcludedDistance'] = 0.25
+        else:
+            settings['WTExcludedDistance'] = excluded_dist
+
+        # Bottom track filter settings
+        settings['BTbeamFilter'] = -1
+        settings['BTdFilter'] = 'Auto'
+        settings['BTdFilterThreshold'] = np.nan
+        settings['BTwFilter'] = 'Auto'
+        settings['BTwFilterThreshold'] = np.nan
+        settings['BTsmoothFilter'] = 'Off'
+
+        # GGA Filter settings
+        settings['ggaDiffQualFilter'] = 2
+        settings['ggaAltitudeFilter'] = 'Auto'
+        settings['ggaAltitudeFilterChange'] = np.nan
+
+        # VTG filter settings
+        settings['vtgsmoothFilter'] = 'Off'
+
+        # GGA and VTG filter settings
+        settings['GPSHDOPFilter'] = 'Auto'
+        settings['GPSHDOPFilterMax'] = np.nan
+        settings['GPSHDOPFilterChange'] = np.nan
+        settings['GPSSmoothFilter'] = 'Off'
+
+        # Depth Averaging
+        settings['depthAvgMethod'] = 'IDW'
+        settings['depthValidMethod'] = 'QRev'
+
+        # Depth Reference
+
+        # Default to 4 beam depth average
+        settings['depthReference'] = 'bt_depths'
+        # Depth settings
+        settings['depthFilterType'] = 'Smooth'
+        settings['depthComposite'] = 'Off'
+        for transect in self.transects:
+            if transect.checked:
+                if transect.depths.vb_depths is not None or transect.depths.ds_depths is not None:
+                    settings['depthComposite'] = 'On'
+                break
+
+        # Interpolation settings
+        settings = self.qrev_default_interpolation_methods(settings)
+
+        # Edge settings
+        settings['edgeVelMethod'] = 'MeasMag'
+        settings['edgeRecEdgeMethod'] = 'Fixed'
+
+        # Extrapolation Settings
+        settings['extrapTop'] = 'Power'
+        settings['extrapBot'] = 'Power'
+        settings['extrapExp'] = 0.1667
+        settings['UseWeighted'] = use_weighted
+
+        settings['UseMeasurementThresholds'] = False
+        settings['UsePingType'] = True
+
+        return settings
+
+    def update_qa(self):
+        """Recomputes the quality assurance (QA) data for the measurement."""
+        self.qa = QAData(self)
+
+    def no_filter_interp_settings(self):
+        """Settings to turn off all filters and interpolations.
+
+        Returns
+        -------
+        settings: dict
+            Dictionary of all processing settings.
+        """
+
+        settings = dict()
+        if len(self.checked_transect_idx) > 0:
+            ref_transect = self.checked_transect_idx[0]
+        else:
+            ref_transect = 0
+
+        settings['NavRef'] = self.transects[ref_transect].boat_vel.selected
+
+        # Composite tracks
+        settings['CompTracks'] = 'Off'
+
+        # Water track filter settings
+        settings['WTbeamFilter'] = 3
+        settings['WTdFilter'] = 'Off'
+        settings['WTdFilterThreshold'] = np.nan
+        settings['WTwFilter'] = 'Off'
+        settings['WTwFilterThreshold'] = np.nan
+        settings['WTsmoothFilter'] = 'Off'
+        settings['WTsnrFilter'] = 'Off'
+
+        temp = [x.w_vel for x in self.transects]
+        excluded_dist = np.nanmin([x.excluded_dist_m for x in temp])
+
+        settings['WTExcludedDistance'] = excluded_dist
+
+        # Bottom track filter settings
+        settings['BTbeamFilter'] = 3
+        settings['BTdFilter'] = 'Off'
+        settings['BTdFilterThreshold'] = np.nan
+        settings['BTwFilter'] = 'Off'
+        settings['BTwFilterThreshold'] = np.nan
+        settings['BTsmoothFilter'] = 'Off'
+
+        # GGA filter settings
+        settings['ggaDiffQualFilter'] = 1
+        settings['ggaAltitudeFilter'] = 'Off'
+        settings['ggaAltitudeFilterChange'] = np.nan
+
+        # VTG filter settings
+        settings['vtgsmoothFilter'] = 'Off'
+
+        # GGA and VTG filter settings
+        settings['GPSHDOPFilter'] = 'Off'
+        settings['GPSHDOPFilterMax'] = np.nan
+        settings['GPSHDOPFilterChange'] = np.nan
+        settings['GPSSmoothFilter'] = 'Off'
+
+        # Depth Averaging
+        settings['depthAvgMethod'] = 'IDW'
+        settings['depthValidMethod'] = 'QRev'
+
+        # Depth Reference
+
+        # Default to 4 beam depth average
+        settings['depthReference'] = 'bt_depths'
+        # Depth settings
+        settings['depthFilterType'] = 'None'
+        settings['depthComposite'] = 'Off'
+
+        # Interpolation settings
+        settings['BTInterpolation'] = 'None'
+        settings['WTEnsInterpolation'] = 'None'
+        settings['WTCellInterpolation'] = 'None'
+        settings['GPSInterpolation'] = 'None'
+        settings['depthInterpolation'] = 'None'
+        settings['WTwtDepthFilter'] = 'Off'
+
+        # Edge Settings
+        settings['edgeVelMethod'] = 'MeasMag'
+        settings['edgeRecEdgeMethod'] = 'Fixed'
+
+        return settings
+
+    def selected_transects_changed(self, selected_transects_idx):
+        """Handle changes in the transects selected for computing discharge.
+
+        Parameters
+        ----------
+        selected_transects_idx: list
+            List of indices of the transects used to compute discharge
+        """
+
+        # Update transect settings
+        self.checked_transect_idx = []
+        for n in range(len(self.transects)):
+            if n in selected_transects_idx:
+                self.transects[n].checked = True
+                self.checked_transect_idx.append(n)
+            else:
+                self.transects[n].checked = False
+
+        # Update computations
+        self.create_filter_composites()
+        settings = self.current_settings()
+        self.apply_settings(settings=settings)
+
+    def compute_discharge(self):
+        """Computes the discharge for all transects in the measurement.
+        """
+
+        self.discharge = []
+        for transect in self.transects:
+            q = QComp()
+            q.populate_data(data_in=transect, moving_bed_data=self.mb_tests)
+            self.discharge.append(q)
+
+    def compute_uncertainty(self):
+        """Computes uncertainty using QRev model and Oursin model if selected.
+        """
+
+        self.uncertainty = Uncertainty()
+        self.uncertainty.compute_uncertainty(self)
+        self.qa = QAData(self)
+
+        if self.run_oursin:
+            if self.oursin is None:
+                self.oursin = Oursin()
+                user_advanced_settings = None
+                u_measurement_user = None
+            else:
+                user_advanced_settings = self.oursin.user_advanced_settings
+                u_measurement_user = self.oursin.u_measurement_user
+                self.oursin = Oursin()
+            self.oursin.compute_oursin(self,
+                                       user_advanced_settings=user_advanced_settings,
+                                       u_measurement_user=u_measurement_user)
+
+    @staticmethod
+    def compute_edi(meas, selected_idx, percents):
+        """Computes the locations and vertical properties for the user selected transect and
+        flow percentages.
+
+        Parameters
+        ----------
+        meas: Measurement
+            Object of class Measurement
+        selected_idx: int
+            Index of selected transect
+        percents: list
+            List of selected flow percents
+
+        Returns
+        -------
+        edi_results: dict
+            Dictionary of results: percent, target_q, actual_q, distance, depth, velocity, lat, lon
+        """
+
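+        # Example (illustrative): percents=[20, 40, 60, 80] locates verticals where the cumulative
+        # discharge reaches 20, 40, 60, and 80 percent of the total discharge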
+        # Get transect and discharge data
+        transect = meas.transects[selected_idx]
+        discharge = meas.discharge[selected_idx]
+
+        # Sort the percents in ascending order
+        percents.sort()
+
+        # Compute cumulative discharge
+        q_cum = np.nancumsum(discharge.middle_ens + discharge.top_ens + discharge.bottom_ens)
+
+        # Adjust for moving-bed conditions
+        q_cum = q_cum * discharge.correction_factor
+
+        # Adjust q for starting edge
+        if transect.start_edge == 'Left':
+            q_cum = q_cum + discharge.left
+            q_cum[-1] = q_cum[-1] + discharge.right
+            start_dist = transect.edges.left.distance_m
+        else:
+            q_cum = q_cum + discharge.right
+            q_cum[-1] = q_cum[-1] + discharge.left
+            start_dist = transect.edges.right.distance_m
+
+        # Determine ensemble at each percent
+        ensembles = []
+        q_target = []
+        for percent in percents:
+            q_target.append(q_cum[-1] * percent / 100)
+            if q_target[-1] > 0:
+                ensembles.append(np.where(q_cum > q_target[-1])[0][0])
+            if q_target[-1] < 0:
+                ensembles.append(np.where(q_cum < q_target[-1])[0][0])
+
+        # Compute distance from start bank
+        boat_vel_selected = getattr(transect.boat_vel, transect.boat_vel.selected)
+        track_x = np.nancumsum(boat_vel_selected.u_processed_mps[transect.in_transect_idx] *
+                               transect.date_time.ens_duration_sec[transect.in_transect_idx])
+        track_y = np.nancumsum(boat_vel_selected.v_processed_mps[transect.in_transect_idx] *
+                               transect.date_time.ens_duration_sec[transect.in_transect_idx])
+
+        dist = np.sqrt(track_x ** 2 + track_y ** 2) + start_dist
+
+        # Initialize variables for computing vertical data
+        n_pts_in_avg = int(len(q_cum) * 0.01)
+        depth_selected = getattr(transect.depths, transect.depths.selected)
+        q_actual = []
+        distance = []
+        lat = []
+        lon = []
+        depth = []
+        velocity = []
+
+        # Compute data for each vertical
+        for ensemble in ensembles:
+            q_actual.append(q_cum[ensemble])
+            distance.append(dist[ensemble])
+            # Report lat and lon if available
+            try:
+                lat.append(transect.gps.gga_lat_ens_deg[ensemble])
+                lon.append(transect.gps.gga_lon_ens_deg[ensemble])
+            except (ValueError, AttributeError, TypeError):
+                lat.append('')
+                lon.append('')
+            depth.append(depth_selected.depth_processed_m[ensemble])
+
+            # The velocity is averaged over a window of ensembles centered on the
+            # selected ensemble and extending +/- 1% of the total number of ensembles
+            u = np.nanmean(transect.w_vel.u_processed_mps[:, ensemble - n_pts_in_avg: ensemble + n_pts_in_avg + 1], 1)
+            v = np.nanmean(transect.w_vel.v_processed_mps[:, ensemble - n_pts_in_avg: ensemble + n_pts_in_avg + 1], 1)
+            velocity.append(np.sqrt(np.nanmean(u)**2 + np.nanmean(v)**2))
+
+        # Save computed results in a dictionary
+        edi_results = {'percent': percents, 'target_q': q_target, 'actual_q': q_actual, 'distance': distance,
+                       'depth': depth, 'velocity': velocity, 'lat': lat, 'lon': lon}
+        return edi_results
+
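+    # Minimal sketch of the percent-to-ensemble mapping used above, with
+    # hypothetical ensemble discharges (illustration only, not executed):
+    #
+    #     import numpy as np
+    #     q_ens = np.array([2.0, 3.0, 5.0, 4.0, 6.0])               # cms per ensemble
+    #     q_cum = np.nancumsum(q_ens)                               # [2, 5, 10, 14, 20]
+    #     targets = [q_cum[-1] * p / 100 for p in (25, 50, 75)]     # [5.0, 10.0, 15.0]
+    #     ensembles = [np.where(q_cum > t)[0][0] for t in targets]  # [2, 3, 4]
+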
+    @staticmethod
+    def qrev_default_interpolation_methods(settings):
+        """Adds QRev default interpolation settings to existing settings data structure
+
+        Parameters
+        ----------
+        settings: dict
+            Dictionary of reference and filter settings
+
+        Returns
+        -------
+        settings: dict
+            Dictionary with reference, filter, and interpolation settings
+        """
+
+        settings['BTInterpolation'] = 'Linear'
+        settings['WTEnsInterpolation'] = 'abba'
+        settings['WTCellInterpolation'] = 'abba'
+        settings['GPSInterpolation'] = 'Linear'
+        settings['depthInterpolation'] = 'Linear'
+        settings['WTwtDepthFilter'] = 'On'
+
+        return settings
+
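+    # Usage sketch, assuming a processed measurement `meas` with the
+    # current_settings / apply_settings methods used earlier in this class:
+    #
+    #     settings = meas.current_settings()
+    #     settings = Measurement.qrev_default_interpolation_methods(settings)
+    #     meas.apply_settings(settings)
+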
+    def change_extrapolation(self, method, top=None, bot=None, exp=None, extents=None, threshold=None, compute_q=True,
+                             use_weighted=False):
+        """Applies the selected extrapolation method to each transect.
+
+        Parameters
+        ----------
+        method: str
+            Method of computation ('Automatic' or 'Manual')
+        top: str
+            Top extrapolation method
+        bot: str
+            Bottom extrapolation method
+        exp: float
+            Exponent for power or no slip methods
+        extents: list
+            Percent of discharge, does not account for transect direction
+        threshold: float
+            Threshold as a percent for determining if a median is valid
+        compute_q: bool
+            Specifies if the discharge should be computed
+        use_weighted: bool
+            Specifies if discharge weighting is used
+        """
+
+        if top is None:
+            top = self.extrap_fit.sel_fit[-1].top_method
+        if bot is None:
+            bot = self.extrap_fit.sel_fit[-1].bot_method
+        if exp is None:
+            exp = self.extrap_fit.sel_fit[-1].exponent
+        if extents is not None:
+            self.extrap_fit.subsection = extents
+        if threshold is not None:
+            self.extrap_fit.threshold = threshold
+
+        data_type = self.extrap_fit.norm_data[-1].data_type
+        if data_type is None:
+            data_type = 'q'
+
+        if method == 'Manual':
+            self.extrap_fit.fit_method = 'Manual'
+            for transect in self.transects:
+                transect.extrap.set_extrap_data(top=top, bot=bot, exp=exp)
+            self.extrap_fit.process_profiles(transects=self.transects, data_type=data_type, use_weighted=use_weighted)
+        else:
+            self.extrap_fit.fit_method = 'Automatic'
+            self.extrap_fit.process_profiles(transects=self.transects, data_type=data_type, use_weighted=use_weighted)
+            for transect in self.transects:
+                transect.extrap.set_extrap_data(top=self.extrap_fit.sel_fit[-1].top_method,
+                                                bot=self.extrap_fit.sel_fit[-1].bot_method,
+                                                exp=self.extrap_fit.sel_fit[-1].exponent)
+
+        if compute_q:
+            self.extrap_fit.q_sensitivity = ExtrapQSensitivity()
+            self.extrap_fit.q_sensitivity.populate_data(transects=self.transects,
+                                                        extrap_fits=self.extrap_fit.sel_fit)
+
+            self.compute_discharge()
+
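+    # Usage sketch (argument values are illustrative; method names and the
+    # default exponent follow the docstring above):
+    #
+    #     meas.change_extrapolation('Manual', top='Power', bot='Power', exp=0.1667)
+    #     meas.change_extrapolation('Automatic')   # re-select the fit automatically
+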
+    @staticmethod
+    def measurement_duration(self):
+        """Computes the duration of the measurement.
+
+        Note: although decorated as a staticmethod, this function receives the
+        measurement object explicitly as its first argument, matching the call
+        pattern used for mean_discharges and compute_measurement_properties.
+        """
+
+        duration = 0
+        for transect in self.transects:
+            if transect.checked:
+                duration += transect.date_time.transect_duration_sec
+        return duration
+
+    @staticmethod
+    def mean_discharges(self):
+        """Computes the mean discharge for the measurement.
+        """
+
+        # Initialize lists
+        total_q = []
+        uncorrected_q = []
+        top_q = []
+        bot_q = []
+        mid_q = []
+        left_q = []
+        right_q = []
+        int_cells_q = []
+        int_ensembles_q = []
+
+        for n, transect in enumerate(self.transects):
+            if transect.checked:
+                total_q.append(self.discharge[n].total)
+                uncorrected_q.append(self.discharge[n].total_uncorrected)
+                top_q.append(self.discharge[n].top)
+                mid_q.append(self.discharge[n].middle)
+                bot_q.append(self.discharge[n].bottom)
+                left_q.append(self.discharge[n].left)
+                right_q.append(self.discharge[n].right)
+                int_cells_q.append(self.discharge[n].int_cells)
+                int_ensembles_q.append(self.discharge[n].int_ens)
+
+        discharge = {'total_mean': np.nanmean(total_q),
+                     'uncorrected_mean': np.nanmean(uncorrected_q),
+                     'top_mean': np.nanmean(top_q),
+                     'mid_mean': np.nanmean(mid_q),
+                     'bot_mean': np.nanmean(bot_q),
+                     'left_mean': np.nanmean(left_q),
+                     'right_mean': np.nanmean(right_q),
+                     'int_cells_mean': np.nanmean(int_cells_q),
+                     'int_ensembles_mean': np.nanmean(int_ensembles_q)}
+
+        return discharge
+
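+    # Related sketch (assumes the attributes used above): the coefficient of
+    # variation of the checked-transect totals, computed the same way as the
+    # width/area COVs in compute_measurement_properties below:
+    #
+    #     total_q = [meas.discharge[i].total for i in meas.checked_transect_idx]
+    #     cov_percent = np.nanstd(total_q, ddof=1) / np.nanmean(total_q) * 100
+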
+    @staticmethod
+    def compute_measurement_properties(self):
+        """Computes characteristics of the transects and measurement that assist in evaluating the consistency
+        of the transects.
+
+        Returns
+        -------
+        trans_prop: dict
+            Dictionary of transect properties
+            width: float
+                width in m
+            width_cov: float
+                coefficient of variation of width in percent
+            area: float
+                cross sectional area in m**2
+            area_cov: float
+                coefficient of variation of area in percent
+            avg_boat_speed: float
+                average boat speed in mps
+            avg_boat_course: float
+                average boat course in degrees
+            avg_water_speed: float
+                average water speed in mps
+            avg_water_dir: float
+                average water direction in degrees
+            avg_depth: float
+                average depth in m
+            max_depth: float
+                maximum depth in m
+            max_water_speed: float
+                99th percentile of water speed in mps
+        """
+
+        # Initialize variables
+        checked_idx = np.array([], dtype=int)
+        n_transects = len(self.transects)
+        trans_prop = {'width': np.array([np.nan] * (n_transects + 1)),
+                      'width_cov': np.array([np.nan] * (n_transects + 1)),
+                      'area': np.array([np.nan] * (n_transects + 1)),
+                      'area_cov': np.array([np.nan] * (n_transects + 1)),
+                      'avg_boat_speed': np.array([np.nan] * (n_transects + 1)),
+                      'avg_boat_course': np.array([np.nan] * n_transects),
+                      'avg_water_speed': np.array([np.nan] * (n_transects + 1)),
+                      'avg_water_dir': np.array([np.nan] * (n_transects + 1)),
+                      'avg_depth': np.array([np.nan] * (n_transects + 1)),
+                      'max_depth': np.array([np.nan] * (n_transects + 1)),
+                      'max_water_speed': np.array([np.nan] * (n_transects + 1))}
+
+        # Process each transect
+        for n, transect in enumerate(self.transects):
+
+            # Compute boat track properties
+            boat_track = BoatStructure.compute_boat_track(transect)
+
+            # Get boat speeds
+            in_transect_idx = transect.in_transect_idx
+            if getattr(transect.boat_vel, transect.boat_vel.selected) is not None:
+                boat_selected = getattr(transect.boat_vel, transect.boat_vel.selected)
+                u_boat = boat_selected.u_processed_mps[in_transect_idx]
+                v_boat = boat_selected.v_processed_mps[in_transect_idx]
+            else:
+                u_boat = nans(transect.boat_vel.bt_vel.u_processed_mps[in_transect_idx].shape)
+                v_boat = nans(transect.boat_vel.bt_vel.v_processed_mps[in_transect_idx].shape)
+
+            if np.logical_not(np.all(np.isnan(boat_track['track_x_m']))):
+
+                # Compute boat course and mean speed
+                [course_radians, dmg] = cart2pol(boat_track['track_x_m'][-1], boat_track['track_y_m'][-1])
+                trans_prop['avg_boat_course'][n] = rad2azdeg(course_radians)
+                trans_prop['avg_boat_speed'][n] = np.nanmean(np.sqrt(u_boat**2 + v_boat**2))
+
+                # Compute width
+                trans_prop['width'][n] = np.nansum([dmg, transect.edges.left.distance_m,
+                                                    transect.edges.right.distance_m])
+
+                # Project the shiptrack onto a line from the beginning to end of the transect
+                unit_x, unit_y = pol2cart(course_radians, 1)
+                bt = np.array([boat_track['track_x_m'], boat_track['track_y_m']]).T
+                dot_prod = bt @ np.array([unit_x, unit_y])
+                projected_x = dot_prod * unit_x
+                projected_y = dot_prod * unit_y
+                station = np.sqrt(projected_x**2 + projected_y**2)
+
+                # Get selected depth object
+                depth = getattr(transect.depths, transect.depths.selected)
+                depth_a = np.copy(depth.depth_processed_m)
+                depth_a[np.isnan(depth_a)] = 0
+                # Compute area of the moving-boat portion of the cross section using trapezoidal integration.
+                # This method is consistent with AreaComp but is different from QRev in Matlab
+                area_moving_boat = np.abs(np.trapz(depth_a[in_transect_idx], station[in_transect_idx]))
+
+                # Compute area of left edge
+                edge_type = transect.edges.left.type
+                coef = 1
+                if edge_type == 'Triangular':
+                    coef = 0.5
+                elif edge_type == 'Rectangular':
+                    coef = 1.0
+                elif edge_type == 'Custom':
+                    coef = 0.5 + (transect.edges.left.cust_coef - 0.3535)
+                elif edge_type == 'User Q':
+                    coef = 0.5
+                edge_idx = QComp.edge_ensembles('left', transect)
+                edge_depth = np.nanmean(depth.depth_processed_m[edge_idx])
+                area_left = edge_depth * transect.edges.left.distance_m * coef
+
+                # Compute area of right edge
+                edge_type = transect.edges.right.type
+                coef = 1
+                if edge_type == 'Triangular':
+                    coef = 0.5
+                elif edge_type == 'Rectangular':
+                    coef = 1.0
+                elif edge_type == 'Custom':
+                    coef = 0.5 + (transect.edges.right.cust_coef - 0.3535)
+                elif edge_type == 'User Q':
+                    coef = 0.5
+                edge_idx = QComp.edge_ensembles('right', transect)
+                edge_depth = np.nanmean(depth.depth_processed_m[edge_idx])
+                area_right = edge_depth * transect.edges.right.distance_m * coef
+
+                # Compute total cross sectional area
+                trans_prop['area'][n] = np.nansum([area_left, area_moving_boat, area_right])
+
+                # Compute average water speed
+                trans_prop['avg_water_speed'][n] = self.discharge[n].total / trans_prop['area'][n]
+
+                # Compute flow direction using discharge weighting
+                u_water = transect.w_vel.u_processed_mps[:, in_transect_idx]
+                v_water = transect.w_vel.v_processed_mps[:, in_transect_idx]
+                weight = np.abs(self.discharge[n].middle_cells)
+                u = np.nansum(np.nansum(u_water * weight)) / np.nansum(np.nansum(weight))
+                v = np.nansum(np.nansum(v_water * weight)) / np.nansum(np.nansum(weight))
+                trans_prop['avg_water_dir'][n] = np.arctan2(u, v) * 180 / np.pi
+                if trans_prop['avg_water_dir'][n] < 0:
+                    trans_prop['avg_water_dir'][n] = trans_prop['avg_water_dir'][n] + 360
+
+                # Compute average and max depth
+                # This is a deviation from QRev in Matlab which simply averaged all the depths
+                trans_prop['avg_depth'][n] = trans_prop['area'][n] / trans_prop['width'][n]
+                trans_prop['max_depth'][n] = np.nanmax(depth.depth_processed_m[in_transect_idx])
+
+                # Compute max water speed using the 99th percentile
+                water_speed = np.sqrt(u_water**2 + v_water**2)
+                trans_prop['max_water_speed'][n] = np.nanpercentile(water_speed, 99)
+                if transect.checked:
+                    checked_idx = np.append(checked_idx, n)
+
+        # Only transects used for discharge are included in measurement properties;
+        # the means are computed once, after all transects are processed, and
+        # stored in the extra slot at index n_transects
+        if len(checked_idx) > 0:
+            n = n_transects
+            trans_prop['width'][n] = np.nanmean(trans_prop['width'][checked_idx])
+            trans_prop['width_cov'][n] = (np.nanstd(trans_prop['width'][checked_idx], ddof=1) /
+                                          trans_prop['width'][n]) * 100
+            trans_prop['area'][n] = np.nanmean(trans_prop['area'][checked_idx])
+            trans_prop['area_cov'][n] = (np.nanstd(trans_prop['area'][checked_idx], ddof=1) /
+                                         trans_prop['area'][n]) * 100
+            trans_prop['avg_boat_speed'][n] = np.nanmean(trans_prop['avg_boat_speed'][checked_idx])
+            trans_prop['avg_water_speed'][n] = np.nanmean(trans_prop['avg_water_speed'][checked_idx])
+            trans_prop['avg_depth'][n] = np.nanmean(trans_prop['avg_depth'][checked_idx])
+            trans_prop['max_depth'][n] = np.nanmax(trans_prop['max_depth'][checked_idx])
+            trans_prop['max_water_speed'][n] = np.nanmax(trans_prop['max_water_speed'][checked_idx])
+
+            # Compute average water direction using vector coordinates to avoid the problem of averaging
+            # fluctuations that cross zero degrees
+            x_coord = []
+            y_coord = []
+            for idx in checked_idx:
+                water_dir_rad = azdeg2rad(trans_prop['avg_water_dir'][idx])
+                x, y = pol2cart(water_dir_rad, 1)
+                x_coord.append(x)
+                y_coord.append(y)
+            avg_water_dir_rad, _ = cart2pol(np.mean(x_coord), np.mean(y_coord))
+            trans_prop['avg_water_dir'][n] = rad2azdeg(avg_water_dir_rad)
+
+        return trans_prop
+
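+    # Geometry sketch for the shiptrack projection used above: each track point
+    # is projected onto the unit vector of the overall course, giving a station
+    # (distance along the mean course) for the trapezoidal area integration.
+    # With hypothetical coordinates:
+    #
+    #     import numpy as np
+    #     track = np.array([[0.0, 0.0], [3.0, 1.0], [6.0, 0.0]])  # x, y in m
+    #     course = track[-1] / np.linalg.norm(track[-1])          # unit vector (1, 0)
+    #     station = track @ course                                # [0., 3., 6.]
+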
+    @staticmethod
+    def checked_transects(meas):
+        """Create a list of indices of the checked transects.
+        """
+
+        checked_transect_idx = []
+        for n in range(len(meas.transects)):
+            if meas.transects[n].checked:
+                checked_transect_idx.append(n)
+        return checked_transect_idx
+
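+    # Equivalent one-line sketch:
+    #     checked = [n for n, t in enumerate(meas.transects) if t.checked]
+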
+    @staticmethod
+    def compute_time_series(meas, variable=None):
+        """Computes the time series using serial time for any variable.
+
+        Parameters
+        ----------
+        meas: Measurement
+            Object of class Measurement
+        variable: str
+            Name of the variable for which the time series is requested;
+            currently only 'Temperature' is supported
+
+        Returns
+        -------
+        data: np.ndarray
+            Values of the requested variable for all checked transects
+        serial_time: np.ndarray
+            Serial time in seconds associated with each value
+        """
+
+        # Initialize variables
+        data = np.array([])
+        serial_time = np.array([])
+        idx_transects = Measurement.checked_transects(meas)
+
+        # Process transects
+        for idx in idx_transects:
+            if variable == 'Temperature':
+                data = np.append(data, meas.transects[idx].sensors.temperature_deg_c.internal.data)
+            ens_cum_time = np.nancumsum(meas.transects[idx].date_time.ens_duration_sec)
+            ens_time = meas.transects[idx].date_time.start_serial_time + ens_cum_time
+            serial_time = np.append(serial_time, ens_time)
+
+        return data, serial_time
+
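+    # Usage sketch (plotting call is hypothetical):
+    #
+    #     data, t = Measurement.compute_time_series(meas, variable='Temperature')
+    #     # t is serial time in seconds; e.g. with matplotlib: plt.plot(t, data)
+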
+    def xml_output(self, version, file_name):
+        """Builds the XML element tree summarizing the measurement.
+
+        Parameters
+        ----------
+        version: str
+            Software version written to the SoftwareVersion node
+        file_name: str
+            Name of the QRev file; the extension is stripped for the
+            QRevFilename attribute
+        """
+
+        channel = ETree.Element('Channel', QRevFilename=os.path.basename(file_name[:-4]), QRevVersion=version)
+
+        # (2) SiteInformation Node
+        if self.station_name or self.station_number:
+            site_info = ETree.SubElement(channel, 'SiteInformation')
+
+            # (3) StationName Node
+            if self.station_name:
+                ETree.SubElement(site_info, 'StationName', type='char').text = self.station_name
+
+            # (3) SiteID Node
+            ETree.SubElement(site_info, 'SiteID', type='char').text = str(self.station_number)
+
+            # (3) Persons
+            ETree.SubElement(site_info, 'Persons', type='char').text = self.persons
+
+            # (3) Measurement Number
+            ETree.SubElement(site_info, 'MeasurementNumber', type='char').text = self.meas_number
+
+            # (3) Stage start
+            temp = self.stage_start_m
+            ETree.SubElement(site_info, 'StageStart', type='double', unitsCode='m').text = '{:.5f}'.format(temp)
+
+            # (3) Stage end
+            temp = self.stage_end_m
+            ETree.SubElement(site_info, 'StageEnd', type='double', unitsCode='m').text = '{:.5f}'.format(temp)
+
+            # (3) Stage measurement
+            temp = self.stage_meas_m
+            ETree.SubElement(site_info, 'StageMeasurement', type='double', unitsCode='m').text = '{:.5f}'.format(temp)
+
+        # (2) QA Node
+        qa = ETree.SubElement(channel, 'QA')
+
+        # (3) DiagnosticTestResult Node
+        if len(self.system_tst) > 0:
+            last_test = self.system_tst[-1].data
+            n_failed = last_test.count('FAIL')
+            if n_failed == 0:
+                test_result = 'Pass'
+            else:
+                test_result = str(n_failed) + ' Failed'
+        else:
+            test_result = 'None'
+        ETree.SubElement(qa, 'DiagnosticTestResult', type='char').text = test_result
+
+        # (3) CompassCalibrationResult Node
+        try:
+            last_eval = self.compass_eval[-1]
+            # StreamPro, RR
+            idx = last_eval.data.find('Typical Heading Error: <')
+            if idx == -1:
+                # Rio Grande
+                idx = last_eval.data.find('>>> Total error:')
+                if idx != -1:
+                    idx_start = idx + 17
+                    idx_end = idx_start + 10
+                    comp_error = last_eval.data[idx_start:idx_end]
+                    comp_error = ''.join([n for n in comp_error if n.isdigit() or n == '.'])
+                else:
+                    comp_error = ''
+            else:
+                # StreamPro, RR
+                idx_start = idx + 24
+                idx_end = idx_start + 10
+                comp_error = last_eval.data[idx_start:idx_end]
+                comp_error = ''.join([n for n in comp_error if n.isdigit() or n == '.'])
+
+            # If no numeric error could be extracted the evaluation still exists,
+            # so report 'Yes'; otherwise report the maximum error found
+            if not comp_error:
+                ETree.SubElement(qa, 'CompassCalibrationResult', type='char').text = 'Yes'
+            else:
+                ETree.SubElement(qa, 'CompassCalibrationResult', type='char').text = 'Max ' + comp_error
+
+        except (IndexError, TypeError, AttributeError):
+            try:
+                if len(self.compass_cal) > 0:
+                    ETree.SubElement(qa, 'CompassCalibrationResult', type='char').text = 'Yes'
+                else:
+                    ETree.SubElement(qa, 'CompassCalibrationResult', type='char').text = 'No'
+            except (IndexError, TypeError):
+                ETree.SubElement(qa, 'CompassCalibrationResult', type='char').text = 'No'
+
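+        # Parsing sketch with hypothetical evaluation text:
+        #
+        #     text = 'Typical Heading Error: <1.0 deg'
+        #     idx = text.find('Typical Heading Error: <')         # 0
+        #     raw = text[idx + 24: idx + 34]                      # '1.0 deg' (24 = search string length)
+        #     ''.join(c for c in raw if c.isdigit() or c == '.')  # '1.0'
+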
+        # (3) MovingBedTestType Node
+        if not self.mb_tests:
+            ETree.SubElement(qa, 'MovingBedTestType', type='char').text = 'None'
+        else:
+            selected_idx = [i for (i, val) in enumerate(self.mb_tests) if val.selected is True]
+            if len(selected_idx) >= 1:
+                temp = self.mb_tests[selected_idx[0]].type
+            else:
+                temp = self.mb_tests[-1].type
+            ETree.SubElement(qa, 'MovingBedTestType', type='char').text = str(temp)
+
+            # MovingBedTestResult Node
+            temp = 'Unknown'
+            for idx in selected_idx:
+                if self.mb_tests[idx].moving_bed == 'Yes':
+                    temp = 'Yes'
+                    break
+                elif self.mb_tests[idx].moving_bed == 'No':
+                    temp = 'No'
+
+            ETree.SubElement(qa, 'MovingBedTestResult', type='char').text = temp
+
+        # (3) DiagnosticTest and Text Node
+        if self.system_tst:
+            test_text = ''
+            for test in self.system_tst:
+                test_text += test.data
+            diag_test = ETree.SubElement(qa, 'DiagnosticTest')
+            ETree.SubElement(diag_test, 'Text', type='char').text = test_text
+
+        # (3) CompassCalibration and Text Node
+        compass_text = ''
+        try:
+            for each in self.compass_cal:
+                if self.transects[self.checked_transect_idx[0]].adcp.manufacturer == 'SonTek':
+                    idx = each.data.find('CAL_TIME')
+                    compass_text += each.data[idx:]
+                else:
+                    compass_text += each.data
+        except (IndexError, TypeError, AttributeError):
+            pass
+        try:
+            for each in self.compass_eval:
+                if self.transects[self.checked_transect_idx[0]].adcp.manufacturer == 'SonTek':
+                    idx = each.data.find('CAL_TIME')
+                    compass_text += each.data[idx:]
+                else:
+                    compass_text += each.data
+        except (IndexError, TypeError, AttributeError):
+            pass
+
+        if len(compass_text) > 0:
+            comp_cal = ETree.SubElement(qa, 'CompassCalibration')
+            ETree.SubElement(comp_cal, 'Text', type='char').text = compass_text
+
+        # (3) MovingBedTest Node
+        if self.mb_tests:
+            for each in self.mb_tests:
+                mbt = ETree.SubElement(qa, 'MovingBedTest')
+
+                # (4) Filename Node
+                ETree.SubElement(mbt, 'Filename', type='char').text = each.transect.file_name
+
+                # (4) TestType Node
+                ETree.SubElement(mbt, 'TestType', type='char').text = each.type
+
+                # (4) Duration Node
+                ETree.SubElement(mbt, 'Duration', type='double',
+                                 unitsCode='sec').text = '{:.2f}'.format(each.duration_sec)
+
+                # (4) PercentInvalidBT Node
+                ETree.SubElement(mbt, 'PercentInvalidBT', type='double').text = '{:.4f}'.format(each.percent_invalid_bt)
+
+                # (4) HeadingDifference Node
+                if each.compass_diff_deg:
+                    temp = '{:.2f}'.format(each.compass_diff_deg)
+                else:
+                    temp = ''
+                ETree.SubElement(mbt, 'HeadingDifference', type='double', unitsCode='deg').text = temp
+
+                # (4) MeanFlowDirection Node
+                if each.flow_dir:
+                    temp = '{:.2f}'.format(each.flow_dir)
+                else:
+                    temp = ''
+                ETree.SubElement(mbt, 'MeanFlowDirection', type='double', unitsCode='deg').text = temp
+
+                # (4) MovingBedDirection Node
+                if each.mb_dir:
+                    temp = '{:.2f}'.format(each.mb_dir)
+                else:
+                    temp = ''
+                ETree.SubElement(mbt, 'MovingBedDirection', type='double', unitsCode='deg').text = temp
+
+                # (4) DistanceUpstream Node
+                ETree.SubElement(mbt, 'DistanceUpstream', type='double', unitsCode='m').text = \
+                    '{:.4f}'.format(each.dist_us_m)
+
+                # (4) MeanFlowSpeed Node
+                ETree.SubElement(mbt, 'MeanFlowSpeed', type='double', unitsCode='mps').text = \
+                    '{:.4f}'.format(each.flow_spd_mps)
+
+                # (4) MovingBedSpeed Node
+                ETree.SubElement(mbt, 'MovingBedSpeed', type='double', unitsCode='mps').text = \
+                    '{:.4f}'.format(each.mb_spd_mps)
+
+                # (4) PercentMovingBed Node
+                ETree.SubElement(mbt, 'PercentMovingBed', type='double').text = '{:.2f}'.format(each.percent_mb)
+
+                # (4) TestQuality Node
+                ETree.SubElement(mbt, 'TestQuality', type='char').text = each.test_quality
+
+                # (4) MovingBedPresent Node
+                ETree.SubElement(mbt, 'MovingBedPresent', type='char').text = each.moving_bed
+
+                # (4) UseToCorrect Node
+                if each.use_2_correct:
+                    ETree.SubElement(mbt, 'UseToCorrect', type='char').text = 'Yes'
+                else:
+                    ETree.SubElement(mbt, 'UseToCorrect', type='char').text = 'No'
+
+                # (4) UserValid Node
+                if each.user_valid:
+                    ETree.SubElement(mbt, 'UserValid', type='char').text = 'Yes'
+                else:
+                    ETree.SubElement(mbt, 'UserValid', type='char').text = 'No'
+
+                # (4) Message Node
+                if len(each.messages) > 0:
+                    str_out = ''
+                    for message in each.messages:
+                        str_out = str_out + message + '; '
+                    ETree.SubElement(mbt, 'Message', type='char').text = str_out
+
+        # (3) TemperatureCheck Node
+        temp_check = ETree.SubElement(qa, 'TemperatureCheck')
+
+        # (4) VerificationTemperature Node
+        if not np.isnan(self.ext_temp_chk['user']):
+            ETree.SubElement(temp_check, 'VerificationTemperature', type='double', unitsCode='degC').text = \
+                '{:.2f}'.format(self.ext_temp_chk['user'])
+
+        # (4) InstrumentTemperature Node
+        if not np.isnan(self.ext_temp_chk['adcp']):
+            ETree.SubElement(temp_check, 'InstrumentTemperature', type='double',
+                             unitsCode='degC').text = '{:.2f}'.format(self.ext_temp_chk['adcp'])
+
+        # (4) TemperatureChange Node:
+        temp_all = np.array([np.nan])
+        for each in self.transects:
+            # Check for situation where user has entered a constant temperature
+            temperature_selected = getattr(each.sensors.temperature_deg_c, each.sensors.temperature_deg_c.selected)
+            temperature = temperature_selected.data
+            if each.sensors.temperature_deg_c.selected != 'user':
+                # Temperatures for ADCP.
+                temp_all = np.concatenate((temp_all, temperature))
+            else:
+                # User specified constant temperature.
+                # Concatenate a matrix of size of internal data with repeated user values.
+                user_arr = np.tile(each.sensors.temperature_deg_c.user.data,
+                                   (np.size(each.sensors.temperature_deg_c.internal.data)))
+                temp_all = np.concatenate((temp_all, user_arr))
+
+        t_range = np.nanmax(temp_all) - np.nanmin(temp_all)
+        ETree.SubElement(temp_check, 'TemperatureChange', type='double',
+                         unitsCode='degC').text = '{:.2f}'.format(t_range)
+
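+        # Sketch of the constant-temperature expansion used above (hypothetical
+        # values): np.tile(21.5, 4) -> array([21.5, 21.5, 21.5, 21.5]), matching
+        # the length of the internal temperature series before concatenation.
+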
+        # (3) QRev_Message Node
+        qa_check_keys = ['bt_vel', 'compass', 'depths', 'edges', 'extrapolation', 'gga_vel', 'movingbed', 'system_tst',
+                         'temperature', 'transects', 'user', 'vtg_vel', 'w_vel']
+
+        # For each qa check retrieve messages
+        messages = []
+        for key in qa_check_keys:
+            qa_type = getattr(self.qa, key)
+            if qa_type['messages']:
+                for message in qa_type['messages']:
+                    if isinstance(message, str):
+                        if message[:3].isupper():
+                            messages.append([message, 1])
+                        else:
+                            messages.append([message, 2])
+                    else:
+                        messages.append(message)
+
+        # Sort messages with warning at top
+        messages.sort(key=lambda x: x[1])
+
+        if len(messages) > 0:
+            temp = ''
+            for message in messages:
+                temp = temp + message[0]
+            ETree.SubElement(qa, 'QRev_Message', type='char').text = temp
+
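+        # Severity sort sketch: messages are [text, level] pairs where level 1
+        # (upper-case three-character prefix, e.g. 'WT:') sorts before level 2:
+        #
+        #     msgs = [['Caution: check edges', 2], ['WT: warning text', 1]]
+        #     msgs.sort(key=lambda x: x[1])   # level-1 warnings come first
+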
+        # (2) Instrument Node
+        instrument = ETree.SubElement(channel, 'Instrument')
+
+        # (3) Manufacturer Node
+        ETree.SubElement(instrument, 'Manufacturer', type='char').text = \
+            self.transects[self.checked_transect_idx[0]].adcp.manufacturer
+
+        # (3) Model Node
+        ETree.SubElement(instrument, 'Model', type='char').text = \
+            self.transects[self.checked_transect_idx[0]].adcp.model
+
+        # (3) SerialNumber Node
+        sn = self.transects[self.checked_transect_idx[0]].adcp.serial_num
+        ETree.SubElement(instrument, 'SerialNumber', type='char').text = str(sn)
+
+        # (3) FirmwareVersion Node
+        ver = self.transects[self.checked_transect_idx[0]].adcp.firmware
+        ETree.SubElement(instrument, 'FirmwareVersion', type='char').text = str(ver)
+
+        # (3) Frequency Node
+        freq = self.transects[self.checked_transect_idx[0]].adcp.frequency_khz
+        if isinstance(freq, np.ndarray):
+            freq = "Multi"
+        ETree.SubElement(instrument, 'Frequency', type='char', unitsCode='kHz').text = str(freq)
+
+        # (3) BeamAngle Node
+        ang = self.transects[self.checked_transect_idx[0]].adcp.beam_angle_deg
+        ETree.SubElement(instrument, 'BeamAngle', type='double', unitsCode='deg').text = '{:.1f}'.format(ang)
+
+        # (3) BlankingDistance Node
+        blank = [each.w_vel.blanking_distance_m for each in self.transects]
+        if isinstance(blank[0], float):
+            temp = np.mean(blank)
+            if self.transects[self.checked_transect_idx[0]].w_vel.excluded_dist_m > temp:
+                temp = self.transects[self.checked_transect_idx[0]].w_vel.excluded_dist_m
+        else:
+            temp = self.transects[self.checked_transect_idx[0]].w_vel.excluded_dist_m
+        ETree.SubElement(instrument, 'BlankingDistance', type='double', unitsCode='m').text = '{:.4f}'.format(temp)
+
+        # (3) InstrumentConfiguration Node
+        commands = ''
+        if self.transects[self.checked_transect_idx[0]].adcp.configuration_commands is not None:
+            for each in self.transects[self.checked_transect_idx[0]].adcp.configuration_commands:
+                if isinstance(each, str):
+                    commands += each + '  '
+            ETree.SubElement(instrument, 'InstrumentConfiguration', type='char').text = commands
+
+        # (2) Processing Node
+        processing = ETree.SubElement(channel, 'Processing')
+
+        # (3) SoftwareVersion Node
+        ETree.SubElement(processing, 'SoftwareVersion', type='char').text = version
+
+        # (3) Type Node
+        ETree.SubElement(processing, 'Type', type='char').text = self.processing
+
+        # (3) AreaComputationMethod Node
+        ETree.SubElement(processing, 'AreaComputationMethod', type='char').text = 'Parallel'
+
+        # (3) Navigation Node
+        navigation = ETree.SubElement(processing, 'Navigation')
+
+        # (4) Reference Node
+        ETree.SubElement(navigation, 'Reference', type='char').text = \
+            self.transects[self.checked_transect_idx[0]].w_vel.nav_ref
+
+        # (4) CompositeTrack
+        ETree.SubElement(navigation, 'CompositeTrack', type='char').text = \
+            self.transects[self.checked_transect_idx[0]].boat_vel.composite
+
+        # (4) MagneticVariation Node
+        mag_var = self.transects[self.checked_transect_idx[0]].sensors.heading_deg.internal.mag_var_deg
+        ETree.SubElement(navigation, 'MagneticVariation', type='double',
+                         unitsCode='deg').text = '{:.2f}'.format(mag_var)
+
+        # (4) BeamFilter
+        nav_data = getattr(self.transects[self.checked_transect_idx[0]].boat_vel,
+                           self.transects[self.checked_transect_idx[0]].boat_vel.selected)
+        temp = nav_data.beam_filter
+        if temp < 0:
+            temp = 'Auto'
+        else:
+            temp = str(temp)
+        ETree.SubElement(navigation, 'BeamFilter', type='char').text = temp
+
+        # (4) ErrorVelocityFilter Node
+        evf = nav_data.d_filter
+        if evf == 'Manual':
+            evf = '{:.4f}'.format(nav_data.d_filter_thresholds)
+        ETree.SubElement(navigation, 'ErrorVelocityFilter', type='char', unitsCode='mps').text = evf
+
+        # (4) VerticalVelocityFilter Node
+        vvf = nav_data.w_filter
+        if vvf == 'Manual':
+            vvf = '{:.4f}'.format(nav_data.w_filter_thresholds)
+        ETree.SubElement(navigation, 'VerticalVelocityFilter', type='char', unitsCode='mps').text = vvf
+
+        # (4) Use measurement thresholds
+        temp = nav_data.use_measurement_thresholds
+        if temp:
+            temp = 'Yes'
+        else:
+            temp = 'No'
+        ETree.SubElement(navigation, 'UseMeasurementThresholds', type='char').text = temp
+
+        # (4) OtherFilter Node
+        o_f = nav_data.smooth_filter
+        ETree.SubElement(navigation, 'OtherFilter', type='char').text = o_f
+
+        # (4) GPSDifferentialQualityFilter Node
+        temp = nav_data.gps_diff_qual_filter
+        if temp:
+            if isinstance(temp, (int, float)):
+                temp = str(temp)
+            ETree.SubElement(navigation, 'GPSDifferentialQualityFilter', type='char').text = temp
+
+        # (4) GPSAltitudeFilter Node
+        temp = nav_data.gps_altitude_filter
+        if temp:
+            if temp == 'Manual':
+                temp = self.transects[self.checked_transect_idx[0]].boat_vel.gps_altitude_filter_change
+            ETree.SubElement(navigation, 'GPSAltitudeFilter', type='char', unitsCode='m').text = str(temp)
+
+        # (4) HDOPChangeFilter
+        temp = nav_data.gps_HDOP_filter
+        if temp:
+            if temp == 'Manual':
+                temp = '{:.2f}'.format(self.transects[self.checked_transect_idx[0]].boat_vel.gps_hdop_filter_change)
+            ETree.SubElement(navigation, 'HDOPChangeFilter', type='char').text = temp
+
+        # (4) HDOPThresholdFilter
+        temp = nav_data.gps_HDOP_filter
+        if temp:
+            if temp == 'Manual':
+                temp = '{:.2f}'.format(self.transects[self.checked_transect_idx[0]].boat_vel.gps_HDOP_filter_max)
+            ETree.SubElement(navigation, 'HDOPThresholdFilter', type='char').text = temp
+
+        # (4) InterpolationType Node
+        temp = nav_data.interpolate
+        ETree.SubElement(navigation, 'InterpolationType', type='char').text = temp
+
+        # (3) Depth Node
+        depth = ETree.SubElement(processing, 'Depth')
+
+        # (4) Reference Node
+        if self.transects[self.checked_transect_idx[0]].depths.selected == 'bt_depths':
+            temp = 'BT'
+        elif self.transects[self.checked_transect_idx[0]].depths.selected == 'vb_depths':
+            temp = 'VB'
+        elif self.transects[self.checked_transect_idx[0]].depths.selected == 'ds_depths':
+            temp = 'DS'
+        ETree.SubElement(depth, 'Reference', type='char').text = temp
+
+        # (4) CompositeDepth Node
+        ETree.SubElement(depth, 'CompositeDepth', type='char').text = \
+            self.transects[self.checked_transect_idx[0]].depths.composite
+
+        # (4) ADCPDepth Node
+        depth_data = getattr(self.transects[self.checked_transect_idx[0]].depths,
+                             self.transects[self.checked_transect_idx[0]].depths.selected)
+        temp = depth_data.draft_use_m
+        ETree.SubElement(depth, 'ADCPDepth', type='double', unitsCode='m').text = '{:.4f}'.format(temp)
+
+        # (4) ADCPDepthConsistent Node
+        drafts = []
+        for transect in self.transects:
+            if transect.checked:
+                transect_depth = getattr(transect.depths, transect.depths.selected)
+                drafts.append(transect_depth.draft_use_m)
+        unique_drafts = set(drafts)
+        num_drafts = len(unique_drafts)
+        if num_drafts > 1:
+            temp = 'No'
+        else:
+            temp = 'Yes'
+        ETree.SubElement(depth, 'ADCPDepthConsistent', type='boolean').text = temp
+
+        # (4) FilterType Node
+        temp = depth_data.filter_type
+        ETree.SubElement(depth, 'FilterType', type='char').text = temp
+
+        # (4) InterpolationType Node
+        temp = depth_data.interp_type
+        ETree.SubElement(depth, 'InterpolationType', type='char').text = temp
+
+        # (4) AveragingMethod Node
+        temp = depth_data.avg_method
+        ETree.SubElement(depth, 'AveragingMethod', type='char').text = temp
+
+        # (4) ValidDataMethod Node
+        temp = depth_data.valid_data_method
+        ETree.SubElement(depth, 'ValidDataMethod', type='char').text = temp
+
+        # (3) WaterTrack Node
+        water_track = ETree.SubElement(processing, 'WaterTrack')
+
+        # (4) ExcludedDistance Node
+        temp = self.transects[self.checked_transect_idx[0]].w_vel.excluded_dist_m
+        ETree.SubElement(water_track, 'ExcludedDistance', type='double', unitsCode='m').text = '{:.4f}'.format(temp)
+
+        # (4) BeamFilter Node
+        temp = self.transects[self.checked_transect_idx[0]].w_vel.beam_filter
+        if temp < 0:
+            temp = 'Auto'
+        else:
+            temp = str(temp)
+        ETree.SubElement(water_track, 'BeamFilter', type='char').text = temp
+
+        # (4) ErrorVelocityFilter Node
+        temp = self.transects[self.checked_transect_idx[0]].w_vel.d_filter
+        if temp == 'Manual':
+            temp = '{:.4f}'.format(self.transects[self.checked_transect_idx[0]].w_vel.d_filter_thresholds)
+        ETree.SubElement(water_track, 'ErrorVelocityFilter', type='char', unitsCode='mps').text = temp
+
+        # (4) VerticalVelocityFilter Node
+        temp = self.transects[self.checked_transect_idx[0]].w_vel.w_filter
+        if temp == 'Manual':
+            temp = '{:.4f}'.format(self.transects[self.checked_transect_idx[0]].w_vel.w_filter_thresholds)
+        ETree.SubElement(water_track, 'VerticalVelocityFilter', type='char', unitsCode='mps').text = temp
+
+        # (4) Use measurement thresholds
+        temp = self.transects[self.checked_transect_idx[0]].w_vel.use_measurement_thresholds
+        if temp:
+            temp = 'Yes'
+        else:
+            temp = 'No'
+        ETree.SubElement(water_track, 'UseMeasurementThresholds', type='char').text = temp
+
+        # (4) OtherFilter Node
+        temp = self.transects[self.checked_transect_idx[0]].w_vel.smooth_filter
+        ETree.SubElement(water_track, 'OtherFilter', type='char').text = temp
+
+        # (4) SNRFilter Node
+        temp = self.transects[self.checked_transect_idx[0]].w_vel.snr_filter
+        ETree.SubElement(water_track, 'SNRFilter', type='char').text = temp
+
+        # (4) CellInterpolation Node
+        temp = self.transects[self.checked_transect_idx[0]].w_vel.interpolate_cells
+        ETree.SubElement(water_track, 'CellInterpolation', type='char').text = temp
+
+        # (4) EnsembleInterpolation Node
+        temp = self.transects[self.checked_transect_idx[0]].w_vel.interpolate_ens
+        ETree.SubElement(water_track, 'EnsembleInterpolation', type='char').text = temp
+
+        # (3) Edge Node
+        edge = ETree.SubElement(processing, 'Edge')
+
+        # (4) RectangularEdgeMethod Node
+        temp = self.transects[self.checked_transect_idx[0]].edges.rec_edge_method
+        ETree.SubElement(edge, 'RectangularEdgeMethod', type='char').text = temp
+
+        # (4) VelocityMethod Node
+        temp = self.transects[self.checked_transect_idx[0]].edges.vel_method
+        ETree.SubElement(edge, 'VelocityMethod', type='char').text = temp
+
+        # (4) LeftType Node
+        typ = []
+        for transect in self.transects:
+            if transect.checked:
+                typ.append(transect.edges.left.type)
+        unique_type = set(typ)
+        num_types = len(unique_type)
+        if num_types > 1:
+            temp = 'Varies'
+        else:
+            temp = typ[0]
+        ETree.SubElement(edge, 'LeftType', type='char').text = temp
+
+        # LeftEdgeCoefficient
+        if temp in ('User Q', 'Varies'):
+            temp = 'N/A'
+        else:
+            coef = []
+            for transect in self.transects:
+                if transect.checked:
+                    coef.append(QComp.edge_coef('left', transect))
+            num_coef = len(set(coef))
+            if num_coef > 1:
+                temp = 'Varies'
+            else:
+                temp = '{:.4f}'.format(coef[0])
+        ETree.SubElement(edge, 'LeftEdgeCoefficient', type='char').text = temp
+
+        # (4) RightType Node
+        typ = []
+        for transect in self.transects:
+            if transect.checked:
+                typ.append(transect.edges.right.type)
+        unique_type = set(typ)
+        num_types = len(unique_type)
+        if num_types > 1:
+            temp = 'Varies'
+        else:
+            temp = typ[0]
+        ETree.SubElement(edge, 'RightType', type='char').text = temp
+
+        # RightEdgeCoefficient
+        if temp in ('User Q', 'Varies'):
+            temp = 'N/A'
+        else:
+            coef = []
+            for transect in self.transects:
+                if transect.checked:
+                    coef.append(QComp.edge_coef('right', transect))
+            num_coef = len(set(coef))
+            if num_coef > 1:
+                temp = 'Varies'
+            else:
+                temp = '{:.4f}'.format(coef[0])
+        ETree.SubElement(edge, 'RightEdgeCoefficient', type='char').text = temp
+
+        # (3) Extrapolation Node
+        extrap = ETree.SubElement(processing, 'Extrapolation')
+
+        # (4) TopMethod Node
+        temp = self.transects[self.checked_transect_idx[0]].extrap.top_method
+        ETree.SubElement(extrap, 'TopMethod', type='char').text = temp
+
+        # (4) BottomMethod Node
+        temp = self.transects[self.checked_transect_idx[0]].extrap.bot_method
+        ETree.SubElement(extrap, 'BottomMethod', type='char').text = temp
+
+        # (4) Exponent Node
+        temp = self.transects[self.checked_transect_idx[0]].extrap.exponent
+        ETree.SubElement(extrap, 'Exponent', type='double').text = '{:.4f}'.format(temp)
+
+        # (4) Discharge weighted medians
+        temp = self.extrap_fit.use_weighted
+        if temp:
+            temp = 'Yes'
+        else:
+            temp = 'No'
+        ETree.SubElement(extrap, 'UseWeighted', type='char').text = temp
+
+        # (3) Sensor Node
+        sensor = ETree.SubElement(processing, 'Sensor')
+
+        # (4) TemperatureSource Node
+        temp = []
+        for transect in self.transects:
+            if transect.checked:
+                temp.append(transect.sensors.temperature_deg_c.selected)
+        sources = len(set(temp))
+        if sources > 1:
+            temp = 'Varies'
+        else:
+            temp = temp[0]
+        ETree.SubElement(sensor, 'TemperatureSource', type='char').text = temp
+
+        # (4) Salinity
+        temp = np.array([])
+        for transect in self.transects:
+            if transect.checked:
+                sal_selected = getattr(transect.sensors.salinity_ppt, transect.sensors.salinity_ppt.selected)
+                temp = np.append(temp, sal_selected.data)
+        values = np.unique(temp)
+        if len(values) > 1:
+            temp = 'Varies'
+        else:
+            temp = '{:2.1f}'.format(values[0])
+        ETree.SubElement(sensor, 'Salinity', type='char', unitsCode='ppt').text = temp
+
+        # (4) SpeedofSound Node
+        temp = []
+        for transect in self.transects:
+            if transect.checked:
+                temp.append(transect.sensors.speed_of_sound_mps.selected)
+        sources = len(set(temp))
+        if sources > 1:
+            temp = 'Varies'
+        else:
+            temp = temp[0]
+        if temp == 'internal':
+            temp = 'ADCP'
+        ETree.SubElement(sensor, 'SpeedofSound', type='char', unitsCode='mps').text = temp
+
+        # (2) Transect Node
+        other_prop = self.compute_measurement_properties(self)
+        for n in range(len(self.transects)):
+            if self.transects[n].checked:
+                transect = ETree.SubElement(channel, 'Transect')
+
+                # (3) Filename Node
+                temp = self.transects[n].file_name
+                ETree.SubElement(transect, 'Filename', type='char').text = temp
+
+                # (3) StartDateTime Node
+                temp = int(self.transects[n].date_time.start_serial_time)
+                temp = datetime.datetime.utcfromtimestamp(temp).strftime('%m/%d/%Y %H:%M:%S')
+                ETree.SubElement(transect, 'StartDateTime', type='char').text = temp
+
+                # (3) EndDateTime Node
+                temp = int(self.transects[n].date_time.end_serial_time)
+                temp = datetime.datetime.utcfromtimestamp(temp).strftime('%m/%d/%Y %H:%M:%S')
+                ETree.SubElement(transect, 'EndDateTime', type='char').text = temp
+
+                # (3) Discharge Node
+                t_q = ETree.SubElement(transect, 'Discharge')
+
+                # (4) Top Node
+                temp = self.discharge[n].top
+                ETree.SubElement(t_q, 'Top', type='double', unitsCode='cms').text = '{:.5f}'.format(temp)
+
+                # (4) Middle Node
+                temp = self.discharge[n].middle
+                ETree.SubElement(t_q, 'Middle', type='double', unitsCode='cms').text = '{:.5f}'.format(temp)
+
+                # (4) Bottom Node
+                temp = self.discharge[n].bottom
+                ETree.SubElement(t_q, 'Bottom', type='double', unitsCode='cms').text = '{:.5f}'.format(temp)
+
+                # (4) Left Node
+                temp = self.discharge[n].left
+                ETree.SubElement(t_q, 'Left', type='double', unitsCode='cms').text = '{:.5f}'.format(temp)
+
+                # (4) Right Node
+                temp = self.discharge[n].right
+                ETree.SubElement(t_q, 'Right', type='double', unitsCode='cms').text = '{:.5f}'.format(temp)
+
+                # (4) Total Node
+                temp = self.discharge[n].total
+                ETree.SubElement(t_q, 'Total', type='double', unitsCode='cms').text = '{:.5f}'.format(temp)
+
+                # (4) MovingBedPercentCorrection Node
+                temp = ((self.discharge[n].total / self.discharge[n].total_uncorrected) - 1) * 100
+                ETree.SubElement(t_q, 'MovingBedPercentCorrection', type='double').text = '{:.2f}'.format(temp)
+
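+                # Worked example: a total of 102.0 cms against an uncorrected
+                # 100.0 cms gives (102.0 / 100.0 - 1) * 100 = 2.0 percent.
+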
+                # (3) Edge Node
+                t_edge = ETree.SubElement(transect, 'Edge')
+
+                # (4) StartEdge Node
+                temp = self.transects[n].start_edge
+                ETree.SubElement(t_edge, 'StartEdge', type='char').text = temp
+
+                # (4) RectangularEdgeMethod Node
+                temp = self.transects[n].edges.rec_edge_method
+                ETree.SubElement(t_edge, 'RectangularEdgeMethod', type='char').text = temp
+
+                # (4) VelocityMethod Node
+                temp = self.transects[n].edges.vel_method
+                ETree.SubElement(t_edge, 'VelocityMethod', type='char').text = temp
+
+                # (4) LeftType Node
+                temp = self.transects[n].edges.left.type
+                ETree.SubElement(t_edge, 'LeftType', type='char').text = temp
+
+                # (4) LeftEdgeCoefficient Node
+                if temp == 'User Q':
+                    temp = ''
+                else:
+                    temp = '{:.4f}'.format(QComp.edge_coef('left', self.transects[n]))
+                ETree.SubElement(t_edge, 'LeftEdgeCoefficient', type='double').text = temp
+
+                # (4) LeftDistance Node
+                temp = '{:.4f}'.format(self.transects[n].edges.left.distance_m)
+                ETree.SubElement(t_edge, 'LeftDistance', type='double', unitsCode='m').text = temp
+
+                # (4) LeftNumberEnsembles
+                temp = '{:.0f}'.format(self.transects[n].edges.left.number_ensembles)
+                ETree.SubElement(t_edge, 'LeftNumberEnsembles', type='double').text = temp
+
+                # (4) RightType Node
+                temp = self.transects[n].edges.right.type
+                ETree.SubElement(t_edge, 'RightType', type='char').text = temp
+
+                # (4) RightEdgeCoefficient Node
+                if temp == 'User Q':
+                    temp = ''
+                else:
+                    temp = '{:.4f}'.format(QComp.edge_coef('right', self.transects[n]))
+                ETree.SubElement(t_edge, 'RightEdgeCoefficient', type='double').text = temp
+
+                # (4) RightDistance Node
+                temp = '{:.4f}'.format(self.transects[n].edges.right.distance_m)
+                ETree.SubElement(t_edge, 'RightDistance', type='double', unitsCode='m').text = temp
+
+                # (4) RightNumberEnsembles Node
+                temp = '{:.0f}'.format(self.transects[n].edges.right.number_ensembles)
+                ETree.SubElement(t_edge, 'RightNumberEnsembles', type='double').text = temp
+
+                # (3) Sensor Node
+                t_sensor = ETree.SubElement(transect, 'Sensor')
+
+                # (4) TemperatureSource Node
+                temp = self.transects[n].sensors.temperature_deg_c.selected
+                ETree.SubElement(t_sensor, 'TemperatureSource', type='char').text = temp
+
+                # (4) MeanTemperature Node
+                dat = getattr(self.transects[n].sensors.temperature_deg_c,
+                              self.transects[n].sensors.temperature_deg_c.selected)
+                temp = np.nanmean(dat.data)
+                temp = '{:.2f}'.format(temp)
+                ETree.SubElement(t_sensor, 'MeanTemperature', type='double', unitsCode='degC').text = temp
+
+                # (4) MeanSalinity
+                sal_data = getattr(self.transects[n].sensors.salinity_ppt,
+                                   self.transects[n].sensors.salinity_ppt.selected)
+                temp = '{:.0f}'.format(np.nanmean(sal_data.data))
+                ETree.SubElement(t_sensor, 'MeanSalinity', type='double', unitsCode='ppt').text = temp
+
+                # (4) SpeedofSoundSource Node
+                sos_selected = getattr(self.transects[n].sensors.speed_of_sound_mps,
+                                       self.transects[n].sensors.speed_of_sound_mps.selected)
+                temp = sos_selected.source
+                ETree.SubElement(t_sensor, 'SpeedofSoundSource', type='char').text = temp
+
+                # (4) SpeedofSound
+                sos_data = getattr(self.transects[n].sensors.speed_of_sound_mps,
+                                   self.transects[n].sensors.speed_of_sound_mps.selected)
+                temp = '{:.4f}'.format(np.nanmean(sos_data.data))
+                ETree.SubElement(t_sensor, 'SpeedofSound', type='double', unitsCode='mps').text = temp
+
+                # (3) Other Node
+                t_other = ETree.SubElement(transect, 'Other')
+
+                # (4) Duration Node
+                temp = '{:.2f}'.format(self.transects[n].date_time.transect_duration_sec)
+                ETree.SubElement(t_other, 'Duration', type='double', unitsCode='sec').text = temp
+
+                # (4) Width
+                temp = other_prop['width'][n]
+                ETree.SubElement(t_other, 'Width', type='double', unitsCode='m').text = '{:.4f}'.format(temp)
+
+                # (4) Area
+                temp = other_prop['area'][n]
+                ETree.SubElement(t_other, 'Area', type='double', unitsCode='sqm').text = '{:.4f}'.format(temp)
+
+                # (4) MeanBoatSpeed
+                temp = other_prop['avg_boat_speed'][n]
+                ETree.SubElement(t_other, 'MeanBoatSpeed', type='double', unitsCode='mps').text = '{:.4f}'.format(temp)
+
+                # (4) QoverA
+                temp = other_prop['avg_water_speed'][n]
+                ETree.SubElement(t_other, 'QoverA', type='double', unitsCode='mps').text = '{:.4f}'.format(temp)
+
+                # (4) CourseMadeGood
+                temp = other_prop['avg_boat_course'][n]
+                ETree.SubElement(t_other, 'CourseMadeGood', type='double', unitsCode='deg').text = '{:.2f}'.format(temp)
+
+                # (4) MeanFlowDirection
+                temp = other_prop['avg_water_dir'][n]
+                ETree.SubElement(t_other, 'MeanFlowDirection', type='double',
+                                 unitsCode='deg').text = '{:.2f}'.format(temp)
+
+                # (4) NumberofEnsembles
+                temp = len(self.transects[n].boat_vel.bt_vel.u_processed_mps)
+                ETree.SubElement(t_other, 'NumberofEnsembles', type='integer').text = str(temp)
+
+                # (4) PercentInvalidBins
+                valid_ens, valid_cells = TransectData.raw_valid_data(self.transects[n])
+                temp = (1 - (np.nansum(np.nansum(valid_cells))
+                             / np.nansum(np.nansum(self.transects[n].w_vel.cells_above_sl)))) * 100
+                ETree.SubElement(t_other, 'PercentInvalidBins', type='double').text = '{:.2f}'.format(temp)
+
+                # (4) PercentInvalidEnsembles
+                temp = (1 - (np.nansum(valid_ens) / len(self.transects[n].boat_vel.bt_vel.u_processed_mps))) * 100
+                ETree.SubElement(t_other, 'PercentInvalidEns', type='double').text = '{:.2f}'.format(temp)
+
+                pitch_source_selected = getattr(self.transects[n].sensors.pitch_deg,
+                                                self.transects[n].sensors.pitch_deg.selected)
+                roll_source_selected = getattr(self.transects[n].sensors.roll_deg,
+                                               self.transects[n].sensors.roll_deg.selected)
+
+                # (4) MeanPitch
+                temp = np.nanmean(pitch_source_selected.data)
+                ETree.SubElement(t_other, 'MeanPitch', type='double', unitsCode='deg').text = '{:.2f}'.format(temp)
+
+                # (4) MeanRoll
+                temp = np.nanmean(roll_source_selected.data)
+                ETree.SubElement(t_other, 'MeanRoll', type='double', unitsCode='deg').text = '{:.2f}'.format(temp)
+
+                # (4) PitchStdDev
+                temp = np.nanstd(pitch_source_selected.data, ddof=1)
+                ETree.SubElement(t_other, 'PitchStdDev', type='double', unitsCode='deg').text = '{:.2f}'.format(temp)
+
+                # (4) RollStdDev
+                temp = np.nanstd(roll_source_selected.data, ddof=1)
+                ETree.SubElement(t_other, 'RollStdDev', type='double', unitsCode='deg').text = '{:.2f}'.format(temp)
+
+                # (4) ADCPDepth
+                depth_source_selected = getattr(self.transects[n].depths,
+                                                self.transects[n].depths.selected)
+                temp = depth_source_selected.draft_use_m
+                ETree.SubElement(t_other, 'ADCPDepth', type='double', unitsCode='m').text = '{:.4f}'.format(temp)
+
+        # (2) ChannelSummary Node
+        summary = ETree.SubElement(channel, 'ChannelSummary')
+
+        # (3) Discharge Node
+        s_q = ETree.SubElement(summary, 'Discharge')
+        discharge = self.mean_discharges(self)
+
+        # (4) Top
+        temp = discharge['top_mean']
+        ETree.SubElement(s_q, 'Top', type='double', unitsCode='cms').text = '{:.5f}'.format(temp)
+
+        # (4) Middle
+        temp = discharge['mid_mean']
+        ETree.SubElement(s_q, 'Middle', type='double', unitsCode='cms').text = '{:.5f}'.format(temp)
+
+        # (4) Bottom
+        temp = discharge['bot_mean']
+        ETree.SubElement(s_q, 'Bottom', type='double', unitsCode='cms').text = '{:.5f}'.format(temp)
+
+        # (4) Left
+        temp = discharge['left_mean']
+        ETree.SubElement(s_q, 'Left', type='double', unitsCode='cms').text = '{:.5f}'.format(temp)
+
+        # (4) Right
+        temp = discharge['right_mean']
+        ETree.SubElement(s_q, 'Right', type='double', unitsCode='cms').text = '{:.5f}'.format(temp)
+
+        # (4) Total
+        temp = discharge['total_mean']
+        ETree.SubElement(s_q, 'Total', type='double', unitsCode='cms').text = '{:.5f}'.format(temp)
+
+        # (4) MovingBedPercentCorrection
+        temp = ((discharge['total_mean'] / discharge['uncorrected_mean']) - 1) * 100
+        ETree.SubElement(s_q, 'MovingBedPercentCorrection', type='double').text = '{:.2f}'.format(temp)
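+        # Illustrative arithmetic (hypothetical values): total_mean = 102 cms and
+        # uncorrected_mean = 100 cms give ((102 / 100) - 1) * 100 = 2.00 percent.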
+
+        # (3) Uncertainty Node
+        s_u = ETree.SubElement(summary, 'Uncertainty')
+        if self.run_oursin:
+            u_total = self.oursin.u_measurement_user['total_95'][0]
+            u_model = 'OURSIN'
+        else:
+            u_total = self.uncertainty.total_95_user
+            u_model = 'QRevUA'
+
+        if not np.isnan(u_total):
+            ETree.SubElement(s_u, 'Total', type='double').text = '{:.1f}'.format(u_total)
+            ETree.SubElement(s_u, 'Model', type='char').text = u_model
+
+        # (3) QRev_UA Uncertainty Node
+        if self.uncertainty is not None:
+            s_qu = ETree.SubElement(summary, 'QRevUAUncertainty')
+            uncertainty = self.uncertainty
+
+            # (4) COV Node
+            temp = uncertainty.cov
+            if not np.isnan(temp):
+                ETree.SubElement(s_qu, 'COV', type='double').text = '{:.1f}'.format(temp)
+
+            # (4) AutoRandom Node
+            temp = uncertainty.cov_95
+            if not np.isnan(temp):
+                ETree.SubElement(s_qu, 'AutoRandom', type='double').text = '{:.1f}'.format(temp)
+
+            # (4) AutoInvalidData Node
+            temp = uncertainty.invalid_95
+            ETree.SubElement(s_qu, 'AutoInvalidData', type='double').text = '{:.1f}'.format(temp)
+
+            # (4) AutoEdge Node
+            temp = uncertainty.edges_95
+            ETree.SubElement(s_qu, 'AutoEdge', type='double').text = '{:.1f}'.format(temp)
+
+            # (4) AutoExtrapolation Node
+            temp = uncertainty.extrapolation_95
+            ETree.SubElement(s_qu, 'AutoExtrapolation', type='double').text = '{:.1f}'.format(temp)
+
+            # (4) AutoMovingBed
+            temp = uncertainty.moving_bed_95
+            ETree.SubElement(s_qu, 'AutoMovingBed', type='double').text = '{:.1f}'.format(temp)
+
+            # (4) AutoSystematic
+            temp = uncertainty.systematic
+            ETree.SubElement(s_qu, 'AutoSystematic', type='double').text = '{:.1f}'.format(temp)
+
+            # (4) AutoTotal
+            temp = uncertainty.total_95
+            if not np.isnan(temp):
+                ETree.SubElement(s_qu, 'AutoTotal', type='double').text = '{:.1f}'.format(temp)
+
+            # (4) UserRandom Node
+            user_random = uncertainty.cov_95_user
+            if user_random:
+                ETree.SubElement(s_qu, 'UserRandom', type='double').text = '{:.1f}'.format(user_random)
+
+            # (4) UserInvalidData Node
+            user_invalid = uncertainty.invalid_95_user
+            if user_invalid:
+                ETree.SubElement(s_qu, 'UserInvalidData', type='double').text = '{:.1f}'.format(user_invalid)
+
+            # (4) UserEdge
+            user_edge = uncertainty.edges_95_user
+            if user_edge:
+                ETree.SubElement(s_qu, 'UserEdge', type='double').text = '{:.1f}'.format(user_edge)
+
+            # (4) UserExtrapolation
+            user_extrap = uncertainty.extrapolation_95_user
+            if user_extrap:
+                ETree.SubElement(s_qu, 'UserExtrapolation', type='double').text = '{:.1f}'.format(user_extrap)
+
+            # (4) UserMovingBed
+            user_mb = uncertainty.moving_bed_95_user
+            if user_mb:
+                ETree.SubElement(s_qu, 'UserMovingBed', type='double').text = '{:.1f}'.format(user_mb)
+
+            # (4) UserSystematic
+            user_systematic = uncertainty.systematic_user
+            if user_systematic:
+                ETree.SubElement(s_qu, 'UserSystematic', type='double').text = '{:.1f}'.format(user_systematic)
+
+            # (4) UserTotal Node
+            temp = uncertainty.total_95_user
+            if not np.isnan(temp):
+                ETree.SubElement(s_qu, 'UserTotal', type='double').text = '{:.1f}'.format(temp)
+
+            # (4) Random
+            if user_random:
+                temp = user_random
+            else:
+                temp = uncertainty.cov_95
+            if not np.isnan(temp):
+                ETree.SubElement(s_qu, 'Random', type='double').text = '{:.1f}'.format(temp)
+
+            # (4) InvalidData
+            if user_invalid:
+                temp = user_invalid
+            else:
+                temp = uncertainty.invalid_95
+            ETree.SubElement(s_qu, 'InvalidData', type='double').text = '{:.1f}'.format(temp)
+
+            # (4) Edge
+            if user_edge:
+                temp = user_edge
+            else:
+                temp = uncertainty.edges_95
+            ETree.SubElement(s_qu, 'Edge', type='double').text = '{:.1f}'.format(temp)
+
+            # (4) Extrapolation
+            if user_extrap:
+                temp = user_extrap
+            else:
+                temp = uncertainty.extrapolation_95
+            ETree.SubElement(s_qu, 'Extrapolation', type='double').text = '{:.1f}'.format(temp)
+
+            # (4) MovingBed
+            if user_mb:
+                temp = user_mb
+            else:
+                temp = uncertainty.moving_bed_95
+            ETree.SubElement(s_qu, 'MovingBed', type='double').text = '{:.1f}'.format(temp)
+
+            # (4) Systematic
+            if user_systematic:
+                temp = user_systematic
+            else:
+                temp = uncertainty.systematic
+            ETree.SubElement(s_qu, 'Systematic', type='double').text = '{:.1f}'.format(temp)
+
+            # (4) Total Node
+            temp = uncertainty.total_95_user
+            if not np.isnan(temp):
+                ETree.SubElement(s_qu, 'Total', type='double').text = '{:.1f}'.format(temp)
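+            # The combined nodes above (Random through Systematic) follow one fallback
+            # rule: report the user override when one is set, otherwise the automatic
+            # value. A compact sketch of the same rule ('pick' is hypothetical, not
+            # part of this class):
+            #   def pick(user_value, auto_value):
+            #       return user_value if user_value else auto_value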
+
+        # Oursin Uncertainty Node
+        if self.oursin is not None:
+            # (3) Uncertainty Node
+            s_ou = ETree.SubElement(summary, 'OursinUncertainty')
+            oursin = self.oursin
+
+            # (4) System Node
+            temp = oursin.u_measurement['u_syst'][0]
+            if not np.isnan(temp):
+                ETree.SubElement(s_ou, 'System', type='double').text = '{:.2f}'.format(temp)
+
+            # (4) Compass Node
+            temp = oursin.u_measurement['u_compass'][0]
+            if not np.isnan(temp):
+                ETree.SubElement(s_ou, 'Compass', type='double').text = '{:.2f}'.format(temp)
+
+            # (4) Moving-bed Node
+            temp = oursin.u_measurement['u_movbed'][0]
+            if not np.isnan(temp):
+                ETree.SubElement(s_ou, 'MovingBed', type='double').text = '{:.2f}'.format(temp)
+
+            # (4) Ensembles Node
+            temp = oursin.u_measurement['u_ens'][0]
+            if not np.isnan(temp):
+                ETree.SubElement(s_ou, 'Ensembles', type='double').text = '{:.2f}'.format(temp)
+
+            # (4) Measured Node
+            temp = oursin.u_measurement['u_meas'][0]
+            if not np.isnan(temp):
+                ETree.SubElement(s_ou, 'Measured', type='double').text = '{:.2f}'.format(temp)
+
+            # (4) Top Node
+            temp = oursin.u_measurement['u_top'][0]
+            if not np.isnan(temp):
+                ETree.SubElement(s_ou, 'Top', type='double').text = '{:.2f}'.format(temp)
+
+            # (4) Bottom Node
+            temp = oursin.u_measurement['u_bot'][0]
+            if not np.isnan(temp):
+                ETree.SubElement(s_ou, 'Bottom', type='double').text = '{:.2f}'.format(temp)
+
+            # (4) Left Node
+            temp = oursin.u_measurement['u_left'][0]
+            if not np.isnan(temp):
+                ETree.SubElement(s_ou, 'Left', type='double').text = '{:.2f}'.format(temp)
+
+            # (4) Right Node
+            temp = oursin.u_measurement['u_right'][0]
+            if not np.isnan(temp):
+                ETree.SubElement(s_ou, 'Right', type='double').text = '{:.2f}'.format(temp)
+
+            # (4) Invalid Boat Node
+            temp = oursin.u_measurement['u_boat'][0]
+            if not np.isnan(temp):
+                ETree.SubElement(s_ou, 'InvalidBoat', type='double').text = '{:.2f}'.format(temp)
+
+            # (4) Invalid Depth Node
+            temp = oursin.u_measurement['u_depth'][0]
+            if not np.isnan(temp):
+                ETree.SubElement(s_ou, 'InvalidDepth', type='double').text = '{:.2f}'.format(temp)
+
+            # (4) Invalid Water Node
+            temp = oursin.u_measurement['u_water'][0]
+            if not np.isnan(temp):
+                ETree.SubElement(s_ou, 'InvalidWater', type='double').text = '{:.2f}'.format(temp)
+
+            # (4) COV Node
+            temp = oursin.u_measurement['u_cov'][0]
+            if not np.isnan(temp):
+                ETree.SubElement(s_ou, 'COV', type='double').text = '{:.2f}'.format(temp)
+
+            # (4) Auto Total 95% Node
+            temp = oursin.u_measurement['total_95'][0]
+            if not np.isnan(temp):
+                ETree.SubElement(s_ou, 'AutoTotal95', type='double').text = '{:.2f}'.format(temp)
+
+            # (4) Extrapolation Power/Power Minimum
+            temp = oursin.default_advanced_settings['exp_pp_min']
+            if type(temp) is float:
+                ETree.SubElement(s_ou, 'ExtrapPPMin', type='double').text = '{:.2f}'.format(temp)
+            else:
+                ETree.SubElement(s_ou, 'ExtrapPPMin', type='char').text = temp
+
+            # (4) Extrapolation Power/Power Maximum
+            temp = oursin.default_advanced_settings['exp_pp_max']
+            if type(temp) is float:
+                ETree.SubElement(s_ou, 'ExtrapPPMax', type='double').text = '{:.2f}'.format(temp)
+            else:
+                ETree.SubElement(s_ou, 'ExtrapPPMax', type='char').text = temp
+
+            # (4) Extrapolation No Slip Minimum
+            temp = oursin.default_advanced_settings['exp_ns_min']
+            if type(temp) is float:
+                ETree.SubElement(s_ou, 'ExtrapNSMin', type='double').text = '{:.2f}'.format(temp)
+            else:
+                ETree.SubElement(s_ou, 'ExtrapNSMin', type='char').text = temp
+
+            # (4) Extrapolation No Slip Maximum
+            temp = oursin.default_advanced_settings['exp_ns_max']
+            if type(temp) is float:
+                ETree.SubElement(s_ou, 'ExtrapNSMax', type='double').text = '{:.2f}'.format(temp)
+            else:
+                ETree.SubElement(s_ou, 'ExtrapNSMax', type='char').text = temp
+
+            # (4) Draft error in m
+            temp = oursin.default_advanced_settings['draft_error_m']
+            if type(temp) is float:
+                ETree.SubElement(s_ou, 'DraftErrorm', type='double').text = '{:.2f}'.format(temp)
+            else:
+                ETree.SubElement(s_ou, 'DraftErrorm', type='char').text = temp
+
+            # (4) Bin size error in percent
+            temp = oursin.default_advanced_settings['dzi_prct']
+            if not np.isnan(temp):
+                ETree.SubElement(s_ou, 'BinErrorPer', type='double').text = '{:.2f}'.format(temp)
+
+            # (4) Right edge distance error in percent
+            temp = oursin.default_advanced_settings['right_edge_dist_prct']
+            if not np.isnan(temp):
+                ETree.SubElement(s_ou, 'REdgeDistErrorPer', type='double').text = '{:.2f}'.format(temp)
+
+            # (4) Left edge distance error in percent
+            temp = oursin.default_advanced_settings['left_edge_dist_prct']
+            if not np.isnan(temp):
+                ETree.SubElement(s_ou, 'LEdgeDistErrorPer', type='double').text = '{:.2f}'.format(temp)
+
+            # (4) GGA Boat Velocity Error in mps
+            temp = oursin.default_advanced_settings['gga_boat_mps']
+            if type(temp) is float:
+                ETree.SubElement(s_ou, 'GGABoatVelErrormps', type='double').text = '{:.2f}'.format(temp)
+            else:
+                ETree.SubElement(s_ou, 'GGABoatVelErrormps', type='char').text = temp
+
+            # (4) VTG Boat Velocity Error in mps
+            temp = oursin.default_advanced_settings['vtg_boat_mps']
+            if not np.isnan(temp):
+                ETree.SubElement(s_ou, 'VTGBoatVelErrormps', type='double').text = '{:.2f}'.format(temp)
+
+            # (4) Compass Error in deg
+            temp = oursin.default_advanced_settings['compass_error_deg']
+            if not np.isnan(temp):
+                ETree.SubElement(s_ou, 'CompassErrordeg', type='double').text = '{:.2f}'.format(temp)
+
+            # (4) Bayesian COV prior in percent
+            temp = oursin.default_advanced_settings['cov_prior']
+            if not np.isnan(temp):
+                ETree.SubElement(s_ou, 'BayesCOVPriorper', type='double').text = '{:.2f}'.format(temp)
+
+            # (4) Bayesian COV prior uncertainty in percent
+            temp = oursin.default_advanced_settings['cov_prior_u']
+            if not np.isnan(temp):
+                ETree.SubElement(s_ou, 'BayesCOVPriorUncertaintyper', type='double').text = '{:.2f}'.format(temp)
+
+            # User
+
+            # (4) System Node
+            temp = oursin.u_measurement_user['u_syst'][0]
+            if not np.isnan(temp):
+                ETree.SubElement(s_ou, 'SystemUser', type='double').text = '{:.2f}'.format(temp)
+
+            # (4) Compass Node
+            temp = oursin.u_measurement_user['u_compass'][0]
+            if not np.isnan(temp):
+                ETree.SubElement(s_ou, 'CompassUser', type='double').text = '{:.2f}'.format(temp)
+
+            # (4) Moving-bed Node
+            temp = oursin.u_measurement_user['u_movbed'][0]
+            if not np.isnan(temp):
+                ETree.SubElement(s_ou, 'MovingBedUser', type='double').text = '{:.2f}'.format(temp)
+
+            # (4) Ensembles Node
+            temp = oursin.u_measurement_user['u_ens'][0]
+            if not np.isnan(temp):
+                ETree.SubElement(s_ou, 'EnsemblesUser', type='double').text = '{:.2f}'.format(temp)
+
+            # (4) Measured Node
+            temp = oursin.u_measurement_user['u_meas'][0]
+            if not np.isnan(temp):
+                ETree.SubElement(s_ou, 'MeasuredUser', type='double').text = '{:.2f}'.format(temp)
+
+            # (4) Top Node
+            temp = oursin.u_measurement_user['u_top'][0]
+            if not np.isnan(temp):
+                ETree.SubElement(s_ou, 'TopUser', type='double').text = '{:.2f}'.format(temp)
+
+            # (4) Bottom Node
+            temp = oursin.u_measurement_user['u_bot'][0]
+            if not np.isnan(temp):
+                ETree.SubElement(s_ou, 'BottomUser', type='double').text = '{:.2f}'.format(temp)
+
+            # (4) Left Node
+            temp = oursin.u_measurement_user['u_left'][0]
+            if not np.isnan(temp):
+                ETree.SubElement(s_ou, 'LeftUser', type='double').text = '{:.2f}'.format(temp)
+
+            # (4) Right Node
+            temp = oursin.u_measurement_user['u_right'][0]
+            if not np.isnan(temp):
+                ETree.SubElement(s_ou, 'RightUser', type='double').text = '{:.2f}'.format(temp)
+
+            # (4) Invalid Boat Node
+            temp = oursin.u_measurement_user['u_boat'][0]
+            if not np.isnan(temp):
+                ETree.SubElement(s_ou, 'InvalidBoatUser', type='double').text = '{:.2f}'.format(temp)
+
+            # (4) Invalid Depth Node
+            temp = oursin.u_measurement_user['u_depth'][0]
+            if not np.isnan(temp):
+                ETree.SubElement(s_ou, 'InvalidDepthUser', type='double').text = '{:.2f}'.format(temp)
+
+            # (4) Invalid Water Node
+            temp = oursin.u_measurement_user['u_water'][0]
+            if not np.isnan(temp):
+                ETree.SubElement(s_ou, 'InvalidWaterUser', type='double').text = '{:.2f}'.format(temp)
+
+            # (4) Auto Total 95% Node
+            temp = oursin.u_measurement_user['total_95'][0]
+            if not np.isnan(temp):
+                ETree.SubElement(s_ou, 'AutoTotal95User', type='double').text = '{:.2f}'.format(temp)
+
+            # (4) Extrapolation Power/Power Minimum
+            temp = oursin.user_advanced_settings['exp_pp_min_user']
+            if not np.isnan(temp):
+                ETree.SubElement(s_ou, 'ExtrapPPMinUser', type='double').text = '{:.2f}'.format(temp)
+
+            # (4) Extrapolation Power/Power Maximum
+            temp = oursin.user_advanced_settings['exp_pp_max_user']
+            if not np.isnan(temp):
+                ETree.SubElement(s_ou, 'ExtrapPPMaxUser', type='double').text = '{:.2f}'.format(temp)
+
+            # (4) Extrapolation No Slip Minimum
+            temp = oursin.user_advanced_settings['exp_ns_min_user']
+            if not np.isnan(temp):
+                ETree.SubElement(s_ou, 'ExtrapNSMinUser', type='double').text = '{:.2f}'.format(temp)
+
+            # (4) Extrapolation No Slip Maximum
+            temp = oursin.user_advanced_settings['exp_ns_max_user']
+            if not np.isnan(temp):
+                ETree.SubElement(s_ou, 'ExtrapNSMaxUser', type='double').text = '{:.2f}'.format(temp)
+
+            # (4) Draft error in m
+            temp = oursin.user_advanced_settings['draft_error_m_user']
+            if not np.isnan(temp):
+                ETree.SubElement(s_ou, 'DraftErrormUser', type='double').text = '{:.2f}'.format(temp)
+
+            # (4) Bin size error in percent
+            temp = oursin.user_advanced_settings['dzi_prct_user']
+            if not np.isnan(temp):
+                ETree.SubElement(s_ou, 'BinErrorperUser', type='double').text = '{:.2f}'.format(temp)
+
+            # (4) Right edge distance error in percent
+            temp = oursin.user_advanced_settings['right_edge_dist_prct_user']
+            if not np.isnan(temp):
+                ETree.SubElement(s_ou, 'REdgeDistErrorperUser', type='double').text = '{:.2f}'.format(temp)
+
+            # (4) Left edge distance error in percent
+            temp = oursin.user_advanced_settings['left_edge_dist_prct_user']
+            if not np.isnan(temp):
+                ETree.SubElement(s_ou, 'LEdgeDistErrorperUser', type='double').text = '{:.2f}'.format(temp)
+
+            # (4) GGA Boat Velocity Error in mps
+            temp = oursin.user_advanced_settings['gga_boat_mps_user']
+            if not np.isnan(temp):
+                ETree.SubElement(s_ou, 'GGABoatVelErrormpsUser', type='double').text = '{:.2f}'.format(temp)
+
+            # (4) VTG Boat Velocity Error in mps
+            temp = oursin.user_advanced_settings['vtg_boat_mps_user']
+            if not np.isnan(temp):
+                ETree.SubElement(s_ou, 'VTGBoatVelErrormpsUser', type='double').text = '{:.2f}'.format(temp)
+
+            # (4) Compass Error in deg
+            temp = oursin.user_advanced_settings['compass_error_deg_user']
+            if not np.isnan(temp):
+                ETree.SubElement(s_ou, 'CompassErrordegUser', type='double').text = '{:.2f}'.format(temp)
+
+            # (4) Bayesian COV prior in percent
+            temp = oursin.user_advanced_settings['cov_prior_user']
+            if not np.isnan(temp):
+                ETree.SubElement(s_ou, 'BayesCOVPriorperUser', type='double').text = '{:.2f}'.format(temp)
+
+            # (4) Bayesian COV prior uncertainty in percent
+            temp = oursin.user_advanced_settings['cov_prior_u_user']
+            if not np.isnan(temp):
+                ETree.SubElement(s_ou, 'BayesCOVPriorUncertaintyperUser', type='double').text = '{:.2f}'.format(temp)
+
+        # (3) Other Node
+        s_o = ETree.SubElement(summary, 'Other')
+
+        # (4) MeanWidth
+        temp = other_prop['width'][-1]
+        ETree.SubElement(s_o, 'MeanWidth', type='double', unitsCode='m').text = '{:.4f}'.format(temp)
+
+        # (4) WidthCOV
+        temp = other_prop['width_cov'][-1]
+        if not np.isnan(temp):
+            ETree.SubElement(s_o, 'WidthCOV', type='double').text = '{:.4f}'.format(temp)
+
+        # (4) MeanArea
+        temp = other_prop['area'][-1]
+        ETree.SubElement(s_o, 'MeanArea', type='double', unitsCode='sqm').text = '{:.4f}'.format(temp)
+
+        # (4) AreaCOV
+        temp = other_prop['area_cov'][-1]
+        if not np.isnan(temp):
+            ETree.SubElement(s_o, 'AreaCOV', type='double').text = '{:.2f}'.format(temp)
+
+        # (4) MeanBoatSpeed
+        temp = other_prop['avg_boat_speed'][-1]
+        ETree.SubElement(s_o, 'MeanBoatSpeed', type='double', unitsCode='mps').text = '{:.4f}'.format(temp)
+
+        # (4) MeanQoverA
+        temp = other_prop['avg_water_speed'][-1]
+        ETree.SubElement(s_o, 'MeanQoverA', type='double', unitsCode='mps').text = '{:.4f}'.format(temp)
+
+        # (4) MeanCourseMadeGood
+        temp = other_prop['avg_boat_course'][-1]
+        ETree.SubElement(s_o, 'MeanCourseMadeGood', type='double', unitsCode='deg').text = '{:.2f}'.format(temp)
+
+        # (4) MeanFlowDirection
+        temp = other_prop['avg_water_dir'][-1]
+        ETree.SubElement(s_o, 'MeanFlowDirection', type='double', unitsCode='deg').text = '{:.2f}'.format(temp)
+
+        # (4) MeanDepth
+        temp = other_prop['avg_depth'][-1]
+        ETree.SubElement(s_o, 'MeanDepth', type='double', unitsCode='m').text = '{:.4f}'.format(temp)
+
+        # (4) MaximumDepth
+        temp = other_prop['max_depth'][-1]
+        ETree.SubElement(s_o, 'MaximumDepth', type='double', unitsCode='m').text = '{:.4f}'.format(temp)
+
+        # (4) MaximumWaterSpeed
+        temp = other_prop['max_water_speed'][-1]
+        ETree.SubElement(s_o, 'MaximumWaterSpeed', type='double', unitsCode='mps').text = '{:.4f}'.format(temp)
+
+        # (4) NumberofTransects
+        temp = len(self.checked_transects(self))
+        ETree.SubElement(s_o, 'NumberofTransects', type='integer').text = str(temp)
+
+        # (4) Duration
+        temp = self.measurement_duration(self)
+        ETree.SubElement(s_o, 'Duration', type='double', unitsCode='sec').text = '{:.2f}'.format(temp)
+
+        # (4) LeftQPer
+        temp = 100 * discharge['left_mean'] / discharge['total_mean']
+        ETree.SubElement(s_o, 'LeftQPer', type='double').text = '{:.2f}'.format(temp)
+
+        # (4) RightQPer
+        temp = 100 * discharge['right_mean'] / discharge['total_mean']
+        ETree.SubElement(s_o, 'RightQPer', type='double').text = '{:.2f}'.format(temp)
+
+        # (4) InvalidCellsQPer
+        temp = 100 * discharge['int_cells_mean'] / discharge['total_mean']
+        ETree.SubElement(s_o, 'InvalidCellsQPer', type='double').text = '{:.2f}'.format(temp)
+
+        # (4) InvalidEnsQPer
+        temp = 100 * discharge['int_ensembles_mean'] / discharge['total_mean']
+        ETree.SubElement(s_o, 'InvalidEnsQPer', type='double').text = '{:.2f}'.format(temp)
+
+        # (4) UserRating
+        if self.user_rating:
+            temp = self.user_rating
+        else:
+            temp = 'Not Rated'
+        ETree.SubElement(s_o, 'UserRating', type='char').text = temp
+
+        # (4) DischargePPDefault
+        temp = self.extrap_fit.q_sensitivity.q_pp_mean
+        ETree.SubElement(s_o, 'DischargePPDefault', type='double').text = '{:.2f}'.format(temp)
+
+        # (2) UserComment
+        if len(self.comments) > 1:
+            temp = ''
+            for comment in self.comments:
+                temp = temp + comment.replace('\n', ' |||') + ' |||'
+            ETree.SubElement(channel, 'UserComment', type='char').text = temp
+
+        # Create xml output file
+        with open(file_name, 'wb') as xml_file:
+            # Create binary coded output file
+            et = ETree.ElementTree(channel)
+            root = et.getroot()
+            xml_out = ETree.tostring(root)
+            # Add stylesheet instructions
+            xml_out = b'<?xml-stylesheet type= "text/xsl" href="QRevStylesheet.xsl"?>' + xml_out
+            # Add tabs to make output more readable and apply utf-8 encoding
+            xml_out = parseString(xml_out).toprettyxml(encoding='utf-8')
+            # Write file
+            xml_file.write(xml_out)
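+            # A minimal sketch of the pretty-printing idiom used above:
+            #   parseString(b'<a><b/></a>').toprettyxml(encoding='utf-8')
+            # returns b'<?xml version="1.0" encoding="utf-8"?>\n<a>\n\t<b/>\n</a>\n',
+            # i.e. minidom re-serializes with tab indentation and a utf-8 declaration.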
+
+    @staticmethod
+    def add_transect(mmt, filename, index, transect_type):
+        """Processes a pd0 file into a TransectData object.
+
+        Parameters
+        ----------
+        mmt: MMT_TRDI
+            Object of MMT_TRDI
+        filename: str
+            Pd0 filename to be processed
+        index: int
+            Index to file in the mmt
+        transect_type: str
+            Indicates type of transect: discharge (Q) or moving-bed (MB)
+
+        Returns
+        -------
+        transect: TransectData
+            Object of TransectData
+        """
+        pd0_data = Pd0TRDI(filename)
+
+        if transect_type == 'MB':
+            mmt_transect = mmt.mbt_transects[index]
+        else:
+            mmt_transect = mmt.transects[index]
+
+        transect = TransectData()
+        transect.trdi(mmt=mmt,
+                      mmt_transect=mmt_transect,
+                      pd0_data=pd0_data)
+        return transect
+
+    def allocate_transects(self, mmt, transect_type='Q', checked=False):
+        """Method to load transect data. Changed from Matlab approach by Greg to allow possibility
+        of multi-thread approach.
+
+        Parameters
+        ----------
+        mmt: MMT_TRDI
+            Object of MMT_TRDI
+        transect_type: str
+            Type of transect (Q: discharge or MB: moving-bed test)
+        checked: bool
+            Determines if all files are loaded (False) or only checked files (True)
+        """
+
+        file_names = []
+        file_idx = []
+
+        # Setup processing for discharge or moving-bed transects
+        if transect_type == 'Q':
+            # Identify discharge transect files to load
+            if checked:
+                for idx, transect in enumerate(mmt.transects):
+                    if transect.Checked == 1:
+                        file_names.append(transect.Files[0])
+                        file_idx.append(idx)
+
+            else:
+                file_names = [transect.Files[0] for transect in mmt.transects]
+                file_idx = list(range(0, len(file_names)))
+
+        elif transect_type == 'MB':
+            file_names = [transect.Files[0] for transect in mmt.mbt_transects]
+            file_idx = list(range(0, len(file_names)))
+
+        # Determine if any files are missing
+        valid_files = []
+        valid_indices = []
+        for index, name in enumerate(file_names):
+            fullname = os.path.join(mmt.path, name)
+            if os.path.exists(fullname):
+                valid_files.append(fullname)
+                valid_indices.append(file_idx[index])
+
+        transects = []
+        num = len(valid_indices)
+
+        for k in range(num):
+            temp = self.add_transect(mmt, valid_files[k], valid_indices[k], transect_type)
+            if temp.w_vel is not None:
+                transects.append(temp)
+
+        return transects
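+
+    # Usage sketch for allocate_transects (hypothetical object names; assumes this
+    # method is called on a Measurement instance with a loaded MMT_TRDI object):
+    #   transects = measurement.allocate_transects(mmt, transect_type='Q', checked=True)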
+
+
+if __name__ == '__main__':
+    pass
diff --git a/Classes/MovingBedTests.py b/Classes/MovingBedTests.py
new file mode 100644
index 0000000..117ab21
--- /dev/null
+++ b/Classes/MovingBedTests.py
@@ -0,0 +1,1038 @@
+import copy
+import numpy as np
+from Classes.TransectData import adjusted_ensemble_duration
+from Classes.TransectData import TransectData
+from Classes.QComp import QComp
+from Classes.MatSonTek import MatSonTek
+from MiscLibs.common_functions import cart2pol, sind, pol2cart, rad2azdeg, nan_less, nan_greater
+
+
+class MovingBedTests(object):
+    """Stores and processes moving-bed tests.
+
+    Attributes
+    ----------
+    type: str
+        Loop or Stationary
+    transect: TransectData
+        Object of TransectData
+    duration_sec: float
+        Duration of test, in secs
+    percent_invalid_bt: float
+        Percent of invalid bottom track
+    compass_diff_deg: float
+        Difference in heading for out and back of loop
+    flow_dir: float
+        Mean flow direction from loop test
+    mb_dir: float
+        Moving bed or closure error direction
+    dist_us_m: float
+        Distance moved upstream, in m
+    flow_spd_mps: float
+        Magnitude of water velocity, in mps
+    mb_spd_mps: float
+        Magnitude of moving-bed velocity, in mps
+    percent_mb: float
+        Potential error due to moving bed, in percent
+    moving_bed: str
+        Moving-bed determined ("Yes" or "No")
+    user_valid: bool
+        Boolean to allow user to determine if test should be considered a valid test (True or False)
+    test_quality: str
+        Quality of test, 'Valid' 'Warnings' 'Errors'
+    use_2_correct: bool
+        Use this test to correct discharge (True or False)
+    selected: bool
+        Selected as valid moving-bed test to use for correction or determine moving-bed condition
+    messages: list
+        List of strings for warning and error messages based on data processing
+    near_bed_speed_mps: float
+        Mean near-bed water speed for test, in mps
+    stationary_us_track: np.array(float)
+        Upstream component of the bottom track referenced ship track
+    stationary_cs_track: np.array(float)
+        Cross-stream component of the bottom track referenced ship track
+    stationary_mb_vel: np.array(float)
+        Moving-bed velocity by ensemble, m/s
+    ref: str
+        Identifies reference used to compute moving bed
+    bt_percent_mb: float
+        Percent moving-bed using only BT
+    bt_dist_us_m: float
+        Distance upstream using only BT
+    bt_mb_dir: float
+        Moving-bed direction using only BT
+    bt_mb_spd_mps: float
+        Moving-bed speed using only BT
+    bt_flow_spd_mps: float
+        Corrected flow speed using only BT
+    gps_percent_mb: float
+        Percent moving-bed using BT and GPS
+    gps_dist_us_m: float
+        Distance upstream using BT and GPS
+    gps_mb_dir: float
+        Moving-bed direction using BT and GPS
+    gps_mb_spd_mps: float
+        Moving-bed speed using BT and GPS
+    gps_flow_spd_mps: float
+        Corrected flow speed using BT and GPS
+    """
+    
+    def __init__(self):
+        """Initialize class and instance variables."""
+
+        self.type = None  # Loop or Stationary
+        self.transect = None  # Object of TransectData
+        self.duration_sec = np.nan  # Duration of test in secs
+        self.percent_invalid_bt = np.nan  # Percent of invalid bottom track
+        self.compass_diff_deg = np.nan  # Difference in heading for out and back of loop
+        self.flow_dir = np.nan  # Mean flow direction from loop test
+        self.mb_dir = np.nan  # Moving bed or closure error direction
+        self.dist_us_m = np.nan  # Distance moved upstream in m
+        self.flow_spd_mps = np.nan  # Magnitude of water velocity in mps
+        self.mb_spd_mps = np.nan  # Magnitude of moving-bed velocity in mps
+        self.percent_mb = np.nan  # Potential error due to moving bed in percent
+        self.moving_bed = np.nan  # Moving-bed determined 'Yes' 'No'
+        self.user_valid = True  # Logical to allow user to determine if test should be considered a valid test
+        self.test_quality = None  # Quality of test 'Valid' 'Warnings' 'Errors'
+        self.use_2_correct = None  # Use this test to correct discharge
+        self.selected = None  # Selected valid moving-bed test to use for correction or determine moving-bed condition
+        self.messages = None  # Cell array of warning and error messages based on data processing
+        self.near_bed_speed_mps = np.nan  # Mean near-bed water speed for test in mps
+        self.stationary_us_track = np.array([])  # Upstream component of the bottom track referenced ship track
+        self.stationary_cs_track = np.array([])  # Cross-stream component of the bottom track referenced ship track
+        self.stationary_mb_vel = np.array([])  # Moving-bed velocity by ensemble
+        self.ref = 'BT'
+        self.bt_percent_mb = np.nan
+        self.bt_dist_us_m = np.nan
+        self.bt_mb_dir = np.nan
+        self.bt_mb_spd_mps = np.nan
+        self.bt_flow_spd_mps = np.nan
+        self.gps_percent_mb = np.nan
+        self.gps_dist_us_m = np.nan
+        self.gps_mb_dir = np.nan
+        self.gps_mb_spd_mps = np.nan
+        self.gps_flow_spd_mps = np.nan
+        
+    def populate_data(self, source, file=None, test_type=None):
+        """Process and store moving-bed test data.
+
+        Parameters
+        ----------
+        source: str
+            Manufacturer of ADCP, SonTek or TRDI
+        file: TransectData or str
+            Object of TransectData for TRDI and str of filename for SonTek
+        test_type: str
+            Type of moving-bed test (Loop or Stationary)
+        """
+
+        if source == 'TRDI':
+            self.mb_trdi(file, test_type)
+        else:
+            self.mb_sontek(file, test_type)
+
+        self.process_mb_test(source)
+
+    def process_mb_test(self, source):
+        """Prepares the transect data and runs the loop or stationary processing
+        based on the test type.
+
+        Parameters
+        ----------
+        source: str
+            Manufacturer of ADCP, SonTek or TRDI
+        """
+
+        # Convert to earth coordinates and set the navigation reference to BT
+        # for both boat and water data
+        # self.transect.boat_vel.bt_vel.apply_interpolation(transect=self.transect, interpolation_method='Linear')
+        self.transect.change_coord_sys(new_coord_sys='Earth')
+        self.transect.change_nav_reference(update=True, new_nav_ref='BT')
+            
+        # Adjust data for default manufacturer specific handling of invalid data
+        delta_t = adjusted_ensemble_duration(self.transect, 'mbt')
+        
+        if self.type == 'Loop':
+            if source == 'TRDI':
+                self.loop_test(delta_t)
+            else:
+                self.loop_test()
+        elif self.type == 'Stationary':
+            self.stationary_test()
+        else:
+            raise ValueError('Invalid moving-bed test identifier specified.')
+
+    @staticmethod
+    def qrev_mat_in(meas_struct):
+        """Processes the Matlab data structure to obtain a list of TransectData objects containing transect
+           data from the Matlab data structure.
+
+       Parameters
+       ----------
+       meas_struct: mat_struct
+           Matlab data structure obtained from sio.loadmat
+
+       Returns
+       -------
+       mb_tests: list
+           List of MovingBedTests objects
+       """
+
+        mb_tests = []
+        if hasattr(meas_struct, 'mbTests'):
+            try:
+                # If there are multiple tests, the Matlab structure will be an array
+                if type(meas_struct.mbTests) == np.ndarray:
+                    for test in meas_struct.mbTests:
+                        temp = MovingBedTests()
+                        temp.populate_from_qrev_mat(test)
+                        mb_tests.append(temp)
+                # If only one test, that test is not stored in an array
+                else:
+                    temp = MovingBedTests()
+                    temp.populate_from_qrev_mat(meas_struct.mbTests)
+                    mb_tests.append(temp)
+            except (TypeError, AttributeError):
+                pass
+        return mb_tests
+
+    def populate_from_qrev_mat(self, mat_data):
+        """Populates the object using data from previously saved QRev Matlab file.
+
+        Parameters
+        ----------
+        mat_data: mat_struct
+           Matlab data structure obtained from sio.loadmat
+        """
+
+        self.type = mat_data.type
+        self.transect = TransectData()
+        self.transect.populate_from_qrev_mat(mat_data.transect)
+
+        # QRev.mat may return an empty array instead of a float
+        self.duration_sec = self.return_float(mat_data.duration_sec)
+        self.percent_invalid_bt = self.return_float(mat_data.percentInvalidBT)
+        self.compass_diff_deg = self.return_float(mat_data.compassDiff_deg)
+        self.flow_dir = self.return_float(mat_data.flowDir_deg)
+        self.mb_dir = self.return_float(mat_data.mbDir_deg)
+        self.dist_us_m = self.return_float(mat_data.distUS_m)
+        self.flow_spd_mps = self.return_float(mat_data.flowSpd_mps)
+        self.mb_spd_mps = self.return_float(mat_data.mbSpd_mps)
+        self.percent_mb = self.return_float(mat_data.percentMB)
+        self.near_bed_speed_mps = self.return_float(mat_data.nearBedSpeed_mps)
+
+        self.moving_bed = mat_data.movingBed
+        self.user_valid = bool(mat_data.userValid)
+        self.test_quality = mat_data.testQuality
+        self.use_2_correct = bool(mat_data.use2Correct)
+        self.selected = bool(mat_data.selected)
+
+        # Handle situation for one or more messages
+        if type(mat_data.messages) == np.ndarray:
+            self.messages = mat_data.messages.tolist()
+        else:
+            self.messages = [mat_data.messages]
+
+        self.stationary_us_track = mat_data.stationaryUSTrack
+        self.stationary_cs_track = mat_data.stationaryCSTrack
+        self.stationary_mb_vel = mat_data.stationaryMBVel
+
+        # Feature that can use GPS for moving-bed tests
+        if hasattr(mat_data, 'bt_percent_mb'):
+            self.bt_percent_mb = self.return_float(mat_data.bt_percent_mb)
+            self.bt_dist_us_m = self.return_float(mat_data.bt_dist_us_m)
+            self.bt_mb_dir = self.return_float(mat_data.bt_mb_dir)
+            self.bt_mb_spd_mps = self.return_float(mat_data.bt_mb_spd_mps)
+            self.bt_flow_spd_mps = self.return_float(mat_data.bt_flow_spd_mps)
+            self.gps_percent_mb = self.return_float(mat_data.gps_percent_mb)
+            self.gps_dist_us_m = self.return_float(mat_data.gps_dist_us_m)
+            self.gps_mb_dir = self.return_float(mat_data.gps_mb_dir)
+            self.gps_mb_spd_mps = self.return_float(mat_data.gps_mb_spd_mps)
+        else:
+            self.bt_percent_mb = self.percent_mb
+            self.bt_dist_us_m = self.dist_us_m
+            self.bt_mb_dir = self.mb_dir
+            self.bt_mb_spd_mps = self.mb_spd_mps
+            self.bt_flow_spd_mps = self.flow_spd_mps
+            self.compute_mb_gps()
+
+    @staticmethod
+    def return_float(data):
+        """Handles situation where the Matlab file is an empty array
+
+        Parameters
+        ----------
+        data: float or np.ndarray
+            Any variable
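+
+        Notes
+        -----
+        Illustrative behavior (a sketch, not exhaustive):
+
+        >>> MovingBedTests.return_float(5.0)
+        5.0
+        >>> np.isnan(MovingBedTests.return_float(np.array([])))
+        True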
+        """
+        if type(data) is float:
+            return data
+        elif type(data) is np.ndarray:
+            if data.size == 0:
+                return np.nan
+            else:
+                return MovingBedTests.make_list(data)
+        else:
+            return np.nan
+
+    @staticmethod
+    def make_list(array_in):
+        """Method to make list from several special cases that can occur in the Matlab data.
+
+        Parameters
+        ----------
+        array_in: np.ndarray
+            Input that needs to be convert to a list
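+
+        Notes
+        -----
+        Illustrative behavior (a sketch):
+
+        >>> MovingBedTests.make_list(np.array([1., 2.]))
+        [[1.0, 2.0]]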
+        """
+
+        # This traps messages with the associated codes
+        if array_in.size > 3:
+            list_out = array_in.tolist()
+        else:
+            # Create a list of lists
+            temp = array_in.tolist()
+            if len(temp) > 0:
+                internal_list = []
+                for item in temp:
+                    internal_list.append(item)
+                list_out = [internal_list]
+            else:
+                list_out = np.nan
+        return list_out
+
+    def mb_trdi(self, transect, test_type):
+        """Function to create object properties for TRDI moving-bed tests
+
+        Parameters
+        ----------
+        transect: TransectData
+            Object of TransectData
+        test_type: str
+            Type of moving-bed test."""
+        
+        self.transect = transect
+        self.user_valid = True
+        self.type = test_type
+
+    def mb_sontek(self, file_name, test_type):
+        """Function to create object properties for SonTek moving-bed tests
+
+        Parameters
+        ----------
+        file_name: str
+            Name of moving-bed test data file
+        test_type: str
+            Type of moving-bed test."""
+        self.type = test_type
+
+        # Read Matlab file for moving-bed test
+        rsdata = MatSonTek(file_name)
+
+        # Create transect objects for each discharge transect
+        self.transect = TransectData()
+        self.transect.sontek(rsdata, file_name)
+        
+    def loop_test(self, ens_duration=None, ref='BT'):
+        """Process loop moving bed test.
+
+        Parameters
+        ----------
+        ens_duration: np.array(float)
+            Duration of each ensemble, in sec
+        ref: str
+            Reference used to compare distance moved
+        """
+
+        # Assign data from transect to local variables
+        # self.transect.boat_interpolations(update=False, target='BT', method='Linear')
+        # self.transect.boat_interpolations(update=False, target='GPS', method='Linear')
+        trans_data = copy.deepcopy(self.transect)
+        in_transect_idx = trans_data.in_transect_idx
+        n_ensembles = len(in_transect_idx)
+        bt_valid = trans_data.boat_vel.bt_vel.valid_data[0, in_transect_idx]
+
+        # Set variables to defaults
+        self.messages = []
+        vel_criteria = 0.012
+
+        # Check that there is some valid BT data
+        if np.nansum(bt_valid) > 1:
+            wt_u = trans_data.w_vel.u_processed_mps[:, in_transect_idx]
+            wt_v = trans_data.w_vel.v_processed_mps[:, in_transect_idx]
+            if ens_duration is None:
+                ens_duration = trans_data.date_time.ens_duration_sec[in_transect_idx]
+
+            bt_u = trans_data.boat_vel.bt_vel.u_processed_mps[in_transect_idx]
+            bt_v = trans_data.boat_vel.bt_vel.v_processed_mps[in_transect_idx]
+            bin_size = trans_data.depths.bt_depths.depth_cell_size_m[:, in_transect_idx]
+
+            # Compute closure distance and direction
+            bt_x = np.nancumsum(bt_u * ens_duration)
+            bt_y = np.nancumsum(bt_v * ens_duration)
+            direct, self.bt_dist_us_m = cart2pol(bt_x[-1], bt_y[-1])
+            self.bt_mb_dir = rad2azdeg(direct)
+
+            # Compute duration of test
+            self.duration_sec = np.nansum(ens_duration)
+
+            # Compute the moving-bed velocity
+            self.bt_mb_spd_mps = self.bt_dist_us_m / self.duration_sec
+
+            # Compute discharge-weighted mean velocity components for the
+            # purpose of computing the mean flow direction
+            xprod = QComp.cross_product(transect=trans_data)
+            q = QComp.discharge_middle_cells(xprod, trans_data, ens_duration)
+            wght = np.abs(q)
+            se = np.nansum(np.nansum(wt_u * wght)) / np.nansum(np.nansum(wght))
+            sn = np.nansum(np.nansum(wt_v * wght)) / np.nansum(np.nansum(wght))
+            direct, flow_speed_q = cart2pol(se, sn)
+
+            # Compute flow speed and direction
+            self.flow_dir = rad2azdeg(direct)
+            
+            # Compute the area-weighted mean velocity components for the
+            # purpose of computing the mean flow speed. Area weighting is used instead of discharge
+            # weighting so that the flow speed is not included in the weights used to compute the mean flow speed.
+            wght_area = np.multiply(np.multiply(np.sqrt(bt_u ** 2 + bt_v ** 2), bin_size), ens_duration)
+            idx = np.where(np.isnan(wt_u) == False)
+            se = np.nansum(np.nansum(wt_u[idx] * wght_area[idx])) / np.nansum(np.nansum(wght_area[idx]))
+            sn = np.nansum(np.nansum(wt_v[idx] * wght_area[idx])) / np.nansum(np.nansum(wght_area[idx]))
+            dir_a, self.bt_flow_spd_mps = cart2pol(se, sn)
+            self.bt_flow_spd_mps = self.bt_flow_spd_mps + self.bt_mb_spd_mps
+
+            # Compute potential error in BT referenced discharge
+            self.bt_percent_mb = (self.bt_mb_spd_mps / self.bt_flow_spd_mps) * 100
+
+            # Compute test with GPS
+            self.compute_mb_gps()
+
+            # Store selected test characteristics
+            if ref == 'BT':
+                self.mb_spd_mps = self.bt_mb_spd_mps
+                self.dist_us_m = self.bt_dist_us_m
+                self.percent_mb = self.bt_percent_mb
+                self.mb_dir = self.bt_mb_dir
+                self.flow_spd_mps = self.bt_flow_spd_mps
+            elif not np.isnan(self.gps_percent_mb):
+                self.mb_spd_mps = self.gps_mb_spd_mps
+                self.dist_us_m = self.gps_dist_us_m
+                self.percent_mb = self.gps_percent_mb
+                self.mb_dir = self.gps_mb_dir
+                self.flow_spd_mps = self.bt_flow_spd_mps
+
+            # Assess invalid bottom track
+            # Compute percent invalid bottom track
+            self.percent_invalid_bt = (np.nansum(bt_valid == False) / len(bt_valid)) * 100
+
+            # Determine if more than 9 consecutive seconds of invalid BT occurred
+            consect_bt_time = np.zeros(n_ensembles)
+            for n in range(1, n_ensembles):
+                if bt_valid[n]:
+                    consect_bt_time[n] = 0
+                else:
+                    consect_bt_time[n] = consect_bt_time[n - 1] + ens_duration[n]
+
+            max_consect_bt_time = np.nanmax(consect_bt_time)
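+            # Worked example (hypothetical values): bt_valid = [T, F, F, T] with
+            # ens_duration = [1, 2, 3, 1] sec yields consect_bt_time = [0, 2, 5, 0],
+            # so max_consect_bt_time = 5 sec.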
+
+            # Evaluate compass calibration based on flow direction
+
+            # Find apex of loop adapted from
+            # http://www.mathworks.de/matlabcentral/newsreader/view_thread/164048
+            loop_out = np.array([bt_x[0], bt_y[0], 0])
+            loop_return = np.array([bt_x[-1], bt_y[-1], 0])
+
+            distance = np.zeros(n_ensembles)
+            for n in range(n_ensembles):
+                p = np.array([bt_x[n], bt_y[n], 0])
+                distance[n] = np.linalg.norm(np.cross(loop_return - loop_out, p - loop_out))  \
+                    / np.linalg.norm(loop_return - loop_out)
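+            # The expression above is the point-to-chord distance
+            # |(B - A) x (P - A)| / |B - A|. Worked example (hypothetical points):
+            # loop_out = (0, 0, 0), loop_return = (10, 0, 0), p = (5, 3, 0) give a
+            # cross-product magnitude of 30 and a chord length of 10, so distance = 3.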
+
+            dmg_idx = np.where(distance == np.nanmax(distance))[0][0]
+
+            # Compute flow direction on outgoing part of loop
+            u_out = wt_u[:, :dmg_idx + 1]
+            v_out = wt_v[:, :dmg_idx + 1]
+            wght = np.abs(q[:, :dmg_idx+1])
+            se = np.nansum(u_out * wght) / np.nansum(wght)
+            sn = np.nansum(v_out * wght) / np.nansum(wght)
+            direct, _ = cart2pol(se, sn)
+            flow_dir1 = rad2azdeg(direct)
+
+            # Compute unweighted flow direction in each cell
+            direct, _ = cart2pol(u_out, v_out)
+            flow_dir_cell = rad2azdeg(direct)
+
+            # Compute difference from mean and correct to +/- 180
+            v_dir_corr = flow_dir_cell - flow_dir1
+            v_dir_idx = nan_greater(v_dir_corr, 180)
+            v_dir_corr[v_dir_idx] = 360 - v_dir_corr[v_dir_idx]
+            v_dir_idx = nan_less(v_dir_corr, -180)
+            v_dir_corr[v_dir_idx] = 360 + v_dir_corr[v_dir_idx]
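+            # For example, a cell direction of 350 deg against a mean of 10 deg gives
+            # v_dir_corr = 340, which the correction above maps to 360 - 340 = 20 deg.
+            # The sign is not preserved, but v_dir_corr enters the uncertainty below
+            # only as its square, so only the magnitude matters.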
+
+            # Number of valid weights
+            idx2 = np.where(np.isnan(wght) == False)
+            nwght = len(idx2[0])
+
+            # Compute 95% uncertainty using weighted standard deviation
+            uncert1 = 2. * np.sqrt(np.nansum(np.nansum(wght * v_dir_corr**2))
+                                   / (((nwght - 1) * np.nansum(np.nansum(wght))) / nwght)) / np.sqrt(nwght)
+
+            # Compute flow direction on returning part of loop
+            u_ret = wt_u[:, dmg_idx + 1:]
+            v_ret = wt_v[:, dmg_idx + 1:]
+            wght = np.abs(q[:, dmg_idx+1:])
+            se = np.nansum(u_ret * wght) / np.nansum(wght)
+            sn = np.nansum(v_ret * wght) / np.nansum(wght)
+            direct, _ = cart2pol(se, sn)
+            flow_dir2 = rad2azdeg(direct)
+
+            # Compute unweighted flow direction in each cell
+            direct, _ = cart2pol(u_ret, v_ret)
+            flow_dir_cell = rad2azdeg(direct)
+
+            # Compute difference from mean and correct to +/- 180
+            v_dir_corr = flow_dir_cell - flow_dir2
+            v_dir_idx = nan_greater(v_dir_corr, 180)
+            v_dir_corr[v_dir_idx] = 360 - v_dir_corr[v_dir_idx]
+            v_dir_idx = nan_less(v_dir_corr, -180)
+            v_dir_corr[v_dir_idx] = 360 + v_dir_corr[v_dir_idx]
+
+            # Number of valid weights
+            idx2 = np.where(np.isnan(wght) == False)
+            nwght = len(idx2[0])
+
+            # Compute 95% uncertainty using weighted standard deviation
+            uncert2 = 2. * np.sqrt(np.nansum(np.nansum(wght * v_dir_corr**2))
+                                   / (((nwght - 1) * np.nansum(np.nansum(wght))) / nwght)) / np.sqrt(nwght)
+
+            # Compute and report difference in flow direction
+            diff_dir = np.abs(flow_dir1 - flow_dir2)
+            if diff_dir > 180:
+                diff_dir = diff_dir - 360
+            self.compass_diff_deg = diff_dir
+            uncert = uncert1 + uncert2
+
+            # Compute potential compass error
+            idx = np.where(np.isnan(bt_x) == False)
+            if len(idx[0]) > 0:
+                idx = idx[0][-1]
+            width = np.sqrt((bt_x[dmg_idx] - bt_x[idx] / 2) ** 2 + (bt_y[dmg_idx] - bt_y[idx] / 2) ** 2)
+            compass_error = (2 * width * sind(diff_dir / 2) * 100) / (self.duration_sec * self.flow_spd_mps)
+
+            # Initialize message counter
+            self.test_quality = 'Good'
+
+            # Low water velocity
+            if self.flow_spd_mps < 0.25:
+                self.messages.append('WARNING: The water velocity is less than recommended minimum for '
+                                     + 'this test and could cause the loop method to be inaccurate. '
+                                     + 'CONSIDER USING A STATIONARY TEST TO CHECK MOVING-BED CONDITIONS')
+                self.test_quality = 'Warnings'
+
+            # Percent invalid bottom track
+            if self.percent_invalid_bt > 20:
+                self.messages.append('ERROR: Percent invalid bottom track exceeds 20 percent. '
+                                     + 'THE LOOP IS NOT ACCURATE. TRY A STATIONARY MOVING-BED TEST.')
+                self.test_quality = 'Errors'
+            elif self.percent_invalid_bt > 5:
+                self.messages.append('WARNING: Percent invalid bottom track exceeds 5 percent. '
+                                     + 'Loop may not be accurate. PLEASE REVIEW DATA.')
+                self.test_quality = 'Warnings'
+
+            # More than 9 consecutive seconds of invalid BT
+            if max_consect_bt_time > 9:
+                self.messages.append('ERROR: Bottom track is invalid for more than 9 consecutive seconds. '
+                                     + 'THE LOOP IS NOT ACCURATE. TRY A STATIONARY MOVING-BED TEST.')
+                self.test_quality = 'Errors'
+
+            if np.abs(compass_error) > 5 and np.abs(diff_dir) > 3 and np.abs(diff_dir) > uncert:
+                self.messages.append('ERROR: Difference in flow direction between out and back sections of '
+                                     + 'loop could result in a 5 percent or greater error in final discharge. '
+                                     + 'REPEAT LOOP AFTER COMPASS CAL. OR USE A STATIONARY MOVING-BED TEST.')
+                self.test_quality = 'Errors'
+
+        else:
+            self.messages.append('ERROR: Loop has no valid bottom track data. '
+                                 + 'REPEAT OR USE A STATIONARY MOVING-BED TEST.')
+            self.test_quality = 'Errors'
+
+        # If loop is valid then evaluate moving-bed condition
+        if self.test_quality != 'Errors':
+
+            # Check minimum moving-bed velocity criteria
+            if self.mb_spd_mps > vel_criteria:
+                # Check that closure error is in upstream direction
+                if 135 < np.abs(self.flow_dir - self.mb_dir) < 225:
+                    # Check if moving-bed is greater than 1% of the mean flow speed
+                    if self.percent_mb > 1:
+                        self.messages.append('Loop Indicates a Moving Bed -- Use GPS as reference. If GPS is '
+                                             + 'unavailable or invalid use the loop method to correct the '
+                                             + 'final discharge.')
+                        self.moving_bed = 'Yes'
+                    else:
+                        self.messages.append('Moving Bed Velocity < 1% of Mean Velocity -- No Correction Recommended')
+                        self.moving_bed = 'No'
+                else:
+                    self.messages.append('ERROR: Loop closure error not in upstream direction. '
+                                         + 'REPEAT LOOP or USE STATIONARY TEST')
+                    self.test_quality = 'Errors'
+                    self.moving_bed = 'Unknown'
+            else:
+                self.messages.append('Moving-bed velocity < Minimum moving-bed velocity criteria '
+                                     + '-- No correction recommended')
+                self.moving_bed = 'No'
+
+            # Notify of differences in results of test between BT and GPS
+            if not np.isnan(self.gps_percent_mb):
+                if np.abs(self.bt_percent_mb - self.gps_percent_mb) > 2:
+                    self.messages.append('WARNING - Bottom track and GPS results differ by more than 2%.')
+                    self.test_quality = 'Warnings'
+
+                if np.logical_xor(self.bt_percent_mb >= 1, self.gps_percent_mb >= 1):
+                    self.messages.append('WARNING - Bottom track and GPS results do not agree.')
+                    self.test_quality = 'Warnings'
+
+        else:
+            self.messages.append('ERROR: Due to ERRORS noted above this loop is NOT VALID. '
+                                 + 'Please consider suggestions.')
+            self.moving_bed = 'Unknown'
+
+    def stationary_test(self, ref='BT'):
+        """Processed the stationary moving-bed tests.
+        """
+
+        # Assign data from transect to local variables
+        trans_data = copy.deepcopy(self.transect)
+        in_transect_idx = trans_data.in_transect_idx
+        bt_valid = trans_data.boat_vel.bt_vel.valid_data[0, in_transect_idx]
+
+        # Check to see that there is valid bottom track data
+        self.messages = []
+        if np.nansum(bt_valid) > 0:
+            # Assign data to local variables
+            wt_u = trans_data.w_vel.u_processed_mps[:, in_transect_idx]
+            wt_v = trans_data.w_vel.v_processed_mps[:, in_transect_idx]
+            ens_duration = trans_data.date_time.ens_duration_sec[in_transect_idx]
+            bt_u = trans_data.boat_vel.bt_vel.u_processed_mps[in_transect_idx]
+            bt_v = trans_data.boat_vel.bt_vel.v_processed_mps[in_transect_idx]
+
+            # Use only data with valid bottom track
+            valid_bt = trans_data.boat_vel.bt_vel.valid_data[0, in_transect_idx]
+            wt_u[:, valid_bt == False] = np.nan
+            wt_v[:, valid_bt == False] = np.nan
+            bt_u[valid_bt == False] = np.nan
+            bt_v[valid_bt == False] = np.nan
+
+            u_water = np.nanmean(wt_u)
+            v_water = np.nanmean(wt_v)
+            self.flow_dir = np.arctan2(u_water, v_water) * 180 / np.pi
+            if self.flow_dir < 0:
+                self.flow_dir = self.flow_dir + 360
+
+            bin_depth = trans_data.depths.bt_depths.depth_cell_depth_m[:, in_transect_idx]
+            trans_select = getattr(trans_data.depths, trans_data.depths.selected)
+            depth_ens = trans_select.depth_processed_m[in_transect_idx]
+
+            nb_u, nb_v, unit_nbu, unit_nbv = self.near_bed_velocity(wt_u, wt_v, depth_ens, bin_depth)
+            
+            # Compute bottom track parallel to water velocity
+            unit_nb_vel = np.vstack([unit_nbu, unit_nbv])
+            bt_vel = np.vstack([bt_u, bt_v])
+            bt_vel_up_strm = -1 * np.sum(bt_vel * unit_nb_vel, 0)
+            bt_up_strm_dist = bt_vel_up_strm * ens_duration
+            bt_up_strm_dist_cum = np.nancumsum(bt_up_strm_dist)
+            self.bt_dist_us_m = bt_up_strm_dist_cum[-1]
+
+            # Compute bottom track perpendicular to water velocity
+            nb_vel_ang, _ = cart2pol(unit_nbu, unit_nbv)
+            nb_vel_unit_cs1, nb_vel_unit_cs2 = pol2cart(nb_vel_ang + np.pi / 2, 1)
+            nb_vel_unit_cs = np.vstack([nb_vel_unit_cs1, nb_vel_unit_cs2])
+            bt_vel_cs = np.sum(bt_vel * nb_vel_unit_cs, 0)
+            bt_cs_strm_dist = bt_vel_cs * ens_duration
+            bt_cs_strm_dist_cum = np.nancumsum(bt_cs_strm_dist)
+            
+            # Compute cumulative mean moving bed velocity
+            valid_bt_vel_up_strm = np.isnan(bt_vel_up_strm) == False
+
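+            # Running mean: cumulative sum of the upstream bottom-track
+            # velocities divided by the cumulative count of valid ensembles.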
+            mb_vel = np.nancumsum(bt_vel_up_strm) / np.nancumsum(valid_bt_vel_up_strm)
+
+            # Compute the average ensemble velocities corrected for moving bed
+            if mb_vel[-1] > 0:
+                u_corrected = np.add(wt_u, (unit_nb_vel[0, :]) * bt_vel_up_strm)
+                v_corrected = np.add(wt_v, (unit_nb_vel[1, :]) * bt_vel_up_strm)
+            else:
+                u_corrected = wt_u
+                v_corrected = wt_v
+                
+            # Compute the mean of the ensemble magnitudes
+
+            # Mean is computed using magnitudes because if a Streampro with no compass is the data source the change
+            # in direction could be either real change in water direction or an uncompensated turn of the floating
+            # platform. This approach is the best compromise when there is no compass or the compass is unreliable,
+            # which is often why the stationary method is used. A weighted average is used to account for the possible
+            # change in cell size within an ensemble for the RiverRay and RiverPro.
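+            # In equation form, the weighted mean speed is
+            # sum(|v_i| * dz_i) / sum(dz_i), with dz_i the depth cell size.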
+
+            mag = np.sqrt(u_corrected**2 + v_corrected**2)
+            depth_cell_size = trans_data.depths.bt_depths.depth_cell_size_m[:, in_transect_idx]
+            depth_cell_size[np.isnan(mag)] = np.nan
+            mag_w = mag * depth_cell_size
+            self.bt_flow_spd_mps = np.nansum(mag_w) / np.nansum(depth_cell_size)
+            self.bt_mb_spd_mps = mb_vel[-1]
+            self.bt_percent_mb = (self.bt_mb_spd_mps / self.bt_flow_spd_mps) * 100
+            if self.bt_percent_mb < 0:
+                self.bt_percent_mb = 0
+
+            # Compute percent invalid bottom track
+            self.percent_invalid_bt = (np.nansum(bt_valid == False) / len(bt_valid)) * 100
+            self.duration_sec = np.nansum(ens_duration)
+
+            # Compute test using GPS
+            self.compute_mb_gps()
+
+            # Store selected test characteristics
+            if ref == 'BT':
+                self.mb_spd_mps = self.bt_mb_spd_mps
+                self.dist_us_m = self.bt_dist_us_m
+                self.percent_mb = self.bt_percent_mb
+                self.mb_dir = self.bt_mb_dir
+                self.flow_spd_mps = self.bt_flow_spd_mps
+            elif not np.isnan(self.gps_percent_mb):
+                self.mb_spd_mps = self.gps_mb_spd_mps
+                self.dist_us_m = self.gps_dist_us_m
+                self.percent_mb = self.gps_percent_mb
+                self.mb_dir = self.gps_mb_dir
+                self.flow_spd_mps = self.bt_flow_spd_mps
+
+            self.near_bed_speed_mps = np.sqrt(np.nanmean(nb_u)**2 + np.nanmean(nb_v)**2)
+            self.stationary_us_track = bt_up_strm_dist_cum
+            self.stationary_cs_track = bt_cs_strm_dist_cum
+            self.stationary_mb_vel = mb_vel
+
+            # Quality check
+            self.test_quality = 'Good'
+            # Check duration
+            if self.duration_sec < 299:
+                self.messages.append('WARNING - Duration of stationary test is less than 5 minutes')
+                self.test_quality = 'Warnings'
+                
+            # Check validity of mean moving-bed velocity
+            if self.duration_sec > 60:
+                mb_vel_std = np.nanstd(mb_vel[-30:], ddof=1)
+                cov = mb_vel_std / mb_vel[-1]
+                if cov > 0.25 and mb_vel_std > 0.03:
+                    self.messages.append('WARNING - Moving-bed velocity may not be consistent. '
+                                         + 'Average may be inaccurate.')
+                    self.test_quality = 'Warnings'
+                    
+            # Check duration of valid BT data and percentage of invalid BT data
+            if np.nansum(ens_duration[valid_bt_vel_up_strm]) <= 120:
+                self.messages.append('ERROR - Total duration of valid BT data is insufficient for a valid test.')
+                self.test_quality = 'Errors'
+                self.moving_bed = 'Unknown'
+            elif self.percent_invalid_bt > 10:
+                self.messages.append('WARNING - Number of ensembles with invalid bottom track exceeds 10%')
+                self.test_quality = 'Warnings'
+                
+            # Determine if the test indicates a moving bed
+            if self.test_quality != 'Errors':
+                if self.percent_mb >= 1:
+                    self.moving_bed = 'Yes'
+                else:
+                    self.moving_bed = 'No'
+
+            # Notify of differences in results of test between BT and GPS
+            if not np.isnan(self.gps_percent_mb):
+                if np.abs(self.bt_percent_mb - self.gps_percent_mb) > 2:
+                    self.messages.append('WARNING - Bottom track and GPS results differ by more than 2%.')
+                    self.test_quality = 'Warnings'
+
+                if np.logical_xor(self.bt_percent_mb >= 1,  self.gps_percent_mb >= 1):
+                    self.messages.append('WARNING - Bottom track and GPS results do not agree.')
+                    self.test_quality = 'Warnings'
+
+        else:
+            self.messages.append('ERROR - Stationary moving-bed test has no valid bottom track data.')
+            self.test_quality = 'Errors'
+            self.moving_bed = 'Unknown'
+            self.duration_sec = np.nansum(trans_data.date_time.ens_duration_sec[in_transect_idx])
+            self.percent_invalid_bt = 100
+
+    def compute_mb_gps(self):
+        """Computes moving-bed data using GPS.
+        """
+        if np.isnan(self.flow_dir):
+            u_water = np.nanmean(self.transect.w_vel.u_processed_mps[:, self.transect.in_transect_idx])
+            v_water = np.nanmean(self.transect.w_vel.v_processed_mps[:, self.transect.in_transect_idx])
+            self.flow_dir = np.arctan2(u_water, v_water) * 180 / np.pi
+            if self.flow_dir < 0:
+                self.flow_dir = self.flow_dir + 360
+
+        gps_bt = None
+        # Use GGA data if available, otherwise use VTG
+        if self.transect.boat_vel.gga_vel is not None:
+            gps_bt = TransectData.compute_gps_bt(self.transect, gps_ref='gga_vel')
+        elif self.transect.boat_vel.vtg_vel is not None:
+            gps_bt = TransectData.compute_gps_bt(self.transect, gps_ref='vtg_vel')
+        if gps_bt is not None and len(gps_bt) > 0:
+            self.gps_dist_us_m = gps_bt['mag']
+            self.gps_mb_dir = gps_bt['dir']
+            self.gps_mb_spd_mps = self.gps_dist_us_m / self.duration_sec
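+            # The GPS-referenced flow speed is the BT-referenced flow speed with
+            # the moving-bed bias exchanged: subtract the moving-bed speed seen
+            # by bottom track and add back the GPS-derived moving-bed speed.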
+            self.gps_flow_spd_mps = self.bt_flow_spd_mps - self.bt_mb_spd_mps + self.gps_mb_spd_mps
+            self.gps_percent_mb = (self.gps_mb_spd_mps / self.gps_flow_spd_mps) * 100
+
+    def magvar_change(self, magvar, old_magvar):
+        """Adjust moving-bed test for change in magvar.
+
+        Parameters
+        ----------
+        magvar: float
+            New magvar
+        old_magvar: float
+            Existing magvar
+        """
+
+        if self.transect.sensors.heading_deg.selected == 'internal':
+            magvar_change = magvar - old_magvar
+            self.bt_mb_dir = self.bt_mb_dir + magvar_change
+            self.flow_dir = self.flow_dir + magvar_change
+
+            # Recompute moving-bed tests with GPS and set results using existing reference
+            self.compute_mb_gps()
+            self.change_ref(self.ref)
+
+    def h_offset_change(self, h_offset, old_h_offset):
+        """Adjust moving-bed test for change in h_offset for external compass.
+
+        Parameters
+        ----------
+        h_offset: float
+            New h_offset
+        old_h_offset: float
+            Existing h_offset
+        """
+
+        if self.transect.sensors.heading_deg.selected == 'external':
+            h_offset_change = h_offset - old_h_offset
+            self.bt_mb_dir = self.bt_mb_dir + h_offset_change
+            self.flow_dir = self.flow_dir + h_offset_change
+
+            # Recompute moving-bed tests with GPS and set results using existing reference
+            self.compute_mb_gps()
+            self.change_ref(self.ref)
+
+    def change_ref(self, ref):
+        """Change moving-bed test fixed reference.
+
+        Parameters
+        ----------
+        ref: str
+            Defines specified reference (BT or GPS)
+        """
+
+        if ref == 'BT':
+            self.mb_spd_mps = self.bt_mb_spd_mps
+            self.dist_us_m = self.bt_dist_us_m
+            self.percent_mb = self.bt_percent_mb
+            self.mb_dir = self.bt_mb_dir
+            self.flow_spd_mps = self.bt_flow_spd_mps
+            self.ref = 'BT'
+            check_mb = True
+            if self.test_quality != 'Errors':
+                if self.type == 'Loop':
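+                    # 0.012 m/s mirrors the minimum moving-bed velocity
+                    # criterion (vel_criteria) applied in the loop test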
+                    if self.mb_spd_mps <= 0.012:
+                        check_mb = False
+                        self.moving_bed = 'No'
+                    else:
+                        if 135 < np.abs(self.flow_dir - self.mb_dir) < 225:
+                            check_mb = True
+                        else:
+                            check_mb = False
+                            self.moving_bed = 'Unknown'
+                if check_mb:
+                    if self.percent_mb > 1:
+                        self.moving_bed = 'Yes'
+                    else:
+                        self.moving_bed = 'No'
+            else:
+                self.moving_bed = 'Unknown'
+        elif ref == 'GPS':
+            self.mb_spd_mps = self.gps_mb_spd_mps
+            self.dist_us_m = self.gps_dist_us_m
+            self.percent_mb = self.gps_percent_mb
+            self.mb_dir = self.gps_mb_dir
+            self.flow_spd_mps = self.gps_flow_spd_mps
+            self.ref = 'GPS'
+            check_mb = True
+            if self.test_quality != 'Errors':
+                if self.type == 'Loop':
+                    if self.mb_spd_mps <= 0.012:
+                        check_mb = False
+                        self.moving_bed = 'No'
+                    else:
+                        if 135 < np.abs(self.flow_dir - self.mb_dir) < 225:
+                            check_mb = True
+                        else:
+                            check_mb = False
+                            self.messages.append('ERROR: GPS Loop closure error not in upstream direction. '
+                                                 + 'REPEAT LOOP or USE STATIONARY TEST')
+                            self.moving_bed = 'Unknown'
+                if check_mb:
+                    if self.percent_mb > 1:
+                        self.moving_bed = 'Yes'
+                    else:
+                        self.moving_bed = 'No'
+            else:
+                self.moving_bed = 'Unknown'
+
+    @staticmethod
+    def near_bed_velocity(u, v, depth, bin_depth):
+        """Compute near bed velocities.
+
+        Parameters
+        ----------
+        u: np.array(float)
+            u water velocity component
+        v: np.array(float)
+            v water velocity component
+        depth: np.array(float)
+            Water depth for each ensemble
+        bin_depth: np.array(float)
+            Depth to centerline of each bin
+
+        Returns
+        -------
+        nb_u: np.array(float)
+            u near-bed velocity component
+        nb_v: np.array(float)
+            v near-bed velocity component
+        unit_nbu: np.array(float)
+            u component of the near-bed unit vector
+        unit_nbv: np.array(float)
+            v component of the near-bed unit vector
+        """
+
+        # Compute z near bed as 10% of depth
+        z_near_bed = depth * 0.1
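+        # The extrapolation below assumes a 1/6th power-law velocity profile,
+        # u(z) ~ z**(1/6) with z measured up from the bed, so the mean velocity
+        # of the deepest valid bins is scaled down to the near-bed elevation.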
+
+        # Initialize variables
+        n_ensembles = u.shape[1]
+        nb_u = np.tile(np.nan, n_ensembles)
+        nb_v = np.tile(np.nan, n_ensembles)
+        unit_nbu = np.tile(np.nan, n_ensembles)
+        unit_nbv = np.tile(np.nan, n_ensembles)
+        z_depth = np.tile(np.nan, n_ensembles)
+        u_mean = np.tile(np.nan, n_ensembles)
+        v_mean = np.tile(np.nan, n_ensembles)
+        speed_near_bed = np.tile(np.nan, n_ensembles)
+
+        # Compute near bed velocity for each ensemble
+        for n in range(n_ensembles):
+            idx = np.where(np.isnan(u[:, n]) == False)
+            if len(idx[-1]) > 0:
+                # Use the deepest two valid bins, or the single valid bin if
+                # only one is available
+                if len(idx[-1]) > 1:
+                    idx = idx[-1][-2::]
+                else:
+                    idx = idx[-1][-1:]
+                # Compute near-bed velocity
+                z_depth[n] = depth[n] - np.nanmean(bin_depth[idx, n], 0)
+                u_mean[n] = np.nanmean(u[idx, n], 0)
+                v_mean[n] = np.nanmean(v[idx, n], 0)
+                nb_u[n] = (u_mean[n] / z_depth[n] ** (1. / 6.)) * (z_near_bed[n] ** (1. / 6.))
+                nb_v[n] = (v_mean[n] / z_depth[n] ** (1. / 6.)) * (z_near_bed[n] ** (1. / 6.))
+                speed_near_bed[n] = np.sqrt(nb_u[n] ** 2 + nb_v[n] ** 2)
+                unit_nbu[n] = nb_u[n] / speed_near_bed[n]
+                unit_nbv[n] = nb_v[n] / speed_near_bed[n]
+
+        return nb_u, nb_v, unit_nbu, unit_nbv
+
+    @staticmethod
+    def auto_use_2_correct(moving_bed_tests, boat_ref=None):
+        """Apply logic to determine which moving-bed tests should be used
+        for correcting bottom track referenced discharges with moving-bed conditions.
+
+        Parameters
+        ----------
+        moving_bed_tests: list
+            List of MovingBedTests objects.
+        boat_ref: str
+            Boat velocity reference.
+
+        Returns
+        -------
+        moving_bed_tests: list
+            List of MovingBedTests objects.
+        """
+
+        if len(moving_bed_tests) != 0:
+            # Initialize variables
+            lidx_user = []
+            lidx_no_errors = []
+            test_type = []
+            lidx_stationary = []
+            lidx_loop = []
+            flow_speed = []
+            for test in moving_bed_tests:
+                test.use_2_correct = False
+                test.selected = False
+                # Valid test according to user
+                lidx_user.append(test.user_valid == True)
+                # Valid test according to quality assessment
+                lidx_no_errors.append(test.test_quality != 'Errors')
+                # Identify type of test
+                test_type.append(test.type)
+                lidx_stationary.append(test.type == 'Stationary')
+                lidx_loop.append(test.type == 'Loop')
+                flow_speed.append(test.flow_spd_mps)
+
+            # Combine
+            lidx_valid_loop = np.all(np.vstack((lidx_user, lidx_no_errors, lidx_loop)), 0)
+            lidx_valid_stationary = np.all(np.vstack((lidx_user, lidx_no_errors, lidx_stationary)), 0)
+
+            # Check flow speed
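+            # Loops are auto-selected only when the mean flow speed exceeds
+            # 0.25 m/s; slower flows make the loop method unreliable, and the
+            # logic below falls back to stationary tests or the last loop.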
+            lidx_flow_speed = np.array(flow_speed) > 0.25
+
+            # Determine if there are valid loop tests
+            # Note: this follows the Matlab code, but it may not be correct. A valid loop should also have a valid
+            # flow speed; if not, a stationary test, if available, could be used.
+            lidx_loops_2_select = np.all(np.vstack((lidx_flow_speed, lidx_valid_loop)), 0)
+            if np.any(lidx_loops_2_select):
+                # Select last loop
+                idx_select = np.where(lidx_loops_2_select)[0][-1]
+                test_select = moving_bed_tests[idx_select]
+                test_select.selected = True
+
+                if test_select.moving_bed == 'Yes':
+                    test_select.use_2_correct = True
+
+            # If there are no valid loop look for valid stationary tests
+            elif np.any(lidx_valid_stationary):
+                moving_bed = []
+                for n, lidx in enumerate(lidx_valid_stationary):
+                    if lidx:
+                        moving_bed_tests[n].selected = True
+                        # Determine if any stationary test resulted in a moving bed
+                        if moving_bed_tests[n].moving_bed == 'Yes':
+                            moving_bed.append(True)
+                        else:
+                            moving_bed.append(False)
+                # If any stationary test shows a moving bed, use all valid stationary tests to correct BT discharge
+                if any(moving_bed):
+                    for n, test in enumerate(moving_bed_tests):
+                        if lidx_valid_stationary[n]:
+                            test.use_2_correct = True
+
+            # If the flow speed is too low but there are no valid stationary tests, use the last loop test.
+            elif np.any(lidx_valid_loop):
+                # Select last loop
+                idx_select = np.where(lidx_valid_loop)[0][-1]
+                moving_bed_tests[idx_select].selected = True
+                if moving_bed_tests[idx_select].moving_bed == 'Yes':
+                    moving_bed_tests[idx_select].use_2_correct = True
+
+            # If the navigation reference for discharge computations is set to
+            # GPS, then none of the tests should be used for correction. The
+            # selected test should still be used to determine if there is a
+            # valid moving-bed test and a moving-bed condition.
+            if boat_ref is None:
+                ref = 'BT'
+            else:
+                ref = boat_ref
+
+            if ref != 'BT':
+                for test in moving_bed_tests:
+                    test.use_2_correct = False
+        return moving_bed_tests
diff --git a/Classes/MultiThread.py b/Classes/MultiThread.py
new file mode 100644
index 0000000..51d4580
--- /dev/null
+++ b/Classes/MultiThread.py
@@ -0,0 +1,22 @@
+"""
+Created on Sep 28, 2017
+
+@author: gpetrochenkov
+"""
+import threading
+
+
+class MultiThread(threading.Thread):
+    
+    def __init__(self, thread_id, function, args=None):
+        threading.Thread.__init__(self)
+        self.thread_id = thread_id
+        self.function = function
+        self.args = args
+        
+    def run(self):
+        
+        if self.args is not None:
+            self.function(**self.args)
+        else:
+            self.function()
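+
+# Example usage (a minimal sketch; `load_file` and its argument are hypothetical
+# and only illustrate the expected call pattern):
+#
+#     thread = MultiThread(thread_id=0, function=load_file,
+#                          args={'file_name': 'transect.mmt'})
+#     thread.start()
+#     thread.join()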
diff --git a/Classes/NormData.py b/Classes/NormData.py
new file mode 100644
index 0000000..9a35a84
--- /dev/null
+++ b/Classes/NormData.py
@@ -0,0 +1,435 @@
+import warnings
+import numpy as np
+import scipy.stats as sp
+from MiscLibs.common_functions import cart2pol, pol2cart, nan_less, nan_less_equal, nan_greater
+
+class NormData(object):
+    """Class creates normalized depth and unit discharge or velocity.
+
+    The constructor method allows an object to be formed without any data.
+    The populate_data method creates normalized data for a single transect.
+    The create_composite method creates normalized data for all checked transects.
+    populate_data also allows only a portion of the data to be used in the
+    normalization process by specifying the data extent.
+
+    Attributes
+    ----------
+    file_name: str
+        Name of transect file
+    cell_depth_normalized: np.array(float)
+        Normalized depth of cell
+    unit_normalized: np.array(float)
+        Normalized discharge or velocity for all depth cells
+    unit_normalized_med: np.array(float)
+        Median of normalized data within 5% partitions
+    unit_normalized_no: np.array(int)
+        Number of data points in each median
+    unit_normalized_z: np.array(float)
+        Relative depth for each median (5% increments)
+    unit_normalized_25: np.array(float)
+        Value for which 25% of normalized values are smaller
+    unit_normalized_75: np.array(float)
+        Value for which 75% of normalized values are smaller
+    data_type: str
+        Type of data (v, q, V, or Q)
+    data_extent: list
+        Defines percent of data from start of transect to use, default [0, 100]
+    valid_data: np.array(int)
+        Index of median values with point count greater than threshold cutoff
+    weights: np.array(float)
+        Discharge based weights for computing a weighted median
+    use_weighted: bool
+        Specifies if discharge weighted medians are to be used in the extrapolation fit
+    sub_from_left: bool
+        Specifies if the subsection should start from the left bank when subsectioning
+    use_q: bool
+        Specifies whether to use the discharge rather than the cross product (xprod) when subsectioning
+    """
+    
+    def __init__(self):
+        """Creates object and initializes instance variables."""
+        self.file_name = None  # Name of transect file
+        self.cell_depth_normalized = None  # Normalized depth of cell
+        self.unit_normalized = None  # Normalized discharge or velocity for all depth cells
+        self.unit_normalized_med = None  # Median of normalized data within 5% partitions
+        self.unit_normalized_no = None  # Number of data points in each median
+        self.unit_normalized_z = None  # Relative depth for each median (5% increments)
+        self.unit_normalized_25 = None  # Value for which 25% of normalized values are smaller
+        self.unit_normalized_75 = None  # Value for which 75% of normalized values are smaller
+        self.data_type = 'q'  # Type of data (v, q, V, or Q)
+        self.data_extent = None  # Defines percent of data from start of transect to use, default [0, 100]
+        self.valid_data = np.array([])  # Index of median values with point count greater than threshold cutoff
+        self.weights = np.array([])  # Discharge based weights for computing a weighted median
+        self.use_weighted = True  # Specifies if discharge weighted medians are used in the extrapolation fit
+        self.sub_from_left = False  # Specifies if the subsection starts from the left bank
+        self.use_q = False  # Specifies to use the discharge rather than the cross product when subsectioning
+        
+    def populate_data(self, transect, data_type, threshold, data_extent=None, use_weighted=True, sub_from_left=True, use_q=True):
+        """Computes the normalized values for a single transect.
+
+        Parameters
+        ----------
+        transect: TransectData
+            Object of TransectData
+        data_type: str
+            Type of data (v, q, V, or Q)
+        threshold: int
+            Number of data points in an increment for the increment to be valid.
+        data_extent: list
+            Defines percent of data from start of transect to use, default [0, 100]
+        use_weighted: bool
+            Specifies if discharge weighted medians are to be used in the extrapolation fit
+        sub_from_left: bool
+            Specifies if the subsection should start from the left bank when subsectioning
+        use_q: bool
+            Specifies whether to use the discharge rather than the cross product (xprod) when subsectioning
+        """
+
+        # If the data extent is not defined, set data_extent to [0, 100] so that all data are used
+        if data_extent is None:
+            data_extent = [0, 100]
+
+        self.sub_from_left = sub_from_left
+        self.use_q = use_q
+            
+        # Get data copies to avoid changing original data
+        filename = transect.file_name
+        in_transect_idx = transect.in_transect_idx
+
+        depths_selected = getattr(transect.depths, transect.depths.selected)
+        cell_depth = np.copy(depths_selected.depth_cell_depth_m[:, in_transect_idx])
+        cells_above_sl = transect.w_vel.cells_above_sl[:, in_transect_idx]
+        cell_depth[cells_above_sl == False] = np.nan
+        depth_ens = np.copy(depths_selected.depth_processed_m[in_transect_idx])
+
+        w_vel_x = np.copy(transect.w_vel.u_processed_mps[:, in_transect_idx])
+        w_vel_y = np.copy(transect.w_vel.v_processed_mps[:, in_transect_idx])
+
+        invalid_data = np.logical_not(transect.w_vel.valid_data[0, :, in_transect_idx]).T
+        w_vel_x[invalid_data] = np.nan
+        w_vel_y[invalid_data] = np.nan
+
+        boat_select = getattr(transect.boat_vel, transect.boat_vel.selected)
+        if boat_select is not None:
+            bt_vel_x = np.copy(boat_select.u_processed_mps[in_transect_idx])
+            bt_vel_y = np.copy(boat_select.v_processed_mps[in_transect_idx])
+        else:
+            bt_vel_x = np.tile([np.nan], transect.boat_vel.bt_vel.u_processed_mps[in_transect_idx].shape)
+            bt_vel_y = np.tile([np.nan], transect.boat_vel.bt_vel.u_processed_mps[in_transect_idx].shape)
+
+        # Compute discharges
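+        # Cell discharge uses the standard cross-product formulation:
+        # (u_water * v_boat - v_water * u_boat) * cell_size * ensemble_duration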
+        xprod = np.multiply(w_vel_x, bt_vel_y) - np.multiply(w_vel_y, bt_vel_x)
+        cell_size = depths_selected.depth_cell_size_m
+        delta_t = transect.date_time.ens_duration_sec[in_transect_idx]
+        q = np.multiply(xprod[:, in_transect_idx] * cell_size[:, in_transect_idx], delta_t)
+        q_ens = np.nansum(q, 0)
+
+        # Ensure all elements of xprod can be used to compute q (have a delta_t); the first ensemble has no delta_t
+        idx_invalid = np.where(np.isnan(delta_t))[0]
+        xprod[:, idx_invalid] = np.nan
+
+        if np.nansum(np.abs(q_ens)) > 0:
+            # Compute ensemble weights
+            weight_ensemble = abs(q_ens) / np.nansum(abs(q_ens))
+
+            # Apply weights to cells
+            weights = np.tile(weight_ensemble, (cell_depth.shape[0], 1))
+        else:
+            weights = np.ones(w_vel_x.shape)
+
+        # Compute normalized cell depth by average depth in each ensemble
+        norm_cell_depth = np.divide(cell_depth, depth_ens)
+        norm_cell_depth[nan_less(norm_cell_depth, 0)] = np.nan
+
+        # If data type is discharge compute unit discharge for each cell
+        if data_type.lower() == 'q':
+            # Compute the cross product for each cell
+            unit = xprod
+        else:
+            w_vel_x = np.copy(transect.w_vel.u_processed_mps[:, in_transect_idx])
+            w_vel_y = np.copy(transect.w_vel.v_processed_mps[:, in_transect_idx])
+
+            invalid_data = np.logical_not(transect.w_vel.valid_data[0, :, in_transect_idx]).T
+            w_vel_x[invalid_data] = np.nan
+            w_vel_y[invalid_data] = np.nan
+
+            # Compute mean velocity components in each ensemble
+            w_vel_mean_1 = np.nanmean(w_vel_x, 0)
+            w_vel_mean_2 = np.nanmean(w_vel_y, 0)
+
+            # Compute a unit vector
+            direction, _ = cart2pol(w_vel_mean_1, w_vel_mean_2)
+            unit_vec_1, unit_vec_2 = pol2cart(direction, 1)
+            unit_vec = np.vstack([unit_vec_1, unit_vec_2])
+            
+            # Compute the velocity magnitude in the direction of the mean velocity of each
+            # ensemble using the dot product and unit vector
+            unit = np.tile([np.nan], w_vel_x.shape)
+            for i in range(w_vel_x.shape[0]):
+                unit[i, :] = np.sum(np.vstack([w_vel_x[i, :], w_vel_y[i, :]]) * unit_vec, 0)
+
+            # Discharge weighting of velocity data is not permitted
+            use_weighted = False
+
+        # Adjust to positive value
+        unit_total = np.nansum(unit)
+        if unit_total < 0:
+            unit *= -1
+            
+        # Compute normalized unit values
+        with warnings.catch_warnings():
+            warnings.simplefilter("ignore", category=RuntimeWarning)
+            unit_norm = np.divide(unit, np.abs(np.nanmean(unit, 0)))
+
+        # Apply extents if they have been specified
+        if data_extent[0] != 0 or data_extent[1] != 100:
+            if use_q:
+                # Adjust cumulative sum direction based on start bank so that cumsum is always from left to right
+                if transect.start_edge == 'Right' and sub_from_left:
+                    q_ens_flipped = np.flip(q_ens)
+                    q_cum = np.nancumsum(q_ens_flipped)
+                    q_max = q_cum[-1]
+                    q_cum = np.flip(q_cum)
+                else:
+                    q_cum = np.nancumsum(q_ens)
+                    q_max = q_cum[-1]
+            else:
+                unit_ens = np.nansum(unit, 0)
+                q_cum = np.nancumsum(unit_ens)
+                q_max = q_cum[-1]
+            # Adjust so total discharge is positive
+            if q_max < 0:
+                q_cum *= -1
+                q_max *= -1
+
+            # Apply extents
+            unit_left = q_max * data_extent[0] / 100
+            unit_right = q_max * data_extent[1] / 100
+            idx_extent = np.where(np.logical_and(np.greater(q_cum, unit_left),
+                                                 np.less(q_cum, unit_right)))[0]
+            unit_norm = unit_norm[:, idx_extent]
+            norm_cell_depth = norm_cell_depth[:, idx_extent]
+            weights = weights[:, idx_extent]
+            
+        # If whole profile is negative make positive
+        idx_neg1 = np.tile([np.nan], [unit_norm.shape[1], 1])
+        idx_neg2 = np.tile([np.nan], [unit_norm.shape[1], 1])
+        for c in range(unit_norm.shape[1]):
+            idx_neg1[c] = len(np.where(nan_less(unit_norm[:, c], 0))[0])
+            idx_neg2[c] = len(np.where(np.isnan(unit_norm[:, c]) == False)[0])
+        idx_neg = np.squeeze(idx_neg1) == np.squeeze(idx_neg2)
+        unit_norm[:, idx_neg] = unit_norm[:, idx_neg] * -1
+
+        # Store results
+        self.file_name = filename
+        self.data_extent = data_extent
+        self.data_type = data_type
+        self.cell_depth_normalized = norm_cell_depth
+        self.unit_normalized = unit_norm
+        self.use_weighted = use_weighted
+        self.weights = weights
+        self.compute_stats(threshold)
+
+    @staticmethod
+    def qrev_mat_in(mat_data):
+        """Processes the Matlab data structure to obtain a list of NormData objects containing transect
+        data from the Matlab data structure.
+
+        Parameters
+        ----------
+        mat_data: mat_struct
+            Matlab data structure obtained from sio.loadmat
+
+        Returns
+        -------
+        norm_data: list
+            List of NormData objects
+        """
+        norm_data = []
+        if hasattr(mat_data, 'normData'):
+            for data in mat_data.normData:
+                temp = NormData()
+                temp.populate_from_qrev_mat(data)
+                norm_data.append(temp)
+        return norm_data
+
+    def populate_from_qrev_mat(self, mat_data):
+        """Populates the object using data from previously saved QRev Matlab file.
+
+        Parameters
+        ----------
+        mat_data: mat_struct
+           Matlab data structure obtained from sio.loadmat
+        """
+
+        self.file_name = mat_data.fileName
+        self.cell_depth_normalized = mat_data.cellDepthNormalized
+        self.unit_normalized = mat_data.unitNormalized
+        self.unit_normalized_med = mat_data.unitNormalizedMed
+        self.unit_normalized_no = mat_data.unitNormalizedNo
+        self.unit_normalized_z = mat_data.unitNormalizedz
+        self.unit_normalized_25 = mat_data.unitNormalized25
+        self.unit_normalized_75 = mat_data.unitNormalized75
+        self.data_type = mat_data.dataType
+        self.data_extent = mat_data.dataExtent
+        self.valid_data = mat_data.validData - 1
+        if hasattr(mat_data, 'use_weighted'):
+            self.use_weighted = mat_data.use_weighted
+            self.weights = mat_data.weights
+        else:
+            self.use_weighted = False
+            self.weights = None
+        if hasattr(mat_data, 'use_q'):
+            self.use_q = mat_data.use_q
+        if hasattr(mat_data, 'sub_from_left'):
+            self.sub_from_left = mat_data.sub_from_left
+
+    def compute_stats(self, threshold):
+        """Computes the statistics for the normalized data.
+
+        Parameters
+        ----------
+        threshold: int
+            Number of data points in an increment for the increment to be valid.
+        """
+
+        # Set averaging interval
+        avg_interval = np.arange(0, 1.05, .05)
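+        # The 21 edges from 0 to 1 in steps of 0.05 define the twenty 5%
+        # increments of normalized depth described in the class docstring.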
+
+        # Initialize variables to nan
+        unit_norm_med = np.tile([np.nan], len(avg_interval) - 1)
+        unit_norm_med_no = np.tile([np.nan], len(avg_interval) - 1)
+        unit_25 = np.tile([np.nan], len(avg_interval) - 1)
+        unit_75 = np.tile([np.nan], len(avg_interval) - 1)
+        avgz = np.tile([np.nan], len(avg_interval) - 1)
+
+        # Process each normalized increment
+        for i in range(len(avg_interval) - 1):
+            condition_1 = nan_greater(self.cell_depth_normalized, avg_interval[i])
+            condition_2 = nan_less_equal(self.cell_depth_normalized, avg_interval[i + 1])
+            condition_3 = np.logical_not(np.isnan(self.unit_normalized))
+            condition_all = np.logical_and(np.logical_and(condition_1, condition_2), condition_3)
+            if np.any(condition_all):
+                if self.data_type.lower() == 'q' and self.use_weighted:
+                    results = self.weighted_quantile(self.unit_normalized[condition_all],
+                                                     quantiles=[0.25, 0.5, 0.75],
+                                                     sample_weight=self.weights[condition_all])
+                    unit_25[i] = results[0]
+                    unit_norm_med[i] = results[1]
+                    unit_75[i] = results[2]
+                else:
+                    unit_25[i], unit_norm_med[i], unit_75[i] = sp.mstats.mquantiles(self.unit_normalized[condition_all],
+                                                                                    alphap=0.5, betap=0.5)
+
+                unit_norm_med_no[i] = np.sum(np.isnan(self.unit_normalized[condition_all]) == False)
+                avgz[i] = 1 - np.nanmean(self.cell_depth_normalized[condition_all])
+
+        # Mark increments invalid if they do not have sufficient data
+        cutoff = np.nanmedian(unit_norm_med_no[nan_greater(unit_norm_med_no, 0)]) * (threshold / 100)
+        self.valid_data = np.where(nan_greater(unit_norm_med_no, cutoff))[0]
+
+        self.unit_normalized_med = unit_norm_med
+        self.unit_normalized_no = unit_norm_med_no
+        self.unit_normalized_25 = unit_25
+        self.unit_normalized_75 = unit_75
+        self.unit_normalized_z = avgz
+
+    @staticmethod
+    def weighted_quantile(values, quantiles, sample_weight):
+        """ Very close to numpy.percentile, but supports weights.
+        NOTE: quantiles should be in [0, 1]!
+
+        Parameters
+        ----------
+        values: ndarray(float)
+            Array of normalized values
+        quantiles: list
+            List of quantiles to be computed
+        sample_weight: ndarray(float)
+            Weights for each value
+
+        Returns
+        -------
+        results: list
+            List of values at specified quantiles
+
+        """
+
+        sorter = np.argsort(values)
+        values = values[sorter]
+        sample_weight = sample_weight[sorter]
+
+        weighted_quantiles = np.cumsum(sample_weight) - 0.5 * sample_weight
+        weighted_quantiles /= np.sum(sample_weight)
+
+        results = []
+        for quantile in quantiles:
+            results.append(np.interp(quantile, weighted_quantiles, values))
+
+        return results
+
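+    # Quick check of weighted_quantile (a sketch): with equal weights the
+    # result matches the unweighted quantiles, e.g.
+    #
+    #     NormData.weighted_quantile(np.array([1., 2., 3., 4.]),
+    #                                quantiles=[0.5],
+    #                                sample_weight=np.ones(4))
+    #     # -> [2.5]
+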
+    def create_composite(self, transects, norm_data, threshold):
+        """Compute normalized data for measurement composite.
+
+        Parameters
+        ----------
+        transects: list
+            List of objects of TransectData
+        norm_data: list
+            List of objects of NormData
+        threshold: int
+            Number of data points in an increment for the increment to be valid.
+        """
+
+        # Initialize lists
+        n_cells = []
+        n_ens = [0]
+
+        # Determine number of cells and ensembles for each transect
+        for data in norm_data:
+            n_cells.append(data.unit_normalized.shape[0])
+            try:
+                n_ens.append(data.unit_normalized.shape[1])
+            except IndexError:
+                n_ens.append(1)
+        max_cells = max(n_cells)
+        sum_ens = np.cumsum(n_ens)
+
+        # Initialize normalized variables
+        self.unit_normalized = np.tile([np.nan], (max_cells, sum_ens[-1]))
+        self.cell_depth_normalized = np.tile([np.nan], (max_cells, sum_ens[-1]))
+        self.weights = np.tile([np.nan], (max_cells, sum_ens[-1]))
+
+        # Process each transect using data from only the checked transects
+        for n in range(len(transects)):
+            if transects[n].checked:
+                self.unit_normalized[:n_cells[n], np.arange(sum_ens[n], sum_ens[n + 1])] \
+                    = norm_data[n].unit_normalized
+                self.weights[:n_cells[n], np.arange(sum_ens[n], sum_ens[n + 1])] \
+                    = norm_data[n].weights
+                self.cell_depth_normalized[:n_cells[n], np.arange(sum_ens[n], sum_ens[n + 1])] \
+                    = norm_data[n].cell_depth_normalized
+                self.data_extent = norm_data[n].data_extent
+                self.data_type = norm_data[n].data_type
+                self.use_weighted = norm_data[n].use_weighted
+
+        # Store data
+        self.file_name = 'Measurement'
+        self.compute_stats(threshold)
diff --git a/Classes/Oursin.py b/Classes/Oursin.py
new file mode 100644
index 0000000..263ed78
--- /dev/null
+++ b/Classes/Oursin.py
@@ -0,0 +1,2279 @@
+import time
+
+import pandas as pd
+import copy
+from Classes.QComp import QComp
+from scipy.stats import t
+import numpy as np
+import math
+import scipy.stats
+from profilehooks import profile
+from MiscLibs.common_functions import cosd, sind
+from MiscLibs.bayes_cov_compiled import bayes_cov
+
+
+class Oursin(object):
+    """Computes the uncertainty of a measurement using Oursin method.
+
+    Attributes
+    ----------
+    bot_meth: list
+        List that contains the method proposed by Extrap for each transect
+    exp_95ic_min: list
+        List that contains the min range of 95% interval if power-power method is used for transect
+    exp_95ic_max: list
+        List that contains the max range of 95% interval if power-power method is used for transect
+    pp_exp: list
+        List that contains the power-power exponent computed by Extrap for Power-Power transect only
+    ns_exp: list
+        List that contains the no-slip exponent computed by Extrap for No-Slip method transect only
+    exp_pp_min: float
+        Minimum power-power exponent used for simulating possible discharge
+    exp_pp_max: float
+        Maximum power-power exponent used for simulating possible discharge
+    exp_ns_min: float
+        Minimum no-slip exponent used for simulating possible discharge
+    exp_ns_max: float
+        Maximum no-slip exponent used for simulating possible discharge
+    d_right_error_min: list
+        List that contains the minimum right distance (in m) used for simulating the discharge for each transect
+    d_left_error_min: list
+        List that contains the minimum left distance (in m) used for simulating the discharge for each transect
+    d_right_error_max: list
+        List that contains the maximum right distance (in m) used for simulating the discharge for each transect
+    d_left_error_max: list
+        List that contains the maximum left distance (in m) used for simulating the discharge for each transect
+    draft_error_list: list
+        List that contains the draft (in cm) used for simulating the discharge for each transect
+    u_syst_list: list
+        List that contains the computed systematic uncertainty (68%) for each transect
+    u_compass_list: list
+        List that contains the computed uncertainty (68%) due to compass error for each transect
+    u_meas_list: list
+        List that contains the computed measured area uncertainty (68%) for each transect
+    u_ens_list: list
+        List that contains the computed uncertainty (68%) due to the limited number of ensembles for each transect
+    u_movbed_list: list
+        List that contains the estimated uncertainty (68%) due to moving bed for each transect
+    u_invalid_water_list: list
+        List that contains the computed uncertainty (68%) due to invalid water velocities for each transect
+    u_invalid_boat_list: list
+        List that contains the computed uncertainty (68%) due to invalid boat velocities for each transect
+    u_invalid_depth_list: list
+        List that contains the computed uncertainty (68%) due to invalid depths for each transect
+    u_top_list: list
+        List that contains the computed uncertainty (68%) due to top discharge extrapolation for each transect
+    u_bot_list: list
+        List that contains the computed uncertainty (68%) due to bottom discharge extrapolation for each transect
+    u_left_list: list
+        List that contains the computed uncertainty (68%) due to left discharge extrapolation for each transect
+    u_right_list: list
+        List that contains the computed uncertainty (68%) due to right discharge extrapolation for each transect
+    u_syst_mean_user_list: list
+        List that contains the user specified systematic uncertainty (68%) for each transect
+    u_compass_user_list: list
+        List that contains the user specified uncertainty (68%) due to compass error for each transect
+    u_meas_mean_user_list: list
+        List that contains the user specified measured area uncertainty (68%) for each transect
+    u_ens_user_list: list
+        List that contains the user specified uncertainty (68%) due to the limited number of ensembles for each transect
+    u_movbed_user_list: list
+        List that contains the user specified uncertainty (68%) due to moving bed for each transect
+    u_invalid_water_user_list: list
+        List that contains the user specified uncertainty (68%) due to invalid water velocities for each transect
+    u_invalid_boat_user_list: list
+        List that contains the user specified uncertainty (68%) due to invalid boat velocities for each transect
+    u_invalid_depth_user_list: list
+        List that contains the user specified uncertainty (68%) due to invalid depths for each transect
+    u_top_mean_user_list: list
+        List that contains the user specified uncertainty (68%) due to top discharge extrapolation for each transect
+    u_bot_mean_user_list: list
+        List that contains the user specified uncertainty (68%) due to bottom discharge extrapolation for each transect
+    u_left_mean_user_list: list
+        List that contains the user specified uncertainty (68%) due to left discharge extrapolation for each transect
+    u_right_mean_user_list: list
+        List that contains the user specified uncertainty (68%) due to right discharge extrapolation for each transect
+    cov_68: float
+        Computed uncertainty (68%) due to coefficient of variation
+    sim_original: DataFrame
+        Discharges (total, and subareas) computed for the processed discharge
+    sim_extrap_pp_16: DataFrame
+        Discharges (total, and subareas) computed using power fit with 1/6th exponent
+    sim_extrap_pp_min: DataFrame
+        Discharges (total, and subareas) computed using power fit with minimum exponent
+    sim_extrap_pp_max: DataFrame
+        Discharges (total, and subareas) computed using power fit with maximum exponent
+    sim_extrap_cns_16: DataFrame
+        Discharges (total, and subareas) computed using constant no slip with 1/6th exponent
+    sim_extrap_cns_min: DataFrame
+        Discharges (total, and subareas) computed using constant no slip with minimum exponent
+    sim_extrap_cns_max: DataFrame
+        Discharges (total, and subareas) computed using constant no slip with maximum exponent
+    sim_extrap_3pns_16: DataFrame
+        Discharges (total, and subareas) computed using 3pt no slip with 1/6th exponent
+    sim_extrap_3pns_opt: DataFrame
+        Discharges (total, and subareas) computed using 3pt no slip with optimized exponent
+    sim_edge_min: DataFrame
+        Discharges (total, and subareas) computed using minimum edge q
+    sim_edge_max: DataFrame
+        Discharges (total, and subareas) computed using maximum edge q
+    sim_draft_min: DataFrame
+        Discharges (total, and subareas) computed using minimum draft
+    sim_draft_max: DataFrame
+        Discharges (total, and subareas) computed using maximum draft
+    sim_cells_trdi: DataFrame
+        Discharges (total, and subareas) computed using TRDI method for invalid cells
+    sim_cells_above: DataFrame
+        Discharges (total, and subareas) computed using cells above for invalid cells
+    sim_cells_below: DataFrame
+        Discharges (total, and subareas) computed using cells below for invalid cells
+    sim_cells_before: DataFrame
+        Discharges (total, and subareas) computed using cells before for invalid cells
+    sim_cells_after: DataFrame
+        Discharges (total, and subareas) computed using cells after for invalid cells
+    nb_transects: float
+        Number of transects used
+    checked_idx: list
+        List of indices of checked transects
+    user_advanced_settings: dict
+        Dictionary of user specified advanced settings
+        exp_pp_min_user: float
+            User specified minimum exponent for power fit
+        exp_pp_max_user: float
+            User specified maximum exponent for power fit
+        exp_ns_min_user: float
+            User specified minimum exponent for no slip fit
+        exp_ns_max_user: float
+            User specified maximum exponent for no slip fit
+        draft_error_user: float
+            User specified draft error in m
+        dzi_prct_user: float
+            User specified percent error in depth cell size
+        right_edge_dist_prct_user: float
+            User specified percent error in right edge distance
+        left_edge_dist_prct_user: float
+            User specified percent error in left edge distance
+        gga_boat_user: float
+            User specified standard deviation of boat velocities based on gga in m/s
+        vtg_boat_user: float
+            User specified standard deviation of boat velocities based on vtg in m/s
+        compass_error_user: float
+            User specified compass error in degrees
+    default_advanced_settings: dict
+        Dictionary of default values for advanced settings
+        exp_pp_min: float
+            Default minimum exponent for power fit
+        exp_pp_max: float
+            Default maximum exponent for power fit
+        exp_ns_min: float
+            Default minimum exponent for no slip fit
+        exp_ns_max: float
+            Default maximum exponent for no slip fit
+        draft_error: float
+            Default draft error in m
+        dzi_prct: float
+            Default percent error in depth cell size
+        right_edge_dist_prct: float
+            Default percent error in right edge distance
+        left_edge_dist_prct: float
+            Default percent error in left edge distance
+        gga_boat: float
+            Default standard deviation of boat velocities based on gga in m/s
+        vtg_boat: float
+            Default standard deviation of boat velocities based on vtg in m/s
+        compass_error: float
+            Default compass error in degrees
+    user_specified_u: dict
+        Dictionary of user specified uncertainties as standard deviation in percent
+        u_syst_mean_user: float
+            User specified uncertainty (bias) due to the system, in percent
+        u_movbed_user: float
+            User specified uncertainty (bias) due to the moving-bed conditions, in percent
+        u_compass_user: float
+            User specified uncertainty (bias) due to the compass error, in percent
+        u_ens_user: float
+            User specified uncertainty (bias) due to the number of ensembles collected, in percent
+        u_meas_mean_user: float
+            User specified uncertainty (random) of the measured portion of the cross section, in percent
+        u_top_mean_user: float
+            User specified uncertainty (bias) due to the top extrapolation, in percent
+        u_bot_mean_user: float
+            User specified uncertainty (bias) due to the bottom extrapolation, in percent
+        u_right_mean_user: float
+            User specified uncertainty (bias) due to the right edge discharge estimate, in percent
+        u_left_mean_user: float
+            User specified uncertainty (bias) due to the left edge discharge estimate, in percent
+        u_invalid_boat_user: float
+            User specified uncertainty (bias) due to invalid boat velocities, in percent
+        u_invalid_depth_user: float
+            User specified uncertainty (bias) due to invalid depths, in percent
+        u_invalid_water_user: float
+            User specified uncertainty (bias) due to invalid water velocities, in percent
+    u: DataFrame
+        DataFrame containing standard deviations in percent for each transect: u_syst, u_compass, u_movbed, u_ens,
+        u_meas, u_top, u_bot, u_left, u_right, u_boat, u_depth, u_water, u_cov, total, and total_95
+    u_contribution_meas: DataFrame
+        DataFrame containing measured discharge uncertainty contribution from: boat, water, depth, and dzi
+    u_measurement: DataFrame
+        DataFrame containing standard deviations in percent for the whole measurement: u_syst, u_compass, u_movbed,
+        u_ens, u_meas, u_top, u_bot, u_left, u_right, u_boat, u_depth, u_water, u_cov, total, and total_95
+    u_contribution_measurement: DataFrame
+        DataFrame containing uncertainty contribution in percent from: u_syst, u_compass, u_movbed,
+        u_ens, u_meas, u_top, u_bot, u_left, u_right, u_boat, u_depth, u_water, u_cov, and total
+    u_user: DataFrame
+        DataFrame containing standard deviations in percent for each transect: u_syst, u_compass, u_movbed, u_ens,
+        u_meas, u_top, u_bot, u_left, u_right, u_boat, u_depth, u_water, u_cov, total, and total_95
+    u_measurement_user: DataFrame
+        DataFrame containing standard deviations in percent for the whole measurement: u_syst, u_compass, u_movbed,
+        u_ens, u_meas, u_top, u_bot, u_left, u_right, u_boat, u_depth, u_water, u_cov, total, and total_95
+    u_contribution_measurement_user: DataFrame
+        DataFrame containing uncertainty contribution in percent from: u_syst, u_compass, u_movbed,
+        u_ens, u_meas, u_top, u_bot, u_left, u_right, u_boat, u_depth, u_water, u_cov, and total
+    """
+
+    def __init__(self):
+        """Initialize class and instance variables."""
+
+        # User provided parameters
+        self.user_advanced_settings = {'exp_pp_min_user': np.nan,
+                                       'exp_pp_max_user': np.nan,
+                                       'exp_ns_min_user': np.nan,
+                                       'exp_ns_max_user': np.nan,
+                                       'draft_error_m_user': np.nan,
+                                       'dzi_prct_user': np.nan,
+                                       'right_edge_dist_prct_user': np.nan,
+                                       'left_edge_dist_prct_user': np.nan,
+                                       'gga_boat_mps_user': np.nan,
+                                       'vtg_boat_mps_user': np.nan,
+                                       'compass_error_deg_user': np.nan,
+                                       'cov_prior_user': np.nan,
+                                       'cov_prior_u_user': np.nan}
+
+        self.default_advanced_settings = {'exp_pp_min': 'computed',
+                                          'exp_pp_max': 'computed',
+                                          'exp_ns_min': 'computed',
+                                          'exp_ns_max': 'computed',
+                                          'draft_error_m': 'computed',
+                                          'dzi_prct': 0.5,
+                                          'right_edge_dist_prct': 20,
+                                          'left_edge_dist_prct': 20,
+                                          'gga_boat_mps': 'computed',
+                                          'vtg_boat_mps': 0.05,
+                                          'compass_error_deg': 1,
+                                          'cov_prior': 0.03,
+                                          'cov_prior_u': 0.20}
+
+        self.user_specified_u = {'u_syst_mean_user': np.nan,
+                                 'u_movbed_user': np.nan,
+                                 'u_compass_user': np.nan,
+                                 'u_ens_user': np.nan,
+                                 'u_meas_mean_user': np.nan,
+                                 'u_top_mean_user': np.nan,
+                                 'u_bot_mean_user': np.nan,
+                                 'u_right_mean_user': np.nan,
+                                 'u_left_mean_user': np.nan,
+                                 'u_invalid_boat_user': np.nan,
+                                 'u_invalid_depth_user': np.nan,
+                                 'u_invalid_water_user': np.nan}
+
+        # Extrap results
+        self.bot_meth = []
+        self.exp_95ic_min = []
+        self.exp_95ic_max = []
+        self.pp_exp = []
+        self.ns_exp = []
+
+        # Parameters used for computing the uncertainty
+        self.exp_pp_min = np.nan
+        self.exp_pp_max = np.nan
+        self.exp_ns_min = np.nan
+        self.exp_ns_max = np.nan
+        self.d_right_error_min = []
+        self.d_left_error_min = []
+        self.d_right_error_max = []
+        self.d_left_error_max = []
+        self.draft_error_list = []
+
+        # Terms computed by transect (list at 68% level)
+        self.u_syst_list = []
+        self.u_compass_list = []
+        self.u_meas_list = []
+        self.u_ens_list = []
+        self.u_movbed_list = []
+        self.u_invalid_water_list = []
+        self.u_invalid_boat_list = []
+        self.u_invalid_depth_list = []
+        self.u_top_list = []
+        self.u_bot_list = []
+        self.u_left_list = []
+        self.u_right_list = []
+
+        self.u_syst_mean_user_list = []
+        self.u_compass_user_list = []
+        self.u_movbed_user_list = []
+        self.u_meas_mean_user_list = []
+        self.u_ens_user_list = []
+        self.u_top_mean_user_list = []
+        self.u_bot_mean_user_list = []
+        self.u_left_mean_user_list = []
+        self.u_right_mean_user_list = []
+        self.u_invalid_boat_user_list = []
+        self.u_invalid_depth_user_list = []
+        self.u_invalid_water_user_list = []
+
+        # Term computed for measurement
+        self.cov_68 = np.nan
+
+        self.nb_transects = np.nan
+        self.checked_idx = []
+
+        # --- Store results of all simulations in DataFrame
+        self.sim_original = pd.DataFrame(columns=['q_total', 'q_top', 'q_bot', 'q_left', 'q_right', 'q_middle'])
+        self.sim_extrap_pp_16 = pd.DataFrame(columns=['q_total', 'q_top', 'q_bot'])
+        self.sim_extrap_pp_opt = pd.DataFrame(columns=['q_total', 'q_top', 'q_bot'])
+        self.sim_extrap_pp_min = pd.DataFrame(columns=['q_total', 'q_top', 'q_bot'])
+        self.sim_extrap_pp_max = pd.DataFrame(columns=['q_total', 'q_top', 'q_bot'])
+        self.sim_extrap_cns_16 = pd.DataFrame(columns=['q_total', 'q_top', 'q_bot'])
+        self.sim_extrap_cns_opt = pd.DataFrame(columns=['q_total', 'q_top', 'q_bot'])
+        self.sim_extrap_cns_min = pd.DataFrame(columns=['q_total', 'q_top', 'q_bot'])
+        self.sim_extrap_cns_max = pd.DataFrame(columns=['q_total', 'q_top', 'q_bot'])
+        self.sim_extrap_3pns_16 = pd.DataFrame(columns=['q_total', 'q_top', 'q_bot'])
+        self.sim_extrap_3pns_opt = pd.DataFrame(columns=['q_total', 'q_top', 'q_bot'])
+        self.sim_edge_min = pd.DataFrame(columns=['q_total', 'q_left', 'q_right'])
+        self.sim_edge_max = pd.DataFrame(columns=['q_total', 'q_left', 'q_right'])
+        self.sim_draft_min = pd.DataFrame(columns=['q_total', 'q_top', 'q_left', 'q_right'])
+        self.sim_draft_max = pd.DataFrame(columns=['q_total', 'q_top', 'q_left', 'q_right'])
+        self.sim_cells_trdi = pd.DataFrame(columns=['q_total', 'q_middle'])
+        self.sim_cells_above = pd.DataFrame(columns=['q_total', 'q_middle'])
+        self.sim_cells_below = pd.DataFrame(columns=['q_total', 'q_middle'])
+        self.sim_cells_before = pd.DataFrame(columns=['q_total', 'q_middle'])
+        self.sim_cells_after = pd.DataFrame(columns=['q_total', 'q_middle'])
+        self.sim_shallow = pd.DataFrame(columns=['q_total', 'q_middle'])
+        self.sim_depth_hold = pd.DataFrame(columns=['q_total', 'q_middle'])
+        self.sim_depth_next = pd.DataFrame(columns=['q_total', 'q_middle'])
+        self.sim_boat_hold = pd.DataFrame(columns=['q_total', 'q_middle'])
+        self.sim_boat_next = pd.DataFrame(columns=['q_total', 'q_middle'])
+        self.u_contribution_meas = pd.DataFrame(columns=['boat', 'water', 'dzi'])
+        self.u = pd.DataFrame(columns=['u_syst', 'u_compass', 'u_movbed', 'u_ens', 'u_meas', 'u_top', 'u_bot',
+                                       'u_left', 'u_right', 'u_boat', 'u_depth', 'u_water', 'u_cov', 'total',
+                                       'total_95'])
+        self.u_measurement = pd.DataFrame(columns=['u_syst', 'u_compass', 'u_movbed', 'u_ens', 'u_meas', 'u_top',
+                                                   'u_bot', 'u_left', 'u_right', 'u_boat', 'u_depth', 'u_water',
+                                                   'u_cov', 'total', 'total_95'])
+        self.u_contribution = pd.DataFrame(columns=['u_syst', 'u_compass', 'u_movbed', 'u_ens', 'u_meas', 'u_top',
+                                                    'u_bot', 'u_left', 'u_right', 'u_boat', 'u_depth', 'u_water',
+                                                    'u_cov', 'total'])
+        self.u_contribution_measurement = pd.DataFrame(columns=['u_syst', 'u_compass', 'u_movbed', 'u_ens', 'u_meas',
+                                                                'u_top', 'u_bot', 'u_left', 'u_right', 'u_boat',
+                                                                'u_depth', 'u_water', 'u_cov', 'total'])
+        self.u_user = pd.DataFrame(columns=['u_syst', 'u_compass', 'u_movbed', 'u_ens', 'u_meas', 'u_top', 'u_bot',
+                                            'u_left', 'u_right', 'u_boat', 'u_depth', 'u_water', 'u_cov', 'total',
+                                            'total_95'])
+        self.u_measurement_user = pd.DataFrame(columns=['u_syst', 'u_compass', 'u_movbed', 'u_ens', 'u_meas', 'u_top',
+                                                        'u_bot', 'u_left', 'u_right', 'u_boat', 'u_depth', 'u_water',
+                                                        'u_cov', 'total', 'total_95'])
+        self.u_contribution_user = pd.DataFrame(columns=['u_syst', 'u_compass', 'u_movbed', 'u_ens', 'u_meas', 'u_top',
+                                                         'u_bot', 'u_left', 'u_right', 'u_boat', 'u_depth', 'u_water',
+                                                         'u_cov', 'total'])
+        self.u_contribution_measurement_user = pd.DataFrame(columns=['u_syst', 'u_compass', 'u_movbed', 'u_ens',
+                                                                     'u_meas', 'u_top', 'u_bot', 'u_left', 'u_right',
+                                                                     'u_boat', 'u_depth', 'u_water', 'u_cov', 'total'])
+
+    def populate_from_qrev_mat(self, meas_struct):
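+        """Populates the object using data from a QRev output file saved in Matlab format.
+
+        Parameters
+        ----------
+        meas_struct: mat_struct
+            Matlab data structure obtained from sio.loadmat
+        """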
+
+        # User provided parameters
+        self.user_advanced_settings = {'exp_pp_min_user': meas_struct.oursin.user_advanced_settings.exp_pp_min_user,
+                                       'exp_pp_max_user': meas_struct.oursin.user_advanced_settings.exp_pp_max_user,
+                                       'exp_ns_min_user': meas_struct.oursin.user_advanced_settings.exp_ns_min_user,
+                                       'exp_ns_max_user': meas_struct.oursin.user_advanced_settings.exp_ns_max_user,
+                                       'draft_error_m_user':
+                                           meas_struct.oursin.user_advanced_settings.draft_error_m_user,
+                                       'dzi_prct_user': meas_struct.oursin.user_advanced_settings.dzi_prct_user,
+                                       'right_edge_dist_prct_user':
+                                           meas_struct.oursin.user_advanced_settings.right_edge_dist_prct_user,
+                                       'left_edge_dist_prct_user':
+                                           meas_struct.oursin.user_advanced_settings.left_edge_dist_prct_user,
+                                       'gga_boat_mps_user': meas_struct.oursin.user_advanced_settings.gga_boat_mps_user,
+                                       'vtg_boat_mps_user': meas_struct.oursin.user_advanced_settings.vtg_boat_mps_user,
+                                       'compass_error_deg_user':
+                                           meas_struct.oursin.user_advanced_settings.compass_error_deg_user,
+                                       'cov_prior_user': meas_struct.oursin.user_advanced_settings.cov_prior_user,
+                                       'cov_prior_u_user': meas_struct.oursin.user_advanced_settings.cov_prior_u_user}
+
+        self.user_specified_u = {'u_syst_mean_user': meas_struct.oursin.user_specified_u.u_syst_mean_user,
+                                 'u_movbed_user': meas_struct.oursin.user_specified_u.u_movbed_user,
+                                 'u_compass_user': meas_struct.oursin.user_specified_u.u_compass_user,
+                                 'u_ens_user': meas_struct.oursin.user_specified_u.u_ens_user,
+                                 'u_meas_mean_user': meas_struct.oursin.user_specified_u.u_meas_mean_user,
+                                 'u_top_mean_user': meas_struct.oursin.user_specified_u.u_top_mean_user,
+                                 'u_bot_mean_user': meas_struct.oursin.user_specified_u.u_bot_mean_user,
+                                 'u_right_mean_user': meas_struct.oursin.user_specified_u.u_right_mean_user,
+                                 'u_left_mean_user': meas_struct.oursin.user_specified_u.u_left_mean_user,
+                                 'u_invalid_boat_user': meas_struct.oursin.user_specified_u.u_invalid_boat_user,
+                                 'u_invalid_depth_user': meas_struct.oursin.user_specified_u.u_invalid_depth_user,
+                                 'u_invalid_water_user': meas_struct.oursin.user_specified_u.u_invalid_water_user}
+
+        # Extrap results
+        if type(meas_struct.oursin.bot_meth) is str:
+            self.bot_meth = [meas_struct.oursin.bot_meth]
+        else:
+            self.bot_meth = meas_struct.oursin.bot_meth.tolist()
+
+        if type(meas_struct.oursin.exp_95ic_min) is float:
+            self.exp_95ic_min = meas_struct.oursin.exp_95ic_min
+        else:
+            self.exp_95ic_min = meas_struct.oursin.exp_95ic_min.tolist()
+
+        if type(meas_struct.oursin.exp_95ic_max) is float:
+            self.exp_95ic_max = meas_struct.oursin.exp_95ic_max
+        else:
+            self.exp_95ic_max = meas_struct.oursin.exp_95ic_max.tolist()
+
+        if type(meas_struct.oursin.ppExponent) is float:
+            self.pp_exp = meas_struct.oursin.ppExponent
+        else:
+            self.pp_exp = meas_struct.oursin.ppExponent.tolist()
+
+        if type(meas_struct.oursin.nsExponent) is float:
+            self.ns_exp = meas_struct.oursin.nsExponent
+        else:
+            self.ns_exp = meas_struct.oursin.nsExponent.tolist()
+
+        # Parameters used for computing the uncertainty
+        self.exp_pp_min = meas_struct.oursin.exp_pp_min
+        self.exp_pp_max = meas_struct.oursin.exp_pp_max
+        self.exp_ns_min = meas_struct.oursin.exp_ns_min
+        self.exp_ns_max = meas_struct.oursin.exp_ns_max
+
+        if type(meas_struct.oursin.d_right_error_min) is float:
+            self.d_right_error_min = meas_struct.oursin.d_right_error_min
+            self.d_left_error_min = meas_struct.oursin.d_left_error_min
+            self.d_right_error_max = meas_struct.oursin.d_right_error_max
+            self.d_left_error_max = meas_struct.oursin.d_left_error_max
+            self.draft_error_list = meas_struct.oursin.draft_error_list
+        else:
+            self.d_right_error_min = meas_struct.oursin.d_right_error_min.tolist()
+            self.d_left_error_min = meas_struct.oursin.d_left_error_min.tolist()
+            self.d_right_error_max = meas_struct.oursin.d_right_error_max.tolist()
+            self.d_left_error_max = meas_struct.oursin.d_left_error_max.tolist()
+            self.draft_error_list = meas_struct.oursin.draft_error_list.tolist()
+
+        # Terms computed by transect (list at 68% level)
+        if type(meas_struct.oursin.u_syst_mean_user_list) is float:
+            self.u_syst_list = [meas_struct.oursin.u_syst_list]
+            self.u_compass_list = [meas_struct.oursin.u_compass_list]
+            self.u_meas_list = [meas_struct.oursin.u_meas_list]
+            self.u_ens_list = [meas_struct.oursin.u_ens_list]
+            self.u_movbed_list = [meas_struct.oursin.u_movbed_list]
+            self.u_invalid_water_list = [meas_struct.oursin.u_invalid_water_list]
+            self.u_invalid_boat_list = [meas_struct.oursin.u_invalid_boat_list]
+            self.u_invalid_depth_list = [meas_struct.oursin.u_invalid_depth_list]
+            self.u_top_list = [meas_struct.oursin.u_top_list]
+            self.u_bot_list = [meas_struct.oursin.u_bot_list]
+            self.u_left_list = [meas_struct.oursin.u_left_list]
+            self.u_right_list = [meas_struct.oursin.u_right_list]
+
+            self.u_syst_mean_user_list = [meas_struct.oursin.u_syst_mean_user_list]
+            self.u_compass_user_list = [meas_struct.oursin.u_compass_user_list]
+            self.u_movbed_user_list = [meas_struct.oursin.u_movbed_user_list]
+            self.u_meas_mean_user_list = [meas_struct.oursin.u_meas_mean_user_list]
+            self.u_ens_user_list = [meas_struct.oursin.u_ens_user_list]
+            self.u_top_mean_user_list = [meas_struct.oursin.u_top_mean_user_list]
+            self.u_bot_mean_user_list = [meas_struct.oursin.u_bot_mean_user_list]
+            self.u_left_mean_user_list = [meas_struct.oursin.u_left_mean_user_list]
+            self.u_right_mean_user_list = [meas_struct.oursin.u_right_mean_user_list]
+            self.u_invalid_boat_user_list = [meas_struct.oursin.u_invalid_boat_user_list]
+            self.u_invalid_depth_user_list = [meas_struct.oursin.u_invalid_depth_user_list]
+            self.u_invalid_water_user_list = [meas_struct.oursin.u_invalid_water_user_list]
+        else:
+            self.u_syst_list = meas_struct.oursin.u_syst_list.tolist()
+            self.u_compass_list = meas_struct.oursin.u_compass_list.tolist()
+            self.u_meas_list = meas_struct.oursin.u_meas_list.tolist()
+            self.u_ens_list = meas_struct.oursin.u_ens_list.tolist()
+            self.u_movbed_list = meas_struct.oursin.u_movbed_list.tolist()
+            self.u_invalid_water_list = meas_struct.oursin.u_invalid_water_list.tolist()
+            self.u_invalid_boat_list = meas_struct.oursin.u_invalid_boat_list.tolist()
+            self.u_invalid_depth_list = meas_struct.oursin.u_invalid_depth_list.tolist()
+            self.u_top_list = meas_struct.oursin.u_top_list.tolist()
+            self.u_bot_list = meas_struct.oursin.u_bot_list.tolist()
+            self.u_left_list = meas_struct.oursin.u_left_list.tolist()
+            self.u_right_list = meas_struct.oursin.u_right_list.tolist()
+
+            self.u_syst_mean_user_list = meas_struct.oursin.u_syst_mean_user_list.tolist()
+            self.u_compass_user_list = meas_struct.oursin.u_compass_user_list.tolist()
+            self.u_movbed_user_list = meas_struct.oursin.u_movbed_user_list.tolist()
+            self.u_meas_mean_user_list = meas_struct.oursin.u_meas_mean_user_list.tolist()
+            self.u_ens_user_list = meas_struct.oursin.u_ens_user_list.tolist()
+            self.u_top_mean_user_list = meas_struct.oursin.u_top_mean_user_list.tolist()
+            self.u_bot_mean_user_list = meas_struct.oursin.u_bot_mean_user_list.tolist()
+            self.u_left_mean_user_list = meas_struct.oursin.u_left_mean_user_list.tolist()
+            self.u_right_mean_user_list = meas_struct.oursin.u_right_mean_user_list.tolist()
+            self.u_invalid_boat_user_list = meas_struct.oursin.u_invalid_boat_user_list.tolist()
+            self.u_invalid_depth_user_list = meas_struct.oursin.u_invalid_depth_user_list.tolist()
+            self.u_invalid_water_user_list = meas_struct.oursin.u_invalid_water_user_list.tolist()
+
+        # COV
+        self.cov_68 = meas_struct.oursin.cov_68
+
+        self.nb_transects = meas_struct.oursin.nb_transects
+        self.checked_idx = meas_struct.oursin.checked_idx
+
+        # Reconstruct data frames from Matlab arrays
+        self.sim_original = pd.DataFrame(self.checkshape(meas_struct.oursin.sim_original),
+                                         columns=['q_total', 'q_top', 'q_bot', 'q_left', 'q_right', 'q_middle'])
+        self.sim_extrap_pp_16 = pd.DataFrame(self.checkshape(meas_struct.oursin.sim_extrap_pp_16),
+                                             columns=['q_total', 'q_top', 'q_bot'])
+        self.sim_extrap_pp_opt = pd.DataFrame(self.checkshape(meas_struct.oursin.sim_extrap_pp_opt),
+                                              columns=['q_total', 'q_top', 'q_bot'])
+        self.sim_extrap_pp_min = pd.DataFrame(self.checkshape(meas_struct.oursin.sim_extrap_pp_min),
+                                              columns=['q_total', 'q_top', 'q_bot'])
+        self.sim_extrap_pp_max = pd.DataFrame(self.checkshape(meas_struct.oursin.sim_extrap_pp_max),
+                                              columns=['q_total', 'q_top', 'q_bot'])
+        self.sim_extrap_cns_16 = pd.DataFrame(self.checkshape(meas_struct.oursin.sim_extrap_cns_16),
+                                              columns=['q_total', 'q_top', 'q_bot'])
+        self.sim_extrap_cns_opt = pd.DataFrame(self.checkshape(meas_struct.oursin.sim_extrap_cns_opt),
+                                               columns=['q_total', 'q_top', 'q_bot'])
+        self.sim_extrap_cns_min = pd.DataFrame(self.checkshape(meas_struct.oursin.sim_extrap_cns_min),
+                                               columns=['q_total', 'q_top', 'q_bot'])
+        self.sim_extrap_cns_max = pd.DataFrame(self.checkshape(meas_struct.oursin.sim_extrap_cns_max),
+                                               columns=['q_total', 'q_top', 'q_bot'])
+        self.sim_extrap_3pns_16 = pd.DataFrame(self.checkshape(meas_struct.oursin.sim_extrap_3pns_16),
+                                               columns=['q_total', 'q_top', 'q_bot'])
+        self.sim_extrap_3pns_opt = pd.DataFrame(self.checkshape(meas_struct.oursin.sim_extrap_3pns_opt),
+                                                columns=['q_total', 'q_top', 'q_bot'])
+        self.sim_edge_min = pd.DataFrame(self.checkshape(meas_struct.oursin.sim_edge_min),
+                                         columns=['q_total', 'q_left', 'q_right'])
+        self.sim_edge_max = pd.DataFrame(self.checkshape(meas_struct.oursin.sim_edge_max),
+                                         columns=['q_total', 'q_left', 'q_right'])
+        self.sim_draft_min = pd.DataFrame(self.checkshape(meas_struct.oursin.sim_draft_min),
+                                          columns=['q_total', 'q_top', 'q_left', 'q_right'])
+        self.sim_draft_max = pd.DataFrame(self.checkshape(meas_struct.oursin.sim_draft_max),
+                                          columns=['q_total', 'q_top', 'q_left', 'q_right'])
+        self.sim_cells_trdi = pd.DataFrame(self.checkshape(meas_struct.oursin.sim_cells_trdi),
+                                           columns=['q_total', 'q_middle'])
+        self.sim_cells_above = pd.DataFrame(self.checkshape(meas_struct.oursin.sim_cells_above),
+                                            columns=['q_total', 'q_middle'])
+        self.sim_cells_below = pd.DataFrame(self.checkshape(meas_struct.oursin.sim_cells_below),
+                                            columns=['q_total', 'q_middle'])
+        self.sim_cells_before = pd.DataFrame(self.checkshape(meas_struct.oursin.sim_cells_before),
+                                             columns=['q_total', 'q_middle'])
+        self.sim_cells_after = pd.DataFrame(self.checkshape(meas_struct.oursin.sim_cells_after),
+                                            columns=['q_total', 'q_middle'])
+        self.sim_shallow = pd.DataFrame(self.checkshape(meas_struct.oursin.sim_shallow),
+                                        columns=['q_total', 'q_middle'])
+        self.sim_depth_hold = pd.DataFrame(self.checkshape(meas_struct.oursin.sim_depth_hold),
+                                           columns=['q_total', 'q_middle'])
+        self.sim_depth_next = pd.DataFrame(self.checkshape(meas_struct.oursin.sim_depth_next),
+                                           columns=['q_total', 'q_middle'])
+        self.sim_boat_hold = pd.DataFrame(self.checkshape(meas_struct.oursin.sim_boat_hold),
+                                          columns=['q_total', 'q_middle'])
+        self.sim_boat_next = pd.DataFrame(self.checkshape(meas_struct.oursin.sim_boat_next),
+                                          columns=['q_total', 'q_middle'])
+        self.u_contribution_meas = pd.DataFrame(self.checkshape(meas_struct.oursin.u_contribution_meas),
+                                                columns=['boat', 'water', 'dzi'])
+        self.u = pd.DataFrame(self.checkshape(meas_struct.oursin.u),
+                              columns=['u_syst', 'u_compass', 'u_movbed', 'u_ens', 'u_meas', 'u_top', 'u_bot',
+                                       'u_left', 'u_right', 'u_boat', 'u_depth', 'u_water', 'u_cov', 'total',
+                                       'total_95'])
+        self.u_measurement = pd.DataFrame(self.checkshape(meas_struct.oursin.u_measurement),
+                                          columns=['u_syst', 'u_compass', 'u_movbed', 'u_ens', 'u_meas', 'u_top',
+                                                   'u_bot', 'u_left', 'u_right', 'u_boat', 'u_depth', 'u_water',
+                                                   'u_cov', 'total', 'total_95'])
+        self.u_contribution = pd.DataFrame(self.checkshape(meas_struct.oursin.u_contribution),
+                                           columns=['u_syst', 'u_compass', 'u_movbed', 'u_ens', 'u_meas', 'u_top',
+                                                    'u_bot', 'u_left', 'u_right', 'u_boat', 'u_depth', 'u_water',
+                                                    'u_cov', 'total'])
+        self.u_contribution_measurement = pd.DataFrame(self.checkshape(meas_struct.oursin.u_contribution_measurement),
+                                                       columns=['u_syst', 'u_compass', 'u_movbed', 'u_ens', 'u_meas',
+                                                                'u_top', 'u_bot', 'u_left', 'u_right', 'u_boat',
+                                                                'u_depth', 'u_water', 'u_cov', 'total'])
+        self.u_user = pd.DataFrame(self.checkshape(meas_struct.oursin.u_user),
+                                   columns=['u_syst', 'u_compass', 'u_movbed', 'u_ens', 'u_meas', 'u_top', 'u_bot',
+                                            'u_left', 'u_right', 'u_boat', 'u_depth', 'u_water', 'u_cov', 'total',
+                                            'total_95'])
+        self.u_measurement_user = pd.DataFrame(self.checkshape(meas_struct.oursin.u_measurement_user),
+                                               columns=['u_syst', 'u_compass', 'u_movbed', 'u_ens', 'u_meas', 'u_top',
+                                                        'u_bot', 'u_left', 'u_right', 'u_boat', 'u_depth', 'u_water',
+                                                        'u_cov', 'total', 'total_95'])
+        self.u_contribution_user = pd.DataFrame(self.checkshape(meas_struct.oursin.u_contribution_user),
+                                                columns=['u_syst', 'u_compass', 'u_movbed', 'u_ens', 'u_meas', 'u_top',
+                                                         'u_bot', 'u_left', 'u_right', 'u_boat', 'u_depth', 'u_water',
+                                                         'u_cov', 'total'])
+        self.u_contribution_measurement_user = pd.DataFrame(
+            self.checkshape(meas_struct.oursin.u_contribution_measurement_user),
+            columns=['u_syst', 'u_compass', 'u_movbed', 'u_ens',
+                     'u_meas', 'u_top', 'u_bot', 'u_left', 'u_right',
+                     'u_boat', 'u_depth', 'u_water', 'u_cov', 'total'])
+
+    @staticmethod
+    def checkshape(a):
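+        """Ensures the array is 2-D. A single simulation row read from Matlab may load as a
+        1-D array, so it is reshaped to one row to keep the DataFrame columns aligned.
+
+        Parameters
+        ----------
+        a: np.ndarray
+            Array of simulation results
+        """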
+        if len(a.shape) < 2:
+            a = a.reshape(1, -1)
+        return a
+
+    def compute_oursin(self, meas, user_advanced_settings=None, u_measurement_user=None):
+        """Computes the uncertainty for the components of the discharge measurement
+        using measurement data or user provided values.
+
+        Parameters
+        ----------
+        meas: Measurement
+            Object of class Measurement
+        user_advanced_settings: dict
+            Dictionary of user input on advanced settings
+        u_measurement_user: dict
+            Dictionary of user estimates of uncertainty, standard deviation in percent, for each component
+        """
+
+        if user_advanced_settings is not None:
+            self.user_advanced_settings = user_advanced_settings
+
+        if u_measurement_user is not None:
+            self.u_measurement_user = u_measurement_user
+
+        # Initialize lists
+        self.checked_idx = []
+        self.u_syst_list = []
+        self.u_meas_list = []
+        self.u_ens_list = []
+        self.u_movbed_list = []
+        self.u_invalid_water_list = []
+        self.u_invalid_boat_list = []
+        self.u_invalid_depth_list = []
+        self.u_top_list = []
+        self.u_bot_list = []
+        self.u_left_list = []
+        self.u_right_list = []
+
+        # Prep data for computations
+        self.data_prep(meas)
+        self.compute_measurement_cov(meas=meas, method='Bayes')
+
+        # 1. Systematic terms + correction terms (moving bed)
+        self.uncertainty_system()
+        self.uncertainty_moving_bed(meas=meas)
+        self.uncertainty_compass(meas=meas)
+
+        # 2. Measured uncertainty
+        self.uncertainty_measured_discharge(meas=meas)
+        self.uncertainty_number_ensembles(meas)
+
+        # 3. Run all the simulations to compute possible discharges
+        self.run_simulations(meas)
+
+        # 4. Compute uncertainty terms based on simulations and assuming a rectangular law
+        self.uncertainty_top_discharge()
+        self.uncertainty_bottom_discharge()
+        self.uncertainty_left_discharge()
+        self.uncertainty_right_discharge()
+        self.uncertainty_invalid_depth_data()
+        self.uncertainty_invalid_boat_data()
+        self.uncertainty_invalid_water_data()
+
+        # 5. Compute combined uncertainty
+        self.u, self.u_measurement, self.u_contribution, self.u_contribution_measurement = \
+            self.compute_combined_uncertainty(u_syst=self.u_syst_list,
+                                              u_compass=self.u_compass_list,
+                                              u_movbed=self.u_movbed_list,
+                                              u_meas=self.u_meas_list,
+                                              u_ens=self.u_ens_list,
+                                              u_top=self.u_top_list,
+                                              u_bot=self.u_bot_list,
+                                              u_left=self.u_left_list,
+                                              u_right=self.u_right_list,
+                                              u_boat=self.u_invalid_boat_list,
+                                              u_depth=self.u_invalid_depth_list,
+                                              u_water=self.u_invalid_water_list,
+                                              cov_68=self.cov_68)
+
+        self.u_user, self.u_measurement_user, self.u_contribution_user, self.u_contribution_measurement_user = \
+            self.compute_combined_uncertainty(u_syst=self.u_syst_mean_user_list,
+                                              u_compass=self.u_compass_user_list,
+                                              u_movbed=self.u_movbed_user_list,
+                                              u_meas=self.u_meas_mean_user_list,
+                                              u_ens=self.u_ens_user_list,
+                                              u_top=self.u_top_mean_user_list,
+                                              u_bot=self.u_bot_mean_user_list,
+                                              u_left=self.u_left_mean_user_list,
+                                              u_right=self.u_right_mean_user_list,
+                                              u_boat=self.u_invalid_boat_user_list,
+                                              u_depth=self.u_invalid_depth_user_list,
+                                              u_water=self.u_invalid_water_user_list,
+                                              cov_68=self.cov_68)
+
+    @staticmethod
+    def compute_combined_uncertainty(u_syst, u_compass, u_movbed, u_meas, u_ens, u_top, u_bot, u_left, u_right,
+                                     u_boat, u_depth, u_water, cov_68):
+        """Combined the uncertainty for each transect and for the measurement
+
+        Parameters
+        ----------
+        u_syst: list
+            List of system uncertainties for each transect
+        u_compass: list
+            List of uncertainties due to heading error
+        u_movbed: list
+            List of moving-bed uncertainties for each transect
+        u_meas: list
+            List of uncertainties for the measured portion for each transect
+        u_ens: list
+            List of uncertainties due to number of ensembles in each transect
+        u_top: list
+            List of uncertainties due to top extrapolation in each transect
+        u_bot: list
+            List of uncertainties due to the bottom extrapolation in each transect
+        u_left: list
+            List of uncertainties due to the left edge discharge in each transect
+        u_right: list
+            List of uncertainties due to the right edge discharge in each transect
+        u_boat: list
+            List of uncertainties due to invalid boat velocities
+        u_depth: list
+            List of uncertainties due to invalid depth data in each transect
+        u_water: list
+            List of uncertainties due to invalid water data in each transect
+        cov_68: float
+            Coefficient of variation for all transects
+
+        Returns
+        -------
+        u: DataFrame
+            DataFrame containing standard deviations in percent for each transect: u_syst, u_compass, u_movbed, u_ens,
+            u_meas, u_top, u_bot, u_left, u_right, u_boat, u_depth, u_water, u_cov, total, and total_95
+        u_measurement: DataFrame
+            DataFrame containing standard deviations in percent for the whole measurement: u_syst, u_compass, u_movbed,
+            u_ens, u_meas, u_top, u_bot, u_left, u_right, u_boat, u_depth, u_water, u_cov, total, and total_95
+        u_contribution: DataFrame
+            DataFrame containing the relative variance contribution by transect (fractions summing to 1) from: u_syst,
+            u_compass, u_movbed, u_ens, u_meas, u_top, u_bot, u_left, u_right, u_boat, u_depth, u_water, u_cov, and total
+        u_contribution_measurement: DataFrame
+            DataFrame containing the relative variance contribution for the measurement (fractions summing to 1) from:
+            u_syst, u_compass, u_movbed, u_ens, u_meas, u_top, u_bot, u_left, u_right, u_boat, u_depth, u_water, u_cov,
+            and total
+        """
+
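+        # In formula form: by transect, u_total^2 is the sum of the component variances
+        # (including u_cov^2); for the measurement, u_total^2 = (mean(u_meas^2) + mean(u_cov^2))
+        # / n_transects + the sum of the remaining mean (systematic) variances, and
+        # total_95 = 2 * total assumes a coverage factor of 2.
+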
+        # Create a DataFrame with all computed uncertainties for each checked transect
+        u = pd.DataFrame(columns=['u_syst', 'u_compass', 'u_movbed', 'u_ens', 'u_meas', 'u_top', 'u_bot',
+                                  'u_left', 'u_right', 'u_boat', 'u_depth', 'u_water', 'u_cov'])
+        u['u_syst'] = u_syst
+        u['u_compass'] = u_compass
+        u['u_movbed'] = u_movbed
+        u['u_meas'] = u_meas
+        u['u_ens'] = u_ens
+        u['u_water'] = u_water
+        u['u_top'] = u_top
+        u['u_bot'] = u_bot
+        u['u_left'] = u_left
+        u['u_right'] = u_right
+        u['u_cov'] = cov_68
+        u['u_boat'] = u_boat
+        u['u_depth'] = u_depth
+
+        n_transects = len(u_ens)
+
+        # Convert uncertainty (68% level of confidence) into variance
+        # Note that only variance is additive
+        u2 = u.pow(2)
+        u2_measurement = u2.mean(axis=0, skipna=False).to_frame().T
+
+        # Combined uncertainty by transect
+        # Sum of variance of each component, then sqrt, then multiply by 100 for percentage
+        u['total'] = (u2.sum(axis=1, skipna=False) ** 0.5)
+        u['total_95'] = u['total'] * 2
+        u = u.mul(100)
+
+        # Uncertainty for the measurement
+        # The random error is computed as a mean of the random error from the measured portion and the overall
+        # random error from the COV.
+        u2_random = u2['u_meas'].mean(skipna=False) + u2['u_cov'].mean(skipna=False)
+
+        # All other sources are systematic (mostly due to computation method and values from user)
+        u2_bias = u2_measurement.drop(['u_meas', 'u_cov'], axis=1).sum(axis=1, skipna=False)
+
+        # Combined all uncertainty sources
+        u2_measurement['total'] = (1 / n_transects) * u2_random + u2_bias[0]
+        u_measurement = u2_measurement ** 0.5
+        u_measurement['total_95'] = u_measurement['total'] * 2
+        u_measurement = u_measurement * 100
+
+        # Compute relative contributions from each source
+        u_contribution_measurement = u2_measurement.copy()
+
+        # Adjust contribution of u_meas and u_cov to account for number of transects
+        u_contribution_measurement['u_meas'] = u2_measurement['u_meas'] / n_transects
+        u_contribution_measurement['u_cov'] = u2_measurement['u_cov'] / n_transects
+        u_contribution_measurement = u_contribution_measurement.div(u_contribution_measurement['total'], axis=0)
+
+        # Similarly adjust u_meas and u_cov by transect before computing relative contributions
+        u_contribution = u2.copy()
+        u_contribution['u_meas'] = u2['u_meas'].div(n_transects, axis=0)
+        u_contribution['u_cov'] = u2['u_cov'].div(n_transects, axis=0)
+        u_contribution['total'] = u_contribution.sum(axis=1)
+        u_contribution = u_contribution.div(u_contribution['total'], axis=0)
+
+        return u, u_measurement, u_contribution, u_contribution_measurement
+
+    def data_prep(self, meas):
+        """Determine checked transects and max and min exponents for power and no slip extrapolation.
+
+        Parameters
+        ----------
+        meas: MeasurementData
+            Object of MeasurementData
+        """
+
+        # Use only checked transects
+        # Extract data that are used later on (PP and NS exponents)
+        self.checked_idx = []
+        self.bot_meth = []
+        self.exp_95ic_min = []
+        self.exp_95ic_max = []
+        self.pp_exp = []
+        self.ns_exp = []
+
+        for n in range(len(meas.transects)):
+            if meas.transects[n].checked:
+                self.checked_idx.append(n)
+
+                # Bottom method selected using data from each transect only
+                self.bot_meth.append(meas.extrap_fit.sel_fit[n].bot_method_auto)
+
+                # Store 95 percent bounds on power fit exponent for each transect if power selected
+                if meas.extrap_fit.sel_fit[n].bot_method_auto == "Power":
+                    try:
+                        self.exp_95ic_min.append(meas.extrap_fit.sel_fit[n].exponent_95_ci[0])
+                    except TypeError:
+                        self.exp_95ic_min.append(np.nan)
+                    try:
+                        self.exp_95ic_max.append(meas.extrap_fit.sel_fit[n].exponent_95_ci[1])
+                    except TypeError:
+                        self.exp_95ic_max.append(np.nan)
+
+                    self.pp_exp.append(meas.extrap_fit.sel_fit[n].pp_exponent)
+
+                # Store no slip exponent if no slip selected
+                elif meas.extrap_fit.sel_fit[n].bot_method_auto == "No Slip":
+                    self.ns_exp.append(meas.extrap_fit.sel_fit[n].ns_exponent)
+
+        self.nb_transects = len(self.checked_idx)
+
+    def run_simulations(self, meas):
+        """Compute discharges (top, bot, right, left, total, middle)  based on possible scenarios
+
+        Parameters
+        ----------
+        meas: Measurement
+            Object of class Measurement
+        """
+
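+        # The simulations populated here feed the rectangular-law terms computed later: the
+        # extrapolation (pp/cns/3pns) simulations feed u_top and u_bot (draft also enters u_top),
+        # the edge and draft simulations feed u_left and u_right, and the hold/next and
+        # invalid-cell simulations feed the invalid boat, depth, and water terms.
+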
+        # If the lists have not been saved, recompute q_sensitivity
+        if not hasattr(meas.extrap_fit.q_sensitivity, 'q_pp_list'):
+            meas.extrap_fit.q_sensitivity.populate_data(meas.transects, meas.extrap_fit.sel_fit)
+
+        # Simulation original
+        self.sim_orig(meas)
+
+        # Simulation power / power default 1/6
+        self.sim_extrap_pp_16['q_total'] = meas.extrap_fit.q_sensitivity.q_pp_list
+        self.sim_extrap_pp_16['q_top'] = meas.extrap_fit.q_sensitivity.q_top_pp_list
+        self.sim_extrap_pp_16['q_bot'] = meas.extrap_fit.q_sensitivity.q_bot_pp_list
+
+        # Simulations power / power optimized
+        self.sim_pp_min_max_opt(meas=meas)
+
+        # Simulation cns default 1/6
+        self.sim_extrap_cns_16['q_total'] = meas.extrap_fit.q_sensitivity.q_cns_list
+        self.sim_extrap_cns_16['q_top'] = meas.extrap_fit.q_sensitivity.q_top_cns_list
+        self.sim_extrap_cns_16['q_bot'] = meas.extrap_fit.q_sensitivity.q_bot_cns_list
+
+        # Simulation cns optimized
+        self.sim_cns_min_max_opt(meas=meas)
+
+        # Simulation 3pt no slip default 1/6
+        self.sim_extrap_3pns_16['q_total'] = meas.extrap_fit.q_sensitivity.q_3p_ns_list
+        self.sim_extrap_3pns_16['q_top'] = meas.extrap_fit.q_sensitivity.q_top_3p_ns_list
+        self.sim_extrap_3pns_16['q_bot'] = meas.extrap_fit.q_sensitivity.q_bot_3p_ns_list
+
+        # Simulation 3pt no slip optimized
+        self.sim_extrap_3pns_opt['q_total'] = meas.extrap_fit.q_sensitivity.q_3p_ns_opt_list
+        self.sim_extrap_3pns_opt['q_top'] = meas.extrap_fit.q_sensitivity.q_top_3p_ns_opt_list
+        self.sim_extrap_3pns_opt['q_bot'] = meas.extrap_fit.q_sensitivity.q_bot_3p_ns_opt_list
+
+        # Simulations edge min and max
+        self.sim_edge_min_max(meas=meas)
+
+        # Simulation draft min and max
+        self.sim_draft_max_min(meas=meas)
+
+        # Simulation of invalid cells and ensembles
+        self.sim_invalid_cells(meas=meas)
+
+        # Simulation of shallow no cells
+        self.sim_shallow_ens(meas=meas)
+
+        # Simulation of invalid boat velocity
+        self.sim_invalid_boat_velocity(meas=meas)
+
+        # Simulation of invalid depths
+        self.sim_invalid_depth(meas=meas)
+
+    def uncertainty_measured_discharge(self, meas):
+        """Compute the uncertainty related to the measured area.
+
+        Parameters
+        ----------
+        meas: Measurement
+            Object of class Measurement
+        """
+
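+        # Variance model applied below to each ensemble i of a transect:
+        #   u_meas_i^2 = q_i^2 * (u_boat^2 + (1 / n_cells_i) * (std_ev^2 + u_dzi^2))
+        # The ensemble variances are summed, divided by the squared transect discharge, and the
+        # square root gives the relative standard uncertainty u_meas.
+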
+        self.u_contribution_meas = pd.DataFrame(columns=['boat', 'water', 'dzi'])
+
+        # Set uncertainty of cell size
+        if np.isnan(self.user_advanced_settings['dzi_prct_user']):
+            u_dzi = self.default_advanced_settings['dzi_prct'] * 0.01
+        else:
+            u_dzi = self.user_advanced_settings['dzi_prct_user'] * 0.01
+
+        # Compute the uncertainty due to the measured area
+        for transect_id in self.checked_idx:
+
+            # Relative standard deviation of error velocity (Water Track)
+            std_ev_wt_ens = self.water_std_by_error_velocity(meas.transects[transect_id])
+
+            u_boat = np.nan
+            if meas.transects[transect_id].boat_vel.selected == 'bt_vel':
+                # Relative standard deviation of error velocity (Bottom Track)
+                u_boat = self.boat_std_by_error_velocity(meas.transects[transect_id])
+
+            elif meas.transects[transect_id].boat_vel.selected == 'gga_vel':
+                boat_std = np.nan
+                if np.isnan(self.user_advanced_settings['gga_boat_mps_user']):
+                    if meas.transects[transect_id].gps.altitude_ens_m is not None:
+                        # Estimate the uncertainty in gga boat velocity as 1/3 of the standard deviation of
+                        # the elevation (estimate of horizontal position uncertainty) divided by time
+                        boat_std = (np.nanstd(meas.transects[transect_id].gps.altitude_ens_m, ddof=1) / 3) / \
+                                   np.nanmean(np.diff(meas.transects[transect_id].gps.gga_serial_time_ens))
+                else:
+                    boat_std = self.user_advanced_settings['gga_boat_mps_user']
+                if meas.transects[transect_id].boat_vel.gga_vel is not None:
+                    u = meas.transects[transect_id].boat_vel.gga_vel.u_processed_mps
+                    v = meas.transects[transect_id].boat_vel.gga_vel.v_processed_mps
+                    speed = np.sqrt(u ** 2 + v ** 2)
+                    u_boat = boat_std / speed
+
+            elif meas.transects[transect_id].boat_vel.selected == 'vtg_vel':
+                if np.isnan(self.user_advanced_settings['vtg_boat_mps_user']):
+                    boat_std = np.nan
+                    if meas.transects[transect_id].gps is not None:
+                        boat_std = self.default_advanced_settings['vtg_boat_mps']
+                else:
+                    boat_std = self.user_advanced_settings['vtg_boat_mps_user']
+                if meas.transects[transect_id].boat_vel.vtg_vel is not None:
+                    u = meas.transects[transect_id].boat_vel.vtg_vel.u_processed_mps
+                    v = meas.transects[transect_id].boat_vel.vtg_vel.v_processed_mps
+                    speed = np.sqrt(u ** 2 + v ** 2)
+                    u_boat = boat_std / speed
+
+            # Computation of u_meas
+            q_2_tran = meas.discharge[transect_id].total ** 2
+            q_2_ens = meas.discharge[transect_id].middle_ens ** 2
+            n_cell_ens = meas.transects[transect_id].w_vel.cells_above_sl.sum(axis=0)  # number of cells by ens
+            n_cell_ens = np.where(n_cell_ens == 0, np.nan, n_cell_ens)
+
+            # Variance for each ensemble
+            u_2_meas = q_2_ens * (u_boat ** 2 + (1 / n_cell_ens) * (std_ev_wt_ens ** 2 + u_dzi ** 2))
+
+            u_2_prct_meas = np.nansum(u_2_meas) / q_2_tran
+
+            # Standard deviation
+            u_prct_meas = u_2_prct_meas ** 0.5
+            self.u_meas_list.append(u_prct_meas)
+
+            # Compute the contribution of each term to u_meas (the contributions sum to 1)
+            u_contrib_boat = (np.nan_to_num(q_2_ens * (u_boat ** 2)).sum() / q_2_tran) / u_2_prct_meas
+            u_contrib_water = (np.nan_to_num(q_2_ens * ((1 / n_cell_ens) * (std_ev_wt_ens ** 2))).sum()
+                               / q_2_tran) / u_2_prct_meas
+            u_contrib_dzi = (np.nan_to_num(q_2_ens * ((1 / n_cell_ens) * (u_dzi ** 2))).sum()
+                             / q_2_tran) / u_2_prct_meas
+
+            self.u_contribution_meas.loc[len(self.u_contribution_meas)] = [u_contrib_boat,
+                                                                           u_contrib_water,
+                                                                           u_contrib_dzi]
+
+        # Apply user specified uncertainty
+        if np.isnan(self.user_specified_u['u_meas_mean_user']):
+            self.u_meas_mean_user_list = self.u_meas_list
+        else:
+            self.u_meas_mean_user_list = [0.01 * self.user_specified_u['u_meas_mean_user']] * self.nb_transects
+
+    def uncertainty_moving_bed(self, meas):
+        """Computes the moving-bed uncertainty
+
+        Parameters
+        ----------
+        meas: MeasurementData
+            Object of MeasurementData
+        """
+
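+        # Decision logic below (values in percent at the 95% level, halved to the 68% level):
+        #   valid test, moving bed, correction applied  -> 1.5
+        #   valid test, moving bed, no correction       -> 3
+        #   valid tests, no moving bed                  -> 1
+        #   tests recorded but none valid               -> 3
+        #   no test, user observed no moving bed        -> 1
+        #   no test, no observation                     -> 3
+        #   GPS used as boat velocity reference         -> 0
+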
+        # Compute moving-bed uncertainty
+        if len(self.checked_idx) and meas.transects[self.checked_idx[0]].boat_vel.selected == 'bt_vel':
+            # Boat velocity based on bottom track, moving-bed possible
+            if len(meas.mb_tests) > 0:
+                # Moving_bed tests recorded
+                user_valid = []
+                quality = []
+                moving_bed = []
+                used = []
+                for test in meas.mb_tests:
+                    user_valid.append(test.user_valid)
+                    if test.test_quality == 'Errors':
+                        quality.append(False)
+                    else:
+                        quality.append(True)
+                    moving_bed.append(test.moving_bed)
+                    used.append(test.use_2_correct)
+
+                # Check to see if there are any valid tests
+                if np.any(np.logical_and(np.asarray(quality), np.asarray(user_valid))):
+                    # Check to see if the valid tests indicate a moving bed
+                    moving_bed_bool = []
+                    for result in moving_bed:
+                        if result == 'Yes':
+                            moving_bed_bool.append(True)
+                        else:
+                            moving_bed_bool.append(False)
+                    valid_moving_bed = np.logical_and(quality, np.asarray(moving_bed_bool))
+                    if np.any(valid_moving_bed):
+                        # Check to see that a correction was used
+                        if np.any(np.logical_and(valid_moving_bed, np.asarray(used))):
+                            # Moving-bed exists and correction applied
+                            moving_bed_uncertainty = 1.5
+                        else:
+                            # Moving-bed exists and no correction applied
+                            moving_bed_uncertainty = 3
+                    else:
+                        # Valid tests indicated no moving bed
+                        moving_bed_uncertainty = 1
+                else:
+                    moving_bed_uncertainty = 3
+            elif meas.observed_no_moving_bed:
+                moving_bed_uncertainty = 1
+            else:
+                # No moving bed tests
+                moving_bed_uncertainty = 3
+        else:
+            # GPS used as boat velocity reference
+            moving_bed_uncertainty = 0
+
+        # Convert percent at the 95% level to a fraction at the 68% level and expand to a list
+        self.u_movbed_list = [0.01 * moving_bed_uncertainty / 2] * self.nb_transects
+
+        # Apply user specified
+        if np.isnan(self.user_specified_u['u_movbed_user']):
+            self.u_movbed_user_list = self.u_movbed_list
+        else:
+            self.u_movbed_user_list = [self.user_specified_u['u_movbed_user'] * 0.01] * self.nb_transects
+
+    def uncertainty_system(self):
+        """Compute systematic uncertainty
+        """
+
+        # Assume 1.31% systematic bias at 68%
+        self.u_syst_list = [0.01 * 1.31] * self.nb_transects
+
+        # Override with user specification if available
+        if np.isnan(self.user_specified_u['u_syst_mean_user']):
+            self.u_syst_mean_user_list = self.u_syst_list
+        else:
+            self.u_syst_mean_user_list = [self.user_specified_u['u_syst_mean_user'] * 0.01] * self.nb_transects
+
+    def uncertainty_number_ensembles(self, meas):
+        """Computes the uncertainty due to the number of ensembles in a transect.
+
+        Parameters
+        ----------
+        meas: MeasurementData
+            Object of MeasurementData
+        """
+
+        for trans_id in self.checked_idx:
+            # Compute uncertainty due to limited number of ensembles (ISO 748; Le Coz et al., 2012)
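+            # For example, a transect with 200 ensembles gives 0.01 * 32 * 200 ** -0.88 ~ 0.003,
+            # i.e. about 0.3% at the 68% level; more ensembles reduce this term.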
+            self.u_ens_list.append(0.01 * 32 * len(meas.discharge[trans_id].middle_ens) ** (-0.88))
+
+        if np.isnan(self.user_specified_u['u_ens_user']):
+            self.u_ens_user_list = self.u_ens_list
+        else:
+            self.u_ens_user_list = [0.01 * self.user_specified_u['u_ens_user']] * self.nb_transects
+
+    def uncertainty_compass(self, meas):
+        """Compute the potential bias in the measurement due to dynamic compass errors when using GPS as
+        the navigation reference. The method is based on Mueller (2018,
+        https://doi.org/10.1016/j.flowmeasinst.2018.10.004), equation 41.
+
+        Parameters
+        ----------
+        meas: MeasurementData
+            Object of MeasurementData
+        """
+
+        # No compass error component for bottom track referenced discharges
+        if meas.transects[self.checked_idx[0]].boat_vel.selected == 'bt_vel':
+            self.u_compass_list = [0] * self.nb_transects
+        else:
+            # Assume a default compass error unless one is provided by the user
+            if np.isnan(self.user_advanced_settings['compass_error_deg_user']):
+                compass_error = self.default_advanced_settings['compass_error_deg']
+            else:
+                compass_error = self.user_advanced_settings['compass_error_deg_user']
+
+            # Compute discharge bias based on compass error and boat speed
+            meas_stats = meas.compute_measurement_properties(meas)
+            speed_ratio = meas_stats['avg_boat_speed'][self.checked_idx] / \
+                meas_stats['avg_water_speed'][self.checked_idx]
+            self.u_compass_list = np.abs(1 - (cosd(compass_error) + 0.5 * speed_ratio * sind(compass_error)))
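+            # For example, a 1 degree compass error with boat speed equal to water speed gives
+            # |1 - (cosd(1) + 0.5 * 1 * sind(1))| ~ 0.0086, i.e. roughly a 0.9% bias.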
+
+        # Override if user provides uncertainty due to compass
+        if np.isnan(self.user_specified_u['u_compass_user']):
+            self.u_compass_user_list = self.u_compass_list
+        else:
+            self.u_compass_user_list = [self.user_specified_u['u_compass_user'] * 0.01] * self.nb_transects
+
+    def uncertainty_top_discharge(self):
+        """Computes the uncertainty in the top discharge using simulations and rectangular law.
+        """
+
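+        # apply_u_rect (defined later in this class) is assumed to apply the rectangular law,
+        # (max - min) / (2 * sqrt(3)), across the listed simulations for the named column;
+        # dividing by |q_total| expresses the result relative to each transect discharge.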
+        self.u_top_list = list(Oursin.apply_u_rect(list_sims=[self.sim_original,
+                                                              self.sim_extrap_pp_opt,
+                                                              self.sim_extrap_pp_min,
+                                                              self.sim_extrap_pp_max,
+                                                              self.sim_extrap_cns_opt,
+                                                              self.sim_extrap_cns_min,
+                                                              self.sim_extrap_cns_max,
+                                                              self.sim_extrap_3pns_opt,
+                                                              self.sim_draft_max,
+                                                              self.sim_draft_min],
+                                                   col_name='q_top')
+                               / np.abs(self.sim_original['q_total']))
+
+        if np.isnan(self.user_specified_u['u_top_mean_user']):
+            self.u_top_mean_user_list = self.u_top_list
+        else:
+            self.u_top_mean_user_list = [0.01 * self.user_specified_u['u_top_mean_user']] * self.nb_transects
+
+    def uncertainty_bottom_discharge(self):
+        """Computes uncertainty of bottom discharge using simulations and rectangular law.
+        """
+
+        self.u_bot_list = list(Oursin.apply_u_rect(list_sims=[self.sim_original,
+                                                              self.sim_extrap_pp_opt,
+                                                              self.sim_extrap_pp_min,
+                                                              self.sim_extrap_pp_max,
+                                                              self.sim_extrap_cns_opt,
+                                                              self.sim_extrap_cns_min,
+                                                              self.sim_extrap_cns_max,
+                                                              self.sim_extrap_3pns_opt],
+                                                   col_name='q_bot')
+                               / np.abs(self.sim_original['q_total']))
+
+        if np.isnan(self.user_specified_u['u_bot_mean_user']):
+            self.u_bot_mean_user_list = self.u_bot_list
+        else:
+            self.u_bot_mean_user_list = [0.01 * self.user_specified_u['u_bot_mean_user']] * self.nb_transects
+
+    def uncertainty_left_discharge(self):
+        """Computes the uncertianty of the left edge discharge using simulations and the rectangular law.
+        """
+
+        self.u_left_list = list(Oursin.apply_u_rect(list_sims=[self.sim_original['q_left'],
+                                                               self.sim_edge_min,
+                                                               self.sim_edge_max,
+                                                               self.sim_draft_min,
+                                                               self.sim_draft_max],
+                                                    col_name='q_left')
+                                / np.abs(self.sim_original['q_total']))
+
+        if np.isnan(self.user_specified_u['u_left_mean_user']):
+            self.u_left_mean_user_list = self.u_left_list
+        else:
+            self.u_left_mean_user_list = [0.01 * self.user_specified_u['u_left_mean_user']] * self.nb_transects
+
+    def uncertainty_right_discharge(self):
+        """Computes the uncertainty of the right edge discharge using simulations and the rectangular law.
+        """
+
+        self.u_right_list = list(Oursin.apply_u_rect(list_sims=[self.sim_original['q_right'],
+                                                                self.sim_edge_min,
+                                                                self.sim_edge_max,
+                                                                self.sim_draft_min,
+                                                                self.sim_draft_max],
+                                                     col_name='q_right')
+                                 / np.abs(self.sim_original['q_total']))
+
+        if np.isnan(self.user_specified_u['u_right_mean_user']):
+            self.u_right_mean_user_list = self.u_right_list
+        else:
+            self.u_right_mean_user_list = [0.01 * self.user_specified_u['u_right_mean_user']] * self.nb_transects
+
+    def uncertainty_invalid_depth_data(self):
+        """Computes the uncertainty due to invalid depth data using simulations and the retangular law.
+        """
+
+        self.u_invalid_depth_list = list(Oursin.apply_u_rect(list_sims=[self.sim_original,
+                                                                        self.sim_depth_hold,
+                                                                        self.sim_depth_next],
+                                                             col_name='q_total')
+                                         / np.abs(self.sim_original['q_total']))
+
+        if np.isnan(self.user_specified_u['u_invalid_depth_user']):
+            self.u_invalid_depth_user_list = self.u_invalid_depth_list
+        else:
+            self.u_invalid_depth_user_list = [0.01 * self.user_specified_u[
+                'u_invalid_depth_user']] * self.nb_transects
+
+    def uncertainty_invalid_boat_data(self):
+        """Computes the uncertainty due to invalid boat data using simulations and the rectangular law.
+        """
+
+        self.u_invalid_boat_list = list(Oursin.apply_u_rect(list_sims=[self.sim_original,
+                                                                       self.sim_boat_hold,
+                                                                       self.sim_boat_next],
+                                                            col_name='q_total')
+                                        / np.abs(self.sim_original['q_total']))
+
+        if np.isnan(self.user_specified_u['u_invalid_boat_user']):
+            self.u_invalid_boat_user_list = self.u_invalid_boat_list
+        else:
+            self.u_invalid_boat_user_list = [0.01 * self.user_specified_u['u_invalid_boat_user']] * self.nb_transects
+
+    def uncertainty_invalid_water_data(self):
+        """Computes the uncertainty due to invalid water data assuming rectangular law.
+        """
+
+        # Uncertainty due to invalid cells and ensembles
+        self.u_invalid_water_list = list(Oursin.apply_u_rect(list_sims=[self.sim_original,
+                                                                        self.sim_cells_trdi,
+                                                                        self.sim_cells_above,
+                                                                        self.sim_cells_below,
+                                                                        self.sim_cells_before,
+                                                                        self.sim_cells_after,
+                                                                        self.sim_shallow],
+                                                             col_name='q_total')
+                                         / np.abs(self.sim_original['q_total']))
+
+        if np.isnan(self.user_specified_u['u_invalid_water_user']):
+            self.u_invalid_water_user_list = self.u_invalid_water_list
+        else:
+            self.u_invalid_water_user_list = [0.01 * self.user_specified_u['u_invalid_water_user']] \
+                                             * self.nb_transects
+
+    def compute_measurement_cov(self, meas, method='Bayes'):
+        """Compute the coefficient of variation of the total transect discharges used in the measurement.
+
+        Parameters
+        ----------
+        meas: MeasurementData
+            Object of MeasurementData
+        method: str
+            Determines method to use (Bayes or QRev)
+        """
+
+        self.cov_68 = np.nan
+
+        if method == 'QRev':
+
+            # Only compute for multiple transects
+            if self.nb_transects > 1:
+                total_q = []
+                for trans_id in self.checked_idx:
+                    total_q.append(meas.discharge[trans_id].total)
+
+                # Compute coefficient of variation
+                cov = np.abs(np.nanstd(total_q, ddof=1) / np.nanmean(total_q))
+
+                # Inflate the cov to the 95% value
+                if len(total_q) == 2:
+                    # Use the approximate method to reduce the high coverage factor for 2 transects
+                    # and account for prior knowledge related to the 720-second duration analysis
+                    cov_95 = cov * 3.3
+                    self.cov_68 = cov_95 / 2
+                else:
+                    # Use Student's t to inflate COV for n > 2
+                    cov_95 = t.interval(0.95, len(total_q) - 1)[1] * cov / len(total_q) ** 0.5
+                    self.cov_68 = cov_95 / 2
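+                # Worked sketch (hypothetical numbers, not from a measurement): with cov = 0.02
+                # and two transects, cov_95 = 0.02 * 3.3 = 0.066 and cov_68 = 0.033. With four
+                # transects, t.interval(0.95, 3)[1] is about 3.18, so
+                # cov_95 = 3.18 * 0.02 / 4 ** 0.5 = 0.0318 and cov_68 is about 0.0159.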
+        elif method == 'Bayes':
+
+            # Set prior
+            if np.isnan(meas.oursin.user_advanced_settings['cov_prior_user']):
+                cov_prior = meas.oursin.default_advanced_settings['cov_prior']
+            else:
+                cov_prior = meas.oursin.user_advanced_settings['cov_prior_user']
+
+            if np.isnan(meas.oursin.user_advanced_settings['cov_prior_u_user']):
+                cov_prior_u = meas.oursin.default_advanced_settings['cov_prior_u']
+            else:
+                cov_prior_u = meas.oursin.user_advanced_settings['cov_prior_u_user']
+
+            # Create list of observations
+            transects_total_q = []
+            for idx in meas.checked_transect_idx:
+                transects_total_q.append(meas.discharge[idx].total)
+
+            # Compute COV
+            self.cov_68 = bayes_cov(np.array(transects_total_q), cov_prior, cov_prior_u, 20000)
+
+    def sim_orig(self, meas):
+        """Stores original measurement results in a data frame
+
+        Parameters
+        ----------
+        meas: MeasurementData
+            Object of MeasurementData
+        """
+        self.sim_original = self.sim_original.iloc[0:0]
+        transect_q = dict()
+        for trans_id in self.checked_idx:
+            transect_q['q_total'] = meas.discharge[trans_id].total
+            transect_q['q_top'] = meas.discharge[trans_id].top
+            transect_q['q_bot'] = meas.discharge[trans_id].bottom
+            transect_q['q_right'] = meas.discharge[trans_id].right
+            transect_q['q_left'] = meas.discharge[trans_id].left
+            transect_q['q_middle'] = meas.discharge[trans_id].middle
+            self.sim_original = self.sim_original.append(transect_q, ignore_index=True, sort=False)
+
+    def sim_cns_min_max_opt(self, meas):
+        """Computes simulations resulting in the the min and max discharges for a constant no slip extrapolation
+        fit.
+
+        Parameters
+        ----------
+        meas: MeasurementData
+            Object of MeasurementData
+        """
+
+        # Compute min-max no slip exponent
+        skip_ns_min_max, self.exp_ns_max, self.exp_ns_min = \
+            self.compute_ns_max_min(meas=meas,
+                                    ns_exp=self.ns_exp,
+                                    exp_ns_min_user=self.user_advanced_settings['exp_ns_min_user'],
+                                    exp_ns_max_user=self.user_advanced_settings['exp_ns_max_user'])
+
+        # Optimized
+        self.sim_extrap_cns_opt['q_total'] = meas.extrap_fit.q_sensitivity.q_cns_opt_list
+        self.sim_extrap_cns_opt['q_top'] = meas.extrap_fit.q_sensitivity.q_top_cns_opt_list
+        self.sim_extrap_cns_opt['q_bot'] = meas.extrap_fit.q_sensitivity.q_bot_cns_opt_list
+
+        # Max min
+        if skip_ns_min_max:
+            # If cns is not used, both max and min are equal to the optimized value
+            self.sim_extrap_cns_min['q_total'] = meas.extrap_fit.q_sensitivity.q_cns_opt_list
+            self.sim_extrap_cns_min['q_top'] = meas.extrap_fit.q_sensitivity.q_top_cns_opt_list
+            self.sim_extrap_cns_min['q_bot'] = meas.extrap_fit.q_sensitivity.q_bot_cns_opt_list
+            self.sim_extrap_cns_max['q_total'] = meas.extrap_fit.q_sensitivity.q_cns_opt_list
+            self.sim_extrap_cns_max['q_top'] = meas.extrap_fit.q_sensitivity.q_top_cns_opt_list
+            self.sim_extrap_cns_max['q_bot'] = meas.extrap_fit.q_sensitivity.q_bot_cns_opt_list
+        else:
+            # Compute q for min and max values
+            q = QComp()
+            self.sim_extrap_cns_min = pd.DataFrame(columns=self.sim_extrap_cns_min.columns)
+            self.sim_extrap_cns_max = pd.DataFrame(columns=self.sim_extrap_cns_max.columns)
+
+            for trans_id in self.checked_idx:
+                # Compute min values
+                q.populate_data(data_in=meas.transects[trans_id],
+                                top_method='Constant',
+                                bot_method='No Slip',
+                                exponent=self.exp_ns_min)
+                self.sim_extrap_cns_min.loc[len(self.sim_extrap_cns_min)] = [q.total, q.top, q.bottom]
+                # Compute max values
+                q.populate_data(data_in=meas.transects[trans_id],
+                                top_method='Constant',
+                                bot_method='No Slip',
+                                exponent=self.exp_ns_max)
+                self.sim_extrap_cns_max.loc[len(self.sim_extrap_cns_max)] = [q.total, q.top, q.bottom]
+
+    def sim_pp_min_max_opt(self, meas):
+        """Computes simulations resulting in the the min and max discharges for a power power extrapolation
+        fit.
+
+        Parameters
+        ----------
+        meas: MeasurementData
+            Object of MeasurementData
+        """
+
+        # A power fit is not applicable to bi-directional flow
+        mean_q = meas.mean_discharges(meas)
+        if np.sign(mean_q['top_mean']) != np.sign(mean_q['bot_mean']):
+            self.sim_extrap_pp_min = self.sim_original[['q_total', 'q_top', 'q_bot']]
+            self.sim_extrap_pp_max = self.sim_original[['q_total', 'q_top', 'q_bot']]
+            self.sim_extrap_pp_opt = self.sim_original[['q_total', 'q_top', 'q_bot']]
+
+        else:
+            # Compute min-max power exponent
+            skip_pp_min_max, self.exp_pp_max, self.exp_pp_min = \
+                self.compute_pp_max_min(meas=meas,
+                                        exp_95ic_min=self.exp_95ic_min,
+                                        exp_95ic_max=self.exp_95ic_max,
+                                        pp_exp=self.pp_exp,
+                                        exp_pp_min_user=self.user_advanced_settings['exp_pp_min_user'],
+                                        exp_pp_max_user=self.user_advanced_settings['exp_pp_max_user'])
+
+            # Optimized
+            self.sim_extrap_pp_opt['q_total'] = meas.extrap_fit.q_sensitivity.q_pp_opt_list
+            self.sim_extrap_pp_opt['q_top'] = meas.extrap_fit.q_sensitivity.q_top_pp_opt_list
+            self.sim_extrap_pp_opt['q_bot'] = meas.extrap_fit.q_sensitivity.q_bot_pp_opt_list
+
+            # Max min
+            if skip_pp_min_max:
+                self.sim_extrap_pp_min['q_total'] = meas.extrap_fit.q_sensitivity.q_pp_opt_list
+                self.sim_extrap_pp_min['q_top'] = meas.extrap_fit.q_sensitivity.q_top_pp_opt_list
+                self.sim_extrap_pp_min['q_bot'] = meas.extrap_fit.q_sensitivity.q_bot_pp_opt_list
+                self.sim_extrap_pp_max['q_total'] = meas.extrap_fit.q_sensitivity.q_pp_opt_list
+                self.sim_extrap_pp_max['q_top'] = meas.extrap_fit.q_sensitivity.q_top_pp_opt_list
+                self.sim_extrap_pp_max['q_bot'] = meas.extrap_fit.q_sensitivity.q_bot_pp_opt_list
+            else:
+                q = QComp()
+                self.sim_extrap_pp_min = pd.DataFrame(columns=self.sim_extrap_pp_min.columns)
+                self.sim_extrap_pp_max = pd.DataFrame(columns=self.sim_extrap_pp_max.columns)
+
+                for trans_id in self.checked_idx:
+                    q.populate_data(data_in=meas.transects[trans_id],
+                                    top_method='Power',
+                                    bot_method='Power',
+                                    exponent=self.exp_pp_min)
+                    self.sim_extrap_pp_min.loc[len(self.sim_extrap_pp_min)] = [q.total, q.top, q.bottom]
+
+                    q.populate_data(data_in=meas.transects[trans_id],
+                                    top_method='Power',
+                                    bot_method='Power',
+                                    exponent=self.exp_pp_max)
+                    self.sim_extrap_pp_max.loc[len(self.sim_extrap_pp_max)] = [q.total, q.top, q.bottom]
+
+    def sim_edge_min_max(self, meas):
+        """Computes simulations for the maximum and minimum edge discharges.
+
+        Parameters
+        ----------
+        meas: MeasurementData
+            Object of measurement data
+        """
+
+        # Clear variables
+        self.d_right_error_min = []
+        self.d_left_error_min = []
+        self.d_right_error_max = []
+        self.d_left_error_max = []
+        self.sim_edge_min = pd.DataFrame(columns=self.sim_edge_min.columns)
+        self.sim_edge_max = pd.DataFrame(columns=self.sim_edge_max.columns)
+
+        # Create measurement copy to allow changes without affecting original
+        meas_temp = copy.deepcopy(meas)
+
+        # Process each checked transect
+        for trans_id in self.checked_idx:
+            # Compute max and min edge distances
+            max_left_dist, max_right_dist, min_left_dist, min_right_dist = \
+                self.compute_edge_dist_max_min(transect=meas.transects[trans_id],
+                                               user_settings=self.user_advanced_settings,
+                                               default_settings=self.default_advanced_settings)
+
+            # Compute edge minimum
+            self.d_right_error_min.append(min_right_dist)
+            self.d_left_error_min.append(min_left_dist)
+            meas_temp.transects[trans_id].edges.left.distance_m = min_left_dist
+            meas_temp.transects[trans_id].edges.right.distance_m = min_right_dist
+            meas_temp.transects[trans_id].edges.left.type = 'Triangular'
+            meas_temp.transects[trans_id].edges.right.type = 'Triangular'
+            meas_temp.discharge[trans_id].populate_data(data_in=meas_temp.transects[trans_id],
+                                                        moving_bed_data=meas_temp.mb_tests)
+            self.sim_edge_min.loc[len(self.sim_edge_min)] = [meas_temp.discharge[trans_id].total,
+                                                             meas_temp.discharge[trans_id].left,
+                                                             meas_temp.discharge[trans_id].right]
+
+            # Compute edge maximum
+            self.d_right_error_max.append(max_right_dist)
+            self.d_left_error_max.append(max_left_dist)
+            meas_temp.transects[trans_id].edges.left.distance_m = max_left_dist
+            meas_temp.transects[trans_id].edges.right.distance_m = max_right_dist
+            meas_temp.transects[trans_id].edges.left.type = 'Rectangular'
+            meas_temp.transects[trans_id].edges.right.type = 'Rectangular'
+            meas_temp.discharge[trans_id].populate_data(data_in=meas_temp.transects[trans_id],
+                                                        moving_bed_data=meas_temp.mb_tests)
+            self.sim_edge_max.loc[len(self.sim_edge_max)] = [meas_temp.discharge[trans_id].total,
+                                                             meas_temp.discharge[trans_id].left,
+                                                             meas_temp.discharge[trans_id].right]
+
+    def sim_draft_max_min(self, meas):
+        """Compute the simulations for the max and min draft errror.
+
+        Parameters
+        ----------
+        meas: MeasurementData
+            Object of MeasurementData
+        """
+
+        # Reset variables
+        self.draft_error_list = []
+        self.sim_draft_min = pd.DataFrame(columns=self.sim_draft_min.columns)
+        self.sim_draft_max = pd.DataFrame(columns=self.sim_draft_max.columns)
+
+        # Create copy of meas to avoid changing original
+        meas_temp = copy.deepcopy(meas)
+
+        for trans_id in self.checked_idx:
+            # Compute max and min draft
+            draft_max, draft_min, draft_error = \
+                self.compute_draft_max_min(transect=meas.transects[trans_id],
+                                           draft_error_m_user=self.user_advanced_settings['draft_error_m_user'])
+            self.draft_error_list.append(draft_error)
+
+            # Compute discharge for draft min
+            meas_temp.transects[trans_id].change_draft(draft_min)
+            meas_temp.discharge[trans_id].populate_data(data_in=meas_temp.transects[trans_id],
+                                                        moving_bed_data=meas_temp.mb_tests)
+            self.sim_draft_min.loc[len(self.sim_draft_min)] = [meas_temp.discharge[trans_id].total,
+                                                               meas_temp.discharge[trans_id].top,
+                                                               meas_temp.discharge[trans_id].left,
+                                                               meas_temp.discharge[trans_id].right]
+            # Compute discharge for draft max
+            meas_temp.transects[trans_id].change_draft(draft_max)
+            meas_temp.discharge[trans_id].populate_data(data_in=meas_temp.transects[trans_id],
+                                                        moving_bed_data=meas_temp.mb_tests)
+            self.sim_draft_max.loc[len(self.sim_draft_max)] = [meas_temp.discharge[trans_id].total,
+                                                               meas_temp.discharge[trans_id].top,
+                                                               meas_temp.discharge[trans_id].left,
+                                                               meas_temp.discharge[trans_id].right]
+
+    def sim_invalid_cells(self, meas):
+        """Computes simulations using different methods to interpolate for invalid cells and ensembles.
+
+        Parameters
+        ----------
+        meas: MeasurementData
+            Object of MeasurementData
+        """
+
+        # Reset data frames
+        self.sim_cells_trdi = pd.DataFrame(columns=self.sim_cells_trdi.columns)
+        self.sim_cells_above = pd.DataFrame(columns=self.sim_cells_above.columns)
+        self.sim_cells_below = pd.DataFrame(columns=self.sim_cells_below.columns)
+        self.sim_cells_before = pd.DataFrame(columns=self.sim_cells_before.columns)
+        self.sim_cells_after = pd.DataFrame(columns=self.sim_cells_after.columns)
+
+        # Simulations for invalid cells and ensembles
+        meas_temp = copy.deepcopy(meas)
+        for trans_id in self.checked_idx:
+            # TRDI method
+            meas_temp.transects[trans_id].w_vel.interpolate_cells_trdi(meas_temp.transects[trans_id])
+            meas_temp.discharge[trans_id].populate_data(data_in=meas_temp.transects[trans_id],
+                                                        moving_bed_data=meas_temp.mb_tests)
+            self.sim_cells_trdi.loc[len(self.sim_cells_trdi)] = [meas_temp.discharge[trans_id].total,
+                                                                 meas_temp.discharge[trans_id].middle]
+
+            # Above only
+            meas_temp.transects[trans_id].w_vel.interpolate_abba(meas_temp.transects[trans_id], search_loc=['above'])
+            meas_temp.discharge[trans_id].populate_data(data_in=meas_temp.transects[trans_id],
+                                                        moving_bed_data=meas_temp.mb_tests)
+            self.sim_cells_above.loc[len(self.sim_cells_above)] = [meas_temp.discharge[trans_id].total,
+                                                                   meas_temp.discharge[trans_id].middle]
+            # Below only
+            meas_temp.transects[trans_id].w_vel.interpolate_abba(meas_temp.transects[trans_id], search_loc=['below'])
+            meas_temp.discharge[trans_id].populate_data(data_in=meas_temp.transects[trans_id],
+                                                        moving_bed_data=meas_temp.mb_tests)
+            self.sim_cells_below.loc[len(self.sim_cells_below)] = [meas_temp.discharge[trans_id].total,
+                                                                   meas_temp.discharge[trans_id].middle]
+            # Before only
+            meas_temp.transects[trans_id].w_vel.interpolate_abba(meas_temp.transects[trans_id], search_loc=['before'])
+            meas_temp.discharge[trans_id].populate_data(data_in=meas_temp.transects[trans_id],
+                                                        moving_bed_data=meas_temp.mb_tests)
+            self.sim_cells_before.loc[len(self.sim_cells_before)] = [meas_temp.discharge[trans_id].total,
+                                                                     meas_temp.discharge[trans_id].middle]
+            # After only
+            meas_temp.transects[trans_id].w_vel.interpolate_abba(meas_temp.transects[trans_id], search_loc=['after'])
+            meas_temp.discharge[trans_id].populate_data(data_in=meas_temp.transects[trans_id],
+                                                        moving_bed_data=meas_temp.mb_tests)
+            self.sim_cells_after.loc[len(self.sim_cells_after)] = [meas_temp.discharge[trans_id].total,
+                                                                   meas_temp.discharge[trans_id].middle]
+
+    def sim_shallow_ens(self, meas):
+        """Computes simulations assuming no interpolation of discharge for ensembles where depths are too shallow
+        for any valid cells.
+
+        Parameters
+        ----------
+        meas: MeasurementData
+            Object of MeasurementData
+        """
+
+        # Reset data frame
+        self.sim_shallow = pd.DataFrame(columns=self.sim_shallow.columns)
+
+        for trans_id in self.checked_idx:
+            shallow_estimate = np.nansum(meas.discharge[trans_id].middle_ens) \
+                               - np.nansum(np.nansum(meas.discharge[trans_id].middle_cells))
+            if np.abs(shallow_estimate) > 0:
+                self.sim_shallow.loc[len(self.sim_shallow)] = [meas.discharge[trans_id].total - shallow_estimate,
+                                                               meas.discharge[trans_id].middle - shallow_estimate]
+            else:
+                self.sim_shallow.loc[len(self.sim_shallow)] = [meas.discharge[trans_id].total,
+                                                               meas.discharge[trans_id].middle]
+
+    def sim_invalid_depth(self, meas):
+        """Computes simulations using different methods to interpolate for invalid depths.
+
+        Parameters
+        ----------
+        meas: MeasurementData
+           Object of MeasurementData
+        """
+
+        # Reset dataframes
+        self.sim_depth_hold = pd.DataFrame(columns=self.sim_depth_hold.columns)
+        self.sim_depth_next = pd.DataFrame(columns=self.sim_depth_next.columns)
+
+        # Simulations for invalid depths
+        meas_temp = copy.deepcopy(meas)
+        for trans_id in self.checked_idx:
+            depths = getattr(meas_temp.transects[trans_id].depths, meas_temp.transects[trans_id].depths.selected)
+            # Hold last
+            depths.interpolate_hold_last()
+            meas_temp.transects[trans_id].w_vel.adjust_side_lobe(meas_temp.transects[trans_id])
+            meas_temp.discharge[trans_id].populate_data(data_in=meas_temp.transects[trans_id],
+                                                        moving_bed_data=meas_temp.mb_tests)
+            self.sim_depth_hold.loc[len(self.sim_depth_hold)] = [meas_temp.discharge[trans_id].total,
+                                                                 meas_temp.discharge[trans_id].middle]
+            # Fill with next
+            depths.interpolate_next()
+            meas_temp.transects[trans_id].w_vel.adjust_side_lobe(meas_temp.transects[trans_id])
+            meas_temp.discharge[trans_id].populate_data(data_in=meas_temp.transects[trans_id],
+                                                        moving_bed_data=meas_temp.mb_tests)
+            self.sim_depth_next.loc[len(self.sim_depth_next)] = [meas_temp.discharge[trans_id].total,
+                                                                 meas_temp.discharge[trans_id].middle]
+
+    def sim_invalid_boat_velocity(self, meas):
+        """Computes simulations using different methods to interpolate for invalid boat velocity.
+
+        Parameters
+        ----------
+        meas: MeasurementData
+           Object of MeasurementData
+        """
+
+        # Reset dataframes
+        self.sim_boat_hold = pd.DataFrame(columns=self.sim_boat_hold.columns)
+        self.sim_boat_next = pd.DataFrame(columns=self.sim_boat_next.columns)
+
+        # Simulations for invalid boat velocity
+        meas_temp = copy.deepcopy(meas)
+        for trans_id in self.checked_idx:
+            # Hold last
+            boat_data = getattr(meas_temp.transects[trans_id].boat_vel, meas_temp.transects[trans_id].boat_vel.selected)
+            if boat_data is not None:
+                boat_data.interpolate_hold_last()
+                meas_temp.discharge[trans_id].populate_data(data_in=meas_temp.transects[trans_id],
+                                                            moving_bed_data=meas_temp.mb_tests)
+                self.sim_boat_hold.loc[len(self.sim_boat_hold)] = [meas_temp.discharge[trans_id].total,
+                                                                   meas_temp.discharge[trans_id].middle]
+                # Fill with next
+                boat_data.interpolate_next()
+                meas_temp.discharge[trans_id].populate_data(data_in=meas_temp.transects[trans_id],
+                                                            moving_bed_data=meas_temp.mb_tests)
+                self.sim_boat_next.loc[len(self.sim_boat_next)] = [meas_temp.discharge[trans_id].total,
+                                                                   meas_temp.discharge[trans_id].middle]
+            else:
+                self.sim_boat_next.loc[len(self.sim_boat_next)] = [meas.discharge[trans_id].total,
+                                                                   meas_temp.discharge[trans_id].middle]
+                self.sim_boat_hold.loc[len(self.sim_boat_hold)] = [meas.discharge[trans_id].total,
+                                                                   meas_temp.discharge[trans_id].middle]
+
+    @staticmethod
+    def compute_draft_max_min(transect, draft_error_m_user=np.nan):
+        """Determine the max and min values of the ADCP draft.
+
+        Parameters
+        ----------
+        transect: TransectData
+            Object of transect data
+        draft_error_m_user: float
+            User specified draft error in m
+
+        Returns
+        -------
+        draft_max: float
+            Maximum draft in m for simulations
+        draft_min: float
+            Minimum draft in m for simulations
+        draft_error: float
+            Draft error in m
+        """
+        depths = transect.depths.bt_depths.depth_processed_m  # depth by ens
+        depth_90 = np.quantile(depths, q=0.9)  # quantile 90% to avoid spikes
+
+        # Determine draft error value
+        if np.isnan(draft_error_m_user):
+            if depth_90 < 2.50:
+                draft_error = 0.02
+            else:
+                draft_error = 0.05
+        else:
+            draft_error = draft_error_m_user
+
+        # Compute draft max and min
+        draft_min = transect.depths.bt_depths.draft_orig_m - draft_error
+        draft_max = transect.depths.bt_depths.draft_orig_m + draft_error
+
+        if draft_min <= 0:
+            draft_min = 0.01
+
+        return draft_max, draft_min, draft_error
+
+    @staticmethod
+    def compute_edge_dist_max_min(transect, user_settings, default_settings):
+        """Compute the max and min edge distances.
+        """
+
+        init_dist_right = transect.edges.right.distance_m
+        init_dist_left = transect.edges.left.distance_m
+
+        # Select user percentage or default
+        if np.isnan(user_settings['right_edge_dist_prct_user']):
+            d_right_error_prct = default_settings['right_edge_dist_prct']
+        else:
+            d_right_error_prct = user_settings['right_edge_dist_prct_user']
+
+        if np.isnan(user_settings['left_edge_dist_prct_user']):
+            d_left_error_prct = default_settings['left_edge_dist_prct']
+        else:
+            d_left_error_prct = user_settings['left_edge_dist_prct_user']
+
+        # Compute min distance for both edges
+        min_left_dist = (1 - d_left_error_prct * 0.01) * init_dist_left
+        min_right_dist = (1 - d_right_error_prct * 0.01) * init_dist_right
+
+        if min_left_dist <= 0:
+            min_left_dist = 0.10
+        if min_right_dist <= 0:
+            min_right_dist = 0.10
+
+        # Compute max distance for both edges
+        max_left_dist = (1 + d_left_error_prct * 0.01) * init_dist_left
+        max_right_dist = (1 + d_right_error_prct * 0.01) * init_dist_right
+
+        return max_left_dist, max_right_dist, min_left_dist, min_right_dist
+
+    @staticmethod
+    def compute_pp_max_min(meas, exp_95ic_min, exp_95ic_max, pp_exp, exp_pp_min_user, exp_pp_max_user):
+        """Determine the max and min exponents for power fit.
+
+        Parameters
+        ----------
+        meas: MeasurementData
+            Object of MeasurementData
+        exp_95ic_min: list
+            Minimum power fit exponent from the 95% confidence interval for each transect
+        exp_95ic_max: list
+            Maximum power fit exponent from the 95% confidence interval for each transect
+        pp_exp: list
+            Optimized power fit exponent for each transect
+        exp_pp_min_user: float
+            User supplied minimum power fit exponent
+        exp_pp_max_user: float
+            User supplied maximum power fit exponent
+
+        Returns
+        -------
+        skip_pp_min_max: bool
+            Boolean to identify if power fit simulations should be skipped
+        exp_pp_max: float
+            Maximum power fit exponent to be used in simulations
+        exp_pp_min: float
+            Minimum power fit exponent to be used in simulations
+        """
+        skip_pp_min_max = False
+        if len(pp_exp) == 0:
+            skip_pp_min_max = True
+            min_pp = meas.extrap_fit.q_sensitivity.pp_exp
+            max_pp = meas.extrap_fit.q_sensitivity.pp_exp
+        else:
+            if np.isnan(pp_exp).any():
+                mean_pp = 0.16
+            else:
+                mean_pp = np.nanmean(pp_exp)
+
+            # If all transects have confidence intervals, use the mean of the confidence interval min/max
+            # Otherwise adjust average +/- 0.2
+            if np.isnan(exp_95ic_min).any():
+                min_pp = mean_pp - 0.2
+            else:
+                min_pp = np.nanmean(exp_95ic_min)
+
+            if np.isnan(exp_95ic_max).any():
+                max_pp = mean_pp + 0.2
+            else:
+                max_pp = np.nanmean(exp_95ic_max)
+
+            # Difference between the mean PP exponent and min/max shouldn't exceed 0.2
+            if mean_pp - min_pp > 0.2:
+                min_pp = mean_pp - 0.2
+            if max_pp - mean_pp > 0.2:
+                max_pp = mean_pp + 0.2
+
+            # Check that 0 < exponent < 1
+            if min_pp <= 0:
+                min_pp = 0.01
+            if max_pp >= 1:
+                max_pp = 0.99
+
+        # Apply user overrides to the min-max exponents, if specified
+        if np.isnan(exp_pp_min_user):
+            exp_pp_min = min_pp
+        else:
+            exp_pp_min = exp_pp_min_user
+
+        if np.isnan(exp_pp_max_user):
+            exp_pp_max = max_pp
+        else:
+            exp_pp_max = exp_pp_max_user
+
+        return skip_pp_min_max, exp_pp_max, exp_pp_min
+
+    @staticmethod
+    def compute_ns_max_min(meas, ns_exp, exp_ns_min_user=np.nan, exp_ns_max_user=np.nan):
+        """Determine the max and min no slip exponents.
+
+        Parameters
+        ----------
+        meas: MeasurementData
+            Object of MeasurementData
+        ns_exp: list
+            List of maximum and minimum no slip exponents.
+        exp_ns_min_user: float
+            User supplied minimum no slip exponent
+        exp_ns_max_user: float
+            User supplied maximum no slip exponent
+
+        Returns
+        -------
+        skip_ns_min_max: bool
+            Boolean to identify if no slip simulations should be skipped
+        exp_ns_max: float
+            Maximum no slip exponent to be used in simulations
+        exp_ns_min: float
+            Minimum no slip exponent to be used in simulations
+        """
+        skip_ns_min_max = False
+        if len(ns_exp) == 0:
+            skip_ns_min_max = True
+            min_ns = meas.extrap_fit.q_sensitivity.ns_exp
+            max_ns = meas.extrap_fit.q_sensitivity.ns_exp
+        else:
+            mean_ns = np.nanmean(ns_exp)
+            if len(ns_exp) == 1:
+                min_ns = ns_exp[0]-0.05
+                max_ns = ns_exp[0]+0.05
+            else:
+                min_ns = np.nanmin(ns_exp)
+                max_ns = np.nanmax(ns_exp)
+
+            # Diff between mean NS exponent and min/max shouldn't be > 0.2
+            if mean_ns - min_ns > 0.2:
+                min_ns = mean_ns - 0.2
+            if max_ns - mean_ns > 0.2:
+                max_ns = mean_ns + 0.2
+
+            # Check that 0 < exponent < 1
+            if min_ns <= 0:
+                min_ns = 0.01
+            if max_ns >= 1:
+                max_ns = 0.99
+
+        # Apply user overrides
+        if np.isnan(exp_ns_min_user):
+            exp_ns_min = min_ns
+        else:
+            exp_ns_min = exp_ns_min_user
+
+        if np.isnan(exp_ns_max_user):
+            exp_ns_max = max_ns
+        else:
+            exp_ns_max = exp_ns_max_user
+
+        return skip_ns_min_max, exp_ns_max, exp_ns_min
+
+    @staticmethod
+    def depth_error_boat_motion(transect):
+        """Relative depth error due to vertical velocity of boat
+           the height [m] is vertical velocity times ensemble duration
+
+        Parameters
+        ----------
+        transect: TransectData
+            Object of TransectData
+
+        Returns
+        -------
+        relative_error_depth: float
+            Random depth error by ensemble
+       """
+
+        d_ens = transect.depths.bt_depths.depth_processed_m
+        depth_vv = transect.boat_vel.bt_vel.w_mps * transect.date_time.ens_duration_sec
+        relative_error_depth = np.abs(depth_vv) / d_ens
+        relative_error_depth[np.isnan(relative_error_depth)] = 0.00
+        return relative_error_depth
+
+    @staticmethod
+    def water_std_by_error_velocity(transect):
+        """Compute the relative standard deviation of the water velocity using the fact that the error velocity is
+        scaled so that the standard deviation of the error velocity is the same as the standard deviation
+        of the horizontal water velocity.
+
+        Parameters
+        ----------
+        transect: TransectData
+            Object of TransectData
+
+        Returns
+        -------
+        std_ev_wt_ens: float
+            Standard deviation of water track error velocity for each ensemble
+        """
+
+        # Compute water speed
+        u_water = transect.w_vel.u_processed_mps
+        v_water = transect.w_vel.v_processed_mps
+        v_wa_cell_abs = np.sqrt(u_water ** 2 + v_water ** 2)
+
+        # Use only valid error velocity data
+        d_vel_filtered = np.tile([np.nan], transect.w_vel.d_mps.shape)
+        d_vel_filtered[transect.w_vel.valid_data[0]] = transect.w_vel.d_mps[transect.w_vel.valid_data[0]]
+
+        # Compute relative standard deviation of error velocity
+        std_ev_wt = np.nanstd(d_vel_filtered) / np.abs(v_wa_cell_abs)
+        std_ev_wt_ens = np.nanmedian(std_ev_wt, axis=0)
+        # TODO consider substituting the overall std for nan rather than 0
+        # all_std_ev_WT = np.nanstd(d_vel_filtered[:])
+        # std_ev_wt_ens[np.isnan(std_ev_wt_ens)] = all_std_ev_WT
+        std_ev_wt_ens[np.isnan(std_ev_wt_ens)] = 0.00
+        return std_ev_wt_ens
+
+    @staticmethod
+    def boat_std_by_error_velocity(transect):
+        """Compute the relative standard deviation of the boat velocity using the fact that the error velocity is
+        scaled so that the standard deviation of the error velocity is the same as the standard deviation
+        of the horizontal boat velocity.
+
+        Parameters
+        ----------
+        transect: TransectData
+            Object of TransectData
+
+        Returns
+        -------
+        std_ev_bt: float
+            Standard deviation of bottom track error velocity
+        """
+
+        # Compute boat speed
+        u_boat = transect.boat_vel.bt_vel.u_processed_mps
+        v_boat = transect.boat_vel.bt_vel.v_processed_mps
+        speed = np.sqrt(u_boat ** 2 + v_boat ** 2)
+
+        # Use only valid error velocity data
+        d_vel_filtered = np.tile([np.nan], transect.boat_vel.bt_vel.d_mps.shape)
+        d_vel_filtered[transect.boat_vel.bt_vel.valid_data[0]] = \
+            transect.boat_vel.bt_vel.d_mps[transect.boat_vel.bt_vel.valid_data[0]]
+
+        # Compute relative standard deviation of error velocity
+        all_std_ev_bt = np.nanstd(d_vel_filtered)
+        std_ev_bt = np.abs(all_std_ev_bt) / speed
+        # TODO Consider substituting the overall std for nan rather than 0
+        # std_ev_bt[np.isnan(std_ev_bt)] = all_std_ev_bt
+        std_ev_bt[np.isnan(std_ev_bt)] = 0.00
+
+        return std_ev_bt
+
+    @staticmethod
+    def apply_u_rect(list_sims, col_name):
+        """Compute the uncertainty using list of simulated discharges following a ranctangular law
+
+        Parameters
+        ----------
+        list_sims: list
+            List of simulation data frames to be used in the computation
+        col_name: str
+            Name of column in the data frames to be used in the computation
+
+        Returns
+        -------
+        u_rect: float
+            Result of rectangular law
+        """
+
+        # Combine data frames
+        vertical_stack = pd.concat(list_sims, axis=0, sort=True)
+
+        # Apply rectangular law
+        u_rect = (vertical_stack.groupby(vertical_stack.index)[col_name].max()
+                  - vertical_stack.groupby(vertical_stack.index)[col_name].min()) / (2 * (3 ** 0.5))
+
+        return u_rect
+
+    # Bayesian COV
+    # ============
+    @staticmethod
+    def bayes_cov(transects_total_q, cov_prior=0.03, cov_prior_u=0.2, nsim=20000):
+        """Computes the coefficient of variation using a Bayesian approach and an assumed posterior
+        log-normal distribution.
+
+        Parameters
+        ----------
+        transects_total_q: list
+            List of total discharge for each transect
+        cov_prior: float
+            Expected COV (68%) based on prior knowledge. Assumed to be 3% by default.
+        cov_prior_u: float
+            Uncertainty (68%) of cov_prior. Assumed to be 20%.
+        nsim: int
+            Number of simulations. 20000 was found to produce stable results.
+
+        Returns
+        -------
+        cov: float
+            Coefficient of variation
+        """
+
+        sav = Oursin.metropolis(theta0=[np.mean(transects_total_q), cov_prior],
+                                obs_data=transects_total_q,
+                                cov_prior=cov_prior,
+                                cov_prior_u=cov_prior_u,
+                                nsim=nsim,
+                                theta_std=np.abs(np.array([np.mean(transects_total_q), cov_prior]))
+                                * cov_prior_u / np.sqrt(len(transects_total_q)))
+
+        n_burn = int(nsim / 2)
+
+        cov = np.mean(sav['sam'][n_burn:nsim, 1])
+
+        return cov
+
+    @staticmethod
+    def metropolis(theta0, obs_data, cov_prior, cov_prior_u, nsim=1000, theta_std=np.nan):
+        """Implements the Metropolis_Hastings Markov chain Monte Carlo (MCMC) algorithm for sampling the
+        posterior distribution, assuming a log-normal posterior distribution.
+
+        Parameters
+        ----------
+        theta0: list
+            Starting value of parameters (mean and cov_prior)
+        obs_data: list
+            List of total discharge for each transect
+        cov_prior: float
+            Expected COV (68%) based on prior knowledge.
+        cov_prior_u: float
+            Uncertainty (68%) of cov_prior.
+        nsim: int
+            Number of simulations.
+        theta_std: float
+            Standard deviation of the Gaussian jump distribution. If not provided, a default value is computed.
+
+        Returns
+        -------
+        w: dict
+            Dictionary containing
+            sam: np.array(float)
+                Matrix containing the MCMC samples
+            obj_funk: np.array(float)
+                Vector containing the corresponding values of the objective function 
+                (i.e. of the unnormalized log-posterior)
+        """
+
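+        # Note: bayes_cov passes a jump standard deviation scaled by
+        # cov_prior_u / sqrt(n_transects); if theta_std is left as NaN, a default of
+        # 10% of |theta0| is used below.
+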
+        # Initialize
+        npar = len(theta0)
+        sam = np.zeros((nsim + 1, npar))  # MCMC samples, one row per iteration
+        obj_funk = np.zeros((nsim + 1, 1))  # unnormalized log-posterior for each sample
+
+        # Parameters - used for automatic computation of starting stds of the Gaussian Jump distribution
+        if np.any(np.isnan(theta_std)):
+            std_factor = 0.1
+            theta_std = std_factor * np.abs(theta0)
+
+        # Check if starting point is feasible - abandon otherwise
+        f_current = Oursin.log_post(param=theta0, measures=obs_data, cov_prior=cov_prior, cov_prior_u=cov_prior_u)
+
+        if not Oursin.is_feasible(f_current):
+            print('Metropolis:FATAL:unfeasible starting point')
+            w = {'sam': sam, 'obj_funk': obj_funk}
+            return w
+        else:
+            sam[0, :] = list(theta0)
+            obj_funk[0] = f_current
+
+            # MCMC loop
+            # candid = np.array([np.nan, np.nan])
+            np.random.seed(0)
+            for i in range(nsim):
+                current = sam[i, :]
+                f_current = obj_funk[i]
+                # Propose a new candidate
+                candid = np.random.normal(loc=current, scale=theta_std)
+                # Change for use in Numba
+                # candid[0] = np.random.normal(loc=current[0], scale=theta_std[0])
+                # candid[1] = np.random.normal(loc=current[1], scale=theta_std[1])
+                # Evaluate objective function at candidate
+                f_candid = Oursin.log_post(param=candid,
+                                           measures=obs_data,
+                                           cov_prior=cov_prior,
+                                           cov_prior_u=cov_prior_u)
+
+                if not Oursin.is_feasible(f_candid):
+                    sam[i + 1, :] = current
+                    obj_funk[i + 1] = f_current
+                else:
+                    # Generate deviate ~U[0,1]
+                    u = np.random.uniform(0, 1)
+
+                    # Compute Metropolis acceptance ratio
+                    # Changed for use in Numba
+                    ratio = math.exp(min(max(-100, f_candid - f_current), 0))
+                    # ratio = np.exp(min(((np.max(np.hstack((float(-100), f_candid - f_current))), float(0)))))
+
+                    # Apply acceptance rule
+                    if u <= ratio:
+                        sam[i + 1, :] = candid
+                        obj_funk[i + 1] = f_candid
+                    else:
+                        sam[i + 1, :] = current
+                        obj_funk[i + 1] = f_current
+
+            w = {'sam': sam, 'obj_funk': obj_funk}
+            return w
+
+    @staticmethod
+    def log_post(param, measures, cov_prior, cov_prior_u):
+        """Define function returning the posterior log-pdf using the model measures ~ N(true_value,cov*true_value),
+        with a flat prior on true_value and a log-normal prior for cov (= coefficient of variation)
+        
+        Parameters
+        ----------
+        param: np.array(float)
+            Array containing the true value and COV
+        
+        measures: np.array(float)
+            Array of observations
+        cov_prior: float
+            Expected COV (68%) based on prior knowledge.
+        cov_prior_u: float
+            Uncertainty (68%) of cov_prior.
+            
+        Returns
+        -------
+        logp: 
+        """
+        # Check if any parameter is <= 0, since both true_value and cov must be positive;
+        # otherwise sigma = true_value * cov does not make sense
+        if any(item <= 0 for item in param):
+            return -math.inf
+
+        true_value = param[0]
+        cov = param[1]
+        sigma = cov * true_value  # standard deviation
+
+        # Compute log-likelihood under the model: measures ~ N(true_value,sigma)
+        # You can easily change this model (e.g. lognormal for a positive measurand?)
+        # OPTION 1 : the model follows a Normal distribution
+        log_likelihood = np.sum(scipy.stats.norm.logpdf(measures, loc=true_value, scale=sigma))
+        # Change for Numba
+        # log_likelihood = np.sum(np.log(np.exp(-(((measures - true_value) / sigma) ** 2) / 2)
+        #                                / (np.sqrt(2 * np.pi) * sigma)))
+
+        # Prior on true_value - flat prior used here but you may change this if you have prior knowledge
+        log_prior_1 = 0
+
+        # Lognormal prior
+        x = cov
+        mu = np.log(cov_prior)
+        scale = cov_prior_u
+        pdf = np.exp(-(np.log(x) - mu) ** 2 / (2 * scale ** 2)) / (x * scale * np.sqrt(2 * np.pi))
+        log_prior_2 = np.log(pdf)
+
+        # Joint prior (prior independence)
+        log_prior = log_prior_1 + log_prior_2
+
+        # Return (unnormalized) log-posterior
+        logp = log_likelihood + log_prior
+        if np.isnan(logp):
+            logp = -math.inf  # returns -Inf rather than NaN's (required by the MCMC sampler used subsequently)
+        return logp
+
+    @staticmethod
+    def is_feasible(value):
+        """Checks that a value is a real value (not infinity or nan)
+        
+        Parameters
+        ----------
+        value: float or int
+            Value to check
+
+        Returns
+        -------
+        bool
+            True if the value is finite, False if it is infinite or NaN
+        """
+        if np.isinf(value) or np.isnan(value):
+            return False
+        else:
+            return True
+
+    # Hening Huang's proposed method for random uncertainty (not used)
+    # ---------------------------------------------------
+    @staticmethod
+    def hh_random_meas(meas):
+        """Implements the semi-empirical method for computing the random uncertainty of an ADCP discharge transect,
+        as presented in Hening Huang (2018) Estimating uncertainty of streamflow measurements with
+        moving-boat acoustic Doppler current profilers, Hydrological Sciences Journal, 63:3, 353-368,
+        DOI:10.1080/02626667.2018.1433833
+
+        Parameters
+        ----------
+        meas: Measurement
+            Object of class Measurement
+
+        Returns
+        -------
+            random_u: list
+                List of random uncertainty for each checked transect.
+        """
+        random_u = []
+        for idx in meas.checked_transect_idx:
+            # Get or compute base variables
+            q_m = meas.discharge[idx].middle
+            q_i = meas.discharge[idx].middle_ens
+            q_bar = np.nanmean(q_i[:])
+
+            # Compute r1
+            r1_numerator = []
+            r1_denominator = []
+            for n in range(len(q_i) - 1):
+                r1_numerator.append((q_i[n] - q_bar) * (q_i[n+1] - q_bar))
+                r1_denominator.append((q_i[n] - q_bar)**2)
+            r1_denominator.append((q_i[-1] - q_bar)**2)
+            r1 = np.nansum(r1_numerator) / np.nansum(r1_denominator)
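+            # r1 is the lag-1 autocorrelation coefficient of the ensemble discharges,
+            # which drives the inflation factor g(r1) below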
+
+            # Compute g(r1)
+            g_r1 = 0.6 + (0.1 * np.exp(r1)) + (0.01 * (1 - np.exp((r1**0.6)-1)**-0.08))
+            if g_r1 < 1:
+                g_r1 = 1.0
+
+            # Compute (delta qi)**2
+            alpha = 1. / 3.
+            c1 = 0.5 * (1 - alpha)
+            delta_list = []
+            for n in range(1, len(q_i) - 1):
+                q_i_hat = c1 * q_i[n - 1] + alpha * q_i[n] + c1 * q_i[n + 1]
+                delta_list.append((q_i[n] - q_i_hat)**2)
+
+            # Compute unbiased residual sum of squares
+            urss = ((2. / 3.) * (1 / (1 - alpha))**2) * np.nansum(delta_list)
+
+            # Compute random uncertainty
+            random_u.append(g_r1 * np.sqrt(urss) / q_m)
+
+        return random_u
diff --git a/Classes/Pd0TRDI.py b/Classes/Pd0TRDI.py
new file mode 100644
index 0000000..05bb1b9
--- /dev/null
+++ b/Classes/Pd0TRDI.py
@@ -0,0 +1,2463 @@
+import os
+import re
+import numpy as np
+import struct
+from MiscLibs.common_functions import pol2cart, valid_number, nans
+
+
+class Pd0TRDI(object):
+    """Class to read data from PD0 files
+
+    Attributes
+    ----------
+    file_name: str
+        Full name including path of pd0 file to be read
+    Hdr: Hdr
+        Object of Hdr to hold header information
+    Inst: Inst
+        Object of Inst to hold instrument information
+    Cfg: Cfg
+        Object of Cfg to hold configuration information
+    Sensor: Sensor
+        Object of Sensor to hold sensor data
+    Wt: Wt
+        Object of Wt to hold water track data
+    Bt: Bt
+        Object of Bt to hold bottom track data
+    Gps: Gps
+        Object of Gps to hold GPS data from previous versions of WinRiver
+    Gps2: Gps2
+        Object of Gps2 to hold GPS data from WinRiver II (WR2)
+    Surface: Surface
+        Object of Surface to hold surface cell data
+    AutoMode: AutoMode
+        Object of AutoMode to hold auto configuration settings
+    Nmea: Nmea
+        Object of Nmea to hold Nmea data
+    """
+    
+    def __init__(self, file_name):
+        """Constructor initializing instance variables.
+
+        Parameters
+        ----------
+        file_name: str
+            Full name including path of pd0 file to be read
+        """
+        
+        self.file_name = file_name
+        self.Hdr = None
+        self.Inst = None
+        self.Cfg = None
+        self.Sensor = None
+        self.Wt = None
+        self.Bt = None
+        self.Gps = None
+        self.Gps2 = None
+        self.Surface = None
+        self.AutoMode = None
+        self.Nmea = None
+        
+        self.pd0_read(file_name)
+        
+    def create_objects(self, n_ensembles, n_types, n_bins, max_surface_bins, n_velocities, wr2=False):
+        """Create objects for instance variables.
+
+        Parameters
+        ----------
+        n_ensembles: int
+            Number of ensembles
+        n_types: int
+            Number of data types
+        n_bins: int
+            Number of bins or depth cells
+        max_surface_bins: int
+            Maximum number of surface cells
+        n_velocities: int
+            Number of velocities
+        wr2: bool
+            Whether WR2 processing of GPS data should be applied
+        """
+
+        self.Hdr = Hdr(n_ensembles, n_types)
+        self.Inst = Inst(n_ensembles)
+        self.Cfg = Cfg(n_ensembles)
+        self.Sensor = Sensor(n_ensembles)
+        self.Wt = Wt(n_bins, n_ensembles, n_velocities)
+        self.Bt = Bt(n_ensembles, n_velocities)
+        self.Gps = Gps(n_ensembles)
+        self.Gps2 = Gps2(n_ensembles, wr2)
+        self.Surface = Surface(n_ensembles, n_velocities, max_surface_bins)
+        self.AutoMode = AutoMode(n_ensembles)
+        self.Nmea = Nmea(n_ensembles)
+
+    def pd0_read(self, fullname, wr2=False):
+        """Reads the binary pd0 file and assigns values to object instance variables.
+
+        Parameters
+        ----------
+        fullname: str
+            Full file name including path
+        wr2: bool
+            Determines if WR2 processing should be applied to GPS data
+        """
+
+        # Assign default values
+        n_velocities = 4
+        max_surface_bins = 5
+
+        # Check to ensure file exists
+        if os.path.exists(fullname):
+            file_info = os.path.getsize(fullname)
+
+            if file_info > 0:
+                # Open file for processing
+                with open(fullname, 'rb') as f:
+
+                    # Read leader ID
+                    leader_id = hex(np.fromfile(f, np.uint16, count=1)[0])
+                    # Leader ID 7f7f marks beginning of ensemble
+                    if leader_id != '0x7f7f':
+                        while leader_id != '0x7f7f':
+                            f.seek(-1, 1)
+                            leader_id = hex(np.fromfile(f, np.uint16, count=1)[0])
+
+                    # Read header information
+                    initial_pos = f.tell()-2
+                    bytes_per_ens = np.fromfile(f, dtype=np.uint16, count=1)[0]
+                    f.seek(1, 1)
+                    n_types = np.fromfile(f, np.uint8, count=1)[0]
+                    offset = np.fromfile(f, np.uint16, count=1)[0]
+                    f.seek(initial_pos+offset+8, 0)
+                    n_beams = np.fromfile(f, np.uint8, count=1)[0]
+                    n_bins = np.fromfile(f, np.uint8, count=1)[0]
+
+                    # Determine number of ensembles in the file to allow pre-allocation of arrays
+                    n_ensembles = Pd0TRDI.number_of_ensembles(f, file_info)
+
+                    # Create objects and pre-allocate arrays
+                    self.create_objects(n_ensembles=n_ensembles,
+                                        n_types=n_types,
+                                        n_bins=n_bins,
+                                        max_surface_bins=max_surface_bins,
+                                        n_velocities=n_velocities)
+
+                    # Initialize counters and variables
+                    i_ens = -1
+                    end_file_check = 0
+                    end_file = file_info
+                    i_data_types = 0
+                    n_data_types = 1
+                    file_loc = 0
+                    i2022 = 0
+                    j100, j101, j102, j103 = -1, -1, -1, -1
+                    rr_bt_depth_correction = np.tile(np.nan, (n_beams, n_ensembles))
+
+                    # Reset position in file
+                    f.seek(initial_pos, 0)
+
+                    # Begin reading file
+                    while end_file_check < end_file:
+
+                        # Read leader ID
+                        leader_id = hex(np.fromfile(f, np.uint16, count=1)[0])
+                        if i_data_types >= n_data_types and leader_id != '0x7f7f':
+                            leader_id = '0x9999'
+
+                        # 7f7f marks the beginning of an ensemble
+                        if leader_id == '0x7f7f':
+                            i2022 = 0
+                            file_loc = f.tell() - 2
+
+                            # Check for last ensemble in file
+                            if file_loc+bytes_per_ens > end_file and i_ens >= n_ensembles:
+                                end_file_check = end_file+1
+
+                            else:
+                                # Process ensemble
+                                i_data_types = 0
+                                store_file_loc = f.tell()
+                                bytes_per_ens = np.fromfile(f, np.uint16, count=1)[0]
+
+                                # Check checksum
+                                if self.check_sum(f, file_loc, bytes_per_ens):
+                                    f.seek(file_loc+5, 0)
+                                    n_data_types = np.fromfile(f, np.uint8, count=1)[0]
+                                    data_offsets = np.fromfile(f, np.uint16, count=n_data_types)
+
+                                    # Find variable leader ID
+                                    while i_data_types+1 <= n_data_types and leader_id != '0x80':
+                                        f.seek(data_offsets[i_data_types]+file_loc, 0)
+                                        leader_id = hex(np.fromfile(f, np.uint16, count=1)[0])
+                                        i_data_types += 1
+
+                                    # Check for consecutive ensemble numbers
+                                    if i_ens > -1 and leader_id == '0x80':
+                                        ens_num = np.fromfile(f, np.uint16, count=1)[0]
+                                        ens_num_diff = ens_num - self.Sensor.num[i_ens]
+                                        if ens_num_diff > 1:
+                                            for nn in range(0, int(ens_num_diff-1)):
+                                                if i_ens < n_ensembles:
+                                                    self.Sensor.num[i_ens] = self.Sensor.num[i_ens-1]+1
+                                                    i_ens += 1
+                                        elif ens_num_diff < 1:
+                                            i_ens -= 1
+                                else:
+                                    self.bad_check_sum(f, file_loc)
+
+                                # Initialize variables
+                                f.seek(store_file_loc, 0)
+                                i_data_types = 0
+                                j100, j101, j102, j103 = -1, -1, -1, -1
+                                i_ens += 1
+
+                                # Read bytes in this ensemble
+                                self.Hdr.bytes_per_ens[i_ens] = np.fromfile(f, np.uint16, count=1)[0]
+
+                                # If checksum is valid read header data
+                                if self.check_sum(f, file_loc, int(self.Hdr.bytes_per_ens[i_ens])):
+
+                                    # Read number of data types
+                                    f.seek(file_loc+5, 0)
+                                    self.Hdr.n_data_types[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+
+                                    # Read data offsets
+                                    test = np.fromfile(f, np.uint16, count=int(self.Hdr.n_data_types[i_ens]))
+                                    if test.shape[0] > self.Hdr.data_offsets.shape[1]:
+                                        self.Hdr.data_offsets = np.resize(self.Hdr.data_offsets,
+                                                                          (n_ensembles, test.shape[0]))
+                                    self.Hdr.data_offsets[i_ens, 0:int(self.Hdr.n_data_types[i_ens])] = \
+                                        test[0:int(self.Hdr.n_data_types[i_ens])]
+
+                                    # Check for end of data types
+                                    self.end_reading(f, file_loc, i_data_types, i_ens, bytes_per_ens)
+                                else:
+                                    self.bad_check_sum(f, file_loc)
+                                    i_data_types = -1
+
+                        # Read binary fixed leader data
+                        elif leader_id == '0x0':
+                            # Update data types counter
+                            i_data_types += 1
+
+                            # Read and decode firmware version
+                            self.Inst.firm_ver[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+                            self.Inst.firm_ver[i_ens] = self.Inst.firm_ver[i_ens] + \
+                                np.fromfile(f, np.uint8, count=1)[0] / 100
+
+                            # Read and decode instrument characteristics
+                            bitls = np.fromfile(f, np.uint8, count=1)[0]
+                            bitls = "{0:08b}".format(bitls)
+
+                            val = int(bitls[5:], 2)
+                            if val == 0:
+                                self.Inst.freq[i_ens] = 75
+                            elif val == 1:
+                                self.Inst.freq[i_ens] = 150
+                            elif val == 2:
+                                self.Inst.freq[i_ens] = 300
+                            elif val == 3:
+                                self.Inst.freq[i_ens] = 600
+                            elif val == 4:
+                                self.Inst.freq[i_ens] = 1200
+                            elif val == 5:
+                                self.Inst.freq[i_ens] = 2400
+                            else:
+                                self.Inst.freq[i_ens] = np.nan
+
+                            val = int(bitls[4], 2)
+                            if val == 0:
+                                self.Inst.pat[i_ens] = 'Concave'
+                            elif val == 1:
+                                self.Inst.pat[i_ens] = 'Convex'
+                            else:
+                                self.Inst.pat[i_ens] = 'n/a'
+
+                            self.Inst.sensor_CFG[i_ens] = int(bitls[2:4], 2) + 1
+
+                            val = int(bitls[1], 2)
+                            if val == 0:
+                                self.Inst.xducer[i_ens] = 'Not Attached'
+                            elif val == 1:
+                                self.Inst.xducer[i_ens] = 'Attached'
+                            else:
+                                self.Inst.xducer[i_ens] = 'n/a'
+
+                            val = int(bitls[0], 2)
+                            if val == 0:
+                                self.Sensor.orient[i_ens] = 'Down'
+                            elif val == 1:
+                                self.Sensor.orient[i_ens] = 'Up'
+                            else:
+                                self.Sensor.orient[i_ens] = 'n/a'
+
+                            bitms = np.fromfile(f, np.uint8, count=1)[0]
+                            bitms = "{0:08b}".format(bitms)
+
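+                            # bitms: bits 0-1 (bitms[6:]) give the beam angle and
+                            # bits 4-7 (bitms[:4]) the janus configuration.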
+                            val = int(bitms[6:], 2)
+                            if val == 0:
+                                self.Inst.beam_ang[i_ens] = 15
+                            elif val == 1:
+                                self.Inst.beam_ang[i_ens] = 20
+                            elif val == 2:
+                                self.Inst.beam_ang[i_ens] = 30
+                            else:
+                                self.Inst.beam_ang[i_ens] = np.nan
+
+                            val = int(bitms[:4], 2)
+                            if val == 4:
+                                self.Inst.beams[i_ens] = 4
+                            elif val == 5:
+                                self.Inst.beams[i_ens] = 5
+                                self.Inst.demod[i_ens] = 1
+                            elif val == 15:
+                                self.Inst.beams[i_ens] = 5
+                                self.Inst.demod[i_ens] = 2
+                            else:
+                                self.Inst.beams[i_ens] = np.nan
+                                self.Inst.demod[i_ens] = np.nan
+
+                            val = np.fromfile(f, np.uint8, count=1)[0]
+                            if val == 0:
+                                self.Inst.data_type[i_ens] = 'Real'
+                            else:
+                                self.Inst.data_type[i_ens] = 'Simu'
+
+                            # Position file pointer and read configuration information
+                            f.seek(1, 1)
+                            self.Cfg.n_beams[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+                            self.Cfg.wn[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+                            self.Cfg.wp[i_ens] = np.fromfile(f, np.uint16, count=1)[0]
+                            self.Cfg.ws_cm[i_ens] = np.fromfile(f, np.uint16, count=1)[0]
+                            self.Cfg.wf_cm[i_ens] = np.fromfile(f, np.uint16, count=1)[0]
+                            self.Cfg.wm[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+                            self.Cfg.wc[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+                            self.Cfg.code_reps[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+                            self.Cfg.wg_per[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+                            self.Cfg.we_mmps[i_ens] = np.fromfile(f, np.uint16, count=1)[0]
+                            self.Cfg.tp_sec[i_ens] = np.sum(np.fromfile(f, np.uint8, count=3) * np.array([60, 1, 0.01]))
+                            self.Cfg.ex[i_ens] = "{0:08b}".format(ord(f.read(1)))
+
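+                            # ex is an MSB-first bit string: ex[3:5] holds the
+                            # coordinate-system code; ex[5], ex[6], ex[7] are the
+                            # tilt-use, 3-beam, and bin-mapping flags decoded below.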
+                            val = int(self.Cfg.ex[i_ens][3:5], 2)
+                            if val == 0:
+                                self.Cfg.coord_sys[i_ens] = 'Beam'
+                            elif val == 1:
+                                self.Cfg.coord_sys[i_ens] = 'Inst'
+                            elif val == 2:
+                                self.Cfg.coord_sys[i_ens] = 'Ship'
+                            elif val == 3:
+                                self.Cfg.coord_sys[i_ens] = 'Earth'
+                            else:
+                                self.Cfg.coord_sys[i_ens] = 'N/a'
+
+                            val = int(self.Cfg.ex[i_ens][5], 2)
+                            if val == 0:
+                                self.Cfg.use_pr = 'No'
+                            elif val == 1:
+                                self.Cfg.use_pr = 'Yes'
+                            else:
+                                self.Cfg.use_pr = 'N/a'
+
+                            val = int(self.Cfg.ex[i_ens][6], 2)
+                            if val == 0:
+                                self.Cfg.use_3beam = 'No'
+                            elif val == 1:
+                                self.Cfg.use_3beam = 'Yes'
+                            else:
+                                self.Cfg.use_3beam = 'N/a'
+
+                            val = int(self.Cfg.ex[i_ens][7], 2)
+                            if val == 0:
+                                self.Cfg.map_bins = 'No'
+                            elif val == 1:
+                                self.Cfg.map_bins = 'Yes'
+                            else:
+                                self.Cfg.map_bins = 'N/a'
+
+                            self.Cfg.ea_deg[i_ens] = np.fromfile(f, np.int16, count=1)[0] * 0.01
+                            self.Cfg.eb_deg[i_ens] = np.fromfile(f, np.uint16, count=1)[0] * 0.01
+                            self.Cfg.ez[i_ens] = "{0:08b}".format(np.fromfile(f, np.uint8, count=1)[0])
+
+                            val = int(self.Cfg.ez[i_ens][:2], 2)
+                            if val == 0:
+                                self.Cfg.sos_src[i_ens] = 'Manual EC'
+                            elif val == 1:
+                                self.Cfg.sos_src[i_ens] = 'Calculated'
+                            elif val == 3:
+                                self.Cfg.sos_src[i_ens] = 'SVSS Sensor'
+                            else:
+                                self.Cfg.sos_src[i_ens] = 'N/a'
+
+                            val = int(self.Cfg.ez[i_ens][2], 2)
+                            if val == 0:
+                                self.Cfg.xdcr_dep_srs[i_ens] = 'Manual ED'
+                            elif val == 1:
+                                self.Cfg.xdcr_dep_srs[i_ens] = 'Sensor'
+                            else:
+                                self.Cfg.xdcr_dep_srs[i_ens] = 'N/a'
+
+                            val = int(self.Cfg.ez[i_ens][3], 2)
+                            if val == 0:
+                                self.Cfg.head_src[i_ens] = 'Manual EH'
+                            elif val == 1:
+                                self.Cfg.head_src[i_ens] = 'Int. Sensor'
+                            else:
+                                self.Cfg.head_src[i_ens] = 'N/a'
+
+                            val = int(self.Cfg.ez[i_ens][4], 2)
+                            if val == 0:
+                                self.Cfg.pitch_src[i_ens] = 'Manual EP'
+                            elif val == 1:
+                                self.Cfg.pitch_src[i_ens] = 'Int. Sensor'
+                            else:
+                                self.Cfg.pitch_src[i_ens] = 'N/a'
+
+                            val = int(self.Cfg.ez[i_ens][5], 2)
+                            if val == 0:
+                                self.Cfg.roll_src[i_ens] = 'Manual ER'
+                            elif val == 1:
+                                self.Cfg.roll_src[i_ens] = 'Int. Sensor'
+                            else:
+                                self.Cfg.roll_src[i_ens] = 'N/a'
+
+                            val = int(self.Cfg.ez[i_ens][6], 2)
+                            if val == 0:
+                                self.Cfg.xdcr_dep_srs[i_ens] = 'Manual ES'
+                            elif val == 1:
+                                self.Cfg.xdcr_dep_srs[i_ens] = 'Int. Sensor'
+                            else:
+                                self.Cfg.xdcr_dep_srs[i_ens] = 'N/a'
+
+                            val = int(self.Cfg.ez[i_ens][7], 2)
+                            if val == 0:
+                                self.Cfg.temp_src[i_ens] = 'Manual ET'
+                            elif val == 1:
+                                self.Cfg.temp_src[i_ens] = 'Int. Sensor'
+                            else:
+                                self.Cfg.temp_src[i_ens] = 'N/a'
+
+                            self.Cfg.sensor_avail[i_ens] = "{0:08b}".format(np.fromfile(f, np.uint8, count=1)[0])
+                            self.Cfg.dist_bin1_cm[i_ens] = np.fromfile(f, np.uint16, count=1)[0]
+                            self.Cfg.xmit_pulse_cm[i_ens] = np.fromfile(f, np.uint16, count=1)[0]
+                            self.Cfg.ref_lay_str_cell[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+                            self.Cfg.ref_lay_end_cell[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+                            self.Cfg.wa[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+                            self.Cfg.cx[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+                            self.Cfg.lag_cm[i_ens] = np.fromfile(f, np.uint16, count=1)[0]
+                            self.Cfg.cpu_ser_no[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+                            self.Cfg.wb[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+                            self.Cfg.cq[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+
+                            # Check if more data types need to be read and position the pointer
+                            self.end_reading(f, file_loc, i_data_types, i_ens, bytes_per_ens)
+
+                        # Read variable leader data
+                        elif leader_id == '0x80':
+                            # Update the data types counter
+                            i_data_types += 1
+
+                            # Read instrument clock and sensor data
+                            self.Sensor.num[i_ens] = np.fromfile(f, np.uint16, count=1)[0]
+                            self.Sensor.date_not_y2k[i_ens, :] = np.fromfile(f, np.uint8, count=3)
+                            self.Sensor.time[i_ens, :] = np.fromfile(f, np.uint8, count=4)
+                            self.Sensor.num_fact[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
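+                            # num_fact counts rollovers of the 16-bit ensemble number;
+                            # the sequential total is num + num_fact * 2**16.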
+                            self.Sensor.num_tot[i_ens] = self.Sensor.num[i_ens] + self.Sensor.num_fact[i_ens]*65536
+                            self.Sensor.bit_test[i_ens] = np.fromfile(f, np.uint16, count=1)[0]
+                            self.Sensor.sos_mps[i_ens] = np.fromfile(f, np.uint16, count=1)[0]
+                            self.Sensor.xdcr_depth_dm[i_ens] = np.fromfile(f, np.uint16, count=1)[0]
+                            self.Sensor.heading_deg[i_ens] = np.fromfile(f, np.uint16, count=1)[0] / 100.
+                            self.Sensor.pitch_deg[i_ens] = np.fromfile(f, np.int16, count=1)[0] / 100.
+                            self.Sensor.roll_deg[i_ens] = np.fromfile(f, np.int16, count=1)[0] / 100.
+                            self.Sensor.salinity_ppt[i_ens] = np.fromfile(f, np.uint16, count=1)[0]
+                            self.Sensor.temperature_deg_c[i_ens] = np.fromfile(f, np.int16, count=1)[0] / 100.
+                            self.Sensor.mpt_msc[i_ens, :] = np.fromfile(f, np.uint8, count=3)
+                            self.Sensor.heading_std_dev_deg[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+                            self.Sensor.pitch_std_dev_deg[i_ens] = np.fromfile(f, np.uint8, count=1)[0] / 10.
+                            self.Sensor.roll_std_dev_deg[i_ens] = np.fromfile(f, np.uint8, count=1)[0] / 10.
+                            self.Sensor.xmit_current[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+                            self.Sensor.xmit_voltage[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+                            self.Sensor.ambient_temp[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+                            self.Sensor.pressure_pos[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+                            self.Sensor.pressure_neg[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+                            self.Sensor.attitude_temp[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+                            self.Sensor.attitude[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+                            self.Sensor.contam_sensor[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+                            self.Sensor.error_status_word[i_ens] = ["{0:08b}".format(x)
+                                                                    for x in np.fromfile(f, np.uint8, count=4)]
+                            f.seek(2, 1)
+                            self.Sensor.pressure_pascal[i_ens] = np.fromfile(f, np.uint32, count=1)[0]
+                            self.Sensor.pressure_var_pascal[i_ens] = np.fromfile(f, np.uint32, count=1)[0]
+
+                            f.seek(1, 1)
+                            self.Sensor.date_y2k[i_ens, :] = np.fromfile(f, np.uint8, count=4)
+                            self.Sensor.time_y2k[i_ens, :] = np.fromfile(f, np.uint8, count=4)
+                            self.Sensor.date[i_ens, :] = self.Sensor.date_not_y2k[i_ens, :]
+                            self.Sensor.date[i_ens, 0] = self.Sensor.date_y2k[i_ens, 0] * 100 + \
+                                self.Sensor.date_y2k[i_ens, 1]
+                            self.Cfg.lag_near_bottom[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+
+                            # Check if more data types need to be read and position the pointer
+                            self.end_reading(f, file_loc, i_data_types, i_ens, bytes_per_ens)
+
+                        # Read water-tracking velocity data
+                        elif leader_id == '0x100':
+                            # Update the data types counter
+                            i_data_types += 1
+
+                            if self.Cfg.wn[i_ens] > self.Wt.vel_mps.shape[1]:
+                                append = np.zeros([self.Wt.vel_mps.shape[0],
+                                                   int(self.Cfg.wn[i_ens] - self.Wt.vel_mps.shape[1]),
+                                                   self.Wt.vel_mps.shape[2]])
+                                self.Wt.vel_mps = np.hstack([self.Wt.vel_mps, append])
+
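+                            # Raw int16 velocity counts are stored as (component, cell,
+                            # ensemble); the PD0 bad-velocity marker -32768 is kept as-is.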
+                            dummy = np.fromfile(f, np.int16, count=int(self.Cfg.wn[i_ens]*4))
+                            dummy = np.reshape(dummy, [int(self.Cfg.wn[i_ens]), n_velocities])
+                            self.Wt.vel_mps[:n_velocities, :int(self.Cfg.wn[i_ens]), i_ens] = dummy.T
+
+                            # Check if more data types need to be read and position the pointer
+                            self.end_reading(f, file_loc, i_data_types, i_ens, bytes_per_ens)
+
+                        # Read correlation magnitude
+                        elif leader_id == '0x200':
+                            # Update the data types counter
+                            i_data_types += 1
+
+                            if self.Cfg.wn[i_ens] > self.Wt.corr.shape[1]:
+                                append = np.zeros([self.Wt.corr.shape[0],
+                                                   int(self.Cfg.wn[i_ens] - self.Wt.corr.shape[1]),
+                                                   self.Wt.corr.shape[2]])
+                                self.Wt.corr = np.hstack([self.Wt.corr, append])
+
+                            dummy = np.fromfile(f, np.uint8, count=int(self.Cfg.wn[i_ens]*4))
+                            dummy = np.reshape(dummy, [int(self.Cfg.wn[i_ens]), n_velocities])
+                            self.Wt.corr[:n_velocities, :int(self.Cfg.wn[i_ens]), i_ens] = dummy.T
+
+                            # Check if more data types need to be read and position the pointer
+                            self.end_reading(f, file_loc, i_data_types, i_ens, bytes_per_ens)
+
+                        # Read echo intensity
+                        elif leader_id == '0x300':
+                            # Update the data types counter
+                            i_data_types += 1
+
+                            if self.Cfg.wn[i_ens] > self.Wt.rssi.shape[1]:
+                                append = np.zeros([self.Wt.rssi.shape[0],
+                                                   int(self.Cfg.wn[i_ens] - self.Wt.rssi.shape[1]),
+                                                   self.Wt.rssi.shape[2]])
+                                self.Wt.rssi = np.hstack([self.Wt.rssi, append])
+
+                            dummy = np.fromfile(f, np.uint8, count=int(self.Cfg.wn[i_ens]*4))
+                            dummy = np.reshape(dummy, [int(self.Cfg.wn[i_ens]), n_velocities])
+                            self.Wt.rssi[:n_velocities, :int(self.Cfg.wn[i_ens]), i_ens] = dummy.T
+
+                            # Check if more data types need to be read and position the pointer
+                            self.end_reading(f, file_loc, i_data_types, i_ens, bytes_per_ens)
+
+                        # Read percent-good data
+                        elif leader_id == '0x400':
+                            # Update the data types counter
+                            i_data_types += 1
+
+                            if self.Cfg.wn[i_ens] > self.Wt.pergd.shape[1]:
+                                append = np.zeros([self.Wt.pergd.shape[0],
+                                                   int(self.Cfg.wn[i_ens] - self.Wt.pergd.shape[1]),
+                                                   self.Wt.pergd.shape[2]])
+                                self.Wt.pergd = np.hstack([self.Wt.pergd, append])
+                            dummy = np.fromfile(f, np.uint8, count=int(self.Cfg.wn[i_ens]*4))
+                            dummy = np.reshape(dummy, [int(self.Cfg.wn[i_ens]), n_velocities])
+                            self.Wt.pergd[:n_velocities, :int(self.Cfg.wn[i_ens]), i_ens] = dummy.T
+
+                            # Check if more data types need to be read and position the pointer
+                            self.end_reading(f, file_loc, i_data_types, i_ens, bytes_per_ens)
+
+                        # Read bottom track data
+                        elif leader_id == '0x600':
+                            # Update the data types counter
+                            i_data_types += 1
+
+                            # Read bottom track configuration data
+                            self.Cfg.bp[i_ens] = np.fromfile(f, np.uint16, count=1)[0]
+                            long1 = np.fromfile(f, np.uint16, count=1)[0]
+                            self.Cfg.bc[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+                            self.Cfg.ba[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+                            self.Cfg.bg[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+                            self.Cfg.bm[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+                            self.Cfg.be_mmps[i_ens] = np.fromfile(f, np.uint16, count=1)[0]
+
+                            # Read WinRiver 10.06 format GPS data
+                            self.Gps.lat_deg[i_ens] = (np.fromfile(f, np.int32, count=1)[0]/2**31) * 180
+
+                            # Read the Least Significant Bytes for beam depths
+                            dummy = np.fromfile(f, np.uint16, count=4)
+                            self.Bt.depth_m[0:4, i_ens] = dummy.T
+
+                            # Read bottom-track velocities
+                            dummy = np.fromfile(f, np.int16, count=4)
+                            self.Bt.vel_mps[0:4, i_ens] = dummy.T
+
+                            # Read bottom-track correlations
+                            dummy = np.fromfile(f, np.uint8, count=4)
+                            self.Bt.corr[0:4, i_ens] = dummy.T
+
+                            # Read bottom-track evaluation amplitude
+                            dummy = np.fromfile(f, np.uint8, count=4)
+                            self.Bt.eval_amp[0:4, i_ens] = dummy.T
+
+                            # Read bottom-track percent good
+                            dummy = np.fromfile(f, np.uint8, count=4)
+                            self.Bt.pergd[0:4, i_ens] = dummy.T
+
+                            # Read WinRiver 10.06 format GPS data
+                            dummy = np.fromfile(f, np.uint16, count=1)[0]
+                            if dummy != 32768:
+                                self.Gps.alt_m[i_ens] = (dummy - 32768) / 10
+                            else:
+                                self.Gps.alt_m[i_ens] = np.nan
+
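+                            # Longitude arrives as a 32-bit binary angle split across
+                            # this data type: long1 (read earlier) is the low word and
+                            # long2 the high word. A minimal sketch of the conversion,
+                            # assuming a signed 32-bit value:
+                            #     def bam_to_deg(val32):
+                            #         return val32 / 2 ** 31 * 180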
+                            long2 = np.fromfile(f, np.uint16, count=1)[0]
+                            self.Gps.long_deg[i_ens] = ((long1+long2*2**16)/2**31)*180
+                            if self.Gps.long_deg[i_ens] > 180:
+                                self.Gps.long_deg[i_ens] -= 360
+
+                            self.Bt.ext_depth_cm[i_ens] = np.fromfile(f, np.int16, count=1)[0]
+                            dummy = np.fromfile(f, np.int16, count=1)[0]
+                            if dummy != -32768:
+                                self.Gps.gga_vel_e_mps[i_ens] = dummy * -1 / 1000
+                            else:
+                                self.Gps.gga_vel_e_mps[i_ens] = np.nan
+
+                            dummy = np.fromfile(f, np.int16, count=1)[0]
+                            if dummy != -32768:
+                                self.Gps.gga_vel_n_mps[i_ens] = dummy * -1 / 1000
+                            else:
+                                self.Gps.gga_vel_n_mps[i_ens] = np.nan
+
+                            dummy = np.fromfile(f, np.int16, count=1)[0]
+                            if dummy != -32768:
+                                self.Gps.vtg_vel_e_mps[i_ens] = dummy * -1 / 1000
+                            else:
+                                self.Gps.vtg_vel_e_mps[i_ens] = np.nan
+
+                            dummy = np.fromfile(f, np.int16, count=1)[0]
+                            if dummy != -32768:
+                                self.Gps.vtg_vel_n_mps[i_ens] = dummy * -1 / 1000
+                            else:
+                                self.Gps.vtg_vel_n_mps[i_ens] = np.nan
+
+                            dummy = np.fromfile(f, np.uint8, count=1)[0]
+                            if dummy != 0:
+                                self.Gps.gsa_v_dop[i_ens] = dummy
+                            dummy = np.fromfile(f, np.uint8, count=1)[0]
+                            if dummy != 0:
+                                self.Gps.gsa_p_dop[i_ens] = dummy
+                            dummy = np.fromfile(f, np.uint8, count=1)[0]
+                            if dummy != 0:
+                                self.Gps.gga_n_stats[i_ens] = dummy
+
+                            f.seek(1, 1)
+                            self.Gps.gsa_sat[i_ens, 4] = np.fromfile(f, np.uint8, count=1)[0]
+                            self.Gps.gsa_sat[i_ens, 5] = np.fromfile(f, np.uint8, count=1)[0]
+                            self.Gps.gga_diff[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+
+                            dummy = np.fromfile(f, np.uint8, count=1)[0]
+                            if dummy != 0:
+                                self.Gps.gga_hdop[i_ens] = dummy / 10
+
+                            self.Gps.gsa_sat[i_ens, 0] = np.fromfile(f, np.uint8, count=1)[0]
+                            self.Gps.gsa_sat[i_ens, 1] = np.fromfile(f, np.uint8, count=1)[0]
+                            self.Gps.gsa_sat[i_ens, 2] = np.fromfile(f, np.uint8, count=1)[0]
+                            self.Gps.gsa_sat[i_ens, 3] = np.fromfile(f, np.uint8, count=1)[0]
+
+                            # Read bx configuration setting
+                            self.Cfg.bx_dm[i_ens] = np.fromfile(f, np.uint16, count=1)[0]
+
+                            # Read bottom-tracking RSSI
+                            self.Bt.rssi[0, i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+                            self.Bt.rssi[1, i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+                            self.Bt.rssi[2, i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+                            self.Bt.rssi[3, i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+
+                            # Read wj configuration setting
+                            self.Cfg.wj[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+
+                            # Read most significant byte and compute beam depths
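+                            # The four beam depths share this one MSB: it adds
+                            # value * 2**16 cm to the 16-bit LSB depths read above,
+                            # stored in meters via the /100.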
+                            dummy = np.fromfile(f, np.uint8, count=1)[0]
+                            rr_bt_depth_correction[0:4, i_ens] = dummy * 2**16 / 100
+
+                            # Check if more data types need to be read and position the pointer
+                            self.end_reading(f, file_loc, i_data_types, i_ens, bytes_per_ens)
+
+                        # Read general NMEA structure
+                        # Data type '2022' contains sub data types that identify specific
+                        # NMEA 0183 sentences to be decoded. There may be multiple values
+                        # for a single ensemble.
+                        elif leader_id == '0x2022':
+                            i2022 += 1
+                            # Update the data types counter
+                            i_data_types += 1
+
+                            specific_id = np.fromfile(f, np.int16, count=1)[0]
+                            msg_size = np.fromfile(f, np.int16, count=1)[0]
+                            delta_time = np.fromfile(f, np.double, count=1)[0]
+
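+                            # Sub-type ids: 100-103 are binary GGA/VTG/DBT/HDT fields
+                            # with 10-byte headers, 104-107 the same with 7-byte headers,
+                            # and 204-207 the raw ASCII sentence parsed by splitting on
+                            # commas. msg_size is the sentence length in bytes and
+                            # delta_time the time offset from the ensemble.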
+                            # GGA
+                            if specific_id == 100:
+                                j100 += 1
+                                # If the number of values exceeds 20 expand arrays
+                                if j100 > self.Gps2.gga_delta_time.shape[1] - 1:
+                                    self.Gps2.gga_expand(n_ensembles)
+
+                                self.Gps2.gga_delta_time[i_ens, j100] = delta_time
+
+                                self.Gps2.gga_header[i_ens][j100] = ''.join([chr(x) for x in f.read(10)])
+
+                                try:
+                                    temp = ''.join([chr(x) for x in f.read(10)])
+                                    self.Gps2.utc[i_ens, j100] = float(re.findall(r'^\d+\.\d+|\d+', temp)[0])
+                                except ValueError:
+                                    self.Gps2.utc[i_ens, j100] = np.nan
+
+                                self.Gps2.lat_deg[i_ens, j100] = np.fromfile(f, np.float64, count=1)[0]
+                                self.Gps2.lat_ref[i_ens][j100] = chr(f.read(1)[0])
+                                self.Gps2.lon_deg[i_ens, j100] = np.fromfile(f, np.float64, count=1)[0]
+                                self.Gps2.lon_ref[i_ens][j100] = chr(f.read(1)[0])
+                                self.Gps2.corr_qual[i_ens, j100] = np.fromfile(f, np.uint8, count=1)[0]
+                                self.Gps2.num_sats[i_ens, j100] = np.fromfile(f, np.uint8, count=1)[0]
+                                self.Gps2.hdop[i_ens, j100] = np.fromfile(f, np.float32, count=1)[0]
+                                self.Gps2.alt[i_ens, j100] = np.fromfile(f, np.float32, count=1)[0]
+                                self.Gps2.alt_unit[i_ens][j100] = chr(f.read(1)[0])
+                                self.Gps2.geoid[i_ens, j100] = np.fromfile(f, np.float32, count=1)[0]
+                                self.Gps2.geoid_unit[i_ens][j100] = chr(f.read(1)[0])
+                                self.Gps2.d_gps_age[i_ens, j100] = np.fromfile(f, np.float32, count=1)[0]
+                                self.Gps2.ref_stat_id[i_ens, j100] = np.fromfile(f, np.int16, count=1)[0]
+
+                            # VTG
+                            elif specific_id == 101:
+                                j101 += 1
+                                # If the number of values exceeds 20 expand arrays
+                                if j101 > self.Gps2.vtg_delta_time.shape[1] - 1:
+                                    self.Gps2.vtg_expand(n_ensembles)
+
+                                self.Gps2.vtg_delta_time[i_ens, j101] = delta_time
+                                self.Gps2.vtg_header[i_ens][j101] = ''.join([chr(x) for x in f.read(10)])
+                                self.Gps2.course_true[i_ens, j101] = np.fromfile(f, np.float32, count=1)[0]
+                                self.Gps2.true_indicator[i_ens][j101] = chr(f.read(1)[0])
+                                self.Gps2.course_mag[i_ens, j101] = np.fromfile(f, np.float32, count=1)[0]
+                                self.Gps2.mag_indicator[i_ens][j101] = chr(f.read(1)[0])
+                                self.Gps2.speed_knots[i_ens, j101] = np.fromfile(f, np.float32, count=1)[0]
+                                self.Gps2.knots_indicator[i_ens][j101] = chr(f.read(1)[0])
+                                self.Gps2.speed_kph[i_ens, j101] = np.fromfile(f, np.float32, count=1)[0]
+                                self.Gps2.kph_indicator[i_ens][j101] = chr(f.read(1)[0])
+                                self.Gps2.mode_indicator[i_ens][j101] = chr(f.read(1)[0])
+
+                            # Depth sounder
+                            elif specific_id == 102:
+                                j102 += 1
+
+                                if j102 > self.Gps2.dbt_delta_time.shape[1] - 1:
+                                    self.Gps2.dbt_expand(n_ensembles)
+
+                                self.Gps2.dbt_delta_time[i_ens, j102] = delta_time
+                                self.Gps2.dbt_header[i_ens][j102] = ''.join([chr(x) for x in f.read(10)])
+                                self.Gps2.depth_ft[i_ens, j102] = np.fromfile(f, np.float32, count=1)[0]
+                                self.Gps2.ft_indicator[i_ens][j102] = chr(f.read(1)[0])
+                                self.Gps2.depth_m[i_ens, j102] = np.fromfile(f, np.float32, count=1)[0]
+                                self.Gps2.m_indicator[i_ens][j102] = chr(f.read(1)[0])
+                                self.Gps2.depth_fath[i_ens, j102] = np.fromfile(f, np.float32, count=1)[0]
+                                self.Gps2.fath_indicator[i_ens][j102] = chr(f.read(1)[0])
+
+                            # External heading
+                            elif specific_id == 103:
+                                j103 += 1
+
+                                if j103 > self.Gps2.hdt_delta_time.shape[1] - 1:
+                                    self.Gps2.hdt_expand(n_ensembles)
+
+                                self.Gps2.hdt_delta_time[i_ens, j103] = delta_time
+                                self.Gps2.hdt_header[i_ens][j103] = ''.join([chr(x) for x in f.read(10)])
+                                self.Gps2.heading_deg[i_ens, j103] = np.fromfile(f, np.double, count=1)[0]
+                                self.Gps2.h_true_indicator[i_ens][j103] = chr(f.read(1)[0])
+
+                            # GGA
+                            elif specific_id == 104:
+                                j100 += 1
+
+                                if j100 > self.Gps2.gga_delta_time.shape[1] - 1:
+                                    self.Gps2.gga_expand(n_ensembles)
+
+                                self.Gps2.gga_delta_time[i_ens, j100] = delta_time
+                                try:
+                                    self.Gps2.gga_header[i_ens][j100] = ''.join([chr(x) for x in f.read(7)])
+                                except IndexError:
+                                    self.Gps2.gga_header[i_ens][j100] = '       '
+
+                                try:
+                                    temp = ''.join([chr(x) for x in f.read(10)])
+                                    self.Gps2.utc[i_ens, j100] = \
+                                        float(re.findall(r'^\d+\.\d+|\d+', temp)[0])
+                                except (ValueError, AttributeError, IndexError):
+                                    self.Gps2.utc[i_ens, j100] = np.nan
+
+                                self.Gps2.lat_deg[i_ens, j100] = np.fromfile(f, np.float64, count=1)[0]
+                                self.Gps2.lat_ref[i_ens][j100] = chr(f.read(1)[0])
+                                self.Gps2.lon_deg[i_ens, j100] = np.fromfile(f, np.float64, count=1)[0]
+                                self.Gps2.lon_ref[i_ens][j100] = chr(f.read(1)[0])
+                                self.Gps2.corr_qual[i_ens, j100] = np.fromfile(f, np.uint8, count=1)[0]
+                                self.Gps2.num_sats[i_ens, j100] = np.fromfile(f, np.uint8, count=1)[0]
+                                self.Gps2.hdop[i_ens, j100] = np.fromfile(f, np.float32, count=1)[0]
+                                self.Gps2.alt[i_ens, j100] = np.fromfile(f, np.float32, count=1)[0]
+                                self.Gps2.alt_unit[i_ens][j100] = chr(f.read(1)[0])
+                                self.Gps2.geoid[i_ens, j100] = np.fromfile(f, np.float32, count=1)[0]
+                                self.Gps2.geoid_unit[i_ens][j100] = chr(f.read(1)[0])
+                                self.Gps2.d_gps_age[i_ens, j100] = np.fromfile(f, np.float32, count=1)[0]
+                                self.Gps2.ref_stat_id[i_ens, j100] = np.fromfile(f, np.int16, count=1)[0]
+
+                            # VTG
+                            elif specific_id == 105:
+                                j101 += 1
+
+                                if j101 > self.Gps2.vtg_delta_time.shape[1] - 1:
+                                    self.Gps2.vtg_expand(n_ensembles)
+
+                                self.Gps2.vtg_delta_time[i_ens, j101] = delta_time
+                                self.Gps2.vtg_header[i_ens][j101] = ''.join([chr(x) for x in f.read(7)])
+                                self.Gps2.course_true[i_ens, j101] = np.fromfile(f, np.float32, count=1)[0]
+                                self.Gps2.true_indicator[i_ens][j101] = chr(f.read(1)[0])
+                                self.Gps2.course_mag[i_ens, j101] = np.fromfile(f, np.float32, count=1)[0]
+                                self.Gps2.mag_indicator[i_ens][j101] = chr(f.read(1)[0])
+                                self.Gps2.speed_knots[i_ens, j101] = np.fromfile(f, np.float32, count=1)[0]
+                                self.Gps2.knots_indicator[i_ens][j101] = chr(f.read(1)[0])
+                                self.Gps2.speed_kph[i_ens, j101] = np.fromfile(f, np.float32, count=1)[0]
+                                self.Gps2.kph_indicator[i_ens][j101] = chr(f.read(1)[0])
+                                self.Gps2.mode_indicator[i_ens][j101] = chr(f.read(1)[0])
+
+                            # Depth sounder
+                            elif specific_id == 106:
+                                j102 += 1
+
+                                if j102 > self.Gps2.dbt_delta_time.shape[1] - 1:
+                                    self.Gps2.dbt_expand(n_ensembles)
+
+                                self.Gps2.dbt_delta_time[i_ens, j102] = delta_time
+                                self.Gps2.dbt_header[i_ens][j102] = ''.join([chr(x) for x in f.read(7)])
+                                self.Gps2.depth_ft[i_ens, j102] = np.fromfile(f, np.float32, count=1)[0]
+                                self.Gps2.ft_indicator[i_ens][j102] = chr(f.read(1)[0])
+                                self.Gps2.depth_m[i_ens, j102] = np.fromfile(f, np.float32, count=1)[0]
+                                self.Gps2.m_indicator[i_ens][j102] = chr(f.read(1)[0])
+                                self.Gps2.depth_fath[i_ens, j102] = np.fromfile(f, np.float32, count=1)[0]
+                                self.Gps2.fath_indicator[i_ens][j102] = chr(f.read(1)[0])
+
+                            # External heading
+                            elif specific_id == 107:
+                                j103 += 1
+
+                                if j103 > self.Gps2.hdt_delta_time.shape[1] - 1:
+                                    self.Gps2.hdt_expand(n_ensembles)
+
+                                self.Gps2.hdt_delta_time[i_ens, j103] = delta_time
+                                self.Gps2.hdt_header[i_ens][j103] = ''.join([chr(x) for x in f.read(7)])
+                                self.Gps2.heading_deg[i_ens, j103] = np.fromfile(f, np.double, count=1)[0]
+                                self.Gps2.h_true_indicator[i_ens][j103] = chr(f.read(1)[0])
+
+                            # GGA
+                            elif specific_id == 204:
+                                j100 += 1
+
+                                if j100 > self.Gps2.gga_delta_time.shape[1] - 1:
+                                    self.Gps2.gga_expand(n_ensembles)
+
+                                temp = ''.join([chr(x) for x in f.read(msg_size)])
+                                self.Gps2.gga_sentence[i_ens][j100] = temp
+                                temp_array = np.array(temp.split(','))
+                                temp_array[temp_array == '999.9'] = ''
+
+                                try:
+                                    self.Gps2.gga_delta_time[i_ens, j100] = delta_time
+                                    self.Gps2.gga_header[i_ens][j100] = temp_array[0]
+                                    self.Gps2.utc[i_ens, j100] = float(temp_array[1])
+                                    lat_str = temp_array[2]
+                                    lat_deg = float(lat_str[0:2])
+                                    lat_deg = lat_deg+float(lat_str[2:]) / 60
+                                    self.Gps2.lat_deg[i_ens, j100] = lat_deg
+                                    self.Gps2.lat_ref[i_ens][j100] = temp_array[3]
+                                    lon_str = temp_array[4]
+                                    lon_num = float(lon_str)
+                                    lon_deg = np.floor(lon_num / 100)
+                                    lon_deg = lon_deg + (((lon_num / 100.) - lon_deg) * 100.) / 60.
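+                                    # NMEA packs angles as (d)ddmm.mmmm, converted above:
+                                    # e.g. '4519.1234' -> 45 + 19.1234 / 60 = 45.31872 deg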
+                                    self.Gps2.lon_deg[i_ens, j100] = lon_deg
+                                    self.Gps2.lon_ref[i_ens][j100] = temp_array[5]
+                                    self.Gps2.corr_qual[i_ens, j100] = float(temp_array[6])
+                                    self.Gps2.num_sats[i_ens, j100] = float(temp_array[7])
+                                    self.Gps2.hdop[i_ens, j100] = float(temp_array[8])
+                                    self.Gps2.alt[i_ens, j100] = float(temp_array[9])
+                                    self.Gps2.alt_unit[i_ens][j100] = temp_array[10]
+                                    self.Gps2.geoid[i_ens, j100] = float(temp_array[11])
+                                    self.Gps2.geoid_unit[i_ens][j100] = temp_array[12]
+                                    self.Gps2.d_gps_age[i_ens, j100] = float(temp_array[13])
+                                    idx_star = temp_array[14].find('*')
+                                    self.Gps2.ref_stat_id[i_ens, j100] = float(temp_array[14][:idx_star])
+
+                                except (ValueError, EOFError, IndexError):
+                                    pass
+
+                            # VTG
+                            elif specific_id == 205:
+                                j101 += 1
+
+                                if j101 > self.Gps2.vtg_delta_time.shape[1] - 1:
+                                    self.Gps2.vtg_expand(n_ensembles)
+
+                                temp = ''.join([chr(x) for x in f.read(msg_size)])
+                                self.Gps2.vtg_sentence[i_ens][j101] = temp
+                                temp_array = np.array(temp.split(','))
+                                temp_array[temp_array == '999.9'] = ''
+
+                                try:
+                                    self.Gps2.vtg_delta_time[i_ens, j101] = delta_time
+                                    self.Gps2.vtg_header[i_ens][j101] = temp_array[0]
+                                    self.Gps2.course_true[i_ens, j101] = valid_number(temp_array[1])
+                                    self.Gps2.true_indicator[i_ens][j101] = temp_array[2]
+                                    self.Gps2.course_mag[i_ens, j101] = valid_number(temp_array[3])
+                                    self.Gps2.mag_indicator[i_ens][j101] = temp_array[4]
+                                    self.Gps2.speed_knots[i_ens, j101] = valid_number(temp_array[5])
+                                    self.Gps2.knots_indicator[i_ens][j101] = temp_array[6]
+                                    self.Gps2.speed_kph[i_ens, j101] = valid_number(temp_array[7])
+                                    self.Gps2.kph_indicator[i_ens][j101] = temp_array[8]
+                                    idx_star = temp_array[9].find('*')
+                                    self.Gps2.mode_indicator[i_ens][j101] = temp_array[9][:idx_star]
+
+                                except (ValueError, EOFError, IndexError):
+                                    pass
+
+                            # Depth sounder
+                            elif specific_id == 206:
+                                j102 += 1
+
+                                if j102 > self.Gps2.dbt_delta_time.shape[1] - 1:
+                                    self.Gps2.dbt_expand(n_ensembles)
+
+                                temp = ''.join([chr(x) for x in f.read(msg_size)])
+                                temp_array = np.array(temp.split(','))
+                                temp_array[temp_array == '999.9'] = ''
+
+                                try:
+                                    self.Gps2.dbt_delta_time[i_ens, j102] = delta_time
+                                    self.Gps2.dbt_header[i_ens][j102] = temp_array[0]
+                                    self.Gps2.depth_ft[i_ens, j102] = float(temp_array[1])
+                                    self.Gps2.ft_indicator[i_ens][j102] = temp_array[2]
+                                    self.Gps2.depth_m[i_ens, j102] = float(temp_array[3])
+                                    self.Gps2.m_indicator[i_ens][j102] = temp_array[4]
+                                    self.Gps2.depth_fath[i_ens, j102] = float(temp_array[5])
+                                    idx_star = temp_array[6].find('*')
+                                    self.Gps2.fath_indicator[i_ens][j102] = temp_array[6][:idx_star]
+
+                                except (ValueError, EOFError, IndexError):
+                                    pass
+
+                            # External heading
+                            elif specific_id == 207:
+                                j103 += 1
+
+                                if j103 > self.Gps2.hdt_delta_time.shape[1] - 1:
+                                    self.Gps2.hdt_expand(n_ensembles)
+
+                                temp = ''.join([chr(x) for x in f.read(msg_size)])
+                                temp_array = np.array(temp.split(','))
+                                temp_array[temp_array == '999.9'] = ''
+
+                                try:
+                                    self.Gps2.hdt_delta_time[i_ens, j103] = delta_time
+                                    self.Gps2.hdt_header[i_ens][j103] = temp_array[0]
+                                    self.Gps2.heading_deg[i_ens, j103] = float(temp_array[1])
+                                    idx_star = temp_array[2].find('*')
+                                    self.Gps2.h_true_indicator[i_ens][j103] = temp_array[2][:idx_star]
+
+                                except (ValueError, EOFError, IndexError):
+                                    pass
+
+                            # Check if more data types need to be read and position the pointer
+                            self.end_reading(f, file_loc, i_data_types, i_ens, bytes_per_ens)
+
+                        # Raw NMEA dbt sentence
+                        elif leader_id == '0x2100':
+
+                            # Update data types counter
+                            i_data_types += 1
+
+                            # Reposition file pointer
+                            f.seek(int(self.Hdr.data_offsets[i_ens, i_data_types-1])+file_loc+4, 0)
+
+                            # Determine the number of characters to read
+                            if i_data_types < self.Hdr.n_data_types[i_ens]:
+                                num_2_read = self.Hdr.data_offsets[i_ens, i_data_types] \
+                                             - self.Hdr.data_offsets[i_ens, i_data_types - 1] - 4
+                            else:
+                                num_2_read = bytes_per_ens - self.Hdr.data_offsets[i_ens, i_data_types-1] - 6
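+                            # num_2_read is derived from the offsets table: the 4 skips
+                            # the leader ID and two header bytes already consumed; for
+                            # the last data type the 6 also drops the trailing checksum
+                            # bytes (assumed from the seek arithmetic above).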
+
+                            # Read DBT sentence
+                            self.Nmea.dbt[i_ens] = ''.join([chr(x) for x in f.read(int(num_2_read))])
+
+                            # Check if more data types need to be read and position the pointer
+                            self.end_reading(f, file_loc, i_data_types, i_ens, bytes_per_ens)
+
+                        # Raw NMEA gga sentence
+                        elif leader_id == '0x2101':
+                            # Update data types counter
+                            i_data_types += 1
+
+                            # Reposition file pointer
+                            f.seek(int(self.Hdr.data_offsets[i_ens, i_data_types-1])+file_loc+4, 0)
+
+                            # Determine the number of characters to read
+                            if i_data_types < self.Hdr.n_data_types[i_ens]:
+                                num_2_read = self.Hdr.data_offsets[i_ens, i_data_types] \
+                                             - self.Hdr.data_offsets[i_ens, i_data_types - 1] - 4
+                            else:
+                                num_2_read = bytes_per_ens - self.Hdr.data_offsets[i_ens, i_data_types-1] - 6
+
+                            # Read GGA sentence
+                            self.Nmea.gga[i_ens] = ''.join([chr(x) for x in f.read(int(num_2_read))])
+
+                            # Check if more data types need to be read and position the pointer
+                            self.end_reading(f, file_loc, i_data_types, i_ens, bytes_per_ens)
+
+                        # Raw NMEA vtg sentence
+                        elif leader_id == '0x2102':
+                            # Update data types counter
+                            i_data_types += 1
+
+                            # Reposition file pointer
+                            f.seek(int(self.Hdr.data_offsets[i_ens, i_data_types-1])+file_loc+4, 0)
+
+                            # Determine the number of characters to read
+                            if i_data_types < self.Hdr.n_data_types[i_ens]:
+                                num_2_read = self.Hdr.data_offsets[i_ens, i_data_types] \
+                                             - self.Hdr.data_offsets[i_ens, i_data_types - 1] - 4
+                            else:
+                                num_2_read = bytes_per_ens - self.Hdr.data_offsets[i_ens, i_data_types-1] - 6
+
+                            # Read VTG sentence
+                            self.Nmea.vtg[i_ens] = ''.join([chr(x) for x in f.read(int(num_2_read))])
+
+                            # Check if more data types need to be read and position the pointer
+                            self.end_reading(f, file_loc, i_data_types, i_ens, bytes_per_ens)
+
+                        # Raw NMEA gsa sentence
+                        elif leader_id == '0x2103':
+                            # Update data types counter
+                            i_data_types += 1
+
+                            # Reposition file pointer
+                            f.seek(int(self.Hdr.data_offsets[i_ens, i_data_types-1])+file_loc+4, 0)
+
+                            # Determine the number of characters to read
+                            if i_data_types < self.Hdr.n_data_types[i_ens]:
+                                num_2_read = self.Hdr.data_offsets[i_ens, i_data_types] \
+                                             - self.Hdr.data_offsets[i_ens, i_data_types - 1] - 4
+                            else:
+                                num_2_read = bytes_per_ens - self.Hdr.data_offsets[i_ens, i_data_types-1] - 6
+
+                            # Read GSA sentence
+                            self.Nmea.gsa[i_ens] = ''.join([chr(x) for x in f.read(int(num_2_read))])
+
+                            # Check if more data types need to be read and position the pointer
+                            self.end_reading(f, file_loc, i_data_types, i_ens, bytes_per_ens)
+
+                        # Surface cells: cell data
+                        elif leader_id == '0x10':
+                            # Update data types counter
+                            i_data_types += 1
+
+                            self.Surface.no_cells[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+                            self.Surface.cell_size_cm[i_ens] = np.fromfile(f, np.uint16, count=1)[0]
+                            self.Surface.dist_bin1_cm[i_ens] = np.fromfile(f, np.uint16, count=1)[0]
+
+                            # Check if more data types need to be read and position the pointer
+                            self.end_reading(f, file_loc, i_data_types, i_ens, bytes_per_ens)
+
+                        # Surface cells: velocity data
+                        elif leader_id == '0x110':
+                            # Update data types counter
+                            i_data_types += 1
+
+                            dummy = np.fromfile(f, np.int16, count=int((self.Surface.no_cells[i_ens]*4)))
+                            dummy = np.reshape(dummy, [int(self.Surface.no_cells[i_ens]), n_velocities])
+                            self.Surface.vel_mps[:n_velocities, :int(self.Surface.no_cells[i_ens]), i_ens] = dummy.T
+
+                            # Check if more data types need to be read and position the pointer
+                            self.end_reading(f, file_loc, i_data_types, i_ens, bytes_per_ens)
+
+                        # Surface cells: correlation magnitude
+                        elif leader_id == '0x210':
+                            # Update data types counter
+                            i_data_types += 1
+
+                            dummy = np.fromfile(f, np.uint8, count=int((self.Surface.no_cells[i_ens]*4)))
+                            dummy = np.reshape(dummy, [int(self.Surface.no_cells[i_ens]), n_velocities])
+                            self.Surface.corr[:n_velocities, :int(self.Surface.no_cells[i_ens]), i_ens] = dummy.T
+
+                            # Check if more data types need to be read and position the pointer
+                            self.end_reading(f, file_loc, i_data_types, i_ens, bytes_per_ens)
+
+                        # Surface cells: echo intensity
+                        elif leader_id == '0x310':
+                            # Update data types counter
+                            i_data_types += 1
+
+                            dummy = np.fromfile(f, np.uint8, count=int((self.Surface.no_cells[i_ens]*4)))
+                            dummy = np.reshape(dummy, [int(self.Surface.no_cells[i_ens]), n_velocities])
+                            self.Surface.rssi[:n_velocities, :int(self.Surface.no_cells[i_ens]), i_ens] = dummy.T
+
+                            # Check if more data types need to be read and position the pointer
+                            self.end_reading(f, file_loc, i_data_types, i_ens, bytes_per_ens)
+
+                        # Surface cells: percent good
+                        elif leader_id == '0x410':
+                            # Update data types counter
+                            i_data_types += 1
+
+                            dummy = np.fromfile(f, np.uint8, count=int((self.Surface.no_cells[i_ens]*4)))
+                            dummy = np.reshape(dummy, [int(self.Surface.no_cells[i_ens]), n_velocities])
+                            self.Surface.pergd[:n_velocities, :int(self.Surface.no_cells[i_ens]), i_ens] = dummy.T
+
+                            # Check if more data types need to be read and position the pointer
+                            self.end_reading(f, file_loc, i_data_types, i_ens, bytes_per_ens)
+
+                        # Undefined data skipped
+                        elif leader_id == '0x510':
+                            # Update data types counter
+                            i_data_types += 1
+
+                            # Check if more data types need to be read and position the pointer
+                            self.end_reading(f, file_loc, i_data_types, i_ens, bytes_per_ens)
+
+                        # Automatic mode configuration
+                        elif leader_id == '0x4401':
+                            # Update data types counter
+                            i_data_types += 1
+
+                            self.AutoMode.beam_count[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+
+                            # Read the 13 per-beam fields for each of the four
+                            # beams in order (Beam1 through Beam4)
+                            for beam in (self.AutoMode.Beam1, self.AutoMode.Beam2,
+                                         self.AutoMode.Beam3, self.AutoMode.Beam4):
+                                beam.mode[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+                                beam.depth_cm[i_ens] = np.fromfile(f, np.uint16, count=1)[0]
+                                beam.ping_count[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+                                beam.ping_type[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+                                beam.cell_count[i_ens] = np.fromfile(f, np.uint16, count=1)[0]
+                                beam.cell_size_cm[i_ens] = np.fromfile(f, np.uint16, count=1)[0]
+                                beam.cell_mid_cm[i_ens] = np.fromfile(f, np.uint16, count=1)[0]
+                                beam.code_repeat[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+                                beam.trans_length_cm[i_ens] = np.fromfile(f, np.uint16, count=1)[0]
+                                beam.lag_length_cm[i_ens] = np.fromfile(f, np.uint16, count=1)[0]
+                                beam.transmit_bw[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+                                beam.receive_bw[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+                                beam.ping_interval_ms[i_ens] = np.fromfile(f, np.uint16, count=1)[0]
+
+                            self.AutoMode.Reserved[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+
+                            # Check if more data types need to be read and position the pointer
+                            self.end_reading(f, file_loc, i_data_types, i_ens, bytes_per_ens)
+
+                        # Vertical beam
+                        elif leader_id == '0x4100':
+                            # Update data types counter
+                            i_data_types += 1
+
+                            self.Sensor.vert_beam_eval_amp[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+                            self.Sensor.vert_beam_RSSI_amp[i_ens] = np.fromfile(f, np.uint8, count=1)[0]
+                            self.Sensor.vert_beam_range_m[i_ens] = np.fromfile(f, np.uint32, count=1)[0] / 1000
+                            # Unpack the status byte: the two least significant
+                            # bits hold the status code and bit 2 the gain
+                            # setting (0 = low, 1 = high)
+                            temp = "{0:08b}".format(np.fromfile(f, np.uint8, count=1)[0])
+                            self.Sensor.vert_beam_status[i_ens] = int(temp[6:], 2)
+                            if temp[5] == '0':
+                                self.Sensor.vert_beam_gain[i_ens] = 'L'
+                            else:
+                                self.Sensor.vert_beam_gain[i_ens] = 'H'
+
+                            # Check if more data types need to be read and position the pointer
+                            self.end_reading(f, file_loc, i_data_types, i_ens, bytes_per_ens)
+
+                        # Transformation matrix
+                        elif leader_id == '0x3200':
+                            # Update data types counter
+                            i_data_types += 1
+
+                            # The 4x4 transformation matrix is stored by rows
+                            # as int16 values scaled by 0.0001
+                            for row in range(4):
+                                self.Inst.t_matrix[row, :] = np.fromfile(f, np.int16, count=4) * .0001
+
+                            # Check if more data types need to be read and position the pointer
+                            self.end_reading(f, file_loc, i_data_types, i_ens, bytes_per_ens)
+
+                        else:
+
+                            # Unrecognized leader ID
+                            self.Hdr.invalid[i_ens] = leader_id
+                            i_data_types += 1
+
+                            # Find next leader ID
+                            if (i_data_types + 1) <= self.Hdr.n_data_types[i_ens]:
+                                # Reposition file pointer for next data type
+                                f.seek(int(self.Hdr.data_offsets[i_ens, i_data_types])+file_loc, 0)
+                            else:
+                                if f.tell() < end_file:
+                                    # Locate next ensemble if no more data types
+                                    if i_data_types + 1 > self.Hdr.n_data_types[i_ens] + 1:
+                                        current_loc = f.tell()
+
+                                        # Search the remaining bytes for the
+                                        # 0x7F7F header of the next ensemble
+                                        next_ens = f.read(end_file - current_loc).find(b'\x7f\x7f')
+                                        if next_ens > 0:
+                                            f.seek(current_loc + next_ens, 0)
+                                            i_data_types = 0
+                                        else:
+                                            end_file_check = end_file + 1
+
+                                    else:
+                                        f.seek(file_loc+bytes_per_ens-2, 0)
+
+                        # If all data types have been read, read the last two bytes of the ensemble
+                        if i_ens < len(self.Hdr.n_data_types):
+                            if i_data_types >= self.Hdr.n_data_types[i_ens] and f.tell() <= end_file:
+
+                                try:
+                                    self.Inst.res_RDI = np.fromfile(f, np.uint16, count=1)[0]
+                                    # Read the checksum (not used)
+                                    _ = np.fromfile(f, np.uint16, count=1)[0]
+                                except (ValueError, EOFError, IndexError):
+                                    pass
+                        else:
+                            end_file_check = end_file
+
+                        if end_file_check < end_file:
+                            end_file_check = f.tell()
+
+                    # Screen for bad data, and do the unit conversions
+                    self.Wt.vel_mps[self.Wt.vel_mps == -32768] = np.nan
+                    self.Wt.vel_mps = self.Wt.vel_mps / 1000
+                    self.Wt.corr[self.Wt.corr == -32768] = np.nan
+                    self.Wt.rssi[self.Wt.rssi == -32768] = np.nan
+                    self.Wt.pergd[self.Wt.pergd == -32768] = np.nan
+
+                    # Remove bad data, convert units
+                    self.Bt.depth_m[self.Bt.depth_m == -32768] = np.nan
+                    self.Bt.depth_m = self.Bt.depth_m / 100
+                    self.Bt.vel_mps[self.Bt.vel_mps == -32768] = np.nan
+                    self.Bt.vel_mps = self.Bt.vel_mps / 1000
+                    self.Bt.corr[self.Bt.corr == -32768] = np.nan
+                    self.Bt.eval_amp[self.Bt.eval_amp == -32768] = np.nan
+                    self.Bt.pergd[self.Bt.pergd == -32768] = np.nan
+
+                    # Correct Bt.depth_m for RiverRay data
+                    if not np.isnan(rr_bt_depth_correction).any():
+                        rr_bt_depth_correction[rr_bt_depth_correction == (-32768 * 2e16) / 100] = np.nan
+                        self.Bt.depth_m += rr_bt_depth_correction
+
+                    # Remove bad data from Surface structure (RR), convert where needed
+                    self.Surface.vel_mps[self.Surface.vel_mps == -32768] = np.nan
+                    self.Surface.vel_mps = self.Surface.vel_mps / 1000
+                    self.Surface.corr[self.Surface.corr == -32768] = np.nan
+                    self.Surface.rssi[self.Surface.rssi == -32768] = np.nan
+                    self.Surface.pergd[self.Surface.pergd == -32768] = np.nan
+
+                    # If requested compute WR2 compatible GPS-based boat velocities
+                    if wr2:
+
+                        # If vtg data are available compute north and east components
+                        if self.Gps2.vtg_header[0][0] == '$':
+
+                            # Find minimum of absolute value of delta time from raw data
+                            vtg_delta_time = np.abs(self.Gps2.vtg_delta_time)
+                            vtg_min = np.nanmin(vtg_delta_time, axis=1)
+
+                            # Compute the velocity components in m/s from the
+                            # sentence closest in time to each ping
+                            for i in range(len(vtg_delta_time)):
+                                matches = np.where(vtg_delta_time[i, :] == vtg_min[i])[0]
+                                if len(matches) == 0:
+                                    continue
+                                idx = matches[0]
+                                self.Gps2.vtg_velE_mps[i], self.Gps2.vtg_velN_mps[i] = \
+                                    pol2cart((90 - self.Gps2.course_true[i, idx])*np.pi/180,
+                                             self.Gps2.speed_kph[i, idx] * 0.2777778)
+
+                        if self.Gps2.gga_header[0][0] == '$':
+
+                            # Initialize constants
+                            e_radius = 6378137
+                            coeff = e_radius * np.pi / 180
+                            ellip = 1 / 298.257223563
+
+                            # Find minimum of absolute value of delta time from raw data
+                            gga_delta_time = np.abs(self.Gps2.gga_delta_time)
+                            gga_min = np.nanmin(gga_delta_time, axis=1)
+
+                            # Process gga data, computing velocity between the
+                            # fixes closest in time to each pair of consecutive pings
+                            for i in range(1, len(gga_delta_time)):
+                                idx = np.where(gga_delta_time[i, :] == gga_min[i])[0]
+                                idx_prev = np.where(gga_delta_time[i - 1, :] == gga_min[i - 1])[0]
+                                if len(idx) > 0 and len(idx_prev) > 0:
+                                    idx = idx[0]
+                                    idx_prev = idx_prev[0]
+                                    # Average latitude in degrees, used for the local earth radii
+                                    lat_avg_deg = (self.Gps2.lat_deg[i, idx]
+                                                   + self.Gps2.lat_deg[i - 1, idx_prev]) / 2
+                                    sin_lat_avg_rad = np.sin(np.deg2rad(lat_avg_deg))
+                                    r_e = coeff * (1 + ellip * sin_lat_avg_rad * sin_lat_avg_rad)
+                                    rn = coeff * (1 - 2 * ellip + 3 * ellip * sin_lat_avg_rad * sin_lat_avg_rad)
+                                    dx = r_e * (self.Gps2.lon_deg[i, idx] -
+                                                self.Gps2.lon_deg[i - 1, idx_prev]) * np.cos(np.deg2rad(lat_avg_deg))
+                                    dy = rn * (self.Gps2.lat_deg[i, idx] - self.Gps2.lat_deg[i - 1, idx_prev])
+                                    dt = self.Gps2.utc[i, idx] - self.Gps2.utc[i - 1, idx_prev]
+                                    self.Gps2.gga_velE_mps[i] = dx / dt
+                                    self.Gps2.gga_velN_mps[i] = dy / dt
+                                else:
+                                    self.Gps2.gga_velE_mps[i] = np.nan
+                                    self.Gps2.gga_velN_mps[i] = np.nan
+
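+    # A sketch of the VTG conversion above (values assumed, and assuming
+    # pol2cart returns (x, y)): a course of 90 degrees (due east) at 3.6 km/h
+    # maps to approximately (1.0, 0.0) m/s east/north.
+    #
+    #     east, north = pol2cart((90 - 90) * np.pi / 180, 3.6 * 0.2777778)
+    #     round(east, 3), round(north, 3)  # -> (1.0, 0.0)
+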
+    @staticmethod
+    def number_of_ensembles(f, f_size):
+        """Determines the number of ensembles in the data file.
+
+        Parameters
+        ----------
+        f: BufferedReader
+            File object of pd0 file
+        f_size: int
+            File size in bytes
+
+        Returns
+        -------
+        n_ensembles: int
+            Number of ensembles
+        """
+
+        i = 0
+        leader_id = '0000'
+            
+        # Find the first ensemble
+        while leader_id != '0x7f7f' and i < f_size:
+            f.seek(i, 0)
+            i = i + 1
+            leader_id = hex(np.fromfile(f, np.uint16, count=1)[0])
+
+        # Call find_ens_no to get the first ensemble number
+        first_num = Pd0TRDI.find_ens_no(f)
+
+        # Find last ensemble
+        i = 0
+        leader_id = '0000'
+        last_num = -1
+        
+        while last_num < 0:
+            while leader_id != '0x7f7f' and i < f_size:
+                i = i + 1
+                f.seek(-i, 2)
+
+                try:
+                    leader_id = hex(np.fromfile(f, np.uint16, count=1)[0])
+                except (ValueError, EOFError, IndexError):
+                    continue
+
+            last_num = Pd0TRDI.find_ens_no(f)
+            if last_num is None or np.isnan(last_num):
+                last_num = -1
+            
+            leader_id = '0000'
+        n_ensembles = last_num-first_num+1
+
+        return n_ensembles
+
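+    # Minimal usage sketch (hypothetical file name, assuming an os import):
+    # the file is opened in binary mode and its size in bytes passed in.
+    #
+    #     with open('transect.pd0', 'rb') as f:
+    #         n_ens = Pd0TRDI.number_of_ensembles(f, os.path.getsize('transect.pd0'))
+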
+    @staticmethod
+    def find_ens_no(f):
+        """This function assumes the current position of the file pointer is just
+            after '7F7F'. The function then reads the ensemble header and
+            works through the data offsets until the 0x0080 data type is found. The
+            ensemble number is then read.
+
+        Parameters
+        ----------
+        f: BufferedReader
+            File object
+
+        Returns
+        -------
+        ensemble_num: int
+            Ensemble number (np.nan if it could not be read, -1 if the
+            checksum check failed)
+        """
+
+        ensemble_num = np.nan
+        try:
+            fileloc = f.tell() - 2
+
+            # Verify the checksum
+            if Pd0TRDI.check_sum(f, fileloc):
+
+                # Read header information
+                f.seek(fileloc+5, 0)
+                n_data_types = np.fromfile(f, np.uint8, count=1)[0]
+                data_offsets = []
+                for x in range(n_data_types):
+                    data_offsets.append(np.fromfile(f, np.uint16, count=1)[0])
+
+                # Initialize variables
+                i = 0
+                leader_id = '0000'
+
+                # Search for 0x80
+                while leader_id != '0x80' and i < n_data_types:
+
+                    f.seek(data_offsets[i]+fileloc, 0)
+                    leader_id = hex(np.fromfile(f, np.uint16, count=1)[0])
+                    i = i + 1
+                    
+                # Read ensemble number from data type 0x80
+                if leader_id == '0x80':
+                    ensemble_num = np.fromfile(f, np.uint16, count=1)[0]
+
+            else:
+                ensemble_num = -1
+        except (EOFError, ValueError):
+            ensemble_num = np.nan
+
+        return ensemble_num
+
+    @staticmethod
+    def check_sum(f, fileloc, bytes_per_ens=None):
+        """Compute and verify checksum values.
+
+        Parameters
+        ----------
+        f: BufferedReader
+            File object
+        fileloc: int
+            Location within file
+        bytes_per_ens: int
+            Number of bytes in ensemble
+
+        Returns
+        -------
+        bool
+        """
+
+        try:
+             
+            if bytes_per_ens is None:
+                bytes_per_ens = np.fromfile(f, np.uint16, count=1)[0] 
+            # Go to file location from the beginning of file
+            f.seek(fileloc, 0)
+              
+            # Read in the values for all of the bytes an get a check sum
+            test_b = []
+            x = f.read(bytes_per_ens)
+            for y in x:
+                test_b.append(y)
+                  
+            check_sum = sum(test_b)
+            check_h = hex(check_sum)[2:]
+              
+            # Check for a hex that is greater than 4 (including L indicator at the end)
+            if len(check_h) > 4:
+                  
+                # Seek to location of check sum and compared to computed
+                if check_h[-1] == 'L':
+                    check_h = check_h[:-1]
+                      
+                f.seek(fileloc+bytes_per_ens, 0)
+                check_sum = np.fromfile(f, np.uint16, count=1)[0]  
+                if int('0x'+check_h[1:], 16) == check_sum:
+                    return True
+                else:
+                    return False
+            elif len(check_h) > 3:
+                # Seek to location of check sum and compared to computed
+                if check_h[-1] == 'L':
+                    check_h = check_h[:-1]
+                      
+                f.seek(fileloc+bytes_per_ens, 0)
+                check_sum = np.fromfile(f, np.uint16, count=1)[0]  
+                if int('0x'+check_h, 16) == check_sum:
+                    return True
+                else:
+                    return False
+            else:
+                return False
+        except Exception:
+            return False
+
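+    # Illustration of the PD0 checksum rule applied above (bytes assumed):
+    # the value stored after an ensemble is the sum of its bytes modulo 65536.
+    #
+    #     sum(bytes([0x7f, 0x7f, 0x20, 0x00])) & 0xFFFF  # -> 286
+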
+    @staticmethod
+    def bad_check_sum(f, file_loc):
+        """Searches for next ensemble.
+
+        Parameters
+        ----------
+        f: BufferedReader
+            File object
+        file_loc: int
+            Location in file
+        """
+
+        search_id = '    '
+        search_loc = file_loc+2
+        while search_id != '0x7f7f':
+            f.seek(search_loc, 0)
+            search_loc += 1
+            try:
+                search_id = hex(np.fromfile(f, np.uint16, count=1)[0])
+            except (ValueError, EOFError):
+                continue
+            except IndexError:
+                # End of file reached without finding another ensemble
+                break
+        f.seek(search_loc, 0)
+        
+    def end_reading(self, f, file_loc, i_data_types, i_ens, bytes_per_ens):
+        """Checks if more data types need to be read and position file pointer.
+
+        Parameters
+        ----------
+        f: BufferedReader
+            File object
+        file_loc: int
+            Location in file
+        i_data_types: int
+            Number of data types
+        i_ens: int
+            Ensemble counter
+        bytes_per_ens: int
+            Number of bytes in the ensemble
+
+        """
+        if i_data_types + 1 <= self.Hdr.n_data_types[i_ens]:
+            f.seek(int(self.Hdr.data_offsets[i_ens, i_data_types])+file_loc, 0)
+        else:
+            f.seek(file_loc+bytes_per_ens-2, 0)
+
+
+class Hdr(object):
+    """Class to hold header variables.
+
+    Attributes
+    ----------
+    bytes_per_ens: np.array(int)
+        Number of bytes in each ensemble
+    data_offsets: np.array(int)
+        File offsets to the start of each data type in the ensemble
+    n_data_types: np.array(int)
+        Number of data types in each ensemble
+    data_ok: np.array(int)
+        Data status flag
+    invalid: list
+        Leader IDs that were not recognized
+    """
+
+    def __init__(self, n_ensembles, n_types):
+        """Initialize instance variables to empty arrays.
+
+        Parameters
+        ----------
+        n_ensembles: int
+            Number of ensembles
+        n_types: int
+            Number of data types
+        """
+        self.bytes_per_ens = nans(n_ensembles)
+        self.data_offsets = nans([n_ensembles, n_types])
+        self.n_data_types = nans(n_ensembles)
+        self.data_ok = nans(n_ensembles)
+        self.invalid = [''] * n_ensembles
+
+
+class Inst(object):
+    """Class to hold information about the instrument.
+
+    Attributes
+    ----------
+    beam_ang: np.array(int)
+        Angle of transducers in degrees
+    beams: np.array(int)
+        Number of beams used for velocity
+    data_type: list
+        Data type
+    firm_ver: np.array(str)
+        Firmware version
+    freq: np.array(int)
+        Frequency of ADCP in kHz
+    pat: list
+        Beam pattern
+    res_RDI: int
+        Reserved for TRDI
+    sensor_CFG: np.array(int)
+        Sensor configuration
+    xducer: list
+        Indicates if transducer is attached
+    t_matrix: np.array(float)
+        Transformation matrix
+    demod: np.array(int)
+        Demodulation code
+    """
+
+    def __init__(self, n_ensembles):
+        """Initialize instance variables.
+
+        Parameters
+        ----------
+        n_ensembles: int
+            Number of ensembles
+        """
+        self.beam_ang = nans(n_ensembles)
+        self.beams = nans(n_ensembles)
+        self.data_type = [''] * n_ensembles
+        self.firm_ver = nans(n_ensembles)
+        self.freq = nans(n_ensembles)
+        self.pat = [''] * n_ensembles
+        self.res_RDI = 0
+        self.sensor_CFG = nans(n_ensembles)
+        self.xducer = [''] * n_ensembles
+        self.t_matrix = np.tile([np.nan], [4, 4])
+        self.demod = nans(n_ensembles)
+
+
+class AutoMode(object):
+    """Class to hold auto configuration mode settings for each beam.
+
+    Attributes
+    ----------
+    beam_count: np.array(int)
+        Number of beams
+    Beam1: Beam
+        Object of class Beam
+    Beam2: Beam
+        Object of class Beam
+    Beam3: Beam
+        Object of class Beam
+    Beam4: Beam
+        Object of class Beam
+    Reserved: np.array
+    """
+
+    def __init__(self, n_ensembles):
+        """Initialize instance variables.
+
+        Parameters
+        ----------
+        n_ensembles: int
+            Number of ensembles
+        """
+        self.beam_count = nans(n_ensembles)
+        self.Beam1 = Beam(n_ensembles)
+        self.Beam2 = Beam(n_ensembles)
+        self.Beam3 = Beam(n_ensembles)
+        self.Beam4 = Beam(n_ensembles)
+        self.Reserved = nans(n_ensembles)
+
+
+class Beam(object):
+    """Class to hold auto configuration settings for a beam.
+
+    Attributes
+    ----------
+    mode: np.array(int)
+        Water mode
+    depth_cm: np.array(int)
+        Depth in cm
+    ping_count: np.array(int)
+        Number of pings
+    ping_type: np.array(int)
+        Type of pings
+    cell_count: np.array(int)
+        Number of cells
+    cell_size_cm: np.array(int)
+        Cell size in cm
+    cell_mid_cm: np.array(int)
+        Distance to center of cell 1 in cm
+    code_repeat: np.array(int)
+        Number of code repeats
+    trans_length_cm: np.array(int)
+        Transmit length in cm
+    lag_length_cm: np.array(int)
+        Lag length in cm
+    transmit_bw: np.array(int)
+        Transmit bandwidth
+    receive_bw: np.array(int)
+        Receive bandwidth
+    ping_interval_ms: np.array(int)
+        Time between pings in ms
+    """
+
+    def __init__(self, n_ensembles):
+        """Initialize instance variables.
+
+        Parameters
+        ----------
+        n_ensembles: int
+            Number of ensembles
+        """
+
+        self.mode = nans(n_ensembles)
+        self.depth_cm = nans(n_ensembles)
+        self.ping_count = nans(n_ensembles)
+        self.ping_type = nans(n_ensembles)
+        self.cell_count = nans(n_ensembles)
+        self.cell_size_cm = nans(n_ensembles)
+        self.cell_mid_cm = nans(n_ensembles)
+        self.code_repeat = nans(n_ensembles)
+        self.trans_length_cm = nans(n_ensembles)
+        self.lag_length_cm = nans(n_ensembles)
+        self.transmit_bw = nans(n_ensembles)
+        self.receive_bw = nans(n_ensembles)
+        self.ping_interval_ms = nans(n_ensembles)
+
+
+class Bt(object):
+    """Class to hold bottom track data.
+
+    Attributes
+    ----------
+    corr: np.array(int)
+        Correlation for each beam
+    depth_m: np.array(float)
+        Depth for each beam
+    eval_amp: np.array(int)
+        Return amplitude for each beam
+    ext_depth_cm: np.array(int)
+        External depth in cm
+    pergd: np.array(int)
+        Percent good
+    rssi: np.array(int)
+        Return signal strength indicator in counts for each beam
+    vel_mps: np.array(float)
+        Velocity in m/s, rows depend on coordinate system
+    """
+
+    def __init__(self, n_ensembles, n_velocities):
+        """Initialize instance variables.
+
+        Parameters
+        ----------
+        n_ensembles: int
+            Number of ensembles
+        n_velocities: int
+            Number of velocity beams
+        """
+
+        self.corr = nans([n_velocities, n_ensembles])
+        self.depth_m = nans([n_velocities, n_ensembles])
+        self.eval_amp = nans([n_velocities, n_ensembles])
+        self.ext_depth_cm = nans(n_ensembles)
+        self.pergd = nans([n_velocities, n_ensembles])
+        self.rssi = nans([n_velocities, n_ensembles])
+        self.vel_mps = nans([n_velocities, n_ensembles])
+
+
+class Cfg(object):
+    """Class to hold configuration settings.
+
+    Attributes
+    ----------
+    ba: np.array(int)
+        Bottom track amplitude threshold
+    bc: np.array(int)
+        Bottom track correlation threshold
+    be_mmps: np.array(int)
+        Bottom track error velocity threshold
+    bg: np.array(int)
+        Bottom track percent good threshold
+    bm: np.array(int)
+        Bottom mode
+    bp: np.array(int)
+        Number of bottom pings
+    bx_dm: np.array(int)
+        Maximum tracking depth in decimeters
+    code_reps: np.array(int)
+        Number of code repetitions
+    coord_sys: np.array(str)
+        Coordinate system
+    cpu_ser_no: np.array(int)
+        CPU serial number
+    cq: np.array(int)
+        Transmit power
+    cx: np.array(int)
+        Low latency trigger
+    dist_bin1_cm: np.array(int)
+        Distance to center of bin 1 from transducer
+    ea_deg: np.array(int)
+        Heading alignment
+    eb_deg: np.array(int)
+        Heading bias
+    sensor_avail: np.array(str)
+        Sensor availability codes
+    ex: np.array(str)
+        Coordinate transformation codes
+    ez: np.array(str)
+        Sensor codes
+    head_src: np.array(str)
+        Heading source
+    lag_cm: np.array(int)
+        Lag
+    map_bins: np.array(str)
+        Bin mapping
+    n_beams: np.array(int)
+        Number of velocity beams
+    pitch_src: np.array(str)
+        Source of pitch data
+    ref_lay_end_cell: np.array(int)
+        Reference layer end
+    ref_lay_str_cell: np.array(int)
+        Reference layer start
+    roll_src: np.array(str)
+        Roll source
+    sal_src: np.array(str)
+        Salinity source
+    wm: np.array(int)
+        Water mode
+    sos_src: np.array(str)
+        Speed of sound source
+    temp_src: np.array(str)
+        Temperature source
+    tp_sec: np.array(int)
+        Time between pings
+    use_3beam: np.array(str)
+        Setting on whether to use 3-beam solutions or not
+    use_pr: np.array(str)
+        Setting to use pitch and roll or not
+    wa: np.array(int)
+        Water track amplitude threshold
+    wb: np.array(int)
+        Water track bandwidth control
+    wc: np.array(int)
+        Water track correlation threshold
+    we_mmps: np.array(int)
+        Water track error velocity threshold
+    wf_cm: np.array(int)
+        Blank after transmit
+    wg_per: np.array(int)
+        Water track percent good threshold
+    wj: np.array(int)
+        Receiver gain setting
+    wn: np.array(int)
+        Number of depth cells (bins)
+    wp: np.array(int)
+        Number of water pings
+    ws_cm: np.array(int)
+        Bin size
+    xdcr_dep_srs: np.array(str)
+        Transducer depth source
+    xmit_pulse_cm: np.array(int)
+        Transmit pulse length
+    lag_near_bottom: np.array(int)
+        Lag near bottom setting
+    """
+
+    def __init__(self, n_ensembles):
+        """Initialize instance variables.
+
+        Parameters
+        ----------
+        n_ensembles: int
+            Number of ensembles
+        """
+
+        self.ba = nans(n_ensembles)
+        self.bc = nans(n_ensembles)
+        self.be_mmps = nans(n_ensembles)
+        self.bg = nans(n_ensembles)
+        self.bm = nans(n_ensembles)
+        self.bp = nans(n_ensembles)
+        self.bx_dm = nans(n_ensembles)
+        self.code_reps = nans(n_ensembles)
+        self.coord_sys = [''] * n_ensembles
+        self.cpu_ser_no = nans([n_ensembles, 8])
+        self.cq = nans(n_ensembles)
+        self.cx = nans(n_ensembles)
+        self.dist_bin1_cm = nans(n_ensembles)
+        self.ea_deg = nans(n_ensembles)
+        self.eb_deg = nans(n_ensembles)
+        self.sensor_avail = [''] * n_ensembles
+        self.ex = [''] * n_ensembles
+        self.ez = [''] * n_ensembles
+        self.head_src = [''] * n_ensembles
+        self.lag_cm = nans(n_ensembles)
+        self.map_bins = [''] * n_ensembles
+        self.n_beams = nans(n_ensembles)
+        self.pitch_src = [''] * n_ensembles
+        self.ref_lay_end_cell = nans(n_ensembles)
+        self.ref_lay_str_cell = nans(n_ensembles)
+        self.roll_src = [''] * n_ensembles
+        self.sal_src = [''] * n_ensembles
+        self.wm = nans(n_ensembles)
+        self.sos_src = [''] * n_ensembles
+        self.temp_src = [''] * n_ensembles
+        self.tp_sec = nans(n_ensembles)
+        self.use_3beam = [''] * n_ensembles
+        self.use_pr = [''] * n_ensembles
+        self.wa = nans(n_ensembles)
+        self.wb = nans(n_ensembles)
+        self.wc = nans(n_ensembles)
+        self.we_mmps = nans(n_ensembles)
+        self.wf_cm = nans(n_ensembles)
+        self.wg_per = nans(n_ensembles)
+        self.wj = nans(n_ensembles)
+        self.wn = nans(n_ensembles)
+        self.wp = nans(n_ensembles)
+        self.ws_cm = nans(n_ensembles)
+        self.xdcr_dep_srs = [''] * n_ensembles
+        self.xmit_pulse_cm = nans(n_ensembles)
+        self.lag_near_bottom = nans(n_ensembles)
+
+
+class Gps(object):
+    """Class to hold GPS data from WinRiver.
+
+    Attributes
+    ----------
+    alt_m: np.array(float)
+        Altitude in meters
+    gga_diff: np.array(int)
+        Differential correction indicator
+    gga_hdop: np.array(float)
+        Horizontal dilution of precision
+    gga_n_stats: np.array(int)
+        Number of satellites
+    gga_vel_e_mps: np.array(float)
+        Velocity in east direction from GGA data
+    gga_vel_n_mps: np.array(float)
+        Velocity in north direction from GGA data
+    gsa_p_dop: np.array(int)
+        Position dilution of precision
+    gsa_sat: np.array(int)
+        Satellites
+    gsa_v_dop: np.array(float)
+        Vertical dilution of precision
+    lat_deg: np.array(float)
+        Latitude in degrees
+    long_deg: np.array(float)
+        Longitude in degrees
+    vtg_vel_e_mps: np.array(float)
+        Velocity in east direction from VTG data
+    vtg_vel_n_mps: np.array(float)
+        Velocity in north direction from VTG data
+    """
+
+    def __init__(self, n_ensembles):
+        """Initialize instance variables.
+
+        Parameters
+        ----------
+        n_ensembles: int
+            Number of ensembles
+        """
+
+        self.alt_m = nans(n_ensembles)
+        self.gga_diff = nans(n_ensembles)
+        self.gga_hdop = nans(n_ensembles)
+        self.gga_n_stats = nans(n_ensembles)
+        self.gga_vel_e_mps = nans(n_ensembles)
+        self.gga_vel_n_mps = nans(n_ensembles)
+        self.gsa_p_dop = nans(n_ensembles)
+        self.gsa_sat = nans([n_ensembles, 6])
+        self.gsa_v_dop = nans(n_ensembles)
+        self.lat_deg = nans(n_ensembles)
+        self.long_deg = nans(n_ensembles)
+        self.vtg_vel_e_mps = nans(n_ensembles)
+        self.vtg_vel_n_mps = nans(n_ensembles)
+
+
+class Gps2(object):
+    """Class to hold GPS data for WinRiver II.
+
+    Attributes
+    ----------
+    gga_delta_time: np.array(float)
+        Time between ping and gga data
+    gga_header: list
+        GGA header
+    gga_sentence: list
+        GGA sentence
+    utc: np.array(float)
+        UTC time
+    lat_deg: np.array(float)
+        Latitude in degrees
+    lat_ref: list
+        Latitude reference
+    lon_deg: np.array(float)
+        Longitude in degrees
+    lon_ref: list
+        Longitude reference
+    corr_qual: np.array(float)
+        Differential quality indicator
+    num_sats: np.array(int)
+        Number of satellites
+    hdop: np.array(float)
+        Horizontal dilution of precision
+    alt: np.array(float)
+        Altitude
+    alt_unit: list
+        Units for altitude
+    geoid: np.array(float)
+        Geoid height
+    geoid_unit: list
+        Units for geoid height
+    d_gps_age: np.array(float)
+        Age of differential correction
+    ref_stat_id: np.array(float)
+        Reference station ID
+    vtg_delta_time: np.array(float)
+        Time between ping and VTG data
+    vtg_header: list
+        VTG header
+    vtg_sentence: list
+        VTG sentence
+    course_true: np.array(float)
+        Course relative to true north
+    true_indicator: list
+        True north indicator
+    course_mag: np.array(float)
+        Course relative to magnetic north
+    mag_indicator: list
+        Magnetic north indicator
+    speed_knots: np.array(float)
+        Speed in knots
+    knots_indicator: list
+        Knots indicator
+    speed_kph: np.array(float)
+        Speed in kilometers per hour
+    kph_indicator: list
+        Kilometers per hour indicator
+    mode_indicator: list
+        Mode indicator
+    dbt_delta_time: np.array(float)
+        Time between ping and echo sounder data
+    dbt_header: list
+        Echo sounder header
+    depth_ft: np.array(float)
+        Depth in ft from echo sounder
+    ft_indicator: list
+        Feet indicator
+    depth_m: np.array(float)
+        Depth in meters from echo sounder
+    m_indicator: list
+        Meters indicator
+    depth_fath: np.array(float)
+        Depth in fathoms from echo sounder
+    fath_indicator: list
+        Fathoms indicator
+    hdt_delta_time: np.array(float)
+        Time between ping and external heading data
+    hdt_header: list
+        External heading header
+    heading_deg: np.array(float)
+        Heading in degrees from external heading
+    h_true_indicator: list
+        Heading indicator to true north
+    gga_velE_mps: np.array(float)
+        Velocity in east direction in m/s from GGA for WR
+    gga_velN_mps: np.array(float)
+        Velocity in north direction in m/s from GGA for WR
+    vtg_velE_mps: np.array(float)
+        Velocity in east direction in m/s from VTG for WR
+    vtg_velN_mps: np.array(float)
+        Velocity in north direction in m/s from VTG for WR
+    """
+
+    def __init__(self, n_ensembles, wr2):
+        """Initialize instance variables.
+
+        Parameters
+        ----------
+        n_ensembles: int
+            Number of ensembles
+        wr2: bool
+            Setting of whether data is from WR or WR2
+        """
+
+        self.gga_delta_time = np.full([n_ensembles, 20], np.nan)
+        self.gga_header = [x[:] for x in [[''] * 20] * n_ensembles]
+        self.gga_sentence = [x[:] for x in [[''] * 20] * n_ensembles]
+        self.utc = np.full([n_ensembles, 20], np.nan)
+        self.lat_deg = np.zeros([n_ensembles, 20])
+        self.lat_ref = [x[:] for x in [[''] * 20] * n_ensembles]
+        self.lon_deg = np.zeros([n_ensembles, 20])
+        self.lon_ref = [x[:] for x in [[''] * 20] * n_ensembles]
+        self.corr_qual = np.full([n_ensembles, 20], np.nan)
+        self.num_sats = np.full([n_ensembles, 20], np.nan)
+        self.hdop = np.full([n_ensembles, 20], np.nan)
+        self.alt = np.full([n_ensembles, 20], np.nan)
+        self.alt_unit = [x[:] for x in [[''] * 20] * n_ensembles]
+        self.geoid = np.full([n_ensembles, 20], np.nan)
+        self.geoid_unit = [x[:] for x in [[''] * 20] * n_ensembles]
+        self.d_gps_age = np.full([n_ensembles, 20], np.nan)
+        self.ref_stat_id = np.full([n_ensembles, 20], np.nan)
+        self.vtg_delta_time = np.full([n_ensembles, 20], np.nan)
+        self.vtg_header = [x[:] for x in [[''] * 20] * n_ensembles]
+        self.vtg_sentence = [x[:] for x in [[''] * 20] * n_ensembles]
+        self.course_true = np.full([n_ensembles, 20], np.nan)
+        self.true_indicator = [x[:] for x in [[''] * 20] * n_ensembles]
+        self.course_mag = np.full([n_ensembles, 20], np.nan)
+        self.mag_indicator = [x[:] for x in [[''] * 20] * n_ensembles]
+        self.speed_knots = np.full([n_ensembles, 20], np.nan)
+        self.knots_indicator = [x[:] for x in [[''] * 20] * n_ensembles]
+        self.speed_kph = np.zeros([n_ensembles, 20])
+        self.kph_indicator = [x[:] for x in [[''] * 20] * n_ensembles]
+        self.mode_indicator = [x[:] for x in [[''] * 20] * n_ensembles]
+        self.dbt_delta_time = np.full([n_ensembles, 20], np.nan)
+        self.dbt_header = [x[:] for x in [[''] * 20] * n_ensembles]
+        self.depth_ft = np.full([n_ensembles, 20], np.nan)
+        self.ft_indicator = [x[:] for x in [[''] * 20] * n_ensembles]
+        self.depth_m = np.zeros([n_ensembles, 20])
+        self.m_indicator = [x[:] for x in [[''] * 20] * n_ensembles]
+        self.depth_fath = np.full([n_ensembles, 20], np.nan)
+        self.fath_indicator = [x[:] for x in [[''] * 20] * n_ensembles]
+        self.hdt_delta_time = np.full([n_ensembles, 20], np.nan)
+        self.hdt_header = [x[:] for x in [[''] * 20] * n_ensembles]
+        self.heading_deg = np.full([n_ensembles, 20], np.nan)
+        self.h_true_indicator = [x[:] for x in [[''] * 20] * n_ensembles]
+
+        # GPS-based boat velocities; always allocated so WR2 processing can fill them
+        self.gga_velE_mps = nans(n_ensembles)
+        self.gga_velN_mps = nans(n_ensembles)
+        self.vtg_velE_mps = nans(n_ensembles)
+        self.vtg_velN_mps = nans(n_ensembles)
+
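+    # The *_expand methods below add capacity for one more NMEA sentence per
+    # ensemble: numeric fields gain a NaN column and string fields an empty
+    # entry. A minimal sketch of the column append (shapes assumed):
+    #
+    #     a = np.full([2, 2], np.nan)
+    #     np.concatenate((a, np.tile(np.nan, (1, 2)).T), axis=1).shape  # -> (2, 3)
+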
+    def gga_expand(self, n_ensembles):
+        """Append an empty slot for one additional GGA sentence to each ensemble."""
+        self.gga_delta_time = np.concatenate(
+            (self.gga_delta_time, np.tile(np.nan, (1, n_ensembles)).T), axis=1)
+        self.utc = np.concatenate(
+            (self.utc, np.tile(np.nan, (1, n_ensembles)).T), axis=1)
+        self.lat_deg = np.concatenate(
+            (self.lat_deg, np.tile(np.nan, (1, n_ensembles)).T), axis=1)
+        self.lon_deg = np.concatenate(
+            (self.lon_deg, np.tile(np.nan, (1, n_ensembles)).T), axis=1)
+        self.corr_qual = np.concatenate(
+            (self.corr_qual, np.tile(np.nan, (1, n_ensembles)).T), axis=1)
+        self.num_sats = np.concatenate(
+            (self.num_sats, np.tile(np.nan, (1, n_ensembles)).T), axis=1)
+        self.hdop = np.concatenate(
+            (self.hdop, np.tile(np.nan, (1, n_ensembles)).T), axis=1)
+        self.alt = np.concatenate(
+            (self.alt, np.tile(np.nan, (1, n_ensembles)).T), axis=1)
+        self.geoid = np.concatenate(
+            (self.geoid, np.tile(np.nan, (1, n_ensembles)).T), axis=1)
+        self.d_gps_age = np.concatenate(
+            (self.d_gps_age, np.tile(np.nan, (1, n_ensembles)).T), axis=1)
+        self.ref_stat_id = np.concatenate(
+            (self.ref_stat_id, np.tile(np.nan, (1, n_ensembles)).T), axis=1)
+        for ens in range(n_ensembles):
+            self.gga_header[ens].append('')
+            self.geoid_unit[ens].append('')
+            self.alt_unit[ens].append('')
+            self.lon_ref[ens].append('')
+            self.lat_ref[ens].append('')
+
+    def vtg_expand(self, n_ensembles):
+        """Append an empty slot for one additional VTG sentence to each ensemble."""
+        self.vtg_delta_time = np.concatenate(
+            (self.vtg_delta_time, np.tile(np.nan, (1, n_ensembles)).T), axis=1)
+        self.course_true = np.concatenate(
+            (self.course_true, np.tile(np.nan, (1, n_ensembles)).T), axis=1)
+        self.course_mag = np.concatenate(
+            (self.course_mag, np.tile(np.nan, (1, n_ensembles)).T), axis=1)
+        self.speed_knots = np.concatenate(
+            (self.speed_knots, np.tile(np.nan, (1, n_ensembles)).T), axis=1)
+        self.speed_kph = np.concatenate(
+            (self.speed_kph, np.tile(np.nan, (1, n_ensembles)).T), axis=1)
+        for ens in range(n_ensembles):
+            self.kph_indicator[ens].append('')
+            self.mode_indicator[ens].append('')
+            self.vtg_header[ens].append('')
+            self.true_indicator[ens].append('')
+            self.mag_indicator[ens].append('')
+            self.knots_indicator[ens].append('')
+
+    def dbt_expand(self, n_ensembles):
+        """Append an empty slot for one additional DBT sentence to each ensemble."""
+        self.dbt_delta_time = np.concatenate(
+            (self.dbt_delta_time, np.tile(np.nan, (1, n_ensembles)).T), axis=1)
+        self.depth_ft = np.concatenate(
+            (self.depth_ft, np.tile(np.nan, (1, n_ensembles)).T), axis=1)
+        self.depth_m = np.concatenate(
+            (self.depth_m, np.tile(np.nan, (1, n_ensembles)).T), axis=1)
+        self.depth_fath = np.concatenate(
+            (self.depth_fath, np.tile(np.nan, (1, n_ensembles)).T), axis=1)
+        for ens in range(n_ensembles):
+            self.fath_indicator[ens].append('')
+            self.dbt_header[ens].append('')
+            self.ft_indicator[ens].append('')
+            self.m_indicator[ens].append('')
+
+    def hdt_expand(self, n_ensembles):
+        """Append an empty slot for one additional HDT sentence to each ensemble."""
+        self.hdt_delta_time = np.concatenate(
+            (self.hdt_delta_time, np.tile(np.nan, (1, n_ensembles)).T), axis=1)
+        self.heading_deg = np.concatenate(
+            (self.heading_deg, np.tile(np.nan, (1, n_ensembles)).T), axis=1)
+        for ens in range(n_ensembles):
+            self.h_true_indicator[ens].append('')
+            self.hdt_header[ens].append('')
+
+
+class Nmea(object):
+    """Class to hold raw NMEA sentences.
+
+    Attributes
+    ----------
+    gga: list
+        List of GGA sentences
+    gsa: list
+        List of GSA sentences
+    vtg: list
+        List of VTG sentences
+    dbt: list
+        List of DBT sentences
+    """
+
+    def __init__(self, n_ensembles):
+        """Initialize instance variables.
+
+        Parameters
+        ----------
+        n_ensembles: int
+            Number of ensembles
+        """
+        self.gga = ['']*n_ensembles
+        self.gsa = ['']*n_ensembles
+        self.vtg = ['']*n_ensembles
+        self.dbt = ['']*n_ensembles
+
+
+class Sensor(object):
+    """Class to hold sensor data.
+
+    Attributes
+    ----------
+    ambient_temp: np.array(int)
+        ADC ambient temperature
+    attitude_temp: np.array(int)
+        ADC attitude temperature
+    attitude: np.array(int)
+        ADC attitude
+    bit_test: np.array(int)
+        Bit test results
+    contam_sensor: np.array(int)
+        ADC contamination sensor
+    date: np.array(int)
+        Date
+    date_y2k: np.array(int)
+        Y2K compatible date
+    date_not_y2k: np.array(int)
+        Date not Y2K compatible
+    error_status_word: np.array(int)
+        Error status codes
+    heading_deg: np.array(float)
+        Heading to magnetic north in degrees
+    heading_std_dev_deg: np.array(float)
+        Standard deviation of headings for an ensemble
+    mpt_msc: np.array(int)
+        Minimum time prior to ping
+    num: np.array(int)
+        Ensemble number
+    num_fact: np.array(int)
+        Number fraction
+    num_tot: np.array(int)
+        Number total
+    orient: list
+        Orientation of ADCP
+    pitch_std_dev_deg: np.array(float)
+        Standard deviation of pitch for an ensemble
+    pitch_deg: np.array(float)
+        Pitch in degrees
+    pressure_neg: np.array(int)
+        ADC pressure negative
+    pressure_pos: np.array(int)
+        ADC pressure positive
+    pressure_pascal: np.array(int)
+        Pressure at transducer face in deca-pascals
+    pressure_var_pascal: np.array(int)
+        Pressure variance in deca-pascals
+    roll_std_dev_deg: np.array(float)
+        Standard deviation of roll for an ensemble
+    roll_deg: np.array(float)
+        Roll in degrees
+    salinity_ppt: np.array(int)
+        Salinity in parts per thousand
+    sos_mps: np.array(int)
+        Speed of sound in m/s
+    temperature_deg_c: np.array(float)
+        Water temperature in degrees C
+    time: np.array(int)
+        Time
+    time_y2k: np.array(int)
+        Y2K compatible time
+    xdcr_depth_dm: np.array(int)
+        Transducer depth in decimeters
+    xmit_current: np.array(int)
+        Transmit current
+    xmit_voltage: np.array(int)
+        Transmit voltage
+    vert_beam_eval_amp: np.array(int)
+        Vertical beam amplitude
+    vert_beam_RSSI_amp: np.array(int)
+        Vertical beam return signal strength indicator
+    vert_beam_range_m: np.array(float)
+        Vertical beam range in m
+    vert_beam_gain: list
+        Vertical beam gain setting
+    vert_beam_status: np.array(int)
+        Vertical beam status code
+    """
+
+    def __init__(self, n_ensembles):
+        """Initialize instance variables.
+
+        Parameters
+        ----------
+        n_ensembles: int
+            Number of ensembles
+        """
+
+        self.ambient_temp = nans(n_ensembles)
+        self.attitude_temp = nans(n_ensembles)
+        self.attitude = nans(n_ensembles)
+        self.bit_test = nans(n_ensembles)
+        self.contam_sensor = nans(n_ensembles)
+        self.date = nans([n_ensembles, 3])
+        self.date_y2k = nans([n_ensembles, 4])
+        self.date_not_y2k = nans([n_ensembles, 3])
+        self.error_status_word = [''] * n_ensembles
+        self.heading_deg = nans(n_ensembles)
+        self.heading_std_dev_deg = nans(n_ensembles)
+        self.mpt_msc = nans([n_ensembles, 3])
+        self.num = nans(n_ensembles)
+        self.num_fact = nans(n_ensembles)
+        self.num_tot = nans(n_ensembles)
+        self.orient = [''] * n_ensembles
+        self.pitch_std_dev_deg = nans(n_ensembles)
+        self.pitch_deg = nans(n_ensembles)
+        self.pressure_neg = nans(n_ensembles)
+        self.pressure_pos = nans(n_ensembles)
+        self.pressure_pascal = nans(n_ensembles)
+        self.pressure_var_pascal = nans(n_ensembles)
+        self.roll_std_dev_deg = nans(n_ensembles)
+        self.roll_deg = nans(n_ensembles)
+        self.salinity_ppt = nans(n_ensembles)
+        self.sos_mps = nans(n_ensembles)
+        self.temperature_deg_c = nans(n_ensembles)
+        self.time = nans([n_ensembles, 4])
+        self.time_y2k = nans([n_ensembles, 4])
+        self.xdcr_depth_dm = nans(n_ensembles)
+        self.xmit_current = nans(n_ensembles)
+        self.xmit_voltage = nans(n_ensembles)
+        self.vert_beam_eval_amp = nans(n_ensembles)
+        self.vert_beam_RSSI_amp = nans(n_ensembles)
+        self.vert_beam_range_m = nans(n_ensembles)
+        self.vert_beam_gain = [''] * n_ensembles
+        self.vert_beam_status = np.zeros(n_ensembles)
+
+
+class Surface(object):
+    """Class to hold surface cell data.
+
+    Attributes
+    ----------
+    no_cells: np.array(int)
+        Number of surface cells in the ensemble
+    cell_size_cm: np.array(int)
+        Cell size in cm
+    dist_bin1_cm: np.array(int)
+        Distance to center of cell 1 in cm
+    vel_mps: np.array(float)
+        3D array of velocity data in each cell and ensemble
+    corr: np.array(int)
+        3D array of correlation data for each beam, cell, and ensemble
+    pergd: np.array(int)
+        3D array of percent good for each beam, cell, and ensemble
+    rssi: np.array(int)
+        3D array of return signal strength indicator for each beam, cell, and ensemble
+    """
+
+    def __init__(self, n_ensembles, n_velocities, max_surface_bins):
+        """Initialize instance variables.
+
+        Parameters
+        ----------
+        n_ensembles: int
+            Number of ensembles
+        n_velocities: int
+            Number of velocity beams
+        max_surface_bins: int
+            Maximum number of surface bins in an ensemble in the transect
+        """
+
+        self.no_cells = np.zeros(n_ensembles)
+        self.cell_size_cm = nans(n_ensembles)
+        self.dist_bin1_cm = nans(n_ensembles)
+        self.vel_mps = np.tile([np.nan], [n_velocities, max_surface_bins, n_ensembles])
+        self.corr = nans([n_velocities, max_surface_bins, n_ensembles])
+        self.pergd = nans([n_velocities, max_surface_bins, n_ensembles])
+        self.rssi = nans([n_velocities, max_surface_bins, n_ensembles])
+
+
+class Wt(object):
+    """Class to hold water track data.
+
+    Attributes
+    ----------
+    vel_mps: np.array(float)
+        3D array of velocity data in each cell and ensemble
+    corr: np.array(int)
+        3D array of correlation data for each beam, cell, and ensemble
+    pergd: np.array(int)
+        3D array of percent good for each beam, cell, and ensemble
+    rssi: np.array(int)
+        3D array of return signal strength indicator for each beam, cell, and ensemble
+    """
+
+    def __init__(self, n_bins, n_ensembles, n_velocities):
+        """Initialize instance variables.
+
+        Parameters
+        ----------
+        n_ensembles: int
+            Number of ensembles
+        n_velocities: int
+            Number of velocity beams
+        n_bins: int
+            Maximum number of bins in an ensemble in the transect
+        """
+
+        self.corr = nans([n_velocities, n_bins, n_ensembles])
+        self.pergd = nans([n_velocities, n_bins, n_ensembles])
+        self.rssi = nans([n_velocities, n_bins, n_ensembles])
+        self.vel_mps = nans([n_velocities, n_bins, n_ensembles])
diff --git a/Classes/Pd0TRDI_2.py b/Classes/Pd0TRDI_2.py
new file mode 100644
index 0000000..6a20448
--- /dev/null
+++ b/Classes/Pd0TRDI_2.py
@@ -0,0 +1,3503 @@
+import os
+import re
+import numpy as np
+import struct
+from MiscLibs.common_functions import pol2cart, valid_number, nans
+
+
+class Pd0TRDI(object):
+    """Class to read data from PD0 files
+
+    Attributes
+    ----------
+    file_name: str
+        Full name including path of pd0 file to be read
+    Hdr: Hdr
+        Object of Hdr to hold header information
+    Inst: Inst
+        Object of Inst to hold instrument information
+    Cfg: Cfg
+        Object of Cfg to hold configuration information
+    Sensor: Sensor
+        Object of Sensor to hold sensor data
+    Wt: Wt
+        Object of Wt to hold water track data
+    Bt: Bt
+        Object of Bt to hold bottom track data
+    Gps: Gps
+        Object of Gps to hold GPS data from previous versions of WR
+    Gps2: Gps2
+        Object of Gps2 to hold GPS data from WR2
+    Surface: Surface
+        Object of Surface to hold surface cell data
+    AutoMode: AutoMode
+        Object of AutoMode to hold auto configuration settings
+    Nmea: Nmea
+        Object of Nmea to hold Nmea data
+    """
+
+    def __init__(self, file_name):
+        """Constructor initializing instance variables.
+
+        Parameters
+        ----------
+        file_name: str
+            Full name including path of pd0 file to be read
+        """
+
+        self.file_name = file_name
+        self.Hdr = None
+        self.Inst = None
+        self.Cfg = None
+        self.Sensor = None
+        self.Wt = None
+        self.Bt = None
+        self.Gps = None
+        self.Gps2 = None
+        self.Surface = None
+        self.AutoMode = None
+        self.Nmea = None
+
+        self.data_decoders = {
+            0x0000: ('fixed_leader', self.decode_fixed_leader),
+            0x0080: ('variable_leader', self.decode_variable_leader),
+            0x0100: ('velocity', self.decode_velocity),
+            0x0200: ('correlation', self.decode_correlation),
+            0x0300: ('echo_intensity', self.decode_echo_intensity),
+            0x0400: ('percent_good', self.decode_percent_good),
+            0x0500: ('status', self.decode_status),
+            0x0600: ('bottom_track', self.decode_bottom_track),
+            0x2022: ('nmea', self.decode_nmea),
+            0x2100: ('dbt_sentence', self.decode_dbt_sentence),
+            0x2101: ('gga_sentence', self.decode_gga_sentence),
+            0x2102: ('vtg_sentence', self.decode_vtg_sentence),
+            0x2103: ('gsa_sentence', self.decode_gsa_sentence),
+            0x0010: ('surface_leader', self.decode_surface_leader),
+            0x0110: ('surface_velocity', self.decode_surface_velocity),
+            0x0210: ('surface_correlation', self.decode_surface_correlation),
+            0x0310: ('surface_intensity', self.decode_surface_intensity),
+            0x0410: ('surface_percent_good', self.decode_surface_percent_good),
+            0x0510: ('surface_status', self.decode_surface_status),
+            0x4401: ('auto_configuration', self.decode_auto_config),
+            0x4100: ('vertical_beam', self.decode_vertical_beam),
+            0x3200: ('transformation_matrix', self.decode_transformation_matrix)
+        }
+
+        self.nmea_decoders = {100: ('gga', self.decode_gga_100),
+                              101: ('vtg', self.decode_vtg_101),
+                              102: ('ds', self.decode_ds_102),
+                              103: ('ext_heading', self.decode_ext_heading_103),
+                              104: ('gga', self.decode_gga_104),
+                              105: ('vtg', self.decode_vtg_105),
+                              106: ('ds', self.decode_ds_106),
+                              107: ('ext_heading', self.decode_ext_heading_107),
+                              204: ('gga', self.decode_gga_204),
+                              205: ('vtg', self.decode_vtg_205),
+                              206: ('ds', self.decode_ds_206),
+                              207: ('ext_heading', self.decode_ext_heading_207)}
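+
+        # Each table above maps an identifier to a (name, decoder) pair; the
+        # parser reads the leader ID at each data-type offset and calls the
+        # matching decoder, e.g. 0x0080 dispatches to decode_variable_leader.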
+
+        self.n_velocities = 4
+        self.max_surface_bins = 5
+
+        self.pd0_read(file_name)
+
+    def create_objects(self, n_ensembles, n_types, n_bins, max_surface_bins, n_velocities, wr2=False):
+        """Create objects for instance variables.
+
+        Parameters
+        ----------
+        n_ensembles: int
+            Number of ensembles
+        n_types: int
+            Number of data types
+        n_bins: int
+            Number of bins or depth cells
+        max_surface_bins: int
+            Maximum number of surface cells
+        n_velocities: int
+            Number of velocities
+        wr2: bool
+            Whether WR2 processing of GPS data should be applied
+        """
+
+        self.Hdr = Hdr(n_ensembles, n_types)
+        self.Inst = Inst(n_ensembles)
+        self.Cfg = Cfg(n_ensembles)
+        self.Sensor = Sensor(n_ensembles)
+        self.Wt = Wt(n_bins, n_ensembles, n_velocities)
+        self.Bt = Bt(n_ensembles, n_velocities)
+        self.Gps = Gps(n_ensembles)
+        self.Gps2 = Gps2(n_ensembles, wr2)
+        self.Surface = Surface(n_ensembles, n_velocities, max_surface_bins)
+        self.AutoMode = AutoMode(n_ensembles)
+        self.Nmea = Nmea(n_ensembles)
+
+    def pd0_read(self, fullname, wr2=False):
+        """Reads the binary pd0 file and assigns values to object instance variables.
+
+        Parameters
+        ----------
+        fullname: str
+            Full file name including path
+        wr2: bool
+            Determines if WR2 processing should be applied to GPS data
+        """
+
+        # Check to ensure file exists
+        if os.path.exists(fullname):
+            file_info = os.path.getsize(fullname)
+
+            if file_info > 0:
+                # Open file for processing
+                with open(fullname, 'rb') as f:
+                    pd0 = f.read()
+                pd0_bytes = bytearray(pd0)
+
+                # Initialize classes and arrays
+                n_ensembles, max_types, max_beams, max_bins = self.number_of_ensembles(self, file_info, pd0_bytes)
+                self.create_objects(n_ensembles, max_types, max_bins, self.max_surface_bins, self.n_velocities, wr2)
+                self.decode_all(pd0_bytes, file_info)
+                self.screen_and_convert(wr2)
+
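+    # Minimal usage sketch (hypothetical path): constructing the reader parses
+    # the whole file in one pass and populates the data objects.
+    #
+    #     pd0 = Pd0TRDI('transect.pd0')
+    #     vel = pd0.Wt.vel_mps  # (n_velocities, n_bins, n_ensembles), in m/s
+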
+    def screen_and_convert(self, wr2):
+
+        # Screen for bad data, and do the unit conversions
+        self.Wt.vel_mps[self.Wt.vel_mps == -32768] = np.nan
+        self.Wt.vel_mps = self.Wt.vel_mps / 1000
+        self.Wt.corr[self.Wt.corr == -32768] = np.nan
+        self.Wt.rssi[self.Wt.rssi == -32768] = np.nan
+        self.Wt.pergd[self.Wt.pergd == -32768] = np.nan
+
+        # Remove bad data, convert units
+        self.Bt.depth_m[self.Bt.depth_m == -32768] = np.nan
+        self.Bt.depth_m = self.Bt.depth_m / 100
+        self.Bt.vel_mps[self.Bt.vel_mps == -32768] = np.nan
+        self.Bt.vel_mps = self.Bt.vel_mps / 1000
+        self.Bt.corr[self.Bt.corr == -32768] = np.nan
+        self.Bt.eval_amp[self.Bt.eval_amp == -32768] = np.nan
+        self.Bt.pergd[self.Bt.pergd == -32768] = np.nan
+
+        # Remove bad data from Surface structure (RR), convert where needed
+        self.Surface.vel_mps[self.Surface.vel_mps == -32768] = np.nan
+        self.Surface.vel_mps = self.Surface.vel_mps / 1000
+        self.Surface.corr[self.Surface.corr == -32768] = np.nan
+        self.Surface.rssi[self.Surface.rssi == -32768] = np.nan
+        self.Surface.pergd[self.Surface.pergd == -32768] = np.nan
+
+        # If requested, compute WR2-compatible GPS-based boat velocities
+        if wr2:
+
+            # If vtg data are available compute north and east components
+            if self.Gps2.vtg_header[0, 0] == '$':
+
+                # Find minimum of absolute value of delta time from raw data
+                vtg_delta_time = np.abs(self.Gps2.vtg_delta_time)
+                vtg_min = np.nanmin(vtg_delta_time, axis=1)
+
+                # Compute the velocity components in m/s
+                for i in range(len(vtg_delta_time)):
+                    idx = np.where(vtg_delta_time[i, :] == vtg_min[i])[0]
+                    if len(idx) > 0:
+                        self.Gps2.vtg_velE_mps[i], self.Gps2.vtg_velN_mps[i] = \
+                            pol2cart((90 - self.Gps2.course_true[i, idx[0]]) * np.pi / 180,
+                                     self.Gps2.speed_kph[i, idx[0]] * 0.2777778)
+
+            if self.Gps2.gga_header[0, 0] == '$':
+
+                # Initialize constants
+                e_radius = 6378137
+                coeff = e_radius * np.pi / 180
+                ellip = 1 / 298.257223563
+
+                # Find minimum of absolute value of delta time from raw data
+                gga_delta_time = np.abs(self.Gps2.gga_delta_time)
+                gga_min = np.nanmin(gga_delta_time, axis=1)
+
+                # Process gga data
+                for i in range(1, len(gga_delta_time)):
+                    # Indices of the sentences with minimum delta time for this
+                    # ensemble and the previous ensemble
+                    idx = np.where(gga_delta_time[i, :] == gga_min[i])[0]
+                    idx_prev = np.where(gga_delta_time[i - 1, :] == gga_min[i - 1])[0]
+                    if len(idx) > 0 and len(idx_prev) > 0:
+                        lat_avg_deg = (self.Gps2.lat_deg[i, idx[0]]
+                                       + self.Gps2.lat_deg[i - 1, idx_prev[0]]) / 2
+                        sin_lat_avg = np.sin(np.deg2rad(lat_avg_deg))
+                        r_e = coeff * (1 + ellip * sin_lat_avg * sin_lat_avg)
+                        rn = coeff * (1 - 2 * ellip + 3 * ellip * sin_lat_avg * sin_lat_avg)
+                        dx = r_e * (self.Gps2.lon_deg[i, idx[0]] -
+                                    self.Gps2.lon_deg[i - 1, idx_prev[0]]) * np.cos(np.deg2rad(lat_avg_deg))
+                        dy = rn * (self.Gps2.lat_deg[i, idx[0]] - self.Gps2.lat_deg[i - 1, idx_prev[0]])
+                        dt = self.Gps2.utc[i, idx[0]] - self.Gps2.utc[i - 1, idx_prev[0]]
+                        self.Gps2.gga_velE_mps[i] = dx / dt
+                        self.Gps2.gga_velN_mps[i] = dy / dt
+                    else:
+                        self.Gps2.gga_velE_mps[i] = np.nan
+                        self.Gps2.gga_velN_mps[i] = np.nan
+
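+    # screen_and_convert relies on TRDI's -32768 "bad value" sentinel. A toy
+    # sketch of the pattern on a standalone array (illustrative only):
+    #
+    #     vel = np.array([[1500., -32768.], [-1200., 250.]])
+    #     vel[vel == -32768] = np.nan   # flag invalid samples
+    #     vel = vel / 1000              # mm/s -> m/s, NaN propagates
+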
+    def decode_all(self, pd0_bytes, file_info):
+        """Decodes all ensembles in the file, using the ensemble number to
+        keep the storage index consistent when ensembles are lost.
+
+        Parameters
+        ----------
+        pd0_bytes: bytearray
+            Contents of pd0 file
+        file_info: int
+            File size in bytes
+        """
+
+        start_byte = 0
+        n = 0
+        ensemble_number = 0
+        while start_byte < file_info:
+            data = self.decode_pd0_bytearray(self.data_decoders, pd0_bytes[start_byte:])
+            if data['checksum']:
+                # Adjust index for lost ensembles
+                if ensemble_number > 0:
+                    n = n + data['variable_leader']['ensemble_number'] - ensemble_number
+                try:
+                    self.Hdr.populate_data(n, data)
+                    self.Inst.populate_data(n, data)
+                    self.Cfg.populate_data(n, data)
+                    self.Sensor.populate_data(n, data)
+                    self.Wt.populate_data(n, data, self)
+                    self.Bt.populate_data(n, data)
+                    # self.Gps.populate_data(n, data)
+                    self.Gps2.populate_data(n, data)
+                    self.Surface.populate_data(n, data, self)
+                    self.AutoMode.populate_data(n, data)
+                    self.Nmea.populate_data(n, data)
+                    start_byte = start_byte + data['header']['number_of_bytes'] + 2
+                    ensemble_number = data['variable_leader']['ensemble_number']
+                except ValueError:
+                    start_byte = Pd0TRDI.find_next(pd0_bytes, start_byte, file_info)
+            else:
+                start_byte = Pd0TRDI.find_next(pd0_bytes, start_byte, file_info)
+
+    def number_of_ensembles(self, file_info, pd0_bytes):
+        """Determines the number of ensembles in the data file.
+
+        Parameters
+        ----------
+        file_info: int
+            File size in bytes
+        pd0_bytes: bytearray
+            Contents of pd0 file
+
+        Returns
+        -------
+        n_ensembles: int
+            Number of ensembles
+        max_data_types: int
+            Maximum number of data types in file
+        max_beams: int
+            Maximum number of beams
+        max_bins: int
+            Maximum number of regular bins
+        """
+
+        # Configure data decoders to be used
+        data_decoders = {0x0000: ('fixed_leader', self.preload_fixed_leader),
+                         0x0080: ('variable_leader', self.preload_variable_leader)}
+
+        # Initialize variables
+        start_byte = 0
+        n_beams = []
+        n_bins = []
+        n_data_types = []
+        ens_num = []
+
+        # Loop through entire file
+        while start_byte < file_info:
+
+            data = self.decode_pd0_bytearray(data_decoders, pd0_bytes[start_byte:])
+            if data['checksum']:
+                if 'number_of_bytes' in data['header'] and 'fixed_leader' in data and 'variable_leader' in data:
+                    n_data_types.append(data['header']['number_of_data_types'])
+                    n_beams.append(data['fixed_leader']['number_of_beams'])
+                    n_bins.append(data['fixed_leader']['number_of_cells'])
+                    ens_num.append(data['variable_leader']['ensemble_number'])
+                    start_byte = start_byte + data['header']['number_of_bytes'] + 2
+                else:
+                    start_byte = Pd0TRDI.find_next(pd0_bytes, start_byte, file_info)
+            else:
+                start_byte = Pd0TRDI.find_next(pd0_bytes, start_byte, file_info)
+
+        # Compute maximums
+        max_data_types = np.nanmax(n_data_types)
+        max_beams = np.nanmax(n_beams)
+        max_bins = np.nanmax(n_bins)
+        n_ensembles = ens_num[-1] - ens_num[0] + 1
+
+        return n_ensembles, max_data_types, max_beams, max_bins
+
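+    # n_ensembles is derived from the first and last ensemble numbers so that
+    # ensembles lost in recording still reserve a slot (decode_all later
+    # re-aligns on ensemble_number). Toy example (illustrative only):
+    #
+    #     ens_num = [12, 13, 15, 16]       # ensemble 14 was lost
+    #     ens_num[-1] - ens_num[0] + 1     # 5 slots, not 4
+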
+    @staticmethod
+    def find_next(pd0_bytes, start_byte, file_info):
+        """Searches forward from start_byte for the next ensemble header (0x7F7F).
+
+        Parameters
+        ----------
+        pd0_bytes: bytearray
+            Contents of pd0 file
+        start_byte: int
+            Current pointer into pd0_bytes
+        file_info: int
+            File size in bytes
+
+        Returns
+        -------
+        start_byte: int
+            Pointer to the next header, or file_info if none is found
+        """
+
+        try:
+            start_byte = start_byte + 1
+            skip_forward = pd0_bytes[start_byte:].index(b'\x7f\x7f')
+            start_byte = start_byte + skip_forward
+        except ValueError:
+            start_byte = file_info
+
+        return start_byte
+
+    @staticmethod
+    def preload_fixed_leader(pd0_bytes, offset, data):
+        """Parses the fixed leader for number of beams and number of cells.
+
+        Parameters
+        ----------
+        pd0_bytes: bytearray
+            Bytearray of all pd0 data
+        offset: int
+            Pointer into pd0_bytes
+        data: dict
+            Not used, included for compatibility with other decoders
+
+        Returns
+        -------
+        number_of_beams: int
+            Number of beams in ensemble
+        number_of_cells: int
+            Number of regular cells in ensemble
+        """
+
+        fixed_leader_format = (('number_of_beams', 'B', 8), ('number_of_cells', 'B', 9))
+
+        return Pd0TRDI.unpack_bytes(pd0_bytes, fixed_leader_format, offset)
+
+    @staticmethod
+    def preload_variable_leader(pd0_bytes, offset, data):
+        """Decodes variable leader ensemble number
+
+        Parameters
+        ----------
+        pd0_bytes: bytearray
+            Bytearray of all pd0 data
+        offset: int
+            Pointer into pd0_bytes
+        data: dict
+            Dictionary containing previously decoded data
+
+        Returns
+        -------
+        :dict
+            Dictionary of decoded data
+        """
+
+        # Define format
+        variable_leader_format = (('ensemble_number', '<H', 2),)
+
+        return Pd0TRDI.unpack_bytes(pd0_bytes, variable_leader_format, offset)
+
+    @staticmethod
+    def decode_pd0_bytearray(data_decoders, pd0_bytes):
+        """Loops through data and calls appropriate parsing method for each header ID.
+
+        Parameters
+        ----------
+        data_decoders: dict
+            Dictionary associating a method with a leader ID
+        pd0_bytes: bytearray
+            Byte array of entire pd0 file
+
+        Returns
+        -------
+        data: dict
+            Dictionary of decoded data
+        """
+
+        data = {}
+
+        # Read in header
+        data['header'] = Pd0TRDI.decode_fixed_header(pd0_bytes)
+        data['checksum'] = False
+        if 'number_of_bytes' in data['header'] and data['header']['number_of_bytes'] > 0:
+            if 'number_of_data_types' in data['header']:
+                # If checksum is OK then decode address offsets to the data types
+                if Pd0TRDI.validate_checksum(pd0_bytes, data['header']['number_of_bytes']):
+                    data['checksum'] = True
+                    data['header']['address_offsets'] = Pd0TRDI.decode_address_offsets(pd0_bytes,
+                                                                                       data['header']['number_of_data_types'])
+                    data['header']['invalid'] = []
+                    # Loop to decode all data types for which a data decoder is provided
+                    for offset in data['header']['address_offsets']:
+                        if len(pd0_bytes) > offset + 2:
+                            header_id = struct.unpack('<H', pd0_bytes[offset: offset + 2])[0]
+                            if header_id in data_decoders:
+                                key = data_decoders[header_id][0]
+                                decoder = data_decoders[header_id][1]
+                                data[key] = decoder(pd0_bytes, offset, data)
+                            else:
+                                data['header']['invalid'].append(header_id)
+
+        return data
+
+    @staticmethod
+    def unpack_bytes(pd0_bytes, data_format_tuples, offset=0):
+        """Unpackes the data based on the supplied data format tuples and offset.
+
+        Parameters
+        ----------
+        pd0_bytes: bytearray
+            Bytearray of all pd0 data
+        data_format_tuples: tuple
+            A tuple of tuples providing the data name, format, and byte location
+        offset: int
+            Pointer into pd0_bytes
+
+        Returns
+        -------
+        data: dict
+            Dictionary of decoded data
+        """
+        data = {}
+
+        # Decode data for each format specified in the data format tuples and assign to the data dictionary
+        for fmt in data_format_tuples:
+            try:
+                struct_offset = offset + fmt[2]
+                size = struct.calcsize(fmt[1])
+                data[fmt[0]] = struct.unpack(fmt[1], pd0_bytes[struct_offset: struct_offset + size])[0]
+            except struct.error:
+                print('Error parsing {} with format {} at offset {}'.format(fmt[0], fmt[1], struct_offset))
+
+        return data
+
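+    # Each entry in a format tuple is (name, struct format, byte offset from
+    # `offset`). A toy decode (illustrative only):
+    #
+    #     raw = bytearray(struct.pack('<BH', 2, 1025))
+    #     fmt = (('data_source', 'B', 0), ('number_of_bytes', '<H', 1))
+    #     Pd0TRDI.unpack_bytes(raw, fmt)   # {'data_source': 2, 'number_of_bytes': 1025}
+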
+    @staticmethod
+    def validate_checksum(pd0_bytes, offset):
+        """Validates that the checksum is correct to ensure data integrity.
+
+        Parameters
+        ----------
+        pd0_bytes: bytearray
+            Bytearray of all pd0 data
+        offset: int
+            Pointer into pd0_bytes
+
+        Returns
+        -------
+        :bool
+            True if checksum is valid
+
+        """
+        if len(pd0_bytes) > offset + 1:
+            calc_checksum = sum(pd0_bytes[:offset]) & 0xFFFF
+            given_checksum = struct.unpack('<H', pd0_bytes[offset: offset + 2])[0]
+
+            if calc_checksum == given_checksum:
+                return True
+            else:
+                return False
+        return False
+
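+    # The PD0 checksum is the low 16 bits of the sum of all ensemble bytes,
+    # stored little-endian immediately after the ensemble. Toy round trip
+    # (illustrative only):
+    #
+    #     body = bytearray(b'\x7f\x7f\x06\x00')
+    #     checksum = struct.pack('<H', sum(body) & 0xFFFF)
+    #     Pd0TRDI.validate_checksum(body + checksum, len(body))   # True
+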
+    @staticmethod
+    def bin2str(bin_in):
+        """Decodes bytes to a UTF-8 string, returning an empty string on failure."""
+
+        try:
+            str_out = bin_in.decode('utf-8')
+        except (UnicodeDecodeError, AttributeError):
+            str_out = ''
+        return str_out
+
+    @staticmethod
+    def decode_address_offsets(pd0_bytes, num_data_types, offset=6):
+        """Decodes the address offsets for each data type.
+
+        Parameters
+        ----------
+        pd0_bytes: bytearray
+            Bytearray of all pd0 data
+        num_data_types: int
+            Number of data types for which to find offsets
+        offset: int
+            Pointer into pd0_bytes
+
+        Returns
+        -------
+        address_data: list
+            List of offsets to each data type
+        """
+
+        address_data = []
+
+        # Loop through each data type
+        for bytes_start in range(offset, offset + (num_data_types * 2), 2):
+            data = struct.unpack_from('<H', pd0_bytes[bytes_start: bytes_start + 2])[0]
+            address_data.append(data)
+
+        return address_data
+
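+    # Offsets start at byte 6 of the header, one '<H' per data type. For a toy
+    # header advertising two data types (illustrative only):
+    #
+    #     hdr = bytearray(b'\x7f\x7f\x00\x00\x00\x02') + struct.pack('<HH', 18, 77)
+    #     Pd0TRDI.decode_address_offsets(hdr, 2)   # [18, 77]
+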
+    @staticmethod
+    def decode_fixed_header(pd0_bytes):
+        """Decodes fixed header
+
+        Parameters
+        ----------
+        pd0_bytes: bytearray
+            Bytearray of all pd0 data
+
+        Returns
+        -------
+        header: dict
+            Dictionary of header data
+        """
+
+        header_data_format = (('id', 'B', 0),
+                              ('data_source', 'B', 1),
+                              ('number_of_bytes', '<H', 2),
+                              ('spare', 'B', 4),
+                              ('number_of_data_types', 'B', 5))
+
+        header = Pd0TRDI.unpack_bytes(pd0_bytes, header_data_format)
+        return header
+
+    @staticmethod
+    def decode_fixed_leader(pd0_bytes, offset, data):
+        """Decodes fixed leader data
+
+        Parameters
+        ----------
+        pd0_bytes: bytearray
+            Bytearray of all pd0 data
+        offset: int
+            Pointer into pd0_bytes
+        data: dict
+            Dictionary containing previously decoded data
+
+        Returns
+        -------
+        :dict
+            Dictionary of decoded data
+        """
+
+        # Define format
+        fixed_leader_format = (
+            ('id', '<H', 0),
+            ('cpu_firmware_version', 'B', 2),
+            ('cpu_firmware_revision', 'B', 3),
+            ('system_configuration_ls', 'B', 4),
+            ('system_configuration_ms', 'B', 5),
+            ('simulation_data_flag', 'B', 6),
+            ('lag_length', 'B', 7),
+            ('number_of_beams', 'B', 8),
+            ('number_of_cells', 'B', 9),
+            ('number_of_water_pings', '<H', 10),
+            ('depth_cell_size', '<H', 12),
+            ('blank_after_transmit', '<H', 14),
+            ('water_mode', 'B', 16),
+            ('low_correlation_threshold', 'B', 17),
+            ('number_of_code_repetitions', 'B', 18),
+            ('minimum_percentage_water_profile_pings', 'B', 19),
+            ('error_velocity_threshold', '<H', 20),
+            ('minutes', 'B', 22),
+            ('seconds', 'B', 23),
+            ('hundredths', 'B', 24),
+            ('coordinate_transformation_process', 'B', 25),
+            ('heading_alignment', '<H', 26),
+            ('heading_bias', '<H', 28),
+            ('sensor_source', 'B', 30),
+            ('sensor_available', 'B', 31),
+            ('bin_1_distance', '<H', 32),
+            ('transmit_pulse_length', '<H', 34),
+            ('starting_depth_cell', 'B', 36),
+            ('ending_depth_cell', 'B', 37),
+            ('false_target_threshold', 'B', 38),
+            ('low_latency_trigger', 'B', 39),
+            ('transmit_lag_distance', '<H', 40),
+            ('cpu_board_serial_number', '<Q', 42),
+            ('system_bandwidth', '<H', 50),
+            ('system_power', 'B', 52),
+            ('spare', 'B', 53),
+            ('serial_number', '<I', 54),
+            ('beam_angle', 'B', 58)
+        )
+
+        return Pd0TRDI.unpack_bytes(pd0_bytes, fixed_leader_format, offset)
+
+    @staticmethod
+    def decode_variable_leader(pd0_bytes, offset, data):
+        """Decodes variabl leader data
+
+        Parameters
+        ----------
+        pd0_bytes: bytearray
+            Bytearray of all pd0 data
+        offset: int
+            Pointer into pd0_bytes
+        data: dict
+            Dictionary containing previously decoded data
+
+        Returns
+        -------
+        :dict
+            Dictionary of decoded data
+        """
+
+        # Define format
+        variable_leader_format = (
+            ('id', '<H', 0),
+            ('ensemble_number', '<H', 2),
+            ('rtc_year', 'B', 4),
+            ('rtc_month', 'B', 5),
+            ('rtc_day', 'B', 6),
+            ('rtc_hour', 'B', 7),
+            ('rtc_minutes', 'B', 8),
+            ('rtc_seconds', 'B', 9),
+            ('rtc_hundredths', 'B', 10),
+            ('ensemble_number_msb', 'B', 11),
+            ('bit_fault', 'B', 12),
+            ('bit_count', 'B', 13),
+            ('speed_of_sound', '<H', 14),
+            ('depth_of_transducer', '<H', 16),
+            ('heading', '<H', 18),
+            ('pitch', '<h', 20),
+            ('roll', '<h', 22),
+            ('salinity', '<H', 24),
+            ('temperature', '<h', 26),
+            ('mpt_minutes', 'B', 28),
+            ('mpt_seconds', 'B', 29),
+            ('mpt_hundredths', 'B', 30),
+            ('heading_standard_deviation', 'B', 31),
+            ('pitch_standard_deviation', 'B', 32),
+            ('roll_standard_deviation', 'B', 33),
+            ('transmit_current', 'B', 34),
+            ('transmit_voltage', 'B', 35),
+            ('ambient_temperature', 'B', 36),
+            ('pressure_positive', 'B', 37),
+            ('pressure_negative', 'B', 38),
+            ('attitude_temperature', 'B', 39),
+            ('attitude', 'B', 40),
+            ('contamination_sensor', 'B', 41),
+            ('error_status_word', '<I', 42),
+            ('reserved', '<H', 46),
+            ('pressure', '<I', 48),
+            ('pressure_variance', '<I', 52),
+            ('spare', 'B', 56),
+            ('rtc_y2k_century', 'B', 57),
+            ('rtc_y2k_year', 'B', 58),
+            ('rtc_y2k_month', 'B', 59),
+            ('rtc_y2k_day', 'B', 60),
+            ('rtc_y2k_hour', 'B', 61),
+            ('rtc_y2k_minutes', 'B', 62),
+            ('rtc_y2k_seconds', 'B', 63),
+            ('rtc_y2k_hundredths', 'B', 64),
+            ('lag_near_bottom', 'B', 65)
+        )
+
+        return Pd0TRDI.unpack_bytes(pd0_bytes, variable_leader_format, offset)
+
+    @staticmethod
+    def decode_per_cell_per_beam(pd0_bytes, offset, number_of_cells, number_of_beams, struct_format):
+        """Parses fields that are stored in serial cells and beams structures.
+        Returns an array of cell readings where each reading is an array containing the value at that beam.
+
+        Parameters
+        ----------
+        pd0_bytes: bytearray
+            Bytearray of all pd0 data
+        offset: int
+            Pointer into pd0_bytes
+        number_of_cells: int
+            Number of cells in data
+        number_of_beams: int
+            Number of beams in data
+        struct_format: str
+            A string identifying the type of data to decode
+
+        Returns
+        -------
+        data: list
+            A list of lists containing cell data for each beam
+        """
+
+        data_size = struct.calcsize(struct_format)
+        data = []
+        # Loop through cells
+        for cell in range(0, number_of_cells):
+            cell_start = offset + cell * number_of_beams * data_size
+            cell_data = []
+            # Loop through beams in each cell
+            for field in range(0, number_of_beams):
+                field_start = cell_start + field * data_size
+                data_bytes = pd0_bytes[field_start: field_start + data_size]
+                field_data = struct.unpack(struct_format, data_bytes)[0]
+                cell_data.append(field_data)
+            data.append(cell_data)
+
+        return data
+
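+    # Data are laid out cell-major: all beams of cell 0, then all beams of
+    # cell 1, and so on. Toy decode of 2 cells x 2 beams of '<h' values
+    # (illustrative only):
+    #
+    #     raw = struct.pack('<4h', 10, 20, 30, 40)
+    #     Pd0TRDI.decode_per_cell_per_beam(raw, 0, 2, 2, '<h')   # [[10, 20], [30, 40]]
+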
+    @staticmethod
+    def decode_velocity(pd0_bytes, offset, data):
+        """Decodes velocity data
+
+        Parameters
+        ----------
+        pd0_bytes: bytearray
+            Bytearray of all pd0 data
+        offset: int
+            Pointer into pd0_bytes
+        data: dict
+            Dictionary containing previously decoded data
+
+        Returns
+        -------
+        velocity_data:dict
+            Dictionary of decoded data
+        """
+
+        # Define format
+        velocity_format = (('id', '<h', 0),)
+
+        # Unpack data
+        velocity_data = Pd0TRDI.unpack_bytes(pd0_bytes, velocity_format, offset)
+        # Move past id field
+        offset += 2
+        # Arrange data in list of depth cells and beams or velocity components
+        velocity_data['data'] = Pd0TRDI.decode_per_cell_per_beam(pd0_bytes,
+                                                                 offset,
+                                                                 data['fixed_leader']['number_of_cells'],
+                                                                 data['fixed_leader']['number_of_beams'],
+                                                                 '<h')
+
+        return velocity_data
+
+    @staticmethod
+    def decode_correlation(pd0_bytes, offset, data):
+        """Decodes correlation data
+
+        Parameters
+        ----------
+        pd0_bytes: bytearray
+            Bytearray of all pd0 data
+        offset: int
+            Pointer into pd0_bytes
+        data: dict
+            Dictionary containing previously decoded data
+
+        Returns
+        -------
+        correlation_data:dict
+            Dictionary of decoded data
+        """
+
+        correlation_format = (('id', '<H', 0),)
+        # Unpack data
+        correlation_data = Pd0TRDI.unpack_bytes(pd0_bytes, correlation_format, offset)
+        # Move past id field
+        offset += 2
+        # Arrange data in list of depth cells and beams
+        correlation_data['data'] = Pd0TRDI.decode_per_cell_per_beam(pd0_bytes,
+                                                                    offset,
+                                                                    data['fixed_leader']['number_of_cells'],
+                                                                    data['fixed_leader']['number_of_beams'],
+                                                                    'B')
+
+        return correlation_data
+
+    @staticmethod
+    def decode_echo_intensity(pd0_bytes, offset, data):
+        """Decodes echo intensity data
+
+        Parameters
+        ----------
+        pd0_bytes: bytearray
+            Bytearray of all pd0 data
+        offset: int
+            Pointer into pd0_bytes
+        data: dict
+            Dictionary containing previously decoded data
+
+        Returns
+        -------
+        echo_intensity_data:dict
+            Dictionary of decoded data
+        """
+
+        echo_intensity_format = (('id', '<H', 0),)
+        # Unpack data
+        echo_intensity_data = Pd0TRDI.unpack_bytes(pd0_bytes, echo_intensity_format, offset)
+        # Move past id field
+        offset += 2
+        # Arrange data in list of depth cells and beams
+        echo_intensity_data['data'] = Pd0TRDI.decode_per_cell_per_beam(pd0_bytes,
+                                                                       offset,
+                                                                       data['fixed_leader']['number_of_cells'],
+                                                                       data['fixed_leader']['number_of_beams'],
+                                                                       'B')
+
+        return echo_intensity_data
+
+    @staticmethod
+    def decode_percent_good(pd0_bytes, offset, data):
+        """Decodes percent good data
+
+        Parameters
+        ----------
+        pd0_bytes: bytearray
+            Bytearray of all pd0 data
+        offset: int
+            Pointer into pd0_bytes
+        data: dict
+            Dictionary containing previously decoded data
+
+        Returns
+        -------
+        percent_good_data:dict
+            Dictionary of decoded data
+        """
+
+        percent_good_format = (('id', '<H', 0),)
+        # Unpack data
+        percent_good_data = Pd0TRDI.unpack_bytes(pd0_bytes, percent_good_format, offset)
+        # Move past id field
+        offset += 2
+        # Arrange data in list of depth cells and beams
+        percent_good_data['data'] = Pd0TRDI.decode_per_cell_per_beam(pd0_bytes,
+                                                                     offset,
+                                                                     data['fixed_leader']['number_of_cells'],
+                                                                     data['fixed_leader']['number_of_beams'],
+                                                                     'B')
+
+        return percent_good_data
+
+    @staticmethod
+    def decode_status(pd0_bytes, offset, data):
+        """Decodes percent good data
+
+        Parameters
+        ----------
+        pd0_bytes: bytearray
+            Bytearray of all pd0 data
+        offset: int
+            Pointer into pd0_bytes
+        data: dict
+            Dictionary containing previously decoded data
+
+        Returns
+        -------
+        status_data:dict
+            Dictionary of decoded data
+        """
+
+        status_format = (('id', '<H', 0),)
+        # Unpack data
+        status_data = Pd0TRDI.unpack_bytes(pd0_bytes, status_format, offset)
+        # Move past id field
+        offset += 2
+        # Arrange data in list of depth cells and beams
+        status_data['data'] = Pd0TRDI.decode_per_cell_per_beam(pd0_bytes,
+                                                               offset,
+                                                               data['fixed_leader']['number_of_cells'],
+                                                               data['fixed_leader']['number_of_beams'],
+                                                               'B')
+
+        return status_data
+
+    @staticmethod
+    def decode_bottom_track(pd0_bytes, offset, data):
+        """Decodes bottom track data
+
+        Parameters
+        ----------
+        pd0_bytes: bytearray
+            Bytearray of all pd0 data
+        offset: int
+            Pointer into pd0_bytes
+        data: dict
+            Dictionary containing previously decoded data (not used)
+
+        Returns
+        -------
+        bottom_track_data:dict
+            Dictionary of decoded data
+        """
+        bottom_track_format = (('id', '<H', 0),
+                               ('pings_per_ensemble_bp', '<H', 2),
+                               ('delay_before_reaquire', '<H', 4),
+                               ('correlation_magnitude_minimum_bc', 'B', 6),
+                               ('evaluation_amplitude_minimum_ba', 'B', 7),
+                               ('percent_good_minimum_bg', 'B', 8),
+                               ('bottom_track_mode_bm', 'B', 9),
+                               ('error_velocity_maximum_be', '<H', 10))
+
+        bottom_track_data = Pd0TRDI.unpack_bytes(pd0_bytes, bottom_track_format, offset)
+        bottom_track_data['range_lsb'] = Pd0TRDI.decode_per_cell_per_beam(pd0_bytes, offset + 16, 1, 4, '<H')
+        bottom_track_data['velocity'] = Pd0TRDI.decode_per_cell_per_beam(pd0_bytes, offset + 24, 1, 4, '<h')
+        bottom_track_data['correlation'] = Pd0TRDI.decode_per_cell_per_beam(pd0_bytes, offset + 32, 1, 4, 'B')
+        bottom_track_data['amplitude'] = Pd0TRDI.decode_per_cell_per_beam(pd0_bytes, offset + 36, 1, 4, 'B')
+        bottom_track_data['percent_good'] = Pd0TRDI.decode_per_cell_per_beam(pd0_bytes, offset + 40, 1, 4, 'B')
+        bottom_track_data['rssi'] = Pd0TRDI.decode_per_cell_per_beam(pd0_bytes, offset + 72, 1, 4, 'B')
+        bottom_track_data['range_msb'] = Pd0TRDI.decode_per_cell_per_beam(pd0_bytes, offset + 77, 1, 4, 'B')
+
+        return bottom_track_data
+
+    def decode_nmea(self, pd0_bytes, offset, data):
+        """Decodes nmea data
+
+        Parameters
+        ----------
+        pd0_bytes: bytearray
+            Bytearray of all pd0 data
+        offset: int
+            Pointer into pd0_bytes
+        data: dict
+            Dictionary containing previously decoded data
+
+        Returns
+        -------
+        nmea_data:dict
+            Dictionary of decoded data
+        """
+        nmea_leader_format = (('id', '<H', 0),
+                              ('msg_id', '<H', 2),
+                              ('msg_size', '<H', 4),
+                              ('delta_time', 'd', 6))
+
+        nmea_data = Pd0TRDI.unpack_bytes(pd0_bytes, nmea_leader_format, offset)
+        if nmea_data['msg_id'] in self.nmea_decoders:
+            key = self.nmea_decoders[nmea_data['msg_id']][0]
+            decoder = self.nmea_decoders[nmea_data['msg_id']][1]
+            if key in data:
+                data[key].append(decoder(pd0_bytes, offset + 14, nmea_data))
+            else:
+                data[key] = [decoder(pd0_bytes, offset + 14, nmea_data)]
+        return nmea_data
+
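+    # The NMEA leader decoded above is 14 bytes: id ('<H'), msg_id ('<H'),
+    # msg_size ('<H'), then delta_time ('d', 8 bytes), which is why the
+    # sentence decoders are handed offset + 14.
+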
+    @staticmethod
+    def decode_gga_100(pd0_bytes, offset, data):
+        """Decodes gga data for WinRiver versions prior to 2.00
+
+        Parameters
+        ----------
+        pd0_bytes: bytearray
+            Bytearray of all pd0 data
+        offset: int
+            Pointer into pd0_bytes
+        data: dict
+            Dictionary containing previously decoded data
+
+        Returns
+        -------
+        decoded_data:dict
+            Dictionary of decoded data
+        """
+
+        # Define format
+        format = (('header', '10s', 0),
+                  ('utc', '10s', 10),
+                  ('lat_deg', 'd', 20),
+                  ('lat_ref', 'c', 28),
+                  ('lon_deg', 'd', 29),
+                  ('lon_ref', 'c', 37),
+                  ('corr_qual', 'B', 38),
+                  ('num_sats', 'B', 39),
+                  ('hdop', 'f', 40),
+                  ('alt', 'f', 44),
+                  ('alt_unit', 'c', 48),
+                  ('geoid', 'f', 49),
+                  ('geoid_unit', 'c', 53),
+                  ('d_gps_age', 'f', 54),
+                  ('ref_stat_id', '<H', 58))
+
+        # Decode data
+        decoded_data = Pd0TRDI.unpack_bytes(pd0_bytes, format, offset)
+        decoded_data['header'] = Pd0TRDI.bin2str(decoded_data['header']).rstrip('\x00')
+        try:
+            decoded_data['utc'] = float(re.findall(rb'^\d+\.\d+|\d+', decoded_data['utc'])[0])
+        except BaseException:
+            decoded_data['utc'] = np.nan
+        decoded_data['lat_ref'] = Pd0TRDI.bin2str(decoded_data['lat_ref'])
+        decoded_data['lon_ref'] = Pd0TRDI.bin2str(decoded_data['lon_ref'])
+        decoded_data['geoid_unit'] = Pd0TRDI.bin2str(decoded_data['geoid_unit'])
+        decoded_data['alt_unit'] = Pd0TRDI.bin2str(decoded_data['alt_unit'])
+        decoded_data['delta_time'] = data['delta_time']
+
+        return decoded_data
+
+    @staticmethod
+    def decode_vtg_101(pd0_bytes, offset, data):
+        """Decodes vtg data for WinRiver versions prior to 2.00
+
+        Parameters
+        ----------
+        pd0_bytes: bytearray
+            Bytearray of all pd0 data
+        offset: int
+            Pointer into pd0_bytes
+        data: dict
+            Dictionary containing previously decoded data
+
+        Returns
+        -------
+        decoded_data:dict
+            Dictionary of decoded data
+        """
+
+        # Define format
+        format = (('header', '10s', 0),
+                  ('course_true', 'f', 10),
+                  ('true_indicator', 'c', 14),
+                  ('course_mag', 'f', 15),
+                  ('mag_indicator', 'c', 19),
+                  ('speed_knots', 'f', 20),
+                  ('knots_indicator', 'c', 24),
+                  ('speed_kph', 'f', 25),
+                  ('kph_indicator', 'c', 29),
+                  ('mode_indicator', 'c', 30))
+
+        # Decode data
+        decoded_data = Pd0TRDI.unpack_bytes(pd0_bytes, format, offset)
+        decoded_data['header'] = Pd0TRDI.bin2str(decoded_data['header']).rstrip('\x00')
+        decoded_data['true_indicator'] = Pd0TRDI.bin2str(decoded_data['true_indicator'])
+        decoded_data['mag_indicator'] = Pd0TRDI.bin2str(decoded_data['mag_indicator'])
+        decoded_data['knots_indicator'] = Pd0TRDI.bin2str(decoded_data['knots_indicator'])
+        decoded_data['kph_indicator'] = Pd0TRDI.bin2str(decoded_data['kph_indicator'])
+        decoded_data['delta_time'] = data['delta_time']
+
+        return decoded_data
+
+    @staticmethod
+    def decode_ds_102(pd0_bytes, offset, data):
+        """Decodes depth sounder for WinRiver versions prior to 2.00
+
+        Parameters
+        ----------
+        pd0_bytes: bytearray
+            Bytearray of all pd0 data
+        offset: int
+            Pointer into pd0_bytes
+        data: dict
+            Dictionary containing previously decoded data
+
+        Returns
+        -------
+        decoded_data:dict
+            Dictionary of decoded data
+        """
+
+        # Define format
+        format = (('header', '10s', 0),
+                  ('depth_ft', 'f', 10),
+                  ('ft_indicator', 'c', 14),
+                  ('depth_m', 'f', 15),
+                  ('m_indicator', 'c', 19),
+                  ('depth_fath', 'f', 20),
+                  ('fath_indicator', 'c', 24))
+
+        # Decode data
+        decoded_data = Pd0TRDI.unpack_bytes(pd0_bytes, format, offset)
+        decoded_data['header'] = Pd0TRDI.bin2str(decoded_data['header']).rstrip('\x00')
+        decoded_data['ft_indicator'] = Pd0TRDI.bin2str(decoded_data['ft_indicator'])
+        decoded_data['m_indicator'] = Pd0TRDI.bin2str(decoded_data['m_indicator'])
+        decoded_data['fath_indicator'] = Pd0TRDI.bin2str(decoded_data['fath_indicator'])
+        decoded_data['delta_time'] = data['delta_time']
+
+        return decoded_data
+
+    @staticmethod
+    def decode_ext_heading_103(pd0_bytes, offset, data):
+        """Decodes external heading for WinRiver versions prior to 2.00
+
+        Parameters
+        ----------
+        pd0_bytes: bytearray
+            Bytearray of all pd0 data
+        offset: int
+            Pointer into pd0_bytes
+        data: dict
+            Dictionary containing previously decoded data
+
+        Returns
+        -------
+        decoded_data:dict
+            Dictionary of decoded data
+        """
+
+        # Define format
+        format = (('header', '10s', 0),
+                  ('heading_deg', 'd', 10),
+                  ('h_true_indicator', 'c', 14))
+
+        # Decode data
+        decoded_data = Pd0TRDI.unpack_bytes(pd0_bytes, format, offset)
+        decoded_data['header'] = Pd0TRDI.bin2str(decoded_data['header']).rstrip('\x00')
+        decoded_data['h_true_indicator'] = Pd0TRDI.bin2str(decoded_data['h_true_indicator'])
+        decoded_data['delta_time'] = data['delta_time']
+
+        return decoded_data
+
+    @staticmethod
+    def decode_gga_104(pd0_bytes, offset, data):
+        """Decodes gga data for WinRiver 2.00 and greater with ADCP's without integrated NMEA data
+
+        Parameters
+        ----------
+        pd0_bytes: bytearray
+            Bytearray of all pd0 data
+        offset: int
+            Pointer into pd0_bytes
+        data: dict
+            Dictionary containing previously decoded data
+
+        Returns
+        -------
+        decoded_data:dict
+            Dictionary of decoded data
+        """
+
+        # Define format
+        format = (('header', '7s', 0),
+                  ('utc', '10s', 7),
+                  ('lat_deg', 'd', 17),
+                  ('lat_ref', 'c', 25),
+                  ('lon_deg', 'd', 26),
+                  ('lon_ref', 'c', 34),
+                  ('corr_qual', 'B', 35),
+                  ('num_sats', 'B', 36),
+                  ('hdop', 'f', 37),
+                  ('alt', 'f', 41),
+                  ('alt_unit', 'c', 45),
+                  ('geoid', 'f', 46),
+                  ('geoid_unit', 'c', 50),
+                  ('d_gps_age', 'f', 51),
+                  ('ref_stat_id', '<H', 55))
+
+        # Decode data
+        decoded_data = Pd0TRDI.unpack_bytes(pd0_bytes, format, offset)
+        decoded_data['header'] = Pd0TRDI.bin2str(decoded_data['header']).rstrip('\x00')
+        try:
+            decoded_data['utc'] = float(re.findall(rb'^\d+\.\d+|\d+', decoded_data['utc'])[0])
+        except BaseException:
+            decoded_data['utc'] = np.nan
+        decoded_data['lat_ref'] = Pd0TRDI.bin2str(decoded_data['lat_ref'])
+        decoded_data['lon_ref'] = Pd0TRDI.bin2str(decoded_data['lon_ref'])
+        decoded_data['geoid_unit'] = Pd0TRDI.bin2str(decoded_data['geoid_unit'])
+        decoded_data['alt_unit'] = Pd0TRDI.bin2str(decoded_data['alt_unit'])
+        decoded_data['delta_time'] = data['delta_time']
+
+        return decoded_data
+
+    @staticmethod
+    def decode_vtg_105(pd0_bytes, offset, data):
+        """Decodes vtg data for WinRiver 2.00 and greater with ADCP's without integrated NMEA data
+
+        Parameters
+        ----------
+        pd0_bytes: bytearray
+            Bytearray of all pd0 data
+        offset: int
+            Pointer into pd0_bytes
+        data: dict
+            Dictionary containing previously decoded data
+
+        Returns
+        -------
+        decoded_data:dict
+            Dictionary of decoded data
+        """
+
+        # Define format
+        format = (('header', '7s', 0),
+                  ('course_true', 'f', 7),
+                  ('true_indicator', 'c', 11),
+                  ('course_mag', 'f', 12),
+                  ('mag_indicator', 'c', 16),
+                  ('speed_knots', 'f', 17),
+                  ('knots_indicator', 'c', 21),
+                  ('speed_kph', 'f', 22),
+                  ('kph_indicator', 'c', 26),
+                  ('mode_indicator', 'c', 27))
+
+        # Decode data
+        decoded_data = Pd0TRDI.unpack_bytes(pd0_bytes, format, offset)
+        decoded_data['header'] = Pd0TRDI.bin2str(decoded_data['header']).rstrip('\x00')
+        decoded_data['true_indicator'] = Pd0TRDI.bin2str(decoded_data['true_indicator'])
+        decoded_data['mag_indicator'] = Pd0TRDI.bin2str(decoded_data['mag_indicator'])
+        decoded_data['knots_indicator'] = Pd0TRDI.bin2str(decoded_data['knots_indicator'])
+        decoded_data['kph_indicator'] = Pd0TRDI.bin2str(decoded_data['kph_indicator'])
+        decoded_data['delta_time'] = data['delta_time']
+
+        return decoded_data
+
+    @staticmethod
+    def decode_ds_106(pd0_bytes, offset, data):
+        """Decodes depth sounder for WinRiver 2.00 and greater with ADCP's without integrated NMEA data
+
+        Parameters
+        ----------
+        pd0_bytes: bytearray
+            Bytearray of all pd0 data
+        offset: int
+            Pointer into pd0_bytes
+        data: dict
+            Dictionary containing previously decoded data
+
+        Returns
+        -------
+        decoded_data:dict
+            Dictionary of decoded data
+        """
+
+        # Define format
+        format = (('header', '7s', 0),
+                  ('depth_ft', 'f', 7),
+                  ('ft_indicator', 'c', 11),
+                  ('depth_m', 'f', 12),
+                  ('m_indicator', 'c', 16),
+                  ('depth_fath', 'f', 17),
+                  ('fath_indicator', 'c', 21))
+
+        # Decode data
+        decoded_data = Pd0TRDI.unpack_bytes(pd0_bytes, format, offset)
+        decoded_data['header'] = Pd0TRDI.bin2str(decoded_data['header']).rstrip('\x00')
+        decoded_data['ft_indicator'] = Pd0TRDI.bin2str(decoded_data['ft_indicator'])
+        decoded_data['m_indicator'] = Pd0TRDI.bin2str(decoded_data['m_indicator'])
+        decoded_data['fath_indicator'] = Pd0TRDI.bin2str(decoded_data['fath_indicator'])
+        decoded_data['delta_time'] = data['delta_time']
+
+        return decoded_data
+
+    @staticmethod
+    def decode_ext_heading_107(pd0_bytes, offset, data):
+        """Decodes external heading for WinRiver 2.00 and greater with ADCP's without integrated NMEA data
+
+        Parameters
+        ----------
+        pd0_bytes: bytearray
+            Bytearray of all pd0 data
+        offset: int
+            Pointer into pd0_bytes
+        data: dict
+            Dictionary containing previously decoded data
+
+        Returns
+        -------
+        decoded_data:dict
+            Dictionary of decoded data
+        """
+
+        # Define format
+        format = (('header', '7s', 0),
+                  ('heading_deg', 'd', 7),
+                  ('h_true_indicator', 'c', 15))
+
+        # Decode data
+        decoded_data = Pd0TRDI.unpack_bytes(pd0_bytes, format, offset)
+        decoded_data['header'] = Pd0TRDI.bin2str(decoded_data['header']).rstrip('\x00')
+        if abs(decoded_data['heading_deg']) < 360:
+            try:
+                decoded_data['h_true_indicator'] = Pd0TRDI.bin2str(decoded_data['h_true_indicator'])
+            except:
+                decoded_data['h_true_indicator'] = ''
+        else:
+            decoded_data['heading_deg'] = np.nan
+            decoded_data['h_true_indicator'] = ''
+        decoded_data['delta_time'] = data['delta_time']
+
+        return decoded_data
+
+    @staticmethod
+    def decode_gga_204(pd0_bytes, offset, data):
+        """Decodes gga data for ADCP's with integrated NMEA data
+
+        Parameters
+        ----------
+        pd0_bytes: bytearray
+            Bytearray of all pd0 data
+        offset: int
+            Pointer into pd0_bytes
+        data: dict
+            Dictionary containing previously decoded data
+
+        Returns
+        -------
+        decoded_data:dict
+            Dictionary of decoded data
+        """
+
+        # Initialize dictionary
+        decoded_data = {}
+        decoded_data['header'] = ''
+        decoded_data['utc'] = np.nan
+        decoded_data['lat_deg'] = np.nan
+        decoded_data['lat_ref'] = ''
+        decoded_data['lon_deg'] = np.nan
+        decoded_data['lon_ref'] = ''
+        decoded_data['corr_qual'] = np.nan
+        decoded_data['num_sats'] = np.nan
+        decoded_data['hdop'] = np.nan
+        decoded_data['alt'] = np.nan
+        decoded_data['alt_unit'] = ''
+        decoded_data['geoid'] = ''
+        decoded_data['geoid_unit'] = ''
+        decoded_data['d_gps_age'] = np.nan
+        decoded_data['ref_stat_id'] = np.nan
+        decoded_data['delta_time'] = np.nan
+
+        # Decode NMEA sentence and split into an array
+        format = str(data['msg_size']) + 'c'
+        sentence = Pd0TRDI.bin2str(b''.join(list(struct.unpack(format, pd0_bytes[offset: offset + data['msg_size']]))))
+        temp_array = np.array(sentence.split(','))
+        temp_array[temp_array == '999.9'] = ''
+
+        # Assign parts of array to dictionary
+        try:
+            decoded_data['delta_time'] = data['delta_time']
+            decoded_data['header'] = temp_array[0]
+            decoded_data['utc'] = valid_number(temp_array[1])
+            lat_str = temp_array[2]
+            lat_deg = valid_number(lat_str[0:2])
+            decoded_data['lat_deg'] = lat_deg + valid_number(lat_str[2:]) / 60
+            decoded_data['lat_ref'] = temp_array[3]
+            lon_str = temp_array[4]
+            lon_num = valid_number(lon_str)
+            lon_deg = np.floor(lon_num / 100.)
+            decoded_data['lon_deg'] = lon_deg + (((lon_num / 100.) - lon_deg) * 100.) / 60.
+            decoded_data['lon_ref'] = temp_array[5]
+            decoded_data['corr_qual'] = valid_number(temp_array[6])
+            decoded_data['num_sats'] = valid_number(temp_array[7])
+            decoded_data['hdop'] = valid_number(temp_array[8])
+            decoded_data['alt'] = valid_number(temp_array[9])
+            decoded_data['alt_unit'] = temp_array[10]
+            decoded_data['geoid'] = temp_array[11]
+            decoded_data['geoid_unit'] = temp_array[12]
+            decoded_data['d_gps_age'] = valid_number(temp_array[13])
+            idx_star = temp_array[14].find('*')
+            decoded_data['ref_stat_id'] = valid_number(temp_array[14][:idx_star])
+
+        except (ValueError, EOFError, IndexError):
+            pass
+
+        return decoded_data
+
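+    # GGA latitude/longitude fields use ddmm.mmmm / dddmm.mmmm. Sketch of the
+    # conversion applied above, on a toy value (illustrative only):
+    #
+    #     lon_num = 12311.12                      # 123 deg, 11.12 min
+    #     lon_deg = np.floor(lon_num / 100.)      # 123.0
+    #     lon_deg + ((lon_num / 100. - lon_deg) * 100.) / 60.   # ~123.18533
+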
+    @staticmethod
+    def decode_vtg_205(pd0_bytes, offset, data):
+        """Decodes vtg data for ADCP's with integrated NMEA data
+
+        Parameters
+        ----------
+        pd0_bytes: bytearray
+            Bytearray of all pd0 data
+        offset: int
+            Pointer into pd0_bytes
+        data: dict
+            Dictionary containing previously decoded data
+
+        Returns
+        -------
+        decoded_data:dict
+            Dictionary of decoded data
+        """
+
+        # Initialize dictionary
+        decoded_data = {}
+        decoded_data['header'] = ''
+        decoded_data['course_true'] = np.nan
+        decoded_data['true_indicator'] = ''
+        decoded_data['course_mag'] = np.nan
+        decoded_data['mag_indicator'] = ''
+        decoded_data['speed_knots'] = np.nan
+        decoded_data['knots_indicator'] = ''
+        decoded_data['speed_kph'] = np.nan
+        decoded_data['kph_indicator'] = ''
+        decoded_data['mode_indicator'] = ''
+        decoded_data['delta_time'] = np.nan
+
+        # Decode NMEA sentence and split into an array
+        format = str(data['msg_size']) + 'c'
+        sentence = Pd0TRDI.bin2str(b''.join(list(struct.unpack(format, pd0_bytes[offset: offset + data['msg_size']]))))
+        temp_array = np.array(sentence.split(','))
+        temp_array[temp_array == '999.9'] = ''
+
+        # Assign parts of array to dictionary
+        try:
+            decoded_data['header'] = temp_array[0]
+            decoded_data['course_true'] = valid_number(temp_array[1])
+            decoded_data['true_indicator'] = temp_array[2]
+            decoded_data['course_mag'] = valid_number(temp_array[3])
+            decoded_data['mag_indicator'] = temp_array[4]
+            decoded_data['speed_knots'] = valid_number(temp_array[5])
+            decoded_data['knots_indicator'] = temp_array[6]
+            decoded_data['speed_kph'] = valid_number(temp_array[7])
+            decoded_data['kph_indicator'] = temp_array[8]
+            idx_star = temp_array[9].find('*')
+            decoded_data['mode_indicator'] = temp_array[9][:idx_star]
+            decoded_data['delta_time'] = data['delta_time']
+
+        except (ValueError, EOFError, IndexError):
+            pass
+
+        return decoded_data
+
+    @staticmethod
+    def decode_ds_206(pd0_bytes, offset, data):
+        """Decodes depth sounder for ADCP's with integrated NMEA data
+
+        Parameters
+        ----------
+        pd0_bytes: bytearray
+            Bytearray of all pd0 data
+        offset: int
+            Pointer into pd0_bytes
+        data: dict
+            Dictionary containing previously decoded data
+
+        Returns
+        -------
+        decoded_data:dict
+            Dictionary of decoded data
+        """
+
+        # Initialize dictionary
+        decoded_data = {}
+        decoded_data['header'] = ''
+        decoded_data['depth_ft'] = np.nan
+        decoded_data['ft_indicator'] = ''
+        decoded_data['depth_m'] = np.nan
+        decoded_data['m_indicator'] = ''
+        decoded_data['depth_fath'] = np.nan
+        decoded_data['fath_indicator'] = ''
+        decoded_data['delta_time'] = np.nan
+
+        # Decode NMEA sentence and split into an array
+        format = str(data['msg_size']) + 'c'
+        sentence = Pd0TRDI.bin2str(b''.join(list(struct.unpack(format, pd0_bytes[offset: offset + data['msg_size']]))))
+        temp_array = np.array(sentence.split(','))
+        temp_array[temp_array == '999.9'] = ''
+
+        # Assign parts of array to dictionary
+        try:
+            decoded_data['header'] = temp_array[0]
+            decoded_data['depth_ft'] = valid_number(temp_array[1])
+            decoded_data['ft_indicator'] = temp_array[2]
+            decoded_data['depth_m'] = valid_number(temp_array[3])
+            decoded_data['m_indicator'] = temp_array[4]
+            decoded_data['depth_fath'] = valid_number(temp_array[5])
+            idx_star = temp_array[6].find('*')
+            decoded_data['fath_indicator'] = temp_array[6][:idx_star]
+            decoded_data['delta_time'] = data['delta_time']
+
+        except (ValueError, EOFError, IndexError):
+            pass
+
+        return decoded_data
+
+    @staticmethod
+    def decode_ext_heading_207(pd0_bytes, offset, data):
+        """Decodes external heading for ADCP's with integrated NMEA data
+
+        Parameters
+        ----------
+        pd0_bytes: bytearray
+            Bytearray of all pd0 data
+        offset: int
+            Pointer into pd0_bytes
+        data: dict
+            Dictionary containing previously decoded data
+
+        Returns
+        -------
+        decoded_data:dict
+            Dictionary of decoded data
+        """
+
+        # Initialize dictionary
+        decoded_data = {}
+        decoded_data['header'] = ''
+        decoded_data['heading_deg'] = np.nan
+        decoded_data['h_true_indicator'] = ''
+        decoded_data['delta_time'] = np.nan
+
+        # Decode NMEA sentence and split into an array
+        format = str(data['msg_size']) + 'c'
+        sentence = Pd0TRDI.bin2str(b''.join(list(struct.unpack(format, pd0_bytes[offset: offset + data['msg_size']]))))
+        temp_array = np.array(sentence.split(','))
+        temp_array[temp_array == '999.9'] = ''
+
+        # Assign parts of array to dictionary
+        try:
+            decoded_data['header'] = temp_array[0]
+            decoded_data['heading_deg'] = valid_number(temp_array[1])
+            idx_star = temp_array[2].find('*')
+            decoded_data['h_true_indicator'] = temp_array[2][:idx_star]
+            decoded_data['delta_time'] = data['delta_time']
+
+        except (ValueError, EOFError, IndexError):
+            pass
+
+        return decoded_data
+
+    @staticmethod
+    def decode_dbt_sentence(pd0_bytes, offset, data):
+        """Stores dbt sentence
+
+        Parameters
+        ----------
+        pd0_bytes: bytearray
+            Bytearray of all pd0 data
+        offset: int
+            Pointer into pd0_bytes
+        data: dict
+            Dictionary containing previously decoded data
+
+        Returns
+        -------
+        decoded_data:dict
+            Dictionary of decoded data
+        """
+
+        return Pd0TRDI.decode_nmea_sentence(pd0_bytes, offset, data, 'dbt_sentence')
+
+    @staticmethod
+    def decode_gga_sentence(pd0_bytes, offset, data):
+        """Stores dbt sentence
+
+        Parameters
+        ----------
+        pd0_bytes: bytearray
+            Bytearray of all pd0 data
+        offset: int
+            Pointer into pd0_bytes
+        data: dict
+            Dictionary containing previously decoded data
+
+        Returns
+        -------
+        decoded_data:dict
+            Dictionary of decoded data
+        """
+
+        return Pd0TRDI.decode_nmea_sentence(pd0_bytes, offset, data, 'gga_sentence')
+
+    @staticmethod
+    def decode_vtg_sentence(pd0_bytes, offset, data):
+        """Stores dbt sentence
+
+        Parameters
+        ----------
+        pd0_bytes: bytearray
+            Bytearray of all pd0 data
+        offset: int
+            Pointer into pd0_bytes
+        data: dict
+            Dictionary containing previously decoded data
+
+        Returns
+        -------
+        decoded_data:dict
+            Dictionary of decoded data
+        """
+
+        return Pd0TRDI.decode_nmea_sentence(pd0_bytes, offset, data, 'vtg_sentence')
+
+    @staticmethod
+    def decode_gsa_sentence(pd0_bytes, offset, data):
+        """Stores dbt sentence
+
+        Parameters
+        ----------
+        pd0_bytes: bytearray
+            Bytearray of all pd0 data
+        offset: int
+            Pointer into pd0_bytes
+        data: dict
+            Dictionary containing previously decoded data
+
+        Returns
+        -------
+        decoded_data:dict
+            Dictionary of decoded data
+        """
+
+        return Pd0TRDI.decode_nmea_sentence(pd0_bytes, offset, data, 'gsa_sentence')
+
+    @staticmethod
+    def decode_nmea_sentence(pd0_bytes, offset, data, target):
+        """Decodes nmea sentence
+
+        Parameters
+        ----------
+        pd0_bytes: bytearray
+            Bytearray of all pd0 data
+        offset: int
+            Pointer into pd0_bytes
+        data: dict
+            Dictionary containing previously decoded data
+        target: str
+            Dictionary key for decoded data in data
+
+        Returns
+        -------
+        decoded_data:dict
+            Dictionary of decoded data
+        """
+
+        # Compute number of characters in the sentence
+        offset_idx = data['header']['address_offsets'].index(offset)
+
+        if offset_idx + 1 == data['header']['number_of_data_types']:
+            end_offset = data['header']['number_of_bytes']
+        else:
+            end_offset = data['header']['address_offsets'][offset_idx + 1]
+        number_of_characters = end_offset - data['header']['address_offsets'][offset_idx]
+
+        # Generate format string, skipping the 4-byte sentence leader
+        format_str = str(number_of_characters - 4) + 'c'
+        offset = data['header']['address_offsets'][offset_idx]
+
+        # Decode data
+        sentence = struct.unpack(format_str, pd0_bytes[offset + 4: offset + number_of_characters])
+        try:
+            end_of_sentence = sentence.index(b'\n') + 1
+            sentence = b''.join(sentence[0:end_of_sentence]).decode('utf-8')
+        except ValueError:
+            sentence = ''
+        # Create or add to list of target sentences
+        if target in data:
+            decoded_data = data[target]
+            decoded_data.append(sentence)
+        else:
+            decoded_data = [sentence]
+
+        return decoded_data
+
+    @staticmethod
+    def decode_surface_leader(pd0_bytes, offset, data):
+        """Decodes surface velocity leader
+
+        Parameters
+        ----------
+        pd0_bytes: bytearray
+            Bytearray of all pd0 data
+        offset: int
+            Pointer into pd0_bytes
+        data: dict
+            Dictionary containing previously decoded data
+
+        Returns
+        -------
+        surface_leader_data:dict
+            Dictionary of decoded data
+        """
+        surface_leader_format = (('id', '<H', 0),
+                                 ('cell_count', 'B', 2),
+                                 ('cell_size', '<H', 3),
+                                 ('range_cell_1', '<H', 5))
+
+        surface_leader_data = Pd0TRDI.unpack_bytes(pd0_bytes, surface_leader_format, offset)
+        return surface_leader_data
+
+    @staticmethod
+    def decode_surface_velocity(pd0_bytes, offset, data):
+        """Decodes surface velocity data
+
+        Parameters
+        ----------
+        pd0_bytes: bytearray
+            Bytearray of all pd0 data
+        offset: int
+            Pointer into pd0_bytes
+        data: dict
+            Dictionary containing previously decoded data
+
+        Returns
+        -------
+        surface_velocity_data:dict
+            Dictionary of decoded data
+        """
+        surface_velocity_format = (('id', '<H', 0),)
+
+        surface_velocity_data = Pd0TRDI.unpack_bytes(pd0_bytes, surface_velocity_format, offset)
+        surface_velocity_data['velocity'] = Pd0TRDI.decode_per_cell_per_beam(pd0_bytes, offset + 2,
+                                                                             data['surface_leader']['cell_count'],
+                                                                             4, '<h')
+        return surface_velocity_data
+
+    @staticmethod
+    def decode_surface_correlation(pd0_bytes, offset, data):
+        """Decodes surface correlation data
+
+        Parameters
+        ----------
+        pd0_bytes: bytearray
+            Bytearray of all pd0 data
+        offset: int
+            Pointer into pd0_bytes
+        data: dict
+            Dictionary containing previously decoded data
+
+        Returns
+        -------
+        surface_correlation_data: dict
+            Dictionary of decoded data
+        """
+        surface_correlation_format = (('id', '<H', 0),)
+
+        surface_correlation_data = Pd0TRDI.unpack_bytes(pd0_bytes, surface_correlation_format, offset)
+        surface_correlation_data['correlation'] = Pd0TRDI.decode_per_cell_per_beam(pd0_bytes, offset + 2,
+                                                                                   data['surface_leader']['cell_count'],
+                                                                                   4, 'B')
+        return surface_correlation_data
+
+    @staticmethod
+    def decode_surface_intensity(pd0_bytes, offset, data):
+        """Decodes surface intensity data
+
+        Parameters
+        ----------
+        pd0_bytes: bytearray
+            Bytearray of all pd0 data
+        offset: int
+            Pointer into pd0_bytes
+        data: dict
+            Dictionary containing previously decoded data
+
+        Returns
+        -------
+        surface_rssi_data: dict
+            Dictionary of decoded data
+        """
+        surface_rssi_format = (('id', '<H', 0),)
+
+        surface_rssi_data = Pd0TRDI.unpack_bytes(pd0_bytes, surface_rssi_format, offset)
+        surface_rssi_data['rssi'] = Pd0TRDI.decode_per_cell_per_beam(pd0_bytes, offset + 2,
+                                                                     data['surface_leader']['cell_count'],
+                                                                     4, 'B')
+        return surface_rssi_data
+
+    @staticmethod
+    def decode_surface_percent_good(pd0_bytes, offset, data):
+        """Decodes surface percent good data
+
+        Parameters
+        ----------
+        pd0_bytes: bytearray
+            Bytearray of all pd0 data
+        offset: int
+            Pointer into pd0_bytes
+        data: dict
+            Dictionary containing previously decoded data
+
+        Returns
+        -------
+        surface_per_good_data: dict
+            Dictionary of decoded data
+        """
+        surface_per_good_format = (('id', '<H', 0),)
+
+        surface_per_good_data = Pd0TRDI.unpack_bytes(pd0_bytes, surface_per_good_format, offset)
+        surface_per_good_data['percent_good'] = Pd0TRDI.decode_per_cell_per_beam(pd0_bytes, offset + 2,
+                                                                                 data['surface_leader']['cell_count'],
+                                                                                 4, 'B')
+        return surface_per_good_data
+
+    @staticmethod
+    def decode_surface_status(pd0_bytes, offset, data):
+        """Decodes surface percent good data
+
+        Parameters
+        ----------
+        pd0_bytes: bytearray
+            Bytearray of all pd0 data
+        offset: int
+            Pointer into pd0_bytes
+        data: dict
+            Dictionary containing previously decoded data
+
+        Returns
+        -------
+        surface_status_data: dict
+            Dictionary of decoded data
+        """
+        surface_status_format = (('id', '<H', 0),)
+
+        surface_status_data = Pd0TRDI.unpack_bytes(pd0_bytes, surface_status_format, offset)
+        surface_status_data['percent_good'] = Pd0TRDI.decode_per_cell_per_beam(pd0_bytes, offset + 2,
+                                                                               data['surface_leader']['cell_count'],
+                                                                               4, 'B')
+        return surface_status_data
+
+    @staticmethod
+    def decode_auto_config(pd0_bytes, offset, data):
+        """Decodes auto configuration data
+
+        Parameters
+        ----------
+        pd0_bytes: bytearray
+            Bytearray of all pd0 data
+        offset: int
+            Pointer into pd0_bytes
+        data: dict
+            Dictionary of previously decoded data
+
+        Returns
+        -------
+        auto_config_data: dict
+            Dictionary of decoded data
+        """
+        auto_config_leader_format = (('id', '<H', 0), ('beam_count', 'B', 2))
+        auto_config_beam_format = (('setup', 'B', 0),
+                                   ('depth', '<H', 1),
+                                   ('ping_count', 'B', 3),
+                                   ('ping_type', 'B', 4),
+                                   ('cell_count', '<H', 5),
+                                   ('cell_size', '<H', 7),
+                                   ('bin_1_mid', '<H', 9),
+                                   ('code_reps', 'B', 11),
+                                   ('transmit_length', '<H', 12),
+                                   ('lag_length', '<H', 14),
+                                   ('transmit_bandwidth', 'B', 16),
+                                   ('receive_bandwidth', 'B', 17),
+                                   ('min_ping_interval', '<H', 18))
+        auto_config_data = {}
+        auto_config_data['leader'] = Pd0TRDI.unpack_bytes(pd0_bytes, auto_config_leader_format, offset)
+
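+        # The 3-byte leader (2-byte id + 1-byte beam count) is followed by one
+        # 20-byte block per beam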
+        for n in range(1, auto_config_data['leader']['beam_count'] + 1):
+            label = 'beam_' + str(n)
+            beam_offset = offset + 3 + (20 * (n - 1))
+            auto_config_data[label] = Pd0TRDI.unpack_bytes(pd0_bytes, auto_config_beam_format, beam_offset)
+
+        return auto_config_data
+
+    @staticmethod
+    def decode_vertical_beam(pd0_bytes, offset, data):
+        """Decodes vertical beam data
+
+        Parameters
+        ----------
+        pd0_bytes: bytearray
+            Bytearray of all pd0 data
+        offset: int
+            Pointer into pd0_bytes
+        data: dict
+            Dictionary containing fixed leader data
+
+        Returns
+        -------
+        vertical_beam_data: dict
+            Dictionary of decoded data
+        """
+        vertical_beam_format = (('id', '<H', 0),
+                                ('eval_amp', 'B', 2),
+                                ('rssi', 'B', 3),
+                                ('range', 'L', 4),
+                                ('status', 'B', 8))
+
+        vertical_beam_data = Pd0TRDI.unpack_bytes(pd0_bytes, vertical_beam_format, offset)
+        return vertical_beam_data
+
+    @staticmethod
+    def decode_transformation_matrix(pd0_bytes, offset, data):
+        """Decodes transformation matrix
+
+        Parameters
+        ----------
+        pd0_bytes: bytearray
+            Bytearray of all pd0 data
+        offset: int
+            Pointer into pd0_bytes
+        data: dict
+            Dictionary containing fixed leader data
+
+        Returns
+        -------
+        matrix_data: dict
+            Dictionary of decoded data
+        """
+        matrix_id_format = (('id', '<H', 0),)
+        matrix_data_format = (('element', '<h', 0),)
+
+        matrix_data = Pd0TRDI.unpack_bytes(pd0_bytes, matrix_id_format, offset)
+        matrix = []
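+        # The matrix is stored row-major as 16 two-byte signed integers and is
+        # scaled by 10000 when applied (see Inst.populate_data)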
+        for row in range(4):
+            row_list = []
+            for col in range(4):
+                offset = offset + 2
+                row_list.append(Pd0TRDI.unpack_bytes(pd0_bytes, matrix_data_format, offset)['element'])
+            matrix.append(row_list)
+        matrix_data['matrix'] = matrix
+
+        return matrix_data
+
+
+class Hdr(object):
+    """Class to hold header variables.
+
+    Attributes
+    ----------
+    bytes_per_ens: np.array(int)
+        Number of bytes in ensemble
+    data_offsets: np.array(int)
+        File offsets to the start of each data type in the ensemble
+    n_data_types: np.array(int)
+        Number of data types in ensemble
+    data_ok: np.array(int)
+
+    invalid: list
+        Leader ID that was not recognized
+    """
+
+    def __init__(self, n_ensembles, n_types):
+        """Initialize instance variables to empty arrays.
+
+        Parameters
+        ----------
+        n_ensembles: int
+            Number of ensembles
+        n_types: int
+            Number of data types
+        """
+        self.bytes_per_ens = nans(n_ensembles)
+        self.data_offsets = nans([n_ensembles, n_types])
+        self.n_data_types = nans(n_ensembles)
+        self.data_ok = nans(n_ensembles)
+        self.invalid = [''] * n_ensembles
+
+    def populate_data(self, n_ens, data):
+        """Populates the class with data for an ensemble.
+
+        Parameters
+        ----------
+        n_ens: int
+            Ensemble index
+        data: dict
+            Dictionary of all data for this ensemble
+        """
+
+        if 'header' in data:
+            self.bytes_per_ens[n_ens] = data['header']['number_of_bytes']
+            self.data_offsets[n_ens, :len(data['header']['address_offsets'])] = \
+                np.array(data['header']['address_offsets'])
+            self.n_data_types[n_ens] = data['header']['number_of_data_types']
+            self.invalid[n_ens] = data['header']['invalid']
+
+
+class Inst(object):
+    """Class to hold information about the instrument.
+
+    Attributes
+    ----------
+    beam_ang: np.array(int)
+        Angle of transducers in degrees
+    beams: np.array(int)
+        Number of beams used for velocity
+    data_type: list
+        Data type
+    firm_ver: np.array(str)
+        Firmware version
+    freq: np.array(int)
+        Frequency of ADCP in kHz
+    pat: list
+        Beam pattern
+    res_RDI: int
+        Reserved for TRDI
+    sensor_CFG: np.array(int)
+        Sensor configuration
+    xducer: list
+        Indicates if transducer is attached
+    t_matrix: np.array(float)
+        Transformation matrix
+    demod: np.array(int)
+        Demodulation code
+    serial_number: int
+        serial number of ADCP
+    """
+
+    def __init__(self, n_ensembles):
+        """Initialize instance variables.
+
+        Parameters
+        ----------
+        n_ensembles: int
+            Number of ensembles
+        """
+
+        # TODO: change n_ensembles to (ensembles,)
+        self.beam_ang = nans(n_ensembles)
+        self.beams = nans(n_ensembles)
+        self.data_type = [''] * n_ensembles
+        self.firm_ver = nans(n_ensembles)
+        self.freq = nans(n_ensembles)
+        self.pat = [''] * n_ensembles
+        self.res_RDI = 0
+        self.sensor_CFG = nans(n_ensembles)
+        self.xducer = [''] * n_ensembles
+        self.t_matrix = np.tile([np.nan], [4, 4])
+        self.demod = nans(n_ensembles)
+        self.serial_number = np.nan
+
+    def populate_data(self, i_ens, data):
+        """Populates the class with data for an ensemble.
+
+        Parameters
+        ----------
+        i_ens: int
+            Ensemble index
+        data: dict
+            Dictionary of all data for this ensemble
+        """
+
+        if 'fixed_leader' in data:
+            self.firm_ver[i_ens] = data['fixed_leader']['cpu_firmware_version'] + \
+                                   (data['fixed_leader']['cpu_firmware_revision'] / 100)
+
+            # Convert system_configuration_ls to individual bits
+            bitls = "{0:08b}".format(data['fixed_leader']['system_configuration_ls'])
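+            # "{0:08b}" renders the most significant bit first, so bit 0 of the
+            # configuration byte lands at string index 7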
+            val = int(bitls[5:], 2)
+            if val == 0:
+                self.freq[i_ens] = 75
+            elif val == 1:
+                self.freq[i_ens] = 150
+            elif val == 2:
+                self.freq[i_ens] = 300
+            elif val == 3:
+                self.freq[i_ens] = 600
+            elif val == 4:
+                self.freq[i_ens] = 1200
+            elif val == 5:
+                self.freq[i_ens] = 2400
+            else:
+                self.freq[i_ens] = np.nan
+
+            val = int(bitls[4], 2)
+            if val == 0:
+                self.pat[i_ens] = 'Concave'
+            elif val == 1:
+                self.pat[i_ens] = 'Convex'
+            else:
+                self.pat[i_ens] = 'n/a'
+
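+            # Sensor configuration occupies two bits (string indices 2-3)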
+            self.sensor_CFG[i_ens] = int(bitls[2:4], 2) + 1
+
+            val = int(bitls[1], 2)
+            if val == 0:
+                self.xducer[i_ens] = 'Not Attached'
+            elif val == 1:
+                self.xducer[i_ens] = 'Attached'
+            else:
+                self.xducer[i_ens] = 'n/a'
+
+            # Convert system_configuration_ms to individual bits
+            bitms = "{0:08b}".format(data['fixed_leader']['system_configuration_ms'])
+
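+            # Bits 0-1 (string indices 6-7) give the beam angle; bits 4-7
+            # (indices 0-3) give the beam configuration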
+            val = int(bitms[6:], 2)
+            if val == 0:
+                self.beam_ang[i_ens] = 15
+            elif val == 1:
+                self.beam_ang[i_ens] = 20
+            elif val == 2:
+                self.beam_ang[i_ens] = 30
+            elif val == 3:
+                self.beam_ang[i_ens] = np.nan
+            else:
+                self.beam_ang[i_ens] = np.nan
+
+            val = int(bitms[:4], 2)
+            if val == 4:
+                self.beams[i_ens] = 4
+            elif val == 5:
+                self.beams[i_ens] = 5
+                self.demod[i_ens] = 1
+            elif val == 15:
+                self.beams[i_ens] = 5
+                self.demod[i_ens] = 2
+            else:
+                self.beams[i_ens] = np.nan
+                self.demod[i_ens] = np.nan
+
+            if data['fixed_leader']['simulation_data_flag'] == 0:
+                self.data_type[i_ens] = 'Real'
+            else:
+                self.data_type[i_ens] = 'Simu'
+
+            self.serial_number = data['fixed_leader']['serial_number']
+
+        if 'transformation_matrix' in data:
+            self.res_RDI = 0
+            # Scale transformation matrix
+            self.t_matrix = np.array(data['transformation_matrix']['matrix']) / 10000
+
+
+class AutoMode(object):
+    """Class to hold auto configuration mode settings for each beam.
+
+    Attributes
+    ----------
+    beam_count: np.array(int)
+        Number of beams
+    Beam1: Beam
+        Object of class Beam
+    Beam2: Beam
+        Object of class Beam
+    Beam3: Beam
+        Object of class Beam
+    Beam4: Beam
+        Object of class Beam
+    Reserved: np.array
+    """
+
+    def __init__(self, n_ensembles):
+        """Initialize instance variables.
+
+        Parameters
+        ----------
+        n_ensembles: int
+            Number of ensembles
+        """
+        self.beam_count = nans(n_ensembles)
+        self.Beam1 = Beam(n_ensembles)
+        self.Beam2 = Beam(n_ensembles)
+        self.Beam3 = Beam(n_ensembles)
+        self.Beam4 = Beam(n_ensembles)
+        self.Reserved = nans(n_ensembles)
+
+    def populate_data(self, i_ens, data):
+        """Populates the class with data for an ensemble.
+
+        Parameters
+        ----------
+        i_ens: int
+            Ensemble index
+        data: dict
+            Dictionary of all data for this ensemble
+        """
+
+        if 'auto_configuration' in data:
+            self.beam_count[i_ens] = data['auto_configuration']['leader']['beam_count']
+            self.Beam1.populate_data(i_ens, data['auto_configuration']['beam_1'])
+            self.Beam2.populate_data(i_ens, data['auto_configuration']['beam_2'])
+            self.Beam3.populate_data(i_ens, data['auto_configuration']['beam_3'])
+            self.Beam4.populate_data(i_ens, data['auto_configuration']['beam_4'])
+
+
+class Beam(object):
+    """Class to hold auto configuration settings for a beam.
+
+    Attributes
+    ----------
+    mode: np.array(int)
+        Water mode
+    depth_cm: np.array(int)
+        Depth in cm
+    ping_count: np.array(int)
+        Number of pings
+    ping_type: np.array(int)
+        Type of pings
+    cell_count: np.array(int)
+        Number of cells
+    cell_size_cm: np.array(int)
+        Cell size in cm
+    cell_mid_cm: np.array(int)
+        Distance to center of cell 1 in cm
+    code_repeat: np.array(int)
+        Number of code repeats
+    trans_length_cm: np.array(int)
+        Transmit length in cm
+    lag_length_cm: np.array(int)
+        Lag length in cm
+    transmit_bw: np.array(int)
+        Transmit bandwidth
+    receive_bw: np.array(int)
+        Receive bandwidth
+    ping_interval_ms: np.array(int)
+        Time between pings in ms
+    """
+
+    def __init__(self, n_ensembles):
+        """Initialize instance variables.
+
+        Parameters
+        ----------
+        n_ensembles: int
+            Number of ensembles
+        """
+
+        self.mode = nans(n_ensembles)
+        self.depth_cm = nans(n_ensembles)
+        self.ping_count = nans(n_ensembles)
+        self.ping_type = nans(n_ensembles)
+        self.cell_count = nans(n_ensembles)
+        self.cell_size_cm = nans(n_ensembles)
+        self.cell_mid_cm = nans(n_ensembles)
+        self.code_repeat = nans(n_ensembles)
+        self.trans_length_cm = nans(n_ensembles)
+        self.lag_length_cm = nans(n_ensembles)
+        self.transmit_bw = nans(n_ensembles)
+        self.receive_bw = nans(n_ensembles)
+        self.ping_interval_ms = nans(n_ensembles)
+
+    def populate_data(self, i_ens, beam_data):
+        """Populates the class with data for an ensemble.
+
+        Parameters
+        ----------
+        i_ens: int
+            Ensemble index
+        beam_data: dict
+            Dictionary of auto configuration data for one beam
+        """
+
+        self.mode[i_ens] = beam_data['setup']
+        self.depth_cm[i_ens] = beam_data['depth']
+        self.ping_count[i_ens] = beam_data['ping_count']
+        self.ping_type[i_ens] = beam_data['ping_type']
+        self.cell_count[i_ens] = beam_data['cell_count']
+        self.cell_size_cm[i_ens] = beam_data['cell_size']
+        self.cell_mid_cm[i_ens] = beam_data['bin_1_mid']
+        self.code_repeat[i_ens] = beam_data['code_reps']
+        self.trans_length_cm[i_ens] = beam_data['transmit_length']
+        self.lag_length_cm[i_ens] = beam_data['lag_length']
+        self.transmit_bw[i_ens] = beam_data['transmit_bandwidth']
+        self.receive_bw[i_ens] = beam_data['receive_bandwidth']
+        self.ping_interval_ms[i_ens] = beam_data['min_ping_interval']
+
+
+class Bt(object):
+    """Class to hold bottom track data.
+
+    Attributes
+    ----------
+    corr: np.array(int)
+        Correlation for each beam
+    depth_m: np.array(float)
+        Depth for each beam
+    eval_amp: np.array(int)
+        Return amplitude for each beam
+    ext_depth_cm: np.array(int)
+        External depth in cm
+    pergd: np.array(int)
+        Percent good
+    rssi: np.array(int)
+        Return signal strength indicator in counts for each beam
+    vel_mps: np.array(float)
+        Velocity in m/s, rows depend on coordinate system
+    """
+
+    def __init__(self, n_ensembles, n_velocities):
+        """Initialize instance variables.
+
+        Parameters
+        ----------
+        n_ensembles: int
+            Number of ensembles
+        n_velocities: int
+            Number of velocity beams
+        """
+
+        self.corr = nans([n_velocities, n_ensembles])
+        self.depth_m = nans([n_velocities, n_ensembles])
+        self.eval_amp = nans([n_velocities, n_ensembles])
+        self.ext_depth_cm = nans(n_ensembles)
+        self.pergd = nans([n_velocities, n_ensembles])
+        self.rssi = nans([n_velocities, n_ensembles])
+        self.vel_mps = nans([n_velocities, n_ensembles])
+
+    def populate_data(self, i_ens, data):
+        """Populates the class with data for an ensemble.
+
+        Parameters
+        ----------
+        i_ens: int
+            Ensemble index
+        data: dict
+            Dictionary of all data for this ensemble
+        """
+
+        if 'bottom_track' in data:
+            # Combine LSB and MSB (MSB is in units of 2 ** 16 cm) and convert cm to m
+            self.depth_m[0:4, i_ens] = (np.squeeze(np.array(data['bottom_track']['range_lsb']).T) +
+                                        np.squeeze(np.array(data['bottom_track']['range_msb']).T) * 2 ** 16) / 100.
+            self.vel_mps[0:4, i_ens] = np.squeeze(np.array(data['bottom_track']['velocity']).T)
+            self.corr[0:4, i_ens] = np.squeeze(np.array(data['bottom_track']['correlation']).T)
+            self.eval_amp[0:4, i_ens] = np.squeeze(np.array(data['bottom_track']['amplitude']).T)
+            self.pergd[0:4, i_ens] = np.squeeze(np.array(data['bottom_track']['percent_good']).T)
+            self.rssi[0:4, i_ens] = np.squeeze(np.array(data['bottom_track']['rssi']).T)
+
+
+class Cfg(object):
+    """Class to hold configuration settings.
+
+    Attributes
+    ----------
+    ba: np.array(int)
+        Bottom track amplitude threshold
+    bc: np.array(int)
+        Bottom track correlation threshold
+    be_mmps: np.array(int)
+        Bottom track error velocity threshold
+    bg: np.array(int)
+        Bottom track percent good threshold
+    bm: np.array(int)
+        Bottom mode
+    bp: np.array(int)
+        Number of bottom pings
+    bx_dm: np.array(int)
+        Maximum tracking depth in decimeters
+    code_reps: np.array(int)
+        Number of code repetitions
+    coord_sys: np.array(str)
+        Coordinate system
+    cpu_ser_no: np.array(int)
+        CPU serial number
+    cq: np.array(int)
+        Transmit power
+    cx: np.array(int)
+        Low latency trigger
+    dist_bin1_cm: np.array(int)
+        Distance to center of bin 1 from transducer
+    ea_deg: np.array(int)
+        Heading alignment
+    eb_deg: np.array(int)
+        Heading bias
+    sensor_avail: np.array(str)
+        Sensor availability codes
+    ex: np.array(str)
+        Coordinate transformation codes
+    ez: np.array(str)
+        Sensor codes
+    head_src: np.array(str)
+        Heading source
+    lag_cm: np.array(int)
+        Lag
+    map_bins: np.array(str)
+        Bin mapping
+    n_beams: np.array(int)
+        Number of velocity beams
+    pitch_src: np.array(str)
+        Source of pitch data
+    ref_lay_end_cell: np.array(int)
+        Reference layer end
+    ref_lay_str_cell: np.array(int)
+        Reference layer start
+    roll_src: np.array(str)
+        Roll source
+    sal_src: np.array(str)
+        Salinity source
+    wm: np.array(int)
+        Water mode
+    sos_src: np.array(str)
+        Speed of sound source
+    temp_src: np.array(str)
+        Temperature source
+    tp_sec: np.array(int)
+        Time between pings
+    use_3beam: np.array(str)
+        Setting on whether to use 3-beam solutions or not
+    use_pr: np.array(str)
+        Setting to use pitch and roll or not
+    wa: np.array(int)
+        Water track amplitude threshold
+    wb: np.array(int)
+        Water track bandwidth control
+    wc: np.array(int)
+        Water track correlation threshold
+    we_mmps: np.array(int)
+        Water track error velocity threshold
+    wf_cm: np.array(int)
+        Blank after transmit
+    wg_per: np.array(int)
+        Water track percent good threshold
+    wj: np.array(int)
+        Receiver gain setting
+    wn: np.array(int)
+        Number of depth cells (bins)
+    wp: np.array(int)
+        Number of water pings
+    ws_cm: np.array(int)
+        Bin size
+    xdcr_dep_srs: np.array(str)
+        Transducer depth source
+    xmit_pulse_cm: np.array(int)
+        Transmit pulse length
+    lag_near_bottom: np.array(int)
+        Lag near bottom setting
+    """
+
+    def __init__(self, n_ensembles):
+        """Initialize instance variables.
+
+        Parameters
+        ----------
+        n_ensembles: int
+            Number of ensembles
+        """
+
+        self.ba = nans(n_ensembles)
+        self.bc = nans(n_ensembles)
+        self.be_mmps = nans(n_ensembles)
+        self.bg = nans(n_ensembles)
+        self.bm = nans(n_ensembles)
+        self.bp = nans(n_ensembles)
+        self.bx_dm = nans(n_ensembles)
+        self.code_reps = nans(n_ensembles)
+        self.coord_sys = [''] * n_ensembles
+        self.cpu_ser_no = nans([n_ensembles, 8])
+        self.cq = nans(n_ensembles)
+        self.cx = nans(n_ensembles)
+        self.dist_bin1_cm = nans(n_ensembles)
+        self.ea_deg = nans(n_ensembles)
+        self.eb_deg = nans(n_ensembles)
+        self.sensor_avail = [''] * n_ensembles
+        self.ex = [''] * n_ensembles
+        self.ez = [''] * n_ensembles
+        self.head_src = [''] * n_ensembles
+        self.lag_cm = nans(n_ensembles)
+        self.map_bins = [''] * n_ensembles
+        self.n_beams = nans(n_ensembles)
+        self.pitch_src = [''] * n_ensembles
+        self.ref_lay_end_cell = nans(n_ensembles)
+        self.ref_lay_str_cell = nans(n_ensembles)
+        self.roll_src = [''] * n_ensembles
+        self.sal_src = [''] * n_ensembles
+        self.wm = nans(n_ensembles)
+        self.sos_src = [''] * n_ensembles
+        self.temp_src = [''] * n_ensembles
+        self.tp_sec = nans(n_ensembles)
+        self.use_3beam = [''] * n_ensembles
+        self.use_pr = [''] * n_ensembles
+        self.wa = nans(n_ensembles)
+        self.wb = nans(n_ensembles)
+        self.wc = nans(n_ensembles)
+        self.we_mmps = nans(n_ensembles)
+        self.wf_cm = nans(n_ensembles)
+        self.wg_per = nans(n_ensembles)
+        self.wj = nans(n_ensembles)
+        self.wn = nans(n_ensembles)
+        self.wp = nans(n_ensembles)
+        self.ws_cm = nans(n_ensembles)
+        self.xdcr_dep_srs = [''] * n_ensembles
+        self.xmit_pulse_cm = nans(n_ensembles)
+        self.lag_near_bottom = nans(n_ensembles)
+
+    def populate_data(self, i_ens, data):
+        """Populates the class with data for an ensemble.
+
+        Parameters
+        ----------
+        i_ens: int
+            Ensemble index
+        data: dict
+            Dictionary of all data for this ensemble
+        """
+
+        if 'fixed_leader' in data:
+            self.n_beams[i_ens] = data['fixed_leader']['number_of_beams']
+            self.wn[i_ens] = data['fixed_leader']['number_of_cells']
+            self.wp[i_ens] = data['fixed_leader']['number_of_water_pings']
+            self.ws_cm[i_ens] = data['fixed_leader']['depth_cell_size']
+            self.wf_cm[i_ens] = data['fixed_leader']['blank_after_transmit']
+            self.wm[i_ens] = data['fixed_leader']['water_mode']
+            self.wc[i_ens] = data['fixed_leader']['low_correlation_threshold']
+            self.code_reps[i_ens] = data['fixed_leader']['number_of_code_repetitions']
+            self.wg_per[i_ens] = data['fixed_leader']['minimum_percentage_water_profile_pings']
+            self.we_mmps[i_ens] = data['fixed_leader']['error_velocity_threshold']
+            self.tp_sec[i_ens] = data['fixed_leader']['minutes'] * 60. + \
+                                     data['fixed_leader']['seconds'] + \
+                                     data['fixed_leader']['hundredths'] * 0.01
+
+            # Convert coordinate_transformation_process to individual bits
+            self.ex[i_ens] = "{0:08b}".format(data['fixed_leader']['coordinate_transformation_process'])
+
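+            # String indices 3-4 hold the coordinate system bits
+            # (00 beam, 01 instrument, 10 ship, 11 earth)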
+            val = int(self.ex[i_ens][3:5], 2)
+            if val == 0:
+                self.coord_sys[i_ens] = 'Beam'
+            elif val == 1:
+                self.coord_sys[i_ens] = 'Inst'
+            elif val == 2:
+                self.coord_sys[i_ens] = 'Ship'
+            elif val == 3:
+                self.coord_sys[i_ens] = 'Earth'
+            else:
+                self.coord_sys[i_ens] = "N/a"
+
+            val = int(self.ex[i_ens][5], 2)
+            if val == 0:
+                self.use_pr[i_ens] = 'No'
+            elif val == 1:
+                self.use_pr[i_ens] = 'Yes'
+            else:
+                self.use_pr[i_ens] = 'N/a'
+
+            val = int(self.ex[i_ens][6], 2)
+            if val == 0:
+                self.use_3beam[i_ens] = 'No'
+            elif val == 1:
+                self.use_3beam[i_ens] = 'Yes'
+            else:
+                self.use_3beam[i_ens] = 'N/a'
+
+            val = int(self.ex[i_ens][7], 2)
+            if val == 0:
+                self.map_bins[i_ens] = 'No'
+            elif val == 1:
+                self.map_bins[i_ens] = 'Yes'
+            else:
+                self.map_bins[i_ens] = 'N/a'
+
+            self.ea_deg[i_ens] = data['fixed_leader']['heading_alignment'] * 0.01
+            self.eb_deg[i_ens] = data['fixed_leader']['heading_bias'] * 0.01
+
+            # Convert sensor_source to individual bits
+            self.ez[i_ens] = "{0:08b}".format(data['fixed_leader']['sensor_source'])
+
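+            # Each position in the bit string selects a sensor source: [0:2] speed
+            # of sound, [2] transducer depth, [3] heading, [4] pitch, [5] roll,
+            # [6] salinity, [7] temperature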
+            val = int(self.ez[i_ens][:2], 2)
+            if val == 0:
+                self.sos_src[i_ens] = 'Manual EC'
+            elif val == 1:
+                self.sos_src[i_ens] = 'Calculated'
+            elif val == 3:
+                self.sos_src[i_ens] = 'SVSS Sensor'
+            else:
+                self.sos_src[i_ens] = 'N/a'
+
+            val = int(self.ez[i_ens][2], 2)
+            if val == 0:
+                self.xdcr_dep_srs[i_ens] = 'Manual ED'
+            elif val == 1:
+                self.xdcr_dep_srs[i_ens] = 'Sensor'
+            else:
+                self.xdcr_dep_srs[i_ens] = 'N/a'
+
+            val = int(self.ez[i_ens][3], 2)
+            if val == 0:
+                self.head_src[i_ens] = 'Manual EH'
+            elif val == 1:
+                self.head_src[i_ens] = 'Int. Sensor'
+            else:
+                self.head_src[i_ens] = 'N/a'
+
+            val = int(self.ez[i_ens][4], 2)
+            if val == 0:
+                self.pitch_src[i_ens] = 'Manual EP'
+            elif val == 1:
+                self.pitch_src[i_ens] = 'Int. Sensor'
+            else:
+                self.pitch_src[i_ens] = 'N/a'
+
+            val = int(self.ez[i_ens][5], 2)
+            if val == 0:
+                self.roll_src[i_ens] = 'Manual ER'
+            elif val == 1:
+                self.roll_src[i_ens] = 'Int. Sensor'
+            else:
+                self.roll_src[i_ens] = 'N/a'
+
+            val = int(self.ez[i_ens][6], 2)
+            if val == 0:
+                self.sal_src[i_ens] = 'Manual ES'
+            elif val == 1:
+                self.sal_src[i_ens] = 'Int. Sensor'
+            else:
+                self.sal_src[i_ens] = 'N/a'
+
+            val = int(self.ez[i_ens][7], 2)
+            if val == 0:
+                self.temp_src[i_ens] = 'Manual ET'
+            elif val == 1:
+                self.temp_src[i_ens] = 'Int. Sensor'
+            else:
+                self.temp_src[i_ens] = 'N/a'
+
+            self.sensor_avail[i_ens] = "{0:08b}".format(data['fixed_leader']['sensor_available'])
+            self.dist_bin1_cm[i_ens] = data['fixed_leader']['bin_1_distance']
+            self.xmit_pulse_cm[i_ens] = data['fixed_leader']['transmit_pulse_length']
+            self.ref_lay_str_cell[i_ens] = data['fixed_leader']['starting_depth_cell']
+            self.ref_lay_end_cell[i_ens] = data['fixed_leader']['ending_depth_cell']
+            self.wa[i_ens] = data['fixed_leader']['false_target_threshold']
+            self.cx[i_ens] = data['fixed_leader']['low_latency_trigger']
+            self.lag_cm[i_ens] = data['fixed_leader']['transmit_lag_distance']
+            self.cpu_ser_no[i_ens] = data['fixed_leader']['cpu_board_serial_number']
+            self.wb[i_ens] = data['fixed_leader']['system_bandwidth']
+            self.cq[i_ens] = data['fixed_leader']['system_power']
+
+        if 'variable_leader' in data:
+            self.lag_near_bottom[i_ens] = data['variable_leader']['lag_near_bottom']
+
+        if 'bottom_track' in data:
+            self.bp[i_ens] = data['bottom_track']['pings_per_ensemble_bp']
+            self.bc[i_ens] = data['bottom_track']['correlation_magnitude_minimum_bc']
+            self.ba[i_ens] = data['bottom_track']['evaluation_amplitude_minimum_ba']
+            self.bg[i_ens] = data['bottom_track']['percent_good_minimum_bg']
+            self.bm[i_ens] = data['bottom_track']['bottom_track_mode_bm']
+            self.be_mmps[i_ens] = data['bottom_track']['error_velocity_maximum_be']
+
+
+class Gps(object):
+    """Class to hold GPS data from WinRiver. CLASS NOT USED
+
+    Attributes
+    ----------
+    alt_m: np.array(float)
+        Altitude in meters
+    gga_diff: np.array(int)
+        Differential correction indicator
+    gga_hdop: np.array(float)
+        Horizontal dilution of precision
+    gga_n_stats: np.array(int)
+        Number of satellites
+    gga_vel_e_mps: np.array(float)
+        Velocity in east direction from GGA data
+    gga_vel_n_mps: np.array(float)
+        Velocity in north direction from GGA data
+    gsa_p_dop: np.array(int)
+        Position dilution of precision
+    gsa_sat: np.array(int)
+        Satellites
+    gsa_v_dop: np.array(float)
+        Vertical dilution of precision
+    lat_deg: np.array(float)
+        Latitude in degrees
+    long_deg: np.array(float)
+        Longitude in degrees
+    vtg_vel_e_mps: np.array(float)
+        Velocity in east direction from VTG data
+    vtg_vel_n_mps: np.array(float)
+        Velocity in north direction from VTG data
+    """
+
+    def __init__(self, n_ensembles):
+        """Initialize instance variables.
+
+        Parameters
+        ----------
+        n_ensembles: int
+            Number of ensembles
+        """
+
+        self.alt_m = nans(n_ensembles)
+        self.gga_diff = nans(n_ensembles)
+        self.gga_hdop = nans(n_ensembles)
+        self.gga_n_stats = nans(n_ensembles)
+        self.gga_vel_e_mps = nans(n_ensembles)
+        self.gga_vel_n_mps = nans(n_ensembles)
+        self.gsa_p_dop = nans(n_ensembles)
+        self.gsa_sat = nans([n_ensembles, 6])
+        self.gsa_v_dop = nans(n_ensembles)
+        self.lat_deg = nans(n_ensembles)
+        self.long_deg = nans(n_ensembles)
+        self.vtg_vel_e_mps = nans(n_ensembles)
+        self.vtg_vel_n_mps = nans(n_ensembles)
+
+
+class Gps2(object):
+    """Class to hold GPS data for WinRiver II.
+
+    Attributes
+    ----------
+    gga_delta_time: np.array(float)
+        Time between ping and gga data
+    gga_header: list
+        GGA header
+    gga_sentence: list
+        GGA sentence
+    utc: np.array(float)
+        UTC time
+    lat_deg: np.array(float)
+        Latitude in degrees
+    lat_ref: list
+        Latitude reference
+    lon_deg: np.array(float)
+        Longitude in degrees
+    lon_ref: list
+        Longitude reference
+    corr_qual: np.array(float)
+        Differential quality indicator
+    num_sats: np.array(int)
+        Number of satellites
+    hdop: np.array(float)
+        Horizontal dilution of precision
+    alt: np.array(float)
+        Altitude
+    alt_unit: list
+        Units for altitude
+    geoid: np.array(float)
+        Geoid height
+    geoid_unit: list
+        Units for geoid height
+    d_gps_age: np.array(float)
+        Age of differential correction
+    ref_stat_id: np.array(float)
+        Reference station ID
+    vtg_delta_time: np.array(float)
+        Time between ping and VTG data
+    vtg_header: list
+        VTG header
+    vtg_sentence: list
+        VTG sentence
+    course_true: np.array(float)
+        Course relative to true north
+    true_indicator: list
+        True north indicator
+    course_mag: np.array(float)
+        Course relative to magnetic north
+    mag_indicator: list
+        Magnetic north indicator
+    speed_knots: np.array(float)
+        Speed in knots
+    knots_indicator: list
+        Knots indicator
+    speed_kph: np.array(float)
+        Speed in kilometers per hour
+    kph_indicator: list
+        Kilometers per hour indicator
+    mode_indicator: list
+        Mode indicator
+    dbt_delta_time: np.array(float)
+        Time between ping and echo sounder data
+    dbt_header: list
+        Echo sounder header
+    depth_ft: np.array(float)
+        Depth in ft from echo sounder
+    ft_indicator: list
+        Feet indicator
+    depth_m: np.array(float)
+        Depth in meters from echo sounder
+    m_indicator: list
+        Meters indicator
+    depth_fath: np.array(float)
+        Depth in fathoms from echo sounder
+    fath_indicator: list
+        Fathoms indicator
+    hdt_delta_time: np.array(float)
+        Time between ping and external heading data
+    hdt_header: list
+        External heading header
+    heading_deg: np.array(float)
+        Heading in degrees from external heading
+    h_true_indicator: list
+        Heading indicator to true north
+    gga_velE_mps: np.array(float)
+        Velocity in east direction in m/s from GGA for WR
+    gga_velN_mps: np.array(float)
+        Velocity in north direction in m/s from GGA for WR
+    vtg_velE_mps: np.array(float)
+        Velocity in east direction in m/s from VTG for WR
+    vtg_velN_mps: np.array(float)
+        Velocity in north direction in m/s from VTG for WR
+    """
+
+    def __init__(self, n_ensembles, wr2):
+        """Initialize instance variables.
+
+        Parameters
+        ----------
+        n_ensembles: int
+            Number of ensembles
+        wr2: bool
+            Setting of whether data is from WR or WR2
+        """
+
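+        # Arrays allow up to 20 sentences of each type per ensemble; the
+        # *_expand methods grow them when an ensemble contains more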
+        self.gga_delta_time = np.full([n_ensembles, 20], np.nan)
+        self.gga_header = np.full([n_ensembles, 20], '      ')
+        self.gga_sentence = np.full([n_ensembles, 20], '')
+        self.utc = np.full([n_ensembles, 20], np.nan)
+        self.lat_deg = np.zeros([n_ensembles, 20])
+        self.lat_ref = np.full([n_ensembles, 20], '')
+        self.lon_deg = np.zeros([n_ensembles, 20])
+        self.lon_ref = np.full([n_ensembles, 20], '')
+        self.corr_qual = np.full([n_ensembles, 20], np.nan)
+        self.num_sats = np.full([n_ensembles, 20], np.nan)
+        self.hdop = np.full([n_ensembles, 20], np.nan)
+        self.alt = np.full([n_ensembles, 20], np.nan)
+        self.alt_unit = np.full([n_ensembles, 20], '')
+        self.geoid = np.full([n_ensembles, 20], np.nan)
+        self.geoid_unit = np.full([n_ensembles, 20], '')
+        self.d_gps_age = np.full([n_ensembles, 20], np.nan)
+        self.ref_stat_id = np.full([n_ensembles, 20], np.nan)
+        self.vtg_delta_time = np.full([n_ensembles, 20], np.nan)
+        self.vtg_header = np.full([n_ensembles, 20], '      ')
+        self.vtg_sentence = np.full([n_ensembles, 20], '')
+        self.course_true = np.full([n_ensembles, 20], np.nan)
+        self.true_indicator = np.full([n_ensembles, 20], '')
+        self.course_mag = np.full([n_ensembles, 20], np.nan)
+        self.mag_indicator = np.full([n_ensembles, 20], '')
+        self.speed_knots = np.full([n_ensembles, 20], np.nan)
+        self.knots_indicator = np.full([n_ensembles, 20], '')
+        self.speed_kph = np.zeros([n_ensembles, 20])
+        self.kph_indicator = np.full([n_ensembles, 20], '')
+        self.mode_indicator = np.full([n_ensembles, 20], '')
+        self.dbt_delta_time = np.full([n_ensembles, 20], np.nan)
+        self.dbt_header = np.full([n_ensembles, 20], '      ')
+        self.depth_ft = np.full([n_ensembles, 20], np.nan)
+        self.ft_indicator = np.full([n_ensembles, 20], '')
+        self.depth_m = np.zeros([n_ensembles, 20])
+        self.m_indicator = np.full([n_ensembles, 20], '')
+        self.depth_fath = np.full([n_ensembles, 20], np.nan)
+        self.fath_indicator = np.full([n_ensembles, 20], '')
+        self.hdt_delta_time = np.full([n_ensembles, 20], np.nan)
+        self.hdt_header = np.full([n_ensembles, 20], '      ')
+        self.heading_deg = np.full([n_ensembles, 20], np.nan)
+        self.h_true_indicator = np.full([n_ensembles, 20], '')
+
+        self.gga_velE_mps = nans(n_ensembles)
+        self.gga_velN_mps = nans(n_ensembles)
+        self.vtg_velE_mps = nans(n_ensembles)
+        self.vtg_velN_mps = nans(n_ensembles)
+
+    def populate_data(self, i_ens, data):
+        """Populates the class with data for an ensemble.
+
+        Parameters
+        ----------
+        i_ens: int
+            Ensemble index
+        data: dict
+            Dictionary of all data for this ensemble
+        """
+
+        if 'gga' in data:
+
+            # Check size and expand if needed
+            if len(data['gga']) > self.gga_delta_time.shape[1]:
+                self.gga_expand(len(data['gga']))
+
+            for n, gga_data in enumerate(data['gga']):
+                # Try implemented because of occasional garbage in data stream.
+                # This prevents a crash and data after garbage are not used, but any data before garbage is saved
+                try:
+                    self.gga_delta_time[i_ens, n] = gga_data['delta_time']
+                    self.gga_header[i_ens, n] = gga_data['header']
+                    self.utc[i_ens, n] = gga_data['utc']
+                    self.lat_deg[i_ens, n] = gga_data['lat_deg']
+                    self.lat_ref[i_ens, n] = gga_data['lat_ref']
+                    self.lon_deg[i_ens, n] = gga_data['lon_deg']
+                    self.lon_ref[i_ens, n] = gga_data['lon_ref']
+                    self.corr_qual[i_ens, n] = gga_data['corr_qual']
+                    self.num_sats[i_ens, n] = gga_data['num_sats']
+                    self.hdop[i_ens, n] = gga_data['hdop']
+                    self.alt[i_ens, n] = gga_data['alt']
+                    self.alt_unit[i_ens, n] = gga_data['alt_unit']
+                    self.geoid[i_ens, n] = gga_data['geoid']
+                    self.geoid_unit[i_ens, n] = gga_data['geoid_unit']
+                    self.d_gps_age[i_ens, n] = gga_data['d_gps_age']
+                    self.ref_stat_id[i_ens, n] = gga_data['ref_stat_id']
+                except Exception:
+                    pass
+
+        if 'vtg' in data:
+
+            # Check size and expand if needed
+            if len(data['vtg']) > self.vtg_delta_time.shape[1]:
+                self.vtg_expand(len(data['vtg']))
+
+            for n, vtg_data in enumerate(data['vtg']):
+                # Try implemented because of occasional garbage in data stream.
+                # This prevents a crash and data after garbage are not used, but any data before garbage is saved
+                try:
+                    self.vtg_delta_time[i_ens, n] = vtg_data['delta_time']
+                    self.vtg_header[i_ens, n] = vtg_data['header']
+                    self.course_true[i_ens, n] = vtg_data['course_true']
+                    self.true_indicator[i_ens, n] = vtg_data['true_indicator']
+                    self.course_mag[i_ens, n] = vtg_data['course_mag']
+                    self.mag_indicator[i_ens, n] = vtg_data['mag_indicator']
+                    self.speed_knots[i_ens, n] = vtg_data['speed_knots']
+                    self.knots_indicator[i_ens, n] = vtg_data['knots_indicator']
+                    self.speed_kph[i_ens, n] = vtg_data['speed_kph']
+                    self.kph_indicator[i_ens, n] = vtg_data['kph_indicator']
+                    self.mode_indicator[i_ens, n] = vtg_data['mode_indicator']
+                except Exception:
+                    pass
+
+        if 'ds' in data:
+
+            # Check size and expand if needed
+            if len(data['ds']) > self.dbt_delta_time.shape[1]:
+                self.dbt_expand(len(data['ds']))
+
+            for n, dbt_data in enumerate(data['ds']):
+                # Try implemented because of occasional garbage in data stream.
+                # This prevents a crash and data after garbage are not used, but any data before garbage is saved
+                try:
+                    self.dbt_delta_time[i_ens, n] = dbt_data['delta_time']
+                    self.dbt_header[i_ens, n] = dbt_data['header']
+                    self.depth_ft[i_ens, n] = dbt_data['depth_ft']
+                    self.ft_indicator[i_ens, n] = dbt_data['ft_indicator']
+                    self.depth_m[i_ens, n] = dbt_data['depth_m']
+                    self.m_indicator[i_ens, n] = dbt_data['m_indicator']
+                    self.depth_fath[i_ens, n] = dbt_data['depth_fath']
+                    self.fath_indicator[i_ens, n] = dbt_data['fath_indicator']
+                except Exception:
+                    pass
+
+        if 'ext_heading' in data:
+
+            # Check size and expand if needed
+            if len(data['ext_heading']) > self.hdt_delta_time.shape[1]:
+                self.hdt_expand(len(data['ext_heading']))
+
+            for n, hdt_data in enumerate(data['ext_heading']):
+                # Try implemented because of occasional garbage in data stream.
+                # This prevents a crash and data after garbage are not used, but any data before garbage is saved
+                try:
+                    self.hdt_delta_time[i_ens, n] = hdt_data['delta_time']
+                    self.hdt_header[i_ens, n] = hdt_data['header']
+                    self.heading_deg[i_ens, n] = hdt_data['heading_deg']
+                    self.h_true_indicator[i_ens, n] = hdt_data['h_true_indicator']
+                except Exception:
+                    pass
+
+    def gga_expand(self, n_samples):
+        """Expand arrays.
+
+        Parameters
+        ----------
+        n_samples: int
+            Desired size of array
+        """
+
+        # Determine amount of required expansion
+        n_expansion = n_samples - self.gga_delta_time.shape[1]
+        n_ensembles = self.gga_delta_time.shape[0]
+
+        # Expand arrays
+        self.gga_delta_time = np.concatenate(
+            (self.gga_delta_time, np.tile(np.nan, (n_ensembles, n_expansion))), axis=1)
+        self.utc = np.concatenate(
+            (self.utc, np.tile(np.nan, (n_ensembles, n_expansion))), axis=1)
+        self.lat_deg = np.concatenate(
+            (self.lat_deg, np.tile(np.nan, (n_ensembles, n_expansion))), axis=1)
+        self.lon_deg = np.concatenate(
+            (self.lon_deg, np.tile(np.nan, (n_ensembles, n_expansion))), axis=1)
+        self.corr_qual = np.concatenate(
+            (self.corr_qual, np.tile(np.nan, (n_ensembles, n_expansion))), axis=1)
+        self.num_sats = np.concatenate(
+            (self.num_sats, np.tile(np.nan, (n_ensembles, n_expansion))), axis=1)
+        self.hdop = np.concatenate(
+            (self.hdop, np.tile(np.nan, (n_ensembles, n_expansion))), axis=1)
+        self.alt = np.concatenate(
+            (self.alt, np.tile(np.nan, (n_ensembles, n_expansion))), axis=1)
+        self.geoid = np.concatenate(
+            (self.geoid, np.tile(np.nan, (n_ensembles, n_expansion))), axis=1)
+        self.d_gps_age = np.concatenate(
+            (self.d_gps_age, np.tile(np.nan, (n_ensembles, n_expansion))), axis=1)
+        self.ref_stat_id = np.concatenate(
+            (self.ref_stat_id, np.tile(np.nan, (n_ensembles, n_expansion))), axis=1)
+
+        self.gga_header = np.concatenate(
+            (self.gga_header, np.tile('', (n_ensembles, n_expansion))), axis=1)
+        self.geoid_unit = np.concatenate(
+            (self.geoid_unit, np.tile('', (n_ensembles, n_expansion))), axis=1)
+        self.alt_unit = np.concatenate(
+            (self.alt_unit, np.tile('', (n_ensembles, n_expansion))), axis=1)
+        self.lon_ref = np.concatenate(
+            (self.lon_ref, np.tile('', (n_ensembles, n_expansion))), axis=1)
+        self.lat_ref = np.concatenate(
+            (self.lat_ref, np.tile('', (n_ensembles, n_expansion))), axis=1)
+
+    def vtg_expand(self, n_samples):
+        """Expand arrays.
+
+        Parameters
+        ----------
+        n_samples: int
+            Desired size of array
+        """
+
+        # Determine amount of required expansion
+        n_expansion = n_samples - self.vtg_delta_time.shape[1]
+        n_ensembles = self.vtg_delta_time.shape[0]
+
+        # Expand arrays
+        self.vtg_delta_time = np.concatenate(
+            (self.vtg_delta_time, np.tile(np.nan, (n_ensembles, n_expansion))), axis=1)
+        self.course_true = np.concatenate(
+            (self.course_true, np.tile(np.nan, (n_ensembles, n_expansion))), axis=1)
+        self.course_mag = np.concatenate(
+            (self.course_mag, np.tile(np.nan, (n_ensembles, n_expansion))), axis=1)
+        self.speed_knots = np.concatenate(
+            (self.speed_knots, np.tile(np.nan, (n_ensembles, n_expansion))), axis=1)
+        self.speed_kph = np.concatenate(
+            (self.speed_kph, np.tile(np.nan, (n_ensembles, n_expansion))), axis=1)
+
+        self.kph_indicator = np.concatenate(
+            (self.kph_indicator, np.tile('', (n_ensembles, n_expansion))), axis=1)
+        self.mode_indicator = np.concatenate(
+            (self.mode_indicator, np.tile('', (n_ensembles, n_expansion))), axis=1)
+        self.vtg_header = np.concatenate(
+            (self.vtg_header, np.tile('', (n_ensembles, n_expansion))), axis=1)
+        self.true_indicator = np.concatenate(
+            (self.true_indicator, np.tile('', (n_ensembles, n_expansion))), axis=1)
+        self.mag_indicator = np.concatenate(
+            (self.mag_indicator, np.tile('', (n_ensembles, n_expansion))), axis=1)
+        self.knots_indicator = np.concatenate(
+            (self.knots_indicator, np.tile('', (n_ensembles, n_expansion))), axis=1)
+
+    def dbt_expand(self, n_samples):
+        """Expand arrays.
+
+        Parameters
+        ----------
+        n_samples: int
+            Desired size of array
+        """
+
+        # Determine amount of required expansion
+        n_expansion = n_samples - self.dbt_delta_time.shape[1]
+        n_ensembles = self.dbt_delta_time.shape[0]
+
+        # Expand arrays
+        self.dbt_delta_time = np.concatenate(
+            (self.dbt_delta_time, np.tile(np.nan, (n_ensembles, n_expansion))), axis=1)
+        self.depth_ft = np.concatenate(
+            (self.depth_ft, np.tile(np.nan, (n_ensembles, n_expansion))), axis=1)
+        self.depth_m = np.concatenate(
+            (self.depth_m, np.tile(np.nan, (n_ensembles, n_expansion))), axis=1)
+        self.depth_fath = np.concatenate(
+            (self.depth_fath, np.tile(np.nan, (n_ensembles, n_expansion))), axis=1)
+
+        self.fath_indicator = np.concatenate(
+            (self.fath_indicator, np.tile('', (n_ensembles, n_expansion))), axis=1)
+        self.dbt_header = np.concatenate(
+            (self.dbt_header, np.tile('', (n_ensembles, n_expansion))), axis=1)
+        self.ft_indicator = np.concatenate(
+            (self.ft_indicator, np.tile('', (n_ensembles, n_expansion))), axis=1)
+        self.m_indicator = np.concatenate(
+            (self.m_indicator, np.tile('', (n_ensembles, n_expansion))), axis=1)
+
+    def hdt_expand(self, n_samples):
+        """Expand arrays.
+
+        Parameters
+        ----------
+        n_samples: int
+            Desired size of array
+        """
+
+        # Determine amount of required expansion
+        n_expansion = n_samples - self.hdt_delta_time.shape[1]
+        n_ensembles = self.hdt_delta_time.shape[0]
+
+        # Expand the arrays
+        self.hdt_delta_time = np.concatenate(
+            (self.hdt_delta_time, np.tile(np.nan, (n_ensembles, n_expansion))), axis=1)
+        self.heading_deg = np.concatenate(
+            (self.heading_deg, np.tile(np.nan, (n_ensembles, n_expansion))), axis=1)
+        self.h_true_indicator = np.concatenate(
+            (self.h_true_indicator, np.tile('', (n_ensembles, n_expansion))), axis=1)
+        self.hdt_header = np.concatenate(
+            (self.hdt_header, np.tile('', (n_ensembles, n_expansion))), axis=1)
+
+
+class Nmea(object):
+    """Class to hold raw NMEA sentences.
+
+    Attributes
+    ----------
+    gga: list
+        List of GGA sentences
+    gsa: list
+        List of GSA sentences
+    vtg: list
+        List of VTG sentences
+    dbt: list
+        List of DBT sentences
+    """
+
+    def __init__(self, n_ensembles):
+        """Initialize instance variables.
+
+        Parameters
+        ----------
+        n_ensembles: int
+            Number of ensembles
+        """
+        self.gga = [''] * n_ensembles
+        self.gsa = [''] * n_ensembles
+        self.vtg = [''] * n_ensembles
+        # self.raw = ['']*n_ensembles DSM: not sure this was used
+        self.dbt = [''] * n_ensembles
+
+    def populate_data(self, i_ens, data):
+        """Populates the class with data for an ensemble.
+
+        Parameters
+        ----------
+        i_ens: int
+            Ensemble index
+        data: dict
+            Dictionary of all data for this ensemble
+        """
+
+        if 'gga_sentence' in data:
+            self.gga[i_ens] = data['gga_sentence']
+
+        if 'vtg_sentence' in data:
+            self.vtg[i_ens] = data['vtg_sentence']
+
+        if 'gsa_sentence' in data:
+            self.gsa[i_ens] = data['gsa_sentence']
+
+        if 'dbt_sentence' in data:
+            self.dbt[i_ens] = data['dbt_sentence']
+
+
+class Sensor(object):
+    """Class to hold sensor data.
+
+    Attributes
+    ----------
+    ambient_temp: np.array(int)
+        ADC ambient temperature
+    attitude_temp: np.array(int)
+        ADC attitude temperature
+    attitude: np.array(int)
+        ADC attitude
+    bit_test: np.array(int)
+        Bit test results
+    bit_test_count: np.array(int)
+        Number of fails for newer ADCPs, not used for Rio Grande
+    contam_sensor: np.array(int)
+        ADC contamination sensor
+    date: np.array(int)
+        Date
+    date_y2k: np.array(int)
+        Y2K compatible date
+    date_not_y2k: np.array(int)
+        Date not Y2K compatible
+    error_status_word: np.array(int)
+        Error status codes
+    heading_deg: np.array(float)
+        Heading to magnetic north in degrees
+    heading_std_dev_deg: np.array(float)
+        Standard deviation of headings for an ensemble
+    mpt_msc: np.array(int)
+        Minimum time prior to ping
+    num: np.array(int)
+        Ensemble number
+    num_fact: np.array(int)
+        Ensemble number MSB (rollover factor)
+    num_tot: np.array(int)
+        Total ensemble number including rollovers
+    orient: list
+        Orientation of ADCP
+    pitch_std_dev_deg: np.array(float)
+        Standard deviation of pitch for an ensemble
+    pitch_deg: np.array(float)
+        Pitch in degrees
+    pressure_neg: np.array(int)
+        ADC pressure negative
+    pressure_pos: np.array(int)
+        ADC pressure positive
+    pressure_pascal: np.array(int)
+        Pressure at transducer face in deca-pascals
+    pressure_var_pascal: np.array(int)
+        Pressure variance in deca-pascals
+    roll_std_dev_deg: np.array(float)
+        Standard deviation of roll for an ensemble
+    roll_deg: np.array(float)
+        Roll in degrees
+    salinity_ppt: np.array(int)
+        Salinity in parts per thousand
+    sos_mps: np.array(int)
+        Speed of sound in m/s
+    temperature_deg_c: np.array(float)
+        Water temperature in degrees C
+    time: np.array(int)
+        Time
+    time_y2k: np.array(int)
+        Y2K compatible time
+    xdcr_depth_dm: np.array(int)
+        Transducer depth in decimeters
+    xmit_current: np.array(int)
+        Transmit current
+    xmit_voltage: np.array(int)
+        Transmit voltage
+    vert_beam_eval_amp: np.array(int)
+        Vertical beam amplitude
+    vert_beam_RSSI_amp: np.array(int)
+        Vertical beam return signal strength indicator
+    vert_beam_range_m: np.array(float)
+        Vertical beam range in m
+    vert_beam_gain: list
+        Vertical beam gain setting
+    vert_beam_status: np.array(int)
+        Vertical beam status code
+    """
+
+    def __init__(self, n_ensembles):
+        """Initialize instance variables.
+
+        Parameters
+        ----------
+        n_ensembles: int
+            Number of ensembles
+        """
+
+        self.ambient_temp = nans(n_ensembles)
+        self.attitude_temp = nans(n_ensembles)
+        self.attitude = nans(n_ensembles)
+        self.bit_test = nans(n_ensembles)
+        self.bit_test_count = nans(n_ensembles)
+        self.contam_sensor = nans(n_ensembles)
+        self.date = nans([n_ensembles, 3])
+        self.date_y2k = nans([n_ensembles, 4])
+        self.date_not_y2k = nans([n_ensembles, 3])
+        self.error_status_word = [''] * n_ensembles
+        self.heading_deg = nans(n_ensembles)
+        self.heading_std_dev_deg = nans(n_ensembles)
+        self.mpt_msc = nans([n_ensembles, 3])
+        self.num = nans(n_ensembles)
+        self.num_fact = nans(n_ensembles)
+        self.num_tot = nans(n_ensembles)
+        self.orient = [''] * n_ensembles
+        self.pitch_std_dev_deg = nans(n_ensembles)
+        self.pitch_deg = nans(n_ensembles)
+        self.pressure_neg = nans(n_ensembles)
+        self.pressure_pos = nans(n_ensembles)
+        self.pressure_pascal = nans(n_ensembles)
+        self.pressure_var_pascal = nans(n_ensembles)
+        self.roll_std_dev_deg = nans(n_ensembles)
+        self.roll_deg = nans(n_ensembles)
+        self.salinity_ppt = nans(n_ensembles)
+        self.sos_mps = nans(n_ensembles)
+        self.temperature_deg_c = nans(n_ensembles)
+        self.time = nans([n_ensembles, 4])
+        self.time_y2k = nans([n_ensembles, 4])
+        self.xdcr_depth_dm = nans(n_ensembles)
+        self.xmit_current = nans(n_ensembles)
+        self.xmit_voltage = nans(n_ensembles)
+        self.vert_beam_eval_amp = nans(n_ensembles)
+        self.vert_beam_RSSI_amp = nans(n_ensembles)
+        self.vert_beam_range_m = nans(n_ensembles)
+        self.vert_beam_gain = [''] * n_ensembles
+        self.vert_beam_status = np.zeros(n_ensembles)
+
+    def populate_data(self, i_ens, data):
+        """Populates the class with data for an ensemble.
+
+        Parameters
+        ----------
+        i_ens: int
+            Ensemble index
+        data: dict
+            Dictionary of all data for this ensemble
+        """
+
+        if 'fixed_leader' in data and 'variable_leader' in data:
+            # Convert system_configuration_ls to 1s and 0s
+            bitls = "{0:08b}".format(data['fixed_leader']['system_configuration_ls'])
+
+            # Convert the first (most significant) bit to an integer
+            val = int(bitls[0], 2)
+            if val == 0:
+                self.orient[i_ens] = 'Down'
+            elif val == 1:
+                self.orient[i_ens] = 'Up'
+            else:
+                self.orient[i_ens] = 'n/a'
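+            # Illustrative example (value assumed): system_configuration_ls = 0b11001010
+            # yields bitls = '11001010' and bitls[0] = '1', so orient is 'Up'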
+
+            self.num[i_ens] = data['variable_leader']['ensemble_number']
+
+            # Store date and time as lists
+            self.date_not_y2k[i_ens, :] = [data['variable_leader']['rtc_year'],
+                                           data['variable_leader']['rtc_month'],
+                                           data['variable_leader']['rtc_day']]
+            self.time[i_ens, :] = [data['variable_leader']['rtc_hour'],
+                                   data['variable_leader']['rtc_minutes'],
+                                   data['variable_leader']['rtc_seconds'],
+                                   data['variable_leader']['rtc_hundredths']]
+
+            self.num_fact[i_ens] = data['variable_leader']['ensemble_number_msb']
+            self.num_tot[i_ens] = self.num[i_ens] + self.num_fact[i_ens] * 65535
+            self.bit_test[i_ens] = data['variable_leader']['bit_fault']
+            self.bit_test_count[i_ens] = data['variable_leader']['bit_count']
+            self.sos_mps[i_ens] = data['variable_leader']['speed_of_sound']
+            self.xdcr_depth_dm[i_ens] = data['variable_leader']['depth_of_transducer']
+            self.heading_deg[i_ens] = data['variable_leader']['heading'] / 100.
+            self.pitch_deg[i_ens] = data['variable_leader']['pitch'] / 100.
+            self.roll_deg[i_ens] = data['variable_leader']['roll'] / 100.
+            self.salinity_ppt[i_ens] = data['variable_leader']['salinity']
+            self.temperature_deg_c[i_ens] = data['variable_leader']['temperature'] / 100.
+            self.mpt_msc[i_ens, :] = [data['variable_leader']['mpt_minutes'],
+                                      data['variable_leader']['mpt_seconds'],
+                                      data['variable_leader']['mpt_hundredths']]
+            self.heading_std_dev_deg[i_ens] = data['variable_leader']['heading_standard_deviation']
+            self.pitch_std_dev_deg[i_ens] = data['variable_leader']['pitch_standard_deviation'] / 10.
+            self.roll_std_dev_deg[i_ens] = data['variable_leader']['roll_standard_deviation'] / 10.
+            self.xmit_current[i_ens] = data['variable_leader']['transmit_current']
+            self.xmit_voltage[i_ens] = data['variable_leader']['transmit_voltage']
+            self.ambient_temp[i_ens] = data['variable_leader']['ambient_temperature']
+            self.pressure_pos[i_ens] = data['variable_leader']['pressure_positive']
+            self.pressure_neg[i_ens] = data['variable_leader']['pressure_negative']
+            self.attitude_temp[i_ens] = data['variable_leader']['attitude_temperature']
+            self.attitude[i_ens] = data['variable_leader']['attitude']
+            self.contam_sensor[i_ens] = data['variable_leader']['contamination_sensor']
+            self.error_status_word[i_ens] = "{0:032b}".format(data['variable_leader']['error_status_word'])
+            self.pressure_pascal[i_ens] = data['variable_leader']['pressure']
+            self.pressure_var_pascal[i_ens] = data['variable_leader']['pressure_variance']
+
+            # Store Y2K date and time as lists
+            self.date_y2k[i_ens, :] = [data['variable_leader']['rtc_y2k_century'],
+                                       data['variable_leader']['rtc_y2k_year'],
+                                       data['variable_leader']['rtc_y2k_month'],
+                                       data['variable_leader']['rtc_y2k_day']]
+            self.time_y2k[i_ens, :] = [data['variable_leader']['rtc_y2k_hour'],
+                                       data['variable_leader']['rtc_y2k_minutes'],
+                                       data['variable_leader']['rtc_y2k_seconds'],
+                                       data['variable_leader']['rtc_y2k_hundredths']]
+            self.date[i_ens, :] = self.date_not_y2k[i_ens, :]
+            self.date[i_ens, 0] = self.date_y2k[i_ens, 0] * 100 + self.date_y2k[i_ens, 1]
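+            # e.g. a Y2K century of 20 and a year of 22 combine to a 4-digit year of 2022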
+
+            if 'vertical_beam' in data:
+                self.vert_beam_eval_amp[i_ens] = data['vertical_beam']['eval_amp']
+                self.vert_beam_RSSI_amp[i_ens] = data['vertical_beam']['rssi']
+                self.vert_beam_range_m[i_ens] = data['vertical_beam']['range'] / 1000
+
+                # Convert the status byte to bits: the last two bits give the
+                # status code and the sixth bit (temp[5]) sets the gain
+                temp = "{0:08b}".format(data['vertical_beam']['status'])
+                self.vert_beam_status[i_ens] = int(temp[6:], 2)
+                if temp[5] == '0':
+                    self.vert_beam_gain[i_ens] = 'L'
+                else:
+                    self.vert_beam_gain[i_ens] = 'H'
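+                # Illustrative example (status value assumed): a status byte of
+                # 0b00100001 gives temp = '00100001'; temp[5] == '0' selects
+                # gain 'L' and int(temp[6:], 2) gives a status code of 1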
+
+
+class Surface(object):
+    """Class to hold surface cell data.
+
+    Attributes
+    ----------
+    no_cells: np.array(int)
+        Number of surface cells in the ensemble
+    cell_size_cm: np.array(int)
+        Cell size in cm
+    dist_bin1_cm: np.array(int)
+        Distance to center of cell 1 in cm
+    vel_mps: np.array(float)
+        3D array of velocity data in each cell and ensemble
+    corr: np.array(int)
+        3D array of correlation data for each beam, cell, and ensemble
+    pergd: np.array(int)
+        3D array of percent good for each beam, cell, and ensemble
+    rssi: np.array(int)
+        3D array of return signal strength indicator for each beam, cell, and ensemble
+    """
+
+    def __init__(self, n_ensembles, n_velocities, max_surface_bins):
+        """Initialize instance variables.
+
+        Parameters
+        ----------
+        n_ensembles: int
+            Number of ensembles
+        n_velocities: int
+            Number of velocity beams
+        max_surface_bins: int
+            Maximum number of surface bins in an ensemble in the transect
+        """
+
+        self.no_cells = np.zeros(n_ensembles)
+        self.cell_size_cm = nans(n_ensembles)
+        self.dist_bin1_cm = nans(n_ensembles)
+        self.vel_mps = np.tile([np.nan], [n_velocities, max_surface_bins, n_ensembles])
+        self.corr = nans([n_velocities, max_surface_bins, n_ensembles])
+        self.pergd = nans([n_velocities, max_surface_bins, n_ensembles])
+        self.rssi = nans([n_velocities, max_surface_bins, n_ensembles])
+
+    def populate_data(self, i_ens, data, main_data):
+        """Populates the class with data for an ensemble.
+
+        Parameters
+        ----------
+        i_ens: int
+            Ensemble index
+        data: dict
+            Dictionary of all data for this ensemble
+        main_data: Pd0TRDI
+            Object of the Pd0TRDI class
+        """
+
+        if 'surface_leader' in data:
+            self.no_cells[i_ens] = data['surface_leader']['cell_count']
+            self.cell_size_cm[i_ens] = data['surface_leader']['cell_size']
+            self.dist_bin1_cm[i_ens] = data['surface_leader']['range_cell_1']
+
+        if 'surface_velocity' in data:
+            self.vel_mps[:main_data.n_velocities, :len(data['surface_velocity']['velocity']), i_ens] = \
+                np.array(data['surface_velocity']['velocity']).T
+
+        if 'surface_correlation' in data:
+            self.corr[:main_data.n_velocities, :len(data['surface_correlation']['correlation']), i_ens] = \
+                np.array(data['surface_correlation']['correlation']).T
+
+        if 'surface_intensity' in data:
+            self.rssi[:main_data.n_velocities, :len(data['surface_intensity']['rssi']), i_ens] = \
+                np.array(data['surface_intensity']['rssi']).T
+
+        if 'surface_percent_good' in data:
+            self.pergd[:main_data.n_velocities, :len(data['surface_percent_good']['percent_good']), i_ens] = \
+                np.array(data['surface_percent_good']['percent_good']).T
+
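+# Note on the arrays above (explanatory comment, not from the original file):
+# the surface arrays are nan-filled to [n_velocities, max_surface_bins,
+# n_ensembles], so ensembles with fewer surface cells leave trailing nans, and
+# each block read from the file as cells x beams is transposed to
+# beams x cells on assignment.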
+
+class Wt(object):
+    """Class to hold water track data.
+
+    Attributes
+    ----------
+    vel_mps: np.array(float)
+        3D array of velocity data in each cell and ensemble
+    corr: np.array(int)
+        3D array of correlation data for each beam, cell, and ensemble
+    pergd: np.array(int)
+        3D array of percent good for each beam, cell, and ensemble
+    rssi: np.array(int)
+        3D array of return signal strength indicator for each beam, cell, and ensemble
+    """
+
+    def __init__(self, n_bins, n_ensembles, n_velocities):
+        """Initialize instance variables.
+
+        Parameters
+        ----------
+        n_bins: int
+            Maximum number of bins in an ensemble in the transect
+        n_ensembles: int
+            Number of ensembles
+        n_velocities: int
+            Number of velocity beams
+        """
+
+        self.corr = nans([n_velocities, n_bins, n_ensembles])
+        self.pergd = nans([n_velocities, n_bins, n_ensembles])
+        self.rssi = nans([n_velocities, n_bins, n_ensembles])
+        self.vel_mps = nans([n_velocities, n_bins, n_ensembles])
+
+    def populate_data(self, i_ens, data, main_data):
+        """Populates the class with data for an ensemble.
+
+        Parameters
+        ----------
+        i_ens: int
+            Ensemble index
+        data: dict
+            Dictionary of all data for this ensemble
+        main_data: Pd0TRDI
+            Object of the Pd0TRDI class
+        """
+
+        if 'velocity' in data:
+            # Check size in case array needs to be expanded
+            if main_data.Cfg.wn[i_ens] > self.vel_mps.shape[1]:
+                append = np.zeros([self.vel_mps.shape[0],
+                                   int(main_data.Cfg.wn[i_ens] - self.vel_mps.shape[1]),
+                                   self.vel_mps.shape[2]])
+                self.vel_mps = np.hstack([self.vel_mps, append])
+                self.corr = np.hstack([self.corr, append])
+                self.rssi = np.hstack([self.rssi, append])
+                self.pergd = np.hstack([self.pergd, append])
+
+            # Reformat and assign data
+            if 'velocity' in data:
+                self.vel_mps[:main_data.n_velocities, :int(main_data.Cfg.wn[i_ens]), i_ens] = \
+                    np.array(data['velocity']['data']).T
+            if 'correlation' in data:
+                self.corr[:main_data.n_velocities, :int(main_data.Cfg.wn[i_ens]), i_ens] = \
+                    np.array(data['correlation']['data']).T
+            if 'echo_intensity' in data:
+                self.rssi[:main_data.n_velocities, :int(main_data.Cfg.wn[i_ens]), i_ens] = \
+                    np.array(data['echo_intensity']['data']).T
+            if 'percent_good' in data:
+                self.pergd[:main_data.n_velocities, :int(main_data.Cfg.wn[i_ens]), i_ens] = \
+                    np.array(data['percent_good']['data']).T
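+
+
+# A minimal sketch (illustrative only, not part of the original class) of the
+# bin-axis expansion performed in Wt.populate_data above: when an ensemble
+# reports more bins (wn) than currently allocated, the arrays are padded
+# along axis 1 (the original code pads with zeros via np.hstack).
+def _demo_expand_bins(arr, wn):
+    """Pad arr (beams x bins x ensembles) with zeros to at least wn bins."""
+    if wn > arr.shape[1]:
+        pad = np.zeros([arr.shape[0], int(wn - arr.shape[1]), arr.shape[2]])
+        arr = np.hstack([arr, pad])
+    return arr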
diff --git a/Classes/PreMeasurement.py b/Classes/PreMeasurement.py
new file mode 100644
index 0000000..ed2b5c5
--- /dev/null
+++ b/Classes/PreMeasurement.py
@@ -0,0 +1,430 @@
+import re
+import copy
+import numpy as np
+
+
+class PreMeasurement(object):
+    """Stores tests, calibrations, and evaluations conducted prior ot measurement.
+
+    Attributes
+    ----------
+    time_stamp: str
+        Time and date of test
+    data: str
+        Raw data from test
+    result: dict
+        Dictionary of test results. Varies by test.
+    """
+    
+    def __init__(self):
+        """Initialize instance variables."""
+
+        self.time_stamp = None
+        self.data = None
+        self.result = {}
+        
+    def populate_data(self, time_stamp, data_in, data_type):
+        """Coordinates storing of test, calibration, and evaluation data.
+
+        Parameters
+        ----------
+        time_stamp: str
+            Time and date text.
+        data_in: str
+            Raw data from test
+        data_type: str
+            Type of data: C (compass), TST (TRDI test), or SST (SonTek test)
+        """
+
+        # Store time stamp and data
+        self.time_stamp = time_stamp
+        self.data = data_in
+
+        # Process data depending on data type and store result
+        if data_type[1] == 'C':
+            self.compass_read()
+        elif data_type == 'TST':
+            self.sys_test_read()
+            self.pt3_data()
+        elif data_type == 'SST':
+            self.sys_test_read()
+
+    def compass_read(self):
+        """Method for getting compass evaluation data"""
+
+        # Match regex for compass evaluation error:
+        splits = re.split('(Total error:|Double Cycle Errors:|Error from calibration:)', self.data)
+        if len(splits) > 1:
+            error = float(re.search(r'\d+\.*\d*', splits[-1])[0])
+        else:
+            error = 'N/A'
+        self.result['compass'] = {'error': error}
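+        # Illustrative example (text assumed): if self.data contains
+        # "Error from calibration: 0.2 deg", the split isolates the trailing
+        # text and the numeric search returns error = 0.2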
+
+    @staticmethod
+    def cc_qrev_mat_in(meas_struct):
+        """Processes the Matlab data structure to obtain a list of Premeasurement objects containing compass calibration
+           data from the Matlab data structure.
+
+       Parameters
+       ----------
+       meas_struct: mat_struct
+           Matlab data structure obtained from sio.loadmat
+
+       Returns
+       -------
+       cc: list
+           List of Premeasurement data objects
+       """
+        cc = []
+        if hasattr(meas_struct, 'compassCal'):
+            if type(meas_struct.compassCal) is np.ndarray:
+                for cal in meas_struct.compassCal:
+                    pm = PreMeasurement()
+                    pm.compass_populate_from_qrev_mat(cal)
+                    cc.append(pm)
+            elif len(meas_struct.compassCal.data) > 0:
+                pm = PreMeasurement()
+                pm.compass_populate_from_qrev_mat(meas_struct.compassCal)
+                cc.append(pm)
+
+        return cc
+
+    @staticmethod
+    def ce_qrev_mat_in(meas_struct):
+        """Processes the Matlab data structure to obtain a list of Premeasurement objects containing compass evaluation
+           data from the Matlab data structure.
+
+       Parameters
+       ----------
+       meas_struct: mat_struct
+           Matlab data structure obtained from sio.loadmat
+
+       Returns
+       -------
+       ce: list
+           List of Premeasurement data objects
+       """
+        ce = []
+        if hasattr(meas_struct, 'compassEval'):
+            if type(meas_struct.compassEval) is np.ndarray:
+                for comp_eval in meas_struct.compassEval:
+                    pm = PreMeasurement()
+                    pm.compass_populate_from_qrev_mat(comp_eval)
+                    ce.append(pm)
+            elif len(meas_struct.compassEval.data) > 0:
+                pm = PreMeasurement()
+                pm.compass_populate_from_qrev_mat(meas_struct.compassEval)
+                ce.append(pm)
+        return ce
+
+    def compass_populate_from_qrev_mat(self, data_in):
+        """Populated Premeasurement instance variables with data from QRev Matlab file.
+
+        Parameters
+        ----------
+        data_in: mat_struct
+            mat_struct_object containing compass cal/eval data
+        """
+        self.data = data_in.data
+        self.time_stamp = data_in.timeStamp
+        if hasattr(data_in, 'result'):
+            self.result = {'compass': {'error': data_in.result.compass.error}}
+        else:
+            # Match regex for compass evaluation error:
+            splits = re.split('(Total error:|Double Cycle Errors:|Error from calibration:)', self.data)
+            if len(splits) > 1:
+                error = float(re.search(r'\d+\.*\d*', splits[-1])[0])
+            else:
+                error = 'N/A'
+            self.result['compass'] = {'error': error}
+            
+    def sys_test_read(self):
+        """Method for reading the system test data"""
+        if self.data is not None:
+            # Match regex for number of tests and number of failures
+            num_tests = re.findall('(Fail|FAIL|F A I L|Pass|PASS|NOT DETECTED|P A S S)', self.data)
+            num_fails = re.findall('(Fail|FAIL|F A I L)', self.data)
+
+            # Store results
+            self.result = {'sysTest': {'n_tests': len(num_tests)}}
+            self.result['sysTest']['n_failed'] = len(num_fails)
+        else:
+            self.result = {'sysTest': {'n_tests': None}}
+            self.result['sysTest']['n_failed'] = None
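+        # Illustrative example (log text assumed): data containing one "PASS"
+        # and one "FAIL" yields n_tests = 2 and n_failed = 1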
+
+    @staticmethod
+    def sys_test_qrev_mat_in(meas_struct):
+        """Processes the Matlab data structure to obtain a list of Premeasurement objects containing system test data
+           from the Matlab data structure.
+
+           Parameters
+           ----------
+           meas_struct: mat_struct
+               Matlab data structure obtained from sio.loadmat
+
+           Returns
+           -------
+           system_tst: list
+               List of Premeasurement data objects
+           """
+        system_tst = []
+        if hasattr(meas_struct, 'sysTest'):
+            try:
+                if type(meas_struct.sysTest) == np.ndarray:
+                    for test in meas_struct.sysTest:
+                        tst = PreMeasurement()
+                        tst.sys_tst_populate_from_qrev_mat(test)
+                        system_tst.append(tst)
+                elif len(meas_struct.sysTest.data) > 0:
+                    tst = PreMeasurement()
+                    tst.sys_tst_populate_from_qrev_mat(meas_struct.sysTest)
+                    system_tst.append(tst)
+            except AttributeError:
+                pass
+        return system_tst
+
+    def sys_tst_populate_from_qrev_mat(self, test_in):
+        """Populated Premeasurement instance variables with data from QRev Matlab file.
+
+        Parameters
+        ----------
+        test_in: mat_struct
+            mat_struct_object containing system test data
+        """
+        try:
+            self.data = test_in.data
+            self.time_stamp = test_in.timeStamp
+            self.result = {'sysTest': {'n_failed': test_in.result.sysTest.nFailed}}
+            self.result['sysTest']['n_tests'] = test_in.result.sysTest.nTests
+
+            if hasattr(test_in.result, 'pt3'):
+                data_types = {'corr_table': np.array([]), 'sdc': np.array([]), 'cdc': np.array([]),
+                              'noise_floor': np.array([])}
+                test_types = {'high_wide': data_types.copy(), 'high_narrow': data_types.copy(),
+                              'low_wide': data_types.copy(),
+                              'low_narrow': data_types.copy()}
+                pt3 = {'hard_limit': copy.deepcopy(test_types), 'linear': copy.deepcopy(test_types)}
+                if hasattr(test_in.result.pt3, 'hardLimit'):
+                    if hasattr(test_in.result.pt3.hardLimit, 'hw'):
+                        pt3['hard_limit']['high_wide']['corr_table'] = test_in.result.pt3.hardLimit.hw.corrTable
+                        pt3['hard_limit']['high_wide']['sdc'] = test_in.result.pt3.hardLimit.hw.sdc
+                        pt3['hard_limit']['high_wide']['cdc'] = test_in.result.pt3.hardLimit.hw.cdc
+                        pt3['hard_limit']['high_wide']['noise_floor'] = test_in.result.pt3.hardLimit.hw.noiseFloor
+                    if hasattr(test_in.result.pt3.hardLimit, 'lw'):
+                        pt3['hard_limit']['low_wide']['corr_table'] = test_in.result.pt3.hardLimit.lw.corrTable
+                        pt3['hard_limit']['low_wide']['sdc'] = test_in.result.pt3.hardLimit.lw.sdc
+                        pt3['hard_limit']['low_wide']['cdc'] = test_in.result.pt3.hardLimit.lw.cdc
+                        pt3['hard_limit']['low_wide']['noise_floor'] = test_in.result.pt3.hardLimit.lw.noiseFloor
+                    if hasattr(test_in.result.pt3.hardLimit, 'hn'):
+                        pt3['hard_limit']['high_narrow']['corr_table'] = test_in.result.pt3.hardLimit.hn.corrTable
+                        pt3['hard_limit']['high_narrow']['sdc'] = test_in.result.pt3.hardLimit.hn.sdc
+                        pt3['hard_limit']['high_narrow']['cdc'] = test_in.result.pt3.hardLimit.hn.cdc
+                        pt3['hard_limit']['high_narrow']['noise_floor'] = test_in.result.pt3.hardLimit.hn.noiseFloor
+                    if hasattr(test_in.result.pt3.hardLimit, 'ln'):
+                        pt3['hard_limit']['low_narrow']['corr_table'] = test_in.result.pt3.hardLimit.ln.corrTable
+                        pt3['hard_limit']['low_narrow']['sdc'] = test_in.result.pt3.hardLimit.ln.sdc
+                        pt3['hard_limit']['low_narrow']['cdc'] = test_in.result.pt3.hardLimit.ln.cdc
+                        pt3['hard_limit']['low_narrow']['noise_floor'] = test_in.result.pt3.hardLimit.ln.noiseFloor
+                if hasattr(test_in.result.pt3, 'linear'):
+                    if hasattr(test_in.result.pt3.linear, 'hw'):
+                        pt3['linear']['high_wide']['corr_table'] = test_in.result.pt3.linear.hw.corrTable
+                        pt3['linear']['high_wide']['noise_floor'] = test_in.result.pt3.linear.hw.noiseFloor
+                    if hasattr(test_in.result.pt3.linear, 'lw'):
+                        pt3['linear']['low_wide']['corr_table'] = test_in.result.pt3.linear.lw.corrTable
+                        pt3['linear']['low_wide']['noise_floor'] = test_in.result.pt3.linear.lw.noiseFloor
+                    if hasattr(test_in.result.pt3.linear, 'hn'):
+                        pt3['linear']['high_narrow']['corr_table'] = test_in.result.pt3.linear.hn.corrTable
+                        pt3['linear']['high_narrow']['noise_floor'] = test_in.result.pt3.linear.hn.noiseFloor
+                    if hasattr(test_in.result.pt3.linear, 'ln'):
+                        pt3['linear']['low_narrow']['corr_table'] = test_in.result.pt3.linear.ln.corrTable
+                        pt3['linear']['low_narrow']['noise_floor'] = test_in.result.pt3.linear.ln.noiseFloor
+
+                self.result['pt3'] = pt3
+        except AttributeError:
+            # Match regex for number of tests and number of failures
+            num_tests = re.findall('(Fail|FAIL|F A I L|Pass|PASS|NOT DETECTED|P A S S)', test_in.data)
+            num_fails = re.findall('(Fail|FAIL|F A I L)', test_in.data)
+
+            # Store results
+            self.result = {'sysTest': {'n_tests': len(num_tests)}}
+            self.result['sysTest']['n_failed'] = len(num_fails)
+
+    def pt3_data(self):
+        """Method for processing the data in the correlation matrices."""
+        try:
+            data_types = {'corr_table': np.array([]), 'sdc': np.array([]), 'cdc': np.array([]),
+                          'noise_floor': np.array([])}
+            test_types = {'high_wide': data_types.copy(), 'high_narrow': data_types.copy(),
+                          'low_wide': data_types.copy(),
+                          'low_narrow': data_types.copy()}
+            pt3 = {'hard_limit': copy.deepcopy(test_types), 'linear': copy.deepcopy(test_types)}
+
+            # Match regex for correlation tables
+            matches = re.findall('Lag.*?0', self.data, re.DOTALL)
+
+            # Count the number of correlation tables to process
+            correl_count = 0
+            for match in matches:
+                bm1_matches = re.findall('Bm1', match)
+                correl_count += len(bm1_matches)
+
+            # Correlation table match
+            lag_matches = re.findall(r'Lag.*?^\s*$', self.data, re.MULTILINE | re.DOTALL)
+
+            # Sin match
+            sin_match = re.findall(r'((Sin|SIN).*?^\s*$)', self.data, re.MULTILINE | re.DOTALL)[0][0]
+            sin_array = np.array(re.findall(r'\d+\.*\d*', sin_match), dtype=int)
+
+            # Cos match
+            cos_match = re.findall(r'((Cos|COS).*?^\s*$)', self.data, re.MULTILINE | re.DOTALL)[0][0]
+            cos_array = np.array(re.findall(r'\d+\.*\d*', cos_match), dtype=int)
+
+            # RSSI match
+            rssi_array = np.array([])
+            rssi_matches = re.findall(r'RSSI.*?^\s*$', self.data, re.MULTILINE | re.DOTALL)
+            for rssi_match in rssi_matches:
+                rssi_array = np.hstack((rssi_array, np.array(re.findall(r'\d+\.*\d*', rssi_match), dtype=int)))
+
+            # Process each set of correlation tables
+            for n, lag_match in enumerate(lag_matches):
+
+                # Count the Bm1 string to know how many tables to read
+                bm_count = len(re.findall('Bm1', lag_match))
+
+                # Extract the table into list
+                numbers = re.findall(r'\d+\.*\d*', lag_match)
+
+                # Create array from data in table
+                corr_data = np.array(numbers[(bm_count * 4):(bm_count * 44)],
+                                     dtype=int).reshape([8, (bm_count * 4) + 1])[:, 1::]
+
+                # Only one pt3 test. Typical of Rio Grande and Streampro
+                if bm_count == 1:
+
+                    # Assign matrix slices to corresponding variables
+                    # corr_hlimit_hgain_wband = corr_data
+                    pt3['hard_limit']['high_wide']['corr_table'] = corr_data
+                    pt3['hard_limit']['high_wide']['sdc'] = sin_array[0:4]
+                    pt3['hard_limit']['high_wide']['cdc'] = cos_array[0:4]
+                    pt3['hard_limit']['high_wide']['noise_floor'] = rssi_array[0:4]
+
+                # 4 tests arranged in groups of 2. All data are hard limited.
+                elif bm_count == 2 and correl_count == 4:
+
+                    # Hard limited wide bandwidth (n=0)
+                    if n == 0:
+
+                        pt3['hard_limit']['high_wide']['corr_table'] = corr_data[:, 0:4]
+                        pt3['hard_limit']['high_wide']['sdc'] = sin_array[n * 4: (n + 1) * 4]
+                        pt3['hard_limit']['high_wide']['cdc'] = cos_array[n * 4: (n + 1) * 4]
+                        pt3['hard_limit']['high_wide']['noise_floor'] = rssi_array[n * 4: (n + 1) * 4]
+
+                        pt3['hard_limit']['low_wide']['corr_table'] = corr_data[:, 4::]
+                        pt3['hard_limit']['low_wide']['sdc'] = sin_array[(n + 1) * 4: (n + 2) * 4]
+                        pt3['hard_limit']['low_wide']['cdc'] = cos_array[(n + 1) * 4: (n + 2) * 4]
+                        pt3['hard_limit']['low_wide']['noise_floor'] = rssi_array[(n + 1) * 4: (n + 2) * 4]
+
+                    # Hard limited narrow bandwidth (n=1)
+                    elif n == 1:
+
+                        pt3['hard_limit']['high_narrow']['corr_table'] = corr_data[:, 0:4]
+                        pt3['hard_limit']['high_narrow']['sdc'] = sin_array[(n + 1) * 4: (n + 2) * 4]
+                        pt3['hard_limit']['high_narrow']['cdc'] = cos_array[(n + 1) * 4: (n + 2) * 4]
+                        pt3['hard_limit']['high_narrow']['noise_floor'] = rssi_array[(n + 1) * 4: (n + 2) * 4]
+
+                        pt3['hard_limit']['low_narrow']['corr_table'] = corr_data[:, 4::]
+                        pt3['hard_limit']['low_narrow']['sdc'] = sin_array[(n + 2) * 4: (n + 3) * 4]
+                        pt3['hard_limit']['low_narrow']['cdc'] = cos_array[(n + 2) * 4: (n + 3) * 4]
+                        pt3['hard_limit']['low_narrow']['noise_floor'] = rssi_array[(n + 2) * 4: (n + 3) * 4]
+
+                # 8 tests arranged in sets of 2. The linear is 1st followed by the hard limit.
+                elif bm_count == 2 and correl_count == 8:
+
+                    # Hard limit bandwidth (n=0)
+                    if n == 0:
+
+                        pt3['hard_limit']['high_wide']['corr_table'] = corr_data[:, 0:4]
+                        pt3['hard_limit']['high_wide']['sdc'] = sin_array[n * 4: (n + 1) * 4]
+                        pt3['hard_limit']['high_wide']['cdc'] = cos_array[n * 4: (n + 1) * 4]
+                        pt3['hard_limit']['high_wide']['noise_floor'] = rssi_array[n * 4: (n + 1) * 4]
+
+                        pt3['hard_limit']['low_wide']['corr_table'] = corr_data[:, 4::]
+                        pt3['hard_limit']['low_wide']['sdc'] = sin_array[(n + 1) * 4: (n + 2) * 4]
+                        pt3['hard_limit']['low_wide']['cdc'] = cos_array[(n + 1) * 4: (n + 2) * 4]
+                        pt3['hard_limit']['low_wide']['noise_floor'] = rssi_array[(n + 1) * 4: (n + 2) * 4]
+
+                    # Hard limit narrow bandwidth (n=1)
+                    elif n == 1:
+
+                        pt3['hard_limit']['high_narrow']['corr_table'] = corr_data[:, 0:4]
+                        pt3['hard_limit']['high_narrow']['sdc'] = sin_array[(n + 1) * 4: (n + 2) * 4]
+                        pt3['hard_limit']['high_narrow']['cdc'] = cos_array[(n + 1) * 4: (n + 2) * 4]
+                        pt3['hard_limit']['high_narrow']['noise_floor'] = rssi_array[(n + 1) * 4: (n + 2) * 4]
+
+                        pt3['hard_limit']['low_narrow']['corr_table'] = corr_data[:, 4::]
+                        pt3['hard_limit']['low_narrow']['sdc'] = sin_array[(n + 2) * 4: (n + 3) * 4]
+                        pt3['hard_limit']['low_narrow']['cdc'] = cos_array[(n + 2) * 4: (n + 3) * 4]
+                        pt3['hard_limit']['low_narrow']['noise_floor'] = rssi_array[(n + 2) * 4: (n + 3) * 4]
+
+                    # Linear wide bandwidth (n=2)
+                    elif n == 2:
+
+                        pt3['linear']['high_wide']['corr_table'] = corr_data[:, 0:4]
+                        pt3['linear']['high_wide']['noise_floor'] = rssi_array[(n + 2) * 4: (n + 3) * 4]
+
+                        pt3['linear']['low_wide']['corr_table'] = corr_data[:, 4::]
+                        pt3['linear']['low_wide']['noise_floor'] = rssi_array[(n + 3) * 4: (n + 4) * 4]
+
+                    # Linear narrow bandwidth (n=3)
+                    elif n == 3:
+
+                        pt3['linear']['high_narrow']['corr_table'] = corr_data[:, 0:4]
+                        pt3['linear']['high_narrow']['noise_floor'] = rssi_array[(n + 3) * 4: (n + 4) * 4]
+
+                        pt3['linear']['low_narrow']['corr_table'] = corr_data[:, 4::]
+                        pt3['linear']['low_narrow']['noise_floor'] = rssi_array[(n + 4) * 4: (n + 5) * 4]
+
+                # 8 tests in groups of 4. Hard limit is the first group then the linear.
+                elif bm_count == 4:
+
+                    # Hard limit data (n=0)
+                    if n == 0:
+
+                        pt3['hard_limit']['high_wide']['corr_table'] = corr_data[:, 0:4]
+                        pt3['hard_limit']['high_wide']['sdc'] = sin_array[n * 4: (n + 1) * 4]
+                        pt3['hard_limit']['high_wide']['cdc'] = cos_array[n * 4: (n + 1) * 4]
+                        pt3['hard_limit']['high_wide']['noise_floor'] = rssi_array[n * 4: (n + 1) * 4]
+
+                        pt3['hard_limit']['low_wide']['corr_table'] = corr_data[:, 4:8]
+                        pt3['hard_limit']['low_wide']['sdc'] = sin_array[(n + 1) * 4: (n + 2) * 4]
+                        pt3['hard_limit']['low_wide']['cdc'] = cos_array[(n + 1) * 4: (n + 2) * 4]
+                        pt3['hard_limit']['low_wide']['noise_floor'] = rssi_array[(n + 1) * 4: (n + 2) * 4]
+
+                        pt3['hard_limit']['high_narrow']['corr_table'] = corr_data[:, 8:12]
+                        pt3['hard_limit']['high_narrow']['sdc'] = sin_array[(n + 2) * 4: (n + 3) * 4]
+                        pt3['hard_limit']['high_narrow']['cdc'] = cos_array[(n + 2) * 4: (n + 3) * 4]
+                        pt3['hard_limit']['high_narrow']['noise_floor'] = rssi_array[(n + 2) * 4: (n + 3) * 4]
+
+                        pt3['hard_limit']['low_narrow']['corr_table'] = corr_data[:, 12::]
+                        pt3['hard_limit']['low_narrow']['sdc'] = sin_array[(n + 3) * 4: (n + 4) * 4]
+                        pt3['hard_limit']['low_narrow']['cdc'] = cos_array[(n + 3) * 4: (n + 4) * 4]
+                        pt3['hard_limit']['low_narrow']['noise_floor'] = rssi_array[(n + 3) * 4: (n + 4) * 4]
+
+                    # Linear data (n=1)
+                    else:
+                        pt3['linear']['high_wide']['corr_table'] = corr_data[:, 0:4]
+                        pt3['linear']['high_wide']['noise_floor'] = rssi_array[(n + 3) * 4: (n + 4) * 4]
+
+                        pt3['linear']['low_wide']['corr_table'] = corr_data[:, 4:8]
+                        pt3['linear']['low_wide']['noise_floor'] = rssi_array[(n + 4) * 4: (n + 5) * 4]
+
+                        pt3['linear']['high_narrow']['corr_table'] = corr_data[:, 8:12]
+                        pt3['linear']['high_narrow']['noise_floor'] = rssi_array[(n + 5) * 4: (n + 6) * 4]
+
+                        pt3['linear']['low_narrow']['corr_table'] = corr_data[:, 12::]
+                        pt3['linear']['low_narrow']['noise_floor'] = rssi_array[(n + 6) * 4: (n + 7) * 4]
+            self.result['pt3'] = pt3
+        except Exception:
+            # PT3 tables could not be parsed; leave self.result['pt3'] unset
+            pass
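+
+
+# Note on the indexing above (explanatory comment, not from the original file):
+# each PT3 test configuration contributes four beams of SDC, CDC, and noise
+# floor values, so slices of the form [(n + k) * 4:(n + k + 1) * 4] step
+# through the flattened Sin, Cos, and RSSI arrays one configuration at a time.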
diff --git a/Classes/Python2Matlab.py b/Classes/Python2Matlab.py
new file mode 100644
index 0000000..13d9570
--- /dev/null
+++ b/Classes/Python2Matlab.py
@@ -0,0 +1,725 @@
+import numpy as np
+import pandas as pd
+import scipy.io as sio
+import copy as copy
+from Classes.PreMeasurement import PreMeasurement
+
+
+class Python2Matlab(object):
+    """Converts python meas class to QRev for Matlab structure.
+
+    Attributes
+    ----------
+    matlab_dict: dict
+        Dictionary of Matlab structures
+    """
+
+    def __init__(self, meas, checked):
+        """Initialize dictionaries and convert Python data to Matlab structures.
+
+        Parameters
+        ----------
+        meas: Measurement
+            Object of class Measurement
+        checked: list
+            Indices of the checked transects to convert
+        """
+
+        # Create Python to Matlab variable name conversion dictionary
+        py_2_mat_dict = self.create_py_2_mat_dict()
+
+        # Initialize Matlab dictionary
+        self.matlab_dict = dict()
+
+        # Apply conversion of Python data to be compatible with Matlab conventions
+        meas_mat = self.data2matlab(meas)
+
+        checked_idx = np.array(checked)
+        checked_idx_meas = np.copy(checked_idx)
+        checked_idx_meas = np.append(checked_idx_meas, len(meas_mat.extrap_fit.sel_fit) - 1)
+
+        # Convert Python data structure to Matlab
+        self.matlab_dict['stationName'] = meas_mat.station_name
+        if self.matlab_dict['stationName'] is None:
+            self.matlab_dict['stationName'] = ''
+        self.matlab_dict['stationNumber'] = meas_mat.station_number
+        if self.matlab_dict['stationNumber'] is None:
+            self.matlab_dict['stationNumber'] = ''
+        self.matlab_dict['persons'] = meas_mat.persons
+        self.matlab_dict['meas_number'] = meas_mat.meas_number
+        self.matlab_dict['stage_start_m'] = meas_mat.stage_start_m
+        self.matlab_dict['stage_end_m'] = meas_mat.stage_end_m
+        self.matlab_dict['stage_meas_m'] = meas_mat.stage_meas_m
+        self.matlab_dict['processing'] = meas_mat.processing
+        self.matlab_dict['extTempChk'] = meas_mat.ext_temp_chk
+        self.matlab_dict['userRating'] = meas_mat.user_rating
+        self.matlab_dict['initialSettings'] = meas_mat.initial_settings
+        self.matlab_dict['comments'] = self.comment2struct(meas_mat.comments)
+        self.matlab_dict['compassCal'] = self.listobj2struct(meas_mat.compass_cal, py_2_mat_dict)
+        self.matlab_dict['compassEval'] = self.listobj2struct(meas_mat.compass_eval, py_2_mat_dict)
+        self.matlab_dict['sysTest'] = self.listobj2struct(meas_mat.system_tst, py_2_mat_dict)
+        discharge = np.copy(meas_mat.discharge)
+        discharge_sel = [discharge[i] for i in checked_idx]
+        self.matlab_dict['discharge'] = self.listobj2struct(discharge_sel, py_2_mat_dict)
+        transects = np.copy(meas_mat.transects)
+        transects_sel = [transects[i] for i in checked_idx]
+        self.matlab_dict['transects'] = self.listobj2struct(transects_sel, py_2_mat_dict)
+        extrap = copy.deepcopy(meas_mat.extrap_fit)
+        self.matlab_dict['extrapFit'] = self.listobj2struct([extrap], py_2_mat_dict)
+        # Check for multiple moving-bed tests
+        if type(meas_mat.mb_tests) == list:
+            mb_tests = self.listobj2struct(meas_mat.mb_tests, py_2_mat_dict)
+        else:
+            mb_tests = self.obj2dict(meas_mat.mb_tests, py_2_mat_dict)
+        if len(mb_tests) == 0:
+            mb_tests = np.array([])
+
+        self.matlab_dict['mbTests'] = mb_tests
+
+        self.matlab_dict['observed_no_moving_bed'] = meas_mat.observed_no_moving_bed
+
+        self.matlab_dict['uncertainty'] = self.listobj2struct([meas_mat.uncertainty], py_2_mat_dict)
+        self.matlab_dict['qa'] = self.listobj2struct([meas_mat.qa], py_2_mat_dict)
+        self.matlab_dict['run_oursin'] = meas_mat.run_oursin
+        if meas_mat.oursin is not None:
+            self.matlab_dict['oursin'] = self.listobj2struct([meas_mat.oursin], py_2_mat_dict)
+
+    @staticmethod
+    def listobj2struct(list_in, new_key_dict=None):
+        """Converts a list of objects to a structured array.
+
+        Parameters
+        ----------
+        list_in: list
+            List of objects
+        new_key_dict: dict
+            Dictionary to translate python variable names to Matlab variable names
+
+        Returns
+        -------
+        struct: np.array
+            Structured array
+        """
+
+        # Verify that list_in exists
+        if list_in:
+
+            # Create data type for each variable in object
+            keys = list(vars(list_in[0]).keys())
+            data_type = []
+            for key in keys:
+                if new_key_dict is not None and key in new_key_dict:
+                    if new_key_dict[key] is None:
+                        data_type.append((np.nan, list))
+                    else:
+                        data_type.append((new_key_dict[key], list))
+                else:
+                    data_type.append((key, list))
+
+            # Create structured array based on data type and length of list
+            dt = np.dtype(data_type)
+            struct = np.zeros((len(list_in),), dt)
+
+            # Populate the structure with data from the objects
+            for n, item in enumerate(list_in):
+
+                if type(item) is list:
+                    # If item is a list apply recursion
+                    struct = Python2Matlab.listobj2struct(item, new_key_dict)
+                else:
+                    # If item is not a list convert it to a dictionary
+                    new_dict = Python2Matlab.obj2dict(item, new_key_dict)
+                    # Change name for consistency with Matlab as necessary
+                    for key in new_dict:
+                        if new_key_dict is not None and key in new_key_dict:
+                            struct[new_key_dict[key]][n] = new_dict[key]
+                        else:
+                            struct[key][n] = new_dict[key]
+        else:
+            struct = np.array([np.nan])
+
+        return struct
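+
+    # Hypothetical usage sketch (illustrative, using names from __init__ above):
+    #   struct = Python2Matlab.listobj2struct(discharge_sel, py_2_mat_dict)
+    # returns a numpy structured array with one record per object, with fields
+    # renamed per the Python-to-Matlab dictionary, suitable for sio.savemat.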
+
+    @staticmethod
+    def change_dict_keys(dict_in, new_key_dict):
+        """Recursively changes the name of dictionary keys and checks for str data types and converts them to arrays.
+
+        Parameters
+        ----------
+        dict_in: dict
+            Dictionary with keys that need a name change
+        new_key_dict: dict
+            Dictionary to cross reference existing key to new key names
+        """
+
+        dict_out = dict()
+
+        for key in dict_in:
+            # Iterate on nested dictionaries
+            if type(dict_in[key]) is dict:
+                dict_in[key] = Python2Matlab.change_dict_keys(dict_in[key], new_key_dict)
+
+            # If a list contains str variables, such as messages, convert each string to an array
+            if type(dict_in[key]) is list:
+                for line in range(len(dict_in[key])):
+                    if type(dict_in[key][line]) is list:
+                        for col in range(len(dict_in[key][line])):
+                            if type(dict_in[key][line][col]) is str:
+                                dict_in[key][line][col] = np.array([list(dict_in[key][line][col])])
+
+            # Change key if needed
+            if new_key_dict is not None and key in new_key_dict:
+                dict_out[new_key_dict[key]] = dict_in[key]
+            else:
+                dict_out[key] = dict_in[key]
+
+        return dict_out
+
+    @staticmethod
+    def obj2dict(obj, new_key_dict=None):
+        """Converts object variables to dictionaries. Works recursively to all levels of objects.
+
+        Parameters
+        ----------
+        obj: object
+            Object of some class
+        new_key_dict: dict
+            Dictionary to translate python variable names to Matlab variable names
+
+        Returns
+        -------
+        obj_dict: dict
+            Dictionary of all object variables
+        """
+        obj_dict = vars(obj)
+        new_dict = dict()
+        for key in obj_dict:
+
+            # If variable is another object convert to dictionary recursively
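+            # (objects from the Classes package stringify as "<class 'Classes. ...'>",
+            # so characters 8:13 of str(type(...)) spell 'Class' for any of them)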
+            if str(type(obj_dict[key]))[8:13] == 'Class':
+                obj_dict[key] = Python2Matlab.obj2dict(obj_dict[key], new_key_dict)
+
+            # If variable is a list of objects convert to dictionary
+            elif type(obj_dict[key]) is list and len(obj_dict[key]) > 0 \
+                    and str(type(obj_dict[key][0]))[8:13] == 'Class':
+                obj_dict[key] = Python2Matlab.listobj2struct(obj_dict[key], new_key_dict)
+
+            elif type(obj_dict[key]) is dict:
+                obj_dict[key] = Python2Matlab.change_dict_keys(obj_dict[key], new_key_dict)
+
+            elif type(obj_dict[key]) is pd.DataFrame:
+                obj_dict[key] = obj_dict[key].to_numpy()
+
+            # If variable is None rename as necessary and convert None to empty list
+            if obj_dict[key] is None:
+                if new_key_dict is not None and key in new_key_dict:
+                    new_dict[new_key_dict[key]] = []
+                else:
+                    new_dict[key] = []
+            # If variable is not None rename as necessary
+            elif new_key_dict is not None and key in new_key_dict:
+                new_dict[new_key_dict[key]] = obj_dict[key]
+            else:
+                new_dict[key] = obj_dict[key]
+
+        return new_dict
+
+    @staticmethod
+    def comment2struct(comments):
+        """Convert comments to a structure.
+
+        Parameters
+        ----------
+        comments: list
+            List of comments
+
+        Returns
+        -------
+        struct: np.ndarray
+            Array of comments
+
+        """
+        struct = np.zeros((len(comments),), dtype=object)
+        cell = np.zeros((1,), dtype=object)
+        for n, line in enumerate(comments):
+            cell[0] = line
+            struct[n] = np.copy(cell)
+        return struct
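+
+    # Note (explanatory comment, not from the original file): storing each
+    # comment in a one-element object array is intended to make
+    # scipy.io.savemat write the comments as a Matlab cell array rather than
+    # a padded character matrix.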
+
+    @staticmethod
+    def listobj2dict(list_in, new_key_dict=None):
+        """Converts list of objects to list of dictionaries. Works recursively to all levels of objects.
+
+        Parameters
+        ----------
+        list_in: list
+            List of objects of some class
+        new_key_dict: dict
+            Dictionary to translate python variable names to Matlab variable names
+
+        Returns
+        -------
+        new_list: list
+            List of dictionaries
+        """
+        new_list = []
+        for obj in list_in:
+            new_list.append(Python2Matlab.obj2dict(obj, new_key_dict))
+        return new_list
+
+    @staticmethod
+    def create_py_2_mat_dict():
+        """Creates a dictionary to cross reference Python names with Matlab names
+
+        Returns
+        -------
+        py_2_mat_dict: dict
+            Dictionary of python key to Matlab variable
+        """
+
+        py_2_mat_dict = {'Python': 'Matlab',
+                         'align_correction_deg': 'alignCorrection_deg',
+                         'altitude_ens_m': 'altitudeEns_m',
+                         'avg_method': 'avgMethod',
+                         'beam_angle_deg': 'beamAngle_deg',
+                         'beam_filter': 'beamFilter',
+                         'beam_pattern': 'beamPattern',
+                         'blanking_distance_m': 'blankingDistance_m',
+                         'boat_vel': 'boatVel',
+                         'bot_diff': 'botdiff',
+                         'bot_method': 'botMethod',
+                         'bot_method_auto': 'botMethodAuto',
+                         'bot_method_orig': 'botMethodOrig',
+                         'bot_r2': 'botrsqr',
+                         'bottom_ens': 'bottomEns',
+                         'bottom_mode': 'bottomMode',
+                         'bt_depths': 'btDepths',
+                         'bt_vel': 'btVel',
+                         'cell_depth_normalized': 'cellDepthNormalized',
+                         'cells_above_sl': 'cellsAboveSL',
+                         'cells_above_sl_bt': 'cellsAboveSLbt',
+                         'compass_cal': 'compassCal',
+                         'compass_diff_deg': 'compassDiff_deg',
+                         'compass_eval': 'compassEval',
+                         'configuration_commands': 'configurationCommands',
+                         'coord_sys': 'coordSys',
+                         'corr_table': 'corrTable',
+                         'correction_factor': 'correctionFactor',
+                         'cov_95': 'cov95',
+                         'cov_95_user': 'cov95User',
+                         'cust_coef': 'custCoef',
+                         'd_filter': 'dFilter',
+                         'd_filter_thresholds': 'dFilterThreshold',
+                         'data_extent': 'dataExtent',
+                         'data_orig': 'dataOrig',
+                         'data_type': 'dataType',
+                         'date_time': 'dateTime',
+                         'depth_beams_m': 'depthBeams_m',
+                         'depth_cell_depth_m': 'depthCellDepth_m',
+                         'depth_cell_depth_orig_m': 'depthCellDepthOrig_m',
+                         'depth_cell_size_m': 'depthCellSize_m',
+                         'depth_cell_size_orig_m': 'depthCellSizeOrig_m',
+                         'depth_depth_m': 'depthCellDepth_m',
+                         'depth_source_ens': 'depthSourceEns',
+                         'depth_freq_kHz': 'depthFreq_Hz',
+                         'depth_invalid_index': 'depthInvalidIndex',
+                         'depth_orig_m': 'depthOrig_m',
+                         'depth_processed_m': 'depthProcessed_m',
+                         'depth_source': 'depthSource',
+                         'depths': 'depths',
+                         'diff_qual_ens': 'diffQualEns',
+                         'dist_us_m': 'distUS_m',
+                         'distance_m': 'dist_m',
+                         'draft_orig_m': 'draftOrig_m',
+                         'draft_use_m': 'draftUse_m',
+                         'ds_depths': 'dsDepths',
+                         'edges_95': 'edges95',
+                         'edges_95_user': 'edges95User',
+                         'end_serial_time': 'endSerialTime',
+                         'ens_duration_sec': 'ensDuration_sec',
+                         'excluded_dist_m': 'excludedDist',
+                         'exp_method': 'expMethod',
+                         'exponent_95_ci': 'exponent95confint',
+                         'exponent_auto': 'exponentAuto',
+                         'exponent_orig': 'exponentOrig',
+                         'ext_gga_altitude_m': 'extGGAAltitude_m',
+                         'ext_gga_differential': 'extGGADifferential',
+                         'ext_gga_hdop': 'extGGAHDOP',
+                         'ext_gga_lat_deg': 'extGGALat_deg',
+                         'ext_gga_lon_deg': 'extGGALon_deg',
+                         'ext_gga_num_sats': 'extGGANumSats',
+                         'ext_gga_serial_time': 'extGGASerialTime',
+                         'ext_gga_utc': 'extGGAUTC',
+                         'ext_temp_chk': 'extTempChk',
+                         'ext_vtg_course_deg': 'extVTGCourse_deg',
+                         'ext_vtg_speed_mps': 'extVTGSpeed_mps',
+                         'extrap_fit': 'extrapFit',
+                         'extrapolation_95': 'extrapolation95',
+                         'extrapolation_95_user': 'extrapolation95User',
+                         'file_name': 'fileName',
+                         'filter_type': 'filterType',
+                         'fit_method': 'fitMethod',
+                         'fit_r2': 'fitrsqr',
+                         'flow_dir_deg': 'flowDir_deg',
+                         'flow_dir': 'flowDir_deg',
+                         'flow_spd_mps': 'flowSpd_mps',
+                         'frequency_khz': 'frequency_hz',
+                         'gga_lat_ens_deg': 'ggaLatEns_deg',
+                         'gga_lon_ens_deg': 'ggaLonEns_deg',
+                         'gga_position_method': 'ggaPositionMethod',
+                         'gga_serial_time_ens': 'ggaSerialTimeEns',
+                         'gga_vel': 'ggaVel',
+                         'gga_velocity_ens_mps': 'ggaVelocityEns_mps',
+                         'gga_velocity_method': 'ggaVelocityMethod',
+                         'gps_HDOP_filter': 'gpsHDOPFilter',
+                         'gps_HDOP_filter_change': 'gpsHDOPFilterChange',
+                         'gps_HDOP_filter_max': 'gpsHDOPFilterMax',
+                         'gps_altitude_filter': 'gpsAltitudeFilter',
+                         'gps_altitude_filter_change': 'gpsAltitudeFilterChange',
+                         'gps_diff_qual_filter': 'gpsDiffQualFilter',
+                         'hard_limit': 'hardLimit',
+                         'hdop_ens': 'hdopEns',
+                         'high_narrow': 'hn',
+                         'high_wide': 'hw',
+                         'in_transect_idx': 'inTransectIdx',
+                         'initial_settings': 'initialSettings',
+                         'int_cells': 'intCells',
+                         'int_ens': 'intEns',
+                         'interp_type': 'interpType',
+                         'interpolate_cells': 'interpolateCells',
+                         'interpolate_ens': 'interpolateEns',
+                         'invalid_95': 'invalid95',
+                         'invalid_index': 'invalidIndex',
+                         'invalid_95_user': 'invalid95User',
+                         'left_idx': 'leftidx',
+                         'low_narrow': 'ln',
+                         'low_wide': 'lw',
+                         'mag_error': 'magError',
+                         'mag_var_orig_deg': 'magVarOrig_deg',
+                         'mag_var_deg': 'magVar_deg',
+                         'man_bot': 'manBot',
+                         'man_exp': 'manExp',
+                         'man_top': 'manTop',
+                         'mb_dir': 'mbDir_deg',
+                         'mb_spd_mps': 'mbSpd_mps',
+                         'mb_tests': 'mbTests',
+                         'meas': 'meas_struct',
+                         'middle_cells': 'middleCells',
+                         'middle_ens': 'middleEns',
+                         'moving_bed': 'movingBed',
+                         'moving_bed_95': 'movingBed95',
+                         'moving_bed_95_user': 'movingBed95User',
+                         'n_failed': 'nFailed',
+                         'n_tests': 'nTests',
+                         'nav_ref': 'navRef',
+                         'near_bed_speed_mps': 'nearBedSpeed_mps',
+                         'noise_floor': 'noiseFloor',
+                         'norm_data': 'normData',
+                         'ns_exp': 'nsExponent',
+                         'ns_exponent': 'nsexponent',
+                         'num_invalid': 'numInvalid',
+                         'num_sats_ens': 'numSatsEns',
+                         'number_ensembles': 'numEns2Avg',
+                         'orig_coord_sys': 'origCoordSys',
+                         'orig_ref': 'origNavRef',
+                         'orig_nav_ref': 'origNavRef',
+                         'orig_sys': 'origCoordSys',
+                         'original_data': 'originalData',
+                         'per_good_ens': 'perGoodEns',
+                         'percent_invalid_bt': 'percentInvalidBT',
+                         'percent_mb': 'percentMB',
+                         'pitch_limit': 'pitchLimit',
+                         'pp_exp': 'ppExponent',
+                         'pp_exponent': 'ppexponent',
+                         'processed_source': 'processedSource',
+                         'q_cns_mean': 'qCNSmean',
+                         'q_cns_opt_mean': 'qCNSoptmean',
+                         'q_cns_opt_per_diff': 'qCNSoptperdiff',
+                         'q_cns_per_diff': 'qCNSperdiff',
+                         'q_man_mean': 'qManmean',
+                         'q_man_per_diff': 'qManperdiff',
+                         'q_3p_ns_mean': 'q3pNSmean',
+                         'q_3p_ns_opt_mean': 'q3pNSoptmean',
+                         'q_3p_ns_opt_per_diff': 'q3pNSoptperdiff',
+                         'q_3p_ns_per_diff': 'q3pNSperdiff',
+                         'q_pp_mean': 'qPPmean',
+                         'q_pp_opt_mean': 'qPPoptmean',
+                         'q_pp_opt_per_diff': 'qPPoptperdiff',
+                         'q_pp_per_diff': 'qPPperdiff',
+                         'q_run_threshold_caution': 'qRunThresholdCaution',
+                         'q_run_threshold_warning': 'qRunThresholdWarning',
+                         'q_sensitivity': 'qSensitivity',
+                         'q_total_threshold_caution': 'qTotalThresholdWarning',
+                         'q_total_threshold_warning': 'qTotalThresholdCaution',
+                         'raw_gga_altitude_m': 'rawGGAAltitude_m',
+                         'raw_gga_delta_time': 'rawGGADeltaTime',
+                         'raw_gga_differential': 'rawGGADifferential',
+                         'raw_gga_hdop': 'rawGGAHDOP',
+                         'raw_gga_lat_deg': 'rawGGALat_deg',
+                         'raw_gga_lon_deg': 'rawGGALon_deg',
+                         'raw_gga_serial_time': 'rawGGASerialTime',
+                         'raw_gga_utc': 'rawGGAUTC',
+                         'raw_gga_num_sats': 'rawGGANumSats',
+                         'raw_vel_mps': 'rawVel_mps',
+                         'raw_vtg_course_deg': 'rawVTGCourse_deg',
+                         'raw_vtg_delta_time': 'rawVTGDeltaTime',
+                         'raw_vtg_mode_indicator': 'rawVTGModeIndicator',
+                         'raw_vtg_speed_mps': 'rawVTGSpeed_mps',
+                         'rec_edge_method': 'recEdgeMethod',
+                         'right_idx': 'rightidx',
+                         'roll_limit': 'rollLimit',
+                         'rssi_units': 'rssiUnits',
+                         'sel_fit': 'selFit',
+                         'serial_num': 'serialNum',
+                         'sl_lag_effect_m': 'slLagEffect_m',
+                         'sl_cutoff_number': 'slCutoffNum',
+                         'sl_cutoff_percent': 'slCutoffPer',
+                         'sl_cutoff_type': 'slCutoffType',
+                         'sl_cutoff_m': 'slCutoff_m',
+                         'smooth_depth': 'smoothDepth',
+                         'smooth_filter': 'smoothFilter',
+                         'smooth_lower_limit': 'smoothLowerLimit',
+                         'smooth_speed': 'smoothSpeed',
+                         'smooth_upper_limit': 'smoothUpperLimit',
+                         'snr_filter': 'snrFilter',
+                         'speed_of_sound_mps': 'speedOfSound_mps',
+                         'snr_rng': 'snrRng',
+                         'start_edge': 'startEdge',
+                         'start_serial_time': 'startSerialTime',
+                         'station_name': 'stationName',
+                         'station_number': 'stationNumber',
+                         'stationary_cs_track': 'stationaryCSTrack',
+                         'stationary_mb_vel': 'stationaryMBVel',
+                         'stationary_us_track': 'stationaryUSTrack',
+                         'system_test': 'sysTest',
+                         'system_tst': 'systemTest',
+                         'systematic_user': 'systematicUser',
+                         't_matrix': 'tMatrix',
+                         'temperature': 'temperature',
+                         'temperature_deg_c': 'temperature_degC',
+                         'test_quality': 'testQuality',
+                         'time_stamp': 'timeStamp',
+                         'top_ens': 'topEns',
+                         'top_fit_r2': 'topfitr2',
+                         'top_max_diff': 'topmaxdiff',
+                         'top_method': 'topMethod',
+                         'top_method_auto': 'topMethodAuto',
+                         'top_method_orig': 'topMethodOrig',
+                         'top_r2': 'topr2',
+                         'total_95': 'total95',
+                         'total_uncorrected': 'totalUncorrected',
+                         'total_95_user': 'total95User',
+                         'transect_duration_sec': 'transectDuration_sec',
+                         'u_auto': 'uAuto',
+                         'u_processed_mps': 'uProcessed_mps',
+                         'u_earth_no_ref_mps': 'uEarthNoRef_mps',
+                         'unit_normalized_z': 'unitNormalizedz',
+                         'unit_normalized': 'unitNormalized',
+                         'unit_normalized_25': 'unitNormalized25',
+                         'unit_normalized_75': 'unitNormalized75',
+                         'unit_normalized_med': 'unitNormalizedMed',
+                         'unit_normalized_no': 'unitNormalizedNo',
+                         'use_2_correct': 'use2Correct',
+                         'user_discharge_cms': 'userQ_cms',
+                         'user_rating': 'userRating',
+                         'user_valid': 'userValid',
+                         'utm_ens_m': 'UTMEns_m',
+                         'v_processed_mps': 'vProcessed_mps',
+                         'v_earth_no_ref_mps': 'vEarthNoRef_mps',
+                         'valid_beams': 'validBeams',
+                         'valid_data': 'validData',
+                         'valid_data_method': 'validDataMethod',
+                         'vb_depths': 'vbDepths',
+                         'vel_method': 'velMethod',
+                         'vtg_vel': 'vtgVel',
+                         'vtg_velocity_ens_mps': 'vtgVelocityEns_mps',
+                         'vtg_velocity_method': 'vtgVelocityMethod',
+                         'w_filter': 'wFilter',
+                         'w_filter_thresholds': 'wFilterThreshold',
+                         'w_vel': 'wVel',
+                         'water_mode': 'waterMode',
+                         'wt_depth_filter': 'wtDepthFilter',
+                         'z_auto': 'zAuto',
+                         'all_invalid': 'allInvalid',
+                         'q_max_run': 'qMaxRun',
+                         'q_max_run_caution': 'qRunCaution',
+                         'q_max_run_warning': 'qRunWarning',
+                         'q_total': 'qTotal',
+                         'q_total_caution': 'qTotalCaution',
+                         'q_total_warning': 'qTotalWarning',
+                         'sta_name': 'staName',
+                         'sta_number': 'staNumber',
+                         'left_q': 'leftQ',
+                         'left_q_idx': 'leftQIdx',
+                         'right_q': 'rightQ',
+                         'right_q_idx': 'rightQIdx',
+                         'left_sign': 'leftSign',
+                         'right_sign': 'rightSign',
+                         'right_dist_moved_idx': 'rightDistMovedIdx',
+                         'left_dist_moved_idx': 'leftDistMovedIdx',
+                         'left_zero': 'leftzero',
+                         'left_zero_idx': 'leftZeroIdx',
+                         'right_zero': 'rightzero',
+                         'right_zero_idx': 'rightZeroIdx',
+                         'left_type': 'leftType',
+                         'right_type': 'rightType',
+                         'pitch_mean_warning_idx': 'pitchMeanWarningIdx',
+                         'pitch_mean_caution_idx': 'pitchMeanCautionIdx',
+                         'pitch_std_caution_idx': 'pitchStdCautionIdx',
+                         'roll_mean_warning_idx': 'rollMeanWarningIdx',
+                         'roll_mean_caution_idx': 'rollMeanCautionIdx',
+                         'roll_std_caution_idx': 'rollStdCautionIdx',
+                         'magvar_idx': 'magvarIdx',
+                         'mag_error_idx': 'magErrorIdx',
+                         'invalid_transect_left_idx': 'invalidTransLeftIdx',
+                         'invalid_transect_right_idx': 'invalidTransRightIdx',
+                         }
+        return py_2_mat_dict
+
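+    # A minimal illustration of how the mapping above is used: each Python
+    # attribute name keys the Matlab field name expected by QRev for Matlab,
+    # e.g. (lookups shown for illustration only):
+    #
+    #     py_2_mat_dict['start_serial_time']  # -> 'startSerialTime'
+    #     py_2_mat_dict['q_total_warning']    # -> 'qTotalWarning'
+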
+    @staticmethod
+    def save_matlab_file(meas, file_name, version, checked=None):
+        """Saves the measurement class and all data into a Matlab file using the variable names and structure
+        from the QRev Matlab version.
+
+        Parameters
+        ----------
+        meas: Measurement
+            Object of class Measurement
+        file_name: str
+            File name of saved Matlab file
+        version: str
+            QRev version
+        checked: list
+            Indices of the transects to be saved; defaults to all transects
+        """
+
+        if checked is None:
+            checked = list(range(len(meas.transects)))
+
+        # Convert Python objects to Matlab structure
+        mat_struct = {'meas_struct': Python2Matlab(meas, checked).matlab_dict, 'version': version}
+        sio.savemat(file_name=file_name,
+                    mdict=mat_struct,
+                    appendmat=True,
+                    format='5',
+                    long_field_names=True,
+                    do_compression=True,
+                    oned_as='row')
+
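+    # Illustrative usage (file name and version string are hypothetical):
+    # write a populated Measurement `meas` to a QRev Matlab file. Because
+    # appendmat=True, sio.savemat adds the .mat extension if it is missing.
+    #
+    #     Python2Matlab.save_matlab_file(meas, 'site_QRev', version='QRevPy 1.0')
+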
+    @staticmethod
+    def data2matlab(meas):
+        """Apply changes to the Python data to replicate QRev for Matlab conventions.
+
+        Parameters
+        ----------
+        meas: Measurement
+            object of class Measurement
+
+        Returns
+        -------
+        meas_mat: Measurement
+            Deepcopy of meas with changes to replicate QRev for Matlab conventions
+        """
+
+        # Make copy to prevent changing Python meas data
+        meas_mat = copy.deepcopy(meas)
+
+        # Process changes for each transect (reconfigure_transect modifies the
+        # transect in place)
+        for transect in meas_mat.transects:
+            Python2Matlab.reconfigure_transect(transect)
+
+        # Process changes for each moving-bed test transect
+        if len(meas.mb_tests) > 0:
+            for test in meas_mat.mb_tests:
+                test.transect = Python2Matlab.reconfigure_transect(test.transect)
+
+        # Reshape 1-D extrapolation fit arrays to 2-D column vectors
+        for fit in meas_mat.extrap_fit.sel_fit:
+            if fit.u is None:
+                fit.u = np.nan
+                fit.z = np.nan
+            else:
+                fit.u = fit.u.reshape(-1, 1)
+                fit.u_auto = fit.u_auto.reshape(-1, 1)
+                fit.z = fit.z.reshape(-1, 1)
+                fit.z_auto = fit.z_auto.reshape(-1, 1)
+
+        # Adjust norm_data indices from 0-based to 1-based
+        for dat in meas_mat.extrap_fit.norm_data:
+            dat.valid_data = dat.valid_data + 1
+
+        # If system tests, compass calibrations, or compass evaluations don't exist, create empty objects
+        if len(meas_mat.system_tst) == 0:
+            meas_mat.system_tst = [PreMeasurement()]
+        if len(meas_mat.compass_eval) == 0:
+            meas_mat.compass_eval = [PreMeasurement()]
+        if len(meas_mat.compass_cal) == 0:
+            meas_mat.compass_cal = [PreMeasurement()]
+
+        # If there is only one moving-bed test, change from a list to a single MovingBedTests object
+        if len(meas_mat.mb_tests) == 1:
+            meas_mat.mb_tests = meas_mat.mb_tests[0]
+            # Convert message to cell array for Matlab
+            if len(meas_mat.mb_tests.messages) > 0:
+                meas_mat.mb_tests.messages = np.array(meas_mat.mb_tests.messages).astype(object)
+
+        # Fix user and adcp temperature for QRev Matlab
+        if np.isnan(meas_mat.ext_temp_chk['user']):
+            meas_mat.ext_temp_chk['user'] = ''
+        if np.isnan(meas_mat.ext_temp_chk['adcp']):
+            meas_mat.ext_temp_chk['adcp'] = ''
+
+        return meas_mat
+
+    @staticmethod
+    def reconfigure_transect(transect):
+        """Changes variable names, rearranges arrays, and adjusts time for consistency with original QRev Matlab output.
+
+        Parameters
+        ----------
+        transect: TransectData
+            Object of TransectData
+
+        Returns
+        -------
+        transect: TransectData
+            Revised object of TransectData
+        """
+
+        # Change selected boat velocity identification
+        if transect.boat_vel.selected == 'bt_vel':
+            transect.boat_vel.selected = 'btVel'
+        elif transect.boat_vel.selected == 'gga_vel':
+            transect.boat_vel.selected = 'ggaVel'
+        elif transect.boat_vel.selected == 'vtg_vel':
+            transect.boat_vel.selected = 'vtgVel'
+
+        # Change selected depth identification
+        if transect.depths.selected == 'bt_depths':
+            transect.depths.selected = 'btDepths'
+        elif transect.depths.selected == 'vb_depths':
+            transect.depths.selected = 'vbDepths'
+        elif transect.depths.selected == 'ds_depths':
+            transect.depths.selected = 'dsDepths'
+
+        # Adjust in_transect_idx to be 1-based rather than 0-based
+        transect.in_transect_idx = transect.in_transect_idx + 1
+
+        # Adjust arrangement of 3-D arrays for consistency with Matlab
+        transect.w_vel.raw_vel_mps = np.moveaxis(transect.w_vel.raw_vel_mps, 0, 2)
+        transect.w_vel.corr = np.moveaxis(transect.w_vel.corr, 0, 2)
+        transect.w_vel.rssi = np.moveaxis(transect.w_vel.rssi, 0, 2)
+        transect.w_vel.valid_data = np.moveaxis(transect.w_vel.valid_data, 0, 2)
+        if len(transect.adcp.t_matrix.matrix.shape) == 3:
+            transect.adcp.t_matrix.matrix = np.moveaxis(transect.adcp.t_matrix.matrix, 2, 0)
+
+        # Reshape configuration commands to a 2-D column vector
+        if transect.adcp.configuration_commands is not None:
+            transect.adcp.configuration_commands = transect.adcp.configuration_commands.reshape(-1, 1)
+
+        # Adjust serial time to the Matlab datenum convention; 719529 is the
+        # Matlab datenum of 1970-01-01 (the Unix epoch)
+        seconds_day = 86400
+        time_correction = 719529.0000000003
+        transect.date_time.start_serial_time = (transect.date_time.start_serial_time / seconds_day) \
+            + time_correction
+        transect.date_time.end_serial_time = (transect.date_time.end_serial_time / seconds_day) + time_correction
+        return transect
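+
+
+if __name__ == '__main__':
+    # Minimal sketch (illustrative, arithmetic only) checking the serial-time
+    # conversion used in reconfigure_transect: Unix time 0 (1970-01-01 UTC)
+    # should map to the Matlab datenum of the epoch.
+    unix_time = 0.0
+    matlab_serial = unix_time / 86400 + 719529.0000000003
+    print(matlab_serial)  # ~719529.0 == datenum('01-Jan-1970') in Matlab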
diff --git a/Classes/QAData.py b/Classes/QAData.py
new file mode 100644
index 0000000..05c49d7
--- /dev/null
+++ b/Classes/QAData.py
@@ -0,0 +1,2525 @@
+import copy
+import numpy as np
+from Classes.Uncertainty import Uncertainty
+from Classes.QComp import QComp
+from Classes.MovingBedTests import MovingBedTests
+from Classes.TransectData import TransectData
+
+
+class QAData(object):
+    """Evaluates and stores quality assurance characteristics and messages.
+
+    Attributes
+    ----------
+    q_run_threshold_caution: int
+        Caution threshold for interpolated discharge for a run of invalid ensembles, in percent.
+    q_run_threshold_warning: int
+        Warning threshold for interpolated discharge for a run of invalid ensembles, in percent.
+    q_total_threshold_caution: int
+        Caution threshold for total interpolated discharge for invalid ensembles, in percent.
+    q_total_threshold_warning: int
+        Warning threshold for total interpolated discharge for invalid ensembles, in percent.
+    transects: dict
+        Dictionary of quality assurance checks for transects
+    system_tst: dict
+        Dictionary of quality assurance checks on the system test(s)
+    compass: dict
+        Dictionary of quality assurance checks on compass calibration and evaluations
+    temperature: dict
+        Dictionary of quality assurance checks on temperature comparisons and variation
+    movingbed: dict
+        Dictionary of quality assurance checks on moving-bed tests
+    user: dict
+        Dictionary of quality assurance checks on user input data
+    boat: dict
+        Dictionary of quality assurance checks on boat velocities
+    bt_vel: dict
+        Dictionary of quality assurance checks on bottom track velocities
+    gga_vel: dict
+        Dictionary of quality assurance checks on gga boat velocities
+    vtg_vel: dict
+        Dictionary of quality assurance checks on vtg boat velocities
+    w_vel: dict
+        Dictionary of quality assurance checks on water track velocities
+    extrapolation: dict
+        Dictionary of quality assurance checks on extrapolations
+    edges: dict
+        Dictionary of quality assurance checks on edges
+    """
+
+    def __init__(self, meas, mat_struct=None, compute=True):
+        """Checks the measurement for all quality assurance issues.
+
+        Parameters
+        ----------
+        meas: Measurement
+            Object of class Measurement
+        mat_struct: mat_struct
+            Matlab data structure obtained from sio.loadmat (used when compute is False)
+        compute: bool
+            If True (default), compute the QA checks; if False, populate from mat_struct
+        """
+
+        # Set default thresholds
+        self.q_run_threshold_caution = 3
+        self.q_run_threshold_warning = 5
+        self.q_total_threshold_caution = 10
+        self.q_total_threshold_warning = 25
+
+        # Initialize instance variables
+        self.transects = dict()
+        self.system_tst = dict()
+        self.compass = dict()
+        self.temperature = dict()
+        self.movingbed = dict()
+        self.user = dict()
+        self.depths = dict()
+        self.boat = dict()
+        self.bt_vel = dict()
+        self.gga_vel = dict()
+        self.vtg_vel = dict()
+        self.w_vel = dict()
+        self.extrapolation = dict()
+        self.edges = dict()
+        self.settings_dict = dict()
+        self.settings_dict['tab_compass'] = 'Default'
+        self.settings_dict['tab_tempsal'] = 'Default'
+        self.settings_dict['tab_mbt'] = 'Default'
+        self.settings_dict['tab_bt'] = 'Default'
+        self.settings_dict['tab_gps'] = 'Default'
+        self.settings_dict['tab_depth'] = 'Default'
+        self.settings_dict['tab_wt'] = 'Default'
+        self.settings_dict['tab_extrap'] = 'Default'
+        self.settings_dict['tab_edges'] = 'Default'
+
+        if compute:
+            # Apply QA checks
+            self.transects_qa(meas)
+            self.system_tst_qa(meas)
+            self.compass_qa(meas)
+            self.temperature_qa(meas)
+            self.moving_bed_qa(meas)
+            self.user_qa(meas)
+            self.depths_qa(meas)
+            self.boat_qa(meas)
+            self.water_qa(meas)
+            self.extrapolation_qa(meas)
+            self.edges_qa(meas)
+            self.check_bt_setting(meas)
+            self.check_wt_settings(meas)
+            self.check_depth_settings(meas)
+            self.check_gps_settings(meas)
+            self.check_edge_settings(meas)
+            self.check_extrap_settings(meas)
+            self.check_tempsal_settings(meas)
+            self.check_mbt_settings(meas)
+            self.check_compass_settings(meas)
+            if meas.oursin is not None:
+                self.check_oursin(meas)
+        else:
+            self.populate_from_qrev_mat(meas, mat_struct)
+
+    def populate_from_qrev_mat(self, meas, meas_struct):
+        """Populates the object using data from previously saved QRev Matlab file.
+
+        Parameters
+        ----------
+        meas: Measurement
+            Object of Measurement
+        meas_struct: mat_struct
+           Matlab data structure obtained from sio.loadmat
+        """
+
+        # Generate a new QA object using the measurement data and the current QA code.
+        # When a QA check in the current code is not available in an old QRev file,
+        # the newly computed check supplements the old file's data.
+        new_qa = QAData(meas)
+        if hasattr(meas_struct, 'qa'):
+            # Set default thresholds
+            self.q_run_threshold_caution = meas_struct.qa.qRunThresholdCaution
+            self.q_run_threshold_warning = meas_struct.qa.qRunThresholdWarning
+            if hasattr(meas_struct.qa, 'qTotalThresholdCaution'):
+                self.q_total_threshold_caution = meas_struct.qa.qTotalThresholdCaution
+            else:
+                self.q_total_threshold_caution = 10
+            self.q_total_threshold_warning = meas_struct.qa.qTotalThresholdWarning
+
+            # Initialize instance variables
+            self.transects = dict()
+            self.transects['duration'] = meas_struct.qa.transects.duration
+            self.transects['messages'] = self.make_list(meas_struct.qa.transects.messages)
+            self.transects['number'] = meas_struct.qa.transects.number
+            self.transects['recip'] = meas_struct.qa.transects.recip
+            self.transects['sign'] = meas_struct.qa.transects.sign
+            self.transects['status'] = meas_struct.qa.transects.status
+            self.transects['uncertainty'] = meas_struct.qa.transects.uncertainty
+            self.system_tst = dict()
+            self.system_tst['messages'] = self.make_list(meas_struct.qa.systemTest.messages)
+            self.system_tst['status'] = meas_struct.qa.systemTest.status
+            self.compass = dict()
+            self.compass['messages'] = self.make_list(meas_struct.qa.compass.messages)
+            self.compass['status'] = meas_struct.qa.compass.status
+            if hasattr(meas_struct.qa.compass, 'status1'):
+                self.compass['status1'] = meas_struct.qa.compass.status1
+                self.compass['status2'] = meas_struct.qa.compass.status2
+            else:
+                self.compass['status1'] = 'good'
+                self.compass['status2'] = 'good'
+
+            # If QA check not available, get check from new QA
+            if hasattr(meas_struct.qa.compass, 'magvar'):
+                self.compass['magvar'] = meas_struct.qa.compass.magvar
+            else:
+                self.compass['magvar'] = new_qa.compass['magvar']
+                self.compass['status'] = new_qa.compass['status']
+
+            # If QA check not available, get check from new QA
+            if hasattr(meas_struct.qa.compass, 'magvarIdx'):
+                self.compass['magvar_idx'] = self.make_array(meas_struct.qa.compass.magvarIdx)
+            else:
+                self.compass['magvar_idx'] = new_qa.compass['magvar_idx']
+                self.compass['status'] = new_qa.compass['status']
+
+            # Changed mag_error_idx from bool to int array in QRevPy
+            self.compass['mag_error_idx'] = new_qa.compass['mag_error_idx']
+            self.compass['status'] = new_qa.compass['status']
+
+            # If QA check not available, get check from new QA
+            if hasattr(meas_struct.qa.compass, 'pitchMeanWarningIdx'):
+                self.compass['pitch_mean_warning_idx'] = self.make_array(meas_struct.qa.compass.pitchMeanWarningIdx)
+            else:
+                self.compass['pitch_mean_warning_idx'] = new_qa.compass['pitch_mean_warning_idx']
+                self.compass['status'] = new_qa.compass['status']
+
+            # If QA check not available, get check from new QA
+            if hasattr(meas_struct.qa.compass, 'rollMeanWarningIdx'):
+                self.compass['roll_mean_warning_idx'] = self.make_array(meas_struct.qa.compass.rollMeanWarningIdx)
+            else:
+                self.compass['roll_mean_warning_idx'] = new_qa.compass['roll_mean_warning_idx']
+                self.compass['status'] = new_qa.compass['status']
+
+            # If QA check not available, get check from new QA
+            if hasattr(meas_struct.qa.compass, 'pitchMeanCautionIdx'):
+                self.compass['pitch_mean_caution_idx'] = self.make_array(meas_struct.qa.compass.pitchMeanCautionIdx)
+            else:
+                self.compass['pitch_mean_caution_idx'] = new_qa.compass['pitch_mean_caution_idx']
+                self.compass['status'] = new_qa.compass['status']
+
+            # If QA check not available, get check from new QA
+            if hasattr(meas_struct.qa.compass, 'rollMeanCautionIdx'):
+                self.compass['roll_mean_caution_idx'] = self.make_array(meas_struct.qa.compass.rollMeanCautionIdx)
+            else:
+                self.compass['roll_mean_caution_idx'] = new_qa.compass['roll_mean_caution_idx']
+                self.compass['status'] = new_qa.compass['status']
+
+            # If QA check not available, get check from new QA
+            if hasattr(meas_struct.qa.compass, 'pitchStdCautionIdx'):
+                self.compass['pitch_std_caution_idx'] = self.make_array(meas_struct.qa.compass.pitchStdCautionIdx)
+            else:
+                self.compass['pitch_std_caution_idx'] = new_qa.compass['pitch_std_caution_idx']
+                self.compass['status'] = new_qa.compass['status']
+
+            # If QA check not available, get check from new QA
+            if hasattr(meas_struct.qa.compass, 'rollStdCautionIdx'):
+                self.compass['roll_std_caution_idx'] = self.make_array(meas_struct.qa.compass.rollStdCautionIdx)
+            else:
+                self.compass['roll_std_caution_idx'] = new_qa.compass['roll_std_caution_idx']
+                self.compass['status'] = new_qa.compass['status']
+
+            self.temperature = dict()
+            self.temperature['messages'] = self.make_list(meas_struct.qa.temperature.messages)
+            self.temperature['status'] = meas_struct.qa.temperature.status
+            self.movingbed = dict()
+            self.movingbed['messages'] = self.make_list(meas_struct.qa.movingbed.messages)
+            self.movingbed['status'] = meas_struct.qa.movingbed.status
+            self.movingbed['code'] = meas_struct.qa.movingbed.code
+            self.user = dict()
+            self.user['messages'] = self.make_list(meas_struct.qa.user.messages)
+            self.user['sta_name'] = bool(meas_struct.qa.user.staName)
+            self.user['sta_number'] = bool(meas_struct.qa.user.staNumber)
+            self.user['status'] = meas_struct.qa.user.status
+
+            # If QA check not available, get check from new QA
+            self.depths = self.create_qa_dict(self, meas_struct.qa.depths)
+            if 'draft' not in self.depths:
+                self.depths['draft'] = new_qa.depths['draft']
+                self.depths['status'] = new_qa.depths['status']
+
+            if 'all_invalid' not in self.depths:
+                self.depths['all_invalid'] = new_qa.depths['all_invalid']
+                self.depths['status'] = new_qa.depths['status']
+
+            # If QA check not available, get check from new QA
+            self.bt_vel = self.create_qa_dict(self, meas_struct.qa.btVel, ndim=2)
+            if 'all_invalid' not in self.bt_vel:
+                self.bt_vel['all_invalid'] = new_qa.bt_vel['all_invalid']
+                self.bt_vel['status'] = new_qa.bt_vel['status']
+
+            # If QA check not available, get check from new QA
+            self.gga_vel = self.create_qa_dict(self, meas_struct.qa.ggaVel, ndim=2)
+            if 'all_invalid' not in self.gga_vel:
+                self.gga_vel['all_invalid'] = new_qa.gga_vel['all_invalid']
+            if 'lag_status' not in self.gga_vel:
+                self.gga_vel['lag_status'] = new_qa.gga_vel['lag_status']
+                self.gga_vel['status'] = new_qa.gga_vel['status']
+
+            # If QA check not available, get check from new QA
+            self.vtg_vel = self.create_qa_dict(self, meas_struct.qa.vtgVel, ndim=2)
+            if 'all_invalid' not in self.vtg_vel:
+                self.vtg_vel['all_invalid'] = new_qa.vtg_vel['all_invalid']
+            if 'lag_status' not in self.vtg_vel:
+                self.vtg_vel['lag_status'] = new_qa.vtg_vel['lag_status']
+                self.vtg_vel['status'] = new_qa.vtg_vel['status']
+
+            # If QA check not available, get check from new QA
+            self.w_vel = self.create_qa_dict(self, meas_struct.qa.wVel, ndim=2)
+            if 'all_invalid' not in self.w_vel:
+                self.w_vel['all_invalid'] = new_qa.w_vel['all_invalid']
+                self.w_vel['status'] = new_qa.w_vel['status']
+
+            self.extrapolation = dict()
+            self.extrapolation['messages'] = self.make_list(meas_struct.qa.extrapolation.messages)
+            self.extrapolation['status'] = meas_struct.qa.extrapolation.status
+            self.edges = dict()
+            self.edges['messages'] = self.make_list(meas_struct.qa.edges.messages)
+            self.edges['status'] = meas_struct.qa.edges.status
+            self.edges['left_q'] = meas_struct.qa.edges.leftQ
+            self.edges['right_q'] = meas_struct.qa.edges.rightQ
+            self.edges['left_sign'] = meas_struct.qa.edges.leftSign
+            self.edges['right_sign'] = meas_struct.qa.edges.rightSign
+            self.edges['left_zero'] = meas_struct.qa.edges.leftzero
+            self.edges['right_zero'] = meas_struct.qa.edges.rightzero
+            self.edges['left_type'] = meas_struct.qa.edges.leftType
+            self.edges['right_type'] = meas_struct.qa.edges.rightType
+
+            # If QA check not available, get check from new QA
+            if hasattr(meas_struct.qa.edges, 'rightDistMovedIdx'):
+                self.edges['right_dist_moved_idx'] = self.make_array(meas_struct.qa.edges.rightDistMovedIdx)
+            else:
+                self.edges['right_dist_moved_idx'] = new_qa.edges['right_dist_moved_idx']
+                self.edges['status'] = new_qa.edges['status']
+
+            # If QA check not available, get check from new QA
+            if hasattr(meas_struct.qa.edges, 'leftDistMovedIdx'):
+                self.edges['left_dist_moved_idx'] = self.make_array(meas_struct.qa.edges.leftDistMovedIdx)
+            else:
+                self.edges['left_dist_moved_idx'] = new_qa.edges['left_dist_moved_idx']
+                self.edges['status'] = new_qa.edges['status']
+
+            # If QA check not available, get check from new QA
+            if hasattr(meas_struct.qa.edges, 'leftQIdx'):
+                self.edges['left_q_idx'] = self.make_array(meas_struct.qa.edges.leftQIdx)
+            else:
+                self.edges['left_q_idx'] = new_qa.edges['left_q_idx']
+                self.edges['status'] = new_qa.edges['status']
+
+            # If QA check not available, get check from new QA
+            if hasattr(meas_struct.qa.edges, 'rightQIdx'):
+                self.edges['right_q_idx'] = self.make_array(meas_struct.qa.edges.rightQIdx)
+            else:
+                self.edges['right_q_idx'] = new_qa.edges['right_q_idx']
+                self.edges['status'] = new_qa.edges['status']
+
+            # If QA check not available, get check from new QA
+            if hasattr(meas_struct.qa.edges, 'leftZeroIdx'):
+                self.edges['left_zero_idx'] = self.make_array(meas_struct.qa.edges.leftZeroIdx)
+            else:
+                self.edges['left_zero_idx'] = new_qa.edges['left_zero_idx']
+                self.edges['status'] = new_qa.edges['status']
+
+            # If QA check not available, get check from new QA
+            if hasattr(meas_struct.qa.edges, 'rightZeroIdx'):
+                self.edges['right_zero_idx'] = self.make_array(meas_struct.qa.edges.rightZeroIdx)
+            else:
+                self.edges['right_zero_idx'] = new_qa.edges['right_zero_idx']
+                self.edges['status'] = new_qa.edges['status']
+
+            # If QA check not available, get check from new QA
+            if hasattr(meas_struct.qa.edges, 'invalid_transect_left_idx'):
+                self.edges['invalid_transect_left_idx'] = \
+                    self.make_array(meas_struct.qa.edges.invalid_transect_left_idx)
+            elif hasattr(meas_struct.qa.edges, 'invalidTransLeftIdx'):
+                self.edges['invalid_transect_left_idx'] = \
+                    self.make_array(meas_struct.qa.edges.invalidTransLeftIdx)
+            else:
+                self.edges['invalid_transect_left_idx'] = new_qa.edges['invalid_transect_left_idx']
+                self.edges['status'] = new_qa.edges['status']
+
+            # If QA check not available, get check from new QA
+            if hasattr(meas_struct.qa.edges, 'invalid_transect_right_idx'):
+                self.edges['invalid_transect_right_idx'] = \
+                    self.make_array(meas_struct.qa.edges.invalid_transect_right_idx)
+            elif hasattr(meas_struct.qa.edges, 'invalidTransRightIdx'):
+                self.edges['invalid_transect_right_idx'] = \
+                    self.make_array(meas_struct.qa.edges.invalidTransRightIdx)
+            else:
+                self.edges['invalid_transect_right_idx'] = new_qa.edges['invalid_transect_right_idx']
+                self.edges['status'] = new_qa.edges['status']
+
+            if hasattr(meas_struct.qa, 'settings_dict'):
+                self.settings_dict = dict()
+                # Use the saved setting for each tab when available; otherwise
+                # fall back to the newly computed default
+                for tab in ('tab_compass', 'tab_tempsal', 'tab_mbt', 'tab_bt',
+                            'tab_gps', 'tab_depth', 'tab_wt', 'tab_extrap',
+                            'tab_edges'):
+                    self.settings_dict[tab] = getattr(
+                        meas_struct.qa.settings_dict, tab,
+                        new_qa.settings_dict[tab])
+
+    @staticmethod
+    def create_qa_dict(self, mat_data, ndim=1):
+        """Creates the dictionary used to store QA checks associated with the percent of discharge estimated
+        by interpolation. This dictionary is used by BT, GPS, Depth, and WT.
+
+        Parameters
+        ----------
+        self: QAData
+            Object of QAData
+        mat_data: mat_struct
+            Matlab data from QRev file
+        ndim: int
+            Number of dimensions in data
+
+        Returns
+        -------
+        qa_dict: dict
+            Dictionary of QA check results
+        """
+
+        # Initialize dictionary
+        qa_dict = dict()
+
+        # Populate dictionary from Matlab data
+        qa_dict['messages'] = QAData.make_list(mat_data.messages)
+
+        # allInvalid not available in older QRev data
+        if hasattr(mat_data, 'allInvalid'):
+            qa_dict['all_invalid'] = self.make_array(mat_data.allInvalid, 1).astype(bool)
+
+        qa_dict['q_max_run_caution'] = self.make_array(mat_data.qRunCaution, ndim).astype(bool)
+        qa_dict['q_max_run_warning'] = self.make_array(mat_data.qRunWarning, ndim).astype(bool)
+        if hasattr(mat_data, 'qTotalCaution'):
+            qa_dict['q_total_caution'] = self.make_array(mat_data.qTotalCaution, ndim).astype(bool)
+        else:
+            qa_dict['q_total_caution'] = self.make_array(mat_data.qTotalWarning, ndim).astype(bool)
+        qa_dict['q_total_warning'] = self.make_array(mat_data.qTotalWarning, ndim).astype(bool)
+        qa_dict['status'] = mat_data.status
+
+        # q_max_run and q_total not available in older QRev data
+        try:
+            qa_dict['q_max_run'] = self.make_array(mat_data.qMaxRun, ndim)
+            qa_dict['q_total'] = self.make_array(mat_data.qTotal, ndim)
+        except AttributeError:
+            qa_dict['q_max_run'] = np.tile(np.nan, (len(mat_data.qRunCaution), 6))
+            qa_dict['q_total'] = np.tile(np.nan, (len(mat_data.qRunCaution), 6))
+        return qa_dict
+
+    @staticmethod
+    def make_array(num_in, ndim=1):
+        """Ensures that num_in is an array and if not makes it an array.
+
+        num_in: any
+            Any value or array
+        """
+
+        if type(num_in) is np.ndarray:
+            if len(num_in.shape) < 2 and ndim > 1:
+                num_in = np.reshape(num_in, (1, num_in.shape[0]))
+                return num_in
+            else:
+                return num_in
+        else:
+            return np.array([num_in])
+
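+    # Illustrative behavior of make_array (values hypothetical):
+    #
+    #     QAData.make_array(5)                         # -> array([5])
+    #     QAData.make_array(np.array([1, 2]), ndim=2)  # -> array([[1, 2]])
+    #     QAData.make_array(np.array([[1], [2]]))      # returned unchanged
+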
+    @staticmethod
+    def make_list(array_in):
+        """Converts a string or array to a list.
+
+        Parameters
+        ----------
+        array_in: any
+            Data to be converted to list.
+
+        Returns
+        -------
+        list_out: list
+            List of array_in data
+        """
+
+        list_out = []
+        # Convert string to list
+        if type(array_in) is str:
+            list_out = [array_in]
+        else:
+            # Empty array
+            if array_in.size == 0:
+                list_out = []
+            # Single message with integer codes at end
+            elif array_in.size == 3:
+                if type(array_in[1]) is int or len(array_in[1].strip()) == 1:
+                    temp = array_in.tolist()
+                    if len(temp) > 0:
+                        # Wrap the single message (text, code, level) in a list
+                        list_out = [temp]
+                else:
+                    list_out = array_in.tolist()
+            # Either multiple messages with or without integer codes
+            else:
+                list_out = array_in.tolist()
+
+        return list_out
+
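+    # Illustrative behavior of make_list (messages hypothetical):
+    #
+    #     QAData.make_list('COMPASS: No compass calibration;')
+    #         -> ['COMPASS: No compass calibration;']
+    #     QAData.make_list(np.array(['Transects: Only one transect;', 2, 0],
+    #                               dtype=object))
+    #         -> [['Transects: Only one transect;', 2, 0]]
+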
+    def transects_qa(self, meas):
+        """Apply quality checks to transects
+
+        Parameters
+        ----------
+        meas: Measurement
+            Object of class Measurement
+        """
+
+        # Assume good results
+        self.transects['status'] = 'good'
+
+        # Initialize keys
+        self.transects['messages'] = []
+        self.transects['recip'] = 0
+        self.transects['sign'] = 0
+        self.transects['duration'] = 0
+        self.transects['number'] = 0
+        self.transects['uncertainty'] = 0
+
+        # Initialize lists
+        checked = []
+        discharges = []
+        start_edge = []
+
+        # Populate lists
+        for n in range(len(meas.transects)):
+            checked.append(meas.transects[n].checked)
+            if meas.transects[n].checked:
+                discharges.append(meas.discharge[n])
+                start_edge.append(meas.transects[n].start_edge)
+
+        num_checked = np.nansum(np.asarray(checked))
+
+        # Check duration
+        total_duration = 0
+        if num_checked >= 1:
+            for transect in meas.transects:
+                if transect.checked:
+                    total_duration += transect.date_time.transect_duration_sec
+
+        # Check duration against USGS policy
+        if total_duration < meas.min_duration:
+            self.transects['status'] = 'caution'
+            text = 'Transects: Duration of selected transects is less than ' + str(meas.min_duration) + ' seconds;'
+            self.transects['messages'].append([text, 2, 0])
+            self.transects['duration'] = 1
+
+        # Check transects for missing ensembles
+        for transect in meas.transects:
+            if transect.checked:
+
+                # Determine number of missing ensembles
+                if transect.adcp.manufacturer == 'SonTek':
+                    # Determine number of missing ensembles for SonTek data
+                    idx_missing = np.where(transect.date_time.ens_duration_sec > 1.5)[0]
+                    if len(idx_missing) > 0:
+                        average_ensemble_duration = (np.nansum(transect.date_time.ens_duration_sec)
+                                                     - np.nansum(transect.date_time.ens_duration_sec[idx_missing])) \
+                                                     / (len(transect.date_time.ens_duration_sec) - len(idx_missing))
+                        num_missing = np.round(np.nansum(transect.date_time.ens_duration_sec[idx_missing])
+                                               / average_ensemble_duration) - len(idx_missing)
+                    else:
+                        num_missing = 0
+                else:
+                    # Determine number of lost ensembles for TRDI data
+                    idx_missing = np.where(np.isnan(transect.date_time.ens_duration_sec))[0]
+                    num_missing = len(idx_missing) - 1
+
+                # Save caution message
+                if num_missing > 0:
+                    self.transects['messages'].append(['Transects: ' + str(transect.file_name) + ' is missing '
+                                                       + str(int(num_missing)) + ' ensembles;', 2, 0])
+                    self.transects['status'] = 'caution'
+
+        # Check number of transects checked
+        if num_checked == 0:
+            # No transects selected
+            self.transects['status'] = 'warning'
+            self.transects['messages'].append(['TRANSECTS: No transects selected;', 1, 0])
+            self.transects['number'] = 2
+        elif num_checked == 1:
+            # Only one transect selected
+            self.transects['status'] = 'caution'
+            self.transects['messages'].append(['Transects: Only one transect selected;', 2, 0])
+            self.transects['number'] = 2
+        else:
+            self.transects['number'] = num_checked
+            if num_checked == 2:
+                # Only 2 transects selected
+                cov, _ = Uncertainty.uncertainty_q_random(discharges, 'total')
+                # Check uncertainty
+                if cov > 2:
+                    self.transects['status'] = 'caution'
+                    self.transects['messages'].append(
+                        ['Transects: Uncertainty would be reduced by additional transects;', 2, 0])
+
+            if num_checked < meas.min_transects:
+                self.transects['status'] = 'caution'
+                text = 'Transects: Number of transects is below the required minimum of ' \
+                       + str(meas.min_transects) + ';'
+                self.transects['messages'].append([text, 2, 0])
+
+            # Check for consistent sign
+            q_positive = []
+            for q in discharges:
+                if q.total >= 0:
+                    q_positive.append(True)
+                else:
+                    q_positive.append(False)
+            if len(np.unique(q_positive)) > 1:
+                self.transects['status'] = 'warning'
+                self.transects['messages'].append(
+                    ['TRANSECTS: Sign of total Q is not consistent. One or more start banks may be incorrect;', 1, 0])
+
+            # Check for reciprocal transects
+            num_left = start_edge.count('Left')
+            num_right = start_edge.count('Right')
+
+            if not num_left == num_right:
+                self.transects['status'] = 'warning'
+                self.transects['messages'].append(['TRANSECTS: Transects selected are not reciprocal transects;', 1, 0])
+
+        # Check for zero discharge transects
+        q_zero = False
+        for q in discharges:
+            if q.total == 0:
+                q_zero = True
+        if q_zero:
+            self.transects['status'] = 'warning'
+            self.transects['messages'].append(['TRANSECTS: One or more transects have zero Q;', 1, 0])
+
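+    # Worked example (hypothetical values) of the SonTek missing-ensemble
+    # estimate in transects_qa:
+    #
+    #     ens_duration_sec = np.array([1., 1., 3., 1., 1.])
+    #     idx_missing = np.where(ens_duration_sec > 1.5)[0]   # -> [2]
+    #     avg = (7. - 3.) / (5 - 1)                           # -> 1.0 s
+    #     num_missing = np.round(3. / avg) - 1                # -> 2 ensembles
+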
+    def system_tst_qa(self, meas):
+        """Apply QA checks to system test.
+
+        Parameters
+        ----------
+        meas: Measurement
+            Object of class Measurement
+        """
+
+        self.system_tst['messages'] = []
+        self.system_tst['status'] = 'good'
+
+        # Determine if a system test was recorded
+        if not meas.system_tst:
+            # No system test data recorded
+            self.system_tst['status'] = 'warning'
+            self.system_tst['messages'].append(['SYSTEM TEST: No system test;', 1, 3])
+        else:
+
+            pt3_fail = False
+            num_tests_with_failure = 0
+
+            for test in meas.system_tst:
+                if hasattr(test, 'result'):
+
+                    # Check for presence of pt3 test
+                    if 'pt3' in test.result and test.result['pt3'] is not None:
+
+                        # Check hard_limit, high gain, wide bandwidth
+                        if 'hard_limit' in test.result['pt3']:
+                            if 'high_wide' in test.result['pt3']['hard_limit']:
+                                corr_table = test.result['pt3']['hard_limit']['high_wide']['corr_table']
+                                if len(corr_table) > 0:
+                                    # All lags past lag 2 should be less than 50% of lag 0
+                                    qa_threshold = corr_table[0, :] * 0.5
+                                    all_lag_check = np.greater(corr_table[3::, :], qa_threshold)
+
+                                    # Lag 7 should be less than 25% of lag 0
+                                    lag_7_check = np.greater(corr_table[7, :], corr_table[0, :] * 0.25)
+
+                                    # The test fails if more than one lag/beam
+                                    # combination exceeds these thresholds
+                                    if np.sum(np.sum(all_lag_check)) + np.sum(lag_7_check) > 1:
+                                        pt3_fail = True
+
+                    if test.result['sysTest']['n_failed'] is not None and test.result['sysTest']['n_failed'] > 0:
+                        num_tests_with_failure += 1
+
+            # pt3 test failure message
+            if pt3_fail:
+                self.system_tst['status'] = 'caution'
+                self.system_tst['messages'].append(
+                    ['System Test: One or more PT3 tests in the system test indicate potential EMI;', 2, 3])
+
+            # Check for failed tests
+            if num_tests_with_failure == len(meas.system_tst):
+                # All tests had a failure
+                self.system_tst['status'] = 'warning'
+                self.system_tst['messages'].append(
+                    ['SYSTEM TEST: All system test sets have at least one test that failed;', 1, 3])
+            elif num_tests_with_failure > 0:
+                self.system_tst['status'] = 'caution'
+                self.system_tst['messages'].append(
+                    ['System Test: One or more system test sets have at least one test that failed;', 2, 3])
+
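+    # Worked example (hypothetical correlation table) of the PT3 check in
+    # system_tst_qa. Rows are lags 0-7, columns are beams; with lag 0 = 255
+    # the thresholds are 127.5 (lags 3 and beyond) and 63.75 (lag 7):
+    #
+    #     corr_table = np.vstack([np.full((1, 4), 255.), np.zeros((7, 4))])
+    #     corr_table[3, 0] = 140.   # exceeds 50% of lag 0 on beam 1
+    #     corr_table[7, 1] = 70.    # exceeds 25% of lag 0 on beam 2
+    #     # total exceedances = 2 > 1, so pt3_fail is set
+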
+    def compass_qa(self, meas):
+        """Apply QA checks to compass calibration and evaluation.
+
+        Parameters
+        ----------
+        meas: Measurement
+            Object of class Measurement
+        """
+
+        self.compass['messages'] = []
+
+        checked = []
+        for transect in meas.transects:
+            checked.append(transect.checked)
+
+        if np.any(checked):
+            heading = np.unique(meas.transects[checked.index(True)].sensors.heading_deg.internal.data)
+        else:
+            heading = np.array([0])
+
+        # Initialize variables as if the ADCP has no compass
+        self.compass['status'] = 'inactive'
+        self.compass['status1'] = 'good'
+        self.compass['status2'] = 'good'
+        self.compass['magvar'] = 0
+        self.compass['magvar_idx'] = []
+        self.compass['mag_error_idx'] = []
+        self.compass['pitch_mean_warning_idx'] = []
+        self.compass['pitch_mean_caution_idx'] = []
+        self.compass['pitch_std_caution_idx'] = []
+        self.compass['roll_mean_warning_idx'] = []
+        self.compass['roll_mean_caution_idx'] = []
+        self.compass['roll_std_caution_idx'] = []
+
+        if len(heading) > 1 and np.any(np.not_equal(heading, 0)):
+            # ADCP has a compass
+            # A compass calibration is required if a loop test or GPS are used
+
+            # Check for loop test
+            loop = False
+            for test in meas.mb_tests:
+                if test.type == 'Loop':
+                    loop = True
+
+            # Check for GPS data
+            gps = False
+            if meas.transects[checked.index(True)].boat_vel.gga_vel is not None or \
+                    meas.transects[checked.index(True)].boat_vel.vtg_vel is not None:
+                gps = True
+
+            if gps or loop:
+                # Compass calibration is required
+
+                # Determine the ADCP manufacturer
+                if meas.transects[checked.index(True)].adcp.manufacturer == 'SonTek':
+                    # SonTek ADCP
+                    if len(meas.compass_cal) == 0:
+                        # No compass calibration
+                        self.compass['status1'] = 'warning'
+                        self.compass['messages'].append(['COMPASS: No compass calibration;', 1, 4])
+                    elif meas.compass_cal[-1].result['compass']['error'] == 'N/A':
+                        # If the error cannot be decoded from the calibration assume the calibration is good
+                        self.compass['status1'] = 'good'
+                    else:
+                        if meas.compass_cal[-1].result['compass']['error'] <= 0.2:
+                            self.compass['status1'] = 'good'
+                        else:
+                            self.compass['status1'] = 'caution'
+                            self.compass['messages'].append(['Compass: Calibration result > 0.2 deg;', 2, 4])
+
+                elif meas.transects[checked.index(True)].adcp.manufacturer == 'TRDI':
+                    # TRDI ADCP
+                    if len(meas.compass_cal) == 0:
+                        # No compass calibration
+                        if len(meas.compass_eval) == 0:
+                            # No calibration or evaluation
+                            self.compass['status1'] = 'warning'
+                            self.compass['messages'].append(['COMPASS: No compass calibration or evaluation;', 1, 4])
+                        else:
+                            # No calibration but an evaluation was completed
+                            self.compass['status1'] = 'caution'
+                            self.compass['messages'].append(['Compass: No compass calibration;', 2, 4])
+                    else:
+                        # Compass was calibrated
+                        if len(meas.compass_eval) == 0:
+                            # No compass evaluation
+                            self.compass['status1'] = 'caution'
+                            self.compass['messages'].append(['Compass: No compass evaluation;', 2, 4])
+                        else:
+                            # Check results of evaluation
+                            try:
+                                if float(meas.compass_eval[-1].result['compass']['error']) <= 1:
+                                    self.compass['status1'] = 'good'
+                                else:
+                                    self.compass['status1'] = 'caution'
+                                    self.compass['messages'].append(['Compass: Evaluation result > 1 deg;', 2, 4])
+                            except ValueError:
+                                self.compass['status1'] = 'good'
+            else:
+                # Compass not required
+                if len(meas.compass_cal) == 0 and len(meas.compass_eval) == 0:
+                    # No compass calibration or evaluation
+                    self.compass['status1'] = 'default'
+                else:
+                    # Compass was calibrated and evaluated
+                    self.compass['status1'] = 'good'
+
+            # Check for consistent magvar and heading offset, and for pitch and roll mean and variation
+            magvar = []
+            align = []
+            mag_error_exceeded = []
+            pitch_mean = []
+            pitch_std = []
+            pitch_exceeded = []
+            roll_mean = []
+            roll_std = []
+            roll_exceeded = []
+            transect_idx = []
+            for n, transect in enumerate(meas.transects):
+                if transect.checked:
+                    transect_idx.append(n)
+                    heading_source_selected = getattr(
+                        transect.sensors.heading_deg, transect.sensors.heading_deg.selected)
+                    pitch_source_selected = getattr(transect.sensors.pitch_deg, transect.sensors.pitch_deg.selected)
+                    roll_source_selected = getattr(transect.sensors.roll_deg, transect.sensors.roll_deg.selected)
+
+                    magvar.append(transect.sensors.heading_deg.internal.mag_var_deg)
+                    if transect.sensors.heading_deg.external is not None:
+                        align.append(transect.sensors.heading_deg.external.align_correction_deg)
+
+                    pitch_mean.append(np.nanmean(pitch_source_selected.data))
+                    pitch_std.append(np.nanstd(pitch_source_selected.data, ddof=1))
+                    roll_mean.append(np.nanmean(roll_source_selected.data))
+                    roll_std.append(np.nanstd(roll_source_selected.data, ddof=1))
+
+                    # SonTek G3 compass provides pitch, roll, and magnetic error parameters that can be checked
+                    if transect.adcp.manufacturer == 'SonTek':
+                        if heading_source_selected.pitch_limit is not None:
+                            # Work around a bug in SonTek data where pitch and roll
+                            # are n x 3; use only the first column (n x 1)
+                            if len(pitch_source_selected.data.shape) == 1:
+                                pitch_data = pitch_source_selected.data
+                            else:
+                                pitch_data = pitch_source_selected.data[:, 0]
+                            idx_max = np.where(pitch_data > heading_source_selected.pitch_limit[0])[0]
+                            idx_min = np.where(pitch_data < heading_source_selected.pitch_limit[1])[0]
+                            if len(idx_max) > 0 or len(idx_min) > 0:
+                                pitch_exceeded.append(True)
+                            else:
+                                pitch_exceeded.append(False)
+
+                        if heading_source_selected.roll_limit is not None:
+                            if len(roll_source_selected.data.shape) == 1:
+                                roll_data = roll_source_selected.data
+                            else:
+                                roll_data = roll_source_selected.data[:, 0]
+                            idx_max = np.where(roll_data > heading_source_selected.roll_limit[0])[0]
+                            idx_min = np.where(roll_data < heading_source_selected.roll_limit[1])[0]
+                            if len(idx_max) > 0 or len(idx_min) > 0:
+                                roll_exceeded.append(True)
+                            else:
+                                roll_exceeded.append(False)
+
+                        if heading_source_selected.mag_error is not None:
+                            idx_max = np.where(heading_source_selected.mag_error > 2)[0]
+                            if len(idx_max) > 0:
+                                mag_error_exceeded.append(n)
+            # Check magvar consistency
+            if len(np.unique(magvar)) > 1:
+                self.compass['status2'] = 'caution'
+                self.compass['messages'].append(
+                    ['Compass: Magnetic variation is not consistent among transects;', 2, 4])
+                self.compass['magvar'] = 1
+
+            # Check heading offset consistency
+            if len(np.unique(align)) > 1:
+                self.compass['status2'] = 'caution'
+                self.compass['messages'].append(
+                    ['Compass: Heading offset is not consistent among transects;', 2, 4])
+                self.compass['align'] = 1
+
+            # Check that magvar was set if GPS data are available
+            if gps:
+                if 0 in magvar:
+                    self.compass['status2'] = 'warning'
+                    self.compass['messages'].append(
+                        ['COMPASS: Magnetic variation is 0 and GPS data are present;', 1, 4])
+                    self.compass['magvar'] = 2
+                    self.compass['magvar_idx'] = np.where(np.array(magvar) == 0)[0].tolist()
+
+            # Check pitch mean
+            if np.any(np.asarray(np.abs(pitch_mean)) > 8):
+                self.compass['status2'] = 'warning'
+                self.compass['messages'].append(['PITCH: One or more transects have a mean pitch > 8 deg;', 1, 4])
+                temp = np.where(np.abs(pitch_mean) > 8)[0]
+                if len(temp) > 0:
+                    self.compass['pitch_mean_warning_idx'] = np.array(transect_idx)[temp]
+                else:
+                    self.compass['pitch_mean_warning_idx'] = []
+
+            elif np.any(np.asarray(np.abs(pitch_mean)) > 4):
+                if self.compass['status2'] == 'good':
+                    self.compass['status2'] = 'caution'
+                self.compass['messages'].append(['Pitch: One or more transects have a mean pitch > 4 deg;', 2, 4])
+                temp = np.where(np.abs(pitch_mean) > 4)[0]
+                if len(temp) > 0:
+                    self.compass['pitch_mean_caution_idx'] = np.array(transect_idx)[temp]
+                else:
+                    self.compass['pitch_mean_caution_idx'] = []
+
+            # Check roll mean
+            if np.any(np.asarray(np.abs(roll_mean)) > 8):
+                self.compass['status2'] = 'warning'
+                self.compass['messages'].append(['ROLL: One or more transects have a mean roll > 8 deg;', 1, 4])
+                temp = np.where(np.abs(roll_mean) > 8)[0]
+                if len(temp) > 0:
+                    self.compass['roll_mean_warning_idx'] = np.array(transect_idx)[temp]
+                else:
+                    self.compass['roll_mean_warning_idx'] = []
+
+            elif np.any(np.asarray(np.abs(roll_mean)) > 4):
+                if self.compass['status2'] == 'good':
+                    self.compass['status2'] = 'caution'
+                self.compass['messages'].append(['Roll: One or more transects have a mean roll > 4 deg;', 2, 4])
+                temp = np.where(np.abs(roll_mean) > 4)[0]
+                if len(temp) > 0:
+                    self.compass['roll_mean_caution_idx'] = np.array(transect_idx)[temp]
+                else:
+                    self.compass['roll_mean_caution_idx'] = []
+
+            # Check pitch standard deviation
+            if np.any(np.asarray(pitch_std) > 5):
+                if self.compass['status2'] == 'good':
+                    self.compass['status2'] = 'caution'
+                self.compass['messages'].append(['Pitch: One or more transects have a pitch std dev > 5 deg;', 2, 4])
+                temp = np.where(np.abs(pitch_std) > 5)[0]
+                if len(temp) > 0:
+                    self.compass['pitch_std_caution_idx'] = np.array(transect_idx)[temp]
+                else:
+                    self.compass['pitch_std_caution_idx'] = []
+
+            # Check roll standard deviation
+            if np.any(np.asarray(roll_std) > 5):
+                if self.compass['status2'] == 'good':
+                    self.compass['status2'] = 'caution'
+                self.compass['messages'].append(['Roll: One or more transects have a roll std dev > 5 deg;', 2, 4])
+                temp = np.where(np.abs(roll_std) > 5)[0]
+                if len(temp) > 0:
+                    self.compass['roll_std_caution_idx'] = np.array(transect_idx)[temp]
+                else:
+                    self.compass['roll_std_caution_idx'] = []
+
+            # Additional checks for SonTek G3 compass
+            if meas.transects[checked.index(True)].adcp.manufacturer == 'SonTek':
+                # Check if pitch limits were exceeded
+                if any(pitch_exceeded):
+                    if self.compass['status2'] == 'good':
+                        self.compass['status2'] = 'caution'
+                    self.compass['messages'].append(
+                        ['Compass: One or more transects have pitch exceeding calibration limits;', 2, 4])
+
+                # Check if roll limits were exceeded
+                if any(roll_exceeded):
+                    if self.compass['status2'] == 'good':
+                        self.compass['status2'] = 'caution'
+                    self.compass['messages'].append(
+                        ['Compass: One or more transects have roll exceeding calibration limits;', 2, 4])
+
+                # Check if magnetic error was exceeded
+                self.compass['mag_error_idx'] = []
+                if len(mag_error_exceeded) > 0:
+                    self.compass['mag_error_idx'] = np.array(mag_error_exceeded)
+                    if self.compass['status2'] == 'good':
+                        self.compass['status2'] = 'caution'
+                    self.compass['messages'].append(
+                        ['Compass: One or more transects have a change in mag field exceeding 2%;', 2, 4])
+
+            if self.compass['status1'] == 'warning' or self.compass['status2'] == 'warning':
+                self.compass['status'] = 'warning'
+            elif self.compass['status1'] == 'caution' or self.compass['status2'] == 'caution':
+                self.compass['status'] = 'caution'
+            else:
+                self.compass['status'] = 'good'
+
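+    # The rollup above reduces status1/status2 with the precedence
+    # warning > caution > good. A minimal standalone sketch of that
+    # reduction (combine_status is a hypothetical helper shown for
+    # illustration only; it is not part of QAData):
+    #
+    #     def combine_status(*statuses):
+    #         if 'warning' in statuses:
+    #             return 'warning'
+    #         if 'caution' in statuses:
+    #             return 'caution'
+    #         return 'good'
+    #
+    #     combine_status('good', 'caution')  # -> 'caution'
+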
+    def temperature_qa(self, meas):
+        """Apply QA checks to temperature.
+
+        Parameters
+        ----------
+        meas: Measurement
+            Object of class Measurement
+        """
+
+        self.temperature['messages'] = []
+        check = [0, 0]
+
+        # Create array of all temperatures
+        temp = np.array([])
+        checked = []
+        for transect in meas.transects:
+            if transect.checked:
+                checked.append(transect.checked)
+                temp_selected = getattr(transect.sensors.temperature_deg_c, transect.sensors.temperature_deg_c.selected)
+                if len(temp) == 0:
+                    temp = temp_selected.data
+                else:
+                    temp = np.hstack((temp, temp_selected.data))
+
+        # Check temperature range
+        if np.any(checked):
+            temp_range = np.nanmax(temp) - np.nanmin(temp)
+        else:
+            temp_range = 0
+
+        if temp_range > 2:
+            check[0] = 3
+            self.temperature['messages'].append(['TEMPERATURE: Temperature range is '
+                                                 + '{:3.1f}'.format(temp_range)
+                                                 + ' degrees C which is greater than 2 degrees;', 1, 5])
+        elif temp_range > 1:
+            check[0] = 2
+            self.temperature['messages'].append(['Temperature: Temperature range is '
+                                                 + '{:3.1f}'.format(temp_range)
+                                                 + ' degrees C which is greater than 1 degree;', 2, 5])
+        else:
+            check[0] = 1
+
+        # Check for independent temperature reading
+        if 'user' in meas.ext_temp_chk:
+            try:
+                user = float(meas.ext_temp_chk['user'])
+            except (ValueError, TypeError):
+                user = None
+            if user is None or np.isnan(user):
+                # No independent temperature reading
+                check[1] = 2
+                self.temperature['messages'].append(['Temperature: No independent temperature reading;', 2, 5])
+            elif not np.isnan(meas.ext_temp_chk['adcp']):
+                # Compare user to manually entered ADCP temperature
+                diff = np.abs(user - meas.ext_temp_chk['adcp'])
+                if diff < 2:
+                    check[1] = 1
+                else:
+                    check[1] = 3
+                    self.temperature['messages'].append(
+                        ['TEMPERATURE: The difference between ADCP and reference is > 2 degrees C: '
+                         + '{:3.1f}'.format(diff) + ' C;', 1, 5])
+            else:
+                # Compare user to mean of all temperature data
+                diff = np.abs(user - np.nanmean(temp))
+                if diff < 2:
+                    check[1] = 1
+                else:
+                    check[1] = 3
+                    self.temperature['messages'].append(
+                        ['TEMPERATURE: The difference between ADCP and reference is > 2 degrees C: '
+                         + '{:3.1f}'.format(diff) + ' C;', 1, 5])
+
+        # Assign temperature status
+        max_check = max(check)
+        if max_check == 1:
+            self.temperature['status'] = 'good'
+        elif max_check == 2:
+            self.temperature['status'] = 'caution'
+        elif max_check == 3:
+            self.temperature['status'] = 'warning'
+
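+    # The two-element check array in temperature_qa encodes 1 = good,
+    # 2 = caution, 3 = warning, and the final status is max(check).
+    # A worked example (values are illustrative only):
+    #
+    #     temp_range = 1.4 deg C      -> check[0] = 2 (caution)
+    #     |user - adcp| = 2.3 deg C   -> check[1] = 3 (warning)
+    #     max(check) = 3              -> status = 'warning'
+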
+    def moving_bed_qa(self, meas):
+        """Applies quality checks to moving-bed tests.
+
+        Parameters
+        ----------
+        meas: Measurement
+            Object of class Measurement
+        """
+
+        self.movingbed['messages'] = []
+        self.movingbed['code'] = 0
+
+        # Are there moving-bed tests?
+        if len(meas.mb_tests) < 1:
+            if meas.observed_no_moving_bed:
+                self.movingbed['messages'].append(['Moving-Bed Test: Visually observed no moving bed;', 2, 6])
+                self.movingbed['status'] = 'caution'
+                self.movingbed['code'] = 2
+            else:
+                # No moving-bed test
+                self.movingbed['messages'].append(['MOVING-BED TEST: No moving bed test;', 1, 6])
+                self.movingbed['status'] = 'warning'
+                self.movingbed['code'] = 3
+
+        else:
+            # Moving-bed tests available
+            mb_data = meas.mb_tests
+
+            user_valid_test = []
+            file_names = []
+            idx_selected = []
+            test_quality = []
+            mb_tests = []
+            mb = []
+            mb_test_type = []
+            loop = []
+            use_2_correct = []
+            gps_diff1 = False
+            gps_diff2 = False
+
+            for n, test in enumerate(mb_data):
+                # Are tests valid according to the user
+                if test.user_valid:
+                    user_valid_test.append(True)
+                    file_names.append(test.transect.file_name)
+                    if test.type == 'Loop' and not test.test_quality == 'Errors':
+                        loop.append(test.moving_bed)
+                    if not np.isnan(test.gps_percent_mb):
+                        if np.abs(test.bt_percent_mb - test.gps_percent_mb) > 2:
+                            gps_diff2 = True
+                        if np.logical_xor(test.bt_percent_mb >= 1, test.gps_percent_mb >= 1):
+                            gps_diff1 = True
+                    # Selected test
+                    if test.selected:
+                        idx_selected.append(n)
+                        test_quality.append(test.test_quality)
+                        mb_tests.append(test)
+                        mb.append(test.moving_bed)
+                        mb_test_type.append(test.type)
+                        use_2_correct.append(test.use_2_correct)
+                else:
+                    user_valid_test.append(False)
+
+            if not any(user_valid_test):
+                # No valid test according to user
+                self.movingbed['messages'].append(['MOVING-BED TEST: No valid moving-bed test based on user input;',
+                                                   1, 6])
+                self.movingbed['status'] = 'warning'
+                self.movingbed['code'] = 3
+            else:
+                # Check for duplicate valid moving-bed tests
+                if len(np.unique(file_names)) < len(file_names):
+                    self.movingbed['messages'].append([
+                        'MOVING-BED TEST: Duplicate moving-bed test files marked valid;', 1, 6])
+                    self.movingbed['status'] = 'warning'
+                    self.movingbed['code'] = 3
+
+            if self.movingbed['code'] == 0:
+                # Check test quality
+                if len(test_quality) > 0 and sum(np.array(test_quality) == 'Good') > 0:
+                    self.movingbed['status'] = 'good'
+                    self.movingbed['code'] = 1
+
+                    # Check if there is a moving-bed
+                    if 'Yes' in mb:
+
+                        # Moving-bed present
+                        self.movingbed['messages'].append(
+                            ['Moving-Bed Test: A moving-bed is present.', 2, 6])
+                        self.movingbed['code'] = 2
+                        self.movingbed['status'] = 'caution'
+                        if meas.transects[meas.checked_transect_idx[0]].boat_vel.composite == 'On':
+                            self.movingbed['messages'].append(
+                                ['Moving-Bed: Use of composite tracks could cause inaccurate results.', 2, 6])
+
+                        if meas.transects[meas.checked_transect_idx[0]].boat_vel.selected == 'bt_vel':
+                            if any(use_2_correct):
+                                self.movingbed['messages'].append(
+                                    ['Moving-Bed: BT based moving-bed correction applied.', 2, 6])
+                            else:
+                                self.movingbed['messages'].append(
+                                    ['MOVING-BED: Moving-bed present and BT used, but no correction applied.', 1, 6])
+                                self.movingbed['code'] = 3
+                                self.movingbed['status'] = 'warning'
+                        elif meas.transects[meas.checked_transect_idx[0]].boat_vel.selected == 'gga_vel':
+                            self.movingbed['messages'].append(
+                                ['Moving-Bed: GGA used.', 2, 6])
+                        elif meas.transects[meas.checked_transect_idx[0]].boat_vel.selected == 'vtg_vel':
+                            self.movingbed['messages'].append(
+                                ['Moving-Bed: VTG used.', 2, 6])
+
+                        # Check for test type
+                        if sum(np.array(mb_test_type) == 'Stationary'):
+                            # Check for GPS or 3 stationary tests
+                            if len(mb_tests) < 3:
+                                gps = []
+                                for transect in meas.transects:
+                                    if transect.checked:
+                                        if transect.gps is None:
+                                            gps.append(False)
+                                        else:
+                                            gps.append(True)
+                                if not all(gps):
+                                    # GPS not available for all selected transects
+                                    self.movingbed['messages'].append([
+                                        'Moving-Bed Test: '
+                                        + 'Less than 3 stationary tests available for moving-bed correction;',
+                                        2, 6])
+
+                elif len(test_quality) > 0 and sum(np.array(test_quality) == 'Warnings') > 0:
+                    # Quality check has warnings
+                    self.movingbed['messages'].append(['Moving-Bed Test: The moving-bed test(s) have warnings, '
+                                                       + 'please review tests to determine validity;', 2, 6])
+                    self.movingbed['status'] = 'caution'
+                    self.movingbed['code'] = 2
+
+                elif len(test_quality) > 0 and sum(np.array(test_quality) == 'Manual') > 0:
+                    # Manual override used
+                    self.movingbed['messages'].append(['MOVING-BED TEST: '
+                                                       + 'The user has manually forced the use of some tests;', 1, 6])
+                    self.movingbed['status'] = 'warning'
+                    self.movingbed['code'] = 3
+
+                else:
+                    # Test has critical errors
+                    self.movingbed['messages'].append(['MOVING-BED TEST: The moving-bed test(s) have critical errors '
+                                                       + 'and will not be used;', 1, 6])
+                    self.movingbed['status'] = 'warning'
+                    self.movingbed['code'] = 3
+
+                # Check multiple loops for consistency
+                if len(np.unique(loop)) > 1:
+                    self.movingbed['messages'].append(['Moving-Bed Test: Results of valid loops are not consistent, '
+                                                       + 'review moving-bed tests;', 2, 6])
+                    if self.movingbed['code'] < 3:
+                        self.movingbed['code'] = 2
+                        self.movingbed['status'] = 'caution'
+
+                # Notify of differences in results of test between BT and GPS
+                if gps_diff2:
+                    self.movingbed['messages'].append(['Moving-Bed Test: Bottom track and '
+                                                      'GPS results differ by more than 2%.', 2, 6])
+                    if self.movingbed['code'] < 3:
+                        self.movingbed['code'] = 2
+                        self.movingbed['status'] = 'caution'
+
+                if gps_diff1:
+                    self.movingbed['messages'].append(['Moving-Bed Test: Bottom track and GPS results do not agree.',
+                                                      2, 6])
+                    if self.movingbed['code'] < 3:
+                        self.movingbed['code'] = 2
+                        self.movingbed['status'] = 'caution'
+
+        self.check_mbt_settings(meas)
+
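+    # Moving-bed QA codes assigned above: 0 = not evaluated, 1 = good,
+    # 2 = caution, 3 = warning. An illustrative trace (hypothetical data):
+    # one valid loop test with quality 'Good' and moving_bed = 'Yes',
+    # bottom track selected and use_2_correct = True, results in
+    # code 2 / 'caution' (moving bed present but corrected).
+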
+    def user_qa(self, meas):
+        """Apply quality checks to user input data.
+
+        Parameters
+        ----------
+        meas: Measurement
+            Object of class Measurement
+        """
+
+        self.user['messages'] = []
+        self.user['status'] = 'good'
+
+        # Check for Station Name
+        self.user['sta_name'] = False
+        if meas.station_name is None or len(meas.station_name.strip()) < 1:
+            self.user['messages'].append(['Site Info: Station name not entered;', 2, 2])
+            self.user['status'] = 'caution'
+            self.user['sta_name'] = True
+
+        # Check for Station Number
+        self.user['sta_number'] = False
+        try:
+            if meas.station_number is None or len(meas.station_number.strip()) < 1:
+                self.user['messages'].append(['Site Info: Station number not entered;', 2, 2])
+                self.user['status'] = 'caution'
+                self.user['sta_number'] = True
+        except AttributeError:
+            self.user['messages'].append(['Site Info: Station number not entered;', 2, 2])
+            self.user['status'] = 'caution'
+            self.user['sta_number'] = True
+
+    def depths_qa(self, meas):
+        """Apply quality checks to depth data.
+
+        Parameters
+        ----------
+        meas: Measurement
+            Object of class Measurement
+        """
+
+        # Initialize variables
+        n_transects = len(meas.transects)
+        self.depths['q_total'] = np.tile(np.nan, n_transects)
+        self.depths['q_max_run'] = np.tile(np.nan, n_transects)
+        self.depths['q_total_caution'] = np.tile(False, n_transects)
+        self.depths['q_max_run_caution'] = np.tile(False, n_transects)
+        self.depths['q_total_warning'] = np.tile(False, n_transects)
+        self.depths['q_max_run_warning'] = np.tile(False, n_transects)
+        self.depths['all_invalid'] = np.tile(False, n_transects)
+        self.depths['messages'] = []
+        self.depths['status'] = 'good'
+        self.depths['draft'] = 0
+        checked = []
+        drafts = []
+        for n, transect in enumerate(meas.transects):
+            checked.append(transect.checked)
+            if transect.checked:
+                in_transect_idx = transect.in_transect_idx
+
+                depths_selected = getattr(transect.depths, transect.depths.selected)
+                drafts.append(depths_selected.draft_use_m)
+
+                # Determine valid measured depths
+                if transect.depths.composite:
+                    depth_na = depths_selected.depth_source_ens[in_transect_idx] != 'NA'
+                    depth_in = depths_selected.depth_source_ens[in_transect_idx] != 'IN'
+                    depth_valid = np.all(np.vstack((depth_na, depth_in)), 0)
+                else:
+                    depth_valid_temp = depths_selected.valid_data[in_transect_idx]
+                    depth_not_nan = np.logical_not(np.isnan(depths_selected.depth_processed_m[in_transect_idx]))
+                    depth_valid = np.all(np.vstack((depth_not_nan, depth_valid_temp)), 0)
+
+                if not np.any(depth_valid):
+                    self.depths['all_invalid'][n] = True
+
+                # Compute QA characteristics
+                q_total, q_max_run, number_invalid_ensembles = QAData.invalid_qa(depth_valid, meas.discharge[n])
+                self.depths['q_total'][n] = q_total
+                self.depths['q_max_run'][n] = q_max_run
+
+                # Compute percentage compared to total
+                if meas.discharge[n].total == 0.0:
+                    q_total_percent = np.nan
+                    q_max_run_percent = np.nan
+                else:
+                    q_total_percent = np.abs((q_total / meas.discharge[n].total) * 100)
+                    q_max_run_percent = np.abs((q_max_run / meas.discharge[n].total) * 100)
+
+                # Apply total interpolated discharge threshold
+                if q_total_percent > self.q_total_threshold_warning:
+                    self.depths['q_total_warning'][n] = True
+                elif q_total_percent > self.q_total_threshold_caution:
+                    self.depths['q_total_caution'][n] = True
+
+                # Apply interpolated discharge run thresholds
+                if q_max_run_percent > self.q_run_threshold_warning:
+                    self.depths['q_max_run_warning'][n] = True
+                elif q_max_run_percent > self.q_run_threshold_caution:
+                    self.depths['q_max_run_caution'][n] = True
+
+        if any(checked):
+
+            # Create array of all unique draft values
+            draft_check = np.unique(np.round(drafts, 2))
+
+            # Check draft consistency
+            if len(draft_check) > 1:
+                self.depths['status'] = 'caution'
+                self.depths['draft'] = 1
+                self.depths['messages'].append(['Depth: Transducer depth is not consistent among transects;', 2, 10])
+
+            # Check for zero draft
+            if np.any(np.less(draft_check, 0.01)):
+                self.depths['status'] = 'warning'
+                self.depths['draft'] = 2
+                self.depths['messages'].append(['DEPTH: Transducer depth is too shallow, likely 0;', 1, 10])
+
+            # Check consecutive interpolated discharge criteria
+            if np.any(self.depths['q_max_run_warning']):
+                self.depths['messages'].append(['DEPTH: Int. Q for consecutive invalid ensembles exceeds '
+                                                + '%2.0f' % self.q_run_threshold_warning + '%;', 1, 10])
+                self.depths['status'] = 'warning'
+            elif np.any(self.depths['q_max_run_caution']):
+                self.depths['messages'].append(['Depth: Int. Q for consecutive invalid ensembles exceeds '
+                                                + '%2.0f' % self.q_run_threshold_caution + '%;', 2, 10])
+                self.depths['status'] = 'caution'
+
+            # Check total interpolated discharge criteria
+            if np.any(self.depths['q_total_warning']):
+                self.depths['messages'].append(['DEPTH: Int. Q for invalid ensembles in a transect exceeds '
+                                                + '%2.0f' % self.q_total_threshold_warning + '%;', 1, 10])
+                self.depths['status'] = 'warning'
+            elif np.any(self.depths['q_total_caution']):
+                self.depths['messages'].append(['Depth: Int. Q for invalid ensembles in a transect exceeds '
+                                                + '%2.0f' % self.q_total_threshold_caution + '%;', 2, 10])
+                self.depths['status'] = 'caution'
+
+            # Check if all depths are invalid
+            if np.any(self.depths['all_invalid']):
+                self.depths['messages'].append(['DEPTH: There are no valid depths for one or more transects.', 1, 10])
+                self.depths['status'] = 'warning'
+
+        else:
+            self.depths['status'] = 'inactive'
+
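+    # The depth (and boat/water) checks compare interpolated discharge
+    # against the class thresholds. A worked example with assumed threshold
+    # values of 10/25 (total caution/warning) and 5/10 (run caution/warning),
+    # in percent:
+    #
+    #     q_total = 3.2, discharge.total = 20.0 (m^3/s)
+    #     q_total_percent = |3.2 / 20.0| * 100 = 16  -> caution (10 < 16 <= 25)
+    #     q_max_run = 2.4 -> q_max_run_percent = 12  -> warning (12 > 10)
+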
+    def boat_qa(self, meas):
+        """Apply quality checks to boat data.
+
+        Parameters
+        ----------
+        meas: Measurement
+            Object of class Measurement
+        """
+
+        # Initialize variables
+        n_transects = len(meas.transects)
+        data_type = {'BT': {'class': 'bt_vel', 'warning': 'BT-', 'caution': 'bt-',
+                            'filter': [('All: ', 0), ('Original: ', 1), ('ErrorVel: ', 2),
+                                       ('VertVel: ', 3), ('Other: ', 4), ('3Beams: ', 5)]},
+                     'GGA': {'class': 'gga_vel', 'warning': 'GGA-', 'caution': 'gga-',
+                             'filter': [('All: ', 0), ('Original: ', 1), ('DGPS: ', 2),
+                                        ('Altitude: ', 3), ('Other: ', 4), ('HDOP: ', 5)]},
+                     'VTG': {'class': 'vtg_vel', 'warning': 'VTG-', 'caution': 'vtg-',
+                             'filter': [('All: ', 0), ('Original: ', 1), ('Other: ', 4), ('HDOP: ', 5)]}}
+        self.boat['messages'] = []
+
+        for dt_key, dt_value in data_type.items():
+            boat = getattr(self, dt_value['class'])
+
+            # Initialize dictionaries for each data type
+            boat['q_total_caution'] = np.tile(False, (n_transects, 6))
+            boat['q_max_run_caution'] = np.tile(False, (n_transects, 6))
+            boat['q_total_warning'] = np.tile(False, (n_transects, 6))
+            boat['q_max_run_warning'] = np.tile(False, (n_transects, 6))
+            boat['all_invalid'] = np.tile(False, n_transects)
+            boat['q_total'] = np.tile(np.nan, (n_transects, 6))
+            boat['q_max_run'] = np.tile(np.nan, (n_transects, 6))
+            boat['messages'] = []
+            status_switch = 0
+            avg_speed_check = 0
+
+            # Check the results of each filter
+            for dt_filter in dt_value['filter']:
+                boat['status'] = 'inactive'
+
+                # Quality check each transect
+                for n, transect in enumerate(meas.transects):
+
+                    # Evaluate on transects used in the discharge computation
+                    if transect.checked:
+
+                        in_transect_idx = transect.in_transect_idx
+
+                        # Check to see if data are available for the data_type
+                        if getattr(transect.boat_vel, dt_value['class']) is not None:
+                            boat['status'] = 'good'
+
+                            # Compute quality characteristics
+                            valid = getattr(transect.boat_vel, dt_value['class']).valid_data[dt_filter[1],
+                                                                                             in_transect_idx]
+                            q_total, q_max_run, number_invalid_ens = QAData.invalid_qa(valid, meas.discharge[n])
+                            boat['q_total'][n, dt_filter[1]] = q_total
+                            boat['q_max_run'][n, dt_filter[1]] = q_max_run
+
+                            # Compute percentage compared to total
+                            if meas.discharge[n].total == 0.0:
+                                q_total_percent = np.nan
+                                q_max_run_percent = np.nan
+                            else:
+                                q_total_percent = np.abs((q_total / meas.discharge[n].total) * 100)
+                                q_max_run_percent = np.abs((q_max_run / meas.discharge[n].total) * 100)
+
+                            # Check if all invalid
+                            if dt_filter[1] == 0 and not np.any(valid):
+                                boat['all_invalid'][n] = True
+
+                            # Apply total interpolated discharge threshold
+                            if q_total_percent > self.q_total_threshold_warning:
+                                boat['q_total_warning'][n, dt_filter[1]] = True
+                            elif q_total_percent > self.q_total_threshold_caution:
+                                boat['q_total_caution'][n, dt_filter[1]] = True
+
+                            # Apply interpolated discharge run thresholds
+                            if q_max_run_percent > self.q_run_threshold_warning:
+                                boat['q_max_run_warning'][n, dt_filter[1]] = True
+                            elif q_max_run_percent > self.q_run_threshold_caution:
+                                boat['q_max_run_caution'][n, dt_filter[1]] = True
+
+                            # Check boat velocity for vtg data
+                            if dt_key == 'VTG' and transect.boat_vel.selected == 'vtg_vel' and avg_speed_check == 0:
+                                if transect.boat_vel.vtg_vel.u_mps is not None:
+                                    avg_speed = np.nanmean((transect.boat_vel.vtg_vel.u_mps ** 2
+                                                            + transect.boat_vel.vtg_vel.v_mps ** 2) ** 0.5)
+                                    if avg_speed < 0.24:
+                                        boat['q_total_caution'][n, 2] = True
+                                        if status_switch < 1:
+                                            status_switch = 1
+                                        boat['messages'].append(
+                                            ['vtg-AvgSpeed: VTG data may not be accurate for average boat speed '
+                                             'less than 0.24 m/s (0.8 ft/s);', 2, 8])
+                                        avg_speed_check = 1
+
+                # Create message for consecutive invalid discharge
+                if boat['q_max_run_warning'][:, dt_filter[1]].any():
+                    module_code = 7 if dt_key == 'BT' else 8
+                    boat['messages'].append(
+                        [dt_value['warning'] + dt_filter[0] +
+                         'Int. Q for consecutive invalid ensembles exceeds ' +
+                         '%3.1f' % self.q_run_threshold_warning + '%;', 1, module_code])
+                    status_switch = 2
+                elif boat['q_max_run_caution'][:, dt_filter[1]].any():
+                    module_code = 7 if dt_key == 'BT' else 8
+                    boat['messages'].append(
+                        [dt_value['caution'] + dt_filter[0] +
+                         'Int. Q for consecutive invalid ensembles exceeds ' +
+                         '%3.1f' % self.q_run_threshold_caution + '%;', 2, module_code])
+                    if status_switch < 1:
+                        status_switch = 1
+
+                # Create message for total invalid discharge
+                if boat['q_total_warning'][:, dt_filter[1]].any():
+                    module_code = 7 if dt_key == 'BT' else 8
+                    boat['messages'].append(
+                        [dt_value['warning'] + dt_filter[0] +
+                         'Int. Q for invalid ensembles in a transect exceeds ' +
+                         '%3.1f' % self.q_total_threshold_warning + '%;', 1, module_code])
+                    status_switch = 2
+                elif boat['q_total_caution'][:, dt_filter[1]].any():
+                    module_code = 7 if dt_key == 'BT' else 8
+                    boat['messages'].append(
+                        [dt_value['caution'] + dt_filter[0] +
+                         'Int. Q for invalid ensembles in a transect exceeds ' +
+                         '%3.1f' % self.q_total_threshold_caution + '%;', 2, module_code])
+                    if status_switch < 1:
+                        status_switch = 1
+
+            # Create message for all data invalid
+            if boat['all_invalid'].any():
+                boat['status'] = 'warning'
+                module_code = 7 if dt_key == 'BT' else 8
+                boat['messages'].append(
+                    [dt_value['warning'] + dt_value['filter'][0][0] +
+                     'There are no valid data for one or more transects.;', 1, module_code])
+
+            # Set status
+            if status_switch == 2:
+                boat['status'] = 'warning'
+            elif status_switch == 1:
+                boat['status'] = 'caution'
+
+            setattr(self, dt_value['class'], boat)
+
+        lag_gga = []
+        lag_vtg = []
+        self.gga_vel['lag_status'] = 'good'
+        self.vtg_vel['lag_status'] = 'good'
+        for transect in meas.transects:
+            gga, vtg = TransectData.compute_gps_lag(transect)
+            if gga is not None:
+                lag_gga.append(gga)
+            if vtg is not None:
+                lag_vtg.append(vtg)
+        if len(lag_gga) > 0:
+            if np.mean(np.abs(lag_gga)) > 10:
+                self.gga_vel['messages'].append(['GGA: BT and GGA do not appear to be synchronized', 1, 8])
+                if self.gga_vel['status'] != 'warning':
+                    self.gga_vel['status'] = 'warning'
+                    self.gga_vel['lag_status'] = 'warning'
+            elif np.mean(np.abs(lag_gga)) > 2:
+                self.gga_vel['messages'].append(['gga: Lag between BT and GGA > 2 sec', 2, 8])
+                if self.gga_vel['status'] != 'warning':
+                    self.gga_vel['status'] = 'caution'
+                    self.gga_vel['lag_status'] = 'caution'
+        if len(lag_vtg) > 0:
+            if np.mean(np.abs(lag_vtg)) > 10:
+                self.vtg_vel['messages'].append(['VTG: BT and VTG do not appear to be synchronized', 1, 8])
+                if self.vtg_vel['status'] != 'warning':
+                    self.vtg_vel['status'] = 'warning'
+                    self.vtg_vel['lag_status'] = 'warning'
+            elif np.mean(np.abs(lag_vtg)) > 2:
+                self.vtg_vel['messages'].append(['vtg: Lag between BT and VTG > 2 sec', 2, 8])
+                if self.vtg_vel['status'] != 'warning':
+                    self.vtg_vel['status'] = 'caution'
+                    self.vtg_vel['lag_status'] = 'caution'
+
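+    # GPS lag check above: TransectData.compute_gps_lag returns an estimated
+    # time offset in seconds between bottom track and each GPS stream. A mean
+    # absolute lag over 2 s raises a caution, over 10 s a warning. For
+    # example (illustrative values), lag_gga = [3.1, 2.4, 2.8] gives
+    # mean |lag| = 2.77 s -> caution.
+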
+    def water_qa(self, meas):
+        """Apply quality checks to water data.
+
+        Parameters
+        ----------
+        meas: Measurement
+            Object of class Measurement
+        """
+
+        # Initialize filter labels and indices
+        prefix = ['All: ', 'Original: ', 'ErrorVel: ', 'VertVel: ', 'Other: ', '3Beams: ', 'SNR: ']
+        if meas.transects[0].adcp.manufacturer == 'TRDI':
+            filter_index = [0, 1, 2, 3, 4, 5]
+        else:
+            filter_index = [0, 1, 2, 3, 4, 5, 7]
+
+        n_transects = len(meas.transects)
+        n_filters = len(filter_index) + 1
+        # Initialize dictionaries for each data type
+        self.w_vel['q_total_caution'] = np.tile(False, (n_transects, n_filters))
+        self.w_vel['q_max_run_caution'] = np.tile(False, (n_transects, n_filters))
+        self.w_vel['q_total_warning'] = np.tile(False, (n_transects, n_filters))
+        self.w_vel['q_max_run_warning'] = np.tile(False, (n_transects, n_filters))
+        self.w_vel['all_invalid'] = np.tile(False, n_transects)
+        self.w_vel['q_total'] = np.tile(np.nan, (n_transects, n_filters))
+        self.w_vel['q_max_run'] = np.tile(np.nan, (n_transects, n_filters))
+        self.w_vel['messages'] = []
+        status_switch = 0
+
+        # TODO if meas had a property checked as list it would save creating that list multiple times
+        checked = []
+        for transect in meas.transects:
+            checked.append(transect.checked)
+
+        # At least one transect is being used to compute discharge
+        if any(checked):
+            # Loop through filters
+            for prefix_idx, filter_idx in enumerate(filter_index):
+                # Loop through transects
+                for n, transect in enumerate(meas.transects):
+                    if transect.checked:
+                        valid_original = np.any(transect.w_vel.valid_data[1, :, transect.in_transect_idx].T, 0)
+
+                        # Determine what data each filter have marked invalid. Original invalid data are excluded
+                        valid = np.any(transect.w_vel.valid_data[filter_idx, :, transect.in_transect_idx].T, 0)
+                        if filter_idx > 1:
+                            valid_int = valid.astype(int) - valid_original.astype(int)
+                            valid = valid_int != -1
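+                        # The cast-and-subtract above flags only data newly
+                        # invalidated by this filter: valid 0 - original 1
+                        # gives -1 (False), while data already invalid in the
+                        # original (0 - 0 = 0) are not charged against this
+                        # filter.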
+
+                        # Check if all data are invalid
+                        if filter_idx == 0:
+                            if np.nansum(valid.astype(int)) < 1:
+                                self.w_vel['all_invalid'][n] = True
+                        # TODO seems like the rest of this should be under else of all invalid or multiple messages
+                        # generated.
+
+                        # Compute characteristics
+                        q_total, q_max_run, number_invalid_ens = QAData.invalid_qa(valid, meas.discharge[n])
+                        self.w_vel['q_total'][n, filter_idx] = q_total
+                        self.w_vel['q_max_run'][n, filter_idx] = q_max_run
+
+                        # Compute percentage compared to total
+                        if meas.discharge[n].total == 0.0:
+                            q_total_percent = np.nan
+                            q_max_run_percent = np.nan
+                        else:
+                            q_total_percent = np.abs((q_total / meas.discharge[n].total) * 100)
+                            q_max_run_percent = np.abs((q_max_run / meas.discharge[n].total) * 100)
+
+                        # Check total invalid discharge in ensembles for warning
+                        if q_total_percent > self.q_total_threshold_warning:
+                            self.w_vel['q_total_warning'][n, filter_idx] = True
+
+                        # Apply run or cluster thresholds
+                        if q_max_run_percent > self.q_run_threshold_warning:
+                            self.w_vel['q_max_run_warning'][n, filter_idx] = True
+                        elif q_max_run_percent > self.q_run_threshold_caution:
+                            self.w_vel['q_max_run_caution'][n, filter_idx] = True
+
+                        # Compute percent discharge interpolated for both cells and ensembles
+                        # This approach doesn't exclude original data
+                        valid_cells = transect.w_vel.valid_data[filter_idx, :, transect.in_transect_idx].T
+                        q_invalid_total = np.nansum(meas.discharge[n].middle_cells[np.logical_not(valid_cells)]) \
+                            + np.nansum(meas.discharge[n].top_ens[np.logical_not(valid)]) \
+                            + np.nansum(meas.discharge[n].bottom_ens[np.logical_not(valid)])
+                        q_invalid_total_percent = (q_invalid_total / meas.discharge[n].total) * 100
+
+                        if q_invalid_total_percent > self.q_total_threshold_caution:
+                            self.w_vel['q_total_caution'][n, filter_idx] = True
+
+                # Generate messages for ensemble run or clusters
+                if np.any(self.w_vel['q_max_run_warning'][:, filter_idx]):
+                    self.w_vel['messages'].append(['WT-' + prefix[prefix_idx]
+                                                   + 'Int. Q for consecutive invalid ensembles exceeds '
+                                                   + '%3.0f' % self.q_run_threshold_warning
+                                                   + '%;', 1, 11])
+                    status_switch = 2
+                elif np.any(self.w_vel['q_max_run_caution'][:, filter_idx]):
+                    self.w_vel['messages'].append(['wt-' + prefix[prefix_idx]
+                                                   + 'Int. Q for consecutive invalid ensembles exceeds '
+                                                   + '%3.0f' % self.q_run_threshold_caution
+                                                   + '%;', 2, 11])
+                    if status_switch < 1:
+                        status_switch = 1
+
+                # Generate message for total_invalid Q
+                if np.any(self.w_vel['q_total_warning'][:, filter_idx]):
+                    self.w_vel['messages'].append(['WT-' + prefix[prefix_idx]
+                                                   + 'Int. Q for invalid cells and ensembles in a transect exceeds '
+                                                   + '%3.0f' % self.q_total_threshold_warning
+                                                   + '%;', 1, 11])
+                    status_switch = 2
+                elif np.any(self.w_vel['q_total_caution'][:, filter_idx]):
+                    self.w_vel['messages'].append(['wt-' + prefix[prefix_idx]
+                                                   + 'Int. Q for invalid cells and ensembles in a transect exceeds '
+                                                   + '%3.0f' % self.q_total_threshold_caution
+                                                   + '%;', 2, 11])
+                    if status_switch < 1:
+                        status_switch = 1
+
+            # Generate message for all invalid
+            if np.any(self.w_vel['all_invalid']):
+                self.w_vel['messages'].append(['WT-' + prefix[0] + 'There are no valid data for one or more transects.',
+                                               1, 11])
+                status_switch = 2
+
+            # Set status
+            self.w_vel['status'] = 'good'
+            if status_switch == 2:
+                self.w_vel['status'] = 'warning'
+            elif status_switch == 1:
+                self.w_vel['status'] = 'caution'
+        else:
+            self.w_vel['status'] = 'inactive'
+
+    def extrapolation_qa(self, meas):
+        """Apply quality checks to extrapolation methods
+
+        Parameters
+        ----------
+        meas: Measurement
+            Object of class Measurement
+        """
+
+        self.extrapolation['messages'] = []
+
+        checked = []
+        discharges = []
+        for n, transect in enumerate(meas.transects):
+            checked.append(transect.checked)
+            if transect.checked:
+                discharges.append(meas.discharge[n])
+
+        if any(checked):
+            self.extrapolation['status'] = 'good'
+            extrap_uncertainty = Uncertainty.uncertainty_extrapolation(meas, discharges)
+
+            if np.abs(extrap_uncertainty) > 2:
+                self.extrapolation['messages'].append(['Extrapolation: The extrapolation uncertainty is more than '
+                                                       + '2 percent;', 2, 12])
+                self.extrapolation['messages'].append(['    Carefully review the extrapolation;', 2, 12])
+                self.extrapolation['status'] = 'caution'
+        else:
+            self.extrapolation['status'] = 'inactive'
+
+    def edges_qa(self, meas):
+        """Apply quality checks to edge estimates
+
+        Parameters
+        ----------
+        meas: Measurement
+            Object of class Measurement
+        """
+
+        # Initialize variables
+        self.edges['messages'] = []
+        checked = []
+        left_q = []
+        right_q = []
+        total_q = []
+        edge_dist_left = []
+        edge_dist_right = []
+        dist_moved_left = []
+        dist_moved_right = []
+        dist_made_good = []
+        left_type = []
+        right_type = []
+        transect_idx = []
+
+        for n, transect in enumerate(meas.transects):
+            checked.append(transect.checked)
+
+            if transect.checked:
+                left_q.append(meas.discharge[n].left)
+                right_q.append(meas.discharge[n].right)
+                total_q.append(meas.discharge[n].total)
+                dmr, dml, dmg = QAData.edge_distance_moved(transect)
+                dist_moved_right.append(dmr)
+                dist_moved_left.append(dml)
+                dist_made_good.append(dmg)
+                edge_dist_left.append(transect.edges.left.distance_m)
+                edge_dist_right.append(transect.edges.right.distance_m)
+                left_type.append(transect.edges.left.type)
+                right_type.append(transect.edges.right.type)
+                transect_idx.append(n)
+
+        if any(checked):
+            # Set default status to good
+            self.edges['status'] = 'good'
+
+            mean_total_q = np.nanmean(total_q)
+
+            # Check left edge q > 5%
+            self.edges['left_q'] = 0
+
+            left_q_percent = (np.nanmean(left_q) / mean_total_q) * 100
+            temp_idx = np.where(np.array(left_q) / mean_total_q > 0.05)[0]
+            if len(temp_idx) > 0:
+                self.edges['left_q_idx'] = np.array(transect_idx)[temp_idx]
+            else:
+                self.edges['left_q_idx'] = []
+            if np.abs(left_q_percent) > 5:
+                self.edges['status'] = 'caution'
+                self.edges['messages'].append(['Edges: Left edge Q is greater than 5%;', 2, 13])
+                self.edges['left_q'] = 1
+            elif len(self.edges['left_q_idx']) > 0:
+                self.edges['status'] = 'caution'
+                self.edges['messages'].append(
+                    ['Edges: One or more transects have a left edge Q greater than 5%;', 2, 13])
+                self.edges['left_q'] = 1
+
+            # Check right edge q > 5%
+            self.edges['right_q'] = 0
+            right_q_percent = (np.nanmean(right_q) / mean_total_q) * 100
+            temp_idx = np.where(np.array(right_q) / mean_total_q > 0.05)[0]
+            if len(temp_idx) > 0:
+                self.edges['right_q_idx'] = np.array(transect_idx)[temp_idx]
+            else:
+                self.edges['right_q_idx'] = []
+            if np.abs(right_q_percent) > 5:
+                self.edges['status'] = 'caution'
+                self.edges['messages'].append(['Edges: Right edge Q is greater than 5%;', 2, 13])
+                self.edges['right_q'] = 1
+            elif len(self.edges['right_q_idx']) > 0:
+                self.edges['status'] = 'caution'
+                self.edges['messages'].append(
+                    ['Edges: One or more transects have a right edge Q greater than 5%;', 2, 13])
+                self.edges['right_q'] = 1
+
+            # Check for consistent sign
+            q_positive = []
+            self.edges['left_sign'] = 0
+            for q in left_q:
+                if q >= 0:
+                    q_positive.append(True)
+                else:
+                    q_positive.append(False)
+            if len(np.unique(q_positive)) > 1 and left_q_percent > 0.5:
+                self.edges['status'] = 'caution'
+                self.edges['messages'].append(['Edges: Sign of left edge Q is not consistent;', 2, 13])
+                self.edges['left_sign'] = 1
+
+            q_positive = []
+            self.edges['right_sign'] = 0
+            for q in right_q:
+                if q >= 0:
+                    q_positive.append(True)
+                else:
+                    q_positive.append(False)
+            if len(np.unique(q_positive)) > 1 and right_q_percent > 0.5:
+                self.edges['status'] = 'caution'
+                self.edges['messages'].append(['Edges: Sign of right edge Q is not consistent;', 2, 13])
+                self.edges['right_sign'] = 1
+
+            # Check distance moved
+            dmg_5_percent = 0.05 * np.nanmean(dist_made_good)
+            avg_right_edge_dist = np.nanmean(edge_dist_right)
+            right_threshold = np.nanmin([dmg_5_percent, avg_right_edge_dist])
+            temp_idx = np.where(np.array(dist_moved_right) > right_threshold)[0]
+            if len(temp_idx) > 0:
+                self.edges['right_dist_moved_idx'] = np.array(transect_idx)[temp_idx]
+                self.edges['status'] = 'caution'
+                self.edges['messages'].append(['Edges: Excessive boat movement in right edge ensembles;', 2, 13])
+            else:
+                self.edges['right_dist_moved_idx'] = []
+
+            avg_left_edge_dist = np.nanmean(edge_dist_left)
+            left_threshold = np.nanmin([dmg_5_percent, avg_left_edge_dist])
+            temp_idx = np.where(np.array(dist_moved_left) > left_threshold)[0]
+            if len(temp_idx) > 0:
+                self.edges['left_dist_moved_idx'] = np.array(transect_idx)[temp_idx]
+                self.edges['status'] = 'caution'
+                self.edges['messages'].append(['Edges: Excessive boat movement in left edge ensembles;', 2, 13])
+            else:
+                self.edges['left_dist_moved_idx'] = []
+
+            # Check for edge ensembles marked invalid due to excluded distance
+            self.edges['invalid_transect_left_idx'] = []
+            self.edges['invalid_transect_right_idx'] = []
+            for n, transect in enumerate(meas.transects):
+                if transect.checked:
+                    ens_valid = np.nansum(transect.w_vel.valid_data[0, :, :], 0) > 0
+                    ens_cells_above_sl = np.nansum(transect.w_vel.cells_above_sl, 0) > 0
+                    ens_invalid = np.logical_not(np.logical_and(ens_valid, ens_cells_above_sl))
+                    if np.any(ens_invalid):
+                        if transect.start_edge == 'Left':
+                            invalid_left = ens_invalid[0:int(transect.edges.left.number_ensembles)]
+                            invalid_right = ens_invalid[-int(transect.edges.right.number_ensembles):]
+                        else:
+                            invalid_right = ens_invalid[0:int(transect.edges.right.number_ensembles)]
+                            invalid_left = ens_invalid[-int(transect.edges.left.number_ensembles):]
+                        if len(invalid_left) > 0:
+                            left_invalid_percent = sum(invalid_left) / len(invalid_left)
+                        else:
+                            left_invalid_percent = 0
+                        if len(invalid_right) > 0:
+                            right_invalid_percent = sum(invalid_right) / len(invalid_right)
+                        else:
+                            right_invalid_percent = 0
+                        max_invalid_percent = max([left_invalid_percent, right_invalid_percent]) * 100
+                        if max_invalid_percent > 25:
+                            self.edges['status'] = 'caution'
+                            if np.any(invalid_left):
+                                self.edges['invalid_transect_left_idx'].append(n)
+                            if np.any(invalid_right):
+                                self.edges['invalid_transect_right_idx'].append(n)
+
+            if len(self.edges['invalid_transect_left_idx']) > 0 or len(self.edges['invalid_transect_right_idx']) > 0:
+                self.edges['messages'].append(['Edges: The percent of invalid ensembles exceeds 25% in' +
+                                               ' one or more transects.', 2, 13])
+
+            # Check edges for zero discharge
+            self.edges['left_zero'] = 0
+            temp_idx = np.where(np.round(left_q, 4) == 0)[0]
+            if len(temp_idx) > 0:
+                self.edges['left_zero_idx'] = np.array(transect_idx)[temp_idx]
+                self.edges['status'] = 'warning'
+                self.edges['messages'].append(['EDGES: Left edge has zero Q;', 1, 13])
+                self.edges['left_zero'] = 2
+            else:
+                self.edges['left_zero_idx'] = []
+
+            self.edges['right_zero'] = 0
+            temp_idx = np.where(np.round(right_q, 4) == 0)[0]
+            if len(temp_idx) > 0:
+                self.edges['right_zero_idx'] = np.array(transect_idx)[temp_idx]
+                self.edges['status'] = 'warning'
+                self.edges['messages'].append(['EDGES: Right edge has zero Q;', 1, 13])
+                self.edges['right_zero'] = 2
+            else:
+                self.edges['right_zero_idx'] = []
+
+            # Check consistent edge type
+            self.edges['left_type'] = 0
+            if len(np.unique(left_type)) > 1:
+                self.edges['status'] = 'warning'
+                self.edges['messages'].append(['EDGES: Left edge type is not consistent;', 1, 13])
+                self.edges['left_type'] = 2
+
+            self.edges['right_type'] = 0
+            if len(np.unique(right_type)) > 1:
+                self.edges['status'] = 'warning'
+                self.edges['messages'].append(['EDGES: Right edge type is not consistent;', 1, 13])
+                self.edges['right_type'] = 2
+        else:
+            self.edges['status'] = 'inactive'
+
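+    # Edge-Q percentages above are computed both from transect means and per
+    # transect (illustrative numbers):
+    #
+    #     left_q = [0.9, 1.0], total_q = [18.0, 22.0] -> mean_total_q = 20.0
+    #     left_q_percent = (0.95 / 20.0) * 100 = 4.75 -> not > 5, no message
+    #     per-transect ratios [0.045, 0.05] -> none > 0.05, left_q_idx = []
+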
+    @staticmethod
+    def invalid_qa(valid, discharge):
+        """Computes the total invalid discharge in ensembles that have invalid data. The function also computes
+        the maximum run or cluster of ensembles with the maximum interpolated discharge.
+
+        Parameters
+        ----------
+        valid: np.array(bool)
+            Array identifying valid and invalid ensembles.
+        discharge: QComp
+            Object of class QComp
+
+        Returns
+        -------
+        q_invalid_total: float
+            Total interpolated discharge in invalid ensembles
+        q_invalid_max_run: float
+            Maximum interpolated discharge in a run or cluster of invalid ensembles
+        ens_invalid: int
+            Total number of invalid ensembles
+        """
+
+        # Create bool for invalid data
+        invalid = np.logical_not(valid)
+        q_invalid_total = np.nansum(discharge.middle_ens[invalid]) + np.nansum(discharge.top_ens[invalid]) \
+            + np.nansum(discharge.bottom_ens[invalid])
+
+        # Compute total number of invalid ensembles
+        ens_invalid = np.sum(invalid)
+
+        # Find the indices where the valid/invalid state changes
+        valid_int = np.insert(valid.astype(int), 0, -1)
+        valid_int = np.append(valid_int, -1)
+        valid_run = np.where(np.diff(valid_int) != 0)[0]
+        run_length = np.diff(valid_run)
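+        # Runs alternate between valid and invalid. If the record starts
+        # valid, the invalid-run lengths sit at the odd positions of
+        # run_length, otherwise at the even positions; the boolean slice
+        # start below selects the invalid runs.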
+        run_length0 = run_length[(valid[0] == 1)::2]
+
+        n_runs = len(run_length0)
+
+        if valid[0]:
+            n_start = 1
+        else:
+            n_start = 0
+
+        n_end = len(valid_run) - 1
+
+        if n_runs > 0:
+            m = 0
+            q_invalid_run = []
+            for n in range(n_start, n_end, 2):
+                m += 1
+                idx_start = valid_run[n]
+                idx_end = valid_run[n + 1]
+                q_invalid_run.append(np.nansum(discharge.middle_ens[idx_start:idx_end])
+                                     + np.nansum(discharge.top_ens[idx_start:idx_end])
+                                     + np.nansum(discharge.bottom_ens[idx_start:idx_end]))
+
+            # Determine the maximum discharge in a single run
+            q_invalid_max_run = np.nanmax(np.abs(q_invalid_run))
+
+        else:
+            q_invalid_max_run = 0.0
+
+        return q_invalid_total, q_invalid_max_run, ens_invalid
+
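+    # Worked example of the run detection in invalid_qa (illustrative):
+    #
+    #     valid       = [T, T, F, F, T]
+    #     valid_int   = [-1, 1, 1, 0, 0, 1, -1]   (with -1 sentinels added)
+    #     diff != 0 at [0, 2, 4, 5]               (valid_run)
+    #     run_length  = [2, 2, 1]                 (alternating run lengths)
+    #     run_length0 = [2]                       (invalid runs only)
+    #
+    # The single invalid run spans ensembles 2:4, so q_invalid_max_run sums
+    # the middle/top/bottom discharge over that slice.
+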
+    @staticmethod
+    def edge_distance_moved(transect):
+        """Computes the boat movement during edge ensemble collection.
+
+        Parameters
+        ----------
+        transect: Transect
+            Object of class Transect
+
+        Returns
+        -------
+        right_dist_moved: float
+            Distance in m moved during collection of right edge samples
+        left_dist_moved: float
+            Distance in m moved during collection of left edge samples
+        dmg: float
+            Distance made good for the entire transect
+        """
+
+        boat_selected = getattr(transect.boat_vel, transect.boat_vel.selected)
+        ens_duration = transect.date_time.ens_duration_sec
+
+        # Get boat velocities
+        if boat_selected is not None:
+            u_processed = boat_selected.u_processed_mps
+            v_processed = boat_selected.v_processed_mps
+        else:
+            u_processed = np.tile(np.nan, transect.boat_vel.bt_vel.u_processed_mps.shape)
+            v_processed = np.tile(np.nan, transect.boat_vel.bt_vel.v_processed_mps.shape)
+
+        # Compute boat coordinates
+        x_processed = np.nancumsum(u_processed * ens_duration)
+        y_processed = np.nancumsum(v_processed * ens_duration)
+        dmg = (x_processed[-1] ** 2 + y_processed[-1] ** 2) ** 0.5
+
+        # Compute left distance moved
+        # TODO should be a dist moved function
+        left_edge_idx = QComp.edge_ensembles('left', transect)
+        if len(left_edge_idx) > 0:
+            boat_x = x_processed[left_edge_idx[-1]] - x_processed[left_edge_idx[0]]
+            boat_y = y_processed[left_edge_idx[-1]] - y_processed[left_edge_idx[0]]
+            left_dist_moved = (boat_x ** 2 + boat_y ** 2) ** 0.5
+        else:
+            left_dist_moved = np.nan
+
+        # Compute right distance moved
+        right_edge_idx = QComp.edge_ensembles('right', transect)
+        if len(right_edge_idx) > 0:
+            boat_x = x_processed[right_edge_idx[-1]] - x_processed[right_edge_idx[0]]
+            boat_y = y_processed[right_edge_idx[-1]] - y_processed[right_edge_idx[0]]
+            right_dist_moved = (boat_x ** 2 + boat_y ** 2) ** 0.5
+        else:
+            right_dist_moved = np.nan
+
+        return right_dist_moved, left_dist_moved, dmg
+    
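+    # edge_distance_moved dead-reckons boat position by integrating boat
+    # velocity over the ensemble durations. A minimal standalone sketch of
+    # the same computation (illustrative values only):
+    #
+    #     u = np.array([0.5, 0.5])
+    #     v = np.array([0.0, 0.0])
+    #     dt = np.array([1.0, 1.0])
+    #     x = np.nancumsum(u * dt)                  # -> [0.5, 1.0]
+    #     y = np.nancumsum(v * dt)                  # -> [0.0, 0.0]
+    #     dmg = (x[-1] ** 2 + y[-1] ** 2) ** 0.5    # -> 1.0 m
+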
+    # Checks for user changes to the processing settings
+    def check_bt_setting(self, meas):
+        """Checks the bottom track settings to see if they are still set to
+        the default settings.
+
+        Parameters
+        ----------
+        meas: Measurement
+            Object of class Measurement
+        """
+
+        self.settings_dict['tab_bt'] = 'Default'
+
+        s = meas.current_settings()
+        d = meas.qrev_default_settings()
+
+        if s['BTbeamFilter'] != d['BTbeamFilter']:
+            self.bt_vel['messages'].append(['BT: User modified default beam setting.', 3, 8])
+            self.settings_dict['tab_bt'] = 'Custom'
+
+        if s['BTdFilter'] != d['BTdFilter']:
+            self.bt_vel['messages'].append(['BT: User modified default error velocity filter.', 3, 8])
+            self.settings_dict['tab_bt'] = 'Custom'
+
+        if s['BTwFilter'] != d['BTwFilter']:
+            self.bt_vel['messages'].append(['BT: User modified default vertical velocity filter.', 3, 8])
+            self.settings_dict['tab_bt'] = 'Custom'
+
+        if s['BTsmoothFilter'] != d['BTsmoothFilter']:
+            self.bt_vel['messages'].append(['BT: User modified default smooth filter.', 3, 8])
+            self.settings_dict['tab_bt'] = 'Custom'
+
+    def check_wt_settings(self, meas):
+        """Checks the wt settings to see if they are still on the default
+                settings.
+
+        Parameters
+        ----------
+        meas: Measurement
+            Object of class Measurement
+        """
+
+        self.settings_dict['tab_wt'] = 'Default'
+
+        s = meas.current_settings()
+        d = meas.qrev_default_settings()
+
+        if round(s['WTExcludedDistance'], 2) != round(d['WTExcludedDistance'], 2):
+            self.w_vel['messages'].append(['WT: User modified excluded distance.', 3, 11])
+            self.settings_dict['tab_wt'] = 'Custom'
+
+        if s['WTbeamFilter'] != d['WTbeamFilter']:
+            self.w_vel['messages'].append(['WT: User modified default beam setting.', 3, 11])
+            self.settings_dict['tab_wt'] = 'Custom'
+
+        if s['WTdFilter'] != d['WTdFilter']:
+            self.w_vel['messages'].append(['WT: User modified default error velocity filter.', 3, 11])
+            self.settings_dict['tab_wt'] = 'Custom'
+
+        if s['WTwFilter'] != d['WTwFilter']:
+            self.w_vel['messages'].append(['WT: User modified default vertical velocity filter.', 3, 11])
+            self.settings_dict['tab_wt'] = 'Custom'
+
+        if s['WTsnrFilter'] != d['WTsnrFilter']:
+            self.w_vel['messages'].append(['WT: User modified default SNR filter.', 3, 11])
+            self.settings_dict['tab_wt'] = 'Custom'
+
+    def check_extrap_settings(self, meas):
+        """Checks the extrap to see if they are still on the default
+        settings.
+
+        Parameters
+        ----------
+        meas: Measurement
+            Object of class Measurement
+        """
+
+        self.settings_dict['tab_extrap'] = 'Default'
+
+        # Check fit parameters
+        if meas.extrap_fit.sel_fit[0].fit_method != 'Automatic':
+            self.settings_dict['tab_extrap'] = 'Custom'
+            self.extrapolation['messages'].append(['Extrapolation: User modified default automatic setting.', 3, 12])
+
+        # Check data parameters
+        if meas.extrap_fit.sel_fit[-1].data_type.lower() != 'q':
+            self.settings_dict['tab_extrap'] = 'Custom'
+            self.extrapolation['messages'].append(['Extrapolation: User modified data type.', 3, 12])
+
+        if meas.extrap_fit.threshold != 20:
+            self.settings_dict['tab_extrap'] = 'Custom'
+            self.extrapolation['messages'].append(['Extrapolation: User modified default threshold.', 3, 12])
+
+        if meas.extrap_fit.subsection[0] != 0 or meas.extrap_fit.subsection[1] != 100:
+            self.settings_dict['tab_extrap'] = 'Custom'
+            self.extrapolation['messages'].append(['Extrapolation: User modified subsectioning.', 3, 12])
+
+    def check_tempsal_settings(self, meas):
+        """Checks the temp and salinity settings to see if they are still on
+        the default settings.
+
+        Parameters
+        ----------
+        meas: Measurement
+            Object of class Measurement
+        """
+
+        self.settings_dict['tab_tempsal'] = 'Default'
+
+        t_source_change = False
+        salinity_change = False
+        s_sound_change = False
+        t_user_change = False
+        t_adcp_change = False
+
+        if not all(np.isnan([meas.ext_temp_chk['user'], meas.ext_temp_chk['user_orig']])):
+            if meas.ext_temp_chk['user'] != meas.ext_temp_chk['user_orig']:
+                t_user_change = True
+
+        if not all(np.isnan([meas.ext_temp_chk['adcp'], meas.ext_temp_chk['adcp_orig']])):
+            if meas.ext_temp_chk['adcp'] != meas.ext_temp_chk['adcp_orig']:
+                t_adcp_change = True
+
+        # Check each checked transect
+        for idx in meas.checked_transect_idx:
+            transect = meas.transects[idx]
+
+            # Temperature source
+            if transect.sensors.temperature_deg_c.selected != 'internal':
+                t_source_change = True
+
+            if transect.sensors.salinity_ppt.selected != 'internal':
+                sal = getattr(transect.sensors.salinity_ppt, transect.sensors.salinity_ppt.selected)
+                if not np.all(np.equal(sal.data, transect.sensors.salinity_ppt.internal.data)):
+                    salinity_change = True
+
+            # Speed of Sound
+            if transect.sensors.speed_of_sound_mps.selected != 'internal':
+                s_sound_change = True
+
+        # Report condition and messages
+        if any([t_source_change, salinity_change, s_sound_change, t_adcp_change, t_user_change]):
+            self.settings_dict['tab_tempsal'] = 'Custom'
+            
+            if t_source_change:
+                self.temperature['messages'].append(['Temperature: User modified temperature source.', 3, 5])
+
+            if s_sound_change:
+                self.temperature['messages'].append(['Temperature: User modified speed of sound source.', 3, 5])
+
+            if t_user_change:
+                self.temperature['messages'].append(['Temperature: User modified independent temperature.', 3, 5])
+
+            if t_adcp_change:
+                self.temperature['messages'].append(['Temperature: User modified ADCP temperature.', 3, 5])
+         
+    def check_gps_settings(self, meas):
+        """Checks the gps settings to see if they are still on the default
+        settings.
+
+        Parameters
+        ----------
+        meas: Measurement
+            Object of class Measurement
+        """
+
+        gps = False
+        self.settings_dict['tab_gps'] = 'Default'
+
+        # Check for transects with gga or vtg data
+        for idx in meas.checked_transect_idx:
+            transect = meas.transects[idx]
+            if transect.boat_vel.gga_vel is not None or transect.boat_vel.vtg_vel is not None:
+                gps = True
+                break
+
+        # If gga or vtg data exist check settings
+        if gps:
+
+            s = meas.current_settings()
+            d = meas.qrev_default_settings()
+
+            if s['ggaDiffQualFilter'] != d['ggaDiffQualFilter']:
+                self.gga_vel['messages'].append(['GPS: User modified default quality setting.', 3, 8])
+                self.settings_dict['tab_gps'] = 'Custom'
+
+            if s['ggaAltitudeFilter'] != d['ggaAltitudeFilter']:
+                self.gga_vel['messages'].append(['GPS: User modified default altitude filter.', 3, 8])
+                self.settings_dict['tab_gps'] = 'Custom'
+
+            if s['GPSHDOPFilter'] != d['GPSHDOPFilter']:
+                self.gga_vel['messages'].append(['GPS: User modified default HDOP filter.', 3, 8])
+                self.settings_dict['tab_gps'] = 'Custom'
+
+            if s['GPSSmoothFilter'] != d['GPSSmoothFilter']:
+                self.gga_vel['messages'].append(['GPS: User modified default smooth filter.', 3, 8])
+                self.settings_dict['tab_gps'] = 'Custom'
+
+    def check_depth_settings(self, meas):
+        """Checks the depth settings to see if they are still on the default
+                settings.
+
+        Parameters
+        ----------
+        meas: Measurement
+            Object of class Measurement
+        """
+
+        self.settings_dict['tab_depth'] = 'Default'
+
+        s = meas.current_settings()
+        d = meas.qrev_default_settings()
+
+        if s['depthReference'] != d['depthReference']:
+            self.depths['messages'].append(['Depths: User modified '
+                                            'depth reference.', 3, 10])
+            self.settings_dict['tab_depth'] = 'Custom'
+
+        if s['depthComposite'] != d['depthComposite']:
+            self.depths['messages'].append(['Depths: User modified '
+                                            'composite depths setting.', 3, 10])
+            self.settings_dict['tab_depth'] = 'Custom'
+
+        if s['depthAvgMethod'] != d['depthAvgMethod']:
+            self.depths['messages'].append(['Depths: User modified '
+                                            'averaging method.', 3, 10])
+            self.settings_dict['tab_depth'] = 'Custom'
+
+        if s['depthFilterType'] != d['depthFilterType']:
+            self.depths['messages'].append(['Depths: User modified '
+                                            'filter type.', 3, 10])
+            self.settings_dict['tab_depth'] = 'Custom'
+
+        for idx in meas.checked_transect_idx:
+            transect = meas.transects[idx]
+            if transect.depths.bt_depths.draft_orig_m != transect.depths.bt_depths.draft_use_m:
+                self.depths['messages'].append(['Depths: User modified '
+                                                'draft.', 3, 10])
+                self.settings_dict['tab_depth'] = 'Custom'
+                break
+
+    def check_edge_settings(self, meas):
+        """Checks the edge settings to see if they are still on the original
+                settings.
+
+        Parameters
+        ----------
+        meas: Measurement
+            Object of class Measurement
+        """
+
+        start_edge_change = False
+        left_edge_type_change = False
+        left_edge_dist_change = False
+        left_edge_ens_change = False
+        left_edge_q_change = False
+        left_edge_coef_change = False
+        right_edge_type_change = False
+        right_edge_dist_change = False
+        right_edge_ens_change = False
+        right_edge_q_change = False
+        right_edge_coef_change = False
+
+        for idx in meas.checked_transect_idx:
+            transect = meas.transects[idx]
+
+            if transect.start_edge != transect.orig_start_edge:
+                start_edge_change = True
+
+            if transect.edges.left.type != transect.edges.left.orig_type:
+                left_edge_type_change = True
+
+            if transect.edges.left.distance_m != transect.edges.left.orig_distance_m:
+                left_edge_dist_change = True
+
+            if transect.edges.left.number_ensembles != transect.edges.left.orig_number_ensembles:
+                left_edge_ens_change = True
+
+            if transect.edges.left.user_discharge_cms != transect.edges.left.orig_user_discharge_cms:
+                left_edge_q_change = True
+
+            if transect.edges.left.cust_coef != transect.edges.left.orig_cust_coef:
+                left_edge_coef_change = True
+
+            if transect.edges.right.type != transect.edges.right.orig_type:
+                right_edge_type_change = True
+
+            if transect.edges.right.distance_m != transect.edges.right.orig_distance_m:
+                right_edge_dist_change = True
+
+            if transect.edges.right.number_ensembles != transect.edges.right.orig_number_ensembles:
+                right_edge_ens_change = True
+
+            if transect.edges.right.user_discharge_cms != transect.edges.right.orig_user_discharge_cms:
+                right_edge_q_change = True
+
+            if transect.edges.right.cust_coef != transect.edges.right.orig_cust_coef:
+                right_edge_coef_change = True
+
+        if any([start_edge_change, left_edge_type_change, left_edge_dist_change, left_edge_ens_change,
+                left_edge_q_change, left_edge_coef_change, right_edge_type_change, right_edge_dist_change,
+                right_edge_ens_change, right_edge_q_change, right_edge_coef_change]):
+            self.settings_dict['tab_edges'] = 'Custom'
+
+            if start_edge_change:
+                self.edges['messages'].append(['Edges: User modified start edge.', 3, 10])
+            if left_edge_type_change:
+                self.edges['messages'].append(['Edges: User modified left edge type.', 3, 10])
+            if left_edge_dist_change:
+                self.edges['messages'].append(['Edges: User modified left edge distance.', 3, 10])
+            if left_edge_ens_change:
+                self.edges['messages'].append(['Edges: User modified left number of ensembles.', 3, 10])
+            if left_edge_q_change:
+                self.edges['messages'].append(['Edges: User modified left user discharge.', 3, 10])
+            if left_edge_coef_change:
+                self.edges['messages'].append(['Edges: User modified left custom coefficient.', 3, 10])
+            if right_edge_type_change:
+                self.edges['messages'].append(['Edges: User modified right edge type.', 3, 10])
+            if right_edge_dist_change:
+                self.edges['messages'].append(['Edges: User modified right edge distance.', 3, 10])
+            if right_edge_ens_change:
+                self.edges['messages'].append(['Edges: User modified right number of ensembles.', 3, 10])
+            if right_edge_q_change:
+                self.edges['messages'].append(['Edges: User modified right user discharge.', 3, 10])
+            if right_edge_coef_change:
+                self.edges['messages'].append(['Edges: User modified right custom coefficient.', 3, 10])
+        else:
+            self.settings_dict['tab_edges'] = 'Default'
+
+    def check_mbt_settings(self, meas):
+        """Checks the mbt settings to see if they are still on the original
+                settings.
+
+        Parameters
+        ----------
+        meas: Measurement
+            Object of class Measurement
+        """
+
+        self.settings_dict['tab_mbt'] = 'Default'
+
+        # If there are mb tests, check for user changes
+        if len(meas.mb_tests) >= 1:
+            mbt = meas.mb_tests
+
+            mb_user_valid = []
+            mb_used = []
+
+            auto = copy.deepcopy(mbt)
+            auto = MovingBedTests.auto_use_2_correct(auto)
+
+            for n in range(len(mbt)):
+
+                if mbt[n].user_valid:
+                    mb_user_valid.append(False)
+                else:
+                    mb_user_valid.append(True)
+
+                if mbt[n].use_2_correct != auto[n].use_2_correct and \
+                        meas.transects[meas.checked_transect_idx[0]].boat_vel.selected == 'bt_vel':
+                    mb_used.append(True)
+                else:
+                    mb_used.append(False)
+
+            if any(mb_user_valid):
+                self.settings_dict['tab_mbt'] = 'Custom'
+                self.movingbed['messages'].append(['Moving-Bed Test: '
+                                                   'User modified '
+                                                   'valid test settings.', 3, 6])
+            if any(mb_used):
+                self.settings_dict['tab_mbt'] = 'Custom'
+                self.movingbed['messages'].append(['Moving-Bed Test: '
+                                                   'User modified '
+                                                   'use to correct settings.', 3, 6])
+
+        if meas.observed_no_moving_bed:
+            self.settings_dict['tab_mbt'] = 'Custom'
+
+    def check_compass_settings(self, meas):
+        """Checks the compass settings for changes.
+
+        Parameters
+        ----------
+        meas: Measurement
+            Object of class Measurement
+        """
+
+        self.settings_dict['tab_compass'] = 'Default'
+
+        magvar_change = False
+        align_change = False
+
+        # Check each checked transect
+        for idx in meas.checked_transect_idx:
+            transect = meas.transects[idx]
+
+            # Magvar
+            if transect.sensors.heading_deg.internal.mag_var_deg != \
+                    transect.sensors.heading_deg.internal.mag_var_orig_deg:
+                magvar_change = True
+
+            # Heading offset
+            if transect.sensors.heading_deg.external is not None:
+                if transect.sensors.heading_deg.external.align_correction_deg != \
+                        transect.sensors.heading_deg.external.align_correction_orig_deg:
+                    align_change = True
+
+        # Report condition and messages
+        if any([magvar_change, align_change]):
+            self.settings_dict['tab_compass'] = 'Custom'
+
+            if magvar_change:
+                self.compass['messages'].append(['Compass: User modified magnetic variation.', 3, 4])
+
+            if align_change:
+                self.compass['messages'].append(['Compass: User modified heading offset.', 3, 4])
+
+    def check_oursin(self, meas):
+        """Checks the compass settings for changes.
+
+        Parameters
+        ----------
+        meas: Measurement
+            Object of class Measurement
+        """
+
+        self.settings_dict['tab_uncertainty_2_advanced'] = 'Default'
+        self.settings_dict['tab_uncertainty'] = 'Default'
+
+        for key in meas.oursin.user_advanced_settings.keys():
+            if not np.isnan(meas.oursin.user_advanced_settings[key]):
+                self.settings_dict['tab_uncertainty_2_advanced'] = 'Custom'
+                self.settings_dict['tab_uncertainty'] = 'Custom'
+                break
+
+        for key in meas.oursin.user_specified_u.keys():
+            if not np.isnan(meas.oursin.user_specified_u[key]):
+                self.settings_dict['tab_uncertainty'] = 'Custom'
+                break
diff --git a/Classes/QComp.py b/Classes/QComp.py
new file mode 100644
index 0000000..f227e5e
--- /dev/null
+++ b/Classes/QComp.py
@@ -0,0 +1,1925 @@
+import numpy as np
+from Classes.TransectData import TransectData
+from Classes.BoatStructure import BoatStructure
+from MiscLibs.common_functions import cart2pol, pol2cart, nan_less, nan_greater
+# from profilehooks import profile
+from DischargeFunctions.top_discharge_extrapolation import extrapolate_top
+from DischargeFunctions.bottom_discharge_extrapolation import extrapolate_bot
+
+
+class QComp(object):
+    """Computes the discharge for each transect.
+
+    Attributes
+    ----------
+    top: float
+        Transect total extrapolated top discharge
+    middle: float
+        Transect total measured middle discharge including interpolations
+    bottom: float
+        Transect total extrapolated bottom discharge
+    top_ens: np.array(float)
+        Extrapolated top discharge by ensemble
+    middle_cells: np.array(float)
+        Measured middle discharge including interpolation by cell
+    middle_ens: np.array(float)
+        Measured middle discharge including interpolation by ensemble
+    bottom_ens: np.array(float)
+        Extrapolated bottom discharge by ensemble
+    left: float
+        Left edge discharge
+    left_idx:
+        Ensembles used for left edge
+    right: float
+        Right edge discharge
+    right_idx:
+        Ensembles used for right edge
+    total_uncorrected: float
+        Total discharge for transect uncorrected for moving-bed, if required
+    total: float
+        Total discharge with moving-bed correction applied if necessary
+    correction_factor: float
+        Moving-bed correction factor, if required
+    int_cells: float
+        Total discharge computed for invalid depth cells excluding invalid ensembles
+    int_ens: float
+        Total discharge computed for invalid ensembles
+    """
+    
+    def __init__(self):
+        """Initialize class and instance variables."""
+
+        self.top = None  # Transect total extrapolated top discharge
+        self.middle = None  # Transect total measured middle discharge including interpolations
+        self.bottom = None  # Transect total extrapolated bottom discharge
+        self.top_ens = None  # Extrapolated top discharge by ensemble
+        self.middle_cells = None  # Measured middle discharge including interpolation by cell
+        self.middle_ens = None  # Measured middle discharge including interpolation by ensemble
+        self.bottom_ens = None  # Extrapolated bottom discharge by ensemble
+        self.left = None  # Left edge discharge
+        self.left_idx = []  # Ensembles used for left edge
+        self.right = None  # Right edge discharge
+        self.right_idx = []  # Ensembles used for right edge
+        self.total_uncorrected = None  # Total discharge for transect uncorrected for moving-bed, if required
+        self.total = None  # Total discharge with moving-bed correction applied if necessary
+        self.correction_factor = 1  # Moving-bed correction factor, if required
+        self.int_cells = None  # Total discharge computed for invalid depth cells excluding invalid ensembles
+        self.int_ens = None  # Total discharge computed for invalid ensembles
+
+    # @profile
+    def populate_data(self, data_in, moving_bed_data=None, top_method=None, bot_method=None, exponent=None):
+        """Discharge is computed using the data provided to the method.
+        Water data provided are assumed to be corrected for the navigation reference.
+        If a moving-bed correction is to be applied it is computed and applied.
+        The TRDI method using expanded delta time is applied if the processing method is WR2.
+        
+        Parameters
+        ----------
+        data_in: TransectData
+            Object TransectData
+        moving_bed_data: list
+            List of MovingBedTests objects
+        top_method: str
+            Top extrapolation method
+        bot_method: str
+            Bottom extrapolation method
+        exponent: float
+            Extrapolation exponent
+        """
+
+        # Use bottom track interpolation settings to determine the appropriate algorithms to apply
+        if data_in.boat_vel.bt_vel.interpolate == 'None':
+            processing = 'WR2'
+        elif data_in.boat_vel.bt_vel.interpolate == 'Linear':
+            processing = 'QRev'
+        else:
+            processing = 'RSL'
+
+        # Compute cross product
+        x_prod = QComp.cross_product(data_in)
+        
+        # Get index of ensembles in moving-boat portion of transect
+        in_transect_idx = data_in.in_transect_idx
+        
+        if processing == 'WR2':
+            # TRDI uses expanded delta time to handle invalid ensembles, which can be caused by invalid BT,
+            # WT, or depth. QRev by default handles this invalid data through linear interpolation of the
+            # invalid data type. This if statement and associated code are required to maintain
+            # compatibility with WinRiver II discharge computations.
+            
+            # Determine valid ensembles
+            valid_ens = np.any(np.logical_not(np.isnan(x_prod)), 0)
+            valid_ens = valid_ens[in_transect_idx]
+            
+            # Compute the ensemble duration using TRDI approach of expanding delta time to compensate
+            # for invalid ensembles
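+            # Illustrative example (not from the original source): with
+            # ens_dur = [1, 1, 1, 1] s and valid_ens = [T, F, F, T], the
+            # invalid durations accumulate into the next valid ensemble,
+            # giving delta_t = [nan, nan, nan, 3]; delta_t[0] stays nan
+            # because the loop below starts at index 1.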
+            n_ens = len(valid_ens)
+            ens_dur = data_in.date_time.ens_duration_sec[in_transect_idx]
+            delta_t = np.tile([np.nan], n_ens)
+            cum_dur = 0
+            idx = 1
+            for j in range(idx, n_ens):
+                cum_dur = np.nansum(np.hstack([cum_dur, ens_dur[j]]))
+                if valid_ens[j]:
+                    delta_t[j] = cum_dur
+                    cum_dur = 0
+                    
+        else:
+            # For non-WR2 processing use actual ensemble duration
+            delta_t = data_in.date_time.ens_duration_sec[in_transect_idx]
+            
+        # Compute measured or middle discharge
+        self.middle_cells = QComp.discharge_middle_cells(x_prod, data_in, delta_t)
+        self.middle_ens = np.nansum(self.middle_cells, 0)
+        self.middle = np.nansum(self.middle_ens)
+        
+        # Compute the top discharge
+        trans_select = getattr(data_in.depths, data_in.depths.selected)
+        num_top_method = {'Power': 0, 'Constant': 1, '3-Point': 2, None: -1}
+        self.top_ens = extrapolate_top(x_prod, data_in.w_vel.valid_data[0, :, :],
+                                       num_top_method[data_in.extrap.top_method],
+                                       data_in.extrap.exponent, data_in.in_transect_idx, trans_select.depth_cell_size_m,
+                                       trans_select.depth_cell_depth_m, trans_select.depth_processed_m, delta_t,
+                                       num_top_method[top_method], exponent)
+        self.top = np.nansum(self.top_ens)
+
+        # Compute the bottom discharge
+        num_bot_method = {'Power': 0, 'No Slip': 1, None: -1}
+        self.bottom_ens = extrapolate_bot(x_prod,
+                                          data_in.w_vel.valid_data[0, :, :],
+                                          num_bot_method[data_in.extrap.bot_method],
+                                          data_in.extrap.exponent,
+                                          data_in.in_transect_idx,
+                                          trans_select.depth_cell_size_m,
+                                          trans_select.depth_cell_depth_m,
+                                          trans_select.depth_processed_m, delta_t,
+                                          num_bot_method[bot_method],
+                                          exponent)
+        self.bottom = np.nansum(self.bottom_ens)
+
+        # Compute interpolated cell and ensemble discharge from computed
+        # measured discharge
+        self.interpolate_no_cells(data_in)
+        self.middle = np.nansum(self.middle_ens)
+        self.int_cells, self.int_ens = QComp.discharge_interpolated(self.top_ens, self.middle_cells,
+                                                                    self.bottom_ens, data_in)
+        
+        # Compute right edge discharge
+        if data_in.edges.right.type != 'User Q':
+            self.right, self.right_idx = QComp.discharge_edge('right', data_in, top_method, bot_method, exponent)
+        else:
+            self.right = data_in.edges.right.user_discharge_cms
+            self.right_idx = []
+
+        # Compute left edge discharge
+        if data_in.edges.left.type != 'User Q':
+            self.left, self.left_idx = QComp.discharge_edge('left', data_in, top_method, bot_method, exponent)
+        else:
+            self.left = data_in.edges.left.user_discharge_cms
+            self.left_idx = []
+            
+        # Compute moving-bed correction, if applicable.  Two checks are used to account for the
+        # way the meas object is created.
+
+        # Moving-bed corrections are only applied to bottom track referenced computations
+        mb_type = None
+        if data_in.boat_vel.selected == 'bt_vel':
+            if moving_bed_data is not None:
+
+                # Determine if a moving-bed test is to be used for correction
+                use_2_correct = []
+                for mb_idx, test in enumerate(moving_bed_data):
+                    use_2_correct.append(test.use_2_correct)
+                    if test.use_2_correct:
+                        mb_type = test.type
+
+                if any(use_2_correct):
+
+                    # Make sure composite tracks are turned off
+                    if data_in.boat_vel.composite == 'Off':
+                        # Apply appropriate moving-bed test correction method
+                        if mb_type == 'Stationary':
+                            self.correction_factor = self.stationary_correction_factor(self.top, self.middle,
+                                                                                       self.bottom, data_in,
+                                                                                       moving_bed_data, delta_t)
+                        else:
+                            self.correction_factor = \
+                                self.loop_correction_factor(self.top, self.middle,
+                                                            self.bottom, data_in,
+                                                            moving_bed_data[use_2_correct.index(True)],
+                                                            delta_t)
+
+        self.total_uncorrected = self.left + self.right + self.middle + self.bottom + self.top
+
+        # Compute final discharge using correction if applicable
+        if self.correction_factor is None or self.correction_factor == 1 or np.isnan(self.correction_factor):
+            self.total = self.total_uncorrected
+        else:
+            self.total = self.left + self.right + (self.middle + self.bottom + self.top) * self.correction_factor
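+        # Illustrative example (not from the original source): with edge
+        # discharges of 0.5 m3/s each, middle + top + bottom = 10 m3/s, and a
+        # correction factor of 1.05, total = 0.5 + 0.5 + 10 * 1.05
+        # = 11.5 m3/s, while total_uncorrected remains 11.0 m3/s.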
+
+    @staticmethod
+    def qrev_mat_in(meas_struct):
+        """Processes the Matlab data structure to obtain a list of QComp objects containing the discharge data from the
+        Matlab data structure.
+
+        Parameters
+        ----------
+        meas_struct: mat_struct
+            Matlab data structure obtained from sio.loadmat
+
+        Returns
+        -------
+        discharge: list
+            List of QComp data objects
+        """
+
+        discharge = []
+        if hasattr(meas_struct.discharge, 'bottom'):
+            # Measurement has discharge data from only one transect
+            q = QComp()
+            q.populate_from_qrev_mat(meas_struct.discharge)
+            discharge.append(q)
+        else:
+            # Measurement has discharge data from multiple transects
+            for q_data in meas_struct.discharge:
+                q = QComp()
+                q.populate_from_qrev_mat(q_data)
+                discharge.append(q)
+        return discharge
+
+    def populate_from_qrev_mat(self, q_in):
+        """Populated QComp instance variables with data from QRev Matlab file.
+
+        Parameters
+        ----------
+        q_in: mat_struct
+            mat_struct_object containing QComp class data
+        """
+
+        self.top = q_in.top
+        self.middle = q_in.middle
+        self.bottom = q_in.bottom
+
+        if type(q_in.topEns) is not np.ndarray:
+            self.top_ens = np.array([q_in.topEns])
+            self.middle_ens = np.array([q_in.middleEns])
+            self.bottom_ens = np.array([q_in.bottomEns])
+        else:
+            self.top_ens = q_in.topEns
+            self.middle_ens = q_in.middleEns
+            self.bottom_ens = q_in.bottomEns
+
+        self.middle_cells = q_in.middleCells
+        # Handle special case for 1 ensemble or 1 cell
+        if len(self.middle_cells.shape) < 2:
+            if self.middle_ens.size > 1:
+                # Multiple ensembles, one cell
+                self.middle_cells = self.middle_cells[np.newaxis, :]
+            else:
+                # One ensemble, multiple cells
+                self.middle_cells = self.middle_cells[:, np.newaxis]
+
+        self.left = q_in.left
+        # If only one value, it will be read in as int but needs to be an array of len 1
+        if type(q_in.leftidx) is int:
+            self.left_idx = np.array([q_in.leftidx])
+        else:
+            self.left_idx = q_in.leftidx
+        self.right = q_in.right
+        # If only one value, it will be read in as int but needs to be an array of len 1
+        if type(q_in.rightidx) is int:
+            self.right_idx = np.array([q_in.rightidx])
+        else:
+            self.right_idx = q_in.rightidx
+        self.total_uncorrected = q_in.totalUncorrected
+        self.total = q_in.total
+        self.correction_factor = q_in.correctionFactor
+        if type(self.correction_factor) is np.ndarray:
+            if len(self.correction_factor) == 0:
+                self.correction_factor = 1
+            else:
+                self.correction_factor = self.correction_factor[0]
+        self.int_cells = q_in.intCells
+        self.int_ens = q_in.intEns
+
+    def interpolate_no_cells(self, transect_data):
+        """Computes discharge for ensembles where the depth is too
+           shallow for any valid depth cells. The computation is done
+           using interpolation of unit discharge defined as the ensemble
+           discharge divided by the depth of the ensemble and the
+           duration of the ensemble. The independent variable for the
+           interpolation is the track distance. After interpolation the
+           discharge for the interpolated ensembles is computed by
+           multiplying the interpolated value by the depth and duration
+           of those ensembles to achieve discharge for those ensembles.
+
+           Parameters
+           ----------
+           transect_data: TransectData
+                Object of TransectData
+        """
+
+        # Compute the discharge in each ensemble
+        q_ensemble = self.top_ens + self.middle_ens + self.bottom_ens
+        valid_ens = np.where(np.logical_not(np.isnan(q_ensemble)))[0]
+        if len(valid_ens) > 1:
+            idx = np.where(np.isnan(q_ensemble))[0]
+
+            if len(idx) > 0:
+
+                # Compute the unit discharge by depth for each ensemble
+                depth_selected = getattr(transect_data.depths, transect_data.depths.selected)
+                unit_q_depth = (q_ensemble / depth_selected.depth_processed_m[transect_data.in_transect_idx]) \
+                    / transect_data.date_time.ens_duration_sec[transect_data.in_transect_idx]
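+                # Illustrative example (not from the original source): an
+                # ensemble with q = 1.0 m3/s, depth = 2.0 m, and duration
+                # 1.0 s has unit_q_depth = (1.0 / 2.0) / 1.0 = 0.5, which is
+                # what gets interpolated over track distance below.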
+
+                # Compute boat track
+                boat_track = BoatStructure.compute_boat_track(transect_data, transect_data.boat_vel.selected)
+
+                # Create strict monotonic vector for 1-D interpolation
+                q_mono = unit_q_depth
+                x_mono = boat_track['distance_m'][transect_data.in_transect_idx]
+
+                # Identify duplicate values, and replace with an average
+                dups = self.group_consecutives(x_mono)
+                if len(dups):
+                    for dup in dups:
+                        q_avg = np.nanmean(q_mono[np.array(dup)])
+                        q_mono[dup[0]] = q_avg
+                        q_mono[dup[1::]] = np.nan
+                        x_mono[dup[1::]] = np.nan
+
+                valid_q_mono = np.logical_not(np.isnan(q_mono))
+                valid_x_mono = np.logical_not(np.isnan(x_mono))
+                valid = np.all(np.vstack([valid_q_mono, valid_x_mono]), 0)
+
+                # Interpolate unit q
+                if np.any(valid):
+                    unit_q_int = np.interp(boat_track['distance_m'][transect_data.in_transect_idx], x_mono[valid],
+                                           q_mono[valid], left=np.nan, right=np.nan)
+                else:
+                    unit_q_int = 0
+
+                # Compute the discharge in each ensemble based on interpolated data
+                q_int = unit_q_int * depth_selected.depth_processed_m[transect_data.in_transect_idx] \
+                    * transect_data.date_time.ens_duration_sec[transect_data.in_transect_idx]
+                self.middle_ens[idx] = q_int[idx]
+
+    @staticmethod
+    def group_consecutives(vals):
+        """Return list of consecutive lists of numbers from vals (number list).
+        """
+
+        run = []
+        result = []
+        expect = vals[0]
+        j = 0
+        for n in range(1, len(vals)):
+            if vals[n] == expect:
+                j += 1
+                if j > 1:
+                    run.append(n)
+                elif j > 0:
+                    run.append(n-1)
+                    run.append(n)
+            elif j > 0:
+                result.append(run)
+                run = []
+                j = 0
+            expect = vals[n]
+        # Append a run that extends to the end of vals
+        if j > 0:
+            result.append(run)
+        return result
+
+    @staticmethod
+    def cross_product(transect=None, w_vel_x=None, w_vel_y=None, b_vel_x=None, b_vel_y=None, start_edge=None):
+        """Computes the cross product of the water and boat velocity.
+
+        Input data can be a transect or component vectors for the water and boat velocities with the start edge.
+
+        Parameters
+        ----------
+        transect: TransectData
+            Object of TransectData
+        w_vel_x: np.array(float)
+            Array of water velocity in the x direction
+        w_vel_y: np.array(float)
+            Array of water velocity in the y direction
+        b_vel_x: np.array(float)
+            Vector of navigation velocity in x-direction
+        b_vel_y: np.array(float)
+            Vector of navigation velocity in y-direction
+        start_edge: str
+            Starting edge of transect (Left or Right)
+
+        Returns
+        -------
+        xprod: np.array(float)
+            Cross product values
+        """
+
+        if transect is not None:
+            # Prepare water track data
+            cells_above_sl = np.array(transect.w_vel.cells_above_sl).astype(float)
+            cells_above_sl[cells_above_sl < 0.5] = np.nan
+            w_vel_x = transect.w_vel.u_processed_mps * cells_above_sl
+            w_vel_y = transect.w_vel.v_processed_mps * cells_above_sl
+
+            # Get navigation data from object properties
+            trans_select = getattr(transect.boat_vel, transect.boat_vel.selected)
+            if trans_select is not None:
+                b_vel_x = trans_select.u_processed_mps
+                b_vel_y = trans_select.v_processed_mps
+            else:
+                b_vel_x = np.tile([np.nan], transect.boat_vel.bt_vel.u_processed_mps.shape)
+                b_vel_y = np.tile([np.nan], transect.boat_vel.bt_vel.v_processed_mps.shape)
+
+            start_edge = transect.start_edge
+
+        # Compute the cross product
+        xprod = np.multiply(w_vel_x, b_vel_y) - np.multiply(w_vel_y, b_vel_x)
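+        # Illustrative example (not from the original source): water moving
+        # +x at 1 m/s while the boat moves +y at 1 m/s gives
+        # xprod = 1 * 1 - 0 * 0 = 1 m2/s2, before the start-edge sign
+        # correction applied below.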
+
+        # Correct the sign of the cross product based on the start edge
+        if start_edge == 'Right':
+            direction = 1
+        else:
+            direction = -1
+        xprod = xprod * direction
+
+        return xprod
+
+    @staticmethod
+    def discharge_middle_cells(xprod, transect, delta_t):
+        """Computes the discharge in the measured or middle portion of the cross section.
+
+        Parameters
+        ----------
+        xprod: np.array(float)
+            Cross product computed from the cross product method
+        transect: TransectData
+            Object of TransectData
+        delta_t: np.array(float)
+            Duration of each ensemble computed from QComp
+
+        Returns
+        -------
+        q_mid_cells: np.array(float)
+            Discharge in each bin or depth cell
+        """
+
+        # Assign properties from transect object to local variables
+        in_transect_idx = transect.in_transect_idx
+        trans_select = getattr(transect.depths, transect.depths.selected)
+        cell_size = trans_select.depth_cell_size_m
+
+        # Determine if xprod contains edge data and process appropriately
+        q_mid_cells = np.multiply(xprod[:, in_transect_idx] * cell_size[:, in_transect_idx], delta_t)
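+        # Illustrative example (not from the original source): a cell with
+        # xprod = 0.5 m2/s2, cell size = 0.25 m, and delta_t = 1 s
+        # contributes q = 0.5 * 0.25 * 1 = 0.125 m3/s.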
+
+        return q_mid_cells
+
+    @staticmethod
+    def discharge_edge(edge_loc, transect, top_method=None, bot_method=None, exponent=None):
+        """Computes edge discharge.
+
+        Parameters
+        ----------
+        edge_loc: str
+            Edge location (left or right)
+        transect: TransectData
+            Object of TransectData
+        top_method: str
+            Top extrapolation method
+        bot_method: str
+            Bottom extrapolation method
+        exponent: float
+            Exponent
+
+        Returns
+        -------
+        edge_q: float
+            Computed edge discharge
+        edge_idx: list
+            List of valid edge ensembles
+        """
+
+        # Determine what ensembles to use for edge computation.
+        # The method of determining varies by manufacturer
+        edge_idx = QComp.edge_ensembles(edge_loc, transect)
+
+        # Average depth for the edge ensembles
+        trans_select = getattr(transect.depths, transect.depths.selected)
+        depth = trans_select.depth_processed_m[edge_idx]
+        depth_avg = np.nanmean(depth)
+
+        # Edge distance
+        edge_selected = getattr(transect.edges, edge_loc)
+        edge_dist = edge_selected.distance_m
+
+        # Compute edge velocity and sign
+        edge_vel_sign, edge_vel_mag = QComp.edge_velocity(edge_idx, transect, top_method, bot_method, exponent)
+
+        # Compute edge coefficient
+        coef = QComp.edge_coef(edge_loc, transect)
+
+        # Compute edge discharge
+        edge_q = coef * depth_avg * edge_vel_mag * edge_dist * edge_vel_sign
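+        # Illustrative example (not from the original source): an edge with
+        # coef = 0.3535, depth_avg = 1.0 m, velocity 0.2 m/s, distance 5.0 m,
+        # and positive sign gives edge_q = 0.3535 * 1.0 * 0.2 * 5.0
+        # = 0.3535 m3/s.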
+        if np.isnan(edge_q):
+            edge_q = 0
+
+        return edge_q, edge_idx
+
+    @staticmethod
+    def edge_ensembles(edge_loc, transect):
+        """This function computes the starting and ending ensemble numbers for an edge.
+
+        This method uses either the TRDI method, which uses the specified number of valid ensembles, or the
+        SonTek method, which uses the specified number of ensembles prior to screening for valid data.
+
+        Parameters
+        ----------
+        edge_loc: str
+            Edge location (left or right)
+        transect: TransectData
+            Object of TransectData
+
+        Returns
+        -------
+        edge_idx: np.array
+            Indices of ensembles used to compute edge discharge
+        """
+
+        # Assign number of ensembles in edge to local variable
+        edge_select = getattr(transect.edges, edge_loc)
+        num_edge_ens = int(edge_select.number_ensembles)
+
+        # TRDI method
+        if transect.adcp.manufacturer == 'TRDI':
+            # Determine the indices of the edge ensembles which contain
+            # the specified number of valid ensembles
+            # noinspection PyTypeChecker
+            valid_ens = QComp.valid_edge_ens(transect)
+            if num_edge_ens > len(valid_ens):
+                num_edge_ens = len(valid_ens)
+            if edge_loc.lower() == transect.start_edge.lower():
+                edge_idx = np.where(valid_ens)[0][0:num_edge_ens]
+            else:
+                edge_idx = np.where(valid_ens)[0][-num_edge_ens::]
+
+        # Sontek Method
+        else:
+            # Determine the indices of the edge ensembles as collected by RiverSurveyor.  There
+            # is no check as to whether the ensembles contain valid data
+            trans_select = getattr(transect.depths, transect.depths.selected)
+            n_ensembles = len(trans_select.depth_processed_m)
+            if num_edge_ens > n_ensembles:
+                num_edge_ens = n_ensembles
+            if edge_loc.lower() == transect.start_edge.lower():
+                edge_idx = np.arange(0, num_edge_ens)
+            else:
+                edge_idx = np.arange(n_ensembles - num_edge_ens, n_ensembles)
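+            # Illustrative example (not from the original source): for a
+            # transect of 100 ensembles starting on the right edge with
+            # number_ensembles = 10, this branch uses ensembles 0-9 for the
+            # right edge and ensembles 90-99 for the left edge.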
+
+        return edge_idx
+
+    @staticmethod
+    def edge_velocity(edge_idx, transect, top_method=None, bot_method=None, exponent=None):
+        """Computes the edge velocity.
+
+        Different methods may be used depending on settings in transect.
+
+        Parameters
+        ----------
+        edge_idx: np.array
+            Indices of ensembles used to compute edge discharge
+        transect: TransectData
+            Object of TransectData
+        top_method: str
+            Top extrapolation method
+        bot_method: str
+            Bottom extrapolation method
+        exponent: float
+            Exponent
+
+        Returns
+        -------
+        edge_vel_sign: int
+            Sign of edge velocity (discharge)
+        edge_vel_mag: float
+            Magnitude of edge velocity
+        """
+
+        # Set default return
+        edge_vel_sign = 1
+        edge_vel_mag = 0
+
+        # Check to make sure there is edge data
+        if len(edge_idx) > 0:
+
+            # Compute edge velocity using specified method
+            # Used by TRDI
+            if transect.edges.vel_method == 'MeasMag':
+                edge_vel_mag, edge_vel_sign = QComp.edge_velocity_trdi(edge_idx, transect)
+
+            # Used by Sontek
+            elif transect.edges.vel_method == 'VectorProf':
+                edge_vel_mag, edge_vel_sign = QComp.edge_velocity_sontek(edge_idx, transect, top_method,
+                                                                         bot_method, exponent)
+
+            # USGS proposed method
+            elif transect.edges.vel_method == 'Profile':
+                edge_vel_mag, edge_vel_sign = QComp.edge_velocity_profile(edge_idx, transect)
+
+        return edge_vel_sign, edge_vel_mag
+
+    @staticmethod
+    def edge_velocity_trdi(edge_idx, transect):
+        """Computes edge velocity magnitude and sign using TRDI's method.
+
+         This method uses only the measured data and no extrapolation
+
+        Parameters
+        ----------
+        edge_idx: np.array
+            Indices of ensembles used to compute edge discharge
+        transect: TransectData
+            Object of TransectData
+
+        Returns
+        -------
+        edge_vel_mag: float
+            Magnitude of edge velocity
+        edge_vel_sign: int
+            Sign of edge velocity (discharge)
+        """
+
+        # Assign water velocity to local variables
+        x_vel = transect.w_vel.u_processed_mps[:, edge_idx]
+        y_vel = transect.w_vel.v_processed_mps[:, edge_idx]
+
+        # Use only valid data
+        valid = np.copy(transect.w_vel.valid_data[0, :, edge_idx].T)
+        x_vel[np.logical_not(valid)] = np.nan
+        y_vel[np.logical_not(valid)] = np.nan
+
+        # Compute the mean velocity components
+        x_vel_avg = np.nanmean(np.nanmean(x_vel, 0))
+        y_vel_avg = np.nanmean(np.nanmean(y_vel, 0))
+
+        # Compute magnitude and direction
+        edge_dir, edge_vel_mag = cart2pol(x_vel_avg, y_vel_avg)
+
+        # Compute unit vector to help determine sign
+        unit_water_x, unit_water_y = pol2cart(edge_dir, 1)
+        if transect.start_edge == 'Right':
+            dir_sign = 1
+        else:
+            dir_sign = -1
+
+        # Compute unit boat vector to help determine sign
+        ens_delta_time = transect.date_time.ens_duration_sec
+        in_transect_idx = transect.in_transect_idx
+        trans_selected = getattr(transect.boat_vel, transect.boat_vel.selected)
+        if trans_selected is not None:
+            b_vel_x = trans_selected.u_processed_mps
+            b_vel_y = trans_selected.v_processed_mps
+        else:
+            b_vel_x = np.tile([np.nan], transect.boat_vel.bt_vel.u_processed_mps.shape)
+            b_vel_y = np.tile([np.nan], transect.boat_vel.bt_vel.v_processed_mps.shape)
+
+        track_x = np.nancumsum(b_vel_x[in_transect_idx] * ens_delta_time[in_transect_idx])
+        track_y = np.nancumsum(b_vel_y[in_transect_idx] * ens_delta_time[in_transect_idx])
+        boat_dir, boat_mag = cart2pol(track_x[-1], track_y[-1])
+        unit_track_x, unit_track_y = pol2cart(boat_dir, 1)
+        unit_x_prod = (unit_water_x * unit_track_y - unit_water_y * unit_track_x) * dir_sign
+        edge_vel_sign = np.sign(unit_x_prod)
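+        # Illustrative example (not from the original source): boat track
+        # along +x gives unit_track = (1, 0); water along +y gives
+        # unit_water = (0, 1); then unit_x_prod = (0*0 - 1*1) * dir_sign,
+        # so a left-start transect (dir_sign = -1) yields edge_vel_sign = 1.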
+
+        return edge_vel_mag, edge_vel_sign
+
+    @staticmethod
+    def edge_velocity_sontek(edge_idx, transect, top_method=None, bot_method=None, exponent=None):
+        """Computes the edge velocity using SonTek's method.
+
+        SonTek's method uses the profile extrapolation to estimate the velocities in the
+        unmeasured top and bottom and then projects the velocity perpendicular to the
+        course made good.
+
+        Parameters
+        ----------
+        edge_idx: np.array
+            Indices of ensembles used to compute edge discharge
+        transect: TransectData
+            Object of TransectData
+        top_method: str
+            Top extrapolation method
+        bot_method: str
+            Bottom extrapolation method
+        exponent: float
+            Exponent
+
+        Returns
+        -------
+        edge_vel_mag: float
+            Magnitude of edge velocity
+        edge_vel_sign: int
+            Sign of edge velocity (discharge)
+        """
+
+        if top_method is None:
+            top_method = transect.extrap.top_method
+            bot_method = transect.extrap.bot_method
+            exponent = transect.extrap.exponent
+
+        # Compute boat track excluding the start edge ensembles but
+        # including the end edge ensembles. This is the way SonTek does it
+        # as of version 3.7.
+        ens_delta_time = transect.date_time.ens_duration_sec
+        in_transect_idx = transect.in_transect_idx
+        trans_selected = getattr(transect.boat_vel, transect.boat_vel.selected)
+
+        if trans_selected is not None:
+            b_vel_x = trans_selected.u_processed_mps
+            b_vel_y = trans_selected.v_processed_mps
+        else:
+            b_vel_x = np.tile([np.nan], transect.boat_vel.bt_vel.u_processed_mps.shape)
+            b_vel_y = np.tile([np.nan], transect.boat_vel.bt_vel.v_processed_mps.shape)
+
+        track_x = np.nancumsum(b_vel_x[in_transect_idx] * ens_delta_time[in_transect_idx])
+        track_y = np.nancumsum(b_vel_y[in_transect_idx] * ens_delta_time[in_transect_idx])
+
+        # Compute the unit vector for the boat track
+        boat_dir, boat_mag = cart2pol(track_x[-1], track_y[-1])
+        unit_track_x, unit_track_y = pol2cart(boat_dir, 1)
+
+        # Assign water velocity to local variables
+        x_vel = transect.w_vel.u_processed_mps[:, edge_idx]
+        y_vel = transect.w_vel.v_processed_mps[:, edge_idx]
+        valid_vel_ens = np.nansum(transect.w_vel.valid_data[0, :, edge_idx], 1)
+
+        # Filter edge data
+        # According to SonTek the RSL code does recognize that edge samples
+        # can vary in their cell size.  It deals with this issue by
+        # remembering the cell size and cell start for the first edge sample.
+        # Any subsequent edge sample is included in the average only if it
+        # has the same cell size and cell start as the first sample.
+        transect_depths_select = getattr(transect.depths, transect.depths.selected)
+        cell_size = transect_depths_select.depth_cell_size_m[:, edge_idx]
+        cell_depth = transect_depths_select.depth_cell_depth_m[:, edge_idx]
+
+        # Find first valid edge ensemble
+        idx = np.where(valid_vel_ens > 0)[0]
+        if len(idx) > 0:
+            idx_first_valid_ensemble = idx[0]
+            ref_cell_size = cell_size[0, idx_first_valid_ensemble]
+            ref_cell_depth = cell_depth[0, idx_first_valid_ensemble]
+            valid = np.tile(True, edge_idx.shape)
+            valid[np.not_equal(cell_size[0, :], ref_cell_size)] = False
+            valid[np.not_equal(cell_depth[0, :], ref_cell_depth)] = False
+
+            # Compute profile components
+            x_profile = np.nanmean(x_vel[:, valid], 1)
+            y_profile = np.nanmean(y_vel[:, valid], 1)
+
+            # Find first valid cell in profile
+            idx = np.where(np.logical_not(np.isnan(x_profile)))[0]
+            if len(idx) > 0:
+                idx_first_valid_cell = idx[0]
+
+                # Compute cell size and depth for mean profile
+                cell_size[np.isnan(x_vel)] = np.nan
+                cell_size[:, np.logical_not(valid)] = np.nan
+                cell_size_edge = np.nanmean(cell_size, 1)
+                cell_depth[np.isnan(x_vel)] = np.nan
+                cell_depth[:, np.logical_not(valid)] = np.nan
+                cell_depth_edge = np.nanmean(cell_depth, 1)
+
+                # SonTek cuts off the mean profile based on the side lobe cutoff of
+                # the mean of the shallowest beams in the edge ensembles.
+
+                # Determine valid original beam and cell depths
+                depth_bt_beam_orig = transect.depths.bt_depths.depth_orig_m[:, edge_idx]
+                depth_bt_beam_orig[:, np.logical_not(valid)] = np.nan
+                draft_bt_beam_orig = transect.depths.bt_depths.draft_orig_m
+                depth_cell_depth_orig = transect.depths.bt_depths.depth_cell_depth_orig_m[:, edge_idx]
+                depth_cell_depth_orig[:, np.logical_not(valid)] = np.nan
+
+                # Compute minimum mean depth
+                min_raw_depths = np.nanmin(depth_bt_beam_orig)
+                min_depth = np.nanmin(min_raw_depths)
+                min_depth = min_depth - draft_bt_beam_orig
+
+                # Compute last valid cell by computing the side lobe cutoff based
+                # on the mean of the minimum beam depths of the valid edge
+                # ensembles
+                if transect.w_vel.sl_cutoff_type == 'Percent':
+                    sl_depth = min_depth - ((transect.w_vel.sl_cutoff_percent / 100.) * min_depth)
+                else:
+                    sl_depth = min_depth - ((transect.w_vel.sl_cutoff_percent / 100.) * min_depth) \
+                        - (transect.w_vel.sl_cutoff_number * cell_size[0, 0])
+
+                # Adjust side lobe depth for draft
+                sl_depth = sl_depth + draft_bt_beam_orig
+                above_sl = cell_depth < (sl_depth + np.nanmax(cell_size))
+                above_sl_profile = np.nansum(above_sl, 1)
+                # TODO this line doesn't make sense to me
+                valid_idx = np.logical_and(np.less(above_sl_profile, np.nanmax(above_sl_profile)+1),
+                                           np.greater(above_sl_profile, 0))
+
+                # Compute the number of cells above the side lobe cutoff
+                # remaining_depth = sl_depth - cell_depth_edge[idx_first_valid_cell]
+                idx = np.where(np.logical_not(np.isnan(cell_size)))[0]
+                # TODO this is not consistent with Matlab code
+                n_cells = 0
+                if len(idx) > 0:
+                    n_cells = idx
+                    n_cells[n_cells > 0] = 0
+
+                # Determine index of bottom most valid cells
+                idx_last_valid_cell = idx_first_valid_cell + n_cells
+                # TODO need to work and test this logic.
+                if np.greater(idx_last_valid_cell, len(x_profile)):
+                    x_profile[np.logical_not(valid_idx)] = np.nan
+                    y_profile[np.logical_not(valid_idx)] = np.nan
+                else:
+                    idx_last_valid_cell = np.where(np.logical_not(np.isnan(x_profile[:idx_last_valid_cell])))[0][0]
+                    # Mark the cells in the profile below the sidelobe invalid
+                    x_profile[(idx_last_valid_cell+1):] = np.nan
+                    y_profile[(idx_last_valid_cell + 1):] = np.nan
+
+                # Find the top most 3 valid cells
+                idx_first_3_valid_cells = np.where(np.logical_not(np.isnan(x_profile)))[0][:3]
+
+                # Compute the mean measured velocity components for the edge profile
+                x_profile_mean = np.nanmean(x_profile)
+                y_profile_mean = np.nanmean(y_profile)
+
+                # Compute average depth of edge
+                depth_ens = transect_depths_select.depth_processed_m[edge_idx]
+                depth_ens[np.logical_not(valid)] = np.nan
+                depth_avg = np.nanmean(depth_ens)
+
+                # Determine top, mid, bottom range for the profile
+                top_rng_edge = cell_depth_edge[idx_first_valid_cell] - 0.5 * ref_cell_size
+                if idx_last_valid_cell > len(x_profile):
+                    mid_rng_edge = np.nansum(cell_size_edge[valid_idx])
+                else:
+                    mid_rng_edge = np.nansum(cell_size_edge[idx_first_valid_cell:idx_last_valid_cell+1])
+
+                # Compute z
+                z_edge = depth_avg - cell_depth_edge
+                z_edge[idx_last_valid_cell+1:] = np.nan
+                z_edge[z_edge > 0] = np.nan
+                idx_last_valid_cell = np.where(np.logical_not(np.isnan(z_edge)))[0][-1]
+                bot_rng_edge = depth_avg - cell_depth_edge[idx_last_valid_cell] - 0.5 * \
+                    cell_size_edge[idx_last_valid_cell]
+
+                # Compute the top extrapolation for x-component
+                top_vel_x = QComp.discharge_top(top_method=top_method,
+                                                exponent=exponent,
+                                                idx_top=idx_first_valid_cell,
+                                                idx_top_3=idx_first_3_valid_cells,
+                                                top_rng=top_rng_edge,
+                                                component=x_profile,
+                                                cell_size=cell_size_edge,
+                                                cell_depth=cell_depth_edge,
+                                                depth_ens=depth_avg,
+                                                delta_t=1,
+                                                z=z_edge)
+                top_vel_x = top_vel_x / top_rng_edge
+
+                # Compute the bottom extrapolation for x-component
+                bot_vel_x = QComp.discharge_bot(bot_method=bot_method,
+                                                exponent=exponent,
+                                                idx_bot=idx_last_valid_cell,
+                                                bot_rng=bot_rng_edge,
+                                                component=x_profile,
+                                                cell_size=cell_size_edge,
+                                                cell_depth=cell_depth_edge,
+                                                depth_ens=depth_avg,
+                                                delta_t=1,
+                                                z=z_edge)
+                bot_vel_x = bot_vel_x / bot_rng_edge
+
+                # Compute the top extrapolation for the y-component
+                top_vel_y = QComp.discharge_top(top_method=top_method,
+                                                exponent=exponent,
+                                                idx_top=idx_first_valid_cell,
+                                                idx_top_3=idx_first_3_valid_cells,
+                                                top_rng=top_rng_edge,
+                                                component=y_profile,
+                                                cell_size=cell_size_edge,
+                                                cell_depth=cell_depth_edge,
+                                                depth_ens=depth_avg,
+                                                delta_t=1,
+                                                z=z_edge)
+                top_vel_y = top_vel_y / top_rng_edge
+
+                # Compute the bottom extrapolation for y-component
+                bot_vel_y = QComp.discharge_bot(bot_method=bot_method,
+                                                exponent=exponent,
+                                                idx_bot=idx_last_valid_cell,
+                                                bot_rng=bot_rng_edge,
+                                                component=y_profile,
+                                                cell_size=cell_size_edge,
+                                                cell_depth=cell_depth_edge,
+                                                depth_ens=depth_avg,
+                                                delta_t=1,
+                                                z=z_edge)
+                bot_vel_y = bot_vel_y / bot_rng_edge
+
+                # Compute edge velocity vector including extrapolated velocities
+                v_edge_x = ((top_vel_x * top_rng_edge) + (x_profile_mean * mid_rng_edge)
+                            + (bot_vel_x * bot_rng_edge)) / depth_avg
+                v_edge_y = ((top_vel_y * top_rng_edge) + (y_profile_mean * mid_rng_edge)
+                            + (bot_vel_y * bot_rng_edge)) / depth_avg
+
+                # Compute magnitude of edge velocity perpendicular to course made good
+                edge_vel_mag = (v_edge_x * -1 * unit_track_y) + (v_edge_y * unit_track_x)
+
+                # Determine edge sign
+                if transect.start_edge == 'Right':
+                    edge_vel_sign = -1
+                else:
+                    edge_vel_sign = 1
+            else:
+                edge_vel_mag = 0
+                edge_vel_sign = 1
+        else:
+            edge_vel_mag = 0
+            edge_vel_sign = 1
+
+        return edge_vel_mag, edge_vel_sign
+
+    @staticmethod
+    def edge_velocity_profile(edge_idx, transect):
+        """Compute edge velocity magnitude using the mean velocity of each ensemble.
+
+        The mean velocity of each ensemble is computed by first
+        computing the mean direction of the velocities in the ensemble,
+        then projecting the velocity in each cell in that direction and
+        fitting the 1/6th power curve to the projected profile. The mean
+        velocity magnitude from each ensemble is then averaged.
+
+        The sign of the velocity magnitude is computed using the same
+        approach used in WinRiver II. The cross product of the unit
+        vector of the ship track and the unit vector of the edge water
+        samples computed from the mean u and v velocities is used to
+        determine the sign of the velocity magnitude.
+
+        Parameters
+        ----------
+        edge_idx: np.array
+            Indices of ensembles used to compute edge discharge
+        transect: TransectData
+            Object of TransectData
+
+        Returns
+        -------
+        edge_vel_mag: float
+            Magnitude of edge velocity
+        edge_vel_sign: int
+            Sign of edge velocity (discharge)"""
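+
+        # Math note (added for clarity, not part of the original algorithm description):
+        # with b = 1/6, the fitted power curve is v(z) = a * z**b, and the
+        # depth-averaged speed follows from integrating it over the water column:
+        #     v_mean = (1 / d) * integral_0^d a * z**b dz = ((a / (b + 1)) * d**(b + 1)) / d
+        # which is exactly the expression assigned to vel_ensembles[n] below.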
+
+        # Assign water velocity to local variables
+        x_vel = transect.w_vel.u_processed_mps[:, edge_idx]
+        y_vel = transect.w_vel.v_processed_mps[:, edge_idx]
+
+        # Use only valid data
+        valid = transect.w_vel.valid_data[0, :, edge_idx].T.astype(float)
+        valid[valid == 0] = np.nan
+        x_vel = x_vel * valid
+        y_vel = y_vel * valid
+
+        # Initialize local variables
+        n_ensembles = len(edge_idx)
+        vel_ensembles = np.tile(np.nan, n_ensembles)
+        u = np.tile(np.nan, n_ensembles)
+        v = np.tile(np.nan, n_ensembles)
+        v_unit = np.array([np.nan, np.nan])
+
+        # Process each ensemble
+        for n in range(n_ensembles):
+
+            # Use ensembles that have valid data
+            selected_ensemble = edge_idx[n]
+            valid_ensemble = np.nansum(np.logical_not(np.isnan(x_vel[:, n])))
+
+            if valid_ensemble > 0:
+
+                # Setup variables
+                v_x = x_vel[:, n]
+                v_y = y_vel[:, n]
+                depth_cell_size = transect.depths.bt_depths.depth_cell_size_m[:, selected_ensemble]
+                depth_cell_depth = transect.depths.bt_depths.depth_cell_depth_m[:, selected_ensemble]
+                depth = transect.depths.bt_depths.depth_processed_m[selected_ensemble]
+                depth_cell_size[np.isnan(v_x)] = np.nan
+                depth_cell_depth[np.isnan(v_x)] = np.nan
+
+                # Compute projected velocity profile for an ensemble
+                v_x_avg = np.nansum(v_x * depth_cell_size) / np.nansum(depth_cell_size)
+                v_y_avg = np.nansum(v_y * depth_cell_size) / np.nansum(depth_cell_size)
+                ens_dir, _ = cart2pol(v_x_avg, v_y_avg)
+                v_unit[0], v_unit[1] = pol2cart(ens_dir, 1)
+                v_projected_mag = np.dot(np.vstack([v_x, v_y]).T, v_unit)
+
+                # Compute z value for each cell
+                z = (depth - depth_cell_depth)
+                z[np.isnan(v_projected_mag)] = np.nan
+
+                # Compute coefficient for 1/6th power curve
+                b = 1.0 / 6.0
+                a = (b + 1) * (np.nansum((v_projected_mag * depth_cell_size))
+                               / (np.nansum(((z + 0.5 * depth_cell_size)**(b + 1))
+                                  - ((z - 0.5 * depth_cell_size)**(b + 1)))))
+
+                # Compute mean water speed by integrating power curve
+                vel_ensembles[n] = ((a / (b + 1)) * (depth**(b + 1))) / depth
+
+                # Compute the mean velocity components from the mean water speed and direction
+                u[n], v[n] = pol2cart(ens_dir, vel_ensembles[n])
+
+            else:
+
+                # No valid data in ensemble
+                vel_ensembles[n] = np.nan
+                u[n] = np.nan
+                v[n] = np.nan
+
+        # Compute the mean velocity components of the edge velocity as the mean of the mean ensemble components
+        u_avg = np.nanmean(u)
+        v_avg = np.nanmean(v)
+
+        # Compute the edge velocity magnitude
+        edge_vel_dir, edge_vel_mag = cart2pol(u_avg, v_avg)
+
+        # TODO this is the same as for TRDI need to put in separate method
+        # Compute unit vector to help determine sign
+        unit_water_x, unit_water_y = pol2cart(edge_vel_dir, 1)
+
+        # Account for direction of boat travel
+        if transect.start_edge == 'Right':
+            dir_sign = 1
+        else:
+            dir_sign = -1
+
+        # Compute unit boat vector to help determine sign
+        ens_delta_time = transect.date_time.ens_duration_sec
+        in_transect_idx = transect.in_transect_idx
+        trans_selected = getattr(transect.boat_vel, transect.boat_vel.selected)
+        if trans_selected is not None:
+            b_vel_x = trans_selected.u_processed_mps
+            b_vel_y = trans_selected.v_processed_mps
+        else:
+            b_vel_x = np.tile([np.nan], transect.boat_vel.u_processed_mps.shape)
+            b_vel_y = np.tile([np.nan], transect.boat_vel.v_processed_mps.shape)
+
+        track_x = np.nancumsum(b_vel_x[in_transect_idx] * ens_delta_time[in_transect_idx])
+        track_y = np.nancumsum(b_vel_y[in_transect_idx] * ens_delta_time[in_transect_idx])
+        boat_dir, boat_mag = cart2pol(track_x[-1], track_y[-1])
+        unit_track_x, unit_track_y = pol2cart(boat_dir, 1)
+
+        # Compute cross product from unit vectors
+        unit_x_prod = (unit_water_x * unit_track_y - unit_water_y * unit_track_x) * dir_sign
+
+        # Determine sign
+        edge_vel_sign = np.sign(unit_x_prod)
+
+        return edge_vel_mag, edge_vel_sign
+
+    @staticmethod
+    def edge_coef(edge_loc, transect):
+        """Returns the edge coefficient based on the edge settings and transect object.
+
+        Parameters
+        ----------
+        edge_loc: str
+            Edge location (left or right)
+        transect: TransectData
+            Object of TransectData
+
+        Returns
+        -------
+        coef: float
+            Edge coefficient for accounting for velocity distribution and edge shape
+        """
+
+        # Process appropriate edge type
+        edge_select = getattr(transect.edges, edge_loc)
+        if edge_select.type == 'Triangular':
+            coef = 0.3535
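+            # Note (added for clarity): 0.3535 = 0.707 / 2, the product of an assumed
+            # edge velocity ratio of 1 / sqrt(2) and the triangular area factor of 1 / 2.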
+
+        elif edge_select.type == 'Rectangular':
+            # The rectangular edge coefficient depends on the rec_edge_method.
+            # 'Fixed' is compatible with the method used by TRDI.
+            # 'Variable' is compatible with the method used by SonTek.
+
+            if transect.edges.rec_edge_method == 'Fixed':
+                # Fixed Method
+                coef = 0.91
+
+            else:
+                # Variable method
+                # Get edge distance
+                dist = edge_select.dist_m
+
+                # Get edge ensembles to use
+                edge_idx = QComp.edge_ensembles(edge_loc, transect)
+
+                # Compute the mean depth for edge
+                trans_select = getattr(transect.depths, transect.depths.selected)
+                depth_edge = np.nanmean(trans_select.depth_processed_m[edge_idx])
+
+                # Compute coefficient using equation 34 from Principle of River Discharge Measurement, SonTek, 2003
+                coef = (1 - ((0.35 / 4) * (depth_edge / dist) * (1 - np.exp(-4 * (dist / depth_edge))))) / \
+                    (1 - 0.35 * np.exp(-4 * (dist / depth_edge)))
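+
+                # Illustrative check (added, not from the cited source): for an edge distance
+                # equal to the edge depth (dist / depth_edge = 1) the expression above gives
+                #     coef = (1 - 0.0875 * (1 - exp(-4))) / (1 - 0.35 * exp(-4)) ~= 0.92
+                # which is close to the fixed TRDI value of 0.91.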
+
+        elif edge_select.type == 'Custom':
+            # Custom user supplied coefficient
+            coef = edge_select.cust_coef
+
+        else:
+            coef = []
+
+        return coef
+
+    @staticmethod
+    def loop_correction_factor(top_q, middle_q, bottom_q, trans_data, mb_data, delta_t):
+        """Computes the discharge correction factor from loop moving-bed tests
+
+        Parameters
+        ----------
+        top_q: float
+            Top discharge from extrapolation
+        middle_q: float
+            Computed middle discharge
+        bottom_q: float
+            Bottom discharge from extrapolation
+        trans_data: TransectData
+            Object of TransectData
+        mb_data: MovingBedTests
+            Object of MovingBedTests
+        delta_t: np.array(float)
+            Duration of each ensemble, computed in QComp
+
+        Returns
+        -------
+        correction_factor: float
+            Correction factor to be applied to the discharge to correct for moving-bed effects
+        """
+
+        # Assign object properties to local variables
+        moving_bed_speed = mb_data.mb_spd_mps
+        in_transect_idx = trans_data.in_transect_idx
+        cells_above_sl = trans_data.w_vel.cells_above_sl[:, in_transect_idx]
+        u = trans_data.w_vel.u_processed_mps[:, in_transect_idx] * cells_above_sl
+        v = trans_data.w_vel.v_processed_mps[:, in_transect_idx] * cells_above_sl
+        depths_select = getattr(trans_data.depths, trans_data.depths.selected)
+        depth_cell_depth = depths_select.depth_cell_depth_m[:, in_transect_idx]
+        depth = depths_select.depth_processed_m[in_transect_idx]
+        bt_u = trans_data.boat_vel.bt_vel.u_processed_mps[in_transect_idx]
+        bt_v = trans_data.boat_vel.bt_vel.v_processed_mps[in_transect_idx]
+
+        # Compute uncorrected discharge excluding the edges
+        q_orig = top_q + middle_q + bottom_q
+
+        if q_orig != 0:
+            # Compute near-bed velocities
+            nb_u, nb_v, unit_nb_u, unit_nb_v = QComp.near_bed_velocity(u, v, depth, depth_cell_depth)
+            nb_speed = np.sqrt(nb_u**2 + nb_v**2)
+            nb_u_mean = np.nanmean(nb_u)
+            nb_v_mean = np.nanmean(nb_v)
+            nb_speed_mean = np.sqrt(nb_u_mean**2 + nb_v_mean**2)
+            moving_bed_speed_ens = moving_bed_speed * (nb_speed / nb_speed_mean)
+            u_mb = moving_bed_speed_ens * unit_nb_u
+            v_mb = moving_bed_speed_ens * unit_nb_v
+
+            # Correct water velocities
+            u_adj = u + u_mb
+            v_adj = v + v_mb
+
+            bt_u_adj = bt_u + u_mb
+            bt_v_adj = bt_v + v_mb
+
+            # Compute corrected cross product
+            xprod = QComp.cross_product(transect=trans_data)
+            xprod_in = QComp.cross_product(w_vel_x=u_adj,
+                                           w_vel_y=v_adj,
+                                           b_vel_x=bt_u_adj,
+                                           b_vel_y=bt_v_adj,
+                                           start_edge=trans_data.start_edge)
+            xprod[:, in_transect_idx] = xprod_in
+
+            # Compute corrected discharges
+            q_middle_cells = QComp.discharge_middle_cells(xprod=xprod, transect=trans_data, delta_t=delta_t)
+            trans_select = getattr(trans_data.depths, trans_data.depths.selected)
+            num_top_method = {'Power': 0, 'Constant': 1, '3-Point': 2, None: -1}
+            q_top = extrapolate_top(xprod, trans_data.w_vel.valid_data[0, :, :],
+                                    num_top_method[trans_data.extrap.top_method],
+                                    trans_data.extrap.exponent, trans_data.in_transect_idx,
+                                    trans_select.depth_cell_size_m,
+                                    trans_select.depth_cell_depth_m, trans_select.depth_processed_m, delta_t,
+                                    -1, 0.1667)
+            num_bot_method = {'Power': 0, 'No Slip': 1, None: -1}
+            q_bot = extrapolate_bot(xprod, trans_data.w_vel.valid_data[0, :, :],
+                                    num_bot_method[trans_data.extrap.bot_method],
+                                    trans_data.extrap.exponent, trans_data.in_transect_idx,
+                                    trans_select.depth_cell_size_m,
+                                    trans_select.depth_cell_depth_m, trans_select.depth_processed_m, delta_t,
+                                    -1, 0.1667)
+            q_adj = np.nansum(np.nansum(q_middle_cells)) + np.nansum(q_top) + np.nansum(q_bot)
+
+            # Compute correction factor
+            correction_factor = q_adj / q_orig
+        else:
+            correction_factor = 1.0
+
+        return correction_factor
+
+    @staticmethod
+    def stationary_correction_factor(top_q, middle_q, bottom_q, trans_data, mb_data, delta_t):
+        """Computes the discharge correction factor from stationary moving-bed tests.
+
+        Parameters
+        ----------
+        top_q: float
+            Top discharge from extrapolation
+        middle_q: float
+            Computed middle discharge
+        bottom_q: float
+            Bottom discharge from extrapolation
+        trans_data: TransectData
+            Object of TransectData
+        mb_data: MovingBedTests
+            Object of MovingBedTests
+        delta_t: np.array(float)
+            Duration of each ensemble, computed in QComp
+
+        Returns
+        -------
+        correction_factor: float
+            Correction factor to be applied to the discharge to correct for moving-bed effects
+        """
+
+        n_mb_tests = len(mb_data)
+        n_sta_tests = 0
+        mb_speed = np.array([0])
+        near_bed_speed = np.array([0])
+        for n in range(n_mb_tests):
+            if (mb_data[n].type == 'Stationary') and mb_data[n].use_2_correct:
+                n_sta_tests += 1
+                mb_speed = np.append(mb_speed, mb_data[n].mb_spd_mps)
+                near_bed_speed = np.append(near_bed_speed, mb_data[n].near_bed_speed_mps)
+
+        if n_sta_tests > 0:
+
+            # Compute linear regression coefficient forcing through zero to relate
+            # near-bed velocity to moving-bed velocity
+            x = np.vstack(near_bed_speed)
+            corr_coef = np.linalg.lstsq(x, mb_speed, rcond=None)[0]
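+            # Note (added for clarity): lstsq with the single-column design matrix above
+            # solves the zero-intercept regression mb = c * nb, whose closed form is
+            # c = sum(nb * mb) / sum(nb**2). For example (hypothetical values),
+            #     np.linalg.lstsq(np.vstack([1., 2.]), np.array([0.1, 0.22]), rcond=None)[0]
+            # returns approximately [0.108].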
+
+            # Assign object properties to local variables
+            in_transect_idx = trans_data.in_transect_idx
+            cells_above_sl = trans_data.w_vel.cells_above_sl[:, in_transect_idx]
+            u = trans_data.w_vel.u_processed_mps[:, in_transect_idx] * cells_above_sl
+            v = trans_data.w_vel.v_processed_mps[:, in_transect_idx] * cells_above_sl
+            depths_select = getattr(trans_data.depths, trans_data.depths.selected)
+            depth_cell_depth = depths_select.depth_cell_depth_m[:, in_transect_idx]
+            depth = depths_select.depth_processed_m[in_transect_idx]
+            bt_u = trans_data.boat_vel.bt_vel.u_processed_mps[in_transect_idx]
+            bt_v = trans_data.boat_vel.bt_vel.v_processed_mps[in_transect_idx]
+
+            # Compute near-bed velocities
+            nb_u, nb_v, unit_nb_u, unit_nb_v = QComp.near_bed_velocity(u, v, depth, depth_cell_depth)
+
+            # Compute moving-bed vector for each ensemble
+            mb_u = corr_coef * nb_u
+            mb_v = corr_coef * nb_v
+
+            # Compute adjusted water and boat velocities
+            u_adj = u + mb_u
+            v_adj = v + mb_v
+            bt_u_adj = bt_u + mb_u
+            bt_v_adj = bt_v + mb_v
+
+            # Compute uncorrected discharge excluding the edges
+            q_orig = top_q + middle_q + bottom_q
+            if q_orig != 0:
+                # Compute corrected discharge excluding edges
+                # Compute corrected cross product
+                xprod = QComp.cross_product(transect=trans_data)
+                xprod_in = QComp.cross_product(w_vel_x=u_adj,
+                                               w_vel_y=v_adj,
+                                               b_vel_x=bt_u_adj,
+                                               b_vel_y=bt_v_adj,
+                                               start_edge=trans_data.start_edge)
+                xprod[:, in_transect_idx] = xprod_in
+
+                # Compute corrected discharges
+                q_middle_cells = QComp.discharge_middle_cells(xprod=xprod, transect=trans_data, delta_t=delta_t)
+                trans_select = getattr(trans_data.depths, trans_data.depths.selected)
+                num_top_method = {'Power': 0, 'Constant': 1, '3-Point': 2, None: -1}
+                q_top = extrapolate_top(xprod,
+                                        trans_data.w_vel.valid_data[0, :, :],
+                                        num_top_method[trans_data.extrap.top_method],
+                                        trans_data.extrap.exponent, trans_data.in_transect_idx,
+                                        trans_select.depth_cell_size_m,
+                                        trans_select.depth_cell_depth_m, trans_select.depth_processed_m, delta_t,
+                                        -1, 0.1667)
+                num_bot_method = {'Power': 0, 'No Slip': 1, None: -1}
+                q_bot = extrapolate_bot(xprod,
+                                        trans_data.w_vel.valid_data[0, :, :],
+                                        num_bot_method[trans_data.extrap.bot_method],
+                                        trans_data.extrap.exponent, trans_data.in_transect_idx,
+                                        trans_select.depth_cell_size_m,
+                                        trans_select.depth_cell_depth_m, trans_select.depth_processed_m, delta_t,
+                                        -1, 0.1667)
+                q_adj = np.nansum(np.nansum(q_middle_cells)) + np.nansum(q_top) + np.nansum(q_bot)
+
+                # Compute correction factor
+                correction_factor = q_adj / q_orig
+            else:
+                correction_factor = 1.0
+
+            return correction_factor
+
+    @staticmethod
+    def near_bed_velocity(u, v, depth, bin_depth):
+        """Compute near bed velocities.
+
+        Parameters
+        ----------
+        u: np.array(float)
+            Velocity in the x-direction, in m/s
+        v: np.array(float)
+            Velocity in the y-direction, in m/s
+        depth: np.array(float)
+            Depth for each ensemble, in m
+        bin_depth: np.array(float)
+            Depth cell depth for each depth cell, in m
+
+        Returns
+        -------
+        nb_u: np.array(float)
+            Near-bed velocity in the x-direction, in m/s.
+        nb_v: np.array(float)
+            Near-bed velocity in the y-direction, in m/s.
+        unit_nbu: np.array(float)
+            Unit vector component of near-bed velocity in x-direction.
+        unit_nbv: np.array(float)
+            Unit vector component of near-bed velocity in y-direction.
+        """
+
+        # Compute z near bed as 10% of depth
+        z_near_bed = depth * 0.1
+
+        # Begin computing near-bed velocities
+        n_ensembles = u.shape[1]
+        nb_u = np.tile([np.nan], n_ensembles)
+        nb_v = np.tile([np.nan], n_ensembles)
+        unit_nbu = np.tile([np.nan], n_ensembles)
+        unit_nbv = np.tile([np.nan], n_ensembles)
+        z_depth = np.tile([np.nan], n_ensembles)
+        u_mean = np.tile([np.nan], n_ensembles)
+        v_mean = np.tile([np.nan], n_ensembles)
+        speed_near_bed = np.tile([np.nan], n_ensembles)
+        for n in range(n_ensembles):
+            idx = np.where(np.logical_not(np.isnan(u[:, n])))[0]
+            if len(idx) > 0:
+                idx = idx[-2:]
+
+                # Compute near-bed velocity
+                z_depth[n] = depth[n] - np.nanmean(bin_depth[idx, n], 0)
+                u_mean[n] = np.nanmean(u[idx, n], 0)
+                v_mean[n] = np.nanmean(v[idx, n], 0)
+                nb_u[n] = (u_mean[n] / z_depth[n] ** (1. / 6.)) * (z_near_bed[n] ** (1. / 6.))
+                nb_v[n] = (v_mean[n] / z_depth[n] ** (1. / 6.)) * (z_near_bed[n] ** (1. / 6.))
+                speed_near_bed[n] = np.sqrt(nb_u[n] ** 2 + nb_v[n] ** 2)
+                unit_nbu[n] = nb_u[n] / speed_near_bed[n]
+                unit_nbv[n] = nb_v[n] / speed_near_bed[n]
+
+        return nb_u, nb_v, unit_nbu, unit_nbv
+
+    @staticmethod
+    def valid_edge_ens(trans_data):
+        """Determines which ensembles contain sufficient valid data to allow computation of discharge.
+        
+        Allows interpolated depth and boat velocity but requires valid
+        non-interpolated water velocity.
+        
+        Parameters
+        ----------
+        trans_data: TransectData
+            Object of TransectData
+        
+        Returns
+        -------
+        valid_ens: np.array(bool)
+            Boolean vector identifying ensembles with valid depth, navigation, and water data
+        """
+
+        # Get index of ensembles in moving-boat portion of transect
+        in_transect_idx = trans_data.in_transect_idx
+
+        # Get selected navigation reference
+
+        boat_vel_selected = getattr(trans_data.boat_vel, trans_data.boat_vel.selected)
+
+        # Depending on type of interpolation determine the valid navigation ensembles
+        if boat_vel_selected is not None and len(boat_vel_selected.u_processed_mps) > 0:
+            if boat_vel_selected.interpolate == 'TRDI':
+                nav_valid = boat_vel_selected.valid_data[0, in_transect_idx]
+            else:
+                nav_valid = np.logical_not(np.isnan(boat_vel_selected.u_processed_mps[in_transect_idx]))
+        else:
+            nav_valid = np.tile(False, len(in_transect_idx))
+
+        # Depending on type of interpolation determine the valid water track ensembles
+        if len(in_transect_idx) > 1:
+            water_valid = np.any(trans_data.w_vel.valid_data[0, :, in_transect_idx], 1)
+        else:
+            water_valid = np.any(trans_data.w_vel.valid_data[0, :, in_transect_idx])
+
+        # Determine the ensembles with valid depth
+        depths_select = getattr(trans_data.depths, trans_data.depths.selected)
+        if depths_select is not None:
+            depth_valid = np.logical_not(np.isnan(depths_select.depth_processed_m[in_transect_idx]))
+
+            # Determine the ensembles with valid depth, navigation, and water data
+            valid_ens = np.all(np.vstack((nav_valid, water_valid, depth_valid)), 0)
+        else:
+            valid_ens = []
+
+        return valid_ens
+
+    @staticmethod
+    def discharge_interpolated(q_top_ens, q_mid_cells, q_bot_ens, transect):
+        """Determines the amount of discharge in interpolated cells and ensembles.
+
+        Parameters
+        ----------
+        q_top_ens: np.array(float)
+            Top extrapolated discharge in each ensemble
+        q_mid_cells: np.array(float)
+            Measured middle discharge in each cell of each ensemble
+        q_bot_ens: np.array(float)
+            Bottom extrapolated discharge in each ensemble
+        transect: TransectData
+            Object of TransectData
+
+        Returns
+        -------
+        q_int_cells: float
+            Discharge in interpolated cells
+        q_int_ens: float
+            Discharge in interpolated ensembles
+        """
+        valid_ens, valid_wt = TransectData.raw_valid_data(transect)
+
+        # Compute interpolated cell discharge
+        q_int_cells = np.nansum(np.nansum(q_mid_cells[np.logical_not(valid_wt)]))
+
+        # The method used to compute discharge for invalid ensembles depends on whether
+        # navigation data are interpolated (QRev) or an expanded delta time is used to
+        # compute discharge for invalid ensembles (TRDI)
+        if transect.boat_vel.bt_vel.interpolate == 'None':
+            # Compute discharge in invalid ensembles for expanded delta time situation
+            # Find index of invalid ensembles followed by a valid ensemble
+            idx_next_valid = np.where(np.diff(np.hstack((-2, valid_ens))) == 1)[0]
+            if len(idx_next_valid) == 0:
+                q_int_ens = 0
+            else:
+                # Increase index to reference valid ensembles
+                idx_next_valid += 1
+
+                # Sum discharge in valid ensembles following invalid ensemble
+                q_int_ens = np.nansum(q_mid_cells[:, idx_next_valid], 0) \
+                    + q_bot_ens[idx_next_valid] + q_top_ens[idx_next_valid]
+
+                # Determine number of invalid ensembles preceding valid ensemble
+                run_length_false, _ = QComp.compute_run_length(valid_ens)
+
+                # Adjust run_length_false for situation where the transect ends with invalid ensembles
+                if len(run_length_false) > len(q_int_ens):
+                    run_length_false = run_length_false[:-1]
+
+                # Adjust discharge to remove the portion attributable to the valid ensemble
+                # itself: a valid ensemble following n invalid ensembles carries an expanded
+                # delta time spanning n + 1 ensembles, so n / (n + 1) of its discharge is
+                # assigned to the invalid ensembles
+                q_int_ens = np.nansum(q_int_ens * (run_length_false / (run_length_false+1)))
+
+        else:
+            # Compute discharge in invalid ensembles where all data were interpolated
+            q_int_ens = np.nansum(np.nansum(q_mid_cells[:, np.logical_not(valid_ens)])) \
+                        + np.nansum(q_top_ens[np.logical_not(valid_ens)]) \
+                        + np.nansum(q_bot_ens[np.logical_not(valid_ens)])
+
+        return q_int_cells, q_int_ens
+
+    @staticmethod
+    def compute_run_length(bool_vector):
+        """Compute how many false or true consecutive values are in every run of true or false in the
+        provided boolean vector.
+
+        Parameters
+        ----------
+        bool_vector: np.array(bool)
+           Boolean vector.
+
+        Returns
+        -------
+        run_length_false: np.array(int)
+            Vector with lengths of false runs.
+        run_length_true: np.array(int)
+            Vector with lengths of true runs.
+        """
+
+        # Compute the indices of where changes occur
+        valid_run = np.where(np.diff(np.hstack((-1, bool_vector, -1))) != 0)[0]
+        # Determine length of each run
+        run_length = np.diff(valid_run)
+
+        # Assign runs to true or false based on the first value
+        if bool_vector[0]:
+            true_start = 0
+            false_start = 1
+        else:
+            true_start = 1
+            false_start = 0
+        run_length_false = run_length[false_start::2]
+        run_length_true = run_length[true_start::2]
+
+        return run_length_false, run_length_true
+
+    # ============================================================================================
+    # The methods below are not being used in the discharge computations.
+    # The methods for extrapolating the top and bottom discharge have been moved to separate files
+    # and compiled using Numba AOT. The methods below are included here for historical purposes
+    # and may provide an easier approach to adding new features/algorithms prior to recoding
+    # them in a manner that can be compiled using Numba AOT.
+    # =============================================================================================
+
+    @staticmethod
+    def extrapolate_top(xprod, transect, delta_t, top_method=None, exponent=None):
+        """Computes the extrapolated top discharge.
+
+        Parameters
+        ----------
+        xprod: np.array(float)
+            Cross product computed from the cross product method
+        transect: TransectData
+            Object of TransectData
+        delta_t: np.array(float)
+            Duration of each ensemble computed from QComp
+        top_method: str
+            Specifies method to use for top extrapolation
+        exponent: float
+            Exponent to use for power extrapolation
+
+        Returns
+        -------
+        q_top: np.array(float)
+            Top extrapolated discharge for each ensemble
+        """
+
+        if top_method is None:
+            top_method = transect.extrap.top_method
+            exponent = transect.extrap.exponent
+
+        # Get index for ensembles in moving-boat portion of transect
+        in_transect_idx = transect.in_transect_idx
+
+        # Compute top variables
+        idx_top, idx_top3, top_rng = QComp.top_variables(xprod, transect)
+        idx_top = idx_top[in_transect_idx]
+        idx_top3 = idx_top3[:, in_transect_idx]
+        top_rng = top_rng[in_transect_idx]
+
+        # Get data from transect object
+        trans_select = getattr(transect.depths, transect.depths.selected)
+        cell_size = trans_select.depth_cell_size_m[:, in_transect_idx]
+        cell_depth = trans_select.depth_cell_depth_m[:, in_transect_idx]
+        depth_ens = trans_select.depth_processed_m[in_transect_idx]
+
+        # Compute z
+        z = np.subtract(depth_ens, cell_depth)
+
+        # Use only valid data
+        valid_data = np.logical_not(np.isnan(xprod[:, in_transect_idx]))
+        z[np.logical_not(valid_data)] = np.nan
+        cell_size[np.logical_not(valid_data)] = np.nan
+        cell_depth[np.logical_not(valid_data)] = np.nan
+
+        # Compute top discharge
+        q_top = QComp.discharge_top(top_method, exponent, idx_top, idx_top3, top_rng,
+                                    xprod[:, in_transect_idx], cell_size, cell_depth,
+                                    depth_ens, delta_t, z)
+
+        return q_top
+
+    @staticmethod
+    def discharge_top(top_method, exponent, idx_top, idx_top_3, top_rng, component, cell_size, cell_depth,
+                      depth_ens, delta_t, z):
+        """Computes the top extrapolated value of the provided component.
+
+        Parameters
+        ----------
+        top_method: str
+            Top extrapolation method (Power, Constant, 3-Point)
+        exponent: float
+            Exponent for the power extrapolation method
+        idx_top: np.array(int)
+            Index to the topmost valid depth cell in each ensemble
+        idx_top_3: np.array(int)
+            Index to the top 3 valid depth cells in each ensemble
+        top_rng: np.array(float)
+            Range from the water surface to the top of the topmost cell
+        component: np.array(float)
+            The variable to be extrapolated (xprod, u-velocity, v-velocity)
+        cell_size: np.array(float)
+            Array of cellsizes (n cells x n ensembles)
+        cell_depth: np.array(float)
+            Depth of each cell (n cells x n ensembles)
+        depth_ens: np.array(float)
+            Bottom depth for each ensemble
+        delta_t: np.array(float)
+            Duration of each ensemble compute by QComp
+        z: np.array(float)
+            Relative depth from the bottom of each depth cell computed in discharge top method
+
+        Returns
+        -------
+        top_value: np.array(float)
+            Total for the specified component integrated over the top range for each ensemble
+        """
+
+        # Initialize return
+        top_value = 0
+
+        # Top power extrapolation
+        if top_method == 'Power':
+            numerator = ((exponent + 1) * np.nansum(component * cell_size, 0))
+            denominator = np.nansum(((z + 0.5 * cell_size)**(exponent+1)) - ((z - 0.5 * cell_size)**(exponent+1)), 0)
+            coef = np.divide(numerator, denominator, where=denominator != 0)
+            coef[denominator == 0] = np.nan
+            top_value = delta_t * (coef / (exponent + 1)) * \
+                (depth_ens**(exponent + 1) - (depth_ens-top_rng)**(exponent + 1))
+
+        # Top constant extrapolation
+        elif top_method == 'Constant':
+            n_ensembles = len(delta_t)
+            top_value = np.tile([np.nan], n_ensembles)
+            for j in range(n_ensembles):
+                if idx_top[j] >= 0:
+                    top_value[j] = delta_t[j] * component[idx_top[j], j] * top_rng[j]
+
+        # Top 3-point extrapolation
+        elif top_method == '3-Point':
+            # Determine number of bins available in each profile
+            valid_data = np.logical_not(np.isnan(component))
+            n_bins = np.nansum(valid_data, 0)
+            # Determine number of ensembles
+            n_ensembles = len(delta_t)
+            # Preallocate qtop vector
+            top_value = np.tile([np.nan], n_ensembles)
+
+            for j in range(n_ensembles):
+
+                if (n_bins[j] < 6) and (n_bins[j] > 0) and (idx_top[j] >= 0):
+                    top_value[j] = delta_t[j] * component[idx_top[j], j] * top_rng[j]
+
+                # If 6 or more bins use 3-pt at top
+                if n_bins[j] > 5:
+                    sumd = np.nansum(cell_depth[idx_top_3[0:3, j], j])
+                    sumd2 = np.nansum(cell_depth[idx_top_3[0:3, j], j]**2)
+                    sumq = np.nansum(component[idx_top_3[0:3, j], j])
+                    sumqd = np.nansum(component[idx_top_3[0:3, j], j] * cell_depth[idx_top_3[0:3, j], j])
+                    delta = 3 * sumd2 - sumd**2
+                    a = (3 * sumqd - sumq * sumd) / delta
+                    b = (sumq * sumd2 - sumqd * sumd) / delta
+                    # Compute discharge for 3-pt fit
+                    qo = (a * top_rng[j]**2) / 2 + b * top_rng[j]
+                    top_value[j] = delta_t[j] * qo
+
+        return top_value
+
+    @staticmethod
+    def top_variables(xprod, transect):
+        """Computes the index to the top and top three valid cells in each ensemble and
+        the range from the water surface to the top of the topmost cell.
+
+        Parameters
+        ----------
+        xprod: np.array(float)
+            Cross product computed from the cross product method
+        transect: TransectData
+            Object of TransectData
+
+        Returns
+        -------
+        idx_top: np.array(int)
+            Index to the topmost valid depth cell in each ensemble
+        idx_top_3: np.array(int)
+            Index to the top 3 valid depth cells in each ensemble
+        top_rng: np.array(float)
+            Range from the water surface to the top of the topmost cell
+        """
+
+        # Get data from transect object
+        valid_data1 = np.copy(transect.w_vel.valid_data[0, :, :])
+        valid_data2 = np.logical_not(np.isnan(xprod))
+        valid_data = valid_data1 * valid_data2
+        trans_select = getattr(transect.depths, transect.depths.selected)
+        cell_size = trans_select.depth_cell_size_m
+        cell_depth = trans_select.depth_cell_depth_m
+
+        # Preallocate variables
+        n_ensembles = valid_data.shape[1]
+        idx_top = np.tile(-1, valid_data.shape[1]).astype(int)
+        idx_top_3 = np.tile(-1, (3, valid_data.shape[1])).astype(int)
+        top_rng = np.tile([np.nan], n_ensembles)
+
+        # Loop through ensembles
+        for n in range(n_ensembles):
+            # Identify topmost 1 and 3 valid cells
+            idx_temp = np.where(np.logical_not(np.isnan(xprod[:, n])))[0]
+            if len(idx_temp) > 0:
+                idx_top[n] = idx_temp[0]
+                if len(idx_temp) > 2:
+                    idx_top_3[:, n] = idx_temp[0:3]
+                # Compute top range
+                top_rng[n] = cell_depth[idx_top[n], n] - 0.5 * cell_size[idx_top[n], n]
+            else:
+                top_rng[n] = 0
+                idx_top[n] = 0
+
+        return idx_top, idx_top_3, top_rng
+
+    @staticmethod
+    def extrapolate_bot(xprod, transect, delta_t, bot_method=None, exponent=None):
+        """Computes the extrapolated bottom discharge
+
+        Parameters
+        ----------
+        xprod: np.array(float)
+            Cross product of the water and boat velocities
+        transect: TransectData
+            Object of TransectData
+        delta_t: np.array(float)
+            Duration of each ensemble
+        bot_method: str
+            Bottom extrapolation method
+        exponent: float
+            Bottom extrapolation exponent
+
+        Returns
+        -------
+        q_bot: np.array(float)
+            Bottom extrapolated discharge for each ensemble
+        """
+
+        # Determine extrapolation methods and exponent
+        if bot_method is None:
+            bot_method = transect.extrap.bot_method
+            exponent = transect.extrap.exponent
+
+        # Get index for ensembles in moving-boat portion of transect
+        in_transect_idx = transect.in_transect_idx
+        xprod = xprod[:, in_transect_idx]
+
+        # Compute bottom variables
+        idx_bot, bot_rng = QComp.bot_variables(xprod, transect)
+
+        # Get data from transect properties
+        trans_select = getattr(transect.depths, transect.depths.selected)
+        cell_size = trans_select.depth_cell_size_m[:, in_transect_idx]
+        cell_depth = trans_select.depth_cell_depth_m[:, in_transect_idx]
+        depth_ens = trans_select.depth_processed_m[in_transect_idx]
+
+        # Compute z
+        z = np.subtract(depth_ens, cell_depth)
+        valid_data = np.logical_not(np.isnan(xprod))
+        z[np.logical_not(valid_data)] = np.nan
+        z[nan_less(z, 0)] = np.nan
+        cell_size[np.logical_not(valid_data)] = np.nan
+        cell_depth[np.logical_not(valid_data)] = np.nan
+        # Compute bottom discharge
+        q_bot = QComp.discharge_bot(bot_method, exponent, idx_bot, bot_rng, xprod,
+                                    cell_size, cell_depth, depth_ens, delta_t, z)
+
+        return q_bot
+
+    @staticmethod
+    def discharge_bot(bot_method, exponent, idx_bot, bot_rng, component,
+                      cell_size, cell_depth, depth_ens, delta_t, z):
+        """Computes the bottom extrapolated value of the provided component.
+
+        Parameters
+        ----------
+        bot_method: str
+            Bottom extrapolation method (Power, No Slip)
+        exponent: float
+            Exponent for power and no slip
+        idx_bot: np.array(int)
+            Index to the bottommost valid depth cell in each ensemble
+        bot_rng: np.array(float)
+            Range from the streambed to the bottom of the bottommost cell
+        component: np.array(float)
+            The variable to be extrapolated
+        cell_size: np.array(float)
+            Array of cell sizes (n cells x n ensembles)
+        cell_depth: np.array(float)
+            Depth of each cell (n cells x n ensembles)
+        depth_ens: np.array(float)
+            Bottom depth for each ensemble
+        delta_t: np.array(float)
+            Duration of each ensemble computed by QComp
+        z: np.array(float)
+            Relative depth from the bottom to each depth cell
+
+        Returns
+        -------
+        bot_value: np.array(float)
+            Total for the specified component integrated over the bottom range for each ensemble
+        """
+
+        # Initialize
+        coef = 0
+
+        # Bottom power extrapolation
+        if bot_method == 'Power':
+            numerator = ((exponent+1) * np.nansum(component * cell_size, 0))
+            denominator = np.nansum(((z + 0.5 * cell_size)**(exponent + 1)) - (z - 0.5 * cell_size)**(exponent + 1), 0)
+            coef = np.divide(numerator, denominator, where=denominator != 0)
+            coef[denominator == 0] = np.nan
+
+        # Bottom no slip extrapolation
+        elif bot_method == 'No Slip':
+            # Valid data in the lower 20% of the water column or
+            # the last valid depth cell are used to compute the no slip power fit
+            cutoff_depth = 0.8 * depth_ens
+            depth_ok = (nan_greater(cell_depth, np.tile(cutoff_depth, (cell_depth.shape[0], 1))))
+            component_ok = np.logical_not(np.isnan(component))
+            use_ns = depth_ok * component_ok
+            for j in range(len(delta_t)):
+                if idx_bot[j] >= 0:
+                    use_ns[idx_bot[j], j] = 1
+
+            # Create cross product and z arrays for the data to be used in
+            # no slip computations
+            component_ns = np.copy(component)
+            component_ns[np.logical_not(use_ns)] = np.nan
+            z_ns = np.copy(z)
+            z_ns[np.logical_not(use_ns)] = np.nan
+            numerator = ((exponent + 1) * np.nansum(component_ns * cell_size, 0))
+            denominator = np.nansum(((z_ns + 0.5 * cell_size) ** (exponent + 1))
+                                    - ((z_ns - 0.5 * cell_size) ** (exponent + 1)), 0)
+            coef = np.divide(numerator, denominator, where=denominator != 0)
+            coef[denominator == 0] = np.nan
+
+        # Compute the bottom discharge of each profile
+        bot_value = delta_t * (coef / (exponent + 1)) * (bot_rng**(exponent + 1))
+
+        return bot_value
+
+    @staticmethod
+    def bot_variables(x_prod, transect):
+        """Computes the index to the bottom most valid cell in each ensemble and the range from
+        the bottom to the bottom of the bottom most cell.
+
+        Parameters
+        ----------
+        x_prod: np.array(float)
+            Cross product computed from the cross product method
+        transect: TransectData
+            Object of TransectData
+
+        Returns
+        -------
+        idx_bot: np.array(int)
+            Index to the bottommost valid depth cell in each ensemble
+        bot_rng: np.array(float)
+            Range from the streambed to the bottom of the bottommost cell
+        """
+
+        # Identify valid data
+        in_transect_idx = transect.in_transect_idx
+        valid_data1 = np.copy(transect.w_vel.valid_data[0, :, in_transect_idx].T)
+        valid_data2 = np.logical_not(np.isnan(x_prod))
+        valid_data = valid_data1 * valid_data2
+
+        # Assign transect properties to local variables
+        trans_selected = getattr(transect.depths, transect.depths.selected)
+        cell_size = trans_selected.depth_cell_size_m[:, in_transect_idx]
+        cell_depth = trans_selected.depth_cell_depth_m[:, in_transect_idx]
+        depth_ens = trans_selected.depth_processed_m[in_transect_idx]
+
+        # Preallocate variables
+        n_ensembles = valid_data.shape[1]
+        idx_bot = np.tile(-1, (valid_data.shape[1])).astype(int)
+        bot_rng = np.tile([np.nan], n_ensembles)
+
+        for n in range(n_ensembles):
+            # Identify the bottommost valid cell
+            idx_temp = np.where(np.logical_not(np.isnan(x_prod[:, n])))[0]
+            if len(idx_temp) > 0:
+                idx_temp = idx_temp[-1]
+                idx_bot[n] = idx_temp
+                # Compute bottom range
+                bot_rng[n] = depth_ens[n] - cell_depth[idx_bot[n], n] - 0.5 * cell_size[idx_bot[n], n]
+            else:
+                bot_rng[n] = 0
+
+        return idx_bot, bot_rng
diff --git a/Classes/SelectFit.py b/Classes/SelectFit.py
new file mode 100644
index 0000000..bb771a0
--- /dev/null
+++ b/Classes/SelectFit.py
@@ -0,0 +1,381 @@
+import numpy as np
+from Classes.FitData import FitData
+
+
+class SelectFit(object):
+    """Class automates the extrapolation method selection information.
+
+    Attributes
+    ----------
+    fit_method: str
+        User selected method Automatic or Manual
+    top_method: str
+        Top extrapolation method
+    bot_method: str
+        Bottom extrapolation method
+    exponent: float
+        Power fit exponent
+    exp_method: str
+        Method to determine exponent (default, optimize, or manual)
+    u: np.array(float)
+        Fit values of the variable
+    u_auto: np.array(float)
+        Fit values from automatic fit
+    z: np.array(float)
+        Distance from the streambed for fit variable
+    z_auto: np.array(float)
+        z values for automatic fit
+    residuals: np.array(float)
+        Residuals from fit
+    coef: float
+        Power fit coefficient
+    bot_method_auto: str
+        Selected extrapolation method for the bottom
+    top_method_auto: str
+        Selected extrapolation method for the top
+    exponent_auto: float
+        Selected exponent
+    top_fit_r2: float
+        Top fit custom r^2
+    top_max_diff: float
+        Maximum difference between power and 3-pt at top
+    bot_diff: float
+        Difference between power and no slip at z = 0.1
+    bot_r2: float
+        Bottom fit r^2
+    fit_r2: float
+        r^2 of the selected power/no slip fit
+    ns_exponent: float
+        No slip optimized exponent
+    pp_exponent: float
+        Power Power optimized exponent
+    top_r2: float
+        r^2 for linear fit of top 4 cells
+    rsqr: float
+        Adjusted r^2 for optimized exponent
+    exponent_95_ci: np.array(float)
+        95% confidence intervals for optimized exponent
+    data_type: str
+        Type of data (v, q, V, or Q)
+    """
+
+    def __init__(self):
+        """Intialize object and instance variables."""
+
+        self.fit_method = 'Automatic'  # User selected method Automatic or Manual
+        self.top_method = 'Power'
+        self.bot_method = 'Power'
+        self.exponent = 0.1667
+        self.exp_method = None
+        self.u = None
+        self.u_auto = None
+        self.z = None
+        self.z_auto = None
+        self.residuals = np.array([])
+        self.coef = 0
+        self.bot_method_auto = 'Power'  # Selected extrapolation for bottom
+        self.top_method_auto = 'Power'  # Selected extrapolation for top
+        self.exponent_auto = 0.1667  # Selected exponent
+        self.top_fit_r2 = 0  # Top fit custom r^2
+        self.top_max_diff = 0  # Maximum difference between power and 3-pt at top
+        self.bot_diff = 0  # Difference between power and no slip at z = 0.1
+        self.bot_r2 = 0  # Bottom fit r^2
+        self.fit_r2 = 0  # r^2 of the selected power/no slip fit
+        self.ns_exponent = 0.1667  # No slip optimized exponent
+        self.pp_exponent = 0.1667  # Power Power optimized exponent
+        self.top_r2 = 0
+        self.rsqr = 0
+        self.exponent_95_ci = 0
+        self.data_type = 'q'
+
+    def populate_data(self, normalized, fit_method, top=None, bot=None, exponent=None):
+        """Determine selected fit.
+
+        Parameters
+        ----------
+        normalized: NormData
+            Object of NormData
+        fit_method: str
+            Fit method (Automatic or Manual)
+        top: str
+            Top extrapolation method
+        bot: str
+            Bottom extrapolation method
+        exponent: float
+            Exponent for extrapolation method
+        """
+
+        valid_data = np.squeeze(normalized.valid_data)
+
+        # Store data in properties to object
+        self.fit_method = fit_method
+        self.data_type = normalized.data_type
+
+        update_fd = FitData()
+
+        if fit_method == 'Automatic':
+            # Compute power fit with optimized exponent as reference to determine
+            # if constant no slip will be more appropriate
+            ppobj = FitData()
+            ppobj.populate_data(norm_data=normalized,
+                                top='Power',
+                                bot='Power',
+                                method='optimize')
+
+            # Store results in object
+            self.pp_exponent = ppobj.exponent
+            self.residuals = ppobj.residuals
+            self.rsqr = ppobj.r_squared
+            self.exponent_95_ci = ppobj.exponent_95_ci
+
+            # Begin automatic fit
+
+            # More than 6 cells are required to compute an optimized fit.  For fewer
+            # than 7 cells the default power/power fit is selected due to lack of sufficient
+            # data for a good analysis
+            if len(self.residuals) > 6:
+                # DSM (6/4/2021) the top and bottom were mislabeled (even in Matlab). I corrected. The computations
+                # are unaffected as the top2 and bot2 are only used in the c_shape_condition equation
+                # c_shape_condition = (np.sign(bot2) * np.sign(top2) == np.sign(mid2) and np.abs(bot2 + top2) > 0.1)
+                # Compute the difference between the bottom two cells of data and the optimized power fit
+                bot2 = np.nansum(normalized.unit_normalized_med[valid_data[-2:]]
+                                 - ppobj.coef * normalized.unit_normalized_z[valid_data[-2:]] ** ppobj.exponent)
+
+                # Compute the difference between the top two cells of data and the optimized power fit
+                top2 = np.nansum(normalized.unit_normalized_med[valid_data[:2]]
+                                 - ppobj.coef * normalized.unit_normalized_z[valid_data[:2]] ** ppobj.exponent)
+
+                # Compute the difference between the middle two cells of data and the optimized power fit
+                mid1 = int(np.floor(len(valid_data) / 2)) - 1
+
+                mid2 = np.nansum(normalized.unit_normalized_med[valid_data[mid1:mid1 + 2]]
+                                 - ppobj.coef * normalized.unit_normalized_z[valid_data[mid1:mid1 + 2]]
+                                 ** ppobj.exponent)
+
+                self.top_method_auto = 'Power'
+                self.bot_method_auto = 'Power'
+
+                # Evaluate difference in data and power fit at water surface using a linear fit through the top 4
+                # median cells and save results
+                y = normalized.unit_normalized_med[valid_data[:4]]
+                x = normalized.unit_normalized_z[valid_data[:4]]
+
+                coeffs = np.polyfit(x, y, 1)
+                resid = y - (coeffs[0]*x + coeffs[1])
+                corr = np.corrcoef(x, y)[0, 1]
+                self.top_fit_r2 = 1 - (np.sum(resid ** 2) / np.mean(np.abs(resid)))
+                self.top_r2 = corr**2
+
+                # Evaluate overall fit
+                # If the optimized power fit does not have an r^2 better than 0.8, or if 0.1667
+                # falls within the 95% confidence interval of the optimized exponent, there is
+                # insufficient justification to change the exponent from 0.1667
+                if (ppobj.r_squared < 0.8) or ((0.1667 > self.exponent_95_ci[0]) and (0.1667 < self.exponent_95_ci[1])):
+                    # If an optimized exponent cannot be justified the linear fit is used to determine if a constant
+                    # fit at the top is a better alternative than a power fit.  If the power fit is the better
+                    # alternative the exponent is set to the default 0.1667 and the data is refit
+                    if self.top_fit_r2 < 0.8 or self.top_r2 < 0.9:
+                        ppobj = FitData()
+                        ppobj.populate_data(norm_data=normalized,
+                                            top='Power',
+                                            bot='Power',
+                                            method='Manual',
+                                            exponent=0.1667)
+
+                # Evaluate fit of top and bottom portions of the profile
+                # Set save selected exponent and associated fit statistics
+                self.exponent_auto = ppobj.exponent
+                self.fit_r2 = ppobj.r_squared
+
+                # Compute the difference at the water surface between a linear fit of the top 4 measured cells
+                # and the best selected power fit of the whole profile
+                self.top_max_diff = ppobj.u[-1] - np.sum(coeffs)
+
+                # Evaluate the difference at the bottom between power using the whole profile and power using
+                # only the bottom third
+                ns_fd = FitData()
+                ns_fd.populate_data(normalized, 'Constant', 'No Slip', 'Optimize')
+                self.ns_exponent = ns_fd.exponent
+                self.bot_r2 = ns_fd.r_squared
+                self.bot_diff = ppobj.u[np.round(ppobj.z, 2) == 0.1][0] \
+                    - ns_fd.u[np.round(ns_fd.z, 2) == 0.1][0]
+
+                # Begin automatic selection logic
+                # -----------------------------------
+
+                # A constant no slip fit condition is selected if:
+                #
+                # 1)The top of the power fit doesn't fit the data well.
+                # This is determined to be the situation when
+                # (a) the difference at the water surface between the
+                # linear fit and the power fit is greater than 10% and
+                # (b) the difference is either positive or the difference
+                # of the top measured cell differs from the best
+                # selected power fit by more than 5%.
+                top_condition = (np.abs(self.top_max_diff) > 0.1 and ((self.top_max_diff > 0)
+                                 or np.abs(normalized.unit_normalized_med[valid_data[0]] - ppobj.u[-1]) > 0.05))
+
+                # OR
+
+                # 2) The bottom of the power fit doesn't fit the data
+                # well. This is determined to be the situation when (a)
+                # the difference between an optimized no slip fit
+                # and the selected best power fit of the whole profile
+                # is greater than 10% and (b) the optimized no slip fit has
+                # an r^2 greater than 0.6.
+                bottom_condition = ((np.abs(self.bot_diff) > 0.1) and self.bot_r2 > 0.6)
+
+                # OR
+
+                # 3) Flow is bidirectional. The sign of the top of the
+                # profile is different from the sign of the bottom of
+                # the profile.
+                bidirectional_condition = (np.sign(normalized.unit_normalized_med[valid_data[0]])
+                                           != np.sign(normalized.unit_normalized_med[valid_data[-1]]))
+                # OR
+
+                # 4) The profile is C-shaped. This is determined by
+                # (a) the sign of the top and bottom difference from
+                # the best selected power fit being different than the
+                # sign of the middle difference from the best selected
+                # power fit and (b) the combined difference of the top
+                # and bottom difference from the best selected power
+                # fit being greater than 10%.
+                c_shape_condition = (np.sign(bot2) * np.sign(top2) == np.sign(mid2) and np.abs(bot2 + top2) > 0.1)
+
+                if top_condition or bottom_condition or bidirectional_condition or c_shape_condition:
+
+                    # Set the bottom to no slip
+                    self.bot_method_auto = 'No Slip'
+                    # If the no slip fit with an optimized exponent does not have r^2 better than 0.8 use
+                    # the default 0.1667 for the no slip exponent
+                    if ns_fd.r_squared > 0.8:
+                        self.exponent_auto = ns_fd.exponent
+                        self.fit_r2 = ns_fd.r_squared
+                    else:
+                        self.exponent_auto = 0.1667
+                        self.fit_r2 = np.nan
+
+                    # Use the no slip 95% confidence intervals if they are available
+                    if ns_fd.exponent_95_ci is not None and np.all(
+                            np.isnan(ns_fd.exponent_95_ci) == False):
+                        self.exponent_95_ci[0] = ns_fd.exponent_95_ci[0]
+                        self.exponent_95_ci[1] = ns_fd.exponent_95_ci[1]
+                    else:
+                        self.exponent_95_ci[0] = np.nan
+                        self.exponent_95_ci[1] = np.nan
+
+                    # Set the top method to constant
+                    self.top_method_auto = 'Constant'
+
+                else:
+
+                    # Leave fit power/power and set the best selected optimized exponent as the automatic fit exponent
+                    self.exponent_auto = ppobj.exponent
+
+            else:
+
+                # If the data are insufficient for a valid analysis use the power/power fit
+                # with the default 0.1667 exponent
+                self.top_method_auto = 'Power'
+                self.bot_method_auto = 'Power'
+                self.exponent_auto = 0.1667
+                self.ns_exponent = 0.1667
+
+            # Update the fit using the automatically selected methods
+            update_fd.populate_data(norm_data=normalized,
+                                    top=self.top_method_auto,
+                                    bot=self.bot_method_auto,
+                                    method='Manual',
+                                    exponent=self.exponent_auto)
+            self.u = update_fd.u
+            self.u_auto = update_fd.u
+            self.z_auto = update_fd.z
+            self.z = update_fd.z
+
+        elif fit_method == 'Manual':
+
+            # Identify changes in fit settings
+            if top is None:
+                top = self.top_method
+            if bot is None:
+                bot = self.bot_method
+            if exponent is None:
+                exponent = self.exponent
+
+            # Update fit with manual settings
+            update_fd.populate_data(norm_data=normalized,
+                                    top=top,
+                                    bot=bot,
+                                    method=fit_method,
+                                    exponent=exponent)
+            self.u = update_fd.u
+            self.z = update_fd.z
+
+        # Store fit data in object
+        self.top_method = update_fd.top_method
+        self.bot_method = update_fd.bot_method
+        self.exponent = update_fd.exponent
+        self.coef = update_fd.coef
+        self.exp_method = update_fd.exp_method
+        self.residuals = update_fd.residuals
+
+    @staticmethod
+    def qrev_mat_in(mat_data):
+        """Processes the Matlab data structure to obtain a list of NormData objects containing transect
+           data from the Matlab data structure.
+
+       Parameters
+       ----------
+       mat_data: mat_struct
+           Matlab data structure obtained from sio.loadmat
+
+       Returns
+       -------
+       norm_data: list
+           List of NormData objects
+       """
+        fit_data = []
+        if hasattr(mat_data, 'selFit'):
+            for n, data in enumerate(mat_data.selFit):
+                temp = SelectFit()
+                temp.populate_from_qrev_mat(data, mat_data.normData[n])
+                fit_data.append(temp)
+        return fit_data
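+
+    # Typical use (illustrative): load a QRev .mat file with
+    # sio.loadmat(..., struct_as_record=False, squeeze_me=True) and pass the
+    # resulting struct containing the selFit and normData fields to qrev_mat_in.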
+
+    def populate_from_qrev_mat(self, mat_data, norm_data):
+        """Populates the object using data from previously saved QRev Matlab file.
+
+        Parameters
+        ----------
+        mat_data: mat_struct
+           Matlab data structure obtained from sio.loadmat
+        norm_data: NormData
+            Object of NormData
+        """
+
+        self.fit_method = mat_data.fitMethod
+        self.top_method = mat_data.topMethod
+        self.bot_method = mat_data.botMethod
+        self.exponent = mat_data.exponent
+        self.exp_method = mat_data.expMethod
+        self.u = mat_data.u
+        self.u_auto = mat_data.uAuto
+        self.z = mat_data.z
+        self.z_auto = mat_data.zAuto
+        self.residuals = mat_data.residuals
+        self.coef = mat_data.coef
+        self.bot_method_auto = mat_data.botMethodAuto
+        self.top_method_auto = mat_data.topMethodAuto
+        self.exponent_auto = mat_data.exponentAuto
+        self.top_fit_r2 = mat_data.topfitr2
+        self.top_max_diff = mat_data.topmaxdiff
+        self.bot_diff = mat_data.botdiff
+        self.bot_r2 = mat_data.botrsqr
+        self.fit_r2 = mat_data.fitrsqr
+        self.ns_exponent = mat_data.nsexponent
+        self.pp_exponent = mat_data.ppexponent
+        self.top_r2 = mat_data.topr2
+        self.rsqr = mat_data.rsqr
+        self.exponent_95_ci = mat_data.exponent95confint
+        self.data_type = norm_data.dataType
diff --git a/Classes/SensorData.py b/Classes/SensorData.py
new file mode 100644
index 0000000..0c8f869
--- /dev/null
+++ b/Classes/SensorData.py
@@ -0,0 +1,82 @@
+import numpy as np
+
+
+class SensorData(object):
+    """Class stores data for pitch, roll, temperature, salinity, and speed of sound and its source/
+
+    Attributes
+    ----------
+    data: np.array(float)
+        Data to be used in computations.
+    data_orig: np.array(float)
+        Original data loaded from raw data file.
+    source: str
+        Source of data, e.g. Int. Sensor, Ext. Sensor, User
+    """
+    
+    def __init__(self):
+        """Initializes class and variables."""
+
+        self.data = None
+        self.data_orig = None
+        self.source = None
+        
+    def populate_data(self, data_in, source_in):
+        """Store data in class.
+
+        Parameters
+        ----------
+        data_in: np.array(float)
+            Data to be stored.
+        source_in: str
+            Source of data to be stored.
+        """
+
+        self.data = data_in
+        self.data_orig = data_in
+        self.source = source_in
+
+    def populate_from_qrev_mat(self, mat_data):
+        """Populates the object using data from previously saved QRev Matlab file.
+
+        Parameters
+        ----------
+        mat_data: mat_struct
+           Matlab data structure obtained from sio.loadmat
+        """
+
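+        # loadmat can return a scalar rather than an array for single-element
+        # data, so scalars are wrapped back into 1-D float arrays below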
+        if np.isnan(mat_data.data).all():
+            self.data = np.array([])
+        else:
+            if type(mat_data.data) is np.ndarray:
+                self.data = mat_data.data.astype(float)
+            else:
+                self.data = np.array([float(mat_data.data)])
+        if np.isnan(mat_data.dataOrig).all():
+            self.data_orig = np.array([])
+        else:
+            if type(mat_data.dataOrig) is np.ndarray:
+                self.data_orig = mat_data.dataOrig.astype(float)
+            else:
+                self.data_orig = np.array([float(mat_data.dataOrig)])
+        self.source = mat_data.source
+        
+    def change_data(self, data_in):
+        """Change data to be applied in computations.
+
+        Parameters
+        ----------
+        data_in: np.array(float)
+        """
+
+        self.data = data_in
+        
+    def set_source(self, source_in):
+        """Change source of data.
+
+        Parameters
+        ----------
+        source_in: str
+            Source of data.
+        """
+        self.source = source_in
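+
+
+if __name__ == '__main__':
+    # Minimal usage sketch (illustrative only; not part of the original code
+    # and the values are made up)
+    temp = SensorData()
+    temp.populate_data(data_in=np.array([21.3, 21.4]), source_in='Int. Sensor')
+    temp.change_data(data_in=np.array([21.5, 21.5]))  # data_orig keeps the raw values
+    temp.set_source(source_in='User')
+    print(temp.data, temp.data_orig, temp.source)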
diff --git a/Classes/SensorStructure.py b/Classes/SensorStructure.py
new file mode 100644
index 0000000..20a7cf7
--- /dev/null
+++ b/Classes/SensorStructure.py
@@ -0,0 +1,73 @@
+import numpy as np
+from Classes.HeadingData import HeadingData
+from Classes.SensorData import SensorData
+
+
+class SensorStructure(object):
+    """Class to store sensor data from various sources.
+
+    Attributes
+    ----------
+    self.selected: str
+        The selected sensor reference name ('internal', 'external', 'user').
+    self.internal: SensorData
+        Contains the data from the internal sensor, object of SensorData
+    self.external: SensorData
+        Contains the data from an external sensor, object of SensorData
+    self.user: SensorData
+        Contains user supplied value, object of SensorData
+    """
+    
+    def __init__(self):
+        """Initialize class and set variable to None."""
+
+        self.selected = None  # The selected sensor reference name ('internal', 'external', 'user')
+        self.internal = None  # Contains the data from the internal sensor
+        self.external = None  # Contains the data from an external sensor
+        self.user = None  # Contains user supplied value
+
+    def populate_from_qrev_mat(self, mat_data, heading=False):
+        """Populates the object using data from previously saved QRev Matlab file.
+
+        Parameters
+        ----------
+        mat_data: mat_struct
+           Matlab data structure obtained from sio.loadmat
+        heading: bool
+            Determines if mat_data is heading data
+        """
+
+        if not heading:
+            # Non-heading sensors
+            if not type(mat_data.external) is np.ndarray:
+                self.external = SensorData()
+                self.external.populate_from_qrev_mat(mat_data.external)
+            if not type(mat_data.internal) is np.ndarray:
+                self.internal = SensorData()
+                self.internal.populate_from_qrev_mat(mat_data.internal)
+            if not type(mat_data.user) is np.ndarray:
+                self.user = SensorData()
+                self.user.populate_from_qrev_mat(mat_data.user)
+            self.selected = mat_data.selected
+        else:
+            # Heading data
+            if not type(mat_data.external) is np.ndarray:
+                self.external = HeadingData()
+                self.external.populate_from_qrev_mat(mat_data.external)
+            if not type(mat_data.internal) is np.ndarray:
+                self.internal = HeadingData()
+                self.internal.populate_from_qrev_mat(mat_data.internal)
+            if not type(mat_data.user) is np.ndarray:
+                self.user = HeadingData()
+                self.user.populate_from_qrev_mat(mat_data.user)
+            self.selected = mat_data.selected
+        
+    def set_selected(self, selected_name):
+        """Set the selected source for the specified object
+
+        Parameters
+        ----------
+        selected_name: str
+            Type of data (internal, external, user).
+        """
+        self.selected = selected_name
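+
+
+if __name__ == '__main__':
+    # Minimal usage sketch (illustrative only; not part of the original code):
+    # wrap two SensorData objects and select the internal source
+    structure = SensorStructure()
+    structure.internal = SensorData()
+    structure.internal.populate_data(data_in=np.array([14.9, 15.1]), source_in='Int. Sensor')
+    structure.user = SensorData()
+    structure.user.populate_data(data_in=np.array([15.0]), source_in='User')
+    structure.set_selected('internal')
+    print(structure.selected, structure.internal.data)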
diff --git a/Classes/Sensors.py b/Classes/Sensors.py
new file mode 100644
index 0000000..5734ff7
--- /dev/null
+++ b/Classes/Sensors.py
@@ -0,0 +1,122 @@
+import numpy as np
+from Classes.SensorStructure import SensorStructure
+
+
+class Sensors(object):
+    """Class to store data from ADCP sensors.
+
+    Attributes
+    ----------
+    heading_deg: HeadingData
+        Object of HeadingData.
+    pitch_deg: SensorStructure
+        Pitch data, object of SensorStructure
+    roll_deg: SensorStructure
+        Roll data, object of SensorStructure
+    temperature_deg_c: SensorStructure
+        Temperature data, object of SensorStructure
+    salinity_ppt: SensorStructure
+        Salinity data, object of SensorStructure
+    speed_of_sound_mps: SensorStructure
+        Speed of sound, object of SensorStructure
+    """
+
+    def __init__(self):
+        """Initialize class and create variable objects"""
+
+        self.heading_deg = SensorStructure()  # Object of HeadingData
+        self.pitch_deg = SensorStructure()  # Pitch data, object of SensorStructure
+        self.roll_deg = SensorStructure()  # Roll data, object of SensorStructure
+        self.temperature_deg_c = SensorStructure()  # Temperature data, object of SensorStructure
+        self.salinity_ppt = SensorStructure()  # Salinity data, object of SensorStructure
+        self.speed_of_sound_mps = SensorStructure()  # Speed of sound, object of SensorStructure
+
+    def populate_from_qrev_mat(self, transect):
+        """Populates the object using data from previously saved QRev Matlab file.
+
+        Parameters
+        ----------
+        transect: mat_struct
+           Matlab data structure obtained from sio.loadmat
+        """
+        if hasattr(transect, 'sensors'):
+            if hasattr(transect.sensors, 'heading_deg'):
+                self.heading_deg.populate_from_qrev_mat(transect.sensors.heading_deg, heading=True)
+            if hasattr(transect.sensors, 'pitch_deg'):
+                self.pitch_deg.populate_from_qrev_mat(transect.sensors.pitch_deg)
+            if hasattr(transect.sensors, 'roll_deg'):
+                self.roll_deg.populate_from_qrev_mat(transect.sensors.roll_deg)
+            if hasattr(transect.sensors, 'salinity_ppt'):
+                self.salinity_ppt.populate_from_qrev_mat(transect.sensors.salinity_ppt)
+            if hasattr(transect.sensors, 'speedOfSound_mps'):
+                self.speed_of_sound_mps.populate_from_qrev_mat(transect.sensors.speedOfSound_mps)
+            if hasattr(transect.sensors, 'temperature_degC'):
+                self.temperature_deg_c.populate_from_qrev_mat(transect.sensors.temperature_degC)
+
+    @staticmethod
+    def speed_of_sound(temperature, salinity):
+        """Computes speed of sound from temperature and salinity.
+
+        Parameters
+        ----------
+        temperature: float or np.array(float)
+            Water temperature at transducer face, in degrees C.
+        salinity: float or np.array(float)
+            Water salinity at transducer face, in ppt.
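+
+        Returns
+        -------
+        sos: float or np.array(float)
+            Speed of sound in water, in m/s.
+
+        Example
+        -------
+        Illustrative check (not part of the original code):
+
+        >>> round(Sensors.speed_of_sound(20.0, 0.0), 2)
+        1481.62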
+        """
+
+        # Speed of sound is not provided in the RS Matlab file; it is computed here from the equation
+        # used in the TRDI BBSS, from Urick (1983). This may not be the same equation as used by SonTek.
+        sos = 1449.2 + 4.6 * temperature - 0.055 * temperature**2 + 0.00029 * temperature**3 \
+            + (1.34 - 0.01 * temperature) * (salinity - 35.0)
+
+        return sos
+
+    @staticmethod
+    def unesco_speed_of_sound(t, s, p=10):
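+        """Computes speed of sound using the UNESCO equation (Chen and Millero).
+
+        Parameters
+        ----------
+        t: float or np.array(float)
+            Water temperature, in degrees C.
+        s: float or np.array(float)
+            Salinity, in ppt.
+        p: float
+            Pressure, assumed to be in decibars (converted to bars below), default 10.
+
+        Returns
+        -------
+        sos: float or np.array(float)
+            Speed of sound, in m/s.
+        """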
+
+        p = p / 10
+        sr = np.sqrt(np.abs(s))
+
+        # S ** 2 TERM
+        d = 1.727E-3 - 7.9836E-6 * p
+
+        # S ** 3 / 2  TERM
+        b1 = 7.3637E-5 + 1.7945E-7 * t
+        b0 = -1.922E-2 - 4.42E-5 * t
+        b = b0 + b1 * p
+
+        # S ** 1 TERM
+        a3 = (-3.389E-13 * t + 6.649E-12) * t + 1.100E-10
+        a2 = ((7.988E-12 * t - 1.6002E-10) * t + 9.1041E-9) * t - 3.9064E-7
+        a1 = (((-2.0122E-10 * t + 1.0507E-8) * t - 6.4885E-8) * t - 1.2580E-5) * t + 9.4742E-5
+        a0 = (((-3.21E-8 * t + 2.006E-6) * t + 7.164E-5) * t - 1.262E-2) * t + 1.389
+        a = ((a3 * p + a2) * p + a1) * p + a0
+
+        # S ** 0 TERM
+        c3 = (-2.3643E-12 * t + 3.8504E-10) * t - 9.7729E-9
+        c2 = (((1.0405E-12 * t - 2.5335E-10) * t + 2.5974E-8) * t - 1.7107E-6) * t + 3.1260E-5
+        c1 = (((-6.1185E-10 * t + 1.3621E-7) * t - 8.1788E-6) * t + 6.8982E-4) * t + 0.153563
+        c0 = ((((3.1464E-9 * t - 1.47800E-6) * t + 3.3420E-4) * t - 5.80852E-2) * t + 5.03711) * t + 1402.388
+        c = ((c3 * p + c2) * p + c1) * p + c0
+
+        # SOUND  SPEED
+        sos = c + (a + b * sr + d * s) * s
+
+        return sos
+
+    @staticmethod
+    def avg_temperature(transects):
+        """Compute mean temperature from temperature data from all transects.
+
+        Parameters
+        ----------
+        transects: list
+            List of TransectData objects
+        """
+
+        temps = np.array([])
+        for transect in transects:
+            if transect.checked:
+                temps = np.append(temps, transect.sensors.temperature_deg_c.internal.data)
+        return np.nanmean(temps)
diff --git a/Classes/TransectData.py b/Classes/TransectData.py
new file mode 100644
index 0000000..1d7a2ee
--- /dev/null
+++ b/Classes/TransectData.py
@@ -0,0 +1,2498 @@
+import os
+import time
+import warnings
+import concurrent.futures
+import numpy as np
+from datetime import datetime
+from datetime import timezone
+from scipy import signal, fftpack
+# from Classes.Pd0TRDI import Pd0TRDI
+from Classes.Pd0TRDI_2 import Pd0TRDI
+from Classes.DepthStructure import DepthStructure
+from Classes.WaterData import WaterData
+from Classes.BoatStructure import BoatStructure
+from Classes.GPSData import GPSData
+from Classes.Edges import Edges
+from Classes.ExtrapData import ExtrapData
+from Classes.Sensors import Sensors
+from Classes.SensorData import SensorData
+from Classes.HeadingData import HeadingData
+from Classes.DateTime import DateTime
+from Classes.InstrumentData import InstrumentData
+from Classes.MultiThread import MultiThread
+from Classes.CoordError import CoordError
+from MiscLibs.common_functions import nandiff, cosd, arctand, tand, nans, cart2pol, rad2azdeg, nan_less
+
+
+class TransectData(object):
+    """Class to hold Transect properties.
+
+    Attributes
+    ----------
+    adcp: InstrumentData
+        Object of InstrumentData
+    file_name: str
+        Filename of transect data file
+    w_vel: WaterData
+        Object of WaterData
+    boat_vel: BoatStructure
+        Object of BoatStructure containing objects of BoatData for BT, GGA, and VTG
+    gps: GPSData
+        Object of GPSData
+    sensors: Sensors
+        Object of Sensors
+    depths: DepthStructure
+        Object of DepthStructure containing objects of DepthData for bt_depths, vb_depths, and ds_depths
+    edges: Edges
+        Object of Edges (left and right object of clsEdgeData)
+    extrap: ExtrapData
+        Object of ExtrapData
+    start_edge: str
+        Starting edge of transect looking downstream (Left or Right)
+    orig_start_edge: str
+        Original starting edge of transect looking downstream (Left or Right)
+    date_time: DateTime
+        Object of DateTime
+    checked: bool
+        Flag indicating whether the transect was checked for use in the mmt file; assumed checked for SonTek
+    in_transect_idx: np.array(int)
+        Index of ensemble data associated with the moving-boat portion of the transect
+    """
+
+    def __init__(self):
+        self.adcp = None  # object of InstrumentData
+        self.file_name = None  # filename of transect data file
+        self.w_vel = None  # object of WaterData
+        self.boat_vel = None  # object of BoatStructure for various boat velocity references (btVel, ggaVel, vtgVel)
+        self.gps = None  # object of GPSData
+        self.sensors = None  # object of Sensors
+        self.depths = None  # object of DepthStructure for depth data including cell depths & ref depths
+        self.edges = None  # object of Edges (left and right objects of EdgeData)
+        self.extrap = None  # object of ExtrapData
+        self.start_edge = None  # starting edge of transect looking downstream (Left or Right)
+        self.orig_start_edge = None  # original starting edge of transect looking downstream (Left or Right)
+        self.date_time = None  # object of DateTime
+        self.checked = None  # flag indicating whether transect was checked for use in mmt file; assumed checked for SonTek
+        self.in_transect_idx = None  # index of ensemble data associated with the moving-boat portion of the transect
+
+    def trdi(self, mmt_transect, pd0_data, mmt):
+        """Create object, lists, and instance variables for TRDI data.
+
+        Parameters
+        ----------
+        mmt_transect: MMT_Transect
+            Object of Transect (from mmt)
+        pd0_data: Pd0TRDI
+            Object of Pd0TRDI
+        mmt: MMT_TRDI
+            Object of MMT_TRDI
+        """
+
+        # Get file name of the pd0 file, which is the first file in the list of files associated with the transect
+        self.file_name = mmt_transect.Files[0]
+
+        # Get the active configuration data for the transect
+        mmt_config = getattr(mmt_transect, 'active_config')
+
+        # If the pd0 file has water track data, process all of the data
+        if pd0_data.Wt is not None:
+
+            # Ensemble times
+            # Compute time for each ensemble in seconds
+            ens_time_sec = pd0_data.Sensor.time[:, 0] * 3600 \
+                           + pd0_data.Sensor.time[:, 1] * 60 \
+                           + pd0_data.Sensor.time[:, 2] \
+                           + pd0_data.Sensor.time[:, 3] / 100
+
+            # Compute the duration of each ensemble in seconds adjusting for lost data
+            ens_delta_time = np.tile([np.nan], ens_time_sec.shape)
+            idx_time = np.where(np.isnan(ens_time_sec) == False)[0]
+            ens_delta_time[idx_time[1:]] = nandiff(ens_time_sec[idx_time])
+
+            # Adjust for transects that last past midnight
+            idx_24hr = np.where(nan_less(ens_delta_time, 0))[0]
+            ens_delta_time[idx_24hr] = 24 * 3600 + ens_delta_time[idx_24hr]
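+            # (illustrative: an ensemble pair spanning 23:59:50 to 00:00:10 gives a
+            # delta of -86380 s, corrected here to +20 s)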
+            ens_delta_time = ens_delta_time.T
+
+            # Start date and time
+            idx = np.where(np.isnan(pd0_data.Sensor.time[:, 0]) == False)[0][0]
+            start_year = int(pd0_data.Sensor.date[idx, 0])
+
+            # StreamPro does not include Y2K dates
+            if start_year < 100:
+                start_year = 2000 + int(pd0_data.Sensor.date_not_y2k[idx, 0])
+
+            start_month = int(pd0_data.Sensor.date[idx, 1])
+            start_day = int(pd0_data.Sensor.date[idx, 2])
+            start_hour = int(pd0_data.Sensor.time[idx, 0])
+            start_min = int(pd0_data.Sensor.time[idx, 1])
+            start_sec = int(pd0_data.Sensor.time[idx, 2] + pd0_data.Sensor.time[idx, 3] / 100)
+            start_micro = int(
+                ((pd0_data.Sensor.time[idx, 2] + pd0_data.Sensor.time[idx, 3] / 100) - start_sec) * 10 ** 6)
+
+            start_dt = datetime(start_year, start_month, start_day, start_hour, start_min, start_sec, start_micro,
+                                tzinfo=timezone.utc)
+            start_serial_time = start_dt.timestamp()
+            start_date = datetime.strftime(datetime.utcfromtimestamp(start_serial_time), '%m/%d/%Y')
+
+            # End date and time
+            idx = np.where(np.isnan(pd0_data.Sensor.time[:, 0]) == False)[0][-1]
+            end_year = int(pd0_data.Sensor.date[idx, 0])
+            # StreamPro does not include Y2K dates
+            if end_year < 100:
+                end_year = 2000 + int(pd0_data.Sensor.date_not_y2k[idx, 0])
+
+            end_month = int(pd0_data.Sensor.date[idx, 1])
+            end_day = int(pd0_data.Sensor.date[idx, 2])
+            end_hour = int(pd0_data.Sensor.time[idx, 0])
+            end_min = int(pd0_data.Sensor.time[idx, 1])
+            end_sec = int(pd0_data.Sensor.time[idx, 2] + pd0_data.Sensor.time[idx, 3] / 100)
+            end_micro = int(((pd0_data.Sensor.time[idx, 2] + pd0_data.Sensor.time[idx, 3] / 100) - end_sec) * 10 ** 6)
+
+            end_dt = datetime(end_year, end_month, end_day, end_hour, end_min, end_sec, end_micro, tzinfo=timezone.utc)
+            end_serial_time = end_dt.timestamp()
+
+            # Create date/time object
+            self.date_time = DateTime()
+            self.date_time.populate_data(date_in=start_date,
+                                         start_in=start_serial_time,
+                                         end_in=end_serial_time,
+                                         ens_dur_in=ens_delta_time)
+
+            # Transect checked for use in discharge computation
+            self.checked = mmt_transect.Checked
+
+            # Create class for adcp information
+            self.adcp = InstrumentData()
+            self.adcp.populate_data(manufacturer='TRDI', raw_data=pd0_data, mmt_transect=mmt_transect, mmt=mmt)
+
+            # Create valid frequency time series
+            freq_ts = self.valid_frequencies(pd0_data.Inst.freq)
+
+            # Initialize boat vel
+            self.boat_vel = BoatStructure()
+            # Apply 3-beam setting from mmt file
+            if mmt_config['Proc_Use_3_Beam_BT'] < 0.5:
+                min_beams = 4
+            else:
+                min_beams = 3
+            self.boat_vel.add_boat_object(source='TRDI',
+                                          vel_in=pd0_data.Bt.vel_mps,
+                                          freq_in=freq_ts,
+                                          coord_sys_in=pd0_data.Cfg.coord_sys[0],
+                                          nav_ref_in='BT',
+                                          min_beams=min_beams,
+                                          bottom_mode=pd0_data.Cfg.bm[0],
+                                          corr_in=pd0_data.Bt.corr,
+                                          rssi_in=pd0_data.Bt.rssi)
+
+            self.boat_vel.set_nav_reference('BT')
+
+            # Compute velocities from GPS Data
+            # ------------------------------------
+            # Raw Data
+            raw_gga_utc = pd0_data.Gps2.utc
+            raw_gga_lat = pd0_data.Gps2.lat_deg
+            raw_gga_lon = pd0_data.Gps2.lon_deg
+
+            # Determine correct sign for latitude
+            for n, lat_ref in enumerate(pd0_data.Gps2.lat_ref):
+                idx = np.nonzero(np.array(lat_ref) == 'S')
+                raw_gga_lat[n, idx] = raw_gga_lat[n, idx] * -1
+
+            # Determine correct sign for longitude
+            for n, lon_ref in enumerate(pd0_data.Gps2.lon_ref):
+                idx = np.nonzero(np.array(lon_ref) == 'W')
+                raw_gga_lon[n, idx] = raw_gga_lon[n, idx] * -1
+
+            # Assign data to local variables
+            raw_gga_alt = pd0_data.Gps2.alt
+            raw_gga_diff = pd0_data.Gps2.corr_qual
+            raw_gga_hdop = pd0_data.Gps2.hdop
+            raw_gga_num_sats = pd0_data.Gps2.num_sats
+            raw_vtg_course = pd0_data.Gps2.course_true
+            raw_vtg_speed = pd0_data.Gps2.speed_kph * 0.2777778
+            raw_vtg_delta_time = pd0_data.Gps2.vtg_delta_time
+            raw_vtg_mode_indicator = pd0_data.Gps2.mode_indicator
+            raw_gga_delta_time = pd0_data.Gps2.gga_delta_time
+
+            # External (RSL-provided) ensemble values are not supported for TRDI data
+            ext_gga_utc = []
+            ext_gga_lat = []
+            ext_gga_lon = []
+            ext_gga_alt = []
+            ext_gga_diff = []
+            ext_gga_hdop = []
+            ext_gga_num_sats = []
+            ext_vtg_course = []
+            ext_vtg_speed = []
+
+            # QRev GPS processing methods
+            gga_p_method = 'Mindt'
+            gga_v_method = 'Mindt'
+            vtg_method = 'Mindt'
+
+            # If valid gps data exist, process the data
+            if (np.nansum(np.nansum(np.abs(raw_gga_lat))) > 0) \
+                    or (np.nansum(np.nansum(np.abs(raw_vtg_speed))) > 0):
+
+                # Process raw GPS data
+                self.gps = GPSData()
+                self.gps.populate_data(raw_gga_utc=raw_gga_utc,
+                                       raw_gga_lat=raw_gga_lat,
+                                       raw_gga_lon=raw_gga_lon,
+                                       raw_gga_alt=raw_gga_alt,
+                                       raw_gga_diff=raw_gga_diff,
+                                       raw_gga_hdop=raw_gga_hdop,
+                                       raw_gga_num_sats=raw_gga_num_sats,
+                                       raw_gga_delta_time=raw_gga_delta_time,
+                                       raw_vtg_course=raw_vtg_course,
+                                       raw_vtg_speed=raw_vtg_speed,
+                                       raw_vtg_delta_time=raw_vtg_delta_time,
+                                       raw_vtg_mode_indicator=raw_vtg_mode_indicator,
+                                       ext_gga_utc=ext_gga_utc,
+                                       ext_gga_lat=ext_gga_lat,
+                                       ext_gga_lon=ext_gga_lon,
+                                       ext_gga_alt=ext_gga_alt,
+                                       ext_gga_diff=ext_gga_diff,
+                                       ext_gga_hdop=ext_gga_hdop,
+                                       ext_gga_num_sats=ext_gga_num_sats,
+                                       ext_vtg_course=ext_vtg_course,
+                                       ext_vtg_speed=ext_vtg_speed,
+                                       gga_p_method=gga_p_method,
+                                       gga_v_method=gga_v_method,
+                                       vtg_method=vtg_method)
+
+                # If valid gga data exist, create gga boat velocity object
+                if np.nansum(np.nansum(np.abs(raw_gga_lat))) > 0:
+                    self.boat_vel.add_boat_object(source='TRDI',
+                                                  vel_in=self.gps.gga_velocity_ens_mps,
+                                                  coord_sys_in='Earth',
+                                                  nav_ref_in='GGA')
+
+                # If valid vtg data exist, create vtg boat velocity object
+                if np.nansum(np.nansum(np.abs(raw_vtg_speed))) > 0:
+                    self.boat_vel.add_boat_object(source='TRDI',
+                                                  vel_in=self.gps.vtg_velocity_ens_mps,
+                                                  coord_sys_in='Earth',
+                                                  nav_ref_in='VTG')
+
+            # Get and compute ensemble beam depths
+            temp_depth_bt = np.array(pd0_data.Bt.depth_m)
+
+            # Screen out invalid depths
+            temp_depth_bt[temp_depth_bt < 0.01] = np.nan
+
+            # Add draft
+            temp_depth_bt += mmt_config['Offsets_Transducer_Depth']
+
+            # Get instrument cell data
+            cell_size_all_m, cell_depth_m, sl_cutoff_per, sl_lag_effect_m = \
+                TransectData.compute_cell_data(pd0_data)
+
+            # Adjust cell depth for draft
+            cell_depth_m = np.add(mmt_config['Offsets_Transducer_Depth'], cell_depth_m)
+
+            # Create depth data object for BT
+            self.depths = DepthStructure()
+            self.depths.add_depth_object(depth_in=temp_depth_bt,
+                                         source_in='BT',
+                                         freq_in=freq_ts,
+                                         draft_in=mmt_config['Offsets_Transducer_Depth'],
+                                         cell_depth_in=cell_depth_m,
+                                         cell_size_in=cell_size_all_m)
+
+            # Compute cells above side lobe
+            cells_above_sl, sl_cutoff_m = \
+                TransectData.side_lobe_cutoff(depths=self.depths.bt_depths.depth_orig_m,
+                                              draft=self.depths.bt_depths.draft_orig_m,
+                                              cell_depth=self.depths.bt_depths.depth_cell_depth_m,
+                                              sl_lag_effect=sl_lag_effect_m,
+                                              slc_type='Percent',
+                                              value=1 - sl_cutoff_per / 100)
+
+            # Check for the presence of vertical beam data
+            if np.nanmax(np.nanmax(pd0_data.Sensor.vert_beam_status)) > 0:
+                temp_depth_vb = np.tile(np.nan, (1, cell_depth_m.shape[1]))
+                temp_depth_vb[0, :] = pd0_data.Sensor.vert_beam_range_m
+
+                # Screen out invalid depths
+                temp_depth_vb[temp_depth_vb < 0.01] = np.nan
+
+                # Add draft
+                temp_depth_vb = temp_depth_vb + mmt_config['Offsets_Transducer_Depth']
+
+                # Create depth data object for vertical beam
+                self.depths.add_depth_object(depth_in=temp_depth_vb,
+                                             source_in='VB',
+                                             freq_in=freq_ts,
+                                             draft_in=mmt_config['Offsets_Transducer_Depth'],
+                                             cell_depth_in=cell_depth_m,
+                                             cell_size_in=cell_size_all_m)
+
+            # Check for the presence of depth sounder
+            if np.nansum(np.nansum(pd0_data.Gps2.depth_m)) > 1e-5:
+                temp_depth_ds = pd0_data.Gps2.depth_m
+
+                # Screen out invalid data
+                temp_depth_ds[temp_depth_ds < 0.01] = np.nan
+
+                # Use the last valid depth for each ensemble
+                last_depth_col_idx = np.sum(np.isnan(temp_depth_ds) == False, axis=1) - 1
+                last_depth_col_idx[last_depth_col_idx == -1] = 0
+                row_index = np.arange(len(temp_depth_ds))
+                last_depth = nans(row_index.size)
+                for row in row_index:
+                    last_depth[row] = temp_depth_ds[row, last_depth_col_idx[row]]
+
+                # Determine if mmt file has a scale factor and offset for the depth sounder
+                if mmt_config['DS_Cor_Spd_Sound'] == 0:
+                    scale_factor = mmt_config['DS_Scale_Factor']
+                else:
+                    scale_factor = pd0_data.Sensor.sos_mps / 1500.
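+                # (illustrative: a measured speed of sound of 1485 m/s gives a
+                # scale factor of 1485 / 1500 = 0.99)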
+
+                # Apply scale factor, offset, and draft
+                # Note: Only the ADCP draft is stored.  The transducer
+                # draft or scaling for depth sounder data cannot be changed in QRev
+                ds_depth = np.tile(np.nan, (1, cell_depth_m.shape[1]))
+                ds_depth[0, :] = (last_depth * scale_factor) \
+                                 + mmt_config['DS_Transducer_Depth'] \
+                                 + mmt_config['DS_Transducer_Offset']
+
+                self.depths.add_depth_object(depth_in=ds_depth,
+                                             source_in='DS',
+                                             freq_in=np.tile(np.nan, pd0_data.Inst.freq.shape),
+                                             draft_in=mmt_config['Offsets_Transducer_Depth'],
+                                             cell_depth_in=cell_depth_m,
+                                             cell_size_in=cell_size_all_m)
+
+            # Set depth reference to value from mmt file
+            if 'Proc_River_Depth_Source' in mmt_config:
+                if mmt_config['Proc_River_Depth_Source'] == 0:
+                    self.depths.selected = 'bt_depths'
+                    self.depths.composite_depths(transect=self, setting='Off')
+
+                elif mmt_config['Proc_River_Depth_Source'] == 1:
+                    if self.depths.ds_depths is not None:
+                        self.depths.selected = 'ds_depths'
+                    else:
+                        self.depths.selected = 'bt_depths'
+                    self.depths.composite_depths(transect=self, setting='Off')
+
+                elif mmt_config['Proc_River_Depth_Source'] == 2:
+                    if self.depths.vb_depths is not None:
+                        self.depths.selected = 'vb_depths'
+                    else:
+                        self.depths.selected = 'bt_depths'
+                    self.depths.composite_depths(transect=self, setting='Off')
+
+                elif mmt_config['Proc_River_Depth_Source'] == 3:
+                    if self.depths.vb_depths is None:
+                        self.depths.selected = 'bt_depths'
+                        self.depths.composite_depths(transect=self, setting='Off')
+                    else:
+                        self.depths.selected = 'vb_depths'
+                        self.depths.composite_depths(transect=self, setting='On')
+
+                elif mmt_config['Proc_River_Depth_Source'] == 4:
+                    if self.depths.bt_depths is not None:
+                        self.depths.selected = 'bt_depths'
+                        if self.depths.vb_depths is not None or self.depths.ds_depths is not None:
+                            self.depths.composite_depths(transect=self, setting='On')
+                        else:
+                            self.depths.composite_depths(transect=self, setting='Off')
+                    elif self.depths.vb_depths is not None:
+                        self.depths.selected = 'vb_depths'
+                        self.depths.composite_depths(transect=self, setting='On')
+                    elif self.depths.ds_depths is not None:
+                        self.depths.selected = 'ds_depths'
+                        self.depths.composite_depths(transect=self, setting='On')
+                else:
+                    self.depths.selected = 'bt_depths'
+                    self.depths.composite_depths(transect=self, setting='Off')
+            else:
+                if mmt_config['DS_Use_Process'] > 0:
+                    if self.depths.ds_depths is not None:
+                        self.depths.selected = 'ds_depths'
+                    else:
+                        self.depths.selected = 'bt_depths'
+                else:
+                    self.depths.selected = 'bt_depths'
+                self.depths.composite_depths(transect=self, setting='Off')
+
+            # Create water_data object
+            # ------------------------
+
+            ensemble_ping_type = self.trdi_ping_type(pd0_data)
+
+            # Check for RiverRay and RiverPro data
+            firmware = str(pd0_data.Inst.firm_ver[0])
+            excluded_dist = 0
+            if (firmware[:2] == '56') and (np.nanmax(pd0_data.Sensor.vert_beam_status) < 0.9):
+                excluded_dist = 0.25
+
+            if (firmware[:2] == '44') or (firmware[:2] == '56'):
+                # Process water velocities for RiverRay and RiverPro
+                self.w_vel = WaterData()
+                self.w_vel.populate_data(vel_in=pd0_data.Wt.vel_mps,
+                                         freq_in=freq_ts,
+                                         coord_sys_in=pd0_data.Cfg.coord_sys,
+                                         nav_ref_in='None',
+                                         rssi_in=pd0_data.Wt.rssi,
+                                         rssi_units_in='Counts',
+                                         excluded_dist_in=excluded_dist,
+                                         cells_above_sl_in=cells_above_sl,
+                                         sl_cutoff_per_in=sl_cutoff_per,
+                                         sl_cutoff_num_in=0,
+                                         sl_cutoff_type_in='Percent',
+                                         sl_lag_effect_in=sl_lag_effect_m,
+                                         sl_cutoff_m=sl_cutoff_m,
+                                         wm_in=pd0_data.Cfg.wm[0],
+                                         blank_in=pd0_data.Cfg.wf_cm[0] / 100,
+                                         corr_in=pd0_data.Wt.corr,
+                                         surface_vel_in=pd0_data.Surface.vel_mps,
+                                         surface_rssi_in=pd0_data.Surface.rssi,
+                                         surface_corr_in=pd0_data.Surface.corr,
+                                         surface_num_cells_in=pd0_data.Surface.no_cells,
+                                         ping_type=ensemble_ping_type)
+
+            else:
+                # Process water velocities for non-RiverRay ADCPs
+                self.w_vel = WaterData()
+                self.w_vel.populate_data(vel_in=pd0_data.Wt.vel_mps,
+                                         freq_in=freq_ts,
+                                         coord_sys_in=pd0_data.Cfg.coord_sys[0],
+                                         nav_ref_in='None',
+                                         rssi_in=pd0_data.Wt.rssi,
+                                         rssi_units_in='Counts',
+                                         excluded_dist_in=excluded_dist,
+                                         cells_above_sl_in=cells_above_sl,
+                                         sl_cutoff_per_in=sl_cutoff_per,
+                                         sl_cutoff_num_in=0,
+                                         sl_cutoff_type_in='Percent',
+                                         sl_lag_effect_in=sl_lag_effect_m,
+                                         sl_cutoff_m=sl_cutoff_m,
+                                         wm_in=pd0_data.Cfg.wm[0],
+                                         blank_in=pd0_data.Cfg.wf_cm[0] / 100,
+                                         corr_in=pd0_data.Wt.corr,
+                                         ping_type=ensemble_ping_type)
+
+            # Create Edges Object
+            self.edges = Edges()
+            self.edges.populate_data(rec_edge_method='Fixed', vel_method='MeasMag')
+
+            # Determine number of ensembles to average
+            n_ens_left = mmt_config['Q_Shore_Pings_Avg']
+            # TRDI uses same number on left and right edges
+            n_ens_right = n_ens_left
+
+            # Set indices for ensembles in the moving-boat portion of the transect
+            self.in_transect_idx = np.arange(0, pd0_data.Bt.vel_mps.shape[1])
+
+            # Determine left and right edge distances
+            if mmt_config['Edge_Begin_Left_Bank']:
+                dist_left = float(mmt_config['Edge_Begin_Shore_Distance'])
+                dist_right = float(mmt_config['Edge_End_Shore_Distance'])
+                if 'Edge_End_Manual_Discharge' in mmt_config:
+                    user_discharge_left = float(mmt_config['Edge_Begin_Manual_Discharge'])
+                    user_discharge_right = float(mmt_config['Edge_End_Manual_Discharge'])
+                    edge_method_left = mmt_config['Edge_Begin_Method_Distance']
+                    edge_method_right = mmt_config['Edge_End_Method_Distance']
+                else:
+                    user_discharge_left = None
+                    user_discharge_right = None
+                    edge_method_left = 'Yes'
+                    edge_method_right = 'Yes'
+                self.start_edge = 'Left'
+                self.orig_start_edge = 'Left'
+            else:
+                dist_left = float(mmt_config['Edge_End_Shore_Distance'])
+                dist_right = float(mmt_config['Edge_Begin_Shore_Distance'])
+                if 'Edge_End_Manual_Discharge' in mmt_config:
+                    user_discharge_left = float(mmt_config['Edge_End_Manual_Discharge'])
+                    user_discharge_right = float(mmt_config['Edge_Begin_Manual_Discharge'])
+                    edge_method_left = mmt_config['Edge_End_Method_Distance']
+                    edge_method_right = mmt_config['Edge_Begin_Method_Distance']
+                else:
+                    user_discharge_left = None
+                    user_discharge_right = None
+                    edge_method_left = 'Yes'
+                    edge_method_right = 'Yes'
+                self.start_edge = 'Right'
+                self.orig_start_edge = 'Right'
+
+            # Create left edge
+            if edge_method_left == 'NO':
+                self.edges.left.populate_data(edge_type='User Q',
+                                              distance=dist_left,
+                                              number_ensembles=n_ens_left,
+                                              user_discharge=user_discharge_left)
+
+            elif mmt_config['Q_Left_Edge_Type'] == 0:
+                self.edges.left.populate_data(edge_type='Triangular',
+                                              distance=dist_left,
+                                              number_ensembles=n_ens_left,
+                                              user_discharge=user_discharge_left)
+
+            elif mmt_config['Q_Left_Edge_Type'] == 1:
+                self.edges.left.populate_data(edge_type='Rectangular',
+                                              distance=dist_left,
+                                              number_ensembles=n_ens_left,
+                                              user_discharge=user_discharge_left)
+
+            elif mmt_config['Q_Left_Edge_Type'] == 2:
+                self.edges.left.populate_data(edge_type='Custom',
+                                              distance=dist_left,
+                                              number_ensembles=n_ens_left,
+                                              coefficient=mmt_config['Q_Left_Edge_Coeff'],
+                                              user_discharge=user_discharge_left)
+
+            # Create right edge
+            if edge_method_right == 'NO':
+                self.edges.right.populate_data(edge_type='User Q',
+                                               distance=dist_right,
+                                               number_ensembles=n_ens_right,
+                                               user_discharge=user_discharge_right)
+            elif mmt_config['Q_Right_Edge_Type'] == 0:
+                self.edges.right.populate_data(edge_type='Triangular',
+                                               distance=dist_right,
+                                               number_ensembles=n_ens_right,
+                                               user_discharge=user_discharge_right)
+
+            elif mmt_config['Q_Right_Edge_Type'] == 1:
+                self.edges.right.populate_data(edge_type='Rectangular',
+                                               distance=dist_right,
+                                               number_ensembles=n_ens_right,
+                                               user_discharge=user_discharge_right)
+
+            elif mmt_config['Q_Right_Edge_Type'] == 2:
+                self.edges.right.populate_data(edge_type='Custom',
+                                               distance=dist_right,
+                                               number_ensembles=n_ens_right,
+                                               coefficient=mmt_config['Q_Right_Edge_Coeff'],
+                                               user_discharge=user_discharge_right)
+
+            # Create extrap object
+            # --------------------
+            # Determine top method
+            top = 'Power'
+            if mmt_config['Q_Top_Method'] == 1:
+                top = 'Constant'
+            elif mmt_config['Q_Top_Method'] == 2:
+                top = '3-Point'
+
+            # Determine bottom method
+            bot = 'Power'
+            if mmt_config['Q_Bottom_Method'] == 2:
+                bot = 'No Slip'
+
+            self.extrap = ExtrapData()
+            self.extrap.populate_data(top=top, bot=bot, exp=mmt_config['Q_Power_Curve_Coeff'])
+
+            # Sensor Data
+            self.sensors = Sensors()
+
+            # Heading
+
+            # Internal Heading
+            self.sensors.heading_deg.internal = HeadingData()
+            self.sensors.heading_deg.internal.populate_data(data_in=pd0_data.Sensor.heading_deg.T,
+                                                            source_in='internal',
+                                                            magvar=mmt_config['Offsets_Magnetic_Variation'],
+                                                            align=mmt_config['Ext_Heading_Offset'])
+
+            # External Heading
+            ext_heading_check = np.where(np.isnan(pd0_data.Gps2.heading_deg) == False)
+            if len(ext_heading_check[0]) <= 0:
+                self.sensors.heading_deg.selected = 'internal'
+            else:
+                # Determine external heading for each ensemble
+                # Using the minimum time difference
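+                # (illustrative: for per-ensemble time differences [0.8, 0.2, 0.5] s,
+                # the heading with the 0.2 s offset is selected)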
+                d_time = np.abs(pd0_data.Gps2.hdt_delta_time)
+                d_time_min = np.nanmin(d_time.T, 0).T
+                use = np.tile([np.nan], d_time.shape)
+                for nd_time in range(len(d_time_min)):
+                    use[nd_time, :] = np.abs(d_time[nd_time, :]) == d_time_min[nd_time]
+
+                ext_heading_deg = np.tile([np.nan], (len(d_time_min)))
+                for nh in range(len(d_time_min)):
+                    idx = np.where(use[nh, :])[0]
+                    if len(idx) > 0:
+                        idx = idx[0]
+                        ext_heading_deg[nh] = pd0_data.Gps2.heading_deg[nh, idx]
+
+                # Create external heading sensor
+                self.sensors.heading_deg.external = HeadingData()
+                self.sensors.heading_deg.external.populate_data(data_in=ext_heading_deg,
+                                                                source_in='external',
+                                                                magvar=mmt_config['Offsets_Magnetic_Variation'],
+                                                                align=mmt_config['Ext_Heading_Offset'])
+
+                # Determine heading source to use from mmt setting
+                source_used = mmt_config['Ext_Heading_Use']
+                if source_used:
+                    self.sensors.heading_deg.selected = 'external'
+                else:
+                    self.sensors.heading_deg.selected = 'internal'
+
+            # Pitch
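+            # TRDI reports gimbal pitch; the line below converts it to instrument
+            # pitch as arctan(tan(gimbal_pitch) * cos(roll)), per TRDI's coordinate
+            # transformation convention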
+            pitch = arctand(tand(pd0_data.Sensor.pitch_deg) * cosd(pd0_data.Sensor.roll_deg))
+            pitch_src = pd0_data.Cfg.pitch_src[0]
+
+            # Create pitch sensor
+            self.sensors.pitch_deg.internal = SensorData()
+            self.sensors.pitch_deg.internal.populate_data(data_in=pitch, source_in=pitch_src)
+            self.sensors.pitch_deg.selected = 'internal'
+
+            # Roll
+            roll = pd0_data.Sensor.roll_deg.T
+            roll_src = pd0_data.Cfg.roll_src[0]
+
+            # Create Roll sensor
+            self.sensors.roll_deg.internal = SensorData()
+            self.sensors.roll_deg.internal.populate_data(data_in=roll, source_in=roll_src)
+            self.sensors.roll_deg.selected = 'internal'
+
+            # Temperature
+            temperature = pd0_data.Sensor.temperature_deg_c.T
+            temperature_src = pd0_data.Cfg.temp_src[0]
+
+            # Create temperature sensor
+            self.sensors.temperature_deg_c.internal = SensorData()
+            self.sensors.temperature_deg_c.internal.populate_data(data_in=temperature, source_in=temperature_src)
+            self.sensors.temperature_deg_c.selected = 'internal'
+
+            # Salinity
+            pd0_salinity = pd0_data.Sensor.salinity_ppt.T
+            pd0_salinity_src = pd0_data.Cfg.sal_src[0]
+
+            # Create salinity sensor from pd0 data
+            self.sensors.salinity_ppt.internal = SensorData()
+            self.sensors.salinity_ppt.internal.populate_data(data_in=pd0_salinity, source_in=pd0_salinity_src)
+
+            # Create salinity sensor from mmt data
+            mmt_salinity = mmt_config['Proc_Salinity']
+            mmt_salinity = np.tile(mmt_salinity, pd0_salinity.shape)
+            self.sensors.salinity_ppt.user = SensorData()
+            self.sensors.salinity_ppt.user.populate_data(data_in=mmt_salinity, source_in='mmt')
+
+            # Set selected salinity
+            self.sensors.salinity_ppt.selected = 'internal'
+
+            # Speed of Sound
+            speed_of_sound = pd0_data.Sensor.sos_mps.T
+            speed_of_sound_src = pd0_data.Cfg.sos_src[0]
+            self.sensors.speed_of_sound_mps.internal = SensorData()
+            self.sensors.speed_of_sound_mps.internal.populate_data(data_in=speed_of_sound, source_in=speed_of_sound_src)
+
+            # The raw data are referenced to the internal SOS
+            self.sensors.speed_of_sound_mps.selected = 'internal'
+
+    @staticmethod
+    def trdi_ping_type(pd0_data):
+        """Determines if the ping is coherent on incoherent based on the lag near bottom. A coherent ping will have
+        the lag near the bottom.
+
+        Parameters
+        ----------
+        pd0_data: Pd0TRDI
+            Raw data from pd0 file.
+
+        Returns
+        -------
+        ping_type: np.array(str)
+            Ping type for each ensemble: C - coherent, I - incoherent, U - unknown
+        """
+        ping_type = np.array([])
+
+        firmware = str(pd0_data.Inst.firm_ver[0])
+        # RiverRay, RiverPro, and RioPro
+        if (firmware[:2] == '44') or (firmware[:2] == '56'):
+            if hasattr(pd0_data.Cfg, 'lag_near_bottom'):
+                ping_temp = pd0_data.Cfg.lag_near_bottom > 0
+                ping_type = np.tile(['U'], ping_temp.shape)
+                ping_type[ping_temp == 0] = 'I'
+                ping_type[ping_temp == 1] = 'C'
+
+        # StreamPro
+        elif firmware[:2] == '31':
+            if pd0_data.Cfg.wm[0] == 12:
+                ping_type = np.tile(['I'], pd0_data.Wt.vel_mps.shape[2])
+            elif pd0_data.Cfg.wm[0] == 13:
+                ping_type = np.tile(['C'], pd0_data.Wt.vel_mps.shape[2])
+            else:
+                ping_type = np.tile(['U'], pd0_data.Wt.vel_mps.shape[2])
+
+        # Rio Grande
+        elif firmware[:2] == '10':
+            if pd0_data.Cfg.wm[0] == 1 or pd0_data.Cfg.wm[0] == 12:
+                ping_type = np.tile(['I'], pd0_data.Wt.vel_mps.shape[2])
+            elif pd0_data.Cfg.wm[0] == 5 or pd0_data.Cfg.wm[0] == 8:
+                ping_type = np.tile(['C'], pd0_data.Wt.vel_mps.shape[2])
+            else:
+                ping_type = np.tile(['U'], pd0_data.Wt.vel_mps.shape[2])
+        else:
+            ping_type = np.tile(['U'], pd0_data.Wt.vel_mps.shape[2])
+        return ping_type
+
+    def sontek(self, rsdata, file_name):
+        """Reads Matlab file produced by RiverSurveyor Live and populates the transect instance variables.
+
+        Parameters
+        ----------
+        rsdata: MatSonTek
+            Object of Matlab data from SonTek Matlab files
+        file_name: str
+            Name of SonTek Matlab file not including path.
+        """
+
+        self.file_name = os.path.basename(file_name)
+
+        # ADCP instrument information
+        # ---------------------------
+        self.adcp = InstrumentData()
+        if hasattr(rsdata.System, 'InstrumentModel'):
+            self.adcp.populate_data(manufacturer='Nortek', raw_data=rsdata)
+        else:
+            self.adcp.populate_data(manufacturer='SonTek', raw_data=rsdata)
+
+        # Ensemble times
+        ensemble_delta_time = np.append([0], np.diff(rsdata.System.Time))
+        # TODO potentially add popup message when there are missing ensembles. Matlab did that.
+
+        # idx_missing = np.where(ensemble_delta_time > 1.5)
+        # if len(idx_missing[0]) > 0:
+        #     number_missing = np.sum(ensemble_delta_time[idx_missing]) - len(idx_missing)
+        #     error_str = self.file_name + ' is missing ' + str(number_missing) + ' samples'
+
+        start_serial_time = rsdata.System.Time[0] + ((30 * 365) + 7) * 24 * 60 * 60
+        end_serial_time = rsdata.System.Time[-1] + ((30 * 365) + 7) * 24 * 60 * 60
+        meas_date = datetime.strftime(datetime.fromtimestamp(start_serial_time), '%m/%d/%Y')
+        self.date_time = DateTime()
+        self.date_time.populate_data(date_in=meas_date,
+                                     start_in=start_serial_time,
+                                     end_in=end_serial_time,
+                                     ens_dur_in=ensemble_delta_time)
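+
+        # The ((30 * 365) + 7) * 24 * 60 * 60 term above is the offset between the SonTek reference epoch
+        # (2000-01-01) and the Unix epoch (1970-01-01): 30 years plus 7 leap days, or
+        # 10957 days * 86400 s/day = 946,684,800 s. A quick check (illustrative only):
+        #
+        #     from datetime import datetime, timezone
+        #     offset = ((30 * 365) + 7) * 24 * 60 * 60          # 946684800
+        #     datetime.fromtimestamp(offset, tz=timezone.utc)   # 2000-01-01 00:00:00+00:00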
+
+        # Transect checked for use in discharge computations
+        self.checked = True
+
+        # Coordinate system
+        ref_coord = None
+
+        # The initial coordinate system must be set to earth for early versions of RiverSurveyor firmware.
+        # This implementation forces all versions to use the earth coordinate system.
+        if rsdata.Setup.coordinateSystem == 0:
+            # ref_coord = 'Beam'
+            raise CoordError('Beam Coordinates are not supported for all RiverSurveyor firmware releases, ' +
+                             'use Earth coordinates.')
+        elif rsdata.Setup.coordinateSystem == 1:
+            # ref_coord = 'Inst'
+            raise CoordError('Instrument Coordinates are not supported for all RiverSurveyor firmware releases, ' +
+                             'use Earth coordinates.')
+        elif rsdata.Setup.coordinateSystem == 2:
+            ref_coord = 'Earth'
+
+        # Speed of Sound Parameters
+        # -------------------------
+        # In SonTek's Matlab file the BT velocity, VB Depth, and WT Velocity are not reported as raw data but rather
+        # are reported as processed values based on manual settings of temperature, salinity, and speed of sound.
+        # Note: the 4 beam depths are raw data and are not adjusted.
+        # QRev expects raw data to be independent of user settings. Therefore, manual settings must be identified
+        # and the Matlab data adjusted to reflect the raw data before creating the data classes in QRev.
+        # The manual values will then be applied during processing.
+
+        self.sensors = Sensors()
+
+        # Temperature
+        if rsdata.System.Units.Temperature.find('C') >= 0:
+            temperature = rsdata.System.Temperature
+        else:
+            temperature = (5. / 9.) * (rsdata.System.Temperature - 32)
+        self.sensors.temperature_deg_c.internal = SensorData()
+        self.sensors.temperature_deg_c.internal.populate_data(data_in=temperature, source_in='internal')
+        self.sensors.temperature_deg_c.selected = 'internal'
+
+        if hasattr(rsdata.Setup, 'userTemperature'):
+            if rsdata.Setup.useMeasuredTemperature == 0:
+                if rsdata.Setup.Units.userTemperature.find('C') >= 0:
+                    temperature = rsdata.Setup.userTemperature
+                else:
+                    temperature = (5. / 9.) * (rsdata.Setup.userTemperature - 32)
+                self.sensors.temperature_deg_c.user = SensorData()
+                self.sensors.temperature_deg_c.user.populate_data(data_in=temperature, source_in='Manual')
+                self.sensors.temperature_deg_c.selected = 'user'
+
+        # Salinity
+        # Create internal salinity using a zero value since salinity can only be applied in RSL and not in the raw data
+        self.sensors.salinity_ppt.internal = SensorData()
+        self.sensors.salinity_ppt.internal.populate_data(data_in=0, source_in='QRev')
+        self.sensors.salinity_ppt.user = SensorData()
+        self.sensors.salinity_ppt.user.populate_data(data_in=rsdata.Setup.userSalinity, source_in='Manual')
+
+        # Set salinity source
+        if rsdata.Setup.userSalinity > 0:
+            self.sensors.salinity_ppt.selected = 'user'
+        else:
+            self.sensors.salinity_ppt.selected = 'internal'
+
+        # Speed of sound
+        # An internal SOS is provided in the SonTek data, but here it is recomputed from temperature and salinity
+        # using the UNESCO equation.
+        temperature = self.sensors.temperature_deg_c.internal.data
+        salinity = self.sensors.salinity_ppt.internal.data
+        speed_of_sound = Sensors.unesco_speed_of_sound(t=temperature, s=salinity)
+        self.sensors.speed_of_sound_mps.internal = SensorData()
+        self.sensors.speed_of_sound_mps.internal.populate_data(data_in=speed_of_sound, source_in='QRev')
+        self.sensors.speed_of_sound_mps.selected = 'internal'
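+
+        # Rough sanity check (illustrative only, not executed): for fresh water at 20 deg C and 0 ppt the
+        # UNESCO equation gives a speed of sound of about 1482 m/s.
+        #
+        #     sos = Sensors.unesco_speed_of_sound(t=20.0, s=0.0)   # ~1482 m/s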
+
+        if hasattr(rsdata.Setup, 'useFixedSoundSpeed'):
+            if rsdata.Setup.useFixedSoundSpeed > 0:
+                self.sensors.speed_of_sound_mps.user = SensorData()
+                user_sos = rsdata.Setup.fixedSoundSpeed
+                self.sensors.speed_of_sound_mps.user.populate_data(data_in=user_sos, source_in='Manual')
+                self.sensors.speed_of_sound_mps.selected = 'user'
+
+        # Speed of sound correction to obtain raw data
+        sos_correction = None
+        if self.sensors.speed_of_sound_mps.selected == 'user':
+            sos_correction = self.sensors.speed_of_sound_mps.internal.data / self.sensors.speed_of_sound_mps.user.data
+
+        elif self.sensors.salinity_ppt.selected == 'user' or self.sensors.temperature_deg_c.selected == 'user':
+            selected_temperature = getattr(self.sensors.temperature_deg_c, self.sensors.temperature_deg_c.selected)
+            temperature = selected_temperature.data
+            selected_salinity = getattr(self.sensors.salinity_ppt, self.sensors.salinity_ppt.selected)
+            salinity = selected_salinity.data
+            sos_user = Sensors.unesco_speed_of_sound(t=temperature, s=salinity)
+            sos_correction = self.sensors.speed_of_sound_mps.internal.data / sos_user
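+
+        # The correction is the ratio internal SOS / user SOS: dividing out the user SOS and restoring the
+        # internal SOS converts RSL-processed values back to raw values. Illustrative numbers (invented):
+        #
+        #     internal_sos = 1482.0        # m/s, from internal temperature and salinity
+        #     user_sos = 1500.0            # m/s, fixed by the user in RiverSurveyor Live
+        #     sos_correction = internal_sos / user_sos   # ~0.988, applied below to velocities and depths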
+
+        # Bottom Track
+        # ------------
+
+        # Convert frequency to kHz
+        if np.nanmean(rsdata.BottomTrack.BT_Frequency) > 10000:
+            freq = rsdata.BottomTrack.BT_Frequency / 1000
+        elif np.nanmean(rsdata.BottomTrack.BT_Frequency) < 100:
+            freq = rsdata.BottomTrack.BT_Frequency * 1000
+        else:
+            freq = rsdata.BottomTrack.BT_Frequency
+
+        # Create valid frequency time series
+        freq_ts = self.valid_frequencies(freq)
+
+        bt_vel = np.swapaxes(rsdata.BottomTrack.BT_Vel, 1, 0)
+
+        # Apply correction for manual sos parameters to obtain raw values
+        if sos_correction is not None:
+            bt_vel = np.around(bt_vel * sos_correction, 3)
+
+        self.boat_vel = BoatStructure()
+        self.boat_vel.add_boat_object(source='SonTek',
+                                      vel_in=bt_vel,
+                                      freq_in=freq_ts,
+                                      coord_sys_in=ref_coord,
+                                      nav_ref_in='BT')
+
+        # GPS Data
+        # --------
+        self.gps = GPSData()
+        if np.nansum(rsdata.GPS.GPS_Quality) > 0:
+
+            if len(rsdata.RawGPSData.GgaLatitude.shape) > 1:
+
+                self.gps.populate_data(raw_gga_utc=rsdata.RawGPSData.GgaUTC,
+                                       raw_gga_lat=rsdata.RawGPSData.GgaLatitude,
+                                       raw_gga_lon=rsdata.RawGPSData.GgaLongitude,
+                                       raw_gga_alt=rsdata.RawGPSData.GgaAltitude,
+                                       raw_gga_diff=rsdata.RawGPSData.GgaQuality,
+                                       raw_gga_hdop=np.swapaxes(np.tile(rsdata.GPS.HDOP,
+                                                                        (rsdata.RawGPSData.GgaLatitude.shape[1],
+                                                                         1)), 1, 0),
+                                       raw_gga_num_sats=np.swapaxes(np.tile(rsdata.GPS.Satellites,
+                                                                            (rsdata.RawGPSData.GgaLatitude.shape[1],
+                                                                             1)), 1, 0),
+                                       raw_gga_delta_time=None,
+                                       raw_vtg_course=rsdata.RawGPSData.VtgTmgTrue,
+                                       raw_vtg_speed=rsdata.RawGPSData.VtgSogMPS,
+                                       raw_vtg_delta_time=None,
+                                       raw_vtg_mode_indicator=rsdata.RawGPSData.VtgMode,
+                                       ext_gga_utc=rsdata.GPS.Utc,
+                                       ext_gga_lat=rsdata.GPS.Latitude,
+                                       ext_gga_lon=rsdata.GPS.Longitude,
+                                       ext_gga_alt=rsdata.GPS.Altitude,
+                                       ext_gga_diff=rsdata.GPS.GPS_Quality,
+                                       ext_gga_hdop=rsdata.GPS.HDOP,
+                                       ext_gga_num_sats=rsdata.GPS.Satellites,
+                                       ext_vtg_course=np.tile(np.nan, rsdata.GPS.Latitude.shape),
+                                       ext_vtg_speed=np.tile(np.nan, rsdata.GPS.Latitude.shape),
+                                       gga_p_method='End',
+                                       gga_v_method='End',
+                                       vtg_method='Average')
+            else:
+                # Nortek data
+                rows = rsdata.RawGPSData.GgaLatitude.shape[0]
+                self.gps.populate_data(raw_gga_utc=rsdata.GPS.Utc.reshape(rows, 1),
+                                       raw_gga_lat=rsdata.GPS.Latitude.reshape(rows, 1),
+                                       raw_gga_lon=rsdata.GPS.Longitude.reshape(rows, 1),
+                                       raw_gga_alt=rsdata.GPS.Altitude.reshape(rows, 1),
+                                       raw_gga_diff=rsdata.GPS.GPS_Quality.reshape(rows, 1),
+                                       raw_gga_hdop=rsdata.GPS.HDOP.reshape(rows, 1),
+                                       raw_gga_num_sats=rsdata.GPS.Satellites.reshape(rows, 1),
+                                       raw_gga_delta_time=None,
+                                       raw_vtg_course=rsdata.RawGPSData.VtgTmgTrue.reshape(rows, 1),
+                                       raw_vtg_speed=rsdata.RawGPSData.VtgSogMPS.reshape(rows, 1),
+                                       raw_vtg_delta_time=None,
+                                       raw_vtg_mode_indicator=rsdata.RawGPSData.VtgMode.reshape(rows, 1),
+                                       ext_gga_utc=rsdata.GPS.Utc,
+                                       ext_gga_lat=rsdata.GPS.Latitude,
+                                       ext_gga_lon=rsdata.GPS.Longitude,
+                                       ext_gga_alt=rsdata.GPS.Altitude,
+                                       ext_gga_diff=rsdata.GPS.GPS_Quality,
+                                       ext_gga_hdop=rsdata.GPS.HDOP,
+                                       ext_gga_num_sats=rsdata.GPS.Satellites,
+                                       ext_vtg_course=np.tile(np.nan, rsdata.GPS.Latitude.shape),
+                                       ext_vtg_speed=np.tile(np.nan, rsdata.GPS.Latitude.shape),
+                                       gga_p_method='End',
+                                       gga_v_method='End',
+                                       vtg_method='Average')
+
+            self.boat_vel.add_boat_object(source='SonTek',
+                                          vel_in=self.gps.gga_velocity_ens_mps,
+                                          freq_in=None,
+                                          coord_sys_in='Earth',
+                                          nav_ref_in='GGA')
+
+            self.boat_vel.add_boat_object(source='SonTek',
+                                          vel_in=self.gps.vtg_velocity_ens_mps,
+                                          freq_in=None,
+                                          coord_sys_in='Earth',
+                                          nav_ref_in='VTG')
+        ref = 'BT'
+        if rsdata.Setup.trackReference == 1:
+            ref = 'BT'
+        elif rsdata.Setup.trackReference == 2:
+            ref = 'GGA'
+        elif rsdata.Setup.trackReference == 3:
+            ref = 'VTG'
+        self.boat_vel.set_nav_reference(ref)
+
+        # Depth
+        # -----
+
+        # Initialize depth data structure
+        self.depths = DepthStructure()
+
+        # Determine array rows and cols
+        max_cells = rsdata.WaterTrack.Velocity.shape[0]
+        num_ens = rsdata.WaterTrack.Velocity.shape[2]
+
+        # Compute cell sizes and depths
+        cell_size = rsdata.System.Cell_Size.reshape(1, num_ens)
+        cell_size_all = np.tile(cell_size, (max_cells, 1))
+        top_of_cells = rsdata.System.Cell_Start.reshape(1, num_ens)
+        cell_depth = ((np.tile(np.arange(1, max_cells + 1, 1).reshape(max_cells, 1), (1, num_ens)) - 0.5)
+                      * cell_size_all) + np.tile(top_of_cells, (max_cells, 1))
+
+        # Adjust cell size and depth for user supplied temp, sal, or sos
+        if sos_correction is not None:
+            cell_size_all = np.around(cell_size_all * sos_correction, 6)
+            cell_depth = \
+                np.around(((cell_depth - rsdata.Setup.sensorDepth) * sos_correction) + rsdata.Setup.sensorDepth, 6)
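+
+        # Worked example of the cell geometry above (invented values): with cell_size = 0.2 m and
+        # top_of_cells = 0.5 m, cell centers fall at 0.5 + (n - 0.5) * 0.2 = 0.6, 0.8, 1.0, ... m.
+        # The SOS adjustment scales distances measured from the transducer face, so the draft
+        # (sensorDepth) is removed before scaling and added back afterwards:
+        #
+        #     depth_raw = (depth_processed - draft) * sos_correction + draft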
+
+        # Prepare bottom track depth variable
+        depth = rsdata.BottomTrack.BT_Beam_Depth.T
+        depth[depth == 0] = np.nan
+
+        # Convert frequency to kHz
+        if np.nanmean(rsdata.BottomTrack.BT_Frequency) > 10000:
+            freq = rsdata.BottomTrack.BT_Frequency / 1000
+        else:
+            freq = rsdata.BottomTrack.BT_Frequency
+
+        # Create depth object for bottom track beams
+        self.depths.add_depth_object(depth_in=depth,
+                                     source_in='BT',
+                                     freq_in=freq_ts,
+                                     draft_in=rsdata.Setup.sensorDepth,
+                                     cell_depth_in=cell_depth,
+                                     cell_size_in=cell_size_all)
+
+        # Prepare vertical beam depth variable
+        depth_vb = np.tile(np.nan, (1, cell_depth.shape[1]))
+        depth_vb[0, :] = rsdata.BottomTrack.VB_Depth
+        depth_vb[depth_vb == 0] = np.nan
+
+        # Apply correction for manual sos parameters to obtain raw values
+        if sos_correction is not None:
+            depth_vb = np.around(((depth_vb - rsdata.Setup.sensorDepth) * sos_correction) + rsdata.Setup.sensorDepth, 5)
+
+        # Create depth object for vertical beam
+        self.depths.add_depth_object(depth_in=depth_vb,
+                                     source_in='VB',
+                                     freq_in=np.array([rsdata.Transformation_Matrices.Frequency[1]] * depth.shape[-1]),
+                                     draft_in=rsdata.Setup.sensorDepth,
+                                     cell_depth_in=cell_depth,
+                                     cell_size_in=cell_size_all)
+
+        # Set depth reference
+        if rsdata.Setup.depthReference < 0.5:
+            self.depths.selected = 'vb_depths'
+        else:
+            self.depths.selected = 'bt_depths'
+
+        # Water Velocity
+        # --------------
+
+        # Convert frequency to kHz
+        if np.nanmean(rsdata.WaterTrack.WT_Frequency) > 10000:
+            freq = rsdata.WaterTrack.WT_Frequency / 1000
+        else:
+            freq = rsdata.WaterTrack.WT_Frequency
+
+        # Create valid frequency time series
+        freq_ts = self.valid_frequencies(freq)
+
+        # Rearrange arrays for consistency with WaterData class
+        vel = np.swapaxes(rsdata.WaterTrack.Velocity, 1, 0)
+
+        # Apply correction for manual sos parameters to obtain raw values
+        if sos_correction is not None:
+            vel = np.around(vel * sos_correction, 3)
+        snr = np.swapaxes(rsdata.System.SNR, 1, 0)
+        if hasattr(rsdata.WaterTrack, 'Correlation'):
+            corr = np.swapaxes(rsdata.WaterTrack.Correlation, 1, 0)
+        else:
+            corr = np.array([])
+
+        # Correct SonTek difference velocity for error in earlier transformation matrices.
+        if abs(rsdata.Transformation_Matrices.Matrix[3, 0, 0]) < 0.5:
+            vel[3, :, :] = vel[3, :, :] * 2
+
+        # Apply TRDI scaling to SonTek difference velocity to convert to a TRDI compatible error velocity
+        vel[3, :, :] = vel[3, :, :] / ((2 ** 0.5) * np.tan(np.deg2rad(25)))
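+
+        # The scale factor sqrt(2) * tan(25 deg) ~= 0.6595 (the 25-degree angle matching the SonTek
+        # slant-beam geometry), so the division multiplies the difference velocity by ~1.516.
+        # Numerically (illustrative only):
+        #
+        #     scale = (2 ** 0.5) * np.tan(np.deg2rad(25))   # ~0.6595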
+
+        # Convert velocity reference from what was used in RiverSurveyor Live to None by adding the boat velocity
+        # to the reported water velocity
+        boat_vel = np.swapaxes(rsdata.Summary.Boat_Vel, 1, 0)
+        vel[0, :, :] = vel[0, :, :] + boat_vel[0, :]
+        vel[1, :, :] = vel[1, :, :] + boat_vel[1, :]
+
+        ref_water = 'None'
+
+        # Compute side lobe cutoff using Transmit Length information if available; if not, it is assumed to be
+        # equal to 1/2 depth_cell_size_m. The percent method is used for the side lobe cutoff computation.
+        sl_cutoff_percent = rsdata.Setup.extrapolation_dDiscardPercent
+        sl_cutoff_number = rsdata.Setup.extrapolation_nDiscardCells
+        if hasattr(rsdata.Summary, 'Transmit_Length'):
+            sl_lag_effect_m = (rsdata.Summary.Transmit_Length
+                               + self.depths.bt_depths.depth_cell_size_m[0, :]) / 2.0
+        else:
+            sl_lag_effect_m = np.copy(self.depths.bt_depths.depth_cell_size_m[0, :])
+        sl_cutoff_type = 'Percent'
+        cells_above_sl, sl_cutoff_m = TransectData.side_lobe_cutoff(depths=self.depths.bt_depths.depth_orig_m,
+                                                                    draft=self.depths.bt_depths.draft_orig_m,
+                                                                    cell_depth=self.depths.bt_depths.depth_cell_depth_m,
+                                                                    sl_lag_effect=sl_lag_effect_m,
+                                                                    slc_type=sl_cutoff_type,
+                                                                    value=1 - sl_cutoff_percent / 100)
+        # Determine water mode
+        if len(corr) > 0:
+            corr_nan = np.isnan(corr)
+            number_of_nan = np.count_nonzero(corr_nan)
+            if number_of_nan == 0:
+                wm = 'HD'
+            elif corr_nan.size == number_of_nan:
+                wm = 'IC'
+            else:
+                wm = 'Variable'
+        else:
+            wm = 'Unknown'
+
+        # Determine excluded distance (Similar to SonTek's screening distance)
+        excluded_distance = rsdata.Setup.screeningDistance - rsdata.Setup.sensorDepth
+        if excluded_distance < 0:
+            excluded_distance = 0
+
+        if hasattr(rsdata.WaterTrack, 'Vel_Expected_StdDev'):
+            # RS5
+            ping_type = self.sontek_ping_type(corr=corr, freq=rsdata.WaterTrack.WT_Frequency,
+                                              expected_std=rsdata.WaterTrack.Vel_Expected_StdDev)
+        else:
+            # M9 or S5
+            ping_type = self.sontek_ping_type(corr=corr, freq=rsdata.WaterTrack.WT_Frequency)
+
+        # Create water velocity object
+        self.w_vel = WaterData()
+        self.w_vel.populate_data(vel_in=vel,
+                                 freq_in=freq_ts,
+                                 coord_sys_in=ref_coord,
+                                 nav_ref_in=ref_water,
+                                 rssi_in=snr,
+                                 rssi_units_in='SNR',
+                                 excluded_dist_in=excluded_distance,
+                                 cells_above_sl_in=cells_above_sl,
+                                 sl_cutoff_per_in=sl_cutoff_percent,
+                                 sl_cutoff_num_in=sl_cutoff_number,
+                                 sl_cutoff_type_in=sl_cutoff_type,
+                                 sl_lag_effect_in=sl_lag_effect_m,
+                                 sl_cutoff_m=sl_cutoff_m,
+                                 wm_in=wm,
+                                 blank_in=excluded_distance,
+                                 corr_in=corr,
+                                 ping_type=ping_type)
+
+        # Edges
+        # -----
+        # Create edge object
+        self.edges = Edges()
+        self.edges.populate_data(rec_edge_method='Variable',
+                                 vel_method='VectorProf')
+
+        # Determine number of ensembles for each edge
+        if rsdata.Setup.startEdge > 0.1:
+            ensembles_right = np.nansum(rsdata.System.Step == 2)
+            ensembles_left = np.nansum(rsdata.System.Step == 4)
+            self.start_edge = 'Right'
+            self.orig_start_edge = 'Right'
+        else:
+            ensembles_right = np.nansum(rsdata.System.Step == 4)
+            ensembles_left = np.nansum(rsdata.System.Step == 2)
+            self.start_edge = 'Left'
+            self.orig_start_edge = 'Left'
+        self.in_transect_idx = np.where(rsdata.System.Step == 3)[0]
+
+        # Create left edge object
+        edge_type = None
+        if rsdata.Setup.Edges_0__Method == 2:
+            edge_type = 'Triangular'
+        elif rsdata.Setup.Edges_0__Method == 1:
+            edge_type = 'Rectangular'
+        elif rsdata.Setup.Edges_0__Method == 0:
+            edge_type = 'User Q'
+        if np.isnan(rsdata.Setup.Edges_0__EstimatedQ):
+            user_discharge = None
+        else:
+            user_discharge = rsdata.Setup.Edges_0__EstimatedQ
+        self.edges.left.populate_data(edge_type=edge_type,
+                                      distance=rsdata.Setup.Edges_0__DistanceToBank,
+                                      number_ensembles=ensembles_left,
+                                      coefficient=None,
+                                      user_discharge=user_discharge)
+
+        # Create right edge object
+        if rsdata.Setup.Edges_1__Method == 2:
+            edge_type = 'Triangular'
+        elif rsdata.Setup.Edges_1__Method == 1:
+            edge_type = 'Rectangular'
+        elif rsdata.Setup.Edges_1__Method == 0:
+            edge_type = 'User Q'
+        if np.isnan(rsdata.Setup.Edges_1__EstimatedQ):
+            user_discharge = None
+        else:
+            user_discharge = rsdata.Setup.Edges_1__EstimatedQ
+        self.edges.right.populate_data(edge_type=edge_type,
+                                       distance=rsdata.Setup.Edges_1__DistanceToBank,
+                                       number_ensembles=ensembles_right,
+                                       coefficient=None,
+                                       user_discharge=user_discharge)
+
+        # Extrapolation
+        # -------------
+        top = ''
+        bottom = ''
+
+        # Top extrapolation
+        if rsdata.Setup.extrapolation_Top_nFitType == 0:
+            top = 'Constant'
+        elif rsdata.Setup.extrapolation_Top_nFitType == 1:
+            top = 'Power'
+        elif rsdata.Setup.extrapolation_Top_nFitType == 2:
+            top = '3-Point'
+
+        # Bottom extrapolation
+        if rsdata.Setup.extrapolation_Bottom_nFitType == 0:
+            bottom = 'Constant'
+        elif rsdata.Setup.extrapolation_Bottom_nFitType == 1:
+            if rsdata.Setup.extrapolation_Bottom_nEntirePro > 1.1:
+                bottom = 'No Slip'
+            else:
+                bottom = 'Power'
+
+        # Create extrapolation object
+        self.extrap = ExtrapData()
+        self.extrap.populate_data(top=top,
+                                  bot=bottom,
+                                  exp=rsdata.Setup.extrapolation_Bottom_dExponent)
+
+        # Sensor data
+        # -----------
+
+        # Internal heading
+        self.sensors.heading_deg.internal = HeadingData()
+
+        # Check for firmware supporting G3 compass and associated data
+        if hasattr(rsdata, 'Compass'):
+            # TODO need to find older file that had 3 columns in Magnetic error to test and modify code
+            mag_error = rsdata.Compass.Magnetic_error
+            pitch_limit = np.array((rsdata.Compass.Maximum_Pitch, rsdata.Compass.Minimum_Pitch)).T
+            roll_limit = np.array((rsdata.Compass.Maximum_Roll, rsdata.Compass.Minimum_Roll)).T
+            if np.any(np.greater_equal(np.abs(pitch_limit), 90)) or np.any(np.greater_equal(np.abs(roll_limit), 90)):
+                pitch_limit = None
+                roll_limit = None
+        else:
+            mag_error = None
+            pitch_limit = None
+            roll_limit = None
+        self.sensors.heading_deg.internal.populate_data(data_in=rsdata.System.Heading,
+                                                        source_in='internal',
+                                                        magvar=rsdata.Setup.magneticDeclination,
+                                                        mag_error=mag_error,
+                                                        pitch_limit=pitch_limit,
+                                                        roll_limit=roll_limit)
+
+        # External heading
+        ext_heading = rsdata.System.GPS_Compass_Heading
+        if np.nansum(np.abs(np.diff(ext_heading))) > 0:
+            self.sensors.heading_deg.external = HeadingData()
+            self.sensors.heading_deg.external.populate_data(data_in=ext_heading,
+                                                            source_in='external',
+                                                            magvar=rsdata.Setup.magneticDeclination,
+                                                            align=rsdata.Setup.hdtHeadingCorrection)
+
+        # Set selected reference
+        if rsdata.Setup.headingSource > 1.1:
+            self.sensors.heading_deg.selected = 'external'
+        else:
+            self.sensors.heading_deg.selected = 'internal'
+
+        # Pitch and roll
+        pitch = None
+        roll = None
+        if hasattr(rsdata, 'Compass'):
+            pitch = rsdata.Compass.Pitch
+            roll = rsdata.Compass.Roll
+        elif hasattr(rsdata.System, 'Pitch'):
+            pitch = rsdata.System.Pitch
+            roll = rsdata.System.Roll
+        self.sensors.pitch_deg.internal = SensorData()
+        self.sensors.pitch_deg.internal.populate_data(data_in=pitch, source_in='internal')
+        self.sensors.pitch_deg.selected = 'internal'
+        self.sensors.roll_deg.internal = SensorData()
+        self.sensors.roll_deg.internal.populate_data(data_in=roll, source_in='internal')
+        self.sensors.roll_deg.selected = 'internal'
+
+        # Set composite depths as this is the only option in RiverSurveyor Live
+        self.depths.composite_depths(transect=self, setting="On")
+
+    @staticmethod
+    def sontek_ping_type(corr, freq, expected_std=None):
+        """Determines ping type based on the fact that HD has correlation but incoherent does not.
+
+        Parameters
+        ----------
+        corr: np.ndarray(int)
+            Water track correlation
+        freq: np.ndarray(float)
+            Frequency of each ping, in kHz
+        expected_std: np.ndarray(float)
+            Expected velocity standard deviation, in m/s (RS5 only)
+
+        Returns
+        -------
+        ping_type: np.array(str)
+            Ping type for each ensemble: '1I'/'3I' - 1 or 3 MHz incoherent, '1C'/'3C' - 1 or 3 MHz coherent (HD),
+            'PC' - pulse coherent, 'BB' - broadband, 'PC/BB' - mixed (RS5)
+        """
+        # Determine ping type
+
+        if expected_std is None:
+            # M9 or S5
+            if corr.size > 0:
+                corr_exists = np.nansum(np.nansum(corr, axis=1), axis=0)
+                coherent = corr_exists > 0
+            else:
+                coherent = np.tile([False], freq.size)
+            ping_type = []
+            for n in range(len(coherent)):
+                if coherent[n]:
+                    if freq[n] == 3000:
+                        ping_type.append('3C')
+                    else:
+                        ping_type.append('1C')
+                else:
+                    if freq[n] == 3000:
+                        ping_type.append('3I')
+                    else:
+                        ping_type.append('1I')
+            ping_type = np.array(ping_type)
+        else:
+            # RS5
+            ves = []
+            for n in range(4):
+                ves.append(np.nanmean(expected_std[:, n, :], axis=0))
+
+            ves = np.array(ves)
+
+            ves_avg = np.nanmean(ves, axis=0)
+
+            ping_type = np.tile(['PC/BB'], ves_avg.size)
+            ping_type[ves_avg < 0.01] = 'PC'
+            ping_type[ves_avg > 0.025] = 'BB'
+
+        return ping_type
+
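+    # Illustrative sketch of the RS5 branch (thresholds from the code above, data invented):
+    #
+    #     import numpy as np
+    #     ves_avg = np.array([0.005, 0.018, 0.030])
+    #     ping_type = np.tile(['PC/BB'], ves_avg.size)
+    #     ping_type[ves_avg < 0.01] = 'PC'      # low expected std -> pulse coherent
+    #     ping_type[ves_avg > 0.025] = 'BB'     # high expected std -> broadband
+    #     # ping_type -> ['PC', 'PC/BB', 'BB']
+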
+    @staticmethod
+    def qrev_mat_in(meas_struct):
+        """Processes the Matlab data structure to obtain a list of TransectData objects containing transect
+           data from the Matlab data structure.
+
+       Parameters
+       ----------
+       meas_struct: mat_struct
+           Matlab data structure obtained from sio.loadmat
+
+       Returns
+       -------
+       transects: list
+           List of TransectData objects
+       """
+
+        transects = []
+        if hasattr(meas_struct, 'transects'):
+            # If there is only one transect, the data are not a list or array of transects
+            try:
+                if len(meas_struct.transects) > 0:
+                    for transect in meas_struct.transects:
+                        trans = TransectData()
+                        trans.populate_from_qrev_mat(transect)
+                        transects.append(trans)
+            except TypeError:
+                trans = TransectData()
+                trans.populate_from_qrev_mat(meas_struct.transects)
+                transects.append(trans)
+
+        return transects
+
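+    # Typical usage (a sketch; the file name and key name are assumed for illustration): loading with
+    # scipy.io.loadmat using struct_as_record=False and squeeze_me=True yields the mat_struct objects
+    # this method expects.
+    #
+    #     import scipy.io as sio
+    #     mat_data = sio.loadmat('measurement.mat', struct_as_record=False, squeeze_me=True)
+    #     transects = TransectData.qrev_mat_in(mat_data['meas_struct'])   # key name assumed
+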
+    def populate_from_qrev_mat(self, transect):
+        """Populates the object using data from previously saved QRev Matlab file.
+
+        Parameters
+        ----------
+        transect: mat_struct
+           Matlab data structure obtained from sio.loadmat
+        """
+
+        self.adcp = InstrumentData()
+        self.adcp.populate_from_qrev_mat(transect)
+        self.file_name = os.path.basename(transect.fileName)
+        self.w_vel = WaterData()
+        self.w_vel.populate_from_qrev_mat(transect)
+        self.boat_vel = BoatStructure()
+        self.boat_vel.populate_from_qrev_mat(transect)
+        self.gps = GPSData()
+        self.gps.populate_from_qrev_mat(transect)
+        self.sensors = Sensors()
+        self.sensors.populate_from_qrev_mat(transect)
+        self.depths = DepthStructure()
+        self.depths.populate_from_qrev_mat(transect)
+        self.edges = Edges()
+        self.edges.populate_from_qrev_mat(transect)
+        self.extrap = ExtrapData()
+        self.extrap.populate_from_qrev_mat(transect)
+        self.start_edge = transect.startEdge
+        if hasattr(transect, 'orig_start_edge'):
+            self.orig_start_edge = transect.orig_start_edge
+        else:
+            self.orig_start_edge = transect.startEdge
+        self.date_time = DateTime()
+        self.date_time.populate_from_qrev_mat(transect)
+        self.checked = bool(transect.checked)
+        if type(transect.inTransectIdx) is int:
+            self.in_transect_idx = np.array([transect.inTransectIdx - 1])
+        else:
+            self.in_transect_idx = transect.inTransectIdx.astype(int) - 1
+
+    @staticmethod
+    def valid_frequencies(frequency_in):
+        """Create frequency time series for BT and WT with all valid frequencies.
+
+        Parameters
+        ----------
+        frequency_in: np.array
+            Frequency time series from raw data
+
+        Returns
+        -------
+        frequency_out: np.array
+            Frequency time series with np.nan filled with valid frequencies
+        """
+
+        # Initialize output
+        frequency_out = np.copy(frequency_in)
+
+        # Check for any invalid data
+        invalid_freq = np.isnan(frequency_in)
+        if np.any(invalid_freq):
+            # Identify the first valid frequency
+            valid = frequency_in[np.logical_not(invalid_freq)][0]
+            # Forward fill for invalid frequencies beyond first valid, backfill until 1st valid
+            for n in range(frequency_in.size):
+                if invalid_freq[n]:
+                    frequency_out[n] = valid
+                else:
+                    valid = frequency_in[n]
+
+        return frequency_out
+
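+    # Worked example (invented values): entries before the first valid frequency are backfilled with it,
+    # and later invalid entries are forward-filled with the most recent valid frequency.
+    #
+    #     valid_frequencies(np.array([np.nan, 3000., np.nan, 1000., np.nan]))
+    #     # -> array([3000., 3000., 3000., 1000., 1000.])
+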
+    @staticmethod
+    def compute_cell_data(pd0):
+        """Computes cell sizes, cell depths, side lobe cutoff percent, and side lobe lag effect from pd0 data."""
+
+        # Number of ensembles
+        num_ens = np.array(pd0.Wt.vel_mps).shape[-1]
+
+        # Retrieve and compute cell information
+        reg_cell_size = pd0.Cfg.ws_cm / 100
+        reg_cell_size[reg_cell_size == 0] = np.nan
+        dist_cell_1_m = pd0.Cfg.dist_bin1_cm / 100
+        num_reg_cells = pd0.Wt.vel_mps.shape[1]
+
+        # Surface data accommodate RiverRay and RiverPro. pd0_read sets these
+        # values to nan when reading Rio Grande or StreamPro data
+        no_surf_cells = pd0.Surface.no_cells
+        no_surf_cells[np.isnan(no_surf_cells)] = 0
+        max_surf_cells = np.nanmax(no_surf_cells)
+        surf_cell_size = pd0.Surface.cell_size_cm / 100
+        surf_cell_dist = pd0.Surface.dist_bin1_cm / 100
+
+        # Compute maximum number of cells
+        max_cells = int(max_surf_cells + num_reg_cells)
+
+        # Combine cell size and cell range from transducer for both
+        # surface and regular cells
+        cell_depth = np.tile(np.nan, (max_cells, num_ens))
+        cell_size_all = np.tile(np.nan, (max_cells, num_ens))
+        for i in range(num_ens):
+            # Determine number of cells to be treated as regular cells
+            if np.nanmax(no_surf_cells) > 0:
+                num_reg_cells = max_cells - no_surf_cells[i]
+            else:
+                num_reg_cells = max_cells
+
+            # Compute cell depth
+            if no_surf_cells[i] > 1e-5:
+                cell_depth[:int(no_surf_cells[i]), i] = surf_cell_dist[i] + \
+                                                        np.arange(0, (no_surf_cells[i] - 1) * surf_cell_size[i] + 0.001,
+                                                                  surf_cell_size[i])
+                cell_depth[int(no_surf_cells[i]):, i] = cell_depth[int(no_surf_cells[i] - 1), i] \
+                                                        + (.5 * surf_cell_size[i] + 0.5 * reg_cell_size[i]) \
+                                                        + np.arange(0, (num_reg_cells - 1) * reg_cell_size[i] + 0.001,
+                                                                    reg_cell_size[i])
+                cell_size_all[0:int(no_surf_cells[i]), i] = np.repeat(surf_cell_size[i], int(no_surf_cells[i]))
+                cell_size_all[int(no_surf_cells[i]):, i] = np.repeat(reg_cell_size[i], int(num_reg_cells))
+            else:
+                cell_depth[:int(num_reg_cells), i] = dist_cell_1_m[i] + \
+                                                     np.linspace(0, int(num_reg_cells) - 1,
+                                                                 int(num_reg_cells)) * reg_cell_size[i]
+                cell_size_all[:, i] = np.repeat(reg_cell_size[i], num_reg_cells)
+
+        # Firmware is used to ID RiverRay data with variable modes and lags
+        firmware = str(pd0.Inst.firm_ver[0])
+
+        # Compute sl_lag_effect
+        lag = pd0.Cfg.lag_cm / 100
+        if firmware[0:2] == '44' or firmware[0:2] == '56':
+            lag_near_bottom = np.array(pd0.Cfg.lag_near_bottom)
+            lag_near_bottom[np.isnan(lag_near_bottom)] = 0
+            lag[lag_near_bottom != 0] = 0
+
+        pulse_len = pd0.Cfg.xmit_pulse_cm / 100
+        sl_lag_effect_m = (lag + pulse_len + reg_cell_size) / 2
+        sl_cutoff_per = (1 - (cosd(pd0.Inst.beam_ang[0]))) * 100
+
+        return cell_size_all, cell_depth, sl_cutoff_per, sl_lag_effect_m
+
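+    # Numeric check of sl_cutoff_per above (illustrative): for a 20-degree beam angle,
+    # (1 - cos(20 deg)) * 100 ~= 6.03, i.e. roughly the bottom 6% of the range from the
+    # transducer is within the side lobe interference zone.
+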
+    def change_q_ensembles(self, proc_method):
+        """Sets in_transect_idx to all ensembles, except in the case of SonTek data
+        where RSL processing is applied.
+
+        Parameters
+        ----------
+        proc_method: str
+            Processing method (WR2, RSL, QRev)
+        """
+
+        if proc_method == 'RSL':
+            num_ens = self.boat_vel.bt_vel.u_processed_mps.shape[0]
+            # Determine number of ensembles for each edge
+            if self.start_edge == 'Right':
+                self.in_transect_idx = np.arange(self.edges.right.num_ens_2_avg,
+                                                 num_ens - self.edges.left.num_ens_2_avg)
+            else:
+                self.in_transect_idx = np.arange(self.edges.left.num_ens_2_avg,
+                                                 num_ens - self.edges.right.num_ens_2_avg)
+        else:
+            self.in_transect_idx = np.arange(0, self.boat_vel.bt_vel.u_processed_mps.shape[0])
+
+    def change_coord_sys(self, new_coord_sys):
+        """Changes the coordinate system of the water and boat data.
+
+        Current implementation only allows changes for original to higher order coordinate
+        systems: Beam - Inst - Ship - Earth.
+
+        Parameters
+        ----------
+        new_coord_sys: str
+            Name of new coordinate system (Beam, Inst, Ship, Earth)
+        """
+        self.w_vel.change_coord_sys(new_coord_sys, self.sensors, self.adcp)
+        self.boat_vel.change_coord_sys(new_coord_sys, self.sensors, self.adcp)
+
+    def change_nav_reference(self, update, new_nav_ref):
+        """Method to set the navigation reference for the water data.
+
+        Parameters
+        ----------
+        update: bool
+            Setting to determine if water data should be updated.
+        new_nav_ref: str
+            New navigation reference (bt_vel, gga_vel, vtg_vel)
+        """
+
+        self.boat_vel.change_nav_reference(reference=new_nav_ref, transect=self)
+
+        if update:
+            self.update_water()
+
+    def change_mag_var(self, magvar):
+        """Change magnetic variation.
+
+        Parameters
+        ----------
+        magvar: float
+            Magnetic variation in degrees.
+        """
+
+        # Update object
+        if self.sensors.heading_deg.external is not None:
+            self.sensors.heading_deg.external.set_mag_var(magvar, 'external')
+
+        if self.sensors.heading_deg.selected == 'internal':
+            heading_selected = getattr(self.sensors.heading_deg, self.sensors.heading_deg.selected)
+            old_magvar = heading_selected.mag_var_deg
+            magvar_change = magvar - old_magvar
+            heading_selected.set_mag_var(magvar, 'internal')
+            self.boat_vel.bt_vel.change_heading(magvar_change)
+            self.w_vel.change_heading(self.boat_vel, magvar_change)
+        else:
+            self.sensors.heading_deg.internal.set_mag_var(magvar, 'internal')
+
+        # self.update_water()
+
+    def change_offset(self, h_offset):
+        """Change the heading offset (alignment correction). Only affects external heading.
+
+        Parameters
+        ----------
+        h_offset: float
+            Heading offset in degrees
+        """
+        self.sensors.heading_deg.internal.set_align_correction(h_offset, 'internal')
+
+        if self.sensors.heading_deg.selected == 'external':
+            old = getattr(self.sensors.heading_deg, self.sensors.heading_deg.selected)
+            old_offset = old.align_correction_deg
+            offset_change = h_offset - old_offset
+            self.boat_vel.bt_vel.change_heading(offset_change)
+            self.w_vel.change_heading(self.boat_vel, offset_change)
+
+        if self.sensors.heading_deg.external is not None:
+            self.sensors.heading_deg.external.set_align_correction(h_offset, 'external')
+
+        self.update_water()
+
+    def change_heading_source(self, h_source):
+        """Changes heading source (internal or external).
+
+        Parameters
+        ----------
+        h_source: str
+            Heading source (internal or external or user)
+        """
+
+        # If source is user, check to see if it was created, if not create it
+        if h_source == 'user':
+            if self.sensors.heading_deg.user is None:
+                self.sensors.heading_deg.user = HeadingData()
+                self.sensors.heading_deg.user.populate_data(data_in=np.zeros(
+                    self.boat_vel.bt_vel.u_processed_mps.shape),
+                    source_in='user',
+                    magvar=0,
+                    align=0)
+
+        # Get new heading object
+        new_heading_selection = getattr(self.sensors.heading_deg, h_source)
+
+        # Change source to that requested
+        if h_source is not None:
+            old_heading_selection = getattr(self.sensors.heading_deg, self.sensors.heading_deg.selected)
+            old_heading = old_heading_selection.data
+            new_heading = new_heading_selection.data
+            heading_change = new_heading - old_heading
+            self.sensors.heading_deg.set_selected(h_source)
+            self.boat_vel.bt_vel.change_heading(heading_change)
+            self.w_vel.change_heading(self.boat_vel, heading_change)
+
+        self.update_water()
+
+    def update_water(self):
+        """Method called from set_nav_reference, boat_interpolation and boat filters
+        to ensure that changes in boatvel are reflected in the water data"""
+
+        self.w_vel.set_nav_reference(self.boat_vel)
+
+        # Reapply water filters and interpolations.
+        # Note: apply_filter automatically calls apply_interpolation, so both
+        # filters and interpolations are applied with that one call
+
+        self.w_vel.apply_filter(transect=self)
+        self.w_vel.apply_interpolation(transect=self)
+
+    @staticmethod
+    def side_lobe_cutoff(depths, draft, cell_depth, sl_lag_effect, slc_type='Percent', value=None):
+        """Computes side lobe cutoff.
+
+        The side lobe cutoff is based on the beam angle and is computed to
+        ensure that the bin and any lag beyond the actual bin cutoff is
+        above the side lobe cutoff.
+
+        Parameters
+        ----------
+        depths: np.array
+            Bottom track (all 4 beams) and vertical beam depths for each ensemble, in m.
+        draft: float
+            Draft of transducers, in m.
+        cell_depth: np.array
+            Depth to the centerline of each depth cell, in m.
+        sl_lag_effect: np.array
+            The extra depth below the last depth cell that must be above the side lobe cutoff, in m.
+        slc_type: str
+            Method used for side lobe cutoff computation.
+        value: float
+            Value used in specified method to use for side lobe cutoff computation.
+        """
+
+        # Compute minimum depths for each ensemble
+        with warnings.catch_warnings():
+            warnings.simplefilter("ignore", category=RuntimeWarning)
+            min_depths = np.nanmin(depths, 0)
+
+        # Compute range from transducer
+        range_from_xducer = min_depths - draft
+
+        # Adjust for transducer angle
+        coeff = None
+        if slc_type == 'Percent':
+            coeff = value
+        elif slc_type == 'Angle':
+            coeff = np.cos(np.deg2rad(value))
+
+        # Compute sidelobe cutoff to centerline
+        cutoff = np.array(range_from_xducer * coeff - sl_lag_effect + draft)
+
+        # Compute boolean side lobe cutoff matrix
+        cells_above_sl = nan_less(cell_depth, cutoff)
+        return cells_above_sl, cutoff
+
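+    # Worked example using the 'Angle' method (invented values): depth 5.0 m, draft 0.3 m,
+    # 20-degree beam angle (coeff = cos(20 deg) ~= 0.940), and sl_lag_effect 0.2 m give
+    #
+    #     cutoff = (5.0 - 0.3) * 0.940 - 0.2 + 0.3 ~= 4.52 m
+    #
+    # so only cells whose centerline depth is less than ~4.52 m are retained.
+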
+    def boat_interpolations(self, update, target, method=None):
+        """Coordinates boat velocity interpolations.
+
+        Parameters
+        ----------
+        update: bool
+            Setting to control if water data are updated.
+        target: str
+            Boat velocity reference (BT or GPS)
+        method: str
+            Type of interpolation
+        """
+
+        # Interpolate bottom track data
+        if target == 'BT':
+            self.boat_vel.bt_vel.apply_interpolation(transect=self, interpolation_method=method)
+
+        if target == 'GPS':
+            # Interpolate GGA data
+            vel = getattr(self.boat_vel, 'gga_vel')
+            if vel is not None:
+                self.boat_vel.gga_vel.apply_interpolation(transect=self, interpolation_method=method)
+            # Interpolate VTG data
+            vel = getattr(self.boat_vel, 'vtg_vel')
+            if vel is not None:
+                self.boat_vel.vtg_vel.apply_interpolation(transect=self, interpolation_method=method)
+
+        # Apply composite tracks setting
+        self.composite_tracks(update=False)
+
+        # Update water to reflect changes in boat_vel
+        if update:
+            self.update_water()
+
+    def composite_tracks(self, update, setting=None):
+        """Coordinate application of composite tracks.
+
+        Parameters
+        ----------
+        update: bool
+            Setting to control if water data are updated.
+        setting: str
+            Sets composite tracks ("On" or "Off").
+        """
+
+        # Determine if setting is specified
+        if setting is None:
+            # Process transect using saved setting
+            self.boat_vel.composite_tracks(transect=self)
+        else:
+            # Process transect using new setting
+            self.boat_vel.composite_tracks(transect=self, setting=setting)
+
+        # Update water data to reflect changes in boat_vel
+        if update:
+            self.update_water()
+
+    def boat_filters(self, update, **kwargs):
+        """Coordinates application of boat filters to bottom track data
+
+        Parameters
+        ----------
+        update: bool
+            Setting to control if water data are updated.
+        **kwargs: dict
+            beam: int
+                Setting for beam filter (3, 4, -1)
+            difference: str
+                Setting for difference velocity filter (Auto, Manual, Off)
+            difference_threshold: float
+                Threshold for manual setting
+            vertical: str
+                Setting for vertical velocity filter (Auto, Manual, Off)
+            vertical_threshold: float
+                Threshold for manual setting
+            other: bool
+                Setting for other filter
+        """
+
+        # Apply filter to transect
+        self.boat_vel.bt_vel.apply_filter(self, **kwargs)
+
+        if self.boat_vel.selected == 'bt_vel' and update:
+            self.update_water()
+
+    def gps_filters(self, update, **kwargs):
+        """Coordinate filters for GPS based boat velocities
+
+        Parameters
+        ----------
+        update: bool
+            Setting to control if water data are updated.
+        **kwargs: dict
+            differential: str
+                Differential filter setting (1, 2, 4)
+            altitude: str
+                New setting for altitude filter (Off, Manual, Auto)
+            altitude_threshold: float
+                Threshold provided by user for manual altitude setting
+            hdop: str
+                Filter setting (On, Off, Auto)
+            hdop_max_threshold: float
+                Maximum HDOP threshold
+            hdop_change_threshold: float
+                HDOP change threshold
+            other: bool
+                Other filter typically a smooth.
+        """
+
+        if self.boat_vel.gga_vel is not None:
+            self.boat_vel.gga_vel.apply_gps_filter(self, **kwargs)
+        if self.boat_vel.vtg_vel is not None:
+            self.boat_vel.vtg_vel.apply_gps_filter(self, **kwargs)
+
+        if (self.boat_vel.selected == 'vtg_vel' or self.boat_vel.selected == 'gga_vel') and update:
+            self.update_water()
+
+    def set_depth_reference(self, update, setting):
+        """Coordinates setting the depth reference.
+
+        Parameters
+        ----------
+        update: bool
+            Determines if associated data should be updated
+        setting: str
+            Depth reference (bt_depths, vb_depths, ds_depths)
+        """
+
+        self.depths.selected = setting
+
+        if update:
+            self.process_depths(update)
+            self.w_vel.adjust_side_lobe(self)
+
+    def apply_averaging_method(self, setting):
+        """Method to apply the selected averaging method to the BT team depths to achieve a single
+        average depth.  It is only applicable to the multiple beams used for BT, not VB or DS.
+
+        Input:
+        setting: averaging method (IDW, Simple)
+        """
+
+        self.depths.bt_depths.compute_avg_bt_depth(setting)
+
+        self.process_depths(update=False)
+
+    def process_depths(self, update=False, filter_method=None, interpolation_method=None, composite_setting=None,
+                       avg_method=None, valid_method=None):
+        """Method applies filter, composite, and interpolation settings to  depth objects
+        so that all are updated using the same filter and interpolation settings.
+
+        Parameters
+        ----------
+        update: bool
+            Determines if water data should be updated.
+        filter_method: str
+            Filter method to be used (None, Smooth, TRDI).
+        interpolation_method: str
+            Interpolation method to be used (None, HoldLast, Smooth, Linear).
+        composite_setting: str
+            Specifies use of composite depths ("On" or "Off").
+        avg_method: str
+            Defines averaging method: "Simple", "IDW", only applicable to bottom track.
+        valid_method: str
+            Defines method to determine if depth is valid (QRev or TRDI).
+        """
+
+        # Get current settings
+        depth_data = getattr(self.depths, self.depths.selected)
+        if filter_method is None:
+            filter_method = depth_data.filter_type
+
+        if interpolation_method is None:
+            interpolation_method = depth_data.interp_type
+
+        if composite_setting is None:
+            composite_setting = self.depths.composite
+
+        if avg_method is None:
+            avg_method = self.depths.bt_depths.avg_method
+
+        if valid_method is None:
+            valid_method = self.depths.bt_depths.valid_data_method
+
+        self.depths.bt_depths.valid_data_method = valid_method
+        self.depths.bt_depths.avg_method = avg_method
+        self.depths.depth_filter(transect=self, filter_method=filter_method)
+        self.depths.depth_interpolation(transect=self, method=interpolation_method)
+        self.depths.composite_depths(transect=self, setting=composite_setting)
+        self.w_vel.adjust_side_lobe(transect=self)
+
+        if update:
+            self.update_water()
+
+    def change_draft(self, draft_in):
+        """Changes the draft for the specified transects and selected depth.
+
+        Parameters
+        ----------
+        draft_in: float
+            New draft value in m
+        """
+
+        if self.depths.vb_depths is not None:
+            self.depths.vb_depths.change_draft(draft_in)
+        if self.depths.bt_depths is not None:
+            self.depths.bt_depths.change_draft(draft_in)
+
+    def change_sos(self, parameter=None, salinity=None, temperature=None, selected=None, speed=None):
+        """Coordinates changing the speed of sound.
+
+        Parameters
+        ----------
+        parameter: str
+            Speed of sound parameter to be changed ('temperatureSrc', 'temperature', 'salinity', 'sosSrc')
+        salinity: float
+            Salinity in ppt
+        temperature: float
+            Temperature in deg C
+        selected: str
+            Selected speed of sound ('internal', 'computed', 'user') or temperature ('internal', 'user')
+        speed: float
+            Manually supplied speed of sound for 'user' source
+        """
+
+        if parameter == 'temperatureSrc':
+
+            temperature_internal = getattr(self.sensors.temperature_deg_c, 'internal')
+            if selected == 'user':
+                if self.sensors.temperature_deg_c.user is None:
+                    self.sensors.temperature_deg_c.user = SensorData()
+                ens_temperature = np.tile(temperature, temperature_internal.data.shape)
+
+                self.sensors.temperature_deg_c.user.change_data(data_in=ens_temperature)
+                self.sensors.temperature_deg_c.user.set_source(source_in='Manual Input')
+
+            # Set the temperature data to the selected source
+            self.sensors.temperature_deg_c.set_selected(selected_name=selected)
+            # Update the speed of sound
+            self.update_sos()
+
+        elif parameter == 'temperature':
+            adcp_temp = self.sensors.temperature_deg_c.internal.data
+            new_user_temperature = np.tile(temperature, adcp_temp.shape)
+            self.sensors.temperature_deg_c.user.change_data(data_in=new_user_temperature)
+            self.sensors.temperature_deg_c.user.set_source(source_in='Manual Input')
+            # Set the temperature data to the selected source
+            self.sensors.temperature_deg_c.set_selected(selected_name='user')
+            # Update the speed of sound
+            self.update_sos()
+
+        elif parameter == 'salinity':
+            if salinity is not None:
+                self.sensors.salinity_ppt.user.change_data(data_in=salinity)
+                salinity_internal = self.sensors.salinity_ppt.internal.data
+                if np.all(np.equal(self.sensors.salinity_ppt.user.data, salinity_internal)):
+                    self.sensors.salinity_ppt.set_selected(selected_name='internal')
+                else:
+                    self.sensors.salinity_ppt.set_selected(selected_name='user')
+                self.update_sos()
+
+        elif parameter == 'sosSrc':
+            if selected == 'internal':
+                self.update_sos()
+            elif selected == 'user':
+                self.update_sos(speed=speed, selected='user', source='Manual Input')
+
+    def update_sos(self, selected=None, source=None, speed=None):
+        """Sets a new specified speed of sound.
+
+        Parameters
+        ----------
+        selected: str
+             Selected speed of sound ('internal', 'computed', 'user')
+        source: str
+            Source of speed of sound (Manual Input, Computed, Calculated)
+        speed: float
+            Manually supplied speed of sound for 'user' source
+        """
+
+        # Get current speed of sound
+        sos_selected = getattr(self.sensors.speed_of_sound_mps, self.sensors.speed_of_sound_mps.selected)
+        old_sos = sos_selected.data
+        new_sos = None
+
+        # Manual input for speed of sound
+        if selected == 'user' and source == 'Manual Input':
+            self.sensors.speed_of_sound_mps.set_selected(selected_name=selected)
+            self.sensors.speed_of_sound_mps.user = SensorData()
+            self.sensors.speed_of_sound_mps.user.populate_data(speed, source)
+
+        # If called with no input, set the source to internal and determine whether it is computed or calculated
+        # based on the availability of user-supplied temperature or salinity
+        elif selected is None and source is None:
+            self.sensors.speed_of_sound_mps.set_selected('internal')
+            # If temperature or salinity is set by the user, the speed of sound is computed; otherwise it is
+            # considered calculated by the ADCP.
+            if (self.sensors.temperature_deg_c.selected == 'user') or (self.sensors.salinity_ppt.selected == 'user'):
+                self.sensors.speed_of_sound_mps.internal.set_source('Computed')
+            else:
+                self.sensors.speed_of_sound_mps.internal.set_source('Calculated')
+
+        # Determine new speed of sound
+        if self.sensors.speed_of_sound_mps.selected == 'internal':
+
+            if self.sensors.speed_of_sound_mps.internal.source == 'Calculated':
+                # Internal: Calculated
+                new_sos = self.sensors.speed_of_sound_mps.internal.data_orig
+                self.sensors.speed_of_sound_mps.internal.change_data(data_in=new_sos)
+                # Change temperature and salinity selected to internal
+                self.sensors.temperature_deg_c.set_selected(selected_name='internal')
+                self.sensors.salinity_ppt.set_selected(selected_name='internal')
+            else:
+                # Internal: Computed
+                temperature_selected = getattr(self.sensors.temperature_deg_c, self.sensors.temperature_deg_c.selected)
+                temperature = temperature_selected.data
+                salinity_selected = getattr(self.sensors.salinity_ppt, self.sensors.salinity_ppt.selected)
+                salinity = salinity_selected.data
+                new_sos = Sensors.speed_of_sound(temperature=temperature, salinity=salinity)
+                self.sensors.speed_of_sound_mps.internal.change_data(data_in=new_sos)
+        else:
+            if speed is not None:
+                new_sos = np.tile(speed, len(self.sensors.speed_of_sound_mps.internal.data_orig))
+                self.sensors.speed_of_sound_mps.user.change_data(data_in=new_sos)
+
+        self.apply_sos_change(old_sos=old_sos, new_sos=new_sos)
+
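+    # Illustrative usage of update_sos (editor's sketch, not original QRev
+    # code; values are hypothetical):
+    #
+    #   # Override the speed of sound with a manual value of 1500 m/s
+    #   transect.update_sos(selected='user', source='Manual Input', speed=1500.0)
+    #
+    #   # Revert to the internal source; the source is then set to 'Computed'
+    #   # when user temperature/salinity is in effect, else 'Calculated'
+    #   transect.update_sos()
+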
+    def apply_sos_change(self, old_sos, new_sos):
+        """Computes the ratio and calls methods in WaterData and BoatData to apply change.
+
+        Parameters
+        ----------
+        old_sos: float
+            Speed of sound on which the current data are based, in m/s
+        new_sos: float
+            Speed of sound on which the data need to be based, in m/s
+        """
+
+        ratio = new_sos / old_sos
+
+        # RiverRay horizontal velocities are not affected by changes in speed of sound
+        if self.adcp.model != 'RiverRay':
+            # Apply speed of sound change to water and boat data
+            self.w_vel.sos_correction(ratio=ratio)
+            self.boat_vel.bt_vel.sos_correction(ratio=ratio)
+        # Correct depths
+        self.depths.sos_correction(ratio=ratio)
+
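+    # Worked example for apply_sos_change (editor's sketch; numbers are
+    # hypothetical): velocities and acoustic ranges scale linearly with the
+    # speed of sound, so the correction reduces to a single ratio:
+    #
+    #   ratio = 1500.0 / 1485.0   # new_sos / old_sos, about 1.0101
+    #   # water velocities, bottom track velocities, and processed depths are
+    #   # multiplied by this ratio, e.g. 1.20 m/s becomes about 1.212 m/s
+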
+    @staticmethod
+    def raw_valid_data(transect):
+        """Determines ensembles and cells with no interpolated water or boat data.
+
+        For valid water track cells both non-interpolated valid water data and
+        boat velocity data must be available. Interpolated depths are allowed.
+
+        For valid ensembles water, boat, and depth data must all be non-interpolated.
+
+        Parameters
+        ----------
+        transect: TransectData
+            Object of TransectData
+
+        Returns
+        -------
+        raw_valid_ens: np.array(bool)
+            Boolean array identifying raw valid ensembles.
+        raw_valid_depth_cells: np.array(bool)
+            Boolean array identifying raw valid depth cells.
+        """
+
+        in_transect_idx = transect.in_transect_idx
+
+        # Determine valid water track ensembles based on water track and navigation data.
+        boat_vel_select = getattr(transect.boat_vel, transect.boat_vel.selected)
+        if boat_vel_select is not None and np.nansum(np.logical_not(np.isnan(boat_vel_select.u_processed_mps))) > 0:
+            valid_nav = boat_vel_select.valid_data[0, in_transect_idx]
+        else:
+            valid_nav = np.tile(False, in_transect_idx.shape[0])
+
+        valid_wt = np.copy(transect.w_vel.valid_data[0, :, in_transect_idx])
+        valid_wt_ens = np.any(valid_wt, 1)
+
+        # Determine valid depths
+        depths_select = getattr(transect.depths, transect.depths.selected)
+        if transect.depths.composite:
+            valid_depth = np.tile(True, (depths_select.depth_source_ens.shape[0]))
+            idx_na = np.where(depths_select.depth_source_ens[in_transect_idx] == 'NA')[0]
+            if len(idx_na) > 0:
+                valid_depth[idx_na] = False
+            interpolated_depth_idx = np.where(depths_select.depth_source_ens[in_transect_idx] == 'IN')[0]
+            if len(interpolated_depth_idx) > 0:
+                valid_depth[interpolated_depth_idx] = False
+        else:
+            valid_depth = depths_select.valid_data[in_transect_idx]
+            idx = np.where(np.isnan(depths_select.depth_processed_m[in_transect_idx]))[0]
+            if len(idx) > 0:
+                valid_depth[idx] = False
+
+        # Determine valid ensembles based on all data
+        valid_ens = np.all(np.vstack((valid_nav, valid_wt_ens, valid_depth)), 0)
+
+        return valid_ens, valid_wt.T
+
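+    # Editor's sketch for raw_valid_data (synthetic values): an ensemble is
+    # raw-valid only when navigation, water track, and depth are all valid
+    # and non-interpolated:
+    #
+    #   valid_nav    = [True,  True,  False, True ]
+    #   valid_wt_ens = [True,  False, True,  True ]
+    #   valid_depth  = [True,  True,  True,  False]
+    #   valid_ens    = [True,  False, False, False]
+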
+    @staticmethod
+    def compute_gps_lag(transect):
+        """Computes the lag between bottom track and GGA and/or VTG using an autocorrelation method.
+
+        Parameters
+        ----------
+        transect: TransectData
+            Object of TransectData
+
+        Returns
+        -------
+        lag_gga: float
+            Lag in seconds between bottom track and gga
+        lag_vtg: float
+            Lag in seconds between bottom track and vtg
+        """
+
+        # Initialize returns
+        lag_gga = None
+        lag_vtg = None
+
+        bt_speed = np.sqrt(transect.boat_vel.bt_vel.u_processed_mps ** 2
+                           + transect.boat_vel.bt_vel.v_processed_mps ** 2)
+
+        avg_ens_dur = np.nanmean(transect.date_time.ens_duration_sec)
+
+        # Compute lag for gga, if available
+        if transect.boat_vel.gga_vel is not None:
+            gga_speed = np.sqrt(transect.boat_vel.gga_vel.u_processed_mps ** 2
+                                + transect.boat_vel.gga_vel.v_processed_mps ** 2)
+
+            # Compute lag if both bottom track and gga have valid data
+            valid_data = np.all(np.logical_not(np.isnan(np.vstack((bt_speed, gga_speed)))), axis=0)
+            if np.any(valid_data):
+                # Compute lag
+                lag_gga = (np.count_nonzero(valid_data)
+                           - np.argmax(signal.correlate(bt_speed[valid_data], gga_speed[valid_data])) - 1) * avg_ens_dur
+            else:
+                lag_gga = None
+
+        # Compute lag for vtg, if available
+        if transect.boat_vel.vtg_vel is not None:
+            vtg_speed = np.sqrt(transect.boat_vel.vtg_vel.u_processed_mps ** 2
+                                + transect.boat_vel.vtg_vel.v_processed_mps ** 2)
+
+            # Compute lag if both bottom track and vtg have valid data
+            valid_data = np.all(np.logical_not(np.isnan(np.vstack((bt_speed, vtg_speed)))), axis=0)
+            if np.any(valid_data):
+                # Compute lag
+                lag_vtg = (np.count_nonzero(valid_data)
+                           - np.argmax(signal.correlate(bt_speed[valid_data], vtg_speed[valid_data])) - 1) * avg_ens_dur
+            else:
+                lag_vtg = None
+
+        return lag_gga, lag_vtg
+
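+    # Editor's sketch of the lag computation in compute_gps_lag (synthetic
+    # data, not original QRev code):
+    #
+    #   import numpy as np
+    #   from scipy import signal
+    #
+    #   bt = np.array([0., 0., 1., 0., 0.])
+    #   gga = np.array([0., 0., 0., 1., 0.])   # gga trails bt by one ensemble
+    #   lag_ens = len(bt) - np.argmax(signal.correlate(bt, gga)) - 1
+    #   # lag_ens == 1; multiplying by the mean ensemble duration converts
+    #   # the lag to seconds, as done above
+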
+    @staticmethod
+    def compute_gps_lag_fft(transect):
+        """Computes the lag between bottom track and GGA and/or VTG using fft method.
+
+        Parameters
+        ----------
+        transect: TransectData
+            Object of TransectData
+
+        Returns
+        -------
+        lag_gga: float
+            Circular lag in ensembles between bottom track and gga
+        lag_vtg: float
+            Circular lag in ensembles between bottom track and vtg
+        """
+        lag_gga = None
+        lag_vtg = None
+
+        bt_speed = np.sqrt(transect.boat_vel.bt_vel.u_processed_mps ** 2
+                           + transect.boat_vel.bt_vel.v_processed_mps ** 2)
+
+        avg_ens_dur = np.nanmean(transect.date_time.ens_duration_sec)
+        if transect.boat_vel.gga_vel is not None:
+            gga_speed = np.sqrt(transect.boat_vel.gga_vel.u_processed_mps ** 2
+                                + transect.boat_vel.gga_vel.v_processed_mps ** 2)
+            valid_data = np.all(np.logical_not(np.isnan(np.vstack((bt_speed, gga_speed)))), axis=0)
+            b = fftpack.fft(bt_speed[valid_data])
+            g = fftpack.fft(gga_speed[valid_data])
+            # Conjugate of the bottom track spectrum for cross-correlation
+            br = np.conjugate(b)
+            lag_gga = np.argmax(np.abs(fftpack.ifft(br * g)))
+
+        if transect.boat_vel.vtg_vel is not None:
+            vtg_speed = np.sqrt(transect.boat_vel.vtg_vel.u_processed_mps ** 2
+                                + transect.boat_vel.vtg_vel.v_processed_mps ** 2)
+            valid_data = np.all(np.logical_not(np.isnan(np.vstack((bt_speed, vtg_speed)))), axis=0)
+            b = fftpack.fft(bt_speed[valid_data])
+            g = fftpack.fft(vtg_speed[valid_data])
+            # Conjugate of the bottom track spectrum for cross-correlation
+            br = np.conjugate(b)
+            lag_vtg = np.argmax(np.abs(fftpack.ifft(br * g)))
+
+        return lag_gga, lag_vtg
+
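+    # Editor's note on the FFT method (sketch, synthetic data): circular
+    # cross-correlation via the FFT locates the same peak as signal.correlate,
+    # up to wrap-around effects:
+    #
+    #   import numpy as np
+    #   from scipy import fftpack
+    #
+    #   bt = np.array([0., 0., 1., 0., 0.])
+    #   gga = np.array([0., 0., 0., 1., 0.])   # gga trails bt by one ensemble
+    #   xcorr = np.abs(fftpack.ifft(np.conjugate(fftpack.fft(bt))
+    #                               * fftpack.fft(gga)))
+    #   # np.argmax(xcorr) == 1, the circular lag in ensembles
+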
+    @staticmethod
+    def compute_gps_bt(transect, gps_ref='gga_vel'):
+        """Computes properties describing the difference between bottom track and the specified GPS reference.
+
+        Parameters
+        ----------
+        transect: TransectData
+            Object of TransectData
+        gps_ref: str
+            GPS referenced to be used in computation (gga_vel or vtg_vel)
+
+        Returns
+        -------
+        gps_bt: dict
+            course: float
+                Difference in course computed from gps and bt, in degrees
+            ratio: float
+                Ratio of final distance made good for bt and gps (bt dmg / gps dmg)
+            dir: float
+                Direction of vector from end of GPS track to end of bottom track
+            mag: float
+                Length of vector from end of GPS track to end of bottom track
+        """
+
+        gps_bt = dict()
+        gps_vel = getattr(transect.boat_vel, gps_ref)
+        if gps_vel is not None and \
+                1 < np.sum(np.logical_not(np.isnan(gps_vel.u_processed_mps))) and \
+                1 < np.sum(np.logical_not(np.isnan(transect.boat_vel.bt_vel.u_processed_mps))):
+            # Data prep
+            bt_track = BoatStructure.compute_boat_track(transect, ref='bt_vel')
+
+            try:
+                bt_course, _ = cart2pol(bt_track['track_x_m'][-1], bt_track['track_y_m'][-1])
+                bt_course = rad2azdeg(bt_course)
+            except TypeError:
+                bt_course = np.nan
+
+            gps_track = BoatStructure.compute_boat_track(transect, ref=gps_ref)
+            gps_course, _ = cart2pol(gps_track['track_x_m'][-1], gps_track['track_y_m'][-1])
+            gps_course = rad2azdeg(gps_course)
+
+            # Compute course
+            gps_bt['course'] = gps_course - bt_course
+            if gps_bt['course'] < 0:
+                gps_bt['course'] = gps_bt['course'] + 360
+
+            # Compute ratio
+            try:
+                gps_bt['ratio'] = bt_track['dmg_m'][-1] / gps_track['dmg_m'][-1]
+            except TypeError:
+                gps_bt['ratio'] = np.nan
+
+            # Compute closure vector
+            try:
+                x_diff = bt_track['track_x_m'][-1] - gps_track['track_x_m'][-1]
+            except TypeError:
+                x_diff = np.nan
+
+            try:
+                y_diff = bt_track['track_y_m'][-1] - gps_track['track_y_m'][-1]
+            except TypeError:
+                y_diff = np.nan
+
+            try:
+                gps_bt['dir'], gps_bt['mag'] = cart2pol(x_diff, y_diff)
+                gps_bt['dir'] = rad2azdeg(gps_bt['dir'])
+            except TypeError:
+                gps_bt['dir'] = np.nan
+                gps_bt['mag'] = np.nan
+
+        return gps_bt
+
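+# Editor's sketch of interpreting compute_gps_bt output (hypothetical values):
+#
+#   gps_bt = TransectData.compute_gps_bt(transect, gps_ref='gga_vel')
+#   # gps_bt['course'] ~  2.5  -> GPS course is 2.5 deg clockwise of BT course
+#   # gps_bt['ratio']  ~  0.98 -> BT distance made good is 98% of the GPS dmg;
+#   #                             a ratio well below 1 may indicate a moving bed
+#   # gps_bt['dir'], gps_bt['mag'] give the closure vector from the end of the
+#   # GPS track to the end of the bottom track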
+
+# ========================================================================
+# Begin multithread function included in module but not TransectData class
+# Currently this is coded only for TRDI data
+# ========================================================================
+
+
+# DSM changed 1/23/2018 def allocate_transects(source, mmt, kargs)
+# TODO This needs a complete rewrite from what Greg did. However it works with no multi-threading for now
+
+# def add_transect(mmt, filename, index, type):
+#     pd0_data = Pd0TRDI(filename)
+#
+#     if type == 'MB':
+#         mmt_transect = mmt.mbt_transects[index]
+#     else:
+#         mmt_transect = mmt.transects[index]
+#
+#     transect = TransectData()
+#     transect.trdi(mmt=mmt,
+#                   mmt_transect=mmt_transect,
+#                   pd0_data=pd0_data)
+#     return transect
+#
+#
+# def allocate_transects(mmt, transect_type='Q', checked=False):
+#     """Method to load transect data. Changed from Matlab approach by Greg to allow possibility
+#     of multi-thread approach.
+#
+#     Parameters
+#     ----------
+#     mmt: MMT_TRDI
+#         Object of MMT_TRDI
+#     transect_type: str
+#         Type of transect (Q: discharge or MB: moving-bed test)
+#     checked: bool
+#         Determines if all files are loaded (False) or only checked files (True)
+#     """
+#
+#     # DEBUG, set threaded to false to get manual serial commands
+#     multi_threaded = False
+#
+#     file_names = []
+#     file_idx = []
+#
+#     # Setup processing for discharge or moving-bed transects
+#     if transect_type == 'Q':
+#         # Identify discharge transect files to load
+#         if checked:
+#             for idx, transect in enumerate(mmt.transects):
+#                 if transect.Checked == 1:
+#                     file_names.append(transect.Files[0])
+#                     file_idx.append(idx)
+#             # file_names = [transect.Files[0] for transect in mmt.transects if transect.Checked == 1]
+#         else:
+#             file_names = [transect.Files[0] for transect in mmt.transects]
+#             file_idx = list(range(0, len(file_names)))
+#     elif transect_type == 'MB':
+#         file_names = [transect.Files[0] for transect in mmt.mbt_transects]
+#         file_idx = list(range(0, len(file_names)))
+#
+#     # Determine if any files are missing
+#     valid_files = []
+#     valid_indices = []
+#     for index, name in enumerate(file_names):
+#         fullname = os.path.join(mmt.path, name)
+#         if os.path.exists(fullname):
+#             valid_files.append(fullname)
+#             valid_indices.append(file_idx[index])
+#
+#
+#     start = time.perf_counter()
+#     transects = []
+#     num = len(valid_indices)
+#     # num = 1
+#     multi_process = True
+#     if multi_process:
+#         with concurrent.futures.ProcessPoolExecutor() as executor:
+#             results = [executor.submit(add_transect, mmt, valid_files[k], valid_indices[k], transect_type) for k in range(num)]
+#
+#         for f in concurrent.futures.as_completed(results):
+#             transects.append(f.result())
+#     else:
+#         for k in range(num):
+#             transects.append(add_transect(mmt, valid_files[k], valid_indices[k], transect_type))
+#
+#     # # Multi-thread for Pd0 files
+#     # # -------------------------
+#     # # Seems like this section belongs in Pd0TRDI.py
+#     # # Initialize thread variables
+#     # pd0_data = []
+#     # pd0_threads = []
+#     # thread_id = 0
+#     #
+#     # # DSM 1/24/2018 could this be moved to Pd0TRDI.py as a method
+#     # def add_pd0(file_name):
+#     #     pd0_data.append(Pd0TRDI(file_name))
+#     #
+#     # if multi_threaded:
+#     #     # TODO this belongs in the pd0 class
+#     #     for file in valid_files:
+#     #         pd0_thread = MultiThread(thread_id=thread_id, function=add_pd0, args={'file_name': file})
+#     #         thread_id += 1
+#     #         pd0_thread.start()
+#     #         pd0_threads.append(pd0_thread)
+#     # else:
+#     #     for file in valid_files:
+#     #         pd0_data.append(Pd0TRDI(file))
+#     #
+#     # for thrd in pd0_threads:
+#     #     thrd.join()
+#     #
+#     # # Multi-thread for transect data
+#     #
+#     # # Initialize thread variables
+#     # processed_transects = []
+#     # transect_threads = []
+#     # thread_id = 0
+#     #
+#     # # DSM 1/24/2018 couldn't this be added to the TransectData class
+#     # def add_transect(transect_data, mmt_transect, mt_pd0_data, mt_mmt):
+#     #     transect_data.trdi(mmt=mt_mmt,
+#     #                        mmt_transect=mmt_transect,
+#     #                        pd0_data=mt_pd0_data)
+#     #     processed_transects.append(transect_data)
+#     #
+#     # # Process each transect
+#     # for k in range(len(pd0_data)):
+#     #     transect = TransectData()
+#     #     if pd0_data[k].Wt is not None:
+#     #         if transect_type == 'MB':
+#     #             # Process moving-bed transect
+#     #             if multi_threaded:
+#     #                 t_thread = MultiThread(thread_id=thread_id,
+#     #                                        function=add_transect,
+#     #                                        args={'transect': transect,
+#     #                                              'mmt_transect': mmt.mbt_transects[valid_indices[k]],
+#     #                                              'mt_pd0_data': pd0_data[k],
+#     #                                              'mt_mmt': mmt})
+#     #                 t_thread.start()
+#     #                 transect_threads.append(t_thread)
+#     #
+#     #             else:
+#     #                 transect = TransectData()
+#     #                 add_transect(transect_data=transect,
+#     #                              mmt_transect=mmt.mbt_transects[valid_indices[k]],
+#     #                              mt_pd0_data=pd0_data[k],
+#     #                              mt_mmt=mmt)
+#     #
+#     #         else:
+#     #             # Process discharge transects
+#     #             if multi_threaded:
+#     #                 t_thread = MultiThread(thread_id=thread_id,
+#     #                                        function=add_transect,
+#     #                                        args={'transect': transect,
+#     #                                              'mmt_transect': mmt.transects[valid_indices[k]],
+#     #                                              'mt_pd0_data': pd0_data[k],
+#     #                                              'mt_mmt': mmt})
+#     #                 t_thread.start()
+#     #                 transect_threads.append(t_thread)
+#     #
+#     #             else:
+#     #                 add_transect(transect_data=transect,
+#     #                              mmt_transect=mmt.transects[valid_indices[k]],
+#     #                              mt_pd0_data=pd0_data[k],
+#     #                              mt_mmt=mmt)
+#     #
+#     # if multi_threaded:
+#     #     for x in transect_threads:
+#     #         x.join()
+#     finish = time.perf_counter()
+#     print(f'Finished in {finish - start}')
+#     return processed_transects
+
+
+def adjusted_ensemble_duration(transect, trans_type=None):
+    """Applies the TRDI method of expanding the ensemble time when data are invalid.
+
+    Parameters
+    ----------
+    transect: TransectData
+        Object of TransectData
+    trans_type: str
+        Transect type. If mbt then bottom track is used.
+
+    Returns
+    -------
+    delta_t: np.array(float)
+        Array of delta time in seconds for each ensemble.
+    """
+
+    if transect.adcp.manufacturer == 'TRDI':
+        if trans_type is None:
+            # Determine valid data from water track
+            valid = np.logical_not(np.isnan(transect.w_vel.u_processed_mps))
+            # Sum valid cells per ensemble so invalid ensembles can be identified
+            valid_sum = np.sum(valid, axis=0)
+        else:
+            # Determine valid data from bottom track
+            valid_sum = np.logical_not(np.isnan(transect.boat_vel.bt_vel.u_processed_mps))
+
+        valid_ens = valid_sum > 0
+        n_ens = len(valid_ens)
+        ens_dur = transect.date_time.ens_duration_sec
+        delta_t = np.tile([np.nan], n_ens)
+        cum_dur = 0
+        for j in range(n_ens):
+            cum_dur = np.nansum(np.hstack([cum_dur, ens_dur[j]]))
+            if valid_ens[j]:
+                delta_t[j] = cum_dur
+                cum_dur = 0
+    else:
+        delta_t = transect.date_time.ens_duration_sec
+
+    return delta_t
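+
+
+# Editor's worked example for adjusted_ensemble_duration (synthetic values):
+# with ensemble durations [1, 1, 1, 1] s and ensemble validity
+# [True, False, False, True], the time of the invalid ensembles is rolled
+# into the next valid one:
+#
+#   delta_t = [1.0, nan, nan, 3.0]
+#
+# so no measured time is lost when data are invalid.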
diff --git a/Classes/TransformationMatrix.py b/Classes/TransformationMatrix.py
new file mode 100644
index 0000000..4bd3986
--- /dev/null
+++ b/Classes/TransformationMatrix.py
@@ -0,0 +1,206 @@
+import numpy as np
+
+
+class TransformationMatrix(object):
+    """Determines the transformation matrix and source for the specified ADCP model from the data provided.
+
+    Attributes
+    ----------
+    source: str
+        Source of transformation matrix, either Nominal or ADCP
+    matrix: np.array
+        One or more 4x4 transformation matrices.
+     """
+
+    def __init__(self):
+        """Constructor initializes variable to None"""
+        self.source = None
+        self.matrix = None
+        
+    def populate_data(self, manufacturer, model=None, data_in=None):
+        """Uses the manufacturer and model to determine how to parse the transformation matrix.
+
+        Parameters
+        ----------
+        manufacturer: str
+            Name of manufacturer (TRDI, SonTek)
+        model: str
+            Model of ADCP
+        data_in:
+            System test data or 'Nominal'
+        """
+        
+        if manufacturer == 'TRDI':
+            self.trdi(model, data_in)
+        elif manufacturer == 'SonTek':
+            self.sontek(data_in)
+
+    def trdi(self, model=None, data_in=None):
+        """Processes the data to store the transformation matrix for TRDI ADCPs.
+        If no transformation matrix information is available a nominal transformation
+        matrix for that model is assumed.
+
+        Parameters
+        ----------
+        model: str
+            Model of ADCP
+        data_in:
+            System test data or 'Nominal'
+        """
+
+        adcp_model = model
+        # Set nominal matrix based on model
+        self.matrix = [[1.4619, -1.4619, 0, 0],
+                       [0, 0, -1.4619, 1.4619],
+                       [0.2661, 0.2661, 0.2661, 0.2661],
+                       [1.0337, 1.0337, -1.0337, -1.0337]]
+
+        if adcp_model == 'RiverRay':
+            self.matrix = [[1, -1, 0, 0],
+                           [0, 0, -1, 1],
+                           [0.2887, 0.2887, 0.2887, 0.2887],
+                           [0.7071, 0.7071, -0.7071, -0.7071]]
+
+        # Overwrite nominal transformation matrix with custom matrix from test data, if available
+        self.source = 'Nominal'
+        if data_in == 'Nominal':
+            self.source = 'Nominal'
+        elif adcp_model == 'Rio Grande':
+            self.riogrande(data_in)
+        elif adcp_model == 'StreamPro':
+            self.streampro(data_in)
+        elif adcp_model == 'RiverRay':
+            self.riverray(data_in)
+        elif adcp_model == 'RiverPro':
+            self.riverpro(data_in)
+        elif adcp_model == 'RioPro':
+            self.riopro(data_in)
+        elif adcp_model == 'pd0':
+            self.matrix = data_in.Inst.t_matrix
+
+        if np.array(self.matrix).size < 16:
+            self.trdi(model=model, data_in=None)
+
+        # Save matrix as np array
+        self.matrix = np.array(self.matrix)[0:4, 0:4]
+
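+    # Editor's sketch of how a 4x4 instrument matrix is applied (illustrative
+    # only; the actual transformation is performed elsewhere in QRev):
+    #
+    #   import numpy as np
+    #   beam_vel = np.array([0.5, -0.5, 0.1, 0.1])  # along-beam velocities, m/s
+    #   inst_vel = tm.matrix @ beam_vel   # tm: a populated TransformationMatrix
+    #   # rows of the matrix map beams 1-4 to x, y, z, and error velocity
+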
+    def riogrande(self, data_in):
+        """Process Rio Grande test data for transformation matrix.
+
+        Parameters
+        ----------
+        data_in:
+            System test data
+        """
+        if data_in is not None:
+            idx = data_in.find('Instrument Transformation Matrix (Down):')
+            if idx != -1:
+                cell_matrix = np.fromstring(data_in[idx + 50:idx + 356], dtype=np.float64, sep=' ')
+                try:
+                    self.matrix = np.reshape(cell_matrix, (-1, 8))[:, 0:4]
+
+                    self.source = 'ADCP'
+                except ValueError:
+                    pass
+
+    def streampro(self, data_in):
+        """Process StreamPro test data for transformation matrix.
+
+        Parameters
+        ----------
+        data_in:
+            System test data
+        """
+
+        if data_in is not None:
+            idx = data_in.find('>PS3')
+            if idx != -1:
+                temp_str = data_in[idx + 5:idx + 138]
+                temp_str = temp_str.replace('-', ' -')
+                temp_str = temp_str[:temp_str.find('>')]
+                cell_matrix = np.fromstring(temp_str, dtype=np.float64, sep=' ')
+                try:
+                    self.matrix = cell_matrix.reshape(4, 4)
+                    self.source = 'ADCP'
+                except ValueError:
+                    pass
+
+    def riverray(self, data_in):
+        """Process RiverRay test data for transformation matrix.
+
+        Parameters
+        ----------
+        data_in: str
+            System test data
+        """
+        if data_in is not None:
+            idx = data_in.find('Instrument Transformation Matrix')
+            if idx != -1:
+                idx2 = data_in[idx:].find(':')
+                idx3 = idx + idx2
+                if idx2 != -1:
+                    idx4 = data_in[idx3:].find('>')
+                    idx5 = idx3 + idx4 - 2
+                    if idx4 != -1:
+                        self.matrix = float(data_in[idx3:idx5])
+                        self.source = 'ADCP'
+
+    def riverpro(self, data_in):
+        """Process RiverPro test data for transformation matrix.
+
+        Parameters
+        ----------
+        data_in: str
+            System test data
+        """
+        if data_in is not None:
+            idx = data_in.find('Instrument Transformation Matrix')
+            if idx != -1:
+                idx2 = data_in[idx:].find(':')
+                idx3 = idx + idx2
+                if idx2 != -1:
+                    idx4 = data_in[idx3:].find('Has V-Beam')
+                    idx5 = idx3 + idx4 - 2
+                    if idx4 != -1:
+                        self.matrix = float(data_in[idx3:idx5])
+                        self.source = 'ADCP'
+
+    def riopro(self, data_in):
+        """Process RioPro test data for transformation matrix.
+
+        Parameters
+        ----------
+        data_in:
+            System test data
+        """
+
+        if data_in is not None:
+            idx = data_in.find('Instrument Transformation Matrix')
+            if idx != -1:
+                idx2 = data_in[idx:].find(':')
+                idx3 = idx + idx2
+                if idx2 != -1:
+                    idx4 = data_in[idx3:].find('Has V-Beam')
+                    idx5 = idx3 + idx4 - 2
+                    if idx4 != -1:
+                        self.matrix = float(data_in[idx3:idx5])
+                        self.source = 'ADCP'
+
+    def sontek(self, data_in):
+        """Store SonTek transformation matrix data.
+
+        Parameters
+        ----------
+        data_in:
+            System test data
+        """
+
+        if data_in is not None:
+            self.source = 'ADCP'
+            # Note: for M9 this is a 4x4x3 matrix (300,500,1000)
+            # Note: for S5 this is a 4x4x2 matrix (3000,1000)
+            self.matrix = data_in
+
+    def populate_from_qrev_mat(self, tmatrix):
+        """Populates the object using data from a previously saved QRev Matlab file."""
+        self.matrix = tmatrix.matrix
+        self.source = tmatrix.source
diff --git a/Classes/Uncertainty.py b/Classes/Uncertainty.py
new file mode 100644
index 0000000..0af2deb
--- /dev/null
+++ b/Classes/Uncertainty.py
@@ -0,0 +1,418 @@
+import numpy as np
+from scipy.stats import t
+
+
+class Uncertainty(object):
+    """Computes the uncertainty of a measurement.
+
+    Attributes
+    ----------
+    cov: float
+        Coefficient of variation for all transects used in discharge computation
+    cov_95: float
+        Coefficient of variation inflated to the 95% coverage
+    invalid_95: float
+        Estimated 95% uncertainty for discharge in invalid bins and ensembles
+    edges_95: float
+        Estimated 95% uncertainty for the computed edge discharges
+    extrapolation_95: float
+        Estimated 95% uncertainty in discharge due to top and bottom extrapolations
+    moving_bed_95: float
+        Estimated 95% uncertainty due to moving-bed tests and conditions
+    systematic: float
+        Systematic error estimated at 1.5% at 1 sigma
+    total_95: float
+        Estimated 95% uncertainty in discharge using automated values
+    cov_95_user: float
+        User provided estimate of coefficient of variation inflated to the 95% coverage
+    invalid_95_user: float
+        User provided estimate of 95% uncertainty for discharge in invalid bins and ensembles
+    edges_95_user: float
+        User provided estimate of 95% uncertainty for the computed edge discharges
+    extrapolation_95_user: float
+        User provided estimate of 95% uncertainty in discharge due to top and bottom extrapolations
+    moving_bed_95_user: float
+        User provided estimate of 95% uncertainty due to moving-bed tests and conditions
+    systematic_user: float
+        User provided estimate of systematic error estimated at 1.5% at 1 sigma
+    total_95_user: float
+        Estimated 95% uncertainty in discharge using user provided values to override automated values
+    """
+
+    def __init__(self):
+        """Initializes the instance variables."""
+        self.cov = None
+        self.cov_95 = None
+        self.invalid_95 = None
+        self.edges_95 = None
+        self.extrapolation_95 = None
+        self.moving_bed_95 = None
+        self.systematic = None
+        self.total_95 = None
+        self.cov_95_user = None
+        self.invalid_95_user = None
+        self.edges_95_user = None
+        self.extrapolation_95_user = None
+        self.moving_bed_95_user = None
+        self.systematic_user = None
+        self.total_95_user = None
+
+    def compute_uncertainty(self, meas, cov_95_user=None, invalid_95_user=None, edges_95_user=None,
+                            extrapolation_95_user=None, moving_bed_95_user=None, systematic_user=None):
+        """Computes the uncertainty for the components of the discharge measurement
+        using measurement data or user provided values.
+
+        Parameters
+        ----------
+        meas: Measurement
+            Object of class Measurement
+        cov_95_user: float
+            User provided estimate of coefficient of variation inflated to the 95% coverage
+        invalid_95_user: float
+            User provided estimate of 95% uncertainty for discharge in invalid bins and ensembles
+        edges_95_user: float
+            User provided estimate of 95% uncertainty for the computed edge discharges
+        extrapolation_95_user: float
+            User provided estimate of 95% uncertainty in discharge due to top and bottom extrapolations
+        moving_bed_95_user: float
+            User provided estimate of 95% uncertainty due to moving-bed tests and conditions
+        systematic_user: float
+            User provided estimate of systematic error estimated at 1.5% at 1 sigma
+        """
+
+        # Use only checked discharges
+        checked = []
+        discharges = []
+        for n in range(len(meas.transects)):
+            checked.append(meas.transects[n].checked)
+            if meas.transects[n].checked:
+                discharges.append(meas.discharge[n])
+
+        # Compute uncertainties from the data
+        self.cov, self.cov_95 = self.uncertainty_q_random(discharges, 'total')
+        self.invalid_95 = self.uncertainty_invalid_data(discharges)
+        self.edges_95 = self.uncertainty_edges(discharges)
+        self.extrapolation_95 = self.uncertainty_extrapolation(meas, discharges)
+        self.moving_bed_95 = self.uncertainty_moving_bed(meas, checked)
+        self.systematic = 1.5
+
+        # Get user uncertainty estimates
+        self.cov_95_user = cov_95_user
+        self.invalid_95_user = invalid_95_user
+        self.edges_95_user = edges_95_user
+        self.extrapolation_95_user = extrapolation_95_user
+        self.moving_bed_95_user = moving_bed_95_user
+        self.systematic_user = systematic_user
+
+        # Estimate the total measurement uncertainty
+        self.estimate_total_uncertainty()
+
+    def populate_from_qrev_mat(self, meas_struct):
+        """Populates the object using data from previously saved QRev Matlab file.
+
+        Parameters
+        ----------
+        meas_struct: mat_struct
+           Matlab data structure obtained from sio.loadmat
+        """
+        if hasattr(meas_struct, 'uncertainty'):
+            self.cov = meas_struct.uncertainty.cov
+            self.cov_95 = meas_struct.uncertainty.cov95
+            self.invalid_95 = meas_struct.uncertainty.invalid95
+            self.edges_95 = meas_struct.uncertainty.edges95
+            self.extrapolation_95 = meas_struct.uncertainty.extrapolation95
+            self.moving_bed_95 = meas_struct.uncertainty.movingBed95
+            self.systematic = meas_struct.uncertainty.systematic
+            self.total_95 = meas_struct.uncertainty.total95
+            if not type(meas_struct.uncertainty.cov95User) is np.ndarray:
+                self.cov_95_user = meas_struct.uncertainty.cov95User
+            if not type(meas_struct.uncertainty.invalid95User) is np.ndarray:
+                self.invalid_95_user = meas_struct.uncertainty.invalid95User
+            if not type(meas_struct.uncertainty.edges95User) is np.ndarray:
+                self.edges_95_user = meas_struct.uncertainty.edges95User
+            if not type(meas_struct.uncertainty.extrapolation95User) is np.ndarray:
+                self.extrapolation_95_user = meas_struct.uncertainty.extrapolation95User
+            if not type(meas_struct.uncertainty.movingBed95User) is np.ndarray:
+                self.moving_bed_95_user = meas_struct.uncertainty.movingBed95User
+            if not type(meas_struct.uncertainty.systematicUser) is np.ndarray:
+                self.systematic_user = meas_struct.uncertainty.systematicUser
+            self.total_95_user = meas_struct.uncertainty.total95User
+
+    def estimate_total_uncertainty(self):
+        """Compute the uncertainty of the measurement using the automatically computed uncertainties and
+        user overrides.
+        """
+
+        self.total_95 = 2.0 * ((self.cov_95 / 2)**2
+                               + (self.invalid_95 / 2)**2
+                               + (self.edges_95 / 2)**2
+                               + (self.extrapolation_95 / 2)**2
+                               + (self.moving_bed_95 / 2)**2
+                               + self.systematic**2
+                               )**0.5
+
+        if self.cov_95_user is None:
+            cov_95_user = self.cov_95
+        else:
+            cov_95_user = self.cov_95_user
+
+        if self.invalid_95_user is None:
+            invalid_95_user = self.invalid_95
+        else:
+            invalid_95_user = self.invalid_95_user
+
+        if self.edges_95_user is None:
+            edges_95_user = self.edges_95
+        else:
+            edges_95_user = self.edges_95_user
+
+        if self.extrapolation_95_user is None:
+            extrapolation_95_user = self.extrapolation_95
+        else:
+            extrapolation_95_user = self.extrapolation_95_user
+
+        if self.moving_bed_95_user is None:
+            moving_bed_95_user = self.moving_bed_95
+        else:
+            moving_bed_95_user = self.moving_bed_95_user
+
+        if self.systematic_user is None:
+            systematic_user = self.systematic
+        else:
+            systematic_user = self.systematic_user
+
+        self.total_95_user = 2.0 * ((cov_95_user / 2)**2
+                                    + (invalid_95_user / 2)**2
+                                    + (edges_95_user / 2)**2
+                                    + (extrapolation_95_user / 2)**2
+                                    + (moving_bed_95_user / 2)**2
+                                    + systematic_user**2
+                                    )**0.5
+
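+    # Editor's worked example for the quadrature sum above (hypothetical
+    # percentages): with cov_95=4, invalid_95=1, edges_95=2,
+    # extrapolation_95=2, moving_bed_95=1, and systematic=1.5,
+    #
+    #   total_95 = 2 * sqrt((4/2)**2 + (1/2)**2 + (2/2)**2 + (2/2)**2
+    #                       + (1/2)**2 + 1.5**2)
+    #            = 2 * sqrt(8.75) ~ 5.9 percent
+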
+    @staticmethod
+    def get_array_attr(list_in, prop):
+        """Create an array of the requested attribute from a list of objects containing the requested attribute.
+
+        Parameters
+        ----------
+        list_in: list
+            List of objects
+        prop: str
+            Attribute requested
+
+        Returns
+        -------
+        data: np.ndarray
+            Array of the requested attributes from each object in list_in
+
+        """
+        # Create array of specified attribute
+        data = []
+        for item in list_in:
+            data.append(getattr(item, prop))
+        data = np.asarray(data)
+        return data
+
+    @staticmethod
+    def uncertainty_q_random(discharges, prop):
+        """Compute 95% random uncertainty for property of discharge.
+        Uses simplified method for 2 transects.
+
+        Parameters
+        ----------
+        discharges: list
+            List of Discharge objects
+        prop: str
+            Attribute of Discharge objects
+
+        Returns
+        -------
+        cov: float
+            Coefficient of variation
+        cov_95: float
+            Coefficient of variation inflated to 95% value
+        """
+        n_max = len(discharges)
+        if n_max > 0:
+            # Create array of specified attribute
+            data = Uncertainty.get_array_attr(discharges, prop)
+
+            # Compute coefficient of variation
+            cov = np.abs(np.nanstd(data, ddof=1) / np.nanmean(data)) * 100
+
+            # Inflate the cov to the 95% value
+            if n_max == 2:
+                # Use the approximate method as taught in class to reduce the high coverage factor for 2 transects
+                # and account for prior knowledge related to 720 second duration analysis
+                cov_95 = cov * 3.3
+            else:
+                # Use Student's t to inflate COV for n > 2
+                cov_95 = t.interval(0.95, n_max-1)[1] * cov / n_max**0.5
+        else:
+            cov = np.nan
+            cov_95 = np.nan
+
+        return cov, cov_95
+
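+    # Editor's worked example for uncertainty_q_random (hypothetical
+    # discharges): totals of [100, 102, 98, 100] m^3/s give a mean of 100 and
+    # a sample standard deviation of ~1.63, so cov ~ 1.63%. For n=4 the COV
+    # is inflated with Student's t:
+    #
+    #   cov_95 = t.interval(0.95, 3)[1] * 1.63 / 4**0.5 ~ 3.18 * 1.63 / 2 ~ 2.6
+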
+    @staticmethod
+    def uncertainty_edges(discharges):
+        """Compute uncertainty of edge discharge. Currently assuming random plus bias
+        is within 30% of actual value.
+
+        Parameters
+        ----------
+        discharges: list
+            List of Discharge objects
+
+        Returns
+        -------
+        edge_uncertainty: float
+            95% uncertainty in discharge due to edge estimates
+        """
+
+        # Compute mean discharge values for total, left, and right
+        mean_q = np.nanmean(Uncertainty.get_array_attr(discharges, 'total'))
+        mean_left = np.nanmean(Uncertainty.get_array_attr(discharges, 'left'))
+        mean_right = np.nanmean(Uncertainty.get_array_attr(discharges, 'right'))
+
+        # Compute combined edge uncertainty
+        percent_edge = ((np.abs(mean_left) + np.abs(mean_right)) / mean_q) * 100
+        edge_uncertainty = percent_edge * 0.3
+
+        return edge_uncertainty
+
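+    # Editor's worked example for uncertainty_edges (hypothetical values):
+    # mean_q=100, mean_left=3, mean_right=-2 m^3/s gives
+    # percent_edge = (3 + 2) / 100 * 100 = 5, and with the assumed 30%
+    # relative error, edge_uncertainty = 5 * 0.3 = 1.5 percent.
+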
+    @staticmethod
+    def uncertainty_extrapolation(meas, discharges):
+        """Compute the uncertainty of the top and bottom extrapolations.
+
+        Parameters
+        ----------
+        meas: Measurement
+            Object of class Measurement
+        discharges: list
+            List of Discharge objects
+
+        Returns
+        -------
+        extrapolation_uncertainty: float
+            95% uncertainty due to top and bottom extrapolation estimates
+        """
+
+        # Compute mean total uncorrected discharge
+        q_selected = np.nanmean(Uncertainty.get_array_attr(discharges, 'total_uncorrected'))
+
+        # Create array of discharges from the various extrapolation methods
+        q_possible = np.array([meas.extrap_fit.q_sensitivity.q_pp_mean,
+                               meas.extrap_fit.q_sensitivity.q_pp_opt_mean,
+                               meas.extrap_fit.q_sensitivity.q_cns_mean,
+                               meas.extrap_fit.q_sensitivity.q_cns_opt_mean,
+                               meas.extrap_fit.q_sensitivity.q_3p_ns_mean,
+                               meas.extrap_fit.q_sensitivity.q_3p_ns_opt_mean])
+
+        # Compute difference in discharges from the selected method
+        q_diff = np.abs(q_possible - q_selected)
+
+        # Sort differences
+        percent_diff = np.sort(q_diff) / q_selected
+
+        # Estimate the uncertainty as the average of the 4 smallest differences,
+        # excluding the selected method's own near-zero difference
+        extrapolation_uncertainty = np.nanmean(percent_diff[1:5]) * 100
+
+        return extrapolation_uncertainty
+
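+    # Editor's note on uncertainty_extrapolation (sketch): with sorted percent
+    # differences of [0, 0.01, 0.015, 0.02, 0.03, 0.05], the selected method's
+    # own near-zero difference is skipped and the next four are averaged:
+    # mean([0.01, 0.015, 0.02, 0.03]) * 100 ~ 1.9 percent.
+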
+    @staticmethod
+    def uncertainty_invalid_data(discharges):
+        """Computes an estimate of the uncertainty for the discharge computed for invalid bins and ensembles.
+
+        Parameters
+        ----------
+        discharges: list
+            List of Discharge objects
+
+        Returns
+        -------
+        invalid_data_uncertainty: float
+            95% uncertainty due to estimating invalid data
+        """
+
+        # Compute mean discharges
+        q_mean = np.nanmean(Uncertainty.get_array_attr(discharges, 'total'))
+        q_cells = np.nanmean(Uncertainty.get_array_attr(discharges, 'int_cells'))
+        q_ensembles = np.nanmean(Uncertainty.get_array_attr(discharges, 'int_ens'))
+
+        # Compute percentages
+        percent_cells = (q_cells / q_mean) * 100
+        percent_ensembles = (q_ensembles / q_mean) * 100
+
+        # Compute uncertainty for combined invalid cells and ensembles
+        invalid_data_uncertainty = (np.abs(percent_cells) + np.abs(percent_ensembles)) * 0.2
+
+        return invalid_data_uncertainty
+
+    @staticmethod
+    def uncertainty_moving_bed(meas, checked):
+        """Estimates the 95% uncertainty of the discharge due to a moving-bed and navigation reference.
+
+        Parameters
+        ----------
+        meas: Measurement
+            Object of class Measurement
+        checked: list
+            Logical list of transects used to compute final discharge
+
+        Returns
+        -------
+        moving_bed_uncertainty: float
+            95% uncertainty associated with moving-bed conditions
+        """
+
+        if np.any(checked) and meas.transects[checked.index(1)].boat_vel.selected == 'bt_vel':
+            # Boat velocity based on bottom track, moving-bed possible
+            if len(meas.mb_tests) > 0:
+                # Moving_bed tests recorded
+                user_valid = []
+                quality = []
+                moving_bed = []
+                used = []
+                for test in meas.mb_tests:
+                    user_valid.append(test.user_valid)
+                    if test.test_quality == 'Errors':
+                        quality.append(False)
+                    else:
+                        quality.append(True)
+                    moving_bed.append(test.moving_bed)
+                    used.append(test.use_2_correct)
+
+                # Check to see if there are any valid tests
+                if np.any(np.logical_and(np.asarray(quality), np.asarray(user_valid))):
+                    # Check to see if the valid tests indicate a moving bed
+                    moving_bed_bool = []
+                    for result in moving_bed:
+                        if result == 'Yes':
+                            moving_bed_bool.append(True)
+                        else:
+                            moving_bed_bool.append(False)
+                    valid_moving_bed = np.logical_and(quality, np.asarray(moving_bed_bool))
+                    if np.any(valid_moving_bed):
+                        # Check to see that a correction was used
+                        if np.any(np.logical_and(valid_moving_bed, np.asarray(used))):
+                            # Moving-bed exists and correction applied
+                            moving_bed_uncertainty = 1.5
+                        else:
+                            # Moving-bed exists and no correction applied
+                            moving_bed_uncertainty = 3
+                    else:
+                        # Valid tests indicated no moving bed
+                        moving_bed_uncertainty = 1
+                else:
+                    moving_bed_uncertainty = 3
+            elif meas.observed_no_moving_bed:
+                moving_bed_uncertainty = 1
+            else:
+                # No moving bed tests
+                moving_bed_uncertainty = 3
+        else:
+            # GPS used as boat velocity reference
+            moving_bed_uncertainty = 0
+
+        return moving_bed_uncertainty
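+
+
+# Editor's summary of the moving-bed uncertainty assignments above
+# (values in percent, taken from the code):
+#   GPS used as the navigation reference ................... 0
+#   Valid tests, no moving bed detected .................... 1
+#   No tests, but user observed no moving bed .............. 1
+#   Moving bed detected and correction applied ............. 1.5
+#   Moving bed detected, no correction / no valid tests .... 3
+#   No tests and no observation ............................ 3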
diff --git a/Classes/WaterData.py b/Classes/WaterData.py
new file mode 100644
index 0000000..56e1b68
--- /dev/null
+++ b/Classes/WaterData.py
@@ -0,0 +1,2445 @@
+import copy
+import numpy as np
+from numpy.matlib import repmat
+from scipy import interpolate
+from Classes.BoatData import BoatData
+from MiscLibs.common_functions import cart2pol, pol2cart, iqr, nan_greater, nan_less
+from MiscLibs.robust_loess import rloess
+from MiscLibs.abba_2d_interpolation import abba_idw_interpolation
+
+
+class WaterData(object):
+    """Class to process and store water velocity data.
+
+    Attributes
+    ----------
+    Original data provided to the class:
+        raw_vel_mps: np.array(float)
+            Contains the raw unfiltered velocity in m/s. First index corresponds to beams
+            1,2,3,4 in beam coordinates, or u,v,w,d otherwise.
+        frequency: np.array(float)
+            Defines ADCP frequency used for velocity measurement, in kHz.
+        orig_coord_sys: str
+            Defines the original raw velocity coordinate system "Beam", "Inst", "Ship", "Earth".
+        orig_nav_ref: str
+            Defines the original raw data navigation reference: "None", "BT", "GGA", "VTG".
+        corr: np.array(float)
+            Correlation values for WT, if available.
+        rssi: np.array(float)
+            Returned acoustic signal strength.
+        rssi_units: str
+            Units for returned acoustic signal strength: "Counts" "dB", "SNR".
+        water_mode: str
+            WaterMode for TRDI or 'Variable' for SonTek.
+        blanking_distance_m: float
+            Distance below transducer where data are marked invalid due to potential ringing.
+        cells_above_sl: np.array(bool)
+            Logical array of depth cells above sidelobe cutoff based on selected depth reference.
+        cells_above_sl_bt: np.array(bool)
+            Logical array of depth cells above the sidelobe cutoff based on BT
+        sl_lag_effect_m: np.array(float)
+            Side lobe distance due to lag and transmit length
+
+    Data computed in this class:
+        u_earth_no_ref_mps: np.array(float)
+            Horizontal velocity in x-direction with no boat reference applied, in m/s.
+        v_earth_no_ref_mps: np.array(float)
+            Horizontal velocity in y-direction with no boat reference applied, in m/s.
+        u_mps: np.array(float)
+            Horizontal velocity in x-direction, earth coord, nav referenced, in m/s.
+        v_mps: np.array(float)
+            Horizontal velocity in y-direction, earth coord, nav referenced, in m/s.
+        u_processed_mps: np.array(float)
+            Horizontal velocity in x-direction, earth coord, nav ref, filtered, and interpolated.
+        v_processed_mps: np.array(float)
+            Horizontal velocity in y-direction, earth coord, nav ref, filtered, and interpolated.
+        w_mps: np.array(float)
+            Vertical velocity (+ up), in m/s.
+        d_mps: np.array(float)
+            Difference in vertical velocities computed from opposing beam pairs, in m/s.
+        invalid_index: np.array(bool)
+            Index of ensembles with no valid raw velocity data.
+        num_invalid: float
+            Estimated number of depth cells in ensembles with no valid raw velocity data.
+        valid_data: np.array(float)
+            3-D logical array of valid data
+                Dim1 0 - composite
+                Dim1 1 - original, cells above side lobe
+                Dim1 2 - dfilter
+                Dim1 3 - wfilter
+                Dim1 4 - smoothFilter
+                Dim1 5 - beamFilter
+                Dim1 6 - excluded
+                Dim1 7 - snrFilter
+                Dim1 8 - validDepthFilter
+
+    Processing settings:
+        beam_filter: int
+            Set 3 for 3-beam solutions, 4 for 4-beam solutions.
+        d_filter: str
+            Set difference velocity filter "On", "Off".
+        d_filter_thresholds: float, dict, tuple
+            Threshold(s) for difference velocity filter.
+        w_filter: str
+            Set vertical velocity filter "On", "Off".
+        w_filter_thresholds: float, dict, tuple
+            Threshold(s) for vertical velocity filter.
+        excluded_dist_m: float
+            Distance below transducer for which data are excluded or marked invalid, in m.
+        orig_excluded_dist_m: float
+            Original distance below transducer for which data are excluded or marked invalid, in m.
+        smooth_filter: str
+            Set filter based on smoothing function "On", "Off".
+        smooth_speed: np.array(float)
+            Smoothed mean water speed, in m/s.
+        smooth_upper_limit: np.array(float)
+            Smooth function upper limit of window, in m/s.
+        smooth_lower_limit: np.array(float)
+            Smooth function lower limit of window, in m/s.
+        snr_filter: str
+            Set SNR filter for SonTek data "On", "Off".
+        snr_rng: np.array(float)
+            Range of beam averaged SNR
+        wt_depth_filter: np.array(bool)
+            WT in ensembles with invalid depths are marked invalid.
+        interpolate_ens: str
+            Type of interpolation: "None", "TRDI", "Linear", 'abba'.
+        interpolate_cells: str
+            Type of cell interpolation: "None", "TRDI", "Linear", 'abba'
+        coord_sys: str
+            Defines the velocity coordinate system "Beam", "Inst", "Ship", "Earth"
+        nav_ref: str
+            Defines the navigation reference: "None", "BT", "GGA", "VTG"
+        sl_cutoff_percent: float
+            Percent cutoff defined by cos(angle)
+        sl_cutoff_number: float
+            User specified number of cells to cutoff from SonTek, not implemented, undefined
+        sl_cutoff_type: str
+            Type of cutoff method "Percent" or "Number".
+        ping_type: np.array(int)
+            Indicates type of ping for each cell: 0-incoherent, 1-coherent, 2-surface
+        d_meas_thresholds: list
+            Dictionary of difference velocity thresholds computed using the whole measurement by ping type
+        w_meas_thresholds: list
+            Dictionary of vertical velocity thresholds computed using the whole measurement by ping type
+        use_measurement_thresholds: bool
+            Indicates if the measurement based thresholds should be used
+
+    """
+
+    def __init__(self):
+        """Initialize instance variables.
+        """
+
+        # Data input to this class
+        self.raw_vel_mps = None
+        self.frequency = None
+        self.orig_coord_sys = None
+        self.orig_nav_ref = None
+        self.corr = None
+        self.rssi = None
+        self.rssi_units = None
+        self.water_mode = None
+        self.blanking_distance_m = None
+        self.cells_above_sl = None
+        self.cells_above_sl_bt = None
+        self.sl_lag_effect_m = None
+        
+        # Data computed in this class
+        self.u_earth_no_ref_mps = None
+        self.v_earth_no_ref_mps = None
+        self.u_mps = None
+        self.v_mps = None
+        self.u_processed_mps = None
+        self.v_processed_mps = None
+        self.w_mps = None
+        self.d_mps = None
+        self.invalid_index = None
+        self.num_invalid = []
+        self.valid_data = None
+                                
+        # Settings
+        self.beam_filter = None
+        self.d_filter = None
+        self.d_filter_thresholds = {}
+        self.w_filter = None
+        self.w_filter_thresholds = {}
+        self.excluded_dist_m = None
+        self.orig_excluded_dist_m = None
+        self.smooth_filter = None
+        self.smooth_speed = None
+        self.smooth_upper_limit = None
+        self.smooth_lower_limit = None
+        self.snr_filter = 'Off'
+        self.snr_rng = []
+        self.wt_depth_filter = True
+        self.interpolate_ens = None
+        self.interpolate_cells = None
+        self.coord_sys = None
+        self.nav_ref = None
+        self.sl_cutoff_percent = None
+        self.sl_cutoff_number = None
+        self.sl_cutoff_type = None
+        self.sl_cutoff_m = None
+        self.ping_type = np.array([])
+
+        # Filter settings populated from Measurement.create_filter_composites
+        self.d_meas_thresholds = {}
+        self.w_meas_thresholds = {}
+
+        self.use_measurement_thresholds = False
+
+    def populate_data(self, vel_in, freq_in, coord_sys_in, nav_ref_in, rssi_in, rssi_units_in,
+                      excluded_dist_in, cells_above_sl_in, sl_cutoff_per_in, sl_cutoff_num_in,
+                      sl_cutoff_type_in, sl_lag_effect_in, wm_in, blank_in, corr_in=None,
+                      surface_vel_in=None, surface_rssi_in=None, surface_corr_in=None, sl_cutoff_m=None,
+                      surface_num_cells_in=0, ping_type='U', use_measurement_thresholds=False):
+        
+        """Populates the variables with input, computed, or default values.
+
+        Parameters
+        ----------
+        vel_in: np.array(float)
+            Contains the raw unfiltered velocity data in m/s.
+            Rows correspond to beams 1,2,3,4 in beam coordinates, or u,v,w,d otherwise.
+        freq_in: np.array(float)
+            Defines ADCP frequency used for velocity measurement.
+        coord_sys_in: str
+            Defines the original raw velocity coordinate system "Beam", "Inst", "Ship", "Earth".
+        nav_ref_in: str
+            Defines the original raw data navigation reference: "None", "BT", "GGA", "VTG".
+        rssi_in: np.array(float)
+            Returned acoustic signal strength.
+        rssi_units_in: str
+            Units for returned acoustic signal strength: "Counts", "dB", "SNR".
+        excluded_dist_in: float
+            Distance below transducer for which data are excluded or marked invalid.
+        cells_above_sl_in: np.array(bool)
+            Bool array of depth cells above the sidelobe cutoff based on selected depth reference.
+        sl_cutoff_per_in: float
+            Percent cutoff defined by cos(angle).
+        sl_cutoff_num_in: float
+            User specified number of cells to cutoff above sl_cutoff.
+        sl_cutoff_type_in: str
+            Method used to compute cutoff "Percent" or "Number".
+        sl_lag_effect_in: np.array(float)
+            Lag effect for each ensemble, in m.
+        wm_in: str
+            Watermode for TRDI or 'Variable' for SonTek.
+        blank_in: float
+            Blanking distance, in m.
+        corr_in: np.array(float)
+            Correlation values for water track. Optional.
+        surface_vel_in: np.array(float)
+            Surface velocity data for RiverRay, RiverPro, RioPro. Optional.
+        surface_rssi_in: np.array(float)
+            Returned acoustic signal strength for RiverRay, RiverPro, RioPro. Optional.
+        surface_corr_in: np.array(float)
+            Surface velocity correlations for RiverRay, RiverPro, RioPro. Optional.
+        surface_num_cells_in: np.array(float)
+            Number of surface cells in each ensemble for RiverRay, RiverPro, RioPro. Optional.
+        sl_cutoff_m: np.array(float)
+            Depth in meters of side lobe cutoff to center of cells.
+        ping_type: np.array(str)
+            Indicates type of ping used for water tracking
+        """
+
+        # Set object properties from input data standard for all ADCPs
+        self.frequency = freq_in
+        self.orig_coord_sys = coord_sys_in
+        self.coord_sys = coord_sys_in
+        self.orig_nav_ref = nav_ref_in
+        self.nav_ref = nav_ref_in
+        self.water_mode = wm_in
+        self.excluded_dist_m = excluded_dist_in
+        self.rssi_units = rssi_units_in
+        max_cells = cells_above_sl_in.shape[0]
+        self.ping_type = np.tile(np.array([ping_type]), (max_cells, 1))
+        self.use_measurement_thresholds = use_measurement_thresholds
+
+        # Set object properties that depend on the presence or absence of surface cells
+        if np.sum(surface_num_cells_in) > 0:
+            surface_num_cells_in[np.isnan(surface_num_cells_in)] = 0
+
+            num_ens = cells_above_sl_in.shape[1]
+            num_reg_cells = vel_in.shape[1]
+            max_surf_cells = max_cells - num_reg_cells
+
+            # Combine surface velocity bins and regular velocity bins into one matrix
+            self.raw_vel_mps = np.tile([np.nan], [4, max_cells, num_ens])
+            self.rssi = np.tile([np.nan], [4, max_cells, num_ens])
+            self.corr = np.tile([np.nan], [4, max_cells, num_ens])
+
+            if max_surf_cells > 0:
+                self.raw_vel_mps[:, :max_surf_cells, :] = surface_vel_in[:, :max_surf_cells, :]
+                self.rssi[:, :max_surf_cells, :] = surface_rssi_in[:, :max_surf_cells, :]
+                self.corr[:, :max_surf_cells, :] = surface_corr_in[:, :max_surf_cells, :]
+
+            for i_ens in range(num_ens):
+                n_surf = int(surface_num_cells_in[i_ens])
+                self.raw_vel_mps[:, n_surf:n_surf + num_reg_cells, i_ens] = vel_in[:, :num_reg_cells, i_ens]
+                self.rssi[:, n_surf:n_surf + num_reg_cells, i_ens] = rssi_in[:, :num_reg_cells, i_ens]
+                self.corr[:, n_surf:n_surf + num_reg_cells, i_ens] = corr_in[:, :num_reg_cells, i_ens]
+                self.ping_type[:n_surf, i_ens] = 'S'
+        else:
+            # No surface cells
+            self.raw_vel_mps = vel_in
+            self.rssi = rssi_in
+            if corr_in.any():
+                self.corr = corr_in
+            else:
+                # No correlations input
+                self.corr = np.tile(np.nan, rssi_in.shape)
+
+        # TODO This does not seem correct: if the raw data are in beam coordinates,
+        #  these copies are beam velocities, not u/v/w/d components.
+        self.u_mps = np.copy(self.raw_vel_mps)[0, :, :]
+        self.v_mps = np.copy(self.raw_vel_mps)[1, :, :]
+        self.w_mps = np.copy(self.raw_vel_mps)[2, :, :]
+        self.d_mps = np.copy(self.raw_vel_mps)[3, :, :]
+
+        self.orig_excluded_dist_m = excluded_dist_in
+
+        # In some rare situations the blank is empty, so it is set to excluded_dist_in
+        try:
+            self.blanking_distance_m = float(blank_in)
+        except (ValueError, TypeError):
+            self.blanking_distance_m = excluded_dist_in
+            
+        self.cells_above_sl = cells_above_sl_in
+        self.cells_above_sl_bt = cells_above_sl_in
+        self.sl_cutoff_percent = sl_cutoff_per_in
+        self.sl_cutoff_number = sl_cutoff_num_in
+        self.sl_cutoff_type = sl_cutoff_type_in
+        self.sl_lag_effect_m = sl_lag_effect_in
+        self.sl_cutoff_m = sl_cutoff_m
+        
+        # Set filter defaults to no filtering and no interruption
+        self.beam_filter = 3
+        self.d_filter = 'Off'
+        self.d_filter_thresholds = {}
+        self.w_filter = 'Off'
+        self.w_filter_thresholds = {}
+        self.smooth_filter = False
+        self.interpolate_ens = 'None'
+        self.interpolate_cells = 'None'
+        
+        # Determine originally valid data
+
+        # Initialize valid data property
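+        # The 9 slices of valid_data appear to follow the QRev convention:
+        # 0-composite of all filters, 1-original data, 2-difference velocity,
+        # 3-vertical velocity, 4-smooth filter, 5-beam filter, 6-excluded
+        # distance, 7-SNR filter, 8-depth filter.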
+        self.valid_data = np.tile(self.cells_above_sl, [9, 1, 1])
+        
+        # Find invalid raw data
+        valid_vel = np.tile(self.cells_above_sl, [4, 1, 1])
+        valid_vel[np.isnan(self.raw_vel_mps)] = False
+            
+        # Identify invalid velocity data (less than 3 valid beams)
+        valid_vel_sum = np.sum(valid_vel, axis=0)
+        valid_data2 = np.copy(self.cells_above_sl)
+        valid_data2[valid_vel_sum < 3] = False
+        
+        # Set valid_data property for original data
+        self.valid_data[1, :, :] = valid_data2
+        
+        # Combine all filter data to composite valid data
+        self.all_valid_data()
+        
+        # Estimate the number of cells in invalid ensembles using
+        # adjacent valid ensembles
+        valid_data_2_sum = np.nansum(self.valid_data[1], 0)
+        self.invalid_index = np.where(valid_data_2_sum == 0)[0]
+        self.num_invalid = []
+        n_invalid = len(self.invalid_index)
+        for n in range(n_invalid):
+            # Find last valid ensemble prior to the invalid ensemble
+            idx1 = np.where(valid_data_2_sum[:self.invalid_index[n]] > 0)[0]
+            if len(idx1) > 0:
+                idx1 = idx1[-1]
+            else:
+                idx1 = self.invalid_index[n]
+
+            # Find next valid ensemble after the invalid ensemble
+            idx2 = np.where(valid_data_2_sum[self.invalid_index[n]:] > 0)[0]
+            if len(idx2) > 0:
+                idx2 = idx2[0] + self.invalid_index[n]
+            else:
+                idx2 = self.invalid_index[n]
+
+            # Estimate number of cells in invalid ensemble as the mean of the neighbors
+            self.num_invalid.append(np.floor((valid_data_2_sum[idx1] + valid_data_2_sum[idx2]) / 2))
+            
+        # Set processed data to non-interpolated valid data
+        self.u_processed_mps = np.copy(self.u_mps)
+        self.v_processed_mps = np.copy(self.v_mps)
+        self.u_processed_mps[np.logical_not(self.valid_data[0])] = np.nan
+        self.v_processed_mps[np.logical_not(self.valid_data[0])] = np.nan
+        
+        # Compute SNR range if SNR data is provided
+        if rssi_units_in == 'SNR':
+            self.compute_snr_rng()
+
+    def populate_from_qrev_mat(self, transect):
+        """Populates the object using data from previously saved QRev Matlab file.
+
+        Parameters
+        ----------
+        transect: mat_struct
+            Matlab data structure obtained from sio.loadmat
+        """
+
+        # Data requiring manipulation (special case for 1 ensemble or 1 cell)
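+        # A 2-D rawVel array is ambiguous because singleton dimensions are dropped
+        # when the Matlab file is saved/loaded: it can be (beams x ensembles) for a
+        # single cell or (beams x cells) for a single ensemble. The shape of the
+        # bottom track velocities is used to distinguish the two cases.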
+        if len(transect.wVel.rawVel_mps.shape) == 2:
+            if len(transect.boatVel.btVel.rawVel_mps.shape) > 1:
+                # Multiple ensembles with one cell
+                self.raw_vel_mps = np.moveaxis(transect.wVel.rawVel_mps, 1, 0)
+                self.raw_vel_mps = self.raw_vel_mps.reshape(self.raw_vel_mps.shape[0], 1, self.raw_vel_mps.shape[1])
+                self.corr = np.moveaxis(transect.wVel.corr, 1, 0)
+                self.corr = self.corr.reshape(self.corr.shape[0], 1, self.corr.shape[1])
+                self.rssi = np.moveaxis(transect.wVel.rssi, 1, 0)
+                self.rssi = self.rssi.reshape(self.rssi.shape[0], 1, self.rssi.shape[1])
+                self.valid_data = np.moveaxis(transect.wVel.validData, 1, 0)
+                self.valid_data = self.valid_data.reshape(self.valid_data.shape[0], 1, self.valid_data.shape[1])
+                self.u_earth_no_ref_mps = transect.wVel.uEarthNoRef_mps
+                self.u_earth_no_ref_mps = self.u_earth_no_ref_mps.reshape(1, self.u_earth_no_ref_mps.shape[0])
+                self.v_earth_no_ref_mps = transect.wVel.vEarthNoRef_mps
+                self.v_earth_no_ref_mps = self.v_earth_no_ref_mps.reshape(1, self.v_earth_no_ref_mps.shape[0])
+                self.u_mps = transect.wVel.u_mps
+                self.u_mps = self.u_mps.reshape(1, self.u_mps.shape[0])
+                self.v_mps = transect.wVel.v_mps
+                self.v_mps = self.v_mps.reshape(1, self.v_mps.shape[0])
+                self.u_processed_mps = transect.wVel.uProcessed_mps
+                self.u_processed_mps = self.u_processed_mps.reshape(1, self.u_processed_mps.shape[0])
+                self.v_processed_mps = transect.wVel.vProcessed_mps
+                self.v_processed_mps = self.v_processed_mps.reshape(1, self.v_processed_mps.shape[0])
+                self.w_mps = transect.wVel.w_mps
+                self.w_mps = self.w_mps.reshape(1, self.w_mps.shape[0])
+                self.d_mps = transect.wVel.d_mps
+                self.d_mps = self.d_mps.reshape(1, self.d_mps.shape[0])
+                self.snr_rng = transect.wVel.snrRng
+                self.snr_rng = self.snr_rng.reshape(1, self.snr_rng.shape[0])
+                self.cells_above_sl = transect.wVel.cellsAboveSL.astype(bool)
+                self.cells_above_sl = self.cells_above_sl.reshape(1, self.cells_above_sl.shape[0])
+                self.cells_above_sl_bt = transect.wVel.cellsAboveSLbt.astype(bool)
+                self.cells_above_sl_bt = self.cells_above_sl_bt.reshape(1, self.cells_above_sl_bt.shape[0])
+                self.sl_lag_effect_m = np.array([transect.wVel.slLagEffect_m])
+                # Ping type
+                if hasattr(transect.wVel, 'ping_type'):
+                    if isinstance(transect.wVel.ping_type, str):
+                        self.ping_type = np.tile(transect.wVel.ping_type, self.d_mps.shape)
+                    else:
+                        self.ping_type = transect.wVel.ping_type[np.newaxis, :]
+                else:
+                    self.ping_type = np.tile('U', self.d_mps.shape)
+            else:
+                # One ensemble with multiple cells
+                self.raw_vel_mps = np.moveaxis(transect.wVel.rawVel_mps, 1, 0)
+                self.raw_vel_mps = self.raw_vel_mps.reshape(self.raw_vel_mps.shape[0], self.raw_vel_mps.shape[1], 1)
+                self.corr = np.moveaxis(transect.wVel.corr, 1, 0)
+                self.corr = self.corr.reshape(self.corr.shape[0], self.corr.shape[1], 1)
+                self.rssi = np.moveaxis(transect.wVel.rssi, 1, 0)
+                self.rssi = self.rssi.reshape(self.rssi.shape[0], self.rssi.shape[1], 1)
+                self.valid_data = np.moveaxis(transect.wVel.validData, 1, 0)
+                self.valid_data = self.valid_data.reshape(self.valid_data.shape[0], self.valid_data.shape[1], 1)
+                self.u_earth_no_ref_mps = transect.wVel.uEarthNoRef_mps
+                self.u_earth_no_ref_mps = self.u_earth_no_ref_mps.reshape(self.u_earth_no_ref_mps.shape[0], 1)
+                self.v_earth_no_ref_mps = transect.wVel.vEarthNoRef_mps
+                self.v_earth_no_ref_mps = self.v_earth_no_ref_mps.reshape(self.v_earth_no_ref_mps.shape[0], 1)
+                self.u_mps = transect.wVel.u_mps
+                self.u_mps = self.u_mps.reshape(self.u_mps.shape[0], 1)
+                self.v_mps = transect.wVel.v_mps
+                self.v_mps = self.v_mps.reshape(self.v_mps.shape[0], 1)
+                self.u_processed_mps = transect.wVel.uProcessed_mps
+                self.u_processed_mps = self.u_processed_mps.reshape(self.u_processed_mps.shape[0], 1)
+                self.v_processed_mps = transect.wVel.vProcessed_mps
+                self.v_processed_mps = self.v_processed_mps.reshape(self.v_processed_mps.shape[0], 1)
+                self.w_mps = transect.wVel.w_mps
+                self.w_mps = self.w_mps.reshape(self.w_mps.shape[0], 1)
+                self.d_mps = transect.wVel.d_mps
+                self.d_mps = self.d_mps.reshape(self.d_mps.shape[0], 1)
+                self.snr_rng = transect.wVel.snrRng
+                self.snr_rng = self.snr_rng.reshape(self.snr_rng.shape[0], 1)
+                self.cells_above_sl = transect.wVel.cellsAboveSL.astype(bool)
+                self.cells_above_sl = self.cells_above_sl.reshape(self.cells_above_sl.shape[0], 1)
+                self.cells_above_sl_bt = transect.wVel.cellsAboveSLbt.astype(bool)
+                self.cells_above_sl_bt = self.cells_above_sl_bt.reshape(self.cells_above_sl_bt.shape[0], 1)
+                self.sl_lag_effect_m = np.array([transect.wVel.slLagEffect_m])
+                # Ping type
+                if hasattr(transect.wVel, 'ping_type'):
+                    if isinstance(transect.wVel.ping_type, str):
+                        self.ping_type = np.tile(transect.wVel.ping_type, self.d_mps.shape)
+                    else:
+                        self.ping_type = transect.wVel.ping_type[:, np.newaxis]
+                else:
+                    self.ping_type = np.tile('U', self.d_mps.shape)
+
+        else:
+            n_ensembles = transect.wVel.u_mps.shape[1]
+            n_cells = transect.wVel.u_mps.shape[0]
+            if transect.wVel.rawVel_mps.shape[2] != n_ensembles or transect.wVel.rawVel_mps.shape[1] != n_cells:
+                self.raw_vel_mps = np.moveaxis(transect.wVel.rawVel_mps, 2, 0)
+            else:
+                self.raw_vel_mps = transect.wVel.rawVel_mps
+
+            if transect.wVel.corr.shape[2] != n_ensembles or transect.wVel.corr.shape[1] != n_cells:
+                self.corr = np.moveaxis(transect.wVel.corr, 2, 0)
+            else:
+                self.corr = transect.wVel.corr
+
+            if transect.wVel.rssi.shape[2] != n_ensembles or transect.wVel.rssi.shape[1] != n_cells:
+                self.rssi = np.moveaxis(transect.wVel.rssi, 2, 0)
+            else:
+                self.rssi = transect.wVel.rssi
+
+            if transect.wVel.validData.shape[2] != n_ensembles or transect.wVel.validData.shape[1] != n_cells:
+                self.valid_data = np.moveaxis(transect.wVel.validData, 2, 0)
+            else:
+                self.valid_data = transect.wVel.validData
+            self.u_earth_no_ref_mps = transect.wVel.uEarthNoRef_mps
+            self.v_earth_no_ref_mps = transect.wVel.vEarthNoRef_mps
+            self.u_mps = transect.wVel.u_mps
+            self.v_mps = transect.wVel.v_mps
+            self.u_processed_mps = transect.wVel.uProcessed_mps
+            self.v_processed_mps = transect.wVel.vProcessed_mps
+            self.w_mps = transect.wVel.w_mps
+            self.d_mps = transect.wVel.d_mps
+            self.snr_rng = transect.wVel.snrRng
+            self.cells_above_sl = transect.wVel.cellsAboveSL.astype(bool)
+            self.cells_above_sl_bt = transect.wVel.cellsAboveSLbt.astype(bool)
+            self.sl_lag_effect_m = transect.wVel.slLagEffect_m
+            # Ping type
+            if hasattr(transect.wVel, 'ping_type'):
+                if isinstance(transect.wVel.ping_type, str):
+                    self.ping_type = np.tile(transect.wVel.ping_type, self.d_mps.shape)
+                else:
+                    self.ping_type = transect.wVel.ping_type
+            else:
+                self.ping_type = np.tile('U', self.d_mps.shape)
+
+        self.valid_data = self.valid_data.astype(bool)
+        # Fix for moving-bed transects that did not have 3D array indices adjusted properly when saved
+        # if self.valid_data.shape[0] == self.u_processed_mps.shape[1]:
+        #     self.valid_data = np.moveaxis(self.valid_data, 0, 2)
+        #     self.raw_vel_mps = np.moveaxis(self.raw_vel_mps, 0, 2)
+        #     self.corr = np.moveaxis(self.corr, 0, 2)
+        #     self.rssi = np.moveaxis(self.rssi, 0, 2)
+        self.frequency = transect.wVel.frequency
+        self.orig_coord_sys = transect.wVel.origCoordSys
+        self.orig_nav_ref = transect.wVel.origNavRef
+        self.rssi_units = transect.wVel.rssiUnits
+        self.water_mode = transect.wVel.waterMode
+        self.blanking_distance_m = transect.wVel.blankingDistance_m
+        self.invalid_index = transect.wVel.invalidIndex
+        if isinstance(transect.wVel.numInvalid, np.ndarray):
+            self.num_invalid = transect.wVel.numInvalid.tolist()
+        else:
+            self.num_invalid = transect.wVel.numInvalid
+
+        # Settings
+        self.beam_filter = transect.wVel.beamFilter
+        self.d_filter = transect.wVel.dFilter
+        self.d_filter_thresholds = self.struct_to_dict(transect.wVel.dFilterThreshold)
+        self.w_filter = transect.wVel.wFilter
+        self.w_filter_thresholds = self.struct_to_dict(transect.wVel.wFilterThreshold)
+        self.excluded_dist_m = transect.wVel.excludedDist
+        if hasattr(transect.wVel, 'orig_excludedDist'):
+            self.orig_excluded_dist_m = transect.wVel.orig_excludedDist
+        else:
+            self.orig_excluded_dist_m = transect.wVel.excludedDist
+        self.smooth_filter = transect.wVel.smoothFilter
+        self.smooth_speed = transect.wVel.smoothSpeed
+        self.smooth_upper_limit = transect.wVel.smoothUpperLimit
+        self.smooth_lower_limit = transect.wVel.smoothLowerLimit
+        self.snr_filter = transect.wVel.snrFilter
+        self.wt_depth_filter = transect.wVel.wtDepthFilter
+        self.interpolate_ens = transect.wVel.interpolateEns
+        self.interpolate_cells = transect.wVel.interpolateCells
+        self.coord_sys = transect.wVel.coordSys
+        self.nav_ref = transect.wVel.navRef
+        self.sl_cutoff_percent = transect.wVel.slCutoffPer
+        self.sl_cutoff_number = transect.wVel.slCutoffNum
+        self.sl_cutoff_type = transect.wVel.slCutoffType
+
+        # Use measurement for filter
+        if hasattr(transect.wVel, 'use_measurement_thresholds'):
+            self.use_measurement_thresholds = self.struct_to_dict(transect.wVel.use_measurement_thresholds)
+            self.d_meas_thresholds = self.struct_to_dict(transect.wVel.d_meas_thresholds)
+            self.w_meas_thresholds = self.struct_to_dict(transect.wVel.w_meas_thresholds)
+        else:
+            self.use_measurement_thresholds = False
+            self.d_meas_thresholds = {}
+            self.w_meas_thresholds = {}
+
+    @staticmethod
+    def struct_to_dict(struct):
+        """If input is a mat structure it converts it into a dictionary.
+
+        Parameters
+        ----------
+        struct: mat.struct or other
+            Data to be converted
+
+        Returns
+        -------
+        result: dict or other
+            Result of conversion
+        """
+
+        try:
+            keys = struct._fieldnames
+            result = {}
+            for key in keys:
+                result[key] = struct.__dict__[key]
+        except AttributeError:
+            result = struct
+        return result
+
+    def change_coord_sys(self, new_coord_sys, sensors, adcp):
+        """This function allows the coordinate system to be changed.
+
+        Current implementation only allows a change to a higher order
+        coordinate system (Beam - Inst - Ship - Earth).
+
+        Parameters
+        ----------
+        new_coord_sys: str
+            New coordinate system (Beam, Inst, Ship, Earth)
+        sensors: Sensors
+            Object of Sensors
+        adcp: InstrumentData
+            Object of instrument data
+        """
+        if isinstance(self.orig_coord_sys, (list, np.ndarray)):
+            o_coord_sys = self.orig_coord_sys[0].strip()
+        else:
+            o_coord_sys = self.orig_coord_sys.strip()
+
+        orig_sys = None
+        new_sys = None
+
+        if o_coord_sys != new_coord_sys:
+            
+            # Assign the transformation matrix and retrieve the sensor data
+            t_matrix = copy.deepcopy(adcp.t_matrix.matrix)
+            t_matrix_freq = copy.deepcopy(adcp.frequency_khz)
+
+            p = getattr(sensors.pitch_deg, sensors.pitch_deg.selected).data
+            r = getattr(sensors.roll_deg, sensors.roll_deg.selected).data
+            h = getattr(sensors.heading_deg, sensors.heading_deg.selected).data
+            
+            # Modify the transformation matrix and heading, pitch,
+            # and roll values based on the original coordinate
+            # system so that only the needed values are used in
+            # computing the new coordinate system.
+            if o_coord_sys.strip() == 'Beam':
+                orig_sys = 1
+            elif o_coord_sys.strip() == 'Inst':
+                orig_sys = 2
+            elif o_coord_sys.strip() == 'Ship':
+                orig_sys = 3
+                p = np.zeros(h.shape)
+                r = np.zeros(h.shape)
+                t_matrix = np.eye(len(t_matrix))
+            elif o_coord_sys.strip() == 'Earth':
+                orig_sys = 4
+
+            # Assign a value to the new coordinate system
+            if new_coord_sys.strip() == 'Beam':
+                new_sys = 1
+            elif new_coord_sys.strip() == 'Inst':
+                new_sys = 2
+            elif new_coord_sys.strip() == 'Ship':
+                new_sys = 3
+            elif new_coord_sys.strip() == 'Earth':
+                new_sys = 4
+                
+            # Check to ensure the new coordinate system is a higher order than the original system
+            if new_sys - orig_sys > 0:
+                
+                # Compute trig functions for heading, pitch, and roll
+                ch = np.cos(np.deg2rad(h))
+                sh = np.sin(np.deg2rad(h))
+                cp = np.cos(np.deg2rad(p))
+                sp = np.sin(np.deg2rad(p))
+                cr = np.cos(np.deg2rad(r))
+                sr = np.sin(np.deg2rad(r))
+
+                n_ens = self.raw_vel_mps.shape[2]
+                
+                for ii in range(n_ens):
+                    
+                    # Compute matrix for heading, pitch, and roll
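+                    # Standard Euler rotation combining heading (about the
+                    # vertical axis) with the pitch and roll tilts; it rotates
+                    # instrument (or ship) velocities into earth coordinates.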
+                    hpr_matrix = np.array([[((ch[ii] * cr[ii]) + (sh[ii]*sp[ii] * sr[ii])),
+                                            (sh[ii] * cp[ii]),
+                                            ((ch[ii] * sr[ii]) - sh[ii]*sp[ii] * cr[ii])],
+                                           [(-1 * sh[ii] * cr[ii]) + (ch[ii] * sp[ii] * sr[ii]),
+                                            ch[ii] * cp[ii],
+                                            (-1 * sh[ii] * sr[ii])-(ch[ii] * sp[ii] * cr[ii])],
+                                           [(-1.*cp[ii] * sr[ii]),
+                                            sp[ii],
+                                            cp[ii] * cr[ii]]])
+                    
+                    # Transform beam coordinates
+                    if o_coord_sys == 'Beam':
+                        
+                        # Determine frequency index for transformation
+                        if len(t_matrix.shape) > 2:
+                            idx_freq = np.where(t_matrix_freq == self.frequency[ii])
+                            t_mult = np.copy(t_matrix[:, :, idx_freq])
+                        else:
+                            t_mult = np.copy(t_matrix)
+                            
+                        # Get velocity data
+                        vel_beams = np.copy(self.raw_vel_mps[:, :, ii])
+                        
+                        # Apply transformation matrix for 4 beam solutions
+                        temp_t = t_mult.dot(vel_beams)
+                        
+                        # Apply hpr_matrix
+                        temp_thpr = hpr_matrix.dot(temp_t[:3])
+                        temp_thpr = np.vstack([temp_thpr, temp_t[3]])
+                        
+                        # Check for invalid beams
+                        invalid_idx = np.isnan(vel_beams)
+                        
+                        # Identify rows requiring 3 beam solutions
+                        n_invalid_col = np.sum(invalid_idx, axis=0)
+                        col_idx = np.where(n_invalid_col == 1)[0]
+                        
+                        # Compute 3 beam solution, if necessary
+                        if len(col_idx) > 0:
+                            for i3 in range(len(col_idx)):
+                                
+                                # Identify the invalid beam
+                                vel_3_beam = vel_beams[:, col_idx[i3]]
+                                idx_3_beam = np.where(np.isnan(vel_3_beam))[0]
+                        
+                                # 3 beam solution for non-RiverRay
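+                                # The missing beam is recovered from the
+                                # error-velocity constraint: with the invalid
+                                # beam zeroed, the residual error velocity
+                                # t_mult[3, :].dot(v) is attributed to the
+                                # missing beam and back-solved so the completed
+                                # solution has zero error velocity.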
+                                vel_3_beam_zero = np.copy(vel_3_beam)
+                                vel_3_beam_zero[idx_3_beam] = 0
+                                vel_error = t_mult[3, :].dot(vel_3_beam_zero)
+                                vel_3_beam[idx_3_beam] = -1 * vel_error / t_mult[3, idx_3_beam]
+                                temp_t = t_mult.dot(vel_3_beam)
+                                
+                                # Apply transformation matrix for 3
+                                # beam solutions
+                                temp_thpr[0:3, col_idx[i3]] = hpr_matrix.dot(temp_t[:3])
+                                temp_thpr[3, col_idx[i3]] = np.nan
+                            
+                    else:
+                        # Get velocity data
+                        vel_raw = np.copy(np.squeeze(self.raw_vel_mps[:, :, ii]))
+                        temp_thpr = np.array(hpr_matrix).dot(vel_raw[:3, :])
+                        temp_thpr = np.vstack([temp_thpr, vel_raw[3, :]])
+                        
+                    # Update object
+                    temp_thpr = temp_thpr.T
+                    self.u_mps[:, ii] = temp_thpr[:, 0]
+                    self.v_mps[:, ii] = temp_thpr[:, 1]
+                    self.w_mps[:, ii] = temp_thpr[:, 2]
+                    self.d_mps[:, ii] = temp_thpr[:, 3]
+
+                # Because RiverRay has a variable number of bins, the raw data
+                # arrays may be padded with zeros. The next four statements
+                # change those zeros to nan.
+                self.u_mps[self.u_mps == 0] = np.nan
+                self.v_mps[self.v_mps == 0] = np.nan
+                self.w_mps[self.w_mps == 0] = np.nan
+                self.d_mps[self.d_mps == 0] = np.nan
+                
+                # Assign processed object properties
+                self.u_processed_mps = np.copy(self.u_mps)
+                self.v_processed_mps = np.copy(self.v_mps)
+                
+                # Assign coordinate system and reference properties
+                self.coord_sys = new_coord_sys
+                self.nav_ref = self.orig_nav_ref
+                    
+            else:
+                
+                # Reset velocity properties to raw values
+                self.u_mps = np.copy(self.raw_vel_mps[0])
+                self.v_mps = np.copy(self.raw_vel_mps[1])
+                self.w_mps = np.copy(self.raw_vel_mps[2])
+                self.d_mps = np.copy(self.raw_vel_mps[3])
+                
+                if adcp.manufacturer == 'TRDI':
+                    self.u_mps[self.u_mps == 0] = np.nan
+                    self.v_mps[self.v_mps == 0] = np.nan
+                    self.w_mps[self.w_mps == 0] = np.nan
+                    self.d_mps[self.d_mps == 0] = np.nan
+                    
+                # Assign processed properties
+                self.u_processed_mps = np.copy(self.u_mps)
+                self.v_processed_mps = np.copy(self.v_mps)
+                
+        else:
+            
+            # Reset velocity properties to raw values
+            self.u_mps = np.copy(self.raw_vel_mps[0])
+            self.v_mps = np.copy(self.raw_vel_mps[1])
+            self.w_mps = np.copy(self.raw_vel_mps[2])
+            self.d_mps = np.copy(self.raw_vel_mps[3])
+            
+            if adcp.manufacturer == 'TRDI':
+                self.u_mps[self.u_mps == 0] = np.nan
+                self.v_mps[self.v_mps == 0] = np.nan
+                self.w_mps[self.w_mps == 0] = np.nan
+                self.d_mps[self.d_mps == 0] = np.nan
+                
+            # Assign processed properties
+            self.u_processed_mps = np.copy(self.u_mps)
+            self.v_processed_mps = np.copy(self.v_mps)
+            
+        if new_coord_sys == 'Earth':
+            self.u_earth_no_ref_mps = np.copy(self.u_mps)
+            self.v_earth_no_ref_mps = np.copy(self.v_mps)
+                
+    def set_nav_reference(self, boat_vel):           
+        """This function sets the navigation reference.
+
+        The selected boat velocity reference is added to the reference-free
+        earth velocities, which is equivalent to removing the current reference
+        and applying the selected one.
+
+        Parameters
+        ----------
+        boat_vel: BoatStructure
+            Object of BoatStructure
+        """
+        
+        # Apply selected navigation reference
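+        # Water velocities are rebuilt by adding the selected boat velocity to the
+        # reference-free earth velocities, so the previous reference does not need
+        # to be removed explicitly.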
+        boat_select = getattr(boat_vel, boat_vel.selected)
+        if boat_select is not None:
+            self.u_mps = np.add(self.u_earth_no_ref_mps, boat_select.u_processed_mps)
+            self.v_mps = np.add(self.v_earth_no_ref_mps, boat_select.v_processed_mps)
+            self.nav_ref = boat_select.nav_ref
+        else:
+            self.u_mps = repmat([np.nan],
+                                self.u_earth_no_ref_mps.shape[0],
+                                self.u_earth_no_ref_mps.shape[1])
+            self.v_mps = repmat([np.nan],
+                                self.v_earth_no_ref_mps.shape[0],
+                                self.v_earth_no_ref_mps.shape[1])
+            if boat_vel.selected == 'bt_vel':
+                self.nav_ref = 'BT'
+            elif boat_vel.selected == 'gga_vel':
+                self.nav_ref = 'GGA'
+            elif boat_vel.selected == 'vtg_vel':
+                self.nav_ref = 'VTG'
+        
+        valid_data2 = np.copy(self.cells_above_sl)
+        valid_data2[np.isnan(self.u_mps)] = False
+        self.valid_data[1] = valid_data2
+        
+        # Duplicate original to other filters that have yet to be applied
+        self.valid_data[2:] = np.tile(self.valid_data[1], [7, 1, 1])
+        
+        # Combine all filter data and update processed properties
+        self.all_valid_data()
+        
+    def change_heading(self, boat_vel, heading_chng):
+        """Adjusts the velocity vectors for a change in heading due change in
+        magnetic variation or heading offset.
+
+        Parameters
+        ----------
+        boat_vel: BoatData
+            Object of BoatData
+        heading_chng: float
+            Heading change due to change in magvar or offset, in degrees.
+        """
+        u_nr = self.u_earth_no_ref_mps
+        v_nr = self.v_earth_no_ref_mps
+        direction, mag = cart2pol(u_nr, v_nr)
+        u_nr_rotated, v_nr_rotated = pol2cart(direction - np.deg2rad(heading_chng), mag)
+        self.u_earth_no_ref_mps = u_nr_rotated
+        self.v_earth_no_ref_mps = v_nr_rotated
+
+        # Reprocess water data to get navigation reference corrected velocities
+        self.set_nav_reference(boat_vel)
+        
+    def change_heading_source(self, boat_vel, heading):
+        """Applies changes to water velocity when the heading source is changed.
+
+        Typically called when the heading source is changed between external and internal.
+
+        Parameters
+        ----------
+        boat_vel: BoatData
+            Object of BoatData
+        heading: np.array(float)
+            New heading data, in degrees
+        """
+        u_nr = self.u_earth_no_ref_mps
+        v_nr = self.v_earth_no_ref_mps
+        direction, mag = cart2pol(u_nr, v_nr)
+        u_nr_rotated, v_nr_rotated = pol2cart(direction
+                                              - np.deg2rad(repmat(heading, len(mag), 1)), mag)
+        self.u_earth_no_ref_mps = u_nr_rotated
+        self.v_earth_no_ref_mps = v_nr_rotated
+
+        self.set_nav_reference(boat_vel)
+            
+    def apply_interpolation(self, transect, ens_interp='None', cells_interp='None'):
+        """Coordinates the application of water velocity interpolation.
+
+        Parameters
+        ----------
+        transect: TransectData
+            Object of TransectData
+        ens_interp: str
+            Specifies type of interpolation for ensembles
+        cells_interp: str
+            Specifies type of interpolation for cells
+        """
+
+        self.u_processed_mps = np.tile([np.nan], self.u_mps.shape)
+        self.v_processed_mps = np.tile([np.nan], self.v_mps.shape)
+        self.u_processed_mps[self.valid_data[0]] = self.u_mps[self.valid_data[0]]
+        self.v_processed_mps[self.valid_data[0]] = self.v_mps[self.valid_data[0]]
+        
+        # Determine interpolation methods to apply
+        if ens_interp == 'None':
+            ens_interp = self.interpolate_ens
+        else:
+            self.interpolate_ens = ens_interp
+
+        if cells_interp == 'None':
+            cells_interp = self.interpolate_cells
+        else:
+            self.interpolate_cells = cells_interp
+
+        if ens_interp == 'abba' or cells_interp == 'abba':
+            self.interpolate_ens = 'abba'
+            self.interpolate_cells = 'abba'
+            self.interpolate_abba(transect)
+        else:
+            if ens_interp == 'None':
+                # Sets invalid data to nan with no interpolation
+                self.interpolate_ens_none()
+            elif ens_interp == 'ExpandedT':
+                # Sets interpolation to None, as the interpolation is done in class QComp
+                self.interpolate_ens_next()
+            elif ens_interp == 'Hold9':
+                # Interpolates using SonTek's method of holding last valid for up to 9 samples
+                self.interpolate_ens_hold_last_9()
+            elif ens_interp == 'Hold':
+                # Interpolates by holding last valid indefinitely
+                self.interpolate_ens_hold_last()
+            elif ens_interp == 'Linear':
+                # Interpolates using linear interpolation
+                self.interpolate_ens_linear(transect)
+            elif ens_interp == 'TRDI':
+                # TRDI is applied in discharge
+                self.interpolate_ens_none()
+                self.interpolate_ens = ens_interp
+
+            # Apply specified cell interpolation method
+            if cells_interp == 'None':
+                # Sets invalid data to nan with no interpolation
+                self.interpolate_cells_none()
+            elif cells_interp == 'TRDI':
+                # Use TRDI method to interpolate invalid interior cells
+                self.interpolate_cells_trdi(transect)
+            elif cells_interp == 'Linear':
+                # Uses linear interpolation to interpolate velocity for all
+                # invalid bins, including those in invalid ensembles
+                self.interpolate_cells_linear(transect)
+        
+    def apply_filter(self, transect, beam=None, difference=None, difference_threshold=None, vertical=None,
+                     vertical_threshold=None, other=None, excluded=None, snr=None, wt_depth=None):
+        """Coordinates application of specified filters and subsequent interpolation.
+        Parameters
+        ----------
+        transect: TransectData
+            Object of TransectData
+        beam: int
+            Setting for beam filter (3, 4, or -1)
+        difference: str
+            Setting for difference filter (Auto, Off, Manual)
+        difference_threshold: float
+            Threshold value for Manual setting.
+        vertical: str
+            Setting for vertical filter (Auto, Off, Manual)
+        vertical_threshold: float
+            Threshold value for Manual setting.
+        other:
+            Setting for other filters (Off, Auto)
+        excluded:
+            Excluded distance below the transducer, in m
+        snr: str
+            SNR filter setting (Auto, Off)
+        wt_depth: bool
+            Setting for marking water data invalid if no available depth
+        """
+
+        # Determine filters to apply
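+        # Building a set of the arguments collapses an all-None call to {None}; more
+        # than one element means at least one filter was specified, so only the
+        # specified filters are applied. Otherwise all filters are reapplied with
+        # their current settings.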
+        if len({beam, difference, difference_threshold, vertical, vertical_threshold, other, excluded, snr,
+                wt_depth}) > 1:
+
+            if difference is not None:
+                if difference == 'Manual':
+                    self.filter_diff_vel(setting=difference, threshold=difference_threshold)
+                else:
+                    self.filter_diff_vel(setting=difference)
+            if vertical is not None:
+                if vertical == 'Manual':
+                    self.filter_vert_vel(setting=vertical, threshold=vertical_threshold)
+                else:
+                    self.filter_vert_vel(setting=vertical)
+            if other is not None:
+                self.filter_smooth(transect=transect, setting=other)
+            if excluded is not None:
+                self.filter_excluded(transect=transect, setting=excluded)
+            if snr is not None:
+                self.filter_snr(setting=snr)
+            if wt_depth is not None:
+                self.filter_wt_depth(transect=transect, setting=wt_depth)
+            if beam is not None:
+                self.filter_beam(setting=beam, transect=transect)
+        else:
+            self.filter_diff_vel(setting=self.d_filter, threshold=self.d_filter_thresholds)
+            self.filter_vert_vel(setting=self.w_filter, threshold=self.w_filter_thresholds)
+            self.filter_smooth(transect=transect, setting=self.smooth_filter)
+            self.filter_excluded(transect=transect, setting=self.excluded_dist_m)
+            self.filter_snr(setting=self.snr_filter)
+            self.filter_beam(setting=self.beam_filter, transect=transect)
+
+        # After filters have been applied, interpolation can be used to estimate
+        # values for invalid data; apply_interpolation is called separately.
+        # self.apply_interpolation(transect=transect)
+        
+    def sos_correction(self, ratio):
+        """Corrects water velocities for a change in speed of sound.
+
+        Parameters
+        ----------
+        ratio: float
+            Ratio of new speed of sound to old speed of sound
+        """
+
+        # Correct water velocities
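+        # Doppler velocities scale linearly with the assumed speed of sound,
+        # so a simple ratio correction is sufficient.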
+        self.u_mps = self.u_mps * ratio
+        self.v_mps = self.v_mps * ratio
+        self.w_mps = self.w_mps * ratio
+        self.u_earth_no_ref_mps = self.u_earth_no_ref_mps * ratio
+        self.v_earth_no_ref_mps = self.v_earth_no_ref_mps * ratio
+
+    def adjust_side_lobe(self, transect):
+        """Adjust the side lobe cutoff for vertical beam and interpolated depths.
+
+        Parameters
+        ----------
+        transect: TransectData
+            Object of TransectData
+        """
+
+        selected = transect.depths.selected
+        depth_selected = getattr(transect.depths, transect.depths.selected)
+        cells_above_slbt = np.copy(self.cells_above_sl_bt)
+        
+        # Compute cutoff for vertical beam depths
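+        # Side-lobe interference contaminates cells deeper than
+        # draft + (depth - draft) * cos(beam angle) - lag effect,
+        # so only cells shallower than this cutoff are retained.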
+        if selected == 'vb_depths':
+            sl_cutoff_vb = (depth_selected.depth_processed_m - depth_selected.draft_use_m) \
+                 * np.cos(np.deg2rad(transect.adcp.beam_angle_deg)) \
+                - self.sl_lag_effect_m + depth_selected.draft_use_m
+            cells_above_slvb = np.round(depth_selected.depth_cell_depth_m, 2) < np.round(sl_cutoff_vb, 2)
+            idx = np.where(np.logical_not(transect.depths.bt_depths.valid_data))
+            cells_above_slbt[:, idx] = cells_above_slvb[:, idx]
+            cells_above_sl = np.logical_and(cells_above_slbt, cells_above_slvb)
+        else:
+            cells_above_sl = cells_above_slbt
+
+        # Compute cutoff from interpolated depths
+        n_valid_beams = np.nansum(depth_selected.valid_beams, 0)
+
+        # Find ensembles with no valid beam depths
+        idx = np.where(n_valid_beams == 0)[0]
+
+        # Determine side lobe cutoff for ensembles with no valid beam depths
+        if len(idx) > 0:
+            if len(self.sl_lag_effect_m) > 1:
+                sl_lag_effect_m = self.sl_lag_effect_m[idx]
+            else:
+                sl_lag_effect_m = self.sl_lag_effect_m
+                
+            sl_cutoff_int = (depth_selected.depth_processed_m[idx] - depth_selected.draft_use_m) \
+                * np.cos(np.deg2rad(transect.adcp.beam_angle_deg)) - sl_lag_effect_m + \
+                depth_selected.draft_use_m
+            for i in range(len(idx)):
+                cells_above_sl[:, idx[i]] = np.less(depth_selected.depth_cell_depth_m[:, idx[i]], sl_cutoff_int[i])
+            
+        # Find ensembles with at least 1 invalid beam depth
+        idx = np.where(np.logical_and(n_valid_beams < 4, n_valid_beams > 0))[0]
+        if len(idx) > 0:
+            if len(self.sl_lag_effect_m) > 1:
+                sl_lag_effect_m = self.sl_lag_effect_m[idx]
+            else:
+                sl_lag_effect_m = self.sl_lag_effect_m
+                
+            sl_cutoff_int = (depth_selected.depth_processed_m[idx] - depth_selected.draft_use_m)\
+                * np.cos(np.deg2rad(transect.adcp.beam_angle_deg)) \
+                - sl_lag_effect_m + depth_selected.draft_use_m
+            cells_above_sl_int = np.tile(True, cells_above_sl.shape)
+
+            for i in range(len(idx)):
+                cells_above_sl_int[:, idx[i]] = np.less(depth_selected.depth_cell_depth_m[:, idx[i]], sl_cutoff_int[i])
+            
+            cells_above_sl[cells_above_sl_int == 0] = 0
+        
+        self.cells_above_sl = np.copy(cells_above_sl)
+        valid_vel = np.logical_not(np.isnan(self.u_mps))
+        self.valid_data[1, :, :] = self.cells_above_sl * valid_vel
+        self.all_valid_data()
+        self.compute_snr_rng()
+        self.apply_filter(transect)
+        # self.apply_interpolation(transect)
+
+    def all_valid_data(self):
+        """Combines the results of all filters to determine a final set of valid data"""
+
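+        # A cell is valid only if it passes every individual filter: the fraction
+        # of passed filters (slices 1 and up) must equal 1.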
+        n_filters = len(self.valid_data[1:, 0, 0])
+        sum_filters = np.nansum(self.valid_data[1:, :, :], 0) / n_filters
+        valid = np.tile([True], self.cells_above_sl.shape)
+        valid[sum_filters < 1] = False
+        self.valid_data[0] = valid
+        
+    def filter_beam(self, setting, transect=None):
+        """Applies beam filter to water velocity data.
+
+        The determination of invalid data depends on whether
+        3-beam or 4-beam solutions are acceptable. This function can be applied by
+        specifying 3 or 4 beam solutions, or by setting self.beam_filter to -1,
+        which triggers an automatic mode. The automatic mode finds all 3-beam
+        solutions and then compares the velocity of each 3-beam solution to the
+        nearest 4-beam solutions. If the 3-beam solution is within 50% of the
+        average of the neighboring 4-beam solutions the data are deemed valid;
+        if not, they are marked invalid. Thus, in automatic mode only those data
+        from 3-beam solutions that are sufficiently different from the 4-beam
+        solutions are marked invalid. If the number of beams is specified
+        manually, it is applied uniformly for the whole transect.
+
+        Parameters
+        ----------
+        setting: int
+            Setting for beam filter (3, 4, or -1)
+        transect: TransectData
+            Object of TransectData
+        """
+        
+        self.beam_filter = setting
+        
+        # In manual mode (3 or 4), invalidate cells whose number of valid beams is
+        # below the filter setting (cells with fewer than 3 valid beams are already
+        # handled by the original-data filter)
+        if self.beam_filter > 0:
+            
+            # Find invalid raw data
+            valid_vel = np.array([self.cells_above_sl] * 4)
+            valid_vel[np.isnan(self.raw_vel_mps)] = 0
+            
+            # Determine how many beams or transformed coordinates are valid
+            valid_vel_sum = np.sum(valid_vel, 0)
+            valid = copy.deepcopy(self.cells_above_sl)
+            
+            # Compare number of valid beams or velocity coordinates to filter value
+            valid[np.logical_and((valid_vel_sum < self.beam_filter), (valid_vel_sum > 2))] = False
+            
+            # Save logical of valid data to object
+            self.valid_data[5, :, :] = valid
+
+            # Combine all filter data and update processed properties
+            self.all_valid_data()
+
+        else:
+
+            # Apply automatic filter
+            self.automatic_beam_filter_abba_interpolation(transect)
+
+    def automatic_beam_filter_abba_interpolation(self, transect):
+        """Applies abba interpolation to allow comparison of interpolated and 3-beam solutions.
+
+        Parameters
+        ----------
+        transect: TransectData
+            Object of TransectData
+        """
+
+        # Create an array indicating which cells do not have 4-beam solutions; cells below the side lobe are set to nan
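+        # A temporary copy is filtered requiring 4-beam solutions; cells failing
+        # only that requirement are 3-beam candidates. Each candidate is
+        # interpolated from neighboring valid data (abba scheme) and kept only if
+        # both velocity components are within 50% of the interpolated values.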
+        temp = copy.deepcopy(self)
+        temp.filter_beam(4)
+        valid_bool = temp.valid_data[5, :, :]
+        valid = valid_bool.astype(float)
+        valid[np.logical_not(temp.cells_above_sl)] = np.nan
+
+        # Initialize processed velocity data variables
+        temp.u_processed_mps = copy.deepcopy(temp.u_mps)
+        temp.v_processed_mps = copy.deepcopy(temp.v_mps)
+
+        # Set invalid data to nan in processed velocity data variables
+        temp.u_processed_mps[np.logical_not(valid)] = np.nan
+        temp.v_processed_mps[np.logical_not(valid)] = np.nan
+
+        # Find indices of cells with 3 beams solutions
+        rows_3b, cols_3b = np.where(np.abs(valid) == 0)
+
+        # Check for presence of 3-beam solutions
+        if len(rows_3b) > 0:
+            # Initialize velocity data variables
+            u = copy.deepcopy(self.u_mps)
+            v = copy.deepcopy(self.v_mps)
+
+            u = u[:, transect.in_transect_idx]
+            v = v[:, transect.in_transect_idx]
+
+            u[np.logical_not(temp.valid_data[5, :, :])] = np.nan
+            v[np.logical_not(temp.valid_data[5, :, :])] = np.nan
+            interpolated_data = self.compute_abba_interpolation(wt_data=temp,
+                                                                data_list=[u, v],
+                                                                valid=temp.valid_data[5, :, :],
+                                                                transect=transect)
+
+            if interpolated_data is not None:
+                # Compute interpolated to measured ratios and apply filter criteria
+                for n in range(len(interpolated_data[0])):
+                    u_ratio = (temp.u_mps[interpolated_data[0][n][0]] / interpolated_data[0][n][1]) - 1
+                    v_ratio = (temp.v_mps[interpolated_data[1][n][0]] / interpolated_data[1][n][1]) - 1
+                    if np.abs(u_ratio) < 0.5 and np.abs(v_ratio) < 0.5:
+                        valid_bool[interpolated_data[0][n][0]] = True
+                    else:
+                        valid_bool[interpolated_data[0][n][0]] = False
+
+                # Update object with filter results
+                self.valid_data[5, :, :] = valid_bool
+            else:
+                self.valid_data[5, :, :] = temp.valid_data[5, :, :]
+        else:
+            self.valid_data[5, :, :] = temp.valid_data[5, :, :]
+
+        # Combine all filter data and update processed properties
+        self.all_valid_data()
+
+    def filter_diff_vel(self, setting, threshold=None):
+        """Applies filter to difference velocity.
+
+        Applies either manual or automatic filtering of the difference (error)
+        velocity. The automatic mode assumes that the difference velocity should
+        follow a Gaussian distribution, and flags data falling outside the median
+        plus or minus a multiple of the interquartile range (IQR). The IQR and
+        limits (median +/- multiplier * IQR) are computed in an iterative process
+        until filtering out additional data does not change the computed IQR.
+
+        Parameters
+        ----------
+        setting: str
+            Filter setting (Auto, Off, Manual)
+        threshold: float
+            Threshold value for Manual setting.
+        """
+
+        # Set difference filter properties
+        self.d_filter = setting
+        if threshold is not None:
+            self.d_filter_thresholds = threshold
+
+        # Get difference data from object
+        d_vel = copy.deepcopy(self.d_mps)
+
+        # NOTE: Versions prior to 1.01 did not apply this step to remove data below the side lobe cutoff
+        d_vel[np.logical_not(self.cells_above_sl)] = np.nan
+
+        d_vel_min_ref = None
+        d_vel_max_ref = None
+
+        bad_idx_rows = np.array([]).astype(int)
+        bad_idx_cols = np.array([]).astype(int)
+
+        # Apply selected method
+        if self.d_filter == 'Manual':
+            d_vel_max_ref = np.abs(self.d_filter_thresholds)
+            d_vel_min_ref = -1 * d_vel_max_ref
+            # Set valid data row 2 for difference velocity filter results
+            bad_idx_rows, bad_idx_cols = np.where(np.logical_or(nan_greater(d_vel, d_vel_max_ref),
+                                                                nan_less(d_vel, d_vel_min_ref)))
+        elif self.d_filter == 'Off':
+            d_vel_max_ref = np.nanmax(d_vel) + 1
+            d_vel_min_ref = np.nanmin(d_vel) - 1
+            # Set valid data row 2 for difference velocity filter results
+            bad_idx_rows, bad_idx_cols = np.where(np.logical_or(nan_greater(d_vel, d_vel_max_ref),
+                                                                nan_less(d_vel, d_vel_min_ref)))
+            self.d_filter_thresholds = d_vel_max_ref
+
+        elif self.d_filter == 'Auto':
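+            # Auto mode has three paths: thresholds computed from the whole
+            # measurement, per-ping-type thresholds for this transect, or a single
+            # transect threshold when ping types are unavailable.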
+            # Apply threshold from entire measurement processing to each transect
+            if self.use_measurement_thresholds:
+                self.d_filter_thresholds = self.d_meas_thresholds
+                for p_type in self.d_meas_thresholds.keys():
+                    data_max_ref = self.d_meas_thresholds[p_type][0]
+                    data_min_ref = self.d_meas_thresholds[p_type][1]
+                    data = np.copy(self.d_mps)
+                    data[self.ping_type != p_type] = np.nan
+                    idx_invalid_rows, idx_invalid_cols = np.where(np.logical_or(np.greater(data, data_max_ref),
+                                                                                np.less(data, data_min_ref)))
+                    if len(idx_invalid_rows) > 0:
+                        if len(bad_idx_rows) > 0:
+                            bad_idx_rows = np.hstack((bad_idx_rows, idx_invalid_rows))
+                            bad_idx_cols = np.hstack((bad_idx_cols, idx_invalid_cols))
+                        else:
+                            bad_idx_rows = idx_invalid_rows
+                            bad_idx_cols = idx_invalid_cols
+
+            # Compute unique threshold for each transect using ping types
+            elif self.ping_type.size > 1:
+
+                # Identify the ping types used in the transect
+                p_types = np.unique(self.ping_type)
+
+                thresholds = {}
+
+                # Apply the filter to each ping type
+                for p_type in p_types:
+                    # Copy of difference velocities
+                    vel_2_filter = copy.deepcopy(d_vel)
+                    # Remove data not associated with the specified ping type
+                    vel_2_filter[self.ping_type != p_type] = np.nan
+                    # Apply filter to data of a single ping type
+                    idx_invalid_rows, idx_invalid_cols, threshold = self.iqr_filter(vel_2_filter)
+                    # Combine indices of invalid data for all ping types
+                    if len(idx_invalid_rows) > 0:
+                        if len(bad_idx_rows) > 0:
+                            bad_idx_rows = np.hstack((bad_idx_rows, idx_invalid_rows))
+                            bad_idx_cols = np.hstack((bad_idx_cols, idx_invalid_cols))
+                        else:
+                            bad_idx_rows = idx_invalid_rows
+                            bad_idx_cols = idx_invalid_cols
+                    thresholds[p_type] = threshold
+                self.d_filter_thresholds = thresholds
+
+            # Compute unique threshold for each transect when no ping types are available
+            else:
+                self.ping_type = np.array(['U'])
+                bad_idx_rows, bad_idx_cols, threshold = self.iqr_filter(d_vel)
+                self.d_filter_thresholds = {'U': threshold}
+
+        valid = copy.deepcopy(self.cells_above_sl)
+        if len(bad_idx_rows) > 0:
+            valid[bad_idx_rows, bad_idx_cols] = False
+        # TODO Seems like if the difference velocity doesn't exist due to a 3-beam solution it shouldn't be
+        #  flagged as invalid however this is the way it was in Matlab. May change this in future.
+        # valid[np.isnan(self.d_mps)] = True
+        self.valid_data[2, :, :] = valid
+
+        # Combine all filter data and update processed properties
+        self.all_valid_data()
+
+    @staticmethod
+    def iqr_filter(data, multiplier=5):
+        """Apply the iqr filter to wt data.
+
+        Parameters
+        ----------
+        data: np.ndarray(float)
+            Array of difference or vertical velocity data
+        multiplier: int
+            Number of IQR's to use to set the threshold
+
+        Returns
+        -------
+        bad_idx_rows: np.ndarray(int)
+            Row indices of invalid data
+        bad_idx_cols: np.ndarray(int)
+            Column indices of invalid data
+        threshold: list
+            Maximum and minimum thresholds [max, min]
+        """
+
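+        # The filter iterates: compute the IQR, flag data outside
+        # median +/- multiplier * IQR, remove them, and repeat until the IQR
+        # stops changing (capped at 1000 iterations).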
+        data_max_ref = np.nan
+        data_min_ref = np.nan
+
+        # Check to make sure there are data to process
+        if data.size > 0 and np.any(np.logical_not(np.isnan(data))):
+
+            # Initialize variables
+            data_orig = np.copy(data)
+
+            iqr_diff = 1
+            i = -1
+            # Loop until no additional data are removed
+            while iqr_diff != 0 and i < 1000:
+                i = i + 1
+
+                # Compute the interquartile range
+                data_iqr = iqr(data)
+
+                # Compute maximum and minimum thresholds
+                data_max_ref = np.nanmedian(data) + multiplier * data_iqr
+                data_min_ref = np.nanmedian(data) - multiplier * data_iqr
+
+                # Identify valid and invalid data
+                data_bad_rows, data_bad_cols = np.where(np.logical_or(nan_greater(data, data_max_ref),
+                                                                      nan_less(data, data_min_ref)))
+                # Update filtered data array
+                data[data_bad_rows, data_bad_cols] = np.nan
+
+                # Determine differences due to last filter iteration
+                if len(data) > 0:
+                    data_iqr2 = iqr(data)
+                    iqr_diff = data_iqr2 - data_iqr
+                else:
+                    iqr_diff = 0
+
+            # Determine row and column indices of invalid data
+            bad_idx_rows, bad_idx_cols = np.where(np.logical_or(nan_greater(data_orig, data_max_ref),
+                                                                nan_less(data_orig, data_min_ref)))
+        else:
+            # All data are invalid
+            # Determine row and column indices of invalid data
+            bad_idx_rows, bad_idx_cols = np.where(np.logical_or(np.greater(data, -1),
+                                                                np.less(data, 1)))
+
+        threshold = [data_max_ref, data_min_ref]
+
+        return bad_idx_rows, bad_idx_cols, threshold
+
+    @staticmethod
+    def meas_iqr_filter(data, multiplier=5):
+        """Apply the iqr filter to wt data.
+
+        Parameters
+        ----------
+        data: np.ndarray(float)
+            Array of difference or vertical velocity data
+        multiplier: int
+            Number of IQR's to use to set the threshold
+
+        Returns
+        -------
+        thresholds: list
+            Maximum and minimum thresholds for filter
+        """
+
+        # Initialize variables
+        data_max_ref = np.nan
+        data_min_ref = np.nan
+
+        # Check to make sure there are data to process
+        if data.size > 0 and np.any(np.logical_not(np.isnan(data))):
+            iqr_diff = 1
+            i = -1
+            # Loop until no additional data are removed
+            while iqr_diff != 0 and i < 1000:
+                i = i + 1
+
+                # Compute the interquartile range
+                data_iqr = iqr(data)
+
+                # Compute maximum and minimum thresholds
+                data_max_ref = np.nanmedian(data) + multiplier * data_iqr
+                data_min_ref = np.nanmedian(data) - multiplier * data_iqr
+
+                # Identify valid and invalid data
+                bad_idx = np.where(np.logical_or(nan_greater(data, data_max_ref),
+                                                 nan_less(data, data_min_ref)))
+                # Update filtered data array
+                data[bad_idx] = np.nan
+
+                # Determine differences due to last filter iteration
+                if len(data) > 0:
+                    data_iqr2 = iqr(data)
+                    iqr_diff = data_iqr2 - data_iqr
+                else:
+                    iqr_diff = 0
+
+        thresholds = [data_max_ref, data_min_ref]
+
+        return thresholds
+
+    def filter_vert_vel(self, setting, threshold=None):
+        """Applies filter to vertical velocity.
+
+        Applies either manual or automatic filtering of the vertical velocity. The
+        automatic mode assumes that the vertical velocity should follow a Gaussian
+        distribution, and flags data falling outside the median plus or minus a
+        multiple of the interquartile range (IQR). The IQR and limits
+        (median +/- multiplier * IQR) are computed in an iterative process until
+        filtering out additional data does not change the computed IQR.
+
+        Parameters
+        ----------
+        setting: str
+            Filter setting (Auto, Off, Manual)
+        threshold: float
+            Threshold value for Manual setting.
+        """
+        
+        # Set vertical velocity filter properties
+        self.w_filter = setting
+        if threshold is not None:
+            self.w_filter_thresholds = threshold
+
+        # Get difference data from object
+        w_vel = copy.deepcopy(self.w_mps)
+
+        # NOTE: Versions prior to 1.01 did not apply this step to remove data below the side lobe cutoff
+        w_vel[np.logical_not(self.cells_above_sl)] = np.nan
+
+        w_vel_min_ref = None
+        w_vel_max_ref = None
+
+        bad_idx_rows = np.array([]).astype(int)
+        bad_idx_cols = np.array([]).astype(int)
+
+        # Apply selected method
+        if self.w_filter == 'Manual':
+            w_vel_max_ref = np.abs(self.w_filter_thresholds)
+            w_vel_min_ref = -1 * w_vel_max_ref
+            # Identify valid and invalid data
+            bad_idx_rows, bad_idx_cols = np.where(np.logical_or(nan_greater(w_vel, w_vel_max_ref),
+                                                                    nan_less(w_vel, w_vel_min_ref)))
+        elif self.w_filter == 'Off':
+            w_vel_max_ref = np.nanmax(w_vel) + 1
+            w_vel_min_ref = np.nanmin(w_vel) - 1
+            # Identify valid and invalid data
+            bad_idx_rows, bad_idx_cols = np.where(np.logical_or(nan_greater(w_vel, w_vel_max_ref),
+                                                                    nan_less(w_vel, w_vel_min_ref)))
+            self.w_filter_thresholds = w_vel_max_ref
+
+        elif self.w_filter == 'Auto':
+            # Apply threshold from entire measurement processing to each transect
+            if self.use_measurement_thresholds:
+                self.w_filter_thresholds = self.w_meas_thresholds
+                for p_type in self.w_meas_thresholds.keys():
+                    data_max_ref = self.w_meas_thresholds[p_type][0]
+                    data_min_ref = self.w_meas_thresholds[p_type][1]
+                    data = np.copy(self.w_mps)
+                    data[self.ping_type != p_type] = np.nan
+                    idx_invalid_rows, idx_invalid_cols = np.where(np.logical_or(np.greater(data, data_max_ref),
+                                                                                np.less(data, data_min_ref)))
+                    if len(idx_invalid_rows) > 0:
+                        if len(bad_idx_rows) > 0:
+                            bad_idx_rows = np.hstack((bad_idx_rows, idx_invalid_rows))
+                            bad_idx_cols = np.hstack((bad_idx_cols, idx_invalid_cols))
+                        else:
+                            bad_idx_rows = idx_invalid_rows
+                            bad_idx_cols = idx_invalid_cols
+
+            # Compute unique threshold for each transect using ping types
+            elif self.ping_type.size > 1:
+                # Initialize variables
+                # Identify the ping types used in the transect
+                p_types = np.unique(self.ping_type)
+
+                thresholds = {}
+
+                # Apply the filter to each ping type
+                for p_type in p_types:
+                    # Copy of vertical velocities
+                    vel_2_filter = copy.deepcopy(w_vel)
+                    # Remove data not associated with the specified ping type
+                    vel_2_filter[self.ping_type != p_type] = np.nan
+                    # Apply filter to data of a single ping type
+                    idx_invalid_rows, idx_invalid_cols, threshold = self.iqr_filter(vel_2_filter)
+                    # Combine indices of invalid data for all ping types
+                    if len(idx_invalid_rows) > 0:
+                        if len(bad_idx_rows) > 0:
+                            bad_idx_rows = np.hstack((bad_idx_rows, idx_invalid_rows))
+                            bad_idx_cols = np.hstack((bad_idx_cols, idx_invalid_cols))
+                        else:
+                            bad_idx_rows = idx_invalid_rows
+                            bad_idx_cols = idx_invalid_cols
+                    thresholds[p_type] = threshold
+                self.w_filter_thresholds = thresholds
+            # Compute unique threshold for each transect when no ping types are available
+            else:
+                self.ping_type = np.array(['U'])
+                bad_idx_rows, bad_idx_cols, threshold = self.iqr_filter(w_vel)
+                self.w_filter_thresholds = {'U': threshold}
+
+        valid = copy.deepcopy(self.cells_above_sl)
+        if len(bad_idx_rows) > 0:
+            valid[bad_idx_rows, bad_idx_cols] = False
+        self.valid_data[3, :, :] = valid
+
+        # Set threshold property
+        if np.ma.is_masked(w_vel_max_ref):
+            self.w_filter_thresholds = np.nan
+
+        # Combine all filter data and update processed properties
+        self.all_valid_data()
+                
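+    # Usage sketch (hypothetical object names): the three supported modes of
+    # the vertical velocity filter are selected through `setting`.
+    #
+    # >>> water.filter_vert_vel(setting='Manual', threshold=0.5)  # |w| > 0.5 m/s invalid
+    # >>> water.filter_vert_vel(setting='Auto')  # iterative IQR thresholds per ping type
+    # >>> water.filter_vert_vel(setting='Off')   # keep all cells above the side lobe cutoff
+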
+    def filter_smooth(self, transect, setting):
+        """Filter water speed using a smooth filter.
+
+        Running Standard Deviation filter for water speed
+        This filter employs a running trimmed standard deviation filter to
+        identify and mark spikes in the water speed. First a robust Loess
+        smooth is fitted to the water speed time series and residuals between
+        the raw data and the smoothed line are computed. The trimmed standard
+        deviation is computed by selecting the number of residuals specified by
+        "halfwidth" before the target point and after the target point, but not
+        including the target point. These values are then sorted, and the points
+        with the highest and lowest values are removed from the subset, and the
+        standard deviation of the trimmed subset is computed. The filter
+        criteria are determined by multiplying the standard deviation by a
+        user-specified multiplier. These criteria define a maximum and minimum
+        acceptable residual. Data falling outside the criteria are set to nan.
+          
+        Recommended filter settings are:
+        filter_width = 10
+        half_width = 10
+        multiplier = 9
+
+        Parameters
+        ----------
+        transect: TransectData
+            Object of TransectData
+        setting: str
+            Set filter (Auto, Off)
+        """
+        
+        self.smooth_filter = setting
+        upper_limit = None
+        lower_limit = None
+        wt_bad_idx = None
+        
+        # Compute ens_time
+        ens_time = np.nancumsum(transect.date_time.ens_duration_sec)
+        
+        # Determine if smooth filter should be applied
+        if self.smooth_filter == 'Auto':
+            
+            # Water velocity components
+            w_vele = self.u_mps
+            w_veln = self.v_mps
+            
+            # Set filter parameters
+            filter_width = 10
+            half_width = 10
+            multiplier = 9
+            cycles = 3
+
+            # Compute mean speed and direction of water
+            w_vele_avg = np.nanmean(w_vele, 0)
+            w_veln_avg = np.nanmean(w_veln, 0)
+            _, speed = cart2pol(w_vele_avg, w_veln_avg)
+            
+            # Compute residuals from a robust Loess smooth
+            speed_smooth = rloess(ens_time, speed, filter_width)
+            speed_res = speed - speed_smooth
+            
+            # Apply a trimmed standard deviation filter multiple times
+            for i in range(cycles):
+                fill_array = BoatData.run_std_trim(half_width, speed_res.T)
+                
+                # Compute filter bounds
+                upper_limit = speed_smooth + multiplier * fill_array
+                lower_limit = speed_smooth - multiplier * fill_array
+                
+                # Apply filter to residuals
+                wt_bad_idx = np.where(np.logical_or(speed > upper_limit, speed < lower_limit))[0]
+                speed_res[wt_bad_idx] = np.nan
+            
+            valid = np.copy(self.cells_above_sl)
+            
+            valid[:, wt_bad_idx] = False
+            self.valid_data[4, :, :] = valid
+            self.smooth_upper_limit = upper_limit
+            self.smooth_lower_limit = lower_limit
+            self.smooth_speed = speed_smooth
+        
+        else:
+            # No filter applied
+            self.valid_data[4, :, :] = np.copy(self.cells_above_sl)
+            self.smooth_upper_limit = np.nan
+            self.smooth_lower_limit = np.nan
+            self.smooth_speed = np.nan
+            
+        self.all_valid_data()
+     
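+    # Minimal sketch of the trimmed standard deviation described above,
+    # assuming a 1-D residual series `speed_res`, a target index `i`, and
+    # `half_width` and `multiplier` as set in the filter:
+    #
+    # >>> window = np.concatenate((speed_res[i - half_width:i],
+    # ...                          speed_res[i + 1:i + half_width + 1]))
+    # >>> trimmed = np.sort(window)[1:-1]  # drop the highest and lowest values
+    # >>> criteria = multiplier * np.nanstd(trimmed)
+    # Residuals outside +/- criteria about the Loess smooth are set to nan.
+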
+    def filter_snr(self, setting):
+        """Filters SonTek data based on SNR.
+
+        Computes the average SNR for all cells above the side lobe cutoff for each beam in
+        each ensemble. If the range in average SNR in an ensemble is greater than 12 dB the
+        water velocity in that ensemble is considered invalid.
+
+        Parameters
+        ----------
+        setting: str
+            Setting for filter (Auto, Off)
+        """
+
+        self.snr_filter = setting  
+        
+        if setting == 'Auto':
+            if self.snr_rng is not None:
+                bad_snr_idx = np.greater(self.snr_rng, 12)
+                valid = np.copy(self.cells_above_sl)
+                
+                bad_snr_array = np.tile(bad_snr_idx, (valid.shape[0], 1))
+                valid[bad_snr_array] = False
+                self.valid_data[7, :, :] = valid
+
+                # Combine all filter data and update processed properties
+                self.all_valid_data()
+        else:
+            self.valid_data[7, :, :] = np.copy(self.cells_above_sl)
+            self.all_valid_data()
+        
+    def filter_wt_depth(self, transect, setting):
+        """Marks water velocity data invalid if there is no valid or interpolated average depth.
+
+        Parameters
+        ----------
+        transect: TransectData
+            Object of TransectData
+        setting: bool
+            Setting for filter (True, False)
+        """
+        self.wt_depth_filter = setting
+        valid = np.copy(self.cells_above_sl)
+        
+        if setting:
+            trans_select = getattr(transect.depths, transect.depths.selected)
+            valid[:, np.isnan(trans_select.depth_processed_m)] = False
+        self.valid_data[8, :, :] = valid
+        
+        self.all_valid_data()
+        
+    def filter_excluded(self, transect, setting):
+        """Marks all data invalid that are closer to the transducer than the setting.
+
+        Parameters
+        ----------
+        transect: TransectData
+            Object of TransectData
+        setting: float
+            Range from the transducer, in m
+        """
+
+        # Initialize variables
+        trans_select = getattr(transect.depths, transect.depths.selected)
+        cell_depth = trans_select.depth_cell_depth_m
+        cell_size = trans_select.depth_cell_size_m
+        draft = trans_select.draft_use_m
+        top_cell_depth = cell_depth - 0.5 * cell_size
+        threshold = np.round((setting+draft), 3)
+
+        # Apply filter
+        exclude = np.round(top_cell_depth, 3) <= threshold
+        valid = np.copy(self.cells_above_sl)
+        valid[exclude] = False
+        self.valid_data[6, :, :] = valid
+        
+        # Set threshold property
+        self.excluded_dist_m = setting
+        
+        self.all_valid_data()
+
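+    # Worked example (hypothetical values): with setting = 0.16 m and
+    # draft = 0.10 m, threshold = round(0.16 + 0.10, 3) = 0.26 m. A cell
+    # centered at 0.30 m with a 0.10 m cell size has a top at
+    # 0.30 - 0.05 = 0.25 m <= 0.26 m, so it is marked invalid.
+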
+    def interpolate_abba(self, transect, search_loc=['above', 'below', 'before', 'after']):
+        """" Interpolates all data marked invalid using the abba interpolation algorithm.
+
+        Parameters
+        ----------
+        transect: TransectData
+            Object of TransectData
+        """
+
+        # Set properties
+        self.interpolate_cells = 'abba'
+        self.interpolate_ens = 'abba'
+
+        # Get valid data based on all filters applied
+        valid = self.valid_data[0, :, :]
+        valid = valid[:, transect.in_transect_idx]
+
+        # Initialize velocity data variables
+        u = copy.deepcopy(self.u_mps)
+        v = copy.deepcopy(self.v_mps)
+
+        u = u[:, transect.in_transect_idx]
+        v = v[:, transect.in_transect_idx]
+
+        # Set invalid data to nan in processed velocity data variables
+        u[np.logical_not(valid)] = np.nan
+        v[np.logical_not(valid)] = np.nan
+
+        interpolated_data = self.compute_abba_interpolation(wt_data=self,
+                                                            data_list=[u, v],
+                                                            valid=valid,
+                                                            transect=transect,
+                                                            search_loc=search_loc)
+
+        if interpolated_data is not None:
+            # Incorporate interpolated values
+            for n in range(len(interpolated_data[0])):
+                u[interpolated_data[0][n][0]] = interpolated_data[0][n][1]
+                v[interpolated_data[1][n][0]] = interpolated_data[1][n][1]
+
+        # Save interpolated data, retaining all ensembles, including those that are not
+        # in the in_transect_idx array
+        self.u_processed_mps[:, :] = np.nan
+        self.v_processed_mps[:, :] = np.nan
+        self.u_processed_mps[:, transect.in_transect_idx] = u
+        self.v_processed_mps[:, transect.in_transect_idx] = v
+
+    @staticmethod
+    def compute_abba_interpolation(wt_data, data_list, valid, transect, search_loc=['above', 'below', 'before', 'after']):
+        """Computes the interpolated values for invalid cells using the abba method.
+
+        Parameters
+        ----------
+        wt_data: WaterData
+            Object of WaterData
+        data_list: list
+            List of np.array(float) data to be used for interpolation
+        valid: np.ndarray(bool)
+            Array indicating valid data to be used for interpolation
+        transect: TransectData
+            Object of TransectData
+        search_loc: list
+            Locations to search for neighboring data (above, below, before, after)
+
+        Returns
+        -------
+        interpolated_data: np.ndarray(float)
+            Array of interpolated data
+        """
+        # Find cells with invalid data
+        valid_cells = wt_data.valid_data[0, :, transect.in_transect_idx]
+        boat_selected = getattr(transect.boat_vel, transect.boat_vel.selected)
+        if boat_selected is not None:
+            boat_valid = boat_selected.valid_data[0, transect.in_transect_idx]
+        else:
+            boat_valid = 0
+
+        if not np.all(valid_cells) and np.nansum(boat_valid) > 1:
+            # Compute distance along shiptrack to be used in interpolation
+            distance_along_shiptrack = transect.boat_vel.compute_boat_track(transect)['distance_m']
+
+            # Where there is invalid boat speed at beginning or end of transect mark the distance nan to avoid
+            # interpolating velocities that won't be used for discharge
+            if type(distance_along_shiptrack) is np.ndarray:
+                distance_along_shiptrack[0:np.argmax(boat_valid == True)] = np.nan
+                end_nan = np.argmax(np.flip(boat_valid) == True)
+                if end_nan > 0:
+                    distance_along_shiptrack[-1 * end_nan:] = np.nan
+                depth_selected = getattr(transect.depths, transect.depths.selected)
+                cells_above_sl = wt_data.valid_data[6, :, :]
+                cells_above_sl = cells_above_sl[:, transect.in_transect_idx]
+
+                # Interpolate values for invalid cells from neighboring data
+                interpolated_data = abba_idw_interpolation(data_list=data_list,
+                                                           valid_data=valid,
+                                                           cells_above_sl=cells_above_sl,
+                                                           y_centers=
+                                                           depth_selected.depth_cell_depth_m[:, transect.in_transect_idx],
+                                                           y_cell_size=
+                                                           depth_selected.depth_cell_size_m[:, transect.in_transect_idx],
+                                                           y_depth=
+                                                           depth_selected.depth_processed_m[transect.in_transect_idx],
+                                                           x_shiptrack=distance_along_shiptrack,
+                                                           search_loc=search_loc,
+                                                           normalize=True)
+                return interpolated_data
+            else:
+                return None
+        else:
+            return None
+
+    def interpolate_ens_next(self):
+        """Applies data from the next valid ensemble for ensembles with invalid water velocities.
+        """
+
+        # Set interpolation property for ensembles
+        self.interpolate_ens = 'ExpandedT'
+        
+        # Set processed data to nan for all invalid data
+        valid = self.valid_data[0]
+        self.u_processed_mps = np.copy(self.u_mps)
+        self.v_processed_mps = np.copy(self.v_mps)
+        self.u_processed_mps[valid == False] = np.nan
+        self.v_processed_mps[valid == False] = np.nan
+        
+        # Identify ensembles with at least one valid cell
+        valid_ens = np.any(valid, axis=0)
+        n_ens = len(valid_ens)
+        
+        # Set the invalid ensembles to the data in the next valid ensemble
+        for n in np.arange(0, n_ens-1)[::-1]:
+            if not valid_ens[n]:
+                self.u_processed_mps[:, n] = self.u_processed_mps[:, n+1]
+                self.v_processed_mps[:, n] = self.v_processed_mps[:, n+1]
+                
+    def interpolate_ens_hold_last(self):
+        """Interpolates velocity data for invalid ensembles by repeating the
+        the last valid data until new valid data is found
+        """
+        
+        self.interpolate_ens = 'HoldLast'
+        
+        valid = self.valid_data[0]
+        
+        # Initialize processed velocity data variables
+        self.u_processed_mps = np.copy(self.u_mps)
+        self.v_processed_mps = np.copy(self.v_mps)
+        
+        # Set invalid data to nan in processed velocity data variables
+        self.u_processed_mps[valid == False] = np.nan
+        self.v_processed_mps[valid == False] = np.nan
+        
+        # Determine ensembles with valid data
+        valid_ens = np.any(valid, axis=0)
+        
+        # Process each ensemble beginning with the second ensemble
+        n_ens = len(valid_ens)
+        
+        for n in np.arange(1, n_ens):
+            # If ensemble is invalid fill in with previous ensemble
+            if not valid_ens[n]:
+                self.u_processed_mps[:, n] = self.u_processed_mps[:, n-1]
+                self.v_processed_mps[:, n] = self.v_processed_mps[:, n-1]
+
+    def interpolate_ens_hold_last_9(self):
+        """Apply SonTek's approach to invalid data.
+
+        Interpolates velocity data for invalid ensembles by repeating the
+        last valid data for up to 9 ensembles or until new valid data is
+        found. If more than 9 consecutive ensembles are invalid, the
+        ensembles beyond the 9th remain invalid. This is for
+        compatibility with SonTek RiverSurveyor Live.
+        """
+        
+        self.interpolate_ens = 'Hold9'
+        
+        valid = self.valid_data[0]
+        
+        # Initialize processed velocity data variables
+        self.u_processed_mps = np.copy(self.u_mps)
+        self.v_processed_mps = np.copy(self.v_mps)
+        
+        # Set invalid data to nan in processed velocity data variables
+        self.u_processed_mps[valid == False] = np.nan
+        self.v_processed_mps[valid == False] = np.nan
+        
+        # Determine ensembles with valid data
+        valid_ens = np.any(valid, axis=0)
+        
+        # Process each ensemble beginning with the second ensemble
+        n_ens = len(valid_ens)
+        n_invalid = 0
+        
+        for n in np.arange(1, n_ens):
+            # If ensemble is invalid fill in with previous ensemble
+            if valid_ens[n] == False and n_invalid < 10:
+                n_invalid += 1
+                self.u_processed_mps[:, n] = self.u_processed_mps[:, n-1]
+                self.v_processed_mps[:, n] = self.v_processed_mps[:, n-1]
+            else:
+                n_invalid = 0
+
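+    # Behavior sketch: within a run of invalid ensembles each ensemble is
+    # filled from its left neighbor until the n_invalid counter reaches its
+    # cap, so a long gap is only partially bridged and the remainder of the
+    # run stays nan, mirroring SonTek RiverSurveyor Live.
+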
+    def interpolate_ens_none(self):
+        """Applies no interpolation for invalid ensembles."""
+        
+        self.interpolate_ens = 'None'
+        
+        valid = self.valid_data[0]
+        
+        # Initialize processed velocity data variables
+        self.u_processed_mps = np.copy(self.u_mps)
+        self.v_processed_mps = np.copy(self.v_mps)
+        
+        # Set invalid data to nan in processed velocity data variables
+        self.u_processed_mps[valid == False] = np.nan
+        self.v_processed_mps[valid == False] = np.nan
+
+    def interpolate_cells_none(self):
+        """Applies no interpolation for invalid cells that are not part of
+        an invalid ensemble."""
+
+        self.interpolate_cells = 'None'
+        
+        valid = self.valid_data[0]
+
+        # Determine ensembles with valid data
+        valid_ens = np.any(valid, axis=0)
+
+        # Determine the number of ensembles
+        n_ens = len(valid_ens)
+
+        # Initialize processed velocity data variables
+        self.u_processed_mps = np.copy(self.u_mps)
+        self.v_processed_mps = np.copy(self.v_mps)
+        
+        for n in range(n_ens):
+            # For ensembles with valid data, set any invalid cells to nan
+            if valid_ens[n]:
+                invalid_cells = np.logical_not(valid[:, n])
+                self.u_processed_mps[invalid_cells, n] = np.nan
+                self.v_processed_mps[invalid_cells, n] = np.nan
+        
+    def interpolate_ens_linear(self, transect):
+        """Uses 2D linear interpolation to estimate values for invalid ensembles.
+
+        Uses linear interpolation as computed by scipy's interpolation
+        function to interpolate velocity data for ensembles with no valid velocities.
+
+        Parameters
+        ----------
+        transect: TransectData
+            Object of TransectData
+        """
+
+        self.interpolate_ens = 'Linear'
+         
+        valid = self.valid_data[0, :, :]
+
+        # Initialize processed velocity data variables
+        self.u_processed_mps = np.copy(self.u_mps)
+        self.v_processed_mps = np.copy(self.v_mps)
+
+        # Determine ensembles with valid data
+        valid_ens = np.any(valid, 0)
+        
+        if np.sum(valid_ens) > 1:
+            
+            trans_select = getattr(transect.depths, transect.depths.selected)
+            # Compute z
+            z = np.divide(np.subtract(trans_select.depth_processed_m, trans_select.depth_cell_depth_m),
+                          trans_select.depth_processed_m)
+            
+            # Create position array
+            boat_select = getattr(transect.boat_vel, transect.boat_vel.selected)
+            if boat_select is not None:
+                if np.nansum(boat_select.valid_data[0]) > 0:
+                    boat_vel_x = boat_select.u_processed_mps
+                    boat_vel_y = boat_select.v_processed_mps
+                    track_x = boat_vel_x * transect.date_time.ens_duration_sec
+                    track_y = boat_vel_y * transect.date_time.ens_duration_sec
+                    track = np.nancumsum(np.sqrt(track_x**2 + track_y**2))
+                    track_array = np.tile(track, (self.u_processed_mps.shape[0], 1))
+                    
+                    # Determine index of all valid data
+                    valid_z = np.isnan(z) == False
+                    valid_combined = np.logical_and(valid, valid_z)
+
+                    u = interpolate.griddata(np.vstack((z[valid_combined], track_array[valid_combined])).T,
+                                             self.u_processed_mps[valid_combined],
+                                             (z, track_array))
+                    
+                    v = interpolate.griddata(np.vstack((z[valid_combined], track_array[valid_combined])).T,
+                                             self.v_processed_mps[valid_combined],
+                                             (z, track_array))
+
+                    self.u_processed_mps = np.tile(np.nan, self.u_mps.shape)
+                    self.v_processed_mps = np.tile(np.nan, self.v_mps.shape)
+                    processed_valid_cells = self.estimate_processed_valid_cells(transect)
+                    self.u_processed_mps[processed_valid_cells] = u[processed_valid_cells]
+                    self.v_processed_mps[processed_valid_cells] = v[processed_valid_cells]
+
+    def interpolate_cells_linear(self, transect):
+        """Uses 2D linear interpolation to estimate values for invalid cells.
+
+        Uses linear interpolation as computed by scipy's interpolation
+        function to interpolate velocity data for cells with no valid velocities.
+
+        Parameters
+        ----------
+        transect: TransectData
+            Object of TransectData
+        """
+
+        self.interpolate_cells = 'Linear'
+
+        valid = self.valid_data[0, :, :]
+
+        # Initialize processed velocity data variables
+        self.u_processed_mps = np.copy(self.u_mps)
+        self.v_processed_mps = np.copy(self.v_mps)
+
+        trans_select = getattr(transect.depths, transect.depths.selected)
+
+        # Compute z
+        z = np.divide(np.subtract(trans_select.depth_processed_m, trans_select.depth_cell_depth_m),
+                      trans_select.depth_processed_m)
+
+        # Create position array
+        boat_select = getattr(transect.boat_vel, transect.boat_vel.selected)
+        if boat_select is not None:
+            if np.nansum(boat_select.valid_data[0]) > 0:
+                boat_vel_x = boat_select.u_processed_mps
+                boat_vel_y = boat_select.v_processed_mps
+                track_x = boat_vel_x * transect.date_time.ens_duration_sec
+                track_y = boat_vel_y * transect.date_time.ens_duration_sec
+                track = np.nancumsum(np.sqrt(track_x ** 2 + track_y ** 2))
+                track_array = np.tile(track, (self.u_processed_mps.shape[0], 1))
+
+                # Determine index of all valid data
+                valid_z = np.isnan(z) == False
+                valid_combined = np.logical_and(valid, valid_z)
+
+                u = interpolate.griddata(np.array([z[valid_combined].ravel(),
+                                                   track_array[valid_combined].ravel()]).T,
+                                         self.u_processed_mps[valid_combined].ravel(),
+                                         (z, track_array))
+
+                v = interpolate.griddata(np.array([z[valid_combined].ravel(),
+                                                   track_array[valid_combined].ravel()]).T,
+                                         self.v_processed_mps[valid_combined].ravel(),
+                                         (z, track_array))
+
+                self.u_processed_mps = np.tile(np.nan, self.u_mps.shape)
+                self.v_processed_mps = np.tile(np.nan, self.v_mps.shape)
+                processed_valid_cells = self.estimate_processed_valid_cells(transect)
+                self.u_processed_mps[processed_valid_cells] = u[processed_valid_cells]
+                self.v_processed_mps[processed_valid_cells] = v[processed_valid_cells]
+
+    def interpolate_cells_trdi(self, transect):
+        """Interpolates values for invalid cells using methods similar to WinRiver II.
+
+        This function computes the velocity for the invalid cells using
+        the methods in WinRiver II, but applied to the velocity components.
+        Although WinRiver II applies the interpolation to discharge, which is
+        theoretically more correct, mathematically applying it to discharge or
+        to velocity components is identical. Applying it to the velocity
+        components lets the user see the interpolated velocity data.
+        Power fit uses the power fit equation and no slip uses linear interpolation.
+
+        Parameters
+        ----------
+        transect: TransectData
+            Object of TransectData
+        """
+
+        # Set property
+        self.interpolate_cells = 'TRDI'
+
+        # Construct variables
+        depths = getattr(transect.depths, transect.depths.selected)
+        valid = self.valid_data[0]
+        cell_depth = depths.depth_cell_depth_m
+        z_all = np.subtract(depths.depth_processed_m, cell_depth)
+        z = np.copy(z_all)
+        z[np.isnan(self.u_processed_mps)] = np.nan
+        z_adj = np.tile(np.nan, z.shape)
+        n_cells, n_ens = self.u_processed_mps.shape
+        cell_size = depths.depth_cell_size_m
+        exponent = transect.extrap.exponent
+        bot_method = transect.extrap.bot_method
+
+        for n in range(n_ens):
+
+            # Identify first and last valid depth cell
+            idx = np.where(valid[:, n] == True)[0]
+            if len(idx) > 0:
+                idx_first = idx[0]
+                idx_last = idx[-1]
+                idx_middle = np.where(valid[idx_first:idx_last + 1, n] == False)[0]
+
+                # For invalid middle depth cells perform interpolation based on bottom method
+                if len(idx_middle) > 0:
+                    idx_middle = idx_middle + idx_first
+                    z_adj[idx_middle, n] = z_all[idx_middle, n]
+
+                    # Interpolate velocities using power fit
+                    if bot_method == 'Power':
+                        # Compute interpolated u-velocities
+                        z2 = z[:, n] - (0.5 * cell_size[:, n])
+                        z2[nan_less(z2, 0)] = np.nan
+                        coef = ((exponent + 1) * np.nansum(self.u_processed_mps[:, n] * cell_size[:, n], 0)) / \
+                            np.nansum(((z[:, n] + 0.5 * cell_size[:, n]) ** (exponent + 1)) - (z2 ** (exponent + 1)), 0)
+
+                        temp = coef * z_adj[:, n] ** exponent
+                        self.u_processed_mps[idx_middle, n] = temp[idx_middle]
+                        # Compute interpolated v-Velocities
+                        coef = ((exponent + 1) * np.nansum(self.v_processed_mps[:, n] * cell_size[:, n])) / \
+                            np.nansum(((z[:, n] + 0.5 * cell_size[:, n]) ** (exponent + 1)) - (z2 ** (exponent + 1)))
+                        temp = coef * z_adj[:, n] ** exponent
+                        self.v_processed_mps[idx_middle, n] = temp[idx_middle]
+
+                    # Interpolate velocities using linear interpolation
+                    elif bot_method == 'No Slip':
+                        self.u_processed_mps[idx_middle, n] = np.interp(x=cell_depth[idx_middle, n],
+                                                                        xp=cell_depth[valid[:, n], n],
+                                                                        fp=self.u_processed_mps[valid[:, n], n])
+                        self.v_processed_mps[idx_middle, n] = np.interp(x=cell_depth[idx_middle, n],
+                                                                        xp=cell_depth[valid[:, n], n],
+                                                                        fp=self.v_processed_mps[valid[:, n], n])
+
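+    # Note on the power-fit coefficient above: for u(z) = coef * z**exponent,
+    # integrating across a cell of size dz centered at height z above the
+    # streambed gives
+    #     coef / (exponent + 1) * ((z + dz / 2)**(exponent + 1)
+    #                              - (z - dz / 2)**(exponent + 1)).
+    # Setting the sum of these cell integrals equal to the sum of the measured
+    # u * dz over valid cells and solving for coef yields the expression used
+    # for both velocity components.
+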
+    def estimate_processed_valid_cells(self, transect):
+        """Estimate the number of valid cells for invalid ensembles.
+
+        Parameters
+        ----------
+        transect: TransectData
+            Object of TransectData
+
+        Returns
+        -------
+        processed_valid_cells: np.ndarray(bool)
+           Estimated valid cells
+        """
+
+        processed_valid_cells = np.copy(self.valid_data[0])
+        valid_data_sum = np.nansum(processed_valid_cells, 0)
+        invalid_ens_idx = np.where(valid_data_sum == 0)[0]
+        n_invalid = len(invalid_ens_idx)
+        depth_cell_depth = transect.depths.bt_depths.depth_cell_depth_m
+        for n in range(n_invalid):
+
+            # Find nearest valid ensembles on either side of invalid ensemble
+            idx1 = np.where(valid_data_sum[:invalid_ens_idx[n]] > 0)[0]
+            if len(idx1) > 0:
+                idx1 = idx1[-1]
+                # Find the last cell in the neighboring valid ensembles
+                idx1_cell = np.where(processed_valid_cells[:, idx1] == True)[0][-1]
+                # Determine valid cells for invalid ensemble
+                idx1_cell_depth = depth_cell_depth[idx1_cell, idx1]
+            else:
+                idx1_cell_depth = 0
+
+            idx2 = np.where(valid_data_sum[invalid_ens_idx[n]:] > 0)[0]
+            if len(idx2) > 0:
+                idx2 = idx2[0]
+                idx2 = invalid_ens_idx[n] + idx2
+                # Find the last cell in the neighboring valid ensembles
+                idx2_cell = np.where(processed_valid_cells[:, idx2] == True)[0][-1]
+                # Determine valid cells for invalid ensemble
+                idx2_cell_depth = depth_cell_depth[idx2_cell, idx2]
+            else:
+                idx2_cell_depth = 0
+
+            cutoff = np.nanmax([idx1_cell_depth, idx2_cell_depth])
+            processed_valid_cells[depth_cell_depth[:, invalid_ens_idx[n]] < cutoff, invalid_ens_idx[n]] = True
+
+            # Apply excluded distance
+            processed_valid_cells = processed_valid_cells * self.valid_data[6, :, :]
+
+        return processed_valid_cells
+
+    def compute_snr_rng(self):
+        """Computes the range between the average snr for all beams.
+        The average is computed using only data above the side lobe cutoff.
+        """
+        if self.rssi_units == 'SNR':
+            cells_above_sl = np.copy(self.cells_above_sl.astype(float))
+            cells_above_sl[cells_above_sl < 0.5] = np.nan
+            snr_adjusted = self.rssi * cells_above_sl
+            snr_average = np.nanmean(snr_adjusted, 1)
+            self.snr_rng = np.nanmax(snr_average, 0) - np.nanmin(snr_average, 0)
+
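+    # Example (hypothetical numbers): if the side-lobe-masked beam averages
+    # for an ensemble are [21.0, 18.5, 30.2, 19.1] dB, then
+    # snr_rng = 30.2 - 18.5 = 11.7 dB, which is below the 12 dB limit, so
+    # filter_snr('Auto') keeps that ensemble.
+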
+    def automated_beam_filter_old(self):
+        """Older version of automatic beam filter. Not currently used.
+        """
+
+        # Create an array indicating which cells do not have 4-beam solutions; cells below the side lobe are nan
+        temp = copy.deepcopy(self)
+        temp.filter_beam(4)
+        valid_bool = temp.valid_data[5, :, :]
+        valid = valid_bool.astype(float)
+        valid[temp.cells_above_sl == False] = np.nan
+
+        # Find cells with 3 beams solutions
+        rows_3b, cols_3b = np.where(np.abs(valid) == 0)
+        if len(rows_3b) > 0:
+            # Find cells with 4 beams solutions
+            valid_rows, valid_cols = np.where(valid == 1)
+
+            valid_u = temp.u_mps[valid == 1]
+            valid_v = temp.v_mps[valid == 1]
+            # Interpolate the water velocity of cells with 3-beam solutions
+
+            # The following code duplicates Matlab scatteredInterpolant which seems to only estimate along columns
+            # as long as there is data in the ensemble above and below the value being estimated.
+            row_numbers = np.linspace(0, valid.shape[0] - 1, valid.shape[0])
+            n = 0
+            for col in cols_3b:
+                # If the cell has valid data above and below it linearly interpolate using data in that ensemble.
+                # If not, use other means of interpolation.
+                if np.any(valid_bool[rows_3b[n] + 1::, col]) and np.any(valid_bool[0:rows_3b[n], col]):
+                    est_u = np.interp(x=rows_3b[n],
+                                      xp=row_numbers[valid_bool[:, col]],
+                                      fp=temp.u_mps[valid_bool[:, col], col])
+
+                    est_v = np.interp(x=rows_3b[n],
+                                      xp=row_numbers[valid_bool[:, col]],
+                                      fp=temp.v_mps[valid_bool[:, col], col])
+                else:
+                    est_u = interpolate.griddata(np.array((valid_cols, valid_rows)).T, valid_u, (col, rows_3b[n]))
+                    est_v = interpolate.griddata(np.array((valid_cols, valid_rows)).T, valid_v, (col, rows_3b[n]))
+
+                u_ratio = (temp.u_mps[rows_3b[n], col] / est_u) - 1
+                v_ratio = (temp.v_mps[rows_3b[n], col] / est_v) - 1
+                if np.abs(u_ratio) < 0.5 or np.abs(v_ratio) < 0.5:
+                    valid_bool[rows_3b[n], col] = True
+                else:
+                    valid_bool[rows_3b[n], col] = False
+                n += 1
+            self.valid_data[5, :, :] = valid_bool
+        else:
+            self.valid_data[5, :, :] = temp.valid_data[5, :, :]
+
+        # Combine all filter data and update processed properties
+
+        self.all_valid_data()
+
+# Code from Aurelien
+    def interpolate_cells_above(self, transect):
+        """Interpolates values for invalid cells using below valid cell
+        Written by Aurelien Despax
+        Modified by dsm
+
+        Parameters
+        ----------
+        transect: TransectData
+            Object of TransectData
+        """
+
+        # Set property
+        self.interpolate_cells = 'Above'
+
+        # Construct variables
+
+        valid = self.valid_data[0]
+        n_cells, n_ens = self.u_processed_mps.shape
+
+        for n in range(n_ens):
+
+            # Identify first and last valid depth cell
+            idx = np.where(valid[:, n] == True)[0]
+            if len(idx) > 0:
+                idx_first = idx[0]
+                idx_last = idx[-1]
+                idx_middle = np.where(valid[idx_first:idx_last + 1, n] == False)[0]
+
+                # For invalid middle depth cells assign value of shallower valid depth cell
+                # TODO this assigns the value of the shallowest depth cell not the next valid depth cell
+                if len(idx_middle) > 0:
+                    idx_middle = idx_middle + idx_first
+                    self.u_processed_mps[idx_middle, n] = self.u_processed_mps[idx_first, n]
+                    self.v_processed_mps[idx_middle, n] = self.v_processed_mps[idx_first, n]
+
+    def interpolate_cells_below(self, transect):
+        """Interpolates values for invalid cells using above valid cell
+        Written by Aurelien Despax
+        Modified by dsm
+
+        Parameters
+        ----------
+        transect: TransectData
+            Object of TransectData
+        """
+
+        # Set property
+        self.interpolate_cells = 'Below'
+
+        # Construct variables
+        valid = self.valid_data[0]
+        n_cells, n_ens = self.u_processed_mps.shape
+
+        for n in range(n_ens):
+
+            # Identify first and last valid depth cell
+            idx = np.where(valid[:, n] == True)[0]
+            if len(idx) > 0:
+                idx_first = idx[0]
+                idx_last = idx[-1]
+                idx_middle = np.where(valid[idx_first:idx_last + 1, n] == False)[0]
+
+                # For invalid middle depth cells assign the value of the next deeper valid depth cell
+                # TODO this assigns the value of the deepest valid depth cell not the next valid depth cell
+                if len(idx_middle) > 0:
+                    idx_middle = idx_middle + idx_first
+                    self.u_processed_mps[idx_middle, n] = self.u_processed_mps[idx_last, n]
+                    self.v_processed_mps[idx_middle, n] = self.v_processed_mps[idx_last, n]
+
+    def interpolate_cells_before(self, transect):
+        """Interpolates values for invalid cells using above valid cell
+        Written by Aurelien Despax
+
+        Parameters
+        ----------
+        transect: TransectData
+            Object of TransectData
+        """
+
+        # Set property
+        self.interpolate_cells = 'Before'
+
+        # Construct variables
+        depths = getattr(transect.depths, transect.depths.selected)
+        valid = self.valid_data[0]
+        cell_depth = depths.depth_cell_depth_m
+        z_all = np.subtract(depths.depth_processed_m, cell_depth)
+        z = np.copy(z_all)
+        z[np.isnan(self.u_processed_mps)] = np.nan
+        z_adj = np.tile(np.nan, z.shape)
+        n_cells, n_ens = self.u_processed_mps.shape
+
+        for n in range(n_ens):
+
+            # Identify first and last valid depth cell
+            idx = np.where(valid[:, n] == True)[0]
+            if len(idx) > 0:
+                idx_first = idx[0]
+                idx_last = idx[-1]
+                idx_middle = np.where(valid[idx_first:idx_last + 1, n] == False)[0]
+
+                # For invalid middle depth cells perform interpolation based on bottom method
+                if len(idx_middle) > 0:
+                    idx_middle = idx_middle + idx_first
+                    z_adj[idx_middle, n] = z_all[idx_middle, n]
+
+                    # Fill invalid middle cells with data from the previous ensemble
+                    self.u_processed_mps[idx_middle, n] = self.u_processed_mps[idx_middle, n - 1]
+                    self.v_processed_mps[idx_middle, n] = self.v_processed_mps[idx_middle, n - 1]
+
+    def interpolate_cells_after(self, transect):
+        """Interpolates values for invalid cells using above valid cell
+        Written by Aurelien Despax
+
+        Parameters
+        ----------
+        transect: TransectData
+            Object of TransectData
+        """
+
+        # Set property
+        self.interpolate_cells = 'After'
+
+        # Construct variables
+        depths = getattr(transect.depths, transect.depths.selected)
+        valid = self.valid_data[0]
+        cell_depth = depths.depth_cell_depth_m
+        z_all = np.subtract(depths.depth_processed_m, cell_depth)
+        z = np.copy(z_all)
+        z[np.isnan(self.u_processed_mps)] = np.nan
+        z_adj = np.tile(np.nan, z.shape)
+        n_cells, n_ens = self.u_processed_mps.shape
+
+        for n in list(reversed(list(range(n_ens)))):
+
+            # Identify first and last valid depth cell
+            idx = np.where(valid[:, n] == True)[0]
+            if len(idx) > 0:
+                idx_first = idx[0]
+                idx_last = idx[-1]
+                idx_middle = np.where(valid[idx_first:idx_last + 1, n] == False)[0]
+
+                # For invalid middle depth cells perform interpolation based on bottom method
+                if len(idx_middle) > 0:
+                    idx_middle = idx_middle + idx_first
+                    z_adj[idx_middle, n] = z_all[idx_middle, n]
+
+                    # Fill invalid middle cells with data from the next ensemble
+                    if (n_ens > (n + 1)):
+                        self.u_processed_mps[idx_middle, n] = self.u_processed_mps[idx_middle, n + 1]
+                        self.v_processed_mps[idx_middle, n] = self.v_processed_mps[idx_middle, n + 1]
\ No newline at end of file
diff --git a/Classes/__init__.py b/Classes/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/Classes/stickysettings.py b/Classes/stickysettings.py
new file mode 100644
index 0000000..8900467
--- /dev/null
+++ b/Classes/stickysettings.py
@@ -0,0 +1,123 @@
+import os
+import json
+
+
+class StickySettings(object):
+    """Provides methods to quickly store and retrieve settings to and from disk.
+
+    This class is intended to be used to store simple settings that need to be retained between sessions of the
+    subject application, such as the last folder opened or the units setting. Any setting that the application
+    needs to know when it is run again can be stored using the methods in this class.
+
+    Data are stored in a dictionary which is then written to a json file with the filename provided by the user,
+    stored in the folder defined by the APPDATA environment variable.
+
+    Note
+    ----
+    The os and json packages are required for this class.
+
+    Attributes
+    ----------
+    settings_file : str
+        Filename of json file used to store data between sessions.
+    settings: dict
+        Dictionary used to store user defined settings.
+
+    """
+
+    def __init__(self, arg):
+        """Constructor method which establishes the json file.
+
+        If the filename (arg) provided by the user cannot be found a new file is created. If the filename (arg)
+        provided by the user is found the file is opened and all keys and values are read and stored in settings for
+        quick modification by the calling application.
+
+        Parameters
+        ----------
+        arg : str
+            User supplied filename excluding the suffix. Example 'myFile' but not 'myFile.json'
+
+        """
+        # Construct filename from user input.
+        self.settings_file = os.path.join(os.getenv('APPDATA'), arg + '.json')
+        if os.path.isfile(self.settings_file):
+            # Read json into dictionary
+            with open(self.settings_file, 'r') as f:
+                self.settings = json.load(f)
+        else:
+            # Create json file with default dictionary
+            self.settings = {}
+            with open(self.settings_file, 'w') as f:
+                json.dump(self.settings, f)
+
+    def new(self, key, value):
+        """Create new key value pair in settings.
+
+        Method checks to see if key exists. If it exists an error is raised. If the key does not exist it is created.
+
+        Parameters
+        ----------
+        key : str
+            Label for value in dictionary.
+        value : any value that can be stored in a dictionary
+
+        Raises
+        ------
+        KeyError
+            If the key already exists in settings.
+
+        """
+
+        if key in self.settings:
+            raise KeyError('Key already exists in settings')
+        else:
+            self.settings[key] = value
+            with open(self.settings_file, 'w') as f:
+                json.dump(self.settings, f)
+
+    def set(self, key, value):
+        """Set value of existing key.
+
+        Method checks to ensure the key exists and sets the value of the key to value. If the key does not exist an
+        error is raised.
+
+        Parameters
+        ----------
+        key : str
+            Label for value in dictionary.
+        value : any value that can be stored in a dictionary
+
+        Raises
+        ------
+        KeyError
+            If the key does not exist in settings.
+
+        """
+        if key in self.settings:
+            self.settings[key] = value
+            with open(self.settings_file, 'w') as f:
+                json.dump(self.settings, f)
+        else:
+            raise KeyError('Key does not exist in settings')
+
+    def get(self, item):
+        """Get value of item for settings.
+
+        Returns the value of item from settings dictionary.
+
+        Parameters
+        ----------
+        item : str
+            Key for settings dictionary.
+
+        Returns
+        -------
+        value
+            Data type stored in key 'item'.
+
+        """
+
+        with open(self.settings_file, 'r') as f:
+            self.settings = json.load(f)
+
+        return self.settings[item]
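+
+
+# Usage sketch (illustrative application and key names):
+#
+# >>> settings = StickySettings('MyApp')
+# >>> settings.new('Folder', 'C:/data')   # first run: create the key
+# >>> settings.set('Folder', 'D:/data')   # later runs: update its value
+# >>> settings.get('Folder')
+# 'D:/data'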
diff --git a/Classes/test_stickysettings.py b/Classes/test_stickysettings.py
new file mode 100644
index 0000000..2c3b6ec
--- /dev/null
+++ b/Classes/test_stickysettings.py
@@ -0,0 +1,68 @@
+import pytest
+import os
+import sys
+from Classes.stickysettings import StickySettings as ss
+
+
+def create_filename():
+    """Create filename to use in testing"""
+    file_exists = True
+    n = 0
+    testfile = ''
+    while file_exists:
+        n += 1
+        # Append a counter and loop until an unused settings filename is found
+        testfile = os.path.join(os.getenv('APPDATA'), 'xyz123' + str(n))
+        file_exists = os.path.isfile(testfile + '.json')
+        if n > 10:
+            file_exists = False
+    return testfile
+
+
+def test_file_creation():
+    """Test initialization of StickySettings and file creation"""
+    testfile = create_filename()
+    _ = ss(testfile)
+    assert os.path.isfile(testfile + '.json')
+    os.remove(testfile + '.json')
+
+
+def test_store_value():
+    """Test creating a file and key value pair and getting the value"""
+    testfile = create_filename()
+    test_user = ss(testfile)
+    test_user.new('test', True)
+    assert test_user.get('test')
+    os.remove(testfile + '.json')
+
+
+def test_set_value():
+    """Test setting a value of an existing key"""
+    testfile = create_filename()
+    test_user = ss(testfile)
+    test_user.new('test', False)
+    test_user.set('test', True)
+    assert test_user.get('test')
+    os.remove(testfile + '.json')
+
+
+def test_set_value_failure():
+    """Test failure when setting a value for a key that does not exist"""
+    testfile = create_filename()
+    test_user = ss(testfile)
+    test_user.new('test', True)
+    with pytest.raises(KeyError):
+        test_user.set('Folder', 'AnyFolder')
+    os.remove(testfile + '.json')
+
+
+def test_get_value_failure():
+    """Test failure when requesting a value for a key that does not exist"""
+    testfile = create_filename()
+    test_user = ss(testfile)
+    test_user.new('test', True)
+    with pytest.raises(KeyError):
+        test_user.get('Folder')
+    os.remove(testfile + '.json')
diff --git a/DischargeFunctions/bottom_discharge_extrapolation.cp39-win_amd64.pyd b/DischargeFunctions/bottom_discharge_extrapolation.cp39-win_amd64.pyd
new file mode 100644
index 0000000000000000000000000000000000000000..046f35e897397e7efc156ce63b9ea8ceeba837b3
GIT binary patch
literal 109056
zcmd?S4S1bJng5?dQyQW5NwsLjA_t8>Q(sD=)or9|0x3QRP9$npg4>lKSgRmpA#tl)
z;Ybq9AwAu!TUUN-U9&5$b+=ioRxK{8C+S<tO9|zrDHR9^`Wy;0Ez+hyo8RYq&pan-
zMc3WycU}AcU%y=KbI(1m_sraL&pk8u%xU;|Z^^Whl9E#XPfV1QYy#@9*1muEQ<f+x
zdF7cudu7SSSKNKprbPPgvznHF=DK9-wby+5+RHwdy!^7OufC=&`N>ZuuWi3N`I)Pe
z^%s3K`MGPZ_|$?~vreh-S^wqv2d;kCTfX(a`2Tw^o%z0Vz(0KHKP;{Wz9sG8hcEs6
z#qTr!Z!C7<YZq66ul~qC7<|`T{^5OV!IjTD`TwI&e0}kI41Q~|!&|)nHx2*DXD(l^
zxN!{|mz0!Tans2qPdxpzPsV;@C2vfgdeSS;EIEaW-s*hEMu6q!&fx3ZCzX_(Y+jSF
zqy$ejDt{-Hv=G%Liox&V&Zqou6D6xivliH=#G{Flalzq4Ni<y!|2LK>X(reFXA&jz
zV!x^Ivx$;Q@XYc=Nv#p;1Cxo8Zgu$Y`CHKTsTFO6&3%O@S#|gIh?68sN?I0Nd&OmK
zm%%R?e95!qM&K)fQ~qjUqXjNn$<S*E*g&|Nq4#&<<rlQNh*n4HY7N@=JMn54TzlQM
zmy?CH$tdA8bICuyT)a<Rbqx{KMkTbB#as6~@y;dm|LMO?);?h{kO(L8o0s*5+w&9r
zjX9s8&0%)9de>Mw_(39AGnmRIvcsE~EeOu+v_=cgED3w+=1%eH4ZC)<O&<#<5}P*%
zXV#aL2tOJ86>W*lg&Rr|B~w!?JU4y*oJ2{OJ;^B9Tz7HZ$LjvHuIXbJ|LI4=FF&mw
zTUH;;zr8{{BiF`?jCZZCX!WjJD_Xp3ZAGJZt*NNRMS@ioRo<(!A}N=0D|e2K6(ugL
z^3I0rwlLdNQ5$AgRcxSk>7LswS{Nwlo?9!b46d(`qzv)T!*pTK+KP<9jTOrcuCJ&y
zxT>ODaC>LPEuOWaUmu&82(#-eGI)g9LiC{FYb&}9-xF;!d}T#zm^~8R29hSy4-7sQ
zeb3;^iWO-B*MckyF3wYn&Wcfzn`1b3<Z-CvF`)j!+^UMa!8JzB!Jz!K*S#uHlFA;4
z4t$#Mo;5DKXO-pFF@CJAtScXM-K0c4ofVN_w!WgVq3ckXO;=Qf*)<jG@N8R|>Nu7N
zx-PXkZ>&hFZLZu|7Gyqts#<MhMMTKYl0=QZ&cvcjmWMsfbHlDf!RpUhs?LhJN@e-H
z86T+Q|0UCJn4JL5o>n;DqOCI0!yfCMQwr;pKkf66M`={(R&PXWJkp|U>VxK4=aGjM
z_0bt6B}=L&V8nEA(UD;FTftKu6A9zDF!<0Au>fP?;XSJ0qUs%_FQ5J<(l1*QT)ZpH
zwpE1b?8=JfeU;044=xEVd5q5|eiioAS5y&ocEwOQF%Z@qNcRvY==>E^K`MK9I{5zj
ziWQ<idv_@JCUd7NTEgs3JT_L`LfzBT)>kwu0C<bRjTI~Kug?Dh#xw%<R_|R>eMqD{
zv2Q_d*aL}6%6?t8S#@a0-7v9<cqL(Q(e^Nxu3;2!4?B(|g3iB{Z+3u^R#miG(Sx|x
zRBXVl3SM<u$-%myyndFzC0myT3-WcrH@19nx>|3PsGSwP5JHzMK3SEHmIamDiQ2q%
zSzS=MBl@$Q;=Rf-yp|qSnDeg3`(Uti3~a%{;A{Dy^8&<)@_eLFi>ogANZp}riLi%8
zRb5(w&Yz2=mjnwQ3cCIQXK?c^e31(S4AR0EX-%b`bzJ~<A2Xo3ywXuN{bP8B*@wdH
z^RY;R-ZEHd2=+N8Obmq`gJt2w-R)7BU8-IzL|4A($eHy<(!$ii>cBE(G^v%k4%G!~
zwlArEChW@J=2|v2B-RVV+?<1A{yuzD+25u*o_NW#OiMU16lOas)Dw)&L(76|-giH9
zq5ABZ>U@3o%8Iko*;^}m2``+bNJR<SOBY2WFNl%rE1J_aV?pN^m~Ay!7KPaZZum66
zq*_D2IJGC78?4?-CK@(j_72tkCQYv3r9nppuIgQd?*L5x(jL>)x4}0~3MU5D6wegC
zso3YNvYK2si+WXuH4lBhw0hURzo_0dNt>l#7L=FX$0vPX`LbYe+l*zwmB=3bznuSj
zx8u1}o;%$C%Yl3ESr%Nsby@c9vw?Rn%U(76B)XwARdavP)ej4#YJQu}J|1+6=w9ek
z{vxn+wkWK5uzjD}dr>-fZld~``s|Bo8lySx-$R>`Iopjv{xlKR91Om>ITbWKkXbyR
zMtdOW{3QjbO*@YuM6KI8ZnvH-308}*(mhv^b@o{sff}Ca>^|IUZ8SDypJmu?w4rL_
zcU%z|nj;1ovSN{GYi)>z(>B`BRWBc^yIsSzdWU8Wv4KX|n>Wma2N*0{6lFrLmwrJQ
z(rCw$>O7v6Tf(086%tv6v+)nkthM-?8@eWp_fo+{cZo4tF38m<6RC+Usfl~Sj&Vl!
zyD2Wr4uu2b9|*I9;hWYt?oHfbV|<Jp8MEa}PZJL=*!#tQ^O5>QzmH^sfJhau@qrlz
zRU8-({a1%)MH)sD2a+PHSLw;B%4QL^NTa`WP=kGIaHGbzYIVQl#jr}=^~H91JWaAo
z-f{1#G{^Bm<TZCVA>zH4df76ugOVgfG=P-(^GK35lT3%myZMFMvh8&n5_LaFcuHI&
zT7MUQK`);eVi^_08^&rke~;F#%N$759U4f~{fGJ_WWv%E7EfIDH*3%`_iNAwotnv2
zD|IEjdb1{ORqD+zp&yHgEeC^UF9PeJK9-CxO-jbM#+5q0KC!^~HW{gI<GyZ(@`?6y
z3Qr=fB#izLb#V@tkwYEHg3j-gs`k)!T;IVZlAmusIp|r3oKYo?@cG0so4;&2p(3*v
z4;8*Z+>kkJ`M$rfBIWvYII(T2L?{U-c1rNva9Covg$9dGKcr-<O!<jsc_ag`L?s~A
zdC9U?GIXvL0-dJ#I9G<2WOk;4jp9GFswy!lQ^Shr7tcFQ=%+wwM&B2tD5Z)1$s_8D
z=xc_gD_Wx-xjJ4<w3n-bjT$bg?BTWx_PrtOSc!HRW=Gmy9o8%*2-ho^XZ|<sw(m66
zs_m?>W-OK6-Zl^Kf5Ka2)P`B7D-_O#KtfYj-h-CY29-`c-*yOkC9u;yEpr*VN>mlB
zzE#!8j!DC6wwkT42n}wms8mqLizlU!ST}wS`W<P(O|VEw_1@}<RCZf%W(EXJ<yZn4
zc{2YO?g@hrz8Gf58I6MsyiXipqFY!g?%NUFwN0YzDJvWAZG_J*{be|@Sz>At<)vyK
zZU12*u##zqlY$#JtFVdcy%aXt#Z${WLV1tUN-tO5(w8gmCA5!J%|!dp3g1OOp|w=+
z4YS)PGuL_nt;N@;Iu(5Ik>8X3(XA&`=RB(O+tkFixH=6rhueQtkcv+apLFe#boMvt

diff --git a/DischargeFunctions/bottom_discharge_extrapolation.py b/DischargeFunctions/bottom_discharge_extrapolation.py
new file mode 100644
index 0000000..71e5a5b
--- /dev/null
+++ b/DischargeFunctions/bottom_discharge_extrapolation.py
@@ -0,0 +1,304 @@
+"""bottom_discharge_extrapolation
+Computes the extrapolated discharge in the bottom unmeasured portion of an ADCP transect. Methods are consistent with
+equations used by TRDI and SonTek.
+
+Example
+-------
+
+from DischargeFunctions.bottom_discharge_extrapolation import extrapolate_bot
+
+    trans_select = getattr(data_in.depths, data_in.depths.selected)
+    num_bot_method = {'Power': 0, 'No Slip': 1, None: -1}
+    self.bot_ens = extrapolate_bot(x_prod, data_in.w_vel.valid_data[0, :, :],
+                                   num_bot_method[data_in.extrap.bot_method],
+                                   data_in.extrap.exponent, data_in.in_transect_idx, trans_select.depth_cell_size_m,
+                                   trans_select.depth_cell_depth_m, trans_select.depth_processed_m, delta_t,
+                                   num_bot_method[bot_method], exponent)
+"""
+
+import numpy as np
+from numba.pycc import CC
+from numba import njit
+
+cc = CC('bottom_discharge_extrapolation')
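+# cc defines an ahead-of-time compilation target for numba.pycc; a minimal
+# build step (a sketch, assuming the standard pycc workflow, not part of this
+# file) generates the compiled extension module distributed with this source:
+#
+#     from DischargeFunctions.bottom_discharge_extrapolation import cc
+#     cc.compile()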
+
+
+# Bottom Discharge Extrapolation with Numba
+# =========================================
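+# Signature note: f8[:](...) declares a float64 1-D return (discharge per
+# ensemble); the 2-D f8/b1 arguments are (depth cell x ensemble) arrays,
+# i4[:] the in-transect ensemble indices, and the trailing
+# optional(i8)/optional(f8) are the bot_method and exponent overrides.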
+@cc.export('extrapolate_bot', 'f8[:](f8[:, :], b1[:, :], i8, f8, i4[:], f8[:, :], f8[:, :], f8[:], f8[:], '
+                              'optional(i8), optional(f8))')
+def extrapolate_bot(xprod,
+                    w_valid_data,
+                    transect_bot_method,
+                    transect_exponent,
+                    in_transect_idx,
+                    depth_cell_size_m,
+                    depth_cell_depth_m,
+                    depth_processed_m,
+                    delta_t,
+                    bot_method=-1,
+                    exponent=0.1667):
+    """Computes the extrapolated bottom discharge
+
+    Parameters
+    ----------
+    xprod: np.array(float)
+        Cross product computed from the cross product method
+    w_valid_data: np.array(bool)
+        Valid water data
+    transect_bot_method: int
+        Stored bottom method (power = 0, no slip = 1)
+    transect_exponent: float
+        Exponent for power fit
+    in_transect_idx: np.array(int)
+        Indices of ensembles in transect to be used for discharge
+    depth_cell_size_m: np.array(float)
+        Size of each depth cell in m
+    depth_cell_depth_m: np.array(float)
+        Depth of each depth cell in m
+    depth_processed_m: np.array(float)
+        Depth for each ensemble in m
+    delta_t: np.array(float)
+        Duration of each ensemble computed from QComp
+    bot_method: int
+        Specifies the method to use for bottom extrapolation (-1 uses the transect's stored method)
+    exponent: float
+        Exponent to use for power extrapolation
+
+    Returns
+    -------
+    q_bot: np.array(float)
+        Bottom extrapolated discharge for each ensemble
+    """
+
+    # Determine extrapolation method and exponent
+    if bot_method == -1:
+        bot_method = transect_bot_method
+        exponent = transect_exponent
+
+    # Use only data in transect
+    w_valid_data = w_valid_data[:, in_transect_idx]
+    xprod = xprod[:, in_transect_idx]
+    cell_size = depth_cell_size_m[:, in_transect_idx]
+    cell_depth = depth_cell_depth_m[:, in_transect_idx]
+    depth_ens = depth_processed_m[in_transect_idx]
+    delta_t = delta_t[in_transect_idx]
+
+    # Compute bottom variables
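+    # bot_variables (defined later in this module) returns the range from the
+    # streambed to the bottom of the bottom-most valid depth cell in each ensemble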
+    bot_rng = bot_variables(xprod, w_valid_data, cell_size, cell_depth, depth_ens)
+
+    # Compute z, the distance from the streambed up to each depth cell
+    z = np.subtract(depth_ens, cell_depth)
+
+    # Use only valid data
+    valid_data = np.logical_not(np.isnan(xprod))
+    for row in range(valid_data.shape[0]):
+        for col in range(valid_data.shape[1]):
+            if not valid_data[row, col]:
+                z[row, col] = np.nan
+                cell_size[row, col] = np.nan
+                cell_depth[row, col] = np.nan
+
+    # Compute bottom discharge
+    q_bot = discharge_bot(bot_method, exponent, bot_rng, xprod,
+                          cell_size, cell_depth, depth_ens, delta_t, z)
+
+    return q_bot
+
+
+@njit
+@cc.export('discharge_bot', 'f8[:](i8, f8, f8[:], f8[:, :], f8[:, :], f8[:, :], f8[:], f8[:], f8[:, :])')
+def discharge_bot(bot_method, exponent, bot_rng, component,
+                  cell_size, cell_depth, depth_ens, delta_t, z):
+    """Computes the bottom extrapolated value of the provided component.
+
+    Parameters
+    ----------
+    bot_method: int
+        Bottom extrapolation method (Power, No Slip)
+    exponent: float
+        Exponent for power and no slip
+    bot_rng: np.array(float)
+        Range from the streambed to the bottom of the bottommost cell
+    component: np.array(float)
+        The variable to be extrapolated
+    cell_size: np.array(float)
+        Array of cell sizes (n cells x n ensembles)
+    cell_depth: np.array(float)
+        Depth of each cell (n cells x n ensembles)
+    depth_ens: np.array(float)
+        Bottom depth for each ensemble
+    delta_t: np.array(float)
+        Duration of each ensemble computed by QComp
+    z: np.array(float)
+        Relative depth from the bottom to each depth cell
+
+    Returns
+    -------
+    bot_value: np.array(float)
+        Total for the specified component integrated over the bottom range for each ensemble
+    """
+
+    # Initialize
+    coef = np.repeat(np.nan, int(component.shape[1]))
+
+    # Bottom power extrapolation
+    if bot_method == 0:
+        # Compute the coefficient for each ensemble
+        # Loops are used for Numba compile purposes
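+        # A sketch of the fit, assuming the power-law profile this method
+        # implements: u(z) = a * z**exponent (exponent defaults to 1/6), so
+        # the measured portion satisfies
+        #   sum(u_i * cell_size_i) ~= a / (exponent + 1)
+        #       * sum((z_i + cell_size_i / 2)**(exponent + 1)
+        #             - (z_i - cell_size_i / 2)**(exponent + 1))
+        # and solving for a gives coef = numerator * (1 + exponent) / denominator.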
+
+        # Loop through ensembles
+        for col in range(component.shape[1]):
+            numerator = 0.0
+            numerator_valid = False
+            denominator_valid = False
+            denominator = 0.0
+
+            # Loop through depth cells in an ensemble
+            for row in range(component.shape[0]):
+
+                # Compute the numerator
+                numerator_temp = component[row, col] * cell_size[row, col]
+                if np.logical_not(np.isnan(numerator_temp)):
+                    numerator_valid = True
+                    numerator = numerator + numerator_temp
+
+                # Compute the denominator
+                denominator_temp = ((z[row, col] + 0.5 * cell_size[row, col]) ** (exponent + 1)) \
+                                   - ((z[row, col] - 0.5 * cell_size[row, col]) ** (exponent + 1))
+                if np.logical_not(np.isnan(denominator_temp)) and denominator_temp != 0:
+                    denominator_valid = True
+                    denominator = denominator + denominator_temp
+
+            # If both numerator and denominator are valid compute the coefficient
+            if numerator_valid and denominator_valid:
+                coef[col] = (numerator * (1 + exponent)) / denominator
+
+    # Bottom no slip extrapolation
+    elif bot_method == 1:
+        # Valid data in the lower 20% of the water column or
+        # the last valid depth cell are used to compute the no slip power fit
+        cutoff_depth = 0.8 * depth_ens
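+        # Worked example of the cutoff: with depth_ens = 10 m the cutoff is
+        # 8 m, so only depth cells whose center lies at or below 8 m (i.e. in
+        # the lower 20% of the water column) contribute to the no slip fit.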
+
+        # Loop through the ensembles
+        for col in range(cell_depth.shape[1]):
+            numerator = 0.0
+            denominator = 0.0
+            numerator_valid = False
+            denominator_valid = False
+            cells_below_cutoff = False
+            last_cell_depth = np.nan
+            last_cell_size = np.nan
+            last_z = np.nan
+            last_component = np.nan
+
+            # Verify there are valid depth cutoffs
+            if np.any(np.logical_not(np.isnan(cutoff_depth))):
+
+                # Loop through depth cells
+                for row in range(cell_depth.shape[0]):
+
+                    # Identify last valid cell by end of loop
+                    if np.logical_not(np.isnan(cell_depth[row, col])):
+                        last_cell_depth = cell_depth[row, col]
+                        last_cell_size = cell_size[row, col]
+                        last_z = z[row, col]
+                        last_component = component[row, col]
+
+                        # Use all depth cells below the cutoff (1 per loop)
+                        if (cell_depth[row, col] - cutoff_depth[col]) >= 0:
+                            cells_below_cutoff = True
+
+                            # Compute numerator
+                            numerator_temp = component[row, col] * cell_size[row, col]
+                            if np.logical_not(np.isnan(numerator_temp)):
+                                numerator_valid = True
+                                numerator = numerator + numerator_temp
+
+                                # If numerator computed, compute denominator
+                                denominator_temp = ((z[row, col] + 0.5 * cell_size[row, col]) ** (exponent + 1)) \
+                                                   - ((z[row, col] - 0.5 * cell_size[row, col]) ** (exponent + 1))
+                                if np.logical_not(np.isnan(denominator_temp)) and denominator_temp != 0:
+                                    denominator_valid = True
+                                    denominator = denominator + denominator_temp
+
+                # If there are no cells below the cutoff, use the last valid depth cell
+                if np.logical_not(cells_below_cutoff):
+                    if np.logical_not(np.isnan(last_cell_depth)):
+                        # Compute numerator
+                        numerator_temp = last_component * last_cell_size
+                        if np.logical_not(np.isnan(numerator_temp)):
+                            numerator_valid = True
+                            numerator = numerator + numerator_temp
+
+                            # If numerator computed, compute denominator
+                            denominator_temp = ((last_z + 0.5 * last_cell_size) ** (exponent + 1)) \
+                                               - ((last_z - 0.5 * last_cell_size) ** (exponent + 1))
+                            if np.logical_not(np.isnan(denominator_temp)) and denominator_temp != 0:
+                                denominator_valid = True
+                                denominator = denominator + denominator_temp
+
+                # If both numerator and denominator are valid compute the coefficient
+                if numerator_valid and denominator_valid:
+                    coef[col] = (numerator * (1 + exponent)) / denominator
+
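+    # The unmeasured bottom discharge is the fitted curve integrated from the
+    # streambed (z = 0) up to bot_rng:
+    #   integral(coef * z**exponent dz, 0, bot_rng)
+    #       = coef / (exponent + 1) * bot_rng**(exponent + 1),
+    # scaled by the ensemble duration delta_t.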
+    # Compute the bottom discharge of each profile
+    bot_value = delta_t * (coef / (exponent + 1)) * (bot_rng**(exponent + 1))
+
+    return bot_value
+
+
+@njit
+@cc.export('bot_variables', 'f8[:](f8[:, :], b1[:, :], f8[:, :], f8[:, :], f8[:])')
+def bot_variables(x_prod, w_valid_data, cell_size, cell_depth, depth_ens):
+    """Computes the index to the bottom most valid cell in each ensemble and the range from
+    the bottom to the bottom of the bottom most cell.
+
+    Parameters
+    ----------
+    x_prod: np.array(float)
+        Cross product computed from the cross product method
+    w_valid_data: np.array(bool)
+        Valid water data
+    cell_size: np.array(float)
+        Size of each depth cell in m
+    cell_depth: np.array(float)
+        Depth of each depth cell in m
+    depth_ens: np.array(float)
+        Processed depth for each ensemble
+
+    Returns
+    -------
+    bot_rng: np.array(float)
+        Range from the streambed to the bottom of the bottommost valid cell in each ensemble
+    """
+
+    # Identify valid data
+    valid_data1 = np.copy(w_valid_data)
+    valid_data2 = np.logical_not(np.isnan(x_prod))
+    valid_data = np.logical_and(valid_data1, valid_data2)
+
+    # Preallocate variables
+    n_ensembles = int(valid_data.shape[1])
+    bot_rng = np.repeat(np.nan, n_ensembles)
+
+    # Loop through each ensemble
+    for n in range(n_ensembles):
+
+        # Identify the bottommost valid cell
+        idx_temp = np.where(np.logical_not(np.isnan(x_prod[:, n])))[0]
+        if len(idx_temp) > 0:
+            idx_bot = idx_temp[-1]
+            # Compute bottom range
+            bot_rng[n] = depth_ens[n] - cell_depth[idx_bot, n] - 0.5 * cell_size[idx_bot, n]
+        else:
+            bot_rng[n] = 0
+
+    return bot_rng
+
+
+if __name__ == '__main__':
+    # Used to compile code
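+    # Running this file directly invokes numba.pycc's ahead-of-time compiler,
+    # which builds the exported functions into the extension module named by
+    # CC('bottom_discharge_extrapolation') above (the compiled .pyd shipped
+    # alongside this source).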
+    cc.compile()
\ No newline at end of file
diff --git a/DischargeFunctions/top_discharge_extrapolation.cp39-win_amd64.pyd b/DischargeFunctions/top_discharge_extrapolation.cp39-win_amd64.pyd
new file mode 100644
index 0000000000000000000000000000000000000000..04600e7b2c4a54a2a5bc83211b22464e4e339763
GIT binary patch
literal 135168
zkAp*cF$;kG5|6xM8Ylc_BRJl(2$4zVHOLCs0~!$r^&G<QAl+%KA7qKc2uuM^NcFW`
z#ud;U8}J`rzO0WcBW4uW6&_XNCirU96{0#4N-r0!q!WBq{C1c<Fg&<Gj^Q<$gJV1b
z&^dw07#<Ke0dR)EBDA}Yq!g_g8=<%)E&8QZ2dn)Uk7&cgxyTVLF?dAt=pkqw=3b(A
zGbQ@@HhhYuXMcs#cf3Zy@)S}70T<$f|7Cu7M1437AKux;4<UT8WW0%!coLBM#O5<`
zn+ANr4~2=Me~%)+A0R)lQpit*e-4m~jF2%L3q0Q7%i%}|8Nb-5!731lJ^nxzWgCpf
zccJ$ZrxIWj25|$<Wi<{!f$Emp1#Wcm(du(BE(%#-$o2f|a5ygd!!P4P^mc3_LdGIq
zqptG7j6)%XX@`YFnXzz@_=V^MH?a*ltkJzr5=hNjl6p$j%HkhU*M7#Wy(`BCgBRzL
zUihP~lkh7<7#x+&W@8RKc6hW6k|Y!JRDU0%wtouO5^s=_K9BWCfie2PD9g?TN_R2W
z-q-`mTm_LTXT_Iw?w<`p-2!*HftOgb<MC?3!aIU%ShXJ9MiOl|Ew3s@5<T#>Zq*~$
zffI~yAtQYfC;?X3VyYWAN~=UGC>AL`fE7`bw+pqECDFvvArWX|sU8}2-3^JwVKhqm
zMR!ULqNs)x2wPX8gtP)V$MFpd#tVACmgCu;OH=cV&|<M)h}q&=?69cHZW*Oa7k6RM
zBLCsU;?GE+Q$b6=e3iA_qA64=P!M%ZL}gUn9d&b{2a1tmEErlNHW|Ur%wVA&F;aj6
z?vpQ;S!lkbUmxMkXcoRfopEcW$>VF6V6^8>j;;MNyiSTC5Ew-vuU^7h35vVM=uf0r
z!RT<vXGCX%5)4*vs*figg32o7I0svdU=R9+X%B*tg=QclFgM#Y1zjXOP)tGohhg$z
zS+Md1#uRX7|B%^k(^caq>dJ_<Vt{3R^Ppu3_P4DPOPT_Oc<MlNpx>Z6eN%ZuV*LzI
zGU!59X;q1F<F$~P)!t>8<Mk)V4#UzNb^TRAz}D{-lsHsZsgF=-<HqmL<l~nczj5*<
zxC>iW)S&y2`N6W+7JV7Se;%qLC4fP~z5`()T*#CdGBA_AB``{jU}<koF-9`h2PcZ^
z4yii%6*kr06Lp3pZG+E;rFl*zz)!qEZ1VwO9lcoO>KNE_v?XJVv{6WHkVsnt$4Dr3
zu~*5-rN+@nB0L=pI6Y34Uxe=<1c+bu6j#mxm?V(ks=}h@zm60M0rBl&Eiv9$Sa2*<
zu~z6zEu);EJanjHr+rkQbQ}XS;{*2%V9GD-D;8<dizrrImGqcorg-jgCDkKP8XixB
z@$J(56#MZs8`bmxh2?m9kk<SY=mGeHF4G@F)d+AwxBAqmE0+iZ4@@(wYngcxda8D0
zUOP}(Jt~;~69CpKOpE<&ctrui`b%!BtiS}8#TH;pnCmYZ?ur7E@liW+8}wcD)_zRV
zs>eXQ9D9jkbtNmWxZY?m?35k!!l?Ajw*Zg{2kbvVLT$V`#;-y9eN>&%Mrhc|WI9y7
zlc1|lLAw)&7$`OUI;0RID)@WA%-YfpekIAZcF=_~NBX%A8>>V}$~LEos0H)M#jOdL
zTi-a`6Z`~DJN_3Kd4*|?`clfHP*H&|RE)0}k-kt7&Vjld(rg#fa`rxev4TiLlLrsv
z8u@2k1C%)GI7cIGTgxh`j=Ft<!LgZ9*KhkXbgWw}4J(uf7JW$SU%-Q+FyZPnLf4gG
z+rm!hi};9j28q`REaA``vVu7P2$Xe%BEi5BBK5AQYYco4irfPxQ~anao#8OTqpK*q
zv1n1n%wn_p8T7&qJ<=1Xnhm3mVFXclkcD3(#exy7k|eMTd`kr0Ab5pHCyKnEe8Y{+
zi3#T)AHfJ#LlM2Zy`!f$h)wv_!;Sn)zlpk4Pl>v|C*>?{5G17-`Svtv)HP{7%wuF>
zbvI*#T$wMz$Ob)HXh_so?97GRjnCkA_i|DM3jAp{R_)OVw#YM*<G8fok|zPxH!fIo
z_+CQzEAY?rY8w2H3z4B{M2@<95QN!ftn&(JS4tnI%AdKNq0n&0&`%s<^Bh(q+$gc*
zsyL)N@WIUfH-M!&9odfO+t03o-urUY^&pd+j;vn?fTq2%O=rD_{@~ppEEsF}Y@Lxm
z@(8{;>aYtNA!A~717RJK`d*C{ry1DtPn$y2b(OT|YPS*O{788~Vwf2ez@td4Jw{^r
z_!xDqyMQ8ybcDU>$g~JsVS_Kzu;hAMvLN;OE26Hy!3Pp6=Em*~(iGy4OE$nE*TsMj
zCsV|h-BReqUep{<;N~O}`vAoX3X3HPFnX4Fx8i%pDT!2dE~4=0JdQS?onFabmf?Pm
z$Rg8X^cdstaiTsy{VA5hY17f_GOSmLFOKdS<U<_Y-OLXm*NQd}(@8`>>5NKgS9EtS
zs-bvxj1xg<S=8qUJOacXzVAu=;;+C__W&F;mn=6Y>N@v)`p66YxRySF28V-C{)?yz
zx&HP7V}S;n=?A0LI{8DA8X1v|no0@;y%pguiSV+Z_jINuvrVqAQ0Qr3E>YJ3#81pC
zn0|?>LylPo-bf^+2|GCPdPH0&aGJ6o`#8k<A&ngdgGAgabOCrnFnt|s8*&}m$^y{Z
z9rVW<^E)JG<%IRewKm&CM@aHlLZ-j8yo=Vxs3Zo_-V${kljL!vQ7w6B(x748rz%))
zR&WUXf(2~r{5&f8^gNSFk}Wr?4p62XbytAaxG%Z@rxQMrs_b&qoiF3RFk$@nm2a>%
zVdxJhmUB3P!KE2~?)oi2S(dQLRUU8xu!W)U__$#WwpZ*e#_4Te*3_u$5`4xw-nmSs
z#LaymQJ4Tl+(3A|ESZS7sl@(l_!7m<BMeUvH>vW)nt~l<-Vir~1Gj<|paQbnlsYVm
zZ<m50aITO<AYHczoJM@_a2*%DFJp0Cog|neXRdja%nj|fz|xtL=gYuqP9a*&ov^TP
zQ6~oi+(ZESeIQDTx>kR)D+EYYc1RUX1iS)YA_5*}cme{t%NGj)A2IJZ1VABS@jw8I
z-z^0L0hdc63ju${cZGn2=uXDM7$``Xg(q6ng1um&dI9M*iCg+~)YasMCfNPB`I#&(
zO~4LLcQ<Bl|5hWMT%3%+6H@rXdywYCg$5NEkyXqEr(K@li%_FrE*|2&s4ckZs$eSb
z<Vx|M4h5=@voN{_7I>uZBZ^XOQ(6?SFkp-2^cHV^^TKVsO}G*{778~CzKB~P7M%fI
z#liPF@d`{zbd3>$fqnWKZ~oeaA9$yKtQKoP>1jpv+Zh+<lFj#}98dYyI&m$4k>9fL
zLnE+K9)#z$$rD+5?97--H^0eIzbgfH63+B^4aEa%;WL7Da=VK+81+uye8BikVW=P_
z@?Qw?V6lT|{0c>s7fB@@4@{CY9YvVs245_~lFK;|*RME2h4}6Z4cUvq13e&hs4p}E
z`mxP+RnRvAt6@zv%q`4v;)&cB?oPkT@l@)1;wakKaWby{bQI$D16(Fh+k(xVP1SE=
zdu7y^egqlcoyYfhm%Ws!s`?VqSyaClc525{o|lmEhK^nUeJ^1#5A8dO5)I%k|CNQU
zj8=SAux<p?_<|#{#s*R^$G%Nt`UY?QhP(0h54M4gK7wTvBb2&<hNp|R3I2^>AHJ*w
z6N#@U?5Z&A;}BGJq~mgC;tiEVk?CeWK{htvX+b4-EXtC+`n=4%F4|!PQyoZ;`GCi9
z1l@Q<rRTU!gFVGN{Z(&pn`%88&8FwS?pXFLSE8mjV}traD2(G<j&!6M!R697Y)KE~
zIDS1A7S`VF^oB;XcrJPe{RLwKx=e_Oi#WV<NBSCF$G+!!Rf)KG@QH5j=I}AxwSaxM
zq;%-A*ePi74O1e3U&um8CQVZX21uS&AMTyrin0vgK#9=WwP{_=rqq5(b1!fDIUC7d
zZabTm5gx+z_>WuZ-4JyxI17L%QlhIvk!s-=tjAvnf%6IBkn0(VP5F;pl)+e}AgnC1
zOQO#?h{jcS=n(0@Vo4#_L^I8I>8BLM`~X~LCbCS<LR%Ux)iE)RTLaZpPveHM4ng}W
zDyy8eq6m*F;^GZZk!HZI7TYs%KVta@$L)Dsf;dQV3R0MB@F#asgZS-_qmjpP%khyG
zaKmS<?T@goyB6USvi^hA56F48-|;OocT#-39?7w@Q|U^AH?y<hE}2#k#V#zXGnaS^
z_~xADht(GgWd&~Y1N|n<Y-klWrX%~*w)oFh2?%$8L3q6RpZQs3>HMAX83zk9<GqZZ
z7L*-*P2qDALR1y%zeoLZHMtSD3tROv0UC4C#fZ}rf@AcVaZQ&f42KGg9l`3)ihAa8
zQC0j{RPjSc%DX1bvD_>=+1`M)jNq%X-V=4Lpq`2I{i1F{Ybg=G-Alffu-J1m0+8`>
zi#_K^N>LI)@s^&bt1gZ=+8*!Sj7H}nmGuqVP1ij~fdS1QmUsl85+BTS5Kkc7$Zs>l
z--k+QC71QPsOw#-iJ|mA&{1oObqTTJ;z!SaIzGsh*H{tQ?gtb6c4pMsf#nLOM~*kr
z;X>6A0b*xj%p6tvsB656f3X=q^#t*yKa?pfWPeHo4K(&$w2f3>Ogm}R)t5=Ra0C_*
zzly{44W*LHpb8J_5swI6q1aNEwzpxo%2C*Pw@7myphE?<j6nS%Wfe+XhgXY%YR<Gs
zC7U40fQ5nLw;vV?k=1hG$jgshGB-_>fhs7>sB6opY>tDd>+oh4%QH?~3%k{+Cl)L>
znLor&Wl-<Wm=gP^B?Hv^w**jcIk;*OTzr)`sT7Y(3RL+n@!O&5m<#+;*HHTR5kR75
zWk+E#3CmZgH0Jk`l6j-DG2XB7YtqXuv56FNp$nY8Tnb4aMh7k8LCWrcO(KbN&Gdz!
zHCfp@y>{fjO{iQo4wrK2{TlQ!r~vt*LRhB9WCw<&lFVp{(B-Fy3xsTe6LmeRJZ*Gz
zvC9C*6{pv?!84Qcc}69zhZA*8P@bi9)bJ?+7@v&=bN(hg82Noe*9-L-8B5w5AjShf
zgwoHCeg6=`*~-QOj^j`JF>|PG%JGC!&-8YO)cyp?kkLzYXvg~v0PYUDI>#Gq@}P?j
zAAqWS5y$WID9q+{6zwcrO-rq}`z~*OCvfO<T)93xYAaNckiC?eNlQ-zGuH>0YhVnt
zuRA~iQ$PuKfNaNTvMrD@g}uQVV^Nz`cLGWEhw%fI$C8I9)Mu!-du!B{&rCw;8Il+1
zu>;BilmOfhQrnz^<t4V@kmL?xyl5kuVK|DKjP6~~UKi!b3iZb<GseG8C;wWDVo>bE
zbjZK_EVfZ^anU27fq|s`&?wAn!EgdqZj1p;ltxRk6ODm*1jO+(&@<|RbzvqBrJpN#
zpvR6vFJaLd=@U>EFdj<pE5&1zY+wS49)saG1;cL>Vr%};)rxeiJnov1StduK9Y*YE
zo9g%H-y|92?}RA^Js*e1$Q7Go3zNDLvHS#y!H=06%xoo@g-Q_I#rj~sgH#KMyh!{+
z7}n-V8;KlDlU{xsie4i%iL`EtE0t<4TFQ}*9cKV=#Y76J(HFoJEN~4>jfHNM_YpO&
z==pz!6OV4_A1U4djJRv6d-?RUr_J#z*a$bG8;pRdK!_h)&hlgY$~kVFKkrLSZ@>as
z&%d&bt`a!myZ(%;B{4`ahBv4PI4=zV>jU^rs0%$%SHFSpJX8{}UoC2)J(KAIcCYev
zQ@&B~;i$+SXoqkPD(9GTdLm3KJm4(}`WJ?5uT^p!<v$S@zLS)1C48oi6kjQ%Pf*xQ
zev9?A1<U|4^#)lQLpOu<`teOzFGfM|S~sjYCuY4%qyp*Rh7)yVDbICuH0$ka!twRu
zh92~5*Oz)=Ux`0!qQ>@ju)c?(8h-zeUEc4(<v)}p_j|-OnEE32Q7yo}s#NrKv(eJX
zS%Oss(9nW)as3ATxUE3;-wD^}p*MJg5fQD}s)OAyQP*7L1lly{P>VPx&TlG=lhN7=
zjOj(7<lDhaM`r`WK-ht`Occ`<3&fzr$zE66V5}CRzM`*F?i6!w5Zs6fZ8#lynlnQQ
zFSxcSh&kk|g|+V-@daB857|<E!EOFi3t{{$o?eoAc&D%XHq6F-=WX*&Z^jNg^!F$y
zj-Hbk4tY|#JNzd(o`qZ6-s$VU!Iz6-T5pJqL}yrai2<?uO_J1G9VIUOeA0ixb*R7F
z2)=)RaRJHY#>i$oy^6&%>$5xjJiatYU~?=$MLu(l894`epqqn0Ff3X;7>U*H)z9-L
z+@i|hbu+QDG&Nj`!#&I?2WMSTgrvxAT{7Hmvo$)FHwByUot^S^{M(50tNeRYyGOL!
zr>FX!t=-Yu{f>4kw0pmHAJ^{l+HKSB``Yc1rt<5jT@zQ|*8a=2YiPGryMFDO@!Pe3
zr*_{@3jbx2zeg`s?kU=JYj?1AecJs<r@LSKAJcAycFpoX)$vwp_b*B1y<}JAy{p}i
zwd>S)9iZI;?cT4$Z_@sG+Pzo1%eDKYb}MxJ@!Iuhcc^yH)oz-0kEE*l?9}c??XJ@9
zS{-kx_WQM4uHEakd!=><Yd1@~8QT4zyDH~Z?XJ=8@3i}4?cSr^nc5wv-OIFlfp$;U
zu1&jpy6OD2yFt6pYWFeiKA_!&+O5#;_1gWNZtssxSJ(e?9X>$2{j}RdyC2GxDK=ZX
zb~kGGPug9j-AA;$OuO^7Tdv*l+BLL0M7vqqEz$k#)P9rh{*;|kL$$_q8*WnmRY|TH
z|K4w@aMS<Kb&o7o`K1*oxBUv`c3vU6m-5Ij>&!pXRlas;M(WS}o@Rc~uIW#~Uwn{T
zdrVhRg*vq!A1^LZ4Zd6doz)62<vMbW_G_=LCiTG^G`uYBrs`R<{j(~nmM`n`?dl5u
ztT|;x^XAQ+xBLgomkq6`o^#VQ_bs#h{<0bF^6HA|{#kP?+|zEFJC7e`RF}E^bKTVy
z^UA8`&b}RAsus+-Y3}Uh^~=ZIw|t;kn$7m3zr4PC&jU4o=yiF?*7NsXe|X->eIAO1
z+H9Q*RDFx|$AUW(`?Yhv@?)!k{I%)$+H32)BPm?Rx595v3fJ+?@B$qk&>z~g-wX%7
z7HK#hU%juNHoH28tsADzn>THNdsY>YGS_|6tef-QQ}JnF)vP<q+~0B&oZ(+kSvGK%
zzibXaPG#KL)8^e=HqY&!IjsUeW$roC=Fgf_J;z-+x2kH^O|#3~j5#n7r4!#8xB2%y
zpKf>0TH^0^-n;dimW`NT)|D>miwTGPeNKI!sIHjC0?>sw&6_)I#`I}be$?{j>N#Z<
ze)mv*pIhy(tma2FtoP_yh<xs-%Sx}SDw|hTdeiJ_v#QDlPM<b=PTB0z!pcg|H2<_x
z-&}ksEtxm>=6TcRRF#%2@XwrEp`F~k(%G|aDy>5MFRq+6{nlwWmsOPtiIrAV;@^T<
zb1G*KtXy#J0QYQMW#Jw?c=!N!V#`&d3iclQ?)jbWcb<PGx-xj+kCDUc=f0uaE6zWo
z{g?h9Zhzy2wUdtATYg?n#wp`d*Ol&i=a);d8zT1i&uIUU|HJM7LB~Bm^E`3rrq>QF
z`2D-zzNK5oz4^b0wO;5S2fpMlp8orOr)=I_&#c>LRe_6HOwKt&e{tWmz<noT98Yoo
zS(CQnNbt8)Iy<v2uY2{mKEL_Kf0fUeRW*Gk$L<vW+{*aE0neF*v0HJoyQt#!S@Y&r
za6Db+p7Nb>Im5@1q2lxKDO2W5n^iGo%AjEh8BG~HWYCqlgQpMoUp0Qzl)>c_t|`8D
zobTG32j6_tkjk+)kGXSPVbPS_8AbDFTw2IM>7C`9HK=G%;jlT?eh%hg(`L`QxuR@F
zQ7m}y=LFCA{L~2kg4F-L;Lof1KUg*CiRsfSF7!h#-86T;;$80Yd2{EuOBP&x(=BDw
z{q9*6;B6Jt{IhN^bKg!LKG5wR58gXh1<#qb;3jy=-P7DNW|fzh%_Bb~=Qg`}YFWkY
z1E<fesPb0{4<C1(&xcIiQ|FaUn=wTKrkZi-Vm!LEf0-Q&uX4fiWj!lq2wR1m9k}~X
zpS}3#rVBo5TVL~_=f2gAzt3NtK+n|`w^q!(qrzQA!R)?cW?6+BToyd?X0sKhY*wX=
z5T=S6&I5n0tR6V3<hoIFaYxX6zuClTes|9N9R3X+GHU3hLyN8~%*nZ2*U|P>%PU!=
z<f|#aS7^VP--^3cxG7(AlEQ(XubX`Biof(44;r!fN@m&oN)Q{gB9PnAAp_hu&-J^<
z&8;Xqp1+*?NN8lnEB6n*ZQs=P+vZe1by?rL_oB1rO{<(cdz$}v4)YcG%ix@(jKE<A
z5C1YOMstua-~l(CfJJ;+2K5Eme{t|X!b1k<UR_B|cG_(A*p$J;rH7^r&KrDbQb76a
zxzo^j2r_Az?!Wm9#n@NjFFB(|dH!+!;;c>NA1k!q>XUm^_+I^?Q~S+u@RzqW9Ip=~
z>F<C2Tzz=ZSJQ_)bEk_QT~#`G$~E6EyM0P^<qQnvs?r?gEr&8)ReIaJvfDv>Qw9&7
zk~0*)r5;nO7_BE;Ri#ibro?oF*k}g*#_7s~;x*+SE7zS?H@n{T**n*Ka7XYb%jZ<2
zJ!ay9?$?3eve~y!n>UMEO;vp50e=PlH)wc5M&N#z4*j3!eqWAvo`|{qo4<Tj{bkUo
zQG-VR<NT%J-bDSSUHfhKC4Mi_{z~mWVAADM70>j#FYG3EtW51XwQGiB{F(a0jms^K
z&@R>i+FiPp4__Wwdf)PIFV7y^eObzg<>SVU<3k`(`59~TF9-KEGlTo$i=Uly{h5Q_
ztGe#5Z~tKNOC7Jh<JPt-73p74UahX)jmxvUYT&FdY~al9%eu`P5<|nxp2s7BapF<c
zRh%*)>bW;Q{oa=JZ$AC}NB2%#FmToP-#D-KZ!wk%24Rb_Ri$kuFbx}hvG}pLV8PPN
zlA!8;9JZFfoMnmh(00G_Uu%Zz_}Xi8>u^&)UZwqJcx6DPH^Xg7;Wiy!tP_-IznLEW
zf2D>$NB*5tQFgPM6U~4Tu!581Dt85xR!lYK-cdFqrnlyvSwH>H&%L_g%;8s_TV)?$
z%x$W?)hvtd*FnB~&FgFE`p5U@spIDQy)dDb(cl0ZfN93<H>+a$+&Pu#Td^n3oK{)p
zF2_Vt%|DZkwaEDO{O4;2_=ZiqaAf*_j;+1^drw_4Xm%35wslc{l0J1_SM)S(URe@N
zEjz2Ca>~52%Cc$xDKr3TxN1rj=D20^mM@#}KW1_i1Ypt+J!*OJO=<Tn*H&v!bMFCF
zdmnjwe{^JZ;dc)|;k^CM@$v1?`JNh&jrwD!_M35X8dUfw{h>zt&G5$Ws&I2qI+Man
zba<goutNLI^z9F*^k#TYQh22fH}h}QelxvI`z?Nv6kZZi`J4QvUHi@SU&HZjq9T60
zB+=_XmH!RXxJoF`TQQ?-zBMc}ZZ7k4DS%dXTASugtGKxf;{Y<6aBbnE3r9TsNZ}Lx
z?wl}S>uFiP_-V3y$F(l<7hm2<#btBm&Rbv=cnapN<mXtf;gZDcSylcRPuJ<ine@%)
ze?BO!IC^@HtNGDa-<Ul7xsT3{4@duZRbKDJ_MKWew^AA2%H~x17ko~6kNjqK_}Z}r
z_s;HBU9s=1l24wz{@FoE<^6BtbGR=5^YEFW@k#kO9h18gw(z1Ke{^d3(l=gg?0?Fz
z9{28FkuoT|Pkj3t_4qf(vondGoB-edll}YqF4~_wUa(SC#U(6Fzc<aTu9#7kAX%2#
zXH`^?hnM}&^6)v+{4>eK$G#*yeD~GUug&(YEdDD_rJOqc>>Zt_p7BIHz6%~n<Zo@-
zZ^mhOI5FIv6khX?3ODTyl{&tezWqmu=_`M%{APHi4mbI4qxPHOZQoB!KlKNR>A!CL
zxAKRo{-%ChlGNVL2UWPKAD8I(CO!&uxG9g?wBHPGSe}S4cT%`phnx7W)P6HP@cF+j
zAHHt(?yp;Y|E(YY6PD!vQT_Pe<LL|bXZ$RYA5PWs#N<ad9d7c&soHObm;5x5-?eMM
z8NT8tiQzd(;W;|o<cBrdZ>Ilu|9wIKef`!e|IH8owfyjHJ+|WK6G{5?zt^As5AUb@
zTIoNHzgGO;+>hb>59K%cdx;Joqd%<BelxuN$;9+IN#Sk3QsL(Or%}f@(>MMyF?^Md
zZ-%dWA~Af0j&FwBbhxSSH)y{Z?tWaQZ`2>!wciY1^$Qhl+TUzR;RQO}RQemV-%Q{3
zm`ZQX$K6TcQ<K71X}_7i^XDqPnSVi2c#RG><!_tzo9WwDsPtz0rzVAe-R6J)0)66t
z+g{X_J?wwdKGRis|F*xKz@qs@_BYVy|JU}euOYwqZ|h56asT?ibidBBOL7jZ-`)48
z%YQj<t>aK$@uxqW@B>p8&_)0KxAE~6_p|>?@zHz2RQFAro__bUgR{2Jee_p1E}8Zq
z?@hA){u%iAiu>FDrTDn-#@_aA-<Wwx?knz3e{k3I`g8wK|EPrw0KkX2pCo>M@lW0F
zo>bll?T;@nasTTX*8W$$iSoodVE=f=sq-gqZ4K4l_p5o{;h`Vr6=wV`sl2Y*mss9^
zbH8nTc_mt3HubOeB>nm8#!owctMp0J{@(UG<u}`0sl!csQ=9gi;f*U3_0f`~aA#8Z
z3hg)3ulS8hZ|b|9+HZyfpQfrDPftnn<A3V<?AK~O_jRK;)c4=^rhlQmsr{+M@s_hH
zalGf~aFf3^YJc2#{JM>g|5g4`sqtyz&-Qd8e!p(xwf$_hU$y2O<#MZO{GUk|U(R(W
zMaxO@yHk@sB>OWG!+V?@?@vid^mpr)=<nV=(Vv={=(pPw{XKdl`g`_F^rxjI`g`?K
zez#SrB^O$t0(!Iqc?6Z)D=9qLZ^I=maev)X6ZVGKx~KN(CP&NVw0w%q*6ZfD*aS~Y
z@TVlDx7qsiz&pa-ZSIsbxJ<u5Cs3}c4>Hz&OD<&J)1%y87otKby^)r#&9>y`OdYP=
zUKgSP-O`aiU9*1o={SggAv!eW6q_@JF2kF3Ny=5_;~IDUr|`Me^dddMElK{Xll-_U
zOaH0(yw*$oDL*a<wEWXFq+C`0i)OGc&)ne1`u{?QDObhc5f}d+4J%iLzZ@4%@YN*$
zt|UL+rqX|^zNQZ->uGLAQm)D$h}QQFsQv<C^<UwIxTrOu|DM)<<@UNT^Z&JXF7Q!R
z2mU`30%XD~C}2dyfd&T^;(%0<iY5e*Xn@%e6lv>ZNG2pDnHgs$nAlfnZA;a*scT#M
zusb9`qd<v$SgNe1UD{In*wmtB?JKR;+HTjT^-)@_{eOS=o*|QowA=k}|GWF&44=dI
ze9t-eoO928-`ttYa+=Qa5;M!|AF`I0Dms?qZ_DwEF3hy&_V<lB`SY{#ud;EMZvU6(
zl+WC&)_AMtt25Q(=h^99di}GkyY%{JTX*U8m)g2rdi{&7yY%`mvF_6Aztp-*um82y
zU3&fN?bAapz5Z*hyY%`O+43&E{`ofFrPsgGx=XMBVq4y&*T2xZORxWT7q~O;((5m`
z)4TNg|7i1Fdi@vK>0NsLdf}~VaOw5`+?IFg^`CF^T~glG@6zk%_lunAU3&f6A*eo=
zUjG-ZyY%|!S$FC6Pq+2C^!m@R?$TZUs?Ys+Gxut(d(N80sy&nag=kE{_Do4zeu<bm
zmFTW7=jZsm952i9tFn9!Kdo`D33x<p4^P<naq0Dc-?~edFXc)oFRJ`P`gsxk73KOh
zEmiN*>(_hJ<u2X%XJ2eTJNz6WJ*7-9YWhpyB3zVbO1j+P=jN|x3HhS(FNc@mqWq+7
z&!yMT#kyx1`J(cfch7QMl#jO!xb*t(u+zJA`+L-;Y5QEkbY)C0YI;rn`>cF<Q2#lf
z9sXXM=hCfjN+z57eq-}py7`QS{p|4f+kBT^|7!MI73{y(WbMB)rRfrfJ7a2DdECzq
zf8T^mTy*O{mEy@Cc79><a(qmV=jZs?93PkC<8!<q$G!QJtLXGKjHk%>ZspkKS&NJE
zY8b>t`9`<_7v-Dab+{<!$6Y)(;G+CS_y$~*hhYd8<@In2F3MS;o(L|=-vk?RQQiW#
z;i5baqqrzf!kchW&IasB;i8=L4$sZFD1QsQ1sCOShi}D2`8!|-F3R5p--(Ozo$%eb
zD8C)P7Z>I4hj-wj{DbfVxG291-ieFykH8P%qWoiU7cR>0h9Ad8c^ZBa7v-EMcy{BW
zd>`C{i}HKmr*Khz0N#s>@<Z?-F3LXxdvQ_DA1gec#YOpHct0-6AB11TMfpSU2rkOM
z1Rusl`BC^~T$FzeegzlhkHBNNDE}7x1}@6`;kR*7{$2PTT$Dc!kK>~JU*Pv}QT_w?
z1TM;d41b7=@}I&1T$KL|K8cI+XW-MgDF0XZ3tW^x3x9=+^54K;<D&d|_*-0*{|>%@
zi}F9flej4VBYX)L<^1n?1sCP~$T7d@ROAumyaUyfkBf3I9FL3giLekC<x}7!T$G;y
zi})wXr^7RGQO>&uJTq`nehxev7v;0zEL@b&f#>0(yadk0MY$i&!$tXpa6T@|7s88h
zQ67M$xG28_F2Y6mrEm!@$}fkP;iCKsxC|HNylTa>92ey)U>PpTSHTKgl&^uSaZ!FP
zti(n6I#`8^I$r%9T#t+L8d!~s^6TM7T$FEyn{ZM7M)-PMl%G15xREJ2epW5zZlt`Z
z@^vtbi}D6okBjmqxD^-W&G1dQD38GwT$CqZ92e!SFo}!uHn<%Z<?3JA^X@qb<Kxf2
z&F!&Eum9=EuDkU5-{LWO?H->;)bu;x+i+3-Zum}Il-~|_;-b70z7H4WchQeK>5nMi
z1wTZ-sO44QHg8<I$Lq{Fjr#iu>iH<;MNQx3F=IZ-+9%5IfqQXL-U|=lqWl_Lze}(G
zI_oaIevX6A$EDZbVcn(I&vDNAxb*tpX5FRNe?R^DEd4ud=kEyqMf@S_U&bHCk6J&5
ze+7TU`nU0K;QiK*<KMv_xBdkFJ^TmO2k;-_Kehfe{v`g4<K<g_h5rJ7*7|SpU*peP
zKZ(D9|H1kz_)9pW%kn7S%FhSpdGdT7ezwo?LVP^VjS0tjb4T7J{0!?e@H6pZ$Gux;
z;b-Hst<S~J!%M8s$LHY}S}(;f!UNWq;EV7}tuMnb!>_PjhA+ogSYM4-;A<Q&-&%!N
z;_Iwe<LmJn>%561ZzI0h`i=PO@mlNkco=W6{w91Y-fTUNx8Mou+wmmc=D2t3Tkv-L
zZPwp`za78T`cC{^_-)qTkKc}WTE7ebApT+NAHzR_f5LhizZ>tez7OxldmJy{dH}x%
z|Freb;D_+fSwD<_9)H04L->PupY@~om+-Gze+2&;{!Q!s__y#!tv`-`7e4{Dz5NhA
zf&bXa_ilX>AHaWR^M46{j*BWkm}kcPDlg!94$8H8-Jf-R@q5a>Ncop+d#}Kk^NKv8
z%5$?T?@#3CL%A1@8{_j#7~|$o!6)KVZT<{+CN8S{tTARx@tA<;JeX<!-mLcLQEo2f
z{Z4)5TNhAnKIKJ~zXV>4i}K4Ue<|ff`BkL9j(jmw|0lBQUrqW7@<rvZg_XD{-#|S<
z>J#Og;6_}O-$=R5lo#b1zs{`s>nRtee1k2&4L0H}c6@Haqj=Khx4~9iRDExw-dm`D
zM^^nES@o;EcVzi{nC@1lf3F=c<qv&w?pNmL_}CmDm*eAeydcNDId05&wcY*Gq2r&t
z{Kv!nfgGQuLe6K0|0z3fF5Tl(Q6}4TTs&y=T^3xdteHdkB~ROYmjxxt@;*Mi{h3@0
zyOX&0H_pz=f9x?g&!yLY!n#YhKTp{-9WPbSn5^mLKgr4eX^wv>%hOM~?YYdkem1(T
zoBi=P=RYsE4)6bo&BOgjj^CH#pU?4!bNtyHe>}(kHOJFz3GQcyzxHLfFD|*?!FqYy
zm?Dp;_4@<xeYhyU6L#XF{6p~XaZ$btei#?!ABP{sMfoS;CvZ`|8~y_>%J;x7T$Fzb
z?!`s<y|4!t<p<$@T$K01PvfHev+zD#l;00Ohl}zr!Y|;W{0Mvi7v&Gbf5b)kmth|+
z%D)2t2^ZzZ;8$@`{tftbT$FzseiIkv-+}*(i}K^}QCyUN4?c#A@+aU4T$KM1ejgX*
z1Mo+<D1Q?E1Q+E`!>4dj{tNhXT$KL`{t_4EzlQ&Yi}K&XL0pu-0H4D}`APU9F3Mkm
zzsE)SEAVAplzZ}x=TEpO&xd*WMIKQ;9*)IDc_A#oMfoH+0T<;(a565+&xBKPQ9c7s
z!$tYo@GM-E&w?{?QGOmg7Z>Gop$`}3^WbZ6Q9d7@kBjn);03rSFNF(mQN9RXjEnLm
za4{~*FN3eeMfozg6c^>o;p=cwUIwqkMR^4*$3^*SxDprTmGEj@lvlxPa8bS<uEj-p
zH4NgSd?Va|i}FqII$V^$9^Qb9@*CkBa8VwHAzYN#!!5Wd-wGqRD1Q@d#6@`v+=h$t
zIE><=JPB{YMfrA^!bN#IycrkeZ_PJ(x8(agZ-??*;ST)W)^CS9@%LH(`+PIz1Ni~Z
zhoQ>f4L^#ft?#2;H|2Y5`GfFY{E+p}!Ta$0tv?74<Ns*=%kW|RsP(VIuj1dZ-VeWp
zKWaU*XLrd~HSFV=A)S9JZHNDaJ^s3Mk7skU@_%IWUFII|I!Zd<=;>hKjIj({?|L9?
zS?khgS>_Zz)xdvVWUBl3`rHb8l5YElEpzj=)c;?8=0EE8C%4UB+rK``W0w7vCoDDH
zXSRNBi#UnEb%nX>QIEOnfq&nObUx;`zw|km1@`y23R2nq3Yuc-&g;nQD5LHr#8c1*
zKfsN$33G`PzWy4I`7z<p`5x274YK2e=PvM=lY|xAATuG#68dL(%tof|Ki6Y=CmM5{
zyaShU9kaz_?j-%{ikd3@<-ebsex>?pR+VL%-c`o+l9nh*ri1dALZ=LAC4s=HI^482
zW8xmu!<Jau#BU6SOJ1w#SO?5fO&18%HE;zmfa`urX+<^f7Op5)nP-NlZC2Xgu(V32
z+{xi-PC1jxuCK}|$9QJ7QRS2?8kQDx$`ub!bISRKrPVm)<_}ME$^}U4e<Sme(7e)N
zmXWq?*Gp<N53Fydc_?jrtU1-9dpTNXe!#k8%ieO!E%jS;aTI^d_B)ZPE2%7Z+HM<`
zw%KXBBRj3eEw^)6np=+b&3UM!G}X+XK#WP_yUwLuTqpaihweDZH@nl6k!HM<X>;eR
z>}8L+m9nJ~*N@_N$lYbUn5YT<(c`SMHtiCnHIkOKE@(A2dU9m8>UO8q>O^9m>E}LQ
zsU61t*?Fds6{K`a(&@7a<e6Ijl2)qUt<*S|;h;2;Ow^SG77;bIr0p6?bNZz8(a-1z
zjE=zQ2#k)v=m?CCz~~5!j=<;$jE=zQ2#k)v=m?CCz~~5!j=<;$jE=zQ2#k)v=m?CC
zz~~5!j=<;$jE=zQ2#k)vf6o!ngJUzQ&xlX0Eed#QXPVl^+2;DT^US)PKC^1q9P_%K
zxn|Mf*BJk?5_9f}dFD*My*M#2Ay^pq))w@Q>(3v^8}uwFG7B2xrljpAGizspGCv<#
z=FCFU*N;p;XQDZ$aV>dwkIZxGt*71y^)^uEpGTH)>b-Vkdh2Y{+BnNJx1DS1cFs08
z>^je^?_r(}Gf&5urxVQ6GtAS;)6CPh*=Aeg)uyg(jd}gfN>jb-8dKSGttmTPWiC6m
z)|8%DXXZV#-po1~q+i`5+jsi)Dq}S^&x~E{srAg7Xl6BPOwJlPt<(1c`tGIgh4g&_
zeV<6*C(-xG^nD6_FQV^LnSS2L=_idhlU8^G1@ZB<<NCCOjPW>i&Y{k^)cG3fETPVM
z)OkL2`l)k1bzVT77gFc-BkOefJAu5j|GGS<T_5evq20N(`x@FUq1}13dp_;@X?H&D
zUO?R&N7n63?`8Tzrk}v{6PbPz(@$plDNJ9)^i!Gs45s&woc_;@v$jZ{%rH}@7ntcO
zubEyy-b`OI&P-oCb>L2(>EUq_vxzU(eT|@Jd-S>RJdb%TA>;bu%(%sQ!KpmJS-7OY
z6jqOmYg#?17Ngwz3E#5Q))U`Hc-s0Q;tyoy%OA|j=Zl3Ve+7?@%p7NCE}3FxCT0vw
zi=SDm`qi7?Q1*L1kNE@jiB4at^Mal!<IEK1WeW2$W$}c#zKrPI!n_vbn}S4sd`xX#
z!1g<!2Tdm}@tR4C#|6D=Z>}+!`p5Ge)CUQHI+vb2#w18R(+bSAdP2>3GmSQ;EiQ_C
zrjKRp9x&7Srs_DAamHhuDO@}*?)HbiEE#L2oG~G;adb!1rDt}bnZ4vpGyBe&gT(_g
z`lt0d`=j8iwv%0NcHEAYXKI0&T2o}ECMFF`==b)GkB_UJGdbT(etwD>yJWJd<%^2X
z5{md@)ExSe?XTKi!5CApB7ZP%z?eD31*SNa&)624!Wu926_^3O=s0(o$J9FYvBr>|
zopx%RGyQutYT0D1KO>gktIg-kLNk+fb|%YVCd*+a%VFl?83VSx_#EwLNE^7)W6rpW
z`Tf1-m;U9Xo$6n98^wjDn0YK_UW%E=;>FVj&lq4C|M@nD)~Vd}Puoa!);i~IGl3EF
zdAjtX@usNDJ2-w|Y&^eqOhD`49QF4&%iw6C>Ac!wwqN5hPdW3*_hGW!8CSLit%sR0
z>Ez3!msIi0A76^gjLGmc>c}cHqMw;%I62QuP80^_XnS~y^&&Oa6xNS3V_6<$d?9r^
z;Vn)ZW|+I{Ss~kxm+gmXs~KB6?`J<f$vpj3mHE-}Yt7>|*O)1um8M{jy?9((tuZIV
zEK3%nvu}WN^7BkS4;c1u@R%PFPTKaeedM;7wQtDGOJ?jd%w8X=3QQI2%@o$PDa@O*
zPq}KGxr($xJGPnrpJJxo40G3szvS8NpDv!MJJaWL^36HbXT@jKP7j=RUDZB}am@bk
zNU-yLoCMF|X}^>Sjd(L5PUz(Uz1@%}M^FF8(9*j2C6_O}xhWb7x71&{gx6Npn_zoo
zv?&$(GgFzvd?_>YIl{aIZ}K?L`eo7r1uSTu1AT_)|1zJuNb4YNY1Xs{ygc_u+M2Ai
zZ<01Z+ODj$XGsgtMsZf!<ib4DOS#j19-&|Q{K)d8<;3xB`h3gPmYXf3mbY2%v3$U?
z-}0B1<20{)W?4GIf7RBnv|MBP2FsLXru=~Q6PCZs$={db;}{ct&am`ZF1D<)eBMrX
zoAoZssAZ=8mu<NNmJj8$cbxsRK2KZz!LrDX>jKL%%iC=J7V8PicU#_N*<%^C<!dZg
zT3%{7*RsI!B>NG4PFOx-*=u>!mfLAPW!YePo#hJ4#g-+O(=4AI<F@mJ<(Dl#ZMn<x
z9hQxj>nxX9o^LtX(pdf~&(?2w%<_KAF3a~@zQr<Xd7b5_?fmY_(DwgIo4>$vmgP9h
z-|{@JJ_DAISRS$Lwfv;z9hPmD4VE>QYb=*omRJVua&~?>&S}y8Zz4OK^2FBA#%MfT
zx2>j?SH_xodBOH|tu0$3iBMH6y48Hv6Kq!`RGt)^w3B0k?V**Cy6Q-Sd5AiAvt(#}
zU0rJ;5#jObX+t%vZjOaH-mIk#CrTwNW39Y>*6?Zv>g9LZLsv&qmAw7Zn6t-)sCHc}
z8rhIawAL}*+4&??MxAy(W80}{j)W7&&(y2h>O$46(G)K!c3y4C`e@5+h@_~ZiQX@?
z<u<e@X~QjcVJ3~;EG;K^A1?22etc+Jb-^sMrE1#aw)(Fz*Bhfvb+P(LXmujiVw*5O
zu~S~3;4PjTqls`+QZK7Fm6UhJG_)>qv$^(^(K}~`U!sMzvHI5LNT?#gTW=#gH+_yu
zHDdJ}Qlw~b%=d<RwtmZ-A`Hw1QYzF9Dqf{`YMLF4xtkeU>%4~2Y*0<~oho?qVpE;^
z@CI63od~x$6PnvGUK1Qkq(Uy-&X}D;b!~_=H)x1VxoS&<>mn;yQ9^4X36|j9`7AFj
z`ijPIQ<S9<iEAWzYW()>8dBw{R3bB0O+z_XSFqlx&TV{!AlQC&WtBV2)e)9(lKN*b
zmn&Ku8dwuoH#Ij`MjK-0WsL>PU3E5XX-#F8{N>c3^r1J$a5bj>Y4r|<6Um6PB+8@p
z*GAfJjwR}op$)wIw1)3t*wI}bOYrtcWA4RQHr1s<8a%W1)KYWQY|4ykWpYF7mXwN^
zqqc_XNHP^;U|t-n^>}5(T@HVuXLc#GFjccL^+ThuIuVJOa%!ju$CFwWC=`q}@!sZy
zF*l5y1KaJ-XL=n;)is*yhU%-J)zz)hIxPuS2xgz<aUXri+J?=IS>?4kT^|W=V_T-=
zdzin>I$oaKShJe1j`V11ZK+y?%;wApRI`${8*|f8U35KrS-)o}r@E=Gu|lt&PDED3
zVtgfIPiB;zI=`CfpEj%OVomkI6mwslY00j^<$ArUxm;tlHj-+L)jJ(JVn<?K%zewP
zF%M>HD^H%PYx7WFG(vVex|nHKw5KA;tnHaqL~DjgP|K-xa#b|h%95?olC_(eI$o*1
z*yZM~AJ?eQ+K;l*v0|I4)S~s$-N4-a&%?IILH1B7bEdP`t4ywLiiVq;ZqZK5Je-*W
zXE#G{LMan(XO^RvTwYq=%=kT)U!7{E=@i|nh_SZty`di{ttuAV)*8>O0jr{^1k312
z<y_a4NVSHWH%2+-WG~$}c4M?L9IbEW1>u=Zm(9#s@7_L^ebi~QeR(YC3hnHpsbC^j
z7vYN{pB!5gNwhEvsYoWt+%cvyX(g6e%?^&f26^W&lbP!LziW*o+Jlh<FCcFTN9&ji
zcIB+}b4J!)S<g{vs7-hONTPX8sqU`5hND0^gV-EV%*^wwboat;;i{%BiEx5t)yx-h
zVq<iY-K5cuY|x6mhBeu|i{7q`Y-!!PHIm5m^4e9^>sD1=ve+H-D@{wJCCLuiT%lN9
zoRM9wSW9esgzxc$LKUH8JW|)x&{P+4hnBT?9_h(ceMtL?P!s#GP(AyJM65le_bwZA
zT}AcAbv2c1S1no+aNBvs#A9q|Oc$<W-`o`25^7+J<|~#?O-->7Z*gylMMK*g;#!3o
z`0h`Gb|dC&)509Gk?<=697CLFjR|jI7jJGh4Gqn$$wtGs$mmB?iotF%mkq^35p$IZ
zB_oZY24^)0X+<kGG!|`W+RCyEMR^Zo?B-B7p&xR)#GTR|#!OZuy1gk8i|W1e+&mao
zf@5l%E^VAcqnT?$k+w)3JMhr3j2X12a>j(?O$%F+x;|RSC@j>QeHS(_TDWMT9i^Q7
zgnJ^8T_mRj3!y3YXYv~(;dpj-)_Kxs^#6|$IQ<pt@cAp-nf-lGV|2~0PrT^Wzh^sq
z>LHtE>j{>a4z<l!GlzZV+I(NltLHDa`F(GG_55L#y^6HzOL<wJ#a%yM_|c2_4dplb
z9&I@8&Xcm$H}^_J-wbs=j^>Wn*5=rjFndsT3CR>^3mFg9w@0;$DqWSm{j&$lq=zb+
zW6lwjTN51Ghti!xdzjNEbMU}^6Rx|dwTa^_r~HYCb|Su1V}#e#a|#&Y5aFxSF@o+T
z>?tCbAb+-;{l=o<DB;v3_r=|_D_=P23rBrA(_)YB&NMA$nlL(7aT+5lGktcxrr@X>
zNpRQ>IlEeCHk^tw<um!docvIf{knTVFAZ_p;+)zt<zVJ}+*b@=|4wGoo!X7@o#C#B
zy{{WyZmCVT#Z1t8XHL1soN^1S*XESV)bk%@+V(qW=SP3%(5J_yAG7r3<oD;yuRGPq
z|2uNVsmxBt*HxWQk!9|Bye6ky@oCE0<iCil!yfA>+eSE=O8SzGvDW5#-<F6k?9;K{
zeeaEbUefs;9#xv#uS`RmGUJj-upC%9e0k?&|Lt^TX34ot^saLI5w!gGHau|VU$_4}
zUA;wr<8g?#w#3^@>$sRJZE0=hR56hVxBD8xoL$!YQZZkXbJcwM<+syrPnq>9GarHf
z<n1d{@9Ef|8Snhz%kOmMPnRCg)ME>zbM_ynn=X^ym$RK_(*IUG^~R}sopt+S&Rvtt
z=2yBn5U`uz>9)U}cKhSv*ZE}1#jpDBvHg{mx&0W(=}+cQ6}O*j^RN6b>hJ&Mf~}1D
z{OhV~Lg9L@a6{30XZz!?BKDZ+U*Pj|5mlG)`<jxzbsMXya+XtW^KrX8gE<+#ivObV
zDI2~$rccw4H97tLpQ^Xo@z3->nByU?AGs+RQvP~EQ*=95=k>nKsZm`x8jYoVb(}`|
zxQtBAzrYuZ`WoCjChU^8>K3Ox{fal$ZPWeb_3PJjy*yMeccSY#WpzI5=Pc9ds&n60
z36abVkfg7rHJS3&bHj9d#Mi*xDBlY1MV`7r;!gfomtPn2g=vi{Pqn1ONP<o`=}u?n
zv%0*dylR=R#kncWnM6z6IpT1p+RXKOGm}T_o5IlrKJI+d$rSg3nfv-C-6Pf6vYQ>{
zhKw(q^4UvbDnEU_t<i1K*v(NpWu_mY^6KjH8$#v5%FG6qIcv_8a5<u*Te%U;_-VrJ
zkwlVvkiJ0Myf&x5sYW)|Olqvb=Nx2Jze@at`d0kM>Ki^ET7^#Cud@dQx81CAD%8Mb
zM!j-|(%o`d=LMmm6N6BzZceDgs_N?X)ysU^b=g%nw4Hm;x`AhJ$2jXS%k1W+R3oQ_
zQ3lACSZJ>i$mEuiZWOWHxL#@Y>8~+iUxa%viDkZd$wId;qyPW)BhbkO!P*jIs`!l^
zuF1_WmKbv@@j-a!GOkOAr{Q@u+#e;*b-6jr-LxMN?}M|OX!Ci};eBs5rkFU_=BD61
z{I!925xj@L{qG{KX8`iwXH2isVe*62MZ5z(`7wS6in#f>F@O97e=&Q3_}#{Q@hHDp
zM7$3^`3-)<hxi~|{$2V>ybNB(??j~)hc^~`%mDFP_)oLw*NZru$8SyPcZ7Q3w-qN{
zzdz-h&F>x&7cb>`u$9D@!Xt#8#KnK}@oWt7LAY>^$4vVj&ve0h!cyYmrE@)|g!oc;
z4`BiE18@_+hgC*gd^169rQw4%egwW?<0s*Q61R+4ZsRLqvyF>8Z2VUE6d^_Z;<S10
zwBmdl7jGh{pDFk!8#g?cwu?|jx_E@3GUCr{{3NWH&p4325_S-F5f_ixcpq%$dAdEM
z55U+Wk2$I`@T-gI5Al8&UBdV&9nQYgV_qg6fSU*fJo}aX9GRXk`{h!`lXUS<ge!;_
z@vPVrgev0M&xq;yv3^1`>Eis?@q19jxwbGH32EZo7cegpdWdsvZ7K-8#JLVPDZ*jm
zTsxcl2z|skKR1^w_n2eE%itFX$CVC$M|g_3xzb}+69$O~;c>!A;@QuP>DjT@5{h}Y
zN(>Vg5a+zv^b^X6bIxiOl=0jN@mknZ?lCpQdtrJ7^QAaEtAgLXBF;IQ=_K4iobx2}
zN5Wmixdt@*344fhU2Lvc#X3lwYf|%Z!sm!{ooXhnX895K!HtA(67Pez^4!;P;@QuC
z>AA2*p5;18y7+EF5zk0vKi{Qi!QM|;K)U!4p^Uir3ZaI0(OUXKs3+bBOL?{{MLhdC
zFFo(|&h_+zbn(4}J;XcVyMpwecp83zaEy2#{L%)-k$Cp=T6&J_Hw5#N%a@2xBVJVF
zF`wVadPSVyB{X{o0i|!E?}ReqLHI7h8seSs^MoMrK3H@E>nm{|e2h>}d;qp@_Lw%}
z9q=Gwr^>^U*HZ`a0Nh5{MZ5z(L`W0whwpp?%bR#7e1UM7IKQ1}-by&8bod<Mgwk(h
zpFntuco2SsFi5;CM7@NUi3e&uW-lTCWtKG@8}^uq#Dnnrgks`)25$NmcVAHqlY{{2
zo$yY=VYba)sAqKhh>Lm#x8p^Zdj3{%QP0jQF6uegiIf-D5SodPe*Pzofcu+y+@t9j
z-fN!uBY|t_5$WE1V{}cX-@D^5KH_u5Rc$TJnRDFb{zat$KWElFFv4GTm;3o^`og7t
zpY9IUhnxAc*K&V*B<a8E%1MP+gp<ig%a-PL9~DHC%l)m1=(1#8W27aVT-ef7mxv`}
z4XK4(5iAQQTS~Vt^85IsX;TA#NxaTI&vsi=RTpRKLPqrPe|bXO{XNf~?%wXh-F@B1
zx=(Zu?lF7Qdwcix?d{(?uy=5;*;lmBw=b}-_F(*A$HC5n`pe5EhOKXR|L%d^gS(Zt
zbl;kN!F{#+;``e6b?mF{iT8B$boQisdVBhM`g;a?277`BY7fK@bR6hBkUr3RpzlEc
zfq?^q2h72ugT8}-gJlP)u#@8=NT++#MP0tGKv!8;u&cH!-qq37*_H0<?dt35=<e)J
z)7QT4{_cV9!EUpsXwUF=%soZ-`0g3}v^i9C$ag4ksO(ViQ0<}kp^ihHhth|75A_}D
zKQwS?@Q@nfyat$bQQDUdq|4I5bZt7G?nrm0)rY=x;NG%(&HkeOzWssyW&4BsYxl?Z
zckJ)npQ)W`dpRxuP3hY`f4Avp|L6E7QW~kPEqx+Am_C{A*u86adUwz6-ra|Ho34Vc
zqORgD3i2?r=6OD?t88a_7cC!7r@MN(4tE{v>hBup8tgjRRnT4B?dy(rw{`Di1a@`z
zIP+hyr)N*!o;7=edpq_X+uO*<5AIv~sWqQ!`_%lNvYum%{4<RF$$N_K-F0u<{+;`2
IDChHk0I1nu=Kufz

literal 0
HcmV?d00001

diff --git a/DischargeFunctions/top_discharge_extrapolation.py b/DischargeFunctions/top_discharge_extrapolation.py
new file mode 100644
index 0000000..e6b19ad
--- /dev/null
+++ b/DischargeFunctions/top_discharge_extrapolation.py
@@ -0,0 +1,299 @@
+"""top_discharge_extrapolation
+Computes the extrapolated discharge in the top unmeasured portion of an ADCP transect. Methods are consistent with
+equations used by TRDI and SonTek.
+
+Example
+-------
+
+from DischargeFunctions.top_discharge_extrapolation import extrapolate_top
+
+    trans_select = getattr(data_in.depths, data_in.depths.selected)
+    num_top_method = {'Power': 0, 'Constant': 1, '3-Point': 2, None: -1}
+    self.top_ens = extrapolate_top(x_prod, data_in.w_vel.valid_data[0, :, :],
+                                   num_top_method[data_in.extrap.top_method],
+                                   data_in.extrap.exponent, data_in.in_transect_idx, trans_select.depth_cell_size_m,
+                                   trans_select.depth_cell_depth_m, trans_select.depth_processed_m, delta_t,
+                                   num_top_method[top_method], exponent)
+"""
+
+import numpy as np
+from numba.pycc import CC
+from numba import njit
+
+cc = CC('top_discharge_extrapolation')
+
+
+# Top Discharge Extrapolation with Numba
+# ======================================
+@cc.export('extrapolate_top', 'f8[:](f8[:, :], b1[:, :], i8, f8, i4[:], f8[:, :], f8[:, :], f8[:], f8[:], '
+                              'optional(i8), optional(f8))')
+def extrapolate_top(xprod,
+                    w_valid_data,
+                    transect_top_method,
+                    transect_exponent,
+                    in_transect_idx,
+                    depth_cell_size_m,
+                    depth_cell_depth_m,
+                    depth_processed_m,
+                    delta_t,
+                    top_method=-1,
+                    exponent=0.1667):
+    """Computes the extrapolated top discharge.
+
+    Parameters
+    ----------
+    xprod: np.array(float)
+        Cross product computed from the cross product method
+    w_valid_data: np.array(bool)
+        Valid water data
+    transect_top_method: int
+        Stored top method (power = 0, constant = 1, 3-point = 2)
+    transect_exponent: float
+        Exponent for power fit
+    in_transect_idx: np.array(int)
+        Indices of ensembles in transect to be used for discharge
+    depth_cell_size_m: np.array(float)
+        Size of each depth cell in m
+    depth_cell_depth_m: np.array(float)
+        Depth of each depth cell in m
+    depth_processed_m: np.array(float)
+        Depth for each ensemble in m
+    delta_t: np.array(float)
+        Duration of each ensemble computed from QComp
+    top_method: int
+        Specifies method to use for top extrapolation
+    exponent: float
+        Exponent to use for power extrapolation
+
+    Returns
+    -------
+    q_top: np.array(float)
+        Top extrapolated discharge for each ensemble
+    """
+
+    if top_method == -1:
+        top_method = transect_top_method
+        exponent = transect_exponent
+
+    # Compute top variables
+    idx_top, idx_top3, top_rng = top_variables(xprod, w_valid_data, depth_cell_size_m, depth_cell_depth_m)
+    idx_top = idx_top[in_transect_idx]
+    idx_top3 = idx_top3[:, in_transect_idx]
+    top_rng = top_rng[in_transect_idx]
+
+    # Get data from transect object
+    cell_size = depth_cell_size_m[:, in_transect_idx]
+    cell_depth = depth_cell_depth_m[:, in_transect_idx]
+    depth_ens = depth_processed_m[in_transect_idx]
+
+    # Compute z
+    z = np.subtract(depth_ens, cell_depth)
+
+    # Use only valid data
+    valid_data = np.logical_not(np.isnan(xprod[:, in_transect_idx]))
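+    # (element-wise loops rather than boolean masking are used here, as elsewhere
+    # in this module, for Numba compile purposes)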
+    for row in range(valid_data.shape[0]):
+        for col in range(valid_data.shape[1]):
+            if not valid_data[row, col]:
+                z[row, col] = np.nan
+                cell_size[row, col] = np.nan
+                cell_depth[row, col] = np.nan
+
+    # Compute top discharge
+    q_top = discharge_top(top_method, exponent, idx_top, idx_top3, top_rng,
+                          xprod[:, in_transect_idx], cell_size, cell_depth,
+                          depth_ens, delta_t, z)
+
+    return q_top
+
+
+@njit
+@cc.export('discharge_top', 'f8[:](i8, f8, i4[:], i4[:, :], f8[:], f8[:, :], f8[:, :], f8[:, :], f8[:], '
+                            'f8[:], f8[:, :])')
+def discharge_top(top_method, exponent, idx_top, idx_top_3, top_rng, component, cell_size, cell_depth,
+                  depth_ens, delta_t, z):
+    """Computes the top extrapolated value of the provided component.
+
+    Parameters
+    ----------
+    top_method: int
+        Top extrapolation method (Power = 0, Constant = 1, 3-Point = 2)
+    exponent: float
+        Exponent for the power extrapolation method
+    idx_top: np.array(int)
+        Index to the topmost valid depth cell in each ensemble
+    idx_top_3: np.array(int)
+        Index to the top 3 valid depth cells in each ensemble
+    top_rng: np.array(float)
+        Range from the water surface to the top of the topmost cell
+    component: np.array(float)
+        The variable to be extrapolated (xprod, u-velocity, v-velocity)
+    cell_size: np.array(float)
+        Array of cell sizes (n cells x n ensembles)
+    cell_depth: np.array(float)
+        Depth of each cell (n cells x n ensembles)
+    depth_ens: np.array(float)
+        Bottom depth for each ensemble
+    delta_t: np.array(float)
+        Duration of each ensemble computed from QComp
+    z: np.array(float)
+        Relative depth from the bottom of each depth cell computed in discharge top method
+
+    Returns
+    -------
+    top_value: np.array(float)
+        Total for the specified component integrated over the top range
+    """
+
+    # Initialize return
+    top_value = np.array([0.0])
+
+    # Top power extrapolation
+    if top_method == 0:
+        coef = np.repeat(np.nan, int(component.shape[1]))
+
+        # Compute the coefficient for each ensemble
+        # Loops are used for Numba compile purposes
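+        # The power fit assumes component(z) = coef * z**exponent; the coefficient
+        # equates the measured integral (sum of component * cell_size over valid
+        # cells) with the analytic integral of the power curve over the same cells,
+        # giving coef = numerator * (1 + exponent) / denominator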
+
+        # Loop through ensembles
+        for col in range(component.shape[1]):
+            # Initialize variables
+            numerator = 0.0
+            numerator_valid = False
+            denominator_valid = False
+            denominator = 0.0
+
+            # Loop through depth cells in an ensemble
+            for row in range(component.shape[0]):
+
+                # Compute the numerator
+                numerator_temp = component[row, col] * cell_size[row, col]
+                if np.logical_not(np.isnan(numerator_temp)):
+                    numerator_valid = True
+                    numerator = numerator + numerator_temp
+
+                # Compute the denominator
+                denominator_temp = ((z[row, col] + 0.5 * cell_size[row, col]) ** (exponent + 1)) \
+                                   - ((z[row, col] - 0.5 * cell_size[row, col]) ** (exponent + 1))
+                if np.logical_not(np.isnan(denominator_temp)) and denominator_temp != 0:
+                    denominator_valid = True
+                    denominator = denominator + denominator_temp
+
+            # If both numerator and denominator are valid compute the coefficient
+            if numerator_valid and denominator_valid:
+                coef[col] = (numerator * (1 + exponent)) / denominator
+
+        # Compute the top discharge for each ensemble
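+        # by integrating coef * z**exponent from (depth_ens - top_rng) to depth_ens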
+        top_value = delta_t * (coef / (exponent + 1)) * \
+            (depth_ens**(exponent + 1) - (depth_ens-top_rng)**(exponent + 1))
+
+    # Top constant extrapolation
+    elif top_method == 1:
+        n_ensembles = len(delta_t)
+        top_value = np.repeat(np.nan, n_ensembles)
+        for j in range(n_ensembles):
+            if idx_top[j] >= 0:
+                top_value[j] = delta_t[j] * component[idx_top[j], j] * top_rng[j]
+
+    # Top 3-point extrapolation
+    elif top_method == 2:
+        # Determine number of bins available in each profile
+        valid_data = np.logical_not(np.isnan(component))
+        n_bins = np.sum(valid_data, axis=0)
+        # Determine number of ensembles
+        n_ensembles = len(delta_t)
+        # Preallocate qtop vector
+        top_value = np.repeat(np.nan, n_ensembles)
+
+        # Loop through ensembles
+        for j in range(n_ensembles):
+
+            # Set default to constant
+            if (n_bins[j] < 6) and (n_bins[j] > 0) and (idx_top[j] >= 0):
+                top_value[j] = delta_t[j] * component[idx_top[j], j] * top_rng[j]
+
+            # If 6 or more bins use 3-pt at top
+            if n_bins[j] > 5:
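+                # Fit a line component = a * depth + b through the top three valid
+                # cells by least squares; delta below is the determinant of the
+                # normal equations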
+                sumd = 0.0
+                sumd2 = 0.0
+                sumq = 0.0
+                sumqd = 0.0
+
+                # Use loop to sum data from top 3 cells
+                for k in range(3):
+                    if not np.isnan(cell_depth[idx_top_3[k, j], j]):
+                        sumd = sumd + cell_depth[idx_top_3[k, j], j]
+                        sumd2 = sumd2 + cell_depth[idx_top_3[k, j], j]**2
+                        sumq = sumq + component[idx_top_3[k, j], j]
+                        sumqd = sumqd + (component[idx_top_3[k, j], j] * cell_depth[idx_top_3[k, j], j])
+                delta = 3 * sumd2 - sumd**2
+                a = (3 * sumqd - sumq * sumd) / delta
+                b = (sumq * sumd2 - sumqd * sumd) / delta
+
+                # Compute discharge for 3-pt fit
+                qo = (a * top_rng[j]**2) / 2 + b * top_rng[j]
+                top_value[j] = delta_t[j] * qo
+
+    return top_value
+
+
+@njit
+@cc.export('top_variables', '(f8[:, :], b1[:, :], f8[:, :], f8[:, :])')
+def top_variables(xprod, w_valid_data, depth_cell_size_m, depth_cell_depth_m):
+    """Computes the index to the top and top three valid cells in each ensemble and
+    the range from the water surface to the top of the topmost cell.
+
+    Parameters
+    ----------
+    xprod: np.array(float)
+        Cross product computed from the cross product method
+    w_valid_data: np.array(bool)
+        Valid water data
+    depth_cell_size_m: np.array(float)
+        Size of each depth cell in m
+    depth_cell_depth_m: np.array(float)
+        Depth of each depth cell in m
+
+    Returns
+    -------
+    idx_top: np.array(int)
+        Index to the topmost valid depth cell in each ensemble
+    idx_top_3: np.array(int)
+        Index to the top 3 valid depth cells in each ensemble
+    top_rng: np.array(float)
+        Range from the water surface to the top of the topmost cell
+    """
+
+    # Get data from transect object
+    valid_data1 = np.copy(w_valid_data)
+    valid_data2 = np.logical_not(np.isnan(xprod))
+    valid_data = np.logical_and(valid_data1, valid_data2)
+
+    # Preallocate variables
+    # NOTE: Numba does not support np.tile
+    n_ensembles = int(valid_data.shape[1])
+    idx_top = np.repeat(-1, n_ensembles)
+    idx_top_3 = np.ones((3, n_ensembles), dtype=np.int32)
+    idx_top_3[:] = -1
+    top_rng = np.repeat(np.nan, n_ensembles)
+
+    # Loop through ensembles
+    for n in range(n_ensembles):
+        # Identify topmost 1 and 3 valid cells
+        idx_temp = np.where(np.logical_not(np.isnan(xprod[:, n])))[0]
+        if len(idx_temp) > 0:
+            idx_top[n] = idx_temp[0]
+            if len(idx_temp) > 2:
+                for k in range(3):
+                    idx_top_3[k, n] = idx_temp[k]
+            # Compute top range
+            top_rng[n] = depth_cell_depth_m[idx_top[n], n] - 0.5 * depth_cell_size_m[idx_top[n], n]
+        else:
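+            # No valid cells in this ensemble; use a zero top range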
+            top_rng[n] = 0
+            idx_top[n] = 0
+
+    return idx_top, idx_top_3, top_rng
+
+
+if __name__ == '__main__':
+    # Used to compile code
+    cc.compile()
diff --git a/MiscLibs/__init__.py b/MiscLibs/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/MiscLibs/abba_2d_interpolation.py b/MiscLibs/abba_2d_interpolation.py
new file mode 100644
index 0000000..296806d
--- /dev/null
+++ b/MiscLibs/abba_2d_interpolation.py
@@ -0,0 +1,413 @@
+"""abba_2d_interpolation
+
+This module performs 2-D interpolation on data that is assumed to be arranged in row-column format rather
+than in a random pattern. The rows represent the vertical location or y-coordinate of each cell
+in the data array. The columns represent the horizontal location or x-coordinate of the data.
+The cell size, and thus the y-coordinate of a cell, can change from cell to cell and ensemble to ensemble.
+The interpolation algorithm searches for all valid cells above, below, before, and after
+that touch the cell to be interpolated. Bathymetry is honored by checking whether the depth of the streambed
+of the cell before or after is greater than the bottom of the target cell. When searching before or after, if the
+streambed is encountered before a valid cell, then no valid cell is used in that direction.
+
+The methods provide the flexibility to determine neighbors based on either a raw vertical location
+or a normalized location. To use a normalized location set normalize to True.
+
+For efficiency the data_list can contain multiple types of data that lie on the same x-y locations.
+This allows multiple interpolations without having to recompute neighbors and distances.
+
+Example
+-------
+
+For interpolating water velocities collected by an ADCP
+
+interpolated_data = abba_idw_interpolation(data_list=[u_processed_mps, v_processed_mps],
+                                           valid_data=valid_data,
+                                           cells_above_sl=cells_above_sl,
+                                           y_centers=depth_cell_depth_m,
+                                           y_cell_size=depth_cell_size_m,
+                                           y_depth=depth_processed_m,
+                                           x_shiptrack=distance_along_shiptrack,
+                                           normalize=True)
+
+interpolated_u_values = interpolated_data[0]
+interpolated_v_values = interpolated_data[1]
+
+"""
+import numpy as np
+
+
+def find_neighbors(valid_data, cells_above_sl, y_cell_centers, y_cell_size, y_depth, search_loc, normalize=False):
+    """ Finds the nearest valid cells above, below, before, and after each invalid cell. The before and after
+    Cells must have data in the same y range as the invalid cell.
+
+    Parameters
+    ----------
+    valid_data: np.array(logical)
+        Logical array indicating whether each cell is valid (true) or invalid (false)
+    cells_above_sl: np.array(logical)
+        Logical array indicating which cells are above the side lobe cutoff (true)
+    y_cell_centers: np.array(float)
+        Y coordinate corresponding to the center of the data cells
+    y_cell_size: np.array(float)
+        Size of each cell in the y-direction
+    y_depth: np.array(float)
+        1-D array containing the values used to normalize the data and to specify the lower boundary for
+        identifying neighbors
+    search_loc: list
+        Identifies location to search (above, below, before, after)
+    normalize: bool
+        Boolean indicating if normalized data should be used
+
+    Returns
+    -------
+    neighbors: list
+        List of dictionaries providing the indices of the above, below, before, and after valid cells.
+    """
+
+    # Compute cell extents
+    y_top = y_cell_centers - 0.5 * y_cell_size
+    y_bottom = y_cell_centers + 0.5 * y_cell_size
+    y_bottom_actual = y_cell_centers + 0.5 * y_cell_size
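+    # (y_bottom_actual retains the un-normalized cell bottoms for the bathymetry
+    # checks in find_before and find_after)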
+
+    if normalize:
+        y_top = np.round(y_top / y_depth, 3)
+        y_bottom = np.round(y_bottom / y_depth, 3)
+
+    # ID cells above side lobe with invalid data
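+    # (casting to float makes invalid cells above the side lobe exactly 0 and cells
+    # below the side lobe NaN, so == 0 selects only the cells needing interpolation)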
+    valid_data_float = valid_data.astype(float)
+    valid_data_float[np.logical_not(cells_above_sl)] = np.nan
+    invalid_cell_index = np.where(valid_data_float == 0)
+
+    # Initialize output list
+    neighbors = []
+
+    # Process each index
+    for cell, ens in zip(invalid_cell_index[0], invalid_cell_index[1]):
+        points = []
+        target = (cell, ens)
+
+        if 'above' in search_loc:
+            # Identify index of the nearest valid cell above the target
+            above = find_above(target, valid_data)
+            if above is not None:
+                points.append(above)
+
+        if 'below' in search_loc:
+            below = find_below(target, valid_data)
+            if below is not None:
+                points.append(below)
+
+        # Find all cells in ensembles before or after the target ensemble that overlap the target cell
+        # This is a change implemented on 2/27/2020 - dsm
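+        # (two vertical intervals overlap when the top of each is at or above the
+        # bottom of the other)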
+        y_match = np.logical_and(y_top[target] <= y_bottom, y_bottom[target] >= y_top)
+        y_match = np.logical_and(y_match, valid_data)
+
+        if 'before' in search_loc:
+            # Identify indices of cells in the nearest ensemble before the target
+            before = find_before(target, y_match, y_depth, y_bottom_actual)
+            if before:
+                points = points + before
+
+        if 'after' in search_loc:
+            after = find_after(target, y_match, y_depth, y_bottom_actual)
+            if after:
+                points = points + after
+
+        neighbors.append({'target': target, 'neighbors': points})
+
+    return neighbors
+
+
+def find_above(target, valid_data):
+    """ Finds the nearest valid cell above the target.
+
+    Parameters
+    ----------
+    target: tuple
+        Indices of target cell
+    valid_data: np.array(logical)
+        Logical array indicating whether each cell is valid (true) or invalid (false)
+
+    Returns
+    -------
+    above_idx: tuple
+        Indices of valid cell immediately above target
+    """
+
+    # Initialize cell counter
+    above_idx = target[0] - 1
+
+    # Find nearest valid cell above target
+    while above_idx >= 0 and not valid_data[above_idx, target[1]]:
+        above_idx = above_idx - 1
+    if above_idx >= 0:
+        above_idx = (above_idx, target[1])
+    else:
+        above_idx = None
+
+    return above_idx
+
+
+def find_below(target, valid_data):
+    """ Finds the nearest valid cell below the target.
+
+    Parameters
+    ----------
+    target: tuple
+        Indices of target cell
+    valid_data: np.array(logical)
+        Logical array indicating whether each cell is valid (true) or invalid (false)
+
+    Returns
+    -------
+    below_idx: tuple
+        Indices of valid cell immediately below target
+    """
+
+    # Initialize cell counter
+    below_idx = target[0] + 1
+
+    # Determine cell row index limit
+    n_cells = len(valid_data[:, target[1]])-1
+
+    # Find nearest valid cell below target
+    while below_idx <= n_cells and not valid_data[below_idx, target[1]]:
+        below_idx = below_idx + 1
+    if below_idx <= n_cells:
+        below_idx = (below_idx, target[1])
+    else:
+        below_idx = None
+
+    return below_idx
+
+
+def find_before(target, y_match, y_depth, y_bottom):
+    """ Finds the nearest ensemble before the target that has valid cells within the vertical range of the target
+
+    Parameters
+    ----------
+    target: tuple
+        Indices of target cell
+    y_match: np.array(logical)
+        2-D array of all cells that are within the vertical range of the target cell
+    y_depth: np.array(float)
+        1-D array containing the values used to normalize the data and to specify the lower boundary for
+        identifying neighbors
+    y_bottom: np.array(float)
+        Bottom depth of each cell
+
+    Returns
+    -------
+    before_idx: list
+        List of tuples of indices of all cells in the nearest ensemble before the target that are within
+        the vertical range of the target cell
+    """
+
+    # Initialize ensemble counter
+    before_ens = target[1] - 1
+
+    # Loop until an ensemble is found that has valid data within the vertical range of the target while honoring
+    # the bathymetry. If the streambed is encountered while searching for a previously valid ensemble then
+    # it is determined that there is no available valid data before the target that can be used.
+    found = False
+
+    while (before_ens >= 0) and not found:
+        if y_bottom[target] < y_depth[before_ens] and np.any(y_match[:, before_ens]):
+            found = True
+        elif y_bottom[target] > y_depth[before_ens]:
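+            # The streambed is shallower than the bottom of the target cell, so the
+            # search stops; -999 flags that no usable ensemble exists before the target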
+            before_ens = -999
+            found = True
+        else:
+            before_ens = before_ens - 1
+
+    # Find and store the indices of all cells from the identified ensemble
+    # that are within the vertical range of the target
+    if before_ens >= 0:
+        rows = np.where(y_match[:, before_ens])[0]
+        before_idx = []
+        for row in rows:
+            before_idx.append((row, before_ens))
+    else:
+        before_idx = []
+
+    return before_idx
+
+
+def find_after(target, y_match, y_depth, y_bottom):
+    """ Finds the nearest ensemble after the target that has valid cells within the vertical range of the target
+
+    Parameters
+    ----------
+    target: tuple
+        Indices of target cell
+    y_match: np.array(logical)
+        2-D array of all cells that are within the vertical range of the target cell
+    y_depth: np.array(float)
+        1-D array containing the values used to normalize the data and to specify the lower boundary for
+        identifying neighbors
+    y_bottom: np.array(float)
+        Bottom depth of each cell
+
+    Returns
+    -------
+    after_idx: list
+        List of tuples of indices of all cells in the nearest ensemble after the target that are within
+        the vertical range of the target cell
+    """
+
+    # Initialize ensemble counter
+    after_ens = target[1] + 1
+
+    # Loop until an ensemble is found that has valid data within the vertical range of the target while honoring
+    # the bathymetry. If the streambed is encountered while searching for a next valid ensemble then
+    # it is determined that there is no available valid data after the target that can be used.
+    found = False
+
+    while (after_ens <= y_match.shape[1] - 1) and not found:
+        if (y_bottom[target] < y_depth[after_ens]) and np.any(y_match[:, after_ens]):
+            found = True
+        elif y_bottom[target] > y_depth[after_ens]:
+            after_ens = -999
+            found = True
+        else:
+            after_ens = after_ens + 1
+
+    # Find and store the indices of all cells from the identified ensemble
+    # that are within the vertical range of the target
+    if (after_ens <= y_match.shape[1]-1) and (after_ens > 0):
+        rows = np.where(y_match[:, after_ens])[0]
+        after_idx = []
+        for row in rows:
+            after_idx.append((row, after_ens))
+    else:
+        after_idx = []
+
+    return after_idx
+
+
+def compute_distances(target, neighbors, x, y):
+    """ Computes distances between the target and neighbors.
+
+    Parameters
+    ----------
+    target: tuple
+        Indices of target cell
+    neighbors: list
+        List of indices of target's neighboring cells
+    x: np.array(float)
+        1-D array of distances between ensembles
+    y: np.array(float)
+        2-D array of vertical distances of cells for each ensemble
+
+    Returns
+    -------
+    distances: list
+        List of distances from target to each neighbor
+    """
+
+    # Initialize target location
+    target_y = y[target]
+    target_x = x[target[1]]
+
+    # Compute distance from target cell to each neighbor
+    distances = []
+    for neighbor in neighbors:
+        distances.append(np.sqrt((y[neighbor] - target_y) ** 2 + (x[neighbor[1]] - target_x) ** 2))
+
+    return distances
+
+
+def idw_interpolation(data, neighbor_indices, distances):
+    """ Interpolate value using neighbors and inverse distance weighting.
+
+    Parameters
+    ----------
+    data: np.array(float)
+        2-D array containing data to interpolate
+    neighbor_indices: list
+        List of tuples defining the indices of the target's neighbors
+    distances: list
+        List of distances from target to each neighbor
+
+    Returns
+    -------
+    interpolated_value: float
+        Value of target cell interpolated from neighbors
+    """
+
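+    # Inverse distance weighting: interpolated = sum(v_i / d_i) / sum(1 / d_i)
+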
+    # Compute weighted sum of neighbor values
+    sum_of_weights = 0
+    weighted_sum = 0
+    for n, index in enumerate(neighbor_indices):
+        sum_of_weights = sum_of_weights + (1/distances[n])
+        weighted_sum = weighted_sum + data[index] * (1/distances[n])
+
+    # Compute interpolated value
+    if sum_of_weights > 0:
+        interpolated_value = weighted_sum / sum_of_weights
+    else:
+        interpolated_value = np.nan
+
+    return interpolated_value
+
+
+def abba_idw_interpolation(data_list, valid_data, cells_above_sl, y_centers, y_cell_size, y_depth,
+                           x_shiptrack, normalize, search_loc=('above', 'below', 'before', 'after')):
+    """ Interpolates values for invalid cells using the neighboring cells above, below, before, and after and
+    and inverse distance averaging.
+
+    Parameters
+    ----------
+    data_list: list
+        List of np.array(float) data to be used for interpolation
+    valid_data: np.array(logical)
+        Logical array of valid data
+    cells_above_sl: np.array(logical)
+        Logical array of all valid cells above the side lobe cutoff
+    y_centers: np.array(float)
+        Y coordinate corresponding to the center of the data cells
+    y_cell_size: np.array(float)
+        Size of each cell in the y-direction
+    y_depth: np.array(float)
+        1-D array containing the values used to normalize the data and to specify the lower boundary for
+        identifying neighbors
+    x_shiptrack: np.array(float)
+        X coordinate of cumulative shiptrack
+    normalize: bool
+        Boolean value specifying whether data should be normalized or not.
+    search_loc: list or tuple
+        Identifies location to search (above, below, before, after)
+
+    Returns
+    -------
+    interpolated_data: list
+        Indices and interpolated values for invalid cells, corresponding to the entries in data_list.
+    """
+
+    # Initialize output list
+    interpolated_data = [[] for _ in range(len(data_list))]
+
+    valid_cells = np.logical_and(cells_above_sl, valid_data)
+    if not np.all(valid_cells):
+        # Find neighbors associated with each target
+        interpolation_points = find_neighbors(valid_data=valid_data,
+                                              cells_above_sl=cells_above_sl,
+                                              y_cell_centers=y_centers,
+                                              y_cell_size=y_cell_size,
+                                              y_depth=y_depth,
+                                              search_loc=search_loc,
+                                              normalize=normalize)
+
+        # Process each target
+        for point in interpolation_points:
+            # Compute distance from target to neighbors
+            distances = compute_distances(target=point['target'],
+                                          neighbors=point['neighbors'],
+                                          x=x_shiptrack,
+                                          y=y_centers)
+
+            # Interpolate target for each data set in data_list
+            for n, data in enumerate(data_list):
+                interpolated_value = idw_interpolation(data=data,
+                                                       neighbor_indices=point['neighbors'],
+                                                       distances=distances)
+                interpolated_data[n].append([point['target'], interpolated_value])
+
+    return interpolated_data
diff --git a/MiscLibs/bayes_cov_compiled.cp39-win_amd64.pyd b/MiscLibs/bayes_cov_compiled.cp39-win_amd64.pyd
new file mode 100644
index 0000000000000000000000000000000000000000..0f8fe8e893cbccb7a46623aeecc6897ec6038cf8
GIT binary patch
literal 94208
zcmeFa4SZD9wKhHj8HhkU6Uj*Ys-uoIsHKVcK>%^0iJZ|vV~qk8F&3n#QBs*{(IN(C
zVm%H+qs10mtkhylTWo3Nwn$ND2qXav1gsL0D)IXa0SqV*K$!P=);^P&fcDyZ-}}D5
z|L^}sIeYEzwbx#I?X}ll=ghco*V+2oY_=@?OD1i$l?eIgmFK^H<lAkwlh0mtvTeml
z&F8GN`<u_Hn11(6cl84^zWczP_qnIsdH?-00`BkJ<$fS|zx(d{-K96)?7nZt)Vqe|
z<P7jwO^<)|hTVgAxL-~EJN>sWy?O@XGhaC8wOoY#SEz923+KOfmZW=LQ~Bq;=0yCD
zzduL9!8^`*^*4yKom24d2QL)7cB+K#*HnCmm42?ozjgPN=`8zYBP=ho*`_|$&z3y4
z>N}}2UAA-G1N)rpwrxeq^D5Q*3__>CH6VwA``B##Bq<%*V#p<t@z=*ziHNF{3Qmk$
zDU<EC0O|w~E@tJ$c3U^YI=gLYsuRYS+ijE3YWZ-x?M%jx`>R2E0n(h8*lk`x)GE-0
z(8BAuDiR$QxN8>s*SFw7lsWd}%DZj0%3%*oy)$s9&35L)$Y+~#@b4}B%lPwxt%j*$
zw(23ssJ6&Zk%^G-UTj#kDkx*bSZ&!TKlOxi-eC{Sd|(Q)geIU3h-<S|eNnl)s%D@d
zX+&B{x%*EjcQtbVfAzPLB<Rw$xLr@i>ZjG|jj<&DtdypD-DoM=+U3{&Y}Xd7@fmib
zrGDBl?d-4+Njux7M@sTDQtI^D^#Mnhp0wB3YiE~&ZxHW?_(=hKed5L@n=P|8^P`T_
zK15*j5lre!#+Te$a!X0Yt>bUGS)aSh1bWj-wW0GoEr0-oJuxO}Lsxq^ivU)5IPK$%
zX5HAP8=LgVB2PJ(V45~QhTO1+%p*We3aCy2l_{V+1$a|HVG3}k04D&wG3+rd{_q=N
zPX}PX5jXpRWO{A87Cr`9svE04t;o=g6`pKV+E=PM=Xq*?6QQtY79f9QwWku{E6BI$
z+Krt+H(3;$R=2N2n=rZ&bD<Zn(`xsiU2Xh<eY$qxr~9<Y$6n?NT;;~Y91L3OMx!37
z&fllyjzQD8*JfkPEF1PrN84y|pLSDsMx7mvHk;2_>z6vte$%vqoO$n&Iu&vBJ28$f
z8Uf_?+ow6N%|0Rd+HBo8WL|hInN-+nXM1g#_y+eKhvN+NCpdbE;|SvDMYGn#kf1U#
zJtXcSde1bJr^-~Ofa(-blLG1h_(5xKU4CPUXCcDKJkR4GficfhhZs9Xf(3;=HGp&!
zsYVgq@Ox%4LXt63?Kft7rZZ)Nl~So2lROjrMwO?W$sMeXg1T;$dJ4%Bm<o|kgkykF
zPgrGnM*7l>^u>Vrr#F^*yxA5oLe(nu6r@tzk`kTgabkpebb+v*&*&~IT5m2GjwB$%
ztWfu{Kz40RtG$BdFlGuvJred5N<8eLJk}#CJR=y7Ec56RF7ix3SPD)gseDr!b#Sx|
zN#vpu?ZU==+C<0f4v<gRE{^F&T#p~RN<U|>ZjYC|f);Il&6m)N_xiPQd;DmflQvHK
z)BY5VOxx$lq~Cbg>^_3k0nX`?aJy+DEUQhv%r<mQ-=z1QC;f5)k>HD?QC%C?;2&!0
zwK1#M4RLM$GUj8@9M5%8|6cOafugNxkplUZT(eJ`P^8;|*_zpVtpyPSV|KA7DPvA2
z9w3h_PA#{vDAqZcnWi+qz*@Wmv;rksv;rk-O0@zdeE=wVq|aST0d?E$cT6ssRC0UC
z<dQq;A<*<)K-1{oc?#fV`xVNqY}>Glkn4OQjq!2vj`&fwbx63v(<0Tj=+Ur;%tVeG
z!3asE*bkAI6)9j&lNE96QP-3Xo2_JJ7E(%H=F^<{h|LB@86VSzmU^lw9cz#*QW7EA
zcQHc#ku@t*z;psycq7KDhpu#g51|%zf@w%KcKDTAj8=H6&DFmU3~E1?w!nI*ovqcj
zG8zeB@!>Ix(y;~^{>TzpzkcxSLd40tizS>VgEoH$ErS9EB6tZvt)Pp1_8dTSF|)WR
zo|mTQ3CYY^=?XKYLN_bi&k8eA6*xQ&j089Duu{X<GgWnU6VnWAjfc!jS@W_~O<8{_
z!ck^G>dp3);~Ai|sPcFzL`L|Hsh&bg88@k{mV4CisnkKC={}=toUzyZtUH;cbf4sz
z4q+TXCKCUI#8C2B;N;q5@M~H@gV_vitFu^-T=ro*QQB*+L4s~{b)G+GthpMnKXQ#T
za&xvG8E9_B>H$Fl5aMz3=@)Ibq8K@x97YcBAYJFKB?I>2=TP!!;Pg;38EDZXIn&rp
z)YX3nMm>dn{~d`XNHlL_>_x=RQ$;>|8;M>fE<|EKl{n)aBwobCP*J;iY_=ezh1X+8
zA@VH;lgY$1Y<cPBQFrTjXaSsiZuB(>p^#vg_eU0}wSlmeso<CngrJq$p#dm4fr;dp
zNa<M!1*j$f3C{WP%lwpber9{Tx>3RTso?zhJq3QOH!LLX5aeFWjSs||06g``Y)`e!
z%q+BA#Re&cCmRhQ@dQ?>GQIx+UOzDB4)YR>)E^n^jEu)jXr}%Lpplx1ft;3T&IhE|
znWTu(zzG^6;9m2MA2OS*y|;oB)xbHcL2sBVlI4!c^`T>&p<~IwN>F<3c9wU&$>!py
z+xcrGK8Hl}WyYSKrVU&F(3yDV{Jza*n|TU`;8+R>De;+E4R(8dgWZ;)(A}zsc9m;G
z10)T^5XFz8iR)Q(Q6loJ<YlZ4n|UfasEsw0dakeqW<pMMQ4J5=sz>~u5oYqCo}nEE
zC@XcZxgVf;fLXP%;8jwUgS|p4*kpbSj8L?`W;UABo{pP?(7IMYJVI;BC)Z<CFJs~{
z)C)P(x6lT}_&aq#w*c*2AVj-BD`+wYpdGYw0a}TnZb8Q2puZj`f0tq))HjvfA+*R-
zW2J!s5XU;jv(!@#)ddWLX8=k7IL1w#pg@nzlWmtGs{rGt2y;_(AfAuVZ2wF)#Z_*!
zYTocFbM*XiwI9qGLB+@y`rIBIfXOJcX+N=ddURuh-}pFVJy=<N{Px}0>h$F1;BKW=
zPDkT}=6Ncm+ZD3AQ<9cj-RlU^k+z_|Xlu~{?QD+AcA(xKyhe}CvHOcZ)}F1$wtKi{
zbP>4lSl|Ql;b@?n8k6}=Mpm*K&J0NAcny1^nH`|46WP9dH)m@#*|Il;WyC}(f8Vv5
zGp|4mdg3dhh3b)67<W>e|4Xzstwbw0LLKxl81k~SF|$xU4*Eh#dvFv|%BW{fsu%rK
zn~2?fqN6X$nC{0|aB_ormH@f^dr%F3noM@y&c4szzK!S|@f*i5-(S!edR=S2Rcp2M
zh5)&J{r3HOs6JayZVGPfX>}M{CC=e1F=V9#r{_k_J5WAEefJl4&;FFk&yjtQXBb`F
zd=M>zqY9cBU1Q$ED4-S&y9M>OqQUi;-z~v@S_Cw*?JHwOsW#DdGKPSvD(iV>fgEw_
z?}M15jc)=;@Ne=4q{YpXIEE5!@*2~@PS7lxNA`q6$c9?Crm-*Rkh2`;b13;u)3jR}
zr)jq~=@INk)3m|a+!?^tS^*dw2!q-26vi)*MNps?1rpUdfh^*}>9ub__i?vD;`9e_
zkU|OEf+b>UK+lnQiGT7ak#U;DdGNUPI16;9K18184j{ZtL6lN%Eq!Q&uprfO<WLc%
zloEZ2T%QtI^+2eXJ~ZX~J^GM3%xO@Gx-b-mHr9knL=~HVOj~S))D0MqA!HfuqWmLF
zm4<(W7b+0y(gR!)p)23`E?U#Vi)4Y!6HQ!{r~kZr9soMfocZ8WN`;aGOK7!hZ&0Bo
zN|S0Er1ar1sPxM)a462^aFEX7kfBK>tLw!m%F%r3sL(abn8%(}T_0r|=$f(sUGHJk
zSf&J>u}B0TPr-G_=6RfT*)`keNg*cTYEd02AX_AVo}5q2H(_qF6s;D3mQJ@3p!qtp
zsMCouW=UFeNxH&wQbBaOpR>X*QWZ`oh8c4sirFkt%y8bD>X}Y;tkQ1;JQJv=K^d5Y
zod#zFw+xM3hDd=GfxHiZyRh+>XA^Y*ZUJys<Lq;)IP)YwX&^dX!mK-_bULNXVg;^H
zrJ?$~iIcu=Y&V+`Qp3xFhKB=4gV{L~YiQIQkGThcKT_t5+yWh9oXza}YqSp~;w(zu
z21Iy<$yo<89rnbJC>+<IhSKr4YN+E?692htxu|-tRrGu_F~Dv?d^?Ausc);MegOf~
z`4Zw+tN2A|T9k@QRQwEfH6C429X=V2bVkxkdRiUV^R44)%HiaTwFaTzi1~~rDskja
zzr~$o4RR+pE$)@%QXx6Y6-s=Ob%b55);P*@d4rN+kqiKEYG=uerAL^9;-BbyoOYak
zIET}z^i|67o?Cdi9P7ynP}Bod1Se4ute_%D>4oW)X~p!aX^oq!MHf_LtBuK=vIzQ|
zCq3G1!P70RZ7~u+ebV7J92j|6gsz7+S<)N~r4Y*Ed8Xq5QlLj!JmXiWm73>w5RrL)
zdoxm_uKO`LX504&=bYOED-H5ZCWEz}lx(Y)XMb<AC8}suvP^i#$=ihxB2aHui@6HG
z3bBz;4_YSnDnd(?eGSkej~C^ch6z$N=Xq$gU=jo;AvQ*X2!IuyF3gSJ*r*%U`Y<+A
zEX?W)Ofi=Q2xaaW2L-wI_(wQq!2mDLNG6-4imbF?8s=h1q+#y9POhuH=3Npd%-gN-
z7KGTyu{ZtVBe3P;oj4xrQ<izE(RJu(Qk&O?1fQ|nmt5yJOrQ3rM*ZHVY^`q1IAa%V
z93P|NqpVo-7nBQc)oOpw1|a&H&@n~fN(oncmI5h`ZLy4Tk!PWV;7Eo*l>DyLqN`V^
z6s#xI(DQGIW_<_{rRI4SBP`l#p8YWG9;0`w<U^|70IOaHQduu$04mxFQ<1q#&Jo*H
z(H$?bxklRmV6B|v(?dt>!BZ*Ohz^(+votJLqW!#I=QsRD7X*@yrN_K!DSknrJ}jjD
z+@{<2(N39MAKXb(=W>ih%x@CnppH-}q|@0rG%*MujvCaMfKW7Jgx5v0j;aA!HV&+M
za)cRAVD4a4U9hGoR<t{%udcCcwa;;=(Q$UYc)RxO8tAM|B{idmVQ9^P9TY=9LQSQJ
z&N?MU-uM5)30<d0%JWx>tOoIkGPwZk$a$FKrNT*$T#ODVjbt_<op?nSyn%#r0#byk
zaFjO_p6FO8VQ!z!`zb()duKLxU_xKkTIY4J_G$%=K~mS5Q-P53j_qg4PxO@w9S!>7
z#4V6pc~JRde*3$=;>O?ysopr`1ty4<v1UN;43j6;Ge$&-2U90xB@u&yRLzKPMW!?2
zwIY>_jIbgT7%8+OFo9wtv?4n9PRPpHYJVj1Y`2hQvaJY?2$PiT<a*}T$hAx^hyf?d
z-0{LBA?pfR^F8*=dr%O}c0haLJ_TL2e<K!M=nlllP>wx_Le>mx&_bdTfB=Qa;~$B!
zRDqO~m!gB0^AR^M{39}<M1%uQ0JO1q^YunAc>-9C1uV4)Hh7)Ak}{g-z{@#sonuW6
z+?zRYoRnqzpsE1MyD9349Z5jUEKBalJE>CzPQs|Q+Bqi^r@iL7k9hb6Z~8PJWF)W}
z1wHGp8kg8IxeFoRf-}3(kT(BOH0z7rm@L`~9lix3p!7|fUc5yM-_8DTPLvj$v)_f>
z8nBh)w01H}A5Wadev!zjN!StbM^}iT^#hq%OOP4ODZ+;Dk2>x_u&Nl{nl(R1I_+R_
zGjK0KF~rRv7|j`vV!E-$oF+Mk%mET^H~XmUQOGv01I!NkvjdP(3Cnqw0X925E5Mcm
zF?1)=@;OzyG2KmsDP@q34SE*1Zh@$nx=}s?<zz~k)*<D~b)=O8WcdkzE2SOelZL1R
zWk9dyn(SUrw#)U-(i>|YM;^=hjf7eFq1sFe7@}{k0`m++ieib@L2ToYIS5g#)8XdL
zKVgD(LNMZx-sda?3xek?dH*f)AqqUs5#TJ$o+qoG`};wRl_*&TKAoRH(?v1Nx8FFV
zYd19M(L4wXR-UJn`sko$7zQCL*T1Srv%oW*jcPtv?tM+<6zv8`^A(<1l8h!$wT5tX
ztdlr234Bd9;cCF8MvP!9K)=zg$2+nZ?XaSuj$@445WOK<b<|eID=I8U*Z3y2F3<Xn
z1)To^fU+W35}+ZW__R7$Gq9Tq`C2gs^Y!-?Ejt)_4G{>u*++#Jov46V5p;<aYT;xq
zY0O&;bwaDwo`gUP55^OvA8kSu^MU1V<NZ{+@^P!re12TU%}Kw3QX4W4ost!0p;~?D
zhP1j8Ss92;?81Tim_HggYBL@s5~4i|&be3N^zEDjMP2PN!0S?b8k3A+^q`;D8~s}Z
z{ZdiJtid0T|9k!n{I4DG74gSDB6us5!Ksg6nJGgJx};P+aofNsCRoD!fAjW1^&2;6
zrC;e5ET2;Y*~xj}0^!2pRoH{K%h=VKp=a;-9w;L8$EYBgz;vJM(Mw0@_C0!E9Fp0V
z{S#lb!efU5@;-JMQ3JT>$lG9BoA!i;QNu(t?64NfB8Z))Q41Y{Hp|6iO4-#tr`H(b
zr4f6{+-}}0YN3}LvXCNGbF)gKUOVp2cgA-PEZS*oi+|jI?#DH^Ui@s)HseVAeQoah
zHe16dMMqF|hE)9tsv2$aokQZCj-oe=HIa%Zvyw61zQ@ghMJ+}={%-%d?@9}Cqbc5@
zoi7bEAtRBG>*%5;Oyq@~`?L#VG$M5(Ho+vc>*Z`<skFf@zvDPyNu8CSGU`o?=|=yI
zB7c-3P@23HPuMEeb<aL+k{ztmYme<=ZvniZuK?vFG#IQBA^)fzK+wt&+nW1XM2SpL
z%`ya(WCk!Hm7?T!FGfjarLRjR<+x?yGg`?bK`fS@P4=|@6l)*#EOX3v7kZ{bO1!y_
z1#O&)uiQbApD-^&NZV<c2Y!ynWM}y688*fDYxUeXhC+>EGmv?y(Jw_wg`KKNDu{K8
z8H~6wv=MBN!!IO3lu^BCJvp9a@EI+p_N<s+!vDkoIHlj1j1FNk?(?ZO1Bx6<P1CAl
zH%!yDIWfo_n>OzNS`u~riCutv$=CHzWA<_S<rPZB!if?B^o}l;KtYVBL92Zm`$_`W
z7?7o5JW01DmSrg{JBZ)jMXs2IzHwkF(zQRO_TWxEdQUd?o|jn-8_*W~OX&7C-Tt;-
zY-&%%{Kadur|RAHHAn1P`0wC*+2YWg(!Z%8rly6<Qk%~q6s=EVPx<bPoa!^6JnPB0
zFVv9jE8ZO3mH0af0)N4bO_<-uIAC!EQErDxWd?Gws@G|u0YIB<P=hdIB!2r|G6Q?{
z8}FsZqz1&+IiNT2ud~Ozl`$~Actd(%w9@q+*qjS6h!j1)2>)=qR{MR(8xppa=>P4X
z+$7c33~AMoT8DaZQ%VQ?ey$#hXH%3RQF}B`2A1_Hc<WPaL&|BvoPl=yeJwlOs3o$$
zYqfVnc}Rq_RDT`pZ>;oZbv}|2*!r^3WFM+ii%}%C*QLm$)t(NK<u^J&EbAB-)j9jG
zpfHKG4*!~Udy8)0q8D%0o{H<El-fZ`Z4?$*gbsvKCKVmy@SJnRzvx2UD9}S|vh`35
z*7)_o-MZ0D3cV>qR2ndZ(QJ;>I=P)N#K(BE_$!c9WSjZ3UvUJ3Kpkj2epG}8+ZSnx
ztuRQN+vh7%#DIAtI^ix8OI&goS+Aaug`As_^F{l?bksF3maY~kMXh$LHvb}|qCdX@
zKZ%C{73-mO7+V};+YsD^@u&G-nUzU$6G&+DuL3KD+j~Z7ai|tHKgZ}OUpUdy>t7c$
zlF_T9r&sQrEokmbde`RDn49iJ3*UGYt;2AxbXW5>G%Z+aq+aAk5KqsZbG<q_r!Hle
z3C6HZ0-@#X%nqN9W-T;`XcV_-p}qhh+ah^QJ*yWh_mjkVaupmmgE-<RzJ`c&^~C}q
zJrYHrIb}ei&&;f%^_??&b-ZEb2!C;G=5S<uJEOf+{W&>O@0OzV2_F5(3<q#0m3SCr
zC63BSIZbnB@#ZGC6v8GH?}J4i?DQ8O#<i#xz79oC9L%MXnn3~OVluOs*jx&1Zu@G@
z*Zu>9NMvIkzFPV4-^Ib@c-p=osn!y)m?u$}k(dZcWS*Q(Iqro*4snF(!i3R;Oj922
zeUxzzfU@@iNLc_2lu=ObhRRi8WjaKPvL*spE%rrTVWUKEQ#9CfDY?XwePptp!}*sT
z10j=G#VEIqffmUPJ>wXd^4rogj)CRrW1uksT1mMa{|-UGAAkHKIxXnKym%XD+B*B4
zZABhOPGa9)l|%J3@hbT^>X^I+@Q=F$et>XD;GB>C0Z(g`vn8HpL5x{Q9nBd>YrBTy
z^Hcym=|;9HJQIa8w?CW@j<escgp7kqf=N~Y?&lP+$Wx<$1!Chw8C(>r6lD*hotD-g
zIcy=6>wbs|U`Yi<=fzs974b6CVMW9r$+RMJ|3+S69no;Qt%w^&m}}~J|JD!VE_s<x
z^Zjp&LPLKfW$tBIh#CyNtO-@C93lUh)?tO*)fpG5hceELGL=^SYAdWk$ad;7%8-p&
zpC)RCG+Hw(v_h{Hl7pFEX@%8RSc8yla(m_<%XC<IT~=rlYFfr2lyxJXDqCljS!{*N
ztuTg=?Y1D4vf;<DG5aG|E`WUtvJ2uEgd+ozuGaOP+Zt%06?ze75{@b)%a~WG>(1Kq
z*ao)r5lDntDGyOvJ6VPsdI9T031)lLFQmOl%5BiPgary+$p3dQFINX;#%otXT_6hG
z7$s2n9iuo?ap}{SQ+6l=nhH^XS$(C1tHnM=6|{-vfN}?_b3^0?NtDSKMA^pjSXS9m
z5BIiGK7ILVB5vM00fp3M$uxkLPs*JD&F?TvZW@Ug#@fLOBhwWwlM3Cm)fKWrQK|y<
zAXaE)g>$Ub@H<qmR9AzM1}9wBJTM+*4&otQbyzA`3u;<qtt1H6s<y%!gqf_&nPnOC
zu7Dz|!8SnFSx5v~A0qSM9I_Bm-mI`}N|7Xz(jEl;<4ia9kdk!(t=?@GQ#!5CZH0wa
z=(WOfE3CA_YAdWk$h(jjCj~b4l@g|ev${+##bnkYosMy0Tv#lq;4nz*DP;0&-pB^%
zd24M95G-Cs6l>tvBm+bfPy&@dApqqg#Y;S$WflA2WW`9#>cLG)@&Lx;mX32*&#}{T
z$%9uaM*teP5<}37Y)3XDd-8VW)emr^i{tSktHcti!HZpb!0^T+A0bR*SX2sF<{7~@
z!MeI6VBaM9uqQ4OAp-!@tx_;QEDNjRUt8!jwAcKjgon)EAPgNn78q8$J6Q5E3fau8
zTUk_a;cV-X>v6}kK5!EZkDtJr1QST-x8W$bTCRZhn$JjDSK?`u)#f*|g?UKI@E3eO
zj9NG|i*|fhtytCQynw6VtJp}?wH)3{<|NcA!d{oN6)~8HI}s@R!kuV<YqbOTaYjrD
zvE&iL%(O&q2mrt_3X+jCjXeS@4cRapm#jo+or2AN_cN`L{xpBsj1ALBsN1bGI0|c|
zmt&+38C7HFRiUFtVG;zkyvz$<P3veRXDP0LfcD))`-KW@z89+WYL)&j&MIaU=`x(Y
zYB*>14Ckp3alnPXLLb_Z0v36eDgbPz0PqQbo&mAK@dLW-KBmE5zSnHqBm-InWQC{3
z0t4MJsV(u&g~}55WnqkR1*EoaFi*8gNp0^1sSUqCsQ^&1s1h8fV&%;LGlshKW{}_l
z(LZwTLjp+iN2osL6HM1s`Z$$7Z8g##VERF5p&;dDD*Xnge;etYm&3|yUJOXCRxX1g
zDfiSls@&IE?h@pZi4)CWW-I_MBF48Po_G%J@;rZ>K0`U}H#U!>=zK%`K9`BNI=3mI
zD<l@@fx>MuLRdYprWF7|13(+UmDkfER1tFVGfwHjkm81+h&_ZgR=nB@E3L2`p=``R
znt!EO^PFz_FvKK;f+!)B0)$kbg_9vQ$oLdIT?~!Xm4Na{rd(W(_IPPcS>%UhezAl~
z{Vv7kr{v>o$VUjtYRg@v3VsV{vqsv4rHqkTRz!qlwG|NoINgePS*+5Ej9_Gf6)A+&
zwBC`>a?vhuV0O8cUC_hMLJ7!3B?DXOto%1v$9S!5%_*yaTojlSBvhLe-8`mC9QW)9
z1!D=1+lWxEPSP>A46%jiR-DttxYvp=wc;bJ_;M>=h!E2Kb&P_4aH-+mw$jQiuu8|Q
zxZ8?1AtWcYNNCwd!bi{mHLp(*QWxQ7z-jA{P@U;GtzC%ocCK4OrGxlUE}N}kTAdOq
zTRR0Eku%RUr1AL|BGVjDj<a>-T4zb*)@)zoWb<se>-UJ@_ZW%hw8NfeetIKOkn6TR
z0v<Ad&oI^MUmxc{R9Bn^=^7j<sDQjS8K6Ry(?Haz510~KLqj>)_l)15HH_fZ8s-}J
zW1C(rx}t8(sU|GsH0DevOu{3*lCVsQF$b&(5vJ0?nXA=g8gq2rnC#V!TSw@|4TZXK
zEe>z@yZy%Ge7|w4({J36?Kk?tmmMO^MCD#0j85t2z$sk|?^JTCc^OCVI<^wT*>D5o
z6fHTBQ!gOhe2oclB*30Te`Ytxm^IXHTeTme(R>@ZByth;v#9GfWMCWn04@2WIah+(
zGzd6wpH#HIHl}N(n{=Z|53O}it}lwIFnuIb{!SV1ukjKbXDukESVv+jrcj`J3~q51
z^vDtsAWT~%5{nQ7lT{9HFj?i)Vi$*+Y9{fd#-&W&Qi~Dqzrj~^wZ}xSR52DJ<f?p}
zaV&>fpd<eXw^}g3um#dWD&}i9S`}fJQNB6@Zn9tst%Ycho;H1(xQ*>Kk2EqoWPWOe
zoe0O(9>8I^WEDj1<8}|)o`M?~;KUi6*L5BejAMEdBEAdPG!2%gjc)8mY!51$m;|k+
z3FYW%6O;0*<%))?A0`U*7pbEGwgo43UM%)(gh~-H>sZ1%EQ*JW5(WNf)@~sO2)G;o
z6o^+QvM5}UFhV_g=6;oJxz8-N+R0`nZ%vFtR6|<LAE6d>6V=;8QmWoB?#&-)^E)w6
zaqAw;OTjYCo4DBYPi$2g&Iiu8K3e*r4K5v8i1zyHqgfaDVIhAkL7ZY>KmFhq_<Yb5
z9eS_77Iq^&Wj|({rR!3SX7v}3oDD%!4;|{SJvswjq`A|2U$0krRX^K8s<gjdB{+K$
z`ltll{|MTp{rjo}cTYl=N`U9ERl=bX3VRazsRVeqTP6C_nCmNEtJTsMEkSjOw&PGo
zmmbw|Q@u%#@5v%Z)UMaU|2&6tvJYcF2E#cno7ZE7%y2vaSGH+&u*f-K{%0_|*~ws1
zvzvjxxsbusX78sZ+KkfXa>lBfD;Wfus}Za@)R)gKeWz*nZo@zLUr*VJl=c=Tub0g2
z%-oKD?Z|9rW-Bw}lKCz(JMixVWPZTR4rcC^%mg#{;NNa!?q;Tm$DBNk&q-PqgQ;3J
zgDPzh0_TF}8cZyqa5H#FpheA#EofddS(Ld;nqwBUy1B)ImNj=+(2C|R1zAYS@LGV!
zzcD;T7#ZHj|K9K_G$%(iNy}p}Rm*2krJV_jsT)Roc(_~-IqW^qfGeN%7RE5Ba##U1
zeSS54Q`PiUG<%shTTS02OkWxJ9n+_M0=ll*4<fHwM@l!~UksGSzZsW}UkXZT?MPem
zAxku~1a7qM#l6cE{A=HY#G}l4lR4?h*V^9#Z%K=5-sUvk+lqWO_gecq988u>pkGa3
zg_^)g+8}>%hgN$N48FLRgA-~i79JsJM1UxZYY<)+&9WqlI{eC(i*)m?HR^g%xw=tg
zbF#T!576K32}J1c<}ZFX?eF$YEDG^=+iw00)v4czoWHjjFJQd?wUqGdjEK3xADNEx
z;O;r!&_hS<)Nv@`f;T~5#%sV8j@sW)cb@2IW(Xs9nJqC2@{=c9ZYHS;iD4pKmsmTD
z>aiy?mo-91A<*<_)*c^TU=Cf5UVPEg-LU$1X^*kbltAMfn#N8g(1w!}aDSS7K{WWB
zvB1Le_XC1?m`BS24wzM}!(AV|4mZI6O=R3pfFCQbu__Nh<+RNEqWVayXn@GeY&2UE
zA@(v8ostOAmzkI^iSFKsZb^g)%q&?Ti4cRCiO{prJ49h-;t09i;v3(7%EK7<Ujs5c
zB@Z|1@lMFYdXa}a&tz_!B@b~fbTlr(kO8>v2My9=C;#BuA`jf|3~a*KoRc;IZ|Cey
zz{xpvlNZm@P33r2ZK}l6zlmb8qKgm8z>01@DDWy`e01=^JCF*BvWb+H6Zi4a#>Zhk
z-s58pA3OQjfJd%c*ttqd=gyXeovWlYE>p@`1X7xTU*vKwrL^W;wy8$g!ntHqodQ;G
zTC9K-o47nsX2GTw1%x-n6tHMhmjdQ(>L6ett8ka`$na!5GDaB>$Ea`@v}I5-9vP#I
zN5&}QS(cL1xl2-VI(KzSPUo&j$?4p1N>1l4NXhBkc_}%ayGZ18S}xuUxrmpsv@o<J
z4(&|NT7}G7mCV{y%o-$$ny`RO9w&^`337q|pJzQx7blNX#0leUmU8+ynewVnfy4J}
z7MzGe6H#~~3QaUoXd((t?83h${3E3>N2DJn&dH!k1T&_pkg)(}tc;go<&4dieU<aa
ztNq&3<>E_;4!+fJ@D<3X4E+<5F?H4i1lfoo^BB^ErOb;1oG@@owjwxUz_MaRaK?b;
z&x%wsveb&;lmVe?MdScX?N?o0Ag9iXczK4#i6fwFfwQdS0@kK2g%xl<P#z6p`h(2k
zQHpvl#UUBahcVTkCbCNT)b)5BU4YW5H(#Lbz;pFnqyV~X)UP1ZLcEvfzTmhNQ4pP;
zt2~VLR=xi#_=?WXV>wz&;Y&ORp0Ds`eh->g4Phd#(=Zrw;%d2AtlbS~!Dc;jjq){p
zF`qiw5@oKe-R(CHYV)5!+t`QTX4>PwVELP7ZR_oB%0`^C--Gi0hd-*Wl$x$y|4M8<
zRVDHel32w^bdiV05OT{&4>6we)QdnM>ez@txtr!zGhYt9juL`<Xz#?)<EU3E{0B<X
z<McB|;j^<)!b4`A%6<mfW~-!aH=lkGWkBW_`vf642ghACu9T#-$En|_cEigQhLGw)
zR#|LgmEKp3axyT|SBxm1zG6h3I#p*n(y?+29P@F_ef(7;jhqduxS?C~^KvQz_B59v
z0-u8?UF>|j`K;uDz0oiXIECgvr!ylgMhMUbN1ET?*URm+J){aOmywHKm*eAKsx-0#
z&;kFzgbBE;7T4b^WTcpUbO}cYC=np8FVhb-nq~Q)#{Qly161ZTuu~h{mstbFTX{xX
zj7|Z^h~ZMer8r6h)UEx_@-r=GG+OpKvqlijxY-ALzOgg(USAqsCa@Lb6JO}#&y6)W
z5v7bV#s(|mW+h_-cF1n#<ue-U0IV(-afA>x&Gp-7k&#;4QLnhd7P50LK#nBp&1R*A
zY#>^ObFI2*r2DK_(n=)lalroQ)GPAgHGM^gRLr`9g_i>M!C2kSgK8y8C2&uZu|wl~
z6(DsBOGUCa`Ha#{*(rik35+3wdR>=<_zH!D%InvBekERVz)aXbOR0W7J7NcPJFd}K
zaMU6ptQKw=%%!Yehu4jFI1%g;t~&Cq4jb??OVJu}AqSgRAObaI^g*86g35r=fef)5
zpeFxVxEo>UWy=Bg6hu&IuH}FWV=r{LjJh=KvkmJy#XR@i41x6ZJgIDX$ASD()apVv
zq|ueAO(j+QSM_M#L1V6RyH&y&><_uFTtR@xL)n6SHjF8jyC;nvXn}t{NS}pB8fj{>
zmFOig8OacFrIztV;e8zb-#JI{Pt{RhV3`b3qrI;_j?@^`TE`e36o1^5wdMcxPKnpt
zzOp|qapY_^A>@uji9%n?-+>dj$0;js&z|#jB&WDt>{Pg+0GYvsZ^ICC5!W2~+h>pI
zeHSUavmIYkDY0`QQ5vOJB^+_ev|-qjtR;d1)6u#1#2>MEnYB|3{n`R?2w4b_Os^AB
zlr2Y?V&6#7++(z+X`w_v_}q5UTQ_>`S8>*zdrbN2Ze+dxwy$ox`07FnJL{fjgnScP
zLBok>O+_rjA@{5~97=;8nb6B;w{J#~!w<6vE&}mN0bVZ$pr`|<U9cuzk<VBM8LuPM
zOEq|1kXU>rCNfYQyx``gXJe~64~Xz$z$EcNL>q}#;5gGt&PEja%nWq{O5xQd1^1eV
zUxH-8cDxTE41uZjqJ_sh(Grm14FvysRw|keVk^>y!Z3|S(Ck~P*%azX1m~siF2vD<
zYg@Fpf9Xm~Jn2fi_Iq++4o5;o?Xw1>M{%R=W|zI(Xnmn$eYAN2Ngo!YfpszZ(w?Yh
zieH=4;Y)7f0Z!~|nzK+Tezzi5`P0JGm4r6m<4-#pIVt-cXJ;wm6#FJep!{hUBgr2f
zRK#l};eG8-Z?a=K;lmA8Ki;nvqEdDT`v%?Kobs>TPy$EG5_i0$<`51Rqj@~XTO{bI
zjy<8d3B+Xn3ebd315U*H&OC{Gd@?w$GYETZZ+BbBLp|bz$KLBE-lNMV&;>wJy7^|E
z&&Y#^aXbsuaMKlc3*R28O$IK+D<s&u53x_~P3bc#yyjr!&l7_ny*ocar)6PS{muqb
ziD7Vb2I0_&qIqYQiedXjw)xzjGu(RffE^fqSUuLh4DU+`tK0Wx&~+RAlzV7fo5U3;
zhMD2stLU2cTkhrF`wb{%Ur1EGqt-)IE%XK&6~bz@By-|2X2a^8+H(J~T<yO$q??V5
zU_!izz~we&W=YK(t_fs-*8J7>$w6D-Ba2aRi=l%-cJm;~fvmwla7C+b->w&{ea7;?
zrLL3qzg<Q)fTj<Wx=>S_|7+ry;tMUrdoVa4;wr56O5`NCAdX`Z>NEfI{6X*EJO?@l
zrb4fu{fWf0^fa8&gb>lJRu<sKK$qypo;pCH#<P9Vtaq3Ynu`SG5B#Gb8?&x7BZJ<8
z;PV;&x1jsQ%}!`+iSM#5I}j42x%a;?-a}juqf?FP(X1xFU7Sp1PNRc1!1FoX0qBVX
z$51g30e70ccBkr+4E;H0K&%Cb;r))>s)jF6AXAE?B1~e#et0r}cBi6WH!3P8=b{6N
zqwoYb+fK}hCC<R73rda6er`QD#(<{gw?LQ_lY&{8#^~b+`{S+76625-dJYl3lZY!R
z^pof#*jn{Z>0SSHE~Hd_suSgG5VA@~sD+A<ozc<)L}l|bH_k*coJlZ!oE<$fK(?*x
zFnK!O!cFn=JIo6m?aOki<$2Qb_s<eum`K@(ixzx|ZZS*3gFjS;w4(Jxn~97QNce{0
z-tWWy0c?%x1x`IOx=A-iZ@{|;da^m#<qLJ=12nx?tM=$Eaub=?l#+drc6g(+q6r9A
zY{qO=G~wJ)vDrx|pAg!wXmS&B6T<vgY%U;#OVWzXg@mB!R&0jR5FAn59Q;*cFTFD8
zRKCd^^#C`xy@`*+QNywcb5$$pVa?2sU&SaBn}x*l<baeWt{%B~h;CdAgsO%B$tow1
ztQrC&tDKSsB&(c2vT6vBta1X$sv$tK$_XT^h5*SbCy=Zf0wk-PKyr;2h;|2;l5F|x
z5wq+@XbYUDM+db)lix<FF-eb%6pwg%`;*(7$?YeML7U$OcEoIEJ%HJ3)j}~&S$g(-
zT-~_%rL;0W8QP3bP52JB`0U;PmFb(ouHdKsKRJB^WjN}>{7iPl0coN4KoK~lzb32Z
z1S*`cMt<C&$Y74B#@y~WOuYX;IJaLT;?~@DX(1;^f85-DE`<4Sugn$y2d1|Ci>H=~
z7pC?ynOeBcOO$UIzzW~wgmrXtAua;6-0)^MLuYA|n>mHdDYtUGR*siB<;<zJaw@Hy
zO6F9<uD-brrrOOlIG8rokhPrin(7!LXE}2gGiR}t!;OeJEzF5ACuZevmtjs9b2?yp
z4mWim3BbIjE&-6~gas*UGuE#gOTSyI{W_50l4nUQaHRkSF4E)giZEs}8py(ZjMJaZ
z%6?6n(4+}$c=*`CCwL=B3x9x(D{&!h%V=%{NS3iyMk<01`F{lcHwe0luSIcdr)d6v
zJor_3u!cPNsP&#q3z&|iTS?9~<UN^6Fd$tvX;ToF%^C$LHy)%Y&mI8E*$05~{{f&}
zf&i=#*FgZvZwMYx+0b`-c|cL=i%Iui$#|_Bv#?KFpHsr;67cy1Y}C~m@5fj#iy7q^
z@5l81Oadha-jSIO#j2Xd>n)FS<@Wwe0{a+&5t~^{$W=IP0CDr+uT^9@5Ka{H<19x1
zm4Iyte)_X$z4!yvcI^OZS9GYNO)R=ta(OFOyAz()iRY{jFSN2MzIe%3YzF=m@Gm|V
z_-w*I7C2`nV19fq=Q;qSmYO8%O`4lfS?qhq`I~m5W!M{NGPEG&)5tXC$WCa99KZm;
zqHPs}$=ibFd1@5&2%OmAPBq(8i6=bR#fcT)lHy0Epdi653uB}?WEMiIIn|8g$g%Qy
zj0Ub$%Mn8GDYv5Ys#wVSWMUXp<r{ZFd>e*7Tf_Ld$o`4@R5C8$@pa^*&|)`hq2)m4
zy6B**^x}l}*sl?RqK6OY(WB^)-#DU&-iJp~J~3uXb!ZFU4$*#+HAFryxiPp$-Ljv8
zrmeoLd?}f&{iJk=T|g&6ejBLt&3uCJtN`dHh*LYj0)h?!3kkyU0I4Gg;`qM?{y^J;
zixa;C_xO#&u-(HK=sJ9FwaFJZvwXPnr!Uac(8)m}J@oD|{owYZ12Fbk?m$x>!@@fo
zI65o#^ErDzb`j^K{ci3n`wQ`$y59=}PwD=0z*YMz@eJ&*hQ(mbp+0<W0qz;DAU%FS
zlDVCkU*HEMneQ^Q1OJZm1Cq=HGxy+Mh93~zfbi1woJm)JFmt9}fvs;&)fIz)(1QIn
zG84}5eqkrgxoH1ll`?NXXA)(W?2jp6_5KzGEZg6qfED|@2+%<(J){=&eVp&lzZ#Mv
zh79Qoe1HDMkSeYK!7-*h8B@ND34T8fPy^z9wAv|HaU}<0utV|afpL_|GzDau{4z~b
zWtuAXdyzX^rfCwUiFe+ZC(Gdn)LgTUB!yE*+Tmwfm`y5&AC_oli4z=tnDZucPIUNT
z*4rH1aSlK5_u)+WWu_`*rY2nhXP?9P7?0)b1G@{>Ee+l&AH-#13a5+uQsnThtp&J<
z<0n_)&l9%?uh*lK;mdPGdp3@5SGCoQz5;Uf9reA{*MF(r^jM3K@{=;c{2L>bU-(ns
zU4`(rCql41Lp3%RBAfl+C0N`7zuCQJ7->Fn1?oU4B|<vB%cCK_;xGXpUfr$DpNhqV
z1yrz$UxwJp7A||?QL!UT-0y>%P|(jSlq?UPlh)!fc^z(FVssOF48_b}7osdu8_eej
zX}QqnEQI@^cM<Rq8IbRZlVGhui&+nxetk4+mtG7T-9Isw>!MkBg|<U`Y%?N7xL5o{
zKe(;rz&d;2Qt5fui9HWu&qI59K40~m)v1S??SA_wTKLBhRJeh;?ESeS55A<G2J^#j
zrm%mP5Omf~mMz`Id!ydNZ7^5%6keeUmv-WWx~`9}cz5vG#F^-_=zuVH?RhwMC;o{C
z$eAnOlHQF|=jYrtrs8YCWm?_`B>7Q7d=xYgg2|58oegk%#7!sM9`Sjq*D&+CaZnF^
z3{khEjO<g2`ML(*lKG6!*`LuZW721Qe2&Lter9&^K{#(5z`rDVEXbJ;A|Q3*AwKrf
z1a0mfh9u-cEVxh*DeWpH4=DwWW;0_jxgr~%OhCeFNyzJB>s>&0atk1QzxMZ5L#;@2
z(N6|xKed{IUkMt1Tr~y96RW2msHVE7T@9C!Po2}Q)pF1Llw%2e=2k;0L_1$Tq~0Sr
z&q5$Ql3c5MHIlxn`}9cmeS<?gaU>l~2T3wV^14dPQzLm*Y8IqN@}g?W8c88XGU-z{
zzd(V{XF%G~=+oI3lcro9H-o>w`piMMdK1q6;)WwGIiTg`7LMt`dMpgpU{6&TrU&a#
z7@ja~pI&_8V0WlAYp`1w(=rAdlZ3q9)8;R>K0KY0eqi>r5F|Ol?^<nvxB^*X;ge^0
z1+-fW--S(HJ{s=RZfw@0HzkdxPzRKN!~JItv_!@hkW60%gCzeR(a*s*e{bsMPrSrG
z>8mFjb$m-9{IM*NqgY_a^yti4^e2qJ*H1tAHrr75xA;cWoX7d7<6|Koi}_f}$8tVa
z@)6^siH{aOTKVYU!{mcE&U3o)7^8>w)NtVkf9pNaXb*f7X`=_@F?(>fJe~5)m#15v
z1@bJE=Lo)Ma5{`$8+-g`kd7}v(AL!y#P^~PnHv}4P6Lan-t=80^S~Lz(i+7WpfCPh
z^gd9FQe1s5ddF;;WA(Xcyhv`t-5>VLf}D4las1b!+b>j-aE-YQ5VBKWi{6YX_)Qo%
z5<{T69+kTEExQF@p1!pOo<E*Kd@b6kz7}21uSHj4+M)I0InPsregP=O7=Y510i?8N
zoOfcXjMA}@rPOYmcc_Q=I`0TqY$egP+8<$YCH{;D)c-+GDmCBnejW8=Kh<l?$w+=l
zfS;Lu0Yy{yZN2pU&@R|0VeQ|H_dW&>0!va4Hcy(2mMDQMguU>6JASAL9%Ha?s$ZGu
z{mDT1I`O*^?EV_-lY!}^+5`B28}g_P#H5U}#;5S{N4_a8SCIm|Fp%w!2JP~}weCLt
z;;u)|9ansC_5|kp4V)MlRGImH`^Wq$`MLy#Zu8xXYl_gCe623#yLS^jfB%5QFg}q?
zRi7S0?USt0JSDxejp}T)`|whJ;WLhoqO%<*`R?78Zf0|K_4IHoIF;2Gsp^68s{2{l
z3fpWw>Tkj)N=v)*VHSs%QMdp{FMhIL*g`iz6K^(p<fT_=?TB7;iD`4qCwBe#sRPGQ
z!kIJI*_^%rNqs$dDVl;%!66Lu%&X!ho*FA@l6;rHLcYt7Uvoen-Vm097uXXAFSie7
z@wL~m!^Vj6zUv1UGYYF5hXtG;umIOg0NByS!|9v*?Ul+ofJZYU9;x_oR#dken(;=w
zb09v<)fRuR|J?WFJ6vr@>-mz`8&@jdfE!q?6~E@d?|9W4O_9KlvZ6QHjUh%CKjhVA
zG{isZKldYPpaE&A4|y?Tk9^3h3us>0iKAKSTV5s-)VI76Snv~e^Ra90A=TffAc@fu
z@uQck?M8oNBhlPwY)^kKY&+6=(A+rpu&UR>dfVd(!E+-9>i;pnH`d?Sz%Q0<Ft(%{
z*n+h8w7Kue2gjI$5035ZSF{26eX7u00|iQ)bh-%ag@ii*U;-7Df^|$<2f1<#Y9#(m
z=*FZD+onCX0BaNlh8+n`(jr{oofUkGXxvy_PvH`ne3bYhmpl?jq!ZUCp0yIsODAqk
zbX$ohrxS691@1)8jb_*Rl!y}@b+Z`aC$F@dVn8Dgr?k8nkN|@4Y3--vnwxU15V8qO
zg{LrBfiPQvFdoEN4bVfDVoeAy@~V8{8Na6{@&><Vb~D~P(`gJSv{>?Wp8UH|G9^^T
zMK8TVE#YbmSZVCTu=@P;^`IDe5^5%2H(kz3p<~VmPWMFz?ei6H)I!fA36W3z_~z$h
zPa}d?m~pm0_=axBM~?R5xQs)7F4F6Kk-WpWr9Z+g6xZ$e+Sd`hCv-WQR{ZXZ3_|{(
zBfg0LFfT!m;6+6LVchBZk2ne8ir;@EpAeRn|A?DV0igmyg@g(TjUa@}pGMqJ9Si<0
z@sB=Sf7E7itiKV*`t`ne0>}DznbA}df^$IVP~pt}MeT_TP|z28_j4bP_wAq`4j^rK
z_}iR#R<ybCoYYo`=hQYYo>gt-cm~=k@toaOjd}C8qhxMYJ06qTQ82fn9hGvc+EFlf
zYC9_B&TePHKszgi+iE};0Q1@?SpY0(TdaUZZ6q~PmbAqbu&k{`0V~=%6tKFjOMu*Y
z?W%fixV=WDENWNPa~HHPRw>KcRrTB@?J<?Iy4|Y2qP;^>fDf;<1s|sZ7PRqd3@MA+
z3Kg)V&8vWAZRHAB(N?K|)om127~Q;fRUP=K26Gp+tLnf<HJH1sT~!A@s=?gV?N;>_
z?N)=Jk7^n8Q2^+p0MJJPOWIl#u&gbnfE8_B3RvCNAppsu01!$6Ad3P(C<QEQSJi=!
zY7qEX)mOB$LHr_z*9ZR+t@Z;PvBq(e#&xI77w_ngPdT<%`l6HCDl>gn>TsePh}#YS
zz~$hcfw;|>G_VD{Gq4M+Gq3}kGtdb~$AMaI>43%HodLKGsTdFg>kPp4N7aB1u*?8l
zcZ?nYei`7@Lx~#PE(U+67a!DWpMh@6)BXG(VEO3rkNR?Tn!v|ze+Sk%EYRztgAVJ(
zo3zlckP0i@F<GDs5GiWsD;x*6`t67LwKG|uP7Wn9x(#>A@9<?0wLTAHeV)n=LU?Ng
z>vs_H{qHnkk+o$LY9Zt#)JiCyPzND5A(IeIj_-64f*#|4r<)KopF!{N7fyV|JFq_K
z1EkD>SfAIQxISO21ae%Y+M|QhEA%zaPI|M3{S9my9_fboB?pfzRKS9fUIi=~S+0O3
zBP$iKY-F_pR*bAs!0M580KmXwSX;{tk709K?xHcQtK}{j!^X7SWn)-V%Uv>tP2t-g
zV^qBrW2}b4BNrn>@K~;Z1tVh$STwRl0ZT@9C}7#hE(NR@X~RB&TB}Do0T2(?CLU~#
zc(5+<U}MCCHHinCA|9&ViZND0poeM^^iTlkp#acB0icHhKo123M%Dm{*(2+O5=awD
zNC-+u2uer@N=OJwNH}{8Dh0-{e(A`?c=|^!$FpK&49`g;TkxDZvJ20uksWvjMmq5v
zJrZkR4A;Pz<yZq_xCX|wU=57n8W_`sH86&)I`z4Y^!-!IFb6|MLJb00jZ?|OSdF;U
zALF-gD_Rc}G_F65>rdnQ)42XLu0M_IPviR2{@wLQ>sjq8tUsTT<?+GePjt<3P|}@@
z$tR-6;R7;mCKWJAKB}3yr9&;%xl2z<{cPV}oVioKL)y8`djG7~uSG#0>~(&ayDO!Z
z;%gKbITh4Jo*F;^Dq#Ib;Q!BmFN#;0d}Fy#LiPP6`fIomH~;Vi4T9FY#b2ucit6{G
z;I@Z;<i%oj2>|7(AAU0`@hp?0RUY{3yhn$5IKAMTcn3Pa%}<_&KKlHIXx57;V!JWw
zzYkuN55i7AKBgNueC`+fx!fg;JxXP)^c%Pd7<&xnnqj5)5fl|IYbfp-{PuO&+~Md~
z|2cjiiHC70RcP+wBD}P{-*1O$319W1J)T&VK_u?clk3EPiHf&+7o8A3VIsp`tvt0b
z>`IB_M*<M`{9Y9fa5co*`n{?du%CXYey?h0&FEsx+b7C?I?vz{#QG_jM;Pg4H|6Ot
zF^bX@cGF5`%b_X46x<P!;yCLT^5?1sSwC0x`)`4s##(W?r5eUEqQF8?Jj4;4RehhF
zCMNEHR%Q?SpyE353#yYEly0dkrWGh-e(wwvgc+^gypND&G3E4||9CuYF+Bi_DSfhh
z_(mlDhjrun=%rV&tMhZvjBe~mi`)}C16pg%{yjbQRXz3J2QypN6}`}sl5d}$!f87p
zEq81-Um^%KVzXO!Ecl$?*h3pu;NlZ|f(rd`6>$G~PZK{?O~4Dvnt`J~0*OJjFuz?X
z79afB?WPkgJ{&TQWdE7PXD!I08)ESRgW<P7afG;(2AF^tU;=|_fC&ty0VXimEl(O?
z0)uIQ2@D<~Pp>?6d6vs_f;=nbIbELB@|>lIcGd_Te%52}`FFbeLVHh9hP~%qv;8q;
z?^$C`JWUvjuih2J&FdNaQhN`rW3=})fh#8Fd~%P-o?+|x%VB?ImNuyoJesqI8OPgu
zI$`A?7skzx0U<kO@1c)!+TQb&)a`BWDM;IU{(HX;rY29BbCzQBP~Yo_I--wS=A5Yj
zXcTa#%sHJ;u#Q|9r@8;Q?`_bY0|C-RM&eRE6Wrp*x4(@RnqR1U^9{Zj!XEpZKY}+8
z#=@KJOYJZCQIE08{=z`)FYhb+%PVLoWq&Er>f*k88(@HmBb6S^wG=#1xA+S}{1ze(
zt~Xv!#GthPj#<)dHGCAAi=h_MD)ZDAS!Gn?jWElg;oy|sjUK*E8pV%JN`th}a6Bhi
zXx=-OyG*LDgt+Bco}aNVGSPeoGvG7iYm|;#t=XXXlzW}dW=kIbSN20}i?|yv7ZZ(F
zOf*nS<zjpj{e#qYnA!%FW1##HC-24=;hP~O<pP8~+hYH5B&Yn0qBj3E$a2{hPXWX5
zI4>KTJ@H$WE}z?|_z;)lTeQ$d%)@ojymS1;JGIAnFUiYsoS5I#55DQQf2ej$9G$Ti
zxlnjN&TUaH$aCd_d=oqrLr3un+B@2#W#~al_57q=ukxxYZ_IP;3g(r0E^i&tB=5{~
z`N;vmyfn|{7a{=j);!mtU|yT&_ERwL&G9ma+?)R+xX}8B2EPyj1^yl=@TM<yZw`Zd
zJ1znJcqs&XNl}vKE%gnJVc!MDJ=n_o;@G?@j#BdAXBeCeCLMJ%@E<K?F!iYVp2pE~
z#;T51G6)>4M(}^-9=(`%&c$+%UM%<M#p+ucM{77`z%6`KG&4YpjxM&+<{e#bK}(Lt
zENJyn$_Lh1cC^ESRvhh8(EpYD^kS!sP43f+^Ld|MjQjLPH}@m0mhX$J`}B1dwxw#?
z0&3d)YTBl%X{$Ku<<MrUX`6&;<99SLZU6FGUAv16{XcW9&WZEO#8t?|O)AE<`g>aK
z?4E1&S&-f*YKU}u{W1*^`Z@JOU5|dLepvMg<OlZ^^E(1rdde+~P+;Q^H|$2g#7$HQ
zjrcfYtvLdj)P928a-HKKU|IiMq~fDBzI$JX(0>}GSGiHy{Gm><DX(8qd7bZ(J?O*U
z8snHLGPH?1L{NRP#&|Bum{-AQ$Gy7V97Tw)Z>3bL=jZg+82^Ee)b9*sDvba94pWcx
z>~|+>aiRrobEf)vgAg>+FVh!a>?!#>Rq`68FV^?yiycex8(J{Ce~HR?9WXM#+tbPv
ztG}LcZFrbxN&M8pxBH|o319zWm9ZTZ{{ofq4SX&GmxTCS2F@j~3tir(=%WUa`E&6&
zpFqvKbO?35A=&a2{c(usk3-y&BKqSH(I1D1{y4-d>7qXl5&dz9=#N7xC0+E#A)-GH
z$<QCcm~vhC@AO9X5#Lf7v^g!K(M6{Im@t1aJEcFKrg636-d6O-fsB2r{`gN2F0DWQ
zsS;&jhZp@Z&MZrR{3A1t*B}3Ivf`Dv`CCB9{$l;{TT(ZzKSFgpRh_^8cc_ly6!;=F
z$8C5NDN$IvAHL&>|Dbnf)UoCvOO<>E0GDi!@{;&Lk~aa+CjXAQulM>x#hY-MTAxa!
zVuFv+!K5HR0eLfik36&scX5aHXn<c}+-`0_V`anfrU6*qAAxtx8&JNIuJ+r|YiMoZ
z9g}aXPo7_kJH_PrKal6$^1Mx+<K%gjJd5Oco;-8pnY^8~67qaQp8p}w-^%kDc}C><
zusmnT^G<o*EYC7|ULjBZCXzL*X+82lAC+?&R#DKY$K%dv2jj_P5O3Ya*E<tGg-ClH
zZ}aUcsNEVU4(+rD3ivoRbl4skAW9&HhimHZL$&C1m}3!Wf2Af_Q16ShFie02ae|NG
z=es4EFDr49C!4f_#S$NUEm7a&i|q49_6}cfeuPGFc^Re#Z{Fvy*`8>99QQ8Xzu7}q
zCUzrHtL1Lj=-7@3TC<t_j&wK{xY2Ap39-Xjc$|hLT<AW4LBsuX3IcNgE_d+@<3r6`
zRB<=*uU7fPng78+HX25wkU2K*CCMi?nj3Hn;|(W+SDDUiGEl1?8ezltgd<Jn5M-g#
ze1D{26)X56P+VdC&3P3hE=g`lI$rqL`oMc<pW!h*GKmhK_(4(rYP=3wxJxG2zl=s~
z=2|@KFf07paklW5_lZLh$6csj8+#;2RraDv;T`qKCd1)JhIuoFDU@-CuH3o{NC&Sl
zzl)KX133OeKsR*$Dx}*mjK1h#_*U-|o6WNT9cVqHJ@mfayc?w>^_|ZvB(6eEUES+?
z1Yf0|;PxBde4`xmqi5fWUvC8mczuzs$YD%T?be`H>}vYRW_zUn@K|CGx`<w#40YRo
zP^L#56S&Yo<^eRfadtZMlCD2s9Yk8OfC_o=`3q7(wa7M*%v=l`emd)CnOQktK9xl;
z*UQo9C8#!hE354V$$7lPkHR-#{i5R?4@$>@lT&4qu1nD^UbJ(Bl6`~To*su~^cWb5
znlGrD!IPpB?3jVV&b(CFq-*cDQ#f??8{W>`NIj<#LVc~bWbz%8DI{dxgr7NiZeOH3
z(xylDVvz(L<}W}5hy+RmFh~y*VE*q&{8)(|FE0k&kR!v_CtW|gm5jDA>3R%6(p4wX
zrd20p*|@IUx9Q3A!%V}W61VlcM!byAxS-rDq+zG%FCT(#9tx&^>qGE?I%JmeYd4Np
zFlBm_Fy2!}d>rEu$8Q)Ijv*+{dwVb7GmzBi*!zLa#%>!Ooq#x(jgI{mq<%j0-WXTc
zZlvjrS$3|1bUJ>A_$@Z<&vK^{p%Ub*OeLb?rL%xxlG6w801(ovB&!#cNV;a+LVRB8
zMx(<aTVr|@7p|aT(ses?Fjn}?OvMi-=}|wv5gtLVKhmU*#S!?@`T6y52z*Lq*npF+
zVW^gD#IJ+Q{xc_!GTE9t_bqy4mzn~HllcAplIM4P<C%*9jj9g@^w3@T=w)1f(shX-
zRfhYOGH23tCSsA8UPL%s!O?MU74L$THd5wBHAvMCw>}*3@G_@9Jg&ct%>sXD{X9g5
zr@PX%(m1FeeG^&W6kR{qg6Lz6V&-qJ2Z{w`Dri|(e)4?~CfG0O`Yk$2OJe9qIWL;1
zkUanE$N+CfbDl*!@mW74MjgKbEbW*><fWu+kNFXB(L>pN&__YN$a?cnC=3FhbC({;
zqIV_G(X%@p`lzNr-tbtLuKhV@IvNV@xkSnO<@U~lq$Th(XR5mW&A(=0@t<bihB(CK
zLY16#NvyNK`6MFC?b0FZK<M*jVhH`GRePzn=-dh?6jnM#_83I^oA)8Lb5}-v>BGD=
z-DrAER&kFBc=D}<d5A&`YcpZK4&euhaw>oQHdjQ39%<GijY-$BTRGke&jhFo<<^R=
zqSy{#ZOF21KwTN@kED@+6GWa4nalwt;z%<$sn+q;QYi|;TI2k)6;CV$pw_+iSI2p%
zT)Y17Tw+>j{o{_oh@0Ps_%r9Cm_>m+GoT_TfnJo|ojadEvk(d7W7Ss8m8~D4k;wMo
zcSDElKe)&I5R-tJEU0Y{dVu8`<jUTNp%d#3jMFg}Wrl&u!|~-r$BO|Dr78KT_K4$2
z6aWwY908*Y5&boS2FHDP+GQGm0tbq{7Z2eBY>V-g;~+ez6+5226AHyFeM(LX0BpKZ
z27NcoEiNwu58bhHAOy#>;*$Jeh5w>8+TU)<qc4-*cZ>G7l73KpYw!&Oybzt|@r~LN
z$d6n%$UiFnz<|ir*?r^wQLzW;vQ*HyWB67m`obxBp@%%ShsiSoz?c0H_7OJCuU9mB
zV!OE+q#eE$V;&f85BAr8<ofxCGJ6<DijF@<itUN*fvoV>;2`rbDqtV|6hUgh<guvZ
z9ImI(`OmO4oW!ty-=asCdAw9V=kAI*I2V?Leyd!_oE6#FJoIHWgnb_?4|qq%b(O4a
z!9<zYAeqsm>w74yN4Rp4@N_FOu_r(ZCS7+h2dIxuNx!K2%?K%!mVo$bO4MXE%0RYB
zz|9Tj>(X>>6Eh(t%XOoc-4GE;ELpjby~-X@`#ABGj{-zg4Q%3-*xJioU|)oEkHhYB
zA=-onz}*GyaTAG_b!iJg5&UWlJB{C{dh;e-e*_5j8wAp4NA<p@{7eo^vMnGM9A`+g
z+aZTYOIfvg^sJkI%Gy`@5p<pcEsNua=&QJyc-Cj@qjo%yP%D+|Q_VxF?ggr@`?$LK
zUs87s7?^#h#fEcuZ1{TZ`O-+dY<PQVWNXRXuKNRbRh7)$clZ5w`AQ=jBVEJSmqf3z
zm%*#x`I5PN$KG{+5I2%#1Gk`^tjHnm)Hp#53f`uNa<**-z0U8tDyiv>j@J;e)xg0l
z*3WJW6kJ)^ua7No&Xv^+PIoLn^f}Zj_YXWeE=~9!MyvfgJ~OX0;`qk<P%=YJ_EN)f
zF2K(INIMO{N@#Jm(x`r9RM+gU6Kx<;8lmEnl|n;X_5MjS2b2wrqwYuVaZ@h@8f?AP
z$oU39ZPmwlTIflYkdK7F_XBL4m8<g3nE4<|oH=tQnzy&W+19NOY~c~fP4&TjZvDvm
z;P{B+{Y_wrrtC(?Rsi5@o8f2yRC1u+9T*X6ayL3&Mii5cjK2Yb;iYd&>3OZCqgsP7
zU85yW$LNk=PAF#~a=pe`^AT{83#bqC(|w_}&O7R5>w^Fs9xHZ)Q36{Cj#{h6k;<#(
z`Ad14@@$jm=kh!zPumSF*+-sP^6V$iY<aFhnb%)mZm&oCl(Oni_(P6@TvXY&Ktry;
z_r7l}A<tpUNQ{f@g>ZG@Of@cI`XlcpT@T(!<nbMpSYQl9;LbKio06`%=td8H6Pi(L
z@FYF5IqBNW6o~7ape*%et*9$YN}rT;y^na(mBSK|9O946A8lm}nGMLqH@hHsu$a^m
zURz#r2X3}+FPU`5WT+1j$6r{d_?qkhE*o+lR;|C)YBf;Hc<3T~aFOVr*P@%wCm|N7
zjGiNSUWD&3IerSB!REo#^8^z|7%3Q8a4;GzP>mLCht8oB8|Rfclwo|1$tV@LAn7bw
zDG5>6(<p-)2hd0<G&hdnFz`#9&T4g37((3|DxQyuGhHgBWTgg<!LYMYxig-ut3xq#
zeI}BkGLW3#2vTV5MK;z9^%9aPa1sjSp+M)`n3;i4sX#~M<2+K^NDVZa^P9~~?Dng~
zzo^9AH<<Xol}JBB2;tIV$20GO2&JMQO#niXoGv6ZI_BWXm89ci0V}&O&Qc@nY(cov
ziPWA5KgO1dV1O6%N+_E1VG9DoF&}O4Ha_AQ#B{?^iv+|@R<TDIi#qgsfVAN_N5!sF
zu`^W3GdatKqj?J&%5|KKL<B=r;6{*ionj5>b)1Djn3qK3Y)-oNNgt)Wiz2UT;Xg=$
zoIiH3)@W%LE7Nb~-h*}R3yt>Jw6LcSda-^V6zXXWD%fBy#XE^ZaqJ7;i1BR^s?R{z
zJV>*3acPRWUYG8%8fLP4a3GNPq5+G`USVp)F$^hEs{>u>Q}C*RT$m+YukiBA!e}k?
zA~}6fmS=`YOVV|NG@7}@s+dySu7!V!()hKT?2>~uOMx%``rwmPNR;zN&Vpuci%Hk(
zf<TvaULhSc<BW{sz!}Uiu60UI9&?hg0}S#cUAZz!Q!<X&X&pEjE6FGV8Rlcbj)M(=
z_8}j!6{%36M?n8o(i4dj7^XME5CfN=v(ak2W^}eKco0C+b*+?XVbitXPRz>^G=sq!
zW8D-bmK^yJsNzRA?y}*y$sZ=kI6PI8SfTrD5=W1fh~tY~T3Ej82?vxqG&wHP03T79
zIZ)s^6ae;cMDrOZvrm4^6z^dMPr^Ao>2gaq>T82O?7s2?tf6(Vl%>+h9<>Sb5wI!n
z35GsOx(_+}G{SGmC=z4jz3L76Md|k=w7>R!JZqpHz0D3)c%R>R1oyNU7#h)y@$Sa4
zh1_1u%TGab#@G=c3^ow7tJT^;KM|@Aq3y({T+&IXl&-;`8*R-Tt8V-Ne(Gbr071&V
zI&)+KR@0yty%Ed~KH8RaUCt~t>yLC<B=*PvE<#7YVAGU{x$G#)7$`In6zcq@5FZZy
zn>s<z92@B$b=fzP%KX9_1XY&R4x2?A&FCnDOs@(`Ae|c1Y4S}EkncB0PYgsXwJr68
zqC4TkD;Y`*IOED)H27k7(x*I>tw%25D~LH)f*HYw(gjD270*IZ*9h5GuqJnm<*;Q<
zE<*#>nj9)M96v`2ne{<XNY3U(EUh>i!ZGI|<fqnQgEW-64nJT@ZF^wUaceMGf~wot
zL)6u}K7&y07A_W{mLSSSE=HAJN3?pifZsr6F5rQJZ8tGRacl8TW@E-!F5cik;lr1}
zE1gTRhH(z;T-rm<Ub<+`Rk9=ej+NCoxu2gd3<YZa`C6odNz(gyk}25FIi}&P6?I)I
zw8g{^23|?ml`Ij-8O9*$`r8J^ka-|ubXZtWYa@<*WynnJ-><0FPKB743hNl>{=H1t
z=rOd{`73MxzLSlsS$qrHgf)v(q-V!k>KE4jJzX{WJv5q_K>5?NfBzDtdhOqDO2;{u
zqeg1~9*G9!7qv3?@4Ha(g#BAb<>U5mejpok_(+ff0d`}siMO$qQu}uW3Sj@{e5WiP
zAWM3lQc&jEA?L^FDCs&x!lmcy9zh-RWoLS?`MQxQ)_jEsM_pk-3>cAKNmsO#ZRN~i
z5Owtjj|us&MP`Z{Q}cB-%cSNjhXaecra{k;3NFUe^Ys?W_RQCxSy9c`*P-cH^Ys!_
zBaS>OPS$*_R4B&L=yCJ42ii`r`8r?G_c7E+&DRrX@PzsL4^%v1z80c`<L2uu8A#6M
z#DENYB^%3}uO*wl;(Wmdgkjyq(M5CKX+&T+G|)Bay89ZN?O^K}F`8C-W}2ccKc#d-
z<@WN!33ZTL!;|Btbd^Qk^+$HX{)#Vx1;2)gdRp3by^~s4Poqs3uD!C8I~wtBoF3tY
zZaKbJy&+JTbWK75koW8UNEtk6Tpu>EM9#HnrblGoq}t#|h{$c#dSo(Qx#rJvQLlJb
z1g)b=n!cV*JK6LA@S8Lpctnp}>jq;ugTd0`nfbvP_yM~S>eue{$eT&mx34A!b0(D4
z9+;#1BR_Qeqxfm0i;iJS@-^1CVp;kxdZ*|9#uu?&UtHS1H3%x+TyTA4P?xXo8`l*N
z>JuD-mn(cY$&VYgY4&zKiI=nM6Jv;T@fdtZK_q?7{S72^T}jF}FVdZKoeMN|Z2FN_
z*iDh68+e;7ax&!GTZlx}myw5W#g8LRz(yut+{9<PXA$NOHcHUf!dPWUQ2T7s<v>Hw
z92I@Rzgq2|k&o>h_Do1EwfVKMw<LT;qc>nQ*Sg_7zn8Z}Bg!HTyi!07H2EgGM84xB
z;)6pNl!4}CU6)L$?A{Y2<=)5!ZPnfZ+Nzxc%5axly2U8>^2>$c1F~B;$)4qCq*hwj
zlFicwzG~RUyfiu`L<jQ0+VL+zRITx9svxt-cs0Zb2IbU2tA|Zf;wW`L(0f0QOw1;a
zMjY1y0bj(_ryK?oAgza2-t>|Q0)ZZ0xu^Jt`9W-}y;$X&Qfkf(waGbG`y<=PC^{I1
z%F9K^(#!%LxlVZGe2Yg~QaqB^!y_9~Jd$x?)q7i2L$Xe~=*MCFgLBGjAwA;jK_04c
zzMvI6S)E9+dU+D9_a*zi%qirW@p2jKw9c0MrhFeJby4Z&z?MD_jY1II#ef_*TH=no
z`Xb)QUk$en$9Dp*!P?BZiTxWhVXgTgTDaGZe+Ag@rKV#Sc4eP&w$FIPjbBs43rjb+
z!6R906@B@6AA`=hpLl&x1bmTeN9a>@w=X&tZ!_JPKhF38e!YJE9KAHMPW_CAb-rOk
z*G2N;{2++|=H>c|&rW2lNANB`8y?BS85p0ZFYoq^O3XI#K|EN)M3xCPQVgPOj=1#z
z4V~sMFC>B(U1XzJclG3AFUFiIj8Ko1;QWnGm4iF7psr9eFt0)_YA%j<0QKfURURMw
zNuGb_mAzV0jnXzPWzgOkdT3U@Es%%%YrN@^3_vql%ZkfU5km0pF$kirrzoQ-^KU|D
zYD5`bav1oRk7|2Ie`$Xh>Pth}jN{@S+|MPHDe475g`W?cafxV7jw8fE2D_Kg@fA=W
zXTuVwKeDw{$%!q#$OcFcH&_Sks$=fG;L&9qle&7!pf2bO?o{`%dgKsfTmTZ%hf@Ho
zr4!JmC2uxB;ys2OIb{0K6sQL>qD<0tI)*K*`#s`^y#!xSz5_HN*N1qGD`0AEAbw*1
z>H9=IiSNH2QtKwtmUP`m<t3VPt>`daN!M8tcR-uK@_-=`fpIMt$EC=?<QS8&Jh0v1
z*Y)tLd>HpGQ3OP5#-}`aB`x|&4-P%!hg8M#yK59$gh-5ZTQd4$j1Ffn(Ysj_^L%&a
zJj;AVb6%+D#`0U#1_3SrKBccR;ioF$R3y9<XF@F!diMAfP13u)H6Q8oOor2dKeA7R
zDCYMN+M9y*AWD<2M@Ir>b&R_RU&0-M3m#wOE%PzqH?dJeRA9c~AOvD@Z-deJhnT(I
zM+t}towR}E>LCsQud*$N9(1FVL&3ALE{ZteQP&e|*)P^bG2*f=L~qA$@}XgIeaAVn
z2R9r$dT1yo7G4r>PL6kr=GQ~+SEPbq)_B}ZDXe<rN79vG+0YLQZ7_IoCoVx8_N!u<
z0~ph;Dd4bi2EKkovL*Gaxr6l3P&@AuZ<e0^mFSTU)BJ%|RxlhV-|cL50DRinst{8B
z^01Q2R_ptL1v1^BOM2LubWOn`J0fdvgbwth8%eSw)B1V>KfpghEU#{Q<U|s@$jBKA
zPJk759Mz5B2u_M<1>?jTP$9#V<KP9Bm8G|dh4jL*=EA)^=_*5gE~9H?UaU1xfFf!j
zUJ>hRD<Q97pX2(51!J@2AFchh|KnN3dh`*oUx?Y_W_Vc4ENHw&nJ#{aMT_=Fkcz*c
z?Yk1ZbpIM6(QI+3AmB~9%4IObT?^j_Jy47k*F&Ji;B=I9J$VHx@!X({6yShISRt*!
zR7?Ga#MP+VLvPBvFJE8Vg`(SD<J#IQ%j-P$qE1twC_3pQ1QQZ}F-4!0ZiS*FlCDoL
zR}}4KuzKr%wf83QQC9c*_?cu9GC)W`f)N!)1&0d6fkau=3<;1xfEX50k;#xul96O)
zIx|7yMGXQ%I>bgr#a?S8U`27k9hGW86s$|Zh2kEUS}5&pv{q@g=KnnBoOklh0QI*2
z+uonQUnZY7&w0*%zGq+F_rO!5XrQA+3(gH!Qx%jleJNuwju>G*=p<u32#YLS1}bU_
z3^SI3hRZfkE(Jw?iX|Tk534*)Q{mt4!?zphHc-_cj5A~;WhzF*R^K>zh+BVqK_*@%
z1q0$?Dk;eLOI4=7RMiq6zao@mX>0_i_iE)ShjB8$a9&i`*Bjs+s-?X%>j|ZTwC-1`
zY}SWZRo$#KF`zYR-Xt~ud-d1UzeL}{krh?xKBs?J+1uN>ABF!6L`8;x1`_K#=qzL_
zWb_!aVkYRtHIzEVQoha^s8HA+oG*v&=v8x2pwRhuo`WvAIi&@x1Y7gyukDabaTLdV
zP_RCODJsQOtmo*Jj0<EMWhRZ1y=_q^N;`U8oK?!?R^$0_BHMH%;6&3<ehKtZ5YYWH
zr?_(tMJ!Lu3KdxA&^O@1tZ%4I?QH`Ci%!Q->_v>3b{Ax#<e|E$=G3YTWyaAk+idvT
zJi7eOykeNK@Z`YiSRwsHdZyakqe8k0hBD#mq=1o%E|l&0G0Fo5EJv3InVSEB@_<~%
zSmv|Q%q(u`)}PRsRX~9uyVH#5#+G^Fa!Sb~nzalQb4-VN51kLeK26aVC7-fy#naqy
z5%nBEAM8s_nc1eq>-3uEV?lHromv0E*fgh$UtoNLfz>CubbLg%wX5Ng>&Z6wjKcb>
z?X;KG8}{Tb-XnAuw~Az=Q!H)HmJ5+`Zxp3@3+y4K_<~SuzS}N?lonn&_y_`l`5-Q=
zp9mjHnz6J{9K94%lf{_6gfSlb7)JoJPG)vPPUx})+{^aajW}U_0kkV)wF>7ZF-{fr
z1<~|k4BR0ys}$MplvkmTz6Yu3zbE%axntF|wrj)6P;<XZtB+M<C{OhCIA&uSniWMa
zLHYR%r?4@4#Yx%TKSel*;RT~u(L0#q*viK`_Cjd5nx}LgTFhQBiqDSM4squG0h2BY
zaI|dirzK}RJ}IfFxy-twJ#)v0*~xpr|F)w&O)9=g-c!)j`#Ma=N+vuP+9!QYgRf8B
zyMMR^3klZfE3T*IH(UlcG7{09uBN^_4i1{pTx`wViEpv^+PqCYYCCr%?{!UhDb!v(
z;hn;!K_$`Gn;zihLNhWZ;oA8Jc&E|s>sYoZv8VX2B>Kt>9E?Q~yk*CsLAkwe@$E>j
zItg#y7F%))Z?P66Td#K|?`VP&?+F!8cs<l7dL4_9blfz>S~TG$cu|ru>BV9ktF7PW
zdXk@FX)$@{jn2P}UJc)Qd|!%J9J18Ew0jCBwxSWeA>ytgepIA8v+g{bGNypr*l-`+
z$sSdBppj5tbSAqlnv9`e^TpQ5JjvNn#5K3SxSpoWUC&V3vH7qo`dnet$q+cYY^y8x
zxyj9!Xpy@*cNQiem^@*B=$+yTUlulvDv9ndY?8N0LF)TUnx2$WUmhp*8PVO8dRff>
zXzyF4_-{4DUj*^rYMqo^xJtN^+nS)3^4|~nukMonRLK8Tcn0}nkqq*G!nIAyKjxp3
z|E*H~$MdCl#X-oQ;zYAp%kB%Wo9~_~v!k$SOkvZrEJY<v&&sr{HqyeOjWcc?z>xi9
zuB;V9t~-%Lj6dSbtE;&*CcQS4*yTqk?9ki0EyTj68&qgF#7oXxZ%d*tmNdJpPkxL7
z!-Wl3v=rHrIioq>n(@@<gI&3$-$Zxr``wCm%U1`6ey}y|w%nbr+(u_A>S<H{H~9XA
zrT*pO<{NmOqHEh)R0O#?(siCZI?bBfla&?AZ-aBEeG}cW?~uu}9l6&A@q$m&q;I6;
z_PL@>%oJB{v2@_dl{DUwD|dMw!q45|YFbXq=HwXV6X(hG#OyrFP9%-3PsLyBr=mrl
z_XL}sFGf;R4jrbT`JM|+*?fO90tybAn^Iof5nF7ZB5kKU0UfrrSP@ZMTr+7~`cmId
z)gq^95{<dwD!5>?&+$0Fqo>B>#^m{r(z%|^9b@v1LXbZf2Vv!;MjlI@k5Ll+$$f{*
zY)zMqEsTB*XvdRZG|rWlv1$SG04J{UOPXgu7bB>W=E-A`!l{ol*_XjBS6fHWRupcy
zM4d@8;nata$c&X-Q{z}9wheC0SoudO!RgtZSwp2DrrXgKVqH%W?7)H?#y}fEVSx<q
z8_q4oF|z}mS)W1^>&f_!Nr>z_Z-iG^zJ&ngzVFRa0_;{J|AF()V3@>UhK{^|mCmfW
zG{=rLgv`kn`ogE_S=SuqK2r1z=M^@EYq3aQR1`(vQ3fs#?Q-CFL^?q?dzPG_G=R!M
z#e6wGTX^YZxSZYWu<d9IMbUaJ99-3YxJPmH%aW%0u|*TkS&q2XyF0T!r1#M^EmBC9
zq6sNAGEo<%m0;aT9)uZ8jpnTRk6}|}Kdm9Ta=8Ib7cdF$L`0H{xrl{rsE@GbCy##~
zdQ|#2GcCG&GgPbE8m|88I;!x?kA#YK8Ql2;2CqgY*TM*^fnAxG&wYUOTN%UVtj+LH
zu1MTXP8s5`GkV<`gm`$GNvIb=!|?uVBRYqNWjxhBD2#QA{n)RklM~_fMCJ9VhSw!h
z7+0ZSVR8LLS+LHV550_)J!Qd~+7&kxtS@DJXW@iE<&aXmOg*HiGixWjE{e{wqr`M(
zJ*EhX7*=xDAf>8^wRaV<70~R=8c8E9gjMi#GG1MX?&n58)@bPkOJBr~_W;09#L(2s
zB8H@V4yW8=hYWd+0rHp+9n&$1Vp6<2;4EyaPAFpM(uhuOmQEniQzab$dbnSFsk$6`
zB~hl6OTWeP7=}DHacCHtb<RP$%A?Wxv%ifsWfRgfWKyuq^_O}t_QPc0rgGOIHB9C1
zZfZ1VwLgq<*Nfu+d*$wOL_>|SyILYlGGe7Kqv;L^_N&!K3Cvh|z05rvyg&=qnKj-)
zS;!Xk+=McSp+))$DlS717g+|+AnzLcej;7NBQ}=Cq!L1j4UV6a7WPZ=RwJr%u9+BL
zSwaWMmZJc6;{A%I|CsKYvw{!lIQ&fdgV1DiaQyvaQzmd(>9fwP8E4W9nb<KUsBm4U
zhVs!iMAL_=kj^ziI)}U~F<-(tBPW}FQ)-79TVHQV|5Yc`V2CC$wOys8OU3n8oO6;+
zOhw~OsiBJoH$vA^qdDvOty&c8z@-#$JpJ#Ll0rnxn2DL!WsMkM@{5mqpsevdmVCTT
zcBu^cEqo6~)_+%A{qHOFuseXNKbBH<azHt(1S)%H);1{w-PvqW1T6uYg{z0MCBWh>
z_R8=s$b2rxmv?4e0d1U5#DAl^S&bsf*@Jw+2<M%pC$hT<V!Z||zPtI9PUE_p5~&m0
z^gEA~Q<0{{XbKPk`E~%;91#3488AAW)zT4)u3dIGX`qL*j<SBSw9dmkDDW6JKm;XF
z+0hc@$L6>R<82OVoGzrAZ!Tazf@>%cfSm$BzAqU<0od{XXaUHm43%LT0&sxE3*a|&
z8Yh71QYR(=oZyNR00ty-!YYGz$bcb$>!hQY0D6Eb0mNCKFRgPQge@YaP|B5<xE+wF
z4Lb@a!lEu<vGtG6thY|bOf(rTOw)P!-VjdUUW60(b_{_rT$;84kc&6jaBCNI<OK^r
zjG&q(4w=CN0&f*YU&k;C+k1J%!}msCnijR<4Hz60#r(6dcP1t#*fE9s-g2OXkMnl!
z=0<u^#qQz>&n?3o^HF)dhfCIr=<9e{`RaiftH|+sNydYg&YeZg*buG2v~Sn4|0=Hd
ztNQQ_N_yJlRj<+RbFujgu0Ji=m5JMpize(|_IXj$ee&kh5+JX|<Vm`6;)ER;tKLH8
z!N{Th4EbKU{1Q%d<&Z`g7e_mbD_;In(H2*8zU9y(uvdu9=`}EM$&tlm8Oe<6FP7Es
zFeVompiagX`CyC#f1QClR$0CB{(#B@zpUq$G*5h!>S@uEi8#Hm1Gn`wFB_6Mt$E_F
zu1cSl@u2ku%BZvLa34fJ!_|qsUA*DB8;9|C)W478@SQ~!N8s^Q<K+FuZ)D40SH3}U
zPG0o~>?>wGSoj7!-gDT-^uK|_7f9b<5F)>+G2z$CUZk}d)1oUU(oIQp;*jZ0*7I@B
zzNlhP@q|5BVPOQ*&nZW+&0W-N#kcVG77gDg<&XJz5BP~C?UEkY2g61zXLpAVpHEMU
zn@c<4>C0*Gbb3?aZbb6UtA|LxaOofX8jedgtr>8S`9NXD5oF^L<(?C^G^~FYSG-*u
z-KSEIL^J6V-pyG1I8P2#Jck#GmSMm+XZMlAX}lMPdBc>X!pAu;U?X_fRhh-j6L%L5
z{}}m&#bIQbEJWON=NG?ZUL)(6_dH)vDsinW$VIs?ADoHTM)015ojL2~$?jOY5o?cE
z{9bNcXLBndzX(O1Q6F1e@gl-1f((qtJS}b-WjOEJ|3nj$KI=X<FHhPT8QF&LyxUpO
zlmpitgQ199`O2V8oS$-_!zrM;Ije9BZDUvLjMHLA)Y%K77Rys@r_d9glEN4w{kO_Z
z{=M4GAIaH^Vt#;DW-IKfJwn-$N-y-v@KrJ{avJwJR?_sDscWbkO=*V?IypHsU#Ws<
zMi7T8u@BO9#_cKCii|d$QU6RvBfShRj5Aa7ju8X%aknM#<EZ0P7sH>!kH?OyV>dMG
z{d*X(^si+6Ae?Up9i1}c<l7~%dl)D2)mTX;I+j~6zr5U&--%PRvCLdvoG+}VIWl(X
zaIhiYZdf2U)bxhCctsWP-n!SU9SZ^G_U|x{8y3*M*(2~wGmpBm&6MG%Mdb<X9R~-;
znW!kNx?!_`Z1Bj}J7V!N6kL2ymy7N_&~aRUswwHx3e7R)!do2b7qeP%lvQQF%c|rU
zBaOQ-En{7q_CWW~u|Ycao}+Ke{rS$Uy&U~Dr{5z7ff%(kN}f`TxW{kK-wFeGeAMRr
zEa_4X5>et~<3t^G3B{f5X8JPs-{C5zht~ss^PlW6FvG;oDU2tXjrrc%^Dr-(u-7mj
z7;i4elLHu$=Jelkw5ZO2JPKtj5R(D{d}uZ@`fKdqz7|0vSeD(Hc^GTebPpe!2|QR(
zDHWUrnR|0%zoRp2rLupSVL$IU_GNykrJv@M2hpy}jbL1K>q@I1$$c&CgVQND`q2bn
zHrv(uU_aVK4~|MbMiRTWMpYnkr4#ev2W4`eA#i|tGZm}8RI5U<4NO^CV~3ucc>hf~
zTP>YI3JWDq`AU`$dAbQQYM>^MI2l_?O<@d}5!c<E<FG|`NcVD!6lWI}XX=Rs!%b`-
z%u|`aVBRbPOiz^_Al~mipyDk*G!QFr<|uzn8_nr&5SVdWBu^<cMVWEM`=u~op=M+Y
zuqA-w28eWL)|JvF{l=uXOZvJD8Rg|IxhCS`!34DQx5z-!s|hS2C}p`&o64P`G~ByT
z*5pZq6IYx!7*<6+J_f*}QWqkRQ3~KMB7|dAnCxI-sWiK@RF=yTDx|;NPr3dI|B(`5
z3U)iRjIfCEd<)ym1b;qJP9Ky0tX5<jK~<l6D9zr))Mo!q((ytR>bjhchZ6OH8qMi*
zyYx?oGUiQ8aa8;C8D<VEb~p>0ttS^J@9E4Mt~}y5K;@#|JLt~2<~++;9NkffEc)tC
z7%C5E{e9{s$gwzM@&VU2?t2s`zg0Y;12V*-`3nbc+Jm7;bIJmaP<Ho#F{Aek#|BMP
zG5Wd!CBTi6a0QC(R9$R?a^xVJH>}3QN?R=MR3yY7n~;b+wx|o6d!JjJj4j^9^aSUL
zq+ckHCqscjK+|Bn4eG>k0H)we={pMJ@LwVsMH!QEvF|a;-Z8?HCyH?S*bZk``*kAQ
zuP-=`X-<Dd4rdVTCMXbqI+7@BYjb)V4O#Y^3ox%mgVPkY^F$2hY0}y8>VR1QruGcw
z8S%jTgLK@SUMBrG(wX%GC>XRB`b7u}5^qj-(E?v;Xu1eNPet`}qxyX<M=@IHiLHZ{
zw`|&Y%1zEPSrb#Rh~2qY<@;0bvlvV`fTbAZyh)CSe*8k$K@yz^Q~W8!=wIm@mf1?P
zU8*D4!uT|&uawb(ATO1i0>fFVSlb*Ei7t8*_9Ez=GA4&kGKb1xE>g;xUdYu1)Xd!$
zmd27MOu?Ei#8RWnj_`&sn9iYpz+fuoHx)%uOK}qxad+0+O7yo4pEI90f;p~cDQYqX
z5R$i<IQmQZ-3_b@pMW5ud$315=1!(Ds}XSU=_hc@I4TM1aGSEwnY9}Vtmol>TwGLq
zQci+CUjl4OxdySt_*KwdQ%ZwkNs5KR@M_;F3Wh4kQ7{Hzj8_uIU#POS(WtpiDIZ~A
z1=dTkcfpJ?k~vZspT!N>jP@2^>9~g;pijM29%Vo_qv_oQJ=yf47DQ*;aXjMqP47c8
z0_oQR@IJR9s|dQqcNuDPyfClhd8IkW{3sL70Y-+**U~QQ2d|<B4xaV+F~83=fyP{Y
zh@V4oyohtJ4LApDMP5Hww7ZS7gjf_%hVHrBly8tv{CM}c=c%xMQye`cyH*^n#OX!6
zUk9I1HkT4OikM~M#%a!lNUa}r`f8NqC1^~i41<Irurs71S+C_-F<&kyak6);V~epY
zMSSIWo&C-X&JCi!#Dw->I{Y+dhSEHL_T(t$kZ-$Ie0&6ZhORG#wIsSPa)JwspED{-
ztzR7|N#2Lqc*z<2iYuPOiD=|^K_)&G!ou)np(QyZ(ktU};EMK+lH@%lxd`UPOAj?a
z24N1($TUfiQR~`2Oc>tkc$>_JUV`$UKNs;w>H6J|X5_QDTz2S`XV4UxwCx9bLZ?L<
zKG$WSEHB<}SVjwlt!qmZ0>+9KUu4JWq%=elYx%+G+#2jGl^?wCY2}j>-zGp1(naoz
z%g3e&(T1!2qC23^O!+?kj$F>hQi$z7g?lyrPUE*42Wop`HI`^B*SJjMEgJ9BxKrc1
z8vF31hv`?QdA~ZP@6>#Jg1uKY-=%STg87d$UzwnPU9Ix>fY$$=#xFHmwfO*zPK{+6
z{ThuZw`g}8G`4Fr%u8!jcvTvgX<V!E4vi0M+@tXmjgvGU(U@k)U*lOC$7`Iaae>C5
z#_KfxL}Q-zFI(eZt5rPPHSX28Q{#OaZ`63L#zh+EXdJKcbdCKqep{vd@6gz;@nwzM
zH9ny64vkASR%$HKI9B878hdN}+^h3f<1UR`HLlnABaKTmR%$HNI9j7!V{eT|DwY4A
zYJ5ZE3mUg-yj|m3jaO=1q;Z<Yu^La;*iYj(6)K$HY22@IkH+m9@71_YW245P#)TTo
zG@h?<gvM-*MtSXhr)Xrz2DY>-zBK_2`(dqb@c(IKXN%u8@;^}d-+qC@4$V6{QygCo
zzA4?Bug#xT82jI-RgFKnjlU<gd&B&Q_QycJ0monKM3oCdq$!P~{#)=fYa0F=Xqa)t
z$De`m{<Z7)8rY$EhY`;<=@|do{;cw&N^3a3RNOFcwBCHFR<BK4RPU>e`268@YkQZ~
z`y;+O@8nP@5L$QBy0z!}>+2SI?3eo@5pSivs@`7_@df;L&!Rwx8kO~4dn915_lLaU
zK<yIfgd6G>1!~u=S~qj`x*Q`kA#Q!<-4A|$ed80h^GjbH`iFU6h5GcqeUYcZ8+KO&
zmcUw_FC6yytL>BhOMIb#zs~EAjJLZlnduxmlM#USShu^*<MX@SqjF;o-J|nHP0Afz
zF*Y)7PJw%L)kPQ1m_4&(cJ=7$MR~#L)l;vW>6+}$t(<(0D-`lH6jzq`Mok`7Fs80P
z;#pMd9pkC>Rr|e_lPfEIT`^Uv3Xk6rLAn+NmfAgZY_AFh>g=Trvld<Mt%%rtek9xP
ziTIXy?Mpnh_1+x2eNK(n9#F=0o`yvrRd$cP(pOdG4Iw@DNJG$Tgj4Td><?Vww|gmQ
z`xP}_zrDg!TT2mWUkzC+7ZYVZ<WvMg-kgeHeNI8?+=2kUDYP_V1nG&`olBkkjhZ~V
z;G9vDMvp$ndHz2Uo-gdK@_NEPNXT&gkLAy~CLfSPdAa|cEUN4Zi>l1OsPfvRwOche
zY_%Ry=99HXhvtSk$~WHFkUw+z?t6(h6e#p9@r6-}V}(4kQtQ|kHQ29&jiZXMc(^gG
z^+@!7cSpyN^H;w8WbZ8}Jz`WX83@9=`WWH$oX4^e3+Pf+lzd@Snt*+guX=*L99lVH
z-<4kbSuAKdl`JJWzKFMun&o!bt@VVey&-#~#^c97uf5K*)K^zuXAcHYXOK&F+RTZM
z^ZXBIzWi8^huZDF<&mT_K3(<F+KCs{2CCg~6Jb36q55VxWM=05cQP}szPXO7Z{J~D
ziL&s$uYaSb<mEa4srqM>C!_UTd)5qZBoqh+YJK7HQ?O}WGQQq#)KGgk;t8Scu10A<
zn-ptuH@xyndEW1LuiiA~;uGig`n3N?k2oG48Wufu85Nm-OQ>I(8|E$FDD#C{qeFAU
z+^+c~tr1KxM|~Khjm-70XR*K1yEIlG`d51+Eb8_^J=(D<d&uLj_Qn@v?a4*6T|Zkk
z@uzpV?mFeli$=UUV93Uw=)PE4KzOr{6<&P(bD~)J0};Ci6{p&757+uCysBaM`GYD7
zUClLXRV*(5QTNbYXE>j^WB=b)oYUvR`zFu*IPWJqJPF^2`d=3-M*mp-Kc~wfS^)Gc
z|7ef<PqYBO?`R{;vZwlA-CmS?{Y%jM`ood1Y?EisEh)huz+N8mdMYu7fkC-BPACsq
zny*bk-yLjNx3-7BQnvpXSLJ-L{co@T?4>im*!x1`&4sJCwLLar+eWkdfBE=e^eE@J
z!v7!bld#C~Kh`qjuSN4s3Hn<#-<6=hSMv^yMn9f!QU1<ft*}&Mk;Z(Djn>OzZ5<=<
z{IABg{P_9eFK)a4HxDiQ=(Nt>e;9L(+0H*>u2QD{BR`Gt#AWMZIhrDSYmSqXs>ZHs
zTC;jx*}7rVlh;}%uA4b?<}^-5<iF`6vTl*bXwd27>#zT9@Vt{p{Wd)J*^h3z=8ePe
zd~ClaHmnt<0hBO0U(r<y;XY;;Wv?#itMw*~pW3t>4Y_6}DS4{Y2w_T(xqbduj*pDw
z|4aI__8uyI#&|K97N6drHlLvd(5AT|4}48imS@9#X@YsaHaGgiR?Q9fdsCEqqd&AK
zm^WILxzS0rYi_s?CM)wlYK`&)bL7`2+Q^(=3FFH`4qivf(WDw_Vv-W_R@SRYTOjBS
zVa9^#n=yjCqQ+NIW3Taq?f!t>SBXiAFVcWMZ*3px922gkZeLxncHLTU!T1Gp!`@JM
z!J=A^FN{fur?$>pyTBC;7J4F{1tkG!EGP{HszaW-@Pg8YNKL@6A$Qz@THm4tVf6GP
zgPw}To@#G+ft=4Q@CO&jkm@{<njEx|GZrSTUdM^!D*wPUEJJ?u{?c1^&%67*nP-Py
zch53pMT9(xjv<c|W(Fd~h_IIvNX+bvY3wOJKkErnFUQL@o*?>tOuSTkoH$v9$L~&f
zYQ>0>G4mbgr9V7<#k?CHykJzV4o`wz=eJQ_wrXzJLHz!YpAQ&uB_h7v<F$2b6YA6d
z?tCpFe~hN(@6`Xnes8r}lc>ZZj-QiUR&5M;Yik2ncq?PWS&@74s)}Dc`SzZZ$4)vu
zoHC*)up_uQA-wNZ`OUYf_On1IqD6BfO?!2Jpcm|fJ;5C1|6Fa?EZ<%;ymf(4L#!R?
zrza3t?&6dkGqOmROpUOJ7oYajoukrbbe`zUdhTa$zqfGglV1!nn<Kojn*Y7<`Wq7#
z<X?p+a{Lg7um9qNsx|Mu-u8ojV^XjCtkp7VSZ{N9<vo>rjP|NSbHh%DmWMH3$WJgw
ze2wvsIllkO@xu42{5g9m`5N+T)ZCEozgvDSy;XdS@lLzuMts_J{EYGtOfW}$zgO}{
zeE+TO<JR=}`jp>CaYMfO+T17)Et(tV|L*eeeap|x3?)yaJulbXkYB5ox6z(=B*-7>
z{lB_BcV@<yzc$Sc`Lt<y8RaiC!MwZt9lbto$d1_BSIO6ykG1rRuitiUZp`NzH8=c#
zeE(PHCkVfLzG!3yF!LIfe_J)`ec~9Hto}EY_5?f51a7w`Xe9Ff@#d+0%-mv$=SfNN
zJUKa@TdnatB_*DxrpEIgJ>q#<T0FPe6t~ABwcHU?Dn)8LBuAw}TY`Ba7dW_O`XyQ8
z#-<|K+B->pA|;a-f?=QBR&BCP<$ySDNpO#22sneElq~EP?3cp}TXlnWpispJ9_!z7
z2Q!{Tq0NB^S+F^hL=<9qb+$HFXmcO|N$K#PXv7b9)Js2L-+>IZ^b?sDBF&%EAt_Yh
zzozwQpUPJg_$vv#Yr#X=|7C*tO9}i}ChpN{g)02lP3BbYGx3dDtx(yQ@A66g<&JB$
zxk6>W-{hXkw-fkV3H+S|9;3q2!<Y;yRQ^NI6efNuzm%HX<-m(`aryU<<_c|&Y~)iG
z@{4HX*UwD(MPG0dJ(hpuS10h{XHl0wCI8D3%!irGFVJd*D*ops*f;uO!@sG@bLszJ
z?OvhHaf;>&ZH_^jE3`RswBHJC4xGl7X;El%<Z7<a<~Ub#g*L}Ink%$9X5FPCqtND<
zuDL>+W0bb9(B>GX%@x`lg_<k0InLJh71|slHCJeJeDwnro<f_$rQIvEIsT^271|ub
zwR?p&$61;yv^jpO?JKl7&eY}#VPE^N(B{C=OersgHU}F7#z>*f!3+1u71|s_HCJeJ
zWNH5t+8q5fSE%xz?$ys`tgUHoIq?(}?SW`7h&VO0#IULD=Mf_@O;!HROyENkczy!E
zz{F3(RY|7`r-jJ+@U~8mLYw0q%@rblVQ&cR6KQ`W;yE1gC6e=WGrd=6bMW2~a)m1W
zXp8l4xdT0<B?s<_be{_x1x_S4Y${ytz~J4I2Xi9L#{tKH6UmS0@D$n{6QMsI=0ut!
z?UwVviR3+W019o6wc5Qx6<^-1W`sWp?(*TDNcYso?T0aQ@>2ejzvYgt+DxJHFT*fK
z`|+7JSE$S(3;kQ}*sIMI+8k5RZWW;YDl)ZShHXR%-JKBSC_L)7+|ky{P!pB^F&&FF
zZhRq<5_oa~w<hqE1fH6}dnE9*1a7nTDM6&CL7qgkf7R&QEHl7~<YmB8a3c9!;B0Ur
z`F!9!a3VPluURezCz3A&E&wNzdw_0mB6%fn5jc??8EWx@6Ui?J)_@bq>wt^FiR3|`
zADl=Y2L2G7NRA3@iGUNyG48Nj0Zt@e2D}oSNPZ3QDsUqCwZKMjBKa!d3UDI%8sK%{
zMDi%G37klNJ#Z~Jk^BbWkHLxL>wq_c6UlD{-V9D8$6*G`dT=5+zPxAoPjDi6EAS3*
zB00thmOH_T<Ua>)0w<E+1H2oYNPZviUT`A$gTVX2iR2Fhw}KPN9|JxDP9)z3d>oue
z{xtAOa3c9L!0q5f@*Tiu!HML%fIGp7<hy~-gA>Vr3ETrtB!30?5;&23FYr}xBKhmU
zec(j${lGWCiR5nqe+^C~e;4>RIFbAV;CtXi@{fS+;6(D@06zvNk{<+q3Qi<H3_Ju*
zB>ykq@4<=Wp8@{}P9*;V_&GR{{43y}!HML51s(w>lK&0(H8_zR|1IBw6Umc-7E7ju
zNS+F`f)mMYz#ia4^4`Fn;6(BaU^+OFydN+Ve?;;u;0fSFa(oNIG60-NelqYRa3c92
z;3?ol^3#BW!HMKUfTx2K$sNF<;6(DXfWyFv<RgK@!HMKfU=BEuJQp|$oJf8yFb|wa
zJ`OkroJfuX!j|#iL~?xk#&RAwk$e&`ADl=&8CU>LBrgI^0Vk4A2Nr`9$!7vfz=`ay
zE(Fd3Cz9jRO-mU#k^CayTyP@!CBXUMMDojkmx2??V||HIWF+tt%VBRJ>=S9f0_XuJ
zlH=IDr4pP-?gLhX6Up(qgynK@B6$E<2Tmjp0fXQ~@_JwxoJhVDxCER?&iLhj(bZpD
zf23lp(B}BGkKzh#j%5~+)L^k&h;+Xkcr`eY{5s$Ya3XmWxCWd^-U4g}Cz7v2JZ?mM
zh~(>mH^ZFB{7O^7>+|BOzc$7+Fn8Pze{O?)BHeGWh~zs^`iSKB0Pg}Pl5Yjx2TmlP
zru|oFbIj9Rq0NDQQ2rF!9F3YQv^mhv$)7@-<7&+n+8mD~UXLJt+jRQ2gFgxWjOIJR
zp9SBg`5y4+!GEdwtKctz@74Sb@O|L>HGdoY*WmAJ-VXjA_(z(53jQ(pLCIa!zXv}A
z{$HAZ4*o~*FEl>_{%7#NYW^+w*Wi#Y^21eaO%h3#B)bLIs7l@wyazZICM3rvfs)d}
z`)NJ^`~>i9$!*oAfS&|DNb}Rd2ZQ5|L%6q94+9?xewOAr;KRY4n&*L!0zX&t@!(^?
zCu*J#ejfNF&8L7DfEP*bsxAR92A`>U8Tc%4+&w7ck1xX{%>}<i^M&A-f|qMv3GM-}
z()@DpYVcakgWz@GA<dV7hryRhZmV7f-T;2J=GTH>1HMvod{iiD75I-dzaBga-lF+B
z@EgEy(flXiw}RiUc`Nu&!8d6BbMTGen<aNu-v@pV_yd|h4E`Yaqnd96e+>L7&7T2(
z8oW*OUEn*wU(o!Q;Jd+J)_gDcE8wqbz90N`@B={R?|Z<v!9S4Zw(5_;+rfXM&3^~{
zEjW?(JCa25?~|OCKLN>Ez3{?mr>bVl9e;tnFJb>{9p1OVZ;~=CMB2w<S5ha;tw3@c
zFg4k3>6NU^Gr)U;_toaOHZ$o2a3bxWk}Q(5lbx2qKqLHnP2mrPz0+aeA^mezkAS^l
zuur7@T;SQ@MDlU4e=h73$uEHZ1eg;I|8F<>KLz@eU{0j@3}7)hk$g7%DTRMT^7+8I
z;6(C;uy+aU6UiyJ7L)&#u;+pODs6u;um-$N%jbvSe(<n1Uka=TC(^&G;qR63f4Ry3
zMw9;x?^+XI4R<TyzDdiA=3T#p^-603Pf6gZ3A{%FPfOso1TI7mhQ0BluJT0g`XHV^
zmB3G-CHY(K_(Z2oq3WM94P()Gc84}sn07Xe26mYb`Ba-LOdCQY+y40Q2Vyd;oT&AU
zlT7AszNyR<+8hToSE%CiiPmF(Nq>?}?#Vw)F#jll?=bP!k5zaI6Xwr?%ev;DMUH>I
zsgIBUflK1~_5}V=0)H%lKbye+D}nD%;D-}<D{6xJEq9dTYspkA#CiwH<&TmxEku^@
z9|N1giR3o|Tfm9rHv?}1Cz7uR-U3b}|4-m;;6(B}fVYDa$?pXI44g>53Ah2ANPai)
zE^s3Gy}-@jMDqKATfm9rTY(RN6UiR|J_JrAe;oKIIFbBG;1l3P^6kK<z=`C~0)GKc
zB;N^a11FL{4}1=sNWKU70yvTUCE$zTMDkaGFM|`w_W^$eP9%Q=_!>Bo{MW!Y!HMK=
z0}p@`$=?IM15PAw2fhzZB>x!rAvlrzQ{X4yMDjzx-+~j#e-Hc}IFbC1z(0T!$v+2n
zfD_684Ez%~k^BhoOK>9j*TBDk6Un~?egjS<w^)Ux6P!qH1twWDEkyDjz!Y#Ic~4*(
zIFURZ*bAIUo(b#&P9#48*cY5gJ^<JsoJf8W@I-JT`6<AG;6(Dlz*E7A<fjAe;6(DF
zz%#&!<imhxf)mMy1AhQcB+mhk04I`<0-g;{B+mnm1}Bn_0iFX+Bp(kP3r-|I4>$pw
zNS+ToADl>D0Ca&9$?+wBOCdOsycjqYoJd{*oCZ!Lp9P!&P9!e_mVy(>=K^Pg6UpZT
z=YbQ+F9lu<P9$FlTmVia_W<4CMDj}DB5)#kHP8!AB)=S315PBb11<(9k_Ul)a3Xma
z_(O0a`4V6RoJigPyaJp^ew9@uU1_yjt^ty-1TF`^PV*>m4S2KWH(5pUkF8G2EkN4;
zDeyM%R?UA7dmCYYv$lUf@Luo-HGdTN5cuPoKMmXl{tL}_0-punrTL4%7r<ZAd@t}7
z@Ygg~?Yi+FwJPps89+KLrnlU2K=;22ReyH6$^3n7t}wB`YaG(@j%bA7gwW`GQ*q}9
z8sArF*O)IY#{&2tQ`moKQSnIh@e37xrp82L=K8<$w>kIN@l4b4%hqVuI838cBjein
z?Nf`0>3A@&5dHHkqW{2SCnR%>3UAvSg=1GMEI0Yb9g1xDmDHHj*oK9wJgC0_+JPV3
zCd8#!><!{M>2@K`#?yI+5TE}{h#{>)6l}n^W$@gHN4zIQFFcty3o#eeev1%WdjmHL
zG5b8sTiRg{`cn($lyIj%6N}b4d^H(*bQ7Ge`H(t4v_xaO5a$A=9q0{lI%9q)y`@5Q
zzyeBnj;|807YsQE43QUNEFj0}tf<0>e=IoHQF02(@NMS;7p>&S>s>;<qBy-`X|FV1
zPuiOwr&l8FL7t{CN~FD-IK5J7FBq>U?JbSdn<MQtLXYEU73Vq9UJLX(&WB!zX@z`{
zN36?=51GR>pnQqiF1;mz+DIMOZdgX%MOo2yue|cg%0&>3=)gjxluam7F{Icf!#)zH
zcZm#Jj4|1pqwJ-{=_z|CZz|rAT5NeB@4?rfig3X>xJNlu;gMgW+_XV2-v&ENRl3AN
zi`>)9@hTnxUkkT7k4MvA;kL*!tMziJl?^>pS;&)mSXi|7jJaiTLP3K)jLdU%aN-(^
z2to)si^4L_qQxRMW9KX<s4cWwL^+)0gu<bUA<j|sq8xhbyYy6?Xwd!ZmO!@zx+Typ
zfo=(OOQ2f<-4f`QK(_?CCD1K_ZV7Ztpj!gn66lsdw*<N+&@F*(33N-KTLRq@=$1gY
z1iB^AErI_dCBTDX1IqdZ`<7=qEo1wMu{8t5*-K9rXRbL#oQjR@6Y%zEZ)dO4o<UoA
zT3hN~YkN|M<%~Y!jGA<D^3pyqAAGcV-&E1JCQaBZCy1n=rJRS^de&G)%8gJ@$re*;
zl12Vfs~Eo~MdYnd6*-%G2*<WGaq1o${L4DpKN(&Y!W#hdvyV0(U=sssdWt?c{?o&m
z8cZo?EQPRy28mG3VBuS87v*bC6Z6)eE@o^#LlkTqBF60*D$YJ|rWktAAx=3m4F1kO
z+TV)@ii>M<#f3}rMDdz)ME?47#hA@wM9#LcV(6Z6;*<m9;qHo~-JQ@A`p1x4N|Hz!
z4H<+7iEz!C;_{^q;aM|GT(tfNh}T(&*KovZ1mZOk@ybEG&W1mpqy1kzNGz^N5#FV#
z!o8-4n7=+v%-n1fh1+_H343~pQ3rYp2d*SK^~iDZC(|Y}7bWH@<W)F86xJk(^Oq)z
zv1_bi^!gMr0tW@o+}1-3-jgN<9I%Pr$e$FaHJDtU)NVPkmpBpj`@w$twKiv3ut#}n
zo0k1C@_FQU<@534H8opIMVWD-%uLW_<~#jDZR?wrCbA+nkyY74WaXubtkHej_v0K7
zj){oPIJ<KqUi##j9saI_dX<Uu1(?*)Nu_;RcJtCi&$84Y-SWWMv#|F*o>$uGQiw;P
z{u0lp?UIU4sC(k@gketJ3*0cr3xdKr38(1>riy`i8Dd~)Kzsk-3FY)3CpUx(b{#mE
z^jG*tlyNCbDz#*!iVUPB18K<^-7CnK3T=yEo@NzkA(<ym9d9QOjHc(=MEdB|QX9iN
zT?oVfi*UZ_Lp;t86<U(;`cEnR>7OS0SK^t|1LtW{MgP&6K}%K&WcQTF!W*Zlv?Jw_
zDteAi4XXGcE_o>;qhGHeQ_uMGN3;y;DF)@8AO_tyup_&Dz~26CvYjbCW;kYl&1x+x
zOW!oncTT408%l5Qwb$0xBbZu#S|6+E^F@Y8$?GGwJ|M*T4+>F(JGxIp*k*pr@Y0e+
z+9Yd7Qo9hRWv7Ykh!wK!DSFPa!M`-&#0$6~JlmvyNkTy1tQV8B5x-+0^Tt$uj>^Ad
zrE_3UF%V^TAo5`#@?jwIVc_Ti?K-^RX>3EF=X^qlMR;oPqT*>R(FuC7_?g4V?kTd7
z#%!b|8)?iQ-M^z>JM#GNhuKx863ZX!NLf;sxvDeHqta>U8z{?JKa{h+C})|X|7$E~
zMqLHwp}wN7%Cyc&75QyKT#u*dQkA!6Zef}tsYq7}(q%=ul0|k0<dqoL%pM|hgRP@S
zdrHt+p6q0KKaH|^1NptHr)YUzh$VZ_F6c1KT;?I_9m~8S|CSeViR3SZ_(a-Q@{iXO
z!px6~k5OLwB#Azup3c)KlTT0vBPpV1WvWO)-siuJIN`}(pwn&Qs!aEU3~i7h+?GLx
zTD~WJnl3*5q(r>`#&ofN&NPu>DHLfPXqkg(wVgs7`2c6{5M|j80Z+3g32Q`%z5CFv
z;n}XkGxMYCy{R2C(qiPVf#$MTk|s(}Ml(>ZGmtjfeqE3%E`VN7En6f0F*N)&FfmWR
zi<`qgp4#x+i1W!-adO#-!2#u2&g0e#wwI8j@xu{f`HsnGoa2-29t?=Xz(?S5;^~+o
z#C#xXt#R8=?2F~N$R>B($SZKCkEgEk+&p~$xl)ui6#IP<_tBQ&LrppTQNIU#7E$_{
zJ{zd@PG1c34k6a_yrBMtpw|e!c9UKUUNYVTy(1>Qt<Y<SUXdw`*P!P_7;PqdpF?jm
z>>cmdeJ`GpefPb1N)9`#6?Wf?r{wT|{k?c-!Cawg_gp`}bU&vapQ0E2lS&(Si<vj%
zo>Cj|pvReEDQ%ESw<}y2sFycw9Z4>2a2I+j%Dh$L8Tf*4PP=DSRMdy?0e>NecKJ~f
z@K=jR-~$!#fFSi|#(bTEyUGlIPfB&e<Cy`!cXlL%n*ea$-wKUlzYOVN9a2H9*Ao&W
z;lt#m74F#$Vcg0izj`eWXdAP=5qN-`hQwKhtw<nP9MN|Qi2Ysm7?^NqTjm?(gczlD
z=irtr<^OJ^d#>MC5vcUK8E+kk_)z<IQ3&@2%=L$G>i{op!==Wh4U+<aT4`CP`+CSm
zUCZW%V<A?Xd~}89&Y3b6?^Hk!sd7&VgmBlu>_{bU9OBjFr42HV+%vsbh`SKC5`59y
zJ=GgQy6S|O9y3SkrPkN5Gb2!0U+Z-j;Odu%7bnC|rmY%Y1T#AV9Xc1k?vjRznwSlp
z0wxkcO|A<@8pQSR3zrq(COx+T<?+nGBz)|9G2UwJO4kg2z`a7uW=N16g7AAoz6!>A
z0eqQ)OBbZaB6@5;#hNa^W_xR^m~FyE-$I@W?<5pecM&eB6XK^<lol4ff*M@5hqB-e
z^2P|9pO2b-h`1t=kdafqE|aMRDEahrv4|ihQ;SQKNXon@Ut#z^fVo%C8}Zyze6_X3
z{;GiZhO$Dbqo2M-^$}e##5njs{jNKlFqUBaSiwp?p|Drxoy%W2-P>?QAXFK4&&F5T
z=U{fLrLS^Zh<m{ceH9Tmg(qgja#d0>-;io?cy|4w2(5@++J`c4ID$L(#Fr_oMTK6K
z51ojaE(OSJ`Yc3cmlURiyk6mg4+WlJm_-GaN_jmOgf1>VIt4n~j~TJ{Mk;E=ye|I=
z5b6|t^&(0Wl8^j&2yrmA0AiuZJ}dJ@Ue98*C9oOlGC@R?^fBV6dzN+<B5{c!gEAE3
z1|fbJ=f<4dV<`z;-54=kPF54BG$g;e%Uv1n7AfGR8zC?2s}P%vT#zBYM;UtSP>mrh
z@q&>WHUaYjzRFV8<T4{xy1ct&Ef`11>R{>Bb}hG=0rj<hA)YpTbA@AmUD6dZMqAg)
zv*C79Lj?CwcC|eyP%K9x1Ycq$c(OlSk8&`_lzN6;%(&=WR;6iLtaO#K8i)^cI&~Em
zeZg6(Rh3t5;IldfrD$Ix;sn`Z7l)_#{J5d=N;bJz3t{=2g7<@Jr4tqhtjHMYl#xfA
z-4VQnAFE-pS{IbpvE+`+sjNjlylE|q)FRRmBuHN(Dc+@CNg%MeK4_G*$^J+PCE;V5
z%=3jJ^`6?fevH=98?~j(_2V|eN?bb>Ya&pcW%<4)1#RVVjqOP&f(7i<{1Mz1Q{ly{
zId`PY@rLS<*ofEA5^Ix-!<qy_Q_#>NuBEtj0nS93^}>2@sG-yw!WAxc9)AT=fon@q
zxlTLU_u@+Qon2w7b{d-EPt;W%-5gw&;exnpy;O;zmO|CnD9a_jMIlcJxm=4^p90C8
zpZJ(B-r1}^MW`=g6=GZHT~uF<dm4>cPM=&hb8<=UXeIOWaeY%=7##!Nl5)EX+~J_N
z!dK<1a4RvQtPh2LI8y0m`{VYZU36EX?Fj`M+`MH%h<OENb7#&eo-uh;o>TexFjhi?
z`06Tdh{ANl?e<ilC-DUqxp{S{NLK#&0-SHv1^n(MRY4Z7D!gA+CF|Ttq7G?7w}LW*
z-b*Uy2zj-uxI$D_)z*h=_@*9W!|Pu|bz)4H+U*q=2zS_9<F1k!?`F};7Q70r%2$ot
za{F<)PT&eRF0WjIH3Q{R3C=LWoodEquUN>5vw;!BQp^M8J*`5VF5KRw-U@UL?l^-1
z2#@xJ@C1D$>%yE&j)W9O@;;%FwWCIk8mVQKU>;J}+?uT<*g#hJ0)NN6#_I{1jm1Rf
za77R`=KqDC8qI%qN*gWey;Q>{*9`az2IKV&^W)(h?N4Ktdha(m!Qw(~ZXbAT^UJk)
z%b;VMhqZazxMQ0q1a}mz$J({|H+sVdBX1jyweZXAuT{OF(oA#84NFh>;l)JtlQdDD
z)wO{|9<(>=%0VFv?yhX`vwG*4E=s01F@1MIZ9w)^cpVa5XqUe1LOmGFi2Lu`B0Lp8
ztoNZm!!#fyFUyiQ%Z9x2s^SW-y@D+&{0-@W8zv@y*PijpCE8T!6S=yQ6gSu6elc8K
z%kdA|N0n%VWCS5NLXd&M$Z&5qrwhDI><ytibjxN+CPR8;*f-4OO-HyX2g#H_w;yey
z>i%-v7#_$0CrmO!s{EBcs`6Xe#(I+0H+a6m6YR}5^tHvH=IxKf`?n^+UWw*g6YLq`
z{j=EiLzSLZ9Z&Ap@V7_nAJiC3Fz?X%%J%X9?@dU5xps%wY2`0dV`4e3NwAlF9D7>(
zJHgatpp4?yO^-hkwufs1^|h7uMP9o{-pMq=UX4qy97Dqn!^6aQ8Tw0g{OpE?M&~c$
z^D)8rUq=g#oYO%(qvFx}kH#nRkF~GIOHldtf2|zk)u`%P^SXLmLN71mvsZa|qa|;a
z_sRLOYB!G+$IZGv8Rg2b)A&DJzYKqmhaF)pZ)-yS9nZmW^ji#nw0T=X`*9p+G40I9
z{<ZlP3rH(40`*qlrbWBF302=kZ&bgW(N3qXg2$_Wn|1xe5{>*B_S&BK_ohP!FGNE9
zYuTpa;nZl@b85w*o3;7>R(y>J{gYViw>xH*&2f7wF&}j63yR&5fbKILBkT@LLn=ZJ
zUPRAJk99akJ|za*qVuyf!65kb|5ASD$JfV><HVyVA->;Nx=G95h<`AFyD|5|QlXpX
zv+(%*OE8zMv>QVsecxaOhFNw@!6L(cV8?y2Rca*y)x4Suk9v6JgL|`o0jxF6I&D0M
zr(81>vu4c_A%CU3LDpA+*&OGQa1-=ZEXG2v{MF;i_sT#W=CGbhILA28SaG!1)rTYY
zN-T3O(RZ{@!U|$+xj{MqhwaY{*gc3Hyo`m!ZYG38;Z|Fu$5mG5y4dY1EoOn|)tRyN
z9o@L8fTy_TxJt&`>*P`}Mk4ACW{z}gF;}jI_x?(s$3Mc3^-Lrpq8mvcS3WV=)y94-
zf7m?{yPl51>*M<yyAfP^tdpM*UwJEe&{%1t9aDOkm?{kGE2)1}#H+E~3wbl)OK{CE
zZwYoT9lBJ;KT-o(7`lNfyX>?nAKIXAf5*Q`|D1na@mBtu#tH7Ofr7i96Mfp4Tvj%#
zY`mRKnJ&uiC0HTml9*oFu(M2~1YhBc)L@wCM*_8tk$O%6Lrmeh^n!fgN}HX#HXgf|
zw+N574-MmcGu=NWfffw-mt+a?GQRPHIk~tQ1F$ckZUuIngzpl~wFu0`#jo%Tfm)o5
z{ft34dj&P-;9}0{STTpX9C+jmAy(4P5UgsC6XJHLF%K7OC*dAu+6Vpt=Lq`GQ|Ac^
zrwTCw>Qdm>SkKIZT41;4Aj)Edc7V5BiSubtw*nVlg?K_;4m@d<5Z^%U1U~vB%=a!r
zSio^>g(!hKADD!1R#if62YT`FJ4OQM<_6(ePkmqzPu_fdUkG^Jt%x)9*8qF3$5{iY
ziRaxW#6;N12VRAz7V1Xeqj*}NCSLawA(lbC2KW~|E1~`dxcNU37SzO-@Gz_nU@v^9
zi|XFM$y!|mT&C5;8?<^IaJyC$_i6PT#GgVY@P}B5hkg>5YBg~O9>#MQa2C$ZQC$i=
zh-VYj#NPN`81;$gXmt_rrdG%S`s;vu@pM2<?0u(FX9BlxKwd!Kxe+Vb_Xu$c&YbN9
z4%mYDKy3#;b05xRP#@^WS+_Y*Hv%`~DTms8-i_zr#y<pEK%Y1Z&pN2ffoDC8I74kd
z%NBy3{SoZ-K%cl2&nHl0?jY{Ma|CMfDB1!%0_V*zcNRC{NrM{mY_SVZCe)Z43tm)_
z4K>E!Vg(*M)GfdskE5(oA2=1yM5s%Fx8Ny)x)nI&2_Z_MHlJ<d8Mjq<g3u>Egl8qx
z7#9osHk1w82QI_Y3iVdtktc<?4{GrgWQ=DQ)lXyX2+zAvV+<zR@q7X`#zA7nb|HQb
zHRgEYYdqgTjd`zFi^q!dU6^x<!+3f_jV)<$8J_-72Z49t83OfI;6XeipzZ+feHM8K
zwfU^t^=&vOx<iNs&?olZiF8422R1&32kN$6NZ<2F7u4o6Xgr%%gEME_p-;RH&l|J@
zG|r(BugB8?ed05CtT@X>{1#6()R`|qHh6|X-3H8g8Q+hC+I-fGXU<l<f_#BKu?bHN
z)GfeOucCZF-3ojH&swP4fIIe~j6rQaGsd%IpW)dBed5=64nUpxnh=k@4tYV1?<tB+
zc)p<ie#j4x6_+iP0$1Vb4Rs6fF+BaDZUbii8g&F}JMc|B!=P>lHXJ~?g1QlSKb{iW
z2M&1)^#y7ta50_*v;%wwPbJiQfh*ody99L$@XvUbL5*)dimULfr9SXacy6cuyEvnZ
zXEW5Lz+3S=3U&T_$U{7BP&?m8*}$_0>aD<(4^Sp(ANUTQccJDvysUQB=41oIc)oys
z3-CrfnW%SLfsf<KhMM>ao&#uCiJxjU@vmA<bbg3*u{{A6;n@Xs_wU~!0rkzkY!rj|
z(0IK9^5q!%HY@T7X63CGaYMW#$tGT~w64||bDrlImE&|^%#1@K*bP0;fioQ=$2#m>
z{j2oUVoT^eM}s%)xZwQso)bOcu(xheZG#;i_`~Np>O=nV;ffk>ohLl9&Q}o%gacKP
zkr=U$_k`<mmW*=Pv4!ZX!d}}vHSSfR(W|o!zd()=$N#6bI?!fO0GrY_Wp2veWZ&f6
z)Oy#}yUI5QH#cr>+1$2e@0Rv09b3eGndrGKVkzymX0~Rx4r_I`=C>BL&TlPmt!WLm
zE^TeSbL*Wg8(KGP-O#pS?}qjb9UH{P%#HSq&W-sSOE;Ep6nACb)v~#D^VZF6o44N6
zcF*2>+V9ESV&BrbW$PA(Bks?<-+sUI{_+Qc4>Uf|@<8K*Ef2Onxb;DiiSMT(*!I?r
zR&i(Mom)3-+aR21AE<Zd!8<$dJaVVlkhY;_L*s@u8!|TzLx^J$X3@sRjcYcxY+R3V
z3BM^B0SrT&8P}H9^{rc5x3%tVJ<!^=Vb6vG8xHFDX2U&Q)7|EcTQ_doxMyR>MzLww
zrahb5H`Uw~ysP!DgLkd@dF#(J@1B2m&E4zoF4|na`QYY`&1v^!-jjW=^WJUuuHUj5
KW1jEB@Bahuvy&bG

literal 0
HcmV?d00001

diff --git a/MiscLibs/bayes_cov_compiled.py b/MiscLibs/bayes_cov_compiled.py
new file mode 100644
index 0000000..9f627e8
--- /dev/null
+++ b/MiscLibs/bayes_cov_compiled.py
@@ -0,0 +1,236 @@
+"""bayes_cov_compiled
+Computes the coefficient of variation using a Bayesian approach and an assumed posterior
+log-normal distribution.
+
+Example
+-------
+
+from MiscLibs.bayes_cov_compiled import bayes_cov
+
+cov_68 = bayes_cov(transects_total_q, cov_prior, cov_prior_u, nsim)
+"""
+
+import numpy as np
+from numba.pycc import CC
+from numba import njit
+
+cc = CC('bayes_cov_compiled')
+
+
+# Bayesian COV
+# ============
+@cc.export('bayes_cov', 'f8(f8[::1], f8, f8, i4)')
+def bayes_cov(transects_total_q, cov_prior=0.03, cov_prior_u=0.2, nsim=20000):
+    """Computes the coefficient of variation using a Bayesian approach and an assumed posterior
+    log-normal distribution.
+
+    Parameters
+    ----------
+    transects_total_q: np.array(float)
+        Array of total discharge for each transect
+    cov_prior: float
+        Expected COV (68%) based on prior knowledge. Assumed to be 3% by default.
+    cov_prior_u: float
+        Uncertainty (68%) of cov_prior. Assumed to be 20%.
+    nsim: int
+        Number of simulations. 20000 was found to produce stable results.
+
+    Returns
+    -------
+    cov: float
+        Coefficient of variation
+    """
+
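+    # Starting stds for the Gaussian jump distribution: proportional to each
+    # parameter's magnitude, scaled by cov_prior_u / sqrt(number of transects)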
+    theta_std = np.abs(np.array([np.mean(transects_total_q), cov_prior])) * cov_prior_u \
+        / np.sqrt(len(transects_total_q))
+
+    # Modified for compatibility with Numba
+    sam, obj_funk = metropolis(theta0=np.array([np.mean(transects_total_q), cov_prior]),
+                               obs_data=transects_total_q,
+                               cov_prior=cov_prior,
+                               cov_prior_u=cov_prior_u,
+                               nsim=nsim,
+                               theta_std=theta_std)
+
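+    # Discard the first half of the chain as burn-in, then average the retained
+    # COV samples (column 1 of the chain) to obtain the posterior mean COV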
+    n_burn = int(nsim / 2)
+
+    cov = np.mean(sam[n_burn:nsim, 1])
+
+    return cov
+
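+# A concrete usage sketch (discharge values below are hypothetical, for four
+# replicate transects; defaults mirror the docstring above):
+#     q_total = np.array([50.1, 49.7, 50.4, 49.9])
+#     cov_68 = bayes_cov(q_total, cov_prior=0.03, cov_prior_u=0.2, nsim=20000)
+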
+
+@njit
+@cc.export('metropolis', '(f8[:], f8[:], f8, f8, i4, f8[:])')
+def metropolis(theta0, obs_data, cov_prior, cov_prior_u, nsim, theta_std):
+    """Implements the Metropolis_Hastings Markov chain Monte Carlo (MCMC) algorithm for sampling the
+    posterior distribution, assuming a log-normal posterior distribution.
+
+    Parameters
+    ----------
+    theta0: np.array(float)
+        Starting value of parameters (mean and cov_prior)
+    obs_data: np.array(float)
+        1D array of total discharge for each transect
+    cov_prior: float
+        Expected COV (68%) based on prior knowledge.
+    cov_prior_u: float
+        Uncertainty (68%) of cov_prior.
+    nsim: int
+        Number of simulations.
+    theta_std: np.array(float)
+        Standard deviations for the Gaussian jump distribution. If NaN, default values are computed.
+
+    Returns
+    -------
+    sam: np.array(float)
+        Matrix containing the MCMC samples
+    obj_funk: np.array(float)
+        Vector containing the corresponding values of the objective function
+        (i.e. of the unnormalized log-posterior)
+    """
+
+    # Initialize
+    npar = len(theta0)
+    sam = np.zeros((nsim + 1, npar))
+    obj_funk = np.zeros((nsim + 1, 1))
+
+    # Parameters - used for automatic computation of starting stds of the Gaussian Jump distribution
+    if np.any(np.isnan(theta_std)):
+        std_factor = 0.1
+        theta_std = std_factor * np.abs(theta0)
+
+    # Check if starting point is feasible - abandon otherwise
+    f_current = log_post(param=theta0, measures=obs_data, cov_prior=cov_prior, cov_prior_u=cov_prior_u)
+
+    if not is_feasible(f_current):
+        print('Metropolis:FATAL:unfeasible starting point')
+        return sam, obj_funk
+    else:
+        sam[0, :] = list(theta0)
+        obj_funk[0] = f_current
+
+        # MCMC loop
+        np.random.seed(0)
+        candid = np.array([np.nan, np.nan])
+        for i in range(nsim):
+            current = sam[i, :]
+            f_current = obj_funk[i]
+
+            # Propose a new candidate
+            # Under Numba np.random.normal will not accept arrays as input
+            candid[0] = np.random.normal(loc=current[0], scale=theta_std[0])
+            candid[1] = np.random.normal(loc=current[1], scale=theta_std[1])
+
+            # Evaluate objective function at candidate
+            f_candid = log_post(param=candid,
+                                measures=obs_data,
+                                cov_prior=cov_prior,
+                                cov_prior_u=cov_prior_u)
+
+            if not is_feasible(f_candid):
+                sam[i + 1, :] = current
+                obj_funk[i + 1] = f_current
+            else:
+                # Generate deviate ~U[0,1]
+                u = np.random.uniform(0, 1)
+
+                # Compute Metropolis acceptance ratio
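+                # The log acceptance ratio (f_candid - f_current) is clamped to
+                # [-100, 0], capping the ratio at 1 and avoiding underflow in np.exp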
+                ratio = np.exp(min((np.max(np.hstack((np.array([float(-100)]), f_candid - f_current))), float(0))))
+
+                # Apply acceptance rule
+                if u <= ratio:
+                    sam[i + 1, :] = candid
+                    obj_funk[i + 1] = f_candid
+                else:
+                    sam[i + 1, :] = current
+                    obj_funk[i + 1] = f_current
+
+        # Modified return for Numba, eliminating need for dictionary
+        return sam, obj_funk
+
+
+@njit
+@cc.export('log_post', 'f8(f8[:], f8[:], f8, f8)')
+def log_post(param, measures, cov_prior, cov_prior_u):
+    """Define function returning the posterior log-pdf using the model measures ~ N(true_value,cov*true_value),
+    with a flat prior on true_value and a log-normal prior for cov (= coefficient of variation)
+
+    Parameters
+    ----------
+    param: np.array(float)
+        Array containing the true value and COV
+    measures: np.array(float)
+        Array of observations
+    cov_prior: float
+        Expected COV (68%) based on prior knowledge.
+    cov_prior_u: float
+        Uncertainty (68%) of cov_prior.
+
+    Returns
+    -------
+    logp: float
+        Unnormalized log-posterior
+    """
+    # Check if any parameter is <= 0,
+    # since both true_value and cov have to be positive - otherwise sigma = true_value*cov does not make sense
+    # if any(item <= 0 for item in param):
+    #     return -math.inf
+    # Changed for compatibility with Numba
+    if np.any(np.less_equal(param, 0)):
+        return np.NINF
+
+    true_value = param[0]
+    cov = param[1]
+    sigma = cov * true_value  # standard deviation
+
+    # Compute log-likelihood under the model: measures ~ N(true_value,sigma)
+    # You can easily change this model (e.g. lognormal for a positive measurand?)
+    # OPTION 1 : the model follows a Normal distribution
+    # This equation is used for compatibility with Numba, instead of call to scipy.stats.norm.logpdf
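+    # (equivalent to summing -0.5 * ((measures - true_value) / sigma) ** 2
+    #  - np.log(sigma * np.sqrt(2 * np.pi)) over all measures)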
+    log_likelihood = np.sum(np.log(np.exp(-(((measures - true_value) / sigma) ** 2) / 2)
+                                   / (np.sqrt(2 * np.pi) * sigma)))
+
+    # Prior on true_value - flat prior used here but you may change this if you have prior knowledge
+    log_prior_1 = 0
+
+    # Lognormal prior
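+    # (equivalent to scipy.stats.lognorm.pdf(x, s=cov_prior_u, scale=cov_prior),
+    # written out explicitly for Numba compatibility)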
+    x = cov
+    mu = np.log(cov_prior)
+    scale = cov_prior_u
+    pdf = np.exp(-(np.log(x) - mu) ** 2 / (2 * scale ** 2)) / (x * scale * np.sqrt(2 * np.pi))
+    log_prior_2 = np.log(pdf)
+
+    # Joint prior (prior independence)
+    log_prior = log_prior_1 + log_prior_2
+
+    # Return (unnormalized) log-posterior
+    logp = log_likelihood + log_prior
+    if np.isnan(logp):
+        # Used np to eliminate the need for math package
+        logp = np.NINF  # returns -Inf rather than NaN's (required by the MCMC sampler used subsequently)
+    return logp
+
+
+@njit
+@cc.export('is_feasible', 'b1(f8)')
+def is_feasible(value):
+    """Checks that a value is a real value (not infinity or nan)
+
+    Parameters
+    ----------
+    value: float
+
+    Returns
+    -------
+    bool
+    """
+    return not (np.isinf(value) or np.isnan(value))
+
+
+if __name__ == '__main__':
+    # Running this module performs the ahead-of-time compilation that produces the .pyd extension
+    cc.compile()
diff --git a/MiscLibs/common_functions.py b/MiscLibs/common_functions.py
new file mode 100644
index 0000000..8a3393f
--- /dev/null
+++ b/MiscLibs/common_functions.py
@@ -0,0 +1,466 @@
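+"""common_functions
+Collection of common helper functions: degree-based trigonometry, coordinate
+conversions, and statistical utilities used throughout the package.
+"""
+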
+import numpy as np
+import scipy.stats as sp
+
+
+def cosd(angle):
+    """Compute cosine of angle in degrees.
+
+    Parameters
+    ----------
+    angle: float
+        Angle in degrees
+    """
+    
+    return np.cos(np.pi * angle/180)
+
+
+def sind(angle):
+    """Compute sine of angle in degrees.
+
+    Parameters
+    ----------
+    angle: float
+        Angle in degrees
+    """
+    
+    return np.sin(np.pi * angle/180)
+
+
+def tand(angle):
+    """Compute tangent of angle in degrees.
+
+        Parameters
+        ----------
+        angle: float
+            Angle in degrees
+        """
+    
+    return np.tan(np.pi * angle/180)
+
+
+def arctand(angle):
+    """Compute arctangent of angle in degrees.
+
+        Parameters
+        ----------
+        angle: float
+            Angle in degrees
+        """
+    
+    return np.arctan(angle) * 180/np.pi
+
+
+def cart2pol(x, y):
+    """Convert cartesian coordinates to polar coordinates.
+
+    Parameters
+    ----------
+    x: np.array(float)
+        x coordinate
+    y: np.array(float)
+        y coordinate
+
+    Returns
+    -------
+    phi: np.array(float)
+        Angle in radians
+    rho: np.array(float)
+        Magnitude
+    """
+    
+    rho = np.sqrt(x**2 + y**2)
+    phi = np.arctan2(y, x)
+    
+    return phi, rho
+
+
+def pol2cart(phi, rho):
+    """Convert polar coordinates to cartesian coordinates.
+
+        Parameters
+        ----------
+        phi: np.array(float)
+            Angle in radians
+        rho: np.array(float)
+            Magnitude
+
+        Returns
+        -------
+        x: float
+            x coordinate
+        y: float
+            y coordinate
+
+        """
+    
+    x = rho * np.cos(phi)
+    y = rho * np.sin(phi)
+    
+    return x, y
+
+
+def iqr(data):
+    """This function computes the iqr consistent with Matlab
+
+    Parameters
+    ----------
+    data: np.ndarray
+        Data for which the statistic is required
+
+    Returns
+    -------
+    sp_iqr: float
+        Interquartile range
+
+    """
+
+    # If 2-D array flatten to 1-D array
+    if len(data.shape) > 1:
+        data_1d = data.flatten()
+    else:
+        data_1d = data
+
+    # Remove nan elements
+    idx = np.where(np.logical_not(np.isnan(data_1d)))[0]
+    data_1d = data_1d[idx]
+
+    # Compute statistics
+    q25, q50, q75 = sp.mstats.mquantiles(data_1d, alphap=0.5, betap=0.5)
+    sp_iqr = q75 - q25
+
+    return sp_iqr
+
+
+def iqr_2d(data):
+    """This function computes the iqr consistent with Matlab
+
+    Parameters
+    ----------
+    data: np.ndarray
+        Data for which the statistic is required
+
+    Returns
+    -------
+    sp_iqr: float
+        Interquartile range
+
+    """
+
+    # Remove nan elements
+    data = np.array(data)
+    idx = np.where(np.logical_not(np.isnan(data)))[0]
+    data = data[idx]
+
+    # Compute statistics
+    q25, q50, q75 = sp.mstats.mquantiles(data, alphap=0.5, betap=0.5)
+    sp_iqr = q75 - q25
+    return sp_iqr
+
+
+def azdeg2rad(angle) -> float:
+    """Converts an azimuth angle in degrees to radians.
+
+    Parameters
+    ----------
+    angle: float, np.ndarray(float)
+        Azimuth angle in degrees
+
+    Returns
+    -------
+    direction: float, np.ndarray(float)
+        Angle in radians
+    """
+
+    # Convert to radians
+    direction = np.deg2rad(90 - angle)
+
+    # Wrap negative angles to [0, 2 * pi)
+    if isinstance(direction, np.ndarray):
+        idx = np.where(direction < 0)[0]
+        direction[idx] = direction[idx] + 2 * np.pi
+    elif direction < 0:
+        direction = direction + 2 * np.pi
+
+    return direction
+
+
+def rad2azdeg(angle) -> float:
+    """Converts an angle in radians to an azimuth in degrees.
+
+    Parameters
+    ----------
+    angle: float, np.ndarray(float)
+        Angle in radians
+
+    Returns
+    -------
+    deg: float, np.ndarray(float)
+        Azimuth in degrees
+    """
+
+    if isinstance(angle, (float, np.floating)):
+        deg = np.rad2deg(angle)
+        deg = 90 - deg
+        if deg < 0:
+            deg += 360
+            
+        return deg
+    else:
+        # Multiple values
+        deg = np.rad2deg(angle)
+        deg = 90 - deg
+        sub_zero = np.where(nan_less(deg, 0))
+        deg[sub_zero] = deg[sub_zero] + 360
+        
+        return deg
+
+
+def nandiff(values):
+    """Computes difference in consecutive values with handling of nans.
+
+    Parameters
+    ----------
+    values: np.ndarray()
+        1-D array of numbers
+
+    Returns
+    -------
+    final_values: np.ndarray()
+        1-D array of differences of consecutive non nan numbers
+    """
+    
+    final_values = []
+    for n in range(len(values) - 1):
+        # Check for nan and add nan to final values
+        if np.isnan(values[n]):
+            final_values.append(np.nan)
+        else:
+            # Search for next non nan number and compute difference
+            i = n + 1
+            while np.isnan(values[i]) and i < len(values) - 1:
+                i += 1
+            
+            final_values.append(values[i] - values[n])
+        
+    return np.array(final_values)
+
+
+def valid_number(data_in) -> float:
+    """Check to see if data_in can be converted to float.
+
+    Parameters
+    ----------
+    data_in: str
+        String to be converted to float
+
+    Returns
+    -------
+    data_out: float
+        Returns a float of data_in or nan if conversion is not possible
+    """
+
+    try:
+        data_out = float(data_in)
+    except ValueError:
+        data_out = np.nan
+    return data_out
+
+
+def nans(shape, dtype=float):
+    """Create array of nans.
+
+    Parameters
+    ----------
+    shape: tuple
+        Shape of array to be filled with nans
+    dtype: type
+        Type of array
+
+    Returns
+    -------
+    a: np.ndarray(float)
+        Array of nan
+    """
+    a = np.empty(shape, dtype)
+    a.fill(np.nan)
+    return a
+
+
+def checked_idx(transects):
+    """Create list of transect indices of all checked transects.
+
+    Parameters
+    ----------
+    transects: list
+        List of TransectData objects
+
+    Returns
+    -------
+    checked: list
+        List of indices
+
+    """
+    checked = []
+    for n, transect in enumerate(transects):
+        if transect.checked:
+            checked.append(n)
+
+    return checked
+
+
+def units_conversion(units_id='SI'):
+    """Computes the units conversion from SI units used internally to the
+    desired display units.
+
+    Parameters
+    ----------
+    units_id: str
+        String identifying the unit system ('English' or 'SI'); 'SI' is the default.
+
+    Returns
+    -------
+    units: dict
+        dictionary of unit conversion and labels
+    """
+
+    if units_id == 'SI':
+        units = {'L': 1,
+                 'Q': 1,
+                 'A': 1,
+                 'V': 1,
+                 'label_L': '(m)',
+                 'label_Q': '(m3/s)',
+                 'label_A': '(m2)',
+                 'label_V': '(m/s)',
+                 'ID': 'SI'}
+
+    else:
+        units = {'L': 1.0 / 0.3048,
+                 'Q': (1.0 / 0.3048)**3,
+                 'A': (1.0 / 0.3048)**2,
+                 'V': 1.0 / 0.3048,
+                 'label_L': '(ft)',
+                 'label_Q': '(ft3/s)',
+                 'label_A': '(ft2)',
+                 'label_V': '(ft/s)',
+                 'ID': 'English'}
+
+    return units
+
+
+def convert_temperature(temp_in, units_in, units_out) -> float:
+    """Converts temperature from F to C or C to F.
+
+    Parameters
+    ----------
+    temp_in: np.array
+        Temperature in units_in
+    units_in: str
+        C for Celsius or F for Fahrenheit
+    units_out: str
+        C for Celsius or F for Fahrenheit
+
+    Returns
+    -------
+    temp_out: np.array
+        Temperature in units_out
+    """
+
+    temp_out = None
+    if units_in == 'F':
+        if units_out == 'C':
+            temp_out = (temp_in - 32) * (5./9.)
+        else:
+            temp_out = temp_in
+
+    elif units_in == 'C':
+        if units_out == 'C':
+            temp_out = temp_in
+        else:
+            temp_out = (temp_in * (9./5.)) + 32
+
+    return temp_out
+
+
+def nan_less_equal(data1, data2):
+    """Computes data1 <= data2 and sets all np.nan comparisons to False.
+
+    Parameters
+    ----------
+    data1: np.array()
+        Data array.
+    data2: np.array()
+        Data array.
+
+    Returns
+    -------
+    :np.array(bool)
+        Result of comparison.
+    """
+
+    d3 = data2 - data1
+    d3[np.isnan(d3)] = -999.
+    return d3 >= 0
+
+
+def nan_less(data1, data2):
+    """Computes data1 < data2 and sets all np.nan comparisons to False.
+
+    Parameters
+    ----------
+    data1: np.array()
+        Data array.
+    data2: np.array()
+        Data array.
+
+    Returns
+    -------
+    :np.array(bool)
+        Result of comparison.
+    """
+
+    d3 = data2 - data1
+    d3[np.isnan(d3)] = -999.
+    return d3 > 0
+
+
+def nan_greater_equal(data1, data2):
+    """Computes data1 >= data2 and sets all np.nan comparisons to False.
+
+    Parameters
+    ----------
+    data1: np.array()
+        Data array.
+    data2: np.array()
+        Data array.
+
+    Returns
+    -------
+    :np.array(bool)
+        Result of comparison.
+    """
+
+    d3 = data1 - data2
+    d3[np.isnan(d3)] = -999.
+    return d3 >= 0
+
+
+def nan_greater(data1, data2):
+    """Computes data1 > data2 and sets all np.nan comparisons to False.
+
+    Parameters
+    ----------
+    data1: np.array()
+        Data array.
+    data2: np.array()
+        Data array.
+
+    Returns
+    -------
+    :np.array(bool)
+        Result of comparison.
+    """
+
+    d3 = data1 - data2
+    d3[np.isnan(d3)] = -999.
+    return d3 > 0
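
A short usage sketch for a few of these helpers; the velocity, depth, and discharge values below are illustrative assumptions:

    import numpy as np
    from MiscLibs.common_functions import cart2pol, rad2azdeg, nan_less, units_conversion

    # East/north velocity components -> magnitude and direction as an azimuth
    east = np.array([0.5, -0.3])
    north = np.array([0.5, 0.4])
    direction_rad, speed = cart2pol(east, north)
    azimuth_deg = rad2azdeg(direction_rad)   # 0 = north, clockwise positive

    # nan-aware comparison: nan entries compare False rather than propagating
    depths = np.array([1.2, np.nan, 0.4])
    shallow = nan_less(depths, 0.5)          # [False, False, True]

    # Convert an internal SI discharge for display in English units
    units = units_conversion(units_id='English')
    print(4.7 * units['Q'], units['label_Q'])
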
diff --git a/MiscLibs/non_uniform_savgol.py b/MiscLibs/non_uniform_savgol.py
new file mode 100644
index 0000000..5f52b8a
--- /dev/null
+++ b/MiscLibs/non_uniform_savgol.py
@@ -0,0 +1,120 @@
+import numpy as np
+
+
+def non_uniform_savgol(x, y, window, polynom):
+    """
+    Applies a Savitzky-Golay filter to y with non-uniform spacing
+    as defined in x
+
+    This is based on https://dsp.stackexchange.com/questions/1676/savitzky-golay-smoothing-filter-for-not-equally-spaced-data
+    The borders are interpolated like scipy.signal.savgol_filter would do
+
+    Parameters
+    ----------
+    x : array_like
+        List of floats representing the x values of the data
+    y : array_like
+        List of floats representing the y values. Must have same length
+        as x
+    window : int (odd)
+        Window length in data points. Must be odd and smaller than the size of x
+    polynom : int
+        Order of the fitted polynomial. Must be smaller than the window size
+
+    Returns
+    -------
+    np.array of float
+        The smoothed y values
+    """
+    if len(x) != len(y):
+        raise ValueError('"x" and "y" must be of the same size')
+
+    if len(x) < window:
+        raise ValueError('The data size must be larger than the window size')
+
+    if type(window) is not int:
+        raise TypeError('"window" must be an integer')
+
+    if window % 2 == 0:
+        raise ValueError('The "window" must be an odd integer')
+
+    if type(polynom) is not int:
+        raise TypeError('"polynom" must be an integer')
+
+    if polynom >= window:
+        raise ValueError('"polynom" must be less than "window"')
+
+    half_window = window // 2
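+    # From here on, polynom counts polynomial coefficients (order + 1)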
+    polynom += 1
+
+    # Initialize variables
+    A = np.empty((window, polynom))     # Matrix
+    tA = np.empty((polynom, window))    # Transposed matrix
+    t = np.empty(window)                # Local x variables
+    y_smoothed = np.full(len(y), np.nan)
+
+    # Start smoothing
+    for i in range(half_window, len(x) - half_window, 1):
+        # Center a window of x values on x[i]
+        for j in range(0, window, 1):
+            t[j] = x[i + j - half_window] - x[i]
+
+        # Create the initial matrix A and its transposed form tA
+        for j in range(0, window, 1):
+            r = 1.0
+            for k in range(0, polynom, 1):
+                A[j, k] = r
+                tA[k, j] = r
+                r *= t[j]
+        try:
+            # Multiply the two matrices
+            tAA = np.matmul(tA, A)
+
+            # Invert the product of the matrices
+            tAA = np.linalg.inv(tAA)
+
+            # Calculate the pseudoinverse of the design matrix
+            coeffs = np.matmul(tAA, tA)
+
+            # Calculate c0 which is also the y value for y[i]
+            y_smoothed[i] = 0
+            for j in range(0, window, 1):
+                y_smoothed[i] += coeffs[0, j] * y[i + j - half_window]
+
+            # If at the end or beginning, store all coefficients for the polynom
+            if i == half_window:
+                first_coeffs = np.zeros(polynom)
+                for j in range(0, window, 1):
+                    for k in range(polynom):
+                        first_coeffs[k] += coeffs[k, j] * y[j]
+            elif i == len(x) - half_window - 1:
+                last_coeffs = np.zeros(polynom)
+                for j in range(0, window, 1):
+                    for k in range(polynom):
+                        last_coeffs[k] += coeffs[k, j] * y[len(y) - window + j]
+        except BaseException:
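+            # Singular or ill-conditioned window: leave y_smoothed[i] as nan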
+            pass
+
+    # Interpolate the result at the left border
+    for i in range(0, half_window, 1):
+        y_smoothed[i] = 0
+        x_i = 1
+        try:
+            for j in range(0, polynom, 1):
+                y_smoothed[i] += first_coeffs[j] * x_i
+                x_i *= x[i] - x[half_window]
+        except BaseException:
+            y_smoothed[i] = y[i]
+
+    # Interpolate the result at the right border
+    for i in range(len(x) - half_window, len(x), 1):
+        y_smoothed[i] = 0
+        x_i = 1
+        try:
+            for j in range(0, polynom, 1):
+                y_smoothed[i] += last_coeffs[j] * x_i
+                x_i *= x[i] - x[-half_window - 1]
+        except BaseException:
+            y_smoothed[i] = y[i]
+
+    return y_smoothed
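
A minimal usage sketch under assumed inputs (the synthetic signal is only for illustration):

    import numpy as np
    from MiscLibs.non_uniform_savgol import non_uniform_savgol

    # Irregularly spaced samples of a noisy signal
    rng = np.random.default_rng(0)
    x = np.sort(rng.uniform(0., 10., 101))
    y = np.sin(x) + rng.normal(0., 0.1, x.size)

    # 11-point window (odd, smaller than len(x)) with a local quadratic fit
    y_smooth = non_uniform_savgol(x, y, window=11, polynom=2)
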
diff --git a/MiscLibs/robust_loess.py b/MiscLibs/robust_loess.py
new file mode 100644
index 0000000..805a848
--- /dev/null
+++ b/MiscLibs/robust_loess.py
@@ -0,0 +1,280 @@
+"""robust_loess
+This module computes a robust loess smooth using a quadratic model as defined by
+W.S.Cleveland, (1979) "Robust Locally Weighted Regression and Smoothing Scatterplots",
+Journal of the American Statistical Association, Vol 74, No. 368, pp. 829-836.
+Both x and y values are required and are assumed to be 1D arrays (n,).
+
+Example
+-------
+
+from MiscLibs.robust_loess import rloess
+
+smooth_fit = rloess(x, y, span)
+"""
+import numpy as np
+from numba import jit, njit
+
+# Set constants used in multiple functions
+eps = np.finfo('float').eps
+seps = np.sqrt(eps)
+
+# @jit(cache=True, nopython=True)
+@njit
+def nearest_neighbors(num_neighbors, idx, x, valid_x):
+    """Find the nearest k neighbors to x[i] that are not nan.
+
+    Parameters
+    ----------
+    num_neighbors: int
+        Number of neighbors to find
+    idx: int
+        Index for the target x value
+    x: np.array
+        1D array of the independent variable
+    valid_x: np.array(bool)
+        Boolean array indicating valid x data.
+
+    Returns
+    -------
+    neighbors_idx: np.array(int)
+        Indices of neighbors in the x array
+    """
+
+    # Find neighbors
+    if np.nansum(valid_x) <= num_neighbors:
+        # If there are k points or fewer, then they are all neighbors
+        neighbors_idx = np.where(np.equal(valid_x, np.repeat(True, len(valid_x))))[0]
+    else:
+        # Find the distance to the k closest points
+        distance = np.abs(x - x[idx])
+        distance_sorted = np.sort(distance[valid_x])
+        distance_neighbors = distance_sorted[num_neighbors - 1]
+
+        # Find all points that are as close as or closer than the num_neighbors closest points
+        # close = np.array(distance <= distance_neighbors)
+        close = np.less_equal(distance, distance_neighbors)
+
+        # Find the indices of x that are both close and valid
+        neighbors_idx = np.where(np.equal(np.logical_and(close, valid_x), np.repeat(True, len(valid_x))))[0]
+
+    return neighbors_idx
+
+# @jit(cache=True, nopython=True)
+@njit
+def tricube_weights(distance):
+    """ Convert distances into weights using tri-cubic weight function.
+    Note for Matlab: This function returns the square-root of the weights.
+
+    Parameters
+    ----------
+    distance: np.array
+        1D array of distances
+
+    Returns
+    -------
+    weights: np.array
+        1D array of weights
+    """
+
+    max_distance = np.max(distance)
+    if max_distance > 0:
+        distance = distance / max_distance
+    weights = (1 - distance ** 3) ** 1.5
+    return weights
+
+# @jit(cache=True, nopython=True)
+@njit
+def bisquare(data):
+    """Bisqure weight function which for values greater than are equal to 1 are set to zero.
+
+    Parameters
+    ----------
+    data: np.array
+        1D array of data used to compute weight
+
+    Returns
+    -------
+    weights: np.array
+        Computed weight
+
+    """
+    weights = np.zeros(data.shape)
+    d3 = 1 - np.abs(data)
+    d3[np.isnan(d3)] = -999.
+    idx = d3 > 0
+    # idx = nan_less(np.abs(data), 1)
+    weights[idx] = np.abs(1 - data[idx] ** 2)
+    return weights
+
+# @jit(cache=True, nopython=True)
+@njit
+def robust_weights(residuals, max_eps):
+    """Compute robust weights using residuals.
+
+    Parameters
+    ----------
+    residuals: np.array
+        1D array of residuals from previous fit
+    max_eps: float
+        Smallest value to be represented
+
+    Returns
+    -------
+    weights: np.array
+        1D array of computed weights
+    """
+
+    # Compute median using only valid data
+    s = np.nanmax([1e8 * max_eps, np.nanmedian(np.abs(residuals))])
+
+    # Compute weights
+    weights = bisquare(residuals / (6 * s))
+    weights[np.isnan(residuals)] = 0
+
+    return weights
+
+# @jit(cache=True, nopython=True)
+@njit
+def compute_loess(x, y, neighbors_idx, idx, r_weights=None):
+    """Computes the loess smooth for the specified point x[i]. If robust weights are specified the computed weights
+    are adjusted by the robust weights.
+
+    Parameters
+    ----------
+    x: np.array(float)
+        1D array of independent variable
+    y: np.array(float)
+        1D array of dependent variable
+    neighbors_idx: np.array(int)
+        1D array of indices of x defining neighbors
+    idx: int
+        Index of x defining target
+    r_weights: np.array(float)
+        1D array of robust weights
+
+    Returns
+    -------
+    smoothed_value: float
+        Computed smoothed value for target
+    """
+
+    if len(neighbors_idx) > 0:
+        # Center around current point to improve conditioning
+        distances = x[neighbors_idx] - x[idx]
+        distances_abs = np.abs(distances)
+        neighbors_y = y[neighbors_idx]
+
+        weights = tricube_weights(distances_abs)
+
+        # If all weights are 0, skip weighting
+        if np.all(weights < seps):
+            weights[:] = 1
+
+        if r_weights is not None:
+            weights = weights * r_weights[neighbors_idx]
+
+        weighted_x_matrix = np.vstack((np.ones(distances.shape), distances))
+        weighted_x_matrix = np.vstack((weighted_x_matrix, np.expand_dims(distances * distances, axis=0)))
+        weighted_x_matrix = weights.repeat(weighted_x_matrix.shape[0]).reshape(-1, 3).T * weighted_x_matrix
+        neighbors_y = weights * neighbors_y
+
+        # Solve using least squares
+        # try:
+        #     mask = ~np.isnan(weighted_x_matrix.T) & ~np.isnan(neighbors_y.T)
+        #     smoothed_values, _, _, _ = np.linalg.lstsq(weighted_x_matrix.T[mask],
+        #                                                neighbors_y.T[mask], rcond=None)
+        # except (IndexError, ValueError):
+        smoothed_values, _, _, _ = np.linalg.lstsq(weighted_x_matrix.T,
+                                                   neighbors_y.T)
+        smoothed_value = smoothed_values[0]
+    else:
+        smoothed_value = np.nan
+    return smoothed_value
+
+# @jit(cache=True, nopython=True)
+@njit
+def rloess(x, y, span):
+    """This function computes a robust loess smooth using a quadratic model as defined by
+    W.S.Cleveland, (1979) "Robust Locally Weighted Regression and Smoothing Scatterplots",
+    Journal of the American Statistical Association, Vol 74, No. 368, pp. 829-836.
+    Both x and y values are required and are assumed to be 1D arrays (n,).
+
+    Parameters
+    ----------
+    x: np.array
+        1D array of independent variable
+    y: np.array
+        1D array of dependent variable
+    span: int
+        Number of neighbors to use in the regression
+
+    Returns
+    -------
+    smoothed_values: np.array
+        1D array of smoothed y values
+    """
+
+    # Number of cycles of the robust fit
+    cycles = 5
+
+    n_points = len(y)
+    smoothed_values = np.copy(y)
+
+    if span > 1:
+
+        diff_x = np.diff(x)
+
+        # Assumes non-uniform x
+        y_nan = np.isnan(y)
+        any_nans = np.any(y_nan[:])
+        the_diffs = np.concatenate((np.array([1]), diff_x, np.array([1])), axis=0)
+
+        # Pre-allocate space for lower and upper indices for each fit
+        lower_bound = np.repeat(0, n_points)
+        upper_bound = np.repeat(0, n_points)
+        # lower_bound = np.zeros(n_points).astype(int)
+        # upper_bound = np.zeros(n_points).astype(int)
+
+        # Compute the non-robust smooth
+        for n in range(n_points):
+
+            # If x[n] and x[n-1] are equal, just reuse the previous fit
+            if the_diffs[n] == 0:
+
+                smoothed_values[n] = smoothed_values[n-1]
+                lower_bound[n] = int(lower_bound[n-1])
+                upper_bound[n] = int(upper_bound[n-1])
+
+            else:
+
+                # Find nearest neighbors
+                neighbors_idx = nearest_neighbors(span, n, x, np.logical_not(y_nan))
+                # Store neighbors for robust loop
+                lower_bound[n] = int(np.min(neighbors_idx))
+                upper_bound[n] = int(np.max(neighbors_idx))
+
+                if len(neighbors_idx) < 1:
+                    smoothed_values[n] = np.nan
+                else:
+                    smoothed_values[n] = compute_loess(x, y, neighbors_idx, n)
+        # Non-robust fit complete
+
+        # Compute residual and apply robust fit
+        max_absy_eps = np.max(np.abs(y)) * eps
+        for cycle in range(cycles - 1):
+            residuals = y - smoothed_values
+
+            # Compute robust weights
+            r_weights = robust_weights(residuals, max_absy_eps)
+
+            # Find new value for each point
+            for n in range(n_points):
+                if n > 0 and x[n] == x[n-1]:
+                    smoothed_values[n] = smoothed_values[n-1]
+                else:
+                    if not np.isnan(smoothed_values[n]):
+                        neighbors_idx = np.array(list(range(lower_bound[n], upper_bound[n] + 1)))
+
+                        if any_nans:
+                            neighbors_idx = neighbors_idx[np.logical_not(y_nan[neighbors_idx])]
+
+                        if np.any(r_weights[neighbors_idx] <= 0):
+                            neighbors_idx = nearest_neighbors(span, n, x, (r_weights > 0))
+
+                        smoothed_values[n] = compute_loess(x, y, neighbors_idx, n, r_weights)
+    return smoothed_values
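
A usage sketch under assumed inputs; span is the number of neighbors, so larger values give a smoother fit:

    import numpy as np
    from MiscLibs.robust_loess import rloess

    x = np.arange(50, dtype=float)
    y = np.sin(x / 8.) + 0.05 * np.random.randn(50)
    y[20] = 5.0       # outlier, down-weighted by the robust iterations
    y[35] = np.nan    # nan, excluded when selecting neighbors

    smoothed = rloess(x, y, span=10)
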
diff --git a/MiscLibs/robust_loess_compiled.cp39-win_amd64.pyd b/MiscLibs/robust_loess_compiled.cp39-win_amd64.pyd
new file mode 100644
index 0000000000000000000000000000000000000000..08116c4f307289f31802a023b62e0e224158b185
GIT binary patch
literal 222720
zy!i6FC$sH<S5Q_1{SeXkdMaLeI$nxK&eQE%^RvnJGX0i#=@4f*+eh;6Ahd8b1WeVZ
zC<paN<ntgolHhBxv<Q+3Awja2#taA$AEsfJSTpuxCH!pwqcmx}^rd*|<JwkbBw&#O
z9@Lv6bBCMl7d|;zhw(}#TT_<ya}dB@0&fNnme&GrUIOn~#ooLG-hQz+FM;>`VsBmo
z?}f$Qyae8hi@kaI<YXNhOO-66&_NK)s7mJ%Gxd!gb_=rq8%v+pf}Qw-LSmDP6XPYw
zqo&xKm%w{gu{SR+>u8BzlrL{KE`GuU*UVD)*nmg^P{6U{;n3LekZ<9u;z2Ol|8%Fy
z-WIpi&ybcz-IyF0f8*2t730mAt0a-v^>5*LEeuF6^HaRZOSSYO&Q2bEUv7<rhc%Hr
zta<RT#*v3>7<{Z*s9?0*_fkKh{l}CxN&0V8-aO}(C0seX50rUCGc7whc6FX2Y#-T^
zssDg-U^tqr>V9I2F(LG67kY)L29?`6+Fu(I72Xr?K*)6P&3APf0(i(i#Vp`&R(pSX
zj2BePwUlboEpJx~B+9F$hB=L$z=-E%Y{kN0(6XQF%V`(AmFrb*;Sg>`XF(saBWIdV
z7;sPTaO)9gF?Pscw~rxjS*s~Kyd)`=J@)wFdiH{!iC<G1=#R`EA=F#B(NgTma~l)<
zG`_1qwPZW@v@ZFHBF6WQAj946efKHiNt)|@=P*zn)3@-O{mfS{LCux1X^((CS|TV9
zZ7wtudl`g6Va2ZXR{lC4v34|jnOoJ2#j6b>A)9d^{iGVx=pzUyM0*@@Q%`H5($d{z
z$Nz*^v|7ljDU$sb9bx8_wrDOkjpFDNwMJ2^**{I~CaOt`ap$FTIgF6dozA}?!9Su%
zDsG<|b=oUf1z++7E*cDbbcu~ge>urM9ke+#z~ZlQA<?(LLQ|Bwrf5-9B*rxb*WK>1
zBO%2D4za}fFmZ3?E}P;ExbYdhi443I+L|$T7l4Te;V~S|Jv`m5$bT2OKgK1(kv&>}
z0+Wn^p+K4O2*gr_kV@!@K59IMFQA6rfM-0-JS@mcS>sXp;@$Ou&svX#%k)ATqLqd|
znwv`2d5<l8kbnML^z;VWSWEtYr9gw<A!Lb4mY$UVs>Y73Z@>F_^`mear1s+ZYKv~Y
z%<fy#5Qk5f<mHYGH#IPV_q}4gKduR~fl}J9;aRci^e5O3bBFW$vw!?So;}}uAE$78
z<XeH#8Y)pA8oYzN@9z)D(7yx-xf@^n1ASguu~Xx|%#v~+{V}U7NnJ-$eWMwC>+OS6
z6`z=zC&gJ;SbUi{=ozdiPV<iCRhCB?yN9Yxu}uA?d70makU~>}vP~WQLOi`+*5@<d
zr>e2#GY`a;-%8Y({BAvfsZ^$Ke0KKR<*Y&PFWdA@mhT;228E!K{8n0#H$U^)OG|!k
zZE=7!xBu}V&%SRyH~3TcX*)`|JHGwq!Rj!snQyHa8<(>;D#f?4CUjW|2LD-gaK)<H
zbVvGDH0n!dWNEv*FcebY)n92pH5vRhFTKYE#L%Azh}W84T|tefl_6qkJWU}`4<N*l
z#MPgLiVQTMgai$Ugz>9Gpf>ohuK2@j_~8(~qGIo!rZGDA68vK>Li0Xz#EGV(LbQP_
z)NK@cu?3+$h_(=^Pbh7IMGuQcDzW#Fjg0jpOC8l4g)?kTR}30k8e*cgrK2H!TT(8U
zD5!E;VxFj>G@vbC5^>dBrj_QKyVmp<q^GmcBD?FlL?bV@cjks%T1t(>P#_^17kExE
z@0=kit?K_yOjL%9{<<2H8xHhNKlA%s`L-qI`O?d~fU5P!#J5Ifz!}O3Z5*X|1x`e#
zDkKi_!vIqH*&?N%t20jN=UQ_wJG8J(1F-FCZ@H%u!cXjHwbjkF%zZ!D$Kgisp#sj{
znQ!~Knu$ACDCx>i)u?&)uwI}l@#l(%ZNZO(moJtibsZLG9N6=u{=yO-iN6Wa0B0QD
z5l!bf<Zy2A?d1GEavuD|j4X<fZ66wBDaq-m11)vPpK)Q1_aNNw{oY?}idJv(9vMVw
zfPG+H>SWxTdn#7S=Ko~kHWDc(5#c`T+;pj1>|uR^J~h6_qR-64zXU(C#Tz^f*HugN
z3m=2y_}h%0UFs;eG-Xf(<x^Xtd|niymx7wooPSf78MDDzTb)OKFbUN6xf`3^rvJBj
z(T>gr4x_78%?$>1sf7X!M=~n)#Sk62ul*V$B@{K##=oa`MqNZ7Gnyvj>Q4!k%a37S
z9fZ~3mRndj1wu`kV0GsEx-brKmbh+Yrc6CRHIN??lo{KDL21(iKerzE%YOqhCtetz
zXBnTe2*CJ!k@5Kw+@r$yJYSl)M^`roIK}JiRx+F+$ZCv!>AP->e$Cw1AEP>|5YK$*
z6D&>nKV<Jmhkrg;K!Xmq>6E{g)t7j$?q)E^7PCWSUg|b|V>|*9N+O2}$K_@Pa9h-y
z;3d3J-FBS~%1V-WkR<X~D|FmgJomWwy-hPtG8%nGAo>hm=W&|bd*pyW4R*K7G494x
zEErQ+a+PS$&;9jOb!@6|Q9`W-){ylegpgh=Zt*B98eh_&Q`f1BX5r@k@lH3$o}dJF
zkV%0gUmK*@Bs|w=xrf=H6=#ux!Bi+qaIYVO?c{qnvv0)moJUH9fEjq}_{LlIL$dL8
zbzX9{p!AEHJ@CmKGX4-&G(^fW7P~FWg_F(FBXTa2HaG>Z+>c1!HO4BSxhXV77Ll56
zE@u`n;ovu-n#>h$&gLw$$FUkDzQE*Zd}@=oatYiB$<IoUgq)u~fA$cZpFJ9E={?Bm
zUKW{+ToFu(T(LKWt}_wlvShzD&zt+aiaDfWUh0DxwHbsPT-zP%M_7nO>+Ph0^2{p^
zdW=J+6XtpGCv~WrPy5w~oUBHVs868mL(onS0dri|`Yh&s1_WOA1ewq-PE_e<K7pu#
z2GTbo_+OZS{C`gRh|bQ$(#K;Rzl7jF;%ryY97Id^>sSrnIKsD2OIy}H-{W^Yk3tb*
zrKkA_QMi0Fp8!)h)yH|6Kb&<(=2rB6`;ZKVN=(q+km4v$!~+&im->*!ZHLbJuC(QV
z#FAEO+T!8f7%kXVAz>6$d|rslG_~+ZBjv<zZt-ns>^C^p_AkULiFxtIIpF1IW|sP;
z!=iqoPn6>{M!A$Xy;byC*>;J4gHHSmTcZmT{LI%cq6TF<pfLUuB|+rO?WOd~&RC`~
z!YyLG{3zQ>B=Kqz6abv2Ry@7i2FmErvExjxR*4U&fY#d-aKCPXFYUF$9V!<Ao}}bf
z$SozZ#<Uyyf3*!2uQNSSE&OrTWaAF^)9vAWLMdF$Uy5W1oyaKIA-WSWXuOnN8rsx^
z>8@3d`fIo#HLmWvwRBl@>ZiO^MBl0DZAZzdAD{q}?z?`*cFlxz&g=XWJnW`dSf-qZ
z^_N$2Fm-DmvaSjN{kJc#x>LXrIPM-IG>>zeTL-{CRYa*$NDi@Q1p+wY$F~l}GSdkm
zlouq;71*4vnhxd5oXeJD&rMh2{S~(4q#04~3Xzy;Y*9Z`rs=W^o+;AjP~T<wG(TO#
zclulx2fSz>QuJi|)K77ptgZgc?i}@yMw(EeS_)Aa;6k1H9U;i;yjOY?KY{C|;he-v
zbQcuCleW#FopN0)tHKPLx+P1d3LF4p?hKFO2dS9Nzwgs9zD&B=d2CFmWQeWapIU1x
zd2kjv<Q~B^oqZUL9FPs4S>*i3M7d>r_N=e?%fCRW7SQ-prsLA~jbD)mZ9nA$h%7(g
zr@u<mPoF}BDfrX!Q)u7mME)ubKNJ7y_<xmlpFWc~GwtTbp1L?3?T+4&wja~(YH}4a
z@n7gr*WOn#&A9N(U;Z{f{dQ|=)>!mLXzD;YkzuFq!iclBpdGKmws38D@B0|02cU1>
zPFFJ;`@D|LG#M}Zgo3VG<@BZ5vOKUB{~6oUPfXj5H6S~94=Q?YQUcqb(xqHJ1?)40
z-46T;?0D$heIa}0ZO!N1f3<CqtNQc`BfW}VZs;p#81^y08v1k&yN``(Lm#pO9)e)1
z^AJ=cs78>^VfWPvnk8tKrEXH)**n{QoY#3cb-z&Ql=}=lkzM{0RWQ9EyoIvxW9!)m
zkEBoLPTW)P(Y|VV%I=AKnn$#+PQJ2$;-2Oa?Q4>6tNH3_9;v=0nIZV>6Hdgp0E8i_
zzV0B#x-dongdwTEY!IVAj1d4X1U_=AZ-5|x6@5bjETa%|fwW4sMvvDS(MY9Lv{;lR
z-Us^%6^m8&N(yD=JmTdMr4cx6+x0QC_P8dQRvdpeG6-gnm%0^A<NQg}V-of9%sUWJ
zr9YDY(HSz)d{$&WT1Q@JID}_+<yEerF(=={d(JOcT<YfY+;>RZ(&EG19|tcRc**}2
z*VsYu-L=_Bc6pWL->VX2CjKLS`Ipby24vaG=JiFqOqJ;y0EVY$aGNV9HL|wu>f=8=
zr>wx>>;UpdNc~suk2mm*$s4-?V(Eh>Z{%<jn`U=%efOi#FTo+6TCj-3Bs##9emAHk
z_oKIy8#2UJ;t-UWy4ZHkdgD*xvFXSt5jX}c6E>3%5};3f)zydC>o!-~*DO3Hc4WOS
z+KDK#9hfK<JO0Jz9dY<id@Na`Z$}`yloSSp7K<Y^L1lyFy9QS`o2FJ1>XiLs-jXfS
z0`@u|*B3;3?rrb|@7@FT_jWL}WRbzq?pwd4I%6)kNk_miwT(g07j&?D!MhdkZR+C8
zq{Y*1oF;T;(&Fi_RP&AoTWBWjX#U4gfAs3=?Lim*6F@9oW*1|X<lW(|qq#3o4wdI~
zxS=reQ3zpU1R%O3#Rx?%<QnGar{`6xx!;1|lIEUUlb@;lpof|;k4ee7@%W|OrOBku
zIgDj<6llaQHwhArgG!Fhou|BLebAlHL2mfH|3E**GJi5)4aK;E9?Gi-!TLCF_OO>?
z7Y@zyWQ@B+Fel_a%+N7KH9J8qZ8?r)_ZiiM7!yNcI#m*$*S5g^0)6TNxs>QIXF1(@
z?Rm=^&ay6E;;eRs3X^^<*759X;su0fwWCuZJdq(etEhWAp696c2uHQWQAX^O|J%{>
z+T38J2lqe4399Vy#~Ga2YA}m~q}bq~x&kDX2ItpQfTYYIzi2k;oG#~dJ7=A9Ecs}*
z-@OeuXGl(25EO}ZWI=B|pf_w$i^ZOJSFH5N9hoxh&pye0!o}xZk>cvSD<|YTwHYjZ
z#-H|hEK_zgo{og%$(j7uY=weLE&8Lr7<G*~?~0uc*rx@v1H_%5JN{N-8y$)2ro~_{
z2tABvFT8ylc<JN9o0*B97W`oc{};hG0~ceQ@%jzIs+dZ|V1}qhL2He=6|}~7VuIjb
z1)+}cmmfNyfnUn_3}7v1^~umzJz=1%x#`WQwN0&$!SV~Xt70|S?Bczi!Ok7OSw3+a
zBm)Mhqyhb^G+cuc64ReB&td@<D&%57cL=NlQ2Z>e{)#YRKG$)Bo5|n8)iKzVk60?)
zJMD%_FT<MNelR!OImu`8_wtCrANqNyhXyfThN7vTS;*)f<bVd-sZHr8ZFFCl5`K4|
zry5CIdun@LJ6kCYWEM?962J!HnGp_S+%?zg{0$V?7rl4+ABioYHaNu%*?{+a{aKdG
zL=F!aw@v6F+{v%?EDH9=&H$V|2ylXU-_9u>!kAO95)c0Qo%~u4E12IwB!8`^^bv6n
zzDWGG6N*1Xe9Wsh5ppNL){BaQLF0oLczH2M```=2|C}`M6aB=8HQYpn?&Q~cT2amr
z|12;2^|3O;r13ppKXavWUkKAjalZACK34q46o2GO@y{yC8KRt4{4wdjcBS~gB7KoB
zm$ze4`UkPNGA56MYznt9{Wo#N=DnJ?u%6Oe5}(!9yytl?CD(n_uHO!PQwDC;UpL?7
zC1|g)y%px798Rhh>IH^bFJkqrueHJKKZq%p);<iI9PHEjhBir0;<_%buSZ^2hkE$q
z@bYu*r!;TYX()dA7g8r--@lBZy`)9+Xfgi2t&;aw0Wor=53CE<O(%LNUOFOUyj9!W
zVp8(^TH-d_foClxS)>NWxacfpoT8gQNn25nc*Df6e1lOr$Gd(~{s)SL(#G#xClSqm
ze6D7KW4#&nHgndM&LfH1Ul(x@91&Z-#`aYbwUu>d+wK%QYebWlEjp&=+E9^w$rU^H
zd)h2P<8XmFOW}NW9hz3`-o5+(Qc&j8m*%8ja)v$5LxQ>q>~;KvK$aA?W`|J35c*x5
z@1vnmiDp;*V&aGK*)W`5^g8$9IryhEMdV(X#AWB$lX@LDs?m8dBjh?LDXLCJ$i;O`
zpN0`K$xBV6#E5t%eyDBKo^-`FHRBGu<c=n+*d~E1H1D$@8BSf?{ucMi1U|9zMVG~=
zC*oKsFHS`7@{Nr1<+uU9lgIgT`~ctTalRZ!z_(_cFUJ$`tsUpfaRq#5jq{yltJ>_X
zUZ<{0$|tepDl%H=JoiC%1JY2s1s6Lqe~wuewX;cO3HKg&M4kI57n3+%$0OFB`J2m#
zg-IFArz*1dz;-<RG`9ji_ZkMt{cSzuQ?!6_{xI1N(E*Pw*$(#zmRoKAoMln3RQ?n2
z&*G1}s<*ax&awt?g8Z)v{BcA7tK?r6_~VBDWqv057N%$qzmZFAf0qA&3e<6NsrQ{S
z_%YBf{}V^c0<0#i!h*_c9_LX7j`xRr?g;2PStdJd=n1A)(guF|1?kOWRt$T)6R*9G
zJ}(lG)iHP?ReQBj+8g27xVM^T``%if&3o&3Cid3zY}wnyGrG5xIoY^Vj{|zVs>d-s
zhV^(xk4<_E@@VMSqfd_`dI;M#=uB0E@Opz5kA^IdN@a~hF?cF(92Epdx#OrPI4=K6
zSD3YX)svKJ&0e<>M^^0Zc0L>TY9%IU)!u#wr1oYVuzK&11J>^y5D=8r72PUkb+SrW
z6|G{bqAR+U+G=7I>sNp(;i@<UR2dWuC@2|FP&A;RY(PPofP!KI6-rg<3L5Nn(e7zr
zHOPy_3`+QjZayl7%QYi5doRZ^vRn}Wqh1G@zK02pKJWXO=9rT@;&t8*2CClYb$(oE
z%0^6bj`v8#R91~1M+LA#W$*Hq^t`+ek|F9XdduC28ApeRtja8^s?@WwG9stBvRY0{
zWv!e<Wu2V%%6d7`%BJDD-lB%eRvIFqUT68XHIT=EAT=E%z}z4p>phaEEGY3bajx%9
zeqZ9(!tWFo(N7)a_Y}W8znA!Nu<uk4DV*BD<JeI>WV`)XUXQ2rIIqV`dTiIDhf*!A
zx>3)@8&&+J%{Qv}OIvPK@s}oURPmR#->Bj*jo#QKCw^nA9lJsX-g!ZbFbF601PBA^
zBxl`xE)6yGdsU)Z_~6#5Oyf<sLI*vyN9Qc7m=s{#r&%Pp2jZ`Ve*%5EAU?-uEPik8
zjycP!CXM>GvlI#&;N36@tYWDXyS>z574rwHjQTsQV*VJG%9c^ww!h53PY7=NjFZHi
zZfNFXA(E;30I5wE4$CGBo(&%m2Al3MSdp*r*K~)!ihRju3pV?6ZGSEPK&pq)1^Q$;
z`~Y(*Ub^j$OvR*mrJEU1YM(00>ed@6z^2+w%cfK!1&Gbw*Y<3F86Vz}nI6RTq+d81
z-wWb5ax}$#@Jyave(?nO?b0`z0N<fNngC02FPi{RypT>k%!oHJ2pMn*4LS(U<2QWe
z9%cbV=V`CCcK|vmOSYeBf`!QbYfiTwvRyPMtA{KX%^A=`c8f{{u>Hy>A-0ljdu{am
z*s>oxdc4#-@g42S*;e13bCyjBhtD$_KKI^rdnP`MlS-%kX}yK<mYtJy@i#47a3M&e
z*ze*M{4QPY_cpiE^uDI^g~3AlLb+Q=pT-qQa^IUEVJ_uwHcquII&P`i>-U^uso7>r
z&7eXbWwPkc^KUvIvn>buWN?w)QE-}8#4Ffl(L3Mzb()u5h$i}W0s9>`0y-(kwkR(P
zdUg~S+4!KNzQU2;#M699$20-o$*=W<2wwIp_q0K~C!pO=4G{|A>oyq*WtXB&#ywwW
zYt#CyXkxBOGs+o{V4oB94ze1yUI^m1zN~0$R}PZuJzrt3bCAnvF3bFkqAM2Xr0@AQ
zl(r+eDNcf2$0<GwwnrrY7@_Ba_=Qp&d`{(h4Fr5}^JQi7TAx#V3VZN*qP0=6)=MfE
z6&hDlzE|$g2k{HVICx40)BgJ46PFc=ZJOd!T0SO*HdfCE&Es}(g&^4W+Me`sEcre<
zyYA&}sAPy|2C}`U*{h(k^s(AUo0L-&c}xc_n)uGUHLs54!hEb72K1&)CrOmM&oBGb
zeWH!%=qJB$pOpQ|PqYmk{baK~naUQ~`mnfs2h|4sDhZTx>p|5YOu)-r_Dy>1(L)Q*
zLD2~Zcj=);hV7Hr+M|6`kM>d9kj*CGhrZ-b>tX+soW)k^_$6Bqwzs|H-@O-};;OfY
zEYy?pv?W3U%L(Fzlf9Mizkzwgej+`DSw!gdtM0o6hAziDvu&06N(D-Bzt2le1ZF;e
zq$I%>B7xlQQqLgx;`!H12U?7-Kz~6yzS)l@{*}l#mL$qyhgf9r`i-R}i3;pUuor18
z_)Tzr6N>$=DoJc09yrLw8~mAOCK?lT@f*xu8zG%f!Uf9z69Z<(_PUq1Fl5BugFc%o
z%CpfZ&vv6cTcG~fFh-5&j6?PD5{K&KusalJNMahrX>^+COz6?M0oi##&`#}#-n&ra
z4600Yn}|)(pHb0jB1r-0G$T+eDFcL;3W1|i;HVflD!^Qko%MQC33#J8^f0?}uk-7t
z!IbPLHyCY@&#k-4SUa-nG+#W_rX}x=W#(N%|0n27?U}{fL4tD(&7Q$|dbGr0=5R6(
z(;felX3>K94g6tQb3C^z`LWVi#}h)`zh%J3led1;fMok^B?kDVhr!~nwH;!Rc&T@g
z0vuOF_rr!SlfP+^WDD<7U)xLb?tYH(caTj5oNKqe{n{Pc-Xd7DT1eKby)GryuAhO)
z9qYEim3ep@l3qsx%P`~YWwfUgw;)?PXRPqjCkNls6)!~>p+_e_6O|k}Nu2T}Fr9vw
zC4BIiLY^-}nA$0Zg_A#>DLbiZv&Uzx$X!=X2t2f;{P<&t(PyydBsn&qIs|F~6txal
zf6}>~B^GzBplWg7>LjcXP_SmEWF%!s5)fE3Td@1mC60Z)308pDc~&2=6e?cscqvOD
z+xCUILqOuqp^#z95?f$kvqep88Ifq=pDo^M#}5e)_FP9D;dV$wE@NhJ1=OJ4%8%KO
z4n2*||3+SntoMJ!`}3>wHRk>NYqW<AgU@6tzQH0oSq8LUil>j|uUBl^cay`xT%wg~
zXG-aUZjP1CWk*=1R%WEU%lWV!G#`e{ea?r%hH>6jl334IG`sl>+WMa)i~O&-^1+H=
zt8v^ewvto@zQnuYx(`jv^HEj=gY<F7ihzef`Ba~o+q^$7yE>qOUvylD7zBdd$5aSc
z%*3{G@6V%GmpV8ic!Gl`3%<(1)q=|$TqAg*gKGs}?ciC0Ve=!DpS=nC_o$6#DVMNZ
zhL@#XwnILw(NMHQUNrHwW)MeVhwSW^*v`~;$Y^!AovH1SdDG5xuCS9(gl&^~GmO>S
z2fMDfpU2%$m3Ol&etAD}w5arsDCmfSj`L%s{IXEmu_GWR`=WP1t#J3#1VY<<Sxe(F
zEs`$HoCW3yWI6ouNd<!6mQ-EKqw!i+!zInvmh)`6wt_G=!AbQD&<eLx23qa5%0Pwv
zDEsKF;!*y>el)Vy?UM0Z5p10Qw^ys-+KAPw%IY=Q>NRC_U);zP7`%Z)9t{{qM*`9Q
zk4PPUOt*$hjm~DN@)g4K^iV`jgW>tG6C1~LVzc4-pc83p*Xnn(N4=W`@5^GOESh)-
zF(3&YV;OP1X&nt0y11jm-lV7L&BqBG+toHgTvxNpE$(V>Nrif|!FsdNdb9c33K<>j
z?2vIR@X-9+95$x~^yoMq;K^>)R|vBUV2m^(6KLSW5p9Jd%2}b@yRLH_QN@+sqf!Yu
zqDT~+<wJ2q+o1Yhg-|$x(Z<C5!RabNE9wdoC|#5PCo$qIJeR2CULdq#5^th29$trV
z^9Dq9rHPwhW5!@*Pr-WxZzaID&gfqLUJ@WwD`!*TlH`3+EfO$XC2U1xvIN5<7bfUX
z#lm~zWH%vPweJ0*G4DG6jM4{t-Vb110=2oqrsjs%E?TQK<=zY=KH*ikY-_&tW-Iz=
zS~FZ)JwaOe?<*O}h9CGfMpf)jE+h%@Iyi1BZrpEa-(#U~D-OMgH=%v->f-5>a1$@O
zNDsgV#vTF}+o8*LJYX+EiaAzdvi(|Z%R%AoZx*nr`atcbIm;R=v^O`dBHYDmwMVxo
z0t!*kB#U|oczXE3IQ;wk-Ii5R-uGQOZ*A*01v#^WbUF7{_triR2SPb3!hbm5hx595
zh9-nzxtyk@5#9s8*?HuEu&A>gj{Y5)vgvRMKHz75w-kcsme}lF@UGx{J9gGV98Rxp
zm93H3>=Q7z;*5Q<wNeMXP9)_e(dr(rGYc@cdWYBf&j8Y;?ifgVk1%uYF|t%hH32!e
zWP8Znnwa?mJDt0zLOh{GjwiGzVLYKl?eK)ey)vHAq84~Uyul1oDnU{}qB|4f3MK}@
zl?joYiJgkse0@alGmfRjhA!Otd<h)h@+@fMxE`mTQO;AYcd2^Ef58+!DzEtUweq99
z$9LhkSAJZAAwM>H#}Sd8x+uTI_4V>&yLtQ!erM&^a($ir*ry$rknPk9@{C?@Mc;&{
zwmqlB3|rK2eUoA$Q9FK6@69*bJ3ApZfm(y`TsozSnj9OTR!f_!BJv)QcYBpebF5Er
zyh^1#mJ{4krQDAV2~Je0bjSJyH&iL@V_Ct`s!^uex4_xx%VWA%q=qro;-%b)CQkKL
zi5e)FNoW``=yYTm-q)>SKy9kt8P#l<0?#2UaRzI<m}~DO4f_J-n>*uN_~1aTxG7}}
z>Pcd%6@uHZ3TGGLjb)l$lf+#siV)5%iy$W0B(d3wBDf-14vpB6v3pwC5^xXprTll9
z{Tkq~jF6@MKN#R$;Q*JQ$p$!T9N_?mUB&)&Jpa~v<gIx2(%#0;?R|MaC&<Om{Q*KZ
zI$+V%Tl8$aB_bzrOSPPqTWaMr+)^*6{gyf#|F^X2EqaR$i~*%+oZKNn;(t3PZokV)
zoLuh_*0y^XxZ|0)$6??eXK6bQv2q*+?r|ug<L5OK1oU{15PqfajOi=pFW(sRSBzgi
z8PEJRmaj2?jpb{+VB-LfGJmH|r!q?$r>oja6Vp}gr7hD{?WOJ0>*O>{SM8TZr#H!o
zPiLH-L~R~b)GgsSm6-mrar)~*jc+_mG3KISiae8HDqLC^M8U8wjM6cImygkL!Q)5h
zxZu|rpMRwB`GQ;9_A^m#bTzp3t(swT*h|0_u09g2&CXd?R>iP~M_^TKf2&QIu`!}m
z@HS9Oo_(lo{T%l7g*{Gc899*p2TBINsN+6`mU_K>f`)vfT>iN@m&C7yYtyaor`Ww(
zVSVpYyUF*>y!TY^tL=do52+F`HY?ABG{B?)T9~^qe3A2?7eXdyJu}y>BPeofw=jmu
zc}l}zER(ZkDvA}a$K|Y@50NNN2_shv(&v8#mot<PFd_bb{^tKF{LR5>)_ny=*RSZn
zajejaH)e%i_q$?Iy*?|nf<4@2TuNhe8~5}|tk8=8JS&uPUY5!KGFB)GB#C!I&l@Xr
z!11H5#1GX4H;x(g#4O{7LhXwmy5zr%<)3*SJ3uTj$BZM&74E#bz0ij+<0LIzA)a`;
zwxFGxUR=1+0rj?OKfSo1;^q|F>Y3BJbE*j!3AxiH16X0|WdKg&48SR&0XV%h0H>-3
z@cJghyJE=V#jpJwi1!nc#U&tgS^|D;3^fDe*BWof+D95XF%DX#&<YmUuWBsMw(1Hz
z+o~(@Y^&het{=m*{o7)aJC?Z{(b<23<c4f^B=_e4j3S8xj3K$l2$a7eoYx1b@@5d4
zM7E+zj-zq3q$iLwU-dhVE-~qBNQpb?EVQ>4Xy|e+(9q>tpchFxYvg&WlKVeKxLhTw
zp|L6=r@6|}7gZ`gq_EK!?Nus11hAtois)bomyHfyaGpK=csdyRVjLZ;)%h|qIG``y
zN)q-#5rN_27wC&|RB%z~m1y8G!Ix9OV}eKNU!yO+i^gIx{d=X+cE$Z&cE-unCa?1$
zCb8}BBsPs|e<x0<+ejxFQ<E4|3@5Ft35ZC}xZeX;O~PA~Iz3L5t495fv)7SO_t*_x
zOm>LWF+yD!x-bAGv;9v=bz`&9;QWbn>)FkpI~*@P6>eXqI;vz-xOq`SaR93J2o%+H
zY38tX!CoKQ%lChY?-B04m$U9K_|sk}P)XYMj3LIB%|XUnL~M==MA-ZKfsBgR4TOA?
zq7mZXj{G5aq+klXv-}rdl<lS@jeeQFE?&^@OIMklbwM1^3;f(^ba+pkQ<0andc|1c
z8az8jAXJO}9Wz_DOC)uG^YgzUK989#n~&Kt#7sT8&%FRXhi~6RECO}E32&`>lUn;@
zpEk92)Ny?d_6g}L*jJij?kiXLA?n)KKSH3$#YYG}`|IuqfsRqdIpO1u5G0ipM+g>*
z&;RdvN`)ffHn}=N@K=N){R@nj8IPy)N%<iN<cwrK_d{Y97+NK+qH)9l2uv=(g)-x1
zBe{2;QY%fL8lw{Be2(gJ%T(-eMcv`&d9b{y<%KCO&OTTz(?|1j#Cyt2y!o$#69jVr
zLVLFPTNFr<rvyc=!VM1)ES@3$o2;3YS(XuHS;)JPEhpxtR;#s-Afz-DHnk?d&-m(`
z515xeg*{4Zjt#kscA{e0SpqrI<(Bg6l#5Pr=?1uy-pVf$NW0u~Nt*iG7&@)}QSsW^
zCcz|<9;LjtMX{KRO;`>^E-A%h(ObSn<{<n<r+J)05R#Z$KZZYV@}avp;oq7)OO#{1
z2x56#B>gO5h(+2J-UV)GeY~4DZ5hA8mbCR*#N3jpx9$fRMEJYt$-BI}*mSl&L%eV7
z0obl6@M2nzDGEWyq!M<V&91okg9oU{5`flTvt8FCGizRQ=_O__cGiyO<{;N1{z1h5
zl41StE2E0%$~pMB%7_%Sb<=1Wuhllx74b(Wf+AvZbV96SD*~{;XHjz7EKYrRsh=`5
zUbp;jc@o(nEk`40d%lR4DYW1sRFc1uC+CxD#S>vGK0(P)-%>|-cOl?tJ)zc?$lY2S
zE_<sK5C%!^*yE}Nf)fNI-W6-&C2zettjdAG%jQsf(Hx2)k7W*VdGqKTGA{b7&oMFV
zjDxU-NWpGgh&qSP033z`a7d415IJIuKL+v44?&#Y9&=lT<Xz+JIZDdMu%3p1F@QK;
zAV_yu3^8W`)=zaeB0d&eU@=)-WaP0!#510}#SiZBD%c+x(hTyZ1xIudi$<7l`jZUc
zh;RmfE`lbaVh)8siI9ap!7W5fh6(n&Vx^{5hd^xz)P+EO2sDL2YX~F(NV^M1f67N!
zEQWBl2;NWxuPcHdDuTO<;0KG~{-BD&x;H-Sc>Er&oi>(a!0f7YWxeEwr<Rl~OHF-P
ze(8-<`)CCpwe}&sBLk@pnmb}H7655(8MWwE^QYZuKZoIJ(V{cO@XQ@3fIl9}$9<0Q
zaw>ft>D~8R65o!91yMtCvTWA57ytZu%RHa^_A{=eZMevjKlFcns#FKDKLScz@E+wS
zJ@41znX-T71I$8HER{^yPw$JTQAQu({Q2^C`Irdi{+>AZ_h2b{`4movd?APJ>RR0p
zgqiaAO@rK5Prxq-7$V@4R|*)xp3aOb1)$K!S!t>LpkzRP>!)f!7F8+n0gm?`=G-l&
zuuuETKYGPP%#_O(vj|NwxJYw^a_JZtD&CQqo7M10{;6=VdK|o2tA{&FBl(Zc3A9a7
z<VjpD=Z@1)RUy>Od7ptGUA54|WY?(CZ}vJ~B^*Ka8IxVV$ctpxoBVSJ(2#J=iyHmA
z@OSz$%AssqA&bocvl-FCtLV-TXvP$z!<ygY<Lf;tWqDV|=w|8B<Ab7n=p!9^d{B}P
zeWX8+4+`?3k96npK{-D3k={H$D8`3A(wWC+)~L^@e*9O&=lcyC*`~5LDg&>Ft95|G
zL$J{|$=FP4wj^LC#J@P>WETvNNZBb6Z=~Ur<Qb9pscOA7pQ@G9a;jcV`>8rP@l)E1
zMH){vfzx!~U!xSL8WD;%4)9~)Ys~Qjed8w;niER0vP?3H%H|WbdWoM<`jsswlyGJH
z2_=lD60U4K(S#FOaVn|)0a|rRNLtY9Q{4fy=9Gqj{MMez2GGV+{emcd>eN60tvEF#
zNM#%?qKf8<Y2~(BknxHWNyP<Pbt1^Ia`lPsz;DfoAjitJC$fRx#uGt~m8la0@}p`i
zPAJDfWsap)CaPcFg!)sEWTc|d(3HM$TtnBXO6{~awTRNf))%e+vGP|~3#+-U6|3c_
zy>VJx3%AqW6fsH*TPH}V$?JTY^(&WuQdDj*593h9nlDVhI2@U_w`tIoO+|ZZVh>v7
zq<H<@lKefv{JZxZZmQ+H2g;buu?)s9Y!<#uLyRhojGkZlH8mCbT5Y6d{0}8xvDvh=
zNIJd+#Ws`n{L-nBOe7=4pZmJ;l|XzmyfZs`KngYi<C`O+J~jd4n<tO@*aVDkt{(NV
z2^im8GwNd#Fuu8V)W;@ZeDkbPpV0}}tO@w}(FrJavg{1><jU-h1JtJbkt_$-8U)8P
z7R{kWi_lBd#$cpk7tI`L*rnc#H11MwMw)l2HzO^(>g6PMxzVz#Rd3N<O*mG6({V!$
zE<C?5BuGLWG~swi9lw%`;^oA5m&j?}T`Q+?ceR|B-Su)3yX)k%?{1Y7-Q9!}N$pCi
z_kdRH3P<#+UEP7->RsysXw9x{0Il8CA3*DO4Fu4}T|*9X<;{|wD{sFbE9!tC71d6T
zRHk+(-4~TBb_e-ZuG-xl_^sX@<XgFBcQ)`_yF19Ya{casz;EMj<vXfK^5;hPa_vA$
zC(<?B%B<PruGt%0vp2eCZ+7inXnkvsc3PjI`LmCSCZ;xlx*;frUT`{y1Gu0Y>Y=&k
z8a(crzri(lqig<V*WiWbR|aZ+o29yJP1+g*MwzUidc-J`V8Z_3Jt9i#bz05#*qD^t
z!!0!sC_CqLJOiiZ6*kyVgbdC5ChHOnNAbuWj=RO`Ew&7=c7_f%&?@>{iiGo`e^XBw
zeJn-tdr%~OB27gz6%XJ#=!7=#$7va-)ZJsHr(?^jAZ&jJ0%a!VLoN@1KuK_l{t6*b
zC^OL|yzG<f<E2|TG6{i_eXZ?JM+p>H1uB-f4Xm^S0%fm%6@>SgGMakg3x(pn;EKl`
z9OtD=7Q3{w?TI+{S^71Tp*A+q)VaQ+AV^+Bkmvq|mxA@c{{S`41{TvQx?)Sada*6)
z$~6!H3QCFt>H_!}BHIHc#r2q|oTDq{0|K8e>{Be>bLLVUb5~6r!PTiv6-N7pa@;u`
zmo6qI&h5O!K}V`Hpd6uPBl?7-a=dFqY0n_8!ShFqEOlgV>?SP%bbXz(05FMabG1Zi
z+YBhyiSM}0R)P1=E%ClzlDBgJlu0GGCbEo5qZo)yPqxn@_rBwfj1KT~yryF(!RWo#
z#xW=QfUcPox$G{KI_oa1fH<3izjFEqPRw*M58fX=L|$bWnC|uIA5>g)nbaS$GAPz^
zVvP>L;`r{<a_{@mh$$RK4@Cq^Ig2DG3pRzr=%H%Crf?WNR3q3F4x@)^1)IWQ^w2E9
zC>-hnsmIzL%)g}8W^i^qjp*=e;QCwv*TX72t3J^(`V2%Cn4#t^2hbn_STymwV+u@R
zA`K@?<g}lR$Z0%TEhm1mR!;LtF_jlZPuA<L<)mSr#K~6Pde5s`z57bhB^#%ShWB<0
zJgPuA8O54t1ol|qQwjt{6>Fjq_>u+Y75E~*u}X(5kV_hCkUK4Du3^z#(o$2Qf~`84
zq}K$cP6jwSvif9q;J4yrfTJU8PG$qYjVA*f9a(#FAn;p%QaD<bas{-iflG^(?B1wq
zu2HONzx+>Ct)V7jbu-1o8i)nzhT@?e&xV-$9WdkIduE&zLD%^Kn5X%l^O&*SU0t&g
zc6|ObT=u{TEZt?3+8$IX>iRa}`Zn(Rw%PS<!^t|eSi9@nM*3EyAARf9yl>5Htt!1|
z>4n}c>Ix2a-<?Gtk>8>ZeG-%Y@JaCNeNHiUD+ZjaKx5H620N?wf<g`{WOQ_R#~x!4
zT8h0-sGsj<xe5k{cMOi$k}~yh+<LgddbqKsf)P&xunNf{$f`|<uwWObs3F&rsX2ZC
zfX#tYUB&*xO}Ym&dFw22$f>r0-137Cd1|-k@JoJ*@C&GIzIJ(K_m#^;$6BUycrrIr
z&z80yW115n2GczqPciLYnZkuNC-d`&=Ie&MV3F#5#lNW^%#Eh=RxC7fh2_@tY13OD
znS9^-`QTej)7&J=K+%PuR19zBeS8i|EAu*HXhf2IJrX%TgW5h0hbl0=p$i#mCSLeR
zF?O-@_m!z!t=K-C=*(4zKz#_*g+OZvG=)G{2qZ&bT?llCKz|5iLtrQb20|br8ym{K
zBm`<hpgIKVL!d4MT0@{I1iC^X83OA<pgRQmLm(RhLm@B_0uj2yl_vyhL!deY>O-I|
z1X@F&DFnJgAQ=MdLZCYY`a>Wa0z)A%5CR+s56Tk)wILuoFILq05RjD@^JxtM*?7Sx
zn(MM>(w^(=*=^5$domA+Gi1*Jdq#Mc!8kyhsAFpxr~GLkcHAL>5^Mt*R1bYfn;$oS
z^yQ#^i83Extv##lS#Qred$!uMiDzoi5UhYC^Uiz>y~?LM^fBZrpKR!3s8v1#_!wS8
zGL^K`d^gJ{Q-+O^rzUFDPqG3g@m2D5Djr{CtD&9;+D^te@buQu67JB)Zck%b`Vwc7
zhuY6*hGqFG*GWf_;)m^C_^zy|xYE;J@}DCmC8;dNd~rpW$N+;Z3~#_Cx(;LD?|5kr
zS!|pvWC7Du3#tyP=!D{RGqT)&9`ca#Z_=#IlntX!iuZk!q5iO<aO4}FPH;FrQ~o)l
zbc{EGK^1r+(10(%6X{Lw>%fdy$Awz(%ggz<DH<F&uCkr+viLo03x-*Yha)W0!_}<L
z!?j+ddAN?Xc(|U8*l-gFeAtYqor^%Tm!0F$c&?Ub!?|jnE$8ZaHlM5G*?vwD6X*CQ
zHJqfbY^8>~%w09yZSLyfb>^-a&YHV+xZm9M!vp4S93H|Yl@;goAwcR}(gCZ_=~H}G
zo$GeM+H?9CpEc*Q4%m1u_<a4j0en;rwXDh^+K`2OxZ2#+!?otF8Ll&T?Qp%h>xY}n
z-8kHeYvs^~Rt}{Eu=<=n1z2@XX#uP~r;h>FoKtE58_xxwuRo{sIPgV-IDi&$08Qcm
z+Qb2ChWj0`b~x*R^}|CB*f=~OfTU@U0HTQkh!zPT8YzI*p0hNpRF<q2D@<kmIVH?Q
zu9Jp_*LlV!GrRZ>nvHofDVAwC*W_nfhMTzdwy=lqq9)PXx_QQLTc=}ew`F-Y-qz2v
z`L-dREw>HuOxzaX$&oImR`jF9uZ(|`_>~PGC4Ob&M~PqA@=;<|Hh+|_DsTNLkHkkK
ze#a?v70<Ul7-AJRh~XIbQWBeAZZqU<_`>XT_y#MESc#n;+6ZAn$VSK)T$CuEFRvgR
z8zRf&n=osL9S}hgL2z5+n-C&m|3gr<plU&wHN?(`pjtt-f@TQ{*&}T~)xJfelM|Af
z!Q$6eoH28af2T5MDljYml=pz-t1{=h!#{`Z($mf|2U~)-7Rm=(dlz;Z`$9X7PzIF$
zQ?_nrXWiAsevWY6g}A(7mrg(GB1Bjzf{qAc>)OyaAgEeUPEd`YAwlfY8v59HHtd?^
zr>puvc$_cV)ArK<e_;OkDaie=7UPctw#yFi$5i2u%tIPQ1vD}`I-nn*_sY)jh@Syl
zL>kUi^K3i=x`;HNspHvl26PcgoN40Oex{Xqezul{jbVMZ+T7J=>&;ztw$9wOXIsr(
zbGFIcjc2>eU4J%-ODd@|Nm>tJ#hES#tUA-}fYoQzCj_lIlXbw_GyM)&e`de|8_x^@
zs2uAo-PGA`b6213H+R+9thsB?4w<{=?0~r&&uSg^D%YPa!L@Rf&?;7rhyzxgsdm8X
zGqny_bEeJ#YtPg>VEvgU2W&jk3SjMF>CztN(jMm09_G>>=F%SKwx8`{=A2E6(xxd$
zLlA8dKr}`G(Ha4?=1hM8tv!<sp!H{l0%+rz0YOaMv)w%7XKmu1%>p-`wTXLn0J!<A
zCT{y##Y~)K=7y-`yrB||`Y)zo{hntg`I&~HrdX!sj8Mt{4*c<IG5&a|fIsZ4kt{yd
zQ$O8Ru(!|A?pX?&>(xuhCVp?FmgRs=+&bAwYRaab)=kae(W{ka3900D4p1{b^tkt?
zQB*JxhX?(@D<sZ}{j%|m5usm^Z~RXD3;g1;&0#)e9F8`2F=UtUnC`YIo^FA>rYOGH
z#S+p41&wZX@|w-aYh2NzTa8^D-Hf~@^l|c<&B$v)A1ANbjJziFaq^nY$ZJ9$V;4s^
zBd-a4LVW0Tn!M(H#WabukH=W^`6UNyM$amS_*;YbS28LcpTO}e6L2Vr-wuD6G6dIy
zBVfP6W$JWVd{2bfm1TSMY}`{Tr(sXEoaQ}Bwz6eUot(rTC0p6PrwQkOEnE1FZd)hY
z)Se`RK?PW`M+i;Osy*ESw0e(Fn*7%6$p+BcJwj^oTfb)@fHv+CS{r2tuK{Vfv+65d
zj9=Z#b)+`RjBnDLv4h{>vUrN%nm1_cpm~GFZg$Pv;u<?)%}XmgCUB1JpICLba7sZk
z_4&*${pq@HQzEF=*q1gzZ3xtbK)nVU-!$RsPvY-oxWOGq9I(^z_4(n<lopjQc4eKr
zEP}OLgu1+8V7luZ%H;v99?+lQB(6#&-e#+d5NdScQy==wQYpspKUH+`KUF6vcy4FN
zRsBK0{|q)1(ajex!Ol!vBhIGR$>})brg0iv-@^5@G$^Unl?_9dOz4)@kxxsQFWxDq
z(Q_v^jc+{~OrKufa;lLM)8Ad*W89vu_OYD`4N<2T;Yqbhz~IGafXGe>jh9FeT<vod
zv$&4jr6D_IIU?u&K4A3HwM`Vx7`Jbtrd}tTFzczTiZ>5S6O_VT8^9hdEcK#|uD?xz
zrH+_t3l;k=?|OHgw={w-g1B>~syo@7?-d7|dJkAM)c1x2pryh#OX!->5y90?@HWJO
z#HDX3%;>!?dea>m`~L-iGk&8u*I%ytUARAEH>U16K)Q|lGq`a4VN@1!qxZX92<~@D
zEVa8`)Tq)DgP--Ki8ou8jT3*v+{^CG;CP$6+hvt1oB7c1FGUY$qF^en0W`>3;~-VK
zg37AWWpe2goAv2hF{q7?uFG})h_C!&$;X5kHkGbH1pt-5$x1qXB>tu$YEbQU-bv(W
zdWg$3hVx(0huo*ZX|4SIz~Oy{X0cNnOc+?bX$V9ZSQi`vOWKA|h9i!>^rQEVWiQnk
zmWmp)u+p&(aSRhjjdjSIVdAK<4tX<795uFZWr6hylvI;~Zc-uZ6{x%57vjTI$c`mc
zE#tlz01Bkon5F$1WR#R@Qt!c=UC+r9nG|JgJT+DPd9U*05gr^v?X5<|pzTH4&P!c)
zLPmwo`qQ=`6W#*Lj&J!J`j{V@s@>goCVzr6+%vL+zZIO_?(Bqtn!ol{eLAGONnW*k
za-JyMlhY$D;YYS}JsS68q_)Ssxx=wiluchwe3NdN@ogkw%<;EDkQR0I208{kFQ;WC
zeXk+LwoMl)VgWeaEY!TDtTkTEtG!)>&eemDqIuaRSaKi|<S6st-*B(T*vn8fFI0s8
z_V2C;BQ|)@;q)_Ht6_7?6ja@OGK30doTa8Q;hno`Cr)HdK%vBP+}z*2R#MW5v5%dc
zq?J*6O<YNOY_>9}m93a0hH#JsEwlHdxl8}1VmO_d+ys>=jd4$v9z9G2IJ6$zr9(nl
z!n{u1#3wuJN{%Vq>vg_4qKpwSS;D%nsW;^8cn`neCH=QMzD^ykG<4Tlz1O;Hs4}-6
zB3b-b*>x?Buk-BH&I(DNg`}J{65X>(HGJhl{>qKdHcsuv<qC<nX#BY=Ko%1eO&^h+
z6+WorEsp(^j;Ao|_{>FJwxTrVWzcyIK6p?~#^ocZNKp@K1Mgujr3Is_teH8moT)!w
zG0iz#y|~oM>7RRACg#IZYAktOsj2@XESTcMiQ-apDerN_o6;t&@*uqDu!iL>MbdR7
zwZ!F8k$LHzvNTCR7x7}5duU;l&MYCc#&Fphb7$cSv#n}JN`V(znylc8mnrKce=>HL
zsKhd#vSwSXI6)`+nJHTohcGt5oRcApi)xsX@ifLE!QFG=;%5~!Kr|8kldTA=P0@Ss
z!-ZNi-1><T(x2R}7%Lr#mA=X~*x-or_;0SUe)3Wul~Di`j8+~M!bt0O6^t=ES)+PW
zZH~z}yPASm^<asY`W^*S>kRM~jya4GtqpF3!Pt(X-TyaMxECn2#(x$+jei@y>@=)9
z=~ioE!dr<Q-rs=^2C~a}-lulO(|Z`W!!fQ|k?{hAhZB77lUzaZDb#m|*cdE85lols
z^%GQv?0S#+os(a*zdXj#N4r|~P}|QicM~>{%AuyqcdLlOtf8J;g9-)p{IcdVp(^qf
z>ZffIcRGqHP~~X5ed@{q!I_y`t=_?~rM{}C+ClXkU3$HDi+@9Xb&pKPc~$W0(F*pD
zui%S*rtF$g&!C2MYkc<Mwhh#9Uizv0rim)o0ou(;*TX{jGE3t+@hYzayoe+>pD0)4
z{X}**J#sbW41_MOzclSs`LMlY%t7_Iy!=8=uD|w@)T>w{a{UF3UG|tN#Hmj%FB%ET
zE}B}sr@+#^=4rQ&Mu*DTbF-8WxBV>aEbT(Otq{S8qYxV1D%=rs-GF<!7n{HXx^-#*
zUoUmFw(LRQGG9^k=Wdc<ym_BG6h~U6+j2Ax<D~;(KU*yd_1BP7DYxp-sxC729^T~i
zAsKgyl~Vyq2gUSCUkppC1{W?vNfev)L2--9mcQHf1+LhIc@qw_d6}6{diSs7dtqf|
zpo>FEC9Ax+IfZMc7;?foqw|FZharyj4>fgW2`jt7BffA|Pzwz#mI3ucII!r4=x|O|
zyxK24Wc_f8C!W?jcpf%S*Ez%LK4OlJLG|OXBqy=iFL|8_kO+$_V=!!qaNvyZtFY<v
zXBn&ckJ?0HcDlukI`(5AgZU_I8;)KuEVo!VtmtQ|`eWOA$&RHm9OU6ub{uHov(X`H
z_LECbe;ib?KR^q@6-VV0-Cyp!P$tRWo{7E{1$*7$gmsP`@if!fEhBkb9>`pcuDZEy
zA5pkE+Rw~<MHKi#gUh(ti*aRe^p%L8NxU-I&osOOQeS1a;&B_4z6|;kiwdt$*Pp7c
zi~%iYjNXq!*Jx<WQZnf$RLu<nOOIenw(h$dZ8@teA7Sq6m{#d(OIcSsf!nMcrBBv*
z-`gt5&1AR|b>)<B(Cd7c!Q7<elJ+`}()e}d7dMbAWW;XX)XEQ8>C28_z!vLZZe636
zdBK*tEZ7F?1kqJlXh<@|wBAc9#q3^{Gv%5JON69b-4kT8P?<Qh%_^<dM;lmoc~x0>
zY4IH>DMT7}AkL38?m)C1Y2Gml;+2C6RS4W}z=#3%l)Wmfa@@d~kOet@ef*G$FMyR-
z!)~LB6DBp7_o#ZxyY7H@-EPKdysw5Q*Y=2BZtg3W6XmiWaf|Ixg^tCk>MD*uJLRrK
z<NZP2z0UP?PyXF1IQJ#vJ3w;@%S&AKMo3;r-KLQyQ)O9J!|%mWk0|!AI<gbAICIa{
z{y0NJlutPL!nvd&@H%+UOTA6E=u#{rjEJb>F;?CHt0^ntbCy$v`<1a<y!3z)_SH$k
zXZauLL|rLn>VvVWA>yTEYiD%428i)j3|fuN(NW8}1$|FTx}p07!CN$Rm(b#j&JbUC
zod>8`T{-%2#K-!By<L!25^s<4R9W5Noet_{Dp*Tybn_^4D=DRd0lY;6*k$YP6O7G%
zS=rJF-uJWq?9(b@U23hxY5Rk^>LJUf?RS-g28p{dj;{3<V1Y_QxAK#<dQ|J74c66;
zft<a_&8Wdgqa$m*jjXmwd8wUf&2{L-W`dHmvEtBh_{#D)COA428TZb3-ij^Bp|%a|
z0S5nAqjQBbd6%imbI93r#MT?R8-5wxp-h+yg|wqPgy``pq+Yhec^Few6^`yuY@$^A
zndr5gPt+Mju%@lrHENuI#2OvFV^?bppE}}!>gIA^lFmnj3!r!ZA6Q2QD;`Fqz>e~P
zfD=x3dxud3-$h9xWfxVdJwd6sii|74`r4kewJ6vN1aw6H3EkDi^=y!qq?J|5-opsh
zwBqPKW%4m;w8Q@@<Y-===(W-Otj*MPmfw!H=kjSKz)~Po$C4iq49t;!jYdl>EUzP8
z-8P&A_O$(SOliZ}?$R2JLEBO+l3C@F8I@lK<xzU)n4>HsU=$PuemROl*vVSKIQ6nA
z?2`PmTs5Wr#9+7@<o7@`IgF-tC;#tyHEK``UVecns?5ZH{d$3W1Le#7%KbiKCJi)g
zA#<O$m*4~nWW3j@jey2OJ+58Y)Rlb*?`>~ZQ(bU-%Vi^qqhp9rH%^z+Tls+Y&K(!&
zm%rsx4G<-PnJ;C;79rwwEMc7trH>qK2^10&@31z}s9k^xFHU9IPV1wcZP>?^`0-}a
zzCt8<xxvkcZZz$*i4onQ*?^~<%IHox#MF$yQx2XsKcYLTou?c;<q)&RdCI|44xY8n
zQx2YT@SNp5<={C>OH^$39<OsAD^&hocGL(?U<EjWT|`9*t>UFFoT7U|-`V@ZfVSVw
zKckS25&I@$am|OgdPB>z?KkrK?CXF(N?b2B)56e_eEGY<Ni4d!+&;ez&O0OF0G5K)
z??X1WO7n9$lU&qGuMyYv()%Fu#`KbIZ<v&LyXq@MH?EU*w>_^;(y;6%Y484TvroM|
zZjq0=h_;dwCa4a=OHSxj+)F{W#eEcb757l!Rop*;*JZuasNOlqn7+Kh@^4}0&=Q{a
zozvr60#mHd2Q6V(cEMcLG#svbTuXTO%LblmGB%EUg3;<e3z{Gptf8N^K`>TBKeYwl
zs{TQiV@GNjuXvz_erk?zoQ8g*!&GK8@;k4wzNf~IL2+K8@9mI^wWUiTn4i1BT4TEY
zekGJ?*a^S>oIg!_Mc12r1sk((+<X7H_ao!p2gbeU#=Q@XdmkD5E{<s(=Wt=MN*yu(
z+?!-YtYY|%Ox5W-GA%ktbHLfd+hLbI3__d7W<!2{FN@Y`>ZsTGB&`vx?kx=M&N;?9
z4U`mq8-e0sKeXA-!LRK^2S>^}AvQUvdzn4s`y+a3*k3KDalg3Dk>>qkGe=tX*U3rj
zZ#C7_{Y`pnKZHnNfzD?{P^kv&5g0!Nsj@)!vLg+LAYB&d>~y5@P(7dwcSo8J)d4;y
zu;oxI;EMtihm_`mAs&^{11d@7t@^UE;eZNK89$(6RW=?_c`BO^s63S|2U_JM4p@0o
z`;$y;JR&RhcLmU@{oMhydjGlrTC+bJKx_AF1=APn_YVj{?>4nxm`8pa4<!W!RZJbS
zG=o~MIMnUJtfH$9t#fZy*VTuz?#-&Z=1{+Tvl_2GG~nK>((4Zm=`F1H#sf+BTA4b~
z6+kNvbO+F?1C|N(U40-M_^ml$iQ~8Sz(C-){(urSHhjkxL}YIVujUW4q-jvaGM~ln
zB7-VE8*>@o$H?OA4XXbR51v8gb^aBAOct2N%VmS<E8rd*&!SdGN0k(q86LD^149F+
zkzs+;%#groVL0F<7z#M;3<I1vLm=wp)BUu%wq67LhWLfsF?Vi>;lVMQG8-PvZg@1h
z;nCuTN5Tz{b~ilYZg{M4-AP8PTvr0EKGf}6EwaXSA%1IJ_W`YUT?e$$b(@BV>oVIF
z+73}gR8d_qgYsKBR}{K)RiQgq7rJvzp*z<Wx^sP@J2$%Sq?)O5-TBw*&X?H2gg7FG
zd~FPhh|hiklbK^tL?6V>7!<JwMfKP3)!P{^>eaan|8d>wkVS(-7L5*BG&^L`;*dqc
zA&Yj91xds~7XNSdYGKl^Dopy-g-O4rFzMG8CjI)tq~ADx(*J+gt3Oe%K1L^nlT^Jr
zW{zH|S6^j^ZgccB!0Y$sOh!x5n&Z0EuIE7d2<|A8Iek+<Hb)cHG?2~F4CPJp6{pEL
z>e*25*r2We`2?EB=C`ikmn}g31!$lE4FLsWM$pyWS_4zG1hn8Jgsk~f+R6n~TmHY=
zt-Owe%U=W6V8(t~U<<>&)HavwuU^v(>l8+-J8t1d>wj)dt7F~=Yg#>Eu%<Nu25Xwy
z$*pN_F$-3<U@;4pwnm4onjP+HF>J+})@t<mH`q_*_e1oKujpS=(E{Qn8_;KooY%p(
zR^^04yX_!aL(r|1w4a|iU9XSFDuQ)5qD~JGZGd)LT+tJ*=<O6;=Np5f-$`HPF(i4#
zqN}LClI|$<kxB{<AwC#IHEt79Lf2&hhg%k)DzJd#DhnRt$0?-+&-1%TWY_9$jfgvX
zq9G(9YE_W}2~n>%M?zGxVpyxMqv)8hJgKpj_~f{`qxjO*#|dYSr5GKoc0rb6bWk-Q
zVyknSNVqUeJLfQ{3#SK@%uiFUXbpj;5U3A<x)7)hfocFL-r(p@x+SuU;cuH$Ywz3U
z)C6vI;7$(QNZ^(`mmU&dVg1ykxm=rUwzhSL0Xi-7Wre9;Tn+6A<x{7ZKH}<SmR|fI
zq*gE8n!AKlOL1ly(Ui@VRe}64sBi*Om6Eu-Un#;|d5FXUO`xBV3@0IV!cn2x{9A0X
zK=c*<^UYoD+JQY}AMtY6p{J@1<q?v~P2^?tOyq31rXQA^{>2|lCVoEmyt(<@lja`H
zJ%$Tj&F#@U#nCdFZ`de=<WxxF=^Ck!{LOK?v%oyLg8i2qB{9QTR&@9!?64oSCS2j9
z_S8akRqe#6m9Ek<T-9o!y<@7--ZxI2Z|_dKM{RtBbo3`!_*H*}u_fecV@thffNjr-
zT8)SP^jV(+83jGL{XbHjpUUmPwR2ur-=47@Nm$!7O@y+&@I2x$<bT-90k1xcP-M}>
z$$sle@byO;!Pnn|c97wxRrXydFV^>HNk+={*ys82pC9M*$@=_v_*@)^R@GLgW{?B`
zVl<j#^SZP;HQ#`&m->tV>`8Zi)_@l<Kks{=Hc3gA3dPbV;@-Tc&3Bxh!u$9@tl;<R
zLnI=tw1wjtH9Rk$l!>KfiV_JdBK>?k{R+b{*Ye*LdNlWMKUCX1>n$5%FBQ9c7k8_i
zwq2F8obQr)lO?t3_RRF>)Vy|TrMSDOnX%T%Z5iPjU+QX89nd?9cZ-WBRI82;FO*N0
zMT_ccXTA+y+(eZreYUmjx(ub+&A~1bQ(tQi7G_*s=u?<_8b&ttaD&6gzl8swYXQnp
zI_dlug_B=aE*{G&RO7q*sQa}+-Pb7y0pmlst_>zqb*>IDdti$vKT6*Q0VkeS0Ma+E
zNTH(jg>yvzF5H3EVY{>rN9%$SG-bZQasatF3vU0=EE9T&1T<PGbZ#R2bnbMy*<1Di
z8xzLVW-f8fU!t-?0h%2eTW;oElRqbaG&L5Q>-ui_hhxysE2V(!e|f8}URd!IaA87e
z`iz}Wnqrzng$XrYhqAu7BGxH%9m%{=k66MWL(NV@hPVMTJo(w8o_Rmb3i@6{u<*xZ
zl9uX=O>}Bo&b!)3gFyJik|lh^k_C(6Kls5Q0W-4d`Y!ei@wuD<ZJd8AC+^Snm~l+J
zToAK9rqTnr`cpG5QwI2T;p$Ivb4j}7*Nv<1b;|3Zz;7Kc{}yk9{OF=}yi4gN8^r0u
zb>FQq38~ZrF%$BoiEqB7erTL{y}4u617qIJ#6dl9!nZ!i7${lbSc<OOOO=%=g7qs8
zta{;$Rrg{>&c`z*0lyo~!q+NVoS-IsMz^T=Qqi)a;tp8R1HG<X@KPP>b!qn1swR&z
zR09KqxyK%J3SEC>bC`?Cl_dDc$Boie>e4_}$r7a32FxopwU#Ku-lFw^OmT&4eIj%b
zAPhFk+!e;y&s}&>3*0H$i}JA-72;Q&t{uTA2Q#g--(*p~7aPI~I=CIJy^Fr*zvO3b
z`1GBZE<HMnoO7usl{(h_$dE$k)+=FN7L!}<?<|+2xgX<d+5EoVg|c;xF}Ds)mVI6Z
zYXAc{aBE#846Zf5ZFMz)TOGKQ&9x+?uIHkYZ>!bPpRCm5>85JSg~6Uxob-H<hUKc(
za^G~!LWS%BUa>gm>^Elp%&j9bAC~*ROv35nCeKIEmkL(qKsudXX5)uEH(1*K;%Isy
zIC(KBc||}1`AvdtfTaETg57xbA)r>cIg|E%!5UwF+OHE_@8DU2RZ#qE{d8*$zli)?
zPx|G8>nt3x|AXCDiU*gOv$z_iU{L9{<U_od%Wqq<YZOcZ>W?GN6C`mVr9NKsFT&hm
zhFts{Sn{YeNHP=ukz2m<yB)k$aF2r@7c3Jb#JN`Sf9l|;<^Mwme^~y1_J6bY{_$~^
zRo;KnW*|VrOrXJ{D1(MI6~vT+G(shTDcl1Sj1r)g0Cg#dQl%?1ic(87i84$_i(Axn
zt5&VMJiFb;>UObW)ud^g6#65SA1zc$0Kpkj+7yJg6guDc=Un$anY0Dm-Dkhw?;p>+
zUUT2)zJC8W*SW6iT<1E+!AA@30?sYiOmVWI8FENE&wmqqlY@H%w*il=ps;s&RnwJC
zS2VR=dDRt{dtck3CXi(E5~EfCi;TSCgi6vfJWaRWFjZ10H%g$=D~;p>EH{KmAcmZN
z#7l3K&WjalNe1O~wTpn;0-z-T<_IWK{&xEyakZ;W0gC}d+?MgyPpRA_K<lWx?qQ8^
z^Dd^x>tHu)k^?z7)+gJdX3*!Kln=9@RBRN&-_mkFjz0U;2`rVwd}$3`zE7DQUzXP{
z9zXPGpgZ;r7C!yqjy>dcNp{>`uOT1)#tVFiDUP(P?#(62UrVGpI{7pA=1#>6>4ne{
zmYAJ4vSHky4A}b}Kd#@-u{Ndh#wSldh1eP+Rxuvg3#~l45ytqa@b+2hM^A8MDep+}
zn_Dz|PbhwKhlcNH@jGhYb;WP33=<pbi{IRy;d@H)+dl}{<m7?!6Db_}Mgx{)93jI%
zzY_fIX6Qqpb3?X`>q22FF@jun-YS_8@vm*a?AViG<4!u}q~lFG)}&aO+JF9aik0A4
z56}?R<JeeN&l;dk)olaRyn6OP3(w?0BhR@5b9lxEW>SDopUoVZ?{k>pPcF9pf;}nB
zBFqw>r3`a&z_~2<8Og9FhvgC!uUpVoJqGkRM2}Lqx^tl2lBk|Pu)qNe2NpYE(SXaZ
zddWbyb6Gmz@~d7xFz8%X4k$mzUhaioBlC_<)x}*3AS;g6t_d9p(7IF|<}c0Q82`rl
zRqLwiV*JuQb6!;?-%fK<jP3RSt(LInZM8OSv-X{3Z5p&M39Ee*4g11fyF_7vF|Jkd
z6s;~<8qd|zn60d%l+4)%WHGPhW!V6u7Y`*Ax_`17k3=R}JH2Qru$ys+X};Y3cQd(V
zYmTpBYOJH`xzB9DL*m&f;xMYgVRlvwccchHG3uyct{D*Q1-=Lvz=3WClv88W-1-_P
z1bzeyVsJb~5=2d;=U4?=ED+^xQMu=gl>2nf_KHQWqR5m+%WMi>PQi;5Xx8Ie=d6MH
zsC40Pg~*EXp1tT5pAsEMM4gLWOMR-#^3PEps8B^I75X;{QKUkP`4~Bxg9PnI`OVmH
z=X-G<8G{Q*Rfp&<*$PL09yI!cg6wuO83}9S^`P+f3DvPuNz6paL3R9Z(H|Y42j7UW
z^dL{TDYYWcWd)yhU|A_J5I6&=?<azL8jp26FS*BYsL9%v|D~$4*(rB9nVB=cMAIjl
z0VZuLEg|?DNj#}%>yzO9wNTY-g{-Yx`Klhr0Ri4o^R6G|%~y3|t9%kqHkyCSD1W~2
zmrvr!ndU!dls{ki&qNOvH%nK&?#>X=0T%8&lah0*bSyedOkYVP@$NDu@a{-y0u%VE
z9!LA2N2{<lwbh2rM;X(_X>YMQPIKyl0B8p&o%a5Ufd!XCk-*WLXzAv3PGjp{aMbQy
z$PZwVWg*7SVjM(Dkh=l`k3(!jQ*p31cj32$y}r`3e`5U%p9<`qHzm#}onVvaN9cV`
zo@aE2e{h7@I$`2yx<~ZAke6_xmpO*Z?X~&im?{)tXZAoL|7^(w8`A{4*4dr7BwO)O
z*#-K>??^$iEj`|zhZ5!c;0b^T{;hN&+!{a6q76*MFm)UCLO1+M{4F%or%H$!O#7rG
z+~*kkWb$;MW9>5<d{)>eQ>XI{*=K$5i5wN>VB&PX+&4m*84`f)`QJ0J6Lv*`i|dEz
zBovt%(MgEO8ksD+rRMqF>U*shggII-MCNLmk4Q}*D5)tsg3dZNBC!oEz_T~Z<eA(c
zrst~HrB-XzBk1HW(L98zZXGwTT5q0zl;<eDR8s!DYPEU(NuGoJ|Af9HGluz$JpW>z
zJLEY^FBOWX4{r>U6Tc1Z^c^MZbeaP>`P;DAxy*NZ1DRttbUT-o8{C{1S-N4+xh&tH
zId7z3R;2;*FIx9}`zpD{uqutxNA<Q<dK*KS*E3d!j%9iM6>g#B%jsob7gcAkE4<VV
z2Hj?BFgi@#*1Dn5x=P*FMz=BCX3=lqt5A5f>J3_V)fzQ1w+(OZHhoh<h(~u>J4oC5
zs^=7DgTjnxoT7Kp>AmU|c|9sG`0}N#gw838_N4moW~6OzE}F(f{B~Z&u~Ir^)-PMl
z`eoa(<D|g1>zkU$8K~3l!fsRcW7t;St2;j!$_{t@h<Y-xnR=7&<M`}AqWr+nIFrFO
zo7u@@oju|{5rDFx=e-Qv7Zwwh{HI4Wky7>Q#yaslpHBx$!Mh2oOJYI=NR!)97_ky6
z00i3(s8)oOadCSS7z)~~jAAInJO2E7JbM|lHUiTx=c`G9^68qK(d^D9&b(yLw-V-k
z4o?W>&E@U4WAjBW$`F^YmbY`U&0JW{%H_K9b}l_)@5Qg$zk&q|ua+@@3ftY3<<BHx
zH+~P=Pko;Rn%Tyux5T-x1jW@JO1qXv?$nn3)$pBv4<>mxZ)E$Tt2vG%bk0LYZf$zW
z${%J<J1GU+rZ@%@5KAvLV}1-b^q}>530ua~yW_f=#dR!Pk?J3rLA4%e%86`!rA9(I
z#;4emD3|PE#r}9fi>apJ!2cDm=MA_PuO!M}k^hkIzxS>E(SYWXKTLG%j*|J4Rh;He
z(21Gc-!B#8n<h%{l$VeVRQ3;(y+gNt=1-=m#ou(C^UlAcQT=(8o6yD@BT@Z|J4HF(
z>D&-Wx{Rr9*n4Pi)2nL{W^7l$0tJ|;*jQzl?(;@bC8>+rFbR@w9owQ54!@wC<7HRc
zyo!}=?i*{~9e%g7_G)b14q|H3sJ-o_@O1LgVl^!D0GP5^J9O(vLH~>S-=zFi!;$=F
zMn|m8B9O<m^PyuVy&Bk`t<pfe<{OOj^f^NpdzrAfRqd9*(L)WO_{XpF4PVp&c<Cc;
zW*<6PE&n$}Q}p-n4miqt?}(={HNm(P(vKRKCHQCK18#$u4GL#G1LcZDHavqkJOf6@
zeO;uhm3wA_sg-a4ZE<Y8Yoidn&y1*ZOZHn6iOV`T8#;~=DSho%PVmk>I-LC@?p}tQ
z2wr-;j#9ja2UDC;?9P2wY3qLNtr}U{oDi}sd{wu)A0k~B%H^$G;`_6K0iR%$gJDpD
zR<<lzoy@VjsS^YPJyv@t@3GYG$tPFY%F1J@J4W+&?M-mRy3JffHwaM{p8qO|d)ZrA
z?)Dx?4|fc*n%IBjt*8Iq^6X+#s6%x$vrbulzJ&~Z8fN<l@ez{^LBzdY_9_atd!(^_
ze!?aFWb=sul6m~4@T$j|E$W_S8#wq<_`04O!&k6ozLE)|u9xps1lX(qOfIe5)wqIK
zS3X4a3kX$5I;;BS%R-{{34-H$1tG6|P$35K7$C%eLNLj+lG}9*sw*%xapl7bk$c-T
zhTk>~XwO@h<2VAS8mm+cN^QlUkXKOGD>V-^LPMD8o@GAwIyROwoe^+2c@|2M_Sk)+
zqxjxU3cHm8^J*)#vX-KBSE7KFOD7^0b!)lr!BqLq4onMh0{vlU<)U4P;*Wvru5D^-
zy0+=MUeNRM=`0&AR;Jm|HwB+-i~qXeq5DUA>=1t33Q}X*oVc)du@5;T({h7vD!1@N
zaFi!}FSs!n1A_0SlLO6;8cU!3y70XazH`S3l#G?>&6}8Pa`xfVwmR|Y#1VDV&x!{W
z5RP3!h5_KuDk*Ldtsy&-e0tJmHto4p-&82K*LCsQ6%UT5g0t7PGpDX~^R6<HXD#Z>
z%BFm~J2hR$n@7^Fp{JfRnppK`d$)mK)yMvU$nJ8sOY}5L)Dvgx2u&>EACz<T@w`fn
zdKcm-LgX*OfqYEU6-TGqQ^c**6&uYm!FhaWPvI;RJSf&3dy3?T=*Q4>zZf(1rflO`
zi^0~h@zMsKSDzH9y?I(AGyL@`wlj)&vLEsNy#4!pZxUb5t$mdTrQ7DT;k5Qe4)KV%
z7!wZhFxlVMzUUzyQ5U1`5D$|AZtbf-#G~HDm~w~*Y(Q=yH1veKJ}4=n_pq;$0_u)I
zwls&>H@JMfe4n5>vAO*O8kJb0g+hNaysfJ`T|n`~{>B9?M<e44dbSlJdd(_C^_pF%
z*DG0Q)N5{`MXy-F1ZY|dbNG5IZre1U4JWQ#9Y%@TBraM9;c1ghjp$XI<cI2>29WfS
z?xVFycBpPQz#akn03=RWC+XTY*4@>u2k4gSaR+$JIza1Hw;iC>tCI&>c+Ni1$TN0e
z4$rv<W)dgTS!f4!1<fxk@S%l;#XhvCu*`>+6b5`~X`$PPmKTNvA%nB9FzDP`Rhff%
zp+286sP8aAtmLxFYZdPo0I6&16`+c1qi0Pa`_qSQ0qXk_)tG;vT{)vH@PHK`0oVhJ
zolEBdD}1<m$$@U?vhaXa0u?_82A#{&1F8Z7)=`M;m~hu|&^NQj0PnzB<t@A2`g<<D
zPZK2QZ%HH3`=HYOPpeQGk6-}P7JnuZU(LJefaf849O|}^I-+dTJwrlMc_t;^kMbr~
z8+MZFNTCh!rMh9>dw8qY+th%Pu&6#s{hhF`pKbl!YJH!y{+?xhAEUq3>nIB9$C@5O
z;EoWruhA0Sr+?5Q*#P^(JJG+r(Q9}p+<~u@aUaF?wD31X*rkm7VJxL2${)mTn@$jt
zHITUq<{Z(vifYrqy(u;1sLlr&hFZUm`128atkQ*!RU3Qbu}JV~?2X4DAjYDoHulD2
z(cshA8;{impT^#JtUmZO_Qqon1{aqFiPUgEl=@o!UKUMK!eMH-Kgk7?t;Hh&mQ+_o
z<l9G8paoCCx`V^94uWt?sDi%HdW22zc;v_;bGK8_NZWP_6iI9+<H)S-#@^W80z7*=
zEgVU1*SKG`&-f4zF&mF&Zw$zDoq1|eGn&02*8Hj-^L$#KgZzu^jX`-bz4Oe=a};|6
zG?_R4L>_G?R`8s4A`Lum_KD*t(8BHQ!exf~_5}`Dw0*Gy=5Jr-fF;|z9k6oyfCH9p
zA9TR-?ZX0!1+yv*kbjZA@%B}6jbT+9#ol<^Dz%<SgHtC{w|ydLL7h;yEw%oAjMiC<
zR(HIhiJB@_sF#w~O9|_xS=LLf+Z%D8YrWJ)FX<eDZgSiVV@DV_193s0n}PQ1%BPf~
z)1FOpAI!_}wP(x6SQ&?E&z6_tWH6?EPKK{NTOF0Kj%u}zYC91|1{N)U3L_(qS%z48
zQ^%iSV7vwcL+ozG99peT_Od6Tz2G77FM3(dap7=N^62)4vg9r=ecI3q63k=k*OQz2
zWo<<KE2sfC^!q)~IT-mYkvth2ocAtSCQQ}3B?<^1KBEVr|0D9#Stn?_wC6V19WUm?
zA<;)p7T@V*D-K9+PX~I%*^Y|uiDteJs(h?b#P^hegn;k)9_tY{D2yNS7)Rrtf$i7t
z9nAmCLm+G$Tr%H|4$oI&tq6vqJBDatEqLL7i)19$iexx{(Tv2}Xz^P_Be7OQ!}*J9
zB-Yj!zeP3@YehDkzp+JiC$}r5b%ay8A=&{~UrKMZU+d`Q&g&E5&Kp3$_VZ3GOQOW(
z^zH3)PlV$VLMC#1B`_9v4UPbhElYBjboTZMLrC}Sb&HZil=(dc%iCAxr$vdQLGA1b
zLEhSpJfsi~I)l1BuzVv`h|Jw+^k-v>a5}MZCQfZA-jh{vr}SSPyJ~$o7|3?Q*ZdA2
zuHO&x7(TFbord_EpCL#Iy>Y1wC`dO!`jiT541bJ}3a*yX!wRxZG{Q^?Fn^73?nKk(
zjXPG)S@7<7#bDm?f_q14eMNmNKQ{W*Y3atrZZI!#TKc$Zv9a5^taLj1sOtQUgU)G@
zsp#{|AyWWW;eHkMkP533s}-bJ;X#6ps&G(GtCkw)<^ldCq|P-(uTmMQ@W=qCQLMs=
zV<WD{<MnDic7m%hw5!KoY1Dx_PM07;Gi9_yNe|Dp9-eJIoZQ%mXRGVs7(HwqtuCgl
zl)<(6NH?>H2o(I^a_WUoC~29iw2&zCcQkL8YBt2**=;aWj27Yvg%Ivo-7%8u*FC%q
z#RtV2)6v~Zp_f0UxSs#E)vBTE=h?2G#iX_F9_NWm`?%dZ%jbTa6m?qY@6RqNqz8<7
zu<yuQk3w18i+OfvjDm+azu(IZG^?fzN8eKO>Q*Sti=rnOyO?*<30U`~;dilIoJlj>
z^6ZV_PtliicDzbyUbr#*864-ltdExf{zUn=f!}jf5sXNNAAx=F$dmk;GUhyc8NjpR
zYCY?fKdFTSiy!o#!ZkiFQNFvq0i7mK7L4Tfr*?Zczrtqp)7reHce_>8M^^F?R0<N?
zD7#olsLuO@mvvS2`O)8_cG8JQ(vFK5&z^*(;Ft1jNLXuh@+2jr@2l9cCy#6Yi0FIf
z+<1EK1{i)lL#Sp+)Yj*w-%2wE+5h&pK7R=*ADLC1Xhi!B*o5C9IqjHqN{fckaHQ%(
zoJ1p-3k7JcIvyAik7M(DA^Jw*)=k<rR8^#JS3&_XApM@Q<Txw=J{6RqseSHypmbI7
z%-pBY|Lh7TM@XJIq6&J%5!&~^6%wt`eieR<_8vr4(E5g+IL#=iE@Q7=fm*mV;SNp_
zN@kS2qlg?5e?(Qmn9Ju`o3jG0y3*dpf2D%0Yg9uJrfw^>Ggb~YD{g|ZuFP@3*JWT+
zX?(y><2bz+*p~85;yXXikPk^a*a)wQ9M;oqhS$IYI802=RJ)l%)}%V~U$u>DZe&h)
zuxN-6P2zu4PmXL<H7C?n&3}|qYsY#yfX;7K{|)wP;{=YcSscOfn$2MjuO!EByykMK
z!z;l-4m*`royMtXrs$QCGXr8UgA;)nui|YU>j?fP`?5#nr46Z!%Gn0(c>$w#rvCnp
zJ&Ja=ZY$g^$Toxnf`;`N)I)nf?T@Nsn&hfaV!lJMY&}n#?@YUF7_G96xS;Nor=~lq
zQ?&dQDO&#GNN+L7BHyCruiyv|x1!~*h;R8zJ6Ly$_hzbP{RVlJBuh8l;Wll&|FfHS
z+qzO)Ym2=k;KXsP1*|PJg}A24QO}hd+f|t`N^RW~iB4Oqo>bZ{b6$)5DH4UY1I}fs
zKSQF<b{LnDiDwfs3bw`FuYfd1o5P%04s&KZ%t<=TDWXh^P$mJ&Xpaj5g=Yw3AhBSJ
z!yITd*h52g>%muD=uev+o|*wjCKKD;LU(O9i?$rw-Lj9V8!ql@wW)<!O1avEecIjD
z)y!5paBso+RuHxo>fQ=1Z+(qF#J!b8N$Uf6R*x6fv>5`;(i{rz2!jHwX#uuH5=h2A
zn9Bj*Wl|D7ZOm1Ep3?!Jf@6|(-ic^PV<BFgoFY)|ZTY)sGhRR7C3Q`Q%u%19q?5M&
z56(}2#%k-~=cjEW^Aoz!!V7!YfR~$@dBj(}@U#cL2CjqO&vM;v0Gw4GzHhJB^XIV`
z)`_Q|jCZ`qMY-16mU#Gr0S;5S+OQ#!jaOp&V+9i(8hheB{QH?+yl`v{{|#RGZdBr-
zzfFvS?{Mv7ko9?AQ*={PdjV|@pCKn2QdZOJ!r!`wEqJj-Z7#euR-6ZQsDZiIN6^!m
zkC2&k2I}0b(9iB1za|~hzJS3x)Qw@UEi(?Aw9Q&1453wRvp;XWm`A||)G%i8A+9eZ
zf6Cl-Mp^QBFFT_c6VZ+x+|ppSI)f=}{AjPl4uogTkD2TczliV0g!aRO<8fm1_|P{e
z*~seZeUXjBLUdD1+UA6VKD8T%6JNq%ovr!0#>qC+b7mXLj3Y_%HUC*j4`oAVeAGX}
z3ISldeTY-jP($F@0w9cKr{{##^Bnvl%kvycF7vrBKo2-4ns;Io)xahaw$zWj^#hZ(
zSuf=85!#O6xa*p4|8KQ;dTsvmN~h!5y@_na9hg7CIKvYxFf5RlsrJ%W^n01{VcrA0
zzsy_8x+>$DakN?UASV&;!2KP#=e#W8GUEu_Je<g$ggbue=Do;)oqS1V{K<q-A2+gJ
zME%*`)Z&pstxLX}l(y2(PstW<=6Ek%Z77dQlES*wP7Tc%7u6Y>&L#`0%)Ysf8lb82
zMUXZ5eil?a;^jNtF4?eXUGg}?pq<#Y7!Z4k-7=?YYo~P<Qge0UKRuqFhOPT)LUc~P
zuZRFo&_{Fn2JC}hfqC$0CZ4Z`_i|5uMk`n?!X5_F2@1y;;W|z<n3Fqe+ldI^&O89z
zF$l*iVxNsVVx)O<jWmBy{roMgw-VU8>v-;U>a)!T1M|CEk3AXgd{=Qjb_^z)RhM}B
zyg&NOv2jJGxqqzmS7W7a5xIf8zbpwiVd!VFH!)hC`d-S3i3=Sob8V0howVpMQ6kEX
zdULpV6Yc<9-VnfV^a=LJ!L{7m6@Z^2-X<X4uq*tnM*tFO*oYjA`sXzW8`LR-tcM20
z1cyV}S+zGMvi_ak<B6d4zZT-@4VK0NC5Di6P$JQ`7Trojklx*c5XYlYXFiC~cbbH_
zT<Y~PL0R!prw<8g=p)Ku5f_KJOB3fZ*WYPjOkdv=oIsz;A-rz5&Hoa8;hjqv=bhsf
z)*&gO@_bu%B^a(pV~SSMXhMIKavZHAUq~!&N9S9Vn+_|qKrd}NRbaY_p{OuaMd!8u
z)v9<|hlL(DQtq^tvPx;WO?|?hSpaQBx>N|5zzUO}t|}DPjp{ddzOTEU|LyX$E3Yhv
z(tZoy!Z-UY6XN^0BpmG03G>RZHdIlyQ>J3oq;;sXDRA**w>Q>=<;P)FJh-ei#!FvP
zX{Y!?-O)pgPPi@4>3PWof`ji57>WxIIm~}lhxwT$?jX`XU>qsOe1|dtwlEwjt$Zup
z{V6bA_^4kIA=Qj(R{i8S(*dK$#2h*5A{e*9q(h#;2g(|e7pj8Jbvo6LdI<9(v$4>!
z$vtX$&#s*#+%$QO#kiQhe%%J-B-D*2EbgT~7_{vB9NJs+`nEnZ6+Ob$+rU+>fXq@F
zSzRW~&aItk`%+z&)IGg@ac&7^CX!xqnaucQ|DIxdg*C;>S`~_;&-xC0X)GOUHVT8W
zW~0!WkYF|n%Ty;m7=`%}tc7Kd{ZTSAtd&SH>f_V-aHlB*_&MJr$91BYg$*N3O-uN#
zO1J&DibMmbNbO`ND})s3azHE66ChX@>nOjS!kwFO$fHEc=oLlH_u!zpbUJeh&eZZS
zGAI6dY*F4svQt7H9niGf8#(SoW^V0GC;(#G-Qqn|aAs<s@N}bNqt|l)le*gzrj|G|
zPWm4s6`U8H(euS)fvd;u(X(R9PzCGip62wbI5v2jPkExbd=+|0iC{tBBORp`raZDV
zi7&x&sp}a!NelFngkLC4_|nW-TU_jT`m8PS>}`eS?B6k`_2-YK{DJ=1LU*E}`>QoN
z+3wj-fw!S5nAQFn6;FNX*+j#p@ca)Ec%(~pZ6ye|UpbF^O6=M%!ZqLIW$!55v0Y+e
z4ST<O=BSX(ncAn&N?2^5RoDvN(wweEtnI9)V4mlbP`XPqCvB0TNrcK0r?BSeBM{O>
zClxNG{5B%4E=oiM{FzbnWyg*>gypzqBC@fVa_jDIC$|bg64t6tZdt@tWX7BLxQyxj
zxLbjKys91*IyHvz{oX&s_aJDFo!ch~fq~{1m@VMt{J4kJ6w7dQW)CTWY_hUEu6t{l
zDX{S?&7!6e%`u1}-|;u`hMjY7H2-v+d$sMIkSKo^b$=+fE5BoPUZcgk`B7AWu9^_W
z>fX&8U}nKTvOCZJ&O6}`zuXo7L`Y2!yvtl-H&rHBFGb1at3S})>7Kn~64~UDN5yY`
zEJ#oa_vU=Qvpe<KVnX9wLdj1l5risPG^zp=KG8qnvTSU4LVRNXq|Le78-?nmpZ^Fw
z6Yo2ce8mdtSY0=osAof;K{g_uE?Yt4l?~~7`E581y$Em%fXG>t&Kr8l_5-8(>uWna
z=5%(86S+|5Ll6c*x)UZ2k;!-ICZQ+6lucUxw1oJgGpFztU6sLTEr(=g+c7B-ZIQ|W
z;<A<qfHE<6m;t1iK|jGcm6@M~lsZEL1_EFZK*5H27lSPzjs+VIQkGf+bJ4Xh17-%m
zoB(JKfCT}t7=W~w@zT$VQvwf`z*|e;ffBgC1nw?@SCqhmeigMP`SG<9yW_s6z4zt5
z&0C^u-_qj41m)0`|IfhT_K&{{7sN(&UMroJHW|?PjecX-TjMXO?WfVz3EkF3?(F*h
zC`sgg1*RcrXWZB9r5MM4qR9-s?qhef{noe+uUP@1f09d?B*tzc-1Q6*^W!M7N?Y!i
za^E$s)Vq1ZFg$zWDlrbL&tZbR0OBNDm7EB%Rno~L^X0fZ;@NB11fm6Tr^{o6d@b-Y
zAC>ki<Wdu-Jm;0a5zm}Xpqq0n#F^X=^<d?5-bIJ=!06n0?0pZ3aay7rDN`Mb_Pn0D
zTOzx{(~?>yKNQclJu1DU1E{C>G9NiyB3}6_ah}oYp2GG(zZPPNQwE*-@yJ<RL<Ie<
znI3nIJ?-ae2&ZGhL>nP6spCbT#cggwEVuf|{AAF=toe!E`wDB`zHrAYG#TvPJqcZ$
z$ni09fVkVWEEgwE#=S&7i3<{bnkYgaWzXv&uw|sN?Yxm9l!jN4Vr$^xTA5;fmCgaa
zt?yvG+L(B?zJm!Y#nwcN4koA+TT@qbFaf35n);%H2_|v9qUhiYB&`j1&4!xgKVV3W
z+Sft2UBTU=L(c%2CW{8Wg89ZV-Hch+3YX3}aZ$1EB&^SC_96UT9G%#{7-((Ae}m!Q
zHpm|yYa49EHqK6T2vFU+DI(Tc#U|ico9YE8HbwcEy-EDCxtkh!CN|CC*|uq>vYWr5
zoxTyYaD&#ng8VBJKug@6381ALx_w{&)&vFb+%PCNp!q(>mC{*3tZ*unD_~5HJSczl
z{7vnO1iy2W<%f{Zrp3->$);rvSh}g(0V_8R2%s9Bn+APo{-$9dp9jjl#D8Tm<__O7
z3PD*DjtQ#f<x4kMcomzsQD1XuW#fJt6I5Juwd~uKeVp1=ZLH=~v#O2NxN6q9G%*E@
z(XQ6)&We|;IZH@0HA6={{$Shs6B|cti`~5^o#ER}U=vEb1TsV!k3S-RelXs2Ue5^Q
zh+e~u^ZHk9?7h`+1202mqy)hmCviftVUI@(z8h}b|J8-1?HX<b+`^GpFg{0zCpsoU
zgZP}?n*%N266EmS943r7Fz|>%>jWVfcsSIJI56;Vpyh%P3_KjBj5v%8x6f&~oij4r
z;3f<oWH&F((xN!bX#2WQrB$bsrc(7H>b6L$x-DYe7O`%N*qDeU)p?P*8WR!q9o%|j
z-rJFeG4BWEW``R&LEU<Y)6bEH5UhIKMm=Y3tk<gzRyD8L8(Z`evznLAIC$yAmr>&n
zfXIA*07Mr010b@<9{`ah{s4$9bpwENbBF(EC^YmZIaf?8jTLi1ZdSrUK`Nn@`hVPh
zo(*G8vPE?w32Ww7jSd={KBlqNz(HfH1+1~_LVtU|dWUM544o6M8^8gAW<qw*4k{o8
zkk-Kec?){a+FhcRrrkjc`Z8N`V3kBmjo(2w^t+=tDlg!EQ6kgAsq3pvCIp-af9ya!
zdll@-eP%NHH5ML$DxW~%!$RXC+!}>p=`U`~&WUDh_6A*R!NB|h9<WFcBu;sw7--aD
z;|nY{azVUAy5z#C?i0W^eaP$C$Nc>?5<~F}<jqT8#)4xU%-09pj_4UI3*f`5AblAt
zj&Zji)`um>xDyZOb8JH9UDmSZ=zcwwEnRbnB8wF}6{7P3OO9O$&eU0P$=%lDUgjDy
z<xcHu6p&&(fh}DX1EUydo|P`~b*XI$X;sl6CZ<-|xf@$$l1^d|tgxeyIRhdAP$t4x
zy3`iSPs|XOSpH3z#!8h8CQMPUkws!puQj>WH}?|2*Kc&ypAHlpXVL=Ei+dGYOayYm
zz+keXtC0G%dZ$iVV_M25&AFAAi*WnbP)}Z@B=-iXOTH30Ea6tWEv8o14w5cPg-9gc
zv9~;RI+QK+_^(yJGscF$SDt@Z$&;@uq;H5TU7r7St;Ikz3kl6$6W0{(@KG-5q;LF&
z53P9FQ$Cs~U*lvPSy@RmOL?To!}^1aDjr5OE0EpX#rZsx{%Z17xMr25D4wPB<{&&k
z?BPnJvZ@gW0gMP7W8egV$iU$m6$rxtuudT6;{odhN;*FF@Dzc*yyigaC;4aSc6Z?v
z+3nB5X~`AkHPIKdoo0hq$L)~=PH?+w(R~z90-KjxjVNDa{C*LQ{YJm{M}ZUjjj-%D
zd8z#^xXj)^lV@_j=<KR}<$6kh=y39y=4rA`NSln7ZCcf1o=+?0Ab*ZU-E~w^)K$C9
zGcQlhIel>(goL11^Tszwwy}YRnK!F}>2lue29wv^-%kHfvd;ZRC0SeTU+i4w@7EB*
zYsvm@2dvyb;DDw32OY3{|F8h^b_KI4K_W|qnDAWpR#kG1VO2Veyyja}skMOyr%ooX
z*}w!(osfCzgGj7p*(G_+q1xaKtV?HL$4E`5e<(JD+S&XUQ93ipm+I_sRjN>5Cao_M
z)|a!aFI)FF;y%~<vW>oEI*HMl;kpmbU#?bK^$N|tYK_`{lw{`_{~=!|*{Kj46ao>c
zKx}Fohsdkmkk6y?IaIP!UWih0M|CUyoPl4@e;aZ{^D5M(3G32U>(aJ{apK{rZ{iID
z0g()MeOgOAwrRKd)5{#*&9GV)_Hypy4iUd9+LyfrMXzhZKkS9`{X+YB4Um~#sX+;O
zPX8D*2HImgGANfYL!?qfDtDSZ0|NA;@8N?f1D$r*Y$QOF)~%;-GgigZ{#q<X?XR^p
z@7hKp`J)Lo%HrjL_$lnHUGPhM`*K`@Y~(l&uFe}l3W$Z5Qj3_%bq-fs9SiSej`g?-
z`Xn{^BzD!$;vDlZKEsz}&bs)pJ`mEg8V=|4PI!0lv_Q2rqvoT7ny71A@~lg<6|Xgi
zf3df@e3Q|?IQIyHS@ETjS+TNNDQY@;F!je~SV1&V3r&>!W3$>G?zuiT=|el;@!|&!
zsvE>oThJX(zlaeH4k^s7<29D<`|fhAj(GZ&<Qa(6k9Ii;?$u9D7x$`2<5$oJW%&Up
zwhUODQ(sngXJ@Shq;xd57z}Yc@!#Gfmrhe0B~^l`Sd^wPJ7faOG^?TJ;26<O3?a}W
z6nxAVu8wD?u~5m|LS-+q#$O^g!~x)O%p@#W#*~rUm7spo05ZBE!{7QA$JIsT?$}!q
z?x-L>k=wOaA{W&g+Zp#$OwOUPo$(KN1Y7+?IeHV(fxvwxt^UO^m{GBojd}IK`}>P1
zODRG%YEl{%KKOQqx~sLp_d_^C-MDDWVR2$>wYSHmm}_jUHutzhN5W`#kBfF4ek^V8
zanZKJcbVc`Y~LZlHD|B;`)uG7>29|9uXBhFNB4z5bqMFx%yDm=6z=#6EuzJrTW{o#
z8>^G%T%0KJ>>ZWO*^foy+3T=M*+O)bu*uZq93&8nEn02k6m3R7v~A3C8~la|-kxW|
z3pn83?v<~_2*4V~1g~D{dNOF$q(Z#BC;asTtb{t@eZ9(iTIY-5t{2Tg8Ulqh;jZV*
z15)ru^AP2_m+l0A!d;J;AF&@azi?MCZw@5-Fle@h<;Qtz(JCRU1YX0VywEmYz#f6u
z@EGu<!nDqZ!d-V0BRUZ7O7qs~x2RV$OV$;^3OjyE4+)7y$bzj~i*8*Bhr+L6ri>)k
zFt;G`SI4tW8Z9ETBq1(6#SuCfs0YZTzOgnyNXd7bv4LvsD(T5qwvN1Ju}`&rl|Y_2
z*auQSXqG7cFfE(^B&7p$u_<vHEq*BGVg`%y+5Jnyi6ev&MfCOe%imNYS+=S|_lc{F
zKjq^PqUY63{Q*YV*TAT+t)spMM}6f+eGQNL+FSYs=JYgDwY{lt=3f)S6fFzKI7thz
zaAbsW-wjMhhcR_QMi{W(o>6<Y*t5}|bL=^jXXnrYwS<8708pQr%i;hS2!QSY7!H6z
z04R4<<qUV-PJe4JgkE8sv*U-nJuJNHLVv$hD^{giYEQT)h2UtnDxxgIov)Z}viXkg
zPULKJq2aEVtytx;f7GwU#_&394EGX)Nnw|lZ7yf|yc0z?$TESlYs{*~7Q^MWLf`JD
z&aL6D2E@jqpz$Q$364SRlA)LkqhvR~3%Rq7a7xwfw&A38D!+EVtTGVmrj?P{kMa~V
zfJG@x02Vv&09e|<0sxEQ0Q^-t^4Ude<Gac5nr$?=DAolpZKQdLZNW=hX<i8!7ZRjz
zr#S!?`oe{gMH?48m!kar(v98DWw|e07+JY-P%c!XbEE6R83PQkem4Z)%;dqQc10p<
zpN$9P^YMUuMjnvQ$pc#MI2k}IH(7=<d2kRb593dT{25SC9g%hZqZUn^90(Btg4kpf
zkpt3IK1IaNTA#FSm;(zgqllj)<}{JSSq4B7uMS*30npfqvGsW1(r5tGQCucsjUta{
z;8vuD*v7;FgQ(#uM-6)uP(z{CW1^6gHc#t(0%`-zjYa!MCG%(8-kda!H`2)REls0G
zVpG+HiK!8uty7~s+osktU<ScF1SM+_kkoC;@yKA-=-&H#fLXK7KxTeMFMr%;Xn;X0
z$Lkj24&J%&aZiMgTTSD~&X4kJJKs?1^z#jQ&PStEA5^8AWxYnLh-POmi~0PAD0eWH
zyzG^%UrWYPqTw|Ri%90XIF|mI1;579k9oiOSo#a87Ks|A>(+j|<7tf4XgpE8iY<D^
zwnW^^@IJQ1iX7Wwcpux+;{0a1*Bre%w**Mpxn+Tmo}F737a<{%3)fwQ28z&N5gG<^
ztIr-<#KaOVq{wKV@n_EbkMvo@lpzGTic{*O?h)$i&kyLyV?m@TA@;B+eeBW7;<so$
zMs<qcqV};zS<hJbiiar&`pgE<XZ8=|$)fV~_p+!=AK@009o#fPWp^(kns-xZYg9ET
zNCQ{$@HD9r_oPv#xkim5$j^c@Z$IW?qteK&kS1O}H4-P4FLA(mB^_;=3pJ8?fKBMw
zXDPK675MpPYcWcij9DpFK`SZI{EiNK-BOHgi7LfnKU?~-E!NMmE$G-M;RYyu{x8%!
zSn8B?10n%#N%}h1uQ<d*xl1P@t&LcBki8a|WYB9pU~>OjV9Le%z=Zv^zyux9Ou=F$
zYLR#ngyZhHV=5Rj{0p*Ns0r>fBp<BoFgtfTpKPtxI{Gnt4Z_6Ww~~62=ZuQ2O8N=u
z25NXbl^Z&vB-{uJEf@)}Gq{oPTEiO?i^GKpO>PgP&0y{rec(9OcbtNw+gW@n&FV!;
zNT+jP?;k669H_Vzr78Z+1)XTI)YV$`QLpF4F~|dHOTkk2wUY?u-^9N^SlEG_uX(2+
z;<#q7j!JuWjzzUx`XxK1ePe<}Xoi4#*?%1rQSW%3&e#s0huQz0C7S(Xny#ul->7E`
zZ9p^tS^}Uk0Okb1%m7#r0PO*=EC3b<z(4?W2f%Ou3<f}i&5z|?76A1D5DkEq0B8(=
zIRP*;02Ty5djKp8fW-kY5CGi)FdP7b0T7{mU3mhaJ^-Qt&=LTR0Wc>3W(L550B8?@
zWdX1_00sh}I{=0QU@!n8%n7bM0Z<<R(Ew-(fW`oj^rXd_>44Z93;2w^(QeOW_FQaF
zNDuDa_8hk7pgkk@1aEe=%Q9)WOO0oy0vz&&JHO2k6E<D^Q(;p(Uwl@_#9l(1l!5j3
zjM}rso{jdLW6zm91Ku2fQZN^rD&(~|aIuL(F5Q8PrU?r?2w;pc4%^#}5_~`|+3`p^
zJq?4jF7(eoDJ@^gdbguzF!dsLZ<8l@;_&j*YIa<&j_DeP=l(_PgRQvcPMylA&Q^Tb
z^ze%x@k+e>m2mqC<m6@FU>R>&_ArW&t<WhUvxHMv><0g=E`RXf#`frdy8`^OogyM;
zL18Npy4o84#y{{8Y>sp(z+@xfGuPm5;}yB<UM|Q6=k`zx9VE)QvD2T$;RF8T5Ccv?
zCDR-p3W^Ad2;!(C(FgrWAmIQM6@)+}TRB1S_c<991TTqwmhI#k)~5bf{&ylI2uIoS
z{3e#?+KX2pHSNoezfljpc`?5aYA0z9UAndVg{;npSWXQ^m<@-bET)F)!;x7-jV#ZH
zT9^rkX0kXRn!_4;R|KTl#niT|o@s7Zlxb~OOSpR0u140`y9kloMOe2fVzF(zB-YwP
zi`6udMMJ`waAe6)x4oAR31c|88yd9t$|2zk=ij>)*t>IAyS*3fvUt@CcP+N>rMoO%
z^^#rP_PuhKi?@8&puWjpk~GRcV(*1RQF|{Ms<-!&p+<Wz9kR_o`OmcPl|ys%CVz`p
z-MP!ssa~|p;*r0lQ@wPT#Up=9r+Vcs7jOA4rNi2R25<l^-~gJy0knYwmJAIzVChh|
z1C|dBJ7DF|paB%XrK1*cE;NAyXcGr4-Q}WF8kZo&@uM%_r39IJ8nr|Uca4Wy+1Tr#
znHYCRdfC=pGrjEWp_!9jEtyZLLG0|sJQHUx<Jo$4H_x`S2YAjpdzk0!vj=%5&yMh%
zdp0$$j-5&P>cp9ZuWmh)@YQW+625x&nS`vKbtbW@&pVSx@=R9JJKM2h&VAEhhUL`0
zYbjHWPJPtXUin@(05MAc7gO&2bGc^64JR6&4u9kOd<1HBy>hsUX6-cG$x4p_*of@j
zN$b7zN&R+#bwi2)A%nC|QlNXvjSJ$L@%Xp)_j3-no&ng}KOl$!*xJ8U5CgEae^3wu
zu(cn)ZR>glU~B)dAO>J-Kj(z4>luLK`(Zb5eRd<`?@1efzIDLQl#aiLQTq!o%#FYI
z*?hlDom3o#BdyQ+2jPo~9Xt{{B0O7nupNoC?WpHDYeyr`**jWzCU?x_Id=!6ZvM`C
zx)#d2GivWeJ6r6%aA%{vm+qWn?<G5D+I!{B1@>OPvz<4obna-U?EvQQSm1z#I~F@&
z(GK+pUQ2d#J7DRK0S7GKG3bDmJB9&Nj%Ai^=g!6UUbJ(--V1kj+k5HGVS6vxIcV>d
zJ2h{z@!eU*+saW!t5`WA4p_J&>VQQ%>K(9TN23Fl?r3qq@*OiBuyV&70Ba9Rm-etX
z?O|`)!``%qy=f17&)vBI%-q?|a#~G68iHsGAEGgQh}Q6-B|8RuXz7k_A6mX+*oRi`
z7!<_7*l7b}rwxpqHZXSDz}RU6W2X%a&dSK1ff0<<*K8I?S<9DcR}>d(q~>PEv$J<F
zQWr4l{^!T%%cbLUcepES<I_KIdEjifnzniOcmBu7D*9dXC^f>)923ZHP-;Ak!bn9e
zEBbC0=+nybQj=pN{Jp`DPm~XuUg3SJuq5Z=4XeW)Lwp@>s(ml+BSXegU+t$3OLnxp
zTo1!-B&_W>Rhp9UiXU684UdYat&cfq$LSoLyxeMSIh})(ms=ezIyiZ`)pbROI+sR$
z(ZR{ft)5bJ2qtb{UhbVnJp9R9yfuW|cQSdeHHtkQY;p^Y11QQ+`Pb{2Sg)OC1UjWx
z+j{K_BeT}G=rw!2HiVJndTj_JbJx#g2Mhk`s%r5Xw5~vNg6m&{hJbH~2xm8x@od{r
z&$D$yl;^AsEj(v$Xyhq{#?1B`W-8gv_3i2kp!w@XIRq_Szu1QstrzW(+miL&KD2bb
zsE6E^uOIZGmFq=6{IYfP7PCfIwGS?zfBBgazQJ;Xx3*!iMXmO4K8LFV-Nn`Zy=TbF
zhCvr*<p$+5A{Qrp{X=7P9*c_6&TlFZ<$BAeZB&Tau7#7dDPMDG;WxWTJ<_zUQC$PO
zMs*F0#%OC=HE@zfRnyKHH8Z}Uk;m<eX=gI%kocGbF6f7F2$zv3F0VDBBcM#EE+7&B
zQDK#&M0o2bX5pRoo~;Wos2i2?)ak(}bl075Zq)r7Yy-?juAJzaaVr%KSPieA;C9|B
zjf70*xB$f~KiuXDT&5`ZQGzHcvM!gXF3rxmz=t1n0x14!{?n5V6wm^66p|;7VF>&6
z7DUZeA&za7fP_j^K{{d1-pkZxZkX)Oy_3i)pOV+_Et*Nel!y!Vn64wS3TaXKxER1G
z2JK)sbX5LBC2CZTpd!ARgcl#PiqK;}t3ObX<V+>19lJhkNjj5uH#Udot-(r-<4>^t
z2|Do>u2!i=cC*seb)~Cg_;|<WMYbHfPxB7sE+TQE#YcHt{kjz~d<S}(T?!L9hzF8{
z`LKD_g}%Pf+Ja7`Fik6XP<XCv0O)q|@8lpJ-dOHIVE9M3O*qC~8xKQ}!oPrhkvrd)
znuez{CiNIAD+Z3ZMq)>I{R%)vsgOVT+t3#+ueQ)UdrRY6O+RXN`w?k+$qRpg>YvfC
zy_@<hbwoJR#M{fB9S);+6S)?8dAar95PKpANvyu*L$21I-gK1E&r8?a6?N{9B+nf=
zQ)6Q^1_e&3qgi<s!kw#?CNgA4apf)-O`Q-+4^P^>=ar!vGX3PS+@_d1se8!ojs1hS
zhudEddy|Hm!r$*!&F<-qMSH{dZSy8>MO$;{wnTUuo6E;A37Gr>(a$A%P?g;OK4VnL
zOiV7lS6!?t7Sf}wI~Uf$Q3tq-8C;OkO^t(Ze*Cy<)Cu{}FEk$_()PmntcO3&26nxA
zub6wipZiwpdEKVVa<@X$E!bSA4-3vmQ6uheh>sC2BA5v;;fgRi$2-?_v%4`_6Hy*q
zL69fS?hq@Gg%1+-ub6T&^?mD>-MPCzsK#)zaJhy0;Fi7;uw6L2??G}!)fa+bGB0Ld
z)$Fo^k8szPr=WhmI7fLR{ao_oc*pA`;cUAls{<(Bup`{{FAALTMG(>h;OrWo8vyud
zxc#}X;{`m)Th`ZZqdKz8=I-PzuWeWL5X76Kcedh13dc8ej_SDcl{6MN&CwlZ=ju~x
zkie1aYt}#*CLz{MY6Sln8-au;#em4(P)O&bt{wlG)<~S2J`FPwA+$dqkW7hUHD{Z+
z1%EN)n^de!dnNmLFFB*B0OkQLZbfV&CYCOh5a;HJYzr;3$k-uX_HvqZp(#IiMc>0E
z@J=(r66OlZA5@Ft`LjtMl1k!~0b!T;!3%3!$RghJEXuz8`_O6b-1YgMGp$$<YhrQU
zD}hTaaw!Ifih@P-mEoN#oG(73?|Ur*7ovbYg#jBXEzDVKC=>Cgu_r^g13%%qza^BV
zjGahpAj2~ap&@KLksZ<0eRcL;<X?A$mXZwWu}OHDY6+u=c2yt7%dzdYDPI068)7o)
zDW-m|!^Cjslqw=fr_k@&8JH%CU-)vYbfnrJuJvoYxX>nMQbYx(B=x+*oe!%zKCW^}
zcVTTeA;VvP(gHn7u{DH*`RYthuDqkXHIpfE$w*mS_(ga8A~XbRDV+#kyjQk2!@qc}
zIeS4l7lfrva#~p;d{Iw4eDMQFzOv|?!9v4sBtG=g1uScyD@$<KQO=yOaLqCHP$fDD
z#-R48x#8i|&$;{(Pj6GP)Mgqw-F`8wW<Fx8cxvPPODc0v!SL6gBCK8i+h_#FHG}+B
z$C3tEE^pGd-k{1>#qcj4Ru8EU)jeB@;$=_ou~zbJ$Wbh7m2|f%EYcBFxSI;E!c3pl
zY)w#M>ide$6ULM{`xQneH-V0;<y2Vr+gwRpZG+(()G}y|{5y{jdWnj-I|$xbS6m1X
z(nJ4F!BOUa$x)P>0n)E_<47R<i=|fRBKDQ9LBD@VzgtC%{lC&0WAy%n{#NJu{Y{En
z64{HWexaOt!K+h;d*O>WmUKAhKoz@v-;w^->CkS(Jl1kq^5xXMBNg<E>tkO~;Wz7k
zT^%V_#|;|wTDVgR90PPHZ%#iigK~7UFq^`odHW#>@7ww-2ZtSE_!Zk0M-nc;ciSm0
zH6);mL1E&rl3BCxFHknW9UM)R_gJq&XehSEn^*pDBK=Hr`Y&qTQgjtM-9!K#1|g+v
zqg(~fzhv?Y!Kn3nzlg4sTz!KEk5%EX|6UnDdn}{Fpzfp>)tVxHH!eW_DUqE9gJ(Zg
zyQmwfiZM3Lg)i#&vMGnRblg}kd=bm2i;+?);w>GtE2n-N>y<xd^?OCdj_n%u9g9O+
zj(g3q6v4%gg>At3C$pdK_XfzFNI!irUW@36*t~gnyXxk@1m5RQWz6apJZ<R@R|)t)
z{UoTFNDmgN9|7DLcFpHy$2}k<5cy_2zYy<RMX{Kb7x_k;-I%7A%OOxDWC<5g*qkaI
zrHUlVAHO8qdi)rpSRJnwQtO%-j`a3h5i4vU<!y2zNA-_((&dk<NWzIkw)Q0}B1F4e
zzS-F?MZ9d{CFUb%xzo!gU#e_Q9|&rm2>$|>o@)LpRmmUfLgoSDGYVce>c)^#a^-Ju
z)E~Rc&9AXaXcVgqJS0+6egIL7wX9(XC7}$7U;Kf{u}TzU+-Cr{=l;!rK9k%Dcm2c?
z$IhmebzIM-*}2>*q%xXnk==O-j;P7*&?>4=yy0b%UK4M4#DsYED95JGT)68>5^9X@
z_LIGYuecL5Q$6og>gf%JUUuZn)S){~yjuGv=+%ZC8LwHof+R&~;}Mmrk?aJj=f<Ps
zss=rSGjjYT6tk880Dj0?jF&9NbG)ApANO$hxD8A}iJm&1tvyq2<Tb0OQm<GK=Bgx-
zwuarF6p?xjOuH%-RlS#Yxa)gVIDe#?BMo=Nr(oq~eRVYco%MjWFu@c;bb1NolC;{4
z*o$Snsw?&h+uVS2F0p-N-Zv&uzTIosm*R3e=%Vgst`KEcF}m#?EcG}ecJrOPSodvY
zLE`E4a@5K!&e;o-rxi?+pBw=D#4jMEqV5_LmFhGoAu^1%G<K^u74Jb6!o$0H6fF#f
z@d!a|E{E*;B<}($_jEw$^W~JPS$Ac9WW6+-cm<5L6QBT*RYNtjVQ%AHOKoM?Soy*T
z=K55y{6P3S&KP(i{k)INLg8LwH&!k!l1jA`i0iu=@^}5ceeL`mUd1*Icm1RCrIHT0
z$FI`R*0S-E>{+iS8lGtke~<26>Erc&23Dfsc^t7sy-(w(vGdVz*EFRZJuoWeptq2J
zupE{ef}=0O5F8F1BJGj@slKRQkEk9I9!D@$HVOyX*-|XSzjIL~<0o>jUtCwg?NlXy
z3Qk(qfo)dq(|3DXQ9-2K^4Vo5qPAw(h+0o&iPK)^6e)3Xi?kkNFpf292s_dSNEB12
zV=5RW$W!bqq<$`mpo7+D0k#GxaxkehjaZVzb;vyQ9(N^62tIV&2psqqTMMANB0uUg
zmiH?{W6C?0_iIu7<_vWWYRQIDD-KiAgG*SP!ok#;v?)kB^)GI5VQa}R+z1;YSW#;T
zRjMyK_Cl4_^QMsqZo)#hr@~(|xdxG1-Q-xT4Y};yO$Tw`+h1Utl^WL?W(~1OSTytb
zYjH6?Iu4N2Z~dIIvtMM$lASB-NGrQltg`JH)pai0P0BVPCytEdr@t(+v0o*(C8=Fl
zUxjiwR3`<gmsZAK$cu?i^CGIhDYd<%E{XJ0BP~0cVhwrRiHc{>BVm8BP5so=uDCvJ
z<O(}vkaRbu{(Nv@gM5Pe=6{BUH^i;J`UV5xoB_@8X#HRq<o@;DZr9(gk004bQI>|4
zB<dx|H?Apmp|AA7N%u2t?xKw)t8^z(wU)V!ck*Le`zFuv8uloQDMTjZKNB*Zel>X}
zMfl|F;6Qqe=m<8K(L{&a-{6*CyvY@7bR1nSSk7+T9s7Uo1|_uG86IY-PFGmwi)^PK
z{?>Z*+BgKz#!p2Ff8z;N02zUBM<+8<a2SbV+XSS%<JlvVCu+1*Jiv2C#U?F8-D%{5
zG^?FPeu+d62_lEs0~%N}Vw+6#3TvL`ron}q319WwNQ*rn8iAX{uVVe~MB;&{bBmgr
zK1i?5xp6i@IDOz&@7y?^z)c^xO>u6VP~gVt1S;qnHnMPFHqQ4S6V$xBSo1Y-t47t_
zj$ygpKx(!FE3Np3RL)g=9w{9hqWC?k)|SSoI{PjqHTGRf>g&6d)Yf+?sjKgDa7}Me
zO}8IyJ&O?hlF&i*gKO}-u5VC3*Rw~`RDQ>94C?nI;vXDB-+sR;ifpu+i}k8=DeY6|
zQre@=rL;etOKER9m(sp;E+ai@^*g|R70ji69W3(?(|_+!*{p>W->=>VXAV33^*?C4
zn{DlfF}}x}xM@__3;IWY4vhZXI{I^P^k;7L=kVyyy`w(aUae|`PWhtq`43Pilr0V;
z9{AFw+0zlz8IZJb93kEBAUc?1<$%FkoVu|G*@;@vW;U*|DB6f(0I-!|!EyES<E$&U
zcs;u~b?Xz?xAT*<kTi#MJcr<|9SzwtvDuxvZH~&VZL>Rd+w4x=HrokNa`PNhp0L?Y
zh|c3u-3;9}k4TPFw>1D0h;N3ut^wGJ;I6x`5otqeH#|dza3Zsi;|<S{DV)e`q&mYh
zb_z^xQIa!;S+2y)`tx}MfURcyIWeH6OxplSU~Ze(7}_QxhPJI3otxVsU`k~E<^?{q
zaPwjxTC`a+kbIYH?)IUjn+JSo`Q|}EQfaYy*mqm$go*vC`2xj$?dCh7V!wh5w^-VK
z9T#osRtkPKmu#^-{F*M^GU)uQvdg#ld0Bl|4z!yeX1f<SVE({j2P_P%buSv|b}mZ-
zYu!r+2A#|D5^LSsW}L?9U;fJwF`*8Z`pO;ZAU6NKw?Lh7@DL1j#u_u>%}{3#=^D;z
zr8$B_w4cfF(&dIjZ6?D@R~rt^Hj!<*xKZ6O*F<^2lfU9ZgfPJAH4!$vwW;-l!-n~R
zD0pOHAPOE?6o`UHmbf0H7)xDm0WEhu1+>!jlCZ(`(Ax?}b7ULz-ol{w76rYxB<Q`R
zLGLXOdT*ucJ*hW2sQZ?w`@X~M8;txow?_{e`D?=8J80y85V3pfzFL#Nf3xnB{bO|>
z+C~oUJ|h9Gjs&zh5-`h=fZ2`&BpnHu3ke{F1SH^X@3Z_@y018x7ZwNeqT*m)QXI@n
zi-UQ2aWJnucrgEe)qV3=Xa%Fsb>FDbcWC!r#+_;FzG(n&PPiW=-H{o?^`Q(@ADDzh
zL3IUuit1P{Voy=bSgzilqL{H<i#=!Ba}LkWT)QJ6ojITEbmkTZF3SR-I{*d(U@!oN
z0a&d^nYAE0x;~OWodQnU-1urA$EUwnI54H+ttJ<v2t@=ll{_(rVE?Jfg&GEv3)KuJ
z7wQ^JF2Y$Owl=M}8OBX3Zi;EMX=Rp8F|#2gnq0WUz&dds`?dUHR);~o?79W@LO}wX
zZ2;6Q@_ZA@XBA930yr0vC)GnX>kUviF_i}Si?KA!KNk!sE-h4#BZQGZO?^>dNB(A8
zNt0I6xs+6jhy0>?x^D3b%^noh`t`5w0u{|o?2K`t**Vludf~ZGGJ~`48QhJZ@fd&P
zx@J7j|3J{F6)I}NrtMaLuCBKNH}afqMb%u542WOSd6abMvPmxrQ<76hqcp)B68re9
zlwvtGK!C$aHI|!WPws=zNynmupr}-2F+>i@0nQQwpgjQQ1i;JyXbFHu0AwbX@zT#j
z=dN_&VVt?%zWbW${CCuUPw?Ln|6S?c+N{AzUS8YITgTJ4NqxypQmSeKDeVN1bBi{f
zgMOIC1oA3V<Z_emU9XQhj3-}8gA+%>Di80lQYl{GRn&^28tKG)kQBL?EVICIR8vc@
z9(Ry&6nDY`iHWVieBWm8rLOMmN~aSp_p4*w#&-q9$^D#<KyM1><RX*d$>$c}D05SH
zDkSm@?e@;+ZZr4YxjFhK+j~hN|CMFhy*eR>M0!g8Kc2FkZQ@1zs?f3S%kv+k@A3Oo
z{jm0|E0p$|*|Yxim2MY|f9L<TXZ@!3tOv&Z|J$B*^k;=aQ9p7Am;mC9vS)q%-!bU_
z=j>T8__@o!qN_V~<l*gEXODRcd)B*QMI!AQssk8p&-zNDX#`!y`+vfo_47w-<Xtr3
zF!ro3y<B61`x;yf8D-CU%u6)pg15J4{R5@_miDYC++wsC={ZT>{detI-*vGm8TU9+
z_&;UO`YJl)|Jt)Yyj|_%IOF<%)t>bwm4B5z>kHwg2KKDKBzVN0_4<+@!)?+R%FB8K
zC+;`YMx@PnLIS1i(aW#II`j^v+1I?R>7su;_W*0yX_Jw`LDN_w!@xk4qq0zwxrssB
zvAX`cYp?DtoEk!QtZ?eC&+7TKJs-5^zuEK0_PpDk9rjGw^V9Z>+w)v|o@&n{?YZwW
zN^QuVKj#?~nfHwd%XC8UD8ji_6Pb=v5wE8wa^s#JZl9vNt!G^%|6L~S_3NokUDxJh
zL&po{$vQnwK#VbYv~iCqeqC?wi!gqMLb;21h41S&w|HhiZ!?J~)kXIp&%oydTZOdM
[GIT binary patch payload omitted]

diff --git a/MiscLibs/robust_loess_compiled.py b/MiscLibs/robust_loess_compiled.py
new file mode 100644
index 0000000..55e2405
--- /dev/null
+++ b/MiscLibs/robust_loess_compiled.py
@@ -0,0 +1,284 @@
+"""robust_loess_compiled
+This module computes a robust loess smooth using a quadratic model as defined by
+W.S. Cleveland (1979), "Robust Locally Weighted Regression and Smoothing Scatterplots",
+Journal of the American Statistical Association, Vol 74, No. 368, pp. 829-836.
+Both x and y values are required and are assumed to be 1D arrays (n,).
+
+Example
+-------
+
+from MiscLibs.robust_loess_compiled import rloess
+
+smooth_fit = rloess(x, y, span)
+"""
+import numpy as np
+from numba.pycc import CC
+from numba import njit
+
+cc = CC('robust_loess_compiled')
+
+# Set constants
+eps = np.finfo('float').eps
+seps = np.sqrt(eps)
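+# eps is the double-precision machine epsilon; seps, its square root, is used
+# below as the threshold for treating computed weights as numerically zero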
+
+@njit
+@cc.export('nearest_neighbors', 'i8[:](i4, i4, f8[:], b1[:])')
+def nearest_neighbors(num_neighbors, idx, x, valid_x):
+    """Find the nearest k neighbors to x[i] that are not nan.
+
+    Parameters
+    ----------
+    num_neighbors: int
+        Number of neighbors to find
+    idx: int
+        Index for the target x value
+    x: np.array(float)
+        1D array of the independent variable
+    valid_x: np.array(bool)
+        Boolean array indicating valid x data.
+
+    Returns
+    -------
+    neighbors_idx: np.array(int)
+        Indices for neighbors in x array
+    """
+
+    # Find neighbors
+    if np.nansum(valid_x) <= num_neighbors:
+        # If there are num_neighbors points or fewer, they are all neighbors
+        neighbors_idx = np.where(np.equal(valid_x, np.repeat(True, len(valid_x))))[0]
+    else:
+        # Find the distance to the k closest points
+        distance = np.abs(x - x[idx])
+        distance_sorted = np.sort(distance[valid_x])
+        distance_neighbors = distance_sorted[num_neighbors - 1]
+
+        # Find all points at least as close as the num_neighbors-th closest point
+        close = np.less_equal(distance, distance_neighbors)
+
+        # Find the indices of x that are both close and valid
+        neighbors_idx = np.where(np.equal(np.logical_and(close, valid_x), np.repeat(True, len(valid_x))))[0]
+
+    return neighbors_idx
+
+@njit
+@cc.export('tricube_weights', 'f8[:](f8[:])')
+def tricube_weights(distance):
+    """ Convert distances into weights using tri-cubic weight function.
+    Note for Matlab: This function returns the square-root of the weights.
+
+    Parameters
+    ----------
+    distance: np.array(float)
+        1D array of distances
+
+    Returns
+    -------
+    weights: np.array(float)
+        1D array of weights
+    """
+
+    max_distance = np.max(distance)
+    if max_distance > 0:
+        distance = distance / max_distance
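+    # (1 - d**3) ** 1.5 is the square root of the tricube weight (1 - d**3) ** 3,
+    # per the Matlab note in the docstring above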
+    weights = (1 - distance ** 3) ** 1.5
+    return weights
+
+@njit
+@cc.export('bisquare', 'f8[:](f8[:])')
+def bisquare(data):
+    """Bisqure weight function which for values greater than are equal to 1 are set to zero.
+
+    Parameters
+    ----------
+    data: np.array(float)
+        1D array of data used to compute weight
+
+    Returns
+    -------
+    weights: np.array(float)
+        Computed weight
+
+    """
+    weights = np.zeros(data.shape)
+
+    # Identify values with |data| < 1, treating nan's as invalid without runtime warnings
+    d3 = 1 - np.abs(data)
+    d3[np.isnan(d3)] = -999.
+    idx = d3 > 0
+
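+    # For |data| < 1, |1 - data**2| equals the square root of the standard bisquare
+    # weight (1 - data**2) ** 2, consistent with tricube_weights returning
+    # square-root weights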
+    weights[idx] = np.abs(1 - data[idx] ** 2)
+    return weights
+
+@njit
+@cc.export('robust_weights', 'f8[:](f8[:], f8)')
+def robust_weights(residuals, max_eps):
+    """Compute robust weights using residuals.
+
+    Parameters
+    ----------
+    residuals: np.array(float)
+        1D array of residuals from previous fit
+    max_eps: float
+        Smallest value to be represented
+
+    Returns
+    -------
+    weights: np.array(float)
+        1D array of computed weights
+    """
+
+    # Compute median using only valid data
+    s = np.nanmax([1e8 * max_eps, np.nanmedian(np.abs(residuals))])
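+    # Cleveland's robust fit rescales residuals by 6*s, where s is the median
+    # absolute residual (floored at 1e8 * max_eps to avoid division by zero)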
+
+    # Compute weights
+    weights = bisquare(residuals / (6 * s))
+    weights[np.isnan(residuals)] = 0
+
+    return weights
+
+@njit
+@cc.export('compute_loess', 'f8(f8[:], f8[:], i8[:], i4, optional(f8[:]))')
+def compute_loess(x, y, neighbors_idx, idx, r_weights=None):
+    """Computes the loess smooth for the specified point x[i]. If robust weights are specified the computed weights
+    are adjusted by the robust weights.
+
+    Parameters
+    ----------
+    x: np.array(float)
+        1D array of independent variable
+    y: np.array(float)
+        1D array of dependent variable
+    neighbors_idx: np.array(int)
+        1D array of indices of x defining neighbors
+    idx: int
+        Index of x defining target
+    r_weights: np.array(float)
+        1D array of robust weights
+
+    Returns
+    -------
+    smoothed_value: float
+        Computed smoothed value for target
+    """
+
+    if len(neighbors_idx) > 0:
+        # Center around current point to improve conditioning
+        distances = x[neighbors_idx] - x[idx]
+        distances_abs = np.abs(distances)
+        neighbors_y = y[neighbors_idx]
+
+        weights = tricube_weights(distances_abs)
+
+        # If all weights are numerically zero, fall back to an unweighted fit
+        if np.all(weights < seps):
+            weights[:] = 1
+
+        if r_weights is not None:
+            weights = weights * r_weights[neighbors_idx]
+
+        weighted_x_matrix = np.vstack((np.ones(distances.shape), distances))
+        weighted_x_matrix = np.vstack((weighted_x_matrix, np.expand_dims(distances * distances, axis=0)))
+        weighted_x_matrix = weights.repeat(weighted_x_matrix.shape[0]).reshape(-1, 3).T * weighted_x_matrix
+        neighbors_y = weights * neighbors_y
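+        # Because the weights are square-root weights, scaling both the quadratic
+        # design matrix [1, d, d**2] and the response turns the plain least squares
+        # solve below into a weighted least squares fit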
+
+        # Solve the local weighted quadratic via least squares
+        smoothed_values, _, _, _ = np.linalg.lstsq(weighted_x_matrix.T,
+                                                   neighbors_y.T)
+        smoothed_value = smoothed_values[0]
+    else:
+        smoothed_value = np.nan
+    return smoothed_value
+
+@cc.export('rloess', 'f8[:](f8[::1], f8[::1], i4)')
+def rloess(x, y, span):
+    """This function computes a robust loess smooth using a quadratic model as defined by
+    W.S. Cleveland (1979), "Robust Locally Weighted Regression and Smoothing Scatterplots",
+    Journal of the American Statistical Association, Vol 74, No. 368, pp. 829-836.
+    Both x and y values are required and are assumed to be 1D arrays (n,).
+
+    Parameters
+    ----------
+    x: np.array(float)
+        1D array of independent variable
+    y: np.array(float)
+        1D array of dependent variable
+    span: int
+        Number of neighbors to use in the regression
+
+    Returns
+    -------
+    smoothed_values: np.array(float)
+        1D array of smoothed y values
+    """
+
+    # Total number of fitting passes: one initial fit plus (cycles - 1) robust refits
+    cycles = 5
+
+    n_points = len(y)
+    smoothed_values = np.copy(y)
+
+    if span > 1:
+
+        diff_x = np.diff(x)
+
+        # Treat x as non-uniformly spaced (the general case)
+        y_nan = np.isnan(y)
+        any_nans = np.any(y_nan)
+        the_diffs = np.concatenate((np.array([1]), diff_x, np.array([1])), axis=0)
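+        # Nonzero sentinels are added at both ends so the first point is never
+        # treated as a duplicate of a previous x value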
+
+        # Pre-allocate space for lower and upper indices for each fit
+        lower_bound = np.repeat(0, n_points)
+        upper_bound = np.repeat(0, n_points)
+
+        # Compute the non-robust smooth
+        for n in range(n_points):
+
+            # If x[n] and x[n-1] are equal, just reuse the previous fit
+            if the_diffs[n] == 0:
+
+                smoothed_values[n] = smoothed_values[n-1]
+                lower_bound[n] = int(lower_bound[n-1])
+                upper_bound[n] = int(upper_bound[n-1])
+
+            else:
+
+                # Find nearest neighbors
+                neighbors_idx = nearest_neighbors(span, n, x, np.logical_not(y_nan))
+                # Store neighbors for robust loop
+                lower_bound[n] = int(np.min(neighbors_idx))
+                upper_bound[n] = int(np.max(neighbors_idx))
+
+                if len(neighbors_idx) < 1:
+                    smoothed_values[n] = np.nan
+                else:
+                    smoothed_values[n] = compute_loess(x, y, neighbors_idx, n)
+        # Non-robust fit complete
+
+        # Compute residual and apply robust fit
+        max_absy_eps = np.max(np.abs(y)) * eps
+        for cycle in range(cycles - 1):
+            residuals = y - smoothed_values
+
+            # Compute robust weights
+            r_weights = robust_weights(residuals, max_absy_eps)
+
+            # Find new value for each point
+            for n in range(n_points):
+                if n > 0 and x[n] == x[n-1]:
+                    smoothed_values[n] = smoothed_values[n-1]
+                else:
+                    if not np.isnan(smoothed_values[n]):
+                        neighbors_idx = np.arange(lower_bound[n], upper_bound[n] + 1)
+
+                        if any_nans:
+                            neighbors_idx = neighbors_idx[np.logical_not(y_nan[neighbors_idx])]
+
+                        if np.any(r_weights[neighbors_idx] <= 0):
+                            neighbors_idx = nearest_neighbors(span, n, x, (r_weights > 0))
+
+                        smoothed_values[n] = compute_loess(x, y, neighbors_idx, n, r_weights)
+    return smoothed_values
+
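+# Minimal usage sketch with hypothetical data (assumes either the compiled
+# robust_loess_compiled extension or this source module is importable):
+#
+#     import numpy as np
+#     from MiscLibs.robust_loess_compiled import rloess
+#     x = np.linspace(0.0, 10.0, 50)
+#     y = np.sin(x) + 0.2 * np.random.randn(50)
+#     smoothed = rloess(x, y, 10)
+#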
+if __name__ == '__main__':
+    cc.compile()
\ No newline at end of file
diff --git a/MiscLibs/run_iqr.cp39-win_amd64.pyd b/MiscLibs/run_iqr.cp39-win_amd64.pyd
new file mode 100644
index 0000000000000000000000000000000000000000..cb919a42c9407054e036f49c7f05488f19e29365
GIT binary patch
literal 69120
[GIT binary patch payload omitted]
z37xXR$Q7;}9654t*|*e*Mn2)~i++*93<Hio{t<-z;#^?SFNEEWuR4ISm|jDUaToyQ
z5);dgunfhDg@?quLsOiTHs)a^?x#coQL=`ZmXf6kj~`gSGOq8AOba%=I1~2=4msK{
z6K$YjSjnJFum&DKEYyUVB$eNf>7o1jTX~i^Lge(%h8~e&r+%Q9WekQ$50Gm&+)N}_
zAxOy?iidGoaeo)30Cqwyoz;$%tLYYFhq0MO>%wa!p;mL}RmO<X2mWslvPBF`p=~_K
zNOnOJunSv2b7PdKm9Pp5#4O%}1(D0abhXP8y~XmDh`=ot?~st==7F*sjTP;p8pVz%
zvLZQRSeH`?bp`Vr*J{|o*sbpKYcF(NnUJjq?h^YKV%y?++^{ImXc#NEF7805g&Gc{
z6d$327DAVPjV%=&i&d$i?8jqsLXKf5j4ZoWxd3*c*rXVb0j*Xy-U>O27%a3$Y*Ii2
z<_Mqgw$N0Oerwwpqz%g(+KCGR=6*qJ|2iNfdh99otwW-FwTBDgfkctX%RghOgow{9
z(G?<vkmxY%8zVX^6t!XXs`UKfE+}_}w7$4vg#Dm%&HW&3vTz$n_b<#aw}J+UF;Hv;
zd47v6ALfN!9{*K2T-mQN>uplme?pFLej2XDPs6)!+)a+!e|tgHRa6BAaSqaeJsSQd
z+30VSR7aOjhbD_)GrP|F9_XKB=DR~T`|DCsgz?Uh<1y)iv>ueM=&o8VJz>yB^xr##
z=a1{Z5%Y_209RIIr#no3*x73e?St}Pj1fZQ0OKF~JD4oQC}hqUqGQJRMGcgy#zMZC
z1?b5*ADk*ocg(62pir3nKSg6<Canc><7l2s35+K$1J`_@SO*Y_PH`3XbJR}8WulHk
z)w1Nxx)8=nal=lx^QiF>BoSjeDsWDODL)<lqNOs~Qam|_BitsD4w+e~-xj2Z^f%oR
z<Lx|VW{ig^whcB@!&o~s4-G1Z)6Xvzfui9Um>L<lTLk{gwql;){Dfh3rkw8nXE_xk
z&{R>g3Q<&zr?`)&hEb&tFj!942T_~<27N#)!<I?v&&J|`uJ`Pa<7`S3&7Ei#H@wXg
z)l$AuFZ~&!^o})>&;5(V5<Z1#ussTYVF2O$W$-p}0^?uRc>rU&dH$l@XVQRJdH6<M
z%lv3*%sT143e(G%PjFhTVd3SdHx2{#A$W={)0zx8I0*Y~=OUrnGJTJ}hOH}nSVvgc
za?7;0@TWq1&qBSUyBIVz`R#RT6BXn8Jd<_UI=ELPSJuH9j5+GhWL&Hglcc!jG!>;F
zpTRA-&8Pcs9+qc(3(|_e96dW1U&ouq`6!T|;|>(S7d_G)$RCCph8}0NkEF9tYE?gF
zECXq{<zXinaegjDqE&K@#wASnSS119WfT$|pBi!mkRE-;DRvqvB*2b7W@;Y*vciH3
zm*FQ*#I=R}uovNpa|RKw15(l|9{?+C2S9-09N}j|0>>%TJ3@}jfG{cY4w%u1D6+iV
zghx|x^2VXX^<>t$s?Q_DKJ=(OfBAfD`sliW%nfFqC9=gp^pHq`ZHi09gqH#+lys_?
z@2B5%bA5Ee!qX$@h8KqDovrO%T?W40>>Z|$xFQ9m_09-6mWq5<Rtq6n^%1t?!sFUR
zH`Xy?WA<5k1RRNIg0_8(hUyCQ6*qH1<8a5;6Yqktz+>gxFTtrj8o|=GL2#UqXsmb|
zuBzIl%a7ed7k)XOMJ}a-_w@rqVG(JScOwX^%g)Bj;l5IGy3Bv-T86^H9ZF0Y;_@6$
zBP__V{ptvzeg&2$`(K&GY0uDJZoQx!cJDr{E12XQu-*X&s<!hn#rhw_#+^_sY}TA3
z6l+uvep)5}Q#ReWs<UiRtfQj5*WkoyHoriOC3eVhsi@C2PTj!z=zIyFVpthC0i=qx
zcB<G<;2Cl}mBuNEdZblWfoVRj!W#RyvE<k;SWx>sr6I=-AgEY@flpgN#ZZ+8;KL&u
z$dKn^kVglYMI5_*K#04jnR)&>F)Fr;K}>Q5GoeJ!7w|9eZ$B$qtGXeI+h9GpmEaP`
zdg2&v1tN<%ty3R6hxmx8enj$P%!SLQ6W-#G!zxgO-c<;X2)!FhkAS24Q7BV)ivOSK
zT^x!b$L!9L2$NP=`!qk;SdQImYM@rRTC^Sx@-Tvh9B~)3a2yLgLt=Dr7!>7q6ji{n
z7yGM12meNlfmYe%%S380;xK9&DVXTP5pEI@_6hNZ%*e6j6&l)Gj2tb96V1y=?kmgS
zG|O-qF>@2c0hW86ayMPE^9h$Jhj5SMBBm6T9f}Q!h*Rhsj15Ne^DJS&ac~Wzp~t;W
z+*z5`NY27VnPjMKhAEB(g1?+(QhunmW*H+X_gipJOC*m_jbgz=sRlRJ`^W-%%>t$Y
z3n}2b<3H1qe_Uv4N$k#zECq}yt@29f8t+Bt;O&HVQIrE(Wk1pXbEEpd_@5uR+k~z^
zjH;f=1q?oV<l(JfC-MqUHfKwKkpRO&ZvZzhtYPmJcZ=~QY<$i<<j~-ab37-L$?4`^
zs3^7o#dL$o<5j^#OgCjAM=enFbaM~Gqo$jW$*gm_d7BW^%^?4+7z$7TaodzOEV8c^
z8Dqjp7i^%qPBG!Eg@3!_wCE>9bRG^v1v9x4#T0{z!@A80+e}LT%=I0Oyv$tGnm1vw
zzeSrI0&r3QX!o9AiTO@!&BY2p6y@(CPg4M|0YwX-is4ZL_=wCp1+bSeLI5yG#O*PW
z{c9p)2;dUICM<x*;4cLb75%t~&V4nGh~_FhdPmJj!VcAoh^Ul2Y)^+Ak6!>wFg|8|
zCgdee;Xv|?M<|YGL^h(%JU<)`PchLxgY>MKvMn%*AEOV>57W^>1~3)w9sLJmqX);M
zN^*ls^jfS6#i{N{)|97&81s@FWSWP}G`oq}>tWB(4gvdE7HVZwtYX6KeBg<&**S;h
zF>Dp^L8la^#B7<xf`>7r&&EDL=Na#fh~*g+aYw5TaZsp1NMW8Cjy^*PB9D7}gU4yd
zhyB5){e|##E$jw5UVTWgen*rK!Z~>U=`}M?eupD&J8q)Mv0cb5V!&EaloP_Ok?_d7
z!j)r5h}Z<jE)_QRbDU-bV$)4F+{Myofqg<#q<1(6gggI5c;t2^`GxaR{Oys9`QM#}
znalhW4RHghF4#9JM8>MRpY!EL{E6$0kx7M{i_c`S=<EtbU)(36>fvyhh`7Nhf8nU(
zPm3z!cSaQ$V|w<riCT@i{wUAaBa8~{y&BD8e>&tSTZ>l3{u>ELS*O4@7s?*JIXx5s
zV0_x<^jD8DrI^&9@!?rq6<rWls5KJqK&6wwE$oNQaGQa_eFQ9G>>MKaLof{o^WOF+
z_aT=NO=kGZ(cxnLdw~TBB;QLS&p;+c#p>i|jwjf$+=GuD8`q_V?Csc2oh><xBH>1Q
zFX7<M12&5~ldJW8$Z@%hpKQke_)#W2J$~m_om#KswEG0sIgB1i-X&5&^iR=&|DH&!
zDx=Tw<Y9uexn7mYej}2hl%)dOj)@i%r)^+LyA7Kv_-MY`;=rebxt9x^fruC(hl=!k
zM*=1G--^8h(T2=7w!cQH4)$k7hRMAJ1C;v<npP;cc=0W)0Fg!RE>nCgY%mo06*>a8
zyoe&Rtbyb`2%wU&uw$_qjqPs~I^?)dq$F<?wsS02ZDO|P0kNja<wiM1-rtK%l5Znn
z3qq~N!X43$rqfKH3tbR9<LCHBU52O@FJ4aL(cQI9?zWLH3WQ@m7)w}~PD~9=6nZmK
z=9CPA_j_nv1(NObQ`zo-mzftS?KLrm#qjw?CVd?Q?+r-MNFvp_3eHG2VRt0Ib5*%e
zggZNvUJWPqKj;xiF7EUn1ZS8{*m0WudkB_?uP!q$knj^%d?U^sB%@v=+y`jK^vc@>
z*JQekx;!+|v9GaNF@XK+<u@QlmzKXjcO!RtT=557BRU{Mw>Ad<+wmTp?*weq;=reY
z-+B=JfpfM0Dr|7y1`U`2O}GtedzDh#3Ne!lRTZD%KV4fPU%5YS?vG*dgu-LXG%wI&
zh%0_u$Z-dm1d=BSUeLz@a9~pQ_r==@nLK7j4UU?NgaO-84PDEx)8o&edlXtse&U1+
zpMa^$>>OXt;rNmuvwc^X3dnW?9ME=oL@s)3Ai0Tw)cwF%*u}WT;V*Zh55Ug5M<n~9
zJ`ip^vpnY3Aj=VQ_?Z~*6A2z@v1~Y)wG#Ri<OPWblCNb1Ub6E~LDpBH`^`Z2>n@Th
zp~sF6s{SHBp1KMvl|9jhec4cxZ1<PnqZ*9ZkEI-1K0=Odwx-U54ONK<dyYahPLdmz
z=@V0P<v{%ktsI>53S$R?yj5Td3}<^RW^@^q>WyeZ)|*91+J3n^B3GW^pr-8UxUm8U
z{vQ!5wbAXz9SQ%n*s92NBEQR-O{m}ZFm58n4f_HAEfqF_HhYuY>eI0GktbjX$N!+J
z`^^bm;Mz!ROTi8&FOmCg?+Y`_W`_sR0f|nYCD2<yxEj7pk}647OY$2KHA!J?iLzhN
z#ggtuPxITxAQDh_iAHuNiK$Pt!DEh(c*`@13}BYt#O*?kgBU6k;3tMkXsvNQhDvN2
zHvPa*nIa01{NZmn9&NxAV*P|f4V90Z?#TI;2l!}*NqmU|-|Z4_a>!Rtp&U3z#(%~k
zUZcf1GUa6K58hZSPujSq#LK)mYQz1AkYg}|d7dxdB{eHedkps`mf{{o0@|itZ$R=0
z`;_(y%4J4B-Cq2`^EedGWweQ@5J&Sk{SG-ozZR9=PeOz1*$8aj2AGc8mZA?XM_m_V
z@EfNFKc7e>`k}D3a>N2M@&)y986zE>Ya-<-!#ckw5M!RHq!Ho$7C{?xCS#sK>wb1$
zK@OYi=C)~#=tL3T&fwMXsTP6todGiPpYUxO>4!#x0c15$L98J8A`(uQ^w)4l&d4t_
zCeOuAZej3qeD8&vGVyL1vJfc-Z(A|UX0g<2HFZWE{GoRL@AsdOz}6%VZWc?kqTeW*
z$n9GE(i9z|=nabAqUb$}u2u8}MRzEAP|-N<KjN3Fr~r5jSGYvc35w>$#QTTBdlfwx
z6TVjAhhqFY6#h=pZari<`Y1X?(W#1h6kVmLS<a_b`j=z)H;Ml>!(#bR(W8p?P;$&r
zbflv9sPO3umnwR<qScB%spu^#{zOIP|I%Tx3{v<4MH3YLM$F<C%U(sdD!NY5S{1KS
z;W9;MDteuwqZJ*fs8dn9qMwP&N*2pLMPF6)kBY8Q^d3da6um*wk&0$0+Dp;zY_fg6
zRJ2*qMnzvx^hrgl6<wz2EsEZt=pR*mYfP%jKTd@YRJ6CE35tH1Aj|QdqK%5apy(5d
z)+oAC(Zz}uDmp>YF^UdUw2z{0)y|0uoBFonMoR@tU{ZUv#F;VFj9)!XhMV}mm$trU
z@)Nd@2|%TkI;jjgewU=JS6cClh;FHVQkJvC^n5D1+!YBA-lT$7CC>EDo9~%dT3)lN
z$93LP&%6c2`HL1UTvYR`npHzey$fa*Id7Tg@f6Q?mUv5NdFCxFbr#KBxQHIJy~R$?
zLZ`QMQE~ag`HSIGzI4INh4X8wY9`!Ug9|O@?>Ddh<NeQ9SG<@sdg88rU)*qPQO_P&
z$#ni^EnHCM^%T$eg|`R=m|xr(B3%pSm6y*eo#V_eT|95m!qNrBrJmu=88=SI95#Uk
zg!ix+GZqxhE1fZ8P-ZN%8G|m*8Z&s%tYMz3CykvksAS5u1=mk-Uq5HioSB!Gjh}PX
z?GtkIXAGX5e?{)1MMX<pv)%Ku^0UScUEuW;&75C6v}pdkIi<z3^JhmCYF1Hcx(D^1
zxp0ZIh^d?<ixw_$PF#A;%v*|Qd7Se~A(ql2&%DLO&c#LZy~P8a&Pj8NoeO2~f}*7}
z0ZN=j&e`)yN{SbuK2FclvT(h<rMH$Yysgw(%!Y8@Hn+IcIjd;?e2PGEHQPcKYNTkV
zfwLAaDjqni%sX)G#K~h9;vMWI9y7}#k27;gCVyGuE*~>!$k3sgmuHSPtMqT=H}4mV
zES7(xJ%%9xTVU{iLlc<Awfygwe_n2O-hWE|LI!0|%l_;)CGyN^{d>6zitOLrZlErP
zd-qLJb?23%6EAemoHu8Lvk+bb%jex*?7WnUJ#aR)a^O5q@dA1lIuUn%(V{uUi=3Xh
zMWuKYI~No!nYX~Zz*)8seH0zW$(RG9hk&UcOxW>!*XLTD^HzA`E<RZG#;TE17I{l&
z2xUS+{|$Z08l)0JsRm{K7^Rx`FFH2z|NmQ`3X4k@qj#2;d&-3jCroy`F-SNI7Zn%H
zo*@DXReK47@!6s9DjWKC+0vR-T}x++_QJS4@cm8SzxCJ~mweN-wc^3NdpFiTKVswW
zBGdmH{aGaY_s5ShSLBWT&+G4!KS_PX+Cuy)6t0Q!U#oDfqWLPmeO<J^ufN19=JBT$
zU8`tC!p&kDdVTk>>R-O@`sU$3fAZY24=)V$_+sc?W*AwXDO_bG2-;(~!Z+81Tf`=M
z2j|JSl3_LemG{<MSCcV5ewB4(&4dXPuI7qXJjPo*H8XkdH2iz(t?&EZ@RO`h%O}6~
z;jix6+urh#Q_bZvO6(M7VMmGRFlDO#;r{>UXnCN#|Ht?LurT1LeNLADPs+3GuhI3f
zKPxfrd5K@G!U|f}{zZn5RURD*o8g%Xk5L{KG2yjJerC8mCLHn{s**;Hr>@f3#Y@8D
zX|FlO9uAexg<j0NCC)`frE`j-2SUYU%JsRAEgSjUM{}R*bNiH^?&_WPyZ?x4{~wuO
z`H#)-sVx&$?C-hUe(1eZtM(1<dhxc|kKzon^N0L~DHK`1?ghmQ7A{)aSwW1Gr3*by
zm?8^j6?uxC^XHX&&Q!qx4}EiX$;x-%s_lQ)&@T5JT5HY9=+T**#e)1EKSO>!bIZ$%
z7tzEtv+TdBCoEIxH=`U@XU(cPV~0<h46pKOGv^o0D~G{TG=D+y{AsymWqC!OqG@gv
zZT_^0ix$pVRJ5Rc+Qg-vxeH4b8Js<B{=Avf%F#+cD=V6HYtfwI@@c|$nuaN8*0f)E
z=gqp6*$l*pJN~MBYm^oH@rjQnkNwk-=a+qP@z=v182d`!Q}tz$_32nI_3sbLbFI>6
zGmbMRyh&j*9P%*j;Yj(PNghdMMUW6>^gk(zo5P|IMIJili!W}SQufNOdG{?UJ~*b~
zs@{j*HS0q9e|&wWMb{_p{a>BD=IPyYdrj~C{V?mSk`<0a!I=82Rrp`EPcnz;upa$S
zw$X;pIvstyv}?*!HShkV_o3$&?E8;l9~AulXOZ<;uf|t%{>b@T^mvQ*`(HJGRH*Ws
z^|5dG-(8=Hs{CesTK|{V$N9W$53@e&6*hJ9f1AFQT_EjEdtXUu>P7y{di^-+jERP~
zv7&xXT-AyC#o}I3p_VQ^BeB&QjpO2?aeRC<PDqHxHd{3A(j^*q?HY{}6QglblElt%
zp;q8xbkwg)D}+ZMNt0s2W3ffNLlWi?mk?!JSmG0U;CLJRVRn9S6*Os1IIeW`j>6WM
z^cG8xF8Iz-yv1ov6rVaynzK|TkW`ijjMZ-i|36Sseo2$kQ6OtKq$RajR?JCN;gTk$
zqXKcs;7@9n?_L!L@zc?u*0WH4Qij*7oFtX`?=iDSd^&f<;GHq}ni#x22J_m0{7HVh
zO$h!)$}OqP|E-8{I^T@I=3-P*8UO8w@Vk{;QW?H4BAm|V7<?cGx5VHuv{<{E#;l~0
zKLkx-;wRfnx`}hs@kxWI_IplYNt4o3(M}Gu7pd7^k43Z>x!@p9@F0KU<uP~wUNVwD
z8Gdt2ct%9{mC7xtEdQvO_~u&J%<n3bJ@W6X(o33@-bZ0clhQ9xSkk2Qfr_`JN$LDw
zp~+6tr1ZfGOPZ8EL}5vj(yvfh(xmijo|1);G%0<&!jdMXXQ}v-CZ%Vna7mNW^Awge
zDg81PU(%%XpD8S9Qu?v}GCxU^(sNaMNt4pQSK*Q-r4LZ)B~41_2YJXr(xmiHReVX4
z(l1frk|Mt1FKJTxD=J*lq;w7l<R@uTIzMPkENN1DKZPYtN_QxJk|w2}rLd&3{h40=
zlFYTJ!q#*8K()`qctL8hBrY~%s`!_adcvMk|M0qq`bpf+gi3#NV(^s__(FV&@_b9C
zm6ZLVS=C3<r1TbrB}MxpUK-+)GCqt^>j0FOl$fXgWG`t_I=|~iEUBzN#$xqbk&YSC
zIuPkenSL;67BDHX8B@{~=|kXuIl@U9o((z_n3VXq%1_dy^pWr%j&M?jqwdyGz@)@o
zRR)qKrLR)yC6(pnw`<M(#~@t}(vvbh{qc3_a5(WqKE!WDIzCn?!X%aaG&30Ob4Z0t
zD#IZQ^;?mS8$Kevq)F-HFm8>-_@zgTUuJ9+3Dcd$QV8XdzZL1V-Ay;C<RA92CPdj6
zmbe%kAA=KOuq_66iNRfCaAFKjO6ciEp|6HKNiqJ-!Q5so0462A4s;?gDe+{`>w!s$
zr-I%9OiGNyFze5ONr|U}P6H+-E&`naOiGNiAnQzEQerfywHTO`_!iK)z@)?rKyL*m
zB`yOk1tujf2mJ*wDKR>*)dNgQ47<a68!##HGSJ(BNr~?Qy%U&}_-@b&U{c~LP#-WU
z@k-EpfJuoBP(Ls!@oLajz@)_YgZ>hjl(+`;0bo*M+|{u@2uw<h+iTW`fJupQHPiYC
zFe&j`&_{tuiD4&L9|tBS{ypfEz@)^_fc^oPlz1KJAAw1Up9TFhFe&k0LDvJ55<d_6
zH(*lYji4_8lM=rI`VufH@vESlfJuq#KwkqUCEfzM8JLu~0rYiXQsV8PjliVDJ3-$7
zCM9kH-33fa{1)h&z@)_cK=%TZ5+4A42bh%jAE3>^q{Q!oz6(rB{2^#7Fe&jTpdSH~
z5(hyK0+SNAgSG*a5`PK$IWQ^lA<(aYNr}G!Jq%1rd<^smFe&j#(Br_Q#NUH{3rtFk
zr}Y#tDKRcl;{@AoB_+l+1#1E@DKU=ktzCghiE(Yj+6|bLSOZN4CM7-#)Q*3o#17E2
zfk}x|L3;y}68{ABTwqe-3qbn-lM-JD+83CVI1Th7U{d0A(0;(A#Fv6*0Fx5`40Hf6
zDRCy~Kwwhh!Jt{dq{Kr&F9#+i&ITO{OiDZwbT}|6F<u3;jshkn9s`;KOiG*&Iu@9e
zSO*;kOiDZ+)CEjRJOR`VOv?G{TF`5NNr~}MbL(}$q{LG|Cj*lb-v~Mtn3VWt(3^ls
ziNkY=bZ9a7oI=E#j`*aEKMS-7n3T8#bT%+4@jTEuz@)_UL2m&jC0+=+0GO0`5oj4O
zDX|x{9GH}N3Fu;AQeu`b2mfojEa!^!g!`mR(xmi*JtdYjDSesM61UXqw30IY3ee@i
zq{R1t`hZD^4bYXqq{P*r0bo+%8kFM!l!uh~A<zdAPRjO5l-XM(mGiY}(;z&34fs5a
z_@qq#gw+!ND7251_!-crfJuqhgRTQ6CB9nmmozE;28AU}O2<4Xev&4oS12rLQaa{2
z@sl(ueYwJtCZ)fCa{UeE+o<Zd3HT-8R~6n2{2K5Ug&Toi2i~smF5owSn-ty){3h@|
zg`0uj0se==t-$XBf2i<5;E#ZV0_V>89JmemON9>ue+B%F!pDJ+0H0L&6!5pekS^LI
zcTPf_CC(b>wBj@M0(S%M3XF{jf$hM_z-KAk8~AMCRDqM`^Z`B>_yUD50`3cpZ%8A3
z(wq$7e!!P1JP>#QaHhhS17`sbQFu7;P~edY=Kzla9;5I$;IY8Ez`1kWz%Jkk3SS3&
z4e%s|rvgs~zER=nz&8OGDm)vw2)IPyTY%>P&sVq%cmePtg%<;t11}LcY0fg>rNGM-
zz8m;1;7Wy80#^awr|@cE1Grk@8sPhZf35I;0RIMfjlyeze+&GC!oLUp9q`iv=gwIN
z{0#746vqGW5ce$b28A~QKM(w}!mk3q0$i)`7T`MItqN}kZUEk)a1-!O;5`cO1AYs5
zKPcPxUC?IW_eFTpoR5H8fj?2<pMib~Ov?BjahCYc<1(#ZgA%iQy%4d!IDvRa5&v72
z-zm`V;_OyZ#>Zw?TnOO_pu|a_UE-bA?(s5Q1MUHwqQdc6^0>2sNg2OSyd^$0KGWJ4
z)Xe{n5&8E+yo(S&UGU4D^HapjKzvfh9}IdKFe!01;txT5QsOJ&KLX*TCjT`N{Kvt6
z48lnnUI6L>CMCWed?tb)De+X$$-tz<(-H4R#3v=D{Hi1P&qll=#4l0tZv~wTyg<nZ
z{|jYYDR8+8Ujpg{CMCb+;CnmxuZZAZ5y7AN;SGWC{BkeSRU*A#$&2BgznJ~Xgcxj#
z!Chi-*BG1_gOg&g#nP2||L()i@)760AB|s*!F?D>{8prYtm-DIoS*Dwu%+U%4izqG
z;$;jvuQNQ2{{s`Dq={*CC+&;Q|2!;)>CgF&&vPTf-+o($Nt%?tUtvjQc|KNtX8QPu
z^yK$JO!$W}xGn;({Yd5~Y0UcB!foBipH<jD-+80UzyHQ)yeS4h7lWUV!LP;OFJth&
z7~CF%*P<uL--`4?{7=SoON#vt=;eL!b}K3M`<I{rU{c}-K&ydCi5~?06)-9BL!iF~
zCMA9Z^kHC9;zvQ(0Fx3w4*D1{De;q_PXLn={{i$VU{c~gf<6sQO8jTgKLL{xuLu1L
zFe&lhK%WC9C4K>P128G^OQ0_TlM-(NeHoaP_%+bK1CtVO2CW4qC4L>W9+;H45p*js
zDe)Vi+ki=lcY*EzCMA9obT=?5@m|n9z@)_QfW8e(O56;(ADEQ*UC<U_QsP$7_kc->
zKLY&#n3VV+=*PgM#BHFT0+SMd4*D4|De+gJUjUO59|r9JCM7-s`ZX{q@o~_jz@)_A
zf}Q{-CI0`~I~(A*uIi2-$&xI|v9-2kTXAAor#5nmrK}YZoRAO6vK0$tdn3ul2`Rg)
z-6!wj{dn13*-|Dn3S+=+_%M(%Vg}MhvTQYqi=dQon?O`b!IaR^&=g$C&~2M3o(?lr
zr)kH95c)gsLt3q^1ZV?k**A0df6jU5oqO)N_kG;A_ucyv_#(Wb@?{lL_EUI8<txDQ
z3Qw7$^2@<Wctzz|;jye5UQzj#;0ky}<vrjkctz#cf~(;bm0u6m!z(I(Ew}+*QTgk^
zjqr-fzaHEKuc-VDpch_I`K{n@z$+@h4SWN<qVjJ9Z-iG=z8TyOuc-VE@FsXg<y*lP
zctz#k4893oQTf}zo$!jv-vPb_UQzir@J@I|<vYN3ctzzq!QJqR%6EZx!z(J^4Zam#
zQTYSlK6pjtdq6+DqVfm9UU)_24}*u`6_tMncrU!7^6v!ihgVcS1P0(0m5+e^@QTVG
z0S$OX<=+KH;T4rnfJfmKl{dj8yrS|M@Bw&5<&S|`ctz!h!Q=3X%D=lp${(!omOTtA
ze*%02{(TPrD0mY7gAV_(3R(8y3SZgBLDl~h_zC!&!=FKK9Q{d0{~Y)P{AV2g^WbOU
zzu@psf#>1B=<r_#pMn31!+#z8D*QJbeir;D{I?w5zOK8!oK=gqvh~Vto0mP(c+t84
z+GyX;ZYbFQeP_Ro#rM0BE%`@fgusd4!MXR^yzj#fe#l0zgKet(YHX{^EI*1(kB-~<
zPdZq<Uu*rBxo!K7?N4!@Imf?g2hTe=<KP7c^|*hIe!gFEC5e88JoX*Z7gh4h-%fqk
zF3--(HjX;ub1xV8tN(yB;9B`e`A8eSwnAq?FZlbtBG>ha_(*?yP~=}p&+zj8bN7lo
zPg-_A|9X*nNd6Csyo1zs02#;T!Rc$zW8d{Ri`;Tb<fF*%?&#~*kpyjbi8Sk{63FzJ
znKp;tl1L(=Bfv3$rGspX&o^(wmJNyIM0l2w=2(Owh+E#IhtZ~Fryl3?g$L+S%)+Zb
z%^f|=rPR@`Iu{qq?o(NQk*v$oyR=wl>CG*Ybz6GGvmlRdOHWQ0%KVmI^<tT&=UF7{
zv-BDk%Pc)FvV05WNNZVfm^#vHh0W{A-0;Za3$kOWcs3D%*EaY)+M=WT;DZlF`n9uV
ze%J9aoegj4YPa$pS|q#A%6p_x)@SRTTqLvgXm8dak>+eXgL%opk8UDg_$~0XLpx5&
z-)A4whRjD#r+LUOm&p?^O-7mxn|~Jm1IpWVOaw#@b75#$kkPs*3$lW?(5f>1?#xp&
zf2=kq94wO{?+u%sFa~+q9HoIY_h&4h#aAY}Zg;ckD72x^Va@4GI=scVLrnycjV{P6
zpHyDjN)aeUpcH{p1WFMoMW7UcQUpp7C`F(Yfl>rY5hz8V6oFC%N)aeUpcH{p1WFMo
zMW7UcQUpp7C`F(Yfl>rsn-M6x$s;#KYvc_>wX*5t3Rzortz6?<;jcAog4HusvlaRB
zxw7>&vOZcXs~Ens+*f5*230Bye5;fd(MqZMa=BR+gg<({97V5xXocK+@)|iXx>EK`
zu9CN$uag}YJkoe^wQRbyhCCXt%wx@RS%V+73}Id7bL?66tEB259lqHk&CzwTb!a`l
zZotltEA2F{l*Z^<dHqnmY&f|NpSLged2KDnt-JEL%5te}DGQca`>zs%<~s7OA@5qb
zo;a=Hi~SY8!YEPx4J%|rv|QE<Et8e+sqs~t%Y#)jPOSb1vC)=OMq0hDTGnN2WL;#r
ztZS{3buFv&pJX5b!yY8+XSf7O$DrtT+dV7|MRIxlV5F+0+`pQEi?yxQQrlBy>ai8*
zK8)Vuq<zk@S?Dj21|0qw=)Wx3ulzR(_VYoVRP18(*~Ti_*jgtW)9dr~=GvgzSHJaR
z*&mP|#Gaz%OHaAKtgcGxC`%n>scTtb>PL7r{p_!<km_`Wxhz=jbNu${0LzuFHL|j$
z%3q`T-5}!H-*8yu)1+zaE0&e>xr`sIua^1<sc*T|lSh4v$1Gb{N$mbq*71dAmFigW
zsFK>2D%17{Us@}rZp{i)<EZ}YW|iZ0weq^wweq@;Zk%h#ub-`-S!1padi+<-r_gR8
z?Zm2Vb+xSS^T_J-%KVDinwjNhRdDmF3R(3+om9525;H3D66ySpMK+UvA%E5Us+URi
zu8O(xJo*jQ(vYnnwzX2*SA)H3nLEmy3k;m8eo^hwVvrZg=Hu-8cQw?yDQG`e)Zf+0
zxv^F@(q=bO4;!h6jnu=&mi2ijKXbF5cgW_5_6-kEUe~`O*}Q*+c{J2Y17&QWEDe;g
zp{0IqO`bY_`8*feRB`*$bEK!B&DrOS?}~CRCHE{B&sfde@_eOP5#%aH``WDjK1UsV
zrB-soBJX>Mu|9mPs&!h(TXE%F(01s?B=>HS4nAbNVChq5#ALAy8wEO7`03W+s&ZMC
zuJvu!^Wk~gMYd9EBUMsKefUT4iL~V<+owX_ZfCWeKQ){`9NR-|-Le<fLvbA4d|h1^
zyQ`&}GStx)>d4zV=kKbLyO7m7WuLe29^=|_u{f_+;tTy;igxq5u0pQs*<h{@uJbL`
zhNw1SNUHVC!Wh5Sbpe^;DEuc$Inr#i$QMCzC)Z^5<eP78I?hDWp+w}CR;B@rh<~^%
z8OsLT={Kj#)QY{G=UUkIX?x6<E8D(;{W+1j`KuCjhinAd&Vpk<&d19akogN_UqF^e
zHd-LNh-@2qG!)1#Bb!8Tsjal$fL%wW^#<%Xl-3)t<M69qZ{Qj(Chh4XfrCk%eY9`T
zWUBl6a{sW-VsE4ao$*vC%M)Iif7l`e?U~)FLFUvJxxCCj9N2AydyE135>}WXHE<vt
z9!#eV2G;JMKcbU)=-p$ktqO3^{#4TF&87##SiZIbL08hU@;S#!N8AXdr3njrhQfi~
z;S4j_TGOw}MMtC8$YOxW=jAtDt!&Ed${J~AxKAbJ+Y5S{nKU|DJq4=?i0p8reat9s
z+y5G6I+%=wQxPMeemj}SKREW@o@SobgUK|ryy{H+vUMR+-6_2JBP=RGm=rA8gvywe
zHgLC*#lwWikCE(4MF!(Wpo8hQvj(GNuOsKas7}7!n?<6^3}d#VN@?v`{G}I3hdQ_4
zIKC%gW{2ewJg|2L_F0qQO0Qa?M3_h#=~x(5xt|<6nS9ndP#%TrN|;P48?eE4Mouo+
z>NVm68cu0fYw1wf*hTXQ>}6p9kxx}ni(1_sQ5HC$jto<y&KTiG3oT^Zv)Qy8!Pvr{
zyE|xYYV)XM@#OBVZo9}m2KAo7ewXH{Em4ap>nZcDL0yv|&>4%zyOIMbc~K)n+t3_h
z{jAeqSD)O9J(VxaD$j^$^;Vm>Ka|cGmfiMb<ZZ_A@l-mJ3G^~Id>{Q5C)S;*bV6JF
z6Y#rZ;cP%dDf{L<vRFCn#<(lfJJ_F975R!|p~uK%S@l5vvr^moZo{sXpW>NQ=Txm)
z7Ktpxq%&<8(vF3WkeShjfs#MPq777m+<RpS9Jl}2_1egWqjG4$UI)2$I*Vh_JSaZ3
z^I3c-Xr1!T0)0K@-fo1BayFy+e#+;b2kn`IeVsc+CiQ4-$66_JpBsT5nk>(!4=mWi
z^}?Ee|H7W0SUB3j!WyBpu`89LS8>{nvSstDu77&!9ZJO_{w$^MadUFobGuIUEVpW`
z_8H8i9I+gF+KI&els!YV$WyMh_RPGk`xboB2svlfO&q&xIBR4I&Uu=MwhT#QWxj>(
zNoEGA**>jV=cH4|yVMt_-t6}AR`uCx!G3M#vKlMeF74CJZmiEZ9{ahDWo4~(W$emy
z#*!@W@t|JX<QcaF)};t<0$DMw$)j(+wK)<eet%QZlZ}&g7Pp+`HRO9L>rSPP4w`NY
z*ptkrsiS|=J%?iH>|iK<Fv%k&*XfzcgGts+h{Rb>#66KY&8&9!a3xosC3kzvspt;9
z+$XclZ5=lFNZ|3xJ|mr=2wB4w$p@BoWgI4z?&NZZuYQ(i;V|i`xM$Ev5BrVufSz~B
zFs0xUO{3p@rS+}|*V~0W?W-k1`FEAtw~0R1ifJc^aYHHDTDIH1QrpVivHo-@O<l$L
z2qU#jAEIL#ZKGEk_Fh`DycchG8~uYvSW&|D@@;#1_V4Mwxy6q8?UFDOVUu{?sZ@pw
zHD7K70v&;jX@p|~v2ef+C~bEu@=P`o&?`hB#+54&;p&i14F`0da*;zFJqP#qb?w`;
zqt$0y|B?*EQ>nB(Bw-^)4GR`RVXoA%RDVF1SmAR$%VsRaDh-K5Dj7I7U}_T@;Jc3j
zy^P4UlAwf~Bz&>X9mOJV5%SkXbJ7WU^8y_(<Sq$hjA&rMs`P+1s|L{pAO>PbsHs4b
zB_2}811zy|jO8NjBkiEMyI4uWT>?kHk+Sk}G`RN;(NVFMt)ZVT0b|Gr>#{nFcC05q
z)e{Msv8F^udpk{(xrr-rv?;!$X-AV2oud78fOK)7LX{#7f*(u0%>JkmG7EPv)qVMY
zC%2zIU%6O&iq^HzKCEu0=jz`rE@jc@*fH1G-`g!KQfznj&u+T<e%BBGjaT1aWbO)f
zaCEV#W$f>pKJiA8Z~m6|TLaJ8Wl{}|3vW!=`0&ZzEVejaN8+je5LXd<2^A5S4@8EO
zdYU#DEDWV)=H-Eoc*?qq&`0O?wjj4|Zy_EX<lK`}5(+b8JNFtMjnjs%E@iE3lQyiS
znZkxQtk+5Grk#f-9>iZ(&-r3YxKdh{lxKk)>j}df%6LObT_Vchs%ICPoZKu#@7Gpl
z>~fDU+^<LQwW5*c))lZWH&!t$L$1EN-(9JJVp>}u$+gYCsWk_<bz6^j?3r6_w=Yl9
zu5VkLUWX%hdG8uqZzi!=Z>J-7_52Q>E7FS=>1}s-vq;ai^NV86@jK|0N8g|8HtEPO
zIOr?dKU-8@`=~2_PZh<f%{k7RNE3CBb4YPJ-dm*Cu!P=~IbK~-a5JEdvb+$>r)9jE
zXlgJX@%9_ukhMa`cJC1u?P%PZX>_d=`{l|HJN|iHfrE3$7T0&t?pKf9?be)~#msTr
zkD!A;YsUGtKi~CpDZAcRIS$rRPxdlyi9r_Svle{v4up8Zis(vvF{>YGUpKGj<4Nay
za@&=w)Aql7ez|s+!dW-o6^rX{DgCA7rfbJh$Q4~bmO9Rr&la7huKd+PV~B@RYh^@z
z26finb5FnK7N5^K1(!PiPCDlw9YAYy^+q0eZJdApwC%@Skv~4i-^l~ce)Cu4uj9ur
z3SCKW<Nlt$Kqx{lFW@Y=7RaWYduHQyZzElbaJo?!!qY|EoQ&LhD$dt*>XW~i7Hz)4
zUr~IR@TG8@Uc!&Pg-ZX2=@U-;-6Hsl_yD~sUeg71KT|(QEXit}v541wG;-Fv3iFud
zrE`_tcB7Z&PY3L`23(Sl=u1E6@ylfCD1B3V;J^X;j0<*oryH?9=x5*N+w^hOdTXkA
z8tzMgj5jfu$$BHaLOf>Ji;3>y?cDrJ0##=FdG+_FydiSK<ouC(8%g6dD`(j@^|!OV
zuf6*=Z^C*_%9BLGwC*@OsmAHW#yLD0iG`Bey}bRy$*gmcn_s)s-ks#7gqMXzosQzm
z+MQpa&@$|L5@_$~X}>qn?(cF>F!!n1)v)mjN00Dol=$g^$5<MS_oFO#x^>9%Hyh<-
zb)_uJX5D1fzG}RJy<NYUy~X9wCDZ2LSvfZaJKq95RT`k15z##ha$B$9xgfCcU=SG8
zF1xK!`h6`&;1nH#*c#@SxlBJ4e5{`L(a<^Y>^hN$pr^sV*u=O4X!_;yhp%T$fy%)@
z_cCva%E3K1iu@3ozPbF{cKRCsMai33D}n#AebD-!_LiGO_Cou>@4gvb=;>R@_brS;
zg3f_m9pnY=2X}Dsn}nVLzfZaZod<o~xK8}Y#!ki({Sf(H@L7@%x#CZHumjECC2|Mp
zY3MdEOUgqlcJ`qQy%+ow(sR&HgHh)3xdg5FC`t302EXCZ&w`bQY<V?!yF)A9<IsKJ
zLk_L@5r=*Ze1UX5V{;U@yxq2=xYMB(A0erqC&0SHHthi?NiQH*eAbaG{)a<X-%FA2
zBM#W_15c6KpcPq9+@fc}hZtYgjeHJ#ILy2_ssnzTGyMc~9{lYA{_|Hk*vuJy8oCXP
zlBS^x#}(;#qaVGCJdi8a@y7dkXfOCLq!*wI#|-IsqCBadF*}Mo`A@7Bnm&Q(|6_Zh
z>Ca15lC}U%pICZHhoR{|%L!5tnm(`m87T_QbG3A&@C(`xevR~y%E9F(?E%^Y9wLoG
z2f^n_Iq1SML^_se4`~Lu;sd1Tpn2YvJn53^gSYekVh-8_&u2tlgq{JXvy_W*H#1=4
zpvZP;o~<NDYK7+cMpho<cxd`=@)W5Xn*OYGAE(}+=}XEK>3(SXm$GSya}(MJ8l)`r
zEcn<kWrQvq52WLShTe?~x#Gu3=b;P7`{>x9$4Sp2SNto|C1}M}4^tLsUH$cG(lsxU
z7kK9g?H1Y(y5oKnKlTV^M6UP=Qa5xC{Jr<!KlC*CHIfNE3l5&ZFKFgllgRt1OO=B^
zBs~T#?-zL!>66fHpg}qfZGu}*iaZJJ1HVq1fu03_LYjs4{0^}pU4$M1pCaW|2VC{L
zlo8qs{xxY1IuGu7lstb*xxjCdJkZPyCmkO^SLNVGNZX-v;15VUp?x0`nIP?ko(3yF
zOxaZ*{5#TNXwNB;HKZWz-v{cL$|$s=j+xx{BO7(Rq|%By)=_Ci9hdldXvGVpdX9Z{
zHha#f2Ew-3zD^&G?^lu6RB)cMPv?93$l`-6?)JNe5^?t#=Z?l5&Avt++4y6ff41&u
z<R8VRosC|7I}{1U`7_{-#$hAVc-NgPYi|#+;%cHlKJ3LnGIK}cU^;nQCLA>qp-fXE
z7EY%!sex=0PlUIHGKuD6I~rN<I~g0`f0aY_bC#W(n!3rg1#=Z2f2~D1+A9GV9nXzV
zj!%!DAHOheo*p^vpA1fVro2<zrgl!XP5GySQ_(4N%Kv2WN$)w|xwdoubHQ`wxsh|H
z&gITcpUZz{?lTyf=3D_J=gFNK%Z&xc&2iu9w$n1<nV6lpI3crE8{&kiM$$0ho!B;U
zaw0b|IpI6A_l*C{;WNQA(KB+k`mE<{!&&Sw519#U%eCdq+)(aRZZtQYJD<yqO^%%(
zyD&C8mLHoNyF6As-Z1VZuc7gi<EO?)DaY)1e!Tkh<ms8ydnf!8BNG=UqGv|V%$?bJ
mcJJAtv)d-yCNE6RPF|Xvo4ou7o+n117@9gcrH=plZT}4k#&Grk

literal 0
HcmV?d00001

diff --git a/MiscLibs/run_iqr.py b/MiscLibs/run_iqr.py
new file mode 100644
index 0000000..541d9a8
--- /dev/null
+++ b/MiscLibs/run_iqr.py
@@ -0,0 +1,102 @@
+import numpy as np
+from numba.pycc import CC
+from numba import njit
+
+cc = CC('run_iqr')
+
+@cc.export('run_iqr', 'f8[:](i4, f8[::1])')
+def run_iqr(half_width, data):
+    """Computes a running Innerquartile Range
+    The routine accepts a column vector as input.  "halfWidth" number of data
+    points for computing the Innerquartile Range are selected before and
+    after the target data point, but no including the target data point.
+    Near the ends of the series the number of points before or after are reduced.
+    Nan in the data are counted as points.  The IQR is computed on the selected
+    subset of points.  The process occurs for each point in the provided column vector.
+    A column vector with the computed IQR at each point is returned.
+
+    Parameters
+    ----------
+    half_width: int
+        Number of ensembles before and after current ensemble which are used to compute the IQR
+    data: np.array(float)
+        Data for which the IQR is computed
+    """
+    npts = len(data)
+    half_width = int(half_width)
+
+    if npts < 20:
+        half_width = int(np.floor(npts / 2))
+
+    iqr_array = []
+
+    # Compute IQR for each point
+    for n in range(npts):
+
+        # Sample selection for 1st point
+        if n == 0:
+            sample = data[1:1 + half_width]
+
+        # Sample selection at end of data set
+        elif n + half_width > npts:
+            sample = np.hstack((data[n - half_width:n], data[n + 1:npts]))
+
+        # Sample selection at beginning of data set
+        elif half_width >= n + 1:
+            sample = np.hstack((data[0:n], data[n + 1:n + half_width + 1]))
+
+        # Sample selection in body of data set
+        else:
+            sample = np.hstack((data[n - half_width:n], data[n + 1:n + half_width + 1]))
+
+        iqr_array.append(iqr(sample))
+
+    return np.array(iqr_array)
+
+@njit
+@cc.export('iqr', 'f8(f8[::1])')
+def iqr(data_1d):
+    """This function computes the iqr consistent with Matlab
+
+    Parameters
+    ----------
+    data: np.ndarray
+        Data for which the statistic is required
+
+    Returns
+    -------
+    sp_iqr: float
+        Inner quartile range
+
+    """
+
+    # Remove nan elements
+    idx = np.where(np.logical_not(np.isnan(data_1d)))[0]
+    data_1d = data_1d[idx]
+    if len(data_1d) < 2:
+        sp_iqr = np.nan
+    else:
+        # Compute statistics
+        q25 = compute_quantile(data_1d, 0.25)
+        q75 = compute_quantile(data_1d, 0.75)
+        sp_iqr = q75 - q25
+
+    return sp_iqr
+
+@njit
+@cc.export('compute_quantile', 'f8(f8[::1], f8)')
+def compute_quantile(data_1d, q):
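+    """Computes the quantile q of data_1d, consistent with Matlab's
+    quantile function (linear interpolation between sample midpoints).
+
+    Parameters
+    ----------
+    data_1d: np.ndarray
+        1-D data for which the quantile is computed
+    q: float
+        Quantile to compute, between 0 and 1
+
+    Returns
+    -------
+    result: float
+        Computed quantile
+    """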
+
+    sorted_data = np.sort(data_1d)
+    n_samples = len(sorted_data)
+    sample_idx = q * n_samples - 0.5
+    x1 = int(np.floor(sample_idx))
+    x2 = int(np.ceil(sample_idx))
+    if x1 != x2:
+        result = (sample_idx - x1) * (sorted_data[x2] - sorted_data[x1]) + sorted_data[x1]
+    else:
+        result = sorted_data[x1]
+    return result
+
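+# Running this module directly AOT-compiles the exported functions into the
+# run_iqr extension module (run_iqr.cp39-win_amd64.pyd on Windows).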
+if __name__ == '__main__':
+    cc.compile()
\ No newline at end of file
diff --git a/main.py b/main.py
new file mode 100644
index 0000000..58140bd
--- /dev/null
+++ b/main.py
@@ -0,0 +1,353 @@
+import os
+import pandas as pd
+import open_functions as of
+from Classes.Measurement import Measurement
+import tkinter as tk
+import tkinter.messagebox
+from tkinter import ttk
+from threading import Thread
+import datetime
+
+
+class App:
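+    """Small Tkinter GUI for batch-exporting ADCP measurements.
+
+    Lets the user choose output formats (.dat for Barème, .csv and .BAD for
+    BaRatinAGE), the navigation reference, and the extrapolation law, then
+    processes every measurement found in a selected folder.
+    """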
+    def __init__(self):
+        self.choice_list = []
+        self.root = tk.Tk()
+        self.root.geometry('250x400')
+        self.root.resizable(0, 0)
+        self.root.title('MAQIE')
+        self.root.wm_iconbitmap(r'ressources/10.ico')
+        # self.root.wm_attributes("-topmost")
+        self.root.bind("<Return>", lambda event: Thread(target=self.run).start())
+
+        self.choice1 = tk.BooleanVar(value=True)
+        self.choice2 = tk.BooleanVar(value=True)
+        self.choice3 = tk.BooleanVar(value=True)
+        default_text3 = "Code station HYDRO"
+        self.text3 = tk.StringVar(value=default_text3)
+        self.nav_menu4 = tk.StringVar(self.root)
+        self.extrap_menu5 = tk.StringVar(self.root)
+        self.choice5 = tk.BooleanVar(value=False)
+        self.text5 = tk.StringVar(value="0.1667")
+
+        self.choice6 = tk.BooleanVar(value=False)
+
+        # .dat data with entry box for Bareme station's name
+        def some_callback(event):
+            # Clear the placeholder text on first click
+            if self.text3.get() == default_text3:
+                self.Entry_3.delete(0, "end")
+
+        # Entry text
+        self.Entry_3 = tk.Entry(self.root, textvariable=self.text3)
+        self.Entry_3.bind("<Button-1>", some_callback)
+        # Create checkbutton and connect it to entry
+        self.Checkbutton_3 = tk.Checkbutton(self.root, text='Save as .dat (Barème)', variable=self.choice3,
+                                            command=lambda e=self.Entry_3, v=self.choice3: self.naccheck(e, v))
+        # Position checkbutton and entrybox
+        self.Checkbutton_3.pack(side=tk.TOP, anchor=tk.W, ipadx=15)
+        self.Entry_3.pack(anchor=tk.W, padx=35, ipadx=35)
+
+        # .csv data
+        self.Checkbutton_1 = tk.Checkbutton(self.root, text='Save as .csv (BaRatinAGE)', variable=self.choice1)
+        self.Checkbutton_1.pack(side=tk.TOP, anchor=tk.W, ipadx=15)
+        # .BAD data
+        self.Checkbutton_2 = tk.Checkbutton(self.root, text='Save as .BAD (BaRatinAGE)', variable=self.choice2)
+        self.Checkbutton_2.pack(side=tk.TOP, anchor=tk.W, ipadx=15)
+
+        # Choose navigation reference
+        self.label4 = tk.Label(self.root, text="Navigation reference")
+        self.label4.pack(side=tk.TOP, anchor=tk.W, padx=15)
+        self.nav_menu4.set("Default")  # default value
+        self.Option_nav4 = tk.OptionMenu(self.root, self.nav_menu4, "Default", "Bottom track")
+        self.Option_nav4.pack(anchor=tk.W, padx=35, ipadx=35)
+
+        # Choose extrapolation law
+        self.label5 = tk.Label(self.root, text="Extrapolation law")
+        self.label5.pack(side=tk.TOP, anchor=tk.W, padx=15)
+
+        self.extrap_menu5.set("Default")  # default value
+        self.Entry_5 = tk.Entry(self.root, textvariable=self.text5)
+        self.Checkbutton_5 = tk.Checkbutton(self.root, text='Personalize exp.', variable=self.choice5,
+                                            command=lambda e=self.Entry_5, v=self.choice5: self.naccheck(e, v))
+
+        self.Option_extrap5 = tk.OptionMenu(self.root, self.extrap_menu5, "Default", "Power", "CNS", "3-points",
+                                            command=lambda e=self.Checkbutton_5, v=self.extrap_menu5:
+                                            self.nanmenu(e, v))
+        self.Option_extrap5.pack(anchor=tk.W, padx=35, ipadx=35)
+
+        self.Checkbutton_5.pack(side=tk.TOP, anchor=tk.W, ipadx=15)
+        self.Entry_5.pack(anchor=tk.W, padx=35, ipadx=35)
+        # Lock by default
+        self.Checkbutton_5["state"] = "disabled"
+        self.Entry_5.configure(state='disabled')
+
+        # No moving-bed detected
+        self.Checkbutton_6 = tk.Checkbutton(self.root, text='No moving-bed detected', variable=self.choice6)
+        self.Checkbutton_6.pack(side=tk.TOP, anchor=tk.W, ipadx=15)
+
+        # Progress bar
+        self.pb = ttk.Progressbar(self.root, orient='horizontal', length=100)
+        self.pb.pack(side=tk.TOP, anchor=tk.W, padx=10, ipadx=200)
+        self.value_label = ttk.Label(self.root, text=self.update_progress_label())
+        self.value_label.pack()
+
+        # Run Button
+        self.button = tk.Button(self.root, text='Run [Return]', command=lambda: Thread(target=self.run).start(),
+                                padx=50, pady=10, height=1, width=12)
+        self.button.pack()
+
+        # Close button
+        self.close_button = tk.Button(self.root, text='Close', command=self.close, padx=50, pady=10,
+                                      height=1, width=12)
+        self.close_button.pack()
+
+        self.root.protocol('WM_DELETE_WINDOW', self.close)
+        self.root.mainloop()
+
+    def close(self):
+        self.root.destroy()
+
+    def progress(self, value):
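+        """Update the progress bar and its text label."""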
+        self.pb['value'] = value
+        self.value_label['text'] = self.update_progress_label()
+
+    def update_progress_label(self, run=True):
+        if run:
+            return f"Current Progress: {self.pb['value']}%"
+        else:
+            return f"Extract over"
+
+    def naccheck(self, entry, var):
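+        """Enable or disable an entry widget according to the checkbutton
+        variable that controls it."""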
+        if var.get() == 0:
+            entry.configure(state='disabled')
+        else:
+            entry.configure(state='normal')
+
+    def nanmenu(self, entry, var):
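+        """Lock the exponent controls when the default extrapolation law is
+        selected; re-enable them otherwise. The entry argument is unused but
+        kept for call symmetry with naccheck."""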
+        if var.get() == "Default":
+            self.Checkbutton_5["state"] = "disabled"
+            self.Entry_5.configure(state='disabled')
+        else:
+            self.Checkbutton_5["state"] = "normal"
+            self.naccheck(self.Entry_5, self.choice5)
+
+    def switch_all(self, way=True):
+        # Disable all widgets during a run (way=True) or re-enable them (way=False)
+        if way:
+            self.Checkbutton_3["state"] = "disabled"
+            self.Entry_3.configure(state='disabled')
+            self.Checkbutton_1["state"] = "disabled"
+            self.Checkbutton_2["state"] = "disabled"
+            self.Option_nav4["state"] = "disabled"
+            self.Option_extrap5["state"] = "disabled"
+            self.Checkbutton_5["state"] = "disabled"
+            self.Entry_5.configure(state='disabled')
+            self.Checkbutton_6["state"] = "disabled"
+            self.button["state"] = "disabled"
+            self.close_button["state"] = "disabled"
+        else:
+            self.Checkbutton_3["state"] = "normal"
+            self.naccheck(self.Entry_3, self.choice3)
+            self.Checkbutton_1["state"] = "normal"
+            self.Checkbutton_2["state"] = "normal"
+            self.Option_nav4["state"] = "normal"
+            self.Option_extrap5["state"] = "normal"
+            self.nanmenu(self.Checkbutton_5, self.extrap_menu5)
+            self.Checkbutton_6["state"] = "normal"
+            self.button["state"] = "normal"
+            self.close_button["state"] = "normal"
+
+    def run(self):
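+        """Read the GUI options, open every measurement in the selected
+        folder, and write the requested .csv/.BAD/.dat outputs."""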
+        self.switch_all(way=True)
+
+        nav_ref = None
+        fit_method = 'Automatic'
+        top = None
+        bot = None
+        exponent = None
+
+        save_as_dat = self.choice3.get()
+        code_station = self.text3.get()
+        save_as_csv = self.choice1.get()
+        save_as_bad = self.choice2.get()
+        input_nav = self.nav_menu4.get()
+        input_extrap_type = self.extrap_menu5.get()
+        input_extrap_mode = self.choice5.get()
+        input_extrap_exp = self.text5.get()
+        no_bt = self.choice6.get()
+
+        if input_nav == 'Bottom track':
+            nav_ref = 'BT'
+        if input_extrap_type != 'Default':
+            if input_extrap_type == 'Power':
+                top = 'Power'
+                bot = 'Power'
+                fit_method = 'Manual'
+            elif input_extrap_type == 'CNS':
+                top = 'Constant'
+                bot = 'No Slip'
+                fit_method = 'Manual'
+            elif input_extrap_type == '3-points':
+                top = '3-Point'
+                bot = 'No Slip'
+                fit_method = 'Manual'
+
+        if input_extrap_mode:
+            exponent = input_extrap_exp
+
+        # Check that the station code is valid
+        if save_as_dat and code_station in ['Code station HYDRO', '']:
+            tk.messagebox.showerror(title='Invalid code station', message='Please enter a valid station code.')
+            self.switch_all(way=False)
+            return
+
+        # Check that the exponent is valid
+        if exponent is not None:
+            exponent = exponent.replace(',', '.')
+            try:
+                exponent = float(exponent)
+            except ValueError:
+                tk.messagebox.showerror(title='Invalid exponent', message='Please enter a valid exponent.')
+                self.switch_all(way=False)
+                return
+            if exponent > 1 or exponent < 0:
+                tk.messagebox.showerror(title='Invalid exponent',
+                                        message='Please enter an exponent value between 0 and 1.')
+                self.switch_all(way=False)
+                return
+
+        try:
+            with open(os.getcwd() + '\\path_file.txt') as f:
+                path = f.readlines()
+            if len(path) == 0:
+                path = [None]
+            path_folder, path_meas, type_meas, name_meas = of.select_directory(path[0])
+
+            self.pb['value'] = 0
+            self.pb.update_idletasks()
+            self.value_label['text'] = self.update_progress_label()
+
+            with open(os.getcwd() + '\\path_file.txt', 'w') as f:
+                f.write('\\'.join(path_folder.split('\\')[:-1]))
+
+            if save_as_csv or save_as_bad or save_as_dat:
+                empty_list = []
+                uh_list = []
+                q_list = []
+                uq_list = []
+                date_list = []
+                date_day_list = []
+                date_hour_start = []
+                date_hour_end = []
+
+                width_list = []
+                area_list = []
+
+                for id_meas in range(len(path_meas)):
+                    self.pb['value'] = 100 * (id_meas + 1) / len(path_meas)
+                    self.pb.update_idletasks()
+                    self.value_label['text'] = self.update_progress_label()
+
+                    # Open measurement
+                    meas, checked_transect, navigation_reference = of.open_measurement(path_meas[id_meas],
+                                                                                       type_meas[id_meas],
+                                                                                       use_weighted=True,
+                                                                                       navigation_reference=nav_ref,
+                                                                                       run_oursin=True)
+                    # Change extrapolation method
+                    if fit_method == 'Manual':
+                        meas.extrap_fit.change_fit_method(meas.transects, 'Manual', len(meas.transects), top, bot,
+                                                          exponent)
+                        print(f'Top : {top}, bot : {bot}, exp : {exponent}')
+                        settings = meas.current_settings()
+                        meas.apply_settings(settings)
+                    # No moving-bed observed
+                    if no_bt and meas.current_settings()['NavRef'] == 'bt_vel':
+                        meas.observed_no_moving_bed = True
+                        meas.oursin.compute_oursin(meas)
+
+                    # Date data
+                    date_value = meas.transects[checked_transect[0]].date_time.start_serial_time
+                    date_list.append(datetime.datetime.fromtimestamp(date_value).strftime('%Y-%m-%d %H:%M'))
+                    date_day_list.append(datetime.datetime.fromtimestamp(date_value).strftime('%Y%m%d'))
+                    date_hour_start.append(datetime.datetime.fromtimestamp(date_value).strftime('%H:%M'))
+
+                    end_time = meas.transects[checked_transect[-1]].date_time.end_serial_time
+                    date_hour_end.append(datetime.datetime.fromtimestamp(end_time).strftime('%H:%M'))
+
+                    # Cross-section width and area
+                    trans_prop = Measurement.compute_measurement_properties(meas)
+                    n_transects = len(meas.transects)
+                    width_list.append(trans_prop['width'][n_transects])
+                    area_list.append(trans_prop['area'][n_transects])
+
+                    empty_list.append('')
+                    uh_list.append(0)
+                    q_list.append(meas.mean_discharges(meas)['total_mean'])
+                    uq_list.append(meas.oursin.u_measurement['total_95'][0])
+
+                # Name of the parent folder
+                if save_as_dat:
+                    name_folder = code_station
+                else:
+                    name_folder = path_folder.split('\\')[-1]
+
+                if save_as_csv:
+                    df_csv = pd.DataFrame({'H': empty_list, 'uH': uh_list, 'Q': q_list, 'uQ': uq_list, 'Active': 1,
+                                           'date': date_list})
+                    df_csv.to_csv(path_folder + '\\' + name_folder + '.csv', sep=';', index=False)
+
+                if save_as_bad:
+                    bad_uq = []
+                    for i in range(len(uq_list)):
+                        bad_uq.append(q_list[i] * uq_list[i] / 200)
+
+                    df_bad = pd.DataFrame(
+                        {'H': empty_list, 'uH': uh_list, 'Q': q_list, 'uQ': bad_uq, 'date': date_list})
+                    df_bad.to_csv(path_folder + '\\' + name_folder + '.BAD', sep=' ', index=False)
+
+                if save_as_dat:
+                    length = len(q_list)
+                    df = pd.DataFrame(
+                        {'C': ['C'] * length, 'JGG': ['JGG'] * length, 'station_name': [code_station] * length,
+                         'date': date_day_list, 'date_start': date_hour_start, 'date_end': date_hour_end,
+                         'cote': [-999] * length, 'cote_start': [''] * length, 'cote_end': [''] * length,
+                         'code_station': [''] * length, 'debit': q_list, 'incertitude': uq_list,
+                         'distance_station': [''] * length, 'sect_mouillee': area_list,
+                         'peri_mouille': [''] * length, 'larg_miroir': width_list,
+                         'vitesse_moyenne': [''] * length, 'vitesse_maxi': [''] * length,
+                         'deniv': [''] * length, 'commentaire': [''] * length, 'mode': ['PC'] * length
+                         })
+
+                    df = df.to_csv(header=None, index=False, sep=';').strip('\r\n').split('\n')
+                    df_str = [i.strip('\r') for i in df]
+                    df_bareme = '\n'.join(df_str)
+                    with open(path_folder + '\\' + name_folder + '.dat', "w") as myfile:
+                        myfile.write("DEC;  6 13 \nDEB;BA-HYDRO;;;;;;;\n")
+                        myfile.write(df_bareme)
+                        myfile.write('\nFIN;BA-HYDRO;33;')
+
+            self.value_label['text'] = self.update_progress_label(run=False)
+            self.switch_all(way=False)
+
+        except Exception as e:
+            tk.messagebox.showerror(title='Error', message=f'Processing failed: {e}')
+            self.switch_all(way=False)
+
+
+if __name__ == '__main__':
+    App()
diff --git a/open_functions.py b/open_functions.py
new file mode 100644
index 0000000..f9d23cf
--- /dev/null
+++ b/open_functions.py
@@ -0,0 +1,189 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Fri Feb 18 14:28:25 2022
+
+@author: blais
+"""
+# ========================================
+# External imports
+# ========================================
+import os
+import glob
+import tkinter as tk
+import tkinter.filedialog
+import numpy as np
+import scipy.io as sio
+import warnings
+import sys
+
+# ========================================
+# Internal imports
+# ========================================
+from Classes.Measurement import Measurement
+
+
+# =============================================================================
+# Functions
+# =============================================================================
+def select_file(path_window=None):
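+    """Open a file dialog and identify the type of the selected measurement.
+
+    Returns the selected file name(s), the measurement type ('TRDI' or
+    'SonTek'), and the name of the containing folder.
+    """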
+    if not path_window or not os.path.isfile(path_window):
+        path_window = os.getcwd()
+    # Open a window to select a measurement
+    root = tk.Tk()
+    root.withdraw()
+    root.attributes("-topmost", True)
+    path_meas = tk.filedialog.askopenfilenames(parent=root, initialdir=path_window)
+    if path_meas:
+        keys = [elem.split('.')[-1] for elem in path_meas]
+        name_folders = path_meas[0].split('/')[-2].split('.')[0]
+
+        if 'mmt' in keys:
+            type_meas = 'TRDI'
+            fileName = path_meas[0]
+        elif 'mat' in keys:
+            type_meas = 'SonTek'
+            ind_meas = [i for i, s in enumerate(keys) if "mat" in s]
+            fileName = [path_meas[x] for x in ind_meas]
+        else:
+            warnings.warn('Unknown measurement type')
+            return None, None, None
+
+        return fileName, type_meas, name_folders
+    else:
+        warnings.warn('No file selected')
+        return None, None, None
+
+
+def select_directory(path_window=None):
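+    """Open a folder dialog and scan each sub-folder for a measurement.
+
+    Returns the selected folder path together with parallel lists of
+    measurement paths, types ('TRDI', 'SonTek' or 'QRev'), and names.
+    """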
+    if not path_window or not os.path.isdir(path_window):
+        path_window = os.getcwd()
+    # Open a window to select a folder which contains measurements
+    root = tk.Tk()
+    root.withdraw()
+    root.attributes("-topmost", True)
+    path_folder = tk.filedialog.askdirectory(parent=root, initialdir=path_window,
+                                             title='Select folder')
+
+    if path_folder:
+        # ADCP folders path
+        path_folder = '\\'.join(path_folder.split('/'))
+        path_folders = np.array(glob.glob(path_folder + "/*"))
+        # Load their name
+        name_folders = np.array([os.path.basename(x) for x in path_folders])
+        # Exclude files
+        excluded_folders = [s.find('.') == -1 for s in name_folders]
+        path_folders = path_folders[excluded_folders]
+        name_folders = name_folders[excluded_folders]
+
+        # Open measurement
+        type_meas = list()
+        path_meas = list()
+        name_meas = list()
+        for id_meas in range(len(path_folders)):
+            list_files = os.listdir(path_folders[id_meas])
+            exte_files = [i.split('.', 1)[-1] for i in list_files]
+            if 'mmt' in exte_files or 'mat' in exte_files:
+                if 'mat' in exte_files:
+                    loca_meas = [i for i, s in enumerate(exte_files) if "mat" in s]  # transects index
+                    fileNameRaw = [(list_files[x]) for x in loca_meas]
+                    qrev_data = False
+                    for name in fileNameRaw:
+                        path = os.path.join(path_folders[id_meas], name)
+                        mat_data = sio.loadmat(path, struct_as_record=False, squeeze_me=True)
+                        if 'version' in mat_data:
+                            type_meas.append('QRev')
+                            name_meas.append(name_folders[id_meas])
+                            path_meas.append(mat_data)
+                            qrev_data = True
+                            print('QRev detected')
+                            break
+                    if not qrev_data:
+                        type_meas.append('SonTek')
+                        fileName = [s for i, s in enumerate(fileNameRaw)
+                                    if "QRev.mat" not in s]
+                        name_meas.append(name_folders[id_meas])
+                        path_meas.append(
+                            [os.path.join(path_folders[id_meas], fileName[x]) for x in
+                             range(len(fileName))])
+
+                elif 'mmt' in exte_files:
+                    type_meas.append('TRDI')
+                    loca_meas = exte_files.index("mmt")  # index of the measurement file
+                    fileName = list_files[loca_meas]
+                    path_meas.append(os.path.join(path_folders[id_meas], fileName))
+                    name_meas.append(name_folders[id_meas])
+        return path_folder, path_meas, type_meas, name_meas
+    else:
+        warnings.warn('No folder selected - end')
+        return None, None, None, None
+
+
+def open_measurement(path_meas, type_meas, apply_settings=False, navigation_reference=None,
+                     checked_transect=None, extrap_velocity=False, run_oursin=False, use_weighted=True,
+                     use_measurement_thresholds=False):
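+    """Load a measurement with QRev processing and return it along with the
+    checked transect indices and the navigation reference actually in use.
+    """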
+    # Open measurement
+    meas = Measurement(in_file=path_meas, source=type_meas, proc_type='QRev', run_oursin=run_oursin,
+                       use_weighted=use_weighted, use_measurement_thresholds=use_measurement_thresholds)
+
+    if apply_settings:
+        if navigation_reference == 'GPS':
+            if not meas.transects[meas.checked_transect_idx[0]].gps:
+                print('No GPS available: switching to BT')
+                navigation_reference = 'BT'
+        meas, checked_transect, navigation_reference = new_settings(meas,
+                                                                    navigation_reference,
+                                                                    checked_transect,
+                                                                    extrap_velocity)
+    else:
+        if meas.current_settings()['NavRef'] == 'bt_vel':
+            navigation_reference = 'BT'
+        elif meas.current_settings()['NavRef'] in ('gga_vel', 'vtg_vel'):
+            navigation_reference = 'GPS'
+        checked_transect = meas.checked_transect_idx
+
+    return meas, checked_transect, navigation_reference
+
+
+def new_settings(meas, navigation_reference_user=None, checked_transect_user=None, extrap_velocity=False):
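+    """Apply user settings (navigation reference, checked transects, and
+    optional extrapolation on velocities) to a measurement and return the
+    updated measurement, transect indices, and navigation reference.
+    """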
+    # Apply settings
+    settings = meas.current_settings()
+
+    settings_change = False
+
+    # Default navigation reference
+    navigation_reference = None
+    if navigation_reference_user is None:
+        if meas.current_settings()['NavRef'] == 'bt_vel':
+            navigation_reference = 'BT'
+        elif meas.current_settings()['NavRef'] == 'gga_vel' or meas.current_settings()['NavRef'] == 'vtg_vel':
+            navigation_reference = 'GPS'
+
+    # Change Navigation reference
+    else:
+        navigation_reference = navigation_reference_user
+        if navigation_reference_user == 'BT' and meas.current_settings()['NavRef'] != 'bt_vel':
+            settings['NavRef'] = 'BT'
+            settings_change = True
+        elif navigation_reference_user == 'GPS' and meas.current_settings()['NavRef'] != 'gga_vel':
+            settings['NavRef'] = 'GGA'
+            settings_change = True
+
+    # Change checked transects
+    if not checked_transect_user or checked_transect_user == meas.checked_transect_idx:
+        checked_transect_idx = meas.checked_transect_idx
+    else:
+        checked_transect_idx = checked_transect_user
+        meas.checked_transect_idx = []
+        for n in range(len(meas.transects)):
+            if n in checked_transect_idx:
+                meas.transects[n].checked = True
+                meas.checked_transect_idx.append(n)
+            else:
+                meas.transects[n].checked = False
+        meas.selected_transects_changed(checked_transect_user)
+        # selected_transects_changed already contains apply_settings
+
+    # Apply Extrapolation on velocities
+    if extrap_velocity:
+        meas.extrap_fit.change_data_type(meas.transects, 'v')
+        settings_change = True
+
+    if settings_change:
+        meas.apply_settings(settings)
+
+    return meas, checked_transect_idx, navigation_reference
diff --git a/path_file.txt b/path_file.txt
new file mode 100644
index 0000000..a4ae750
--- /dev/null
+++ b/path_file.txt
@@ -0,0 +1 @@
+C:\Users\blaise.calmel\Documents\2_QRev_updates\2_Intercomparaison\intercomp_files
\ No newline at end of file
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000..18c022c
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,12 @@
+llvmlite==0.39.1
+numba==0.56.4
+numpy==1.23.5
+pandas==1.5.2
+profilehooks==1.12.0
+python-dateutil==2.8.2
+pytz==2022.6
+scipy==1.9.3
+six==1.16.0
+tk==0.1.0
+utm==0.7.0
+xmltodict==0.13.0
diff --git a/ressources/10.ico b/ressources/10.ico
new file mode 100644
index 0000000000000000000000000000000000000000..16c2dbc783121b7f0cea71e6c0d185640b606f2b
GIT binary patch
literal 2462
zcmb7G2}~4M7@pllP>SL~EaJhTAfO0>!eWKuQShv(v1qk1tu~FKsi{?iiETBusjW$E
znl{qd#KamY8bK%!RIrHREh=6V*8>m~L}3?a*<INE`u&63#KssM^6}ohH}CuYb7mDq
z$3K66h5v(;ZG9AFu%am81ecT;Le{m5d#`n^$Tlh~vNMTXqJ~hPLFTe1d&bw|l!r5N
zl87w*hGL8759i?h`z`d%c?jP^3w-m<@cFYDy??7kukX)5zoE!1&uV90MQHVb4E_2-
zllRY8Ffg|TgO8|q^N5vDIk#%}euq^IDzRcfu@yf1Tj06lGIZ+-O$yJxOhUINZ)2}5
z<qqGybr^EgilJpz3_oVY2*KYy7{+-3*N2pfmKCBkpjd_X?z_-$E_dkG<ZgVChVJu%
zj9yzS9De(oG|r&%PJ|rq#K;rGNurYXL@?)pTpP~4q0}5gEzuEweD*cMu%*JGtj&MU
z58e90B#*DkOn%w*2qteR&yPN3L--jRMxV1|^o1UTanu>-JqOVVpilnv*RMpy8++=Y
z|FYDiTVIsq_CuFhlI6SWCPr3tBK)Kck!NifbJ315mmP?_+J%T~#Pu$XH+E(=7N)mg
zQTijq&e*4|jo>+nqd#LN8ohtL4&fAj-KH|5=Xa+qfhBE-pv_~?*${Qvj&bCRuIWZp
zZ8yf5INt8oxFRB|5FTCy86!A~Kf$MM2&9JO#-DZ2|6nT&-&UG+n@WxP&BrtQ)Aoo{
zorpSbL(COB#^2~dEIB9Kbz)+j6Y<2v2B*g5>2(-hgr_G*Lik6~*ATac<U@Sec@sKv
z>%XbY4LI0{ag0gaB|GBC9Z&9{o%!0?|Hu19_k(}T!zkt^g#HIgK8~vBx3?a;t(Cd@
zZD(sjOU;--+vBe}U?jJjBkh3;vzwim+v37pjx&krTuW|pA^yG-(ex|Q)Z>?&38jr8
z%%9|9(4kfcw|@IYb=)Z{5*YKuf4Y!#&xvXGU6}RIh4)&VNPpzQqQ@>QBo=V~F16CA
zlT4k7)QDmXBblQJ)`8SnC~G*Voc&nR0sZzWRllQJ9e>7(#A-Vx-|EH`##y-MlV=Gz
zKYHTA@-`QiwE;^xU)b8CG5w(vlNy~EUr!DCJcjiUS!G8!^AN&Z2ONG1{ZG}Z;g=gV
zQ70@&y6QkOZAoo(VP3Nfi^;Q$yer#*)fQlt*@aIzU&^%w+?z%Hw;wn$LE?0W^>wog
zQOsW?du9~-$G_MN;WlK|=7t@8f)w&kZRo~K+P;8imp*o4MY{`Y$i1!uSWB$pd^t5f
zpkETFY4jn1{4sZ^!CH=Hzl>!Lq(A*6{^U07x|11@--0RRpF&?HHj=9u<<+#^g)hjv
zr1Gk^_8I4&kYDyEAFI~*6UiS|d_`M}A%8Ua!^q!ne=`ib?q(WxHyC}gA6ODDbYj|F
zvEQlL@2(;Dxb|s}{gMZ<-@Wc0;~1Y<#wVKb2`*FNwYSk?*j=xE8w`ImWrZAR$Mo7R
zq&7LVx>-yP$*cIVnz3JL7TXy&=J<VyKXa7A_#5eS{B8E%bq6M}&!U)LzrUWqu%}6T
zKgc`6V_!>>_kmW^_|qy<?{#Z2l3H}vkL3Ad@_tC9OZ{*!m3}5OHb(lKK)a0W`?&LV
z1eSK7cg`b|VPErq?>Esf<UGz8SkUH3sInoAH9CjqwfZODB0}nNK6$0r(wL(Z*5+jP
z$|T{wZbv-Ng;l8No7e6z<g`BbeinYYJPx#P98_d+#GUWN4EAOk`*b$@Xbxi`klK2O
zF_}WUlgTap^41Lp5-&LrD%=HThsS}o7k*!Q6sU2~lHpU(VG3i9B=cP~qplmN^k*tL
zr#8@T>ZUN>DYW}-zPBYN!<f%LhdNAh*3I*>(If9^5|QOwq*~;A6U%Q*a<v1~Y3nrR
zF;V)6@3Ns~o$x7CE%L0Kc^T)6_B?VeM%t9gF_&Wvp+1AmWi69<>F*Pl^0i`90+n{f
eSGlKnDdmbm+00)|f>*uV&&yfi5H8^q?tcMC>>WV>

literal 0
HcmV?d00001

diff --git a/ressources/11.png b/ressources/11.png
new file mode 100644
index 0000000000000000000000000000000000000000..98308ce0531909c470622a29744e7fc03a3266e5
GIT binary patch
literal 1046
zcmV+x1nK*UP)<h;3K|Lk000e1NJLTq000;O000;W1^@s6;CDUv000BsNkl<Zc-pO%
zZA?>V6vzK}5@V)&(ZuNVgW1q68fP2?F`Aex`>-$WgNfM)5f>&wMwl@!apoFjzHrGF
z2uwr`I<jHL#5h#Iic(Th1j;1P+S1mxP)e;RN-a`QP|lv`(znY^mL_mN+&;bM_WaKO
zf6nt%z)Y_^JN;i+m@(kk1G<p*5k@8^RiTUvu9*Xbqc$o_`4Eo-6Cj$ZI3(#%t!EY#
z{A^)>cz8!9STYJH=ZSOcW&?1jo6e-h!!tb2lCg9G1tkbLR_C4tiw>HoG<7BV{o~UD
zKz=NMxN1;TQ3{Xw^}5}Nk{zqy_KgRhldOcBRd6j9z*UfG%T3t*x1x)alWj9dc++e^
zQQF6F4ZW0~3G!{~jpCR9OhHYe2<<~j6+1Bi2IxfL&QIWcIwqqDq1h*#()7gur~+L{
z7ICv*3Rll>d?ZhK8KC7Je*Jbe?1N+SvuF>`X=2z1apyt*Y!*@PRx3E`VLfPV$1mw?
zU>O)y#5kkkIs>?tEmRIKDVHnre5l$YLgR`81cQILfywq(<bAgmX78vXIe(?Hz8Av7
z<%?*^eFrA}epQ8on-j6(@DTp`TtrQ;geZgMbyXj;sG|i(cCE#IPeAqiOB~Sm!9rTv
z)P{+k9@Ps^(^C=ulMfYNil}x-h*ruumz_+3u^G7;pQ6(ph>&S&!`0mAfnZzm9<9*c
z!7Hmp1sKT5#F~A6ly4Pr(JIN)M^P&3?d-d`2?sOR<DToq)C{rY!Th+cbwV&KewS8h
z8Zg$?sRDS!{Yc0eM%gwIm6k{k<U@@mxn*N(n+^vuKSO)(aJX+F7qtxxpkK6rR$s2g
zNNbC71lZMIkdIASBPiW2qQaz}BE6Ma<ZQ(L-3e%OPVb?B+gb+%-TQCTy2^`q-l&}x
z2Hd}w;fst>{FWl3+$hP?<yi3OFm|)gz6Si5m593zf3PqLH>r&SH1Eu#gz`V}q~@w>
z%sf}B@m2au6z&jF)+t3|K`(43_TjyOz27IH+4?+EfP4}O=xDS<xHNw*C7nDCPu1TF
zi+G}eJGb#o+Bn-tB2ITmkwY%ngI*(BKd#4~>?AZ<eM*NI&7gY#;rzTf+E7pgXZbmm
z$3xv_>`Z-){9PhS{*_dJuwDb7<!i{vPKM6n3pY<bPubF}V+zjBeT$NhOE8y~g!>=L
z^WxDX>`r-u<C!9g^-_2Thb$WG;#JfkJ7*&r&CkN#hk9h=3Qo=uC^<J5U4;cI1+M}D
z?D?8-Buhk*J~Bn20(^Y3eYME?VI#Drp=li?IJkmX0mXXtm?{C!RB;aY4^gn)U<S#1
Q5dZ)H07*qoM6N<$f(KdemjD0&

literal 0
HcmV?d00001

-- 
GitLab