import sqlite3, re, os, traceback, code, csv, glob, argparse, pdb
from math import floor
from contextlib import closing
from datetime import datetime, timedelta
from itertools import groupby
from copy import copy
import shutil
from collections import OrderedDict
import struct
import ConfigParser

class SWMMOUT2SQLITE_NoNames(Exception):
    """Raised by swmmout2sqlite when none of the requested element names
    are present in the SWMM binary output file."""
    def __init__(self):
        # Original message read "No names supplied found in file.", which
        # is ungrammatical; reworded for clarity.
        Exception.__init__(self, 'None of the supplied names were found in the file.')

def curdir():
    """Return the directory containing this module.

    Falls back to the platform's current-directory token when
    ``__file__`` is not defined (e.g. in an interactive session).
    """
    if '__file__' in globals():
        return os.path.dirname(__file__)
    return os.curdir

def swmmout2sqlite(swmmout_path, dbpath, element_type, names=None, variables=None, 
        start=None, end=None, ignore_missing_names=False):
    """Extract one element type's timeseries from a SWMM binary output
    (.out) file into a table (named after the element type) in an SQLite
    database.

    Parameters:
        swmmout_path : path to the SWMM binary output (.out) file
        dbpath : path to the SQLite database file to write into
        element_type : one of 'subcatchments', 'nodes', 'links',
            'pollutants', 'system'
        names : optional list of element names (strings) to extract;
            all reported names when omitted
        variables : optional list of variable-label patterns; each is
            matched against the known labels with re.match (i.e. treated
            as a regex anchored at the start of the label); all variables
            when omitted
        start, end : optional datetimes bounding the timesteps extracted
        ignore_missing_names : when True, requested names missing from
            the report are skipped instead of raising

    Returns a tuple (dbpath, element_type, number of names written,
    number of variables, start datetime, end datetime).

    Raises SWMMOUT2SQLITE_NoNames when no requested names are present,
    and a locally-defined SWMMOUT2SQLITE_Error (or plain Exception) on
    malformed or inconsistent input.

    NOTE: Python 2 code (``basestring`` below, and list-valued
    ``dict.values()`` sliced in the type-offsets loop).
    """

    assert names is None or all(isinstance(name, basestring) for name in names)

    element_types = ['subcatchments', 'nodes', 'links', 'pollutants', 'system']
    if element_type not in element_types:
        raise Exception("Unknown element type: " + element_type)


    class SWMMOUT2SQLITE_Error(Exception):
        """Wraps lower-level failures with a conversion-context message."""
        def __init__(self, exc):
            Exception.__init__(self, "Can't convert SWMM binary output to SQLite database. " + exc)

    with open(swmmout_path, 'rb') as f:
        RECORD_BYTES = 4

        # opening records constants 
        NRECORDS_OPENING = 7
        OPENING_BYTES = RECORD_BYTES * NRECORDS_OPENING
        NRECORDS_HEAD = 3 # head is the beginning records of the opening
        HEAD_BYTES = NRECORDS_HEAD * RECORD_BYTES
        HEAD_FORMAT = str(NRECORDS_HEAD) + 'i'
        NELEMENT_TYPES = 4 # subcatches, nodes, links, pollutants
        ELEMENTCOUNTS_BYTES = OPENING_BYTES - HEAD_BYTES
        if not NELEMENT_TYPES * RECORD_BYTES == ELEMENTCOUNTS_BYTES:
            raise SWMMOUT2SQLITE_Error("NELEMENT_TYPES and ELEMENTCOUNTS_BYTES contants are inconsistent.")
        ELEMENTCOUNTS_FORMAT = str(NELEMENT_TYPES) + 'i'

        # closing records constants
        NRECORDS_CLOSING = 6
        CLOSING_BYTES = RECORD_BYTES * NRECORDS_CLOSING
        NSECTIONS = 3 # Names, Properties, Results
        SECTION_POS_BYTES = RECORD_BYTES * NSECTIONS
        SECTION_POS_FORMAT = str(NSECTIONS) + 'i'
        NRECORDS_TAIL = NRECORDS_CLOSING - NSECTIONS
        TAIL_BYTES = CLOSING_BYTES - SECTION_POS_BYTES
        TAIL_FORMAT = str(NRECORDS_TAIL) + 'i'

        EXPECTED_ID_NUM = 516114522 # should appear at the start/end of file

        NRECORDS_DAYS_SINCE_EPOCH = 2
        DAYS_SINCE_EPOCH_BYTES  = NRECORDS_DAYS_SINCE_EPOCH * RECORD_BYTES
        EPOCH = datetime(1899, 12, 30)
        REPORT_INTERVAL_BYTES = RECORD_BYTES
        HOURS_IN_DAY = 24.0
        MINUTES_IN_HOUR = 60.0
        SECONDS_IN_MINUTE = 60.0
        TIMESTEP_TOLERANCE = (((1/HOURS_IN_DAY)/MINUTES_IN_HOUR)/SECONDS_IN_MINUTE)

        # move to the closing records at the end of the file (whence=2 seeks from EOF)
        f.seek(-CLOSING_BYTES, 2) 
        section_pos_records = f.read(SECTION_POS_BYTES)
        section_positions = struct.unpack(SECTION_POS_FORMAT, section_pos_records)
        positions_by_section = dict(zip(('names', 'properties', 'results'), section_positions))
        tail_records = f.read(TAIL_BYTES)
        ntimesteps, errorcode, id_num = struct.unpack(TAIL_FORMAT, tail_records)

        if id_num != EXPECTED_ID_NUM:
            raise SWMMOUT2SQLITE_Error("Unexpected identifying number encountered at end of file.")
        elif errorcode:
            raise SWMMOUT2SQLITE_Error("Output contains errors.")
        elif not ntimesteps:
            raise SWMMOUT2SQLITE_Error("Output has zero timesteps.")

        # back to the start to validate the magic number and read flow units
        f.seek(0)
        head_records = f.read(HEAD_BYTES)
        id_num, version, flowunits = struct.unpack(HEAD_FORMAT, head_records)
        if id_num != EXPECTED_ID_NUM:
            raise SWMMOUT2SQLITE_Error("Unexpected identifying number encountered at beginning of file.")

        # unit labels: index 0 = US customary, index 1 = SI, selected below
        # from the flow-units code
        units = {'depth_rate1' : ['inPerHour', 'mmPerHour'],
                 'depth_rate2' : ['ftPerSec', 'mPerSec'],
                 'depth_rate3' : ['inPerDay', 'mmPerDay'],
                 'depth1' : ['in', 'mm'],
                 'depth2' : ['ft', 'm'],
                 'volume' : ['ft3', 'm3'],
                 'temp' : ['degF', 'degC']
        }
        flowunit_options = ['CFS', 'GPM', 'MGD', 'CMS', 'LPS', 'LPD']
        flowunits = flowunit_options[flowunits]
        units_choice = 0 if flowunits in ('CFS', 'GPM', 'MGD') else 1
        for unit_group in units:
            units[unit_group] = units[unit_group][units_choice]
        units['flow'] = flowunits

        element_counts_records = f.read(ELEMENTCOUNTS_BYTES)
        element_counts = list(struct.unpack(ELEMENTCOUNTS_FORMAT, element_counts_records))
        nsystem_elements = 1
        element_counts.append(nsystem_elements)
        element_counts_by_type = OrderedDict(zip(element_types, element_counts))

        if not element_counts_by_type[element_type]:
            raise Exception("SWMM output does not report any " + element_type + " elements.")

        # Read names (each stored as a length record followed by that many bytes)
        f.seek(positions_by_section['names'], 0)
        element_names_by_type = OrderedDict()
        for type_ in element_types:
            if type_ in ('system',):
                element_names_by_type[type_] = None
            else:
                for i in range(element_counts_by_type[type_]):
                    name_bytes_record = f.read(RECORD_BYTES)
                    name_bytes = struct.unpack('i', name_bytes_record)[0]
                    name_record = f.read(name_bytes)
                    name = struct.unpack(str(name_bytes) + 's', name_record)[0]
                    element_names_by_type.setdefault(type_, []).append(name)

        if names:
            type_names = element_names_by_type[element_type]
            in_report = [name in type_names for name in names]

            if not all(in_report) and not ignore_missing_names:
                missing_names = [name for name, is_in_report in zip(names, in_report) if not is_in_report]
                raise Exception("The following element names aren't in the report: " + ','.join(missing_names))

            user_names_in_rpt = [name for name in names if name in type_names]
        else:
            user_names_in_rpt = element_names_by_type[element_type]


        if not user_names_in_rpt:
            raise SWMMOUT2SQLITE_NoNames()

        # Read pollutant units
        pollutant_labels = []
        if element_counts_by_type['pollutants']:
            pollutant_units_records = f.read(element_counts_by_type['pollutants'] * RECORD_BYTES)
            pollutant_units = struct.unpack(str(element_counts_by_type['pollutants']) + 'i', pollutant_units_records)
            pollutant_unit_label_options = ['mgL', 'ugL', 'countPerL']
            pollutant_unit_labels = [pollutant_unit_label_options[i] for i in pollutant_units]
            pollutant_labels = ['_'.join(tup) for tup in zip(element_names_by_type['pollutants'], pollutant_unit_labels)]

        # Read properties (per type: a property-code list, then one value
        # per code for each element of that type)
        f.seek(positions_by_section['properties'], 0)
        element_property_codes = OrderedDict()
        element_properties_by_type = OrderedDict()
        for type_ in element_types:
            if type_ in ('pollutants', 'system'):
                element_properties_by_type[type_] = None
            else:
                nprops_record = f.read(RECORD_BYTES)
                nprops = struct.unpack('i', nprops_record)[0]
                property_code_records = f.read(nprops*RECORD_BYTES)
                property_codes = struct.unpack(str(nprops) + 'i', property_code_records)
                element_property_codes[type_] = property_codes

                for i in range(element_counts_by_type[type_]):
                    property_records = f.read(nprops*RECORD_BYTES)
                    properties = struct.unpack(str(nprops) + 'f', property_records)
                    element_properties_by_type.setdefault(type_, []).append(zip(property_codes, properties))

        # Read reporting variables. The {placeholders} are substituted with
        # the unit labels chosen above, so labels double as column names.
        var_labels_by_type = {
            'subcatchments' : ['rainfall_{depth_rate1}', 
                               'snow_depth_{depth1}', 
                               'evap_plus_infil_losses_{depth_rate1}', 
                               'runoff_rate_{flow}', 
                               'gw_outflow_rate_{flow}',
                               'gw_table_elev_{depth2}'],
            'nodes' : ['depth_above_invert_{depth2}', 
                       'hydraulic_head_{depth2}', 
                       'stored_and_ponded_vol_{volume}', 
                       'lateral_inflow_{flow}', 
                       'total_inflow_{flow}',
                       'flow_lost_to_flooding_{flow}'],
            'links' : ['flow_rate_{flow}', 
                       'flow_depth_{depth2}', 
                       'flow_velocity_{depth_rate2}', 
                       'Froude_number', 
                       'Capacity'],
            'system' : ['air_temp_{temp}',
                        'rainfall_{depth_rate1}',
                        'snow_depth_{depth1}',
                        'evap_plus_infil_losses_{depth_rate1}',
                        'runoff_rate_{flow}',
                        'dry_weather_inflow_{flow}',
                        'gw_inflow_{flow}',
                        'RDII_inflow_{flow}',
                        'user_supplied_direct_inflow_{flow}',
                        'total_lateral_inflow_{flow}',
                        'flow_lost_to_flooding_{flow}',
                        'flow_leaving_through_outfalls_{flow}',
                        'volume_of_stored_water_{volume}',
                        'evaporation_rate_{depth_rate3}']
        }
        if element_counts_by_type['pollutants']:
            for type_ in element_types:
                if type_ not in ('pollutants', 'system'):
                    var_labels_by_type[type_].extend(pollutant_labels)
        for type_ in var_labels_by_type:
            var_labels_by_type[type_] = [name.format(**units) for name in var_labels_by_type[type_]]

        report_vars_by_type = OrderedDict()
        for type_ in element_types:
            if type_ in ('pollutants',):
                report_vars_by_type[type_] = None
            else:
                nvariable_codes_record = f.read(RECORD_BYTES)
                nvariable_codes = struct.unpack('i', nvariable_codes_record)[0]
                variable_code_records = f.read(nvariable_codes * RECORD_BYTES)
                variable_codes = struct.unpack(str(nvariable_codes) + 'i', variable_code_records)
                expected_num_vars = len(var_labels_by_type[type_])
                if len(variable_codes) != expected_num_vars:
                    exc = "Unexpected number of variables for " + type_ + ". Expected " + str(expected_num_vars) \
                          + ", encountered " + str(len(variable_codes))
                    raise Exception(exc)
                report_vars_by_type[type_] = variable_codes

        # Read reporting interval (start as a float64 day count since EPOCH,
        # interval as an int32 number of seconds)
        rpt_start_days_since_epoch_record = f.read(DAYS_SINCE_EPOCH_BYTES)
        rpt_start_days_since_epoch = struct.unpack('d', rpt_start_days_since_epoch_record)[0]
        rpt_interval_record = f.read(REPORT_INTERVAL_BYTES) 
        rpt_interval = struct.unpack('i', rpt_interval_record)[0]
        
        def to_days_since_epoch(dtime):
            # convert a datetime to the file's fractional-days-since-EPOCH convention
            in_seconds = (dtime - EPOCH).total_seconds()
            return ((in_seconds / SECONDS_IN_MINUTE) / MINUTES_IN_HOUR) / HOURS_IN_DAY

        user_start_days_since_epoch = to_days_since_epoch(start) if start else rpt_start_days_since_epoch
        # user_start_dtime = EPOCH + timedelta(days=user_start_day
        rpt_interval_days = ((rpt_interval / SECONDS_IN_MINUTE) / MINUTES_IN_HOUR) / HOURS_IN_DAY
        # NOTE(review): end bound uses (1 + ntimesteps) intervals, presumably
        # to make the bound inclusive of the final timestep -- confirm against
        # the SWMM output layout.
        rpt_end_days_since_epoch = rpt_start_days_since_epoch + rpt_interval_days * (1 + ntimesteps)
        user_end_days_since_epoch = to_days_since_epoch(end) if end else rpt_end_days_since_epoch

        if user_end_days_since_epoch <= user_start_days_since_epoch:
            raise SWMMOUT2SQLITE_Error("Start and end datetimes inconsistent.")

        # calculate byte size per timestep for each element type
        report_bytes_by_type = OrderedDict()
        for type_ in element_types:
            if type_ == 'pollutants':
                report_bytes_by_type[type_] = 0
            else:
                byte_count = element_counts_by_type[type_] * len(report_vars_by_type[type_]) * RECORD_BYTES
                report_bytes_by_type[type_] = byte_count

        bytes_per_timestep = DAYS_SINCE_EPOCH_BYTES + sum(report_bytes_by_type.values())

        # define offsets to locate element type within timestep
        type_offsets = [0]
        for bytes_ in report_bytes_by_type.values()[:-1]:
            type_offsets.append(type_offsets[-1] + bytes_)

        type_offsets_by_type = OrderedDict(zip(element_types, type_offsets))

        bytes_per_element = len(report_vars_by_type[element_type]) * RECORD_BYTES

        if not variables:
            variables = var_labels_by_type[element_type]

        # resolve each requested variable to (index, label); re.match means
        # the user string is a regex matched at the start of the label
        user_var_labels_by_index = []
        for var in variables:
            for i, label in enumerate(var_labels_by_type[element_type]):
                if re.match(var, label):
                    user_var_labels_by_index.append((i, label))
                    break
                elif i == len(var_labels_by_type[element_type]) - 1:
                    exc = "Could not match variable " + var + " with any of the known variable labels for " + element_type
                    raise Exception(exc)
        user_var_labels_by_index = OrderedDict(user_var_labels_by_index)
        max_user_var_index = max(user_var_labels_by_index.keys())
        # read every element's records up to the highest requested variable
        # in one shot, then filter columns in Python
        user_var_byte_range = (max_user_var_index + 1) * RECORD_BYTES

        user_element_indices = [[i, name] for i, name in enumerate(element_names_by_type[element_type]) 
                                if name in user_names_in_rpt]
        # pair each selected element with the index of the previously selected
        # element so relative seek offsets between reads can be computed
        lagged = [user_element_indices[i] + [user_element_indices[i-1][0]] for i in range(1, len(user_element_indices))]

        
        #user_element_offsets_and_names = user_element_indices[:1]
        first_offset = user_element_indices[0]
        user_element_offsets_and_names = [[first_offset[0] * bytes_per_element, first_offset[1]]]
        for idx, name, idx_lag1 in lagged:
            # after reading the previous element the file pointer is already
            # user_var_byte_range past that element's start, so subtract it
            offset = (idx - idx_lag1) * bytes_per_element - user_var_byte_range
            user_element_offsets_and_names.append((offset, name))
        max_user_element_index = max(dict(user_element_indices).keys())

        with closing(sqlite3.connect(dbpath)) as cnxn:
            cnxn.isolation_level = None
            cursor = cnxn.cursor()
            cursor.executescript("""
                PRAGMA synchronous=OFF;
                PRAGMA count_changes=OFF;
                PRAGMA journal_mode=OFF;
            """)

            # best-effort drop of any stale table; ignore "no such table"
            try:
                cursor.execute("DROP TABLE " + element_type)
            except:
                pass

            create_table_stmt = "CREATE TABLE {type} (timestep integer, name text, {variables})"
            variable_defs = ','.join([name + ' real' for name in user_var_labels_by_index.values()])

            create_table_stmt = create_table_stmt.format(type=element_type, variables=variable_defs)
            cursor.execute(create_table_stmt)

            insert_stmt = """
                INSERT INTO {type} (timestep, name, {variables})
                VALUES (?, ?, {values})
            """
            insert_stmt = insert_stmt.format(type=element_type, variables=','.join(user_var_labels_by_index.values()),
                values=','.join(['?' for _ in range(len(user_var_labels_by_index))]))

            f.seek(positions_by_section['results'], 0)

            INSERT_BATCH_SIZE = 500
            batch = []
            results_origin = positions_by_section['results']
            var_format = 'f' * (max_user_var_index + 1)
            element_type_offset = type_offsets_by_type[element_type]
            # head = bytes consumed within this type's section up through the
            # last requested record of the last requested element; tail = the
            # rest of the timestep after that (excluding the date records)
            head_timestep_bytes_range = (((max_user_element_index + 1) - 1) * bytes_per_element) + user_var_byte_range
            tail_timestep_bytes_range = bytes_per_timestep - DAYS_SINCE_EPOCH_BYTES - element_type_offset - head_timestep_bytes_range
            days_since_epoch_record = f.read(DAYS_SINCE_EPOCH_BYTES)
            days_since_epoch = struct.unpack('d', days_since_epoch_record)[0]
            for timestep in range(ntimesteps):
                if days_since_epoch > user_end_days_since_epoch:
                    break
                elif days_since_epoch >= user_start_days_since_epoch:
                    f.seek(element_type_offset, 1)
                    for i, tup in enumerate(user_element_offsets_and_names):
                        element_offset, name = tup
                        f.seek(element_offset, 1)

                        entry = [timestep, name]
                        vars_records = f.read(user_var_byte_range)
                        vars_vals = struct.unpack(var_format, vars_records)
                        # keep only the columns the user actually requested
                        user_var_vals = [val for j, val in enumerate(vars_vals) if j in user_var_labels_by_index.keys()]
                        entry.extend(user_var_vals)
                        batch.append(entry)

                        if len(batch) == INSERT_BATCH_SIZE:
                            cursor.executemany(insert_stmt, batch)
                            del batch[:]

                    f.seek(tail_timestep_bytes_range, 1)

                else:
                    # timestep precedes the requested window; skip it entirely
                    f.seek(bytes_per_timestep - DAYS_SINCE_EPOCH_BYTES, 1)

                days_since_epoch_record = f.read(DAYS_SINCE_EPOCH_BYTES)
                days_since_epoch = struct.unpack('d', days_since_epoch_record)[0]

            # flush the final partial batch, then index for common queries
            cursor.executemany(insert_stmt, batch)
            idx_stmt = "CREATE INDEX idx_{type}_{col} on {type}({col})"
            cursor.execute(idx_stmt.format(type=element_type, col='timestep'))
            cursor.execute(idx_stmt.format(type=element_type, col='name'))
            cnxn.commit()

        user_start_dtime = EPOCH + timedelta(days=user_start_days_since_epoch)
        user_end_dtime = EPOCH + timedelta(days=user_end_days_since_epoch)
        return dbpath, element_type, len(user_names_in_rpt), len(variables), user_start_dtime, user_end_dtime

def tune_db(cursor):
    """Apply speed-oriented PRAGMA settings to an SQLite cursor.

    Trades crash-safety for bulk-insert speed, which is appropriate for
    the throwaway analysis databases built by this module.
    """
    pragmas = (
        "PRAGMA synchronous=OFF",    # don't wait for disk writes to complete before continuing
        "PRAGMA count_changes=OFF",  # don't count num. rows affected by DELETE, INSERT, or UPDATE
        "PRAGMA journal_mode=OFF",   # turn off journaling
    )
    for stmt in pragmas:
        cursor.execute(stmt)
    #cursor.execute("PRAGMA cache_size=1048576") # number of btree pages to cache (1 page = 1 KB)
    #cursor.execute("PRAGMA temp_store=2") # store temporary files in memory

def get_segtable(segmap_paths):
    """Read, merge, and validate segment-map (.map.csv) files.

    Each file must use only the columns Name, SWMM, Type, WASP. Blank
    SWMM/WASP cells are forward-filled from the previous row, then both
    columns are renumbered sequentially starting at 1 (consecutive rows
    that shared a raw value keep sharing the new value).

    Parameters:
        segmap_paths : iterable of paths to .map.csv files

    Returns a list of row dicts with integer 'SWMM' and 'WASP' values.

    Raises Exception when validation fails: not exactly one unnamed END
    row, an unknown Type, an END row referring to a conduit not in the
    map, a repeated conduit name, or a blank value in the first row.
    """
    segtable = []  # one dict per row, accumulated across all .map.csv files
    for fname in segmap_paths:
        with open(fname, 'r') as f:
            table = list(csv.DictReader(f))
            # if the table has rows, check that the header uses only the
            # known column names, then add the rows to segtable
            if table:
                assert all(fieldname in ('Name', 'SWMM', 'Type', 'WASP') for fieldname in table[0].keys())
                segtable.extend(table)

    # Forward-fill any blank entries in the WASP and SWMM columns from the
    # previous row. (The original code raised a confusing NameError when
    # the very first row was blank; raise a clear error instead.)
    prev_wasp = None
    prev_swmm = None
    for row in segtable:
        if not row['WASP']:
            if prev_wasp is None:
                raise Exception('The first row of the segment maps is missing a WASP value.')
            row['WASP'] = prev_wasp
        else:
            prev_wasp = row['WASP']

        if not row['SWMM']:
            if prev_swmm is None:
                raise Exception('The first row of the segment maps is missing a SWMM value.')
            row['SWMM'] = prev_swmm
        else:
            prev_swmm = row['SWMM']

    # Renumber both columns sequentially from 1, preserving the grouping
    # implied by the raw values.
    current_swmm = 1
    current_wasp = 1
    for i, row in enumerate(segtable):
        if i == 0:
            prev_wasp = row['WASP']
            prev_swmm = row['SWMM']
            row['WASP'] = current_wasp
            row['SWMM'] = current_swmm
        else:
            if row['WASP'] == prev_wasp:
                row['WASP'] = current_wasp
            else:
                prev_wasp = row['WASP']
                current_wasp += 1
                row['WASP'] = current_wasp

            if row['SWMM'] == prev_swmm:
                row['SWMM'] = current_swmm
            else:
                prev_swmm = row['SWMM']
                current_swmm += 1
                row['SWMM'] = current_swmm

    # Exactly one row may have type 'END' and no segment name: it marks the
    # end of the whole system. A tributary end also has type END, but its
    # Name is the conduit it links into.
    if len([row for row in segtable if not row['Name'] and row['Type'] == 'END']) != 1:
        raise Exception('There are multiple terminating segments in the segment maps.')

    # every type must be CONDUIT, INFLOW, or END
    if any(row['Type'] not in ('CONDUIT', 'INFLOW', 'END') for row in segtable):
        raise Exception('Unknown Type found in seg map file')

    # tributary END rows must refer to conduits that exist somewhere in the map
    trib_connection_names = [row['Name'] for row in segtable if row['Type'] == 'END' and row['Name']]
    conduit_names = set(row['Name'] for row in segtable if row['Type'] == 'CONDUIT')
    if not all(name in conduit_names for name in trib_connection_names):
        # typo fixed: original message read "refer do conduits"
        raise Exception('There are END rows in your segment map(s) that refer to conduits not included in the map.')

    # conduit/inflow names must be unique
    names = [row['Name'] for row in segtable if row['Type'] != 'END']
    if len(names) != len(set(names)):
        raise Exception('There are repeated conduits in one of the map files.')

    return segtable

def process(filter_mins,
            dummy_end, 
            inppath, 
            outpath, 
            segmap_paths, 
            rptpath=None, 
            binarypath=None,
            correct_vol=False, 
            window_size=400, 
            correction_threshold=0.05, 
            abs_basevol_signal=5, 
            event_start=None, 
            event_end=None, 
            negative_threshold_pct=0.2,
            micro_window_pct=.25, # .25
            round_num=5,
            flow_weight_sums=False,
            event_start_percentage=None):
    """
        filter_mins : number of minutes for the optional moving average smoothing window
        rptpath : path to SWMM rpt file
        binarypath : path to SWMM binary output file
        inppath : path to SWMM .inp file
        outpath : path to directory to write output
        segmap_path : path to segmap file
        correct_vol : boolean value to determine whether or not flow should be corrected

        TODO : go into more detail about the correction parameters, in the mean time their details 
        are describe in the load_ini function
        window_size : correction parameter
        correction_threshhold : correction parameter
        abs_basevol_signal : correction parameter
        negative_threshold_pct : correction paramater
        micro_window_pct : correction parameter

        event_start : event start datetime
        event_end : event end datetime
    """

    if not (binarypath or rptpath):
        raise Exception("Must supply either an SWMM .rpt path or a .out path.")

    # process the segmap files and return a validated list of dictionaries for each row in each segmap
    segtable = get_segtable(segmap_paths)

    # generate the name for the database from the filename of the rpt
    db_name_path = binarypath if binarypath else rptpath
    dbname = os.path.splitext(os.path.basename(db_name_path))[0] + '.db'
    dbpath = os.path.join(os.path.dirname(outpath), dbname)
    # remove the database if it already exists
    if os.path.exists(dbpath):
        os.unlink(dbpath)

    # open a connection to the database with a context manager that will close the connection
    # when the with-block is exited.
    with closing(sqlite3.connect(dbpath)) as conn:
        # set autocommit
        conn.isolation_level = None
        # obtain a cursor
        cur = conn.cursor()
        # optimize the cursor. tune_db is user function defined in this file.
        tune_db(cur)

        # create table to store seg map data
        cur.execute("""
            CREATE TABLE segs (
                wasp int,           -- wasp segment number
                DS_wasp int,        -- wasp segment number of the DS segment (initially NULL)
                swmm int,           -- swmm segment number 
                DS_swmm int,        -- swmm segment number of the DS segment (initiall NULL)
                type varchar(8),    -- element type, either 'CONDUIT', 'INFLOW', or 'END'
                name varchar(64)    -- name as it appears in the .rpt file
            )""")

        for row in segtable:
            # read the map file into segs
            cur.execute("""
                INSERT INTO segs (wasp, swmm, type, name) 
                VALUES (%(WASP)s, %(SWMM)s, '%(Type)s', '%(Name)s')
                """ % row)
        
        # get list of unique swmm seg nums
        cur.execute('SELECT DISTINCT swmm, wasp FROM segs')
        segs = cur.fetchall()

        # get final wasp and swmm segs
        cur.execute('SELECT MAX(wasp) FROM segs')
        last_wasp_seg = cur.fetchone()[0]
        cur.execute('SELECT MAX(swmm) FROM segs')
        last_swmm_seg = cur.fetchone()[0]

        # this loop cycles through the swmm segment numbers and updates DS_wasp and DS_swmm columns in the segs table. 
        # It also creates a dummy segment for the final segment
        for swmm, wasp in segs:
            # check if the current swmm segment includes an 'END' row
            cur.execute("SELECT name FROM segs WHERE swmm = ? AND type = 'END'", (swmm,)) 
            end_row = cur.fetchone()
            if end_row: 
                end_name = end_row[0]
                if end_name:
                    # if the end row includes the name of a conduit (meaning this segment is the end of a tributary, 
                    # not the end of a main trunk), then
                    # find the wasp and swmm segment numbers for the conduit and assign them to the DS_wasp and 
                    # DS_swmm columns for the conduits in this segment
                    
                    cur.execute("SELECT wasp, swmm FROM segs WHERE name = ? AND type <> 'END'", (end_name,))

                    DS_wasp, DS_swmm = cur.fetchone()
                    cur.execute("UPDATE segs SET DS_wasp = ?, DS_swmm = ? WHERE type = 'CONDUIT' AND swmm = ?", 
                        (DS_wasp, DS_swmm, swmm))
                else:
                    if dummy_end:
                        # if user wants a dummy end segment (dummy_end is True), add it to the segs table, and 
                        # make this the down stream segment for the actual final segments
                        dummy_wasp = last_wasp_seg + 1
                        dummy_swmm = last_swmm_seg + 1
                        cur.execute("""
                            INSERT INTO segs (wasp, swmm, DS_wasp, DS_swmm, type, name)
                            SELECT ?, ?, 0, 0, 'DUMMY', name 
                                FROM segs WHERE swmm = ? AND type = 'CONDUIT' LIMIT 1
                            """, (dummy_wasp, dummy_swmm, swmm))

                        cur.execute("UPDATE segs SET DS_wasp = ?, DS_swmm = ? WHERE type = 'CONDUIT' AND swmm = ?", 
                            (dummy_wasp, dummy_swmm, swmm))
                    else:
                        # if the user doesn't want a dummy end segment, set the DS segments of the final 
                        # segment to the empty seg
                        cur.execute("UPDATE segs SET DS_wasp = ?, DS_swmm = ? WHERE type = 'CONDUIT' AND swmm = ?", 
                            (0, 0, swmm))
            else:
                # if this isn't the end segment, update the the DS segment numbers to the next segment
                cur.execute("UPDATE segs SET DS_wasp = ?, DS_swmm = ? WHERE type = 'CONDUIT' AND swmm = ?", 
                    (wasp + 1, swmm + 1, swmm))

        # for rows of type 'INFLOW', the DS_wasp and DS_swmm are the same as its swmm and wasp seg numbers
        cur.execute("UPDATE segs SET DS_wasp = wasp, DS_swmm = swmm WHERE type = 'INFLOW'")
        cur.execute("DELETE FROM segs WHERE type = 'END'")

        # define the path to the directory that will hold the output timeseries
        series_path = os.path.join(os.path.dirname(outpath), 'segment_series_out')
        # if necessary, create the directory 
        if not os.path.isdir(series_path):
            os.mkdir(series_path)

        # write segment map to file
        with open(os.path.join(series_path, 'segment_map_out.csv'), 'w') as f:
            fieldnames = ['wasp', 'ds_wasp', 'swmm', 'ds_swmm', 'type', 'name']
            writer = csv.DictWriter(f, fieldnames = fieldnames, lineterminator='\n')
            writer.writeheader()
            cur.execute('SELECT * FROM segs ORDER BY wasp, swmm, type, name')
            for row in cur.fetchall():
                row = dict(zip(fieldnames, row))
                writer.writerow(row)

        # create a table to store the length and dwf for conduits
        cur.execute('CREATE TABLE conduit_params (name varchar(64), length float, inlet varchar(64), dwf float DEFAULT 0)')

        # ------------------------------------------------------------------
        # Parse the [OPTIONS] section of the .inp to recover the report
        # timestep and the report start/end datetimes, then translate the
        # requested event window into timestep indices (first_step/last_step).
        # The same open file handle is reused below to read [CONDUITS]/[DWF].
        # ------------------------------------------------------------------
        meters_in_foot = 0.3048   # ft -> m conversion factor used throughout
        first_step = 0            # index of the first timestep in the event window
        last_step = 0             # index of the last timestep in the event window
        with open(inppath, 'r') as f:
            # churn through the .inp lines until encountering the [OPTIONS] marker
            line = f.readline().strip()
            while not re.match(re.compile(r'\[opt', re.IGNORECASE), line.strip()):
                line = f.readline()
                if not line:
                    raise Exception("Can't find [OPTIONS] section in .inp file.")

            line = f.readline()
            if not line:
                raise Exception("Can't find REPORT_STEP in the [OPTIONS] section of the .inp")

            # read the following variables from the OPTIONS section
            timestep_secs = None
            start_date = None
            start_time = None
            end_date = None
            end_time = None
            while True:
                if re.match(re.compile(r'report_start_date', re.IGNORECASE), line.strip()):
                    _, start_date = line.split()
                elif re.match(re.compile(r'report_start_time', re.IGNORECASE), line.strip()):
                    _, start_time = line.split()
                elif re.match(re.compile(r'end_date', re.IGNORECASE), line.strip()):
                    _, end_date = line.split()
                elif re.match(re.compile(r'end_time', re.IGNORECASE), line.strip()):
                    _, end_time = line.split()
                elif re.match(re.compile(r'report_step', re.IGNORECASE), line.strip()):
                    _, ts_str = line.split()
                    hours, mins, secs = ts_str.split(':')
                    # BUGFIX: the hours field was previously multiplied by 120
                    # instead of 3600, undercounting any report step >= 1 hour.
                    timestep_secs = float(hours) * 3600 + float(mins) * 60 + float(secs)

                # a new section header (or EOF) ends [OPTIONS]: validate that every
                # required option was seen, then parse the report window datetimes.
                if (not line) or re.match(r'\[', line.strip()):
                    if not timestep_secs:
                        raise Exception(".inp [OPTIONS] missing REPORT_STEP")
                    elif not start_time:
                        raise Exception(".inp [OPTIONS] missing REPORT_START_TIME")
                    elif not start_date:
                        raise Exception(".inp [OPTIONS] missing REPORT_START_DATE")
                    elif not end_date:
                        raise Exception(".inp [OPTIONS] missing REPORT_END_DATE")
                    elif not end_time:
                        raise Exception(".inp [OPTIONS] missing REPORT_END_TIME")
                    else:
                        try:
                            report_start_dtime = datetime.strptime(' '.join([start_date, start_time]), '%m/%d/%Y %H:%M:%S')
                            report_end_dtime = datetime.strptime(' '.join([end_date, end_time]), '%m/%d/%Y %H:%M:%S')
                        except ValueError:
                            # strptime raises ValueError on a malformed date/time
                            raise Exception('Unexpected datetime format encountered in .inp file for report dates.')
                        else:
                            break
                else:
                    line = f.readline()

            # if user hasn't set a start time, default to the report start from the .inp
            if not event_start:
                event_start = report_start_dtime

            # index of the timestep corresponding to event_start
            first_step = int(floor((event_start - report_start_dtime).total_seconds() / timestep_secs))

            # if user hasn't set an end time, default to the report end from the .inp
            if not event_end:
                event_end = report_end_dtime
            
            # use event_end to calculate the total number of timesteps
            event_step_count = int((event_end - event_start).total_seconds() / timestep_secs)

            # index of the final timestep
            last_step = first_step + event_step_count

            # get a list of all the conduit names from segs
            cur.execute("SELECT name FROM segs WHERE type = 'CONDUIT'")
            conduit_names = [row[0] for row in cur.fetchall()]
            
            # churn through the .inp lines until encountering the [CONDUITS] marker
            line = f.readline()
            if not line:
                raise Exception("Can't find [CONDUITS] section in .inp file")

            while not re.match(re.compile(r'\[cond', re.IGNORECASE), line.strip()):
                line = f.readline()
                if not line:
                    raise Exception("Can't find [CONDUITS] section in .inp file.")

            # loop through the lines in [CONDUITS] and for each conduit with its name in the list, 
            # update conduit_params with that conduit's length (converted ft -> m)
            # and inlet node.
            line = f.readline()
            if not line:
                raise Exception("There are no conduits listed in the [CONDUIT] section of the .inp")
            line = line.strip()
            # stop at the next '[' section header; skip blank lines and ';' comment lines.
            # NOTE(review): if the file ends before another section header appears,
            # readline() returns '' forever and this loop never terminates -- confirm
            # inputs always have a section after [CONDUITS].
            while not re.match(r'\[', line):
                if not (re.match('^$', line) or re.match(r';', line)):
                    # .inp conduit rows: name, inlet node, outlet node, length, <rest ignored>
                    name, inlet, _, length, _ = line.split(None, 4)
                    if name in conduit_names:
                        cur.execute('INSERT INTO conduit_params (name, length, inlet) VALUES (?, ?, ?)', 
                            (name, float(length) * meters_in_foot, inlet))
                line = f.readline().strip()

            # churn through the lines until encountering the [DWF] marker.
            # NOTE(review): the readline() here discards the section header that
            # terminated the conduit loop above; if that header was itself [DWF]
            # this search will run past it -- confirm the .inp section ordering.
            line = f.readline()
            if not line:
                raise Exception("Can't find [DWF] section in the .inp")
            line = line.strip()
            while not re.match(re.compile(r'\[dwf', re.IGNORECASE), line):
                line = f.readline().strip()

            # get a list of the inlet node names from conduit_params
            cur.execute('SELECT inlet FROM conduit_params')
            inlet_names = [row[0] for row in cur.fetchall()]

            # cycle through the [DWF] section and for each node that is in our inlet list, update 
            # conduit_params with its DWF value (converted cfs -> cms and rounded)
            line = f.readline().strip()
            while not re.match(r'\[', line):
                if not (re.match('^$', line) or re.match(r';', line)):
                    name, param, avg = line.split()
                    if name in inlet_names and param == 'FLOW':
                        cur.execute('UPDATE conduit_params SET dwf = ? WHERE inlet = ?', 
                            (round(float(avg) * pow(meters_in_foot, 3), round_num), name))
                line = f.readline().strip()

        # create a second conduit_params table that adds the swmm number for each segment
        cur.execute("CREATE TABLE conduit_params2 (name varchar(64), swmm int, length float, dwf float DEFAULT 0)")

        # populate the second conduit_params table from the current one, inner joining with the segs table
        cur.execute("""
            INSERT INTO conduit_params2 (name, swmm, length, dwf)
                SELECT cp.name, segs.swmm, cp.length, cp.dwf
                FROM conduit_params AS cp INNER JOIN segs ON cp.name = segs.name
        """)

        # create the first flows table. 
        # this table will hold the flow data for each segment, and operations on this table will eventually
        # lead to generating the flow series for the final hyd file.
        cur.execute("""
            CREATE TABLE flows (
                timestep int, 
                wasp_source int,    -- wasp seg number indicating which segment the flow is coming from
                wasp_sink int,      -- wasp seg number indicating which segment the flow is going to
                swmm_source int,
                swmm_sink int, 
                name varchar(64), 
                flow float
            )""")

        # create the first volumes table
        # this table will hold the volume, depth, and velocity data for all segments and the operations on this 
        # table and the tables derived from it will eventually lead to generating the data for the volume, depth, and
        # velocity timeseries.
        cur.execute("""
            CREATE TABLE volumes (
                timestep int, 
                wasp int, 
                swmm int, 
                name varchar(64),
                init_vol float, 
                flow float, 
                depth float, 
                velocity float
            )""")

        # select one conduit to represent each swmm segment (MAX picks the lexicographically
        # greatest conduit name per swmm group). The names of these representative conduits
        # will be used to look up the base flows for their respective swmm segments.
        cur.execute("SELECT MAX(name) FROM segs WHERE type = 'CONDUIT' GROUP BY swmm")
        rep_conduits = [row[0] for row in cur.fetchall()]

        # if the user provided their data in a swmm binary file, parse this file out into
        # the volumes and flows tables.
        if binarypath:
            # get a list of segment names from the segs table
            cur.execute("SELECT DISTINCT name FROM segs")
            names = [row[0] for row in cur.fetchall()]
            # these are the names of the link variables as they need to be named to request them from 
            # the swmmout2sqlite function
            link_vars = ['flow_rate', 'flow_depth', 'flow_velocity']
            # subcatchment variables
            catch_vars = ['runoff_rate']

            # read the links data into the database
            links_result = swmmout2sqlite(binarypath, dbpath, 'links', names=names, 
                    variables=link_vars, start=event_start, end=event_end,
                    ignore_missing_names=True)

            # read the subcatchments into the database
            # if this raises a SWMMOUT2SQLITE_NoNames error, that means there are no subcatchments in the binary file.
            try:
                catches_result = swmmout2sqlite(binarypath, dbpath, 'subcatchments', names=names, 
                        variables=catch_vars, start=event_start, end=event_end, ignore_missing_names=True)
            except SWMMOUT2SQLITE_NoNames:
                catches_result = None
            
            # these are the columns of the links and subcatchments tables generated in the database by swmmout2sqlite
            link_fields = ['timestep', 'name'] + link_vars
            catch_fields = ['timestep', 'name'] + catch_vars

            # get a list of unique link names
            cur.execute("SELECT DISTINCT name FROM links")
            link_names = [row[0] for row in cur.fetchall()]
            
            # get a list of unique subcatchment names, if there are any
            if catches_result:
                cur.execute("SELECT DISTINCT name FROM subcatchments")
                catch_names = [row[0] for row in cur.fetchall()]
            else:
                catch_names = []

            # check if there are any conduits in the seg table that aren't in the links read from the binary file
            cur.execute("SELECT name FROM segs WHERE type = 'CONDUIT'")
            seg_conduits = [row[0] for row in cur.fetchall()]
            if [name for name in seg_conduits if name not in link_names]:
                raise Exception("There are conduits in the map file not found in the binary file.")

            # check if there are any inflows not in either the links table or the subcatchments table
            cur.execute("SELECT name FROM segs WHERE type = 'INFLOW'")
            seg_inflows = [row[0] for row in cur.fetchall()]
            if [name for name in seg_inflows if name not in link_names + catch_names]:
                raise Exception("There are inflows in the map file not found in the binary file.")

            # For every named element, pull its timeseries out of the links or
            # subcatchments table, unit-convert it (cfs -> cms, ft -> m),
            # optionally smooth it with a moving-average window, and write the
            # results into the flows and volumes tables.
            for name in names:
                # get each row from the seg table where this name appears
                cur.execute("SELECT wasp, swmm, DS_wasp, DS_swmm, type, name FROM segs WHERE name = ?", (name,))
                elements = cur.fetchall()

                # if this name belongs to a link, select its data from the links table
                # and set variables identifying the column names of the links table. If this isn't a link,
                # get the data from the subcatchments table.
                if name in link_names:
                    cur.execute("SELECT * FROM links WHERE name = ? ORDER BY timestep", (name,))
                    fields = link_fields
                    flow_field = 'flow_rate'
                else:
                    cur.execute("SELECT * FROM subcatchments WHERE name = ? ORDER BY timestep", (name,))
                    fields = catch_fields
                    flow_field = 'runoff_rate'
                    
                # zip the data with the field names to create a list of dicts, where each dict is a row.
                data = [dict(zip(fields, row)) for row in cur.fetchall()]

                # these lists will contain the unit-converted values for their respective variables
                flows = []
                velocities = []
                depths = []

                # if the user requested a moving average filter be passed over the data, calculate how many
                # timesteps correspond to the duration of the window they entered.
                if filter_mins:
                    filter_steps = round((filter_mins * 60) / timestep_secs)

                # if the user wants a moving average window, these vectors will play the role of the windows in the 
                # following loop.
                flow_window = []
                velocity_window = []
                depth_window = []

                # for each row in the data, convert it to the appropriate units.
                # if a filter is to be applied, put the converted values into their respective window.
                # once the windows are full, calculate the average, append it to the main vectors,
                # and then remove the oldest element from each window.
                for i, row in enumerate(data):
                    flow = round(float(row[flow_field]) * pow(meters_in_foot, 3), round_num)
                    depth = round(float(row.get('flow_depth', 0)) * meters_in_foot, round_num)
                    velocity = round(float(row.get('flow_velocity', 0)) * meters_in_foot, round_num)

                    if filter_mins:
                        flow_window.append(flow)
                        velocity_window.append(velocity)
                        depth_window.append(depth)

                        if len(flow_window) == filter_steps:
                            flows.append(sum(flow_window) / filter_steps)
                            del flow_window[0]
                            velocities.append(sum(velocity_window) / filter_steps)
                            del velocity_window[0]
                            # BUGFIX: this line previously read 'summ(depth_wifow)',
                            # a NameError on both the function and the window variable.
                            depths.append(sum(depth_window) / filter_steps)
                            del depth_window[0]
                    else:
                        flows.append(flow)
                        velocities.append(velocity)
                        depths.append(depth)

                # for each element from the seg table,
                for element in elements:
                    # extract its swmm/wasp numbering
                    wasp, swmm, DS_wasp, DS_swmm, eltype, name  = element

                    # set the sources to 0 if this is an INFLOW (external flows have no upstream segment)
                    wasp_source, swmm_source = (0,0) if eltype =='INFLOW' else (wasp, swmm)

                    # insert the flow data into the flows table
                    cur.executemany("""
                    INSERT INTO flows (timestep, wasp_source, swmm_source, name, wasp_sink, swmm_sink, flow)
                    VALUES (?,?,?, ?,?,?, ?)""",
                    [(i, wasp_source, swmm_source, name, DS_wasp, DS_swmm, flow) for i, flow in enumerate(flows)])

                    # if this is a conduit or a dummy, we need to insert the relevant data into the volumes table.
                    if eltype in ('CONDUIT', 'DUMMY'):
                        # if this is a representative conduit, look up the dwf value, and insert 
                        # a constant timeseries into flows
                        if eltype == 'CONDUIT' and name in rep_conduits:
                            cur.execute('SELECT dwf FROM conduit_params WHERE name = ?', (name,))
                            dwf = cur.fetchone()[0]

                            cur.executemany("""
                                INSERT INTO flows (timestep, wasp_source, swmm_source, name, wasp_sink, swmm_sink, flow)
                                VALUES (?,?,?, ?,?,?, ?)
                            """, [(i, 0, 0, name + '_DWF', wasp, swmm, dwf) for i in xrange(len(flows))])

                        # get the length for the conduit and calculate the initial volume
                        # ((flow / velocity) * length; guard against division by zero velocity)
                        cur.execute("SELECT length FROM conduit_params WHERE name = ?", (name,))
                        length = cur.fetchone()[0]
                        init_vol = (flows[0] / velocities[0]) * length if velocities[0] else 0 

                        # insert the flow, depth, velocity series into the volumes table, along with the init_vol repeated 
                        # for each timestep.
                        cur.executemany("""
                            INSERT INTO volumes (timestep, wasp, swmm, name, init_vol, flow, depth, velocity)
                            VALUES (?,?,?, ?,?,?, ?,?)
                        """, [(i, wasp, swmm, name, init_vol, tup[0], tup[1], tup[2])
                              for i, tup in enumerate(zip(flows, depths, velocities))])

        elif rptpath:
            with open(rptpath, 'r') as f:
                # loop through the .rpt file. Each time a report series is encountered, check if the 
                # name is in the segs table, and if so insert the appropriate values into the flows and volumes tables
                line = f.readline()
                while line:
                    while line and not re.match('<<<', line.strip()):
                        line = f.readline()

                    if line:
                        _, kind, name, _ = line.strip().split() # assumes line is of the format '<<< Subcatchment CATCH01 >>>'
                        # check if there are any entries for this name in segs ...
                        cur.execute('SELECT wasp, swmm, DS_wasp, DS_swmm, type, name FROM segs WHERE name = ?', (name,))
                        elements = cur.fetchall()
                        if not (elements and kind in ('Link', 'Subcatchment')): 
                            # ... if this element isn't in segs, keep reading
                            line = f.readline()
                        else:
                            header_nline = 4
                            for i in range(header_nline): # eat header
                                _ = f.readline() 

                            # column layout of the .rpt timeseries rows, by element kind
                            if kind == 'Link':
                                fieldnames = ['date', 'time', 'flow', 'velocity', 'depth', 'percent'] 
                            else:
                                fieldnames = ['date', 'time', 'percip', 'losses', 'flow']

                            # unit-converted (and optionally filtered) series
                            flows = []
                            velocities = []
                            depths = []

                            # raw unconverted values, kept for reference
                            orig_velocities = []
                            orig_depths = []

                            # size of the moving-average window in timesteps, if filtering
                            if filter_mins:
                                filter_steps = round((filter_mins * 60) / timestep_secs)
                            flow_window = []
                            velocity_window = []
                            depth_window = []

                            line = f.readline()
                            # loop through each row in the series and update the flows, velocities, and depths 
                            # lists, filtering if told to do so.
                            # NOTE(review): counter starts at 1 here while the binary-file branch
                            # indexes timesteps from 0 -- confirm the intended one-step offset.
                            counter = 1

                            while line and not re.match('^$', line.strip()):
                                if counter < first_step:
                                    # before the event window: skip this row
                                    counter += 1
                                    line = f.readline()
                                    continue
                                elif counter >  last_step:
                                    # past the event window: stop reading this series
                                    break
                                else:
                                    record = dict(zip(fieldnames, line.strip().split()))

                                    # convert cfs -> cms and ft -> m
                                    flow = round(float(record['flow']) * pow(meters_in_foot, 3), round_num)
                                    depth = round(float(record.get('depth', 0)) * meters_in_foot, round_num)
                                    velocity = round(float(record.get('velocity', 0)) * meters_in_foot, round_num)

                                    orig_velocities.append(float(record.get('velocity', 0)))
                                    orig_depths.append(float(record.get('depth', 0)))

                                    if filter_mins:
                                        flow_window.append(flow)
                                        velocity_window.append(velocity)
                                        depth_window.append(depth)

                                        # once the window is full, emit the average and slide forward
                                        if len(flow_window) == filter_steps:
                                            flows.append(sum(flow_window) / filter_steps)
                                            del flow_window[0]
                                            velocities.append(sum(velocity_window) / filter_steps)
                                            del velocity_window[0]
                                            depths.append(sum(depth_window) / filter_steps)
                                            del depth_window[0]
                                    else:
                                        flows.append(flow)
                                        velocities.append(velocity)
                                        depths.append(depth)

                                counter += 1 
                                line = f.readline()
        
                            for element in elements:
                                wasp, swmm, DS_wasp, DS_swmm, eltype, name  = element
                                # external inflows have no upstream segment, so zero their sources
                                wasp_source = 0 if eltype == 'INFLOW' else wasp
                                swmm_source = 0 if eltype == 'INFLOW' else swmm

                                cur.executemany("""
                                 INSERT INTO flows (timestep, wasp_source, swmm_source, name, wasp_sink, swmm_sink, flow) 
                                 VALUES (?, ?, ?, ?, ?, ?, ?)""",
                                 [(i, wasp_source, swmm_source, name, DS_wasp, DS_swmm, flow) for i, flow in enumerate(flows)])


                                if eltype in ('CONDUIT', 'DUMMY'):
                                    # representative conduits also contribute a constant DWF series
                                    if eltype == 'CONDUIT' and name in rep_conduits:
                                        cur.execute('SELECT dwf FROM conduit_params WHERE name = ?', (name,))
                                        dwf = cur.fetchone()[0]

                                        cur.executemany("""
                                            INSERT INTO flows (timestep, wasp_source, swmm_source, name, 
                                                wasp_sink, swmm_sink, flow) 
                                            VALUES (?, ?, ?, ?, ?, ?, ?)
                                            """, [(i, 0, 0, name + '_DWF', wasp, swmm, dwf) for i in xrange(len(flows))])

                                    # initial volume = (flow / velocity) * length, guarding zero velocity
                                    cur.execute('SELECT length FROM conduit_params WHERE name = ?', (name,))
                                    length = cur.fetchone()[0]
                                    init_vol = (flows[0] / velocities[0]) * length if velocities[0] else 0

                                    cur.executemany("""
                                        INSERT INTO volumes (timestep, wasp, swmm, name, init_vol, flow, depth, velocity)
                                        VALUES (?, ?, ?, ?, ?, ?, ?, ?)
                                        """, [(i, wasp, swmm, name, init_vol, tup[0], tup[1], tup[2]) 
                                               for i, tup in enumerate(zip(flows, depths, velocities))])
        else:
            pass
            #path_to_out_db = swmmout2sqlite(binarypath, dbpath=dbpath, el)
            
        # create indexes on the volumes and flows tables to speed the lookups and joins below
        cur.execute('CREATE INDEX idx_flows ON flows(timestep, name)')
        cur.execute('CREATE INDEX idx_volumes ON volumes(timestep)')
    
        # check that all conduits in segs are represented in the volumes table
        cur.execute("SELECT DISTINCT name FROM segs WHERE type = 'CONDUIT'")
        segs_conduit_names = [row[0] for row in cur.fetchall()]
        cur.execute("SELECT DISTINCT name FROM volumes")
        # set for O(1) membership tests in the comprehension below
        volumes_names = set(row[0] for row in cur.fetchall())
        missing_conduits = [name for name in segs_conduit_names if name not in volumes_names]
        if missing_conduits:
            raise Exception('There are conduits in the segment map not found in the .rpt file: ' + ', '.join(missing_conduits))

        # check that every segment name plus the representative conduits' DWF series
        # appear in the flows table (and nothing else does)
        cur.execute("SELECT DISTINCT name FROM segs WHERE name <> ''")
        segs_all_names = [row[0] for row in cur.fetchall()]
        # (a dead duplicate SELECT of conduit names was removed here; its results
        # were never fetched -- dwf_names is derived from rep_conduits instead)
        dwf_names = [cond + '_DWF' for cond in rep_conduits]
        cur.execute("SELECT DISTINCT name FROM flows")
        flows_names = [row[0] for row in cur.fetchall()]
        if set(segs_all_names + dwf_names) != set(flows_names):
            raise Exception('There are elements missing from the flows table.')

        # create or clear the directory for external flow percent series
        ext_flows_pct_path = os.path.join(series_path, 'external_flow_percentages_by_WASP_segment')
        if not os.path.isdir(ext_flows_pct_path):
            os.mkdir(ext_flows_pct_path)
        else:
            for fname in os.listdir(ext_flows_pct_path):
                os.unlink(os.path.join(ext_flows_pct_path, fname))

        # create or clear the directory for external flow series
        ext_flows_path = os.path.join(series_path, 'external_flows_by_WASP_segment')
        if not os.path.isdir(ext_flows_path):
            os.mkdir(ext_flows_path)
        else:
            for fname in os.listdir(ext_flows_path):
                os.unlink(os.path.join(ext_flows_path, fname))

        # get a list of the unique wasp segment numbers. we will loop over the list to get the external 
        # flow output for each wasp segment
        cur.execute('SELECT DISTINCT wasp_sink FROM flows WHERE wasp_sink > 0')
        wasp_segs = [row[0] for row in cur.fetchall()]

        # output the external flows and external flow percentages by wasp seg num
        for segnum in wasp_segs:
            # get a list of inflow names for this wasp segment (wasp_source = 0 marks external inflows)
            cur.execute('SELECT DISTINCT name FROM flows WHERE wasp_sink = ? AND wasp_source = 0', (segnum, ))
            inflow_names = [row[0] for row in cur.fetchall()]

            if inflow_names: # if the segment has no inflows, do nothing, otherwise...

                # this query returns a table with columns indicating timestep, the name of the inflow, 
                # its flow at each timestep,
                # the total flow going into the wasp segment at each timestep, and the inflow's percentage 
                # of that total flow at each timestep
                cur.execute("""
                    SELECT f.timestep, f.name, f.flow, tf.total_flow, 
                        CASE WHEN tf.total_flow > 0 THEN f.flow / tf.total_flow ELSE 0 END
                    FROM flows f INNER JOIN (
                        SELECT timestep, SUM(flow) AS total_flow
                        FROM flows 
                        WHERE wasp_sink = ? AND wasp_source = 0 
                        GROUP BY timestep) as tf
                    ON f.timestep = tf.timestep
                    WHERE f.wasp_sink = ? AND f.wasp_source = 0 
                    ORDER BY f.name, f.timestep
                """, (segnum, segnum))

                results = cur.fetchall()
                
                # one column (list of per-timestep values) per inflow name
                pct_columns = []
                flow_columns = []
                total_col = []

                # csv header: 'timestep' followed by the inflow names
                names = ['timestep']

                # results are ORDERed BY name, so groupby yields one group per inflow
                for key, group in groupby(results, lambda row: row[1]):
                    names.append(key)
                    group = list(group)
                    group = sorted(group, key = lambda row: row[0])
                    pct_flow = [row[4] for row in group]
                    pct_columns.append(pct_flow)
                    flow = [row[2] for row in group]
                    total_col = [row[3] for row in group]
                    flow_columns.append(flow)

                # prepend a timestep index column, then transpose columns into rows
                pct_columns.insert(0, range(0, len(pct_columns[0])))
                flow_columns.insert(0, range(0, len(flow_columns[0])))
                pct_rows = zip(*pct_columns) # transform list of columns into a list of rows
                flow_rows = zip(*flow_columns)
                named_pct_rows = [dict(zip(names, row)) for row in pct_rows]
                named_flow_rows = [dict(zip(names, row)) for row in flow_rows]

                with open(os.path.join(ext_flows_pct_path, str(segnum) + '.csv'), 'w') as f:
                    writer = csv.DictWriter(f, fieldnames = names, lineterminator='\n')
                    writer.writeheader()
                    for row in named_pct_rows:
                        writer.writerow(row)

                with open(os.path.join(ext_flows_path, str(segnum) + '_flow.csv'), 'w') as f:
                    writer = csv.DictWriter(f, fieldnames=names, lineterminator='\n')
                    writer.writeheader()
                    for row in named_flow_rows:
                        writer.writerow(row)

        # clear or create a directory for swmm external flow percentages
        ext_swmm_flows_pct_path = os.path.join(series_path, 'external_flow_percentages_by_SWMM_segment')
        if not os.path.isdir(ext_swmm_flows_pct_path):
            os.mkdir(ext_swmm_flows_pct_path)
        else:
            for fname in os.listdir(ext_swmm_flows_pct_path):
                os.unlink(os.path.join(ext_swmm_flows_pct_path, fname))

        # create or clear the directory for swmm external flow series
        ext_swmm_flows_path = os.path.join(series_path, 'external_flows_by_SWMM_segment')
        if not os.path.isdir(ext_swmm_flows_path):
            os.mkdir(ext_swmm_flows_path)
        else:
            for fname in os.listdir(ext_swmm_flows_path):
                os.unlink(os.path.join(ext_swmm_flows_path, fname))

        # get a list of the unique swmm segment numbers. we will loop over the list to get the external 
        # flow output for each swmm segment
        cur.execute('SELECT DISTINCT swmm_sink FROM flows WHERE swmm_sink > 0')
        swmm_segs = [row[0] for row in cur.fetchall()]

        # output the external flows and external flow percentages by swmm seg num
        # (same logic as the WASP output above, keyed on swmm_sink/swmm_source instead)
        for segnum in swmm_segs:
            # get a list of inflow names for this swmm segment
            cur.execute('SELECT DISTINCT name FROM flows WHERE swmm_sink = ? AND swmm_source = 0', (segnum, ))
            inflow_names = [row[0] for row in cur.fetchall()]

            if inflow_names: # if the segment has no inflows, do nothing, otherwise...

                # per-timestep flow, total inflow to the segment, and this inflow's share of the total
                cur.execute("""
                    SELECT f.timestep, f.name, f.flow, tf.total_flow, 
                        CASE WHEN tf.total_flow > 0 THEN f.flow / tf.total_flow ELSE 0 END
                    FROM flows f INNER JOIN (
                        SELECT timestep, SUM(flow) AS total_flow
                        FROM flows 
                        WHERE swmm_sink = ? AND swmm_source = 0 
                        GROUP BY timestep) as tf
                    ON f.timestep = tf.timestep
                    WHERE f.swmm_sink = ? AND f.swmm_source = 0 
                    ORDER BY f.name, f.timestep
                """, (segnum, segnum))

                results = cur.fetchall()
                
                # one column (list of per-timestep values) per inflow name
                pct_columns = []
                flow_columns = []
                total_col = []

                # csv header: 'timestep' followed by the inflow names
                names = ['timestep']

                # results are ORDERed BY name, so groupby yields one group per inflow
                for key, group in groupby(results, lambda row: row[1]):
                    names.append(key)
                    group = list(group)
                    group = sorted(group, key = lambda row: row[0])
                    pct_flow = [row[4] for row in group]
                    pct_columns.append(pct_flow)
                    flow = [row[2] for row in group]
                    total_col = [row[3] for row in group]
                    flow_columns.append(flow)

                # prepend a timestep index column, then transpose columns into rows
                pct_columns.insert(0, range(0, len(pct_columns[0])))
                flow_columns.insert(0, range(0, len(flow_columns[0])))
                pct_rows = zip(*pct_columns) # transform list of columns into a list of rows
                flow_rows = zip(*flow_columns)
                named_pct_rows = [dict(zip(names, row)) for row in pct_rows]
                named_flow_rows = [dict(zip(names, row)) for row in flow_rows]

                with open(os.path.join(ext_swmm_flows_pct_path, str(segnum) + '.csv'), 'w') as f:
                    writer = csv.DictWriter(f, fieldnames = names, lineterminator='\n')
                    writer.writeheader()
                    for row in named_pct_rows:
                        writer.writerow(row)

                with open(os.path.join(ext_swmm_flows_path, str(segnum) + '_flow.csv'), 'w') as f:
                    writer = csv.DictWriter(f, fieldnames=names, lineterminator='\n')
                    writer.writeheader()
                    for row in named_flow_rows:
                        writer.writerow(row)

        # volumes2 is the same as volumes but with three additional columns, delta_vol, flow_in, and vol
        # NOTE(review): the sentence above appears outdated -- volumes2 as created here
        # carries no delta_vol/flow_in/vol; those columns first appear in volumes2_b below
        cur.execute("""
            CREATE TABLE volumes2 (
                timestep int, 
                wasp int, 
                swmm int, 
                name varchar(64), 
                flow float,
                depth float, 
                velocity float
            )""")


        # choose how depth/velocity are apportioned among the parallel conduits of a
        # swmm segment: weighted by each conduit's share of the total absolute flow,
        # or split evenly by conduit count.
        # NOTE(review): with flow weighting, a total_flow of 0 makes the SQLite
        # division yield NULL -- confirm this cannot occur or is handled downstream
        if flow_weight_sums:
            depth_calc = "depth * (ABS(flow) / total_flow)"
            velocity_calc = "velocity * (ABS(flow) / total_flow)"
        else:
            depth_calc = "depth * (1.0 / seg_count)"
            velocity_calc = "velocity * (1.0 / seg_count)"

        # alternative depth definitions kept for reference
        #depth_calc1 = 
        #depth_calc2 = "depth * (ABS(flow)/total_flow)"
        #depth_calc3 = """
        #    CASE WHEN total_flow = 0
        #         THEN depth * (1/seg_count)
        #         ELSE depth * (ABS(flow) / total_flow)
        #    END
        #"""

        # alternative velocity definitions kept for reference
        #velocity_calc1 = 
        #velocity_calc2 = "velocity * (ABS(flow)/ total_flow)"
        #velocity_calc3 = """
        #    CASE WHEN total_flow = 0
        #         THEN depth * (1/seg_count)
        #         ELSE depth * (ABS(flow) / total_flow)
        #    END
        #"""

        #tflow_calc1 = "SUM(flow)"
        tflow_calc = "SUM(ABS(flow))"

        # inner join volumes with the aggregation of itself containing the total flow and segment count 
        # for each swmm segment. Use the aggregated values to calculate the weighted depth and velocity for 
        # each swmm segment and insert the results along with columns carried over from volumes into volumes2
        # 
        cur.execute(""" 
            INSERT INTO volumes2 (timestep, wasp, swmm, name, depth, velocity, flow)
                SELECT v.timestep, v.wasp, v.swmm, v.name,""" + depth_calc + """,""" + velocity_calc + """, flow
                FROM (
                    SELECT """ + tflow_calc + """ AS total_flow, timestep, swmm, COUNT(*) AS seg_count
                    FROM volumes
                    GROUP BY timestep, swmm) AS total_flows
                INNER JOIN volumes v ON v.timestep = total_flows.timestep AND v.swmm = total_flows.swmm
            """)
        cur.execute('CREATE INDEX idx_volumes2 ON volumes2(timestep, swmm)')

        # volumes2_a will contain the summed weighted depth, weighted velocity, and flow for each swmm segment in volumes2
        cur.execute("""
            CREATE TABLE volumes2_a (
                timestep int,
                wasp int,
                swmm int,
                depth float,
                velocity float,
                flow float
            )
        """)

        # total the depth, velocity, and flow in volumes2 by swmm segment and insert it into volumes2_a
        cur.execute("""
            INSERT INTO volumes2_a (timestep, wasp, swmm, depth, velocity, flow)
            SELECT timestep, wasp, swmm, SUM(depth), SUM(velocity), SUM(flow)
            FROM volumes2
            GROUP BY wasp, swmm, timestep
        """)
        cur.execute('CREATE INDEX idx_volumes2_a ON volumes2_a(timestep, swmm)')

        # this table gets populated by the calc_swmm_vols function defined ahead.
        # it differs from volumes2_a in that it includes delta_vol and vol columns
        cur.execute("""
            CREATE TABLE volumes2_b (
                timestep int,
                swmm int,
                wasp int,
                depth float,
                velocity float,
                flow float,
                delta_vol float,
                vol float NULL
            )
        """)
        cur.execute('CREATE INDEX idx_volumes2_b ON volumes2_b(timestep, swmm)')

        # wasp-level working table: per-conduit apportioned depth/velocity plus
        # the conduit count, populated further below
        cur.execute("""
            CREATE TABLE volumes3x (
                timestep int, 
                wasp int, 
                vol float, 
                wasp_depth float, 
                wasp_velocity float,
                flow float,
                seg_count int
            )""")
        cur.execute('CREATE INDEX idx_volumes3x ON volumes3x(timestep, wasp)')

        # final wasp-level rollup: one row per wasp segment per timestep
        cur.execute("""
            CREATE TABLE volumes4x (
                timestep int, 
                wasp int, 
                vol float, 
                depth float, 
                velocity float
            )""")
        cur.execute('CREATE INDEX idx_volumes4x ON volumes4x(timestep, wasp)')
        def calc_swmm_vols():
            """
            Populate volumes2_b with the rows of volumes2_a plus the swmm segment
            volume calculation.

            First computes delta_vol for every segment at every timestep by inner
            joining volumes2_a with an aggregation of the flows table, then walks
            volumes2_b timestep by timestep, integrating the vol column from each
            previous timestep's value. Safe to call repeatedly: the table is
            cleared first.
            """
            # clear the values from the table, if there are any
            cur.execute("DELETE FROM volumes2_b")

            # delta_vol = (inflow - outflow) * timestep length (seconds), computed
            # per swmm segment by joining against the summed inflows for each sink
            cur.execute("""
                INSERT INTO volumes2_b (timestep, wasp, swmm, depth, velocity, flow, delta_vol)
                SELECT v.timestep, v.wasp, v.swmm, v.depth, v.velocity, v.flow, (inflows.flow_in - v.flow) * ?
                FROM (
                    SELECT SUM(flow) AS flow_in, timestep, swmm_sink
                    FROM flows
                    GROUP BY timestep, swmm_sink) AS inflows
                INNER JOIN volumes2_a v ON v.timestep = inflows.timestep AND v.swmm = inflows.swmm_sink
            """, (timestep_secs, ))

            # vol at timestep t depends on vol at t - 1, so timesteps MUST be
            # visited in ascending order. SELECT DISTINCT makes no ordering
            # guarantee in SQLite, hence the explicit ORDER BY (fixes a latent
            # ordering bug that could read a not-yet-computed previous vol).
            cur.execute('SELECT DISTINCT timestep FROM volumes2_b ORDER BY timestep')
            timesteps = [row[0] for row in cur.fetchall()]
            cur.execute('SELECT DISTINCT swmm FROM volumes2_b')
            swmm_segs = [row[0] for row in cur.fetchall()]

            # integrate the volume for each segment across its timesteps
            for swmm in swmm_segs:
                for t in timesteps:
                    if t == 0:
                        # first timestep: seed vol with the initial volume, estimated
                        # as cross-sectional area (flow / velocity) times conduit length
                        cur.execute('SELECT flow, velocity FROM volumes2_b WHERE timestep = 0 AND swmm = ?', (swmm, ))
                        flow, velocity = cur.fetchone()

                        # obtain length information to calculate init_vol
                        cur.execute("SELECT DISTINCT length FROM conduit_params2 WHERE swmm = ?", (swmm, ))
                        length = cur.fetchall()
                        assert len(length) == 1
                        length = length[0][0]

                        # NOTE(review): assumes velocity is nonzero at timestep 0;
                        # confirm upstream data guarantees this
                        init_vol = (flow / velocity) * length

                        cur.execute("""
                            UPDATE volumes2_b
                            SET vol = ?
                            WHERE timestep = 0 AND swmm = ?
                        """, (init_vol, swmm))
                    else:
                        # volume carried over from the previous timestep
                        cur.execute('SELECT vol FROM volumes2_b WHERE timestep = ? AND swmm = ?', (t - 1, swmm))
                        prev_vol = cur.fetchall()
                        assert len(prev_vol) == 1
                        prev_vol = prev_vol[0][0]

                        # current timestep's delta_vol and depth, fetched in a single
                        # query (previously two separate single-row SELECTs)
                        cur.execute('SELECT delta_vol, depth FROM volumes2_b WHERE timestep = ? AND swmm = ?', (t, swmm))
                        rows = cur.fetchall()
                        assert len(rows) == 1
                        delta_vol, depth = rows[0]

                        # when depth is 0 (most likely due to backflow) treat the
                        # segment as empty; otherwise accumulate the delta
                        if depth == 0:
                            vol = 0
                        else:
                            vol = prev_vol + delta_vol

                        # write the computed volume back for this timestep/segment
                        cur.execute('UPDATE volumes2_b SET vol = ? WHERE timestep = ? AND swmm = ?', (vol, t, swmm))
        # compute the per-segment volume series (populates volumes2_b)
        calc_swmm_vols()

        if correct_vol:
            # begin correction procedure
            print('correcting')

            # get all of the swmm segment numbers
            cur.execute("SELECT DISTINCT swmm FROM volumes2_b WHERE swmm IS NOT NULL")
            swmm_segs = [row[0] for row in cur.fetchall()]

            # for each swmm segment, correct its volume series
            for swmm in swmm_segs:
                cur.execute("""
                    SELECT vol
                    FROM volumes2_b
                    WHERE swmm = ? 
                    ORDER BY timestep
                """, (swmm, ))
                vol = [row[0] for row in cur.fetchall()]

                # determine the correct base volume for the segment by taking
                # the average of the volume at the beginning of the series.
                # use the rolling window size as an arbitrary cut off point 
                # to end the beginning average
                correct_basevol = sum(vol[:window_size]) / window_size

                # calculate the range of the volume timeseries, from its base vol to its peak
                vol_range = max(vol) - correct_basevol

                # all volume before the event_start index is not corrected and is factored 
                # into the correct base volume calculation
                event_start_idx = 0

                # if user sets event_start_percentage, find the index of the first timestep 
                # where the volume exceeds this percentage of the vol_range
                if event_start_percentage:
                    for i, v in enumerate(vol):
                        if v > ((vol_range * event_start_percentage) + correct_basevol):
                            event_start_idx = i
                            break

                # if an event_start_idx other than 0 is specified
                # calculate the correct base volume as the average volume from the beginning of the 
                # series to the start index.
                # NOTE(review): if event_start_idx <= window_size then start_range <= 0,
                # making len(range(start_range)) zero and raising ZeroDivisionError --
                # confirm the event start always lands well past the first window
                if event_start_idx:
                    start_range = event_start_idx - window_size
                    correct_basevol = sum(vol[:start_range]) / len(range(start_range))

                # initialize a vector of 0s the same length as the volume time series.
                # this vector will hold the volume correction required at each timestep
                correction = [0 for i in range(len(vol))]

                # i is the timestep. start at time step 0
                i = 0

                # the following functions are utility functions used to check and calculate properties 
                # of portions of the volume time series.
                def is_basevol(vol, lowerbound, upperbound):
                    # Dry-weather test: the window counts as base volume when its
                    # total spread (max - min) stays under abs_basevol_signal.
                    window = vol[lowerbound:upperbound]
                    spread = max(window) - min(window)
                    return spread < abs_basevol_signal

                def is_increasing(vol, lowerbound, upperbound):
                    # True when the series value at upperbound exceeds the value
                    # at lowerbound, i.e. the window trends upward end-to-end.
                    start_val = vol[lowerbound]
                    end_val = vol[upperbound]
                    return end_val > start_val

                def is_off_basevol(basevol, correct_basevol):
                    # True when basevol deviates from the expected base volume by
                    # more than the user-tunable correction_threshold, expressed
                    # as a fraction of the series' volume range.
                    deviation = abs(basevol - correct_basevol)
                    return deviation > correction_threshold * vol_range

                def get_basevol(vol, lowerbound, upperbound):
                    # Mean volume over [lowerbound, upperbound); the divisor is the
                    # bound difference (float division), matching the window length.
                    window = vol[lowerbound:upperbound]
                    return sum(window) / float(upperbound - lowerbound)

                def is_negative(vol, correct_basevol, lowerbound, upperbound):
                    # True when the window [lowerbound, upperbound) is significantly
                    # negative, i.e. sits below the correct base volume too often.

                    # tolerated deviation below the base volume (margin for error)
                    dev_thresh = correction_threshold * vol_range
                    # the slice of the series under inspection
                    window = vol[lowerbound:upperbound]
                    # count timesteps that dip below correct_basevol by more than
                    # the tolerance, then convert to a fraction of the window
                    below = sum(1 for v in window if correct_basevol - v > dev_thresh)
                    pct = below / float(len(window))
                    # negative_threshold_pct (user-settable) caps the allowed fraction
                    return pct > negative_threshold_pct

                # the index to signal stop moving the locator window forward.
                max_window_index = len(vol) - window_size
                # number of timesteps for the length of the secondary window (called micro window in what follows)
                micro_window_size = int(window_size * micro_window_pct)
                #correction_window_size = micro_window_size
                #correction_window_size = window_size

                # scan the series with a moving window, building up `correction`
                # (and patching `vol` in place) wherever a negative-volume period
                # or an off-base dry-weather period is detected
                while i < max_window_index:
                    # before inspection, assume no correction necessary
                    needs_correction = False

                    # if the timestep is after the event_start_idx (defaults to 0, but otherwise controlled by the user, 
                    # see description of event start percentage parameter)
                    if i > event_start_idx:
                        # define the upperbound of the moving window for this timestep
                        upperbound = i + window_size
                        # calculate the average volume in this window
                        basevol = get_basevol(vol, i, upperbound)
                        #correction_start = i
                        # if the volume within this window passes the negative volume test, proceed with correction
                        if is_negative(vol, correct_basevol, i, upperbound):
                            needs_correction = True
                            # define the start and end of the secondary window that will move either forwards or 
                            # backwards to locate the end of the negative volume period.
                            micro_start = upperbound - micro_window_size
                            micro_end = upperbound
                            # if the volume in the secondary window positioned at the end of the main moving window still 
                            # passes the negative volume test, then we have to inch the secondary window forward until 
                            # the test no longer passes, indicating we've found the end of the negative volume period.
                            # otherwise, if the volume in the secondary window at the end of the main window fails the negative 
                            # test, then we are already outside the negative period and need to inch the secondary 
                            # window inward to find where the negative period ends. 
                            if is_negative(vol, correct_basevol, micro_start, micro_end):
                                while micro_end < max_window_index and is_negative(vol, correct_basevol, micro_start, micro_end):
                                    micro_start += 1
                                    micro_end += 1
                            else:
                                # inch the secondary window backwards until the negative period is found
                                while micro_start > i and not is_negative(vol, correct_basevol, micro_start, micro_end):
                                    micro_start = micro_start - 1
                                    micro_end = micro_end - 1

                            # if the secondary window is still before the end of the time series ...
                            if micro_end < max_window_index:
                                # set the starting point of the correction to the current timestep
                                correction_start = i
                                # the end of the correction will be the end point of the secondary window
                                correction_end = micro_end
                                # extract the volume values over the correction period
                                vol_block = vol[correction_start:correction_end]
                                min_vol = min(vol_block)
                                min_idx = vol_block.index(min(vol_block))
                                # initialize two zero vectors. These will be joined momentarily 
                                # to create the full correction vector for this negative period.
                                upper_correction_spread = [0 for _ in range(min_idx, len(vol_block))]
                                lower_correction_spread = [0 for _ in range(min_idx)]
                                # for the volume amounts in this period before the minimum value ...
                                for j, v in enumerate(vol_block[:min_idx]):
                                    # if the amount is less than the correct basevol, calculate the difference.
                                    # if the magnitude of the difference is already covered by the values in the 
                                    # lower_correction_spread, then skip over it. If the magnitude is larger,
                                    # add the difference between the magnitude and the sum of the corrections 
                                    # already in the lower_correction_spread
                                    if v < correct_basevol:
                                        diff = correct_basevol - v
                                        total_correction = sum(lower_correction_spread)
                                        remainder = diff - total_correction
                                        if remainder > 0:
                                            lower_correction_spread[j] = remainder

                                # for those values after the minimum value, we want to undo the 
                                # effects of the correction and bring them down to base volume, 
                                # preserving the rest of the volume curve
                                for j, v in enumerate(vol_block[min_idx:]):
                                    if v < correct_basevol:
                                        diff = v - min_vol
                                        total_correction = abs(sum(upper_correction_spread))
                                        remainder = diff - total_correction
                                        if remainder > 0:
                                            upper_correction_spread[j] = -remainder

                                # combine the two spreads to get the resulting correction vector
                                correction_spread = lower_correction_spread + upper_correction_spread
                            else:
                                # if it was necessary to move the secondary window forward to find the end 
                                # of the negative period, it's possible that the secondary window will 
                                # reach the end of the volume time series. 
                                # if the negative period is at the end of the volume time series, we take different 
                                # steps to correct the volume. These steps preserve patterns in the tail.
                                basevol = get_basevol(vol, micro_start, len(vol))
                                correction_total = correct_basevol - basevol
                                # spread the correction evenly over the two windows preceding i
                                correction_start = i - 2 * window_size
                                correction_end = i
                                correction_window_size = correction_end - correction_start
                                avg_correction = correction_total / correction_window_size
                                correction_end = correction_start + correction_window_size
                                correction_spread = [avg_correction for j in range(correction_window_size)]

                            # resume scanning after the corrected period
                            i = correction_end
                        elif is_basevol(vol, i, upperbound) and is_off_basevol(basevol, correct_basevol):
                            # if this is a period of base vol and it is significantly off of the 
                            # expected base vol ...

                            # starting at the end of the current window, advance until the volume
                            # series is no longer in dry weather and the volume is increasing
                            micro_start = upperbound
                            while micro_start < max_window_index:
                                micro_end = micro_start + micro_window_size
                                if not is_basevol(vol, micro_start, micro_end) and is_increasing(vol, micro_start, micro_end):
                                    moving_end = micro_end
                                    # now that the general end of the dry weather period has been found, 
                                    # move the secondary window inward until dry weather conditions are found again.
                                    while True:
                                        while moving_end > micro_start and not is_basevol(vol, micro_start, moving_end):
                                            moving_end = moving_end - 1
                                        if moving_end == micro_start:
                                            # window collapsed: back the window up and retry
                                            micro_start = micro_start - micro_window_size + 1
                                            micro_end = moving_end
                                        elif moving_end == micro_end:
                                            break
                                        else:
                                            micro_start = moving_end - micro_window_size + 1
                                            micro_end = moving_end

                                    # calculate the average volume at the end of the dry weather period
                                    basevol = get_basevol(vol, micro_start, micro_end)
                                    needs_correction = is_off_basevol(basevol, correct_basevol)
                                    correction_total = correct_basevol - basevol

                                    # center the correction window roughly on i, clamped to the series
                                    basevol_range = micro_end - i
                                    correction_start = int(max(0, i - (.5 * basevol_range)))
                                    correction_end = min(len(vol)-1, i + (basevol_range - (i - correction_start)))

                                    i += micro_window_size

                                    break
                                else:
                                    micro_start += 1

                            # the scan ran off the end of the series: average the tail instead
                            if micro_start >= max_window_index:
                                basevol = get_basevol(vol, micro_start, len(vol))
                                needs_correction = is_off_basevol(basevol, correct_basevol)
                                correction_total = correct_basevol - basevol

                                correction_start = i
                                correction_end = max_window_index

                                i += micro_window_size

                            if needs_correction:
                                # spread the correction evenly across the correction window
                                correction_window_size = correction_end - correction_start
                                avg_correction = correction_total / correction_window_size
                                correction_end = correction_start + correction_window_size
                                correction_spread = [avg_correction for j in range(correction_window_size)]

                    if needs_correction:
                        # apply the correction: accumulate it into the running correction
                        # vector and shift the remainder of the series by the total amount
                        correction_total = sum(correction_spread)
                        existing_correction = correction[correction_start:correction_end]
                        correction_vals = [sum(tup) for tup in zip(correction_spread, existing_correction)]
                        correction[correction_start:correction_end] = correction_vals
                        vol[correction_start:] = [v + correction_total for v in vol[correction_start:]]
                        i += 1
                    else:
                        i += 1

                # after the scan, check the final window and correct it too if its
                # base volume is still significantly off
                final_window_start = len(vol) - window_size
                final_window_end = len(vol) - 1
                final_basevol = get_basevol(vol, final_window_start, final_window_end)
                if is_off_basevol(final_basevol, correct_basevol):
                    correction_start = final_window_start
                    correction_total = correct_basevol - final_basevol
                    # NOTE(review): correction_window_size is only assigned inside the
                    # correction branches above -- if the scan never corrected anything,
                    # this raises NameError; confirm that cannot happen here
                    avg_correction = correction_total / correction_window_size
                    correction_spread = [avg_correction for j in range(correction_window_size)]
                    correction_end = correction_start + correction_window_size
                    existing_correction = correction[correction_start:correction_end]
                    correction_vals = [sum(tup) for tup in zip(correction_spread, existing_correction)]
                    correction[correction_start:correction_end] = correction_vals

                # use the data to create an inlet 
                cur.execute("""
                    SELECT DISTINCT wasp
                    FROM segs 
                    WHERE swmm = ?
                """, (swmm, ))
                wasp = cur.fetchone()[0]

                balance_bucket = [c/timestep_secs for c in correction]
                update_tups = []
                update_tups = [[i,0,swmm,0,wasp,str(swmm)+'balbucket',qin] for i, qin in enumerate(balance_bucket)]
                cur.executemany("""
                    INSERT INTO flows (timestep, swmm_source, swmm_sink, wasp_source, wasp_sink, name, flow) 
                    VALUES (?, ?, ?, ?, ?, ?, ?)
                """, update_tups)

            # recalculate swmm volumes with the new dummy inlets
            calc_swmm_vols()

        # volumes3 is similar to volumes2, but now depth is replaced with new columns swmm_depth and swmm_velocity. 
        # These columns will hold the volume weighted depths and velocity by swmm segment. additionally, this 
        # table also has a swmm_vol col that will hold the total volume for each swmm segment
        # populate volumes3. swmm_vol is the sum of the volumes for each parallel conduit in the swmm segment. 
        # swmm_depth and swmm_velocity are the volume weighted depth and velocity for each parallel in the swmm segment

        # these are alternative definitions for the depth calculation
        #depth_calc1 = "(v2.flow / wasp_flows.wasp_flow) * v2.depth"
        #depth_calc2 = "(ABS(v2.flow)/ wasp_flows.wasp_flow) * v2.depth"
        #depth_calc3 = """
        #    CASE WHEN wasp_flows.wasp_flow = 0
        #        THEN (1 / wasp_flows.seg_count) * v2.depth
        #        ELSE (ABS(v2.flow) / wasp_flows.wasp_flow) * v2.depth
        #    END
        #"""
        # active definition: split depth evenly across the wasp segment's conduits
        depth_calc4 = "v2.depth * (1.0 / wasp_flows.seg_count)"

        # these are alternative definitions for the velocity calculation
        #velocity_calc1 = "(v2.flow / wasp_flows.wasp_flow) * v2.velocity"
        #velocity_calc2 = "(ABS(v2.flow) / wasp_flows.wasp_flow) * v2.velocity"
        #velocity_calc3 = """
        #    CASE WHEN wasp_flows.wasp_flow = 0
        #        THEN (1 / wasp_flows.seg_count) * v2.velocity
        #        ELSE (ABS(v2.flow) / wasp_flows.wasp_flow) * v2.depth
        #    END
        #"""
        # active definition: split velocity evenly across the wasp segment's conduits
        velocity_calc4 = "velocity * (1.0 / wasp_flows.seg_count)"

        tflow_calc1 = "SUM(flow)"       # unused alternative
        tflow_calc2 = "SUM(ABS(flow))"  # total absolute flow per wasp segment

        # roll volumes2_b up toward the wasp level: join each conduit row with the
        # per-wasp totals so depth/velocity can be apportioned per conduit
        cur.execute("""
            INSERT INTO volumes3x  (timestep, wasp, vol, wasp_depth, wasp_velocity, flow, seg_count)
            SELECT v2.timestep, v2.wasp, v2.vol, """ + depth_calc4 + """, 
                """ + velocity_calc4 + """, v2.flow, wasp_flows.seg_count
            FROM (SELECT """ + tflow_calc2 + """ AS wasp_flow, timestep, wasp, COUNT(*) AS seg_count
                FROM volumes2_b
                GROUP BY wasp, timestep
            ) AS wasp_flows
            INNER JOIN volumes2_b v2 ON v2.timestep = wasp_flows.timestep AND v2.wasp = wasp_flows.wasp
        """)

        # sum the apportioned values to get one row per wasp segment per timestep
        cur.execute("""
            INSERT INTO volumes4x (timestep, wasp, vol, depth, velocity)
            SELECT timestep, wasp, SUM(vol), SUM(wasp_depth), SUM(wasp_velocity)
            FROM volumes3x
            GROUP BY wasp,timestep
        """)

        conn.commit()

    # hand off to the finalization stage to produce the output tables/files
    finalize(dbpath, outpath, timestep_secs, round_num=round_num)


def _fresh_dir(path):
    """Ensure *path* exists as a directory that contains no files.

    Creates the directory when missing; otherwise deletes every file
    directly inside it (entries are assumed to be plain files).
    """
    if not os.path.isdir(path):
        os.mkdir(path)
    else:
        for fname in os.listdir(path):
            os.unlink(os.path.join(path, fname))


def _export_series(cur, dirpath, seg_ids, row_query, fieldnames):
    """Write one CSV time series per segment id into *dirpath*.

    cur        -- open sqlite cursor
    dirpath    -- destination directory (already created and emptied)
    seg_ids    -- iterable of segment ids; one <id>.csv is written per id
    row_query  -- parameterized SELECT taking the segment id as its only
                  parameter; its column order must match *fieldnames*
    fieldnames -- CSV header names, zipped positionally with each row
    """
    for seg in seg_ids:
        cur.execute(row_query, (seg,))
        rows = cur.fetchall()
        with open(os.path.join(dirpath, str(seg) + '.csv'), 'w') as f:
            writer = csv.DictWriter(f, fieldnames=fieldnames, lineterminator='\n')
            writer.writeheader()
            for row in rows:
                writer.writerow(dict(zip(fieldnames, row)))


def finalize(dbpath, outpath, timestep_secs, round_num=5):
    """Build the flows_final/volumes_final/final tables and write all outputs.

    Consolidates the raw ``flows`` table, merges flow rows with the
    volume/depth/velocity rows from ``volumes4x`` into one ordered ``final``
    table, dumps per-segment CSV diagnostics under ``segment_series_out``,
    and finally emits the .hyd text file plus the per-segment exports.

    dbpath        -- path to the working sqlite database; expected to already
                     hold the flows, segs, volumes, volumes2_b and volumes4x
                     tables produced upstream
    outpath       -- path of the .hyd text file to produce
    timestep_secs -- timestep length in seconds (written to the .hyd header)
    round_num     -- digits kept after the decimal point in the .hyd output
    """
    series_path = os.path.join(os.path.dirname(outpath), 'segment_series_out')
    if not os.path.isdir(series_path):
        os.mkdir(series_path)

    with closing(sqlite3.connect(dbpath)) as conn:
        conn.isolation_level = None  # autocommit; statements take effect immediately
        cur = conn.cursor()
        tune_db(cur)

        # flows2 consolidates the raw flows table: external inflows
        # (wasp_source = 0) are summed per sink segment, while internal flows
        # are summed per source/sink pair but only over the downstream-most
        # SWMM segment of each WASP segment (MAX(swmm) per wasp).
        cur.execute("CREATE TABLE flows2 (timestep int, wasp_source int, wasp_sink int, flow float)")
        cur.execute("""
            INSERT INTO flows2 (timestep, wasp_source, wasp_sink, flow)
                SELECT * 
                FROM (SELECT timestep, wasp_source, wasp_sink, SUM(flow) AS flow
                      FROM flows
                      WHERE wasp_source = 0
                      GROUP BY wasp_sink, timestep
                    UNION SELECT timestep, wasp_source, wasp_sink, SUM(flow)
                        FROM flows
                        WHERE wasp_source <> 0 AND swmm_source IN (SELECT MAX(swmm) FROM segs GROUP BY wasp)
                        GROUP BY wasp_source, wasp_sink, timestep
                    )
             """)

        # column names for the final table. datatype indicates what kind of
        # data the row is: volume/depth/velocity data (1) or flow data (2).
        # ordering1 and ordering2 contain the values that determine how the
        # rows are finally ordered to produce the order found in .hyd files.
        final_colnames = ['datatype', 'timestep', 'wasp_source', 'wasp_sink', 'flow', 'vol', 'depth', 
            'velocity', 'ordering1', 'ordering2']
        # BUGFIX: this list previously held a surplus 11th 'int' that zip()
        # silently dropped; it now has exactly one datatype per column name.
        final_datatypes = ['int', 'int', 'int', 'int', 'float', 'float', 'float', 'float', 'int', 'int']

        # put them into a comma separated list that can be used in a query string
        final_cols = ', '.join([colname + ' ' + datatype for colname, datatype in zip(final_colnames, final_datatypes)])
        final_colnames_str = ', '.join(final_colnames)

        # create the final tables
        cur.execute("CREATE TABLE flows_final (" + final_cols + ")")
        cur.execute("CREATE TABLE volumes_final (" + final_cols + ")")
        cur.execute("CREATE TABLE final (" + final_cols + ")")

        # populate the flows_final table. note that the columns vol, depth, and velocity 
        # are NULL. These are included so that this table can be unioned with the volumes_final table shortly. 
        # flow data is ordered first by its wasp_source number, unless it is zero, in which case its wasp_sink number 
        # is used, and second by its wasp_source, regardless of whether its zero or not. This forces the order that 
        # each segment's outflow interface will follow the external flow interface for that segment.
        cur.execute("""
            INSERT INTO flows_final (""" + final_colnames_str + """)
                SELECT 2, timestep, wasp_source, wasp_sink, flow, NULL AS vol, NULL AS depth, NULL AS velocity, 
                CASE WHEN wasp_source = 0 THEN wasp_sink ELSE wasp_source END AS ordering1, wasp_source AS ordering2
                FROM flows2
            """)

        # consolidated external flow series: one CSV per WASP sink segment,
        # holding the summed external inflow (wasp_source = 0) per timestep
        consol_dir = os.path.join(series_path, 'consolidated_external_flows_by_WASP_segment')
        _fresh_dir(consol_dir)
        cur.execute('SELECT DISTINCT wasp_sink FROM flows_final WHERE wasp_sink <> 0')
        wasp_sinks = [row[0] for row in cur.fetchall()]
        _export_series(cur, consol_dir, wasp_sinks,
            'SELECT timestep, flow FROM flows_final WHERE wasp_sink = ? AND wasp_source = 0 ORDER BY timestep ',
            ['timestep', 'consolidated_external_flows'])

        # internal flow series keyed by WASP source segment
        wasp_flow_dir = os.path.join(series_path, 'internal_flows_by_WASP_segment')
        _fresh_dir(wasp_flow_dir)
        cur.execute('SELECT DISTINCT wasp_source FROM flows_final WHERE wasp_source <> 0')
        wasp_sources = [row[0] for row in cur.fetchall()]
        _export_series(cur, wasp_flow_dir, wasp_sources, """
                SELECT timestep, flow, wasp_sink
                FROM flows_final  
                WHERE wasp_source = ?
                ORDER BY wasp_source, timestep
                """,
            ['timestep', 'flow', 'sink'])

        # internal flow series keyed by SWMM source segment (raw flows table)
        swmm_flow_dir = os.path.join(series_path, 'internal_flows_by_SWMM_segment')
        _fresh_dir(swmm_flow_dir)
        cur.execute('SELECT DISTINCT swmm_source FROM flows WHERE swmm_source <> 0')
        swmm_sources = [row[0] for row in cur.fetchall()]
        _export_series(cur, swmm_flow_dir, swmm_sources, """
                SELECT timestep, flow, swmm_sink
                FROM flows  
                WHERE swmm_source = ?
                ORDER BY swmm_source, timestep
                """,
            ['timestep', 'flow', 'sink'])

        # velocity series per SWMM segment
        swmm_vel_dir = os.path.join(series_path, 'velocities_by_SWMM_segment')
        _fresh_dir(swmm_vel_dir)
        cur.execute('SELECT DISTINCT swmm FROM volumes WHERE swmm <> 0')
        swmm_segs = [row[0] for row in cur.fetchall()]
        _export_series(cur, swmm_vel_dir, swmm_segs, """
                SELECT timestep, velocity, swmm
                FROM volumes
                WHERE swmm = ?
                ORDER BY swmm, timestep
                """,
            ['timestep', 'velocity', 'swmm'])

        # depth series per SWMM segment
        swmm_depth_dir = os.path.join(series_path, 'depth_by_SWMM_segment')
        _fresh_dir(swmm_depth_dir)
        cur.execute('SELECT DISTINCT swmm FROM volumes WHERE swmm <> 0')
        swmm_segs = [row[0] for row in cur.fetchall()]
        _export_series(cur, swmm_depth_dir, swmm_segs, """
                SELECT timestep, depth, swmm
                FROM volumes
                WHERE swmm = ?
                ORDER BY swmm, timestep
                """,
            ['timestep', 'depth', 'swmm'])

        # populate the volumes_final table. note that the wasp_source and flow columns are null, 
        # as these columns only apply to the flow data.
        cur.execute("""
            INSERT INTO volumes_final (""" + final_colnames_str + """)
                SELECT 1, timestep, NULL AS wasp_source, wasp AS wasp_sink, NULL AS flow, vol, depth, velocity, 
                       wasp AS ordering1, wasp AS ordering2
                FROM volumes4x
            """)

        # combine the two final tables into one
        cur.execute("""
            INSERT INTO final (""" + final_colnames_str + """)
                SELECT  *
                FROM (
                    SELECT * FROM flows_final
                    UNION
                    SELECT * FROM volumes_final
                )
            """)
        # index matches the ORDER BY used when the .hyd file is written
        cur.execute("CREATE INDEX idx_final ON final(timestep, datatype, ordering1, ordering2)")

        export_data(outpath, dbpath, timestep_secs, final_colnames, round_num=round_num)

        export_volumes(dbpath, os.path.dirname(outpath), by_wasp_seg=True)

        export_volumes(dbpath, os.path.dirname(outpath), by_wasp_seg=False)

        export_internal_flows(dbpath, os.path.dirname(outpath), by_wasp_seg=True)

        export_internal_flows(dbpath, os.path.dirname(outpath), by_wasp_seg=False)

def export_data(outpath, dbpath, timestep_secs, final_colnames, round_num=5):
    """Write the .hyd text file from the ``final`` table in *dbpath*.

    File layout (tab separated):
      1. header: num_segs, num_interfaces, timestep_secs, 0, duration, 1
      2. one "wasp_source<TAB>wasp_sink" line per flow interface
      3. per row of ``final`` (ordered by timestep, datatype, ordering1,
         ordering2): volume rows as "<TAB>vol<TAB>0<TAB>depth<TAB>velocity",
         flow rows as "<TAB>flow", values rounded to *round_num* digits.

    outpath        -- destination path for the .hyd file
    dbpath         -- sqlite database holding the populated ``final`` table
    timestep_secs  -- timestep length in seconds for the header line
    final_colnames -- column names of ``final``, in table column order
    round_num      -- digits kept after the decimal point
    """
    with closing(sqlite3.connect(dbpath)) as cnxn:
        cnxn.isolation_level = None
        cur = cnxn.cursor()
        tune_db(cur)

        with open(outpath, 'w') as f:
            # volume rows (datatype 1) at timestep 0 give the segment count
            cur.execute('SELECT COUNT(*) FROM final WHERE timestep = 0 AND datatype = 1')
            num_segs = cur.fetchone()[0]
            # flow rows (datatype 2) at timestep 0 give the interface count
            cur.execute('SELECT COUNT(*) FROM final WHERE timestep = 0 AND datatype = 2')
            num_interfaces = cur.fetchone()[0]

            # duration = last timestep index * seconds per timestep
            cur.execute('SELECT MAX(timestep) FROM final')
            duration = timestep_secs * cur.fetchone()[0]

            # top line of the hyd file
            f.write('\t'.join([str(num_segs), str(num_interfaces), str(timestep_secs), '0', str(duration), '1']) + '\n')

            # interface definitions, in the same order the flow data will follow
            cur.execute("""
                SELECT wasp_source, wasp_sink 
                FROM final 
                WHERE timestep = 0 AND datatype = 2 
                ORDER BY ordering1, ordering2""")
            for source, sink in cur.fetchall():
                f.write(str(source) + '\t' + str(sink) + '\n')

            # all data rows in .hyd order. Iterate the cursor directly instead
            # of fetchall(): 'final' holds every timestep for every segment and
            # interface, so materializing it would be needlessly memory-hungry.
            cur.execute('SELECT * FROM final ORDER BY timestep, datatype, ordering1, ordering2')
            for raw in cur:
                row = dict(zip(final_colnames, raw))
                if row['datatype'] == 1:
                    # volume/depth/velocity row
                    f.write('\t'.join(['',
                        str(round(row['vol'], round_num)),
                        '0',
                        str(round(row['depth'], round_num)),
                        str(round(row['velocity'], round_num))]) + '\n')
                else:
                    # flow row
                    f.write('\t'.join(['', str(round(row['flow'], round_num))]) + '\n')

def export_internal_flows(dbpath, outdir, append_to_folder_name='', by_wasp_seg=True):
    """Dump one CSV of per-timestep internal-flow data for each segment.

    With by_wasp_seg=True the series come from ``flows_final`` keyed by
    wasp_source; otherwise from ``volumes2_b`` keyed by swmm. Files land in
    an 'internal_flows_by_WASP'/'internal_flows_by_SWMM' folder under
    *outdir* (plus *append_to_folder_name*), which is created if absent or
    emptied of files if it already exists.
    """
    with closing(sqlite3.connect(dbpath)) as db_conn:
        db = db_conn.cursor()
        tune_db(db)

        seg_label = 'WASP' if by_wasp_seg else 'SWMM'
        target_dir = os.path.join(outdir, 'internal_flows_by_' + seg_label + append_to_folder_name)
        if os.path.isdir(target_dir):
            # clear out any files left from a previous run
            for entry in os.listdir(target_dir):
                os.unlink(os.path.join(target_dir, entry))
        else:
            os.mkdir(target_dir)

        fieldnames = ['timestep', 'flow', 'depth', 'velocity']
        if by_wasp_seg:
            db.execute('SELECT DISTINCT wasp_source FROM flows_final WHERE wasp_source <> 0')
            query = """
                SELECT """ + ','.join(fieldnames) + """
                FROM flows_final  
                WHERE wasp_source = ?
                ORDER BY wasp_source, timestep
            """
        else:
            db.execute('SELECT DISTINCT swmm FROM volumes2_b')
            query = """ 
                SELECT """ + ','.join(fieldnames) + """ 
                FROM volumes2_b
                WHERE swmm = ?
                ORDER BY timestep 
            """

        # materialize the id list before the cursor is reused below
        seg_ids = [rec[0] for rec in db.fetchall()]
        for seg_id in seg_ids:
            csv_path = os.path.join(target_dir, str(seg_id) + '.csv')
            with open(csv_path, 'w') as handle:
                out = csv.DictWriter(handle, fieldnames=fieldnames, lineterminator='\n')
                out.writeheader()
                db.execute(query, (seg_id, ))
                for rec in db.fetchall():
                    out.writerow(dict(zip(fieldnames, rec)))

def export_volumes(dbpath, outdir, append_to_folder_name = '', by_wasp_seg=True):
    """Write a volume/depth/velocity CSV time series for every segment.

    With by_wasp_seg=True the data come from the ``final`` table (one file
    per WASP segment); otherwise from ``volumes2_b`` (one file per SWMM
    segment). Output goes to a 'volumes_by_WASP'/'volumes_by_SWMM' folder
    under *outdir*, created if missing or emptied of files otherwise.
    """
    with closing(sqlite3.connect(dbpath)) as connection:
        cursor = connection.cursor()
        tune_db(cursor)

        seg_label = 'WASP' if by_wasp_seg else 'SWMM'
        dest = os.path.join(outdir, 'volumes_by_' + seg_label + append_to_folder_name)
        if os.path.isdir(dest):
            # remove leftovers from any earlier export
            for leftover in os.listdir(dest):
                os.unlink(os.path.join(dest, leftover))
        else:
            os.mkdir(dest)

        # both branches share the same output columns
        fieldnames = ['timestep', 'vol', 'depth', 'velocity']
        if by_wasp_seg:
            seg_cursor = cursor.execute('SELECT DISTINCT wasp_sink FROM final WHERE wasp_sink > 0')
            query = """
                SELECT """ + ','.join(fieldnames) + """
                FROM final 
                WHERE wasp_sink = ? AND datatype = 1
                ORDER BY timestep
            """
        else:
            seg_cursor = cursor.execute("SELECT DISTINCT swmm FROM volumes2_b")
            query = """
                SELECT """ + ','.join(fieldnames) + """
                FROM volumes2_b
                WHERE swmm = ? 
                ORDER BY timestep
            """

        # pull the full id list now; the same cursor runs the per-seg queries
        seg_ids = [record[0] for record in seg_cursor.fetchall()]
        for seg_id in seg_ids:
            with open(os.path.join(dest, str(seg_id) + '.csv'), 'w') as handle:
                out = csv.DictWriter(handle, fieldnames=fieldnames, lineterminator='\n')
                out.writeheader()
                cursor.execute(query, (seg_id, ))
                for record in cursor.fetchall():
                    out.writerow(dict(zip(fieldnames, record)))

def load_ini(path):
    ini = {}

    def process_ini(path):
        parser = ConfigParser.SafeConfigParser()
        parser.read(path)
        for key, value in parser.items('main'):
            ini[key] = eval(value, {}, {})
            if key in ('event_start', 'event_end') and ini[key]:
                ini[key] = datetime.strptime(ini[key], '%Y-%m-%d %H:%M:%S')
        return ini

    if os.path.exists(path):
        print("A settings file was found at " + path + ". Do you want to use the options in this file?")
        if raw_input("y/n >>> ")[0] in ('Y', 'y'):
            return process_ini(path)

    key_err_msg = "Keyboard error. Try again, fat fingers\n"

    while True:
        try:
            response = raw_input("Do you want to load the settings from a hydmaker.ini file? (y/n) >> ")[0]
        except KeyboardInterrupt, IndexError:
            print("Invalid input.")

        if response.lower == 'y':
            while True:
                try:
                    ini_path = raw_input("Path to hydmaker.ini file >> ")
                except KeyboardInterrupt:
                    print('Invalid input.')

                if os.path.exists(ini_path) and not os.path.isdir(ini_path):
                    return process_ini(ini_path)
                
                print("Invalid path.")
        else:
            break

    msg = "Please note that this program assumes data coming from an .rpt or .out file are in US units: CFS, FEET, and " + \
          "feet per second."
    print(msg)
    while True:
        try:
            use_bin_output = raw_input("Is your data in a SWMM binary output file? (y/n) >> ")[0]
        except KeyboardInterrupt:
            print(key_err_msg)
            continue

        if use_bin_output.lower() == 'y':
            use_bin_output = True
        elif use_bin_output.lower() == 'n':
            use_bin_output = False
        break

    if use_bin_output:
        ini['rptpath'] = None
        while True:
            try:
                binary_path = raw_input("Path to SWMM binary output file (*.out) >> ")
            except KeyboardInterrupt:
                print(key_err_msg)
                continue

            if os.path.exists(binary_path) and not os.path.isdir(binary_path):
                if os.path.splitext(binary_path)[1].lower() != '.out':
                    print("That doesn't look like binary output file.")
                else:
                    ini['binarypath'] = binary_path
                    break
            else:
                print("Invalid path.")
    else:
        ini['binarypath'] = None
        while True:
            try:
                rpt_path = raw_input("Path to SWMM report file (*.rpt) >> ")
            except KeyboardInterrupt:
                print(key_err_msg)
                continue
            
            if os.path.exists(rpt_path) and not os.path.isdir(rpt_path):
                if os.path.splitext(rpt_path)[1].lower() != '.rpt':
                    print("That doesn't look like an .rpt file.")
                else:
                    ini['rptpath'] = rpt_path
                    break
            else:
                print('Invalid path.')

    while True:
        try:
            inp_path = raw_input("Path to SWMM input file (*.inp) >> ")
        except KeyboardInterrupt:
            print(key_err_msg)

        if os.path.exists(inp_path) and not os.path.isdir(inp_path):
            if os.path.splitext(inp_path)[1].lower() != '.inp':
                print("That doesn't look like an .inp file.")
            else:
                ini['inppath'] = inp_path
                break
        else:
            print("Invalid path.")

    segmap_paths = []
    while True:
        if segmap_paths:
            print('Segmentation file paths:')
            for i, segmap_path in enumerate(segmap_paths):
                print('  ' + str(i + 1) + '.  ' + os.path.basename(segmap_path))

        try:
            segmap_path = raw_input("Add a path to a segmentation file (press enter to continue) >> ")
        except KeyboardInterrupt:
            print(key_err_msg)

        if os.path.exists(segmap_path) and not os.path.isdir(segmap_path):
            if segmap_path in segmap_paths:
                print("You already entered that one.")
            else:
                segmap_paths.append(segmap_path)
        else:
            if not segmap_path and len(segmap_paths) > 0:
                ini['segmap_paths'] = segmap_paths
                break
            print("No such file.")

    while True:
        try:
            output_dir = raw_input('Path to output directory >> ')
        except KeyboardInterrupt:
            print(key_err_msg)

        if os.path.isdir(output_dir):
            break

        print("Invalid directory.")

    while True:
        try:
            out_name = raw_input('File name for .hyd text file output >> ')
        except KeyboardInterrupt:
            print(key_err_msg)

        if out_name:
            out_path = os.path.join(output_dir, out_name)
        
            if os.path.exists(out_path):
                try:
                    response = raw_input('A file with that name already exists. Do you want to overwrite it? (y/n) >> ')[0]
                except KeyboardInterrupt, IndexError:
                    print(key_err_msg)

                if response.lower() == 'y':
                    ini['outpath'] = out_path
                    break
            else:
                ini['outpath'] = out_path
                break
    
    while True:
        try:
            round_num = raw_input("Number of digits after the decimal place? >> ")
        except KeyboardInterrupt:
            print(key_err_msg)
            continue

        try:
            assert int(round_num) == float(round_num)
        except:
            print("Invalid input.")
        else:
            ini['round_num'] = int(round_num)
            break

        
    while True:
        try:
            msg = 'Do you want to pass flow, depth, and velocity data through a moving average filter? (y/n) >> '
            filter_data = raw_input(msg)[0]
        except KeyboardInterrupt, IndexError:
            print(key_err_msg)
            continue

        filter_data = filter_data.lower() == 'y'

        break

    if filter_data:
        while True:
            try:
                filter_mins = raw_input('Moving average window size in minutes >> ')
            except KeyboardInterrupt:
                print(key_err_msg)

            try:
                ini['filter_mins'] = int(filter_mins)
            except:
                print('Invalid input.')
                continue
            else:
                break
    else:
        ini['filter_mins'] = None

    correction_params = {
        'correction_threshold' : 0.0005,
        'abs_basevol_signal' : 2, # 5
        'window_size' : 400,
        'negative_threshold_pct' : .2,
        'micro_window_pct' : .25, # .25,
        'event_start_percentage' : 0.1
    }

    ini['correct_vol'] = False
    while True:
        try:
            msg = "Do you want to correct segments when their volumes stabilize at levels above or below their " \
                  + "known baselines? The process will add dummy inflows to segments that balance the volume " \
                  + "as needed. (y/n) >> "
            correct_vol = raw_input(msg)[0]
        except KeyboardInterrupt, IndexError:
            print(key_err_msg)

        if correct_vol.lower() == 'y':
            ini['correct_vol'] = True
        elif correct_vol.lower() == 'n':
            ini['correct_vol'] = False
        else:
            print('Invalid input.')
            continue

        break

    while True:
        try:
            correct_vol = re.match('y', correct_vol, re.IGNORECASE)

            if not correct_vol:
                break
            else:
                ini['correct_vol'] = bool(correct_vol)

                while True:
                    msg = "There are several parameters that you can change to control the behavior of the volume correction " \
                          + "procedure. Would you like to change them from their default values?"
                    print(msg)
                    try:
                        reject_defaults = raw_input("Change default volume correction parameters? (y/n) >> ")[0]
                    except KeyboardInterrupt, IndexError:
                        print("Invalid input.")

                    break

                if reject_defaults.lower() == 'n':
                    break
                else:
                    while True:
                        print("Volume correction parameter 1: Moving window size for location of dry weather periods")
                        msg = ("The procedure locates periods of dry weather that may require correction by moving "
                               "a fixed window of specified size over the volume timeseries for each segment. The window "
                               "size is measured in number of timesteps. The default is 400.")
                        print(msg) 
                        window_size = raw_input('window size >> ')
                        try:
                            correction_params['window_size'] = int(window_size)
                        except:
                            continue
                        else:
                            break

                    while True:
                        print("Volume correction parameter 2: Dry-weather-test threshold")
                        msg = ("For each block of the volume timeseries examined by the dry-weather-locator window, "
                               "the procedure performs a test to decide whether the current block is part of a period "
                               "of dry weather, in which case it must be checked to see if the volume matches expectations. "
                               "The decision as to whether the block is in a period of dry weather is determined by comparing "
                               "the absolute difference between the minimum and maximum values within the block against a "
                               "specified threshold (the dry-weather-test threshold). When this difference is below the "
                               "threshold, the block is considered to be dry weather and gets treated further to detemine if "
                               "it has the expected volume or needs to be corrected. The default value for "
                               "the dry-weather-test threshold is 5 m^3.")
                        print(msg)
                        abs_basevol_signal = raw_input("enter the dry-weather test threshold in m^3 >> ")
                        try:
                            correction_params['abs_basevol_signal'] = float(abs_basevol_signal)
                        except:
                            continue
                        else:
                            break

                    while True:
                        print("Volume correction parameter 3: Expected maximum base volume deviation percentage")

                        msg = ("This parameter determines the minimum allowable absolute difference between the average volume "
                        "in a dry weather period and the expected base volume. This parameter determines when "
                        "dry weather periods should be corrected. It's "
                        "entered as a percentage of the total event volume range, where event volume range is taken as "
                        "the difference between peak and base volume.  The default percentage is .005 m^3.")
                        print(msg)
                        correction_threshold = raw_input("correction threshold in m^3 >> ")
                        try:
                            correction_params['correction_threshold'] = float(correction_threshold)
                        except:
                            continue
                        else:
                            break

                    # no longer can the user set the secondary window parameter. Preserving the code for debugging value, but 
                    # changing this parameter is rarely required when accommodating the procedure to unusual circumstances, 
                    # so I'm eliminating it in favor of simplicity.
                    # 
                    # this is the text for an elaborated description of the parameter for documentation. Storing it here.
                    #When the procedure finds a dry weather period where the volume is significantly 
                    #above the expected base volume (as determined by the expected max base volume deviation percentage), 
                    #the procedure searches for the end of the 
                    #dry weather period to determine how much correction is necessary. To find the end 
                    #of the dry weather period, the procedure advances by small increments starting at the tail of the 
                    #current dry-weather-period locator window and checks within each 
                    #increment if the volume series is no longer in dry weather conditions. As soon as it finds an increment
                    #window where the series is no longer in dry weather, it stops and concludes that the current dry weather 
                    #period ends there.
                    #The secondary moving window size determines the size of the small increment that's used to look ahead for 
                    #the end of the dry weather condition. It's size is determined as a percentage of the dry-weather-period 
                    #locator window (volume correction parameter 1). The default size is .25.
                    #while True:
                    #    print("Volume correction parameter 4: Secondary moving window")
                    #    msg = ("Once it's been established that a period of base "
                    #           "volume needs correction, to determine how much correction is necessary, "
                    #           "the procedure examines the period in question using a smaller "
                    #           "window than the primary one to find the exact location and amount for the correction."
                    #           "Enter the size of this window as a "
                    #           "percentage of the larger correction window previously entered. The default is .25.")
                    #    print(msg)
                    #    msg = "Enter micro window pct >> "
                    #    micro_window_pct = raw_input(msg)
                    #    try:
                    #        correction_params['micro_window_pct'] = float(micro_window_pct)
                    #    except:
                    #        continue
                    #    else:
                    #        break

                    while True:
                        print("Volume correction parameter 4: Negative volume timestep threshold percent")

                        msg = ("This parameter determines what percentage of timesteps within the current dry weather locator "
                        "period window are allowed to be below the expected base volume before a correction is triggered. "
                        "For instance, if the parameter is set to .2 (the default), when more than 20% of the timesteps "
                        "within the window are " "negative, then that portion of the timeseries will be corrected.")
                        print(msg)
                        negative_threshold_pct = raw_input("Negative volume timestep threshold percent >> ")
                        try:
                            correction_params['negative_threshold_pct'] = float(negative_threshold_pct)
                        except:
                            continue
                        else:
                            break

                    while True:
                        print("Volume correction parameter 5: Event start percent")

                        msg = ("This parameter controls when the procedure begins looking for periods that need "
                        "correction. By default, this parameter is set to None, and the procedure begins looking "
                        "for looking for correction spots right away. Alternatively, you can set this to some percentage "
                        "and the procedure will only begin looking for periods to correct after its observed fluctations "
                        "in the volume that exceed this percentage multiplied by the volume range of the timeseries. "
                        "Leave blank for default.")
                        print(msg)
                        event_start_pct = raw_input("Event start percent >> ")
                        if not len(event_start_pct):
                            correction_params['event_start_percentage'] = None
                            break
                        else:
                            try:
                                x = float(event_start_pct)
                                assert x > 0 and x < 1
                                correction_params['event_start_percentage'] = x
                            except:
                                continue
                            else:
                                break
                    break

        except KeyboardInterrupt:
            print(key_err_msg)
            pass

    # Merge the collected correction parameters into the settings dict
    # (Python 2 list-concat idiom; correction_params wins on duplicate keys).
    ini = dict(ini.items() + correction_params.items())
    while True:
        try:
            dummy_end = raw_input('Include final dummy segment? (y/n) >> ')
            # re.match anchors at the start, so any answer beginning with 'y' or 'n'
            # (case-insensitive) is accepted; 'y...' maps to True, everything else to False.
            if re.match('y|n', dummy_end, re.IGNORECASE):
                ini['dummy_end'] = bool(re.match('y', dummy_end, re.IGNORECASE))
                break
        except KeyboardInterrupt:
            print(key_err_msg)
            pass

    # Optional event window: both limits default to None (no restriction).
    ini['event_start'] = None
    ini['event_end'] = None
    while True:
        try:
            set_event_limits = raw_input("Set event start and end dates? >> ")
            if re.match('n', set_event_limits, re.IGNORECASE):
                break
            elif re.match('y', set_event_limits, re.IGNORECASE):
                # Re-prompt until the start date parses in the expected format.
                while True:
                    # NOTE(review): prompt text is missing the closing ")" after HH:MM:SS.
                    event_start = raw_input("event start (yyyy-mm-dd HH:MM:SS >> ")
                    try:
                        event_start = datetime.strptime(event_start, "%Y-%m-%d %H:%M:%S")
                    # Bare except: any parse failure (ValueError) simply re-prompts.
                    except:
                        continue
                    else:
                        ini['event_start'] = event_start
                        break

                # Re-prompt until the end date parses in the expected format.
                while True:
                    event_end = raw_input("event end (yyyy-mm-dd HH:MM:SS) >> ")

                    try:
                        event_end = datetime.strptime(event_end, "%Y-%m-%d %H:%M:%S")
                    except:
                        continue
                    else:
                        ini['event_end'] = event_end
                        break
                # NOTE(review): event_end is not checked to be after event_start.
                break

        except KeyboardInterrupt:
            print(key_err_msg)

    while True:
        try:
            # NOTE(review): runtime string reads "will calculated" — missing "be".
            msg = "By default, the velocity and depth for combined parallel and adjacent segments will " \
                  + "calculated as the arithmetic average of the velocities and depths of the constituent segments. " \
                  + "Alternatively, you can choose to use a flow-weighted average instead. Choosing this option " \
                  + "will raise an error if any of the segments to be combined have 0 flow at some timesteps."
            print(msg)
            # [0] raises IndexError on empty input — intended to be caught below.
            resp = raw_input("Do you want to use flow-weighted sums instead of averages? (y/n) >> ")[0]
            if re.match('y', resp[0], re.IGNORECASE):
                ini['flow_weight_sums'] = True
            else:
                ini['flow_weight_sums'] = False
            break
        # BUG(review): in Python 2 `except KeyboardInterrupt, IndexError:` means
        # `except KeyboardInterrupt as IndexError` — it binds the exception to the
        # name IndexError (shadowing the builtin) and does NOT catch IndexError.
        # An empty answer therefore crashes the prompt. Should be
        # `except (KeyboardInterrupt, IndexError):`.
        except KeyboardInterrupt, IndexError:
            print(key_err_msg)

    # Persist the collected settings to the ini file so a later run can reload them.
    parser = ConfigParser.SafeConfigParser()
    parser.add_section('main')
    for key, value in ini.items():
        # Datetimes are serialized in the same format the prompts accept.
        if key in ('event_start', 'event_end') and value:
            value = value.strftime('%Y-%m-%d %H:%M:%S')
        # Plain strings are written as r'...' literals — presumably so the loader
        # can eval them with backslashes (Windows paths) intact; confirm against load_ini.
        parser.set('main', str(key), "r'" + value + "'" if isinstance(value, str) else str(value))

    # `path` is assigned earlier in this function (outside this excerpt).
    with open(path, 'w') as f:
        parser.write(f)

    return ini


def run():
    """Load settings from hydmaker.ini (next to this script) and run processing."""
    settings = load_ini(os.path.join(curdir(), 'hydmaker.ini'))
    process(**settings)

# Script entry point: run the interactive workflow when executed directly.
if __name__ == '__main__':
    run()

