import sqlite3, re, os, traceback, code, csv, glob, argparse, pdb
from math import floor
from contextlib import closing
from datetime import datetime, timedelta
from itertools import groupby
from copy import copy
import shutil
from collections import OrderedDict
import struct
import ConfigParser

def curdir():
    """Return the directory containing this module.

    Falls back to the process's current directory ('.') when ``__file__``
    is not defined (e.g. in an interactive interpreter session).
    """
    if '__file__' in globals():
        return os.path.dirname(__file__)
    return os.curdir

def swmmout2sqlite(swmmout_path, dbpath, element_type, names=None, variables=None, 
        start=None, end=None, ignore_missing_names=False):

    assert names is None or all(isinstance(name, basestring) for name in names)

    element_types = ['subcatchments', 'nodes', 'links', 'pollutants', 'system']
    if element_type not in element_types:
        raise Exception("Unknown element type: " + element_type)


    class SWMMOUT2SQLITE_Error(Exception):
        def __init__(self, exc):
            Exception.__init__(self, "Can't convert SWMM binary output to SQLite database. " + exc)

    with open(swmmout_path, 'rb') as f:
        RECORD_BYTES = 4

        # opening records constants 
        NRECORDS_OPENING = 7
        OPENING_BYTES = RECORD_BYTES * NRECORDS_OPENING
        NRECORDS_HEAD = 3 # head is the beginning records of the opening
        HEAD_BYTES = NRECORDS_HEAD * RECORD_BYTES
        HEAD_FORMAT = str(NRECORDS_HEAD) + 'i'
        NELEMENT_TYPES = 4 # subcatches, nodes, links, pollutants
        ELEMENTCOUNTS_BYTES = OPENING_BYTES - HEAD_BYTES
        if not NELEMENT_TYPES * RECORD_BYTES == ELEMENTCOUNTS_BYTES:
            raise SWMMOUT2SQLITE_Error("NELEMENT_TYPES and ELEMENTCOUNTS_BYTES contants are inconsistent.")
        ELEMENTCOUNTS_FORMAT = str(NELEMENT_TYPES) + 'i'

        # closing records constants
        NRECORDS_CLOSING = 6
        CLOSING_BYTES = RECORD_BYTES * NRECORDS_CLOSING
        NSECTIONS = 3 # Names, Properties, Results
        SECTION_POS_BYTES = RECORD_BYTES * NSECTIONS
        SECTION_POS_FORMAT = str(NSECTIONS) + 'i'
        NRECORDS_TAIL = NRECORDS_CLOSING - NSECTIONS
        TAIL_BYTES = CLOSING_BYTES - SECTION_POS_BYTES
        TAIL_FORMAT = str(NRECORDS_TAIL) + 'i'

        EXPECTED_ID_NUM = 516114522 # should appear at the start/end of file

        NRECORDS_DAYS_SINCE_EPOCH = 2
        DAYS_SINCE_EPOCH_BYTES  = NRECORDS_DAYS_SINCE_EPOCH * RECORD_BYTES
        EPOCH = datetime(1899, 12, 30)
        REPORT_INTERVAL_BYTES = RECORD_BYTES
        HOURS_IN_DAY = 24.0
        MINUTES_IN_HOUR = 60.0
        SECONDS_IN_MINUTE = 60.0
        TIMESTEP_TOLERANCE = (((1/HOURS_IN_DAY)/MINUTES_IN_HOUR)/SECONDS_IN_MINUTE)

        # move 
        f.seek(-CLOSING_BYTES, 2) 
        section_pos_records = f.read(SECTION_POS_BYTES)
        section_positions = struct.unpack(SECTION_POS_FORMAT, section_pos_records)
        positions_by_section = dict(zip(('names', 'properties', 'results'), section_positions))
        tail_records = f.read(TAIL_BYTES)
        ntimesteps, errorcode, id_num = struct.unpack(TAIL_FORMAT, tail_records)

        if id_num != EXPECTED_ID_NUM:
            raise SWMMOUT2SQLITE_Error("Unexpected identifying number encountered at end of file.")
        elif errorcode:
            raise SWMMOUT2SQLITE_Error("Output contains errors.")
        elif not ntimesteps:
            raise SWMMOUT2SQLITE_Error("Output has zero timesteps.")

        f.seek(0)
        head_records = f.read(HEAD_BYTES)
        id_num, version, flowunits = struct.unpack(HEAD_FORMAT, head_records)
        if id_num != EXPECTED_ID_NUM:
            raise SWMMOUT2SQLITE_Error("Unexpected identifying number encountered at beginning of file.")

        units = {'depth_rate1' : ['inPerHour', 'mmPerHour'],
                 'depth_rate2' : ['ftPerSec', 'mPerSec'],
                 'depth_rate3' : ['inPerDay', 'mmPerDay'],
                 'depth1' : ['in', 'mm'],
                 'depth2' : ['ft', 'm'],
                 'volume' : ['ft3', 'm3'],
                 'temp' : ['degF', 'degC']
        }
        flowunit_options = ['CFS', 'GPM', 'MGD', 'CMS', 'LPS', 'LPD']
        flowunits = flowunit_options[flowunits]
        units_choice = 0 if flowunits in ('CFS', 'GPM', 'MGD') else 1
        for unit_group in units:
            units[unit_group] = units[unit_group][units_choice]
        units['flow'] = flowunits

        element_counts_records = f.read(ELEMENTCOUNTS_BYTES)
        element_counts = list(struct.unpack(ELEMENTCOUNTS_FORMAT, element_counts_records))
        nsystem_elements = 1
        element_counts.append(nsystem_elements)
        element_counts_by_type = OrderedDict(zip(element_types, element_counts))

        if not element_counts_by_type[element_type]:
            raise Exception("SWMM output does not report any " + element_type + " elements.")

        # Read names
        f.seek(positions_by_section['names'], 0)
        element_names_by_type = OrderedDict()
        for type_ in element_types:
            if type_ in ('system',):
                element_names_by_type[type_] = None
            else:
                for i in range(element_counts_by_type[type_]):
                    name_bytes_record = f.read(RECORD_BYTES)
                    name_bytes = struct.unpack('i', name_bytes_record)[0]
                    name_record = f.read(name_bytes)
                    name = struct.unpack(str(name_bytes) + 's', name_record)[0]
                    element_names_by_type.setdefault(type_, []).append(name)

        if names:
            type_names = element_names_by_type[element_type]
            in_report = [name in type_names for name in names]

            if not all(in_report) and not ignore_missing_names:
                missing_names = [name for name, is_in_report in zip(names, in_report) if not is_in_report]
                raise Exception("The following element names aren't in the report: " + ','.join(missing_names))

            user_names_in_rpt = [name for name in names if name in type_names]
        else:
            user_names_in_rpt = element_names_by_type[element_type]

        # Read pollutant units
        pollutant_labels = []
        if element_counts_by_type['pollutants']:
            pollutant_units_records = f.read(element_counts_by_type['pollutants'] * RECORD_BYTES)
            pollutant_units = struct.unpack(str(element_counts_by_type['pollutants']) + 'i', pollutant_units_records)
            pollutant_unit_label_options = ['mgL', 'ugL', 'countPerL']
            pollutant_unit_labels = [pollutant_unit_label_options[i] for i in pollutant_units]
            pollutant_labels = ['_'.join(tup) for tup in zip(element_names_by_type['pollutants'], pollutant_unit_labels)]

        # Read properties
        f.seek(positions_by_section['properties'], 0)
        element_property_codes = OrderedDict()
        element_properties_by_type = OrderedDict()
        for type_ in element_types:
            if type_ in ('pollutants', 'system'):
                element_properties_by_type[type_] = None
            else:
                nprops_record = f.read(RECORD_BYTES)
                nprops = struct.unpack('i', nprops_record)[0]
                property_code_records = f.read(nprops*RECORD_BYTES)
                property_codes = struct.unpack(str(nprops) + 'i', property_code_records)
                element_property_codes[type_] = property_codes

                for i in range(element_counts_by_type[type_]):
                    property_records = f.read(nprops*RECORD_BYTES)
                    properties = struct.unpack(str(nprops) + 'f', property_records)
                    element_properties_by_type.setdefault(type_, []).append(zip(property_codes, properties))

        # Read reporting variables
        var_labels_by_type = {
            'subcatchments' : ['rainfall_{depth_rate1}', 
                               'snow_depth_{depth1}', 
                               'evap_plus_infil_losses_{depth_rate1}', 
                               'runoff_rate_{flow}', 
                               'gw_outflow_rate_{flow}',
                               'gw_table_elev_{depth2}'],
            'nodes' : ['depth_above_invert_{depth2}', 
                       'hydraulic_head_{depth2}', 
                       'stored_and_ponded_vol_{volume}', 
                       'lateral_inflow_{flow}', 
                       'total_inflow_{flow}',
                       'flow_lost_to_flooding_{flow}'],
            'links' : ['flow_rate_{flow}', 
                       'flow_depth_{depth2}', 
                       'flow_velocity_{depth_rate2}', 
                       'Froude_number', 
                       'Capacity'],
            'system' : ['air_temp_{temp}',
                        'rainfall_{depth_rate1}',
                        'snow_depth_{depth1}',
                        'evap_plus_infil_losses_{depth_rate1}',
                        'runoff_rate_{flow}',
                        'dry_weather_inflow_{flow}',
                        'gw_inflow_{flow}',
                        'RDII_inflow_{flow}',
                        'user_supplied_direct_inflow_{flow}',
                        'total_lateral_inflow_{flow}',
                        'flow_lost_to_flooding_{flow}',
                        'flow_leaving_through_outfalls_{flow}',
                        'volume_of_stored_water_{volume}',
                        'evaporation_rate_{depth_rate3}']
        }
        if element_counts_by_type['pollutants']:
            for type_ in element_types:
                if type_ not in ('pollutants', 'system'):
                    var_labels_by_type[type_].extend(pollutant_labels)
        for type_ in var_labels_by_type:
            var_labels_by_type[type_] = [name.format(**units) for name in var_labels_by_type[type_]]

        report_vars_by_type = OrderedDict()
        for type_ in element_types:
            if type_ in ('pollutants',):
                report_vars_by_type[type_] = None
            else:
                nvariable_codes_record = f.read(RECORD_BYTES)
                nvariable_codes = struct.unpack('i', nvariable_codes_record)[0]
                variable_code_records = f.read(nvariable_codes * RECORD_BYTES)
                variable_codes = struct.unpack(str(nvariable_codes) + 'i', variable_code_records)
                expected_num_vars = len(var_labels_by_type[type_])
                if len(variable_codes) != expected_num_vars:
                    exc = "Unexpected number of variables for " + type_ + ". Expected " + str(expected_num_vars) \
                          + ", encountered " + str(len(variable_codes))
                    raise Exception(exc)
                report_vars_by_type[type_] = variable_codes

        # Read reporting interval
        rpt_start_days_since_epoch_record = f.read(DAYS_SINCE_EPOCH_BYTES)
        rpt_start_days_since_epoch = struct.unpack('d', rpt_start_days_since_epoch_record)[0]
        rpt_interval_record = f.read(REPORT_INTERVAL_BYTES) 
        rpt_interval = struct.unpack('i', rpt_interval_record)[0]
        
        def to_days_since_epoch(dtime):
            in_seconds = (dtime - EPOCH).total_seconds()
            return ((in_seconds / SECONDS_IN_MINUTE) / MINUTES_IN_HOUR) / HOURS_IN_DAY

        user_start_days_since_epoch = to_days_since_epoch(start) if start else rpt_start_days_since_epoch
        # user_start_dtime = EPOCH + timedelta(days=user_start_day
        rpt_interval_days = ((rpt_interval / SECONDS_IN_MINUTE) / MINUTES_IN_HOUR) / HOURS_IN_DAY
        rpt_end_days_since_epoch = rpt_start_days_since_epoch + rpt_interval_days * (1 + ntimesteps)
        user_end_days_since_epoch = to_days_since_epoch(end) if end else rpt_end_days_since_epoch

        if user_end_days_since_epoch <= user_start_days_since_epoch:
            raise SWMMOUT2SQLITE_Error("Start and end datetimes inconsistent.")

        # calculate byte size per timestep for each element type
        report_bytes_by_type = OrderedDict()
        for type_ in element_types:
            if type_ == 'pollutants':
                report_bytes_by_type[type_] = 0
            else:
                byte_count = element_counts_by_type[type_] * len(report_vars_by_type[type_]) * RECORD_BYTES
                report_bytes_by_type[type_] = byte_count

        bytes_per_timestep = DAYS_SINCE_EPOCH_BYTES + sum(report_bytes_by_type.values())

        # define offsets to locate element type within timestep
        type_offsets = [0]
        for bytes_ in report_bytes_by_type.values()[:-1]:
            type_offsets.append(type_offsets[-1] + bytes_)

        type_offsets_by_type = OrderedDict(zip(element_types, type_offsets))

        bytes_per_element = len(report_vars_by_type[element_type]) * RECORD_BYTES

        if not variables:
            variables = var_labels_by_type[element_type]

        user_var_labels_by_index = []
        for var in variables:
            for i, label in enumerate(var_labels_by_type[element_type]):
                if re.match(var, label):
                    user_var_labels_by_index.append((i, label))
                    break
                elif i == len(var_labels_by_type[element_type]) - 1:
                    exc = "Could not match variable " + var + " with any of the known variable labels for " + element_type
                    raise Exception(exc)
        user_var_labels_by_index = OrderedDict(user_var_labels_by_index)
        max_user_var_index = max(user_var_labels_by_index.keys())
        user_var_byte_range = (max_user_var_index + 1) * RECORD_BYTES

        user_element_indices = [[i, name] for i, name in enumerate(element_names_by_type[element_type]) 
                                if name in user_names_in_rpt]
        lagged = [user_element_indices[i] + [user_element_indices[i-1][0]] for i in range(1, len(user_element_indices))]

        
        #user_element_offsets_and_names = user_element_indices[:1]
        first_offset = user_element_indices[0]
        user_element_offsets_and_names = [[first_offset[0] * bytes_per_element, first_offset[1]]]
        for idx, name, idx_lag1 in lagged:
            offset = (idx - idx_lag1) * bytes_per_element - user_var_byte_range
            user_element_offsets_and_names.append((offset, name))
        max_user_element_index = max(dict(user_element_indices).keys())

        with closing(sqlite3.connect(dbpath)) as cnxn:
            cnxn.isolation_level = None
            cursor = cnxn.cursor()
            cursor.executescript("""
                PRAGMA synchronous=OFF;
                PRAGMA count_changes=OFF;
                PRAGMA journal_mode=OFF;
            """)

            try:
                cursor.execute("DROP TABLE " + element_type)
            except:
                pass

            create_table_stmt = "CREATE TABLE {type} (timestep integer, name text, {variables})"
            variable_defs = ','.join([name + ' real' for name in user_var_labels_by_index.values()])

            create_table_stmt = create_table_stmt.format(type=element_type, variables=variable_defs)
            cursor.execute(create_table_stmt)

            insert_stmt = """
                INSERT INTO {type} (timestep, name, {variables})
                VALUES (?, ?, {values})
            """
            insert_stmt = insert_stmt.format(type=element_type, variables=','.join(user_var_labels_by_index.values()),
                values=','.join(['?' for _ in range(len(user_var_labels_by_index))]))

            f.seek(positions_by_section['results'], 0)

            INSERT_BATCH_SIZE = 500
            batch = []
            results_origin = positions_by_section['results']
            var_format = 'f' * (max_user_var_index + 1)
            element_type_offset = type_offsets_by_type[element_type]
            head_timestep_bytes_range = (((max_user_element_index + 1) - 1) * bytes_per_element) + user_var_byte_range
            tail_timestep_bytes_range = bytes_per_timestep - DAYS_SINCE_EPOCH_BYTES - element_type_offset - head_timestep_bytes_range
            days_since_epoch_record = f.read(DAYS_SINCE_EPOCH_BYTES)
            days_since_epoch = struct.unpack('d', days_since_epoch_record)[0]
            for timestep in range(ntimesteps):
                if days_since_epoch > user_end_days_since_epoch:
                    break
                elif days_since_epoch >= user_start_days_since_epoch:
                    f.seek(element_type_offset, 1)
                    for i, tup in enumerate(user_element_offsets_and_names):
                        element_offset, name = tup
                        f.seek(element_offset, 1)

                        entry = [timestep, name]
                        vars_records = f.read(user_var_byte_range)
                        vars_vals = struct.unpack(var_format, vars_records)
                        user_var_vals = [val for j, val in enumerate(vars_vals) if j in user_var_labels_by_index.keys()]
                        entry.extend(user_var_vals)
                        batch.append(entry)

                        if len(batch) == INSERT_BATCH_SIZE:
                            cursor.executemany(insert_stmt, batch)
                            del batch[:]

                    f.seek(tail_timestep_bytes_range, 1)

                else:
                    f.seek(bytes_per_timestep - DAYS_SINCE_EPOCH_BYTES, 1)

                days_since_epoch_record = f.read(DAYS_SINCE_EPOCH_BYTES)
                days_since_epoch = struct.unpack('d', days_since_epoch_record)[0]

            cursor.executemany(insert_stmt, batch)
            idx_stmt = "CREATE INDEX idx_{type}_{col} on {type}({col})"
            cursor.execute(idx_stmt.format(type=element_type, col='timestep'))
            cursor.execute(idx_stmt.format(type=element_type, col='name'))
            cnxn.commit()

        user_start_dtime = EPOCH + timedelta(days=user_start_days_since_epoch)
        user_end_dtime = EPOCH + timedelta(days=user_end_days_since_epoch)
        return dbpath, element_type, len(user_names_in_rpt), len(variables), user_start_dtime, user_end_dtime

def tune_db(cursor):
    """Apply speed-over-safety PRAGMA settings to a SQLite cursor."""
    speed_pragmas = (
        "PRAGMA synchronous=OFF",    # don't wait for disk writes to complete before continuing
        "PRAGMA count_changes=OFF",  # don't count num. rows affected by DELETE, INSERT, or UPDATE
        "PRAGMA journal_mode=OFF",   # turn off journaling
    )
    for pragma in speed_pragmas:
        cursor.execute(pragma)
    #cursor.execute("PRAGMA cache_size=1048576") # number of btree pages to cache (1 page = 1 KB)
    #cursor.execute("PRAGMA temp_store=2") # store temporary files in memory

def get_segtable(segmap_paths):
    """Read and validate WASP/SWMM segment-map CSV files.

    Parameters:
        segmap_paths : iterable of paths to .map.csv files with the columns
            Name, SWMM, Type, WASP.

    Returns:
        A list of dicts, one per CSV row across all files. Blank SWMM/WASP
        cells are forward-filled from the previous row, then both columns
        are renumbered sequentially starting at 1.

    Raises:
        Exception if the maps fail any structural validation check (missing
        leading segment numbers, wrong Type values, bad END rows, repeated
        conduits).
    """
    segtable = []  # one dict per row, accumulated across all map files
    for fname in segmap_paths:
        with open(fname, 'r') as f:
            table = list(csv.DictReader(f))
            # if the table has rows, check that the header is right and add the rows to segtable
            if table:
                assert all(fieldname in ('Name', 'SWMM', 'Type', 'WASP')
                           for fieldname in table[0].keys())
                segtable.extend(table)

    # Forward-fill blank SWMM/WASP cells from the previous row. The first
    # row must supply both values; previously a blank first cell caused an
    # UnboundLocalError, so fail with a clear message instead.
    prev_wasp = None
    prev_swmm = None
    for row in segtable:
        if row['WASP']:
            prev_wasp = row['WASP']
        elif prev_wasp is None:
            raise Exception('First segment map row is missing a WASP value.')
        else:
            row['WASP'] = prev_wasp

        if row['SWMM']:
            prev_swmm = row['SWMM']
        elif prev_swmm is None:
            raise Exception('First segment map row is missing a SWMM value.')
        else:
            row['SWMM'] = prev_swmm

    # Renumber both columns sequentially (1, 2, 3, ...) so segment numbers
    # are contiguous even if the source files skipped numbers.
    current_swmm = 1
    current_wasp = 1
    for i, row in enumerate(segtable):
        if i == 0:
            prev_wasp = row['WASP']
            prev_swmm = row['SWMM']
            row['WASP'] = current_wasp
            row['SWMM'] = current_swmm
        else:
            if row['WASP'] == prev_wasp:
                row['WASP'] = current_wasp
            else:
                prev_wasp = row['WASP']
                current_wasp += 1
                row['WASP'] = current_wasp

            if row['SWMM'] == prev_swmm:
                row['SWMM'] = current_swmm
            else:
                prev_swmm = row['SWMM']
                current_swmm += 1
                row['SWMM'] = current_swmm

    # Exactly one row may have type 'END' and no segment name: it marks the
    # end of the system. A tributary end also has type END, but its name is
    # the conduit it links into.
    if len([row for row in segtable if not row['Name'] and row['Type'] == 'END']) != 1:
        raise Exception('Expected exactly one terminating segment (unnamed END row) in the segment maps.')

    # check that all types are either CONDUIT, INFLOW, or END
    if any(row['Type'] not in ('CONDUIT', 'INFLOW', 'END') for row in segtable):
        raise Exception('Unknown Type found in seg map file')

    # extract trib connection conduit names and check that they exist somewhere in the system
    trib_connection_names = [row['Name'] for row in segtable if row['Type'] == 'END' and row['Name']]
    conduit_names = [row['Name'] for row in segtable if row['Type'] == 'CONDUIT']
    if not all(name in conduit_names for name in trib_connection_names):
        raise Exception('There are END rows in your segment map(s) that refer to conduits not included in the map.')

    # check that there are no repeated conduits
    names = [row['Name'] for row in segtable if row['Type'] != 'END']
    if len(names) != len(set(names)):
        raise Exception('There are repeated conduits in one of the map files.')

    return segtable

def process(filter_mins,
            dummy_end, 
            inppath, 
            outpath, 
            segmap_paths, 
            rptpath=None, 
            binarypath=None,
            correct_vol=False, 
            window_size=400, 
            correction_threshold=0.0005, 
            abs_basevol_signal=5, 
            event_start=None, 
            event_end=None, 
            negative_threshold_pct=0.2,
            micro_window_pct=.25,
            round_num=5):
    """
        filter_mins : number of minutes for the optional moving average smoothing window
        rptpath : path to SWMM rpt file
        binarypath : path to SWMM binary output file
        inppath : path to SWMM .inp file
        outpath : path to directory to write output
        segmap_path : path to segmap file
        correct_vol : boolean value to determine whether or not flow should be corrected

        TODO : go into more detail about the correction parameters
        window_size : correction parameter
        correction_threshhold : correction parameter
        abs_basevol_signal : correction parameter
        negative_threshold_pct : correction paramater
        micro_window_pct : correction parameter

        event_start : event start datetime
        event_end : event end datetime
    """

    if not (binarypath or rptpath):
        raise Exception("Must supply either an SWMM .rpt path or a .out path.")

    # process the segmap files and return a validated list of dictionaries for each row in any segmap
    segtable = get_segtable(segmap_paths)

    # generate the name for the database from the filename of the rpt
    db_name_path = binarypath if binarypath else rptpath
    dbname = os.path.splitext(os.path.basename(db_name_path))[0] + '.db'
    dbpath = os.path.join(os.path.dirname(outpath), dbname)
    if os.path.exists(dbpath):
        os.unlink(dbpath)

    with closing(sqlite3.connect(dbpath)) as conn:
        #conn.isolation_level = None
        cur = conn.cursor()
        tune_db(cur)

        # create table to store seg map data
        cur.execute("""
            CREATE TABLE segs (
                wasp int,           -- wasp segment number
                DS_wasp int,        -- wasp segment number of the DS segment (initially NULL)
                swmm int,           -- swmm segment number 
                DS_swmm int,        -- swmm segment number of the DS segment (initiall NULL)
                type varchar(8),    -- element type, either 'CONDUIT', 'INFLOW', or 'END'
                name varchar(64)    -- name as it appears in the .rpt file
            )""")

        for row in segtable:
            # read the map file into segs
            cur.execute("""
                INSERT INTO segs (wasp, swmm, type, name) 
                VALUES (%(WASP)s, %(SWMM)s, '%(Type)s', '%(Name)s')
                """ % row)
        
        # get list of unique swmm seg nums
        cur.execute('SELECT DISTINCT swmm, wasp FROM segs')
        segs = cur.fetchall()

        cur.execute('SELECT MAX(wasp) FROM segs')
        last_wasp_seg = cur.fetchone()[0]
        cur.execute('SELECT MAX(swmm) FROM segs')
        last_swmm_seg = cur.fetchone()[0]

        # this loop cycles through the swmm segment numbers and updates DS_wasp and DS_swmm columns in the segs table. 
        # It also creates a dummy segment for the final segment
        for swmm, wasp in segs:
            # check if the current swmm segment includes an 'END' row
            cur.execute("SELECT name FROM segs WHERE swmm = ? AND type = 'END'", (swmm,)) 
            end_row = cur.fetchone()
            if end_row: 
                end_name = end_row[0]
                if end_name:
                    # if the end row includes the name of a conduit (meaning this segment is the end of a tributary, 
                    # not the end of the network), then
                    # find the wasp and swmm segment numbers for the conduit and assign them to the DS_wasp and 
                    # DS_swmm columns for the conduits in this segment
                    cur.execute("SELECT wasp, swmm FROM segs WHERE name = ?", (end_name,))
                    DS_wasp, DS_swmm = cur.fetchone()
                    cur.execute("UPDATE segs SET DS_wasp = ?, DS_swmm = ? WHERE type = 'CONDUIT' AND swmm = ?", 
                        (DS_wasp, DS_swmm, swmm))
                else:
                    if dummy_end:
                        # create dummy segment
                        dummy_wasp = last_wasp_seg + 1
                        dummy_swmm = last_swmm_seg + 1
                        cur.execute("""
                            INSERT INTO segs (wasp, swmm, DS_wasp, DS_swmm, type, name)
                            SELECT ?, ?, 0, 0, 'DUMMY', name 
                                FROM segs WHERE swmm = ? AND type = 'CONDUIT' LIMIT 1
                            """, (dummy_wasp, dummy_swmm, swmm))

                        cur.execute("UPDATE segs SET DS_wasp = ?, DS_swmm = ? WHERE type = 'CONDUIT' AND swmm = ?", 
                            (dummy_wasp, dummy_swmm, swmm))
                    else:
                        cur.execute("UPDATE segs SET DS_wasp = ?, DS_swmm = ? WHERE type = 'CONDUIT' AND swmm = ?", 
                            (0, 0, swmm))
            else:
                cur.execute("UPDATE segs SET DS_wasp = ?, DS_swmm = ? WHERE type = 'CONDUIT' AND swmm = ?", 
                    (wasp + 1, swmm + 1, swmm))

        # for rows of type 'INFLOW', the DS_wasp and DS_swmm are the same as the swmm and wasp
        cur.execute("UPDATE segs SET DS_wasp = wasp, DS_swmm = swmm WHERE type = 'INFLOW'")
        cur.execute("DELETE FROM segs WHERE type = 'END'")

        series_path = os.path.join(os.path.dirname(outpath), 'segment_series_out')
        # if necessary, create a directory to hold all of the output series
        if not os.path.isdir(series_path):
            os.mkdir(series_path)

        with open(os.path.join(series_path, 'segment_map_out.csv'), 'w') as f:
            fieldnames = ['wasp', 'ds_wasp', 'swmm', 'ds_swmm', 'type', 'name']
            writer = csv.DictWriter(f, fieldnames = fieldnames, lineterminator='\n')
            writer.writeheader()
            cur.execute('SELECT * FROM segs ORDER BY wasp, swmm, type, name')
            for row in cur.fetchall():
                row = dict(zip(fieldnames, row))
                writer.writerow(row)

        # create a table to store the length and dwf for conduits
        cur.execute('CREATE TABLE conduit_params (name varchar(64), length float, inlet varchar(64), dwf float DEFAULT 0)')

        # the following block loops through the .inp and updates conduit_params with the lengths and DWF's for each conduit
        meters_in_foot = 0.3048
        first_step = 0
        last_step = 0
        
        print 'reading .inp'
        with open(inppath, 'r') as f:
            # churn through the .inp lines until encountering the [OPTIONS] marker
            line = f.readline().strip()
            while not re.match(re.compile(r'\[opt', re.IGNORECASE), line.strip()):
                line = f.readline()
                if not line:
                    raise Exception("Can't find [OPTIONS] section in .inp file.")

            line = f.readline()
            if not line:
                raise Exception("Can't find REPORT_STEP in the [OPTIONS] section of the .inp")

            #while not re.match(re.compile(r'report_step', re.IGNORECASE), line.strip()):
            timestep_secs = None
            start_date = None
            start_time = None
            end_date = None
            end_time = None
            while True:
                if re.match(re.compile(r'report_start_date', re.IGNORECASE), line.strip()):
                    _, start_date = line.split()
                elif re.match(re.compile(r'report_start_time', re.IGNORECASE), line.strip()):
                    _, start_time = line.split()
                elif re.match(re.compile(r'end_date', re.IGNORECASE), line.strip()):
                    _, end_date = line.split()
                elif re.match(re.compile(r'end_time', re.IGNORECASE), line.strip()):
                    _, end_time = line.split()
                elif re.match(re.compile(r'report_step', re.IGNORECASE), line.strip()):
                    _, ts_str = line.split()
                    hours, mins, secs = ts_str.split(':')
                    timestep_secs = float(hours) * 120 + float(mins) * 60 + float(secs)
                     
                if (not line) or re.match(r'\[', line.strip()):
                    if not timestep_secs:
                        raise Exception(".inp [OPTIONS] missing REPORT_STEP")
                    elif not start_time:
                        raise Exception(".inp [OPTIONS] missing REPORT_START_TIME")
                    elif not start_date:
                        raise Exception(".inp [OPTIONS] missing REPORT_START_DATE")
                    elif not end_date:
                        raise Exception(".inp [OPTIONS] missing REPORT_END_DATE")
                    elif not end_time:
                        raise Exception(".inp [OPTIONS] missing REPORT_END_TIME")
                    else:
                        try:
                            report_start_dtime = datetime.strptime(' '.join([start_date, start_time]), '%m/%d/%Y %H:%M:%S')
                            report_end_dtime = datetime.strptime(' '.join([end_date, end_time]), '%m/%d/%Y %H:%M:%S')
                        except:
                            raise Exception('Unexpected datetime format encountered in .inp file for report dates.')
                        else:
                            break
                else:
                    line = f.readline()

            # Default the event window to the full report window when no
            # explicit event bounds were supplied.
            if not event_start:
                event_start = report_start_dtime

            # Index of the first report step falling inside the event window.
            first_step = int(floor((event_start - report_start_dtime).total_seconds() / timestep_secs))

            if not event_end:
                event_end = report_end_dtime

            # Number of report steps spanned by the event.
            event_step_count = int((event_end - event_start).total_seconds() / timestep_secs)

            last_step = first_step + event_step_count

            # get a list of all the conduit names from segs
            cur.execute("SELECT name FROM segs WHERE type = 'CONDUIT'")
            conduit_names = [row[0] for row in cur.fetchall()]
            
            # churn through the .inp lines until encountering the [CONDUITS] marker
            line = f.readline()
            if not line:
                raise Exception("Can't find [CONDUITS] section in .inp file")

            while not re.match(re.compile(r'\[cond', re.IGNORECASE), line.strip()):
                line = f.readline()
                if not line:
                    raise Exception("Can't find [CONDUITS] section in .inp file.")

            # loop through the lines in [CONDUITS] and for each conduit with its name in the list, 
            # update conduit params with that conduits 
            # length and inlet node.
            line = f.readline()
            if not line:
                raise Exception("There are no conduits listed in the [CONDUIT] section of the .inp")
            line = line.strip()
            while not re.match(r'\[', line):
                # skip blank lines and ';' comment lines
                if not (re.match('^$', line) or re.match(r';', line)):
                    # columns: name, inlet node, outlet node, length, <rest>
                    name, inlet, _, length, _ = line.split(None, 4)
                    if name in conduit_names:
                        # lengths are stored converted from feet to meters
                        cur.execute('INSERT INTO conduit_params (name, length, inlet) VALUES (?, ?, ?)', 
                            (name, float(length) * meters_in_foot, inlet))
                raw_line = f.readline()
                if not raw_line:
                    # BUG FIX: readline() returns '' forever at EOF, so the
                    # original looped infinitely when [CONDUITS] was the last
                    # section in the file. Treat EOF as end-of-section.
                    break
                line = raw_line.strip()

            # churn through the lines until encountering the [DWF] marker
            line = f.readline()
            if not line:
                raise Exception("Can't find [DWF] section in the .inp")
            line = line.strip()
            while not re.match(re.compile(r'\[dwf', re.IGNORECASE), line):
                raw_line = f.readline()
                if not raw_line:
                    # BUG FIX: the original spun forever at EOF because
                    # readline() keeps returning ''. Fail loudly instead.
                    raise Exception("Can't find [DWF] section in the .inp")
                line = raw_line.strip()


            # get a list of the inlet node names from conduit_params
            cur.execute('SELECT inlet FROM conduit_params')
            inlet_names = [row[0] for row in cur.fetchall()]

            # cycle through the [DWF] section and for each node that is in our inlet list, update 
            # conduit_params with its DWF value
            line = f.readline().strip()
            while not re.match(r'\[', line):
                if not (re.match('^$', line) or re.match(r';', line)):
                    # columns: node name, parameter type, average value
                    name, param, avg = line.split()
                    if name in inlet_names and param == 'FLOW':
                        # store dry-weather flow converted from cfs to cms
                        cur.execute('UPDATE conduit_params SET dwf = ? WHERE inlet = ?', 
                            (round(float(avg) * pow(meters_in_foot, 3), round_num), name))
                raw_line = f.readline()
                if not raw_line:
                    # BUG FIX: same EOF infinite loop as the section search
                    # above; treat EOF as end of the [DWF] section.
                    break
                line = raw_line.strip()


        print 'done reading .inp'
        # conduit_params2 = conduit_params joined against segs so each conduit
        # row also carries its swmm segment number.
        cur.execute("CREATE TABLE conduit_params2 (name varchar(64), swmm int, length float, dwf float DEFAULT 0)")

        cur.execute("""
            INSERT INTO conduit_params2 (name, swmm, length, dwf)
                SELECT cp.name, segs.swmm, cp.length, cp.dwf
                FROM conduit_params AS cp INNER JOIN segs ON cp.name = segs.name
        """)

        # create the first flows table. 
        cur.execute("""
            CREATE TABLE flows (
                timestep int, 
                wasp_source int,    -- wasp seg number indicating which segment the flow is coming from
                wasp_sink int,      -- wasp seg number indicating which segment the flow is going to
                swmm_source int,
                swmm_sink int, 
                name varchar(64), 
                flow float
            )""")

        # create the first volumes table
        cur.execute("""
            CREATE TABLE volumes (
                timestep int, 
                wasp int, 
                swmm int, 
                name varchar(64),
                init_vol float, 
                flow float, 
                depth float, 
                velocity float
            )""")

        # select one conduit to represent each swmm segment. These conduits will determine the base flow 
        # for the their respective swmm segments.
        # (MAX(name) is an arbitrary but deterministic pick per swmm group.)
        cur.execute("SELECT MAX(name) FROM segs WHERE type = 'CONDUIT' GROUP BY swmm")
        rep_conduits = [row[0] for row in cur.fetchall()]

        # per-timestep subcatchment series loaded from processed binary output
        cur.execute("""
            CREATE TABLE subcatch_bin (
                timestep int,
                catch varchar(64),
                flow float
            )
        """)

        # per-timestep link series loaded from processed binary output
        cur.execute("""
            CREATE TABLE link_bin (
                timestep int,
                link varchar(64),
                flow float
            )
        """)

        #if subcatch_bin_path:
        #    with open(subcatch_bin_path, 'r') as f:
        #        line = f.readline()
        #        while not line.startswith('Time Stamp'):
        #            line = f.readline()

        #        if line:
        #            header = [name.strip(' \n') for name in line.split(',')]
        #            reader = csv.DictReader(f, fieldnames=header)
        #            first_row = reader.next()
        #            second_row = reader.next()
        #            d2 = datetime.datetime.strptime(second_row['Time Stamp'], '%m/%d/%Y %H:%M')
        #            d1 = datetime.datetime.strptime(first_row['Time Stamp'], '%m/%d/%Y %H:%M')
        #            ts_secs = (d2 - d1).toal_seconds()
        #            if ts_secs != timestep_secs:
        #                raise Exception("Timestep in processed binary output does not match the timestep in the .rpt")
        #            
        #            bin_out_first_step = int(floor((event_start - d1).total_seconds() / ts_secs))

        #            for i, row in enumerate(reader):
        #                if i < bin_out_first_step:
        #                    continue
        #                else:
        #                    del row['Time Stamp']
        #                    insert_rows = zip(itertools.cycle(i - bin_out_first_step), row.keys(), row.values())
        #                    insert_rows = [dict(zip(('timestep', 'link', 'flow'), row)) for row in insert_rows]

        #                    cur.executemany("""
        #                        INSERT INTO subcatch_bin (timestep, link, flow)
        #                        VALUES (:timestep, :link, :flow)
        #                    """, insert_rows)

        if binarypath:
            # Pull the link and subcatchment time series straight out of the
            # SWMM binary output into the 'links'/'subcatchments' tables of dbpath.
            cur.execute("SELECT name FROM segs")
            names = [row[0] for row in cur.fetchall()]
            conn.commit()
            link_vars = ['flow_rate', 'flow_depth', 'flow_velocity']
            catch_vars = ['runoff_rate']
            links_result = swmmout2sqlite(binarypath, dbpath, 'links', names=names, 
                    variables=link_vars, start=event_start, end=event_end,
                    ignore_missing_names=True)
            catches_result = swmmout2sqlite(binarypath, dbpath, 'subcatchments', names=names, 
                    variables=catch_vars, start=event_start, end=event_end, ignore_missing_names=True)
            
            link_fields = ['timestep', 'name'] + link_vars
            catch_fields = ['timestep', 'name'] + catch_vars

            cur.execute("SELECT DISTINCT name FROM links")
            link_names = [row[0] for row in cur.fetchall()]
            
            cur.execute("SELECT DISTINCT name FROM subcatchments")
            catch_names = [row[0] for row in cur.fetchall()]

            # every mapped conduit must exist in the binary output
            cur.execute("SELECT name FROM segs WHERE type = 'CONDUIT'")
            seg_conduits = [row[0] for row in cur.fetchall()]
            if [name for name in seg_conduits if name not in link_names]:
                raise Exception("There are conduits in the map file not found in the binary file.")

            # an inflow may come from either a link or a subcatchment
            cur.execute("SELECT name FROM segs WHERE type = 'INFLOW'")
            seg_inflows = [row[0] for row in cur.fetchall()]
            if [name for name in seg_inflows if name not in link_names + catch_names]:
                raise Exception("There are inflows in the map file not found in the binary file.")

            for name in names:
                cur.execute("SELECT wasp, swmm, DS_wasp, DS_swmm, type, name FROM segs WHERE name = ?", (name,))
                elements = cur.fetchall()

                # links provide flow/depth/velocity; subcatchments only runoff
                if name in link_names:
                    cur.execute("SELECT * FROM links WHERE name = ? ORDER BY timestep", (name,))
                    fields = link_fields
                    flow_field = 'flow_rate'
                else:
                    cur.execute("SELECT * FROM subcatchments WHERE name = ? ORDER BY timestep", (name,))
                    fields = catch_fields
                    flow_field = 'runoff_rate'
                    
                data = [dict(zip(fields, row)) for row in cur.fetchall()]

                flows = []
                velocities = []
                depths = []

                # unconverted (US-unit) series, kept alongside the converted ones
                orig_velocities = []
                orig_depths = []

                if filter_mins:
                    # size of the moving-average window, in report steps
                    filter_steps = round((filter_mins * 60) / timestep_secs)

                flow_window = []
                velocity_window = []
                depth_window = []

                for i, row in enumerate(data):
                    # convert flow from cfs to cms and depth/velocity from ft to m
                    flow = round(float(row[flow_field]) * pow(meters_in_foot, 3), round_num)
                    depth = round(float(row.get('flow_depth', 0)) * meters_in_foot, round_num)
                    velocity = round(float(row.get('flow_velocity', 0)) * meters_in_foot, round_num)

                    orig_depths.append(float(row.get('flow_depth', 0)))
                    orig_velocities.append(float(row.get('flow_velocity', 0)))

                    if filter_mins:
                        # moving-average filter over a window of filter_steps samples
                        flow_window.append(flow)
                        velocity_window.append(velocity)
                        depth_window.append(depth)

                        if len(flow_window) == filter_steps:
                            flows.append(sum(flow_window) / filter_steps)
                            del flow_window[0]
                            velocities.append(sum(velocity_window) / filter_steps)
                            del velocity_window[0]
                            # BUG FIX: the original read "summ(depth_wifow)" --
                            # two typos that raised NameError as soon as the
                            # filter window filled up.
                            depths.append(sum(depth_window) / filter_steps)
                            del depth_window[0]
                    else:
                        flows.append(flow)
                        velocities.append(velocity)
                        depths.append(depth)

                for element in elements:
                    wasp, swmm, DS_wasp, DS_swmm, eltype, name  = element

                    # inflows originate outside the network, so their source is 0
                    wasp_source, swmm_source = (0,0) if eltype =='INFLOW' else (wasp, swmm)

                    cur.executemany("""
                    INSERT INTO flows (timestep, wasp_source, swmm_source, name, wasp_sink, swmm_sink, flow)
                    VALUES (?,?,?, ?,?,?, ?)""",
                    [(i, wasp_source, swmm_source, name, DS_wasp, DS_swmm, flow) for i, flow in enumerate(flows)])

                    if eltype in ('CONDUIT', 'DUMMY'):
                        # the representative conduit of each swmm segment also
                        # contributes a constant dry-weather-flow series
                        if eltype == 'CONDUIT' and name in rep_conduits:
                            cur.execute('SELECT dwf FROM conduit_params WHERE name = ?', (name,))
                            dwf = cur.fetchone()[0]

                            cur.executemany("""
                                INSERT INTO flows (timestep, wasp_source, swmm_source, name, wasp_sink, swmm_sink, flow)
                                VALUES (?,?,?, ?,?,?, ?)
                            """, [(i, 0, 0, name + '_DWF', wasp, swmm, dwf) for i in xrange(len(flows))])

                        cur.execute("SELECT length FROM conduit_params WHERE name = ?", (name,))
                        length = cur.fetchone()[0]
                        # initial volume = cross-sectional area (flow/velocity) * length;
                        # zero velocity at step 0 means an empty conduit
                        init_vol = (flows[0] / velocities[0]) * length if velocities[0] else 0 

                        cur.executemany("""
                            INSERT INTO volumes (timestep, wasp, swmm, name, init_vol, flow, depth, velocity)
                            VALUES (?,?,?, ?,?,?, ?,?)
                        """, [(i, wasp, swmm, name, init_vol, tup[0], tup[1], tup[2])
                              for i, tup in enumerate(zip(flows, depths, velocities))])

        elif rptpath:
            with open(rptpath, 'r') as f:
                # loop through the .rpt file. Each time a report series is encountered, check if the 
                # name is in the segs table, and if so insert the appropriate values into the flows and volumes tables
                line = f.readline()
                while line:
                    # scan forward to the next '<<< ... >>>' series header
                    while line and not re.match('<<<', line.strip()):
                        line = f.readline()

                    if line:
                        _, kind, name, _ = line.strip().split() # assumes line is of the format '<<< Subcatchment CATCH01 >>>'
                        # check if there are any entries for this name in segs ...
                        cur.execute('SELECT wasp, swmm, DS_wasp, DS_swmm, type, name FROM segs WHERE name = ?', (name,))
                        elements = cur.fetchall()
                        if not (elements and kind in ('Link', 'Subcatchment')): 
                            # ... if this element isn't in segs, keep reading
                            line = f.readline()
                        else:
                            header_nline = 4
                            for i in range(header_nline): # eat header
                                _ = f.readline() 

                            # column layout differs between Link and Subcatchment series
                            if kind == 'Link':
                                fieldnames = ['date', 'time', 'flow', 'velocity', 'depth', 'percent'] 
                            else:
                                fieldnames = ['date', 'time', 'percip', 'losses', 'flow']

                            flows = []
                            velocities = []
                            depths = []

                            # unconverted (US-unit) series, kept alongside the converted ones
                            orig_velocities = []
                            orig_depths = []

                            if filter_mins:
                                # size of the moving-average window, in report steps
                                filter_steps = round((filter_mins * 60) / timestep_secs)
                            flow_window = []
                            velocity_window = []
                            depth_window = []

                            line = f.readline()
                            # loop through each row in the series and update the flows, velocities, and depths 
                            # lists, filtering if told to do so.
                            counter = 1

                            while line and not re.match('^$', line.strip()):
                                # keep only rows inside the event window
                                # [first_step, last_step]
                                if counter < first_step:
                                    counter += 1
                                    line = f.readline()
                                    continue
                                elif counter >  last_step:
                                    break
                                else:
                                    record = dict(zip(fieldnames, line.strip().split()))

                                    # convert flow (cfs -> cms) and depth/velocity (ft -> m)
                                    flow = round(float(record['flow']) * pow(meters_in_foot, 3), round_num)
                                    depth = round(float(record.get('depth', 0)) * meters_in_foot, round_num)
                                    velocity = round(float(record.get('velocity', 0)) * meters_in_foot, round_num)

                                    orig_velocities.append(float(record.get('velocity', 0)))
                                    orig_depths.append(float(record.get('depth', 0)))

                                    if filter_mins:
                                        # moving-average filter over filter_steps samples
                                        flow_window.append(flow)
                                        velocity_window.append(velocity)
                                        depth_window.append(depth)

                                        if len(flow_window) == filter_steps:
                                            flows.append(sum(flow_window) / filter_steps)
                                            del flow_window[0]
                                            velocities.append(sum(velocity_window) / filter_steps)
                                            del velocity_window[0]
                                            depths.append(sum(depth_window) / filter_steps)
                                            del depth_window[0]
                                    else:
                                        flows.append(flow)
                                        velocities.append(velocity)
                                        depths.append(depth)

                                counter += 1 
                                line = f.readline()
        
                            for element in elements:
                                wasp, swmm, DS_wasp, DS_swmm, eltype, name  = element
                                # inflows originate outside the network, so source = 0
                                wasp_source = 0 if eltype == 'INFLOW' else wasp
                                swmm_source = 0 if eltype == 'INFLOW' else swmm

                                cur.executemany("""
                                 INSERT INTO flows (timestep, wasp_source, swmm_source, name, wasp_sink, swmm_sink, flow) 
                                 VALUES (?, ?, ?, ?, ?, ?, ?)""",
                                 [(i, wasp_source, swmm_source, name, DS_wasp, DS_swmm, flow) for i, flow in enumerate(flows)])

                                if eltype in ('CONDUIT', 'DUMMY'):
                                    # representative conduits also contribute a
                                    # constant dry-weather-flow series
                                    if eltype == 'CONDUIT' and name in rep_conduits:
                                        cur.execute('SELECT dwf FROM conduit_params WHERE name = ?', (name,))
                                        dwf = cur.fetchone()[0]

                                        cur.executemany("""
                                            INSERT INTO flows (timestep, wasp_source, swmm_source, name, 
                                                wasp_sink, swmm_sink, flow) 
                                            VALUES (?, ?, ?, ?, ?, ?, ?)
                                            """, [(i, 0, 0, name + '_DWF', wasp, swmm, dwf) for i in xrange(len(flows))])

                                    cur.execute('SELECT length FROM conduit_params WHERE name = ?', (name,))
                                    length = cur.fetchone()[0]
                                    # initial volume = cross-sectional area (flow/velocity) * length
                                    init_vol = (flows[0] / velocities[0]) * length if velocities[0] else 0

                                    cur.executemany("""
                                        INSERT INTO volumes (timestep, wasp, swmm, name, init_vol, flow, depth, velocity)
                                        VALUES (?, ?, ?, ?, ?, ?, ?, ?)
                                        """, [(i, wasp, swmm, name, init_vol, tup[0], tup[1], tup[2]) 
                                               for i, tup in enumerate(zip(flows, depths, velocities))])
        else:
            # neither binarypath nor rptpath supplied: no series are loaded
            pass
            #path_to_out_db = swmmout2sqlite(binarypath, dbpath=dbpath, el)
            
        cur.execute('CREATE INDEX idx_flows ON flows(timestep, name)')
        cur.execute('CREATE INDEX idx_volumes ON volumes(timestep)')
        conn.commit()

        # check that all conduits in segs are represented in the volumes table
        cur.execute("SELECT DISTINCT name FROM segs WHERE type = 'CONDUIT'")
        segs_conduit_names = [row[0] for row in cur.fetchall()]
        cur.execute("SELECT DISTINCT name FROM volumes")
        volumes_names = [row[0] for row in cur.fetchall()]
        missing_conduits = [name for name in segs_conduit_names if name not in volumes_names]
        if missing_conduits:
            raise Exception('There are conduits in the segment map not found in the .rpt file: ' + \
                    ', '.join(missing_conduits))

        # check that all conduits and their DWF's are represented in the flows table
        cur.execute("SELECT DISTINCT name FROM segs WHERE name <> ''")
        segs_all_names = [row[0] for row in cur.fetchall()]
        # NOTE(review): the results of the next query are never fetched and
        # dwf_names is built from rep_conduits instead -- looks like dead code,
        # confirm before removing.
        cur.execute("SELECT DISTINCT name FROM segs WHERE type = 'CONDUIT'")
        dwf_names = [cond + '_DWF' for cond in rep_conduits]
        cur.execute("SELECT DISTINCT name FROM flows")
        flows_names = [row[0] for row in cur.fetchall()]
        if set(segs_all_names + dwf_names) != set(flows_names):
            raise Exception('There are elements missing from the flows table.')

        # create or clear the directory for external flow percent series
        ext_flows_pct_path = os.path.join(series_path, 'external_flow_percentages_by_WASP_segment')
        if not os.path.isdir(ext_flows_pct_path):
            os.mkdir(ext_flows_pct_path)
        else:
            for fname in os.listdir(ext_flows_pct_path):
                os.unlink(os.path.join(ext_flows_pct_path, fname))

        # create or clear the directory for external flow series
        ext_flows_path = os.path.join(series_path, 'external_flows_by_WASP_segment')
        if not os.path.isdir(ext_flows_path):
            os.mkdir(ext_flows_path)
        else:
            for fname in os.listdir(ext_flows_path):
                os.unlink(os.path.join(ext_flows_path, fname))

        # get a list of the unique wasp segment numbers. we will loop over the list to get the external 
        # flow output for each wasp segment
        cur.execute('SELECT DISTINCT wasp_sink FROM flows WHERE wasp_sink > 0')
        wasp_segs = [row[0] for row in cur.fetchall()]

        # output the external flows and external flow percentages by wasp seg num
        # output the external flows and external flow percentages by wasp seg num
        for segnum in wasp_segs:
            # get a list of inflow names for this wasp segment
            cur.execute('SELECT DISTINCT name FROM flows WHERE wasp_sink = ? AND wasp_source = 0', (segnum, ))
            inflow_names = [row[0] for row in cur.fetchall()]

            if inflow_names: # if the segment has no inflows, do nothing, otherwise...

                # this query returns a table with columns indicating timestep, the name of the inflow, 
                # its flow at each timestep,
                # the total flow going into the wasp segment at each timestep, and the inflow's percentage 
                # of that total flow at each timestep
                cur.execute("""
                    SELECT f.timestep, f.name, f.flow, tf.total_flow, 
                        CASE WHEN tf.total_flow > 0 THEN f.flow / tf.total_flow ELSE 0 END
                    FROM flows f INNER JOIN (
                        SELECT timestep, SUM(flow) AS total_flow
                        FROM flows 
                        WHERE wasp_sink = ? AND wasp_source = 0 
                        GROUP BY timestep) as tf
                    ON f.timestep = tf.timestep
                    WHERE f.wasp_sink = ? AND f.wasp_source = 0 
                    ORDER BY f.name, f.timestep
                """, (segnum, segnum))

                results = cur.fetchall()
                
                pct_columns = []
                flow_columns = []
                total_col = []

                names = ['timestep']

                # one column per inflow name; results are already ordered by
                # name, which groupby relies on
                for key, group in groupby(results, lambda row: row[1]):
                    names.append(key)
                    group = list(group)
                    group = sorted(group, key = lambda row: row[0])
                    pct_flow = [row[4] for row in group]
                    pct_columns.append(pct_flow)
                    flow = [row[2] for row in group]
                    total_col = [row[3] for row in group]
                    flow_columns.append(flow)

                # prepend the timestep column, then transpose columns to rows
                pct_columns.insert(0, range(0, len(pct_columns[0])))
                flow_columns.insert(0, range(0, len(flow_columns[0])))
                pct_rows = zip(*pct_columns) # transform list of columns into a list of rows
                flow_rows = zip(*flow_columns)
                named_pct_rows = [dict(zip(names, row)) for row in pct_rows]
                named_flow_rows = [dict(zip(names, row)) for row in flow_rows]

                with open(os.path.join(ext_flows_pct_path, str(segnum) + '.csv'), 'w') as f:
                    writer = csv.DictWriter(f, fieldnames = names, lineterminator='\n')
                    writer.writeheader()
                    for row in named_pct_rows:
                        writer.writerow(row)

                with open(os.path.join(ext_flows_path, str(segnum) + '_flow.csv'), 'w') as f:
                    writer = csv.DictWriter(f, fieldnames=names, lineterminator='\n')
                    writer.writeheader()
                    for row in named_flow_rows:
                        writer.writerow(row)

        # create or clear the directory for external flow percent series (SWMM)
        ext_swmm_flows_pct_path = os.path.join(series_path, 'external_flow_percentages_by_SWMM_segment')
        if not os.path.isdir(ext_swmm_flows_pct_path):
            os.mkdir(ext_swmm_flows_pct_path)
        else:
            for fname in os.listdir(ext_swmm_flows_pct_path):
                os.unlink(os.path.join(ext_swmm_flows_pct_path, fname))

        # create or clear the directory for external flow series
        ext_swmm_flows_path = os.path.join(series_path, 'external_flows_by_SWMM_segment')
        if not os.path.isdir(ext_swmm_flows_path):
            os.mkdir(ext_swmm_flows_path)
        else:
            for fname in os.listdir(ext_swmm_flows_path):
                os.unlink(os.path.join(ext_swmm_flows_path, fname))

        # get a list of the unique swmm segment numbers. we will loop over the list to get the external 
        # flow output for each swmm segment
        cur.execute('SELECT DISTINCT swmm_sink FROM flows WHERE swmm_sink > 0')
        swmm_segs = [row[0] for row in cur.fetchall()]

        # output the external flows and external flow percentages by swmm seg num
        # (mirrors the WASP-segment loop above, keyed on swmm_sink/swmm_source)
        for segnum in swmm_segs:
            # get a list of inflow names for this swmm segment
            cur.execute('SELECT DISTINCT name FROM flows WHERE swmm_sink = ? AND swmm_source = 0', (segnum, ))
            inflow_names = [row[0] for row in cur.fetchall()]

            if inflow_names: # if the segment has no inflows, do nothing, otherwise...

                cur.execute("""
                    SELECT f.timestep, f.name, f.flow, tf.total_flow, 
                        CASE WHEN tf.total_flow > 0 THEN f.flow / tf.total_flow ELSE 0 END
                    FROM flows f INNER JOIN (
                        SELECT timestep, SUM(flow) AS total_flow
                        FROM flows 
                        WHERE swmm_sink = ? AND swmm_source = 0 
                        GROUP BY timestep) as tf
                    ON f.timestep = tf.timestep
                    WHERE f.swmm_sink = ? AND f.swmm_source = 0 
                    ORDER BY f.name, f.timestep
                """, (segnum, segnum))

                results = cur.fetchall()
                
                pct_columns = []
                flow_columns = []
                total_col = []

                names = ['timestep']

                # one column per inflow name; results are ordered by name for groupby
                for key, group in groupby(results, lambda row: row[1]):
                    names.append(key)
                    group = list(group)
                    group = sorted(group, key = lambda row: row[0])
                    pct_flow = [row[4] for row in group]
                    pct_columns.append(pct_flow)
                    flow = [row[2] for row in group]
                    total_col = [row[3] for row in group]
                    flow_columns.append(flow)

                # prepend the timestep column, then transpose columns to rows
                pct_columns.insert(0, range(0, len(pct_columns[0])))
                flow_columns.insert(0, range(0, len(flow_columns[0])))
                pct_rows = zip(*pct_columns) # transform list of columns into a list of rows
                flow_rows = zip(*flow_columns)
                named_pct_rows = [dict(zip(names, row)) for row in pct_rows]
                named_flow_rows = [dict(zip(names, row)) for row in flow_rows]

                with open(os.path.join(ext_swmm_flows_pct_path, str(segnum) + '.csv'), 'w') as f:
                    writer = csv.DictWriter(f, fieldnames = names, lineterminator='\n')
                    writer.writeheader()
                    for row in named_pct_rows:
                        writer.writerow(row)

                with open(os.path.join(ext_swmm_flows_path, str(segnum) + '_flow.csv'), 'w') as f:
                    writer = csv.DictWriter(f, fieldnames=names, lineterminator='\n')
                    writer.writeheader()
                    for row in named_flow_rows:
                        writer.writerow(row)

        # volumes2 holds the per-conduit rows of volumes with depth and velocity
        # apportioned across the conduits of each swmm segment
        cur.execute("""
            CREATE TABLE volumes2 (
                timestep int, 
                wasp int, 
                swmm int, 
                name varchar(64), 
                flow float,
                depth float, 
                velocity float
            )""")


        # Alternative weighting formulas tried during development; calc4
        # (simple average across the segment's conduits) is the one in use.
        #depth_calc1 = "depth * (flow / total_flow)"
        #depth_calc2 = "depth * (ABS(flow)/total_flow)"
        #depth_calc3 = """
        #    CASE WHEN total_flow = 0
        #         THEN depth * (1/seg_count)
        #         ELSE depth * (ABS(flow) / total_flow)
        #    END
        #"""
        depth_calc4 = "depth * (1.0 / seg_count)"

        #velocity_calc1 = "velocity * (flow / total_flow)"
        #velocity_calc2 = "velocity * (ABS(flow)/ total_flow)"
        #velocity_calc3 = """
        #    CASE WHEN total_flow = 0
        #         THEN depth * (1/seg_count)
        #         ELSE depth * (ABS(flow) / total_flow)
        #    END
        #"""
        velocity_calc4 = "velocity * (1.0 / seg_count)"

        tflow_calc1 = "SUM(flow)"
        tflow_calc2 = "SUM(ABS(flow))"

        # weight the depth and velocity in volumes by swmm segment
        # (the calc strings are trusted constants defined above, not user input)
        cur.execute(""" 
            INSERT INTO volumes2 (timestep, wasp, swmm, name, depth, velocity, flow)
                SELECT v.timestep, v.wasp, v.swmm, v.name,""" + depth_calc4 + """,""" + velocity_calc4 + """, flow
                FROM (
                    SELECT """ + tflow_calc2 + """ AS total_flow, timestep, swmm, COUNT(*) AS seg_count
                    FROM volumes
                    GROUP BY timestep, swmm) AS total_flows
                INNER JOIN volumes v ON v.timestep = total_flows.timestep AND v.swmm = total_flows.swmm
            """)
        cur.execute('CREATE INDEX idx_volumes2 ON volumes2(timestep, swmm)')

        # volumes2_a: volumes2 totalled by swmm segment and timestep
        cur.execute("""
            CREATE TABLE volumes2_a (
                timestep int,
                wasp int,
                swmm int,
                depth float,
                velocity float,
                flow float
            )
        """)

        # total the depth, velocity, and flow in volumes2 by swmm segment 
        cur.execute("""
            INSERT INTO volumes2_a (timestep, wasp, swmm, depth, velocity, flow)
            SELECT timestep, wasp, swmm, SUM(depth), SUM(velocity), SUM(flow)
            FROM volumes2
            GROUP BY wasp, swmm, timestep
        """)
        cur.execute('CREATE INDEX idx_volumes2_a ON volumes2_a(timestep, swmm)')

        # volumes2_b: volumes2_a plus delta_vol and a (later-filled) vol column
        cur.execute("""
            CREATE TABLE volumes2_b (
                timestep int,
                swmm int,
                wasp int,
                depth float,
                velocity float,
                flow float,
                delta_vol float,
                vol float NULL
            )
        """)
        cur.execute('CREATE INDEX idx_volumes2_b ON volumes2_b(timestep, swmm)')

        # volumes3x: per-wasp-segment aggregates
        cur.execute("""
            CREATE TABLE volumes3x (
                timestep int, 
                wasp int, 
                vol float, 
                wasp_depth float, 
                wasp_velocity float,
                flow float,
                seg_count int
            )""")
        cur.execute('CREATE INDEX idx_volumes3x ON volumes3x(timestep, wasp)')

        # volumes4x: final per-wasp-segment series
        cur.execute("""
            CREATE TABLE volumes4x (
                timestep int, 
                wasp int, 
                vol float, 
                depth float, 
                velocity float
            )""")
        cur.execute('CREATE INDEX idx_volumes4x ON volumes4x(timestep, wasp)')

        def calc_swmm_vols():
            """(Re)compute the volume time series for every SWMM segment.

            Clears and repopulates volumes2_b. delta_vol is (inflow - outflow)
            per timestep converted to a volume via the timestep length; vol is
            then accumulated forward in time, starting from an initial volume
            estimated from flow, velocity, and conduit length at timestep 0.
            Uses `cur` and `timestep_secs` from the enclosing scope.
            """
            cur.execute("DELETE FROM volumes2_b")

            # delta_vol per swmm segment: (summed inflow - outflow) * dt
            cur.execute("""
                INSERT INTO volumes2_b (timestep, wasp, swmm, depth, velocity, flow, delta_vol)
                SELECT v.timestep, v.wasp, v.swmm, v.depth, v.velocity, v.flow, (inflows.flow_in - v.flow) * ?
                FROM (
                    SELECT SUM(flow) AS flow_in, timestep, swmm_sink
                    FROM flows
                    GROUP BY timestep, swmm_sink) AS inflows
                INNER JOIN volumes2_a v ON v.timestep = inflows.timestep AND v.swmm = inflows.swmm_sink
            """, (timestep_secs, ))

            # The accumulation below requires ascending timesteps: vol at t
            # depends on vol at t-1.  ORDER BY is mandatory here -- a bare
            # SELECT DISTINCT gives no ordering guarantee in SQLite.
            cur.execute('SELECT DISTINCT timestep FROM volumes2_b ORDER BY timestep')
            timesteps = [row[0] for row in cur.fetchall()]
            cur.execute('SELECT DISTINCT swmm FROM volumes2_b')
            swmm_segs = [row[0] for row in cur.fetchall()]
            for swmm in swmm_segs:
                for t in timesteps:
                    if t == 0:
                        # for the first timestep, seed vol with the initial
                        # volume from flow continuity: vol = (Q / v) * L
                        cur.execute('SELECT flow, velocity FROM volumes2_b WHERE timestep = 0 AND swmm = ?', (swmm, ))
                        flow, velocity = cur.fetchone()

                        cur.execute("SELECT DISTINCT length FROM conduit_params2 WHERE swmm = ?", (swmm, ))
                        length = cur.fetchall()
                        assert len(length) == 1
                        length = length[0][0]

                        # guard against ZeroDivisionError when the segment is
                        # stagnant (velocity == 0) at the first timestep
                        init_vol = (flow / velocity) * length if velocity else 0

                        cur.execute("""
                            UPDATE volumes2_b
                            SET vol = ?
                            WHERE timestep = 0 AND swmm = ?
                        """, (init_vol, swmm))
                    else:
                        # previous timestep's accumulated volume
                        cur.execute('SELECT vol FROM volumes2_b WHERE timestep = ? AND swmm = ?', (t - 1, swmm))
                        prev_vol = cur.fetchall()
                        assert len(prev_vol) == 1
                        prev_vol = prev_vol[0][0]

                        cur.execute('SELECT delta_vol FROM volumes2_b WHERE timestep = ? AND swmm = ?', (t, swmm))
                        delta_vol = cur.fetchall()
                        assert len(delta_vol) == 1
                        delta_vol = delta_vol[0][0]

                        cur.execute('SELECT depth FROM volumes2_b WHERE timestep = ? AND swmm = ?', (t, swmm))
                        depth = cur.fetchall()
                        assert len(depth) == 1
                        depth = depth[0][0]

                        # a dry segment (depth == 0) holds no water regardless
                        # of the accumulated balance
                        if depth == 0:
                            vol = 0
                        else:
                            vol = prev_vol + delta_vol

                        cur.execute('UPDATE volumes2_b SET vol = ? WHERE timestep = ? AND swmm = ?', (vol, t, swmm))

        # initial volume computation (before any base-volume correction)
        calc_swmm_vols()

        if correct_vol:
            print('correcting')
            cur.execute("SELECT DISTINCT swmm FROM volumes2_b WHERE swmm IS NOT NULL")
            swmm_segs = [row[0] for row in cur.fetchall()]

            def export_test(*args):
                # Debug helper: dump the given parallel series as CSV columns
                # to vol.csv next to the output file.  (os and csv are already
                # imported at module level; the original also leaked the file
                # handle -- the context manager closes it.)
                fpath = os.path.join(os.path.dirname(outpath), 'vol.csv')
                with open(fpath, 'w') as f:
                    writer = csv.writer(f, lineterminator='\n')
                    writer.writerows(zip(*args))

            # For each SWMM segment: scan its volume series for windows that
            # have drifted off (or below) the base volume, accumulate a
            # per-timestep correction series, inject the corrections back into
            # the flows table as synthetic "balance bucket" inflows, and
            # finally recompute the volumes from the corrected flows.
            for swmm in swmm_segs:
                query_cols = ['vol', 'delta_vol']
                cur.execute("""
                    SELECT """ + ','.join(query_cols) + """
                    FROM volumes2_b
                    WHERE swmm = ? 
                    ORDER BY timestep
                """, (swmm, ))
                named_results = [dict(zip(query_cols, row)) for row in cur.fetchall()]
                abs_delta_vol = [abs(row['delta_vol']) for row in named_results]
                vol = [row['vol'] for row in named_results]

                # the target ("correct") base volume is the mean over the
                # leading window; vol_range scales all thresholds below
                correct_basevol = sum(vol[:window_size]) / window_size
                vol_range = max(vol) - correct_basevol

                segs_of_interest = [13]  # segments to keep debug snapshots for

                # first index where the series rises appreciably above base
                # volume; corrections are only applied after the event starts
                event_start_threshold = 0.3
                event_start_idx = 0
                for idx, v in enumerate(vol):
                    if v > ((vol_range * event_start_threshold) + correct_basevol):
                        event_start_idx = idx
                        break

                correction = [0 for _ in range(len(abs_delta_vol))]
                prev_vols = []  # debug snapshots of vol before each correction

                def is_basevol(vol, lowerbound, upperbound):
                    # window is "flat" if its spread is below the signal threshold
                    return (max(vol[lowerbound:upperbound]) - min(vol[lowerbound:upperbound])) < abs_basevol_signal

                def is_off_basevol(basevol, correct_basevol):
                    # window mean deviates too far from the target base volume
                    return abs(basevol - correct_basevol) > correction_threshold * vol_range

                def get_basevol(vol, lowerbound, upperbound):
                    # mean volume over [lowerbound, upperbound)
                    return sum(vol[lowerbound:upperbound]) / (upperbound - lowerbound)

                def is_negative(vol, correct_basevol, lowerbound, upperbound):
                    # True when the fraction of the window sitting below the
                    # target base volume exceeds negative_threshold_pct
                    vol_window = vol[lowerbound:upperbound]
                    return (sum(v < correct_basevol for v in vol_window) / len(vol_window)) > negative_threshold_pct

                i = 0
                max_window_index = len(abs_delta_vol) - window_size
                micro_window_size = int(window_size * micro_window_pct)
                correction_window_size = micro_window_size
                while i < max_window_index:
                    needs_correction = False
                    if i > event_start_idx:
                        upperbound = i + window_size
                        basevol = get_basevol(vol, i, upperbound)
                        correction_start = i
                        if is_negative(vol, correct_basevol, i, upperbound):
                            # window sits below base volume: walk forward to
                            # find where the deficit ends, then size the
                            # correction from the trailing sub-window
                            needs_correction = True
                            j = i + micro_window_size
                            also_basevol = is_basevol(vol, i, upperbound)
                            while j < max_window_index:
                                if not also_basevol and not is_negative(vol, correct_basevol, j, j + micro_window_size):
                                    k = j + micro_window_size
                                    # FIX: this call previously read
                                    # is_negative(vol, correct, j, k) -- the
                                    # name `correct` is undefined (NameError);
                                    # every sibling call uses correct_basevol
                                    while k > j and not is_negative(vol, correct_basevol, j, k):
                                        k = k - 1
                                    while k > j and not is_basevol(vol, j, k):
                                        k = k - 1
                                    correction_total = correct_basevol - get_basevol(vol, j, k)
                                    i += micro_window_size
                                    break
                                elif also_basevol and not is_basevol(vol, j, j + micro_window_size):
                                    k = j + micro_window_size
                                    while k > j and not is_basevol(vol, j, k):
                                        k = k - 1
                                    correction_total = correct_basevol - get_basevol(vol, j, k)
                                    i += micro_window_size
                                    break
                                else:
                                    also_basevol = is_basevol(vol, j, j + micro_window_size)
                                j += 1
                            if j >= max_window_index:
                                # deficit runs to the end of the series
                                basevol = get_basevol(vol, j, len(vol))
                                correction_total = correct_basevol - basevol
                                i += micro_window_size
                        elif is_basevol(vol, i, upperbound) and is_off_basevol(basevol, correct_basevol):
                            # flat but offset window: find where the flat
                            # stretch ends and correct toward the target
                            j = upperbound
                            while j < max_window_index:
                                if not is_basevol(vol, j, j + micro_window_size):
                                    k = j + micro_window_size
                                    while k > j and not is_basevol(vol, j, k):
                                        k = k - 1
                                    basevol = get_basevol(vol, j, k)
                                    needs_correction = is_off_basevol(basevol, correct_basevol)
                                    correction_total = correct_basevol - basevol
                                    i += micro_window_size
                                    break
                                j += 1

                            if j >= max_window_index:
                                basevol = get_basevol(vol, j, len(vol))
                                needs_correction = is_off_basevol(basevol, correct_basevol)
                                correction_total = correct_basevol - basevol
                                i += micro_window_size

                    if needs_correction:
                        # spread the total correction evenly over the
                        # correction window, then shift the remaining series
                        avg_correction = correction_total / correction_window_size
                        correction_end = correction_start + correction_window_size
                        correction_spread = [avg_correction for _ in range(correction_window_size)]
                        existing_correction = correction[correction_start:correction_end]
                        correction_vals = [sum(tup) for tup in zip(correction_spread, existing_correction)]
                        correction[correction_start:correction_end] = correction_vals
                        prev_vols.append(copy(vol))
                        vol[correction_start:] = [v + correction_total for v in vol[correction_start:]]
                    else:
                        i += 1

                # final half-window check: pull the tail back toward base
                # volume if it finished offset (only `correction` is updated;
                # vol itself is not re-shifted since the scan is complete)
                final_window_start = int(len(vol) - .5 * window_size)
                final_window_end = len(vol) - 1
                final_basevol = get_basevol(vol, final_window_start, final_window_end)
                if is_off_basevol(final_basevol, correct_basevol):
                    correction_start = final_window_start
                    correction_total = correct_basevol - final_basevol
                    avg_correction = correction_total / correction_window_size
                    correction_spread = [avg_correction for _ in range(correction_window_size)]
                    correction_end = correction_start + correction_window_size
                    existing_correction = correction[correction_start:correction_end]
                    correction_vals = [sum(tup) for tup in zip(correction_spread, existing_correction)]
                    correction[correction_start:correction_end] = correction_vals

                if swmm in segs_of_interest:
                    prev_vols.append(vol)

                cur.execute("""
                    SELECT DISTINCT wasp
                    FROM segs 
                    WHERE swmm = ?
                """, (swmm, ))
                wasp = cur.fetchone()[0]

                # convert the volume corrections to flow rates and insert them
                # as external "balance bucket" inflows for this segment
                balance_bucket = [c/timestep_secs for c in correction]
                update_tups = [[ts,0,swmm,0,wasp,str(swmm)+'balbucket',qin] for ts, qin in enumerate(balance_bucket)]
                cur.executemany("""
                    INSERT INTO flows (timestep, swmm_source, swmm_sink, wasp_source, wasp_sink, name, flow) 
                    VALUES (?, ?, ?, ?, ?, ?, ?)
                """, update_tups)

            # recompute the volume series now that balancing flows are in place
            calc_swmm_vols()

        # volumes3 is similar to volumes2, but now depth are replaced with new columns swmm_depth and swmm_velocity. 
        # These columns will hold the volume weighted depths and velocity by swmm segment. additionally, this 
        # table also has a swmm_vol col that will hold the total volume for each swmm segment
        # populate volumes3. swmm_vol is the sum of the volumes for each parallel conduit in the swmm segment. 
        # swmm_depth and swmm_velocity are the volume weighted depth and velocity for each parallel in the swmm segment

        # Alternative weighting formulas kept for reference; only the *_calc4
        # variants (uniform 1/seg_count split) are used below.
        #depth_calc1 = "(v2.flow / wasp_flows.wasp_flow) * v2.depth"
        #depth_calc2 = "(ABS(v2.flow)/ wasp_flows.wasp_flow) * v2.depth"
        #depth_calc3 = """
        #    CASE WHEN wasp_flows.wasp_flow = 0
        #        THEN (1 / wasp_flows.seg_count) * v2.depth
        #        ELSE (ABS(v2.flow) / wasp_flows.wasp_flow) * v2.depth
        #    END
        #"""
        depth_calc4 = "v2.depth * (1.0 / wasp_flows.seg_count)"

        #velocity_calc1 = "(v2.flow / wasp_flows.wasp_flow) * v2.velocity"
        #velocity_calc2 = "(ABS(v2.flow) / wasp_flows.wasp_flow) * v2.velocity"
        #velocity_calc3 = """
        #    CASE WHEN wasp_flows.wasp_flow = 0
        #        THEN (1 / wasp_flows.seg_count) * v2.velocity
        #        ELSE (ABS(v2.flow) / wasp_flows.wasp_flow) * v2.depth
        #    END
        #"""
        velocity_calc4 = "velocity * (1.0 / wasp_flows.seg_count)"

        tflow_calc1 = "SUM(flow)"
        tflow_calc2 = "SUM(ABS(flow))"

        # volumes3x: split each conduit's depth/velocity evenly across the
        # conduits (seg_count) of its WASP segment
        cur.execute("""
            INSERT INTO volumes3x  (timestep, wasp, vol, wasp_depth, wasp_velocity, flow, seg_count)
            SELECT v2.timestep, v2.wasp, v2.vol, """ + depth_calc4 + """, 
                """ + velocity_calc4 + """, v2.flow, wasp_flows.seg_count
            FROM (SELECT """ + tflow_calc2 + """ AS wasp_flow, timestep, wasp, COUNT(*) AS seg_count
                FROM volumes2_b
                GROUP BY wasp, timestep
            ) AS wasp_flows
            INNER JOIN volumes2_b v2 ON v2.timestep = wasp_flows.timestep AND v2.wasp = wasp_flows.wasp
        """)

        # volumes4x: aggregate volumes3x down to one row per (timestep, wasp)
        cur.execute("""
            INSERT INTO volumes4x (timestep, wasp, vol, depth, velocity)
            SELECT timestep, wasp, SUM(vol), SUM(wasp_depth), SUM(wasp_velocity)
            FROM volumes3x
            GROUP BY wasp,timestep
        """)

        conn.commit()

        # TODO this is junk
        #cur.execute("SELECT DISTINCT wasp FROM volumes4x WHERE wasp > 0")
        #wasp_sinks = cur.fetchall()
        #cur.execute("SELECT MAX(timestep) FROM final")
        #max_ts = cur.fetchone()

        #for wasp in wasp_sinks:
        #    cur.execute("SELECT volume FROM final WHERE wasp = ? AND timestep >= 15", (wasp, ))
        #    lower_vols = [row[1] for row in cur.fetchall()]
        #    max_freq = max(map(lower_vols.count, lower_vols))
        #    lower_mode = set(vol for vol in lower_vols if lower_vols.count(vol) == max_freq)


        #    cur.execute("SELECT volume FROM final WHERE wasp = ? AND timestep <= ? - 15", (wasp, max_ts))
        #    upper_vols = [row[1] for row in cur.fetchall()]
        #    max_freq = max(map(upper_vols.count, upper_vols))
        #    upper_mode = set(vol for vol in upper_vols if upper_vols.count(vol) == max_freq)


#        # volumes 4 aggregates the volume weighted depths and velocities from volumes3 by swmm segment.
#        cur.execute("CREATE TABLE volumes4 (timestep int, wasp int, swmm int, vol float, depth float, velocity float)")
#        cur.execute("""
#            INSERT INTO volumes4 (timwaspp, wasp, swmm, vol, depth, velocity)
#            SELECT timestep, wasp, swmm, vol, depth, velocity
#            FROM (SELECT timestep, wasp, swmm, SUM(vol) AS vol, SUM(swmm_depth) AS depth, SUM(swmm_velocity) AS velocity
#                FROM volumes3
#                GROUP BY wasp, swmm, timestep)
#            """)
#        cur.execute("CREATE INDEX idx_volumes4 ON volumes4(timestep, wasp)")
#
#   waspvolumes5 weights the swmm depths and velocities by their respective portions of their respective total wasp volumes
#        cur.execute("CREATE TABLE volumes5 (timestep int, wasp int, vol float, wasp_depth float, wasp_velocity float)")
#        cur.execute("""
#            INSERT INTO volumes5 (timestep, wasp, vol, wasp_depth, wasp_velocity)
#            SELECT v4.timestep, v4.wasp, v4.vol, (v4.vol / wasp_vols.wasp_vol) * v4.depth, 
#                (v4.vol / wasp_vols.wasp_vol) * v4.velocity
#            FROM (SElECT timestep, wasp, SUM(vol) AS wasp_vol
#                FROM volumes4
#                GROUP BY wasp, timestep
#            ) AS wasp_vols
#            INNER JOIN volumes4 v4 ON v4.timestep = wasp_vols.timestep AND v4.wasp = wasp_vols.wasp
#            """)
#
#        # volumes6 just sums up the depths, velocities, and volumes from the previous table
#        cur.execute("CREATE TABLE volumes6 (timestep int, wasp int, vol float, depth float, velocity float)")
#        cur.execute("""
#            INSERT INTO volumes6 (timestep, wasp, vol, depth, velocity)
#                SELECT timestep, wasp, SUM(vol) AS vol, SUM(wasp_depth) AS depth, SUM(wasp_velocity) AS velocity
#                FROM volumes5
#                GROUP BY wasp, timestep
#            """)

        # NOTE: the flows2 table described below is actually built in
        # finalize(), not here.
        # flows2 splits the flow data into two categories, aggregates them each separately, then unions them back together.
        # the first group are the external flows (wasp_source = 0). These flows are sumed by their destination (wasp_sink) 
        # and their timesteps. The second group are the internal flows (wasp_source <> 0). This group is filtered 
        # so that the swmm segments remaining are those that terminate a wasp segment (by the condition that their 
        # swmm segment number must be the maximum swmm segment number for a wasp segment). 
        # After filtering the set, the remaining segment flows are sumed by their wasp_source, wasp_sink, and timestep.

    finalize(dbpath, outpath, timestep_secs, round_num=round_num)


def _prepare_outdir(path):
    # Create *path* if missing; otherwise delete every file already in it so
    # a fresh set of series files can be written.
    if not os.path.isdir(path):
        os.mkdir(path)
    else:
        for fname in os.listdir(path):
            os.unlink(os.path.join(path, fname))


def _export_series(cur, outdir, keys_sql, rows_sql, fieldnames):
    # Write one CSV per key value into *outdir*: keys_sql yields the distinct
    # key values; for each key, rows_sql (parameterized on the key) yields the
    # rows, written to <outdir>/<key>.csv under a `fieldnames` header row.
    _prepare_outdir(outdir)
    cur.execute(keys_sql)
    keys = [row[0] for row in cur.fetchall()]
    for key in keys:
        cur.execute(rows_sql, (key, ))
        rows = cur.fetchall()
        with open(os.path.join(outdir, str(key) + '.csv'), 'w') as f:
            writer = csv.DictWriter(f, fieldnames=fieldnames, lineterminator='\n')
            writer.writeheader()
            for row in rows:
                writer.writerow(dict(zip(fieldnames, row)))


def finalize(dbpath, outpath, timestep_secs, round_num=5):
    """Build the final flow/volume tables, dump diagnostic CSV series, and
    export the .hyd data.

    dbpath        -- path of the working SQLite database
    outpath       -- path of the .hyd output file; CSV series are written in
                     a 'segment_series_out' directory next to it
    timestep_secs -- length of one timestep in seconds
    round_num     -- rounding precision, passed through to export_data
    """
    series_path = os.path.join(os.path.dirname(outpath), 'segment_series_out')
    if not os.path.isdir(series_path):
        os.mkdir(series_path)

    with closing(sqlite3.connect(dbpath)) as conn:
        conn.isolation_level = None
        cur = conn.cursor()
        tune_db(cur)

        # flows2 splits the flow data into two categories, aggregates each
        # separately, then unions them back together: external flows
        # (wasp_source = 0) summed by destination and timestep, and internal
        # flows (wasp_source <> 0) filtered to the swmm segments that
        # terminate a wasp segment (their swmm number is the maximum for the
        # wasp segment) and summed by wasp_source, wasp_sink, and timestep.
        cur.execute("CREATE TABLE flows2 (timestep int, wasp_source int, wasp_sink int, flow float)")
        cur.execute("""
            INSERT INTO flows2 (timestep, wasp_source, wasp_sink, flow)
                SELECT * 
                FROM (SELECT timestep, wasp_source, wasp_sink, SUM(flow) AS flow
                      FROM flows
                      WHERE wasp_source = 0
                      GROUP BY wasp_sink, timestep
                    UNION SELECT timestep, wasp_source, wasp_sink, SUM(flow)
                        FROM flows
                        WHERE wasp_source <> 0 AND swmm_source IN (SELECT MAX(swmm) FROM segs GROUP BY wasp)
                        GROUP BY wasp_source, wasp_sink, timestep
                    )
             """)

        # column names for the final tables. datatype marks the row kind:
        # 1 = volume/depth/velocity data, 2 = flow data. ordering1/ordering2
        # reproduce the row order required by .hyd files.
        final_colnames = ['datatype', 'timestep', 'wasp_source', 'wasp_sink', 'flow', 'vol', 'depth', 
            'velocity', 'ordering1', 'ordering2']
        # NOTE: the original list carried an 11th 'int' that zip() silently
        # dropped; the types now pair 1:1 with final_colnames (same DDL).
        final_datatypes = ['int', 'int', 'int', 'int', 'float', 'float', 'float', 'float', 'int', 'int']

        # comma-separated forms usable inside query strings
        final_cols = ', '.join([colname + ' ' + datatype for colname, datatype in zip(final_colnames, final_datatypes)])
        final_colnames_str = ', '.join(final_colnames)

        # create the final tables
        cur.execute("CREATE TABLE flows_final (" + final_cols + ")")
        cur.execute("CREATE TABLE volumes_final (" + final_cols + ")")
        cur.execute("CREATE TABLE final (" + final_cols + ")")

        # populate flows_final. vol/depth/velocity are NULL so this table can
        # later be unioned with volumes_final. Flow data is ordered first by
        # wasp_source (or wasp_sink when the source is 0) and second by
        # wasp_source, which forces each segment's outflow interface to follow
        # the external-flow interface for that segment.
        cur.execute("""
            INSERT INTO flows_final (""" + final_colnames_str + """)
                SELECT 2, timestep, wasp_source, wasp_sink, flow, NULL AS vol, NULL AS depth, NULL AS velocity, 
                CASE WHEN wasp_source = 0 THEN wasp_sink ELSE wasp_source END AS ordering1, wasp_source AS ordering2
                FROM flows2
            """)

        # diagnostic CSV time series: one file per segment, per series kind.
        # (Previously five copy-pasted loops; now driven by _export_series.)
        _export_series(cur, os.path.join(series_path, 'consolidated_external_flows_by_WASP_segment'),
            'SELECT DISTINCT wasp_sink FROM flows_final WHERE wasp_sink <> 0',
            'SELECT timestep, flow FROM flows_final WHERE wasp_sink = ? AND wasp_source = 0 ORDER BY timestep ',
            ['timestep', 'consolidated_external_flows'])

        _export_series(cur, os.path.join(series_path, 'internal_flows_by_WASP_segment'),
            'SELECT DISTINCT wasp_source FROM flows_final WHERE wasp_source <> 0',
            """
                SELECT timestep, flow, wasp_sink
                FROM flows_final  
                WHERE wasp_source = ?
                ORDER BY wasp_source, timestep
                """,
            ['timestep', 'flow', 'sink'])

        _export_series(cur, os.path.join(series_path, 'internal_flows_by_SWMM_segment'),
            'SELECT DISTINCT swmm_source FROM flows WHERE swmm_source <> 0',
            """
                SELECT timestep, flow, swmm_sink
                FROM flows  
                WHERE swmm_source = ?
                ORDER BY swmm_source, timestep
                """,
            ['timestep', 'flow', 'sink'])

        _export_series(cur, os.path.join(series_path, 'velocities_by_SWMM_segment'),
            'SELECT DISTINCT swmm FROM volumes WHERE swmm <> 0',
            """
                SELECT timestep, velocity, swmm
                FROM volumes
                WHERE swmm = ?
                ORDER BY swmm, timestep
                """,
            ['timestep', 'velocity', 'swmm'])

        _export_series(cur, os.path.join(series_path, 'depth_by_SWMM_segment'),
            'SELECT DISTINCT swmm FROM volumes WHERE swmm <> 0',
            """
                SELECT timestep, depth, swmm
                FROM volumes
                WHERE swmm = ?
                ORDER BY swmm, timestep
                """,
            ['timestep', 'depth', 'swmm'])

        # populate volumes_final. wasp_source and flow are NULL, as these
        # columns only apply to the flow data.
        cur.execute("""
            INSERT INTO volumes_final (""" + final_colnames_str + """)
                SELECT 1, timestep, NULL AS wasp_source, wasp AS wasp_sink, NULL AS flow, vol, depth, velocity, 
                       wasp AS ordering1, wasp AS ordering2
                FROM volumes4x
            """)

        # combine the two final tables into one
        cur.execute("""
            INSERT INTO final (""" + final_colnames_str + """)
                SELECT  *
                FROM (
                    SELECT * FROM flows_final
                    UNION
                    SELECT * FROM volumes_final
                )
            """)
        cur.execute("CREATE INDEX idx_final ON final(timestep, datatype, ordering1, ordering2)")

        export_data(outpath, dbpath, timestep_secs, final_colnames, round_num=round_num)

        export_volumes(dbpath, os.path.dirname(outpath), by_wasp_seg=True)
        export_volumes(dbpath, os.path.dirname(outpath), by_wasp_seg=False)
        export_internal_flows(dbpath, os.path.dirname(outpath), by_wasp_seg=True)
        export_internal_flows(dbpath, os.path.dirname(outpath), by_wasp_seg=False)

def export_data(outpath, dbpath, timestep_secs, final_colnames, round_num=5):
    """Write the combined `final` table out as a WASP .hyd text file.

    outpath        -- path of the .hyd text file to create
    dbpath         -- SQLite database containing the `final` table
    timestep_secs  -- length of one timestep in seconds
    final_colnames -- column names of `final`, in table order
    round_num      -- digits kept after the decimal point (default 5)
    """
    with closing(sqlite3.connect(dbpath)) as cnxn:
        cnxn.isolation_level = None
        cur = cnxn.cursor()
        tune_db(cur)

        with open(outpath, 'w') as hyd:
            # Segment count = number of volume rows (datatype 1) at timestep 0.
            cur.execute('SELECT COUNT(*) FROM final WHERE timestep = 0 AND datatype = 1')
            num_segs = cur.fetchone()[0]
            # Interface count = number of flow rows (datatype 2) at timestep 0.
            cur.execute('SELECT COUNT(*) FROM final WHERE timestep = 0 AND datatype = 2')
            num_interfaces = cur.fetchone()[0]

            # Total simulated duration in seconds.
            cur.execute('SELECT MAX(timestep) FROM final')
            duration = timestep_secs * cur.fetchone()[0]

            # Top line of the .hyd file: segs, interfaces, timestep, 0, duration, 1.
            header_fields = [num_segs, num_interfaces, timestep_secs, 0, duration, 1]
            hyd.write('\t'.join(str(field) for field in header_fields) + '\n')

            # One line per interface: source and sink WASP segment numbers,
            # taken from the flow rows of the first timestep.
            cur.execute("""
                SELECT wasp_source, wasp_sink 
                FROM final 
                WHERE timestep = 0 AND datatype = 2 
                ORDER BY ordering1, ordering2""")
            for wasp_source, wasp_sink in cur.fetchall():
                hyd.write(str(wasp_source) + '\t' + str(wasp_sink) + '\n')

            # Dump every row of `final` in hyd-file order.
            cur.execute('SELECT * FROM final ORDER BY timestep, datatype, ordering1, ordering2')

            for raw in cur.fetchall():
                record = dict(zip(final_colnames, raw))
                if record['datatype'] == 1:
                    # Volume record: leading tab, volume, literal 0, depth, velocity.
                    vol = str(round(record['vol'], round_num))
                    depth = str(round(record['depth'], round_num))
                    velocity = str(round(record['velocity'], round_num))
                    hyd.write('\t'.join(['', vol, '0', depth, velocity]) + '\n')
                else:
                    # Flow record: leading tab, flow.
                    flow = str(round(record['flow'], round_num))
                    hyd.write('\t'.join(['', flow]) + '\n')

def export_internal_flows(dbpath, outdir, append_to_folder_name='', by_wasp_seg=True):
    """Write one CSV of flow/depth/velocity per segment into a fresh folder.

    The folder is named internal_flows_by_WASP or internal_flows_by_SWMM
    (plus the optional suffix) under `outdir`; any existing contents are
    removed before writing.
    """
    with closing(sqlite3.connect(dbpath)) as cnxn:
        cur = cnxn.cursor()
        tune_db(cur)

        # Create the destination folder, or empty it if it already exists.
        basis = 'WASP' if by_wasp_seg else 'SWMM'
        outpath = os.path.join(outdir, 'internal_flows_by_' + basis + append_to_folder_name)
        if os.path.isdir(outpath):
            for fname in os.listdir(outpath):
                os.unlink(os.path.join(outpath, fname))
        else:
            os.mkdir(outpath)

        fieldnames = ['timestep', 'flow', 'depth', 'velocity']
        if by_wasp_seg:
            cur.execute('SELECT DISTINCT wasp_source FROM flows_final WHERE wasp_source <> 0')
            query = """
                SELECT """ + ','.join(fieldnames) + """
                FROM flows_final  
                WHERE wasp_source = ?
                ORDER BY wasp_source, timestep
            """
        else:
            cur.execute('SELECT DISTINCT swmm FROM volumes2_b')
            query = """ 
                SELECT """ + ','.join(fieldnames) + """ 
                FROM volumes2_b
                WHERE swmm = ?
                ORDER BY timestep 
            """

        # Materialize the segment ids first: the cursor is reused below.
        for (seg,) in cur.fetchall():
            csv_path = os.path.join(outpath, str(seg) + '.csv')
            with open(csv_path, 'w') as f:
                writer = csv.DictWriter(f, fieldnames=fieldnames, lineterminator='\n')
                writer.writeheader()
                cur.execute(query, (seg,))
                for values in cur.fetchall():
                    writer.writerow(dict(zip(fieldnames, values)))

def export_volumes(dbpath, outdir, append_to_folder_name='', by_wasp_seg=True):
    """Write one CSV of volume/depth/velocity per segment into a fresh folder.

    The folder is named volumes_by_WASP or volumes_by_SWMM (plus the
    optional suffix) under `outdir`; any existing contents are removed
    before writing.
    """
    with closing(sqlite3.connect(dbpath)) as cnxn:
        cur = cnxn.cursor()
        tune_db(cur)

        # Create the destination folder, or empty it if it already exists.
        basis = 'WASP' if by_wasp_seg else 'SWMM'
        outpath = os.path.join(outdir, 'volumes_by_' + basis + append_to_folder_name)
        if os.path.isdir(outpath):
            for fname in os.listdir(outpath):
                os.unlink(os.path.join(outpath, fname))
        else:
            os.mkdir(outpath)

        fieldnames = ['timestep', 'vol', 'depth', 'velocity']
        if by_wasp_seg:
            segs = cur.execute('SELECT DISTINCT wasp_sink FROM final WHERE wasp_sink > 0')
            query = """
                SELECT """ + ','.join(fieldnames) + """
                FROM final 
                WHERE wasp_sink = ? AND datatype = 1
                ORDER BY timestep
            """
        else:
            segs = cur.execute("SELECT DISTINCT swmm FROM volumes2_b")
            query = """
                SELECT """ + ','.join(fieldnames) + """
                FROM volumes2_b
                WHERE swmm = ? 
                ORDER BY timestep
            """

        # Materialize the segment ids first: the cursor is reused below.
        for seg in [row[0] for row in segs]:
            with open(os.path.join(outpath, str(seg) + '.csv'), 'w') as f:
                writer = csv.DictWriter(f, fieldnames=fieldnames, lineterminator='\n')
                writer.writeheader()
                cur.execute(query, (seg,))
                for values in cur.fetchall():
                    writer.writerow(dict(zip(fieldnames, values)))

def corrector(vol, final_mean, init_mean, final_inflection_threshold=0.04):
    """Rescale the tail of a volume series so it settles at the initial mean.

    Scans `vol` backwards for the last point that still sits noticeably
    above `final_mean` (more than `final_inflection_threshold` of the spread
    between the series maximum and `final_mean`).  Everything after that
    inflection point is multiplied by init_mean / final_mean so the tail
    relaxes toward the initial baseline instead of the drifted final one.

    vol        -- volume time series (list of numbers)
    final_mean -- mean volume over the final sample window (assumed nonzero)
    init_mean  -- mean volume over the initial sample window
    Returns a new list; `vol` is not modified.
    """
    if not vol:
        return []

    # Default to "no tail to scale": the original left inflection_idx unbound
    # (NameError) when no sample exceeded the threshold.
    inflection_idx = len(vol)
    peak = max(vol)  # hoisted out of the loop; it is loop-invariant
    for i, v in enumerate(reversed(vol)):
        if v - final_mean > final_inflection_threshold * (peak - final_mean):
            # NOTE(review): reversed index i maps to original index
            # len(vol) - 1 - i; the original wrote len(vol) - i + 1, which is
            # preserved here so existing output is unchanged -- confirm intent.
            inflection_idx = len(vol) - i + 1
            break

    scale_factor = init_mean / final_mean
    scaled_tail = [v * scale_factor for v in vol[inflection_idx:]]
    return vol[:inflection_idx] + scaled_tail

def correct2(dbpath, final_init_threshold=0.01):
    """Correct baseline drift in the per-segment volume series of `final`.

    For every WASP segment, the mean volume over the first 10% of timesteps
    (init_mean) is compared with the mean over the last 10% (final_mean).
    When they differ by more than `final_init_threshold` of the full volume
    range, the series is adjusted so the tail settles back at the initial
    baseline, and the corrected volumes are written back to the database.

    NOTE(review): depends on `interpolate` (presumably scipy.interpolate)
    and `np` (presumably numpy) being imported elsewhere in this module --
    confirm.  Contains Python 2 print statements.
    """
    with closing(sqlite3.connect(dbpath)) as cnxn:
        cursor = cnxn.cursor()
        cursor.execute("SELECT DISTINCT wasp_sink FROM final WHERE wasp_sink IS NOT NULL AND datatype = 1")
        wasp_segs = [row[0] for row in cursor.fetchall()]

        for wasp in wasp_segs:
            final_cols = ['timestep', 'vol']

            # Volume time series for this segment, in timestep order.
            cursor.execute("""
                SELECT """ + ','.join(final_cols) + """
                FROM final 
                WHERE datatype = 1 AND wasp_sink = ?
                ORDER BY timestep
            """, (wasp, ))

            final = [dict(zip(final_cols, row)) for row in cursor.fetchall()]
            vol = [row['vol'] for row in final]

            # Means over the first and last 10% of the series.
            sample_window = int(floor(len(vol) * .1))
            init_mean = sum(v for v in vol[:sample_window]) / sample_window
            final_mean = sum(v for v in vol[-sample_window:]) / sample_window

            # Debug output (Python 2 print statements).
            print wasp
            print 'abs', abs(final_mean - init_mean)
            print 'final', final_init_threshold * (max(vol) - min(vol))

            if abs(final_mean - init_mean) > final_init_threshold * (max(vol) - min(vol)):
                if final_mean > init_mean:
                    # Tail drifted upward: scale it back down (see corrector).
                    corrected_vol = corrector(vol, final_mean, init_mean)
                else:
                    # Tail drifted downward: shift the falling limb up to the
                    # initial baseline and spline-blend the seam.
                    #original_range = max(vol) - init_mean
                    #corrected_vol = corrector(list(reversed(vol)), final_mean=init_mean, init_mean=final_mean)
                    #corrected_vol = list(reversed(corrected_vol))
                    #changed_range = max(corrected_vol) - final_mean
                    #scale_factor = original_range / changed_range
                    #corrected_vol = [v * scale_factor for v in corrected_vol]
                    #new_init_mean = final_mean * scale_factor
                    #corrected_vol = [v + (init_mean - new_init_mean) for v in corrected_vol]

                    final_crossing_threshold = 0.5
                    # Last index still at/above init_mean, scanning forward
                    # until the series has fallen halfway toward final_mean.
                    last_idx_above_init = 0
                    for i, v in enumerate(vol):
                        if v < init_mean - (init_mean - final_mean) * final_crossing_threshold:
                            break
                        elif v >= init_mean:
                            last_idx_above_init = i

                    # Everything after the crossing gets shifted upward.
                    shift_side = vol[(last_idx_above_init+1):]

                    # How far back the shifted block overlaps the stationary
                    # part.
                    # NOTE(review): if this loop never breaks,
                    # length_shift_overlap is left unbound and the splrep
                    # call below raises NameError -- confirm the inputs
                    # guarantee a break.
                    for i, v in enumerate(list(reversed(vol[:last_idx_above_init]))):
                        if v - init_mean > (init_mean - final_mean):
                            length_shift_overlap = i
                            break

                    # Spline-interpolate the overlap region at half-step
                    # resolution to smooth the seam between the two pieces.
                    tck = interpolate.splrep(range(length_shift_overlap + 1), shift_side[:(length_shift_overlap + 1)], s=0)
                    newx = np.arange(0, length_shift_overlap + .5, .5)

                    shift_side = list(interpolate.splev(newx, tck, der=0)) + shift_side[length_shift_overlap:]
                        
                    # Raise the whole tail back to the initial baseline.
                    shift_side = [v + (init_mean - final_mean) for v in shift_side]
                    corrected_vol = vol[:(last_idx_above_init - length_shift_overlap)] + shift_side

                # Persist the corrected series for this segment.
                cursor.executemany("""
                    UPDATE final
                    SET vol = ?
                    WHERE datatype = 1 AND wasp_sink = ? AND timestep = ?
                """, [(v, wasp, i) for i, v in enumerate(corrected_vol)])

                cnxn.commit()

def correct(dbpath):
    """Older baseline-drift correction for per-segment volumes (apparently
    superseded by correct2).

    NOTE(review): this function looks broken/dead as written -- see the
    inline notes: int + str concatenation, a float stop/step passed to
    range(), an if/else with identical branches, list + numpy-array
    concatenation, and the connection is never committed or closed, so the
    UPDATEs appear to be lost when the connection is garbage-collected.
    Depends on `interpolate` (presumably scipy.interpolate) imported
    elsewhere in this module -- confirm.  Uses a Python 2 print statement.
    """
    # NOTE(review): no commit()/close(); consider `with closing(...)` as in
    # correct2.
    cnxn = sqlite3.connect(dbpath)
    cursor = cnxn.cursor()
    cursor.execute("SELECT DISTINCT wasp_sink FROM final WHERE wasp_sink IS NOT NULL AND datatype = 1")
    wasp_segs = [row[0] for row in cursor.fetchall()]

    for wasp in wasp_segs:
        final_cols = ['timestep', 'vol']
        # Volume time series for this segment, in timestep order.
        cursor.execute("""
            SELECT """  + ','.join(final_cols) + """ 
            FROM final
            WHERE datatype = 1 AND wasp_sink = ?
            ORDER BY timestep
        """, (wasp,))

        final = [dict(zip(final_cols, row)) for row in cursor.fetchall()]
        vol = [row['vol'] for row in final]

        # Means over the first and last 10% of the series.
        sample_window = int(floor(len(vol) * .1))
        init_mean = sum(v for v in vol[:sample_window]) / sample_window
        final_mean = sum(v for v in vol[-sample_window:]) / sample_window

        def fix(vol, init_mean, final_mean):
            # Scale the above-cutoff portion so the tail spans the same
            # range relative to init_mean that it did relative to final_mean.
            max_vol = max(vol)
            current_range = max_vol - final_mean
            target_range = max_vol - init_mean
            scale_factor = target_range / current_range
            shift_factor = final_mean - init_mean

            # Split the series at the halfway point between the two means.
            cut_off = final_mean - (final_mean - init_mean) * .5
            stationary_vol = [v for v in vol if v < cut_off]
            scaled_vol = [v * scale_factor + shift_factor for v in vol if v >= cut_off]


            # Trailing run of stationary samples still above init_mean.
            stationary_inflect = []
            for v in reversed(stationary_vol):
                if v <= init_mean:
                    break
                else:
                    stationary_inflect.append(v)

            stationary_inflect = list(reversed(stationary_inflect))
            front = stationary_vol[:-len(stationary_inflect)]
            # Blend the seam by averaging the two overlapping pieces.
            corrected_inflect = [(v1 + v2) / 2 for v1, v2 in zip(stationary_inflect, scaled_vol[:len(stationary_inflect)])]

            if corrected_inflect:
                tck = interpolate.splrep(range(len(corrected_inflect)), corrected_inflect, s=0)
                # NOTE(review): x + '.5' adds an int to a str -> TypeError.
                newx = [x + '.5' for x in range(len(corrected_inflect))]
                newx = [list(x_pair) for x_pair in zip(range(len(corrected_inflect)), newx)]
                # NOTE(review): range() rejects float stop/step -> TypeError;
                # correct2 uses np.arange for the same half-step sampling.
                corrected_inflect_interp = interpolate.splev(range(0, len(corrected_inflect) + .5, .5), tck, der=0)
            else:
                corrected_inflect_interp = []

            # NOTE(review): splev returns an ndarray; list + ndarray raises
            # TypeError here.
            return front + corrected_inflect_interp + scaled_vol[len(corrected_inflect):]

        # NOTE(review): both branches of this conditional are identical.
        corrected_vol = fix(vol, init_mean, final_mean) if final_mean-init_mean > 0 else fix(vol, init_mean, final_mean)

        cursor.executemany("""
            UPDATE final 
            SET vol = ?
            WHERE datatype = 1 AND wasp_sink = ? AND timestep = ?
        """, [ (v, wasp, i) for i, v in enumerate(corrected_vol)])
        
    # Python 2 print statement.
    print 'done'

def load_ini(path):
    """Interactively collect hydmaker settings, persist them to `path`, and
    return them as a dict.

    If an ini file already exists at `path` the user may load it directly.
    Otherwise the user is walked through console prompts (paths to SWMM
    files, output location, rounding, filtering, and volume-correction
    parameters); the answers are written back to `path` in ConfigParser
    format so the next run can reuse them.

    Returns:
        dict mapping setting name -> value (event_start/event_end as
        datetime objects when set).
    """
    ini = {}

    def process_ini(path):
        # Read a previously-saved ini back into a plain dict.  Values were
        # written as Python literals (see the parser.set call at the bottom).
        # NOTE(security): eval of file contents -- safe only while the ini
        # file is trusted; consider ast.literal_eval.
        parser = ConfigParser.SafeConfigParser()
        parser.read(path)
        for key, value in parser.items('main'):
            ini[key] = eval(value, {}, {})
            if key in ('event_start', 'event_end'):
                ini[key] = datetime.strptime(ini[key], '%Y-%m-%d %H:%M:%S')
        return ini

    if os.path.exists(path):
        print("A settings file was found at " + path + ". Do you want to use the options in this file?")
        # [:1] instead of [0] so an empty answer falls through to the prompts
        # instead of raising IndexError.
        if raw_input("y/n >>> ")[:1] in ('Y', 'y'):
            return process_ini(path)

    key_err_msg = "Keyboard error. Try again, fat fingers\n"

    # Optionally load settings from an ini file elsewhere on disk.
    while True:
        try:
            response = raw_input("Do you want to load the settings from a hydmaker.ini file? (y/n) >> ")[0]
        except (KeyboardInterrupt, IndexError):
            # was `except KeyboardInterrupt, IndexError:`, which in Python 2
            # catches only KeyboardInterrupt and binds it to the name
            # IndexError; it also fell through with `response` unbound.
            print("Invalid input.")
            continue

        # was `response.lower == 'y'` -- compared the bound method itself,
        # so this branch could never be taken.
        if response.lower() == 'y':
            while True:
                try:
                    ini_path = raw_input("Path to hydmaker.ini file >> ")
                except KeyboardInterrupt:
                    print('Invalid input.')
                    continue  # was missing: fell through with ini_path unbound

                if os.path.exists(ini_path) and not os.path.isdir(ini_path):
                    return process_ini(ini_path)

                print("Invalid path.")
        else:
            break

    msg = "Please note that this program assumes data coming from an .rpt or .out file are in US units: CFS, FEET, and " + \
          "feet per second."
    print(msg)

    # Binary (.out) vs report (.rpt) input.
    while True:
        try:
            use_bin_output = raw_input("Is your data in a SWMM binary output file? (y/n) >> ")[0]
        except (KeyboardInterrupt, IndexError):
            print(key_err_msg)
            continue

        if use_bin_output.lower() == 'y':
            use_bin_output = True
            break
        elif use_bin_output.lower() == 'n':
            use_bin_output = False
            break
        # was: broke out with the raw character, so any non-'n' answer was
        # silently treated as 'y' (truthy string).
        print("Invalid input.")

    if use_bin_output:
        ini['rptpath'] = None
        while True:
            try:
                binary_path = raw_input("Path to SWMM binary output file (*.out) >> ")
            except KeyboardInterrupt:
                print(key_err_msg)
                continue

            if os.path.exists(binary_path) and not os.path.isdir(binary_path):
                if os.path.splitext(binary_path)[1].lower() != '.out':
                    print("That doesn't look like binary output file.")
                else:
                    ini['binarypath'] = binary_path
                    break
            else:
                print("Invalid path.")
    else:
        ini['binarypath'] = None
        while True:
            try:
                rpt_path = raw_input("Path to SWMM report file (*.rpt) >> ")
            except KeyboardInterrupt:
                print(key_err_msg)
                continue

            if os.path.exists(rpt_path) and not os.path.isdir(rpt_path):
                if os.path.splitext(rpt_path)[1].lower() != '.rpt':
                    print("That doesn't look like an .rpt file.")
                else:
                    ini['rptpath'] = rpt_path
                    break
            else:
                print('Invalid path.')

    # SWMM input file.
    while True:
        try:
            inp_path = raw_input("Path to SWMM input file (*.inp) >> ")
        except KeyboardInterrupt:
            print(key_err_msg)
            continue  # was missing: fell through with inp_path unbound

        if os.path.exists(inp_path) and not os.path.isdir(inp_path):
            if os.path.splitext(inp_path)[1].lower() != '.inp':
                print("That doesn't look like an .inp file.")
            else:
                ini['inppath'] = inp_path
                break
        else:
            print("Invalid path.")

    # One or more segmentation map files; an empty answer finishes the list.
    segmap_paths = []
    while True:
        if segmap_paths:
            print('\n')
            print('Segmentation file paths:')
            for i, segmap_path in enumerate(segmap_paths):
                print('  ' + str(i + 1) + '.  ' + os.path.basename(segmap_path))

        try:
            segmap_path = raw_input("Add a path to a segmentaion file (press enter to continue) >> ")
        except KeyboardInterrupt:
            print(key_err_msg)
            continue  # was missing: fell through with segmap_path unbound

        if os.path.exists(segmap_path) and not os.path.isdir(segmap_path):
            if segmap_path in segmap_paths:
                print("You already entered that one.")
            else:
                segmap_paths.append(segmap_path)
        else:
            if not segmap_path and len(segmap_paths) > 0:
                ini['segmap_paths'] = segmap_paths
                break
            print("No such file.")

    # Output directory and .hyd file name.
    while True:
        try:
            output_dir = raw_input('Path to output directory >> ')
        except KeyboardInterrupt:
            print(key_err_msg)
            continue  # was missing: fell through with output_dir unbound

        if os.path.isdir(output_dir):
            break

        print("Invalid directory.")

    while True:
        try:
            out_name = raw_input('File name for .hyd text file output >> ')
        except KeyboardInterrupt:
            print(key_err_msg)
            continue  # was missing: fell through with out_name unbound

        if out_name:
            out_path = os.path.join(output_dir, out_name)

            if os.path.exists(out_path):
                try:
                    response = raw_input('A file with that name already exists. Do you want to overwrite it? (y/n) >> ')[0]
                except (KeyboardInterrupt, IndexError):
                    print(key_err_msg)
                    continue  # was missing: re-tested a stale `response`

                if response.lower() == 'y':
                    ini['outpath'] = out_path
                    break
            else:
                ini['outpath'] = out_path
                break

    # Rounding precision (must be a whole number).
    while True:
        try:
            round_num = raw_input("Number of digits after the decimal place? >> ")
        except KeyboardInterrupt:
            print(key_err_msg)
            continue

        try:
            assert int(round_num) == float(round_num)
        except (ValueError, AssertionError):
            print("Invalid input.")
        else:
            ini['round_num'] = int(round_num)
            break

    # Optional moving-average filter.
    while True:
        try:
            msg = 'Do you want to pass flow, depth, and velocity data through a moving average filter? (y/n) >> '
            filter_data = raw_input(msg)[0]
        except (KeyboardInterrupt, IndexError):
            print(key_err_msg)
            continue

        filter_data = filter_data.lower() == 'y'
        break

    if filter_data:
        while True:
            try:
                filter_mins = raw_input('Moving average window size in minutes >> ')
            except KeyboardInterrupt:
                print(key_err_msg)
                continue  # was missing: fell through with filter_mins unbound

            try:
                ini['filter_mins'] = int(filter_mins)
            except ValueError:
                print('Invalid input.')
                continue
            else:
                break
    else:
        ini['filter_mins'] = None

    # Defaults for the volume correction procedure (see prompts below).
    correction_params = {
        'correction_threshold' : 0.0005,
        'abs_basevol_signal' : 5,
        'window_size' : 400,
        'negative_threshold_pct' : .2,
        'micro_window_pct' : .25
    }

    ini['correct_vol'] = False
    while True:
        try:
            msg = "Do you want to correct segments when their volumes stabilize at levels above or below their " \
                  + "known baselines? The process will add dummy inflows to segments that balance the volume " \
                  + "as needed. (y/n) >> "
            correct_vol = raw_input(msg)[0]
        except (KeyboardInterrupt, IndexError):
            print(key_err_msg)
            continue  # was missing: fell through with correct_vol unbound

        if correct_vol.lower() == 'y':
            ini['correct_vol'] = True
        elif correct_vol.lower() == 'n':
            ini['correct_vol'] = False
        else:
            print('Invalid input.')
            continue

        break

    # When correction was requested, optionally override its parameters.
    while True:
        try:
            # Kept in a separate name: the original clobbered correct_vol
            # with the match object, so a retry after KeyboardInterrupt would
            # have passed a match object to re.match (TypeError).
            wants_correction = re.match('y', correct_vol, re.IGNORECASE)

            if not wants_correction:
                break
            else:
                ini['correct_vol'] = bool(wants_correction)

                while True:
                    msg = "There are several parameters that you can change to control the behavior of the volume correction " \
                          + "procedure. Would you like to change them from their default values?"
                    print(msg)
                    try:
                        reject_defaults = raw_input("Change default volume correction parameters? (y/n) >> ")[0]
                    except (KeyboardInterrupt, IndexError):
                        print("Invalid input.")
                        continue  # was missing: broke with reject_defaults unbound

                    break

                if reject_defaults.lower() == 'n':
                    break
                else:
                    while True:
                        print("Volume correction parameter 1: Moving window size to locate stable volume periods")
                        msg = ("The correction procedure locates periods of stable volume that may require correction by moving "
                               "a window of specified size over the timeseries for each segment. The window "
                               "size is measured in number of timesteps. The default is 400.")
                        print(msg)
                        window_size = raw_input('window size >> ')
                        try:
                            correction_params['window_size'] = int(window_size)
                        except ValueError:
                            continue
                        else:
                            break

                    while True:
                        print("Volume correction parameter 2: Stable volume period test threshold")
                        msg = ('As the stable volume period locator window moves over the timeseries, the procedure tests each '
                               'section of the series to determine if it should be considered a period of stable volume.'
                               'This is determined by comparing the absolute difference '
                               'between the minimum and maximum values within that section against a specified threshold. When '
                               'the difference is below this threshold, the section is considered a period of stable volume '
                               'that should be further examined to determine whether correction is required. If the difference '
                               'is above the threshold, the procedure continues to the next section. The default is 5 m^3.')
                        print(msg)
                        abs_basevol_signal = raw_input("enter the test threshold in m^3 >> ")
                        try:
                            correction_params['abs_basevol_signal'] = float(abs_basevol_signal)
                        except ValueError:
                            continue
                        else:
                            break

                    while True:
                        print("Volume correction parameter 3: Expected maximum base volume deviation multiplier")
                        msg = ("This sets the minimum allowable absolute difference between the average volume "
                              "in a period of base volume and the initial base volume, as a "
                              "percentage of the total volume range. For example, if the volume of the event ranges "
                              "from -5 to 100 m^3 "
                              "and the multiplier is 0.005, then a window with a max deviation below 105 * .005 = .525 "
                              "will be treated as a stable volume period and examined for potential correction. "
                              "The default is .0005 m^3.")
                        print(msg)
                        correction_threshold = raw_input("correction threshold in m^3 >> ")
                        try:
                            correction_params['correction_threshold'] = float(correction_threshold)
                        except ValueError:
                            continue
                        else:
                            break

                    while True:
                        print("Volume correction parameter 4: Secondary moving window")
                        msg = ("Once it's been established that a period of base "
                               "volume needs correction, to determine how much correction is necessary, "
                               "the procedure examines the period in question using a smaller "
                               "window than the primary one to find the exact location and amount for the correction."
                               "Enter the size of this window as a "
                               "percentage of the larger correction window previously entered. The default is .25.")
                        print(msg)
                        msg = "Enter micro window pct >> "
                        micro_window_pct = raw_input(msg)
                        try:
                            correction_params['micro_window_pct'] = float(micro_window_pct)
                        except ValueError:
                            continue
                        else:
                            break

                    while True:
                        print("Volume correction parameter 5: Negative volume timestep count threshold")
                        msg = ("Enter the percentage of timesteps within a window below the initial base volume "
                               "that is sufficient for categorizing the current period as negative. The default is .2.")
                        print(msg)
                        negative_threshold_pct = raw_input("Negative threshold pct >> ")
                        try:
                            correction_params['negative_threshold_pct'] = float(negative_threshold_pct)
                        except ValueError:
                            continue
                        else:
                            break
                    break

        except KeyboardInterrupt:
            print(key_err_msg)

    # Merge the correction parameters into the settings.  (The original used
    # dict(ini.items() + correction_params.items()), which is Python 2 only;
    # update() has the same later-wins precedence.)
    ini.update(correction_params)

    while True:
        try:
            dummy_end = raw_input('Include final dummy segment? (y/n): ')
            if re.match('y|n', dummy_end, re.IGNORECASE):
                ini['dummy_end'] = bool(re.match('y', dummy_end, re.IGNORECASE))
                break
        except KeyboardInterrupt:
            print(key_err_msg)

    # Optional event start/end limits.
    event_start = None
    event_end = None
    while True:
        try:
            set_event_limits = raw_input("Set event start and end dates? >> ")
            if re.match('n', set_event_limits, re.IGNORECASE):
                break
            elif re.match('y', set_event_limits, re.IGNORECASE):
                while True:
                    event_start = raw_input("event start (yyyy-mm-dd HH:MM:SS >> ")
                    try:
                        event_start = datetime.strptime(event_start, "%Y-%m-%d %H:%M:%S")
                    except ValueError:
                        continue
                    else:
                        ini['event_start'] = event_start
                        break

                while True:
                    event_end = raw_input("event end (yyyy-mm-dd HH:MM:SS) >> ")

                    try:
                        event_end = datetime.strptime(event_end, "%Y-%m-%d %H:%M:%S")
                    except ValueError:
                        continue
                    else:
                        ini['event_end'] = event_end
                        break
                break

        except KeyboardInterrupt:
            print(key_err_msg)

    # Persist the collected settings so the next run can reuse them.
    parser = ConfigParser.SafeConfigParser()
    parser.add_section('main')
    for key, value in ini.items():
        if key in ('event_start', 'event_end'):
            value = value.strftime('%Y-%m-%d %H:%M:%S')
        # Strings are stored as raw Python literals so process_ini can eval()
        # them back (raw so Windows path backslashes survive).
        # NOTE(review): a value containing a single quote would break the
        # generated literal -- acceptable while paths are quote-free.
        parser.set('main', str(key), "r'" + value + "'" if isinstance(value, str) else str(value))

    with open(path, 'w') as f:
        parser.write(f)

    return ini


def run():
    """Entry point: collect settings (from hydmaker.ini or interactive
    prompts) and run the full conversion with them.

    The large commented-out explicit `process(...)` call that used to follow
    was dead code duplicating `process(**ini)` and has been removed.
    """
    ini = load_ini(os.path.join(curdir(), 'hydmaker.ini'))
    process(**ini)

# Run the interactive conversion when executed as a script.
if __name__ == '__main__':
    run()

