import csv
import sqlite3
import re
import os
import copy
from math import floor
from contextlib import closing
from datetime import datetime, timedelta
from itertools import groupby
import itertools
from collections import OrderedDict
import struct
import ConfigParser
import traceback, code
from collections import OrderedDict

def curdir():
    """Return the directory holding this module, falling back to the
    current working directory when ``__file__`` is undefined (e.g. when
    the code is executed interactively rather than imported)."""
    try:
        return os.path.dirname(__file__)
    except NameError:
        return os.curdir

class SWMMOUT2SQLITE_NoNames(Exception):
    """Raised when none of the element names handed to swmmout2sqlite
    can be located in the SWMM binary output file."""

    def __init__(self):
        super(SWMMOUT2SQLITE_NoNames, self).__init__('No names supplied found in file.')

class SWMMOUT2SQLITE_Error(Exception):
    """Raised for conversion failures in swmmout2sqlite.

    The supplied detail string *exc* is appended to a fixed prefix so the
    caller always sees the conversion context in the message."""

    def __init__(self, exc):
        message = "Can't convert SWMM binary output to SQLite database. " + exc
        super(SWMMOUT2SQLITE_Error, self).__init__(message)

def swmmout2sqlite(swmmout_path, dbpath, element_type, names=None, variables=None, 
        start=None, end=None, ignore_missing_names=False, using_swmm_volume=False):
    """
        Extract the report time series for one element type from a SWMM binary
        output file (.out) and load them into a SQLite table named after the
        element type.

        NOTE(review): this function targets Python 2 (it uses `basestring` and
        relies on dict.values() returning a list).

        swmmout_path : path to SWMM binary out file (.out)
        dbpath : path to SQLite database into which the binary should be read
        element_type : desired element type to extract from the binary file: subcatchments, nodes, links, system
        names : list of desired element names from element_type to extract. If None, then the function 
            will extract all.
        variables : list of desired variables to extract for each element in names.
            Each entry is matched against the known variable labels with re.match,
            so a label prefix (or regular expression) is accepted.
        start : start datetime to begin extracting the timeseries
        end : end datetime to end extraction
        ignore_missing_names : if False (the default), an exception is raised when any
            entry in names is absent from the .out file; if True, missing names are
            silently dropped (SWMMOUT2SQLITE_NoNames is raised if none remain).
        using_swmm_volume : if True, expect one extra per-link variable (volume)
            in the report series, as written by SWMM runs that report link volumes.

        Returns a tuple:
            (dbpath, element_type, number of elements extracted,
             number of variables extracted, start datetime, end datetime)

        For a detailed description of the binary file layout, see the SWMM 5.0 Interfacing Guide Appendix B 
        available on the EPA's SWMM page.
    """

    # validate names argument
    # NOTE(review): the bare except swallows the AssertionError detail; basestring
    # makes this check Python 2 only.
    try:
        assert names is None or all(isinstance(name, basestring) for name in names)
    except:
        raise Exception("Incorrect names argument supplied.")

    # there are types of model elements reported on in the binary file
    element_types = ['subcatchments', 'nodes', 'links', 'pollutants', 'system']

    # validate element_type arg
    if element_type not in element_types:
        raise Exception("Unknown element type: " + element_type)

    with open(swmmout_path, 'rb') as f:
        # a record is the smallest unit of information in the binary file. 
        RECORD_BYTES = 4

        # constants used to read the opening of the binary file
        NRECORDS_OPENING = 7 # number of records in the opening
        OPENING_BYTES = RECORD_BYTES * NRECORDS_OPENING
        NRECORDS_HEAD = 3 # head is the first three records of the opening: id, version, and flowunits
        HEAD_BYTES = NRECORDS_HEAD * RECORD_BYTES
        # format strings are for use with the struct.unpack function. 6i means 6 integer records
        HEAD_FORMAT = str(NRECORDS_HEAD) + 'i'
        NELEMENT_TYPES = 4 # ie. subcatches, nodes, links, pollutants
        # the opening bytes contain the head information and element counts
        ELEMENTCOUNTS_BYTES = OPENING_BYTES - HEAD_BYTES
        # check that these constant definitions are consistent with one another
        if not NELEMENT_TYPES * RECORD_BYTES == ELEMENTCOUNTS_BYTES:
            raise SWMMOUT2SQLITE_Error("NELEMENT_TYPES and ELEMENTCOUNTS_BYTES contants are inconsistent.")
        ELEMENTCOUNTS_FORMAT = str(NELEMENT_TYPES) + 'i'

        # constants used to read the closing of the binary file
        NRECORDS_CLOSING = 6
        CLOSING_BYTES = RECORD_BYTES * NRECORDS_CLOSING
        # Names, Properties, Results. These are three main sections of the binary file. The closing contains their file offsets
        NSECTIONS = 3 
        SECTION_OFFSET_BYTES = RECORD_BYTES * NSECTIONS
        SECTION_OFFSET_FORMAT = str(NSECTIONS) + 'i'
        NRECORDS_TAIL = NRECORDS_CLOSING - NSECTIONS
        TAIL_BYTES = CLOSING_BYTES - SECTION_OFFSET_BYTES
        TAIL_FORMAT = str(NRECORDS_TAIL) + 'i'

        EXPECTED_ID_NUM = 516114522 # should appear at the start and end of file

        NRECORDS_DAYS_SINCE_EPOCH = 2 # dates are stored in two adjacent records as decimal days since the epoch
        DAYS_SINCE_EPOCH_BYTES  = NRECORDS_DAYS_SINCE_EPOCH * RECORD_BYTES
        EPOCH = datetime(1899, 12, 30)
        REPORT_INTERVAL_BYTES = RECORD_BYTES
        HOURS_IN_DAY = 24.0 
        MINUTES_IN_HOUR = 60.0
        SECONDS_IN_MINUTE = 60.0
        # one second expressed in decimal days
        # NOTE(review): TIMESTEP_TOLERANCE is defined but never used in this function.
        TIMESTEP_TOLERANCE = (((1/HOURS_IN_DAY)/MINUTES_IN_HOUR)/SECONDS_IN_MINUTE)

        # READ CLOSING 
        # move to the beginning of the closing (seek whence=2 means from end of file)
        f.seek(-CLOSING_BYTES, 2) 
        # read section offsets
        section_offset_records = f.read(SECTION_OFFSET_BYTES)
        section_offsets = struct.unpack(SECTION_OFFSET_FORMAT, section_offset_records)
        # create a dict that indexes the section offsets by name
        offsets_by_section = dict(zip(('names', 'properties', 'results'), section_offsets))
        # read the remaining records in the closing and unpack them into the appropriate variables
        tail_records = f.read(TAIL_BYTES)
        ntimesteps, errorcode, id_num = struct.unpack(TAIL_FORMAT, tail_records)

        # if the identifying number doesn't match the expected, either the file is corrupt
        # or it was generated with a different version of SWMM
        if id_num != EXPECTED_ID_NUM:
            raise SWMMOUT2SQLITE_Error("Unexpected identifying number encountered at end of file.")
        elif errorcode:
            raise SWMMOUT2SQLITE_Error("Output contains errors.")
        elif not ntimesteps:
            raise SWMMOUT2SQLITE_Error("Output has zero timesteps.")

        # READ OPENING
        # move to the beginning, read the head, unpack into the appropriate variables
        f.seek(0)
        head_records = f.read(HEAD_BYTES)
        id_num, version, flowunits = struct.unpack(HEAD_FORMAT, head_records)
        if id_num != EXPECTED_ID_NUM:
            raise SWMMOUT2SQLITE_Error("Unexpected identifying number encountered at beginning of file.")

        # the units dict contains the two possible unit types for each kind of measurement.
        # the first unit in the pair is the choice when the flow units are CFS, GPM, or MGD.
        # the second unit in the pair is the choice when the flow units are CMS, LPS, LPD
        units = {
            'depth_rate1' : ['inPerHour', 'mmPerHour'],
            'depth_rate2' : ['ftPerSec', 'mPerSec'],
            'depth_rate3' : ['inPerDay', 'mmPerDay'],
            'depth1' : ['in', 'mm'],
            'depth2' : ['ft', 'm'],
            'volume' : ['ft3', 'm3'],
            'temp' : ['degF', 'degC']
        }
        # flow units are stored in the binary file as an integer code that indexes
        # directly into the following list (i.e. 0 = CFS ... 5 = LPD)
        flowunit_options = ['CFS', 'GPM', 'MGD', 'CMS', 'LPS', 'LPD']
        flowunits = flowunit_options[flowunits]
        # the choice of flow units determines the units type for all other measurements
        units_choice = 0 if flowunits in ('CFS', 'GPM', 'MGD') else 1
        # update units dict so that it indexes the correct units for each measurement type
        for unit_group in units:
            units[unit_group] = units[unit_group][units_choice]
        # add flowunits to units dict. to be used later
        units['flow'] = flowunits

        # read in the number of elements for each element type, index the counts by element type
        element_counts_records = f.read(ELEMENTCOUNTS_BYTES)
        element_counts = list(struct.unpack(ELEMENTCOUNTS_FORMAT, element_counts_records))
        nsystem_elements = 1 # there is only one system time series
        element_counts.append(nsystem_elements)
        element_counts_by_type = OrderedDict(zip(element_types, element_counts))

        # validate element_type argument
        if not element_counts_by_type[element_type]:
            raise Exception("SWMM output does not report any " + element_type + " elements.")

        # Go to the element names section
        f.seek(offsets_by_section['names'], 0)

        # create dictionary containing the lists of all element names for each element type.
        # each name is stored as a length record followed by that many bytes of text.
        element_names_by_type = OrderedDict()
        for type_ in element_types:
            if type_ in ('system',):
                # system does not have elements
                element_names_by_type[type_] = None
            else:
                for i in range(element_counts_by_type[type_]):
                    name_bytes_record = f.read(RECORD_BYTES)
                    name_bytes = struct.unpack('i', name_bytes_record)[0]
                    name_record = f.read(name_bytes)
                    name = struct.unpack(str(name_bytes) + 's', name_record)[0]
                    element_names_by_type.setdefault(type_, []).append(name)

        # if the caller supplied a names argument, make sure all of those names are in the 
        # the names list for the desired element type, unless ignore_missing_names is True, 
        # in which case throw out the names that aren't in the binary file
        if names:
            type_names = element_names_by_type[element_type]
            in_report = [name in type_names for name in names]

            if not all(in_report) and not ignore_missing_names:
                missing_names = [name for name, is_in_report in zip(names, in_report) if not is_in_report]
                raise Exception("The following element names aren't in the report: " + ','.join(missing_names))

            user_names_in_rpt = [name for name in names if name in type_names]
        else:
            # if no names argument is supplied, extract all elements
            user_names_in_rpt = element_names_by_type[element_type]

        # this conditional catches the case when the user supplies a names list
        # with True ignore_missing_names and none of the supplied names are in the file
        if not user_names_in_rpt:
            raise SWMMOUT2SQLITE_NoNames()

        # if there are any pollutants, read the pollutant units
        pollutant_labels = []
        if element_counts_by_type['pollutants']:
            pollutant_units_records = f.read(element_counts_by_type['pollutants'] * RECORD_BYTES)
            pollutant_units = struct.unpack(str(element_counts_by_type['pollutants']) + 'i', pollutant_units_records)
            # pollutant units are stored as integers 0 - 2 corresponding to the following list
            pollutant_unit_label_options = ['mgL', 'ugL', 'countPerL']
            pollutant_unit_labels = [pollutant_unit_label_options[i] for i in pollutant_units]
            # create output labels for the pollutants by joining their names with their units
            pollutant_labels = ['_'.join(tup) for tup in zip(element_names_by_type['pollutants'], pollutant_unit_labels)]

        # Read properties. The properties describe the aspects of the elements in each 
        # element class such as area, link length, node depth. These data are read here, but never used.
        # (reading them is still required to advance the file position past this section)
        f.seek(offsets_by_section['properties'], 0)
        element_property_codes = OrderedDict()
        element_properties_by_type = OrderedDict()

        for type_ in element_types:
            if type_ in ('pollutants', 'system'):
                element_properties_by_type[type_] = None
            else:
                nprops_record = f.read(RECORD_BYTES)
                nprops = struct.unpack('i', nprops_record)[0]
                property_code_records = f.read(nprops*RECORD_BYTES)
                property_codes = struct.unpack(str(nprops) + 'i', property_code_records)
                element_property_codes[type_] = property_codes

                for i in range(element_counts_by_type[type_]):
                    property_records = f.read(nprops*RECORD_BYTES)
                    properties = struct.unpack(str(nprops) + 'f', property_records)
                    element_properties_by_type.setdefault(type_, []).append(zip(property_codes, properties))

        # These are labels for each of the reporting variables for each of the element types.
        # They're listed in the order they appear in the report series. The part of the labels
        # in braces are place holders for future substitution of the correct units, e.g., {depth_rate1},
        # will be replaced with something like 'inPerHour'.
        var_labels_by_type = {
            'subcatchments' : ['rainfall_{depth_rate1}', 
                               'snow_depth_{depth1}', 
                               'evap_plus_infil_losses_{depth_rate1}', 
                               'runoff_rate_{flow}', 
                               'gw_outflow_rate_{flow}',
                               'gw_table_elev_{depth2}'],
            'nodes' : ['depth_above_invert_{depth2}', 
                       'hydraulic_head_{depth2}', 
                       'stored_and_ponded_vol_{volume}', 
                       'lateral_inflow_{flow}', 
                       'total_inflow_{flow}',
                       'flow_lost_to_flooding_{flow}'],
            'links' : ['flow_rate_{flow}', 
                       'flow_depth_{depth2}', 
                       'flow_velocity_{depth_rate2}', 
                       'Froude_number', 
                       'Capacity'],
            'system' : ['air_temp_{temp}',
                        'rainfall_{depth_rate1}',
                        'snow_depth_{depth1}',
                        'evap_plus_infil_losses_{depth_rate1}',
                        'runoff_rate_{flow}',
                        'dry_weather_inflow_{flow}',
                        'gw_inflow_{flow}',
                        'RDII_inflow_{flow}',
                        'user_supplied_direct_inflow_{flow}',
                        'total_lateral_inflow_{flow}',
                        'flow_lost_to_flooding_{flow}',
                        'flow_leaving_through_outfalls_{flow}',
                        'volume_of_stored_water_{volume}',
                        'evaporation_rate_{depth_rate3}']
        }
        # some SWMM runs write an extra per-link volume variable at the end of the link series
        if using_swmm_volume:
            var_labels_by_type['links'] += ['volume_{volume}']
        # pollutant concentrations are recorded in the report series as 
        # the final parameters for subcatchments, nodes, and links
        # if there are pollutants, add their labels to each list of labels except for system
        if element_counts_by_type['pollutants']:
            for type_ in element_types:
                if type_ not in ('pollutants', 'system'):
                    var_labels_by_type[type_].extend(pollutant_labels)

        # update the label units place holders with the correct units for each parameter using the units dictionary
        for type_ in var_labels_by_type:
            var_labels_by_type[type_] = [name.format(**units) for name in var_labels_by_type[type_]]

        # get a count of the number of variables for each element type and check that it matches the expectations
        # in var_labels_by_type
        report_vars_by_type = OrderedDict()
        for type_ in element_types:
            if type_ in ('pollutants',):
                report_vars_by_type[type_] = None
            else:
                nvariable_codes_record = f.read(RECORD_BYTES)
                nvariable_codes = struct.unpack('i', nvariable_codes_record)[0]
                variable_code_records = f.read(nvariable_codes * RECORD_BYTES)
                variable_codes = struct.unpack(str(nvariable_codes) + 'i', variable_code_records)
                expected_num_vars = len(var_labels_by_type[type_])
                if len(variable_codes) != expected_num_vars:
                    exc = "Unexpected number of variables for " + type_ + ". Expected " + str(expected_num_vars) \
                          + ", encountered " + str(len(variable_codes))
                    raise Exception(exc)
                report_vars_by_type[type_] = variable_codes

        # Read simulation start datetime (8-byte double) and reporting interval (seconds)
        rpt_start_days_since_epoch_record = f.read(DAYS_SINCE_EPOCH_BYTES)
        rpt_start_days_since_epoch = struct.unpack('d', rpt_start_days_since_epoch_record)[0]
        rpt_interval_record = f.read(REPORT_INTERVAL_BYTES) 
        rpt_interval = struct.unpack('i', rpt_interval_record)[0]
        
        def to_days_since_epoch(dtime):
            # takes dtime object and calculate the number of days since the EPOCH
            in_seconds = (dtime - EPOCH).total_seconds()
            return ((in_seconds / SECONDS_IN_MINUTE) / MINUTES_IN_HOUR) / HOURS_IN_DAY

        # if the user supplied a start time, get the days since epoch for that start and 
        # register this as the start of the desired data.
        user_start_days_since_epoch = to_days_since_epoch(start) if start else rpt_start_days_since_epoch
        # calculate the reporting interval duration in decimal days
        rpt_interval_days = ((rpt_interval / SECONDS_IN_MINUTE) / MINUTES_IN_HOUR) / HOURS_IN_DAY
        # using the number of timesteps calculate the final datetime of the report
        # NOTE(review): this is (1 + ntimesteps) intervals past the start, i.e. one
        # interval beyond the last reported step — looks like deliberate slack so the
        # default end never cuts off the final step; confirm against the Interfacing Guide.
        rpt_end_days_since_epoch = rpt_start_days_since_epoch + rpt_interval_days * (1 + ntimesteps)
        # if the user supplied and end datetime, use this as the cut off, otherwise use the final report datetime
        user_end_days_since_epoch = to_days_since_epoch(end) if end else rpt_end_days_since_epoch

        # raise an exception if the start and end times are inconsistent
        if user_end_days_since_epoch <= user_start_days_since_epoch:
            raise SWMMOUT2SQLITE_Error("Start and end datetimes inconsistent.")

        # calculate the number of bytes per timestep for each element type
        # this will be used to skip over sections that aren't needed.
        report_bytes_by_type = OrderedDict()
        for type_ in element_types:
            if type_ == 'pollutants':
                report_bytes_by_type[type_] = 0
            else:
                byte_count = element_counts_by_type[type_] * len(report_vars_by_type[type_]) * RECORD_BYTES
                report_bytes_by_type[type_] = byte_count

        # calculate the total number of bytes for each timestep
        bytes_per_timestep = DAYS_SINCE_EPOCH_BYTES + sum(report_bytes_by_type.values())

        # generate the offsets for each element type relative 
        # to end of the datetime records for each timestep. Since 
        # subcatchments starts right after the datetime, its offset is zero.
        # NOTE(review): .values()[:-1] relies on Python 2's dict.values() returning a list.
        type_offsets = [0]
        for bytes_ in report_bytes_by_type.values()[:-1]:
            type_offsets.append(type_offsets[-1] + bytes_)

        # index the timestep-relative offsets by element_type
        type_offsets_by_type = OrderedDict(zip(element_types, type_offsets))

        # for the desired element type, calculate how many bytes each 
        # element takes up per timestep
        bytes_per_element = len(report_vars_by_type[element_type]) * RECORD_BYTES

        # determine which variable should be extracted.
        # if the user has not supplied a list of variables to extract, extract them all
        if not variables:
            variables = var_labels_by_type[element_type]

        # create a dict where each label for a variable desired by the user 
        # is indexed by its position in the full list of variables extracted from the file.
        # e.g., if the user wants rainfall and runoff rate for subcatchments, the dict would look 
        # something like {0 : 'rainfall', 4 : 'runoff'}.
        # the order in which the element type variables are listed in the file corresponds
        # to the order they are written in the report time series.
        user_var_labels_by_index = []
        for var in variables:
            for i, label in enumerate(var_labels_by_type[element_type]):
                # re.match anchors at the start of label, so each user-supplied var is
                # effectively a label prefix (regex metacharacters in var are honored)
                if re.match(var, label):
                    user_var_labels_by_index.append((i, label))
                    break
                elif i == len(var_labels_by_type[element_type]) - 1:
                    exc = "Could not match variable " + var + " with any of the known variable labels for " + element_type
                    raise Exception(exc)
        user_var_labels_by_index = OrderedDict(user_var_labels_by_index)

        # get the maximum index for all the variables desired by the user and use this to calculate 
        # how many bytes will have to be read for each element at each time step until.
        max_user_var_index = max(user_var_labels_by_index.keys())
        user_var_byte_range = (max_user_var_index + 1) * RECORD_BYTES

        # we will need a list of desired element names with their sequential byte offsets, for example,
        # if elementA is the first desired element and is 16 bytes from the start of the section and elementB
        # is 32 bytes away from elementA, then the beginning of the list 
        # would look like: [[16, 'elementA'], [32, 'elementB'], ...]
        #
        # first create a list of desired element names paired with their index from the list of names belonging 
        # to the desired element type
        user_element_indices = [[i, name] for i, name in enumerate(element_names_by_type[element_type]) 
                                if name in user_names_in_rpt]
        # to each pair in user_element_indices add the names index of the element name pair just before it.
        # we'll use this to calculate the index between each element
        lagged = [user_element_indices[i] + [user_element_indices[i-1][0]] for i in range(1, len(user_element_indices))]
        first_offset = user_element_indices[0]
        user_element_offsets_and_names = [[first_offset[0] * bytes_per_element, first_offset[1]]]
        # each subsequent offset is relative to the file position after reading the
        # previous element's user_var_byte_range bytes, hence the subtraction
        for idx, name, idx_lag1 in lagged:
            offset = (idx - idx_lag1) * bytes_per_element - user_var_byte_range
            user_element_offsets_and_names.append((offset, name))
        # get the largest element index from the list of desired element names
        max_user_element_index = max(dict(user_element_indices).keys())

        # open a db connection in a context manager. When the with block exits, the db will be closed.
        with closing(sqlite3.connect(dbpath)) as cnxn:
            # turn on autocommit
            cnxn.isolation_level = None
            # generate a cursor for the database 
            cursor = cnxn.cursor()
            # turn off extraneous features to optimize the database's performance
            cursor.executescript("""
                PRAGMA synchronous=OFF;
                PRAGMA count_changes=OFF;
                PRAGMA journal_mode=OFF;
            """)

            # if a table for the element type already exists, delete it.
            # NOTE(review): the bare except also hides locking/permission errors,
            # which would then surface as a confusing CREATE TABLE failure below.
            try:
                cursor.execute("DROP TABLE " + element_type)
            except:
                pass

            # create a table table with a column for each variable to be extracted
            create_table_stmt = "CREATE TABLE {type} (timestep integer, name text, {variables})"
            variable_defs = ','.join([name + ' real' for name in user_var_labels_by_index.values()])
            create_table_stmt = create_table_stmt.format(type=element_type, variables=variable_defs)
            cursor.execute(create_table_stmt)

            # create the insert query that will be used to import data from the file to the database
            insert_stmt = """
                INSERT INTO {type} (timestep, name, {variables})
                VALUES (?, ?, {values})
            """
            insert_stmt = insert_stmt.format(type=element_type, variables=','.join(user_var_labels_by_index.values()),
                values=','.join(['?' for _ in range(len(user_var_labels_by_index))]))

            # move the file position to the start of the report section
            f.seek(offsets_by_section['results'], 0)

            # executing the insert statement on each line of data would have a large overhead. 
            # we'll store each line of data in a list (batch) and insert everything in that list at the same time
            # once it reaches a certain size.
            INSERT_BATCH_SIZE = 500
            batch = []
            # define the struct.unpack format for extracting a row of variables for an element
            # even if the the user only wants, for instance, variables 2, 3, 10, all variables 1-10 will be extracted.
            var_format = 'f' * (max_user_var_index + 1)
            # get the offset from the start of the timestep for the desired element type
            element_type_offset = type_offsets_by_type[element_type]
            # calculate the number of bytes from the beginning of a timestep to the last byte for that timestep 
            # that must be read to get the desired data.
            head_timestep_bytes_range = (((max_user_element_index + 1) - 1) * bytes_per_element) + user_var_byte_range
            # using head_timestep_bytes_range, calculate the number of bytes from the last variable for the last element 
            # that must be read to the beginning of the next timestep. 
            tail_timestep_bytes_range = bytes_per_timestep - DAYS_SINCE_EPOCH_BYTES - element_type_offset - head_timestep_bytes_range

            # read the datetime record for the first timestep 
            days_since_epoch_record = f.read(DAYS_SINCE_EPOCH_BYTES)
            days_since_epoch = struct.unpack('d', days_since_epoch_record)[0]
            # iterate through the timesteps. at the beginning of each iteration, the file 
            # position as at the start of the next timestep
            for timestep in range(ntimesteps):
                if days_since_epoch > user_end_days_since_epoch:
                    # if the datetime entry at the current timestep is later than the user specified end datetime, break 
                    # out of the loop ...
                    break
                elif days_since_epoch >= user_start_days_since_epoch:
                    # ... otherwise skip to the position for the desired element type
                    # (seek whence=1 means relative to the current position)
                    f.seek(element_type_offset, 1)
                    for i, tup in enumerate(user_element_offsets_and_names):
                        # for each desired element skip ahead from the current position using its relative offset
                        element_offset, name = tup
                        f.seek(element_offset, 1)
                        # initialize the entry for the insert statement 
                        entry = [timestep, name]
                        # read in the variables for this element and extract the desired ones
                        vars_records = f.read(user_var_byte_range)
                        vars_vals = struct.unpack(var_format, vars_records)
                        # round to 7 significant digits, the precision of a 32-bit float
                        vars_vals = [float('%.7g' % val) for val in vars_vals]
                        user_var_vals = [val for j, val in enumerate(vars_vals) if j in user_var_labels_by_index.keys()]
                        # extend the entry for the insert statement with the variable values
                        entry.extend(user_var_vals)
                        # add the entry to the batch
                        batch.append(entry)

                        # once batch gets to the insert batch size, execute the insert statement on
                        # the batch and then clear it.
                        if len(batch) == INSERT_BATCH_SIZE:
                            cursor.executemany(insert_stmt, batch)
                            del batch[:]

                    # skip ahead to the next timestep
                    f.seek(tail_timestep_bytes_range, 1)

                else:
                    # move ahead to the next timestep
                    f.seek(bytes_per_timestep - DAYS_SINCE_EPOCH_BYTES, 1)

                # NOTE(review): prev_days_since_epoch is assigned but never read in this function.
                prev_days_since_epoch = days_since_epoch
                # read the datetime entry for the next timestep
                days_since_epoch_record = f.read(DAYS_SINCE_EPOCH_BYTES)
                days_since_epoch = struct.unpack('d', days_since_epoch_record)[0]

            # run the insert statement on any remaining entries in the batch list
            cursor.executemany(insert_stmt, batch)
            # create an indices on the table to improve query performance
            idx_stmt = "CREATE INDEX idx_{type}_{col} on {type}({col})"
            cursor.execute(idx_stmt.format(type=element_type, col='timestep'))
            cursor.execute(idx_stmt.format(type=element_type, col='name'))
            cnxn.commit()

        # return a number of informative parameters about the data to the user
        user_start_dtime = EPOCH + timedelta(days=user_start_days_since_epoch)
        user_end_dtime = EPOCH + timedelta(days=user_end_days_since_epoch)
        return dbpath, element_type, len(user_names_in_rpt), len(variables), user_start_dtime, user_end_dtime

def tune_db(cursor):
    """Disable non-essential SQLite features on *cursor* to speed up bulk writes."""
    pragmas = (
        "PRAGMA synchronous=OFF",    # don't wait for disk writes to complete before continuing
        "PRAGMA count_changes=OFF",  # don't count num. rows affected by DELETE, INSERT, or UPDATE
        "PRAGMA journal_mode=OFF",   # turn off journaling
    )
    for stmt in pragmas:
        cursor.execute(stmt)

def get_segtable(segmap_paths):
    """Read, validate, and renumber one or more segmentation-map CSV files.

    segmap_paths : list of paths to .map.csv files with columns
        'Name', 'SWMM', 'Type', 'WASP'.

    Returns a list of dicts, one per row, with the original columns plus a
    'segmap' key (the basename of the file the row came from). Blank SWMM/WASP
    cells are filled by carrying the previous value forward, and both columns
    are renumbered sequentially starting at 1. Raises Exception on any
    validation failure (bad END rows, unknown types, repeated conduits, ...).
    """
    segtable = []  # list of dicts, where each dict is a row from one of the .map.csv files
    for fname in segmap_paths:
        with open(fname, 'r') as f:
            table = list(csv.DictReader(f))
            # if the table has rows, check that the header is right and add the rows to segtable
            if table:
                assert all(fieldname in ('Name', 'SWMM', 'Type', 'WASP') for fieldname in table[0].keys())
                for row in table:
                    # remember the source file so error messages can name it
                    row['segmap'] = os.path.basename(fname)
                segtable.extend(table)

    # Fill any missing entries in the SWMM and WASP columns by carrying the
    # previous non-blank value forward. The very first row overall must supply
    # both numbers (prev_* is still None at that point).
    prev_wasp = None
    prev_swmm = None
    for row in segtable:
        if not row['WASP']:
            if prev_wasp is None:
                raise Exception("The first entry in " + row['segmap'] + " needs a WASP number.")
            row['WASP'] = prev_wasp
        else:
            prev_wasp = row['WASP']

        if not row['SWMM']:
            if prev_swmm is None:
                raise Exception("The first entry in " + row['segmap'] + " needs a SWMM number.")
            row['SWMM'] = prev_swmm
        else:
            prev_swmm = row['SWMM']

    # Renumber the segments sequentially: rows sharing the same original number
    # within the same segmap keep the same new number; a change in either the
    # number or the source file starts a new segment.
    current_swmm = 1
    current_wasp = 1
    for i, row in enumerate(segtable):
        if i == 0:
            prev_wasp = row['WASP']
            prev_swmm = row['SWMM']
            prev_segmap = row['segmap']
            row['WASP'] = current_wasp
            row['SWMM'] = current_swmm
        else:
            if row['WASP'] == prev_wasp and row['segmap'] == prev_segmap:
                row['WASP'] = current_wasp
            else:
                prev_wasp = row['WASP']
                current_wasp += 1
                row['WASP'] = current_wasp

            if row['SWMM'] == prev_swmm and row['segmap'] == prev_segmap:
                row['SWMM'] = current_swmm
            else:
                prev_swmm = row['SWMM']
                current_swmm += 1
                row['SWMM'] = current_swmm

            prev_segmap = row['segmap']

    # Capitalize all segment types BEFORE any Type-based checks so that
    # lowercase entries (e.g. 'end') are treated the same as uppercase ones.
    for row in segtable:
        row['Type'] = row['Type'].upper()

    # check that there is only one segment with an entry with type 'END' and no segment name. This indicates 
    # the end of the system. A tributary end would have type end, but the name for that entry would be the 
    # conduit it links into.
    num_end_segs = len([row for row in segtable if not row['Name'] and row['Type'] == 'END'])
    if not num_end_segs:
        raise Exception("Segmentation map is missing an END segment.")
    elif num_end_segs != 1:
        raise Exception('There are multiple terminating segments in the segment maps.')

    # check that all types are either CONDUIT, INFLOW, or END
    if any(row['Type'] not in ('CONDUIT', 'INFLOW', 'END') for row in segtable):
        raise Exception('Unknown Type found in seg map file')

    # a tributary end segment is an end segment with a name. The name should identify the segment 
    # to which the tributary connects.
    # extract trib connection conduit names and check that they exist somewhere in the system
    trib_connection_names = [row['Name'] for row in segtable if row['Type'] == 'END' and row['Name']]
    conduit_names = [row['Name'] for row in segtable if row['Type'] == 'CONDUIT']
    if not all([name in conduit_names for name in trib_connection_names]):
        raise Exception('There are END rows in your segment map(s) that refer to conduits not included in the map.')

    # check that there are no repeated conduits
    names = [row['Name'] for row in segtable if row['Type'] != 'END']
    if len(names) != len(set(names)):
        raise Exception('There are repeated conduits in one of the map files.')

    # necessary segment map characteristics have been checked, return the table.
    return segtable

# timestamp of the previous timing() call; None until the clock is started
current_start = None

def timing(s):
    """Print label *s* and the minutes/seconds elapsed since the previous call.

    The first call only starts the clock and prints nothing; every later call
    reports the interval since the call before it, then restarts the clock.
    """
    global current_start
    if current_start is not None:
        # renamed from 'timing' so the local no longer shadows this function
        elapsed = (datetime.now() - current_start).total_seconds()
        # single-argument print(...) calls behave identically under
        # Python 2 (which the rest of this file targets) and Python 3
        print(s)
        print(elapsed / 60)
        print(elapsed)
        print('\n')

    current_start = datetime.now()

def process(filter_mins,  # number of minutes for the optional moving average smoothing window
            dummy_end, 
            inppath, # path to SWMM .inp file
            outpath, # path to directory to write output
            segmap_paths, # segmap_paths
            rptpath=None,   # path to SWMM rpt file
            binarypath=None, # path to SWMM binary output file
            correct_vol=False, # boolean value to determine whether or not flow should be corrected
            window_size=400,  # volume correction parameter
            correction_threshold=0.05, # volume correction parameter
            abs_basevol_signal=5, # volume correction parameter
            event_start=None, # event start datetime 
            event_end=None,  # event end datetime
            negative_threshold_pct=0.2, # volume correction parameter
            micro_window_pct=.25, # volume correciton parameter
            round_num=5, # precision to round data
            flow_weight_sums=False, # type of calculation to use for combining segment depth and velocity
            event_start_percentage=None, # volune correction parameter
            window_increment_percent=.25,
            using_swmm_volume=False,
            use_inp_inflows=True,
            use_simple_correction=True
    ):

    timing('')
    # the user must supply either a path to a binary out file or an rpt file
    if not (binarypath or rptpath):
        raise Exception("Must supply either an SWMM .rpt path or a .out path.")

    # process the segmap files and return a validated list of dictionaries for each row in each segmap
    segtable = get_segtable(segmap_paths)

    # generate the name for the database from the filename of the rpt or binary file
    db_name_path = binarypath if binarypath else rptpath
    dbname = os.path.splitext(os.path.basename(db_name_path))[0] + '.db'
    dbpath = os.path.join(os.path.dirname(outpath), dbname)

    # remove the database if it already exists
    if os.path.exists(dbpath):
        os.unlink(dbpath)

    # open a connection to the database with a context manager that will close the connection when the with-block exits.
    with closing(sqlite3.connect(dbpath)) as conn:
        # set autocommit
        conn.isolation_level = None
        # obtain a cursor
        cur = conn.cursor()
        # optimize the database by turning off unneeded features
        cur.executescript("""
            PRAGMA synchronous=OFF;
            PRAGMA count_changes=OFF;
            PRAGMA journal_mode=OFF;
        """)

        cur.execute("""
            CREATE TABLE settings (
                using_swmm_volume integer
            )     
        """)

        cur.execute("INSERT INTO settings (using_swmm_volume) VALUES (?)", (int(using_swmm_volume),))

        # create table to store the data in segtable so it can be used in queries to come
        cur.execute("""
            CREATE TABLE segs (
                wasp int,           -- wasp segment number
                DS_wasp int,        -- wasp segment number of the DS segment (initially NULL)
                swmm int,           -- swmm segment number 
                DS_swmm int,        -- swmm segment number of the DS segment (initiall NULL)
                type varchar(8),    -- element type, either 'CONDUIT', 'INFLOW', or 'END'
                name varchar(64)    -- name as it appears in the .rpt or binary file
            )""")

        for row in segtable:
            # load the segmap data into the segs tables
            cur.execute("""
                INSERT INTO segs (wasp, swmm, type, name) 
                VALUES (%(WASP)s, %(SWMM)s, '%(Type)s', '%(Name)s')
                """ % row)
        
        # get list of unique swmm-wasp segment pairs
        cur.execute('SELECT DISTINCT swmm, wasp FROM segs')
        segs = cur.fetchall()

        # get final wasp and swmm segs
        cur.execute('SELECT MAX(wasp) FROM segs')
        last_wasp_seg = cur.fetchone()[0]
        cur.execute('SELECT MAX(swmm) FROM segs')
        last_swmm_seg = cur.fetchone()[0]

        if not dummy_end:
            cur.execute("SELECT * FROM segs WHERE type = 'INFLOW' AND wasp = ?", (last_wasp_seg,))
            last_wasp_inflows = cur.fetchall()

            if last_wasp_inflows:
                msg = "If you choose not to have a dummy end segment, the final segment in " \
                     + "in your segmentation file can't have INFLOW elements. Please remove the INFLOWs " \
                     + "for the final WASP segment and restart, or restart and choose to use a dummy end segment."
                raise Exception(msg)

        #cur.execute("SELECT name FROM segs WHERE type = 'CONDUIT' AND wasp = ?", (last_wasp_seg,))
        #last_wasp_names = [row[0] for row in cur.fetchall()]

        # this loop cycles through the swmm segment numbers and updates DS_wasp and DS_swmm columns in the segs table. 
        # It also creates a dummy segment for the final segment, if this is requested by the user (dummy_end = True)
        for swmm, wasp in segs:
            # check if the current swmm segment includes an 'END' row
            cur.execute("SELECT name FROM segs WHERE swmm = ? AND type = 'END'", (swmm,)) 
            end_row = cur.fetchone()
            if end_row: 
                end_name = end_row[0]
                if end_name:
                    # if the end row includes the name of a conduit (meaning this segment is the end of a tributary, 
                    # not the end of a main trunk), then
                    # find the wasp and swmm segment numbers for the conduit and assign them to the DS_wasp and 
                    # DS_swmm columns for the conduits in this segment
                    
                    cur.execute("SELECT wasp, swmm FROM segs WHERE name = ? AND type <> 'END'", (end_name,))

                    DS_wasp, DS_swmm = cur.fetchone()
                    cur.execute("UPDATE segs SET DS_wasp = ?, DS_swmm = ? WHERE type = 'CONDUIT' AND swmm = ?", 
                        (DS_wasp, DS_swmm, swmm))
                else:
                    # ... otherwise, this is the end of the main stem. 
                    if dummy_end:
                        # if user wants a dummy end segment (dummy_end is True), add it to the segs table, and 
                        # make this the down stream segment for the actual final conduits
                        dummy_wasp = last_wasp_seg + 1
                        dummy_swmm = last_swmm_seg + 1
                        cur.execute("""
                            INSERT INTO segs (wasp, swmm, DS_wasp, DS_swmm, type, name)
                            SELECT ?, ?, 0, 0, 'DUMMY', name 
                                FROM segs WHERE swmm = ? AND type = 'CONDUIT' LIMIT 1
                            """, (dummy_wasp, dummy_swmm, swmm))

                        cur.execute("UPDATE segs SET DS_wasp = ?, DS_swmm = ? WHERE type = 'CONDUIT' AND swmm = ?", 
                            (dummy_wasp, dummy_swmm, swmm))
                    else:
                        # if the user doesn't want a dummy end segment, set the DS segments of the final 
                        # segment to the empty seg
                        cur.execute("UPDATE segs SET DS_wasp = ?, DS_swmm = ? WHERE type = 'CONDUIT' AND swmm = ?", 
                            (0, 0, swmm))
            else:
                # if this isn't the end segment, update the the DS segment numbers to the next segment
                cur.execute("UPDATE segs SET DS_wasp = ?, DS_swmm = ? WHERE type = 'CONDUIT' AND swmm = ?", 
                    (wasp + 1, swmm + 1, swmm))

        # for rows of type 'INFLOW', the DS_wasp and DS_swmm are the same as its swmm and wasp seg numbers
        cur.execute("UPDATE segs SET DS_wasp = wasp, DS_swmm = swmm WHERE type = 'INFLOW'")
        cur.execute("DELETE FROM segs WHERE type = 'END'")

        # write segment map to file
        with open(os.path.join(os.path.dirname(outpath), 'segment_map_out.csv'), 'w') as f:
            fieldnames = ['wasp', 'ds_wasp', 'swmm', 'ds_swmm', 'type', 'name']
            writer = csv.DictWriter(f, fieldnames = fieldnames, lineterminator='\n')
            writer.writeheader()
            cur.execute('SELECT * FROM segs ORDER BY wasp, swmm, type, name')
            for row in cur.fetchall():
                row = dict(zip(fieldnames, row))
                writer.writerow(row)

        # create a table to store the length and constant dwf values for conduits
        cur.execute("""CREATE TABLE conduit_params (
            name varchar(64), 
            length float, 
            inlet varchar(64), 
            dwf float DEFAULT 0,
            timeseries text,
            scaling_factor real
        )""")

        # the following block loops through the .inp and updates conduit_params with the lengths and DWFs for each conduit
        meters_in_foot = 0.3048
        first_step = 0
        last_step = 0
        with open(inppath, 'r') as f:
            # churn through the .inp lines until encountering the [OPTIONS] marker
            line = f.readline().strip()
            while not re.match(re.compile(r'\[opt', re.IGNORECASE), line.strip()):
                line = f.readline()
                if not line:
                    raise Exception("Can't find [OPTIONS] section in .inp file.")

            line = f.readline()

            # read the following variables from the OPTIONS sections
            timestep_secs = None
            sim_start_date = None
            sim_start_time = None
            start_date = None
            start_time = None
            end_date = None
            end_time = None
            while True:
                if re.match(re.compile(r'report_start_date', re.IGNORECASE), line.strip()):
                    _, start_date = line.split()
                elif re.match(re.compile(r'report_start_time', re.IGNORECASE), line.strip()):
                    _, start_time = line.split()
                elif re.match(re.compile(r'end_date', re.IGNORECASE), line.strip()):
                    _, end_date = line.split()
                elif re.match(re.compile(r'end_time', re.IGNORECASE), line.strip()):
                    _, end_time = line.split()
                elif re.match(re.compile(r'report_step', re.IGNORECASE), line.strip()):
                    _, ts_str = line.split()
                    hours, mins, secs = ts_str.split(':')
                    timestep_secs = float(hours) * 120 + float(mins) * 60 + float(secs)
                elif re.match(re.compile(r'start_date', re.IGNORECASE), line.strip()):
                    _, sim_start_date = line.split()
                elif re.match(re.compile(r'start_time', re.IGNORECASE), line.strip()):
                    _, sim_start_time = line.split()
                     
                if (not line) or re.match(r'\[', line.strip()):
                    if not timestep_secs:
                        raise Exception(".inp [OPTIONS] missing REPORT_STEP")
                    elif not start_time:
                        raise Exception(".inp [OPTIONS] missing REPORT_START_TIME")
                    elif not start_date:
                        raise Exception(".inp [OPTIONS] missing REPORT_START_DATE")
                    elif not end_date:
                        raise Exception(".inp [OPTIONS] missing REPORT_END_DATE")
                    elif not end_time:
                        raise Exception(".inp [OPTIONS] missing REPORT_END_TIME")
                    else:
                        try:
                            report_start_dtime = datetime.strptime(' '.join([start_date, start_time]), '%m/%d/%Y %H:%M:%S')
                            report_end_dtime = datetime.strptime(' '.join([end_date, end_time]), '%m/%d/%Y %H:%M:%S')
                            sim_start_dtime = datetime.strptime(' '.join([sim_start_date, sim_start_time]), '%m/%d/%Y %H:%M:%S')
                        except:
                            raise Exception('Unexpected datetime format encountered in .inp file for report dates.')
                        else:
                            break
                else:
                    line = f.readline()

            
            if (report_start_dtime.minute * 60 + report_start_dtime.second) % timestep_secs != 0:
                raise Exception("Report start datetime must be an integral multiple of the timestep seconds after some hour.")

            # if user hasn't set a start time, set it to the report time from the .inp
            if not event_start:
                event_start = report_start_dtime

            # get the index for the timestep corresponding to event_start
            first_step = int(floor((event_start - report_start_dtime).total_seconds() / timestep_secs))

            # if user hasn't set an end time, set the end to the end time for the report
            if not event_end:
                event_end = report_end_dtime
            
            # use event_end to calculate the total number of timesteps
            event_step_count = int((event_end - event_start).total_seconds() / timestep_secs)

            # calculate index for the final timestep
            last_step = first_step + event_step_count

            # get a list of all the conduit names from segs
            cur.execute("SELECT name FROM segs WHERE type = 'CONDUIT'")
            conduit_names = [row[0] for row in cur.fetchall()]
            
            # churn through the .inp lines until encountering the [CONDUITS] marker
            line = f.readline()
            if not line:
                raise Exception("Can't find [CONDUITS] section in .inp file")

            while not re.match(re.compile(r'\[cond', re.IGNORECASE), line.strip()):
                line = f.readline()
                if not line:
                    raise Exception("Can't find [CONDUITS] section in .inp file.")

            # loop through the lines in [CONDUITS] and for each conduit with its name in the list, 
            # update conduit params with that conduit's length and inlet node.
            line = f.readline()
            if not line:
                raise Exception("There are no conduits listed in the [CONDUIT] section of the .inp")
            line = line.strip()
            while not re.match(r'\[', line):
                if not (re.match('^$', line) or re.match(r';', line)):
                    name, inlet, _, length, _ = line.split(None, 4)
                    if name in conduit_names:
                        cur.execute('INSERT INTO conduit_params (name, length, inlet) VALUES (?, ?, ?)', 
                            (name, float(length) * meters_in_foot, inlet))
                line = f.readline().strip()

            # churn through the lines until encountering the [DWF] marker
            line = f.readline()
            if not line:
                raise Exception("Can't find [DWF] section in the .inp")
            line = line.strip()
            while not re.match(re.compile(r'\[dwf', re.IGNORECASE), line):
                line = f.readline().strip()

            # get a list of the inlet node names from conduit_params
            cur.execute('SELECT inlet FROM conduit_params')
            inlet_names = [row[0] for row in cur.fetchall()]

            cur.execute("""
                SELECT inlet 
                FROM conduit_params 
                WHERE name IN (
                    SELECT name FROM segs WHERE type = 'CONDUIT' AND wasp = ?
                )
            """, (last_wasp_seg,))

            last_wasp_inlets = [row[0] for row in cur.fetchall()]

            # cycle through the [DWF] section and for each node that is in our inlet list, update 
            # conduit_params with its DWF value
            line = f.readline().strip()
            while not re.match(r'\[', line):
                if not (re.match('^$', line) or re.match(r';', line)):
                    name, param, avg = line.split()
                    if name in inlet_names and param == 'FLOW':
                        dwf_value = round(float(avg) * pow(meters_in_foot, 3), round_num)
                        if (not dummy_end) and dwf_value != 0 and name in last_wasp_inlets:
                            msg = "Unless you are using a dummy end segment, the conduits in the final WASP segment " \
                                  + "must have 0 dry weather flow values for their inlets in the [DWF] section of the *.inp."
                            raise Exception(msg)
                        cur.execute('UPDATE conduit_params SET dwf = ? WHERE inlet = ?', 
                            (dwf_value, name))
                line = f.readline().strip()


        if use_inp_inflows:
            with open(inppath, 'r') as f:
                line = f.readline().strip()
                while line and not re.match(re.compile(r'\[infl', re.IGNORECASE), line.strip()):
                    line = f.readline()

                line = f.readline()
                names = ['node', 'parameter', 'time_series', 'param_type', 'units_factor', 'scale_factor', 'bl_value', 'bl_pat']
                cur.execute("SELECT inlet FROM conduit_params")
                inlet_names = [row[0] for row in cur.fetchall()]

                while line and not re.match(r'\[', line):
                    if not re.match(r';|^$', line.strip()):
                        line = dict(zip(names, line.split()))
                        if line['node'] in inlet_names:
                            cur.execute("""
                                UPDATE conduit_params
                                SET timeseries = ?, scaling_factor = ?
                                WHERE inlet = ?
                            """, (line['time_series'], float(line['scale_factor']), line['node']))
                    line = f.readline()

        ## create a second conduit_params table that adds the swmm number for each segment
        cur.execute("""
            CREATE TABLE conduit_params2 (
                name varchar(64), 
                swmm int, 
                length float, 
                dwf float DEFAULT 0,
                timeseries text,
                scaling_factor real
        )""")

        # populate the second conduit_params table with the current one inner joined with the segs table
        cur.execute("""
            INSERT INTO conduit_params2 (name, swmm, length, dwf, timeseries, scaling_factor)
                SELECT cp.name, segs.swmm, cp.length, cp.dwf, cp.timeseries, cp.scaling_factor
                FROM conduit_params AS cp INNER JOIN segs ON cp.name = segs.name
        """)

        cur.execute("""
            CREATE TABLE inflows (
                timeseries text,
                timestep int,
                value real
            )""")

        cur.execute("SELECT DISTINCT timeseries FROM conduit_params2")
        ts_names = [row[0] for row in cur.fetchall() if row[0] is not None]
        with open(inppath, 'r') as f:
            line = f.readline().strip()
            while not re.match(re.compile(r'\[time', re.IGNORECASE), line.strip()):
                line = f.readline()
                if not line:
                    if [x for x in ts_names if x]:
                        raise Exception("Error: [INFLOWS] references timeseries in .inp, but no [TIMESERIES] section found.")

            line = f.readline()

            current_series = ''
            series = []
            prev_ts_name = None
            dtime_counter = 0
            have_names = []
            prev_value = None
            prev_dtime = None
            this_series = []
            while line and not re.match(r'\[', line):
                if not re.match(r';|^$', line.strip()):
                    line = line.split(';')[0].split()
                    current_ts_name = line[0]
                    if current_ts_name in ts_names:
                        dtime = datetime.strptime(' '.join([line[1], line[2]]), '%m/%d/%Y %H:%M')

                        if current_ts_name != prev_ts_name:
                            if current_ts_name in have_names:
                                raise Exception('Timeseries out of order')
                            elif prev_ts_name is not None:
                                have_names.append(prev_ts_name)

                            prev_dtime = None
                            prev_value = None
                            series.extend(this_series)
                            this_series = []
                            dtime_counter = 0
                            total_seconds_since_start_of_hour = dtime.minute * 60 + dtime.second
                            if total_seconds_since_start_of_hour % timestep_secs != 0:
                                msg = "The inflows timeseries timestep must be an integral multiple of the reporting timestep."
                                raise Exception(msg)
                            
                        value = float(line[3])
                        if prev_dtime:
                            timeseries_timestep = (dtime - prev_dtime).total_seconds()
                            interp_slope = (value - prev_value) / timeseries_timestep
                            current_ts = 0
                            while current_ts <= timeseries_timestep:
                                current_dtime = prev_dtime + timedelta(seconds=current_ts)
                                current_value = (interp_slope * current_ts) + prev_value
                                if current_dtime > event_start and current_dtime <= event_end:
                                    if this_series and current_ts == 0:
                                        _ = this_series.pop()
                                    this_series.append([current_ts_name, dtime_counter, current_value])
                                    #this_series.append([current_ts_name, dtime_counter, current_dtime, current_value])
                                    dtime_counter += 1 
                                current_ts += timestep_secs

                        prev_value = value
                        prev_dtime = dtime

                    prev_ts_name = current_ts_name

                line = f.readline()

        series.extend(this_series)

        cur.executemany("""
            INSERT INTO inflows (timeseries, timestep, value)
            VALUES (?, ?, ?)
        """, series)

        # create the first flows table. 
        # this table will hold the flow data for each segment, and operations on this table will eventually
        # lead to generating the flow series for the final hyd file.
        cur.execute("""
            CREATE TABLE flows (
                timestep int, 
                wasp_source int,    -- wasp seg number indicating which segment the flow is coming from
                wasp_sink int,      -- wasp seg number indicating which segment the flow is going to
                swmm_source int,
                swmm_sink int, 
                name varchar(64), 
                flow float
            )""")

        # create the first volumes table
        # this table will hold the volume, depth, and velocity data for all segments and the operations on this 
        # table and the tables derived from it will eventually lead to generating the data for the final volume, depth, and
        # velocity timeseries.
        cur.execute("""
            CREATE TABLE volumes (
                timestep int, 
                wasp int, 
                swmm int, 
                name varchar(64),
                init_vol float, 
                flow float, 
                volume float,
                depth float, 
                velocity float
            )""")

        # select one conduit to represent each swmm segment. The names of these representative conduits will 
        # be used to look up the base flows for the their respective swmm segments.
        cur.execute("SELECT MAX(name) FROM segs WHERE type = 'CONDUIT' GROUP BY swmm")
        rep_conduits = [row[0] for row in cur.fetchall()]

        cur.execute("SELECT MAX(wasp) FROM segs")
        last_wasp = cur.fetchone()[0]
        
        # if the user provided their data in a swmm binary file, read this file into the volumes and flows tables.
        if binarypath:
            # get a list of segment names from the segs table
            cur.execute("SELECT DISTINCT name FROM segs")
            names = [row[0] for row in cur.fetchall()]
            # these are the names of the link variables as they need to be named to request them from 
            # the swmmout2sqlite function
            link_vars = ['flow_rate', 'flow_depth', 'flow_velocity']
            if using_swmm_volume:
                link_vars = link_vars + ['volume']
            # subcatchment variables
            catch_vars = ['runoff_rate']

            # read the links data into the database
            links_result = swmmout2sqlite(binarypath, dbpath, 'links', names=names, 
                    variables=link_vars, start=event_start, end=event_end,
                    ignore_missing_names=True, using_swmm_volume=using_swmm_volume)

            # Read the subcatchment series into the database. swmmout2sqlite raises
            # SWMMOUT2SQLITE_NoNames when none of the requested names exist in the
            # binary file; here that simply means the model has no subcatchments.
            try:
                catches_result = swmmout2sqlite(binarypath, dbpath, 'subcatchments', names=names, 
                        variables=catch_vars, start=event_start, end=event_end, ignore_missing_names=True, 
                        using_swmm_volume=using_swmm_volume)
            except SWMMOUT2SQLITE_NoNames:
                catches_result = None
            
            # Column layout of the links/subcatchments tables swmmout2sqlite just
            # generated: timestep, element name, then one column per requested variable.
            link_fields = ['timestep', 'name'] + link_vars
            catch_fields = ['timestep', 'name'] + catch_vars

            # get a list of unique link names
            cur.execute("SELECT DISTINCT name FROM links")
            link_names = [row[0] for row in cur.fetchall()]
            
            # get a list of unique subcatchment names, if any were read
            if catches_result:
                cur.execute("SELECT DISTINCT name FROM subcatchments")
                catch_names = [row[0] for row in cur.fetchall()]
            else:
                catch_names = []

            # sanity check: every conduit in the seg map must have a link series in
            # the binary file, otherwise the flows/volumes tables would be silently
            # incomplete.
            cur.execute("SELECT name FROM segs WHERE type = 'CONDUIT'")
            seg_conduits = [row[0] for row in cur.fetchall()]
            if [name for name in seg_conduits if name not in link_names]:
                raise Exception("There are conduits in the map file not found in the binary file.")

            # likewise every INFLOW in the seg map must appear either as a link or
            # as a subcatchment in the binary file
            cur.execute("SELECT name FROM segs WHERE type = 'INFLOW'")
            seg_inflows = [row[0] for row in cur.fetchall()]
            if [name for name in seg_inflows if name not in link_names + catch_names]:
                raise Exception("There are inflows in the map file not found in the binary file.")

            for name in names:
                # every row of the seg map that refers to this element (one element
                # may map to several wasp/swmm segment pairs)
                cur.execute("SELECT wasp, swmm, DS_wasp, DS_swmm, type, name FROM segs WHERE name = ?", (name,))
                elements = cur.fetchall()

                # links and subcatchments live in different tables with different
                # column layouts; pick the table, field list, and flow column name
                # based on where this element was found.
                if name in link_names:
                    cur.execute("SELECT * FROM links WHERE name = ? ORDER BY timestep", (name,))
                    fields = link_fields
                    flow_field = 'flow_rate'
                else:
                    cur.execute("SELECT * FROM subcatchments WHERE name = ? ORDER BY timestep", (name,))
                    fields = catch_fields
                    flow_field = 'runoff_rate'
                    
                # zip each row with the field names to get a list of dicts, one per row
                data = [dict(zip(fields, row)) for row in cur.fetchall()]

                # unit-converted output series for this element
                flows = []
                velocities = []
                depths = []
                volumes = []

                # if a moving-average filter was requested, convert its duration
                # (minutes) into a number of timesteps
                if filter_mins:
                    filter_steps = round((filter_mins * 60) / timestep_secs)

                # moving-average windows; left unused when no filter is applied
                flow_window = []
                velocity_window = []
                depth_window = []
                volume_window = []

                # convert each row from US units (cfs / ft) to metric. When filtering,
                # push the converted values through the moving window: once the window
                # is full, append the window average to the output series and drop the
                # oldest sample.
                for i, row in enumerate(data):
                    flow = round(float(row[flow_field]) * pow(meters_in_foot, 3), round_num)
                    depth = round(float(row.get('flow_depth', 0)) * meters_in_foot, round_num)
                    velocity = round(float(row.get('flow_velocity', 0)) * meters_in_foot, round_num)
                    volume = round(float(row.get('volume', 0)) * pow(meters_in_foot, 3), round_num)

                    if filter_mins:
                        flow_window.append(flow)
                        velocity_window.append(velocity)
                        depth_window.append(depth)
                        volume_window.append(volume)

                        if len(flow_window) == filter_steps:
                            flows.append(sum(flow_window) / filter_steps)
                            del flow_window[0]
                            velocities.append(sum(velocity_window) / filter_steps)
                            del velocity_window[0]
                            depths.append(sum(depth_window) / filter_steps)
                            del depth_window[0]
                            volumes.append(sum(volume_window) / filter_steps)
                            del volume_window[0]
                    else:
                        flows.append(flow)
                        velocities.append(velocity)
                        depths.append(depth)
                        volumes.append(volume)

                # insert the converted series into flows (and, for conduits/dummies,
                # volumes) once per seg-map entry for this element
                for element in elements:
                    # extract its swmm/wasp numbering
                    wasp, swmm, DS_wasp, DS_swmm, eltype, name  = element

                    # INFLOW elements enter the network from outside, so their
                    # source segment numbers are 0
                    wasp_source, swmm_source = (0,0) if eltype =='INFLOW' else (wasp, swmm)

                    #if dummy_end or DS_wasp != last_wasp or wasp_source != 0:
                    # insert the flow data into the flows table
                    cur.executemany("""
                        INSERT INTO flows (timestep, wasp_source, swmm_source, name, wasp_sink, swmm_sink, flow)
                        VALUES (?,?,?, ?,?,?, ?)
                    """,
                    [(i, wasp_source, swmm_source, name, DS_wasp, DS_swmm, flow) for i, flow in enumerate(flows)])

                    # conduits and dummies additionally contribute to the volumes table
                    if eltype in ('CONDUIT', 'DUMMY'):
                        # a representative conduit also carries a dry weather flow
                        # (DWF) series into the flows table
                        if eltype == 'CONDUIT' and name in rep_conduits:
                            cur.execute('SELECT dwf FROM conduit_params WHERE name = ?', (name,))
                            dwf = cur.fetchone()[0]

                            dwf_series = [dwf for _ in xrange(len(flows))]
                            # dwf == 0 means the DWF comes from a scaled inflow
                            # timeseries rather than a constant
                            if dwf == 0:
                                cur.execute("SELECT scaling_factor, timeseries FROM conduit_params2 WHERE name = ?", (name,))
                                scaling_factor, t_series = cur.fetchone()
                                if t_series:
                                    cur.execute("SELECT value FROM inflows WHERE timeseries = ? ORDER BY timestep", (t_series, ))
                                    dwf_series = [row[0] * scaling_factor for row in cur.fetchall()]
                                    dwf_series = [round(x * pow(meters_in_foot, 3), round_num) for x in dwf_series]

                            if wasp != last_wasp:
                                cur.executemany("""
                                    INSERT INTO flows (timestep, wasp_source, swmm_source, name, wasp_sink, swmm_sink, flow)
                                    VALUES (?,?,?, ?,?,?, ?)
                                """, [(i, 0, 0, name + '_DWF', wasp, swmm, x) for i, x in enumerate(dwf_series)])

                        # initial volume from flow continuity: area = Q/v, volume =
                        # area * length; falls back to 0 when velocity is 0.
                        # NOTE(review): if the series is empty, flows[0] /
                        # velocities[0] raises IndexError -- presumably upstream
                        # validation guarantees at least one timestep; confirm.
                        cur.execute("SELECT length FROM conduit_params WHERE name = ?", (name,))
                        length = cur.fetchone()[0]
                        init_vol = (flows[0] / velocities[0]) * length if velocities[0] else 0 


                        # insert the series into volumes; when using SWMM-reported
                        # volumes, a DUMMY repeats its first volume for every timestep
                        # (itertools.cycle is truncated by zip to the series length)
                        if using_swmm_volume:
                            if eltype == 'DUMMY':
                                volumes = itertools.cycle([volumes[0]])
                            cur.executemany("""
                                INSERT INTO volumes (timestep, wasp, swmm, name, volume, depth, velocity)
                                VALUES (?,?,?, ?,?,?, ?)
                            """, [(i, wasp, swmm, name, tup[0], tup[1], tup[2])
                                  for i, tup in enumerate(zip(volumes, depths, velocities))])
                        else:
                            cur.executemany("""
                                INSERT INTO volumes (timestep, wasp, swmm, name, init_vol, flow, depth, velocity)
                                VALUES (?,?,?, ?,?,?, ?,?)
                            """, [(i, wasp, swmm, name, init_vol, tup[0], tup[1], tup[2])
                                  for i, tup in enumerate(zip(flows, depths, velocities))])
        elif rptpath:
            # the timeseries come from a SWMM .rpt report file instead of the binary

            with open(rptpath, 'r') as f:
                # scan the file; each '<<< <Kind> <name> >>>' header starts a report
                # series. For every series whose name appears in the segs table,
                # parse it and insert into the flows and volumes tables.
                line = f.readline()

                while line: # while not at the end of the file ... 
                    # skip lines until the start of the next time series
                    while line and not re.match('<<<', line.strip()):
                        line = f.readline()

                    if line:
                        # read in element type and name
                        _, kind, name, _ = line.strip().split() # assumes line is of the format '<<< Subcatchment CATCH01 >>>'
                        # look this element up in the seg map ...
                        cur.execute('SELECT wasp, swmm, DS_wasp, DS_swmm, type, name FROM segs WHERE name = ?', (name,))
                        elements = cur.fetchall()
                        if not (elements and kind in ('Link', 'Subcatchment')): 
                            # ... not mapped (or not a Link/Subcatchment): keep scanning
                            line = f.readline()
                        else:
                            # ... otherwise, consume the 4-line column header and
                            # proceed with processing the data rows
                            header_nline = 4
                            for i in range(header_nline): # eat header
                                _ = f.readline() 

                            # expected columns differ between link and subcatchment
                            # report sections
                            if kind == 'Link':
                                fieldnames = ['date', 'time', 'flow', 'velocity', 'depth', 'percent'] 
                                if using_swmm_volume:
                                    fieldnames = fieldnames + ['volume']
                            else:
                                # NOTE(review): 'percip' is presumably a typo for
                                # 'precip' (precipitation); the key is never read,
                                # so the spelling is harmless.
                                fieldnames = ['date', 'time', 'percip', 'losses', 'flow']

                            # unit-converted output series for this element
                            flows = []
                            velocities = []
                            depths = []
                            volumes = []

                            # if a moving-average filter was requested, convert its
                            # duration (minutes) into a number of timesteps
                            if filter_mins:
                                filter_steps = round((filter_mins * 60) / timestep_secs)

                            # moving-average windows; left unused when no filter is applied
                            flow_window = []
                            velocity_window = []
                            depth_window = []
                            volume_window = []

                            line = f.readline()

                            # parse each data row, converting units and filtering.
                            # 'counter' tracks the row number so reading can start at
                            # first_step and stop at last_step.
                            # row counter, used to begin at first_step and stop
                            # after last_step
                            counter = 1

                            # a blank line terminates the report series
                            while line and not re.match('^$', line.strip()):
                                if counter < first_step:
                                    # before the requested start: skip the row
                                    counter += 1
                                    line = f.readline()
                                    continue
                                elif counter > last_step:
                                    # past the requested end: stop parsing
                                    break
                                else:
                                    record = dict(zip(fieldnames, line.strip().split()))

                                    # convert the parameters from US units (cfs / ft) to metric
                                    flow = round(float(record['flow']) * pow(meters_in_foot, 3), round_num)
                                    depth = round(float(record.get('depth', 0)) * meters_in_foot, round_num)
                                    velocity = round(float(record.get('velocity', 0)) * meters_in_foot, round_num)
                                    volume = round(float(record.get('volume', 0)) * pow(meters_in_foot, 3), round_num)

                                    # if filtering ...
                                    if filter_mins:
                                        # add the parameters to the current window
                                        flow_window.append(flow)
                                        velocity_window.append(velocity)
                                        depth_window.append(depth)
                                        # BUG FIX: volume was never appended here, so the
                                        # filtered volumes series was built from an
                                        # always-empty window (sum([]) == 0 at every step).
                                        # This mirrors the binary-file branch, which does
                                        # fill volume_window.
                                        volume_window.append(volume)

                                        # once the window is full, append the window
                                        # average to the output series and drop the
                                        # oldest sample
                                        if len(flow_window) == filter_steps:
                                            flows.append(sum(flow_window) / filter_steps)
                                            del flow_window[0]
                                            velocities.append(sum(velocity_window) / filter_steps)
                                            del velocity_window[0]
                                            depths.append(sum(depth_window) / filter_steps)
                                            del depth_window[0]
                                            volumes.append(sum(volume_window) / filter_steps)
                                            del volume_window[0]
                                    else:
                                        flows.append(flow)
                                        velocities.append(velocity)
                                        depths.append(depth)
                                        volumes.append(volume)

                                counter += 1
                                line = f.readline()

                            for element in elements:
                                # unpack the seg-map row; INFLOW elements enter the
                                # network from outside, so their sources are 0
                                wasp, swmm, DS_wasp, DS_swmm, eltype, name  = element
                                wasp_source = 0 if eltype == 'INFLOW' else wasp
                                swmm_source = 0 if eltype == 'INFLOW' else swmm

                                #if dummy_end or DS_wasp != last_wasp or wasp_source != 0:
                                cur.executemany("""
                                 INSERT INTO flows (timestep, wasp_source, swmm_source, name, wasp_sink, swmm_sink, flow) 
                                 VALUES (?, ?, ?, ?, ?, ?, ?)""",
                                 [(i, wasp_source, swmm_source, name, DS_wasp, DS_swmm, flow) 
                                   for i, flow in enumerate(flows)])


                                # conduits and dummies additionally contribute to the
                                # volumes table
                                if eltype in ('CONDUIT', 'DUMMY'):
                                    # a representative conduit also carries a dry
                                    # weather flow (DWF) series into the flows table
                                    if eltype == 'CONDUIT' and name in rep_conduits:
                                        cur.execute('SELECT dwf FROM conduit_params WHERE name = ?', (name,))
                                        dwf = cur.fetchone()[0]

                                        dwf_series = [dwf for _ in xrange(len(flows))]
                                        # dwf == 0 means the DWF comes from a scaled
                                        # inflow timeseries rather than a constant
                                        if dwf == 0:
                                            cur.execute("""
                                                SELECT scaling_factor, timeseries 
                                                FROM conduit_params2
                                                WHERE name = ?
                                            """, (name,))
                                            scaling_factor, t_series = cur.fetchone()
                                            if t_series:
                                                cur.execute("""
                                                    SELECT value 
                                                    FROM inflows
                                                    WHERE timeseries = ?
                                                    ORDER BY timestep
                                                """, (t_series,))
                                                dwf_series = [row[0] * scaling_factor for row in cur.fetchall()]
                                                dwf_series = [round(x * pow(meters_in_foot, 3), round_num) for x in dwf_series]

                                        if wasp != last_wasp:
                                            cur.executemany("""
                                                INSERT INTO flows (timestep, wasp_source, swmm_source, name, 
                                                    wasp_sink, swmm_sink, flow) 
                                                VALUES (?, ?, ?, ?, ?, ?, ?)
                                                """, [(i, 0, 0, name + '_DWF', wasp, swmm, x) for i, x in enumerate(dwf_series)])

                                    # initial volume from flow continuity: area = Q/v,
                                    # volume = area * length (0 when velocity is 0)
                                    cur.execute('SELECT length FROM conduit_params WHERE name = ?', (name,))
                                    length = cur.fetchone()[0]
                                    init_vol = (flows[0] / velocities[0]) * length if velocities[0] else 0

                                    if using_swmm_volume:
                                        cur.executemany("""
                                            INSERT INTO volumes (timestep, wasp, swmm, name, volume, depth, velocity)
                                            VALUES (?, ?, ?, ?, ?, ?, ?)
                                            """, [(i, wasp, swmm, name, tup[0], tup[1], tup[2]) 
                                                   for i, tup in enumerate(zip(volumes, depths, velocities))])
                                    else:
                                        cur.executemany("""
                                            INSERT INTO volumes (timestep, wasp, swmm, name, init_vol, flow, depth, velocity)
                                            VALUES (?, ?, ?, ?, ?, ?, ?, ?)
                                            """, [(i, wasp, swmm, name, init_vol, tup[0], tup[1], tup[2]) 
                                                   for i, tup in enumerate(zip(flows, depths, velocities))])
            

        timing('loading')

        # index the flows and volumes tables for the per-timestep lookups below
        cur.execute('CREATE INDEX idx_flows ON flows(timestep, name)')
        cur.execute('CREATE INDEX idx_volumes ON volumes(timestep)')

        timing('make flow and volumes indexes')
    
        # check that all conduits in segs are represented in the volumes table
        cur.execute("SELECT DISTINCT name FROM segs WHERE type = 'CONDUIT'")
        segs_conduit_names = [row[0] for row in cur.fetchall()]
        cur.execute("SELECT DISTINCT name FROM volumes")
        volumes_names = [row[0] for row in cur.fetchall()]
        missing_conduits = [name for name in segs_conduit_names if name not in volumes_names]
        if missing_conduits:
            raise Exception('There are conduits in the segment map not found in the .rpt file: ' + ', '.join(missing_conduits))

        # check that every mapped element, plus the expected conduit DWF entries,
        # ended up in the flows table
        cur.execute("SELECT DISTINCT name FROM segs WHERE name <> ''")
        segs_all_names = [row[0] for row in cur.fetchall()]
        # NOTE(review): the result of the next query is never fetched or used --
        # looks like leftover code; confirm before removing.
        cur.execute("SELECT DISTINCT name FROM segs WHERE type = 'CONDUIT'")
        dwf_conduits = rep_conduits
        if not dummy_end:
            # without a dummy end segment, conduits at the network outlet (the
            # maximum wasp number) don't get a DWF series, so exclude them
            cur.execute("SELECT MAX(wasp) FROM segs")
            max_wasp = cur.fetchone()[0]
            cur.execute("SELECT name FROM segs WHERE wasp = ?", (max_wasp,))
            final_conduits = [row[0] for row in cur.fetchall()]
            dwf_conduits = [name for name in rep_conduits if name not in final_conduits]
        dwf_names = [cond + '_DWF' for cond in dwf_conduits]
        cur.execute("SELECT DISTINCT name FROM flows")
        flows_names = [row[0] for row in cur.fetchall()]
        if set(segs_all_names + dwf_names) != set(flows_names):
            raise Exception('There are elements missing from the flows table.')

        # volumes2 holds one row per segment per timestep carrying each segment's
        # weighted depth/velocity contribution; swmm_vol and volume are only
        # populated when using_swmm_volume is set, flow only when it is not.
        # (delta_vol, flow_in, and vol live in volumes2_b, not here.)
        cur.execute("""
            CREATE TABLE volumes2 (
                timestep int, 
                wasp int, 
                swmm int, 
                name varchar(64), 
                flow float,
                volume float,
                swmm_vol float,
                depth float, 
                velocity float
            )""")

        if not using_swmm_volume:
            # the user may choose to either take arithmetic averages for combined
            # segment depth and velocity, or use each segment's flow to weight its
            # contribution to the combined velocity and depth
            if flow_weight_sums:
                # BUG FIX: the zero-total-flow fallback used (1/seg_count), which
                # SQLite evaluates with integer division (COUNT(*) is an integer),
                # so it produced 0 whenever seg_count > 1. Use 1.0 to force a real
                # division, matching the unweighted branch below.
                depth_calc = """
                    CASE WHEN total_flow = 0
                        THEN (1.0/seg_count) * depth
                        ELSE depth * (ABS(flow) / total_flow)
                    END
                """
                velocity_calc = """
                    CASE WHEN total_flow = 0
                        THEN (1.0/seg_count) * velocity
                        ELSE velocity * (ABS(flow)/total_flow)
                    END
                """
            else:
                depth_calc = "depth * (1.0 / seg_count)"
                velocity_calc = "velocity * (1.0 / seg_count)"

            tflow_calc = "SUM(ABS(flow))"

            # inner join volumes with an aggregation of itself (total flow and
            # segment count per swmm segment per timestep). Use the aggregated
            # values to calculate the weighted depth and velocity for each swmm
            # segment and insert the results, along with columns carried over from
            # volumes, into volumes2.
            cur.execute(""" 
                INSERT INTO volumes2 (timestep, wasp, swmm, name, depth, velocity, flow)
                    SELECT v.timestep, v.wasp, v.swmm, v.name,""" + depth_calc + """,""" + velocity_calc + """, flow
                    FROM (
                        SELECT """ + tflow_calc + """ AS total_flow, timestep, swmm, COUNT(*) AS seg_count
                        FROM volumes
                        GROUP BY timestep, swmm) AS total_flows
                    INNER JOIN volumes v ON v.timestep = total_flows.timestep AND v.swmm = total_flows.swmm
                """)

        else:
            # with SWMM-reported volumes, weight each segment's depth and velocity
            # by its share of the combined swmm-segment volume at that timestep
            cur.execute("""
                INSERT INTO volumes2  (timestep, wasp, swmm, name, volume, depth, velocity, swmm_vol)
                SELECT v.timestep, v.wasp, v.swmm, v.name, v.volume, (v.volume / swmm_vols.swmm_vol) * v.depth, 
                    (v.volume / swmm_vols.swmm_vol) * v.velocity, swmm_vols.swmm_vol
                FROM (SELECT SUM(volume) AS swmm_vol, timestep, swmm
                    FROM volumes
                    GROUP BY swmm, timestep
                ) AS swmm_vols
                INNER JOIN volumes v ON v.timestep = swmm_vols.timestep AND v.swmm = swmm_vols.swmm
                """)
        cur.execute('CREATE INDEX idx_volumes2 ON volumes2(timestep, swmm)')
        timing('populate volumes2')

        # volumes2_a sums the per-segment weighted contributions from volumes2 into
        # one row per swmm segment per timestep; at this stage the combination of
        # all parallel and lateral segments is complete.
        cur.execute("""
            CREATE TABLE volumes2_a (
                timestep int,
                wasp int,
                swmm int,
                depth float,
                velocity float,
                flow float,
                volume float
            )
        """)

        if not using_swmm_volume:
            # total the depth, velocity, and flow in volumes2 by swmm segment
            cur.execute("""
                INSERT INTO volumes2_a (timestep, wasp, swmm, depth, velocity, flow)
                SELECT timestep, wasp, swmm, SUM(depth), SUM(velocity), SUM(flow)
                FROM volumes2
                GROUP BY wasp, swmm, timestep
            """)
        else:
            # same aggregation, but carrying volume instead of flow
            cur.execute("""
                INSERT INTO volumes2_a (timestep, wasp, swmm, volume, depth, velocity)
                SELECT timestep, wasp, swmm, volume, depth, velocity
                FROM (SELECT timestep, wasp, swmm, SUM(volume) AS volume, SUM(depth) AS depth, SUM(velocity) AS velocity
                    FROM volumes2
                    GROUP BY wasp, swmm, timestep)
                """)

        cur.execute('CREATE INDEX idx_volumes2_a ON volumes2_a(timestep, swmm)')
        timing('populate volumes2_a')

        # volumes2_b gets populated by the calc_swmm_vols function defined below.
        # It differs from volumes2_a in that it also carries the per-step volume
        # change (delta_vol) and the accumulated volume (vol).
        cur.execute("""
            CREATE TABLE volumes2_b (
                timestep int,
                swmm int,
                wasp int,
                depth float,
                velocity float,
                flow float,
                delta_vol float,
                vol float NULL
            )
        """)
        cur.execute('CREATE INDEX idx_volumes2_b ON volumes2_b(timestep, swmm)')

        # volumes3x will contain the weighted velocity and depth for combined wasp segments
        cur.execute("""
            CREATE TABLE volumes3x (
                timestep int, 
                wasp int, 
                vol float, 
                wasp_depth float, 
                wasp_velocity float,
                flow float,
                seg_count int
            )""")
        cur.execute('CREATE INDEX idx_volumes3x ON volumes3x(timestep, wasp)')

        # volumes4x will contain the summed depth and velocity for combined wasp segments
        cur.execute("""
            CREATE TABLE volumes4x (
                timestep int, 
                wasp int, 
                vol float, 
                depth float, 
                velocity float
            )""")
        cur.execute('CREATE INDEX idx_volumes4x ON volumes4x(timestep, wasp)')

        if not using_swmm_volume:
            def calc_swmm_vols():
                """
                Populate volumes2_b with the values from volumes2_a plus the swmm
                segment volume calculation: first compute delta_vol for each
                segment at each timestep by inner joining volumes2_a with an
                aggregation of the flows table, then walk the timesteps in
                ascending order accumulating the vol column
                (vol[t] = vol[t-1] + delta_vol[t]), seeded with an initial volume
                derived from flow, velocity, and conduit length.
                """

                # clear the values from the table, if there are any
                cur.execute("DELETE FROM volumes2_b")

                # delta_vol = (inflow - outflow) * timestep length, computed per
                # swmm segment per timestep by joining volumes2_a with a flow
                # aggregation over the flows table
                cur.execute("""
                    INSERT INTO volumes2_b (timestep, wasp, swmm, depth, velocity, flow, delta_vol)
                    SELECT v.timestep, v.wasp, v.swmm, v.depth, v.velocity, v.flow, (inflows.flow_in - v.flow) * ?
                    FROM (
                        SELECT SUM(flow) AS flow_in, timestep, swmm_sink
                        FROM flows
                        GROUP BY timestep, swmm_sink) AS inflows
                    INNER JOIN volumes2_a v ON v.timestep = inflows.timestep AND v.swmm = inflows.swmm_sink
                """, (timestep_secs, ))

                # the following loop cycles through each segment and fills in its
                # part of the vol column in timestep order.
                # BUG FIX: ORDER BY added -- SQLite does not guarantee any ordering
                # for SELECT DISTINCT, and the accumulation below requires that
                # timestep t-1 is processed before t.
                cur.execute('SELECT DISTINCT timestep FROM volumes2_b ORDER BY timestep')
                timesteps = [row[0] for row in cur.fetchall()]
                cur.execute('SELECT DISTINCT swmm FROM volumes2_b')
                swmm_segs = [row[0] for row in cur.fetchall()]
                for swmm in swmm_segs:
                    for t in timesteps:
                        if t == 0:
                            # seed the series with the initial volume estimated
                            # from continuity: area = Q/v, volume = area * length
                            cur.execute('SELECT flow, velocity FROM volumes2_b WHERE timestep = 0 AND swmm = ?', (swmm, ))
                            flow, velocity = cur.fetchone()

                            # obtain length information to calculate init_vol
                            cur.execute("SELECT DISTINCT length FROM conduit_params2 WHERE swmm = ?", (swmm, ))
                            length = cur.fetchall()
                            assert len(length) == 1
                            length = length[0][0]

                            # BUG FIX: guard against a zero initial velocity, which
                            # previously raised ZeroDivisionError here; the other
                            # init_vol computations in this module already fall
                            # back to 0 in that case.
                            init_vol = (flow / velocity) * length if velocity else 0

                            cur.execute("""
                                UPDATE volumes2_b
                                SET vol = ?
                                WHERE timestep = 0 AND swmm = ?
                            """, (init_vol, swmm))
                        else:
                            # ... otherwise, carry the volume forward from the
                            # previous timestep
                            cur.execute('SELECT vol FROM volumes2_b WHERE timestep = ? AND swmm = ?', (t - 1, swmm))
                            prev_vol = cur.fetchall()
                            assert len(prev_vol) == 1
                            prev_vol = prev_vol[0][0]

                            # select the delta_vol value at the current step
                            cur.execute('SELECT delta_vol FROM volumes2_b WHERE timestep = ? AND swmm = ?', (t, swmm))
                            delta_vol = cur.fetchall()
                            assert len(delta_vol) == 1
                            delta_vol = delta_vol[0][0]

                            # a depth of 0 (most likely due to backflow) resets
                            # the volume to 0
                            cur.execute('SELECT depth FROM volumes2_b WHERE timestep = ? AND swmm = ?', (t, swmm))
                            depth = cur.fetchall()
                            assert len(depth) == 1
                            depth = depth[0][0]
                            if depth == 0:
                                vol = 0
                            else:
                                vol = prev_vol + delta_vol

                            # update volumes2_b 
                            cur.execute('UPDATE volumes2_b SET vol = ? WHERE timestep = ? AND swmm = ?', (vol, t, swmm))

            # call the calc_swmm_vols() function above to populate volumes2_b with the calculated swmm volume time series
            calc_swmm_vols()
            timing('populate volumes2_b')

            # now that the volume has been calculated for all swmm segments, if the user wishes to correct
            # periods in segments where the volume doesn't return to base volume or drops below base volume, they can.

            if correct_vol and use_simple_correction:
                # begin correction procedure
                print('correcting')

                # get all of the swmm segment numbers
                cur.execute("SELECT DISTINCT swmm FROM volumes2_b WHERE swmm IS NOT NULL")
                swmm_segs = [row[0] for row in cur.fetchall()]

                # for each swmm segment, correct its volume
                for swmm in swmm_segs:

                    # full volume time series for this segment, ordered by timestep
                    cur.execute("""
                        SELECT vol
                        FROM volumes2_b
                        WHERE swmm = ? 
                        ORDER BY timestep
                    """, (swmm, ))
                    vol = [row[0] for row in cur.fetchall()]

                    # determine the correct base volume for the segment by looking 
                    # at the average of the volume at the beginning of the series.
                    # use the rolling window size as an arbitrary cut off point 
                    # to end the beginning average
                    correct_basevol = sum(vol[:window_size]) / window_size

                    # calculate the range of the volume timeseries, from its base vol to its peak
                    vol_range = max(vol) - correct_basevol

                    # all volume before the event_start index is not corrected and gets included
                    # in the calculation that determines the correct base volume
                    event_start_idx = 0

                    # if user sets event_start_percentage, find the index of the first timestep 
                    # where the volume exceeds this percentage of the vol_range
                    if event_start_percentage:
                        for i, v in enumerate(vol):
                            if v > ((vol_range * event_start_percentage) + correct_basevol):
                                event_start_idx = i
                                break

                    # if an event_start_idx other than 0 is specified
                    # calculate the correct base volume as the average volume from the beginning of the 
                    # series to the start index.
                    if event_start_idx:
                        start_range = event_start_idx - window_size
                        if start_range <= 0:
                            start_range = int(event_start_idx * .5)
                        # NOTE(review): len(range(start_range)) is simply start_range
                        correct_basevol = sum(vol[:start_range]) / len(range(start_range))

                    # initialize a vector of 0s the same length as the volume time series.
                    # this vector will hold the final volume correction required at each timestep
                    correction = [0 for i in range(len(vol))]

                    # i is the timestep. start at time step 0
                    i = 0
                    # of portions of the volume time series.
                    def is_basevol(vol, lowerbound, upperbound):
                        # True when the window [lowerbound, upperbound) varies so little
                        # (spread below abs_basevol_signal) that it counts as dry weather.
                        window = vol[lowerbound:upperbound]
                        return max(window) - min(window) < abs_basevol_signal

                    def is_increasing(vol, lowerbound, upperbound):
                        # True when the series rises across the window endpoints, i.e. the
                        # sample at upperbound is strictly greater than the one at lowerbound.
                        start_val, end_val = vol[lowerbound], vol[upperbound]
                        return start_val < end_val

                    def is_off_basevol(basevol, correct_basevol):
                        # True when basevol deviates from correct_basevol by more than the
                        # user-tunable fraction (correction_threshold) of the series range.
                        deviation = abs(basevol - correct_basevol)
                        return deviation > correction_threshold * vol_range

                    def get_basevol(vol, lowerbound, upperbound):
                        # Mean volume over the half-open window [lowerbound, upperbound).
                        # (Divides by the nominal window width, not the slice length, so an
                        # upperbound past the end of vol dilutes the average — as in the original.)
                        total = sum(vol[lowerbound:upperbound])
                        return total / float(upperbound - lowerbound)

                    def is_negative(vol, correct_basevol, lowerbound, upperbound):
                        # True when "too many" samples in [lowerbound, upperbound) sit
                        # significantly below correct_basevol, i.e. the volume has gone
                        # spuriously negative relative to the base level.

                        # tolerance band: a sample only counts when it is below the base
                        # volume by more than this fraction of the overall series range
                        dev_thresh = correction_threshold * vol_range
                        window = vol[lowerbound:upperbound]
                        # count samples below the tolerance band and turn that into a fraction
                        below = [v for v in window if correct_basevol - v > dev_thresh]
                        pct = len(below) / float(len(window))

                        # significant only when the fraction exceeds the user-settable
                        # negative_threshold_pct cutoff
                        return pct > negative_threshold_pct

                    # max_window_index is the timestep that signals to stop moving the locator window forward.
                    max_window_index = len(vol) - window_size
                    # number of timesteps for the length of the secondary window (called micro window in what follows)
                    micro_window_size = int(window_size * micro_window_pct)

                    # NOTE(review): target_swmm is never read in this branch — looks like leftover debug scaffolding.
                    target_swmm = -10
                    while i < max_window_index:
                        # before inspection, assume no correction necessary
                        needs_correction = False

                        # if the timestep is after the event_start_idx (defaults to 0, but otherwise controlled by the user, 
                        # see description of event start percentage parameter), inspect to see if correction is necessary; 
                        # otherwise continue without doing anything.
                        if i > event_start_idx:
                            # define the upperbound of the moving window for this timestep
                            upperbound = i + window_size
                            # calculate the average volume in this window
                            basevol = get_basevol(vol, i, upperbound)
                            # if the volume within this window passes the negative volume test, proceed with correction
                            if is_negative(vol, correct_basevol, i, upperbound):
                                needs_correction = True
                                # define the start and end of the secondary window that will move either forwards or 
                                # backwards to locate the end of the negative volume period.
                                micro_start = upperbound - micro_window_size
                                micro_end = upperbound
                                # if the volume in the secondary window positioned at the end of the main moving window still 
                                # passes the is_negative volume test, then we have to push the secondary window forward until 
                                # the test no longer passes, indicating we've found the end of the is_negative volume period.
                                # otherwise, if the volume in the secondary window at the end of the main window fails the 
                                # is_negative test, then we are already outside the negative period and need to push the secondary 
                                # window inward to find where the negative period ends. 
                                if is_negative(vol, correct_basevol, micro_start, micro_end):
                                    while micro_end < max_window_index and is_negative(vol, correct_basevol, micro_start, micro_end):
                                        micro_start += 1
                                        micro_end += 1
                                else:
                                    # move the secondary window backwards until the negative period is found
                                    while micro_start > i and not is_negative(vol, correct_basevol, micro_start, micro_end):
                                        micro_start = micro_start - 1
                                        micro_end = micro_end - 1

                                # after locating the end of the negative volume period, 
                                # if the secondary window is still before the end of the time series ...
                                if micro_end < max_window_index:
                                    # set the starting point of the correction to the current timestep
                                    correction_start = i
                                    # the end of the correction will be the end point of the secondary window, that is,
                                    # the approximate point just before the negative period ends
                                    correction_end = micro_end
                                    # extract the volume for this period and find the index of the minimum value
                                    vol_block = vol[correction_start:correction_end]
                                    min_vol = min(vol_block)
                                    # NOTE(review): min(vol_block) is recomputed here — min_vol above already holds it.
                                    min_idx = vol_block.index(min(vol_block))
                                    # initialize two zero vectors. These will be joined eventually 
                                    # to create the full correction vector for this negative period.
                                    # lower_correction_spread will have the corrections from the beginning
                                    # of the negative period to the lowest point. upper_correction_spread 
                                    # will have the correction values from the step after the lowest point 
                                    # to the end of the negative period.
                                    lower_correction_spread = [0 for _ in range(min_idx)]
                                    upper_correction_spread = [0 for _ in range(min_idx, len(vol_block))]
                                    # loop over the volume amounts in this period before the minimum value ...
                                    for j, v in enumerate(vol_block[:min_idx]):
                                        # if the amount is less than the correct basevol, calculate the difference between 
                                        # the two. if the magnitude of the difference is smaller than the sum of the 
                                        # values in the lower_correction_spread, then skip over it---this correction is covered
                                        # by the earlier corrections. If the magnitude is larger,
                                        # add the difference between the magnitude and the sum of the corrections 
                                        # already in the lower_correction_spread to lower_correction_spread at the index 
                                        # corresponding to the current index in the vol_block
                                        if v < correct_basevol:
                                            diff = correct_basevol - v
                                            total_correction = sum(lower_correction_spread)
                                            remainder = diff - total_correction
                                            if remainder > 0:
                                                lower_correction_spread[j] = remainder

                                    # for those values after the minimum value, we want to undo the 
                                    # effects of the correction and bring them down to base volume, 
                                    # preserving the rest of the volume curve as it was before the correction.
                                    # The result of this approach is a flat line at base volume during the negative
                                    # period.
                                    for j, v in enumerate(vol_block[min_idx:]):
                                        if v < correct_basevol:
                                            diff = v - min_vol
                                            total_correction = abs(sum(upper_correction_spread))
                                            remainder = diff - total_correction
                                            if remainder > 0:
                                                upper_correction_spread[j] = -remainder

                                    # combine the two spreads to get the resulting correction vector
                                    correction_spread = lower_correction_spread + upper_correction_spread
                                else:
                                    # if it was necessary to move the secondary window forward to find the end 
                                    # of the negative period, it's possible that the secondary window will 
                                    # reach the end of the volume time series before finding it (that is, the negative
                                    # period is the whole tail of the event).
                                    # if the negative period is at the end of the volume time series, we take different 
                                    # steps to correct the volume. These steps preserve patterns in the tail,
                                    # rather than flat-lining it, as would be the case if the 
                                    # negative period was encountered before the end of the event.
                                    basevol = get_basevol(vol, micro_start, len(vol))
                                    correction_total = correct_basevol - basevol
                                    # NOTE(review): i - 2 * window_size can be negative early in the series; a negative
                                    # correction_start would slice from the end of the list below — confirm i is always
                                    # >= 2 * window_size when this branch is reached.
                                    correction_start = i - 2 * window_size
                                    correction_end = i
                                    correction_window_size = correction_end - correction_start
                                    avg_correction = correction_total / correction_window_size
                                    # (no-op: correction_end already equals correction_start + correction_window_size)
                                    correction_end = correction_start + correction_window_size
                                    correction_spread = [avg_correction for j in range(correction_window_size)]

                                # resume the main window just past the corrected period
                                i = correction_end
                            elif is_basevol(vol, i, upperbound) and is_off_basevol(basevol, correct_basevol):
                                # if this is a period of base vol and it is significantly off of the 
                                # expected base vol ...

                                # starting at the end of the current window, advance the secondary window until the volume
                                # series within the window is increasing and no longer in dry weather
                                min_vol = get_basevol(vol, i, upperbound)
                                micro_start = i
                                while micro_start < max_window_index:
                                    micro_end = micro_start + micro_window_size
                                    this_vol = get_basevol(vol, micro_start, micro_end)
                                    # track the lowest windowed average seen during the dry weather period
                                    if min_vol > this_vol:
                                        min_vol = this_vol

                                    if not is_basevol(vol, micro_start, micro_end) and is_increasing(vol, micro_start, micro_end):
                                        moving_end = micro_end
                                        # now that the general end of the dry weather period has been found, 
                                        # gradually move the secondary window inward until it encounters dry 
                                        # weather conditions again.
                                        # NOTE(review): termination relies solely on the moving_end == micro_start /
                                        # moving_end == micro_end exits below — confirm this cannot cycle.
                                        while True:
                                            while moving_end > micro_start and not is_basevol(vol, micro_start, moving_end):
                                                moving_end = moving_end - 1
                                            if moving_end == micro_start:
                                                micro_start = micro_start - micro_window_size + 1
                                                micro_end = moving_end
                                            elif moving_end == micro_end:
                                                break
                                            else:
                                                micro_start = moving_end - micro_window_size + 1
                                                micro_end = moving_end


                                        # calculate the average volume at the end of the dry weather period
                                        #basevol = get_basevol(vol, micro_start, micro_end)

                                        # does the volume need correcting?
                                        needs_correction = is_off_basevol(min_vol, correct_basevol)
                                        # NOTE(review): correction_total uses the outer-window basevol (the recalculation
                                        # above is commented out) — confirm that is intentional.
                                        correction_total = correct_basevol - basevol

                                        basevol_range = micro_end - i
                                        # start the correction before the current timestep by half the distance
                                        # between the current timestep and the end of the dry weather period.
                                        correction_start = int(max(0, i - (.5 * basevol_range)))
                                        correction_end = min(len(vol)-1, i + (basevol_range - (i - correction_start)))

                                        i += micro_window_size

                                        break
                                    else:
                                        micro_start += 1

                                # if the secondary window reached the end of the series in the search for the end of the 
                                # dry weather period, that means the dry weather conditions belong to the full tail of the event.
                                # look at the volume at the end of the series to determine
                                # whether correction is necessary.
                                if micro_start >= max_window_index:
                                    basevol = get_basevol(vol, micro_start, len(vol))
                                    needs_correction = is_off_basevol(basevol, correct_basevol)
                                    correction_total = correct_basevol - basevol

                                    # NOTE(review): i - 4 * window_size can be negative — see the note on the tail branch above.
                                    correction_start = i - 4 * window_size
                                    correction_end = max_window_index


                                    i += micro_window_size

                                # if correction is necessary, create the correction spread vector,
                                # which will be added to the primary correction vector at the corresponding timesteps
                                if needs_correction:
                                    correction_window_size = correction_end - correction_start
                                    avg_correction = correction_total / correction_window_size
                                    correction_end = correction_start + correction_window_size
                                    correction_spread = [avg_correction for j in range(correction_window_size)]


                        if needs_correction:
                            # fold this event's spread into the running correction vector ...
                            correction_total = sum(correction_spread)
                            existing_correction = correction[correction_start:correction_end]
                            correction_vals = [sum(tup) for tup in zip(correction_spread, existing_correction)]
                            correction[correction_start:correction_end] = correction_vals
                            # ... and shift the rest of the working volume series up by the total correction
                            vol[correction_start:] = [v + correction_total for v in vol[correction_start:]]
                            i += int(window_increment_percent * window_size) if window_increment_percent else 1
                            #i += 1
                        else:
                            #i += 1
                            i += int(window_increment_percent * window_size) if window_increment_percent else 1

                    # after the sweep, check the tail of the series: if its average volume is still
                    # significantly off the expected base volume, spread one final correction
                    # evenly over the last quarter of the series.
                    final_window_start = len(vol) - window_size
                    final_window_end = len(vol) - 1
                    final_basevol = get_basevol(vol, final_window_start, final_window_end)
                    if is_off_basevol(final_basevol, correct_basevol):
                        correction_start = int(len(vol) * .75)
                        correction_end = len(vol)
                        correction_window_size = correction_end - correction_start
                        correction_total = correct_basevol - final_basevol
                        avg_correction = correction_total / correction_window_size
                        correction_spread = [avg_correction for j in range(correction_window_size)]
                        # (no-op: correction_end already equals correction_start + correction_window_size)
                        correction_end = correction_start + correction_window_size
                        existing_correction = correction[correction_start:correction_end]
                        correction_vals = [sum(tup) for tup in zip(correction_spread, existing_correction)]
                        correction[correction_start:correction_end] = correction_vals

                    # get the name of the wasp seg
                    cur.execute("""
                        SELECT DISTINCT wasp
                        FROM segs 
                        WHERE swmm = ?
                    """, (swmm, ))
                    wasp = cur.fetchone()[0]

                    # turn the correction volume into flows and add to the flows table
                    balance_bucket = [c/timestep_secs for c in correction]
                    update_tups = []
                    # NOTE(review): the empty-list assignment above is immediately overwritten.
                    update_tups = [[i, 0, swmm, 0, wasp, str(swmm)+'balbucket', qin] for i, qin in enumerate(balance_bucket)]

                    # update the flows table with the correction inlet 
                    cur.executemany("""
                        INSERT INTO flows (timestep, swmm_source, swmm_sink, wasp_source, wasp_sink, name, flow) 
                        VALUES (?, ?, ?, ?, ?, ?, ?)
                    """, update_tups)


                # recalculate swmm volumes with the new dummy inlets
                calc_swmm_vols()
            elif correct_vol:
                # begin the alternative (Q/V-based) correction procedure
                print('correcting')

                # get all of the swmm segment numbers
                cur.execute("SELECT DISTINCT swmm FROM volumes2_b WHERE swmm IS NOT NULL")
                swmm_segs = [row[0] for row in cur.fetchall()]

                # directory beside the output file for per-segment diagnostic series
                series_dir = os.path.join(os.path.dirname(outpath), 'correction_series')
                if not os.path.isdir(series_dir):
                    os.mkdir(series_dir)
                # for each swmm segment, correct its volume
                for swmm in swmm_segs:

                    # parallel lists: diagnostic series and their labels for this segment
                    correction_series = []
                    correction_series_names = []

                    cur.execute("""
                        SELECT vol
                        FROM volumes2_b
                        WHERE swmm = ? 
                        ORDER BY timestep
                    """, (swmm, ))
                    vol = [row[0] for row in cur.fetchall()]

                    # keep an untouched copy of the raw volume for the diagnostics output
                    k = copy.copy(vol)
                    correction_series.append(k)
                    correction_series_names.append('vol')

                    cur.execute("""
                        SELECT flow, velocity 
                        FROM volumes2_a
                        WHERE swmm = ?
                        ORDER BY timestep
                    """, (swmm,))

                    flow, velocity = zip(*cur.fetchall())
                    correction_series.append(flow)
                    correction_series_names.append('flow')
                    correction_series.append(velocity)
                    correction_series_names.append('velocity')

                    cur.execute("""
                        SELECT DISTINCT length
                        FROM conduit_params2 WHERE swmm = ?
                    """, (swmm,))
                    swmm_length = cur.fetchone()[0]
                    correction_series.append([swmm_length for _ in range(len(vol))])
                    correction_series_names.append('length')

                    # continuity-based volume estimate per timestep: V = (Q / v) * L
                    try:
                        qv_series = [(f / v) * swmm_length for f, v in zip(flow, velocity)]
                    except:
                        # NOTE(review): bare except + interactive shell is debug scaffolding
                        # (zero velocity raises ZeroDivisionError here), and qv_series stays
                        # undefined if this fires — consider handling the error explicitly.
                        traceback.print_exc()
                        code.interact(local=locals())

                    correction_series.append(qv_series)
                    correction_series_names.append('qv_series')

                    # determine the correct base volume for the segment by looking 
                    # at the average of the volume at the beginning of the series.
                    # use the rolling window size as an arbitrary cut off point 
                    # to end the beginning average
                    correct_basevol = sum(vol[:window_size]) / window_size

                    # calculate the range of the volume timeseries, from its base vol to its peak
                    vol_range = max(vol) - correct_basevol

                    # all volume before the event_start index is not corrected and gets included
                    # in the calculation that determines the correct base volume
                    event_start_idx = 0

                    # if user sets event_start_percentage, find the index of the first timestep 
                    # where the volume exceeds this percentage of the vol_range
                    if event_start_percentage:
                        for i, v in enumerate(vol):
                            if v > ((vol_range * event_start_percentage) + correct_basevol):
                                event_start_idx = i
                                break

                    # if an event_start_idx other than 0 is specified
                    # calculate the correct base volume as the average volume from the beginning of the 
                    # series to the start index.
                    if event_start_idx:
                        start_range = event_start_idx - window_size
                        if start_range <= 0:
                            start_range = int(event_start_idx * .5)
                        # NOTE(review): len(range(start_range)) is simply start_range
                        correct_basevol = sum(vol[:start_range]) / len(range(start_range))

                    correction_series.append([correct_basevol for _ in range(len(vol))])
                    correction_series_names.append('constant_basevol')

                    # initialize a vector of 0s the same length as the volume time series.
                    # this vector will hold the final volume correction required at each timestep
                    correction = [0 for i in range(len(vol))]

                    # i is the timestep. start at time step 0
                    i = 0
                    # of portions of the volume time series.
                    def is_basevol(vol, lowerbound, upperbound):
                        # Dry-weather test: the spread (max - min) of the window must stay
                        # below the absolute signal threshold abs_basevol_signal.
                        spread = max(vol[lowerbound:upperbound]) - min(vol[lowerbound:upperbound])
                        return spread < abs_basevol_signal

                    def is_increasing(vol, lowerbound, upperbound):
                        # Rising across the window iff the sample at upperbound strictly
                        # exceeds the sample at lowerbound.
                        return vol[upperbound] > vol[lowerbound]

                    def is_off_basevol(basevol, correct_basevol):
                        # Significant deviation test: |difference| must exceed the
                        # correction_threshold fraction of the overall series range.
                        return abs(correct_basevol - basevol) > correction_threshold * vol_range

                    def get_basevol(vol, lowerbound, upperbound):
                        # Average volume over [lowerbound, upperbound), dividing by the
                        # nominal window width (not the slice length) as the original does.
                        width = upperbound - lowerbound
                        return sum(vol[lowerbound:upperbound]) / float(width)

                    def is_negative(vol, correct_basevol, lowerbound, upperbound):
                        # Decide whether the window [lowerbound, upperbound) is spuriously
                        # negative: too large a fraction of its samples sit below
                        # correct_basevol by more than the tolerance band.

                        # tolerance band as a fraction of the overall series range
                        dev_thresh = correction_threshold * vol_range
                        vol_window = vol[lowerbound:upperbound]
                        # count samples whose shortfall below the base volume exceeds the band
                        n_below = sum(1 for v in vol_window if correct_basevol - v > dev_thresh)
                        # significant when the below-band fraction exceeds the user-settable cutoff
                        return n_below / float(len(vol_window)) > negative_threshold_pct

                    # max_window_index is the timestep that signals to stop moving the locator window forward.
                    max_window_index = len(vol) - window_size
                    # number of timesteps for the length of the secondary window (called micro window in what follows)
                    micro_window_size = int(window_size * micro_window_pct)

                    target_swmm = -10
                    while i < max_window_index:
                        # before inspection, assume no correction necessary
                        needs_correction = False

                        # if the timestep is after the event_start_idx (defaults to 0, but otherwise control by the user, 
                        # see description of event start percentage parameter), continue without doing anything, otherwise 
                        # inspect to see if corection is necessary.
                        if i > event_start_idx:
                            # define the upperbound of the moving window for this timestep
                            upperbound = i + window_size
                            # calculate the average volume in this window
                            basevol = get_basevol(vol, i, upperbound)
                            # if the volume within this window passes the negative volume test, proceed with correction
                            ##if is_negative(vol, correct_basevol, i, upperbound):
                            qv_basevol = get_basevol(qv_series, i, upperbound)
                            if is_negative(vol, qv_basevol, i, upperbound):
                                needs_correction = True
                                # define the start and end of the secondary window that will move either forwards or 
                                # backwards to locate the end of the negative volume period.
                                micro_start = upperbound - micro_window_size
                                micro_end = upperbound
                                # if the volume in secondary window positioned at the end of the main moving window still 
                                # passes the is_negative volume test, then we have to push the secondary window forward until 
                                # the test no longer passes, indicating we've found the end of the is_negative volume period.
                                # otherwise, if the volume in the secondary window at the end of the main window fails the 
                                # is_negative test, then we are already outside the negative period and need to push the secondary 
                                # window inward to find where negative period ends. 
                                #if is_negative(vol, correct_basevol, micro_start, micro_end):
                                micro_qv_basevol = qv_basevol
                                if is_negative(vol, qv_basevol, micro_start, micro_end):
                                    while micro_end < max_window_index and is_negative(vol, micro_qv_basevol, micro_start, micro_end):
                                        micro_start += 1
                                        micro_end += 1
                                        micro_qv_basevol = get_basevol(qv_series, micro_start, micro_end)
                                else:
                                    # advance the secondary window backwards until negative period is found
                                    while micro_start > i and not is_negative(vol, micro_qv_basevol, micro_start, micro_end):
                                        micro_start = micro_start - 1
                                        micro_end = micro_end - 1
                                        micro_qv_basevol = get_basevol(qv_series, micro_start, micro_end)

                                # after looking for the end of the negative volume period, 
                                # if the secondary window is still before the end of the time series ...
                                if micro_end < max_window_index:
                                    # set the starting point of the correction to the current timestep
                                    correction_start = i
                                    # the end of the correction will be the end point of the secondary window, that is,
                                    # the approximate point just before the negative period ends
                                    correction_end = micro_end
                                    # extract the volume for this period and find the index of the minimum value
                                    vol_block = vol[correction_start:correction_end]
                                    qv_block = qv_series[correction_start:correction_end]

                                    min_vol = min(vol_block)
                                    min_idx = vol_block.index(min(vol_block))
                                    # initialize two zero vectors. These will be joined eventually 
                                    # to create the full correction vector for this negative period.
                                    # lower_correction_spread will have the corrections from the beginning
                                    # of the negative period to the lowest point. upper_correction_spread 
                                    # will have the correction values from the step after the lowest point 
                                    # to the end of the negative period.
                                    lower_correction_spread = [0 for _ in range(min_idx)]
                                    upper_correction_spread = [0 for _ in range(min_idx, len(vol_block))]
                                    # loop over the volume amounts in this period before the minimum value ...
                                    for j, qv_v in enumerate(zip(qv_block[:min_idx], vol_block[:min_idx])):
                                        # if the amount is less than the correct basevol, calculate the difference between 
                                        # the two. if the magnitude of the difference is smaller than the sum of the 
                                        # values in the lower_correction_spread, then skip over it---this correction is covered
                                        # by the earlier corrections. If the magnitude is larger,
                                        # add the difference between the magnitude and the sum of the corrections 
                                        # already in the lower_correction_spread to lower correction spread at the index 
                                        # corresponding to the current index in the vol_block
                                        qv, v = qv_v
                                        if v < qv:
                                            diff = qv - v
                                            total_correction = sum(lower_correction_spread)
                                            remainder = diff - total_correction
                                            if remainder > 0:
                                                lower_correction_spread[j] = remainder

                                    # for those values after the minimum value, we want to undo the 
                                    # effects of the correction and bring them down to base volume, 
                                    # preserving the rest of the volume curve as it was before the correction.
                                    # The result of this approach is a flat line as base volume during the negative
                                    # period.
                                    for j, qv_v in enumerate(zip(qv_block[min_idx:], vol_block[min_idx:])):
                                        qv, v = qv_v
                                        if v < qv:
                                            diff = v - min_vol
                                            total_correction = abs(sum(upper_correction_spread))
                                            remainder = diff - total_correction
                                            if remainder > 0:
                                                upper_correction_spread[j] = -remainder

                                    # combine the two spreads to get the resulting correction_vector
                                    correction_spread = lower_correction_spread + upper_correction_spread
                                else:
                                    # if it was necessary to move the secondary window forward to find the end 
                                    # of the negative period, it's possible that the secondary window will 
                                    # reach the end of the volume time series before finding it (that is, the negative
                                    # period is the whole tail of the event).
                                    # if the negative period is at the end of the volume time series, we take different 
                                    # steps to correct the volume. These steps preserve patterns in the tail,
                                    # rather than flat lining it, as it would be the case if the 
                                    # negative period was encountered before the end of the event.
                                    basevol = get_basevol(vol, micro_start, len(vol))
                                    correct_basevol = get_basevol(qv_series, micro_start, len(vol))
                                    correction_total = correct_basevol - basevol
                                    correction_start = i - 2 * window_size
                                    correction_end = i
                                    correction_window_size = correction_end - correction_start
                                    avg_correction = correction_total / correction_window_size
                                    correction_end = correction_start + correction_window_size
                                    correction_spread = [avg_correction for j in range(correction_window_size)]

                                i = correction_end
                            elif is_basevol(vol, i, upperbound) and is_off_basevol(basevol, qv_basevol):
                            #elif is_basevol(vol, i, upperbound) and is_off_basevol(basevol, correct_basevol):
                                # if this is a period of base vol and it is significantly off of the 
                                # expected base vol ...

                                # starting at the end of the current window, advance the secondary window until the volume
                                # series within the window is increasing and no longer in dry weather
                                min_vol = get_basevol(vol, i, upperbound)
                                micro_start = i
                                while micro_start < max_window_index:
                                    micro_end = micro_start + micro_window_size
                                    this_vol = get_basevol(vol, micro_start, micro_end)
                                    if min_vol > this_vol:
                                        min_vol = this_vol

                                    if not is_basevol(vol, micro_start, micro_end) and is_increasing(vol, micro_start, micro_end):
                                        moving_end = micro_end
                                        # now that the general end of the dry weather period has been found, 
                                        # gradually move the secondary window inward until it encounters dry 
                                        # weather conditions again.
                                        while True:
                                            while moving_end > micro_start and not is_basevol(vol, micro_start, moving_end):
                                                moving_end = moving_end - 1
                                            if moving_end == micro_start:
                                                micro_start = micro_start - micro_window_size + 1
                                                micro_end = moving_end
                                            elif moving_end == micro_end:
                                                break
                                            else:
                                                micro_start = moving_end - micro_window_size + 1
                                                micro_end = moving_end


                                        # calculate the average volume at the end of the dry weather period
                                        #basevol = get_basevol(vol, micro_start, micro_end)

                                        # does the volume need correcting?
                                        correct_basevol = get_basevol(qv_series, i, upperbound)
                                        needs_correction = is_off_basevol(min_vol, correct_basevol)
                                        correction_total = correct_basevol - basevol

                                        basevol_range = micro_end - i
                                        # start the correction before the current timestep by half the distance
                                        # between the current timestep and the end of the dry weather period.
                                        correction_start = int(max(0, i - (.5 * basevol_range)))
                                        correction_end = min(len(vol)-1, i + (basevol_range - (i - correction_start)))
                                        
                                        i += micro_window_size

                                        break
                                    else:
                                        micro_start += 1

                                # if the secondary window reached the end of the series in the search for the end of the 
                                # dry weather period, that means the dry weather conditions belong to the full tail of the event.
                                # look at the volume at the end of the series to determine
                                # whether correction is necessary.
                                if micro_start >= max_window_index:
                                    basevol = get_basevol(vol, micro_start, len(vol))
                                    correct_basevol = get_basevol(qv_series, micro_start, len(vol))
                                    needs_correction = is_off_basevol(basevol, correct_basevol)
                                    correction_total = correct_basevol - basevol

                                    correction_start = i - 4 * window_size
                                    correction_end = max_window_index


                                    i += micro_window_size

                                # if correction is necessary, create the correction spread vector,
                                # which will be added to the primary correction vector at the corresponding timesteps
                                if needs_correction:
                                    correction_window_size = correction_end - correction_start
                                    avg_correction = correction_total / correction_window_size
                                    correction_end = correction_start + correction_window_size
                                    correction_spread = [avg_correction for j in range(correction_window_size)]


                        if needs_correction:
                            correction_total = sum(correction_spread)
                            existing_correction = correction[correction_start:correction_end]
                            correction_vals = [sum(tup) for tup in zip(correction_spread, existing_correction)]
                            correction[correction_start:correction_end] = correction_vals 
                            vol[correction_start:] = [v + correction_total for v in vol[correction_start:]]
                            i += int(window_increment_percent * window_size) if window_increment_percent else 1
                            #i += 1
                        else:
                            #i += 1
                            i += int(window_increment_percent * window_size) if window_increment_percent else 1

                    final_window_start = len(vol) - window_size
                    final_window_end = len(vol) - 1
                    final_basevol = get_basevol(vol, final_window_start, final_window_end)
                    correct_basevol = get_basevol(qv_series, final_window_start, final_window_end)
                    if is_off_basevol(final_basevol, correct_basevol):
                        correction_start = int(len(vol) * .75)
                        correction_end = len(vol)
                        correction_window_size = correction_end - correction_start
                        correction_total = correct_basevol - final_basevol
                        avg_correction = correction_total / correction_window_size
                        correction_spread = [avg_correction for j in range(correction_window_size)]
                        correction_end = correction_start + correction_window_size
                        existing_correction = correction[correction_start:correction_end]
                        correction_vals = [sum(tup) for tup in zip(correction_spread, existing_correction)]
                        correction[correction_start:correction_end] = correction_vals

                    # get the name of the wasp seg
                    cur.execute("""
                        SELECT DISTINCT wasp
                        FROM segs 
                        WHERE swmm = ?
                    """, (swmm, ))
                    wasp = cur.fetchone()[0]

                    correction_series.append(correction)
                    correction_series_names.append('correction')
                    correction_series.append(vol)
                    correction_series_names.append('vol_corrected')

                    correction_series_rows = zip(*correction_series)

                    with open(os.path.join(series_dir, str(swmm) + '.csv'), 'w') as f:
                        writer = csv.writer(f, lineterminator='\n')
                        writer.writerow(correction_series_names)
                        writer.writerows(correction_series_rows)

                    # turn the correction volume into flows and add to flows table
                    balance_bucket = [c/timestep_secs for c in correction]
                    update_tups = []
                    update_tups = [[i, 0, swmm, 0, wasp, str(swmm)+'balbucket', qin] for i, qin in enumerate(balance_bucket)]

                    # update the flows table with the correction inlet 
                    cur.executemany("""
                        INSERT INTO flows (timestep, swmm_source, swmm_sink, wasp_source, wasp_sink, name, flow) 
                        VALUES (?, ?, ?, ?, ?, ?, ?)
                    """, update_tups)

                # recalculate swmm volumes with the new dummy inlets
                calc_swmm_vols()

            # volumes3 is similar to volumes2, but now depth are replaced with new columns swmm_depth and swmm_velocity. 
            # These columns will hold the volume weighted depths and velocity by swmm segment. additionally, this 
            # table also has a swmm_vol col that will hold the total volume for each swmm segment
            # populate volumes3. swmm_vol is the sum of the volumes for each parallel conduit in the swmm segment. 
            # swmm_depth and swmm_velocity are the volume weighted depth and velocity for each parallel in the swmm segment

            # these are alternative definitions  for the depth calculation
            #depth_calc1 = "(v2.flow / wasp_flows.wasp_flow) * v2.depth"
            #depth_calc2 = "(ABS(v2.flow)/ wasp_flows.wasp_flow) * v2.depth"

            # these are alternative definitions for the velocity calculation
            #velocity_calc1 = "(v2.flow / wasp_flows.wasp_flow) * v2.velocity"
            #velocity_calc2 = "(ABS(v2.flow) / wasp_flows.wasp_flow) * v2.velocity"

            # formulas for weighting component segment velocities and depths for combined swmm segments
            if flow_weight_sums:
                depth_calc = """
                    CASE WHEN wasp_flows.wasp_flow = 0
                        THEN (1 / wasp_flows.seg_count) * v2.depth
                        ELSE (ABS(v2.flow) / wasp_flows.wasp_flow) * v2.depth
                    END
                """
                velocity_calc = """
                    CASE WHEN wasp_flows.wasp_flow = 0
                        THEN (1 / wasp_flows.seg_count) * v2.velocity
                        ELSE (ABS(v2.flow) / wasp_flows.wasp_flow) * v2.depth
                    END
                """
            else:
                velocity_calc = "velocity * (1.0 / wasp_flows.seg_count)"
                depth_calc = "v2.depth * (1.0 / wasp_flows.seg_count)"

            # formula for calculating total flow, note that this is only used when flow_weight_sums is True
            tflow_calc = "SUM(ABS(flow))"

            cur.execute("""
                INSERT INTO volumes3x  (timestep, wasp, vol, wasp_depth, wasp_velocity, flow, seg_count)
                SELECT v2.timestep, v2.wasp, v2.vol, """ + depth_calc + """, 
                    """ + velocity_calc + """, v2.flow, wasp_flows.seg_count
                FROM (SELECT """ + tflow_calc + """ AS wasp_flow, timestep, wasp, COUNT(*) AS seg_count
                    FROM volumes2_b
                    GROUP BY wasp, timestep
                ) AS wasp_flows
                INNER JOIN volumes2_b v2 ON v2.timestep = wasp_flows.timestep AND v2.wasp = wasp_flows.wasp
            """)

        elif using_swmm_volume:
            cur.execute("""
                INSERT INTO volumes3x (timestep, wasp, vol, wasp_depth, wasp_velocity)
                SELECT v4.timestep, v4.wasp, v4.volume, (v4.volume / wasp_vols.wasp_vol) * v4.depth, 
                    (v4.volume / wasp_vols.wasp_vol) * v4.velocity
                FROM (SElECT timestep, wasp, SUM(volume) AS wasp_vol
                    FROM volumes2
                    GROUP BY wasp, timestep
                ) AS wasp_vols
                INNER JOIN volumes2 v4 ON v4.timestep = wasp_vols.timestep AND v4.wasp = wasp_vols.wasp
                """)
        timing('populate volumes3x')

        # sum the quantities weighted in the previous and put the results into volumes4x
        cur.execute("""
            INSERT INTO volumes4x (timestep, wasp, vol, depth, velocity)
            SELECT timestep, wasp, SUM(vol), SUM(wasp_depth), SUM(wasp_velocity)
            FROM volumes3x
            GROUP BY wasp,timestep
        """)

        timing('populate volumes4x')

        conn.commit()

    finalize(dbpath, outpath, timestep_secs, round_num=round_num, using_swmm_volume=using_swmm_volume)

def finalize(dbpath, outpath, timestep_secs, round_num=5, using_swmm_volume=False):
    """
        this function takes the results of the process function in the db pointed to 
        by dbpath and generates the final tables corresponding to how the output will be written
        to file. It also outputs the remaining time series

        dbpath : path to the SQLite database populated by the process function
        outpath : path to the .hyd file that export_data will write
        timestep_secs : length of a single timestep in seconds (passed through to export_data)
        round_num : number of decimal places for exported values (passed through to export_data)
        using_swmm_volume : accepted for signature compatibility with the caller; not used here
    """

    with closing(sqlite3.connect(dbpath)) as conn:
        # autocommit mode: each statement takes effect immediately
        conn.isolation_level = None
        cur = conn.cursor()
        tune_db(cur)

        # aggregate segment inflows into single inflows and aggregate the flows for 
        # the final swmm segment for each wasp segment---these are the final interface flows.
        cur.execute("CREATE TABLE flows2 (timestep int, wasp_source int, wasp_sink int, flow float)")
        cur.execute("""
            INSERT INTO flows2 (timestep, wasp_source, wasp_sink, flow)
                SELECT * 
                FROM (SELECT timestep, wasp_source, wasp_sink, SUM(flow) AS flow
                      FROM flows
                      WHERE wasp_source = 0
                      GROUP BY wasp_sink, timestep
                    UNION SELECT timestep, wasp_source, wasp_sink, SUM(flow)
                        FROM flows
                        WHERE wasp_source <> 0 AND swmm_source IN (SELECT MAX(swmm) FROM segs GROUP BY wasp)
                        GROUP BY wasp_source, wasp_sink, timestep
                )
         """)
        timing('populate flows2')

        # column names for the final table. The datatype indicates what kind of data the row is, 
        # volume/depth/velocity data (1) or flow data (2) ordering1 and 2 contain the values that will 
        # determine how the rows are finally ordered to produce the order found in .hyd files.
        final_colnames = ['datatype', 'timestep', 'wasp_source', 'wasp_sink', 'flow', 'vol', 'depth', 
            'velocity', 'ordering1', 'ordering2']
        # one SQL type per column name. (A previous version of this list carried a stray
        # eleventh 'int' that zip() silently discarded; the lists now have matching lengths.)
        final_datatypes = ['int', 'int', 'int', 'int', 'float', 'float', 'float', 'float', 'int', 'int']

        # put them into a comma separated list that can be used in a query string
        final_cols = ', '.join([colname + ' ' + datatype for colname, datatype in zip(final_colnames, final_datatypes)])
        final_colnames_str = ', '.join(final_colnames)

        # create the final tables
        cur.execute("CREATE TABLE flows_final (" + final_cols + ")")
        cur.execute("CREATE TABLE volumes_final (" + final_cols + ")")
        cur.execute("CREATE TABLE final (" + final_cols + ")")

        # populate the flows_final table. note that the columns vol, depth, and velocity 
        # are NULL. These are included so that this table can be unioned with the volumes_final table shortly. 
        # flow data is ordered first by its wasp_source number, unless it is zero, in which case its wasp_sink number 
        # is used, and second by its wasp_source, regardless of whether its zero or not. This forces the order that 
        # each segment's outflow interface will follow the external flow interface for that segment.
        cur.execute("""
            INSERT INTO flows_final (""" + final_colnames_str + """)
                SELECT 2, timestep, wasp_source, wasp_sink, flow, NULL AS vol, NULL AS depth, NULL AS velocity, 
                CASE WHEN wasp_source = 0 THEN wasp_sink ELSE wasp_source END AS ordering1, wasp_source AS ordering2
                FROM flows2
            """)

        timing('populate flows_final')

        # populate the volumes_final table. note that the wasp_source and flow columns are null, 
        # as these columns only apply to the flow data.
        cur.execute("""
            INSERT INTO volumes_final (""" + final_colnames_str + """)
                SELECT 1, timestep, NULL AS wasp_source, wasp AS wasp_sink, NULL AS flow, vol, depth, velocity, 
                       wasp AS ordering1, wasp AS ordering2
                FROM volumes4x
            """)

        timing('populate volumes_final')

        # combine the two final tables into one.
        # NOTE(review): plain UNION deduplicates, but the datatype column (1 vs 2) keeps
        # the two sides disjoint, so this behaves like UNION ALL here.
        cur.execute("""
            INSERT INTO final (""" + final_colnames_str + """)
                SELECT  *
                FROM (
                    SELECT * FROM flows_final
                    UNION
                    SELECT * FROM volumes_final
                )
            """)
        # index to support the ordered scans performed during export
        cur.execute("CREATE INDEX idx_final ON final(timestep, datatype, ordering1, ordering2)")
        timing('populate final')

        export_data(outpath, dbpath, timestep_secs, final_colnames, round_num=round_num)

        timing('export hyd')

def export_data(outpath, dbpath, timestep_secs, final_colnames, round_num=5):
    """
        write the contents of the 'final' table in the database at dbpath out to 
        a .hyd file at outpath.

        outpath : path to the .hyd file to create
        dbpath : path to the SQLite database holding the populated 'final' table
        timestep_secs : length of a single timestep in seconds, written into the header line
        final_colnames : column names of the 'final' table, in the same order SELECT * returns them
        round_num : number of decimal places to round exported values to
    """

    with closing(sqlite3.connect(dbpath)) as cnxn:
        cnxn.isolation_level = None
        cur = cnxn.cursor()
        tune_db(cur)

        # open the hyd file outpath
        with open(outpath, 'w') as f:
            # the following lines get the numbers for the top of the hyd file.
            # count the number of volume entries for the first timestep to get the number of segs
            cur.execute('SELECT COUNT(*) FROM final WHERE timestep = 0 AND datatype = 1')
            num_segs = cur.fetchone()[0]
            # count the number of flow entries for the first timestep to get the total number of interfaces
            cur.execute('SELECT COUNT(*) FROM final WHERE timestep = 0 AND datatype = 2')
            num_interfaces = cur.fetchone()[0]

            # retrieve the maximum timestep and calculate the duration
            cur.execute('SELECT MAX(timestep) FROM final')
            duration = timestep_secs * cur.fetchone()[0]

            tab = ' '*4
            # create the top line for the hyd file
            f.write(tab.join([str(num_segs), str(num_interfaces), str(int(timestep_secs)), '0', str(int(duration)), '1']) + '\n')

            # select the flow rows for the first timestep and extract the interface numbers then write them to file
            cur.execute("""
                SELECT wasp_source, wasp_sink 
                FROM final 
                WHERE timestep = 0 AND datatype = 2 
                ORDER BY ordering1, ordering2""")
            for row in cur.fetchall():
                f.write(str(row[0]) + tab + str(row[1]) + '\n')

            # select all the data from the final table in the appropriate order ...
            cur.execute('SELECT * FROM final ORDER BY timestep, datatype, ordering1, ordering2')

            # ... then write it to file. Iterate the cursor directly rather than calling
            # fetchall() so the whole (potentially very large) table is never held in memory;
            # also only stringify the columns each row type actually writes.
            for raw_row in cur:
                row = dict(zip(final_colnames, raw_row))
                if row['datatype'] == 1:
                    # volume row: leading tab, volume, placeholder '0', depth, velocity
                    vol = str(round(row['vol'], round_num))
                    depth = str(round(row['depth'], round_num))
                    velocity = str(round(row['velocity'], round_num))
                    f.write(tab.join(['', vol, '0', depth, velocity]) + '\n')
                else:
                    # flow row: leading tab then the flow value
                    f.write(tab.join(['', str(round(row['flow'], round_num))]) + '\n')

def load_ini(path):
    """
        Interactively collect the input parameters for the process function, or load
        them from an existing hydmaker.ini file. The chosen settings are written back
        to the hydmaker.ini file at `path` (plus a copy stored next to the output
        file) and returned as a dictionary.

        path : location of the hydmaker.ini settings file to read and/or write
    """
    ini = {}

    def process_ini(path):
        # takes the path to an ini file, parses the settings, and returns them in a dictionary
        parser = ConfigParser.SafeConfigParser()
        parser.read(path)
        for key, value in parser.items('main'):
            # NOTE(review): eval of file contents executes arbitrary expressions; only
            # load .ini files you trust (ast.literal_eval would be safer but stricter).
            ini[key] = eval(value, {}, {})
            if key in ('event_start', 'event_end') and ini[key]:
                ini[key] = datetime.strptime(ini[key], '%Y-%m-%d %H:%M:%S')
        return ini

    def write_cfg(ini):
        # create a parser object for the settings and write them to file.
        parser = ConfigParser.SafeConfigParser()
        parser.add_section('main')
        for key, value in ini.items():
            if key in ('event_start', 'event_end') and value:
                value = value.strftime('%Y-%m-%d %H:%M:%S')
            # strings are stored as raw-string literals so they survive the eval()
            # round-trip in process_ini (e.g. Windows paths with backslashes)
            parser.set('main', str(key), "r'" + value + "'" if isinstance(value, str) else str(value))

        with open(path, 'w') as f:
            parser.write(f)

        # keep a copy of the settings actually used next to the output file
        with open(os.path.join(os.path.dirname(ini['outpath']), 'ini_file_used.ini'), 'w') as f:
            parser.write(f)

    # if any .ini file is found, ask user if they want to load it from file
    if os.path.exists(path):
        print("A settings file was found at " + path + ". Do you want to use the options in this file?")
        response = raw_input("y/n >>> ")
        # BUGFIX: guard against empty input before indexing the first character
        if response and response[0] in ('Y', 'y'):
            ini = process_ini(path)
            write_cfg(ini)
            return ini

    # error message to print to the user when they accidentally generate a keyboard interrupt exception
    key_err_msg = "Keyboard error. Try again, fat fingers\n"

    ### ask the user if they want to load their settings from an .ini file of their choosing
    while True:
        try:
            response = raw_input("Do you want to load the settings from a hydmaker.ini file? (y/n) >> ")[0]
        # BUGFIX: 'except KeyboardInterrupt, IndexError:' bound the interrupt to the
        # *name* IndexError instead of catching both exception types
        except (KeyboardInterrupt, IndexError):
            print("Invalid input.")
            continue  # BUGFIX: 'response' would be unbound below without this

        if response.lower() == 'y':
            while True:
                try:
                    ini_path = raw_input("Path to hydmaker.ini file >> ")
                except KeyboardInterrupt:
                    print('Invalid input.')
                    continue  # BUGFIX: avoid testing an unbound 'ini_path'

                if os.path.exists(ini_path) and not os.path.isdir(ini_path):
                    return process_ini(ini_path)

                print("Invalid path.")
        else:
            break

    msg = "Please note that this program assumes data coming from an .rpt or .out file are in US units: CFS, FEET, and " + \
          "feet per second."
    print(msg)

    ini['using_swmm_volume'] = False

    while True:
        try:
            using_swmm_volume = raw_input('Are you using the volume data from modified SWMM output? (y/n) >> ')
        except KeyboardInterrupt:
            print(key_err_msg)
            continue

        # any answer other than y/n keeps the default of False (existing behavior)
        if using_swmm_volume.lower() == 'y':
            ini['using_swmm_volume'] = True
        elif using_swmm_volume.lower() == 'n':
            ini['using_swmm_volume'] = False

        break

    # determine if user's data is binary out file
    while True:
        try:
            use_bin_output = raw_input("Is your data in a SWMM binary output file? (y/n) >> ")[0]
        except (KeyboardInterrupt, IndexError):
            print(key_err_msg)
            continue

        if use_bin_output.lower() == 'y':
            use_bin_output = True
        elif use_bin_output.lower() == 'n':
            use_bin_output = False
        else:
            # BUGFIX: previously any other answer left use_bin_output as a (truthy)
            # string, which the 'if use_bin_output:' test below treated as "yes"
            print('Invalid input.')
            continue
        break

    if use_bin_output:
        # if the user has a binary out file, get the path ...
        ini['rptpath'] = None
        while True:
            try:
                binary_path = raw_input("Path to SWMM binary output file (*.out) >> ")
            except KeyboardInterrupt:
                print(key_err_msg)
                continue

            if os.path.exists(binary_path) and not os.path.isdir(binary_path):
                if os.path.splitext(binary_path)[1].lower() != '.out':
                    print("That doesn't look like binary output file.")
                else:
                    ini['binarypath'] = binary_path
                    break
            else:
                print("Invalid path.")
    else:
        # ... otherwise get the path to the rpt file
        ini['binarypath'] = None
        while True:
            try:
                rpt_path = raw_input("Path to SWMM report file (*.rpt) >> ")
            except KeyboardInterrupt:
                print(key_err_msg)
                continue

            if os.path.exists(rpt_path) and not os.path.isdir(rpt_path):
                if os.path.splitext(rpt_path)[1].lower() != '.rpt':
                    print("That doesn't look like an .rpt file.")
                else:
                    ini['rptpath'] = rpt_path
                    break
            else:
                print('Invalid path.')

    # get the path to the .inp file
    while True:
        try:
            inp_path = raw_input("Path to SWMM input file (*.inp) >> ")
        except KeyboardInterrupt:
            print(key_err_msg)
            continue  # BUGFIX: 'inp_path' would be unbound below without this

        if os.path.exists(inp_path) and not os.path.isdir(inp_path):
            if os.path.splitext(inp_path)[1].lower() != '.inp':
                print("That doesn't look like an .inp file.")
            else:
                ini['inppath'] = inp_path
                break
        else:
            print("Invalid path.")

    # the segmentation map paths
    segmap_paths = []
    while True:
        if segmap_paths:
            print('Segmentation file paths:')
            for i, segmap_path in enumerate(segmap_paths):
                print('  ' + str(i + 1) + '.  ' + os.path.basename(segmap_path))

        try:
            segmap_path = raw_input("Add a path to a segmentation file (press enter to continue) >> ")
        except KeyboardInterrupt:
            print(key_err_msg)
            continue  # BUGFIX: avoid re-testing the stale 'segmap_path' from the listing loop

        if os.path.exists(segmap_path) and not os.path.isdir(segmap_path):
            if segmap_path in segmap_paths:
                print("You already entered that one.")
            else:
                segmap_paths.append(segmap_path)
        else:
            # an empty entry finishes the list, but only once at least one path was given
            if not segmap_path and len(segmap_paths) > 0:
                ini['segmap_paths'] = segmap_paths
                break
            print("No such file.")

    # get the output directory path
    while True:
        try:
            output_dir = raw_input('Path to output directory >> ')
        except KeyboardInterrupt:
            print(key_err_msg)
            continue  # BUGFIX: 'output_dir' would be unbound below without this

        if os.path.isdir(output_dir):
            break

        print("Invalid directory.")

    # get the name for the resulting hyd text file
    while True:
        try:
            out_name = raw_input('File name for .hyd text file output >> ')
        except KeyboardInterrupt:
            print(key_err_msg)
            continue  # BUGFIX: 'out_name' would be unbound below without this

        if out_name:
            out_path = os.path.join(output_dir, out_name)

            if os.path.exists(out_path):
                try:
                    response = raw_input('A file with that name already exists. Do you want to overwrite it? (y/n) >> ')[0]
                except (KeyboardInterrupt, IndexError):
                    print(key_err_msg)
                    continue  # BUGFIX: otherwise the stale 'response' from an earlier prompt is tested

                if response.lower() == 'y':
                    ini['outpath'] = out_path
                    break
            else:
                ini['outpath'] = out_path
                break

    # get input regarding the desired numerical precision
    while True:
        try:
            round_num = raw_input("Number of digits after the decimal place? >> ")
        except KeyboardInterrupt:
            print(key_err_msg)
            continue

        try:
            # rejects non-numeric input and values like '3.5' (int('3.5') raises)
            assert int(round_num) == float(round_num)
        except (ValueError, AssertionError):
            print("Invalid input.")
        else:
            ini['round_num'] = int(round_num)
            break

    if not ini['using_swmm_volume']:
        # determine if the user wants to using a moving average filter
        while True:
            try:
                msg = 'Do you want to pass flow, depth, and velocity data through a moving average filter? (y/n) >> '
                filter_data = raw_input(msg)[0]
            except (KeyboardInterrupt, IndexError):
                print(key_err_msg)
                continue

            filter_data = filter_data.lower() == 'y'

            break

        # if the user wants a moving average filter, determine the desired window size
        if filter_data:
            while True:
                try:
                    filter_mins = raw_input('Moving average window size in minutes >> ')
                except KeyboardInterrupt:
                    print(key_err_msg)
                    continue  # BUGFIX: 'filter_mins' would be unbound below without this

                try:
                    ini['filter_mins'] = int(filter_mins)
                except ValueError:
                    print('Invalid input.')
                    continue
                else:
                    break
        else:
            ini['filter_mins'] = None
    else:
        ini['filter_mins'] = None

    # correction_params is a list of the default correction parameters.
    # the following prompts allow the user to change these parameters, if they desire the correction
    # routine to run on their data

    class CorrectionParameter(object):
        # simple record describing one tunable volume-correction parameter:
        # variable   : keyword-argument name passed to the correction routine
        # label      : short human-readable name shown in the menu
        # value      : current (default) value
        # msg        : long description printed when the user edits the parameter
        # prompt     : short name used in the entry prompt
        # conversion : callable converting the raw input string to the proper type
        # validator  : optional predicate the converted value must satisfy
        def __init__(self, variable, label, value, msg, prompt, conversion, validator=None):
            self.variable = variable
            self.label = label
            self.value = value
            self.msg = msg
            self.prompt = prompt
            self.conversion = conversion
            self.validator = validator

    correction_params = [
        CorrectionParameter(variable='window_size',
                            label='Moving window size for location of dry weather periods',
                            value=400,
                            msg=("The procedure locates periods of dry weather that "
                            "may require correction by moving a fixed window of "
                            "specified size over the volume timeseries for each "
                            "segment. The window size is measured in number of "
                            "timesteps. The default is 400. The moving window "
                            "size also affects how the procedure determines the "
                            "expected base volume. When the use doesn't specify "
                            "an event start percentage (parameter 5 below), the "
                            "expected base volume is the average of the volume "
                            "within the locator window at the start of the "
                            "event. If the user does supply an event start "
                            "percentage, the base volume is the average of the "
                            "values from the start of the series to the "
                            "timestep half way between the start of the series "
                            "and the timestep at which the volume first exceeds "
                            "the amount of volume determined by the event start "
                            "percentage."),
                            prompt="window size",
                            conversion=int,
                            validator=lambda x: x > 1),
        CorrectionParameter(variable='abs_basevol_signal',
                            label='Dry-weather-test threshold',
                            value=5,
                            msg=("For each block of the volume timeseries examined "
                            "by the dry-weather-locator window, the procedure "
                            "performs a test to decide whether the current "
                            "block is part of a period of dry weather, in which "
                            "case it must be checked further to see if the "
                            "volume matches expectations. The decision as to "
                            "whether the block is in a period of dry weather is "
                            "decided by comparing the absolute difference "
                            "between the minimum and maximum values within the "
                            "block against a specified threshold (the dry-weather-test  "
                            "threshold). When this "
                            "difference is below the threshold, the block is a "
                            "dry weather period and gets treated further to "
                            "determine if it has the expected volume or needs "
                            "to be corrected. The default value for the "
                            "dry-weather-test threshold is 5 m3. "),
                            prompt='dry weather threshold',
                            conversion=float,
                            validator=lambda x: x > 0),
        CorrectionParameter(variable='correction_threshold',
                            label='Expected maximum base volume deviation percentage',
                            value=.0005,
                            msg=("This parameter sets the minimum allowable absolute "
                            "difference between the average volume in a dry "
                            "weather period and the expected base volume, i.e. "
                            "it determines when dry weather periods should be "
                            "corrected. It's entered as a percentage of the "
                            "total event volume range, where event volume range "
                            "is taken to be the difference between peak and "
                            "base volume. The default percentage is .0005. "),
                            prompt='maximum base volume deviation percentage',
                            conversion=float,
                            validator=lambda x: x >= 0 and x <= 1),
        CorrectionParameter(variable='negative_threshold_pct',
                            label='Negative volume timestep threshold percent',
                            value=.05,
                            msg=("This parameter determines what percentage of the "
                            "timesteps within the current dry weather locator "
                            "period window are permitted to be below the "
                            "expected base volume before a correction is "
                            "triggered.  For instance, if the parameter is set "
                            "to .05 (the default), when more than 5% of the "
                            "timesteps within the window are negative, then "
                            "Hydmaker will correct that portion of the "
                            "timeseries. "),
                            prompt='negative volume percent',
                            conversion=float,
                            validator=lambda x: x <= 1 and x >= 0),
        CorrectionParameter(variable='event_start_percentage',
                            label='Event start percentage',
                            value=None,
                            msg=("This parameter controls when the procedure begins "
                            "looking for periods that need correction. By "
                            "default, this parameter is set to None, and the "
                            "procedure begins looking for correction spots "
                            "right away. Alternatively, you can set this to "
                            "some percentage and the procedure will only begin "
                            "looking for periods to correct after it's observed "
                            "fluctuations in the volume that exceed this "
                            "percentage multiplied by the volume range of the "
                            "timeseries. You can use this parameter to avoid "
                            "undesired corrections at the beginning of events. "),
                            prompt='event start percent',
                            conversion=float,
                            validator=lambda x: x >= 0 and x <= 1),
        CorrectionParameter(variable='window_increment_percent',
                            label='Locator window increment percent',
                            value=.25,
                            msg=("This parameter controls how a far forward the "
                            "locator window moves at each step. The default is "
                            ".25, meaning the window advances one quarter of "
                            "its size at each step. You can increase this "
                            "percentage to improve the run time of the "
                            "correction procedure, although doing so will mean "
                            "the series is examined less closely. If you set "
                            "the percentage to 0, the window will advance one "
                            "timestep at a time."),
                            prompt='window increment percent',
                            conversion=float,
                            # BUGFIX: was 'x >= 0 <= 1', a chained comparison that never
                            # checked the upper bound; this parameter is a percentage in [0, 1]
                            validator=lambda x: 0 <= x <= 1)
        ]

    # by default, assume that the user does not want to correct volume
    ini['correct_vol'] = False
    if not ini['using_swmm_volume']:
        # determine if user wants to correct the volume
        while True:
            try:
                msg = "Do you want to correct segments when their volumes stabilize at levels above or below their " \
                      + "known baselines? The process will add dummy inflows to segments that balance the volume " \
                      + "as needed. (y/n) >> "
                correct_vol = raw_input(msg)[0]
            except (KeyboardInterrupt, IndexError):
                print(key_err_msg)
                continue  # BUGFIX: 'correct_vol' would be unbound below without this

            if correct_vol.lower() == 'y':
                ini['correct_vol'] = True
            elif correct_vol.lower() == 'n':
                ini['correct_vol'] = False
            else:
                print('Invalid input.')
                continue

            break

        if ini['correct_vol']:
            while True:
                msg = "There are several parameters that you can change to control the behavior of the volume correction " \
                    + "procedure. Would you like to change them from their default values?"
                print(msg)
                try:
                    reject_defaults = raw_input("Change default volume correction parameters? (y/n) >> ")[0]
                    reject_defaults = bool(re.match('y', reject_defaults, re.IGNORECASE))
                except (KeyboardInterrupt, IndexError):
                    print("Invalid input.")
                else:
                    break

            if reject_defaults:
                while True:
                    msg = "There are " + str(len(correction_params)) + " parameters that you can set to change the behavior " + \
                        "of the volume correction procedure."
                    print(msg)
                    menu = [str(i) + ' - ' + param.label + ' (current value: ' + str(param.value) + ')'
                            for i, param in enumerate(correction_params)]
                    print('\n'.join(menu))
                    print("Enter the parameter number you want to change. Leave blank and press enter to continue.")
                    choice = raw_input(">> ")

                    if choice == '':
                        break
                    else:
                        try:
                            choice = int(choice)
                            assert choice >= 0 and choice < len(correction_params)
                        except (ValueError, AssertionError):
                            print("Invalid choice.")
                        else:
                            while True:
                                param = correction_params[choice]
                                print("Volume correction parameter " + str(choice) + ": " + param.label)

                                print("Description: " + param.msg)
                                print("Current value: " + str(param.value))
                                value = raw_input('Enter new value >> ')

                                try:
                                    value = param.conversion(value)
                                    if param.validator:
                                        assert param.validator(value)
                                except (ValueError, TypeError, AssertionError):
                                    print("Invalid value.")
                                else:
                                    print("Correction parameter value set.")
                                    correction_params[choice].value = value
                                    break

    correction_param_args = {param.variable : param.value for param in correction_params}
    correction_param_args['micro_window_pct'] = .25

    # merge the correction parameters into the settings dictionary
    # (was 'dict(ini.items() + correction_param_args.items())', which is Python-2-only)
    ini.update(correction_param_args)

    # determine if the user wants to include a final dummy segment. This option should be used if the
    # final segment in the user's segment map has inflows. The final segment of a wasp model cannot have inflows.
    while True:
        try:
            msg = "WASP requires that the final segment have no external interface flows. " \
                  + "If the inlet nodes of any of the CONDUITs in your final WASP segment have " \
                  + "non-zero flow in the [DWF] section of the *.inp file or if there are any INFLOWs " \
                  + "in the final WASP segment, you must choose to create a dummy segment at the end " \
                  + "to meet WASP's requirements."
            print(msg)
            dummy_end = raw_input('Include final dummy segment? (y/n) >> ')
            if re.match('y|n', dummy_end, re.IGNORECASE):
                ini['dummy_end'] = bool(re.match('y', dummy_end, re.IGNORECASE))
                break
        except KeyboardInterrupt:
            print(key_err_msg)

    # prompt the user for a desired start and end time, if they don't want to use the start and end time of
    # the rpt or binary file
    ini['event_start'] = None
    ini['event_end'] = None
    while True:
        try:
            set_event_limits = raw_input("Set event start and end dates? >> ")
            if re.match('n', set_event_limits, re.IGNORECASE):
                break
            elif re.match('y', set_event_limits, re.IGNORECASE):
                while True:
                    event_start = raw_input("event start (yyyy-mm-dd HH:MM:SS >> ")
                    try:
                        event_start = datetime.strptime(event_start, "%Y-%m-%d %H:%M:%S")
                    except ValueError:
                        continue
                    else:
                        ini['event_start'] = event_start
                        break

                while True:
                    event_end = raw_input("event end (yyyy-mm-dd HH:MM:SS) >> ")

                    try:
                        event_end = datetime.strptime(event_end, "%Y-%m-%d %H:%M:%S")
                    except ValueError:
                        continue
                    else:
                        ini['event_end'] = event_end
                        break
                break

        except KeyboardInterrupt:
            print(key_err_msg)

    ini['use_inp_inflows'] = False
    while True:
        try:
            msg = 'If the .inp file contains any [INFLOWS] timeseries for any of the inlets in system, ' \
                  + 'should these series be used as the dry weather flow for the conduits that reference them?'
            print(msg)
            resp = raw_input('(y/n) >> ')
            # BUGFIX: IndexError from resp[0] on empty input was previously uncaught
            if re.match('y', resp[0], re.IGNORECASE):
                ini['use_inp_inflows'] = True
            else:
                ini['use_inp_inflows'] = False
            break
        except (KeyboardInterrupt, IndexError):
            print(key_err_msg)

    if not ini['using_swmm_volume']:
        # the following prompt allows the user to change the method used to calculate the depth and velocity
        # for combined swmm and wasp segments.
        while True:
            try:
                msg = "By default, the velocity and depth for combined parallel and adjacent segments will " \
                      + "calculated as the arithmetic average of the velocities and depths of the constituent segments. " \
                      + "Alternatively, you can choose to use a flow-weighted average instead. If you choose to use flow  " \
                      + "weighted averages, the standard arithmetic average will be used when the flow is 0."
                print(msg)
                resp = raw_input("Do you want to use flow-weighted sums instead of averages? (y/n) >> ")[0]
                if re.match('y', resp, re.IGNORECASE):
                    ini['flow_weight_sums'] = True
                else:
                    ini['flow_weight_sums'] = False
                break
            except (KeyboardInterrupt, IndexError):
                print(key_err_msg)

    write_cfg(ini)

    return ini

    

def extract_external_flows_percentage(cursor, outpath, WASP):
    """
        For every WASP (or SWMM) segment that receives external inflows, write a CSV
        file named '<segnum>.csv' into outpath giving each inflow's share of the
        segment's total external flow at every timestep. Inflows whose name ends in
        'balbucket' (volume-balancing dummies) are excluded from the totals and output.

        cursor : sqlite cursor over a database containing the 'flows' table
        outpath : directory into which the per-segment CSV files are written
        WASP : if True operate on WASP segment numbers, otherwise on SWMM segment numbers
    """
    source = 'wasp' if WASP else 'swmm'

    # collect the segment numbers that act as flow sinks for the chosen model
    # (the original also queried the other model's sinks, but never used them)
    cursor.execute("SELECT DISTINCT " + source + "_sink FROM flows WHERE " + source + "_sink > 0")
    segnums = [row[0] for row in cursor.fetchall()]

    for segnum in segnums:
        cursor.execute('SELECT DISTINCT name FROM flows WHERE ' + source + '_sink = ? AND ' + source + '_source = 0', (segnum, ))
        inflow_names = [row[0] for row in cursor.fetchall()]

        if not inflow_names:
            continue

        # one row per (inflow, timestep): flow, segment total flow, and the inflow's
        # fraction of the total (0 when the total flow is 0, to avoid division by zero)
        cursor.execute("""
            SELECT f.timestep, f.name, f.flow, tf.total_flow, 
                CASE WHEN tf.total_flow > 0 THEN f.flow / tf.total_flow ELSE 0 END
            FROM flows f INNER JOIN (
                SELECT timestep, SUM(flow) AS total_flow
                FROM flows 
                WHERE """ + source + """_sink = ? AND """ + source + """_source = 0 AND name NOT LIKE '%balbucket'
                GROUP BY timestep) as tf
            ON f.timestep = tf.timestep
            WHERE f.""" + source + """_sink = ? AND f.""" + source + """_source = 0 AND f.name NOT LIKE '%balbucket'
            ORDER BY f.name, f.timestep
        """, (segnum, segnum))

        results = cursor.fetchall()

        if not results:
            continue

        # column positions in the SELECT above
        timestep_col = 0
        name_col = 1
        pct_col = 4

        names = ['timestep']
        pct_columns = []
        # group the results together by inflow name and extract each inflow's
        # percentage series as one column
        # (the original also built flow/total columns here but never used them)
        for key, group in groupby(results, lambda row: row[name_col]):
            names.append(key)
            group = sorted(group, key=lambda row: row[timestep_col])
            pct_columns.append([row[pct_col] for row in group])

        # prepend the timestep column, then pivot the list of columns into rows
        pct_columns.insert(0, range(0, len(pct_columns[0])))
        pct_rows = zip(*pct_columns)
        named_pct_rows = [dict(zip(names, row)) for row in pct_rows]

        # write the external flow percentages to file
        with open(os.path.join(outpath, str(segnum) + '.csv'), 'w') as f:
            writer = csv.DictWriter(f, fieldnames=names, lineterminator='\n')
            writer.writeheader()
            for row in named_pct_rows:
                writer.writerow(row)

def extract_external_flows(cursor, outpath, percentage, WASP):
    """
        For every WASP (or SWMM) segment that receives external inflows, write a CSV
        file named '<segnum>_flow.csv' into outpath containing each inflow's flow
        timeseries. When percentage is truthy, delegate to
        extract_external_flows_percentage, which writes each inflow's share of the
        segment total instead of raw flows.

        cursor : sqlite cursor over a database containing the 'flows' table
        outpath : directory into which the per-segment CSV files are written
        percentage : write flow fractions (via the percentage variant) instead of flows
        WASP : if True operate on WASP segment numbers, otherwise on SWMM segment numbers
    """
    if percentage:
        return extract_external_flows_percentage(cursor=cursor, outpath=outpath, WASP=WASP)

    source = 'wasp' if WASP else 'swmm'

    # collect the segment numbers that act as flow sinks for the chosen model
    # (the original also queried the other model's sinks, but never used them)
    cursor.execute("SELECT DISTINCT " + source + "_sink FROM flows WHERE " + source + "_sink > 0")
    segnums = [row[0] for row in cursor.fetchall()]

    for segnum in segnums:
        cursor.execute('SELECT DISTINCT name FROM flows WHERE ' + source + '_sink = ? AND ' + source + '_source = 0', (segnum, ))
        inflow_names = [row[0] for row in cursor.fetchall()]

        if not inflow_names:
            continue

        # one row per (inflow, timestep); unlike the percentage variant, 'balbucket'
        # correction inflows are NOT excluded here
        cursor.execute("""
            SELECT f.timestep, f.name, f.flow, tf.total_flow, 
                CASE WHEN tf.total_flow > 0 THEN f.flow / tf.total_flow ELSE 0 END
            FROM flows f INNER JOIN (
                SELECT timestep, SUM(flow) AS total_flow
                FROM flows 
                WHERE """ + source + """_sink = ? AND """ + source + """_source = 0 
                GROUP BY timestep) as tf
            ON f.timestep = tf.timestep
            WHERE f.""" + source + """_sink = ? AND f.""" + source + """_source = 0 
            ORDER BY f.name, f.timestep
        """, (segnum, segnum))

        results = cursor.fetchall()

        if not results:
            continue

        # column positions in the SELECT above
        timestep_col = 0
        name_col = 1
        flow_col = 2

        names = ['timestep']
        flow_columns = []
        # group the results together by inflow name and extract each inflow's flow
        # series as one column (the percentage columns computed by the query are
        # unused here; the dead 'if percentage' branch that wrote them was removed
        # because that case is fully handled by the delegation at the top)
        for key, group in groupby(results, lambda row: row[name_col]):
            names.append(key)
            group = sorted(group, key=lambda row: row[timestep_col])
            flow_columns.append([row[flow_col] for row in group])

        # prepend the timestep column, then pivot the list of columns into rows
        flow_columns.insert(0, range(0, len(flow_columns[0])))
        named_flow_rows = [dict(zip(names, row)) for row in zip(*flow_columns)]

        # write the external flows to file
        with open(os.path.join(outpath, str(segnum) + '_flow.csv'), 'w') as f:
            writer = csv.DictWriter(f, fieldnames=names, lineterminator='\n')
            writer.writeheader()
            for row in named_flow_rows:
                writer.writerow(row)


def extract_volumes(cursor, outpath, WASP):
    """
        Writes one CSV per segment containing its volume/depth/velocity
        timeseries, pulled either from the WASP-level 'final' table (WASP=True)
        or from the SWMM-level 'volumes2_a'/'volumes2_b' tables (WASP=False).

        cursor : sqlite3 cursor into the Hydmaker database
        outpath : existing directory into which one '<seg>.csv' per segment is written
        WASP : if True, extract by WASP segment; otherwise by SWMM segment
    """

    # the settings table records whether SWMM-reported volumes were used when
    # the database was built; that choice determines table and column names below
    cursor.execute("SELECT using_swmm_volume FROM settings")
    using_swmm_volume = bool(cursor.fetchone()[0])
    if WASP:
        fieldnames = ['timestep', 'vol', 'depth', 'velocity']
        segs = cursor.execute('SELECT DISTINCT wasp_sink FROM final WHERE wasp_sink > 0')
        query = """
            SELECT """ + ','.join(fieldnames) + """
            FROM final 
            WHERE wasp_sink = ? AND datatype = 1
            ORDER BY timestep
        """
    else:
        # the source table and its volume column name depend on the setting
        fieldnames = ['timestep'] + ['vol' if not using_swmm_volume else 'volume'] + ['depth', 'velocity']
        tname = 'volumes2_b' if not using_swmm_volume else 'volumes2_a'
        segs = cursor.execute("SELECT DISTINCT swmm FROM " + tname)
        query = """
            SELECT """ + ','.join(fieldnames) + """
            FROM """ + tname + """
            WHERE swmm = ? 
            ORDER BY timestep
        """

    # materialize the segment list before the cursor is reused below
    segs = [row[0] for row in segs]

    # normalize the CSV header once, up front: the database column may be named
    # 'volume', but the output files always use 'vol'. The SELECT string above
    # was already built from the original fieldnames, so this rename only
    # affects the CSV header. (Previously this rename was retried on every
    # loop iteration behind a bare except.)
    if using_swmm_volume and 'volume' in fieldnames:
        fieldnames[fieldnames.index('volume')] = 'vol'

    for seg in segs:
        with open(os.path.join(outpath, str(seg) + '.csv'), 'w') as f:
            writer = csv.DictWriter(f, fieldnames=fieldnames, lineterminator='\n')
            writer.writeheader()
            cursor.execute(query, (seg, ))
            for row in cursor.fetchall():
                writer.writerow(dict(zip(fieldnames, row)))

def export_internal_flows(cursor, outdir, WASP):
    """
        Extracts the internal (segment-to-segment) flows, either by WASP segment
        (WASP=True, from the flows_final table) or by SWMM segment (WASP=False,
        from the flows table), and writes one '<source>.csv' file per source
        segment into outdir. Each CSV has columns timestep, flow, sink.
    """

    # the two branches differ only in table and column names, so parameterize
    # them instead of duplicating the extraction loop
    if WASP:
        table, source_col, sink_col = 'flows_final', 'wasp_source', 'wasp_sink'
    else:
        table, source_col, sink_col = 'flows', 'swmm_source', 'swmm_sink'

    cursor.execute('SELECT DISTINCT ' + source_col + ' FROM ' + table +
                   ' WHERE ' + source_col + ' <> 0')
    sources = [row[0] for row in cursor.fetchall()]

    fieldnames = ['timestep', 'flow', 'sink']
    for source in sources:
        cursor.execute("""
            SELECT timestep, flow, """ + sink_col + """
            FROM """ + table + """
            WHERE """ + source_col + """ = ?
            ORDER BY """ + source_col + """, timestep
            """, (source, ))

        internal_flows = cursor.fetchall()

        # BUG FIX: the SWMM branch previously wrote to 'outpath', which is not
        # defined in this function (the parameter is 'outdir'), raising a
        # NameError at runtime
        with open(os.path.join(outdir, str(source) + '.csv'), 'w') as f:
            writer = csv.DictWriter(f, fieldnames=fieldnames, lineterminator='\n')
            writer.writeheader()
            for row in internal_flows:
                writer.writerow(dict(zip(fieldnames, row)))

def extract_external_flow_percentages_by_WASP_segment(cursor, outpath):
    # convenience wrapper: external flow percentages, grouped by WASP segment
    extract_external_flows(cursor, outpath, WASP=True, percentage=True)

def extract_external_flows_by_WASP_segment(cursor, outpath):
    # convenience wrapper: external flow volumes (not percentages), by WASP segment
    extract_external_flows(cursor, outpath, WASP=True, percentage=False)

def extract_external_flow_percentages_by_SWMM_segment(cursor, outpath):
    # convenience wrapper: external flow percentages, grouped by SWMM segment
    extract_external_flows(cursor, outpath, WASP=False, percentage=True)

def extract_external_flows_by_SWMM_segment(cursor, outpath):
    # convenience wrapper: external flow volumes (not percentages), by SWMM segment
    extract_external_flows(cursor, outpath, WASP=False, percentage=False)

def extract_volumes_by_WASP_segment(cursor, outpath):
    # convenience wrapper: segment volumes, grouped by WASP segment
    extract_volumes(cursor, outpath, WASP=True)

def extract_volumes_by_SWMM_segment(cursor, outpath):
    # convenience wrapper: segment volumes, grouped by SWMM segment
    extract_volumes(cursor, outpath, WASP=False)

def extract_internal_flows_by_WASP_segment(cursor, outpath):
    # convenience wrapper: internal flows, grouped by WASP segment
    export_internal_flows(cursor, outpath, WASP=True)

def extract_internal_flows_by_SWMM_segment(cursor, outpath):
    # convenience wrapper: internal flows, grouped by SWMM segment.
    # BUG FIX: this previously passed WASP=True (copy-paste from the WASP
    # wrapper above), so it duplicated the by-WASP-segment extraction instead
    # of extracting by SWMM segment.
    export_internal_flows(cursor, outpath, WASP=False)

def extract_consolidated_flows_by_WASP_segment(cursor, outpath):
    """
        For each WASP sink segment, writes a '<sink>.csv' file into outpath
        containing the consolidated external inflow timeseries — the rows of
        flows_final whose wasp_source is 0.
    """
    fieldnames = ['timestep', 'consolidated_external_flows']
    cursor.execute('SELECT DISTINCT wasp_sink FROM flows_final WHERE wasp_sink <> 0')
    sinks = [r[0] for r in cursor.fetchall()]

    for sink in sinks:
        cursor.execute('SELECT timestep, flow FROM flows_final WHERE wasp_sink = ? AND wasp_source = 0 ORDER BY timestep ',
            (sink,))
        records = cursor.fetchall()

        with open(os.path.join(outpath, str(sink) + '.csv'), 'w') as f:
            writer = csv.DictWriter(f, fieldnames=fieldnames, lineterminator='\n')
            writer.writeheader()
            for record in records:
                writer.writerow(dict(zip(fieldnames, record)))


def extract_data():
    """
        Interactive console flow: prompts for a Hydmaker database path, lets the
        user pick which timeseries to extract (blank input with no selection
        means extract everything), prompts for an output directory, then runs
        each selected extraction function. One subdirectory of CSV files is
        written per selected series; pre-existing files in those subdirectories
        are deleted first.
    """

    # prompt until the user supplies a path to an existing (non-directory) file
    while True:
        dbpath = raw_input("Enter the path to the Hydmaker database >> ")
        if os.path.exists(dbpath) and not os.path.isdir(dbpath):
            cnxn = sqlite3.connect(dbpath)
            cursor = cnxn.cursor()
            break
        else:
            print('Invalid path.')

    # menu labels; each label also becomes the output subdirectory name
    # (with spaces replaced by underscores below)
    labels = ['external flow percentages by WASP segment',
              'external flows by WASP segment',
              'external flow percentages by SWMM segment',
              'external flows by SWMM segment',
              'volumes by WASP segment',
              'volumes by SWMM segment',
              'internal flows by WASP segment',
              'internal flows by SWMM segment',  # typo fix: was 'interval flows ...'
              'consolidated external flows by WASP segment']

    # one extraction function per label, in the same order
    extraction_functions = [extract_external_flow_percentages_by_WASP_segment,
        extract_external_flows_by_WASP_segment,
        extract_external_flow_percentages_by_SWMM_segment,
        extract_external_flows_by_SWMM_segment,
        extract_volumes_by_WASP_segment,
        extract_volumes_by_SWMM_segment,
        extract_internal_flows_by_WASP_segment,
        extract_internal_flows_by_SWMM_segment,
        extract_consolidated_flows_by_WASP_segment]

    print("Select the timeseries you want to extract.")
    chosen_labels = [False] * len(labels)
    while True:
        # already-chosen entries are shown with an 'x' instead of their number
        print('\n'.join([('x' if chosen else str(i + 1)) + ' - ' + label
                         for i, (chosen, label) in enumerate(zip(chosen_labels, labels))]))
        choice = raw_input('(leave blank and press Enter to continue) >> ')
        if not choice:
            if not any(chosen_labels):
                # blank input with nothing selected means 'extract everything'
                chosen_labels = [True] * len(labels)
                print("Extracting all timeseries.")
            break

        # validate with an explicit range check rather than an assert, which
        # would be stripped when running under python -O
        try:
            choice = int(choice)
            if not (1 <= choice <= len(labels)):
                raise ValueError(choice)
        except ValueError:
            print('Invalid input.')
        else:
            chosen_labels[choice - 1] = True

        if all(chosen_labels):
            break

    print("WARNING: any existing series will be overwritten")
    while True:
        series_path = raw_input("Enter the output directory >> ")
        if os.path.isdir(series_path):
            break
        print('Invalid path.')

    for i, chosen in enumerate(chosen_labels):
        if not chosen:
            continue
        outdir = os.path.join(series_path, labels[i].replace(' ', '_'))
        if not os.path.isdir(outdir):
            os.mkdir(outdir)
        else:
            # clear any previous output for this series
            for fname in os.listdir(outdir):
                os.unlink(os.path.join(outdir, fname))

        start = datetime.now()
        print(labels[i])
        extraction_functions[i](cursor, outdir)
        # report elapsed time in minutes
        print((datetime.now() - start).total_seconds() / 60)

def run():
    """
        Console entry point: shows the top-level menu and dispatches either to
        ASCII .hyd file generation (option 1) or to timeseries extraction from
        an existing Hydmaker database (option 2).
    """
    print('Hydmaker')
    print('-' * 20)
    print("Options")
    opts = ['Generate an ASCII .hyd file.', 'Extract timeseries from existing Hydmaker database.']
    while True:
        print('\n'.join([str(i + 1) + ' - ' + opt for i, opt in enumerate(opts)]))
        choice = raw_input('Enter option number >> ')
        # BUG FIX: the upper bound was previously len(opts) + 1, which accepted
        # the out-of-range option 3; also validate with an explicit check
        # rather than an assert (stripped under python -O)
        try:
            choice = int(choice)
            if not (1 <= choice <= len(opts)):
                raise ValueError(choice)
        except ValueError:
            print("Invalid choice.")
        else:
            if choice == 1:
                # load settings from hydmaker.ini located next to this script
                ini = load_ini(os.path.join(curdir(), 'hydmaker.ini'))
                start = datetime.now()
                # call main function, passing settings as arguments
                print('Processing ... ')
                process(**ini)

                print('done')
                # report elapsed time in minutes
                print((datetime.now() - start).total_seconds() / 60)
            else:
                extract_data()
            break


# run the interactive console interface when executed as a script
if __name__ == '__main__':
    run()

