import csv
import sqlite3
import re
import os
import copy
from math import floor
from contextlib import closing
from datetime import datetime, timedelta
from itertools import groupby
import itertools
from collections import OrderedDict
import struct
import ConfigParser
import traceback, code
from collections import OrderedDict
import threading

import wx
import matplotlib
from matplotlib.patches import Rectangle
matplotlib.use('WXAgg')
from matplotlib.figure import Figure
from matplotlib.backends.backend_wxagg import \
        FigureCanvasWxAgg as FigCanvas, \
        NavigationToolbar2WxAgg as NavigationToolbar

def curdir():
    """Return the directory holding this module, or the current working
    directory when no __file__ is defined (e.g. interactive use)."""
    if '__file__' in globals():
        return os.path.dirname(__file__)
    return os.curdir

class SWMMOUT2SQLITE_NoNames(Exception):
    """Raised when none of the element names supplied to swmmout2sqlite
    are present in the SWMM binary output file."""
    def __init__(self):
        super(SWMMOUT2SQLITE_NoNames, self).__init__('No names supplied found in file.')

class SWMMOUT2SQLITE_Error(Exception):
    """Wraps an error message raised while converting SWMM binary output
    to a SQLite database."""
    def __init__(self, exc):
        message = "Can't convert SWMM binary output to SQLite database. " + exc
        super(SWMMOUT2SQLITE_Error, self).__init__(message)

def swmmout2sqlite(swmmout_path, dbpath, element_type, names=None, variables=None, 
        start=None, end=None, ignore_missing_names=False, using_swmm_volume=False):
    """
        Extract time series for one element type from a SWMM binary output
        file into a SQLite table named after element_type.

        swmmout_path : path to SWMM binary out file (.out)
        dbpath : path to SQLite database into which the binary should be read
        element_type : desired element type to extract from the binary file: subcatchments, nodes, links, system
        names : list of desired element names from element_type to extract. If None, then the function 
            will extract all.
        variables : list of desired variables to extract for each element in names.
            Each entry is matched against the known variable labels with re.match
            (i.e. treated as a regex anchored at the label start), so a plain
            prefix such as 'rainfall' matches 'rainfall_inPerHour'.
        start : start datetime to begin extracting the timeseries
        end : end datetime to end extraction
        ignore_missing_names : if there are names in the names argument not found in the .out file, should an exception 
            be raised?
        using_swmm_volume : if True, expect one extra per-link variable
            ('volume_{volume}') in the report series.

        Returns a tuple: (dbpath, element_type, number of elements extracted,
        number of variables extracted, start datetime, end datetime).

        NOTE: this function is Python 2 code (basestring, list-returning
        dict.values()).

        For a detailed description of the binary file layout, see the SWMM 5.0 Interfacing Guide Appendix B 
        available on the EPA's SWMM page.
    """

    # validate names argument
    try:
        assert names is None or all(isinstance(name, basestring) for name in names)
    except:
        raise Exception("Incorrect names argument supplied.")

    # these are the types of model elements reported on in the binary file
    element_types = ['subcatchments', 'nodes', 'links', 'pollutants', 'system']

    # validate element_type arg
    if element_type not in element_types:
        raise Exception("Unknown element type: " + element_type)

    with open(swmmout_path, 'rb') as f:
        # a record is the smallest unit of information in the binary file. 
        RECORD_BYTES = 4

        # constants used to read the opening of the binary file
        NRECORDS_OPENING = 7 # number of records in the opening
        OPENING_BYTES = RECORD_BYTES * NRECORDS_OPENING
        NRECORDS_HEAD = 3 # head is the first three records of the opening: id, version, and flowunits
        HEAD_BYTES = NRECORDS_HEAD * RECORD_BYTES
        # format strings are for use with the struct.unpack function. 6i means 6 integer records
        HEAD_FORMAT = str(NRECORDS_HEAD) + 'i'
        NELEMENT_TYPES = 4 # ie. subcatches, nodes, links, pollutants
        # the opening bytes contain the head information and element counts
        ELEMENTCOUNTS_BYTES = OPENING_BYTES - HEAD_BYTES
        # check that these constant definitions are consistent with one another
        if not NELEMENT_TYPES * RECORD_BYTES == ELEMENTCOUNTS_BYTES:
            raise SWMMOUT2SQLITE_Error("NELEMENT_TYPES and ELEMENTCOUNTS_BYTES contants are inconsistent.")
        ELEMENTCOUNTS_FORMAT = str(NELEMENT_TYPES) + 'i'

        # constants used to read the closing of the binary file
        NRECORDS_CLOSING = 6
        CLOSING_BYTES = RECORD_BYTES * NRECORDS_CLOSING
        # Names, Properties, Results. These are three main sections of the binary file. The closing contains their file offsets
        NSECTIONS = 3 
        SECTION_OFFSET_BYTES = RECORD_BYTES * NSECTIONS
        SECTION_OFFSET_FORMAT = str(NSECTIONS) + 'i'
        NRECORDS_TAIL = NRECORDS_CLOSING - NSECTIONS
        TAIL_BYTES = CLOSING_BYTES - SECTION_OFFSET_BYTES
        TAIL_FORMAT = str(NRECORDS_TAIL) + 'i'

        EXPECTED_ID_NUM = 516114522 # should appear at the start and end of file

        NRECORDS_DAYS_SINCE_EPOCH = 2 # dates are stored in two adjacent records as decimal days since the epoch
        DAYS_SINCE_EPOCH_BYTES  = NRECORDS_DAYS_SINCE_EPOCH * RECORD_BYTES
        EPOCH = datetime(1899, 12, 30)
        REPORT_INTERVAL_BYTES = RECORD_BYTES
        HOURS_IN_DAY = 24.0 
        MINUTES_IN_HOUR = 60.0
        SECONDS_IN_MINUTE = 60.0
        # one second expressed in decimal days
        TIMESTEP_TOLERANCE = (((1/HOURS_IN_DAY)/MINUTES_IN_HOUR)/SECONDS_IN_MINUTE)

        # READ CLOSING 
        # move to the beginning of the closing (seek relative to end of file)
        f.seek(-CLOSING_BYTES, 2) 
        # read section offsets
        section_offset_records = f.read(SECTION_OFFSET_BYTES)
        section_offsets = struct.unpack(SECTION_OFFSET_FORMAT, section_offset_records)
        # create a dict that indexes the section offsets by name
        offsets_by_section = dict(zip(('names', 'properties', 'results'), section_offsets))
        # read the remaining records in the closing and unpack them into the appropriate variables
        tail_records = f.read(TAIL_BYTES)
        ntimesteps, errorcode, id_num = struct.unpack(TAIL_FORMAT, tail_records)

        # if the identifying number doesn't match the expected, either the file is corrupt
        # or it was generated with a different version of SWMM
        if id_num != EXPECTED_ID_NUM:
            raise SWMMOUT2SQLITE_Error("Unexpected identifying number encountered at end of file.")
        elif errorcode:
            raise SWMMOUT2SQLITE_Error("Output contains errors.")
        elif not ntimesteps:
            raise SWMMOUT2SQLITE_Error("Output has zero timesteps.")

        # READ OPENING
        # move to the beginning, read the head, unpack into the appropriate variables
        f.seek(0)
        head_records = f.read(HEAD_BYTES)
        id_num, version, flowunits = struct.unpack(HEAD_FORMAT, head_records)
        if id_num != EXPECTED_ID_NUM:
            raise SWMMOUT2SQLITE_Error("Unexpected identifying number encountered at beginning of file.")

        # the units dict contains the two possible unit types for each kind of measurement.
        # the first unit in the pair is the choice when the flow units are CFS, GPM, or MGD.
        # the second unit in the pair is the choice when the flow units are CMS, LPS, LPD
        units = {
            'depth_rate1' : ['inPerHour', 'mmPerHour'],
            'depth_rate2' : ['ftPerSec', 'mPerSec'],
            'depth_rate3' : ['inPerDay', 'mmPerDay'],
            'depth1' : ['in', 'mm'],
            'depth2' : ['ft', 'm'],
            'volume' : ['ft3', 'm3'],
            'temp' : ['degF', 'degC']
        }
        # flow units are stored in the binary file as an integer code used to index
        # the following list. NOTE(review): an earlier comment said the code is 1-6,
        # but the direct 0-based indexing below assumes 0-5 -- confirm against the
        # SWMM 5.0 Interfacing Guide.
        flowunit_options = ['CFS', 'GPM', 'MGD', 'CMS', 'LPS', 'LPD']
        flowunits = flowunit_options[flowunits]
        # the choice of flow units determines the units type for all other measurements
        units_choice = 0 if flowunits in ('CFS', 'GPM', 'MGD') else 1
        # update units dict so that it indexes the correct units for each measurement type
        for unit_group in units:
            units[unit_group] = units[unit_group][units_choice]
        # add flowunits to units dict. to be used later
        units['flow'] = flowunits

        # read in the number of elements for each element type, index the counts by element type
        element_counts_records = f.read(ELEMENTCOUNTS_BYTES)
        element_counts = list(struct.unpack(ELEMENTCOUNTS_FORMAT, element_counts_records))
        nsystem_elements = 1 # there is only one system time series
        element_counts.append(nsystem_elements)
        element_counts_by_type = OrderedDict(zip(element_types, element_counts))

        # validate element_type argument
        if not element_counts_by_type[element_type]:
            raise Exception("SWMM output does not report any " + element_type + " elements.")

        # Go to the element names section
        f.seek(offsets_by_section['names'], 0)

        # create dictionary containing the lists of all element names for each element type.
        # each name is stored as a length record followed by that many bytes of text.
        element_names_by_type = OrderedDict()
        for type_ in element_types:
            if type_ in ('system',):
                # system does not have elements
                element_names_by_type[type_] = None
            else:
                for i in range(element_counts_by_type[type_]):
                    name_bytes_record = f.read(RECORD_BYTES)
                    name_bytes = struct.unpack('i', name_bytes_record)[0]
                    name_record = f.read(name_bytes)
                    name = struct.unpack(str(name_bytes) + 's', name_record)[0]
                    element_names_by_type.setdefault(type_, []).append(name)

        # if the caller supplied a names argument, make sure all of those names are in the 
        # the names list for the desired element type, unless ignore_missing_names is True, 
        # in which case throw out the names that aren't in the binary file
        if names:
            type_names = element_names_by_type[element_type]
            in_report = [name in type_names for name in names]

            if not all(in_report) and not ignore_missing_names:
                missing_names = [name for name, is_in_report in zip(names, in_report) if not is_in_report]
                raise Exception("The following element names aren't in the report: " + ','.join(missing_names))

            user_names_in_rpt = [name for name in names if name in type_names]
        else:
            # if no names argument is supplied, extract all elements
            user_names_in_rpt = element_names_by_type[element_type]

        # this conditional catches the case when the user supplies a names list
        # with True ignore_missing_names and none of the supplied names are in the file
        if not user_names_in_rpt:
            raise SWMMOUT2SQLITE_NoNames()

        # if there are any pollutants, read the pollutant units
        pollutant_labels = []
        if element_counts_by_type['pollutants']:
            pollutant_units_records = f.read(element_counts_by_type['pollutants'] * RECORD_BYTES)
            pollutant_units = struct.unpack(str(element_counts_by_type['pollutants']) + 'i', pollutant_units_records)
            # pollutant units are stored as integers 0 - 2 corresponding to the following list
            pollutant_unit_label_options = ['mgL', 'ugL', 'countPerL']
            pollutant_unit_labels = [pollutant_unit_label_options[i] for i in pollutant_units]
            # create output labels for the pollutants by joining their names with their units
            pollutant_labels = ['_'.join(tup) for tup in zip(element_names_by_type['pollutants'], pollutant_unit_labels)]

        # Read properties. The properties describe the aspects of the elements in each 
        # element class such as area, link length, node depth. These data are read here, but never used.
        f.seek(offsets_by_section['properties'], 0)
        element_property_codes = OrderedDict()
        element_properties_by_type = OrderedDict()

        for type_ in element_types:
            if type_ in ('pollutants', 'system'):
                element_properties_by_type[type_] = None
            else:
                nprops_record = f.read(RECORD_BYTES)
                nprops = struct.unpack('i', nprops_record)[0]
                property_code_records = f.read(nprops*RECORD_BYTES)
                property_codes = struct.unpack(str(nprops) + 'i', property_code_records)
                element_property_codes[type_] = property_codes

                for i in range(element_counts_by_type[type_]):
                    property_records = f.read(nprops*RECORD_BYTES)
                    properties = struct.unpack(str(nprops) + 'f', property_records)
                    element_properties_by_type.setdefault(type_, []).append(zip(property_codes, properties))

        # These are labels for each of the reporting variables for each of the element types.
        # They're listed in the order they appear in the report series. The part of the labels
        # in braces are place holders for future substitution of the correct units, e.g., {depth_rate1},
        # will be replaced with something like 'inPerHour'.
        var_labels_by_type = {
            'subcatchments' : ['rainfall_{depth_rate1}', 
                               'snow_depth_{depth1}', 
                               'evap_plus_infil_losses_{depth_rate1}', 
                               'runoff_rate_{flow}', 
                               'gw_outflow_rate_{flow}',
                               'gw_table_elev_{depth2}'],
            'nodes' : ['depth_above_invert_{depth2}', 
                       'hydraulic_head_{depth2}', 
                       'stored_and_ponded_vol_{volume}', 
                       'lateral_inflow_{flow}', 
                       'total_inflow_{flow}',
                       'flow_lost_to_flooding_{flow}'],
            'links' : ['flow_rate_{flow}', 
                       'flow_depth_{depth2}', 
                       'flow_velocity_{depth_rate2}', 
                       'Froude_number', 
                       'Capacity'],
            'system' : ['air_temp_{temp}',
                        'rainfall_{depth_rate1}',
                        'snow_depth_{depth1}',
                        'evap_plus_infil_losses_{depth_rate1}',
                        'runoff_rate_{flow}',
                        'dry_weather_inflow_{flow}',
                        'gw_inflow_{flow}',
                        'RDII_inflow_{flow}',
                        'user_supplied_direct_inflow_{flow}',
                        'total_lateral_inflow_{flow}',
                        'flow_lost_to_flooding_{flow}',
                        'flow_leaving_through_outfalls_{flow}',
                        'volume_of_stored_water_{volume}',
                        'evaporation_rate_{depth_rate3}']
        }
        if using_swmm_volume:
            var_labels_by_type['links'] += ['volume_{volume}']
        # pollutant concentrations are recorded in the report series as 
        # the final parameters for subcatchments, nodes, and links
        # if there are pollutants, add their labels to each list of labels except for system
        if element_counts_by_type['pollutants']:
            for type_ in element_types:
                if type_ not in ('pollutants', 'system'):
                    var_labels_by_type[type_].extend(pollutant_labels)

        # update the label units place holders with the correct units for each parameter using the units dictionary
        for type_ in var_labels_by_type:
            var_labels_by_type[type_] = [name.format(**units) for name in var_labels_by_type[type_]]

        # get a count of the number of variables for each element type and check that it matches the expectations
        # in var_labels_by_type
        report_vars_by_type = OrderedDict()
        for type_ in element_types:
            if type_ in ('pollutants',):
                report_vars_by_type[type_] = None
            else:
                nvariable_codes_record = f.read(RECORD_BYTES)
                nvariable_codes = struct.unpack('i', nvariable_codes_record)[0]
                variable_code_records = f.read(nvariable_codes * RECORD_BYTES)
                variable_codes = struct.unpack(str(nvariable_codes) + 'i', variable_code_records)
                expected_num_vars = len(var_labels_by_type[type_])
                if len(variable_codes) != expected_num_vars:
                    exc = "Unexpected number of variables for " + type_ + ". Expected " + str(expected_num_vars) \
                          + ", encountered " + str(len(variable_codes))
                    raise Exception(exc)
                report_vars_by_type[type_] = variable_codes

        # Read simulation start datetime and reporting interval
        rpt_start_days_since_epoch_record = f.read(DAYS_SINCE_EPOCH_BYTES)
        rpt_start_days_since_epoch = struct.unpack('d', rpt_start_days_since_epoch_record)[0]
        rpt_interval_record = f.read(REPORT_INTERVAL_BYTES) 
        rpt_interval = struct.unpack('i', rpt_interval_record)[0]
        
        def to_days_since_epoch(dtime):
            # takes dtime object and calculate the number of days since the EPOCH
            in_seconds = (dtime - EPOCH).total_seconds()
            return ((in_seconds / SECONDS_IN_MINUTE) / MINUTES_IN_HOUR) / HOURS_IN_DAY

        # if the user supplied a start time, get the days since epoch for that start and 
        # register this as the start of the desired data.
        user_start_days_since_epoch = to_days_since_epoch(start) if start else rpt_start_days_since_epoch
        # calculate the reporting interval duration in decimal days
        rpt_interval_days = ((rpt_interval / SECONDS_IN_MINUTE) / MINUTES_IN_HOUR) / HOURS_IN_DAY
        # using the number of timesteps calculate the final datetime of the report.
        # NOTE(review): uses (1 + ntimesteps) intervals, presumably to make the end
        # bound safely inclusive of the last reported timestep -- confirm
        rpt_end_days_since_epoch = rpt_start_days_since_epoch + rpt_interval_days * (1 + ntimesteps)
        # if the user supplied and end datetime, use this as the cut off, otherwise use the final report datetime
        user_end_days_since_epoch = to_days_since_epoch(end) if end else rpt_end_days_since_epoch

        # raise an exception if the start and end times are inconsistent
        if user_end_days_since_epoch <= user_start_days_since_epoch:
            raise SWMMOUT2SQLITE_Error("Start and end datetimes inconsistent.")

        # calculate the number of bytes per timestep for each element type
        # this will be used to skip over sections that aren't needed.
        report_bytes_by_type = OrderedDict()
        for type_ in element_types:
            if type_ == 'pollutants':
                report_bytes_by_type[type_] = 0
            else:
                byte_count = element_counts_by_type[type_] * len(report_vars_by_type[type_]) * RECORD_BYTES
                report_bytes_by_type[type_] = byte_count

        # calculate the total number of bytes for each timestep
        bytes_per_timestep = DAYS_SINCE_EPOCH_BYTES + sum(report_bytes_by_type.values())

        # generate the offsets for each element type relative 
        # to end of the datetime records for each timestep. Since 
        # subcatchments starts right after the datetime, its offset is zero.
        # NOTE: values()[:-1] slicing relies on Python 2, where dict.values()
        # returns a list rather than a view.
        type_offsets = [0]
        for bytes_ in report_bytes_by_type.values()[:-1]:
            type_offsets.append(type_offsets[-1] + bytes_)

        # index the timestep-relative offsets by element_type
        type_offsets_by_type = OrderedDict(zip(element_types, type_offsets))

        # for the desired element type, calculate how many bytes each 
        # element takes up per timestep
        bytes_per_element = len(report_vars_by_type[element_type]) * RECORD_BYTES

        # determine which variable should be extracted.
        # if the user has not supplied a list of variables to extract, extract them all
        if not variables:
            variables = var_labels_by_type[element_type]

        # create a dict where each label for a variable desired by the user 
        # is indexed by its position in the full list of variables extracted from the file.
        # e.g., if the user wants rainfall and runoff rate for subcatchments, the dict would look 
        # something like {0 : 'rainfall', 4 : 'runoff'}.
        # the order in which the element type variables are listed in the file corresponds
        # to the order they are written in the report time series.
        # NOTE: each requested variable is matched with re.match, i.e. as a regex
        # anchored at the start of the label, so plain prefixes work; the first
        # matching label wins.
        user_var_labels_by_index = []
        for var in variables:
            for i, label in enumerate(var_labels_by_type[element_type]):
                if re.match(var, label):
                    user_var_labels_by_index.append((i, label))
                    break
                elif i == len(var_labels_by_type[element_type]) - 1:
                    exc = "Could not match variable " + var + " with any of the known variable labels for " + element_type
                    raise Exception(exc)
        user_var_labels_by_index = OrderedDict(user_var_labels_by_index)

        # get the maximum index for all the variables desired by the user and use this to calculate 
        # how many bytes will have to be read for each element at each time step.
        max_user_var_index = max(user_var_labels_by_index.keys())
        user_var_byte_range = (max_user_var_index + 1) * RECORD_BYTES

        # we will need a list of desired element names with their sequential byte offsets, for example,
        # if elementA is the first desired element and is 16 bytes from the start of the section and elementB
        # is 32 bytes away from elementA, then the beginning of the list 
        # would look like: [[16, 'elementA'], [32, 'elementB'], ...]
        #
        # first create a list of desired element names paired with their index from the list of names belonging 
        # to the desired element type
        user_element_indices = [[i, name] for i, name in enumerate(element_names_by_type[element_type]) 
                                if name in user_names_in_rpt]
        # to each pair in user_element_indices add the names index of the element name pair just before it.
        # we'll use this to calculate the index between each element
        lagged = [user_element_indices[i] + [user_element_indices[i-1][0]] for i in range(1, len(user_element_indices))]
        first_offset = user_element_indices[0]
        user_element_offsets_and_names = [[first_offset[0] * bytes_per_element, first_offset[1]]]
        for idx, name, idx_lag1 in lagged:
            offset = (idx - idx_lag1) * bytes_per_element - user_var_byte_range
            user_element_offsets_and_names.append((offset, name))
        # get the largest element index from the list of desired element names
        max_user_element_index = max(dict(user_element_indices).keys())

        # open a db connection in a context manager. When the with block exits, the db will be closed.
        with closing(sqlite3.connect(dbpath)) as cnxn:
            # turn on autocommit
            cnxn.isolation_level = None
            # generate a cursor for the database 
            cursor = cnxn.cursor()
            # turn off extraneous features to optimize the database's performance
            cursor.executescript("""
                PRAGMA synchronous=OFF;
                PRAGMA count_changes=OFF;
                PRAGMA journal_mode=OFF;
            """)

            # if a table for the element type already exists, delete it.
            try:
                cursor.execute("DROP TABLE " + element_type)
            except:
                pass

            # create a table with a column for each variable to be extracted
            create_table_stmt = "CREATE TABLE {type} (timestep integer, name text, {variables})"
            variable_defs = ','.join([name + ' real' for name in user_var_labels_by_index.values()])
            create_table_stmt = create_table_stmt.format(type=element_type, variables=variable_defs)
            cursor.execute(create_table_stmt)

            # create the insert query that will be used to import data from the file to the database
            insert_stmt = """
                INSERT INTO {type} (timestep, name, {variables})
                VALUES (?, ?, {values})
            """
            insert_stmt = insert_stmt.format(type=element_type, variables=','.join(user_var_labels_by_index.values()),
                values=','.join(['?' for _ in range(len(user_var_labels_by_index))]))

            # move the file position to the start of the report section
            f.seek(offsets_by_section['results'], 0)

            # executing the insert statement on each line of data would have a large overhead. 
            # we'll store each line of data in a list (batch) and insert everything in that list at the same time
            # once it reaches a certain size.
            INSERT_BATCH_SIZE = 500
            batch = []
            # define the struct.unpack format for extracting a row of variables for an element
            # even if the the user only wants, for instance, variables 2, 3, 10, all variables 1-10 will be extracted.
            var_format = 'f' * (max_user_var_index + 1)
            # get the offset from the start of the timestep for the desired element type
            element_type_offset = type_offsets_by_type[element_type]
            # calculate the number of bytes from the beginning of a timestep to the last byte for that timestep 
            # that must be read to get the desired data.
            head_timestep_bytes_range = (((max_user_element_index + 1) - 1) * bytes_per_element) + user_var_byte_range
            # using head_timestep_bytes_range, calculate the number of bytes from the last variable for the last element 
            # that must be read to the beginning of the next timestep. 
            tail_timestep_bytes_range = bytes_per_timestep - DAYS_SINCE_EPOCH_BYTES - element_type_offset - head_timestep_bytes_range

            # read the datetime record for the first timestep 
            days_since_epoch_record = f.read(DAYS_SINCE_EPOCH_BYTES)
            days_since_epoch = struct.unpack('d', days_since_epoch_record)[0]
            # iterate through the timesteps. at the beginning of each iteration, the file 
            # position is at the start of the next timestep
            for timestep in range(ntimesteps):
                if days_since_epoch > user_end_days_since_epoch:
                    # if the datetime entry at the current timestep is later than the user specified end datetime, break 
                    # out of the loop ...
                    break
                elif days_since_epoch >= user_start_days_since_epoch:
                    # ... otherwise skip to the position for the desired element type
                    f.seek(element_type_offset, 1)
                    for i, tup in enumerate(user_element_offsets_and_names):
                        # for each desired element skip ahead from the current position using its relative offset
                        element_offset, name = tup
                        f.seek(element_offset, 1)
                        # initialize the entry for the insert statement 
                        entry = [timestep, name]
                        # read in the variables for this element and extract the desired ones
                        vars_records = f.read(user_var_byte_range)
                        vars_vals = struct.unpack(var_format, vars_records)
                        # round to 7 significant figures (single-precision floats)
                        vars_vals = [float('%.7g' % val) for val in vars_vals]
                        user_var_vals = [val for j, val in enumerate(vars_vals) if j in user_var_labels_by_index.keys()]
                        # extend the entry for the insert statement with the variable values
                        entry.extend(user_var_vals)
                        # add the entry to the batch
                        batch.append(entry)

                        # once batch gets to the insert batch size, execute the insert statement on
                        # the batch and then clear it.
                        if len(batch) == INSERT_BATCH_SIZE:
                            cursor.executemany(insert_stmt, batch)
                            del batch[:]

                    # skip ahead to the next timestep
                    f.seek(tail_timestep_bytes_range, 1)

                else:
                    # move ahead to the next timestep
                    f.seek(bytes_per_timestep - DAYS_SINCE_EPOCH_BYTES, 1)

                prev_days_since_epoch = days_since_epoch
                # read the datetime entry for the next timestep
                days_since_epoch_record = f.read(DAYS_SINCE_EPOCH_BYTES)
                days_since_epoch = struct.unpack('d', days_since_epoch_record)[0]

            # run the insert statement on any remaining entries in the batch list
            cursor.executemany(insert_stmt, batch)
            # create indices on the table to improve query performance
            idx_stmt = "CREATE INDEX idx_{type}_{col} on {type}({col})"
            cursor.execute(idx_stmt.format(type=element_type, col='timestep'))
            cursor.execute(idx_stmt.format(type=element_type, col='name'))
            cnxn.commit()

        # return a number of informative parameters about the data to the user
        user_start_dtime = EPOCH + timedelta(days=user_start_days_since_epoch)
        user_end_dtime = EPOCH + timedelta(days=user_end_days_since_epoch)
        return dbpath, element_type, len(user_names_in_rpt), len(variables), user_start_dtime, user_end_dtime

def tune_db(cursor):
    """Disable non-essential SQLite features on *cursor*'s connection to
    improve bulk-insert performance."""
    pragmas = (
        "PRAGMA synchronous=OFF",    # don't wait for disk writes to complete before continuing
        "PRAGMA count_changes=OFF",  # don't count num. rows affected by DELETE, INSERT, or UPDATE
        "PRAGMA journal_mode=OFF",   # turn off journaling
    )
    for pragma in pragmas:
        cursor.execute(pragma)

def get_segtable(segmap_paths):
    """
    Read one or more segmentation map files (.map.csv) and return a validated
    list of row dicts.

    Each returned row has the CSV columns 'Name', 'SWMM', 'Type', 'WASP' plus
    'segmap' (basename of the file the row came from). Blank SWMM/WASP cells
    are filled down from the previous row, then both columns are renumbered
    sequentially across all files. Raises Exception on any validation failure
    (blank first entry, missing/multiple END segments, unknown Type, dangling
    tributary END reference, or duplicated conduits).
    """
    segtable = [] # list of dicts, where each dict is a row from one of the .map.csv files
    for fname in segmap_paths:
        with open(fname, 'r') as f:
            table = list(csv.DictReader(f))
            # if the table has rows, check that the header is right and add the rows to segtable
            if table:
                assert all(fieldname in ('Name', 'SWMM', 'Type', 'WASP') for fieldname in table[0].keys())
                for row in table:
                    row['segmap'] = os.path.basename(fname)
                segtable.extend(table)

    # fill any missing entries in the SWMM and WASP columns by carrying the previous
    # row's value down. A NameError here means the very first row was blank.
    for row in segtable:
        if not row['WASP']:
            try:
                row['WASP'] = prev_wasp
            except NameError:
                raise Exception("The first entry in " + row['segmap'] + ' needs a WASP number.')
        else:
            prev_wasp = row['WASP']

        if not row['SWMM']:
            try:
                row['SWMM'] = prev_swmm
            except NameError:
                raise Exception("The first entry in " + row['segmap'] + ' needs a SWMM number.')
        else:
            prev_swmm = row['SWMM']

    # renumber the WASP and SWMM columns sequentially: rows keep the same number
    # only while their original number AND source segmap file both match the
    # previous row; any change starts a new sequential segment number.
    current_swmm = 1
    current_wasp = 1
    for i, row in enumerate(segtable):
        if i == 0:
            prev_wasp = row['WASP']
            prev_swmm = row['SWMM']
            prev_segmap = row['segmap']
            row['WASP'] = current_wasp
            row['SWMM'] = current_swmm
        else:
            if row['WASP'] == prev_wasp and row['segmap'] == prev_segmap:
                row['WASP'] = current_wasp
            else:
                prev_wasp = row['WASP']
                current_wasp += 1
                row['WASP'] = current_wasp

            if row['SWMM'] == prev_swmm and row['segmap'] == prev_segmap:
                row['SWMM'] = current_swmm
            else:
                prev_swmm = row['SWMM']
                current_swmm += 1
                row['SWMM'] = current_swmm

            prev_segmap = row['segmap']

    # check that there is only one segment with an entry with type 'END' and no segment name. This indicates 
    # the end of the system. A tributary end would have type end, but the name for that entry would be the 
    # conduit it links into.
    num_end_segs = len([row for row in segtable if not row['Name'] and row['Type'] == 'END'])
    if not num_end_segs:
        raise Exception("Segmentation map is missing an END segment.")
    elif num_end_segs != 1:
        raise Exception('There are multiple terminating segments in the segment maps.')

    # capitalize all segment types
    for row in segtable:
        row['Type'] = row['Type'].upper()

    # check that all types are either CONDUIT, INFLOW, or END
    if any(row['Type'] not in ('CONDUIT', 'INFLOW', 'END') for row in segtable):
        raise Exception('Unknown Type found in seg map file')

    # a tributary end segment is an end segment with a name. The name should identify the segment 
    # to which the tributary connects.
    # extract trib connection conduit names and check that they exist somewhere in the system
    trib_connection_names = [row['Name'] for row in segtable if row['Type'] == 'END' and row['Name']]
    conduit_names = [row['Name'] for row in segtable if row['Type'] == 'CONDUIT']
    if not all([name in conduit_names for name in trib_connection_names]):
        raise Exception('There are END rows in your segment map(s) that refer to conduits not included in the map.')

    # check that there are no repeated conduits
    names = [row['Name'] for row in segtable if row['Type'] != 'END']
    if len(names) != len(set(names)):
        raise Exception('There are repeated conduits in one of the map files.')

    # necessary segment map characteristics have been checked, return the table.
    return segtable

# wall-clock time of the most recent timing() call, or None before the first call
current_start = None

def timing(s):
    """
    Lightweight profiling hook: stamp the module-level ``current_start`` with
    the current time. *s* labels the interval that just ended; the elapsed-time
    reporting is currently disabled (re-enable the commented lines to profile).
    """
    global current_start
    #if current_start is not None:
    #    elapsed = (datetime.now() - current_start).total_seconds()
    #    print s
    #    print elapsed / 60
    #    print elapsed
    #    print '\n'
    current_start = datetime.now()

def calc_swmm_vols(cur, timestep_secs):
    """
    Populate volumes2_b with the rows of volumes2_a plus the SWMM segment
    volume calculation.

    First the volume change per segment per timestep (delta_vol) is computed by
    inner joining volumes2_a with an aggregation of the flows table, then the
    vol column is filled in by integrating delta_vol forward in time for each
    segment: timestep 0 gets an initial volume of (flow / velocity) * length,
    and each later timestep gets the previous timestep's volume plus delta_vol,
    except that a zero depth (most likely due to backflow) resets vol to 0.

    cur : sqlite3 cursor on a database containing volumes2_a, volumes2_b,
        flows, and conduit_params2
    timestep_secs : reporting timestep length in seconds, used to convert the
        net flow rate into a volume change
    """

    # clear the values from the table, if there are any
    cur.execute("DELETE FROM volumes2_b")

    # delta_vol per (timestep, swmm) = (sum of inflows into the segment - outflow) * timestep length
    cur.execute("""
        INSERT INTO volumes2_b (timestep, wasp, swmm, depth, velocity, flow, delta_vol)
        SELECT v.timestep, v.wasp, v.swmm, v.depth, v.velocity, v.flow, (inflows.flow_in - v.flow) * ?
        FROM (
            SELECT SUM(flow) AS flow_in, timestep, swmm_sink
            FROM flows
            GROUP BY timestep, swmm_sink) AS inflows
        INNER JOIN volumes2_a v ON v.timestep = inflows.timestep AND v.swmm = inflows.swmm_sink
    """, (timestep_secs, ))

    # get ordered lists of timesteps and swmm segments. The timesteps MUST be
    # visited in ascending order because each iteration reads the vol written
    # for timestep t - 1.
    cur.execute('SELECT DISTINCT timestep FROM volumes2_b ORDER BY timestep')
    timesteps = [row[0] for row in cur.fetchall()]
    cur.execute('SELECT DISTINCT swmm FROM volumes2_b ORDER BY swmm')
    swmm_segs = [row[0] for row in cur.fetchall()]
    for swmm in swmm_segs:
        for t in timesteps:
            if t == 0:
                # for the first timestep, set vol to the initial volume derived
                # from the segment's flow, velocity, and conduit length
                cur.execute('SELECT flow, velocity FROM volumes2_b WHERE timestep = 0 AND swmm = ?', (swmm, ))
                flow, velocity = cur.fetchone()

                # obtain length information to calculate init_vol; every conduit
                # row for a swmm segment is expected to carry the same length
                cur.execute("SELECT DISTINCT length FROM conduit_params2 WHERE swmm = ?", (swmm, ))
                length = cur.fetchall()
                assert len(length) == 1
                length = length[0][0]

                # cross-sectional area (flow / velocity) times length
                init_vol = (flow / velocity) * length

                cur.execute("""
                    UPDATE volumes2_b
                    SET vol = ?
                    WHERE timestep = 0 AND swmm = ?
                """, (init_vol, swmm))
            else:
                # ... otherwise, get the volume written for the previous timestep
                cur.execute('SELECT vol FROM volumes2_b WHERE timestep = ? AND swmm = ?', (t - 1, swmm))
                prev_vol = cur.fetchall()
                assert len(prev_vol) == 1
                prev_vol = prev_vol[0][0]

                # fetch delta_vol and depth for the current step in one query;
                # depth serves as the flag for when to reset the volume to 0
                cur.execute('SELECT delta_vol, depth FROM volumes2_b WHERE timestep = ? AND swmm = ?', (t, swmm))
                rows = cur.fetchall()
                assert len(rows) == 1
                delta_vol, depth = rows[0]

                # when depth is 0, due to backflow most likely, set the volume to 0
                if depth == 0:
                    vol = 0
                else:
                    vol = prev_vol + delta_vol

                # update volumes2_b
                cur.execute('UPDATE volumes2_b SET vol = ? WHERE timestep = ? AND swmm = ?', (vol, t, swmm))

def process(dummy_end, 
            inppath, # path to SWMM .inp file
            outpath, # path to directory to write output
            segmap_paths, # segmap_paths
            binarypath, # path to SWMM binary output file
            correct_vol=False, # boolean value to determine whether or not flow should be corrected
            window_size=200,  # volume correction parameter
            basevol_scale=30,
            vol_rating_weight=.5,
            side=1,
            event_start=None, # event start datetime 
            event_end=None,  # event end datetime
            round_num=7, # precision to round data
            flow_weight_sums=False, # type of calculation to use for combining segment depth and velocity
            using_swmm_volume=False,
            use_inp_inflows=True,
            continue_at_correction=True,
            quit_before_correction=False
    ):

    quit_before_correction = False
    continue_at_correction = False
    
    db_name_path = binarypath
    dbname = os.path.splitext(os.path.basename(db_name_path))[0] + '.db'
    dbpath = os.path.join(os.path.dirname(outpath), dbname)

    # remove the database if it already exists
    if os.path.exists(dbpath) and not continue_at_correction:
        os.unlink(dbpath)

    conn = sqlite3.connect(dbpath)
    # set autocommit
    conn.isolation_level = None
    # obtain a cursor
    cur = conn.cursor()
    # optimize the database by turning off unneeded features
    cur.executescript("""
        PRAGMA synchronous=OFF;
        PRAGMA count_changes=OFF;
        PRAGMA journal_mode=OFF;
    """)

    if not continue_at_correction:
        filter_mins = None
        timing('')
        # the user must supply either a path to a binary out file or an rpt file
        #if not (binarypath or rptpath):
        #    raise Exception("Must supply either an SWMM .rpt path or a .out path.")

        # process the segmap files and return a validated list of dictionaries for each row in each segmap
        segtable = get_segtable(segmap_paths)

        cur.execute("""
            CREATE TABLE settings (
                using_swmm_volume integer,
                timestep_secs float,
                flow_weight_sums integer
            )     
        """)

        cur.execute("INSERT INTO settings (using_swmm_volume, flow_weight_sums) VALUES (?, ?)", 
            (int(using_swmm_volume), int(flow_weight_sums)))

        # create table to store the data in segtable so it can be used in queries to come
        cur.execute("""
            CREATE TABLE segs (
                wasp int,           -- wasp segment number
                DS_wasp int,        -- wasp segment number of the DS segment (initially NULL)
                swmm int,           -- swmm segment number 
                DS_swmm int,        -- swmm segment number of the DS segment (initiall NULL)
                type varchar(8),    -- element type, either 'CONDUIT', 'INFLOW', or 'END'
                name varchar(64)    -- name as it appears in the .rpt or binary file
            )""")

        for row in segtable:
            # load the segmap data into the segs tables
            cur.execute("""
                INSERT INTO segs (wasp, swmm, type, name) 
                VALUES (%(WASP)s, %(SWMM)s, '%(Type)s', '%(Name)s')
                """ % row)
        
        # get list of unique swmm-wasp segment pairs
        cur.execute('SELECT DISTINCT swmm, wasp FROM segs')
        segs = cur.fetchall()

        # get final wasp and swmm segs
        cur.execute('SELECT MAX(wasp) FROM segs')
        last_wasp_seg = cur.fetchone()[0]
        cur.execute('SELECT MAX(swmm) FROM segs')
        last_swmm_seg = cur.fetchone()[0]

        if not dummy_end:
            cur.execute("SELECT * FROM segs WHERE type = 'INFLOW' AND wasp = ?", (last_wasp_seg,))
            last_wasp_inflows = cur.fetchall()

            if last_wasp_inflows:
                msg = "If you choose not to have a dummy end segment, the final segment in " \
                     + "in your segmentation file can't have INFLOW elements. Please remove the INFLOWs " \
                     + "for the final WASP segment and restart, or restart and choose to use a dummy end segment."
                raise Exception(msg)

        #cur.execute("SELECT name FROM segs WHERE type = 'CONDUIT' AND wasp = ?", (last_wasp_seg,))
        #last_wasp_names = [row[0] for row in cur.fetchall()]

        # this loop cycles through the swmm segment numbers and updates DS_wasp and DS_swmm columns in the segs table. 
        # It also creates a dummy segment for the final segment, if this is requested by the user (dummy_end = True)
        for swmm, wasp in segs:
            # check if the current swmm segment includes an 'END' row
            cur.execute("SELECT name FROM segs WHERE swmm = ? AND type = 'END'", (swmm,)) 
            end_row = cur.fetchone()
            if end_row: 
                end_name = end_row[0]
                if end_name:
                    # if the end row includes the name of a conduit (meaning this segment is the end of a tributary, 
                    # not the end of a main trunk), then
                    # find the wasp and swmm segment numbers for the conduit and assign them to the DS_wasp and 
                    # DS_swmm columns for the conduits in this segment
                    
                    cur.execute("SELECT wasp, swmm FROM segs WHERE name = ? AND type <> 'END'", (end_name,))

                    DS_wasp, DS_swmm = cur.fetchone()
                    cur.execute("UPDATE segs SET DS_wasp = ?, DS_swmm = ? WHERE type = 'CONDUIT' AND swmm = ?", 
                        (DS_wasp, DS_swmm, swmm))
                else:
                    # ... otherwise, this is the end of the main stem. 
                    if dummy_end:
                        # if user wants a dummy end segment (dummy_end is True), add it to the segs table, and 
                        # make this the down stream segment for the actual final conduits
                        dummy_wasp = last_wasp_seg + 1
                        dummy_swmm = last_swmm_seg + 1
                        cur.execute("""
                            INSERT INTO segs (wasp, swmm, DS_wasp, DS_swmm, type, name)
                            SELECT ?, ?, 0, 0, 'DUMMY', name 
                                FROM segs WHERE swmm = ? AND type = 'CONDUIT' LIMIT 1
                            """, (dummy_wasp, dummy_swmm, swmm))

                        cur.execute("UPDATE segs SET DS_wasp = ?, DS_swmm = ? WHERE type = 'CONDUIT' AND swmm = ?", 
                            (dummy_wasp, dummy_swmm, swmm))
                    else:
                        # if the user doesn't want a dummy end segment, set the DS segments of the final 
                        # segment to the empty seg
                        cur.execute("UPDATE segs SET DS_wasp = ?, DS_swmm = ? WHERE type = 'CONDUIT' AND swmm = ?", 
                            (0, 0, swmm))
            else:
                # if this isn't the end segment, update the the DS segment numbers to the next segment
                cur.execute("UPDATE segs SET DS_wasp = ?, DS_swmm = ? WHERE type = 'CONDUIT' AND swmm = ?", 
                    (wasp + 1, swmm + 1, swmm))

        # for rows of type 'INFLOW', the DS_wasp and DS_swmm are the same as its swmm and wasp seg numbers
        cur.execute("UPDATE segs SET DS_wasp = wasp, DS_swmm = swmm WHERE type = 'INFLOW'")
        cur.execute("DELETE FROM segs WHERE type = 'END'")

        # write segment map to file
        with open(os.path.join(os.path.dirname(outpath), 'segment_map_out.csv'), 'w') as f:
            fieldnames = ['wasp', 'ds_wasp', 'swmm', 'ds_swmm', 'type', 'name']
            writer = csv.DictWriter(f, fieldnames = fieldnames, lineterminator='\n')
            writer.writeheader()
            cur.execute('SELECT * FROM segs ORDER BY wasp, swmm, type, name')
            for row in cur.fetchall():
                row = dict(zip(fieldnames, row))
                writer.writerow(row)

        # create a table to store the length and constant dwf values for conduits
        cur.execute("""CREATE TABLE conduit_params (
            name varchar(64), 
            length float, 
            inlet varchar(64), 
            dwf float DEFAULT 0,
            timeseries text,
            scaling_factor real
        )""")

        # the following block loops through the .inp and updates conduit_params with the lengths and DWFs for each conduit
        meters_in_foot = 0.3048
        with open(inppath, 'r') as f:
            # churn through the .inp lines until encountering the [OPTIONS] marker
            line = f.readline().strip()
            while not re.match(re.compile(r'\[opt', re.IGNORECASE), line.strip()):
                line = f.readline()
                if not line:
                    raise Exception("Can't find [OPTIONS] section in .inp file.")

            line = f.readline()

            # read the following variables from the OPTIONS sections
            timestep_secs = None
            sim_start_date = None
            sim_start_time = None
            start_date = None
            start_time = None
            end_date = None
            end_time = None
            while True:
                if re.match(re.compile(r'report_start_date', re.IGNORECASE), line.strip()):
                    _, start_date = line.split()
                elif re.match(re.compile(r'report_start_time', re.IGNORECASE), line.strip()):
                    _, start_time = line.split()
                elif re.match(re.compile(r'end_date', re.IGNORECASE), line.strip()):
                    _, end_date = line.split()
                elif re.match(re.compile(r'end_time', re.IGNORECASE), line.strip()):
                    _, end_time = line.split()
                elif re.match(re.compile(r'report_step', re.IGNORECASE), line.strip()):
                    _, ts_str = line.split()
                    hours, mins, secs = ts_str.split(':')
                    timestep_secs = float(hours) * 120 + float(mins) * 60 + float(secs)
                elif re.match(re.compile(r'start_date', re.IGNORECASE), line.strip()):
                    _, sim_start_date = line.split()
                elif re.match(re.compile(r'start_time', re.IGNORECASE), line.strip()):
                    _, sim_start_time = line.split()
                     
                if (not line) or re.match(r'\[', line.strip()):
                    if not timestep_secs:
                        raise Exception(".inp [OPTIONS] missing REPORT_STEP")
                    elif not start_time:
                        raise Exception(".inp [OPTIONS] missing REPORT_START_TIME")
                    elif not start_date:
                        raise Exception(".inp [OPTIONS] missing REPORT_START_DATE")
                    elif not end_date:
                        raise Exception(".inp [OPTIONS] missing REPORT_END_DATE")
                    elif not end_time:
                        raise Exception(".inp [OPTIONS] missing REPORT_END_TIME")
                    else:
                        try:
                            report_start_dtime = datetime.strptime(' '.join([start_date, start_time]), '%m/%d/%Y %H:%M:%S')
                            report_end_dtime = datetime.strptime(' '.join([end_date, end_time]), '%m/%d/%Y %H:%M:%S')
                            sim_start_dtime = datetime.strptime(' '.join([sim_start_date, sim_start_time]), '%m/%d/%Y %H:%M:%S')
                        except:
                            raise Exception('Unexpected datetime format encountered in .inp file for report dates.')
                        else:
                            break
                else:
                    line = f.readline()

            
            cur.execute("UPDATE settings SET timestep_secs = ?", (timestep_secs,))
            if (report_start_dtime.minute * 60 + report_start_dtime.second) % timestep_secs != 0:
                raise Exception("Report start datetime must be an integral multiple of the timestep seconds after some hour.")

            # if user hasn't set a start time, set it to the report time from the .inp
            first_report_timestamp = report_start_dtime + timedelta(seconds=timestep_secs)
            if not event_start:
                event_start = first_report_timestamp

            event_start = max(first_report_timestamp, event_start)

            # if user hasn't set an end time, set the end to the end time for the report
            if not event_end:
                event_end = report_end_dtime
            
            # get a list of all the conduit names from segs
            cur.execute("SELECT name FROM segs WHERE type = 'CONDUIT'")
            conduit_names = [row[0] for row in cur.fetchall()]
            
            # churn through the .inp lines until encountering the [CONDUITS] marker
            line = f.readline()
            if not line:
                raise Exception("Can't find [CONDUITS] section in .inp file")

            while not re.match(re.compile(r'\[cond', re.IGNORECASE), line.strip()):
                line = f.readline()
                if not line:
                    raise Exception("Can't find [CONDUITS] section in .inp file.")

            # loop through the lines in [CONDUITS] and for each conduit with its name in the list, 
            # update conduit params with that conduit's length and inlet node.
            line = f.readline()
            if not line:
                raise Exception("There are no conduits listed in the [CONDUIT] section of the .inp")
            line = line.strip()
            while not re.match(r'\[', line):
                if not (re.match('^$', line) or re.match(r';', line)):
                    name, inlet, _, length, _ = line.split(None, 4)
                    if name in conduit_names:
                        cur.execute('INSERT INTO conduit_params (name, length, inlet) VALUES (?, ?, ?)', 
                            (name, float(length) * meters_in_foot, inlet))
                line = f.readline().strip()

            # churn through the lines until encountering the [DWF] marker
            line = f.readline()
            if not line:
                raise Exception("Can't find [DWF] section in the .inp")
            line = line.strip()
            while not re.match(re.compile(r'\[dwf', re.IGNORECASE), line):
                line = f.readline().strip()

            # get a list of the inlet node names from conduit_params
            cur.execute('SELECT inlet FROM conduit_params')
            inlet_names = [row[0] for row in cur.fetchall()]

            cur.execute("""
                SELECT inlet 
                FROM conduit_params 
                WHERE name IN (
                    SELECT name FROM segs WHERE type = 'CONDUIT' AND wasp = ?
                )
            """, (last_wasp_seg,))

            last_wasp_inlets = [row[0] for row in cur.fetchall()]

            # cycle through the [DWF] section and for each node that is in our inlet list, update 
            # conduit_params with its DWF value
            line = f.readline().strip()
            while not re.match(r'\[', line):
                if not (re.match('^$', line) or re.match(r';', line)):
                    lsplit = line.split()
                    name, param, avg = lsplit[:3]
                    if name in inlet_names and param == 'FLOW':
                        dwf_value = round(float(avg) * pow(meters_in_foot, 3), round_num)
                        if (not dummy_end) and dwf_value != 0 and name in last_wasp_inlets:
                            msg = "Unless you are using a dummy end segment, the conduits in the final WASP segment " \
                                  + "must have 0 dry weather flow values for their inlets in the [DWF] section of the *.inp."
                            raise Exception(msg)
                        cur.execute('UPDATE conduit_params SET dwf = ? WHERE inlet = ?', 
                            (dwf_value, name))
                line = f.readline().strip()


        if use_inp_inflows:
            with open(inppath, 'r') as f:
                line = f.readline().strip()
                while line and not re.match(re.compile(r'\[infl', re.IGNORECASE), line.strip()):
                    line = f.readline()

                line = f.readline()
                names = ['node', 'parameter', 'time_series', 'param_type', 'units_factor', 'scale_factor', 'bl_value', 'bl_pat']
                cur.execute("SELECT inlet FROM conduit_params")
                inlet_names = [row[0] for row in cur.fetchall()]

                while line and not re.match(r'\[', line):
                    if not re.match(r';|^$', line.strip()):
                        line = dict(zip(names, line.split()))
                        if line['node'] in inlet_names:
                            cur.execute("""
                                UPDATE conduit_params
                                SET timeseries = ?, scaling_factor = ?
                                WHERE inlet = ?
                            """, (line['time_series'], float(line['scale_factor']), line['node']))
                    line = f.readline()

        ## create a second conduit_params table that adds the swmm number for each segment
        cur.execute("""
            CREATE TABLE conduit_params2 (
                name varchar(64), 
                swmm int, 
                length float, 
                dwf float DEFAULT 0,
                timeseries text,
                scaling_factor real
        )""")

        # populate the second conduit_params table with the current one inner joined with the segs table
        cur.execute("""
            INSERT INTO conduit_params2 (name, swmm, length, dwf, timeseries, scaling_factor)
                SELECT cp.name, segs.swmm, cp.length, cp.dwf, cp.timeseries, cp.scaling_factor
                FROM conduit_params AS cp INNER JOIN segs ON cp.name = segs.name
        """)

        cur.execute("""
            CREATE TABLE inflows (
                timeseries text,
                timestep int,
                value real
            )""")

        cur.execute("SELECT DISTINCT timeseries FROM conduit_params2")
        ts_names = [row[0] for row in cur.fetchall() if row[0] is not None]
        with open(inppath, 'r') as f:
            line = f.readline().strip()
            while not re.match(re.compile(r'\[time', re.IGNORECASE), line.strip()):
                line = f.readline()
                if not line:
                    if [x for x in ts_names if x]:
                        raise Exception("Error: [INFLOWS] references timeseries in .inp, but no [TIMESERIES] section found.")

            line = f.readline()

            current_series = ''
            series = []
            prev_ts_name = None
            dtime_counter = 0
            have_names = []
            prev_value = None
            prev_dtime = None
            this_series = []
            while line and not re.match(r'\[', line):
                if not re.match(r';|^$', line.strip()):
                    line = line.split(';')[0].split()
                    current_ts_name = line[0]
                    if current_ts_name in ts_names:
                        dtime = datetime.strptime(' '.join([line[1], line[2]]), '%m/%d/%Y %H:%M')

                        if current_ts_name != prev_ts_name:
                            if current_ts_name in have_names:
                                raise Exception('Timeseries out of order')
                            elif prev_ts_name is not None:
                                have_names.append(prev_ts_name)

                            prev_dtime = None
                            prev_value = None
                            series.extend(this_series)
                            this_series = []
                            dtime_counter = 0
                            total_seconds_since_start_of_hour = dtime.minute * 60 + dtime.second
                            if total_seconds_since_start_of_hour % timestep_secs != 0:
                                msg = "The inflows timeseries timestep must be an integral multiple of the reporting timestep."
                                raise Exception(msg)
                            
                        value = float(line[3])
                        if prev_dtime:
                            timeseries_timestep = (dtime - prev_dtime).total_seconds()
                            interp_slope = (value - prev_value) / timeseries_timestep
                            current_ts = 0
                            while current_ts <= timeseries_timestep:
                                current_dtime = prev_dtime + timedelta(seconds=current_ts)
                                current_value = (interp_slope * current_ts) + prev_value
                                #if current_dtime > event_start and current_dtime <= event_end:
                                if current_dtime >= event_start and current_dtime <= event_end:
                                    if this_series and current_ts == 0:
                                        _ = this_series.pop()
                                    this_series.append([current_ts_name, dtime_counter, current_value])
                                    #this_series.append([current_ts_name, dtime_counter, current_dtime, current_value])
                                    dtime_counter += 1 
                                current_ts += timestep_secs

                        prev_value = value
                        prev_dtime = dtime

                    prev_ts_name = current_ts_name

                line = f.readline()

        series.extend(this_series)

        cur.executemany("""
            INSERT INTO inflows (timeseries, timestep, value)
            VALUES (?, ?, ?)
        """, series)

        # create the first flows table. 
        # this table will hold the flow data for each segment, and operations on this table will eventually
        # lead to generating the flow series for the final hyd file.
        cur.execute("""
            CREATE TABLE flows (
                timestep int, 
                wasp_source int,    -- wasp seg number indicating which segment the flow is coming from
                wasp_sink int,      -- wasp seg number indicating which segment the flow is going to
                swmm_source int,
                swmm_sink int, 
                name varchar(64), 
                flow float
            )""")

        # create the first volumes table
        # this table will hold the volume, depth, and velocity data for all segments and the operations on this 
        # table and the tables derived from it will eventually lead to generating the data for the final volume, depth, and
        # velocity timeseries.
        cur.execute("""
            CREATE TABLE volumes (
                timestep int, 
                wasp int, 
                swmm int, 
                name varchar(64),
                init_vol float, 
                flow float, 
                volume float,
                depth float, 
                velocity float
            )""")

        # select one conduit to represent each swmm segment. The names of these representative conduits will 
        # be used to look up the base flows for their respective swmm segments.
        cur.execute("SELECT MAX(name) FROM segs WHERE type = 'CONDUIT' GROUP BY swmm")
        rep_conduits = [row[0] for row in cur.fetchall()]

        cur.execute("SELECT MAX(wasp) FROM segs")
        last_wasp = cur.fetchone()[0]
        
        # if the user provided their data in a swmm binary file, read this file into the volumes and flows tables.
        # get a list of segment names from the segs table
        cur.execute("SELECT DISTINCT name FROM segs")
        names = [row[0] for row in cur.fetchall()]
        # these are the names of the link variables as they need to be named to request them from 
        # the swmmout2sqlite function
        link_vars = ['flow_rate', 'flow_depth', 'flow_velocity']
        if using_swmm_volume:
            link_vars = link_vars + ['volume']
        # subcatchment variables
        catch_vars = ['runoff_rate']

        # read the links data into the database
        links_result = swmmout2sqlite(binarypath, dbpath, 'links', names=names, 
                variables=link_vars, start=event_start, end=event_end,
                ignore_missing_names=True, using_swmm_volume=using_swmm_volume)

        # read the subcatchments into the database
        # if this raises a SWMMOUT2SQLITE_NoNames error, that means there are no subcatchments in the binary file.
        try:
            catches_result = swmmout2sqlite(binarypath, dbpath, 'subcatchments', names=names, 
                    variables=catch_vars, start=event_start, end=event_end, ignore_missing_names=True, 
                    using_swmm_volume=using_swmm_volume)
        except SWMMOUT2SQLITE_NoNames:
            catches_result = None
        
        # these are the columns of the links and subcatchments table generated in the database by swmmout2sqlite
        link_fields = ['timestep', 'name'] + link_vars
        catch_fields = ['timestep', 'name'] + catch_vars

        # get a list of unique link names
        cur.execute("SELECT DISTINCT name FROM links")
        link_names = [row[0] for row in cur.fetchall()]
        
        # get a list of unique subcatchment names, if there are any
        if catches_result:
            cur.execute("SELECT DISTINCT name FROM subcatchments")
            catch_names = [row[0] for row in cur.fetchall()]
        else:
            catch_names = []

        # check if there are any conduits in the seg table that aren't in the links read from the binary file
        cur.execute("SELECT name FROM segs WHERE type = 'CONDUIT'")
        seg_conduits = [row[0] for row in cur.fetchall()]
        if [name for name in seg_conduits if name not in link_names]:
            raise Exception("There are conduits in the map file not found in the binary file.")

        # check if there are any inflows in neither the links table nor the subcatchment table
        cur.execute("SELECT name FROM segs WHERE type = 'INFLOW'")
        seg_inflows = [row[0] for row in cur.fetchall()]
        if [name for name in seg_inflows if name not in link_names + catch_names]:
            raise Exception("There are inflows in the map file not found in the binary file.")

        for name in names:
            # get each row from the seg table where this name appears
            cur.execute("SELECT wasp, swmm, DS_wasp, DS_swmm, type, name FROM segs WHERE name = ?", (name,))
            elements = cur.fetchall()

            # if this name belongs to a link, select its data from the links table 
            # and set variables identifying the column names of the links table. If this isn't a link,
            # get the data from the subcatchments table.
            if name in link_names:
                cur.execute("SELECT * FROM links WHERE name = ? ORDER BY timestep", (name,))
                fields = link_fields
                flow_field = 'flow_rate'
            else:
                cur.execute("SELECT * FROM subcatchments WHERE name = ? ORDER BY timestep", (name,))
                fields = catch_fields
                flow_field = 'runoff_rate'
                
            # zip the data with the field names to create a list of dicts, where each dict is a row.
            data = [dict(zip(fields, row)) for row in cur.fetchall()]

            # these lists will contain the unit converted values for their respective variables
            flows = []
            velocities = []
            depths = []
            volumes = []

            # if the user requested a moving average filter be passed over the data, calculate how many
            # timesteps correspond to the duration of the window they entered.
            if filter_mins:
                filter_steps = round((filter_mins * 60) / timestep_secs)

            # if the user wants a moving average window, these vectors will serve as the moving windows in the 
            # following loop.
            flow_window = []
            velocity_window = []
            depth_window = []
            volume_window = []

            # for each row in the data, convert it to the appropriate units. 
            # if a filter is to be applied, put the converted values into their respective window.
            # if the windows are full, calculate the average and append it to the main vectors, 
            # and then remove one element from the vector.
            for i, row in enumerate(data):
                flow = round(float(row[flow_field]) * pow(meters_in_foot, 3), round_num)
                depth = round(float(row.get('flow_depth', 0)) * meters_in_foot, round_num)
                velocity = round(float(row.get('flow_velocity', 0)) * meters_in_foot, round_num)
                volume = round(float(row.get('volume', 0)) * pow(meters_in_foot, 3), round_num)

                if filter_mins:
                    flow_window.append(flow)
                    velocity_window.append(velocity)
                    depth_window.append(depth)
                    volume_window.append(volume)

                    if len(flow_window) == filter_steps:
                        flows.append(sum(flow_window) / filter_steps)
                        del flow_window[0]
                        velocities.append(sum(velocity_window) / filter_steps)
                        del velocity_window[0]
                        depths.append(sum(depth_window) / filter_steps)
                        del depth_window[0]
                        volumes.append(sum(volume_window) / filter_steps)
                        del volume_window[0]
                else:
                    flows.append(flow)
                    velocities.append(velocity)
                    depths.append(depth)
                    volumes.append(volume)

            # for each element from the seg table corresponding to this element,
            # insert the appropriate series into a volumes and flows
            for element in elements:
                # extract its swmm/wasp numbering
                wasp, swmm, DS_wasp, DS_swmm, eltype, name  = element

                # the set sources to 0 if this is an INFLOW
                wasp_source, swmm_source = (0,0) if eltype =='INFLOW' else (wasp, swmm)

                #if dummy_end or DS_wasp != last_wasp or wasp_source != 0:
                # insert the flow data into the flows table
                cur.executemany("""
                    INSERT INTO flows (timestep, wasp_source, swmm_source, name, wasp_sink, swmm_sink, flow)
                    VALUES (?,?,?, ?,?,?, ?)
                """,
                [(i, wasp_source, swmm_source, name, DS_wasp, DS_swmm, flow) for i, flow in enumerate(flows)])

                # if this is a conduit or a dummy, we need to insert the relevant data into the volumes table.
                if eltype in ('CONDUIT', 'DUMMY'):
                    # if this is a representative conduit, look up the dwf value, and insert 
                    # a constant timeseries into flows
                    if eltype == 'CONDUIT' and name in rep_conduits:
                        cur.execute('SELECT dwf FROM conduit_params WHERE name = ?', (name,))
                        dwf = cur.fetchone()[0]

                        dwf_series = [dwf for _ in xrange(len(flows))]
                        if dwf == 0:
                            cur.execute("SELECT scaling_factor, timeseries FROM conduit_params2 WHERE name = ?", (name,))
                            scaling_factor, t_series = cur.fetchone()
                            if t_series:
                                cur.execute("SELECT value FROM inflows WHERE timeseries = ? ORDER BY timestep", (t_series, ))
                                dwf_series = [row[0] * scaling_factor for row in cur.fetchall()]
                                dwf_series = [round(x * pow(meters_in_foot, 3), round_num) for x in dwf_series]

                        if wasp != last_wasp:
                            cur.executemany("""
                                INSERT INTO flows (timestep, wasp_source, swmm_source, name, wasp_sink, swmm_sink, flow)
                                VALUES (?,?,?, ?,?,?, ?)
                            """, [(i, 0, 0, name + '_DWF', wasp, swmm, x) for i, x in enumerate(dwf_series)])

                    # get the length for the conduit and calculate the initial volume 
                    cur.execute("SELECT length FROM conduit_params WHERE name = ?", (name,))
                    length = cur.fetchone()[0]
                    init_vol = (flows[0] / velocities[0]) * length if velocities[0] else 0 


                    # insert the flow, depth, velocity series into the volumes table, along with the init_vol repeated 
                    # for each timestep.
                    if using_swmm_volume:
                        if eltype == 'DUMMY':
                            volumes = itertools.cycle([volumes[0]])
                        cur.executemany("""
                            INSERT INTO volumes (timestep, wasp, swmm, name, volume, depth, velocity)
                            VALUES (?,?,?, ?,?,?, ?)
                        """, [(i, wasp, swmm, name, tup[0], tup[1], tup[2])
                              for i, tup in enumerate(zip(volumes, depths, velocities))])
                    else:
                        cur.executemany("""
                            INSERT INTO volumes (timestep, wasp, swmm, name, init_vol, flow, depth, velocity)
                            VALUES (?,?,?, ?,?,?, ?,?)
                        """, [(i, wasp, swmm, name, init_vol, tup[0], tup[1], tup[2])
                              for i, tup in enumerate(zip(flows, depths, velocities))])

        timing('loading')

        # create indexes on the volumes and flows tables
        cur.execute('CREATE INDEX idx_flows ON flows(timestep, name)')
        cur.execute('CREATE INDEX idx_flows2 ON flows(swmm_sink, swmm_source, timestep)')
        cur.execute('CREATE INDEX idx_volumes ON volumes(timestep)')

        timing('make flow and volumes indexes')
    
        # check that all conduits in segs are represented in the volumes table
        cur.execute("SELECT DISTINCT name FROM segs WHERE type = 'CONDUIT'")
        segs_conduit_names = [row[0] for row in cur.fetchall()]
        cur.execute("SELECT DISTINCT name FROM volumes")
        volumes_names = [row[0] for row in cur.fetchall()]
        missing_conduits = [name for name in segs_conduit_names if name not in volumes_names]
        if missing_conduits:
            raise Exception('There are conduits in the segment map not found in the .rpt file: ' + ', '.join(missing_conduits))

        # check that all conduits and their DWFs are represented in the flows table
        cur.execute("SELECT DISTINCT name FROM segs WHERE name <> ''")
        segs_all_names = [row[0] for row in cur.fetchall()]
        cur.execute("SELECT DISTINCT name FROM segs WHERE type = 'CONDUIT'")
        dwf_conduits = rep_conduits
        if not dummy_end:
            cur.execute("SELECT MAX(wasp) FROM segs")
            max_wasp = cur.fetchone()[0]
            cur.execute("SELECT name FROM segs WHERE wasp = ?", (max_wasp,))
            final_conduits = [row[0] for row in cur.fetchall()]
            dwf_conduits = [name for name in rep_conduits if name not in final_conduits]
        dwf_names = [cond + '_DWF' for cond in dwf_conduits]
        cur.execute("SELECT DISTINCT name FROM flows")
        flows_names = [row[0] for row in cur.fetchall()]
        if set(segs_all_names + dwf_names) != set(flows_names):
            raise Exception('There are elements missing from the flows table.')

        # volumes2 is the same as volumes but with three additional columns, delta_vol, flow_in, and vol
        cur.execute("""
            CREATE TABLE volumes2 (
                timestep int, 
                wasp int, 
                swmm int, 
                name varchar(64), 
                flow float,
                volume float,
                swmm_vol float,
                depth float, 
                velocity float
            )""")

        if not using_swmm_volume:
            # the user may choose to either take arithmetic averages for combined segment depth and velocity, 
            # or use each segment's flow to weight its contribution to the combined velocity and depth
            if flow_weight_sums:
                depth_calc = """
                    CASE WHEN total_flow = 0
                        THEN (1/seg_count) * depth
                        ELSE depth * (ABS(flow) / total_flow)
                    END
                """
                velocity_calc = """
                    CASE WHEN total_flow = 0
                        THEN (1/seg_count) * velocity
                        ELSE velocity * (ABS(flow)/total_flow)
                    END
                """
            else:
                depth_calc = "depth * (1.0 / seg_count)"
                velocity_calc = "velocity * (1.0 / seg_count)"

            tflow_calc = "SUM(ABS(flow))"

            # inner join volumes with the aggregation of itself, which contains the total flow and segment count 
            # for each swmm segment. Use the aggregated values to calculate the weighted depth and velocity for 
            # each swmm segment and insert the results along with columns carried over from volumes into volumes2
            cur.execute(""" 
                INSERT INTO volumes2 (timestep, wasp, swmm, name, depth, velocity, flow)
                    SELECT v.timestep, v.wasp, v.swmm, v.name,""" + depth_calc + """,""" + velocity_calc + """, flow
                    FROM (
                        SELECT """ + tflow_calc + """ AS total_flow, timestep, swmm, COUNT(*) AS seg_count
                        FROM volumes
                        GROUP BY timestep, swmm) AS total_flows
                    INNER JOIN volumes v ON v.timestep = total_flows.timestep AND v.swmm = total_flows.swmm
                """)

        else:
            cur.execute("""
                INSERT INTO volumes2  (timestep, wasp, swmm, name, volume, depth, velocity, swmm_vol)
                SELECT v.timestep, v.wasp, v.swmm, v.name, v.volume, (v.volume / swmm_vols.swmm_vol) * v.depth, 
                    (v.volume / swmm_vols.swmm_vol) * v.velocity, swmm_vols.swmm_vol
                FROM (SELECT SUM(volume) AS swmm_vol, timestep, swmm
                    FROM volumes
                    GROUP BY swmm, timestep
                ) AS swmm_vols
                INNER JOIN volumes v ON v.timestep = swmm_vols.timestep AND v.swmm = swmm_vols.swmm
                """)
        cur.execute('CREATE INDEX idx_volumes2 ON volumes2(timestep, swmm)')
        timing('populate volumes2')

        # volumes2_a will contain the summed weighted depth, weighted velocity, and flow for each swmm segment in volumes2.
        cur.execute("""
            CREATE TABLE volumes2_a (
                timestep int,
                wasp int,
                swmm int,
                depth float,
                velocity float,
                flow float,
                volume float
            )
        """)

        if not using_swmm_volume:
            # total the depth, velocity, and flow in volumes2 by swmm segment and insert it into volumes2_a
            # at this stage the combination of all parallel and lateral swmm segments is complete.
            cur.execute("""
                INSERT INTO volumes2_a (timestep, wasp, swmm, depth, velocity, flow)
                SELECT timestep, wasp, swmm, SUM(depth), SUM(velocity), SUM(flow)
                FROM volumes2
                GROUP BY wasp, swmm, timestep
            """)
        else:
            cur.execute("""
                INSERT INTO volumes2_a (timestep, wasp, swmm, volume, depth, velocity)
                SELECT timestep, wasp, swmm, volume, depth, velocity
                FROM (SELECT timestep, wasp, swmm, SUM(volume) AS volume, SUM(depth) AS depth, SUM(velocity) AS velocity
                    FROM volumes2
                    GROUP BY wasp, swmm, timestep)
                """)

        cur.execute('CREATE INDEX idx_volumes2_a ON volumes2_a(timestep, swmm)')
        timing('populate volumes2_a')

        # this table gets populated by the calc_swmm_vols function defined below.
        # this differs from volumes2_a in that it includes delta_vol and vol columns
        cur.execute("""
            CREATE TABLE volumes2_b (
                timestep int,
                swmm int,
                wasp int,
                depth float,
                velocity float,
                flow float,
                delta_vol float,
                vol float NULL
            )
        """)
        cur.execute('CREATE INDEX idx_volumes2_b ON volumes2_b(timestep, swmm)')

        if not using_swmm_volume:
            cur.execute("SELECT timestep_secs FROM settings")
            timestep_secs = cur.fetchone()[0]
            # call the calc_swmm_vols() function above to populate volumes2_b with the calculated swmm volume time series
            calc_swmm_vols(cur, timestep_secs)

            # now that the volume has been calculated for all swmm segments, if the user wishes to correct
            # periods in segments where the volume doesn't return to base volume or drops below base volume, they can.
            cur.execute("SELECT DISTINCT swmm FROM volumes2_b ORDER BY swmm DESC")
            segs = [row[0] for row in cur.fetchall()]
            for seg in segs:
                cur.execute("""
                    SELECT vol, flow, velocity
                    FROM volumes2_b
                    WHERE swmm = ?
                    ORDER BY timestep
                """, (seg,))
                vol, flow, vel = zip(*cur.fetchall())
            
                running_total = 0
                cur.execute("""
                    SELECT DISTINCT length
                    FROM conduit_params2 WHERE swmm = ?
                """, (seg,))
                l = cur.fetchone()[0]

                qvl_vol = [f/v * l for f, v in zip(flow, vel)]
                vol_correction = [0 for _ in range(len(vol))]

                i = 0
                while i < len(vol):
                    lowerbound = int(max(0, i - side * window_size))
                    upperbound = int(min(len(vol), i + (1 - side) * window_size))

                    if lowerbound == upperbound:
                        if lowerbound < len(vol) - 1:
                            upperbound = lowerbound + 1
                        else:
                            lowerbound = upperbound - 1

                    error = qvl_vol[i] - vol[i]
                    correction = error - running_total
                    vol_correction[i] = correction
                    running_total += correction

                    i += 1

                balanced_flow = [c/timestep_secs for c in vol_correction]

                cur.executemany("""
                    UPDATE volumes2_a
                    SET flow = flow + ?
                    WHERE timestep = ? AND swmm = ?
                """, [(f, i, seg - 1) for i, f in enumerate(balanced_flow)])

                cur.executemany("""
                    UPDATE flows
                    SET flow = flow + ?
                    WHERE timestep = ? AND swmm_source = ? AND swmm_sink = ?
                """, [(f, i, seg - 1, seg) for i, f in enumerate(balanced_flow)])

                calc_swmm_vols(cur, timestep_secs)


    conn.commit()

    finalize(cur, outpath, round_num=round_num, 
        using_swmm_volume=using_swmm_volume, flow_weight_sums=flow_weight_sums)

def finalize(cur, outpath, round_num=5, using_swmm_volume=False, flow_weight_sums=False):
    """
        this function takes the results of the process function in the db pointed to 
        by dbpath and generates the final tables corresponding to how the output will be written
        to file. It also outputs the remaining time series

        cur : sqlite3 cursor on the working database; the volumes2 (and, when flows
            are derived, volumes2_b), flows, segs, and settings tables must already be
            populated by the process function
        outpath : path of the .hyd file written by export_data() at the end
        round_num : decimal places used when rounding the exported values
        using_swmm_volume : True when segment volumes were read directly from the
            SWMM binary file rather than derived from flow, velocity, and length
        flow_weight_sums : True to weight each component segment's depth/velocity by
            its share of the combined flow; False for plain arithmetic averages
    """

    # drop any output tables left over from a previous run so the CREATE statements below can't fail
    final_tables = ['flows2', 'flows_final', 'volumes_final', 'final', 'volumes3x', 'volumes4x']
    for t in final_tables:
        cur.execute("SELECT * FROM sqlite_master WHERE type='table' AND name = ?", (t,))
        if cur.fetchone():
            cur.execute("DROP TABLE " + t)

    # volumes3x will contain the weighted velocity and depth for combined wasp segments
    cur.execute("""
        CREATE TABLE volumes3x (
            timestep int, 
            wasp int, 
            vol float, 
            wasp_depth float, 
            wasp_velocity float,
            flow float,
            seg_count int
        )""")
    cur.execute('CREATE INDEX idx_volumes3x ON volumes3x(timestep, wasp)')

    # volumes4x will contain the summed depth and velocity for combined wasp segments
    cur.execute("""
        CREATE TABLE volumes4x (
            timestep int, 
            wasp int, 
            vol float, 
            depth float, 
            velocity float
        )""")
    cur.execute('CREATE INDEX idx_volumes4x ON volumes4x(timestep, wasp)')

    if using_swmm_volume:
        # weight each component's depth/velocity by its share of the total wasp segment volume
        cur.execute("""
            INSERT INTO volumes3x (timestep, wasp, vol, wasp_depth, wasp_velocity)
            SELECT v4.timestep, v4.wasp, v4.volume, (v4.volume / wasp_vols.wasp_vol) * v4.depth, 
                (v4.volume / wasp_vols.wasp_vol) * v4.velocity
            FROM (SElECT timestep, wasp, SUM(volume) AS wasp_vol
                FROM volumes2
                GROUP BY wasp, timestep
            ) AS wasp_vols
            INNER JOIN volumes2 v4 ON v4.timestep = wasp_vols.timestep AND v4.wasp = wasp_vols.wasp
            """)
    else:
        # volumes3x is populated with volume weighted depth and velocity by swmm segment.
        # formulas for weighting component segment velocities and depths for combined swmm segments.
        #
        # NOTE: the constant must be 1.0, not 1 -- SQLite performs integer division when
        # both operands are integers, so 1 / seg_count evaluates to 0 for seg_count > 1,
        # which zeroed the combined depth/velocity whenever the total flow was 0.
        if flow_weight_sums:
            depth_calc = """
                CASE WHEN wasp_flows.wasp_flow = 0
                    THEN (1.0 / wasp_flows.seg_count) * v2.depth
                    ELSE (ABS(v2.flow) / wasp_flows.wasp_flow) * v2.depth
                END
            """
            # BUG FIX: the ELSE branch previously multiplied by v2.depth, so the
            # flow-weighted combined velocity was actually computed from depths.
            velocity_calc = """
                CASE WHEN wasp_flows.wasp_flow = 0
                    THEN (1.0 / wasp_flows.seg_count) * v2.velocity
                    ELSE (ABS(v2.flow) / wasp_flows.wasp_flow) * v2.velocity
                END
            """
        else:
            velocity_calc = "velocity * (1.0 / wasp_flows.seg_count)"
            depth_calc = "v2.depth * (1.0 / wasp_flows.seg_count)"

        # formula for calculating total flow, note that this is only used when flow_weight_sums is True
        tflow_calc = "SUM(ABS(flow))"

        cur.execute("""
            INSERT INTO volumes3x  (timestep, wasp, vol, wasp_depth, wasp_velocity, flow, seg_count)
            SELECT v2.timestep, v2.wasp, v2.vol, """ + depth_calc + """, 
                """ + velocity_calc + """, v2.flow, wasp_flows.seg_count
            FROM (SELECT """ + tflow_calc + """ AS wasp_flow, timestep, wasp, COUNT(*) AS seg_count
                FROM volumes2_b
                GROUP BY wasp, timestep
            ) AS wasp_flows
            INNER JOIN volumes2_b v2 ON v2.timestep = wasp_flows.timestep AND v2.wasp = wasp_flows.wasp
        """)

    # sum the quantities weighted in the previous step and put the results into volumes4x
    cur.execute("""
        INSERT INTO volumes4x (timestep, wasp, vol, depth, velocity)
        SELECT timestep, wasp, SUM(vol), SUM(wasp_depth), SUM(wasp_velocity)
        FROM volumes3x
        GROUP BY wasp,timestep
    """)

    # aggregate segment inflows into single inflows and aggregate the flows for 
    # the final swmm segment for each wasp segment---these are the final interface flows.
    # NOTE(review): UNION (not UNION ALL) removes duplicate rows; the two arms are
    # assumed never to produce identical rows -- confirm if aggregated flows can coincide.
    cur.execute("CREATE TABLE flows2 (timestep int, wasp_source int, wasp_sink int, flow float)")
    cur.execute("""
        INSERT INTO flows2 (timestep, wasp_source, wasp_sink, flow)
            SELECT * 
            FROM (SELECT timestep, wasp_source, wasp_sink, SUM(flow) AS flow
                  FROM flows
                  WHERE wasp_source = 0
                  GROUP BY wasp_sink, timestep
                UNION SELECT timestep, wasp_source, wasp_sink, SUM(flow)
                    FROM flows
                    WHERE wasp_source <> 0 AND swmm_source IN (SELECT MAX(swmm) FROM segs GROUP BY wasp)
                    GROUP BY wasp_source, wasp_sink, timestep
            )
     """)

    # column names for the final table. The datatype indicates what kind of data the row is, 
    # volume/depth/velocity data (1) or flow data (2). ordering1 and 2 contain the values that will 
    # determine how the rows are finally ordered to produce the order found in .hyd files.
    final_colnames = ['datatype', 'timestep', 'wasp_source', 'wasp_sink', 'flow', 'vol', 'depth', 
        'velocity', 'ordering1', 'ordering2']
    # one SQL type per column name (previously had a stray 11th entry that zip() silently dropped)
    final_datatypes = ['int', 'int', 'int', 'int', 'float', 'float', 'float', 'float', 'int', 'int']

    # put them into a comma separated list that can be used in a query string
    final_cols = ', '.join([colname + ' ' + datatype for colname, datatype in zip(final_colnames, final_datatypes)])
    final_colnames_str = ', '.join(final_colnames)
    
    # create the final tables
    cur.execute("CREATE TABLE flows_final (" + final_cols + ")")
    cur.execute("CREATE TABLE volumes_final (" + final_cols + ")")
    cur.execute("CREATE TABLE final (" + final_cols + ")")

    # populate the flows_final table. note that the columns vol, depth, and velocity 
    # are NULL. These are included so that this table can be unioned with the volumes_final table shortly. 
    # flow data is ordered first by its wasp_source number, unless it is zero, in which case its wasp_sink number 
    # is used, and second by its wasp_source, regardless of whether its zero or not. This forces the order that 
    # each segment's outflow interface will follow the external flow interface for that segment.
    cur.execute("""
        INSERT INTO flows_final (""" + final_colnames_str + """)
            SELECT 2, timestep, wasp_source, wasp_sink, flow, NULL AS vol, NULL AS depth, NULL AS velocity, 
            CASE WHEN wasp_source = 0 THEN wasp_sink ELSE wasp_source END AS ordering1, wasp_source AS ordering2
            FROM flows2
        """)

    # populate the volumes_final table. note that the wasp_source and flow columns are null, 
    # as these columns only apply to the flow data.
    cur.execute("""
        INSERT INTO volumes_final (""" + final_colnames_str + """)
            SELECT 1, timestep, NULL AS wasp_source, wasp AS wasp_sink, NULL AS flow, vol, depth, velocity, 
                   wasp AS ordering1, wasp AS ordering2
            FROM volumes4x
        """)

    # combine the two final tables into one
    cur.execute("""
        INSERT INTO final (""" + final_colnames_str + """)
            SELECT  *
            FROM (
                SELECT * FROM flows_final
                UNION
                SELECT * FROM volumes_final
            )
        """)

    # create the ordering index used by export_data, unless it already exists
    final_idx_name = 'idx_final'
    cur.execute("SELECT * FROM sqlite_master WHERE name = ? AND type = 'index'", (final_idx_name,))
    if not cur.fetchone():
        cur.execute("CREATE INDEX " + final_idx_name + " ON final(timestep, datatype, ordering1, ordering2)")

    # write the .hyd file from the final table
    export_data(outpath, cur, final_colnames, round_num=round_num)


def export_data(outpath, cur, final_colnames, round_num=5):
    """
        write the contents of the final table to the .hyd file at outpath.

        outpath : path of the .hyd file to create
        cur : cursor on a database whose 'final' and 'settings' tables are populated
        final_colnames : column names of the final table, in SELECT * order
        round_num : decimal places used when rounding the exported values
    """
    cur.execute("SELECT timestep_secs FROM settings")
    timestep_secs = cur.fetchone()[0]

    with open(outpath, 'w') as hyd:
        # header numbers: the volume rows at timestep 0 give the segment count, 
        # the flow rows at timestep 0 give the interface count
        cur.execute('SELECT COUNT(*) FROM final WHERE timestep = 0 AND datatype = 1')
        seg_total = cur.fetchone()[0]
        cur.execute('SELECT COUNT(*) FROM final WHERE timestep = 0 AND datatype = 2')
        interface_total = cur.fetchone()[0]

        # the simulation duration is the last timestep index times the timestep length
        cur.execute('SELECT MAX(timestep) FROM final')
        duration_secs = timestep_secs * cur.fetchone()[0]

        sep = ' ' * 4
        # write the top line of the hyd file
        header_fields = [str(seg_total), str(interface_total), str(int(timestep_secs)),
                         '0', str(int(duration_secs)), '1']
        hyd.write(sep.join(header_fields) + '\n')

        # one line per interface: the source and sink wasp segment numbers, in output order
        cur.execute("""
            SELECT wasp_source, wasp_sink 
            FROM final 
            WHERE timestep = 0 AND datatype = 2 
            ORDER BY ordering1, ordering2""")
        for wasp_source, wasp_sink in cur.fetchall():
            hyd.write(str(wasp_source) + sep + str(wasp_sink) + '\n')

        # stream every row of final in output order and format each according to its
        # datatype: 1 = volume/depth/velocity record, 2 = flow record
        cur.execute('SELECT * FROM final ORDER BY timestep, datatype, ordering1, ordering2')
        for record in cur.fetchall():
            fields = dict(zip(final_colnames, record))
            if fields['datatype'] == 1:
                vol = str(round(fields['vol'], round_num))
                depth = str(round(fields['depth'], round_num))
                velocity = str(round(fields['velocity'], round_num))
                hyd.write(sep.join(['', vol, '0', depth, velocity]) + '\n')
            else:
                flow = str(round(fields['flow'], round_num))
                hyd.write(sep.join(['', flow]) + '\n')


def load_ini(path):
    """
        this function loads the input parameters for the process function. It also generates the hydmaker.ini 
        file with the settings chosen

        path : location at which the generated hydmaker.ini file is written (an
            existing file at this path is offered to the user first)
        returns : dictionary of settings
    """
    ini = {}

    def process_ini(path):
        # takes the path to an ini file, parses the settings, and returns them in a dictionary
        parser = ConfigParser.SafeConfigParser()
        parser.read(path)
        for key, value in parser.items('main'):
            # NOTE(security): eval() executes arbitrary expressions from the .ini
            # file (globals/locals are emptied but that is not a sandbox) --
            # only load settings files from trusted sources
            ini[key] = eval(value, {}, {})
            if key in ('event_start', 'event_end') and ini[key]:
                ini[key] = datetime.strptime(ini[key], '%Y-%m-%d %H:%M:%S')
        return ini

    def write_cfg(ini):
        # create a parser object for the settings and write them to file.
        parser = ConfigParser.SafeConfigParser()
        parser.add_section('main')
        for key, value in ini.items():
            if key in ('event_start', 'event_end') and value:
                value = value.strftime('%Y-%m-%d %H:%M:%S')
            # strings are stored as raw-string literals so that eval() in
            # process_ini() round-trips them (e.g. Windows paths)
            parser.set('main', str(key), "r'" + value + "'" if isinstance(value, str) else str(value))

        with open(path, 'w') as f:
            parser.write(f)

        # keep a copy of the settings actually used next to the output file
        with open(os.path.join(os.path.dirname(ini['outpath']), 'ini_file_used.ini'), 'w') as f:
            parser.write(f)

    # if any .ini file is found, ask user if they want to load it from file
    if os.path.exists(path):
        print("A settings file was found at " + path + ". Do you want to use the options in this file?")
        if raw_input("y/n >>> ")[0] in ('Y', 'y'):
            ini = process_ini(path)
            write_cfg(ini)
            return ini

    # error message to print to the user when they accidentally generate a keyboard interrupt exception
    key_err_msg = "Keyboard error. Try again, fat fingers\n"

    ### ask the user if they want to load their settings from an .ini file of their choosing
    while True:
        try:
            response = raw_input("Do you want to load the settings from a hydmaker.ini file? (y/n) >> ")[0]
        except (KeyboardInterrupt, IndexError):
            # bug fix: `except KeyboardInterrupt, IndexError:` bound the exception
            # to the name IndexError instead of catching both exception types
            print("Invalid input.")
            continue    # 'response' is unbound here, so re-prompt

        if response.lower() == 'y':
            while True:
                try:
                    ini_path = raw_input("Path to hydmaker.ini file >> ")
                except KeyboardInterrupt:
                    print('Invalid input.')
                    continue    # 'ini_path' is unbound here, so re-prompt

                if os.path.exists(ini_path) and not os.path.isdir(ini_path):
                    return process_ini(ini_path)

                print("Invalid path.")
        else:
            break

    msg = "Please note that this program assumes data coming from an .rpt or .out file are in US units: CFS, FEET, and " + \
          "feet per second."
    print(msg)

    ini['using_swmm_volume'] = False

    # is the user supplying volume data from a modified SWMM build?
    while True:
        try:
            using_swmm_volume = raw_input('Are you using the volume data from modified SWMM output? (y/n) >> ')
        except KeyboardInterrupt:
            print(key_err_msg)
            continue

        if using_swmm_volume.lower() == 'y':
            ini['using_swmm_volume'] = True
        elif using_swmm_volume.lower() == 'n':
            ini['using_swmm_volume'] = False

        break

    # get the path to the SWMM binary output file
    while True:
        try:
            binary_path = raw_input("Path to SWMM binary output file (*.out) >> ")
        except KeyboardInterrupt:
            print(key_err_msg)
            continue

        if os.path.exists(binary_path) and not os.path.isdir(binary_path):
            if os.path.splitext(binary_path)[1].lower() != '.out':
                print("That doesn't look like binary output file.")
            else:
                ini['binarypath'] = binary_path
                break
        else:
            print("Invalid path.")

    # get the path to the .inp file
    while True:
        try:
            inp_path = raw_input("Path to SWMM input file (*.inp) >> ")
        except KeyboardInterrupt:
            print(key_err_msg)
            continue    # 'inp_path' is unbound here, so re-prompt

        if os.path.exists(inp_path) and not os.path.isdir(inp_path):
            if os.path.splitext(inp_path)[1].lower() != '.inp':
                print("That doesn't look like an .inp file.")
            else:
                ini['inppath'] = inp_path
                break
        else:
            print("Invalid path.")

    # the segmentation map paths: collect one or more, an empty entry finishes the list
    segmap_paths = []
    while True:
        if segmap_paths:
            print('Segmentation file paths:')
            for i, segmap_path in enumerate(segmap_paths):
                print('  ' + str(i + 1) + '.  ' + os.path.basename(segmap_path))

        try:
            segmap_path = raw_input("Add a path to a segmentation file (press enter to continue) >> ")
        except KeyboardInterrupt:
            print(key_err_msg)
            continue    # 'segmap_path' would otherwise be stale/unbound

        if os.path.exists(segmap_path) and not os.path.isdir(segmap_path):
            if segmap_path in segmap_paths:
                print("You already entered that one.")
            else:
                segmap_paths.append(segmap_path)
        else:
            # an empty entry ends the list, but only after at least one path was given
            if not segmap_path and len(segmap_paths) > 0:
                ini['segmap_paths'] = segmap_paths
                break
            print("No such file.")

    # get the output directory path
    while True:
        try:
            output_dir = raw_input('Path to output directory >> ')
        except KeyboardInterrupt:
            print(key_err_msg)
            continue    # 'output_dir' is unbound here, so re-prompt

        if os.path.isdir(output_dir):
            break

        print("Invalid directory.")

    # get the name for the resulting hyd text file
    while True:
        try:
            out_name = raw_input('File name for .hyd text file output >> ')
        except KeyboardInterrupt:
            print(key_err_msg)
            continue    # 'out_name' is unbound here, so re-prompt

        if out_name:
            out_path = os.path.join(output_dir, out_name)

            if os.path.exists(out_path):
                try:
                    response = raw_input('A file with that name already exists. Do you want to overwrite it? (y/n) >> ')[0]
                except (KeyboardInterrupt, IndexError):
                    # bug fix: was `except KeyboardInterrupt, IndexError:` (see above)
                    print(key_err_msg)
                    continue    # 'response' would otherwise be stale/unbound

                if response.lower() == 'y':
                    ini['outpath'] = out_path
                    break
            else:
                ini['outpath'] = out_path
                break

    # determine if the user wants to include a final dummy segment. This option should be used if the 
    # final segment in the user's segment map has inflows. The final segment of a wasp model cannot have inflows.
    while True:
        try:
            msg = "WASP requires that the final segment have no external interface flows. " \
                  + "If the inlet nodes of any of the CONDUITs in your final WASP segment have " \
                  + "non-zero flow in the [DWF] section of the *.inp file or if there are any INFLOWs " \
                  + "in the final WASP segment, you must choose to create a dummy segment at the end " \
                  + "to meet WASP's requirements."
            print(msg)
            dummy_end = raw_input('Include final dummy segment? (y/n) >> ')
            if re.match('y|n', dummy_end, re.IGNORECASE):
                ini['dummy_end'] = bool(re.match('y', dummy_end, re.IGNORECASE))
                break
        except KeyboardInterrupt:
            print(key_err_msg)

    # prompt the user for a desired start and end time, if they don't want to use the start and end time of 
    # the rpt or binary file
    ini['event_start'] = None
    ini['event_end'] = None
    while True:
        try:
            set_event_limits = raw_input("Set event start and end dates? >> ")
            if re.match('n', set_event_limits, re.IGNORECASE):
                break
            elif re.match('y', set_event_limits, re.IGNORECASE):
                while True:
                    event_start = raw_input("event start (yyyy-mm-dd HH:MM:SS >> ")
                    try:
                        event_start = datetime.strptime(event_start, "%Y-%m-%d %H:%M:%S")
                    except ValueError:
                        # narrowed from a bare except: strptime raises ValueError on a bad date string
                        continue
                    else:
                        ini['event_start'] = event_start
                        break

                while True:
                    event_end = raw_input("event end (yyyy-mm-dd HH:MM:SS) >> ")

                    try:
                        event_end = datetime.strptime(event_end, "%Y-%m-%d %H:%M:%S")
                    except ValueError:
                        # narrowed from a bare except: strptime raises ValueError on a bad date string
                        continue
                    else:
                        ini['event_end'] = event_end
                        break
                break

        except KeyboardInterrupt:
            print(key_err_msg)

    write_cfg(ini)

    return ini

def extract_external_flows_percentage(cursor, outpath, WASP):
    """
        For each segment, write a CSV giving the fraction of the segment's total
        external inflow contributed by each named inflow at every timestep
        (inflows named like '%balbucket' are excluded).

        cursor : sqlite3 cursor on the hydmaker database ('flows' table)
        outpath : directory into which one '<segnum>.csv' per segment is written
        WASP : if True group by WASP segment numbers, otherwise by SWMM
    """
    # pick the column prefix and query only the matching list of segment numbers
    # (previously both the wasp and swmm lists were queried and one discarded)
    source = 'wasp' if WASP else 'swmm'
    cursor.execute("SELECT DISTINCT " + source + "_sink FROM flows WHERE " + source + "_sink > 0")
    segnums = [row[0] for row in cursor.fetchall()]

    for segnum in segnums:
        cursor.execute('SELECT DISTINCT name FROM flows WHERE ' + source + '_sink = ? AND ' + source + '_source = 0', (segnum, ))
        inflow_names = [row[0] for row in cursor.fetchall()]

        if inflow_names:
            # for every timestep, each external inflow's share of the total
            # external inflow into this segment (0 when the total is 0)
            cursor.execute("""
                SELECT f.timestep, f.name, f.flow, tf.total_flow, 
                    CASE WHEN tf.total_flow > 0 THEN f.flow / tf.total_flow ELSE 0 END
                FROM flows f INNER JOIN (
                    SELECT timestep, SUM(flow) AS total_flow
                    FROM flows 
                    WHERE """ + source + """_sink = ? AND """ + source+ """_source = 0 AND name NOT LIKE '%balbucket'
                    GROUP BY timestep) as tf
                ON f.timestep = tf.timestep
                WHERE f.""" + source + """_sink = ? AND f.""" + source + """_source = 0 AND f.name NOT LIKE '%balbucket'
                ORDER BY f.name, f.timestep
            """, (segnum, segnum))

            results = cursor.fetchall()

            if results:
                # column positions in the SELECT above
                timestep_col = 0
                name_col = 1
                pct_col = 4

                names = ['timestep']
                pct_columns = []
                # group the results by inflow name (the query orders by name) and
                # collect one percentage column per inflow
                # (dead code removed: the raw flow and total-flow columns were
                # also collected here but never written anywhere)
                for key, group in groupby(results, lambda row: row[name_col]):
                    names.append(key)
                    group = sorted(group, key = lambda row: row[timestep_col])
                    pct_columns.append([row[pct_col] for row in group])

                # add the timestep column, then transpose the columns into rows
                pct_columns.insert(0, range(0, len(pct_columns[0])))
                named_pct_rows = [dict(zip(names, row)) for row in zip(*pct_columns)]

                # write the external flow percentages to file
                with open(os.path.join(outpath, str(segnum) + '.csv'), 'w') as f:
                    writer = csv.DictWriter(f, fieldnames=names, lineterminator='\n')
                    writer.writeheader()
                    for row in named_pct_rows:
                        writer.writerow(row)

def extract_external_flows(cursor, outpath, percentage, WASP):
    """
        Write one CSV per segment containing each named external inflow as a
        column of raw flows per timestep. When percentage is truthy, delegates
        to extract_external_flows_percentage instead.

        cursor : sqlite3 cursor on the hydmaker database ('flows' table)
        outpath : directory into which '<segnum>_flow.csv' files are written
        percentage : write percentage shares instead of raw flows
        WASP : if True group by WASP segment numbers, otherwise by SWMM
    """
    if percentage:
        return extract_external_flows_percentage(cursor=cursor, outpath=outpath, WASP=WASP)

    # from here on percentage is falsy, so only raw flows are written
    # (dead code removed: a duplicate, unreachable `if percentage:` branch and
    # the percentage/total columns it consumed used to live below)
    source = 'wasp' if WASP else 'swmm'
    cursor.execute("SELECT DISTINCT " + source + "_sink FROM flows WHERE " + source + "_sink > 0")
    segnums = [row[0] for row in cursor.fetchall()]

    for segnum in segnums:
        cursor.execute('SELECT DISTINCT name FROM flows WHERE ' + source + '_sink = ? AND ' + source + '_source = 0', (segnum, ))
        inflow_names = [row[0] for row in cursor.fetchall()]

        if inflow_names:
            cursor.execute("""
                SELECT f.timestep, f.name, f.flow, tf.total_flow, 
                    CASE WHEN tf.total_flow > 0 THEN f.flow / tf.total_flow ELSE 0 END
                FROM flows f INNER JOIN (
                    SELECT timestep, SUM(flow) AS total_flow
                    FROM flows 
                    WHERE """ + source + """_sink = ? AND """ + source+ """_source = 0 
                    GROUP BY timestep) as tf
                ON f.timestep = tf.timestep
                WHERE f.""" + source + """_sink = ? AND f.""" + source + """_source = 0 
                ORDER BY f.name, f.timestep
            """, (segnum, segnum))

            results = cursor.fetchall()

            if results:
                # column positions in the SELECT above
                timestep_col = 0
                name_col = 1
                flow_col = 2

                names = ['timestep']
                flow_columns = []
                # group the results by inflow name (the query orders by name) and
                # collect one flow column per inflow
                for key, group in groupby(results, lambda row: row[name_col]):
                    names.append(key)
                    group = sorted(group, key = lambda row: row[timestep_col])
                    flow_columns.append([row[flow_col] for row in group])

                # add the timestep column, then transpose the columns into rows
                flow_columns.insert(0, range(0, len(flow_columns[0])))
                named_flow_rows = [dict(zip(names, row)) for row in zip(*flow_columns)]

                # write the external flows to file
                with open(os.path.join(outpath, str(segnum) + '_flow.csv'), 'w') as f:
                    writer = csv.DictWriter(f, fieldnames=names, lineterminator='\n')
                    writer.writeheader()
                    for row in named_flow_rows:
                        writer.writerow(row)


def extract_volumes(cursor, outpath, WASP):
    """
        Write one CSV per segment containing the volume, depth, and velocity
        timeseries, grouped by WASP segment (WASP=True, from the 'final' table)
        or by SWMM segment (from one of the volumes2_* tables).

        cursor : sqlite3 cursor on the hydmaker database
        outpath : directory into which one '<segment>.csv' per segment is written
        WASP : choose WASP or SWMM segment grouping
    """

    # the settings table records whether volumes came from modified SWMM output
    cursor.execute("SELECT using_swmm_volume FROM settings")
    using_swmm_volume = bool(cursor.fetchone()[0])
    if WASP:
        fieldnames = ['timestep', 'vol', 'depth', 'velocity']
        segs = cursor.execute('SELECT DISTINCT wasp_sink FROM final WHERE wasp_sink > 0')
        query = """
            SELECT """ + ','.join(fieldnames) + """
            FROM final 
            WHERE wasp_sink = ? AND datatype = 1
            ORDER BY timestep
        """
    else:
        # the volume column and source table differ depending on the volume source
        fieldnames = ['timestep'] + ['vol' if not using_swmm_volume else 'volume'] + ['depth', 'velocity']
        tname = 'volumes2_b' if not using_swmm_volume else 'volumes2_a'
        segs = cursor.execute("SELECT DISTINCT swmm FROM " + tname)
        query = """
            SELECT """ + ','.join(fieldnames) + """
            FROM """ + tname + """
            WHERE swmm = ? 
            ORDER BY timestep
        """

    # materialize the segment list before reusing the cursor below
    segs = [row[0] for row in segs]

    # the CSV header always says 'vol' even when the db column is 'volume'; the
    # query string was already built from the original fieldnames, so renaming
    # here is safe. (this rename previously ran inside the per-segment loop
    # guarded by a bare except)
    if 'volume' in fieldnames:
        fieldnames[fieldnames.index('volume')] = 'vol'

    for seg in segs:
        with open(os.path.join(outpath, str(seg) + '.csv'), 'w') as f:
            writer = csv.DictWriter(f, fieldnames = fieldnames, lineterminator='\n')
            writer.writeheader()
            cursor.execute(query, (seg, ))
            for row in cursor.fetchall():
                writer.writerow(dict(zip(fieldnames, row)))

def export_internal_flows(cursor, outdir, WASP):
    """
        extracts the internal (segment-to-segment) flows for either the wasp
        segments (WASP=True, 'flows_final' table) or swmm segments (WASP=False,
        'flows' table) and writes one '<source>.csv' per source segment to outdir.
    """

    # table/column prefix pair for the chosen segment numbering
    if WASP:
        table, prefix = 'flows_final', 'wasp'
    else:
        table, prefix = 'flows', 'swmm'

    fieldnames = ['timestep', 'flow', 'sink']

    cursor.execute('SELECT DISTINCT ' + prefix + '_source FROM ' + table + ' WHERE ' + prefix + '_source <> 0')
    sources = [row[0] for row in cursor.fetchall()]
    for source in sources:
        cursor.execute("""
            SELECT timestep, flow, """ + prefix + """_sink
            FROM """ + table + """  
            WHERE """ + prefix + """_source = ?
            ORDER BY """ + prefix + """_source, timestep
            """, (source, ))

        internal_flows = cursor.fetchall()

        # bug fix: the SWMM branch previously wrote to the undefined name
        # 'outpath' (the parameter is 'outdir'), raising NameError
        with open(os.path.join(outdir, str(source) + '.csv'), 'w') as f:
            writer = csv.DictWriter(f, fieldnames = fieldnames, lineterminator='\n')
            writer.writeheader()
            for row in internal_flows:
                writer.writerow(dict(zip(fieldnames, row)))

def extract_external_flow_percentages_by_WASP_segment(cursor, outpath):
    # external inflow percentages, grouped by WASP segment
    extract_external_flows(cursor=cursor, outpath=outpath, percentage=True, WASP=True)

def extract_external_flows_by_WASP_segment(cursor, outpath):
    # raw external inflows, grouped by WASP segment
    extract_external_flows(cursor=cursor, outpath=outpath, percentage=False, WASP=True)

def extract_external_flow_percentages_by_SWMM_segment(cursor, outpath):
    # external inflow percentages, grouped by SWMM segment
    extract_external_flows(cursor=cursor, outpath=outpath, percentage=True, WASP=False)

def extract_external_flows_by_SWMM_segment(cursor, outpath):
    # raw external inflows, grouped by SWMM segment
    extract_external_flows(cursor=cursor, outpath=outpath, percentage=False, WASP=False)

def extract_volumes_by_WASP_segment(cursor, outpath):
    # volume/depth/velocity timeseries, grouped by WASP segment
    extract_volumes(cursor=cursor, outpath=outpath, WASP=True)

def extract_volumes_by_SWMM_segment(cursor, outpath):
    # volume/depth/velocity timeseries, grouped by SWMM segment
    extract_volumes(cursor=cursor, outpath=outpath, WASP=False)

def extract_internal_flows_by_WASP_segment(cursor, outpath):
    # internal flows, grouped by WASP segment
    export_internal_flows(cursor, outpath, WASP=True)

def extract_internal_flows_by_SWMM_segment(cursor, outpath):
    # internal flows, grouped by SWMM segment
    # bug fix: previously passed WASP=True, so this wrapper silently produced
    # WASP-segment output instead of SWMM-segment output
    export_internal_flows(cursor, outpath, WASP=False)

def extract_consolidated_flows_by_WASP_segment(cursor, outpath):
    # write, for each WASP segment, its consolidated (wasp_source = 0) external
    # flow timeseries to '<segment>.csv' in outpath
    cursor.execute('SELECT DISTINCT wasp_sink FROM flows_final WHERE wasp_sink <> 0')
    segments = [record[0] for record in cursor.fetchall()]

    fieldnames = ['timestep', 'consolidated_external_flows']
    for segment in segments:
        cursor.execute('SELECT timestep, flow FROM flows_final WHERE wasp_sink = ? AND wasp_source = 0 ORDER BY timestep ', 
            (segment,))
        series = cursor.fetchall()

        with open(os.path.join(outpath, str(segment) + '.csv'), 'w') as f:
            writer = csv.DictWriter(f, fieldnames = fieldnames, lineterminator='\n')
            writer.writeheader()
            writer.writerows(dict(zip(fieldnames, record)) for record in series)


def extract_data():
    """
        Interactive entry point: prompt for a Hydmaker database, let the user
        pick which timeseries to extract, and write each selected series into
        its own subdirectory of a chosen output directory.
    """

    # prompt until an existing (non-directory) database path is given
    while True:
        dbpath = raw_input("Enter the path the Hydmaker database >> ")
        if os.path.exists(dbpath) and not os.path.isdir(dbpath):
            cnxn = sqlite3.connect(dbpath)
            cursor = cnxn.cursor()
            break
        else:
            print('Invalid path.')

    # menu labels; each also becomes the output subdirectory name (spaces -> underscores)
    labels = ['external flow percentages by WASP segment',
              'external flows by WASP segment',
              'external flow percentages by SWMM segment',
              'external flows by SWMM segment',
              'volumes by WASP segment',
              'volumes by SWMM segment',
              'internal flows by WASP segment',
              'internal flows by SWMM segment',  # typo fix: was 'interval flows by SWMM segment'
              'consolidated external flows by WASP segment']

    # the extraction function matching each label, in the same order
    extraction_functions = [extract_external_flow_percentages_by_WASP_segment,
        extract_external_flows_by_WASP_segment,
        extract_external_flow_percentages_by_SWMM_segment,
        extract_external_flows_by_SWMM_segment,
        extract_volumes_by_WASP_segment,
        extract_volumes_by_SWMM_segment,
        extract_internal_flows_by_WASP_segment,
        extract_internal_flows_by_SWMM_segment,
        extract_consolidated_flows_by_WASP_segment]

    print("Select the timeseries you want to extract.")
    chosen_labels = [False for _ in range(len(labels))]
    while True:
        # show the menu, marking already-chosen entries with an 'x'
        print('\n'.join([(str(i+1) if not chosen else 'x') + ' - ' + label for i, chosen, label in zip(range(len(labels)), chosen_labels, labels)]))
        choice = raw_input('(leave blank and press Enter to continue) >> ')
        if not choice:
            # an empty entry with nothing selected means "extract everything"
            if not any(chosen_labels):
                chosen_labels = [True for _ in chosen_labels]
                print("Extracting all timeseries.")
            break

        try:
            choice = int(choice)
        except ValueError:
            # narrowed from a bare except around an int()+assert pair
            print('Invalid input.')
        else:
            if 1 <= choice <= len(labels):
                chosen_labels[choice - 1] = True
            else:
                print('Invalid input.')

        if all(chosen_labels):
            break

    print("WARNING: any existing series will be overwritten")
    while True:
        series_path = raw_input("Enter the output directory >> ")
        if not os.path.isdir(series_path):
            print('Invalid path.')
        else:
            break

    for i, chosen in enumerate(chosen_labels):
        if chosen:
            # one subdirectory per series, emptied first if it already exists
            outdir = os.path.join(series_path, re.sub(' ', '_', labels[i]))
            if not os.path.isdir(outdir):
                os.mkdir(outdir)
            else:
                for fname in os.listdir(outdir):
                    os.unlink(os.path.join(outdir, fname))

            start = datetime.now()
            print(labels[i])
            extraction_functions[i](cursor, outdir)
            # report elapsed minutes for this extraction
            print((datetime.now() - start).total_seconds() / 60)

    # extractions are read-only, so no commit is needed before closing
    cnxn.close()


class MasterFrame(wx.Frame):

    title = 'Hydmaker volume correction'

    def __init__(self, dbpath, window_size, basevol_scale, vol_rating_weight, side):
        wx.Frame.__init__(self, None, -1, self.title)
        self.dbpath = dbpath
        self.cnxn = sqlite3.connect(dbpath)
        self.cnxn.row_factory = sqlite3.Row
        self.cnxn.isolation_level = None
        self.cursor = self.cnxn.cursor()
        self.cursor.execute("PRAGMA synchronous=OFF") # don't wait for disk writes to complete before continuing
        self.cursor.execute("PRAGMA count_changes=OFF") # don't count num. rows affected by DELETE, INSERT, or UPDATE
        self.cursor.execute("PRAGMA journal_mode=OFF") # turn off journaling
        self.Bind(wx.EVT_CLOSE, self.OnClose)

        self.cursor.execute("SELECT timestep_secs FROM settings")
        self.timestep_secs = self.cursor.fetchone()[0]
        
        self.default_settings = {
                'window_size' : window_size,
                'basevol_scale' : basevol_scale,
                'vol_rating_weight' : vol_rating_weight,
                'side' : side
        }

        self.cursor.execute("SELECT max(timestep) FROM volumes2_b")
        ntimesteps = self.cursor.fetchone()[0]

        self.cursor.execute("PRAGMA table_info(correction_settings)")
        if not self.cursor.fetchall():
            self.cursor.execute("""
                CREATE TABLE correction_settings (
                    swmm integer,
                    event_id int,
                    start integer,
                    window_size integer,
                    basevol_scale real,
                    vol_rating_weight real,
                    side real
                )
            """)

        self.cursor.execute("DELETE FROM correction_settings")

        self.panel = wx.Panel(self)
        self.dpi = 100
        self.fig = Figure(figsize=(15, 5), dpi=self.dpi)
        self.canvas = FigCanvas(self.panel, -1, self.fig)

        self.canvas.mpl_connect('pick_event', self.on_pick)

        self.menubar = wx.MenuBar()
        
        menu_file = wx.Menu()
        m_load = menu_file.Append(-1, "&Load correction settings")
        self.Bind(wx.EVT_MENU, self.load_correction_settings, m_load)
        m_export = menu_file.Append(-1, "&Export correction settings")
        self.Bind(wx.EVT_MENU, self.export_correction_settings, m_export)
        m_export_hyd = menu_file.Append(-1, "&Update database and export .hyd file")
        self.Bind(wx.EVT_MENU, self.export_hyd, m_export_hyd)
        menu_file.AppendSeparator()
        m_exit = menu_file.Append(-1, "E&xit")
        self.Bind(wx.EVT_MENU, self.OnClose, m_exit)
        
        self.menubar.Append(menu_file, "&File")
        self.SetMenuBar(self.menubar)

        self.statusbar = self.CreateStatusBar()

        self.fig.subplots_adjust(top=.9, bottom=.1, right=.98, left=.05)

        gs = matplotlib.gridspec.GridSpec(2, 1, height_ratios=[3,1])
        self.axes = self.fig.add_subplot(gs[0], picker=5)
        self.bv_axes = self.fig.add_subplot(gs[1], sharex=self.axes, picker=5)

        self.cursor.execute("SELECT DISTINCT swmm FROM volumes2_b WHERE swmm IS NOT NULL")
        self.swmm_segs = [row[0] for row in self.cursor.fetchall()]

        if not self.swmm_segs:
            raise Exception("That database is empty.")

        self.current_seg = None
        self.current_event = None

        self.current_interp_percentage = .01

        seg_choice_lbl = wx.StaticText(self.panel, -1, "SWMM Segment")
        self.seg_choice = wx.Choice(self.panel, -1, (100, 50), choices=[str(x) for x in self.swmm_segs])
        self.Bind(wx.EVT_CHOICE, self.on_seg_choice, self.seg_choice)

        self.prev_seg_btn = wx.Button(self.panel, -1, 'Previous Segment')
        self.prev_seg_btn.Disable()
        self.Bind(wx.EVT_BUTTON, self.on_prev, self.prev_seg_btn)

        self.next_seg_btn = wx.Button(self.panel, -1, 'Next Segment')
        self.Bind(wx.EVT_BUTTON, self.on_next, self.next_seg_btn)

        event_choice_lbl = wx.StaticText(self.panel, -1, 'Event')
        self.event_choice = wx.Choice(self.panel, -1, (100, 50), choices=[])
        self.Bind(wx.EVT_CHOICE, self.on_event_choice, self.event_choice)

        self.prev_event_btn = wx.Button(self.panel, -1, 'Previous Event')
        self.prev_event_btn.Disable()
        self.Bind(wx.EVT_BUTTON, self.on_prev_event, self.prev_event_btn)

        self.next_event_btn = wx.Button(self.panel, -1, 'Next Event')
        self.next_event_btn.Disable()
        self.Bind(wx.EVT_BUTTON, self.on_next_event, self.next_event_btn)

        self.delete_event_btn = wx.Button(self.panel, -1, 'Delete Event')
        self.Bind(wx.EVT_BUTTON, self.on_delete_event, self.delete_event_btn)
        self.delete_event_btn.Disable()

        class WinsizeValidator(wx.PyValidator):
            # validates that the neighborhood-size text field holds an integer
            # between 2 and max_size (inclusive)
            def __init__(self, max_size):
                wx.PyValidator.__init__(self)
                self.max_size = max_size

            def Clone(self):
                # wx requires every validator to be clonable
                return WinsizeValidator(self.max_size)

            def Validate(self, win):
                textCtrl = self.GetWindow()
                # NOTE(review): the message says 'greater than 2' but the check
                # below accepts 2 -- confirm the intended lower bound
                msg = "The neighborhood width needs to be greater than 2 and less than " + str(self.max_size)
                try:
                    value = int(textCtrl.GetValue())
                except (TypeError, ValueError):
                    # narrowed from a bare except: only a non-integer entry can fail here
                    wx.MessageBox(msg)
                    return False

                if value < 2 or value > self.max_size:
                    wx.MessageBox(msg)
                    return False
                else:
                    # bug fix: TextCtrl.SetValue requires a string; passing the
                    # int raised TypeError in wxPython
                    textCtrl.SetValue(str(value))
                    return True

            def TransferToWindow(self):
                return True

            def TransferFromWindow(self):
                return True

        class BVScaleValidator(wx.PyValidator):
            # validates that the DWV tolerance field holds a non-negative number
            def __init__(self):
                wx.PyValidator.__init__(self)

            def Clone(self):
                # wx requires every validator to be clonable
                return BVScaleValidator()

            def Validate(self, win):
                textCtrl = self.GetWindow()
                msg = "DWV tolerance rating has to be a number greater than zero."
                try:
                    value = float(textCtrl.GetValue())
                except (TypeError, ValueError):
                    # narrowed from a bare except: only a non-numeric entry can fail here
                    wx.MessageBox(msg)
                    return False

                # NOTE(review): the message says 'greater than zero' but 0 is
                # accepted here -- confirm the intended bound
                if value < 0:
                    wx.MessageBox(msg)
                    return False
                else:
                    return True

            def TransferToWindow(self):
                return True

            def TransferFromWindow(self):
                return True

        class ZeroOneValidator(wx.PyValidator):
            # validates that a text field holds a number in [0, 1]; 'name' is
            # used in the error message shown to the user
            def __init__(self, name):
                wx.PyValidator.__init__(self)
                self.name = name

            def Clone(self):
                # wx requires every validator to be clonable
                return ZeroOneValidator(self.name)

            def Validate(self, win):
                textCtrl = self.GetWindow()
                msg = self.name + " value should be a number between 0 and 1"
                try:
                    value = float(textCtrl.GetValue())
                except (TypeError, ValueError):
                    # narrowed from a bare except: only a non-numeric entry can fail here
                    wx.MessageBox(msg)
                    return False

                if value < 0 or value > 1:
                    wx.MessageBox(msg)
                    return False
                else:
                    return True

            def TransferToWindow(self):
                return True

            def TransferFromWindow(self):
                return True

        # --- correction-parameter text controls --------------------------------
        winsize_text_lbl = wx.StaticText(self.panel, -1, 'DWV rating neighborhood size:')
        self.winsize_text = wx.TextCtrl(self.panel, -1, "", size=(40, -1), validator=WinsizeValidator(int(ntimesteps/2)))
        self.Bind(wx.EVT_TEXT, self.on_setting_change, self.winsize_text)

        side_text_lbl = wx.StaticText(self.panel, -1, 'DWV rating neighborhood position:')
        self.side_text = wx.TextCtrl(self.panel, -1, "", size=(40, -1), validator=ZeroOneValidator('Neighborhood position'))
        self.Bind(wx.EVT_TEXT, self.on_setting_change, self.side_text)

        basevol_scale_lbl = wx.StaticText(self.panel, -1, 'DWV tolerance volume:')
        self.basevol_scale_text = wx.TextCtrl(self.panel, -1, "", size=(40, -1), validator=BVScaleValidator())
        self.Bind(wx.EVT_TEXT, self.on_setting_change, self.basevol_scale_text)

        vol_rating_weight_lbl = wx.StaticText(self.panel, -1, 'QinQout rating bias:')
        self.vol_rating_weight_text = wx.TextCtrl(self.panel, -1, "", size=(40, -1), validator=ZeroOneValidator("QinQout rating bias"))
        self.Bind(wx.EVT_TEXT, self.on_setting_change, self.vol_rating_weight_text)


        interp_size_lbl = wx.StaticText(self.panel, -1, 'Parameter transition percentage')
        self.interp_size_text = wx.TextCtrl(self.panel, -1, str(self.current_interp_percentage), size=(40, -1), 
                validator=ZeroOneValidator('Parameter transition percentage'))
        self.Bind(wx.EVT_TEXT, self.on_setting_change, self.interp_size_text)
        

        #self.vol_rating_weight_text.Disable()

        #self.area_based_rating_cb = wx.CheckBox(self.panel, -1, 'Use area based rating')
        #def on_area_based_rating_cb(event):
        #    if event.Checked():
        #        self.vol_rating_weight_text.Disable()
        #    else:
        #        self.vol_rating_weight_text.Enable()
        #    self.on_setting_change(event)

        #self.Bind(wx.EVT_CHECKBOX, on_area_based_rating_cb, self.area_based_rating_cb)
    
        # Update applies the edited parameters; disabled until something changes
        # (see on_setting_change / on_update_button).
        self.update_btn = wx.Button(self.panel, -1, "Update")
        self.Bind(wx.EVT_BUTTON, self.on_update_button, self.update_btn)
        self.update_btn.Disable()


        self.toolbar = NavigationToolbar(self.canvas)

        # --- layout: canvas + toolbar above a single row of controls -----------
        self.vbox = wx.BoxSizer(wx.VERTICAL)
        self.vbox.Add(self.canvas, 1, wx.LEFT | wx.TOP | wx.GROW)
        self.vbox.Add(self.toolbar, 0, wx.EXPAND)
        self.vbox.AddSpacer(10)

        self.hbox = wx.BoxSizer(wx.HORIZONTAL)
        flags = wx.ALIGN_LEFT | wx.ALL | wx.ALIGN_CENTER_VERTICAL

        self.hbox.Add(seg_choice_lbl, 0, border=3, flag=flags)
        self.hbox.Add(self.seg_choice, 0, border=3, flag=flags)
        self.hbox.Add(self.prev_seg_btn, border=3, flag=flags)
        self.hbox.Add(self.next_seg_btn, border=3, flag=flags)
        
        self.hbox.Add(event_choice_lbl, border=3, flag=flags)
        self.hbox.Add(self.event_choice, border=3, flag=flags)
        self.hbox.Add(self.prev_event_btn, border=3, flag=flags)
        self.hbox.Add(self.next_event_btn, border=3, flag=flags)
        self.hbox.Add(self.delete_event_btn, border=3, flag=flags)

        self.hbox.Add(self.update_btn, 0, border=3, flag=flags)
        self.hbox.Add(interp_size_lbl, 0, border=3, flag=flags)
        self.hbox.Add(self.interp_size_text, 0, border=3, flag=flags)

        self.hbox.Add(winsize_text_lbl, 0, border=3, flag=flags)
        self.hbox.Add(self.winsize_text, 0, border=3, flag=flags)

        self.hbox.Add(side_text_lbl, 0, border=3, flag=flags)
        self.hbox.Add(self.side_text, 0, border=3, flag=flags)

        self.hbox.Add(basevol_scale_lbl, 0, border=3, flag=flags)
        self.hbox.Add(self.basevol_scale_text, 0, border=3, flag=flags)

        #self.hbox.Add(self.area_based_rating_cb, 0, border=3, flag=flags)
        self.hbox.Add(vol_rating_weight_lbl, 0, border=3, flag=flags)
        self.hbox.Add(self.vol_rating_weight_text, 0, border=3, flag=flags)


        self.vbox.Add(self.hbox, 0, flag=wx.ALIGN_LEFT | wx.TOP)

        self.plot_changed = False

        self.panel.SetSizer(self.vbox)
        self.vbox.Fit(self)

        if self.swmm_segs:
            self.seg_choice.SetSelection(0)
            # NOTE(review): loads segment id 1 while selecting chooser index 0;
            # presumably the first entry of swmm_segs is always 1 -- confirm,
            # otherwise this should be self.load_seg(self.swmm_segs[0]).
            self.load_seg(1)

        self.Maximize()

    def on_prev_event(self, event):
        """Step the event chooser back one entry and load that event."""
        idx = self.event_choice.GetSelection() - 1
        self.event_choice.SetSelection(idx)
        self.update_event_buttons()

        self.load_event(self.current_seg, int(self.event_choice.GetItems()[idx]))

    def on_next_event(self, event):
        """Step the event chooser forward one entry and load that event."""
        idx = self.event_choice.GetSelection() + 1
        self.event_choice.SetSelection(idx)
        self.update_event_buttons()

        self.load_event(self.current_seg, int(self.event_choice.GetItems()[idx]))


    def update_event_buttons(self):
        """Enable/disable the prev/next/delete event buttons so they match
        the current position in the event chooser."""
        last = len(self.event_choice.GetItems()) - 1
        idx = self.event_choice.GetSelection()

        # prev/delete only make sense when not on the first event
        if idx > 0:
            self.prev_event_btn.Enable()
            self.delete_event_btn.Enable()
        else:
            self.prev_event_btn.Disable()
            self.delete_event_btn.Disable()

        # next only makes sense when not on the last event
        if idx < last:
            self.next_event_btn.Enable()
        else:
            self.next_event_btn.Disable()


    def on_delete_event(self, event):
        """Delete the currently-selected correction event for the current
        segment, re-run the correction and redraw (preserving the zoom)."""
        # remove the event's settings row from the database first
        self.cursor.execute("""
            DELETE FROM correction_settings
            WHERE swmm = ? AND event_id = ?
        """, (self.current_seg, self.current_event))
        # preserve the current zoom so the redraw doesn't reset the view
        xlim = self.axes.get_xlim()
        ylim = self.axes.get_ylim()

        events = self.event_choice.GetItems()
        event_str = events[self.event_choice.GetSelection()]
        events.remove(event_str)
        # fall back to event 1, which always exists for a segment
        self.load_event(self.current_seg, 1)
        self.delete_event_btn.Disable()

        self.correct()
        self.draw_figure(xlim, ylim)
        self.event_choice.SetItems(events)
        self.event_choice.SetSelection(0)
        self.statusbar.SetStatusText("Deleted event " + event_str + " in segment " + str(self.current_seg) + ".")
        self.update_event_buttons()

    def on_pick(self, event):
        """Matplotlib pick handler: pop a context menu that can insert a new
        parameter divider (correction event) at the clicked timestep."""
        # required for the popup menu not to raise an exception when multiple clicks are fired in sequence
        event.mouseevent.guiEvent.GetEventObject().ReleaseMouse() 
        plot_x = event.mouseevent.xdata
        plot_y = event.mouseevent.ydata

        # NOTE(review): m_x/m_y are classic-wxPython event attributes; confirm
        # the wx version in use still exposes them (Phoenix uses GetX()/GetY()).
        gui_x = event.mouseevent.guiEvent.m_x
        gui_y = event.mouseevent.guiEvent.m_y

        def action(event):
            # menu callback: create the new event at the clicked x position,
            # inheriting its settings from an existing earlier event
            start_point = int(plot_x)
            # NOTE(review): ascending ORDER BY + fetchone returns the earliest
            # event before the click, not the nearest preceding one -- confirm
            # which event's settings the new divider is meant to inherit.
            self.cursor.execute("""
                SELECT * 
                FROM correction_settings 
                WHERE swmm = ? AND start < ?
                ORDER BY start
            """, (self.current_seg, start_point))

            new_settings = dict(self.cursor.fetchone())

            self.cursor.execute("SELECT MAX(event_id) FROM correction_settings WHERE swmm = ?", (self.current_seg,))
            new_id = self.cursor.fetchone()[0] + 1

            new_settings['start'] = start_point
            new_settings['event_id'] = new_id

            self.cursor.execute("""
                INSERT INTO correction_settings (swmm, window_size, basevol_scale, vol_rating_weight, side, event_id, start)
                VALUES (:swmm, :window_size, :basevol_scale, :vol_rating_weight, :side, :event_id, :start)
            """, new_settings)

            # draw the divider line, its id label and the gray transition band
            self.axes.axvline(x=start_point, color='orange')
            self.bv_axes.axvline(x=start_point, color='orange')
            self.bv_axes.annotate(str(new_id), xy=(start_point + .01 * len(self.vol), .9),
                    bbox=dict(boxstyle='round,pad=0.5', fc='orange', alpha=.5))
            rwidth = int(self.current_interp_percentage * len(self.vol))
            llx = int(start_point - .5 * rwidth)
            lly = 0
            self.bv_axes.add_patch(Rectangle((llx, lly), rwidth, 1, color='gray'))
            # register the new event in the chooser and make it current
            current_event_choices = self.event_choice.GetItems()
            current_event_choices.append(str(new_id))
            self.event_choice.SetItems(current_event_choices)
            self.event_choice.SetSelection(len(current_event_choices) - 1)
            self.current_event = new_id
            self.load_event(self.current_seg, new_id)
            self.delete_event_btn.Enable()
            self.canvas.draw()

            self.update_event_buttons()
            self.update_btn.Enable()

        menu = wx.Menu()
        menu.Append(1, 'Add parameter divider')
        self.Bind(wx.EVT_MENU, action, id=1)
        #wx.EVT_MENU(menu, -1, self.callback)
        self.PopupMenu(menu, wx.Point(gui_x, gui_y))
        menu.Destroy()

    def update_next_prev_btns(self):
        """Sync the prev/next segment buttons with the chooser position."""
        idx = self.seg_choice.GetSelection()

        # prev only makes sense when not on the first segment
        if idx > 0:
            self.prev_seg_btn.Enable()
        else:
            self.prev_seg_btn.Disable()

        # next only makes sense when not on the last segment
        if idx < len(self.swmm_segs) - 1:
            self.next_seg_btn.Enable()
        else:
            self.next_seg_btn.Disable()

    def on_next(self, event):
        """Advance the segment chooser one entry, unless already at the end."""
        idx = self.seg_choice.GetSelection()
        if idx >= len(self.swmm_segs) - 1:
            return
        self.seg_choice.SetSelection(idx + 1)
        self.update_next_prev_btns()
        self.load_seg(self.swmm_segs[idx + 1])


    def on_prev(self, event):
        """Move the segment chooser back one entry, unless already at the start."""
        idx = self.seg_choice.GetSelection()
        if idx <= 0:
            return
        self.seg_choice.SetSelection(idx - 1)
        self.update_next_prev_btns()
        self.load_seg(self.swmm_segs[idx - 1])

    def on_setting_change(self, event):
        """Any edit to a parameter control makes the Update button relevant."""
        self.update_btn.Enable()

    def on_seg_choice(self, event):
        """Jump to the segment the user picked from the drop-down."""
        chosen = int(event.GetString())
        self.update_next_prev_btns()
        self.load_seg(chosen)

    def on_event_choice(self, event):
        """Load the chosen correction event and sync the delete button."""
        chosen = int(event.GetString())
        self.load_event(self.current_seg, chosen)
        # event 1 is the segment's baseline and may never be deleted
        if chosen > 1:
            self.delete_event_btn.Enable()
        else:
            self.delete_event_btn.Disable()

        self.update_event_buttons()

    def get_vol_and_qvl(self, seg, cursor=None):
        """Return (vol, qvl_vol) time series for SWMM segment `seg`.

        vol is the stored (Qin - Qout) volume series from volumes2_b;
        qvl_vol is the volume implied by flow and velocity, (Q / v) * length.

        cursor : optional sqlite cursor; defaults to self.cursor.
        """
        if not cursor:
            cursor = self.cursor

        cursor.execute("""
            SELECT vol, flow, velocity
            FROM volumes2_b
            WHERE swmm = ?
            ORDER BY timestep
        """, (seg,))
        vol, flow, vel = zip(*cursor.fetchall())

        cursor.execute("""
            SELECT DISTINCT length
            FROM conduit_params2 WHERE swmm = ?
        """, (seg,))

        l = cursor.fetchone()[0]
        # Guard against zero velocity (e.g. dry timesteps), which previously
        # raised ZeroDivisionError; report an implied volume of 0 there.
        qvl_vol = [f/v * l if v else 0.0 for f, v in zip(flow, vel)]

        return vol, qvl_vol

    #def set_correction_settings(self, seg, event):
    #    self.cursor.execute("SELECT * FROM correction_settings WHERE swmm = ?", (seg, event))
    #    result = self.cursor.fetchone()
    #    if not result:
    #        self.cursor.execute("""
    #            INSERT INTO correction_settings (swmm, window_size, basevol_scale, vol_rating_weight, side, event_id)
    #            VALUES (:swmm, :window_size, :basevol_scale, :vol_rating_weight, :side, :event)
    #        """, dict(self.default_settings.items() + [('swmm', seg), ('event_id', 1)]))

    #        return {key:value for key, value in self.default_settings.items()}
    #    else:
    #        self.current_settings = dict(zip([x[0] for x in self.cursor.description], result))

    def load_event(self, seg, event):
        """Load the correction settings for (seg, event) into the parameter
        text controls, inserting default settings as event 1 when the
        segment has none stored yet. Raises if `event` doesn't exist."""
        if self.current_seg != seg or self.current_event != event:
            self.current_event = event
            self.cursor.execute("SELECT * FROM correction_settings WHERE swmm = ?", (seg,))
            results = self.cursor.fetchall()
            if not results:
                # first time this segment is opened: persist the defaults as event 1
                non_correction_params = {
                    'swmm':seg,
                    'event_id':1,
                    'start':0
                }
                # NOTE: items() + items() concatenation is Python-2-only
                self.cursor.execute("""
                    INSERT INTO correction_settings (swmm, window_size, basevol_scale, vol_rating_weight, side, event_id, start)
                    VALUES (:swmm, :window_size, :basevol_scale, :vol_rating_weight, :side, :event_id, :start)
                """, dict(self.default_settings.items() + non_correction_params.items()))

                settings = {key:value for key, value in self.default_settings.items()}
            else:
                event_settings = [setting for setting in results if setting['event_id'] == event]
                if not event_settings:
                    raise Exception("No such event.")
                
                settings = event_settings[0]

            # reflect the loaded settings in the widgets; edits re-enable Update
            self.winsize_text.SetValue(str(settings['window_size']))
            self.side_text.SetValue(str(settings['side']))
            self.basevol_scale_text.SetValue(str(settings['basevol_scale']))
            self.vol_rating_weight_text.SetValue(str(settings['vol_rating_weight']))
            #self.interp_size_text.SetValue(str(self.current_interp_percentage))
            self.update_btn.Disable()

    def load_seg(self, seg_choice, refresh=False):
        """Load segment `seg_choice`: populate the event chooser, fetch its
        volume series, load its first event's settings, run the correction
        and redraw. No-op when the segment is already shown, unless
        refresh=True."""
        self.statusbar.SetStatusText(('Refreshing' if refresh else 'Loading') + " segment " + str(seg_choice) + " ...")
        if seg_choice != self.current_seg or refresh:
            #if self.current_seg is not None and not refresh:
            #    self.update_database_settings(self.current_seg, self.current_settings)


            self.cursor.execute("SELECT event_id FROM correction_settings WHERE swmm = ?", (seg_choice,))
            event_ids = [row[0] for row in self.cursor.fetchall()]
            if not event_ids:
                # no stored settings yet; load_event will insert defaults as event 1
                event_ids = [1]

            event = event_ids[0]
            self.event_choice.SetItems([str(x) for x in event_ids])
            self.event_choice.SetSelection(0)

            self.vol, self.qvl_vol = self.get_vol_and_qvl(seg_choice)
            # vol arrives as a tuple; corrected_vol is built by mutating a copy
            self.vol = list(self.vol)

            self.load_event(seg_choice, event)
            self.current_seg = seg_choice

            #self.set_correction_settings(self.current_seg, self.current_event)
            #self.update_settings_widgets()
            self.correct()
            self.update_btn.Disable()
            
            if refresh:
                self.statusbar.SetStatusText("Segment " + str(seg_choice) + ' refreshed.')
            else:
                self.statusbar.SetStatusText("Showing segment " + str(seg_choice) + '.')
            self.draw_figure()

    def draw_figure(self, xlim=None, ylim=None):
        """Redraw both plots for the current segment.

        Top axes: the Qin-Qout, Q/v*l and corrected volume series. Bottom
        axes: the three rating series plus, for each correction event, a
        divider line, its id label and the gray parameter-transition band.
        Optional xlim/ylim restore a previous zoom.
        """
        self.axes.clear()
        x = range(len(self.vol))
        self.axes.plot(x, self.vol, label='(Qin - Qout) * timestep')
        self.axes.plot(x, self.qvl_vol, label='(Q/v) * l')
        self.axes.plot(x, self.corrected_vol, label='corrected')

        self.axes.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3, ncol=3, borderaxespad=0., prop={'size':12})
        self.axes.set_ylabel('volume (meters-cubed)')
        self.axes.grid(True)

        self.bv_axes.clear()
        self.bv_axes.plot(self.vol_ratings, label='vol_rating')
        self.bv_axes.plot(self.qvl_ratings, label='qvl_rating')
        self.bv_axes.plot(x, self.basevol_ratings, label='basevol_rating')

        self.bv_axes.set_xlabel("timestep")
        self.bv_axes.set_ylabel("DWV tol. rating")
        self.bv_axes.grid(True)


        # mark every stored correction event on both plots
        self.cursor.execute("SELECT event_id, start FROM correction_settings WHERE swmm = ?", (self.current_seg,))
        for row in self.cursor.fetchall():
            if row['start'] != len(self.vol):
                #self.lines[row['event_id']] =
                self.axes.axvline(x=row['start'], color='orange')
                self.bv_axes.axvline(x=row['start'], color='orange')
            self.bv_axes.annotate(str(row['event_id']), xy=(row['start'] + 0.01 * len(self.vol), .9),
                    bbox=dict(boxstyle='round,pad=0.5', fc='orange', alpha=.5))

            # gray band showing where parameters interpolate between events
            rwidth = int(self.current_interp_percentage * len(self.vol))
            llx = int(row['start'] - .5 * rwidth)
            lly = 0
            self.bv_axes.add_patch(Rectangle((llx, lly), rwidth, 1, color='gray'))


        if xlim:
            self.axes.set_xlim(*xlim)
        if ylim:
            self.axes.set_ylim(*ylim)

        self.canvas.draw()

    #def update_settings(self):
    #    self.current_settings['window_size'] = int(self.winsize_text.GetValue())
    #    self.current_settings['side'] = self.side_rb.GetSelection() - 1
    #    self.current_settings['basevol_scale'] = float(self.basevol_scale_text.GetValue())
    #    self.current_settings['area_based_rating'] = bool(self.area_based_rating_cb.GetValue())
    #    self.current_settings['vol_rating_weight'] = float(self.vol_rating_weight_text.GetValue())

    #def update_settings_widgets(self):
    #    self.winsize_text.SetValue(str(self.current_settings['window_size']))
    #    self.side_rb.SetSelection(self.current_settings['side'] + 1)
    #    self.basevol_scale_text.SetValue(str(self.current_settings['basevol_scale']))
    #    #area_based_rating = self.current_settings['area_based_rating']
    #    #self.area_based_rating_cb.SetValue(area_based_rating)

    #    self.vol_rating_weight_text.SetValue(str(self.current_settings['vol_rating_weight']))
    #    #if area_based_rating:
    #    #    self.vol_rating_weight_text.Disable()
    #    #else:
    #    #    self.vol_rating_weight_text.Enable()
        
    def update_database_settings(self, seg, event):
        """Persist the current widget values as the correction settings for
        (segment `seg`, event `event`) in correction_settings."""
        settings = {
            'window_size' : int(self.winsize_text.GetValue()),
            'side' : float(self.side_text.GetValue()),
            'basevol_scale' : float(self.basevol_scale_text.GetValue()),
            'vol_rating_weight' : float(self.vol_rating_weight_text.GetValue())
        }

        # dict copy + update instead of the Python-2-only items() list
        # concatenation, so the parameter dict builds on Python 2 and 3
        params = dict(settings)
        params['swmm'] = seg
        params['event_id'] = event

        self.cursor.execute("""
            UPDATE correction_settings
            SET window_size = :window_size,
                basevol_scale = :basevol_scale,
                vol_rating_weight = :vol_rating_weight,
                side = :side
            WHERE swmm = :swmm AND event_id = :event_id
        """, params)

    def on_update_button(self, event):
        """Apply the edited parameters: save them to the database, re-run the
        correction, and redraw while preserving the current zoom."""
        xlim = self.axes.get_xlim()
        ylim = self.axes.get_ylim()

        self.statusbar.SetStatusText("Updating segment " + str(self.current_seg) + ".")
        self.update_database_settings(self.current_seg, self.current_event)
        self.current_interp_percentage = float(self.interp_size_text.GetValue())
        self.correct()
        self.draw_figure(xlim, ylim)
        self.update_btn.Disable()
        # fixed typo in the status message: "udpated" -> "updated"
        self.statusbar.SetStatusText("Segment " + str(self.current_seg) + " Event "  + str(self.current_event) + " updated.")

    def correct(self, cursor=None):
        """Compute corrected volumes for self.current_seg.

        For every timestep, the error between the Q/v*l volume and the
        Qin-Qout volume is damped by a "dry weather volume" rating derived
        from the segment's per-event correction settings (window_size, side,
        basevol_scale, vol_rating_weight); settings are linearly interpolated
        across event boundaries over a window sized by
        self.current_interp_percentage.

        Side effects: sets self.corrected_vol, self.vol_ratings,
        self.qvl_ratings and self.basevol_ratings.
        Returns the per-timestep correction increments.
        """
        if not cursor:
            cursor = self.cursor
        total_correction = 0
        vol_correction = [0 for _ in range(len(self.vol))]

        self.corrected_vol = copy.copy(self.vol)

        i = 0
        running_total = 0

        self.basevol_ratings = [0 for _ in range(len(self.vol))]

        self.vol_ratings = [0 for _ in range(len(self.vol))]
        self.qvl_ratings = [0 for _ in range(len(self.vol))]

        # NOTE(review): qvl_area/vol_area feed only the commented-out
        # area-based rating branch further down
        qvl_area = sum([x * self.timestep_secs for x in self.qvl_vol])
        vol_area = sum([x * self.timestep_secs for x in self.vol])

        def interpolate_settings(settings1, settings2, length):
            # one interpolated settings dict per timestep across the transition
            params = ['window_size', 'basevol_scale', 'vol_rating_weight', 'side']
            values = {}
            for param in params:
                slope = (settings2[param] - settings1[param]) / float(length)
                values[param] = [slope * x + settings1[param] for x in range(length)]

            new_settings = zip(*[values[param] for param in params])
            return [dict(zip(params, setting)) for setting in new_settings]

        cursor.execute("SELECT * FROM correction_settings WHERE swmm = ? ORDER BY start", (self.current_seg,))
        event_settings = cursor.fetchall()

        # force the transition window to an odd length so it centers on the
        # event's start timestep
        interpolate_length = int(self.current_interp_percentage * len(self.vol))
        if not interpolate_length % 2:
            interpolate_length = interpolate_length + 1
        half_interp_len = int(interpolate_length/2)

        # Build a flat list of settings plus the timesteps at which to switch
        # to the next one; events after the first contribute an interpolated
        # transition straddling their start timestep.
        point_settings = []
        switch_points = []
        for i, settings in enumerate(event_settings):
            start = settings['start']
            extension = None
            if i > 0:
                prior_setting = point_settings
                #start = int(start + half_interp_len) + 1
                interp_settings = interpolate_settings(event_settings[i-1], settings, interpolate_length)
                point_settings += interp_settings[1:]
                extension = range(start - half_interp_len + 1, start + half_interp_len + 1)
                switch_points.extend(extension)
                start = start + half_interp_len + 1

            point_settings.append(dict(settings))
            switch_points.append(start)

        # reversed so .pop() below consumes them in chronological order
        point_settings.reverse()
        switch_points.reverse()

        if switch_points:
            next_switch = switch_points.pop()
        else:
            # no stored settings for this segment: fall back to the defaults
            point_settings = [self.default_settings]
            next_switch = 0

        current_settings = None
        past_settings = []
        i = 0
        while i < len(self.vol):

            if i == next_switch:
                if current_settings:
                    past_settings.append(current_settings)
                current_settings = point_settings.pop()
                window_size = current_settings['window_size']
                side = current_settings['side']
                basevol_scale = current_settings['basevol_scale']
                vol_rating_weight = current_settings['vol_rating_weight']

                if switch_points:
                    next_switch = switch_points.pop()

            # neighborhood around i; `side` in [0, 1] skews it left/right
            lowerbound = int(max(0, i - side * window_size))
            upperbound = int(min(len(self.vol), i + (1 - side) * window_size))

            #if side == 0:
            #    lowerbound = max(0, i - int(window_size/2.0))
            #    upperbound = min(len(self.vol), i + int(window_size/2.0))
            #elif side == -1:
            #    lowerbound = max(0, i - window_size)
            #    upperbound = i
            #    if lowerbound == upperbound:
            #        if i + 1 < len(self.vol):
            #            upperbound += 1
            #        else:
            #            lowerbound = lowerbound - 1
            #else:
            #    lowerbound = i
            #    upperbound = min(len(self.vol), i + window_size)
            #    if lowerbound == upperbound:
            #        lowerbound = lowerbound - 1

            # guarantee a non-empty slice for the max/min below
            if lowerbound == upperbound:
                if lowerbound < len(self.vol) - 1:
                    upperbound = lowerbound + 1
                else:
                    lowerbound = upperbound - 1

            vol_diff = max(self.vol[lowerbound:upperbound]) - min(self.vol[lowerbound:upperbound])

            # ratings saturate at 1 once the neighborhood's range exceeds the
            # tolerance volume (basevol_scale)
            vol_rating = min(1, vol_diff / basevol_scale)
            self.vol_ratings[i] = vol_rating

            qvl_diff = max(self.qvl_vol[lowerbound:upperbound]) - min(self.qvl_vol[lowerbound:upperbound])
            qvl_rating = min(1, qvl_diff / basevol_scale)
            self.qvl_ratings[i] = qvl_rating

            bv_rating = vol_rating_weight * vol_rating + (1.0 - vol_rating_weight) * qvl_rating
            self.basevol_ratings[i] = bv_rating

            # damp the raw error by the combined rating: high rating (wet
            # weather) -> little correction; low rating (dry weather) -> pull
            # fully toward the Q/v*l volume
            error = self.qvl_vol[i] - self.vol[i]
            #if area_based_rating:
            #    if error > 0:
            #        error = error - (qvl_rating if vol_area > qvl_area else vol_rating) * error
            #    else:
            #        # consider the possibly of always using vol_rating when the error is negative
            #        error = error - (vol_rating if vol_area > qvl_area else qvl_rating) * error
            #else:
            error = error - bv_rating * error

            self.corrected_vol[i] += error
            # store only the increment over what's already been applied
            correction = error - running_total
            vol_correction[i] = correction
            running_total += correction

            i += 1

        return vol_correction

    def OnClose(self, event):
        """Release the database connection, then tear down the window."""
        self.cnxn.close()
        self.Destroy()

    def export_correction_settings(self, event):
        """Write every segment's correction settings to a CSV next to the
        database, filling in defaults for segments with no stored rows."""
        self.statusbar.SetStatusText("Exporting correction settings to database directory.")
        self.cursor.execute("SELECT * FROM correction_settings")
        rows = [dict(zip([x[0] for x in self.cursor.description], row)) for row in self.cursor.fetchall()]
        settings_segs = [row['swmm'] for row in rows]
        # segments never opened in the editor get a default event-1 row
        for seg in [seg for seg in self.swmm_segs if seg not in settings_segs]:
            new_row = copy.copy(self.default_settings)
            new_row['swmm'] = seg
            new_row['event_id'] = 1
            new_row['start'] = 0
            rows.append(new_row)

        # NOTE: list + dict.keys() concatenation is Python-2-only
        fieldnames = ['swmm', 'event_id', 'start'] + self.default_settings.keys()
        out_fname = os.path.splitext(os.path.basename(self.dbpath))[0] + '_correction_settings.csv'
        outpath = os.path.join(os.path.dirname(self.dbpath), out_fname)
        rows = sorted(rows, key=lambda x: x['swmm'])
        #for row in rows:
        #    row['area_based_rating'] = int(row['area_based_rating'])
        with open(outpath, 'w') as f:
            writer = csv.DictWriter(f, lineterminator='\n', fieldnames=fieldnames)
            writer.writeheader()
            writer.writerows(rows)

        self.statusbar.SetStatusText("Correction settings exported to database directory.")

    def export_hyd(self, event):
        """Kick off the finalize-and-export pipeline on a worker thread.

        This thread's sqlite handles are closed first because sqlite objects
        can't be shared across threads; the worker opens its own connection
        (sep_thread=True) and export_hyd_complete reopens ours afterwards."""
        self.cursor.close()
        self.cnxn.close()
        threading.Thread(target=self.finalize_and_export, args=(True,)).start()

    def finalize_and_export(self, sep_thread=False):
        """Apply the corrections to every segment and write the final .hyd file.

        For each segment, the per-timestep correction series is inserted into
        the flows table as a dummy "balance bucket" inlet, volumes are
        recalculated, and the corrected hydrodynamics file is written next to
        the database. Timesteps where the bucket completely overrides a
        segment's inflow are recorded in negative_inflows.csv and reported.

        sep_thread : when True (the worker-thread path started by export_hyd),
            open a private sqlite connection, since connections can't cross
            threads; the caller already closed self's handles.
        """
        if sep_thread:
            cnxn = sqlite3.connect(self.dbpath)
            cnxn.isolation_level = None
            cnxn.row_factory = sqlite3.Row
            cursor = cnxn.cursor()
            cursor.execute("PRAGMA synchronous=OFF") # don't wait for disk writes to complete before continuing
            cursor.execute("PRAGMA count_changes=OFF") # don't count num. rows affected by DELETE, INSERT, or UPDATE
            cursor.execute("PRAGMA journal_mode=OFF") # turn off journaling
        else:
            cursor = self.cursor
        self.statusbar.SetStatusText("Finalizing corrections. This may take a moment.")
        cursor.execute("SELECT timestep_secs FROM settings")
        timestep_secs = cursor.fetchone()[0]

        # drop balance buckets from any previous export so they aren't doubled
        #cursor.execute("DELETE FROM flows WHERE name LIKE '%balbucket' OR name LIKE '%nonzero'")
        cursor.execute("DELETE FROM flows WHERE name LIKE '%balbucket'")
        try:
            cursor.execute('CREATE INDEX idx_flows2 ON flows(swmm_sink, swmm_source, timestep)')
        except:
            # index presumably exists from a prior run; other errors are
            # swallowed here too (deliberate best-effort)
            pass

        # rebuild the negative_inflows diagnostic table from scratch
        cursor.execute("PRAGMA table_info(negative_inflows)")
        if cursor.fetchall():
            cursor.execute("DROP TABLE negative_inflows")

        cursor.execute("CREATE TABLE negative_inflows (swmm text, timestep integer, bucket real, total_flow real)")

        has_negative_inflows = False
        for seg in self.swmm_segs:
            self.vol, self.qvl_vol = self.get_vol_and_qvl(seg, cursor=cursor)
            self.vol = list(self.vol)
            self.current_seg = seg
            #self.load_event(self.current_seg, 1)
            #self.set_correction_settings(seg, cursor=cursor)
            correction = self.correct(cursor=cursor)

            # convert volume corrections to flow rates for the dummy inlet
            balance_bucket = [c/timestep_secs for c in correction]
            update_tups = []
            cursor.execute("""
                SELECT DISTINCT wasp
                FROM segs 
                WHERE swmm = ?
            """, (seg,))
            wasp = cursor.fetchone()[0]

            update_tups = [[i, 0, seg, 0, wasp, str(seg)+'balbucket', qin] for i, qin in enumerate(balance_bucket)]

            # update the flows table with the correction inlet 
            insert_qry = """
                INSERT INTO flows (timestep, swmm_source, swmm_sink, wasp_source, wasp_sink, name, flow) 
                VALUES (?, ?, ?, ?, ?, ?, ?)
            """
            cursor.executemany(insert_qry, update_tups)

            cursor.execute("""
                SELECT SUM(flow) 
                FROM flows 
                WHERE swmm_source = 0 AND swmm_sink = ?
                GROUP BY swmm_sink, timestep
                ORDER BY timestep
            """, (seg,))

            total_inflows = [row[0] for row in cursor.fetchall()]

            #nonzero_guarantee = [0 for _ in range(len(balance_bucket))]

            
            # flag timesteps where the bucket fires but total inflow is <= 0
            negative_inflows = []
            for i, bb, tot_inf in zip(itertools.count(), balance_bucket, total_inflows):
                if bb != 0 and tot_inf <= 0:
                    negative_inflows.append([seg, i, bb, tot_inf])

                    #nonzero_guarantee[total_inflows[0]] = 0.0001

            if negative_inflows:
                has_negative_inflows = True
                cursor.executemany("""
                    INSERT INTO negative_inflows (swmm, timestep, bucket, total_flow) VALUES (?, ?, ?, ?)""", negative_inflows)

            #update_tups = [[i, 0, seg, 0, wasp, str(seg) + 'nonzero', x] for i, x in enumerate(nonzero_guarantee)]
            #cursor.executemany(insert_qry, update_tups)

        cursor.execute("SELECT * FROM negative_inflows ORDER BY swmm, timestep")
        negative_inflow = [dict(row) for row in cursor.fetchall()]

        self.statusbar.SetStatusText("Corrections finalized, recalculating volumes.")

        # recalculate swmm volumes with the new dummy inlets
        # (calc_swmm_vols and finalize are defined elsewhere in this module)
        calc_swmm_vols(cursor, timestep_secs)

        try:
            cursor.execute("SELECT flow_weight_sums FROM settings")
        except:
            # older databases lack this settings column; default to False
            flow_weight_sums = False
        else:
            flow_weight_sums = bool(cursor.fetchone()[0])

        self.statusbar.SetStatusText("Volumes recalculated, preparing data .hyd file.")
        outpath = os.path.join(os.path.dirname(self.dbpath), 
                os.path.splitext(os.path.basename(self.dbpath))[0] + '_corrected.hyd.txt')
        finalize(cursor, outpath, 5, using_swmm_volume=False, flow_weight_sums=flow_weight_sums)

        if sep_thread:
            cursor.close()
            cnxn.close()

        if negative_inflow:
            # write the diagnostic CSV and warn the user
            zero_inflows_timesteps_outfile = os.path.join(os.path.dirname(self.dbpath), 'negative_inflows.csv')
            with open(zero_inflows_timesteps_outfile, 'w') as f:
                fieldnames = ['swmm', 'timestep', 'bucket', 'total_flow']
                writer = csv.DictWriter(f, fieldnames=fieldnames, lineterminator='\n')
                writer.writeheader()
                writer.writerows(negative_inflow)

            msg = "WARNING: there are timesteps where the correction balance bucket completely overrides segment inflows. " \
                  + "See output file " + os.path.basename(zero_inflows_timesteps_outfile) + "."
            print(msg)
            wx.MessageBox(msg)

        if sep_thread:
            # hand completion back to the GUI thread
            wx.CallAfter(self.export_hyd_complete)

    def export_hyd_complete(self):
        """Run on the GUI thread once the export worker finishes: report
        completion and reopen the database connection closed by export_hyd."""
        self.statusbar.SetStatusText("Export complete.")
        self.cnxn = sqlite3.connect(self.dbpath)
        self.cursor = self.cnxn.cursor()
        # re-apply the speed-oriented pragmas used by the rest of the app
        for pragma in ("PRAGMA synchronous=OFF",    # don't wait for disk writes to complete before continuing
                       "PRAGMA count_changes=OFF",  # don't count num. rows affected by DELETE, INSERT, or UPDATE
                       "PRAGMA journal_mode=OFF"):  # turn off journaling
            self.cursor.execute(pragma)

    def load_correction_settings(self, event):
        """Replace all stored correction settings with rows read from a
        user-chosen CSV, then refresh the current segment if one is shown."""
        wildcard = 'CSV file (*.csv)|*.csv'
        dlg = wx.FileDialog(self, message='Load correction settings', 
                defaultDir=os.path.dirname(self.dbpath),
                wildcard=wildcard,
                style=wx.OPEN)
        dlg.CenterOnParent()

        if dlg.ShowModal() == wx.ID_OK:
            self.statusbar.SetStatusText("Loading correction settings.")
            path = str(dlg.GetPaths()[0])

            with open(path, 'r') as f:
                # header row supplies the field names for DictReader
                fieldnames = f.readline()
                fieldnames = [n.strip() for n in fieldnames.split(',')]
                reader = csv.DictReader(f, fieldnames=fieldnames)

                # wholesale replace: existing settings are dropped first
                self.cursor.execute("DELETE FROM correction_settings")
                rows = list(reader)
                self.cursor.executemany("""
                    INSERT INTO correction_settings (swmm, window_size, basevol_scale, vol_rating_weight, side, event_id, start)
                    VALUES (:swmm, :window_size, :basevol_scale, :vol_rating_weight, :side, :event_id, :start)
                """, rows)

            self.statusbar.SetStatusText("Correction settings loaded.")
            if self.current_seg is not None:
                self.load_seg(self.current_seg, refresh=True)

class CorrectionParameter(object):
    """One user-tunable setting for the volume correction routine.

    variable   -- keyword name passed on to the correction procedure
    label      -- short human-readable name shown in menus
    value      -- current (initially default) value
    msg        -- longer description printed when the user edits it
    prompt     -- text shown at the input prompt
    conversion -- callable turning the raw input string into the value type
    validator  -- optional predicate the converted value must satisfy
    """
    def __init__(self, variable, label, value, msg, prompt, conversion, validator=None):
        # Store every constructor argument verbatim under the same name.
        fields = (('variable', variable), ('label', label), ('value', value),
                  ('msg', msg), ('prompt', prompt), ('conversion', conversion),
                  ('validator', validator))
        for attr, val in fields:
            setattr(self, attr, val)

def prompt_correction_parameters(default=False):
    """Return a dict mapping volume-correction parameter names to values.

    default -- when True, return the built-in defaults immediately with
        no prompting; when False, show an interactive menu letting the
        user override any parameter. Each entered value is converted
        (int/float) and validated before it is accepted.
    """
    correction_params = [
        CorrectionParameter(variable='window_size',
                            label='Dry weather volume rating neighborhood size',
                            value=800,  # old 600
                            msg="Sets the size of the neighborhood around a point that determines its dry weather volume rating.",
                            prompt="DWV rating neighborhood size",
                            conversion=int,
                            validator=lambda x: x > 1),
        CorrectionParameter(variable='basevol_scale',
                            label='Dry weather volume tolerance volume',
                            value=90,  # old 30
                            # typo fix: was "margnitude"
                            msg="Sets the scale on which the magnitude of the range of a dry weather volume neighborhood is graded",
                            prompt='DWV tolerance volume',
                            conversion=float,
                            validator=lambda x: x > 0),
        CorrectionParameter(variable='vol_rating_weight',
                            label='QinQout rating bias',
                            value=.5,
                            msg=("When the rating is calculated as an average of the QinQout and QVL volumes, by what "
                                 "percentage should the QinQout rating be preferred? This is only used if the area based."),
                            prompt='QinQout rating bias',
                            conversion=float,
                            validator=lambda x: x >= 0 and x <= 1),
        CorrectionParameter(variable='side',
                            label='Dry weather volume rating neighborhood position',
                            value=.5,
                            msg="How should the neighborhood be positioned around the point?",
                            prompt='DWV rating neighborhood position (a number between 0 and 1)',
                            conversion=float,
                            validator=lambda x: x >= 0 and x <= 1)
        ]

    if not default:
        while True:
            msg = "There are " + str(len(correction_params)) + " parameters that you can set to change the behavior " + \
                "of the volume correction procedure."
            print(msg)
            menu = [str(i) + ' - ' + param.label + ' (current value: ' + str(param.value) + ')' 
                    for i, param in enumerate(correction_params)]
            print('\n'.join(menu))
            print("Enter the parameter number you want to change. Leave blank and press enter to continue.")
            choice = raw_input(">> ")

            if choice == '':
                break

            try:
                choice = int(choice)
                assert 0 <= choice < len(correction_params)
            # was a bare except: don't swallow KeyboardInterrupt/SystemExit
            except Exception:
                print("Invalid choice.")
            else:
                while True:
                    param = correction_params[choice]
                    print("Volume correction parameter " + str(choice) + ": " + param.label)

                    print("Description: " + param.msg)
                    print("Current value: " + str(param.value))
                    value = raw_input(param.prompt + ' >> ')

                    try:
                        value = param.conversion(value)
                        if param.validator:
                            assert param.validator(value)
                    # conversion or validation failed; re-prompt
                    except Exception:
                        print("Invalid value.")
                    else:
                        print("Correction parameter value set.")
                        param.value = value
                        break

    return {param.variable: param.value for param in correction_params}


def correct_data():
    """Interactively launch the graphical volume-correction tool.

    Prompts for an existing Hydmaker database path, optionally lets the
    user override the default correction parameters, then starts the wx
    main loop with a MasterFrame on that database.
    """
    while True:
        dbpath = raw_input('Hydmaker database path >> ')
        if os.path.exists(dbpath):
            break
        print("No such database found.")

    # Start from the defaults; the user may override them below.
    correction_param_args = prompt_correction_parameters(default=True)

    # determine if user wants to change the correction parameters
    while True:
        msg = "There are several parameters that you can change to control the behavior of the volume correction " \
            + "procedure. Would you like to change them from their default values?"
        print(msg)
        try:
            first_char = raw_input("Change default volume correction parameters? (y/n) >> ")[0]
        except IndexError:
            # empty response; was a bare except
            print("Invalid input.")
        else:
            if re.match('y', first_char, re.IGNORECASE):
                correction_param_args = prompt_correction_parameters()
            break

    print("Running graphical correction tool ... ")
    app = wx.PySimpleApp()
    app.frame = MasterFrame(dbpath, **correction_param_args)
    app.frame.Show()
    app.MainLoop()

def run():
    """Top-level interactive menu.

    Options: (1) generate an ASCII .hyd file from hydmaker.ini settings,
    (2) extract timeseries from an existing Hydmaker database, or
    (3) run the graphical volume-correction tool.
    """
    print('Hydmaker')
    print('-' * 20)
    print("Options")
    opts = ['Generate an ASCII .hyd file.', 
            'Extract timeseries from existing Hydmaker database.',
            'Correct hydmaker database volume']
    while True:
        print('\n'.join([str(i + 1) + ' - ' + opt for i, opt in enumerate(opts)]))
        choice = raw_input('Enter option number >> ')
        try:
            choice = int(choice)
            # was "choice <= len(opts) + 1", an off-by-one that let the
            # out-of-range value 4 through to a dead "Invalid input." branch
            assert 1 <= choice <= len(opts)
        except (ValueError, AssertionError):
            print("Invalid choice.")
        else:
            if choice == 1:
                # load settings
                ini = load_ini(os.path.join(curdir(), 'hydmaker.ini'))

                start = datetime.now()
                # call main function, passing settings as arguments
                print("target directory: " + os.path.dirname(ini['outpath']))
                print('Processing ... ')
                process(**ini)

                print('done')
                # report elapsed wall-clock time in minutes
                print((datetime.now() - start).total_seconds() / 60)
            elif choice == 2:
                extract_data()
            else:
                # choice == 3, guaranteed by the assertion above
                correct_data()
            break


# Entry point: run the interactive menu only when executed as a script,
# not when this module is imported.
if __name__ == '__main__':
    run()

