import sqlite3, re, os, traceback, code, csv, glob, argparse, pdb
from math import floor
from contextlib import closing
from datetime import datetime
from itertools import groupby
from copy import copy
import shutil

def tune_db(cursor):
    """Apply speed-over-safety PRAGMA settings to a SQLite cursor.

    Turns off synchronous disk waits, DML change counting, and journaling
    so bulk inserts run as fast as possible, trading away durability
    guarantees (acceptable here: the database is rebuilt from scratch).
    """
    speed_pragmas = (
        "PRAGMA synchronous=OFF",    # don't wait for disk writes to complete before continuing
        "PRAGMA count_changes=OFF",  # don't count num. rows affected by DELETE, INSERT, or UPDATE
        "PRAGMA journal_mode=OFF",   # turn off journaling
    )
    for pragma in speed_pragmas:
        cursor.execute(pragma)

def process(filter_mins, 
            dummy_end, 
            rptpath, 
            inppath, 
            outpath, 
            segmap_paths, 
            correct_vol=False, 
            window_size=400, 
            correction_threshold=0.0005, 
            abs_basevol_signal=5, 
            event_start=None, 
            event_end=None, 
            negative_threshold_pct=0.2,
            micro_window_pct=.25,
            baseconditions_rpt=None,
            baseflow_smoothing_days=None,
            missing_inflows_are_zero=True
            ):

    print 'correction_threshold', correction_threshold 
    segtable = [] # this will be a list of dicts, where each dict is a row from one of the .map.csv files
    for fname in segmap_paths: # get a list of all of the .map.csv files
        with open(fname, 'r') as f:
            table = list(csv.DictReader(f))
            if table:
                assert all(fieldname in ('Name', 'SWMM', 'Type', 'WASP') for fieldname in table[0].keys())
                segtable.extend(table)

    for row in segtable:
        if not row['WASP']:
            row['WASP'] = prev_wasp
        else:
            prev_wasp = row['WASP']

        if not row['SWMM']:
            row['SWMM'] = prev_swmm
        else:
            prev_swmm = row['SWMM']

    current_swmm = 1
    current_wasp = 1
    for i, row in enumerate(segtable):
        if i == 0:
            prev_wasp = row['WASP']
            prev_swmm = row['SWMM']
            row['WASP'] = current_wasp
            row['SWMM'] = current_swmm
        else:
            if row['WASP'] == prev_wasp:
                row['WASP'] = current_wasp
            else:
                prev_wasp = row['WASP']
                current_wasp += 1
                row['WASP'] = current_wasp

            if row['SWMM'] == prev_swmm:
                row['SWMM'] = current_swmm
            else:
                prev_swmm = row['SWMM']
                current_swmm += 1
                row['SWMM'] = current_swmm

    if len([row for row in segtable if not row['Name'] and row['Type'] == 'END']) != 1:
        raise Exception('There are multiple terminating segments in the segment maps.')

    if any(row['Type'] not in ('CONDUIT', 'INFLOW', 'END') for row in segtable):
        raise Exception('Unknown Type found in seg map file')

    trib_connection_names = [row['Name'] for row in segtable if row['Type'] == 'END' and row['Name']]
    conduit_names = [row['Name'] for row in segtable if row['Type'] == 'CONDUIT']
    if not all([name in conduit_names for name in trib_connection_names]):
        raise Exception('There are END rows in your segment map(s) that refer do conduits not included in the map.')

    names = [row['Name'] for row in segtable if row['Type'] != 'END']
    if len(names) != len(set(names)):
        raise Exception('There are repeated conduits in one of the map files.')

    dbpath = os.path.join(os.path.dirname(outpath), os.path.splitext(os.path.basename(rptpath))[0] + '.db')
    if os.path.exists(dbpath):
        os.unlink(dbpath)

    with closing(sqlite3.connect(dbpath)) as conn:
        #conn.isolation_level = None
        cur = conn.cursor()
        tune_db(cur)

        # create table to store seg map data
        cur.execute("""
            CREATE TABLE segs (
                wasp int,           -- wasp segment number
                DS_wasp int,        -- wasp segment number of the DS segment (initially NULL)
                swmm int,           -- swmm segment number 
                DS_swmm int,        -- swmm segment number of the DS segment (initiall NULL)
                type varchar(8),    -- element type, either 'CONDUIT', 'INFLOW', or 'END'
                name varchar(64)    -- name as it appears in the .rpt file
            )""")

        for row in segtable:
            # read the map file into segs
            cur.execute("""
                INSERT INTO segs (wasp, swmm, type, name) 
                VALUES (%(WASP)s, %(SWMM)s, '%(Type)s', '%(Name)s')
                """ % row)
        
        # get list of unique swmm seg nums
        cur.execute('SELECT DISTINCT swmm, wasp FROM segs')
        segs = cur.fetchall()

        cur.execute('SELECT MAX(wasp) FROM segs')
        last_wasp_seg = cur.fetchone()[0]
        cur.execute('SELECT MAX(swmm) FROM segs')
        last_swmm_seg = cur.fetchone()[0]

        # this loop cycles through the swmm segment numbers and updates DS_wasp and DS_swmm columns in the segs table. 
        # It also creates a dummy segment for the final segment
        for swmm, wasp in segs:
            # check if the current swmm segment includes an 'END' row
            cur.execute("SELECT name FROM segs WHERE swmm = ? AND type = 'END'", (swmm,)) 
            end_row = cur.fetchone()
            if end_row: 
                end_name = end_row[0]
                if end_name:
                    # if the end row includes the name of a conduit (meaning this segment is the end of a tributary, 
                    # not the end of the network), then
                    # find the wasp and swmm segment numbers for the conduit and assign them to the DS_wasp and 
                    # DS_swmm columns for the conduits in this segment
                    cur.execute("SELECT wasp, swmm FROM segs WHERE name = ?", (end_name,))
                    DS_wasp, DS_swmm = cur.fetchone()
                    cur.execute("UPDATE segs SET DS_wasp = ?, DS_swmm = ? WHERE type = 'CONDUIT' AND swmm = ?", 
                        (DS_wasp, DS_swmm, swmm))
                else:
                    if dummy_end:
                        # create dummy segment
                        dummy_wasp = last_wasp_seg + 1
                        dummy_swmm = last_swmm_seg + 1
                        cur.execute("""
                            INSERT INTO segs (wasp, swmm, DS_wasp, DS_swmm, type, name)
                            SELECT ?, ?, 0, 0, 'DUMMY', name 
                                FROM segs WHERE swmm = ? AND type = 'CONDUIT' LIMIT 1
                            """, (dummy_wasp, dummy_swmm, swmm))

                        cur.execute("UPDATE segs SET DS_wasp = ?, DS_swmm = ? WHERE type = 'CONDUIT' AND swmm = ?", 
                            (dummy_wasp, dummy_swmm, swmm))
                    else:
                        cur.execute("UPDATE segs SET DS_wasp = ?, DS_swmm = ? WHERE type = 'CONDUIT' AND swmm = ?", 
                            (0, 0, swmm))
            else:
                cur.execute("UPDATE segs SET DS_wasp = ?, DS_swmm = ? WHERE type = 'CONDUIT' AND swmm = ?", 
                    (wasp + 1, swmm + 1, swmm))

        # for rows of type 'INFLOW', the DS_wasp and DS_swmm are the same as the swmm and wasp
        cur.execute("UPDATE segs SET DS_wasp = wasp, DS_swmm = swmm WHERE type = 'INFLOW'")
        cur.execute("DELETE FROM segs WHERE type = 'END'")

        series_path = os.path.join(os.path.dirname(outpath), 'segment_series_out')
        # if necessary, create a directory to hold all of the output series
        if not os.path.isdir(series_path):
            os.mkdir(series_path)

        with open(os.path.join(series_path, 'segment_map_out.csv'), 'w') as f:
            fieldnames = ['wasp', 'ds_wasp', 'swmm', 'ds_swmm', 'type', 'name']
            writer = csv.DictWriter(f, fieldnames = fieldnames, lineterminator='\n')
            writer.writeheader()
            cur.execute('SELECT * FROM segs ORDER BY wasp, swmm, type, name')
            for row in cur.fetchall():
                row = dict(zip(fieldnames, row))
                writer.writerow(row)

        # create a table to store the length and dwf for conduits
        cur.execute('CREATE TABLE conduit_params (name varchar(64), length float, inlet varchar(64), dwf float DEFAULT 0)')

        # the following block loops through the .inp and updates conduit_params with the lengths and DWF's for each conduit
        meters_in_foot = 0.3048
        first_step = 0
        last_step = 0
        
        print 'reading .inp'
        with open(inppath, 'r') as f:
            # churn through the .inp lines until encountering the [OPTIONS] marker
            line = f.readline().strip()
            while not re.match(re.compile(r'\[opt', re.IGNORECASE), line.strip()):
                line = f.readline()
                if not line:
                    raise Exception("Can't find [OPTIONS] section in .inp file.")

            line = f.readline()
            if not line:
                raise Exception("Can't find REPORT_STEP in the [OPTIONS] section of the .inp")

            #while not re.match(re.compile(r'report_step', re.IGNORECASE), line.strip()):
            timestep_secs = None
            start_date = None
            start_time = None
            end_date = None
            end_time = None
            while True:
                if re.match(re.compile(r'report_start_date', re.IGNORECASE), line.strip()):
                    _, start_date = line.split()
                elif re.match(re.compile(r'report_start_time', re.IGNORECASE), line.strip()):
                    _, start_time = line.split()
                elif re.match(re.compile(r'end_date', re.IGNORECASE), line.strip()):
                    _, end_date = line.split()
                elif re.match(re.compile(r'end_time', re.IGNORECASE), line.strip()):
                    _, end_time = line.split()
                elif re.match(re.compile(r'report_step', re.IGNORECASE), line.strip()):
                    _, ts_str = line.split()
                    hours, mins, secs = ts_str.split(':')
                    timestep_secs = float(hours) * 120 + float(mins) * 60 + float(secs)
                     
                if (not line) or re.match(r'\[', line.strip()):
                    if not timestep_secs:
                        raise Exception(".inp [OPTIONS] missing REPORT_STEP")
                    elif not start_time:
                        raise Exception(".inp [OPTIONS] missing REPORT_START_TIME")
                    elif not start_date:
                        raise Exception(".inp [OPTIONS] missing REPORT_START_DATE")
                    elif not end_date:
                        raise Exception(".inp [OPTIONS] missing REPORT_END_DATE")
                    elif not end_time:
                        raise Exception(".inp [OPTIONS] missing REPORT_END_TIME")
                    else:
                        try:
                            report_start_dtime = datetime.strptime(' '.join([start_date, start_time]), '%m/%d/%Y %H:%M:%S')
                            report_end_dtime = datetime.strptime(' '.join([end_date, end_time]), '%m/%d/%Y %H:%M:%S')
                        except:
                            raise Exception('Unexpected datetime format encountered in .inp file for report dates.')
                        else:
                            break
                else:
                    line = f.readline()

            if not event_start:
                event_start = report_start_dtime

            first_step = int(floor((event_start - report_start_dtime).total_seconds() / timestep_secs))

            if not event_end:
                event_end = report_end_dtime
            
            event_step_count = int((event_end - event_start).total_seconds() / timestep_secs)

            last_step = first_step + event_step_count

            # get a list of all the conduit names from segs
            cur.execute("SELECT name FROM segs WHERE type = 'CONDUIT'")
            conduit_names = [row[0] for row in cur.fetchall()]
            
            # churn through the .inp lines until encountering the [CONDUITS] marker
            line = f.readline()
            if not line:
                raise Exception("Can't find [CONDUITS] section in .inp file")

            while not re.match(re.compile(r'\[cond', re.IGNORECASE), line.strip()):
                line = f.readline()
                if not line:
                    raise Exception("Can't find [CONDUITS] section in .inp file.")

            # loop through the lines in [CONDUITS] and for each conduit with its name in the list, 
            # update conduit params with that conduits 
            # length and inlet node.
            line = f.readline()
            if not line:
                raise Exception("There are no conduits listed in the [CONDUIT] section of the .inp")
            line = line.strip()
            while not re.match(r'\[', line):
                if not (re.match('^$', line) or re.match(r';', line)):
                    name, inlet, _, length, _ = line.split(None, 4)
                    if name in conduit_names:
                        cur.execute('INSERT INTO conduit_params (name, length, inlet) VALUES (?, ?, ?)', 
                            (name, float(length) * meters_in_foot, inlet))
                line = f.readline().strip()

            # churn through the lines until encountering the [DWF] marker
            line = f.readline()
            if not line:
                raise Exception("Can't find [DWF] section in the .inp")
            line = line.strip()
            while not re.match(re.compile(r'\[dwf', re.IGNORECASE), line):
                line = f.readline().strip()

            # get a list of the inlet node names from conduit_params
            cur.execute('SELECT inlet FROM conduit_params')
            inlet_names = [row[0] for row in cur.fetchall()]

            # cycle through the [DWF] section and for each node that is in our inlet list, update 
            # conduit_params with its DWF value
            line = f.readline().strip()
            while not re.match(r'\[', line):
                if not (re.match('^$', line) or re.match(r';', line)):
                    name, param, avg = line.split()
                    if name in inlet_names and re.match('flow', param, re.IGNORECASE):
                        cur.execute('UPDATE conduit_params SET dwf = ? WHERE inlet = ?', 
                            (round(float(avg) * pow(meters_in_foot, 3), 5), name))
                line = f.readline().strip()

        print 'done reading .inp'
        cur.execute("CREATE TABLE conduit_params2 (name varchar(64), swmm int, length float, dwf float DEFAULT 0)")

        cur.execute("""
            INSERT INTO conduit_params2 (name, swmm, length, dwf)
                SELECT cp.name, segs.swmm, cp.length, cp.dwf
                FROM conduit_params AS cp INNER JOIN segs ON cp.name = segs.name
        """)

        # create the first flows table. 
        cur.execute("""
            CREATE TABLE flows (
                timestep int, 
                wasp_source int,    -- wasp seg number indicating which segment the flow is coming from
                wasp_sink int,      -- wasp seg number indicating which segment the flow is going to
                swmm_source int,
                swmm_sink int, 
                name varchar(64), 
                flow float
            )""")

        # create the first volumes table
        cur.execute("""
            CREATE TABLE volumes (
                timestep int, 
                wasp int, 
                swmm int, 
                name varchar(64),
                init_vol float, 
                flow float, 
                depth float, 
                velocity float
            )""")

        # select one conduit to represent each swmm segment. These conduits will determine the base flow 
        # for the their respective swmm segments.
        cur.execute("SELECT MAX(name) FROM segs WHERE type = 'CONDUIT' GROUP BY swmm")
        rep_conduits = [row[0] for row in cur.fetchall()] 

        print "reading .rpt"
        with open(rptpath, 'r') as f:
            # loop through the .rpt file. Each time a report series is encountered, check if the 
            # name is in the segs table, and if so insert the appropriate values into the flows and volumes tables
            line = f.readline()
            while line:
                while line and not re.match('<<<', line.strip()):
                    line = f.readline()

                if line:
                    _, kind, name, _ = line.strip().split() # assumes line is of the format '<<< Subcatchment CATCH01 >>>'
                    # check if there are any entries for this name in segs ...
                    cur.execute('SELECT wasp, swmm, DS_wasp, DS_swmm, type, name FROM segs WHERE name = ?', (name,))
                    elements = cur.fetchall()
                    if not elements or kind not in ('Link', 'Subcatchment'): 
                        # ... if this element isn't in segs, keep reading
                        line = f.readline()
                    else:
                        header_nline = 4
                        for i in range(header_nline): # eat header
                            _ = f.readline() 

                        if kind == 'Link':
                            fieldnames = ['date', 'time', 'flow', 'velocity', 'depth', 'percent'] 
                        else:
                            fieldnames = ['date', 'time', 'percip', 'losses', 'flow']

                        flows = []
                        velocities = []
                        depths = []

                        if filter_mins:
                            filter_steps = round((filter_mins * 60) / timestep_secs)
                        flow_window = []
                        velocity_window = []
                        depth_window = []

                        line = f.readline()
                        # loop through each row in the series and update the flows, velocities, and depths 
                        # lists, filtering if told to do so.
                        counter = 0

                        while line and not re.match('^$', line.strip()):
                            if counter < first_step:
                                counter += 1
                                line = f.readline()
                                continue
                            elif counter >  last_step:
                                break
                            else:
                                record = dict(zip(fieldnames, line.strip().split()))

                                flow = round(float(record['flow']) * pow(meters_in_foot, 3), 5)
                                depth = round(float(record.get('depth', 0)) * meters_in_foot, 5)
                                velocity = round(float(record.get('velocity', 0)) * meters_in_foot, 5)

                                if filter_mins:
                                    flow_window.append(flow)
                                    velocity_window.append(velocity)
                                    depth_window.append(depth)

                                    if len(flow_window) == filter_steps:
                                        flows.append(sum(flow_window) / filter_steps)
                                        del flow_window[0]
                                        velocities.append(sum(velocity_window) / filter_steps)
                                        del velocity_window[0]
                                        depths.append(sum(depth_window) / filter_steps)
                                        del depth_window[0]
                                else:
                                    flows.append(flow)
                                    velocities.append(velocity)
                                    depths.append(depth)

                            counter += 1
                            line = f.readline()
    
                        for element in elements:
                            wasp, swmm, DS_wasp, DS_swmm, eltype, name  = element
                            wasp_source = 0 if eltype == 'INFLOW' else wasp
                            swmm_source = 0 if eltype == 'INFLOW' else swmm

                            cur.executemany("""
                             INSERT INTO flows (timestep, wasp_source, swmm_source, name, wasp_sink, swmm_sink, flow) 
                             VALUES (?, ?, ?, ?, ?, ?, ?)""",
                             [(i, wasp_source, swmm_source, name, DS_wasp, DS_swmm, flow) for i, flow in enumerate(flows)])

                            if eltype in ('CONDUIT', 'DUMMY'):
                                if eltype == 'CONDUIT' and name in rep_conduits:
                                    cur.execute('SELECT dwf FROM conduit_params WHERE name = ?', (name,))
                                    dwf = cur.fetchone()[0]

                                    cur.executemany("""
                                        INSERT INTO flows (timestep, wasp_source, swmm_source, name, 
                                            wasp_sink, swmm_sink, flow) 
                                        VALUES (?, ?, ?, ?, ?, ?, ?)
                                        """, [(i, 0, 0, name + '_DWF', wasp, swmm, dwf) for i in xrange(len(flows))])

                                cur.execute('SELECT length FROM conduit_params WHERE name = ?', (name,))
                                length = cur.fetchone()[0]
                                init_vol = (flows[0] / velocities[0]) * length if velocities[0] else 0

                                cur.executemany("""
                                    INSERT INTO volumes (timestep, wasp, swmm, name, init_vol, flow, depth, velocity)
                                    VALUES (?, ?, ?, ?, ?, ?, ?, ?)
                                    """, [(i, wasp, swmm, name, init_vol, tup[0], tup[1], tup[2]) 
                                           for i, tup in enumerate(zip(flows, depths, velocities))])

        print 'done reading .rpt'
        cur.execute('CREATE INDEX idx_flows ON flows(timestep, name)')
        cur.execute('CREATE INDEX idx_volumes ON volumes(timestep)')
        conn.commit()

        if baseconditions_rpt: 
            cur.execute("""
                CREATE TABLE base_vols (
                    timestep int,
                    swmm int,
                    name varchar(64),
                    vol float,
                    flow float,
                    velocity float,
                    depth float
                )
            """)

            with open(baseconditions_rpt, 'r') as f:
                line = f.readline()
                while line:
                    while line and not re.match('<<<', line.strip()):
                        line = f.readline()

                    if line:
                        _, kind, name, _ = line.strip().split()
                        cur.execute("""
                            SELECT swmm, DS_swmm, name, type 
                            FROM segs 
                            WHERE type = ? AND name = ?
                        """, (name,))

                        elements = cur.fetchall()
                        if not elements or kind not in ('Link', 'Subcatchment'):
                            line = f.readline()
                        else:
                            header_nline = 4
                            for i in range(header_nline):
                                _ = f.readline()

                            if kind == 'Link':
                                fieldnames = ['date', 'time', 'flow', 'velocity', 'depth']
                            else:
                                fieldnames = ['date', 'time', 'precip', 'losses', 'flow']
                            
                            flows = []
                            velocities = []
                            depths = []

                            seconds_in_day = 24 * 60 * 60

                            if baseflow_smoothing_days:
                                filter_steps = round((baseflow_smoothing_days * seconds_in_day) / timestep_secs)

                            flow_window = []
                            velocity_window = []
                            depth_window = []

                            line = f.readline()
                            counter = 0
                            
                            while line and not re.match('^$', line.strip()):
                                if counter < first_step:
                                    counter += 1
                                    line = f.readline()
                                    continue
                                elif counter > last_step:
                                    break
                                else:
                                    record = dict(zip(fieldnames, line.strip().split()))

                                    flow = round(float(record['flow']) * pow(meters_in_foot, 3), 5)
                                    depth = round(float(record.get('depth', 0)) * meters_in_foot, 5)
                                    velocity = round(float(record.get('velocity', 0)) * meters_in_foot, 5)

                                    if baseflow_smoothing_days:
                                        flow_window.append(flow)
                                        velocity_window.append(velocity)
                                        depth_window.append(depth)

                                        if len(flow_window) == filter_steps:
                                            flows.append(sum(flow_window) / filter_steps)
                                            del flow_window[0]
                                            velocities.append(sum(velocity_window) / filter_steps)
                                            del velocity_window[0]
                                            depths.append(sum(depth_window) / filter_steps)
                                            del depth_window[0]
                                    else:
                                        flows.append(flow)
                                        velocities.append(velocity)
                                        depths.append(depth)

                                counter += 1
                                line = f.readline() 
                                
                            for element in elements:
                                swmm, DS_swmm, name, eltype = element
                                swmm_source = 0 if eltype == 'INFLOW' else swmm

                                cur.executemany("""
                                    INSERT INTO flows (timestep, swmm_source, name, swmm_sink, flow)
                                    VALUES (?, ?, ?, ?, ?, ?, ?)
                                """,
                                [(i, swmm_source, name, DS_swmm, flow) for i, flow in enumerate(flows)])

                                if eltype in ('CONDUIT', 'DUMMY'):
                                    cur.execute("SELECT length FROM conduit_params WHERE name = ?", (name,))
                                    length = cur.fetchone()[0]
                                    
                                    cur.executemany("""
                                       INSERT INTO base_vols (timestep, swmm, name, vol, flow, depth, velocity)
                                       VALUES (?, ?, ?, ?, ?, ?, ?)
                                    """, [(i,swmm,name,(tup[0] / tup[2])*length if tup[2] > 0 else 0,tup[0],tup[1],tup[2])
                                           for i, tup in enumerate(zip(flows, depths, velocities))])
                                    
                                    cur.executemany("""
                                        INSERT INTO base_flows (timestep, swmm_source, name, swmm_sink, flow)
                                        VALUES (?, ?, ?, ?, ?)""",
                                        [(i, swmm, name, DS_swmm, flow) for i, flow in enumerate(flows)])


            cur.execute("CREATE INDEX idx_base_vols ON base_vols(timestep, name)") 

            print("a")
            code.interact(local=locals())

            cur.execute("""
                CREATE TABLE base_vols_weighted_depth_vel (
                    timestep int,
                    swmm int,
                    name varchar(64),
                    flow float,
                    depth float,
                    velocity float
                )
            """)

            cur.execute("""
                INSERT INTO base_vols_weighted_depth_vel (timestep, swmm, name, depth, velocity, flow)
                    SELECT v.timestep, v.swmm, v.name, depth*(flow/total_flow), velocity*(flow/total_flow), flow
                    FROM (
                        SELECT SUM(flow) AS total_flow, timestep, swmm
                        FROM base_vols
                        GROUP BY timestep, swmm) AS total_flows
                    INNER JOIN base_vols v ON v.timestep = total_flows.timestep AND v.swmm = total_flows.swmm
            """)
            cur.execute("CREATE INDEX idx_base_vols_weighted ON base_vols_weighted_depth_vel(timestep, swmm)")

            # Debug checkpoint "b": drop into an interactive console so the
            # base_vols_weighted_depth_vel table can be inspected by hand.
            print("b")
            code.interact(local=locals())

            # Aggregate the flow-weighted depth/velocity contributions (and the
            # total flow) over the parallel conduits of each swmm segment.
            cur.execute("""
                CREATE TABLE base_vols_weighted_depth_vel_sum  (
                    timestep int,
                    swmm int,
                    depth float,
                    velocity float,
                    flow float
                )
            """)

            # NOTE(review): the INSERT target is spelled "...veL_sum" (capital
            # L); SQLite identifiers are case-insensitive, so this still hits
            # the base_vols_weighted_depth_vel_sum table created above.
            cur.execute("""
                INSERT INTO base_vols_weighted_depth_veL_sum (timestep, swmm, depth, velocity, flow)
                    SELECT timestep, swmm, SUM(depth), SUM(velocity), SUM(flow) 
                    FROM base_vols_weighted_depth_vel
                    GROUP BY swmm, timestep
            """)
            cur.execute("CREATE INDEX idx_base_vols_weighted_sum ON base_vols_weighted_depth_vel_sum(timestep, swmm)")

            # Debug checkpoint "c".
            print("c")
            code.interact(local=locals())

            # base_vols_final adds delta_vol (net volume change per timestep)
            # and vol (running volume, filled in by the loop below) to the
            # per-swmm-segment aggregates.
            cur.execute("""
                CREATE TABLE base_vols_final (
                    timestep int,
                    swmm int,
                    depth float,
                    velocity float,
                    flow float,
                    delta_vol float,
                    vol float NULL
                )
            """)
            cur.execute("CREATE INDEX idx_base_vols_final ON base_vols_final(timestep, swmm)")

            # delta_vol = (inflow - outflow) * timestep_secs, where inflow is
            # the sum of all base_flows terminating at the segment and outflow
            # is the segment's own flow.
            cur.execute("""
                INSERT INTO base_vols_final (timestep, swmm, depth, velocity, flow, delta_vol)
                SELECT v.timestep, v.swmm, v.depth, v.velocity, v.flow, (inflows.flow_in - v.flow) * ?
                FROM (
                    SELECT SUM(flow) AS flow_in, timestep, swmm_sink
                    FROM base_flows 
                    GROUP BY timestep, swmm_sink) AS inflows
                INNER JOIN base_vols_weighted_depth_vel_sum v 
                    ON v.timestep = inflows.timestep AND v.swmm = inflows.swmm_sink
            """, (timestep_secs, ))

            # Debug checkpoint "d".
            print("d")
            code.interact(local=locals())

            # Integrate delta_vol forward in time for every swmm segment:
            #   t == 0 : seed vol with (flow / velocity) * length — i.e. the
            #            conduit's cross-sectional area times its length;
            #   t  > 0 : vol[t] = vol[t-1] + delta_vol[t], forced to 0 while
            #            the segment is dry (depth == 0).
            cur.execute("SELECT DISTINCT timestep FROM base_vols_final")
            timesteps = [row[0] for row in cur.fetchall()]
            cur.execute("SELECT DISTINCT swmm FROM base_vols_final")
            swmm_segs = [row[0] for row in cur.fetchall()]
            for swmm in swmm_segs:
                for t in timesteps:
                    if t == 0:
                        cur.execute("SELECT flow, velocity FROM base_vols_final WHERE timestep = 0 AND swmm = ?", (swmm, ))
                        flow, velocity = cur.fetchone()

                        # each swmm segment must map to exactly one conduit length
                        cur.execute("SELECT DISTINCT length FROM conduit_params2 WHERE swmm = ?", (swmm,))
                        length = cur.fetchall()
                        assert len(length) == 1
                        length = length[0][0]

                        init_vol = (flow / velocity) * length

                        cur.execute("""
                            UPDATE base_vols_final 
                            SET vol = ?
                            WHERE timestep = 0 AND swmm = ? 
                        """, (init_vol, swmm))
                    else:
                        cur.execute("SELECT vol FROM base_vols_final WHERE timestep = ? AND swmm = ?", (t-1, swmm))

                        prev_vol = cur.fetchall()
                        assert len(prev_vol) == 1
                        prev_vol = prev_vol[0][0]

                        cur.execute("SELECT delta_vol FROM base_vols_final WHERE timestep = ? AND swmm = ?", (t, swmm))
                        delta_vol = cur.fetchall()
                        assert len(delta_vol) == 1
                        delta_vol = delta_vol[0][0]

                        cur.execute("SELECT depth FROM base_vols_final WHERE timestep = ? AND swmm = ?", (t, swmm))
                        depth = cur.fetchall()
                        assert len(depth) == 1
                        depth = depth[0][0]

                        if depth == 0:
                            vol = 0
                        else:
                            vol = prev_vol + delta_vol

                        cur.execute("UPDATE base_vols_final SET vol = ? WHERE timestep = ? AND swmm = ?", (vol, t, swmm))

            # Debug checkpoint before leaving base-conditions processing.
            print 'check baseconditions'
            code.interact(local=locals())

        print 5
        # check that all conduits in segs are represented in the volumes table
        cur.execute("SELECT DISTINCT name FROM segs WHERE type = 'CONDUIT'")
        segs_conduit_names = [row[0] for row in cur.fetchall()]
        cur.execute("SELECT DISTINCT name FROM volumes")
        volumes_names = [row[0] for row in cur.fetchall()]
        missing_conduits = [name for name in segs_conduit_names if name not in volumes_names]
        if missing_conduits:
            raise Exception('There are conduits in the segment map not found in the .rpt file: ' + \
                    ', '.join(missing_conduits))

        # check that all conduits and their DWF's are represented in the flows table
        cur.execute("SELECT DISTINCT name FROM segs WHERE name <> ''")
        segs_all_names = [row[0] for row in cur.fetchall()]
        # NOTE(review): the result of this query is never fetched; dwf_names is
        # built from rep_conduits (defined earlier in the function), not from
        # this SELECT — presumably leftover, verify intent.
        cur.execute("SELECT DISTINCT name FROM segs WHERE type = 'CONDUIT'")
        dwf_names = [cond + '_DWF' for cond in rep_conduits]
        cur.execute("SELECT DISTINCT name FROM flows")
        flows_names = [row[0] for row in cur.fetchall()]
        if set(segs_all_names + dwf_names) != set(flows_names):
            if not missing_inflows_are_zero:
                print('a')
                code.interact(local=locals())
                raise Exception('There are elements missing from the flows table.')
            else:
                # Inflows absent from the flows table are assumed to be zero:
                # insert an all-zero flow series for every missing INFLOW
                # element, spanning the full timestep range seen in flows.
                try:
                    cur.execute("SELECT DISTINCT name FROM segs WHERE type='INFLOW'")
                    inflow_names = [name[0] for name in cur.fetchall()]
                    missing_names = [name for name in inflow_names if name not in flows_names]

                    cur.execute("""
                        SELECT MIN(timestep), MAX(timestep) 
                        FROM flows
                    """)
                    min_tstep, max_tstep = cur.fetchone()
                    for name in missing_names:
                        cur.execute("""
                            SELECT wasp, DS_wasp, swmm, DS_swmm
                            FROM segs
                            WHERE name = ?
                        """, (name,))

                        wasp, DS_wasp, swmm, DS_swmm = cur.fetchone()

                        # An inflow both originates at and feeds its own segment
                        # (source == sink) with zero flow at every timestep.
                        cur.executemany("""
                            INSERT INTO flows (timestep, wasp_source, wasp_sink, swmm_source, swmm_sink, name, flow) 
                            VALUES (?, ?, ?, ?, ?, ?, ?)
                        """, [(i, wasp, wasp, swmm, swmm, name, 0) for i in range(min_tstep, max_tstep+1)])
                except:
                    # Debug fallback: show the traceback and open a console.
                    traceback.print_exc()
                    code.interact(local=locals())

        # create or clear the directory for external flow percent series
        ext_flows_pct_path = os.path.join(series_path, 'external_flow_percentages_by_WASP_segment')
        if not os.path.isdir(ext_flows_pct_path):
            os.mkdir(ext_flows_pct_path)
        else:
            for fname in os.listdir(ext_flows_pct_path):
                os.unlink(os.path.join(ext_flows_pct_path, fname))

        # create or clear the directory for external flow series
        ext_flows_path = os.path.join(series_path, 'external_flows_by_WASP_segment')
        if not os.path.isdir(ext_flows_path):
            os.mkdir(ext_flows_path)
        else:
            for fname in os.listdir(ext_flows_path):
                os.unlink(os.path.join(ext_flows_path, fname))

        # get a list of the unique wasp segment numbers. we will iterate over the
        # list to get the external flow output for each wasp segment
        cur.execute('SELECT DISTINCT wasp_sink FROM flows WHERE wasp_sink > 0')
        wasp_segs = [row[0] for row in cur.fetchall()]

        # output the external flows and external flow percentages by wasp seg num
        for segnum in wasp_segs:
            # get a list of inflow names for this wasp segment
            # (wasp_source = 0 marks a flow as external)
            cur.execute('SELECT DISTINCT name FROM flows WHERE wasp_sink = ? AND wasp_source = 0', (segnum, ))
            inflow_names = [row[0] for row in cur.fetchall()]

            if inflow_names: # if the segment has no inflows, do nothing, otherwise...

                # this query returns a table with columns indicating timestep, the name of the inflow, 
                # its flow at each timestep,
                # the total flow going into the wasp segment at each timestep, and the inflow's percentage 
                # of that total flow at each timestep (0 when the total flow is 0)
                cur.execute("""
                    SELECT f.timestep, f.name, f.flow, tf.total_flow, 
                        CASE WHEN tf.total_flow > 0 THEN f.flow / tf.total_flow ELSE 0 END
                    FROM flows f INNER JOIN (
                        SELECT timestep, SUM(flow) AS total_flow
                        FROM flows 
                        WHERE wasp_sink = ? AND wasp_source = 0 
                        GROUP BY timestep) as tf
                    ON f.timestep = tf.timestep
                    WHERE f.wasp_sink = ? AND f.wasp_source = 0 
                    ORDER BY f.name, f.timestep
                """, (segnum, segnum))

                results = cur.fetchall()
                
                pct_columns = []
                flow_columns = []
                total_col = []

                names = ['timestep']

                # group rows by inflow name; safe because the query is ORDER BY
                # f.name, so equal names are contiguous (groupby requirement)
                for key, group in groupby(results, lambda row: row[1]):
                    names.append(key)
                    group = list(group)
                    group = sorted(group, key = lambda row: row[0])
                    pct_flow = [row[4] for row in group]
                    pct_columns.append(pct_flow)
                    flow = [row[2] for row in group]
                    total_col = [row[3] for row in group]
                    flow_columns.append(flow)

                # prepend a timestep index column, then pivot columns -> rows
                pct_columns.insert(0, range(0, len(pct_columns[0])))
                flow_columns.insert(0, range(0, len(flow_columns[0])))
                pct_rows = zip(*pct_columns) # transform list of columns into a list of rows
                flow_rows = zip(*flow_columns)
                named_pct_rows = [dict(zip(names, row)) for row in pct_rows]
                named_flow_rows = [dict(zip(names, row)) for row in flow_rows]

                # one CSV per wasp segment: <segnum>.csv holds percentages,
                # <segnum>_flow.csv holds the raw flows
                with open(os.path.join(ext_flows_pct_path, str(segnum) + '.csv'), 'w') as f:
                    writer = csv.DictWriter(f, fieldnames = names, lineterminator='\n')
                    writer.writeheader()
                    for row in named_pct_rows:
                        writer.writerow(row)

                with open(os.path.join(ext_flows_path, str(segnum) + '_flow.csv'), 'w') as f:
                    writer = csv.DictWriter(f, fieldnames=names, lineterminator='\n')
                    writer.writeheader()
                    for row in named_flow_rows:
                        writer.writerow(row)

        # Same export as the WASP-segment version above, but keyed by SWMM
        # segment (swmm_sink / swmm_source) instead of WASP segment.
        ext_swmm_flows_pct_path = os.path.join(series_path, 'external_flow_percentages_by_SWMM_segment')
        if not os.path.isdir(ext_swmm_flows_pct_path):
            os.mkdir(ext_swmm_flows_pct_path)
        else:
            for fname in os.listdir(ext_swmm_flows_pct_path):
                os.unlink(os.path.join(ext_swmm_flows_pct_path, fname))

        # create or clear the directory for external flow series
        ext_swmm_flows_path = os.path.join(series_path, 'external_flows_by_SWMM_segment')
        if not os.path.isdir(ext_swmm_flows_path):
            os.mkdir(ext_swmm_flows_path)
        else:
            for fname in os.listdir(ext_swmm_flows_path):
                os.unlink(os.path.join(ext_swmm_flows_path, fname))

        # get a list of the unique swmm segment numbers. we will iterate over the
        # list to get the external flow output for each swmm segment
        cur.execute('SELECT DISTINCT swmm_sink FROM flows WHERE swmm_sink > 0')
        swmm_segs = [row[0] for row in cur.fetchall()]

        # output the external flows and external flow percentages by swmm seg num
        for segnum in swmm_segs:
            # get a list of inflow names for this swmm segment
            cur.execute('SELECT DISTINCT name FROM flows WHERE swmm_sink = ? AND swmm_source = 0', (segnum, ))
            inflow_names = [row[0] for row in cur.fetchall()]

            if inflow_names: # if the segment has no inflows, do nothing, otherwise...

                # timestep, inflow name, its flow, the segment's total external
                # inflow, and this inflow's share of that total (0 if total is 0)
                cur.execute("""
                    SELECT f.timestep, f.name, f.flow, tf.total_flow, 
                        CASE WHEN tf.total_flow > 0 THEN f.flow / tf.total_flow ELSE 0 END
                    FROM flows f INNER JOIN (
                        SELECT timestep, SUM(flow) AS total_flow
                        FROM flows 
                        WHERE swmm_sink = ? AND swmm_source = 0 
                        GROUP BY timestep) as tf
                    ON f.timestep = tf.timestep
                    WHERE f.swmm_sink = ? AND f.swmm_source = 0 
                    ORDER BY f.name, f.timestep
                """, (segnum, segnum))

                results = cur.fetchall()
                
                pct_columns = []
                flow_columns = []
                total_col = []

                names = ['timestep']

                # group rows by inflow name; contiguous thanks to ORDER BY f.name
                for key, group in groupby(results, lambda row: row[1]):
                    names.append(key)
                    group = list(group)
                    group = sorted(group, key = lambda row: row[0])
                    pct_flow = [row[4] for row in group]
                    pct_columns.append(pct_flow)
                    flow = [row[2] for row in group]
                    total_col = [row[3] for row in group]
                    flow_columns.append(flow)

                # prepend a timestep index column, then pivot columns -> rows
                pct_columns.insert(0, range(0, len(pct_columns[0])))
                flow_columns.insert(0, range(0, len(flow_columns[0])))
                pct_rows = zip(*pct_columns) # transform list of columns into a list of rows
                flow_rows = zip(*flow_columns)
                named_pct_rows = [dict(zip(names, row)) for row in pct_rows]
                named_flow_rows = [dict(zip(names, row)) for row in flow_rows]

                with open(os.path.join(ext_swmm_flows_pct_path, str(segnum) + '.csv'), 'w') as f:
                    writer = csv.DictWriter(f, fieldnames = names, lineterminator='\n')
                    writer.writeheader()
                    for row in named_pct_rows:
                        writer.writerow(row)

                with open(os.path.join(ext_swmm_flows_path, str(segnum) + '_flow.csv'), 'w') as f:
                    writer = csv.DictWriter(f, fieldnames=names, lineterminator='\n')
                    writer.writeheader()
                    for row in named_flow_rows:
                        writer.writerow(row)

        # volumes2 mirrors volumes, but its depth and velocity are flow-weighted
        # by each conduit's share of its swmm segment's total flow. (delta_vol
        # and vol are tracked later, in volumes2_b.)
        cur.execute("""
            CREATE TABLE volumes2 (
                timestep int, 
                wasp int, 
                swmm int, 
                name varchar(64), 
                flow float,
                depth float, 
                velocity float
            )""")

        print 'here3'

        # flow weight the depth and velocity in volumes by swmm segment
        cur.execute(""" 
            INSERT INTO volumes2 (timestep, wasp, swmm, name, depth, velocity, flow)
                SELECT v.timestep, v.wasp, v.swmm, v.name, depth * (flow / total_flow), velocity * (flow / total_flow), flow
                FROM (
                    SELECT SUM(flow) AS total_flow, timestep, swmm
                    FROM volumes
                    GROUP BY timestep, swmm) AS total_flows
                INNER JOIN volumes v ON v.timestep = total_flows.timestep AND v.swmm = total_flows.swmm
            """)
        cur.execute('CREATE INDEX idx_volumes2 ON volumes2(timestep, swmm)')

        # volumes2_a aggregates volumes2 to one row per (wasp, swmm, timestep)
        cur.execute("""
            CREATE TABLE volumes2_a (
                timestep int,
                wasp int,
                swmm int,
                depth float,
                velocity float,
                flow float
            )
        """)

        print 'here2'
        # total the depth, velocity, and flow in volumes2 by swmm segment 
        cur.execute("""
            INSERT INTO volumes2_a (timestep, wasp, swmm, depth, velocity, flow)
            SELECT timestep, wasp, swmm, SUM(depth), SUM(velocity), SUM(flow)
            FROM volumes2
            GROUP BY wasp, swmm, timestep
        """)
        cur.execute('CREATE INDEX idx_volumes2_a ON volumes2_a(timestep, swmm)')

        print 'here1'
        # volumes2_b extends volumes2_a with delta_vol and a running vol column;
        # it is populated (and re-populated) by calc_swmm_vols() below.
        cur.execute("""
            CREATE TABLE volumes2_b (
                timestep int,
                swmm int,
                wasp int,
                depth float,
                velocity float,
                flow float,
                delta_vol float,
                vol float NULL
            )
        """)
        cur.execute('CREATE INDEX idx_volumes2_b ON volumes2_b(timestep, swmm)')

        # volumes3x: per-swmm-segment contributions toward the wasp segment,
        # with depth/velocity flow-weighted by wasp-segment total flow
        cur.execute("""
            CREATE TABLE volumes3x (
                timestep int, 
                wasp int, 
                vol float, 
                wasp_depth float, 
                wasp_velocity float,
                flow float
            )""")
        cur.execute('CREATE INDEX idx_volumes3x ON volumes3x(timestep, wasp)')

        # volumes4x: volumes3x summed to one row per (wasp, timestep)
        cur.execute("""
            CREATE TABLE volumes4x (
                timestep int, 
                wasp int, 
                vol float, 
                depth float, 
                velocity float
            )""")
        cur.execute('CREATE INDEX idx_volumes4x ON volumes4x(timestep, wasp)')

        def calc_swmm_vols():
            """(Re)compute delta_vol and the running vol column in volumes2_b.

            Clears volumes2_b and rebuilds it from volumes2_a and flows:
            delta_vol = (segment inflow - segment outflow) * timestep_secs,
            then vol is integrated forward in time for each swmm segment —
            seeded at t == 0 with (flow / velocity) * length (cross-sectional
            area times conduit length), and forced to 0 whenever the segment
            is dry (depth == 0).
            """
            cur.execute("DELETE FROM volumes2_b")

            # calculate delta_vol by swmm segment
            cur.execute("""
                INSERT INTO volumes2_b (timestep, wasp, swmm, depth, velocity, flow, delta_vol)
                SELECT v.timestep, v.wasp, v.swmm, v.depth, v.velocity, v.flow, (inflows.flow_in - v.flow) * ?
                FROM (
                    SELECT SUM(flow) AS flow_in, timestep, swmm_sink
                    FROM flows
                    GROUP BY timestep, swmm_sink) AS inflows
                INNER JOIN volumes2_a v ON v.timestep = inflows.timestep AND v.swmm = inflows.swmm_sink
            """, (timestep_secs, ))

            #  the following loop cycles through each element and updates its respective part of the 
            # vol column in volumes2 with the calculated volume get a list of timesteps
            cur.execute('SELECT DISTINCT timestep FROM volumes2_b')
            timesteps = [row[0] for row in cur.fetchall()]
            cur.execute('SELECT DISTINCT swmm FROM volumes2_b')
            swmm_segs = [row[0] for row in cur.fetchall()]
            for swmm in swmm_segs:
                for t in timesteps:
                    if t == 0:
                        # for the first timestep, set the vol in volumes2 to the initial volume
                        cur.execute('SELECT flow, velocity FROM volumes2_b WHERE timestep = 0 AND swmm = ?', (swmm, ))
                        flow, velocity = cur.fetchone()

                        # each swmm segment must map to exactly one conduit length
                        cur.execute("SELECT DISTINCT length FROM conduit_params2 WHERE swmm = ?", (swmm, ))
                        length = cur.fetchall()
                        assert len(length) == 1
                        length = length[0][0]

                        init_vol = (flow / velocity) * length

                        cur.execute("""
                            UPDATE volumes2_b
                            SET vol = ?
                            WHERE timestep = 0 AND swmm = ?
                        """, (init_vol, swmm))
                    else:
                        # vol[t] = vol[t-1] + delta_vol[t], unless depth == 0
                        cur.execute('SELECT vol FROM volumes2_b WHERE timestep = ? AND swmm = ?', (t - 1, swmm))

                        prev_vol = cur.fetchall()
                        assert len(prev_vol) == 1
                        prev_vol = prev_vol[0][0]

                        cur.execute('SELECT delta_vol FROM volumes2_b WHERE timestep = ? AND swmm = ?', (t, swmm))
                        delta_vol = cur.fetchall()
                        assert len(delta_vol) == 1
                        delta_vol = delta_vol[0][0]

                        cur.execute('SELECT depth FROM volumes2_b WHERE timestep = ? AND swmm = ?', (t, swmm))
                        depth = cur.fetchall()
                        assert len(depth) == 1
                        depth = depth[0][0]

                        if depth == 0:
                            vol = 0
                        else:
                            vol = prev_vol + delta_vol

                        cur.execute('UPDATE volumes2_b SET vol = ? WHERE timestep = ? AND swmm = ?', (vol, t, swmm))

        calc_swmm_vols()

        if correct_vol:
            print('correcting')
            cur.execute("SELECT DISTINCT swmm FROM volumes2_b WHERE swmm IS NOT NULL")
            swmm_segs = [row[0] for row in cur.fetchall()]

            def export_test(*args):
                import os
                fpath = os.path.join(os.path.dirname(outpath), 'vol.csv')
                f = open(fpath, 'w')
                import csv
                writer = csv.writer(f, lineterminator='\n')
                writer.writerows(zip(*args))

            for swmm in swmm_segs:
                query_cols = ['vol', 'delta_vol']
                cur.execute("""
                    SELECT """ + ','.join(query_cols) + """
                    FROM volumes2_b
                    WHERE swmm = ? 
                    ORDER BY timestep
                """, (swmm, ))
                named_results = [dict(zip(query_cols, row)) for row in cur.fetchall()]
                abs_delta_vol = [abs(row['delta_vol']) for row in named_results]
                vol = [row['vol'] for row in named_results]

                correct_basevol = sum(vol[:window_size]) / window_size
                vol_range = max(vol) - correct_basevol

                segs_of_interest = [13]

                event_start_threshold = 0.3
                event_start_idx = 0

                for i, v in enumerate(vol):
                    if v > ((vol_range * event_start_threshold) + correct_basevol):
                        event_start_idx = i
                        break

                if swmm in segs_of_interest:
                    print('bacon')
                    print event_start_idx

                correction = [0 for i in range(len(abs_delta_vol))]

                i = 0
                prev_i = i
                prev_vol = vol
                last_correction = event_start_idx
                    
                prev_vols = []
                prev_corrections = []
                baseflow_points = []
                correction_points = []
                correction_ends = []
                correction_baseflows = []
                correction_baseflow_diffs = []
                correction_totals = []

                def is_basevol(vol, lowerbound, upperbound):
                    return (max(vol[lowerbound:upperbound]) - min(vol[lowerbound:upperbound])) < abs_basevol_signal

                def is_off_basevol(basevol, correct_basevol):
                    return abs(basevol - correct_basevol) > correction_threshold * vol_range

                def get_basevol(vol, lowerbound, upperbound):
                    return sum(vol[lowerbound:upperbound]) / (upperbound - lowerbound)

                def is_negative(vol, correct_basevol, lowerbound, upperbound):
                    vol_window = vol[lowerbound:upperbound]
                    return (sum(v < correct_basevol for v in vol_window) / len(vol_window)) > negative_threshold_pct

                basevol_starts = [] 

                max_window_index = len(abs_delta_vol) - window_size
                micro_window_size = int(window_size * micro_window_pct)
                correction_window_size = micro_window_size
                print('correcting ' + str(swmm))
                while i < max_window_index:
                    needs_correction = False
                    if i > event_start_idx:
                        upperbound = i + window_size
                        basevol = get_basevol(vol, i, upperbound)
                        correction_start = i
                        if is_negative(vol, correct_basevol, i, upperbound):
                            needs_correction = True
                            j = i + micro_window_size
                            also_basevol = is_basevol(vol, i, upperbound)
                            while j < max_window_index:
                                if not also_basevol and not is_negative(vol, correct_basevol, j, j + micro_window_size):
                                    k = j + micro_window_size
                                    while k > j and not is_negative(vol, correct, j, k):
                                        k = k - 1
                                    while k > j and not is_basevol(vol, j, k):
                                        k = k - 1
                                    correction_total = correct_basevol - get_basevol(vol, j, k)
                                    i += micro_window_size
                                    break
                                elif also_basevol and not is_basevol(vol, j, j + micro_window_size):
                                    k = j + micro_window_size
                                    while k > j and not is_basevol(vol, j, k):
                                        k = k - 1
                                    correction_total = correct_basevol - get_basevol(vol, j, k)
                                    i += micro_window_size
                                    break
                                else:
                                    also_basevol = is_basevol(vol, j, j + micro_window_size)
                                j += 1
                            if j >= max_window_index:
                                basevol = get_basevol(vol, j, len(vol))
                                correction_total = correct_basevol - basevol
                                i += micro_window_size
                        elif is_basevol(vol, i, upperbound) and is_off_basevol(basevol, correct_basevol):
                            j = upperbound
                            basevol_starts.append(i)
                            while j < max_window_index:
                                if not is_basevol(vol, j, j + micro_window_size):
                                    k = j + micro_window_size
                                    while k > j and not is_basevol(vol, j, k):
                                        k = k - 1
                                    basevol = get_basevol(vol, j, k)
                                    needs_correction = is_off_basevol(basevol, correct_basevol)
                                    correction_total = correct_basevol - basevol
                                    i += micro_window_size
                                    break
                                j += 1

                            if j >= max_window_index:
                                basevol = get_basevol(vol, j, len(vol))
                                needs_correction = is_off_basevol(basevol, correct_basevol)
                                correction_total = correct_basevol - basevol
                                i += micro_window_size

                    if needs_correction:
                        avg_correction = correction_total / correction_window_size
                        correction_end = correction_start + correction_window_size
                        correction_ends.append(correction_end)
                        correction_spread = [avg_correction for j in range(correction_window_size)]
                        existing_correction = correction[correction_start:correction_end]
                        correction_vals = [sum(tup) for tup in zip(correction_spread, existing_correction)]
                        correction[correction_start:correction_end] = correction_vals 
                        prev_vols.append(copy(vol))
                        vol[correction_start:] = [v + correction_total for v in vol[correction_start:]]
                    else:
                        i += 1

                final_window_start = int(len(vol) - .5 * window_size)
                final_window_end = len(vol) - 1
                final_basevol = get_basevol(vol, final_window_start, final_window_end)
                if is_off_basevol(final_basevol, correct_basevol):
                    correction_start = final_window_start
                    correction_total = correct_basevol - final_basevol
                    avg_correction = correction_total / correction_window_size
                    correction_spread = [avg_correction for j in range(correction_window_size)]
                    correction_end = correction_start + correction_window_size
                    existing_correction = correction[correction_start:correction_end]
                    correction_vals = [sum(tup) for tup in zip(correction_spread, existing_correction)]
                    correction[correction_start:correction_end] = correction_vals

                if swmm in segs_of_interest:
                    prev_vols.append(vol)
                    print 'done with seg'
                    #code.interact(local=locals())

                cur.execute("""
                    SELECT DISTINCT wasp
                    FROM segs 
                    WHERE swmm = ?
                """, (swmm, ))
                wasp = cur.fetchone()[0]

                balance_bucket = [c/timestep_secs for c in correction]
                update_tups = []
                update_tups = [[i,0,swmm,0,wasp,str(swmm)+'balbucket',qin] for i, qin in enumerate(balance_bucket)]
                cur.executemany("""
                    INSERT INTO flows (timestep, swmm_source, swmm_sink, wasp_source, wasp_sink, name, flow) 
                    VALUES (?, ?, ?, ?, ?, ?, ?)
                """, update_tups)

            calc_swmm_vols()

        # volumes3x holds each swmm segment's contribution to its wasp segment:
        # its volume and flow, plus depth and velocity weighted by the segment's
        # share of the wasp segment's total flow. Summing these weighted values
        # per wasp segment (volumes4x below) yields flow-weighted wasp-segment
        # depth and velocity.
        cur.execute("""
            INSERT INTO volumes3x  (timestep, wasp, vol, wasp_depth, wasp_velocity, flow)
            SELECT v2.timestep, v2.wasp, v2.vol, (v2.flow / wasp_flows.wasp_flow) * v2.depth, 
                (v2.flow / wasp_flows.wasp_flow) * v2.velocity, v2.flow
            FROM (SELECT SUM(flow) AS wasp_flow, timestep, wasp
                FROM volumes2_b
                GROUP BY wasp, timestep
            ) AS wasp_flows
            INNER JOIN volumes2_b v2 ON v2.timestep = wasp_flows.timestep AND v2.wasp = wasp_flows.wasp
        """)

        # volumes4x: one row per (wasp, timestep) with total volume and the
        # flow-weighted depth and velocity
        cur.execute("""
            INSERT INTO volumes4x (timestep, wasp, vol, depth, velocity)
            SELECT timestep, wasp, SUM(vol), SUM(wasp_depth), SUM(wasp_velocity)
            FROM volumes3x
            GROUP BY wasp,timestep
        """)

        conn.commit()

        # TODO this is junk
        #cur.execute("SELECT DISTINCT wasp FROM volumes4x WHERE wasp > 0")
        #wasp_sinks = cur.fetchall()
        #cur.execute("SELECT MAX(timestep) FROM final")
        #max_ts = cur.fetchone()

        #for wasp in wasp_sinks:
        #    cur.execute("SELECT volume FROM final WHERE wasp = ? AND timestep >= 15", (wasp, ))
        #    lower_vols = [row[1] for row in cur.fetchall()]
        #    max_freq = max(map(lower_vols.count, lower_vols))
        #    lower_mode = set(vol for vol in lower_vols if lower_vols.count(vol) == max_freq)


        #    cur.execute("SELECT volume FROM final WHERE wasp = ? AND timestep <= ? - 15", (wasp, max_ts))
        #    upper_vols = [row[1] for row in cur.fetchall()]
        #    max_freq = max(map(upper_vols.count, upper_vols))
        #    upper_mode = set(vol for vol in upper_vols if upper_vols.count(vol) == max_freq)


#        # volumes 4 aggregates the volume weighted depths and velocities from volumes3 by swmm segment.
#        cur.execute("CREATE TABLE volumes4 (timestep int, wasp int, swmm int, vol float, depth float, velocity float)")
#        cur.execute("""
#            INSERT INTO volumes4 (timwaspp, wasp, swmm, vol, depth, velocity)
#            SELECT timestep, wasp, swmm, vol, depth, velocity
#            FROM (SELECT timestep, wasp, swmm, SUM(vol) AS vol, SUM(swmm_depth) AS depth, SUM(swmm_velocity) AS velocity
#                FROM volumes3
#                GROUP BY wasp, swmm, timestep)
#            """)
#        cur.execute("CREATE INDEX idx_volumes4 ON volumes4(timestep, wasp)")
#
#   waspvolumes5 weights the swmm depths and velocities by their respective portions of their respective total wasp volumes
#        cur.execute("CREATE TABLE volumes5 (timestep int, wasp int, vol float, wasp_depth float, wasp_velocity float)")
#        cur.execute("""
#            INSERT INTO volumes5 (timestep, wasp, vol, wasp_depth, wasp_velocity)
#            SELECT v4.timestep, v4.wasp, v4.vol, (v4.vol / wasp_vols.wasp_vol) * v4.depth, 
#                (v4.vol / wasp_vols.wasp_vol) * v4.velocity
#            FROM (SElECT timestep, wasp, SUM(vol) AS wasp_vol
#                FROM volumes4
#                GROUP BY wasp, timestep
#            ) AS wasp_vols
#            INNER JOIN volumes4 v4 ON v4.timestep = wasp_vols.timestep AND v4.wasp = wasp_vols.wasp
#            """)
#
#        # volumes6 just sums up the depths, velocities, and volumes from the previous table
#        cur.execute("CREATE TABLE volumes6 (timestep int, wasp int, vol float, depth float, velocity float)")
#        cur.execute("""
#            INSERT INTO volumes6 (timestep, wasp, vol, depth, velocity)
#                SELECT timestep, wasp, SUM(vol) AS vol, SUM(wasp_depth) AS depth, SUM(wasp_velocity) AS velocity
#                FROM volumes5
#                GROUP BY wasp, timestep
#            """)

        # flows2 splits the flow data into two categories, aggregates them each separately, then unions them back together.
        # the first group are the external flows (wasp_source = 0). These flows are summed by their destination (wasp_sink) 
        # and their timesteps. The second group are the internal flows (wasp_source <> 0). This group is filtered 
        # so that the swmm segments remaining are those that terminate a wasp segment (by the condition that their 
        # swmm segment number must be the maximum swmm segment number for a wasp segment). 
        # After filtering the set, the remaining segment flows are summed by their wasp_source, wasp_sink, and timestep.

    # Produce the final output tables/files; print the inputs first on failure
    # so the error context is visible, then re-raise.
    try:
        finalize(dbpath, outpath, timestep_secs)
    except:
        print(dbpath)
        print(outpath)
        print(timestep_secs)
        traceback.print_exc()
        raise

def finalize(dbpath, outpath, timestep_secs):
    """Build the final output tables in the db at dbpath and write the results.

    Creates flows2 (aggregated flows), flows_final, volumes_final and final
    (their union), dumps the consolidated external-flow and internal-flow CSV
    series under a 'segment_series_out' folder next to outpath, then calls
    export_data() / export_volumes() / export_internal_flows().

    Assumes the db already contains the flows, segs, volumes2_b and volumes4x
    tables built earlier in process() (not visible in this block) -- TODO
    confirm against the caller.
    """

    # all per-segment CSV series go in a sibling folder of the .hyd file
    series_path = os.path.join(os.path.dirname(outpath), 'segment_series_out')
    if not os.path.isdir(series_path):
        os.mkdir(series_path)

    print 3  # debug progress marker
    with closing(sqlite3.connect(dbpath)) as conn:
        conn.isolation_level = None  # autocommit mode
        cur = conn.cursor()
        tune_db(cur)

        # flows2 aggregates the flows table: external flows (wasp_source = 0)
        # are summed per (wasp_sink, timestep); internal flows are first
        # restricted to the terminating swmm segment of each wasp segment,
        # then summed per (wasp_source, wasp_sink, timestep).
        cur.execute("CREATE TABLE flows2 (timestep int, wasp_source int, wasp_sink int, flow float)")
        cur.execute("""
            INSERT INTO flows2 (timestep, wasp_source, wasp_sink, flow)
                SELECT * 
                FROM (SELECT timestep, wasp_source, wasp_sink, SUM(flow) AS flow
                      FROM flows
                      WHERE wasp_source = 0
                      GROUP BY wasp_sink, timestep
                    UNION SELECT timestep, wasp_source, wasp_sink, SUM(flow)
                        FROM flows
                        WHERE wasp_source <> 0 AND swmm_source IN (SELECT MAX(swmm) FROM segs GROUP BY wasp)
                        GROUP BY wasp_source, wasp_sink, timestep
                    )
             """)

        # column names for the final table. The datatype indicates what kind of data the row is, 
        # volume/depth/velocity data (1) or flow data (2). ordering1 and 2 contain the values that will 
        # determine how the rows are finally ordered to produce the order found in .hyd files.
        final_colnames = ['datatype', 'timestep', 'wasp_source', 'wasp_sink', 'flow', 'vol', 'depth', 
            'velocity', 'ordering1', 'ordering2']
        # NOTE(review): final_datatypes has 11 entries for 10 column names; the
        # trailing 'int' is silently dropped by zip() below -- confirm intended.
        final_datatypes = ['int', 'int', 'int', 'int', 'float', 'float', 'float', 'float', 'int', 'int', 'int']

        # put them into a comma separated list that can be used in a query string
        final_cols = ', '.join([colname + ' ' + datatype for colname, datatype in zip(final_colnames, final_datatypes)])
        final_colnames_str = ', '.join(final_colnames)
        
        # create the final tables (all three share the same schema so they can be unioned)
        cur.execute("CREATE TABLE flows_final (" + final_cols + ")")
        cur.execute("CREATE TABLE volumes_final (" + final_cols + ")")
        cur.execute("CREATE TABLE final (" + final_cols + ")")

        # populate the flows_final table. note that the columns vol, depth, and velocity 
        # are NULL. These are included so that this table can be unioned with the volumes_final table shortly. 
        # flow data is ordered first by its wasp_source number, unless it is zero, in which case its wasp_sink number 
        # is used, and second by its wasp_source, regardless of whether its zero or not. This forces the order that 
        # each segment's outflow interface will follow the external flow interface for that segment.
        cur.execute("""
            INSERT INTO flows_final (""" + final_colnames_str + """)
                SELECT 2, timestep, wasp_source, wasp_sink, flow, NULL AS vol, NULL AS depth, NULL AS velocity, 
                CASE WHEN wasp_source = 0 THEN wasp_sink ELSE wasp_source END AS ordering1, wasp_source AS ordering2
                FROM flows2
            """)

        # the following for loops output the consolidated external flow series and the internal flow series.
        # the code between this comment and the for loop retrieves the list of wasp seg nums to loop over and 
        # creates/clears the required directories
        consol_ext_flows_outpath = os.path.join(series_path, 'consolidated_external_flows_by_WASP_segment')
        if not os.path.isdir(consol_ext_flows_outpath):
            os.mkdir(consol_ext_flows_outpath)
        else:
            for fname in os.listdir(consol_ext_flows_outpath):
                os.unlink(os.path.join(consol_ext_flows_outpath, fname)) # delete the files in consol_ext_flows_outpath

        print 2  # debug progress marker
        cur.execute('SELECT DISTINCT wasp_sink FROM flows_final WHERE wasp_sink <> 0')
        wasp_sinks = [row[0] for row in cur.fetchall()]
        # one CSV per wasp segment holding its summed external inflow series
        for sink in wasp_sinks:
            cur.execute('SELECT timestep, flow FROM flows_final WHERE wasp_sink = ? AND wasp_source = 0 ORDER BY timestep ', 
                (sink,))
            external_flows = cur.fetchall()

            with open(os.path.join(consol_ext_flows_outpath, str(sink) + '.csv'), 'w') as f:
                fieldnames = ['timestep', 'consolidated_external_flows']
                writer = csv.DictWriter(f, fieldnames = fieldnames, lineterminator='\n')
                writer.writeheader()
                for row in external_flows:
                    row = dict(zip(fieldnames, row))
                    writer.writerow(row)

        internal_flows_outpath = os.path.join(series_path, 'internal_flows_by_WASP_segment')
        if not os.path.isdir(internal_flows_outpath):
            os.mkdir(internal_flows_outpath)
        else:
            for fname in os.listdir(internal_flows_outpath):
                os.unlink(os.path.join(internal_flows_outpath, fname))

        cur.execute('SELECT DISTINCT wasp_source FROM flows_final WHERE wasp_source <> 0')
        wasp_sources = [row[0] for row in cur.fetchall()]
        # one CSV per wasp segment holding its internal (segment-to-segment) outflow series
        for source in wasp_sources:
            cur.execute("""
                SELECT timestep, flow, wasp_sink
                FROM flows_final  
                WHERE wasp_source = ?
                ORDER BY wasp_source, timestep
                """, (source, ))

            internal_flows = cur.fetchall()

            with open(os.path.join(internal_flows_outpath, str(source) + '.csv'), 'w') as f:
                fieldnames = ['timestep', 'flow', 'sink']
                writer = csv.DictWriter(f, fieldnames = fieldnames, lineterminator='\n')
                writer.writeheader()
                for row in internal_flows:
                    row = dict(zip(fieldnames, row))
                    writer.writerow(row)

        # populate the volumes_final table. note that the wasp_source and flow columns are null, 
        # as these columns only apply to the flow data.
        cur.execute("""
            INSERT INTO volumes_final (""" + final_colnames_str + """)
                SELECT 1, timestep, NULL AS wasp_source, wasp AS wasp_sink, NULL AS flow, vol, depth, velocity, 
                       wasp AS ordering1, wasp AS ordering2
                FROM volumes4x
            """)

        # TODO : remove this code, superseded by the external function 'export_volumes'
#        # the following for loop will cycle through each wasp seg and output its volume time series
#        wasp_segs = cur.execute('SELECT DISTINCT wasp_sink FROM volumes_final')
#        wasp_segs = [row[0] for row in wasp_segs]
#        volumes_outpath = os.path.join(series_path, 'volumes_by_WASP_segnum')
#        if not os.path.isdir(volumes_outpath):
#            os.mkdir(volumes_outpath)
#        else:
#            for fname in os.listdir(volumes_outpath):
#                os.unlink(os.path.join(volumes_outpath, fname))
#        for seg in wasp_segs:
#            volumes = cur.execute("""
#                SELECT timestep, vol, depth, velocity 
#                FROM volumes_final 
#                WHERE wasp_sink = ? ORDER BY timestep
#            """, (seg,))
#
#            with open(os.path.join(volumes_outpath, str(seg) + '.csv'), 'w') as f:
#                fieldnames = ['timestep', 'volume', 'depth', 'velocity']
#                writer = csv.DictWriter(f, fieldnames = fieldnames, lineterminator='\n')
#                writer.writeheader()
#                for row in volumes:
#                    row = dict(zip(fieldnames, row))
#                    writer.writerow(row)

        # combine the two final tables into one
        cur.execute("""
            INSERT INTO final (""" + final_colnames_str + """)
                SELECT  *
                FROM (
                    SELECT * FROM flows_final
                    UNION
                    SELECT * FROM volumes_final
                )
            """)
        # index matches the ORDER BY used when exporting
        cur.execute("CREATE INDEX idx_final ON final(timestep, datatype, ordering1, ordering2)")

        # on any export failure, print context and drop into an interactive
        # console for post-mortem debugging rather than aborting
        try:
            export_data(outpath, dbpath, timestep_secs, final_colnames)
        except:
            print(outpath)
            print(dbpath)
            print(timestep_secs)
            traceback.print_exc()
            code.interact(local=locals())

        try:
            export_volumes(dbpath, os.path.dirname(outpath), by_wasp_seg=True)
        except:
            traceback.print_exc()
            code.interact(local=locals())

        try:
            export_volumes(dbpath, os.path.dirname(outpath), by_wasp_seg=False)
        except:
            traceback.print_exc()
            code.interact(local=locals())

        try:
            export_internal_flows(dbpath, os.path.dirname(outpath), by_wasp_seg=True)
        except:
            traceback.print_exc()
            code.interact(local=locals())

        try:
            export_internal_flows(dbpath, os.path.dirname(outpath), by_wasp_seg=False)
        except:
            traceback.print_exc()
            code.interact(local=locals())

def export_data(outpath, dbpath, timestep_secs, final_colnames):
    """Write the .hyd text file from the 'final' table of the db at dbpath.

    The file starts with a header line (segment count, interface count,
    timestep, 0, duration, 1), then one line per flow interface giving its
    (wasp_source, wasp_sink) pair, then one line per row of 'final' in
    timestep/datatype/ordering order: volume rows (datatype 1) as
    tab-joined '', vol, 0, depth, velocity; flow rows (datatype 2) as
    tab-joined '', flow. Numeric values are rounded to 5 decimal places.

    Parameters:
        outpath        -- path of the .hyd text file to create/overwrite.
        dbpath         -- SQLite database containing the 'final' table.
        timestep_secs  -- seconds per timestep; written to the header and
                          multiplied by MAX(timestep) to get the duration.
        final_colnames -- column order of the 'final' table; used to locate
                          the datatype/vol/depth/velocity/flow columns.
    """
    with closing(sqlite3.connect(dbpath)) as cnxn:
        cnxn.isolation_level = None
        cur = cnxn.cursor()
        tune_db(cur)

        # column positions within a row of 'final' (avoids building a dict
        # and str()-converting every column for every row, as before)
        datatype_i = final_colnames.index('datatype')
        vol_i = final_colnames.index('vol')
        depth_i = final_colnames.index('depth')
        velocity_i = final_colnames.index('velocity')
        flow_i = final_colnames.index('flow')

        # open the hyd file outpath
        with open(outpath, 'w') as f:
            # count the number of volume entries for the first timestep to get the number of segs
            cur.execute('SELECT COUNT(*) FROM final WHERE timestep = 0 AND datatype = 1')
            num_segs = cur.fetchone()[0]
            # count the number of flow entries for the first timestep to get the total number of interfaces
            cur.execute('SELECT COUNT(*) FROM final WHERE timestep = 0 AND datatype = 2')
            num_interfaces = cur.fetchone()[0]

            # retrieve the maximum timestep and calculate the duration
            cur.execute('SELECT MAX(timestep) FROM final')
            duration = timestep_secs * cur.fetchone()[0]

            # create the top line for the hyd file
            f.write('\t'.join([str(num_segs), str(num_interfaces), str(timestep_secs), '0', str(duration), '1']) + '\n')

            # select the flow rows for the first timestep and extract the interface numbers then write them to file
            cur.execute("""
                SELECT wasp_source, wasp_sink 
                FROM final 
                WHERE timestep = 0 AND datatype = 2 
                ORDER BY ordering1, ordering2""")
            for wasp_source, wasp_sink in cur.fetchall():
                f.write(str(wasp_source) + '\t' + str(wasp_sink) + '\n')

            # stream the full table straight off the cursor instead of
            # fetchall(): 'final' holds every timestep and can be very large
            cur.execute('SELECT * FROM final ORDER BY timestep, datatype, ordering1, ordering2')
            for row in cur:
                if row[datatype_i] == 1:
                    # volume row: vol, placeholder 0, depth, velocity
                    f.write('\t'.join(['', str(round(row[vol_i], 5)), '0',
                                       str(round(row[depth_i], 5)),
                                       str(round(row[velocity_i], 5))]) + '\n')
                else:
                    # flow row: just the flow value
                    f.write('\t'.join(['', str(round(row[flow_i], 5))]) + '\n')

def export_internal_flows(dbpath, outdir, append_to_folder_name='', by_wasp_seg=True):
    """Dump the internal (segment-to-segment) flow series, one CSV per segment.

    by_wasp_seg=True reads per-WASP-segment flows from flows_final;
    by_wasp_seg=False reads per-SWMM-segment series from volumes2_b. Files
    are written to '<outdir>/internal_flows_by_<WASP|SWMM><suffix>', which
    is created if missing and emptied if it already exists.
    """
    with closing(sqlite3.connect(dbpath)) as conn:
        cursor = conn.cursor()
        tune_db(cursor)

        seg_kind = 'WASP' if by_wasp_seg else 'SWMM'
        target_dir = os.path.join(outdir, 'internal_flows_by_' + seg_kind + append_to_folder_name)
        if os.path.isdir(target_dir):
            # clear output left behind by a previous run
            for leftover in os.listdir(target_dir):
                os.unlink(os.path.join(target_dir, leftover))
        else:
            os.mkdir(target_dir)

        fieldnames = ['timestep', 'flow', 'depth', 'velocity']
        col_list = ','.join(fieldnames)
        if by_wasp_seg:
            cursor.execute('SELECT DISTINCT wasp_source FROM flows_final WHERE wasp_source <> 0')
            query = """
                SELECT """ + col_list + """
                FROM flows_final  
                WHERE wasp_source = ?
                ORDER BY wasp_source, timestep
            """
            print('wasp internal')
        else:
            cursor.execute('SELECT DISTINCT swmm FROM volumes2_b')
            query = """ 
                SELECT """ + col_list + """ 
                FROM volumes2_b
                WHERE swmm = ?
                ORDER BY timestep 
            """
            print('swmm internal')

        seg_nums = [record[0] for record in cursor.fetchall()]
        for seg in seg_nums:
            csv_path = os.path.join(target_dir, str(seg) + '.csv')
            with open(csv_path, 'w') as out:
                writer = csv.DictWriter(out, fieldnames=fieldnames, lineterminator='\n')
                writer.writeheader()
                cursor.execute(query, (seg, ))
                for values in cursor.fetchall():
                    writer.writerow(dict(zip(fieldnames, values)))

def export_volumes(dbpath, outdir, append_to_folder_name = '', by_wasp_seg=True):
    """Dump the volume/depth/velocity time series, one CSV per segment.

    by_wasp_seg=True reads per-WASP-segment rows (datatype 1) from 'final';
    by_wasp_seg=False reads per-SWMM-segment rows from volumes2_b. Files are
    written to '<outdir>/volumes_by_<WASP|SWMM><suffix>', which is created
    if missing and emptied if it already exists.
    """
    with closing(sqlite3.connect(dbpath)) as cnxn:
        cur = cnxn.cursor()
        tune_db(cur)

        folder_name = 'volumes_by_' + ('WASP' if by_wasp_seg else 'SWMM') + append_to_folder_name
        outpath = os.path.join(outdir, folder_name)
        if not os.path.isdir(outpath):
            os.mkdir(outpath)
        else:
            # clear output left behind by a previous run
            for fname in os.listdir(outpath):
                os.unlink(os.path.join(outpath, fname))

        # both sources expose the same four columns (was duplicated per branch)
        fieldnames = ['timestep', 'vol', 'depth', 'velocity']
        if by_wasp_seg:
            segs = cur.execute('SELECT DISTINCT wasp_sink FROM final WHERE wasp_sink > 0')
            query = """
                SELECT """ + ','.join(fieldnames) + """
                FROM final 
                WHERE wasp_sink = ? AND datatype = 1
                ORDER BY timestep
            """
            print('wasp vol')
        else:
            segs = cur.execute("SELECT DISTINCT swmm FROM volumes2_b")
            query = """
                SELECT """ + ','.join(fieldnames) + """
                FROM volumes2_b
                WHERE swmm = ? 
                ORDER BY timestep
            """
            print('swmm vol')

        # materialize the segment list before reusing the cursor below
        segs = [row[0] for row in segs]
        for seg in segs:
            with open(os.path.join(outpath, str(seg) + '.csv'), 'w') as f:
                writer = csv.DictWriter(f, fieldnames = fieldnames, lineterminator='\n')
                writer.writeheader()
                cur.execute(query, (seg, ))
                for row in cur.fetchall():
                    row = dict(zip(fieldnames, row))
                    writer.writerow(row)

def corrector(vol, final_mean, init_mean, final_inflection_threshold=0.04):
    """Rescale the tail of a volume series so it settles near init_mean.

    Scans the series from the end for the last point that sits more than
    final_inflection_threshold of the series range above final_mean; every
    value after that inflection point is multiplied by init_mean/final_mean.

    Parameters:
        vol        -- list of volume values, one per timestep.
        final_mean -- mean volume over the trailing sample window.
        init_mean  -- mean volume over the leading sample window (the target).
        final_inflection_threshold -- fraction of (max(vol) - final_mean)
                      that counts as "clearly above" final_mean.

    Returns a new list; vol is not modified.
    Raises ValueError when no inflection point exists (previously this
    surfaced as an unhelpful NameError on inflection_idx).
    """
    peak = max(vol)  # hoisted: invariant, was recomputed every iteration
    inflection_idx = None
    for i, v in enumerate(reversed(vol)):
        if v - final_mean > final_inflection_threshold * (peak - final_mean):
            # NOTE(review): len(vol) - i + 1 starts the scaled tail two
            # elements past the detected point -- confirm off-by-one intended
            inflection_idx = len(vol) - i + 1
            break
    if inflection_idx is None:
        raise ValueError('no inflection point found in volume series')

    scale_factor = init_mean / final_mean
    scaled = [v * scale_factor for v in vol[inflection_idx:]]
    return vol[:inflection_idx] + scaled

def correct2(dbpath, final_init_threshold=0.01):
    """Correct volume drift in the 'final' table of the db at dbpath.

    For each WASP segment, compares the mean volume of the first 10% of
    timesteps (init_mean) against the last 10% (final_mean). When they
    differ by more than final_init_threshold of the series range, the
    series is adjusted -- via corrector() when the series drifted upward,
    or via a spline-smoothed shift when it drifted downward -- and the
    corrected values are written back with UPDATEs.

    NOTE(review): `interpolate` (scipy) and `np` (numpy) are not among the
    imports visible at the top of this file -- confirm they are imported
    elsewhere, otherwise the downward-drift branch raises NameError.
    """
    with closing(sqlite3.connect(dbpath)) as cnxn:
        cursor = cnxn.cursor()
        cursor.execute("SELECT DISTINCT wasp_sink FROM final WHERE wasp_sink IS NOT NULL AND datatype = 1")
        wasp_segs = [row[0] for row in cursor.fetchall()]

        for wasp in wasp_segs:
            final_cols = ['timestep', 'vol']

            cursor.execute("""
                SELECT """ + ','.join(final_cols) + """
                FROM final 
                WHERE datatype = 1 AND wasp_sink = ?
                ORDER BY timestep
            """, (wasp, ))

            final = [dict(zip(final_cols, row)) for row in cursor.fetchall()]
            vol = [row['vol'] for row in final]

            # sample the first and last 10% of the series to estimate the
            # starting and ending base volumes
            sample_window = int(floor(len(vol) * .1))
            init_mean = sum(v for v in vol[:sample_window]) / sample_window
            final_mean = sum(v for v in vol[-sample_window:]) / sample_window

            # debug progress output
            print wasp
            print 'abs', abs(final_mean - init_mean)
            print 'final', final_init_threshold * (max(vol) - min(vol))

            if abs(final_mean - init_mean) > final_init_threshold * (max(vol) - min(vol)):
                if final_mean > init_mean:
                    # series drifted upward: scale the tail back down
                    corrected_vol = corrector(vol, final_mean, init_mean)
                else:
                    # earlier (abandoned) approach kept for reference
                    #original_range = max(vol) - init_mean
                    #corrected_vol = corrector(list(reversed(vol)), final_mean=init_mean, init_mean=final_mean)
                    #corrected_vol = list(reversed(corrected_vol))
                    #changed_range = max(corrected_vol) - final_mean
                    #scale_factor = original_range / changed_range
                    #corrected_vol = [v * scale_factor for v in corrected_vol]
                    #new_init_mean = final_mean * scale_factor
                    #corrected_vol = [v + (init_mean - new_init_mean) for v in corrected_vol]

                    # series drifted downward: find where it last crossed the
                    # initial mean, shift everything after that point up by the
                    # drift amount, and spline-smooth the junction
                    final_crossing_threshold = 0.5
                    last_idx_above_init = 0
                    for i, v in enumerate(vol):
                        if v < init_mean - (init_mean - final_mean) * final_crossing_threshold:
                            break
                        elif v >= init_mean:
                            last_idx_above_init = i

                    shift_side = vol[(last_idx_above_init+1):]

                    # NOTE(review): length_shift_overlap is unbound if this loop
                    # never breaks -- confirm the drift always exceeds the test
                    for i, v in enumerate(list(reversed(vol[:last_idx_above_init]))):
                        if v - init_mean > (init_mean - final_mean):
                            length_shift_overlap = i
                            break

                    # spline through the overlap region, resampled at half steps
                    tck = interpolate.splrep(range(length_shift_overlap + 1), shift_side[:(length_shift_overlap + 1)], s=0)
                    newx = np.arange(0, length_shift_overlap + .5, .5)

                    try:
                        shift_side = list(interpolate.splev(newx, tck, der=0)) + shift_side[length_shift_overlap:]
                    except:
                        # interactive post-mortem on spline failure
                        print 1
                        traceback.print_exc()
                        code.interact(local=locals())
                        
                    shift_side = [v + (init_mean - final_mean) for v in shift_side]
                    corrected_vol = vol[:(last_idx_above_init - length_shift_overlap)] + shift_side

                # write the corrected series back, one timestep at a time
                cursor.executemany("""
                    UPDATE final
                    SET vol = ?
                    WHERE datatype = 1 AND wasp_sink = ? AND timestep = ?
                """, [(v, wasp, i) for i, v in enumerate(corrected_vol)])

                cnxn.commit()

def correct(dbpath):
    """Older volume-drift correction over the 'final' table (see correct2).

    NOTE(review): this version appears broken/abandoned and is superseded by
    correct2():
      * the connection is never closed and only executemany is attempted, so
        a failure leaves the transaction open;
      * fix() contains several lines that raise TypeError if reached (int +
        '.5' concatenation; range() with a float stop/step);
      * the ternary at the bottom calls fix() identically in both branches;
      * hard-coded `wasp == 3` breakpoints drop into an interactive console.
    Confirm whether this function is still called before relying on it.
    """
    cnxn = sqlite3.connect(dbpath)
    cursor = cnxn.cursor()
    cursor.execute("SELECT DISTINCT wasp_sink FROM final WHERE wasp_sink IS NOT NULL AND datatype = 1")
    wasp_segs = [row[0] for row in cursor.fetchall()]

    for wasp in wasp_segs:
        final_cols = ['timestep', 'vol']
        cursor.execute("""
            SELECT """  + ','.join(final_cols) + """ 
            FROM final
            WHERE datatype = 1 AND wasp_sink = ?
            ORDER BY timestep
        """, (wasp,))

        final = [dict(zip(final_cols, row)) for row in cursor.fetchall()]
        vol = [row['vol'] for row in final]

        # means of the first and last 10% of the series
        sample_window = int(floor(len(vol) * .1))
        init_mean = sum(v for v in vol[:sample_window]) / sample_window
        final_mean = sum(v for v in vol[-sample_window:]) / sample_window

        def fix(vol, init_mean, final_mean):
            # scale values above a cut-off so the series range ends at init_mean
            max_vol = max(vol)
            current_range = max_vol - final_mean
            target_range = max_vol - init_mean
            scale_factor = target_range / current_range
            shift_factor = final_mean - init_mean

            cut_off = final_mean - (final_mean - init_mean) * .5
            stationary_vol = [v for v in vol if v < cut_off]
            scaled_vol = [v * scale_factor + shift_factor for v in vol if v >= cut_off]

            # hard-coded debug breakpoint for segment 3
            if wasp == 3:
                print 'bacon'
                code.interact(local=locals())

            # collect the trailing run of stationary values above init_mean
            stationary_inflect = []
            for v in reversed(stationary_vol):
                if v <= init_mean:
                    break
                else:
                    stationary_inflect.append(v)

            if wasp == 3:
                print 'bits'
                code.interact(local=locals())

            stationary_inflect = list(reversed(stationary_inflect))
            # NOTE(review): if stationary_inflect is empty, [:-0] makes front
            # the empty list rather than all of stationary_vol -- likely a bug
            front = stationary_vol[:-len(stationary_inflect)]
            corrected_inflect = [(v1 + v2) / 2 for v1, v2 in zip(stationary_inflect, scaled_vol[:len(stationary_inflect)])]

            if corrected_inflect:
                tck = interpolate.splrep(range(len(corrected_inflect)), corrected_inflect, s=0)
                # NOTE(review): int + '.5' raises TypeError if this line runs
                newx = [x + '.5' for x in range(len(corrected_inflect))]
                newx = [list(x_pair) for x_pair in zip(range(len(corrected_inflect)), newx)]
                # NOTE(review): range() with a float stop/step raises TypeError
                corrected_inflect_interp = interpolate.splev(range(0, len(corrected_inflect) + .5, .5), tck, der=0)
            else:
                corrected_inflect_interp = []

            if wasp == 3:
                print 'kibble'
                code.interact(local=locals())

            # NOTE(review): splev returns an ndarray; list + ndarray here fails
            return front + corrected_inflect_interp + scaled_vol[len(corrected_inflect):]

        # NOTE(review): both branches of this conditional are identical
        corrected_vol = fix(vol, init_mean, final_mean) if final_mean-init_mean > 0 else fix(vol, init_mean, final_mean)

        try:
            cursor.executemany("""
                UPDATE final 
                SET vol = ?
                WHERE datatype = 1 AND wasp_sink = ? AND timestep = ?
            """, [ (v, wasp, i) for i, v in enumerate(corrected_vol)])
        except:
            traceback.print_exc()
            code.interact(local=locals())
        
    print 'done'

def test(skip_correct=True):
    """Smoke-test driver: optionally run correct() on a known database, then
    dump its corrected volume series next to it."""
    folder = r'C:\Data\MatthewPlourde\hydmaker\hydmaker_no_svol\22seg\TTF_WQModel_Nobridges_May7_03_A2\output'
    db = os.path.join(folder, 'TTF_WQModel_Nobridges_May7_03_old_test.db')
    if not skip_correct:
        correct(db)
    export_volumes(db, folder, 'corrected')

def test2(skip_correct=True):
    """Smoke-test driver: optionally run correct2() on the no-svol test
    database, then dump its corrected volume series next to it."""
    folder = r'C:\Data\MatthewPlourde\hydmaker\hydmaker_no_svol\22seg\TTF_WQModel_Nobridges_May7_03_A2\output_no_svol_notestsegmap'
    db = os.path.join(folder, 'TTF_WQModel_Nobridges_May7_03_test.db')
    if not skip_correct:
        correct2(db)
    export_volumes(db, folder, 'corrected')

def run(rpt_path=None, inp_path='', segmappaths=None, output_dir='', outfilename='', filter_data=None, filter_mins=None):
    key_err_msg = "Keyboard error. Try again, fat fingers.\n"
    if not rpt_path:
        while True:
            try:
                rpt_path = raw_input('.rpt file path >> ')
            except KeyboardInterrupt:
                print(key_err_msg)
            if os.path.exists(rpt_path) and not os.path.isdir(rpt_path) and re.search('\\.rpt$', rpt_path):
                break

    if not inp_path:
        while True:
            try:
                inp_path = raw_input('.inp file name >> ')
            except KeyboardInterrupt:
                print(key_err_msg)
            if os.path.exists(inp_path) and not os.path.isdir(inp_path) and re.search('\\.inp$', inp_path):
                break

    segmappaths = [segmappaths] if isinstance(segmappaths, str) else []
    if not segmappaths:
        while True:
            if segmappaths:
                print('\n')
                print('Segmentation file paths:')
                for i, path in enumerate(segmappaths):
                    print(str(i + 1) + '. ' + path)
            try:
                path = raw_input('Add a path to a segmentation file (press enter to continue) >> ')
            except KeyboardInterrupt:
                print(key_err_msg)

            if os.path.exists(path) and not os.path.isdir(path):
                segmappaths.append(path)
            else:
                if len(path) == 0 and len(segmappaths) > 0:
                    break
                print('No such file.')
    
    if not output_dir:
        while True:
            try:
                output_dir = raw_input('Output directory path >> ') 
            except KeyboardInterrupt:
                print(key_err_msg)

            if os.path.isdir(output_dir):
                break

    if not outfilename:
        while True:
            try:
                out_name = raw_input('hyd text output file name >> ')
            except KeyboardInterrupt:
                print(key_err_msg)

            if out_name:
                out_path = os.path.join(output_dir, out_name)

                if os.path.exists(out_path):
                    try:
                        response = raw_input('A file with that name already exists. Do you want to overwrite it? (y/n)>> ')
                    except KeyboardInterrupt:
                        print(key_err_msg)

                    if re.match('y', response, re.IGNORECASE):
                        break
                else:
                    break
    else:
        out_path = os.path.join(output_dir, outfilename)

    if filter_data is None:
        while True:
            try:
                filter_data = raw_input('Filter data? (Y/N): ')
            except KeyboardInterrupt:
                print(key_err_msg)

            if re.match('y|n', filter_data, re.IGNORECASE):
                filter_data = re.match('y', filter_data, re.IGNORECASE)
                break

    if filter_data and not filter_mins:
        while True:
            try:
                filter_mins = raw_input('Filter window in minutes: ')
            except KeyboardInterrupt:
                print(key_err_msg)
            try: 
                filter_mins = int(filter_mins)
            except:
                continue
            else:
                break
    else:
        filter_mins = None

    correction_threshold= 0.0005
    abs_basevol_signal = 5
    window_size = 400
    negative_threshold_pct = .2
    micro_window_pct = .25 
    micro_basevol_pct = .25
    baseconditions_rpt = None
    baseflow_smoothing_days = None
    while True:
        try:
            correct_vol = raw_input("Do you want to force events to end at their respective initial "
                            "volumes by adding dummy inflows that correct the volume during base conditions? (Y/N): ")
            correct_vol = re.match('y', correct_vol, re.IGNORECASE)
            if not correct_vol:
                break
            else:
                msg = '\n'.join([])
                print('There are several parameters that affect how the program will make the correction'
                      ' Do you want to choose them manually or accept the default?')

                accept_defaults = raw_input("Accept Defaults? (Y/N): ")
                if re.match('y', accept_defaults, re.IGNORECASE):
                    break
                else:
                    while True:
                        msg = ("By default, the correction procedure will use the initial volume of the conduit "
                               "to determine when a correction is necessary and by how much. Alternatively, you can "
                               "supply the path to a base conditions .rpt. The procedure will calculate a weekly moving "
                               "average of base volume for each conduit and use this as the reference for correction."
                               "Supply a path to the base conditions .rpt, or press enter to use the initial volume "
                               "method.")
                        print(msg)
                        baseconditions_rpt = raw_input("base conditions .rpt file path (or press Enter to continue) >> ")
                        if not baseconditions_rpt:
                            break
                        if os.path.exists(baseconditions_rpt):
                            while True:
                                msg = "Base flow smoothing window size in days (press Enter to skip smoothing) >> "
                                baseflow_smoothing_days = raw_input(msg)
                                if baseflow_smoothing_days:
                                    try:
                                        baseflow_smoothing_days = int(baseflow_smoothing_days)
                                    except:
                                        print("Invalid input.")
                                    else:
                                        break
                                else:
                                    break
                            break
                        else:
                            print("Could not find the file specified.")

                    while True:
                        # Prompt for the sliding-window size (in timesteps) used to
                        # detect periods of base volume; re-prompt until an int is given.
                        msg = ("window size: The correction procedure looks for periods of base volume by moving "
                               "a window of specified size over the volume timeseries for each segment. The window "
                               "size is measured in number of timesteps. The default is 400.")
                        print(msg)
                        window_size = raw_input('window size >> ')
                        try:
                            window_size = int(window_size)
                        except ValueError:
                            # Non-numeric input: re-prompt.  BUG FIX: was a bare
                            # 'except:', which also swallowed KeyboardInterrupt and
                            # made this loop impossible to escape with Ctrl-C.
                            continue
                        else:
                            break

                    while True:
                        # Prompt for the max-min difference (m^3) that flags a window
                        # as containing a period of base volume.
                        msg = ('base volume absolute-difference: Within a given window, consider a difference '
                               'of this size between the max and min values as a signal that the window '
                               'contains a period of base volume. The default is 5 m^3.')
                        print(msg)
                        abs_basevol_signal = raw_input("base volume absolute-difference in m^3 >> ")
                        try:
                            abs_basevol_signal = float(abs_basevol_signal)
                        except ValueError:
                            # Re-prompt on bad input (narrowed from bare 'except:').
                            continue
                        else:
                            break

                    while True:
                        # Prompt for the minimum deviation (m^3) from the initial base
                        # volume that triggers a correction.
                        msg = ("Correction threshold: Minimum allowable absolute difference between the average volume "
                              "in a period of base volume and the initial base volume. Anything larger than this will "
                              "trigger a correction for the period within the current window. The default is .0005 m^3.")
                        print(msg)
                        correction_threshold = raw_input("correction threshold in m^3 >> ")
                        try:
                            correction_threshold = float(correction_threshold)
                        except ValueError:
                            # Re-prompt on bad input (narrowed from bare 'except:').
                            continue
                        else:
                            break

                    while True:
                        # Prompt for the micro-window size as a fraction of the
                        # primary correction window.
                        msg = ("micro window size: Once it's been established that a period of base "
                               "volume needs correction, to determine how much correction is necessary, "
                               "the correction procedure examines the period in question using a smaller "
                               "window than the primary one "
                               "Enter the size of this window as a "
                               "percentage of the larger correction window previously entered. The default is .25.")
                        print(msg)
                        msg = "Enter micro window pct >> "
                        micro_window_pct = raw_input(msg)
                        try:
                            micro_window_pct = float(micro_window_pct)
                        except ValueError:
                            # Re-prompt on bad input (narrowed from bare 'except:').
                            continue
                        else:
                            break

                    while True:
                        # Prompt for the fraction of below-base-volume timesteps that
                        # classifies a window as "negative".
                        msg = ("Enter the percentage of timesteps within a window below the initial base volume "
                               "that is sufficient for categorizing the current period as negative. The default is .2.")
                        print(msg)
                        negative_threshold_pct = raw_input("Negative threshold pct >> ")
                        try:
                            negative_threshold_pct = float(negative_threshold_pct)
                        except ValueError:
                            # Re-prompt on bad input (narrowed from bare 'except:').
                            continue
                        else:
                            break

                    break

        except KeyboardInterrupt:
            print(key_err_msg)
            pass
                            
    # Ask whether a final dummy segment should be appended; loop until the
    # answer starts with 'y' or 'n' (case-insensitive).
    while True:
        try:
            dummy_end = raw_input('Include final dummy segment? (Y/N): ')
            if re.match('y|n', dummy_end, re.IGNORECASE):
                # FIX: normalize to a real boolean.  Previously dummy_end was
                # left as a re.Match object (for 'y') or None (for 'n'), which
                # is truthiness-equivalent but confusing to read and print.
                dummy_end = bool(re.match('y', dummy_end, re.IGNORECASE))
                break
        except KeyboardInterrupt:
            # Ctrl-C just re-prompts; key_err_msg is defined earlier in this function.
            print(key_err_msg)

    # Optionally restrict processing to a user-supplied event window.
    event_start = None
    event_end = None
    while True:
        try:
            set_event_limits = raw_input("Set event start and end dates? >> ")
            if re.match('n', set_event_limits, re.IGNORECASE):
                break
            elif re.match('y', set_event_limits, re.IGNORECASE):
                while True:
                    # FIX: prompt previously read "(yyyy-mm-dd HH:MM:SS >> " --
                    # missing its closing parenthesis.
                    event_start = raw_input("event start (yyyy-mm-dd HH:MM:SS) >> ")
                    try:
                        event_start = datetime.strptime(event_start, "%Y-%m-%d %H:%M:%S")
                    except ValueError:
                        # Unparseable date: re-prompt.  BUG FIX: was a bare
                        # 'except:', which also swallowed KeyboardInterrupt and
                        # trapped the user in this inner loop.
                        continue
                    else:
                        break

                while True:
                    event_end = raw_input("event end (yyyy-mm-dd HH:MM:SS) >> ")
                    try:
                        event_end = datetime.strptime(event_end, "%Y-%m-%d %H:%M:%S")
                    except ValueError:
                        # Re-prompt on bad input (narrowed from bare 'except:').
                        continue
                    else:
                        break

                # The chosen range must be at least twice the baseflow smoothing
                # period, otherwise ask for the dates again.
                if baseflow_smoothing_days:
                    if ((event_end - event_start).total_seconds()) / (60 * 60 * 24) < 2 * baseflow_smoothing_days:
                        msg = "DateTime range is too small for baseflow smoothing days. Enter a period of " \
                         + "at least " + str(2 * baseflow_smoothing_days) + " days."
                        print(msg)
                    else:
                        break
                else:
                    break
        except KeyboardInterrupt:
            # Ctrl-C re-prompts from the top of this question.
            print(key_err_msg)


    # Ask whether INFLOW elements absent from the RPT should be treated as a
    # constant zero contribution.  Defaults to False until the user answers.
    missing_inflows_are_zero = False
    while True:
        try:
            msg = "If INFLOW elements from the segment map are missing from the RPT, would you like " \
                + "the procedure to assume that the contribution of these elements is a constant 0?"
            print(msg)
            yesno = raw_input('(Y/N) >> ')

            if re.match('y', yesno, re.IGNORECASE):
                missing_inflows_are_zero = True
                break
            elif re.match('n', yesno, re.IGNORECASE):
                # BUG FIX: was 'missing_inlows_are_zero = False' (misspelled),
                # which silently created a throwaway variable instead of setting
                # the flag.  Harmless only because of the default above.
                missing_inflows_are_zero = False
                break
            else:
                print('Unexpected input.')
        except KeyboardInterrupt:
            # Ctrl-C re-prompts; key_err_msg is defined earlier in this function.
            print(key_err_msg)

    # Python 2 print statement: echo the volume-correction flag chosen above.
    print correct_vol

    # Hand all interactively gathered settings to the main procedure.
    # NOTE(review): rpt_path, inp_path, out_path and segmappaths are collected
    # earlier in this function (above this excerpt) -- confirm the names there.
    process(filter_mins=filter_mins, 
            dummy_end=dummy_end,
            rptpath=rpt_path, 
            inppath=inp_path, 
            outpath=out_path, 
            segmap_paths=segmappaths, 
            correct_vol=correct_vol,
            correction_threshold=correction_threshold, 
            window_size=window_size,
            abs_basevol_signal=abs_basevol_signal, 
            event_start=event_start, 
            event_end=event_end, 
            negative_threshold_pct=negative_threshold_pct,
            micro_window_pct=micro_window_pct,
            baseconditions_rpt=baseconditions_rpt,
            baseflow_smoothing_days=baseflow_smoothing_days,
            missing_inflows_are_zero=missing_inflows_are_zero)

def test():
    """Smoke-test process() over a small grid of window/threshold parameters.

    Writes one output directory per (window, threshold) combination under
    <data_dir>/parameter_tests, wiping any previous test output first.
    Paths are hard-coded to a local Windows data directory.
    """
    #windows = [360, 720, 1080, 1440]
    #thresholds = [0.05, 0.005, 0.0005]
    windows = [720, 1080]
    thresholds = [0.005]
    data_dir = r'C:\Data\MatthewPlourde\JOSEF\hydmaker\hydmaker_no_svol\22seg\TTF_WQModel_Nobridges_Aug30_04'
    rpt_path = os.path.join(data_dir, 'TTF_WQModel_Nobridges_Aug30_04.rpt')
    inp_path = os.path.join(data_dir, 'TTF_WQModel_Nobridges_Aug30_04.inp')
    seg_map = os.path.join(data_dir, 'D2_S1_22seg.map_short_portion.csv')

    test_dir = os.path.join(data_dir, 'parameter_tests')
    if os.path.isdir(test_dir):
        shutil.rmtree(test_dir)  # start from a clean slate on every run
    os.mkdir(test_dir)

    for win in windows:
        for thresh in thresholds:
            outdir = os.path.join(test_dir, '_'.join(['test', str(win), str(thresh)]))
            os.mkdir(outdir)
            outpath = os.path.join(outdir, 'out.hyd.txt')
            print(win, thresh)
            # BUG FIX: dropped 'correction_lookahead=True' -- process() has no
            # such parameter (see its signature), so every call raised TypeError.
            process(filter_mins=None, dummy_end=True, rptpath=rpt_path, inppath=inp_path, outpath=outpath, 
                    segmap_paths=[seg_map], correct_vol=True, window_size=win, correction_threshold=thresh)

if __name__ == '__main__':
    # Command-line argument parsing was disabled in favor of the interactive
    # prompts inside run(); the argparse version is kept here for reference.
#    parser = argparse.ArgumentParser()
#    parser.add_argument('--filter_mins', type=int)
#    parser.add_argument('--dummy_end', action='store_true')
#    args = parser.parse_args() 
#    run(filter_mins=args.filter_mins, dummy_end=args.dummy_end)
    run()  # run() is defined earlier in this file, outside this excerpt

