import os
import sys
import csv
import psycopg2 as pg
import pyodbc
import traceback, code
import VERSION
import itertools
import StringIO
import glob
import stat
from datetime import datetime
import fnmatch
import xlrd
import string
import re
import shutil
import pyfmdb
import pyodbc
from collections import OrderedDict
import collections
from contextlib import closing

# Package version string, sourced from the sibling VERSION module.
__version__ = VERSION.api
__doc__ = ''
# Directory containing this module; falls back to the current working
# directory when __file__ is unavailable (e.g. interactive sessions).
__dir__ = os.path.dirname(__file__) if '__file__' in globals() else os.path.abspath(os.curdir)
# Resource directory: the PyInstaller extraction dir (_MEIPASS) when running
# frozen, otherwise the module directory.
__resources__ = sys._MEIPASS if hasattr(sys, '_MEIPASS') else __dir__

# TODO make sure that when you filter for xls files, you ignore locks, which start with a tilde

#exec("""from fmdb.pyfmdb import *
#build_database(host='localhost', admin='postgres', password='watershed.123', port=5433, delete_existing=True)
#cnxn = connection('localhost', user='postgres', password='watershed.123', port=5433)
#cnxn.batch_update(r'D:\Data\Flow Monitoring by ManholeID', is_sses=True)""")

#exec("""from fmdb.pyfmdb import *
#cnxn = connect('localhost', 'postgres', 'watershed.123')
#cnxn.batch_update(r'D:\Data\Flow Monitoring by ManholeID', is_sses=False)""")

#cnxn.batch_update(r'D:\Data\Flow Monitoring by ManholeID', is_sses=False)""")

#   exec("""from fmdb.pyfmdb import *
#build_database(host='localhost', admin='postgres', password='watershed.123', port=5434, delete_existing=True)
#cnxn = connection('localhost', user='postgres', password='watershed.123', port=5434)
#cnxn.batch_update(r'D:\Data\FM_bugs', is_sses=False)""")


# Default PostgreSQL port used by the developer helpers below.
_port = 5432
def mysetup():
    """Developer helper: rebuild the fmdb database on localhost and
    register a single admin login.

    NOTE(review): credentials are hardcoded dev values.
    """
    # Drop and recreate the database, then connect as the superuser.
    build_database(host='localhost', port=_port, admin='postgres', password='watershed.123', delete_existing=True)
    conn = connect(host='localhost', port=_port, user='postgres', password='watershed.123')
    conn.make_user('matthew.plourde', 'sunnyday.1')
    conn.add_admin('matthew.plourde')

def loaddata():
    """Developer helper: import the local flow-monitoring data tree into fmdb."""
    db = connect(host='localhost', port=_port, user='matthew.plourde', password='sunnyday.1')
    db.batch_update(r'D:\Data\FlowMonitoring')
    # Alternative source trees used during development:
    #db.batch_update(r'\\pwdoows1\Modeling\Data\Temporary Monitors\Flow Monitoring\Flow Monitoring by ManholeID')
    #db.batch_update(r'D:\Data\FM_tests')

class ColumnError(Exception):
    """Raised when a requested column cannot be located in a worksheet."""

    def __init__(self, col):
        message = "Column '%s' couldn't be found." % (col,)
        super(ColumnError, self).__init__(message)

def yesno_prompt(prompt):
    """Repeatedly ask a yes/no question on stdin.

    Only the first character of the answer is inspected,
    case-insensitively: 'y...' returns True, 'n...' returns False,
    anything else (including empty input) re-prompts.
    """
    while True:
        print(prompt)
        ans = raw_input("[y/n] >> ")
        try:
            ans = ans.lower()[0]
            if ans == 'y':
                return True
            elif ans == 'n':
                return False
            else:
                raise ValueError()
        # BUG FIX: 'except IndexError, ValueError:' is py2 syntax that binds
        # IndexError to the name ValueError and never catches ValueError.
        # The tuple form catches both (empty input -> IndexError,
        # unrecognized input -> ValueError).
        except (IndexError, ValueError):
            print('Invalid input.')

def matches_pat(v, pat):
    """Match *v* against *pat* anchored at both ends, case-insensitively.

    Returns False for non-string values, otherwise the re match object
    (or None when the pattern does not match).
    """
    if not isinstance(v, basestring):
        return False
    return re.match(pat + '$', str(v), re.IGNORECASE)

def identify_struck_out_events(wb, sheetname, id_col, title=None):
    """Return a list of booleans, one per data row under *id_col*, telling
    whether that row's ID cell is rendered with a struck-out font.

    wb        -- open xlrd workbook (must expose xf_list/font_list, i.e.
                 opened with formatting_info=True -- TODO confirm at callers)
    sheetname -- worksheet to inspect
    id_col    -- regex for the header cell that anchors the ID column
    title     -- optional regex for a table title; when given, the header
                 search starts below the title row and at/right of its column

    Raises Exception when the title or the table header cannot be found.
    """
    ws = wb.sheet_by_name(sheetname)

    if title:
        # Locate the title cell; the table header must sit below it.
        for i in range(ws.nrows):
            title_vals = [j for j, v in enumerate(ws.row_values(i)) if matches_pat(v, title)]

            if title_vals:
                rows_lb = i + 1
                cols_lb = title_vals[0]
        try:
            rows_lb
        except NameError:
            raise Exception('Could not find worksheet table title.')
    else:
        rows_lb = 0
        cols_lb = 0

    # Scan columns left-to-right for the header cell matching id_col.
    for i in range(cols_lb, ws.ncols):
        header_vals = [j for j, v in enumerate(ws.col_values(i, rows_lb)) if matches_pat(v, id_col)]

        if header_vals:
            header_row = rows_lb + header_vals[0]
            start_col = i
            break

    # BUG FIX: a missing header previously surfaced as a bare NameError on
    # header_row below; fail with an explanatory message instead (matching
    # query_worksheet's behaviour).
    try:
        header_row
    except NameError:
        raise Exception("Could not find worksheet table.")

    # Walk down the ID column; the first text cell terminates the event list.
    is_strike_out = []
    for i in range(header_row + 1, ws.nrows):
        cell_value = ws.cell_value(i, start_col)
        if isinstance(cell_value, basestring):
            break
        else:
            cell_ix = ws.cell_xf_index(i, start_col)
            xf = wb.xf_list[cell_ix]
            font = wb.font_list[xf.font_index]
            is_strike_out.append(font.struck_out)

    return is_strike_out


def query_worksheet(ws, id_col, cols, title=None, return_rows=False, return_full_names=True, 
        max_cols_hint=None, browse=False, return_cols=False):
    """Extract named columns from a table embedded somewhere in an xlrd
    worksheet.

    ws                -- xlrd worksheet object
    id_col            -- regex for the header cell of the table's ID column;
                         the matching cell anchors the table's top-left
    cols              -- column-name patterns to extract; an exact
                         (anchored) match wins, otherwise a case-insensitive
                         prefix match, with ties broken by the shortest header
    title             -- optional regex for a table title above the header
    return_rows       -- return a list of per-row dicts instead of a dict of
                         column-value lists
    return_full_names -- key results by the worksheet header text (True) or
                         by the requested pattern (False)
    max_cols_hint     -- stop scanning header cells after this many columns
    browse            -- drop into an interactive console (debugging aid)
    return_cols       -- key results by the requested column names

    Raises ColumnError when a requested column cannot be matched, and
    Exception when the title/table/start row cannot be located.
    """
    if not max_cols_hint:
        max_cols_hint = ws.ncols

    if title:
        # Find the title cell; the header must be below it and at or right
        # of its column.
        for i in range(ws.nrows):
            title_vals = [j for j, v in enumerate(ws.row_values(i)) if matches_pat(v, title)]

            if title_vals:
                rows_lb = i + 1
                cols_lb = title_vals[0]
        try:
            rows_lb
        except NameError:
            raise Exception('Could not find worksheet table title.')
    else:
        rows_lb = 0
        cols_lb = 0

    # Locate the header cell matching id_col.
    for i in range(cols_lb, ws.ncols):
        header_vals = [j for j, v in enumerate(ws.col_values(i, rows_lb)) if matches_pat(v, id_col)]

        if header_vals:
            header_row = rows_lb + header_vals[0]
            start_col = i
            break

    try:
        header_row
    except NameError:
        raise Exception("Could not find worksheet table.")

    header_values = ws.row_values(header_row, start_col)

    # The table is as wide as the run of non-empty string header cells
    # (capped by max_cols_hint).
    for i, h in enumerate(header_values):
        if i == max_cols_hint or not h or not isinstance(h, basestring):
            break
        else:
            table_ncol = i + 1

    id_coltypes = ws.col_types(start_col, header_row + 1)
    id_values = ws.col_values(start_col, header_row + 1)
    # The dominant cell type among the first few non-blank ID cells defines
    # what a "data" row looks like.
    rows_to_determine_idtype = 6
    id_coltype = max(set([x for x in id_coltypes[:rows_to_determine_idtype] if x != xlrd.XL_CELL_BLANK]), key=id_coltypes.count)

    # First ID cell with the data type marks the start of the data rows.
    for i, coltype in enumerate(id_coltypes):
        if coltype == id_coltype:
            start_row = i + header_row + 1
            break
    try:
        start_row
    except NameError:
        raise Exception("Could not find worksheet table starting row.")

    # The table ends at the first ID cell with a different type or an empty
    # value; otherwise it runs to the bottom of the sheet.
    for i, coltype, value in zip(itertools.count(), id_coltypes, id_values):
        if coltype != id_coltype or value == '' or value is None:
            table_nrow = i
            break
    try:
        table_nrow
    except NameError:
        table_nrow = ws.nrows - start_row

    header = header_values[:table_ncol]

    if browse:
        print('browsing')
        code.interact(local=locals())

    fields = []
    out_cols = []
    MATCHES_EXACTLY = 2
    MATCHES = 1

    header = [re.sub('\n', '', h) for h in header]
    for col in cols:
        matches = [0 for _ in range(len(header))]
        col_cleaned = re.escape(col.strip('$').strip())
        col_escaped = re.escape(col)
        for i, field in enumerate(header):
            if re.match(col_cleaned + '$', field):
                matches[i] = MATCHES_EXACTLY
            elif re.match(col_escaped, field, re.IGNORECASE):
                matches[i] = MATCHES

        if MATCHES_EXACTLY in matches:
            if matches.count(MATCHES_EXACTLY) > 1:
                raise Exception("Worksheet has non-unique columns.")
            else:
                fields.append(header[matches.index(MATCHES_EXACTLY)])
                out_cols.append(col)
        elif MATCHES in matches:
            if matches.count(MATCHES) > 1:
                # Several headers share the prefix; prefer the shortest one.
                # BUG FIX: the options list previously captured 'x', a
                # variable leaked from an earlier comprehension, instead of
                # the header text -- so the selected "field" was garbage.
                options = [(h, len(h)) for m, h in zip(matches, header) if m == MATCHES]
                min_l = min([tup[1] for tup in options])
                selection = [tup[0] for tup in options if tup[1] == min_l][0]
                fields.append(selection)
                out_cols.append(col)
            else:
                fields.append(header[matches.index(MATCHES)])
                out_cols.append(col)
        else:
            raise ColumnError(col)

    results = {}
    for col, field in zip(out_cols, fields):
        final_colname = col if return_cols else field
        results[final_colname] = ws.col_values(start_col + header.index(field), start_row, start_row + table_nrow)

    # BUG FIX: when return_cols is set, results are already keyed by the
    # requested names; re-keying via the header names raised KeyError.
    if not return_full_names and not return_cols:
        results_shortnames = {}
        for field, col in zip(fields, cols):
            results_shortnames[col] = results[field]
        results = results_shortnames

    return results if not return_rows else [dict(zip(results.keys(), row)) for row in zip(*results.values())]

class FMDBException(Exception):
    """Base exception for user-facing fmdb failures (e.g. cancelled builds)."""

    def __init__(self, msg):
        super(FMDBException, self).__init__(msg)

def update_database(script, host=None, port=5432, admin=None, password=None):
    """Execute a SQL setup *script* against the fmdb database.

    script -- path to a SQL file defining tables and stored procedures
    host   -- when omitted, developer defaults are used.
              NOTE(review): hardcoded credentials; should come from config.
    """
    if not host:
        host = '\\28-aramplour3'
        admin = 'postgres'
        password = 'watershed.123'

    # open the setup script that defines tables and procedures
    with open(script, 'r') as f:
        script_sql = f.read()

    # execute the script
    with pg.connect(host=host, port=port, dbname='fmdb', user=admin, password=password) as cnxn:
        cnxn.autocommit = True
        with cnxn.cursor() as cursor:
            try:
                cursor.execute(script_sql)
            # Narrowed from bare 'except:' so KeyboardInterrupt/SystemExit
            # still propagate; traceback/code are already imported at module
            # top, so the redundant local import was removed.
            except Exception:
                # Debugging aid: drop into an interactive console with the
                # failure context.
                traceback.print_exc()
                code.interact(local=dict(locals().items() + globals().items()))

def build_database(script=os.path.join(__dir__, "database_setup.sql"), host=None, port=5432, admin=None, 
        password=None, delete_existing=False, make_test_user=False):
    """(Re)create the fmdb database and the fmdb_user/fmdb_admin roles.

    script          -- SQL file defining the schema and stored procedures
    delete_existing -- when False, prompt interactively before dropping
                       the database or either role
    make_test_user  -- additionally create an 'fmdb_tester' admin login

    Raises FMDBException when the user cancels at any prompt.
    """
    with pg.connect(host=host, port=port, user=admin, password=password) as cnxn:
        cnxn.autocommit = True
        with cnxn.cursor() as cursor:
            cursor.execute("SELECT * FROM pg_catalog.pg_database WHERE datname = 'fmdb'") 
            if cursor.fetchall():
                if not delete_existing:
                    # CONSISTENCY/BUG FIX: the inline prompt loop this
                    # replaces duplicated yesno_prompt and carried the py2
                    # 'except IndexError, ValueError' bug (ValueError was
                    # never actually caught).
                    if not yesno_prompt("The fmdb database already exists on this server. Do you want to delete it?"):
                        raise FMDBException("Database build cancelled by user.")

                # disconnect any other users that are currently connected, then drop the database
                cursor.execute("""
                    SELECT pg_terminate_backend(pg_stat_activity.pid)
                    FROM pg_stat_activity
                    WHERE pg_stat_activity.datname = 'fmdb' AND pid <> pg_backend_pid();
                """)
                cursor.execute("DROP DATABASE fmdb")

            cursor.execute("CREATE DATABASE fmdb")

            cursor.execute("SELECT 1 FROM pg_catalog.pg_group WHERE groname='fmdb_user'")
            if cursor.fetchone():
                if not delete_existing:
                    prompt = "There is already a role for fmdb_user. This installation will overwrite it. Do you want to proceed?"
                    if not yesno_prompt(prompt):
                        raise FMDBException("Database build cancelled by user.")

                cursor.execute("DROP ROLE fmdb_user")

            cursor.execute("SELECT 1 FROM pg_catalog.pg_group WHERE groname='fmdb_admin'")
            if cursor.fetchone():
                if not delete_existing:
                    prompt = "There is already a role for fmdb_admin. This installation will overwrite it. " + \
                            "Do you want to proceed?"
                    if not yesno_prompt(prompt):
                        raise FMDBException("Database build cancelled by user.")

                cursor.execute("DROP ROLE fmdb_admin")

    # Install the schema and stored procedures into the fresh database.
    update_database(script, host=host, port=port, admin=admin, password=password)

    if make_test_user:
        with connect(host=host, port=port, user=admin, password=password) as cnxn:
            # Best-effort removal of a leftover tester login; narrowed from
            # a bare 'except:'.
            try:
                cnxn.execute("DROP USER fmdb_tester")
            except Exception:
                pass

            cnxn.make_user('fmdb_tester', 'fmdb_tester')
            cnxn.add_admin('fmdb_tester')

def connect(host, user, password, port=5432, autocommit=True):
    """Convenience wrapper: open a ``connection`` to the fmdb database."""
    cnxn = connection(host, user, password, port=port, autocommit=autocommit)
    return cnxn

class connection(object):
    def __init__(self, host, user, password, port=5432, autocommit=True):
        """Connect to the fmdb database and cache namedtuple classes that
        mirror the server-side composite types / tables used elsewhere
        (filesummary_type, sites_type, eventstatistics, allflows)."""
        self.cnxn = pg.connect(host=host, dbname='fmdb', user=user, port=port, password=password)
        self.cnxn.autocommit = autocommit
        self.cursor = self.cnxn.cursor()
        # Column names of the most recent SELECT; maintained by execute().
        self.cursor_columns = None

        def table_columns(table_name):
            # One parameterized lookup replaces four copy-pasted queries.
            # ORDER BY ordinal_position makes the field order explicit; the
            # namedtuples below are splatted into composite-type casts, so
            # their field order must match the declared column order.
            self.cursor.execute("""
                SELECT column_name
                FROM information_schema.columns
                WHERE table_name = %s
                ORDER BY ordinal_position
            """, (table_name,))
            return [row[0] for row in self.cursor.fetchall()]

        self.filesummary_type = collections.namedtuple('filesummary_type', table_columns('filesummary_type'))
        self.sites_type = collections.namedtuple('sites_type', table_columns('sites_type'))
        self.eventstatistics = collections.namedtuple('eventstatistics', table_columns('eventstatistics'))
        self.allflows = collections.namedtuple('allflows', table_columns('allflows'))

    def __enter__(self):
        """Context-manager entry: yield the connection object itself."""
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Context-manager exit: always release the cursor and connection.

        Returns False when an exception occurred (so it propagates to the
        caller), True otherwise -- same contract as the original.
        """
        # Consistency: delegate to close() instead of duplicating its body.
        self.close()
        return exc_type is None

    def close(self):
        """Close the shared cursor, then the underlying connection."""
        for resource in (self.cursor, self.cnxn):
            resource.close()

    def execute(self, *args):
        """Run a statement through the shared cursor, tracking the result
        column names in self.cursor_columns (None when the statement failed
        or produced no result set)."""
        try:
            self.cursor.execute(*args)
        except:
            self.cursor_columns = None
            raise
        # Reached only on success, so this is equivalent to the try/else form.
        description = self.cursor.description
        self.cursor_columns = [column.name for column in description] if description else None

    def fetchall(self, dicts=False):
        """Fetch all remaining rows; as dicts keyed by the last query's
        column names when dicts=True."""
        rows = self.cursor.fetchall()
        if dicts and rows:
            return [dict(zip(self.cursor_columns, row)) for row in rows]
        return rows
    
    def fetchone(self, dicts=False):
        """Fetch the next row; as a dict keyed by the last query's column
        names when dicts=True."""
        row = self.cursor.fetchone()
        if dicts and row:
            return dict(zip(self.cursor_columns, row))
        return row

    def fetchmany(self, size=None, dicts=False):
        """Fetch up to *size* rows (defaults to cursor.arraysize when size
        is falsy); as dicts keyed by column name when dicts=True."""
        rows = self.cursor.fetchmany(size if size else self.cursor.arraysize)
        if dicts and rows:
            return [dict(zip(self.cursor_columns, row)) for row in rows]
        return rows

    def make_user(self, name, password):
        """Create a database login via the make_user() stored procedure.

        SECURITY FIX: parameterized query; the previous string concatenation
        was open to SQL injection through name/password.
        """
        self.execute("SELECT make_user(%s, %s)", (name, password))
    
    def add_user(self, name):
        """Grant fmdb_user membership via add_fmdb_user().

        SECURITY FIX: parameterized query instead of string concatenation.
        """
        self.execute("SELECT add_fmdb_user(%s)", (name,))

    def add_admin(self, name):
        """Grant fmdb_admin membership via add_fmdb_admin().

        SECURITY FIX: parameterized query instead of string concatenation.
        """
        self.execute("SELECT add_fmdb_admin(%s)", (name,))

    def update_filesum(self, filesum_row):
        """Persist one FileSummary record via the update_filesummary()
        procedure. *filesum_row* is an OrderedDict whose values must be in
        filesummary_type field order."""
        record = self.filesummary_type(*filesum_row.values())
        self.cursor.execute("SELECT update_filesummary(%s)", (record,))

    def batch_update(self, root, is_sses=False):
        """Walk a flow-monitoring directory tree rooted at *root* and import
        every manhole / site / deployment found via _doImportSSOAP.

        Expected layout (each level optional):
            root/<MANHOLEID>/[<site dir>/][<... Deployment YYYY ...>/]

        is_sses -- forwarded to _doImportSSOAP; selects the SSES 'final'
                   folder convention.
        """
        # Manhole folders appear to be all-uppercase IDs: any lowercase
        # character excludes the folder -- TODO confirm naming convention.
        manholes = [dirname for dirname in os.listdir(root) if not any(ch in string.ascii_lowercase for ch in dirname)]
        manhole_paths = [os.path.join(root, dirname) for dirname in manholes]
        manhole_paths = [m for m in manhole_paths if os.path.isdir(m)]

        for i, manhole_path in enumerate(manhole_paths):
            manhole_id = os.path.basename(manhole_path)
            manhole_folder_contents = [os.path.join(manhole_path, f) for f in os.listdir(manhole_path)]
            subdirs = filter(os.path.isdir, manhole_folder_contents)
            # Subfolders whose basename matches any of these words are not
            # site folders (videos, reports, photos, ...).
            nonsite_names_pat = re.compile('video|report|photo|picture|invest|supplementary|scans|bk|deploy', re.IGNORECASE)
            site_paths = [p for p in subdirs if not re.search(nonsite_names_pat, os.path.basename(p))]

            # NOTE(review): this searches the FULL path for 'deploy', unlike
            # the basename-only checks elsewhere -- presumably intentional,
            # but worth confirming.
            deploy_paths = [p for p in subdirs if re.search('deploy', p, re.IGNORECASE)]

            if site_paths:
                for site_path in site_paths:
                    site_id = os.path.basename(site_path)
                    # Folders named like 'Site 3' are reduced to the bare
                    # number; folders with 'site' but no digits are skipped.
                    if re.search(re.compile('site', re.IGNORECASE), site_id): 
                        site_id = re.findall('\d+', site_id)
                        if site_id:
                            site_id = str(int(site_id[0]))
                        else:
                            continue
                    site_folder_contents = [os.path.join(site_path, f) for f in os.listdir(site_path)]
                    deploy_paths = filter(os.path.isdir, site_folder_contents)
                    deploy_paths = [f for f in deploy_paths if re.search('[dD]eployment', os.path.basename(f))]
                    if deploy_paths:
                        for deploy_path in deploy_paths:
                            deployment = os.path.basename(deploy_path)
                            # A single 4-digit run in the folder name is the
                            # deployment year; otherwise import without one.
                            deployment_year = re.findall('\d{4}', deployment)
                            if len(deployment_year) == 1:
                                deployment_year = int(deployment_year[0])
                                self._doImportSSOAP(manhole_id, site_id, deployment_year, deploy_path, is_sses=is_sses)
                            else:
                                self._doImportSSOAP(manhole_id, site_id, None, deploy_path, is_sses=is_sses)
                    else:
                        # No deployment folders: the site folder itself holds
                        # the data.
                        self._doImportSSOAP(manhole_id, site_id, None, site_path, is_sses=is_sses)
            elif deploy_paths:
                # Deployment folders directly under the manhole: the manhole
                # ID doubles as the site ID. Ambiguous years are skipped.
                for deploy_path in deploy_paths:
                    deployment = os.path.basename(deploy_path)
                    deployment_year = re.findall('\d{4}', deployment)
                    if len(deployment_year) == 1:
                        deployment_year = int(deployment_year[0])
                        self._doImportSSOAP(manhole_id, manhole_id, deployment_year, deploy_path, is_sses=is_sses)
                    else:
                        continue
            else:
                # Flat layout: data lives directly in the manhole folder.
                self._doImportSSOAP(manhole_id, None, None, manhole_path, is_sses=is_sses)

    def _doImportSSOAP(self, manhole_id, site_id, deployment, path, is_sses=False):
        """Locate the SSOAP folder (and its 'final'/'sses' subfolder) under
        *path*, run the import, and record a FileSummary row describing what
        was found. Import failures are captured into the importerror field
        rather than propagating; the transaction is committed at the end.
        """
        # One FileSummary record; every flag starts out False.
        filesum_row = OrderedDict(zip(self.filesummary_type._fields, itertools.cycle([False])))
        filesum_row['manholeid'] = manhole_id 
        filesum_row['siteid'] = site_id
        filesum_row['deployment'] = deployment
        filesum_row['modificationtimesconsistent'] = True
        filesum_row['importerror'] = None

        print(' '.join(['starting ', str(manhole_id), str(site_id), str(deployment)]))
        ssoap_dir = [fname for fname in os.listdir(path) if re.match(re.compile('ssoap', re.IGNORECASE), fname)]
        if len(ssoap_dir) == 1:
            filesum_row['hasssoapfolder'] = True
            ssoap_dir = ssoap_dir[0]
            ssoap_path = os.path.join(path, ssoap_dir)
            # SSES analyses live in an 'sses*' subfolder, others in 'final*'.
            if is_sses:
                final_pat = 'sses'
            else:
                final_pat = 'final'

            try:
                final_dir = [fname for fname in os.listdir(ssoap_path) if re.match(re.compile(final_pat, re.IGNORECASE), fname)]
            except Exception:
                final_dir = []
                filesum_row['importerror'] = traceback.format_exc().encode('string-escape')

            if len(final_dir) == 1:
                filesum_row['hasfinalfolder'] = True
                final_dir = final_dir[0]
                final_path = os.path.join(ssoap_path, final_dir)
                try:
                    if is_sses:
                        res = self._importSSOAP_SSES(final_path, filesum_row, manhole_id, site_id, deployment)
                    else:
                        res = self._importSSOAP(final_path, filesum_row, manhole_id, site_id, deployment)
                except Exception:
                    filesum_row['importerror'] = traceback.format_exc().encode('string-escape')
                    self.update_filesum(filesum_row)
                else:
                    # A non-zero result is a list of file-conflict messages.
                    if res != 0:
                        try:
                            filesum_row['importerror'] = '\n'.join(res)
                            self.update_filesum(filesum_row)
                        except Exception:
                            # Debugging aid kept for unexpected failures.
                            traceback.print_exc()
                            code.interact(local=locals())

            elif len(final_dir) > 1:
                filesum_row['hasfinalfolder'] = True
                filesum_row['filesareambiguous'] = True
                self.update_filesum(filesum_row)
            else:
                self.update_filesum(filesum_row)
        elif len(ssoap_dir) > 1:
            # BUG FIX: key was misspelled 'hassoapfolder' (two s's), which
            # appended a stray key to the OrderedDict and broke
            # update_filesum's filesummary_type(*values()) call.
            filesum_row['hasssoapfolder'] = True
            filesum_row['filesareambiguous'] = True
            self.update_filesum(filesum_row)
        else:
            self.update_filesum(filesum_row)

        print('Done')
        self.cnxn.commit()
    
    def _importSSOAP(self, ssoap_path, filesum_row, manholeid, siteid=None, deployment=None):
        sdb_filepath = os.path.join(ssoap_path, '*.sdb')
        sdb_filepaths = glob.glob(sdb_filepath)
        mdb_filepath = os.path.join(ssoap_path, '*.mdb')
        mdb_filepaths = glob.glob(mdb_filepath)

        dtime_fmt = '%Y-%m-%d %H:%M:%S'
        ssoap_filepaths = sdb_filepaths if sdb_filepaths else mdb_filepaths

        if ssoap_filepaths:
            ssoap_filepath_mtimes = [os.path.getmtime(path) for path in ssoap_filepaths]
            max_mtime = max(ssoap_filepath_mtimes)
            ssoap_filepath = ssoap_filepaths[ssoap_filepath_mtimes.index(max_mtime)]
            ssoap_fname = os.path.basename(ssoap_filepath)
            ssoap_mtime = datetime.fromtimestamp(os.path.getmtime(ssoap_filepath))

            sites_row = OrderedDict([(k, None) for k in self.sites_type._fields])
            sites_row['ssoapfilename'] = ssoap_fname
            sites_row['ssoapmtime'] = ssoap_mtime.strftime(dtime_fmt)
            sites_row['manholeid'] = manholeid
            sites_row['siteid'] = siteid
            sites_row['deployment'] = deployment
            sites_row['directory'] = ssoap_path

            # create temp copy to work with to avoid corrupting original
            temp_ssoap_filepath = os.path.join(os.environ.get('TEMP'), '.temp_ssoap.mdb') 
            if os.path.exists(temp_ssoap_filepath):
                os.chmod(temp_ssoap_filepath, stat.S_IWRITE)
                os.remove(temp_ssoap_filepath)

            shutil.copy(ssoap_filepath, temp_ssoap_filepath)
            os.chmod(temp_ssoap_filepath, stat.S_IWRITE)
            cnxn = pyodbc.connect('DRIVER={Microsoft Access Driver (*.mdb)};DBQ=' + temp_ssoap_filepath)
            cursor = cnxn.cursor()

            cursor.execute('SELECT MeterID, RainGaugeID FROM Analyses')
            metergage_ids = cursor.fetchall()
            raingage, area, units = (None, None, None)

            file_conflict_excs = []
            if not metergage_ids:
                msg = 'The SSOAP file in "' + ssoap_path + '" is missing its meter and gage.'
                file_conflict_excs.append(msg)
            if len(metergage_ids) > 1:
                msg = 'The SSOAP file in "' + ssoap_path + '" has multiple meters and gages.'
                file_conflict_excs.append(msg)
            else:
                meterid = metergage_ids[0].MeterID
                gageid = metergage_ids[0].RainGaugeID

                cursor.execute("SELECT StartDateTime, EndDateTime FROM Meters WHERE MeterID = ?", meterid)
                startdtime, enddtime = cursor.fetchone()
                sites_row['startdatetime'] = startdtime.strftime(dtime_fmt)
                sites_row['enddatetime'] = enddtime.strftime(dtime_fmt)

                cursor.execute('SELECT RaingaugeName FROM Raingauges WHERE RaingaugeID = ?', gageid)
                sites_row['raingage'] = str(cursor.fetchone().RaingaugeName)
                try:
                    cursor.execute('SELECT Area, AreaUnitID FROM Meters WHERE MeterID = ?', meterid)
                except:
                    # the Meters table in old ssoap files doesn't have AreaUnitID
                    cursor.execute('SELECT Area FROM Meters WHERE MeterID = ?', meterid)
                    sites_row['area'] = cursor.fetchone().Area
                    sites_row['areaunits'] = None
                else:
                    sites_row['area'], units_id = cursor.fetchone()
                    cursor.execute('SELECT ShortLabel FROM AreaUnits WHERE AreaUnitID = ?', units_id)
                    sites_row['areaunits'] = str(cursor.fetchone().ShortLabel)

            cursor.close()
            cnxn.close()
            os.remove(temp_ssoap_filepath)

            ## GET EVENT NOTES DATA FROM RVALUE SPREADSHEET
            xls_paths = glob.glob(os.path.join(ssoap_path, '*.xls')) + glob.glob(os.path.join(ssoap_path, '*.xlsx'))
            xls_fnames = [os.path.basename(path) for path in xls_paths]
            eventnotes_pat = re.compile('rvalue', re.IGNORECASE)
            eventnotes_fnames = [fname for fname in xls_fnames 
                                 if re.search(eventnotes_pat, fname) and not fname.startswith('~')]

            if len(eventnotes_fnames) == 0:
                file_conflict_excs.append("No event notes found for " + ssoap_path)
            elif len(eventnotes_fnames) > 1:
                file_conflict_excs.append('Multiple event notes spreadsheets found in ' + ssoap_path)
                filesum_row['haseventnotes'] = True
                filesum_row['filesareambiguous'] = True
                file_conflict_excs.append("No eventnotes found in " + ssoap_path)
            else:
                filesum_row['haseventnotes'] = True
                eventnotes_path = os.path.join(ssoap_path, eventnotes_fnames[0])
                sites_row['eventnotesmtime'] = datetime.fromtimestamp(os.path.getmtime(eventnotes_path)).strftime(dtime_fmt)
                sites_row['eventnotesfilename'] = eventnotes_fnames[0]

            csv_paths = glob.glob(os.path.join(ssoap_path, '*.csv'))
            csv_fnames = [os.path.basename(path) for path in csv_paths]

            rtk_path = None
            rtk_csvs = [fname for fname in csv_fnames if re.search(re.compile('simul', re.IGNORECASE), fname)]
            if len(rtk_csvs) == 1:
                filesum_row['hasrtk'] = True
                sites_row['rtkfilename'] = rtk_csvs[0]
                rtk_path = os.path.join(ssoap_path, rtk_csvs[0])
                rtk_mtime = datetime.fromtimestamp(os.path.getmtime(rtk_path))
                sites_row['rtkmtime'] = rtk_mtime.strftime(dtime_fmt)
                if rtk_mtime < ssoap_mtime:
                    filesum_row['modificationtimesconsistent'] = False
            elif len(rtk_csvs) > 1:
                file_conflict_excs.append('Multiple RTK files found.')
                filesum_row['hasrtk'] = True
                filesum_row['filesareambiguous'] = True

            self.cursor.execute("SELECT get_analysis_id(%s, %s, %s)", (manholeid, siteid, deployment))
            analysisid = self.cursor.fetchone()[0]
            allflows_outofdate = True
            eventstats_outofdate = True

            all_up_to_date = False
            if analysisid:
                self.execute("SELECT SSOAPMTime, EventNotesMTime, RTKMtime FROM Sites WHERE AnalysisID = %s", (analysisid,))
                db_ssoap_mtime, db_notes_mtime, db_rtk_mtime = self.fetchone()
                db_ssoap_mtime_str = db_ssoap_mtime.strftime(dtime_fmt)
                db_notes_mtime_str = db_notes_mtime.strftime(dtime_fmt)
                db_rtk_mtime_str = db_rtk_mtime.strftime(dtime_fmt) if db_rtk_mtime else None

                ssoap_uptodate = db_ssoap_mtime_str == sites_row['ssoapmtime']
                notes_uptodate = db_notes_mtime_str == sites_row['eventnotesmtime']
                rtk_uptodate = db_rtk_mtime_str == sites_row['rtkmtime']
                
                if ssoap_uptodate and notes_uptodate and rtk_uptodate:
                    all_up_to_date = True

                if ssoap_uptodate:
                    allflows_outofdate = False

                if notes_uptodate and rtk_uptodate:
                    eventstats_outofdate = False

            # GET RVALUE DATA
            rvalue_csvs = [fname for fname in csv_fnames 
                           if re.search(re.compile('rvalue|event_*stat', re.IGNORECASE), fname)]
            if len(rvalue_csvs) == 1:
                filesum_row['hasrvalue'] = True
                sites_row['rvaluefilename'] = rvalue_csvs[0]
                rvalue_path = os.path.join(ssoap_path, rvalue_csvs[0])
                rvalue_mtime = datetime.fromtimestamp(os.path.getmtime(rvalue_path))
                if rvalue_mtime < ssoap_mtime:
                    filesum_row['modificationtimesconsistent'] = False
                sites_row['rvaluemtime'] = rvalue_mtime.strftime(dtime_fmt)
            elif len(rvalue_csvs) > 1:
                file_conflict_excs.append("Multiple RValue CSVs found in " + ssoap_path)
                filesum_row['filesareambiguous'] = True
                filesum_row['hasrvalue'] = True
            else: 
                file_conflict_excs.append("No RValue CSVs found in " + ssoap_path)


            allflows_pat = re.compile('all_?flow', re.IGNORECASE)
            allflows_fnames = [fname for fname in csv_fnames if re.search(allflows_pat, fname)]
            if len(allflows_fnames) == 0:
                file_conflict_excs.append("No all flows file found.")
            elif len(allflows_fnames) > 1:
                filesum_row['hasallflows'] = False
                file_conflict_excs.append("Multiple allflows files found.")
            else:
                filesum_row['hasallflows'] = True
                sites_row['allflowsfilename'] = allflows_fnames[0]
                allflows_path = os.path.join(ssoap_path, allflows_fnames[0])
                allflows_mtime = datetime.fromtimestamp(os.path.getmtime(allflows_path))
                sites_row['allflowsmtime'] = allflows_mtime.strftime(dtime_fmt)
                if allflows_mtime < ssoap_mtime:
                    filesum_row['modificationtimesconsistent'] = False

            self.update_filesum(filesum_row)

            self.cursor.execute("SELECT update_analysis(%s)", (self.sites_type(*sites_row.values()), ))
            analysisid = self.cursor.fetchone()[0]

            if file_conflict_excs:
                return file_conflict_excs

            os.chmod(ssoap_filepath, stat.S_IREAD)

            if all_up_to_date:
                return 0

            if (allflows_outofdate or eventstats_outofdate):
                wb = xlrd.open_workbook(eventnotes_path)
                sheet_names = [sheet.name for sheet in wb.sheets()]
                en_label = [name for name in sheet_names if re.search(re.compile('event\s*notes', re.IGNORECASE), name)]
                if len(en_label) == 1:
                    en_label = en_label[0]
                    event_notes = wb.sheet_by_name(en_label)
                    en_titles = '(Event Notes)|(Storms Events Cut)|(Storm Event Tails Cut)'
                    en_fields = ['Event Number', 'Good or Bad', 'Notes']
                    eventnotes_table = query_worksheet(event_notes, 'Event Number', en_fields, title=en_titles, return_rows=True,
                            max_cols_hint=5)
                    for row in eventnotes_table:
                        for field in ['Good or Bad', 'Notes']:
                            row[field] = str(row[field].replace(u'\u2019', "'"))
                        # rename columns to match database names
                        row['flag'] = row['Good or Bad']
                        row['notes'] = row['Notes']
                        row['event'] = row['Event Number']
                        del row['Good or Bad']
                        del row['Notes']
                        del row['Event Number']
                elif len(en_label) > 1:
                    raise FMDBException('Ambiguous event tables in ' + path)
                else:
                    raise FMDBException("No event table found in " + path)
                outlier_label_pat = re.compile(r'\(-\)\s*outlier\s*table$', re.IGNORECASE)
                outlier_label = [name for name in sheet_names if re.search(outlier_label_pat, name)]
                if len(outlier_label) > 1:
                    raise FMDBException("Ambiguous outlier table in " + path)
                elif len(outlier_label) == 0:
                    raise FMDBException("Can't find outlier table in " + path)
                outlier_label = outlier_label[0]
                outlier = wb.sheet_by_name(outlier_label)
                outlier_table = query_worksheet(outlier, 'Event', ['Event'])
                good_events = [int(x) for x in outlier_table['Event']]
                for i in range(len(eventnotes_table)):
                    eventnotes_table[i]['outlier'] = 'true' if eventnotes_table[i]['event'] not in good_events else 'false'

                with open(rvalue_path, 'r') as f:
                    # eat header lines
                    header_lines = 8
                    for i in range(header_lines):
                        _ = f.readline()

                    event_time_cols = ['startdatetime', 'enddatetime']
                    rainfall_time_cols = ['rainfall_startdatetime', 'rainfall_enddatetime']
                    header = ['event'] + event_time_cols + ['duration', 'iivolume_in', 'rainvolume_in', 
                        'totalrvalue', 'peakiiflow_mgd', 'peaktotalflow', 'peakfactor', 'rainfall_in_15min', 
                        'observedflow_mgd', 'gwiflow_mgd', 
                        'basewastewaterflow_mgd'] + rainfall_time_cols + ['rainfallduration_hrs']
                    
                    f_buffer = StringIO.StringIO()
                    for line in f:
                        line = line.strip().strip(',') + '\n'
                        f_buffer.write(line)
                    
                    f_buffer.seek(0)
                    reader = csv.DictReader(f_buffer, fieldnames=header)
                    rvalue_table = list(reader)

                    # the rvalue csv lines all end in a comma, leading DictReader to think there is an unnamed
                    # empty column at the end of the table. That column is stored in the dict under the key None,
                    # and here we remove it.
                    for row in rvalue_table:
                        try:
                            del row[None] # some of the event stats csv end with a comma, resulting in an empty column
                        except:
                            pass
                        for k in row.keys():
                            if k not in event_time_cols and k not in rainfall_time_cols:
                                row[k] = float(row[k])

                # GET RTK DATA
                rtk_csvs = [fname for fname in csv_fnames if re.search(re.compile('simul', re.IGNORECASE), fname)]
                rtk_header_keep = ['event'] + event_time_cols + ['duration', 'r1', 't1', 
                        'k1', 'r2', 't2', 'k2', 
                        'r3', 't3', 'k3', 
                        'rainvolume_in', # duplicates rvalue column
                        'peakrainfall', 
                        'totalrvalue', # obs volume in sim vs obs.
                        'simvolume']
                rtk_header_discard1 = [
                        'differencevolume',
                        'percentdifvolume',
                        'peak_i-i_flow_mgd']
                rtk_header_discard2 = ['difference_peak', 
                        'percent_dif_peak']
                simpeak_header = ['simpeak']
                rtk_header_discard = rtk_header_discard1 + rtk_header_discard2
                rtk_header = rtk_header_keep + rtk_header_discard1 + simpeak_header + rtk_header_discard2
                rtk_header_keep = rtk_header_keep + simpeak_header

                rtk_table = []
                if rtk_path:
                    with open(rtk_path, 'r') as f:
                        num_header = 3
                        for i in range(num_header):
                            _ = f.readline()

                        # these are columns in the rtk csv, but they are computed from the others and 
                        # therefore excluded from the database

                        while True:
                            line = f.readline().strip().strip(',')
                            if line.strip() == '':
                                break
                            line = line.split(',')
                            line = zip(rtk_header, line)
                            line = dict([(name, value) for name, value in line if name in rtk_header_keep])
                            rtk_table.append(line)

                        for row in rtk_table:
                            for k in row.keys():
                                if k not in event_time_cols:
                                    row[k] = float(row[k])
                else:
                    rtk_table = [dict(zip(rtk_header_keep, itertools.cycle([None]))) for _ in range(len(rvalue_table))]

                event_stats_table = []
                if len(eventnotes_table) != len(rvalue_table) or len(eventnotes_table) != len(rtk_table):
                    exc = "The number of events in the eventnotes, rvalue CSV, and RTK csv don not match in " + ssoap_path
                    raise FMDBException(exc)

                for notes, rvalues, rtks in zip(eventnotes_table, rvalue_table, rtk_table):
                    event_stats_table.append(dict(rtks.items() + notes.items() + rvalues.items()))


                for row in event_stats_table:
                    row['analysisid'] = analysisid

                if len(self.eventstatistics._fields) != len(event_stats_table[0]):
                    msg = "The number of columns read from the event statistics and simulated vs. " + \
                          "observed CSV files does not " + \
                          "match the number of columns in EventStatistics table."
                    raise FMDBException(msg)

                event_stats_array = [[row[x] for x in self.eventstatistics._fields] for row in event_stats_table]
                event_stats_array = [self.eventstatistics(*row) for row in event_stats_array]
                event_stats_array = [self.cursor.mogrify('%s::eventstatistics', (row,)) for row in event_stats_array]
                event_stats_array = 'ARRAY[' + ','.join(event_stats_array) + ']'
                self.cursor.execute("SELECT update_eventstatistics(" + event_stats_array + ")")

                if allflows_outofdate:
                    with open(allflows_path, 'r') as f:
                        header = f.readline().split(',')
                        origcols = [col.strip("\"' \n") for col in header if col]
                        date_fields = ["month", "day", "year", "hour"] 
                        fields_min_sec = ["minute", "second"]
                        nondate_fields = ["lotus date", "obs flow", "avg DWF", "gwi adjustment", 
                                  "adjusted DWF", "gwi flow", "BWF", "iandi", "curve 1", "curve 2",
                                  "curve 3", "total"]

                        field_mapping = {
                                'analysisid' : 'analysisid',
                                'datetime' : 'DateTime',
                                'lotusdate' : 'lotus date',
                                'obsflow' : 'obs flow',
                                'avgdwf' : 'avg DWF',
                                'gwiadjustment' : 'gwi adjustment',
                                'adjusteddwf' : 'adjusted DWF',
                                'gwiflow' : 'gwi flow',
                                'bwf' : 'BWF',
                                'iandi' : 'iandi',
                                'curve1' : 'curve 1',
                                'curve2' : 'curve 2',
                                'curve3' : 'curve 3',
                                'total' : 'total'
                        }
                        if len(origcols) == 18:
                            if origcols != date_fields + fields_min_sec + nondate_fields:
                                raise Exception("Unexpected fields encountered.")
                            has_min_sec = True
                        elif len(origcols) == 16:
                            if origcols != date_fields + nondate_fields:
                                raise Exception("Unexpected fields encountered.")
                            has_min_sec = False
                        else:
                            raise Exception("Unexpected number of columns in allflows.")

                        reader = csv.DictReader(f, fieldnames=origcols)
                        chunk_size = 10000
                        row_chunks = itertools.izip_longest(*[iter(reader)]*chunk_size, fillvalue=None)
                        for chunk in row_chunks:
                            chunk_table = []
                            for row in chunk:
                                if row:
                                    year = int(row['year'])
                                    month = int(row['month'])
                                    day = int(row['day'])
                                    if has_min_sec:
                                        hour = int(row['hour'])
                                        minute = int(row['minute'])
                                        second = int(row['second'])
                                    else:
                                        hours = row['hour']
                                        hour_split = hours.split('.')
                                        if len(hour_split) > 1:
                                            hour = int(hour_split[0])
                                            minutes = str(float('.' + hour_split[1]) * 60)
                                            minute_split = minutes.split('.')
                                            if len(minute_split) > 1:
                                                minute = int(minute_split[0])
                                                seconds = str(float('.' + minute_split[1]) * 60)
                                                second_split = seconds.split('.')
                                                second = int(second_split[0])
                                            else:
                                                minute = int(minute_split[0])
                                                second = 0
                                        else:
                                            hour = int(hour_split[0])
                                            minute = 0
                                            second = 0

                                    dtime = datetime(year, month, day, hour, minute, second).strftime('%m/%d/%Y %H:%M:%S')
                                    for field in date_fields + fields_min_sec:
                                        if field in row.keys():
                                            del row[field]
                                    row['DateTime'] = dtime
                                    row['analysisid'] = analysisid
                                    new_row = self.allflows(*[row[field_mapping[k]] for k in self.allflows._fields])
                                    chunk_table.append(new_row)

                            allflows_array = [self.cursor.mogrify('%s::allflows', (row,)) for row in chunk_table]
                            allflows_array = 'ARRAY[' + ','.join(allflows_array) + ']'
                            self.cursor.execute("SELECT update_allflows(" + allflows_array + ")")
        return 0

    def _importSSOAP_SSES(self, ssoap_path, filesum_row, manholeid, siteid=None, deployment=None):
        sdb_filepath = os.path.join(ssoap_path, '*.sdb')
        sdb_filepaths = glob.glob(sdb_filepath)
        mdb_filepath = os.path.join(ssoap_path, '*.mdb')
        mdb_filepaths = glob.glob(mdb_filepath)

        dtime_fmt = '%Y-%m-%d %H:%M:%S'
        ssoap_filepaths = sdb_filepaths if sdb_filepaths else mdb_filepaths

        if ssoap_filepaths:
            ssoap_filepath_mtimes = [os.path.getmtime(path) for path in ssoap_filepaths]
            max_mtime = max(ssoap_filepath_mtimes)
            ssoap_filepath = ssoap_filepaths[ssoap_filepath_mtimes.index(max_mtime)]
            ssoap_fname = os.path.basename(ssoap_filepath)
            ssoap_mtime = datetime.fromtimestamp(os.path.getmtime(ssoap_filepath))

            sites_row = OrderedDict([(k, None) for k in self.sites_type._fields])
            sites_row['ssoapfilename'] = ssoap_fname
            sites_row['ssoapmtime'] = ssoap_mtime.strftime(dtime_fmt)
            sites_row['manholeid'] = manholeid
            sites_row['siteid'] = siteid
            sites_row['deployment'] = deployment
            sites_row['directory'] = ssoap_path

            # create temp copy to work with to avoid corrupting original
            temp_ssoap_filepath = os.path.join(os.environ.get('TEMP'), '.temp_ssoap.mdb') 
            if os.path.exists(temp_ssoap_filepath):
                os.chmod(temp_ssoap_filepath, stat.S_IWRITE)
                os.remove(temp_ssoap_filepath)

            shutil.copy(ssoap_filepath, temp_ssoap_filepath)
            os.chmod(temp_ssoap_filepath, stat.S_IWRITE)
            cnxn = pyodbc.connect('DRIVER={Microsoft Access Driver (*.mdb)};DBQ=' + temp_ssoap_filepath)
            cursor = cnxn.cursor()

            cursor.execute('SELECT MeterID, RainGaugeID FROM Analyses')
            metergage_ids = cursor.fetchall()
            raingage, area, units = (None, None, None)

            file_conflict_excs = []
            if not metergage_ids:
                msg = 'The SSOAP file in "' + ssoap_path + '" is missing its meter and gage.'
                file_conflict_excs.append(msg)
            if len(metergage_ids) > 1:
                msg = 'The SSOAP file in "' + ssoap_path + '" has multiple meters and gages.'
                file_conflict_excs.append(msg)
            else:
                meterid = metergage_ids[0].MeterID
                gageid = metergage_ids[0].RainGaugeID

                cursor.execute("SELECT StartDateTime, EndDateTime FROM Meters WHERE MeterID = ?", meterid)
                startdtime, enddtime = cursor.fetchone()
                sites_row['startdatetime'] = startdtime.strftime(dtime_fmt)
                sites_row['enddatetime'] = enddtime.strftime(dtime_fmt)

                cursor.execute('SELECT RaingaugeName FROM Raingauges WHERE RaingaugeID = ?', gageid)
                sites_row['raingage'] = str(cursor.fetchone().RaingaugeName)
                try:
                    cursor.execute('SELECT Area, AreaUnitID FROM Meters WHERE MeterID = ?', meterid)
                except:
                    # the Meters table in old ssoap files doesn't have AreaUnitID
                    cursor.execute('SELECT Area FROM Meters WHERE MeterID = ?', meterid)
                    sites_row['area'] = cursor.fetchone().Area
                    sites_row['areaunits'] = None
                else:
                    sites_row['area'], units_id = cursor.fetchone()
                    cursor.execute('SELECT ShortLabel FROM AreaUnits WHERE AreaUnitID = ?', units_id)
                    sites_row['areaunits'] = str(cursor.fetchone().ShortLabel)

            cursor.close()
            cnxn.close()
            os.remove(temp_ssoap_filepath)

            xls_paths = glob.glob(os.path.join(ssoap_path, '*.xls')) + glob.glob(os.path.join(ssoap_path, '*.xlsx'))
            xls_fnames = [os.path.basename(path) for path in xls_paths]
            eventnotes_pat = re.compile('rdii', re.IGNORECASE) # TODO
            eventnotes_fnames = [fname for fname in xls_fnames 
                                 if re.search(eventnotes_pat, fname) and not fname.startswith('~')]

            if len(eventnotes_fnames) == 0:
                sites_row['eventnotesfilename'] = None
            elif len(eventnotes_fnames) > 1:
                file_conflict_excs.append('Multiple event notes spreadsheets found in ' + ssoap_path)
                filesum_row['haseventnotes'] = True
                filesum_row['filesareambiguous'] = True
            else:
                filesum_row['haseventnotes'] = True
                sites_row['eventnotesfilename'] = eventnotes_fnames[0]
                eventnotes_path = os.path.join(ssoap_path, eventnotes_fnames[0])
                sites_row['eventnotesmtime'] = datetime.fromtimestamp(os.path.getmtime(eventnotes_path)).strftime(dtime_fmt)

            csv_paths = glob.glob(os.path.join(ssoap_path, '*.csv'))
            csv_fnames = [os.path.basename(path) for path in csv_paths]

            rtk_path = None
            rtk_xls = [fname for fname in xls_fnames if re.search(re.compile('simul', re.IGNORECASE), fname)]
            if len(rtk_xls) == 1:
                filesum_row['hasrtk'] = True
                sites_row['rtkfilename'] = rtk_xls[0]
                rtk_path = os.path.join(ssoap_path, rtk_xls[0])
                rtk_mtime = datetime.fromtimestamp(os.path.getmtime(rtk_path))
                sites_row['rtkmtime'] = rtk_mtime.strftime(dtime_fmt)
                if rtk_mtime < ssoap_mtime:
                    filesum_row['modificationtimesconsistent'] = False
            elif len(rtk_xls) > 1:
                file_conflict_excs.append('Multiple RTK files found')
                filesum_row['hasrtk'] = True
                filesum_row['filesareambiguous'] = True

            self.cursor.execute("SELECT get_analysis_id(%s, %s, %s)", (manholeid, siteid, deployment))
            analysisid = self.cursor.fetchone()[0]
            allflows_outofdate = True
            eventstats_outofdate = True

            all_up_to_date = False
            if analysisid:
                self.execute("SELECT SSOAPMTime, EventNotesMTime, RTKMtime FROM Sites WHERE AnalysisID = %s", (analysisid,))
                db_ssoap_mtime, db_notes_mtime, db_rtk_mtime = self.fetchone()
                db_ssoap_mtime_str = db_ssoap_mtime.strftime(dtime_fmt)
                db_notes_mtime_str = db_notes_mtime.strftime(dtime_fmt)
                db_rtk_mtime_str = db_rtk_mtime.strftime(dtime_fmt) if db_rtk_mtime else None

                ssoap_uptodate = db_ssoap_mtime_str == sites_row['ssoapmtime']
                notes_uptodate = db_notes_mtime_str == sites_row['eventnotesmtime']
                rtk_uptodate = db_rtk_mtime_str == sites_row['rtkmtime']
                
                if ssoap_uptodate and notes_uptodate and rtk_uptodate:
                    all_up_to_date = True

                if ssoap_uptodate:
                    allflows_outofdate = False

                if notes_uptodate and rtk_uptodate:
                    eventstats_outofdate = False

            # TODO Rvalue code differs
            rvalue_xls = [fname for fname in xls_fnames if re.search('event', fname, re.IGNORECASE)]
            rvalue_xls = [fname for fname in rvalue_xls if not re.match('~', fname)]
            if len(rvalue_xls) == 1:
                filesum_row['hasrvalue'] = True
                sites_row['rvaluefilename'] = rvalue_xls[0]
                rvalue_path = os.path.join(ssoap_path, rvalue_xls[0])
                rvalue_mtime = datetime.fromtimestamp(os.path.getmtime(rvalue_path))
                if rvalue_mtime < ssoap_mtime:
                    filesum_row['modificationtimesconsistent'] = False
                sites_row['rvaluemtime'] = rvalue_mtime.strftime(dtime_fmt)
            elif len(rvalue_xls) > 1:
                file_conflict_excs.append("Multiple RValue XLSs found in " + ssoap_path)
                filesum_row['filesareambiguous'] = True
                filesum_row['hasrvalue'] = True
            else: 
                file_conflict_excs.append("No RValue XLSs found in " + ssoap_path)

            allflows_pat = re.compile('all_?flow', re.IGNORECASE)
            allflows_fnames = [fname for fname in csv_fnames if re.search(allflows_pat, fname)]
            if len(allflows_fnames) == 0:
                file_conflict_excs.append("No all flows file found.")
            elif len(allflows_fnames) > 1:
                filesum_row['hasallflows'] = False
                file_conflict_excs.append("Multiple allflows files found.")
            else:
                filesum_row['hasallflows'] = True
                sites_row['allflowsfilename'] = allflows_fnames[0]
                allflows_path = os.path.join(ssoap_path, allflows_fnames[0])
                allflows_mtime = datetime.fromtimestamp(os.path.getmtime(allflows_path))
                sites_row['allflowsmtime'] = allflows_mtime.strftime(dtime_fmt)
                if allflows_mtime < ssoap_mtime:
                    filesum_row['modificationtimesconsistent'] = False

            self.update_filesum(filesum_row)

            if file_conflict_excs:
                return file_conflict_excs

            os.chmod(ssoap_filepath, stat.S_IREAD)

            self.cursor.execute("SELECT update_analysis(%s)", (self.sites_type(*sites_row.values()), ))
            analysisid = self.cursor.fetchone()[0]

            if all_up_to_date:
                return 0

            if (allflows_outofdate or eventstats_outofdate):

                if filesum_row['haseventnotes']:
                    wb = xlrd.open_workbook(eventnotes_path)
                    sheet_name = wb.sheets()[0].name
                    event_notes = wb.sheet_by_name(sheet_name)
                    en_fields = ['Accept Storm', 'Comment']
                    eventnotes_table_raw = query_worksheet(event_notes, 'Event', en_fields, return_cols=True)

                    new_en_fields = {'Accept Storm' : 'outlier', 'Comment' : 'notes'}
                    
                    eventnotes_table = {}
                    for key in eventnotes_table_raw:
                        if key == 'Accept Storm':
                            eventnotes_table[new_en_fields[key]] = [x == 'No' for x in eventnotes_table_raw[key]]
                        else:
                            eventnotes_table[new_en_fields[key]] = eventnotes_table_raw[key]

                    eventnotes_table['flag'] = [None for _ in range(len(eventnotes_table['outlier']))]

                wb = xlrd.open_workbook(rvalue_path)
                sheet_name = wb.sheets()[0].name
                rvalues = wb.sheet_by_name(sheet_name)
                rvalue_fields = ['Event', 'Event Start', 'Event End', 'Duration (hours)', 'I/I Volume (In.)', 
                 'Rain Volume (In.)', 'Total R-value (ratio)', 'Peak I/I Flow (mgd)', 'Peak Total Flow (mgd)', 
                 'EventPeak Rainfall (In./15 min)', 'Observed Flow (mgd)', 'GWI Flow (mgd)', 
                 'Base Wastewater Flow (mgd)', 'Rainfall Start Date', 'Rainfall End Date', 'Rainfall Duration (hours)']

                raw_rvalue_table = query_worksheet(rvalues, 'Event', return_cols=True, cols=rvalue_fields)

                new_rvalue_fields = {'Event' : 'event', 
                        'Event Start' : 'startdatetime', 
                        'Event End' : 'enddatetime', 
                        'Duration (hours)' : 'duration', 
                        'I/I Volume (In.)' : 'iivolume_in', 
                        'Rain Volume (In.)' : 'rainvolume_in', 
                        'Total R-value (ratio)' : 'totalrvalue', 
                        'Peak I/I Flow (mgd)' : 'peakiiflow_mgd', 
                        'Peak Total Flow (mgd)' : 'peaktotalflow', 
                        'EventPeak Rainfall (In./15 min)' : 'rainfall_in_15min', 
                        'Observed Flow (mgd)' : 'observedflow_mgd', 
                        'GWI Flow (mgd)' : 'gwiflow_mgd', 
                        'Base Wastewater Flow (mgd)' : 'basewastewaterflow_mgd', 
                        'Rainfall Start Date' : 'rainfall_startdatetime', 
                        'Rainfall End Date' : 'rainfall_enddatetime', 
                        'Rainfall Duration (hours)' : 'rainfallduration_hrs'}

                rvalue_table = {}
                for key in raw_rvalue_table:
                    rvalue_table[new_rvalue_fields[key]] = raw_rvalue_table[key][1:] # drop the first row of column numbers
                
                rvalue_table['peakfactor'] = [None for _ in range(len(rvalue_table['gwiflow_mgd']))]

                if not filesum_row['haseventnotes']:
                    num_events = len(rvalue_table['duration'])
                    eventnotes_table = {
                        'outlier' : [False for _ in range(num_events)],
                        'notes'   : ['' for _ in range(num_events)],
                        'flag'    : [None for _ in range(num_events)]
                    }

                #new_rtk_fields = {
                #    "R1(ratio)" : 'r1', 
                #    "T1(hours)" : 't1', 
                #    "K1(ratio)" : 'k1', 
                #    "R2(ratio)" : 'r2', 
                #    "T2(hours)" : 't2', 
                #    "K2(ratio)" : 'k2', 
                #    "R3(ratio)" : 'r3',
                #    "T3(hours)" : 't3', 
                #    "K3(ratio)" : 'k3', 
                #    "Peak 15 Minute Rainfall(inches)" : 'peakrainfall',
                #    "Simulated Volume(total R ratio)" : 'simvolume', 
                #    "Simulated Peak(mgd)" : 'simpeak'}
                new_rtk_fields = {
                    "R1" : 'r1', 
                    "T1" : 't1', 
                    "K1" : 'k1', 
                    "R2" : 'r2', 
                    "T2" : 't2', 
                    "K2" : 'k2', 
                    "R3" : 'r3',
                    "T3" : 't3', 
                    "K3" : 'k3', 
                    "Peak 15 Minute Rainfall" : 'peakrainfall',
                    "Simulated Volume" : 'simvolume', 
                    "Simulated Peak" : 'simpeak'}

                if rtk_path:
                    wb = xlrd.open_workbook(rtk_path)
                    sheet_name = wb.sheets()[0].name
                    rtks = wb.sheet_by_name(sheet_name)
                    rtk_fields = ["R1", "T1", 
                        "K1", "R2", "T2", "K2", "R3", "T3", "K3", 
                        "Peak 15 Minute Rainfall", "Simulated Volume", "Simulated Peak"]
                    #rtk_fields = ["R1(ratio)", "T1(hours)", 
                    #    "K1(ratio)", "R2(ratio)", "T2(hours)", "K2(ratio)", "R3(ratio)", "T3(hours)", "K3(ratio)", 
                    #    "Peak 15 Minute Rainfall(inches)", "Simulated Volume(total R ratio)", "Simulated Peak(mgd)"]

                    raw_rtk_table = query_worksheet(rtks, 'Event', cols=rtk_fields, return_cols=True)


                    rtk_table = {}
                    for key in raw_rtk_table:
                        rtk_table[new_rtk_fields[key]] = raw_rtk_table[key][1:] # drop the first row of column numbers

                else:
                    rtk_table = {}
                    for name in new_rtk_fields.values():
                        rtk_table[name] = [None for _ in range(len(rvalue_table))]
                    #rtk_table = [dict(zip(new_rtk_fields.values(), itertools.cycle([None]))) for _ in range(len(rvalue_table))]

                # Merge the three per-metric column dicts (column name -> list
                # of per-event values) into one ordered table description.
                # NOTE: concatenating .items() results with `+` is Python 2
                # only, where dict.items() returns a list (views in Python 3).
                event_stats_table_cols = OrderedDict(rvalue_table.items() + rtk_table.items() + eventnotes_table.items())
                # Transpose columns into rows: each element is one event's
                # values in column order.
                event_stats_table_rows = zip(*event_stats_table_cols.values())

                # Rebuild each row as a dict keyed by column name and stamp it
                # with the analysis this import belongs to.
                event_stats_table = []
                for row in event_stats_table_rows:
                    new_row = dict(zip(event_stats_table_cols.keys(), row))
                    new_row['analysisid'] = analysisid
                    event_stats_table.append(new_row)

                # Sanity check: the CSV-derived columns must line up 1:1 with
                # the fields of the EventStatistics composite type.
                if len(self.eventstatistics._fields) != len(event_stats_table[0]):
                    msg = "The number of columns read from the event statistics and simulated vs. " + \
                          "observed CSV files does not " + \
                          "match the number of columns in EventStatistics table."
                    raise FMDBException(msg)

                # Normalize the four date/time columns to datetime objects.
                # Values arrive either as 'MM/DD/YYYY HH:MM' strings or as
                # spreadsheet-style serial day numbers; 25569 is the day
                # offset between the 1899-12-30 Lotus/Excel epoch and the
                # 1970-01-01 Unix epoch.
                datetime_fields = ['startdatetime', 'enddatetime', 'rainfall_startdatetime', 'rainfall_enddatetime']
                days_between_epochs = 25569
                seconds_in_day = 86400
                four_hours = 60 * 60 * 4
                for row in event_stats_table:
                    for field in datetime_fields:
                        # basestring is Python 2 only (str in Python 3).
                        if isinstance(row[field], basestring):
                            row[field] = datetime.strptime(row[field], '%m/%d/%Y %H:%M')
                        else:
                            # NOTE(review): the +4h shift presumably undoes a
                            # UTC-4 local-time offset applied by
                            # fromtimestamp() on the original host -- confirm
                            # against the source data and host timezone.
                            row[field] = datetime.fromtimestamp((row[field] - days_between_epochs) * seconds_in_day + four_hours)

                # Serialize every row as a '<...>::eventstatistics' composite
                # literal (mogrify handles quoting/escaping) and pass them as
                # a SQL ARRAY to the update_eventstatistics() routine.
                event_stats_array = [[row[x] for x in self.eventstatistics._fields] for row in event_stats_table]
                event_stats_array = [self.eventstatistics(*row) for row in event_stats_array]
                event_stats_array = [self.cursor.mogrify('%s::eventstatistics', (row,)) for row in event_stats_array]
                event_stats_array = 'ARRAY[' + ','.join(event_stats_array) + ']'
                self.cursor.execute("SELECT update_eventstatistics(" + event_stats_array + ")")

                if allflows_outofdate:
                    with open(allflows_path, 'r') as f:
                        # First line is the header: strip quotes/whitespace and
                        # drop empty entries to get the ordered column list.
                        header = f.readline().split(',')
                        origcols = [col.strip("\"' \n") for col in header if col]
                        date_fields = ["month", "day", "year", "hour"] 
                        fields_min_sec = ["minute", "second"]
                        nondate_fields = ["lotus date", "obs flow", "avg DWF", "gwi adjustment", 
                                  "adjusted DWF", "gwi flow", "BWF", "iandi", "curve 1", "curve 2",
                                  "curve 3", "total"]

                        # Maps AllFlows composite-type field names (keys) to
                        # the CSV header names built above (values).
                        field_mapping = {
                                'analysisid' : 'analysisid',
                                'datetime' : 'DateTime',
                                'lotusdate' : 'lotus date',
                                'obsflow' : 'obs flow',
                                'avgdwf' : 'avg DWF',
                                'gwiadjustment' : 'gwi adjustment',
                                'adjusteddwf' : 'adjusted DWF',
                                'gwiflow' : 'gwi flow',
                                'bwf' : 'BWF',
                                'iandi' : 'iandi',
                                'curve1' : 'curve 1',
                                'curve2' : 'curve 2',
                                'curve3' : 'curve 3',
                                'total' : 'total'
                        }
                        # Two accepted layouts: 18 columns (explicit
                        # minute/second) or 16 columns (fractional 'hour').
                        # Column order must match exactly in either case.
                        if len(origcols) == 18:
                            if origcols != date_fields + fields_min_sec + nondate_fields:
                                raise Exception("Unexpected fields encountered.")
                            has_min_sec = True
                        elif len(origcols) == 16:
                            if origcols != date_fields + nondate_fields:
                                raise Exception("Unexpected fields encountered.")
                            has_min_sec = False
                        else:
                            raise Exception("Unexpected number of columns in allflows.")

                        # Stream the remaining rows in chunks of 10,000 so a
                        # large allflows file never has to fit in memory at
                        # once.  izip_longest (Python 2; zip_longest in 3)
                        # pads the final partial chunk with None.
                        reader = csv.DictReader(f, fieldnames=origcols)
                        chunk_size = 10000
                        row_chunks = itertools.izip_longest(*[iter(reader)]*chunk_size, fillvalue=None)
                        for chunk in row_chunks:
                            chunk_table = []
                            for row in chunk:
                                # Skip the None padding in the last chunk.
                                if row:
                                    year = int(row['year'])
                                    month = int(row['month'])
                                    day = int(row['day'])
                                    if has_min_sec:
                                        hour = int(row['hour'])
                                        minute = int(row['minute'])
                                        second = int(row['second'])
                                    else:
                                        # 16-column layout: 'hour' may carry a
                                        # fraction (e.g. '13.5' == 13:30).
                                        # Decompose the fraction into minutes
                                        # and then seconds by splitting on '.'
                                        # at each level; any fractional second
                                        # remainder is truncated.
                                        hours = row['hour']
                                        hour_split = hours.split('.')
                                        if len(hour_split) > 1:
                                            hour = int(hour_split[0])
                                            minutes = str(float('.' + hour_split[1]) * 60)
                                            minute_split = minutes.split('.')
                                            if len(minute_split) > 1:
                                                minute = int(minute_split[0])
                                                seconds = str(float('.' + minute_split[1]) * 60)
                                                second_split = seconds.split('.')
                                                second = int(second_split[0])
                                            else:
                                                minute = int(minute_split[0])
                                                second = 0
                                        else:
                                            hour = int(hour_split[0])
                                            minute = 0
                                            second = 0

                                    # Collapse the separate date columns into a
                                    # single formatted DateTime string, tag the
                                    # row with its analysis, then order values
                                    # to match the AllFlows field order via
                                    # field_mapping.
                                    dtime = datetime(year, month, day, hour, minute, second).strftime('%m/%d/%Y %H:%M:%S')
                                    for field in date_fields + fields_min_sec:
                                        if field in row.keys():
                                            del row[field]
                                    row['DateTime'] = dtime
                                    row['analysisid'] = analysisid
                                    new_row = self.allflows(*[row[field_mapping[k]] for k in self.allflows._fields])
                                    chunk_table.append(new_row)

                            # As with event statistics: mogrify each row as an
                            # ::allflows composite literal and hand the chunk
                            # to the update_allflows() routine.
                            allflows_array = [self.cursor.mogrify('%s::allflows', (row,)) for row in chunk_table]
                            allflows_array = 'ARRAY[' + ','.join(allflows_array) + ']'
                            self.cursor.execute("SELECT update_allflows(" + allflows_array + ")")
        return 0

#def collect_ssoap(root=None):
#    if not root:
#        root = os.path.join("\\\\pwdoows1", "Modeling", "Data", "Temporary Monitors", 
#            "Flow Monitoring", "Flow Monitoring by ManholeID")
#
#    manholes = [d for d in os.listdir(root) if not any(ch in string.ascii_lowercase for ch in d)]

