import numpy as np
import csv
from datetime import datetime
from time import strftime


'''
Global and Local Empirical Bayes Smoothers with Gamma Model
'''

def getCurTime():
    """
    Get the current local time as a string.

    Returns:
        str: the current time formatted as '%Y-%m-%d %H:%M:%S'
             (e.g. '2012-06-27 14:30:05').
    """
    # strftime with a fixed, valid format string cannot fail here, so the
    # previous bare try/except (which masked the real error and re-raised a
    # context-free ValueError) was removed.  Also avoids shadowing the
    # builtin name 'format'.
    time_format = '%Y-%m-%d %H:%M:%S'
    return datetime.now().strftime(time_format)

def build_data_list(inputCSV):
    """
    Load a CSV file (with a header row) into a 2-D numpy array of floats.

    Every field of every record is converted with float(), so all columns
    must be numeric.  Column order follows the header order.

    Args:
        inputCSV (str): path to the CSV file to read.

    Returns:
        numpy.ndarray: array of shape (n_records, n_columns).
    """
    values = []
    # 'with' guarantees the file handle is closed even if float() raises
    # (the original left the file open for the life of the process).
    with open(inputCSV) as f:
        reader = csv.DictReader(f, dialect="excel")
        for record in reader:
            for field in reader.fieldnames:
                values.append(float(record[field]))
        n_cols = len(reader.fieldnames)
    data = np.array(values)
    data.shape = (-1, n_cols)
    return data



#--------------------------------------------------------------------------
#MAIN

if __name__ == "__main__":
    print '===================================================='
    print "begin at " + getCurTime()
    #filepath = 'C:/_STUDY/GEOG 731/final project/significant_flow_diff_distance_1.csv'
    #filepath = 'C:/_DATA/migration89_08/COUNTY Migration/clean/test/min_pop_500thousand/flow_measure_large_than_1000_1.csv'
    filepath = 'C:/_DATA/migration_census_2000/6-27-2012/new_expectation.csv'
    #a = [158, 184, 327, 330, 1275, 2070, 2572, 1123, 2600 ,2671, 2657]
    # data: one row per record; columns 1 and 2 hold the two endpoint IDs of a
    # flow, and the last column holds a "measure" value used for de-duplication
    # below.  (Column meanings beyond that are inferred from usage — see the
    # NOTE(review) comments further down.)
    data = build_data_list(filepath)

    '''
    list = []
    for item in data:
        if int(item[0]) == 3:
            list.append(item)
    list = np.array(list)
    list.shape = (-1, len(data[0,:]))
    '''

    # Collect the distinct endpoint IDs and the distinct (id1, id2) flow pairs.
    # Flow pairs are encoded as the string "id1,id2" so np.unique can dedupe them.
    uniqueFlow = []
    uniqueID = []
    for item in data:
        uniqueID.append(int(item[1]))
        uniqueID.append(int(item[2]))
        uniqueFlow.append(str(int(item[1])) + ',' + str(int(item[2])))
    uniqueFlow = np.unique(uniqueFlow)
    uniqueID = np.unique(uniqueID)

    # Map each unique "id1,id2" key to its row index in the output array.
    uniqueFlowDic = {}
    i = 0
    for item in uniqueFlow:
        uniqueFlowDic[item] = i
        i += 1
        #uniqueFlowDic[item] = []
    
    #for item in data:
        #uniqueFlowDic[str(int(item[1])) + ',' + str(int(item[2]))].append(int(item[3]+1995))
    # dataindex[k] = output row index for data record k (parallel to data).
    dataindex = []
    for item in data:
        dataindex.append(uniqueFlowDic[str(int(item[1])) + ',' + str(int(item[2]))])

    # One output row per unique flow, initialised to zeros.
    output = []
    for item in uniqueFlow:
        temp = item.split(',')
        #print item[0], item[1]
        output.append([int(temp[0]), int(temp[1]), 0, 0, 0, 0]) # id1, id2, dis, flow, expected, measure
    output = np.array(output)

    # For each unique flow keep the record with the LARGEST measure (last
    # column), copying that record's other fields into the output row.
    # NOTE(review): the column mapping (item[0] -> dis, item[4] -> flow,
    # item[5] -> expected) is inferred from the inline header comment above —
    # confirm against the input CSV's header.
    i = 0
    for item in data:
        if item[-1] > output[dataindex[i],-1]:
            output[dataindex[i],-1] = item[-1]
            output[dataindex[i],2] = item[0]
            output[dataindex[i],3] = item[4]
            output[dataindex[i],4] = item[5]
        i += 1
         
        #print item, uniqueFlowDic[item]
    #print uniqueFlowDic
    print uniqueID
    print len(uniqueID)
    
    # Build a GIS-style selection expression over the unique endpoint IDs.
    # NOTE(review): the string ends with a dangling ' OR "FID" = ' after the
    # last ID — presumably trimmed by hand before use; verify if pasted as-is.
    temp = '"FID" = '
    for item in uniqueID:
        temp += str(int(item))
        temp += ' OR "FID" = '
    print temp
    

    print '========================'
    '''
    temp = '"OID" = '
    for item in data:
        temp += str(int(item))
        temp += ' OR "OID" = '
    print temp
    '''
    # Write the de-duplicated flows next to the input file as <name>_unique.csv,
    # all values formatted as integers.
    np.savetxt(filepath[:-4] + '_unique.csv', output, delimiter=',', fmt = '%i')