import csv
import random
from datetime import datetime
from time import strftime
from dbfpy import dbf
import numpy as np

'''
to calculate the false negative and false positive rates for REDCAPSpatial

'''

def getCurTime():
    """
    Get the current local time as a formatted string.

    Returns:
        str: current time formatted as '%Y-%m-%d %H:%M:%S'.
    """
    # NOTE: the previous version wrapped strftime in a bare `except:` that
    # re-raised a context-free ValueError; strftime with this fixed, valid
    # format string cannot fail, so the handler was removed.  The local is
    # named `time_format` to avoid shadowing the `format` builtin.
    time_format = '%Y-%m-%d %H:%M:%S'
    return datetime.now().strftime(time_format)

def build_satscanresult_dbf(inputDBF):
    """
    Collect the area ids of significant clusters from a SaTScan .gis.dbf
    result table.

    A record is considered significant when its third field (the cluster
    p-value) is below 0.05; the first field is taken as the area id.

    inputDBF: path to the dbf file produced by SaTScan.
    Returns a 1-D numpy float array of significant area ids.
    """
    table = dbf.Dbf(inputDBF)
    names = table.fieldNames
    significant_ids = []
    for rec in table:
        # Field 2 holds the p-value; field 0 the (numeric-string) area id.
        if float(rec[names[2]]) < 0.05:
            significant_ids.append(int(float(rec[names[0]])))
    # dtype=float matches the original np.append-onto-empty-array behavior.
    return np.array(significant_ids, dtype=float)

def build_data_list(inputCSV):
    """
    Read a CSV file of per-area values into a 2-D numpy array.

    Each row of the result corresponds to one CSV record, with one column
    per header field (in header order), every value converted to float.

    inputCSV: path to a CSV file with a header row.
    Returns an (n_records, n_fields) numpy float array.
    """
    # `with open(...)` replaces the deprecated `file()` builtin and
    # guarantees the handle is closed (the original leaked it).
    with open(inputCSV) as fh:
        reader = csv.DictReader(fh, dialect="excel")
        values = [float(record[name])
                  for record in reader
                  for name in reader.fieldnames]
        n_fields = len(reader.fieldnames)
    data = np.array(values)
    data.shape = (-1, n_fields)
    return data

def build_pvalue_list(inputCSV):
    """
    Return the row indices of CSV records whose p-value is below 0.05.

    The p-value is read from the second-to-last column of the header.

    inputCSV: path to a CSV file with a header row.
    Returns a 1-D numpy array of significant (0-based) row indices.
    """
    # `with open(...)` replaces the deprecated `file()` builtin and
    # guarantees the handle is closed; enumerate replaces the manual
    # `i` counter of the original.
    indices = []
    with open(inputCSV) as fh:
        reader = csv.DictReader(fh, dialect="excel")
        for row_idx, record in enumerate(reader):
            if float(record[reader.fieldnames[-2]]) < 0.05:
                indices.append(row_idx)
    return np.array(indices)

#--------------------------------------------------------------------------
#MAIN
if __name__ == "__main__":
    print "begin at " + getCurTime()
    # Area-id groups by settlement type used in earlier experiment designs.
    mixed = [91,98,101,104,114,115,119,126,131,142,146,147,154,162,168,172]
    rural = [8,9,10,11,12,13,14,15,17,19,20,26,28,33,34,37]
    urban = [105,107,112,120,122,125,127,128,130,133,134,141,143,149,152,155]

    # Nested hot-spot configurations of increasing size (1, 2, 4, 8, 16
    # areas per settlement type); each level extends the previous one.
    hot_1 = [9,130,147]
    hot_2 = hot_1 + [10,133,154]
    hot_4 = hot_2 + [12,17,125,131,141,146]
    hot_8 = hot_4 + [14,19,20,26,114,115,119,120,128,134,149,168]
    hot_16 = mixed + rural + urban

    # High-risk (H*) and low-risk (L*) true cluster area ids for the
    # current high/low experiment.
    H1 = [8,16,844,915,919,921,923,924]
    L2 = [5,103,106,513,517,518,520,531,534,535,536,541]
    H3 = [63,265,267,268,333,336,337,339,340,342,343,348]
    H4 = [13,174,178,198,886,887,888,889,890]
    L5 = [146,171,182,810,811,814,815,864,867]
    L6 = [20,133,692,694,695,696,698,702,705]
    H7 = [69,70,87,88,369,370,372,442,443]
    high_risk_area_id = H1 + H3 + H4 + H7
    low_risk_area_id = L2 + L5 + L6
    risk_area_id = high_risk_area_id + low_risk_area_id

    # The set of true (ground-truth) risk areas evaluated in this run.
    dataused = risk_area_id

    '''
    if len(dataused)/3 < 10:
        unitCSV = 'C:/_DATA/CancerData/SatScan/mult6000/three0' + str(len(dataused)/3) + '_format.csv'
    else:
        unitCSV = 'C:/_DATA/CancerData/SatScan/mult6000/three' + str(len(dataused)/3) + '_format.csv'
    '''
    # Simulated unit data: one row per area.  Columns: id, population,
    # then one disease-count column per simulation replicate.
    unitCSV = 'C:/_DATA/CancerData/test/Jan15/TP1000_1m_16_04.csv'
    dataMatrix = build_data_list(unitCSV)  # [id, pop, cancer1, cancer2, cancer3]
    # Total population of the true risk areas (denominator for FN rates).
    riskarea_pop = 0
    for item in dataused:
        riskarea_pop += dataMatrix[int(item), 1]

    # power counts how many replicates detect at least one significant area.
    power = 0
    output = np.array([])
    for repeat in range(0,1000):
        print repeat
        # Total disease count in the true risk areas for this replicate
        # (column `repeat + 2` of the data matrix).
        riskarea_dis = 0
        for item in dataused:
            riskarea_dis += dataMatrix[int(item), repeat + 2]
        '''
        if len(dataused)/3 < 10:
            pvalueCSV = 'C:/_DATA/CancerData/SatScan/mult6000/redcap/three0'
            pvalueCSV += str(len(dataused)/3) + '/_Full-Order-ALK_OO' + str(repeat) + '_pvalue.csv'
        else:
            pvalueCSV = 'C:/_DATA/CancerData/SatScan/mult6000/redcap/three'
            pvalueCSV += str(len(dataused)/3) + '/_Full-Order-ALK_OO' + str(repeat) + '_pvalue.csv'
        '''
        #pvalueCSV = 'C:/_DATA/CancerData/test/Jan15/spatialREDCAP/10000/_Full-Order-ALK_SO' + str(repeat) + '_pvalue.csv'
        # Significant area ids detected by SaTScan for this replicate.
        pvalueCSV = 'C:/_DATA/CancerData/test/Jun08/satscan/highlow/hl' + str(repeat) + '.gis.dbf'
        #p_id = build_pvalue_list(pvalueCSV)
        p_id = build_satscanresult_dbf(pvalueCSV)
        '''
        p_id = []
        i = 0
        for item in pvalue:
            if item < 0.05:
                p_id.append(i)
            i += 1
        '''
        if len(p_id) > 0:
            power += 1
        #print p_id
        
        # Per-replicate confusion-matrix tallies (population-, disease- and
        # area-count-weighted), then ratio summaries.
        temp_result = np.zeros(12)
        #[POP_TP, POP_FP, POP_FN, DIS_TP, DIS_FP, DIS_FN, COUNT_TP, COUNT_FP, COUNT_FN,
        #   POP_TP/(FP+FN), DIS_TP/(FP+FN), COUNT_TP/(FP+FN)]
        for item in p_id:
            # Detected area inside the true risk set -> true positive,
            # otherwise false positive.
            if int(item) in dataused:
                temp_result[0] += dataMatrix[int(item), 1]
                temp_result[3] += dataMatrix[int(item), repeat + 2]
                temp_result[6] += 1
            else:
                temp_result[1] += dataMatrix[int(item), 1]
                temp_result[4] += dataMatrix[int(item), repeat + 2]
                temp_result[7] += 1
                
        # False negatives = truth totals minus the true-positive tallies.
        temp_result[2] = riskarea_pop - temp_result[0]
        temp_result[5] = riskarea_dis - temp_result[3]
        temp_result[8] = len(dataused) - temp_result[6]

        # TP/(FP+FN) ratios.  NOTE(review): divides by zero when a replicate
        # has neither false positives nor false negatives — confirm intended.
        temp_result[9] = temp_result[0]/(temp_result[1] + temp_result[2])
        temp_result[10] = temp_result[3]/(temp_result[4] + temp_result[5])
        temp_result[11] = temp_result[6]/(temp_result[7] + temp_result[8])
        output = np.append(output, temp_result)
        
    # One 12-column row per replicate; report the mean ratios and power.
    output.shape = (-1, 12)
    filePath = 'C:/_DATA/CancerData/test/Jun08/satscan/result_hl.csv'
    #np.savetxt(filePath, output, delimiter=',')
    #filePath = 'C:/_DATA/CancerData/test/Jan15/spatialREDCAP/result_new.csv'
    #np.savetxt(filePath, output, delimiter=',', fmt='%4e')
    print np.mean(output[:,9]), np.mean(output[:,10]), np.mean(output[:,11])
    print 'power = ' + str(power)
    print "end at " + getCurTime()
    print "========================================================================"  