import csv
import random
from datetime import datetime
from time import strftime
from dbfpy import dbf
import numpy as np

'''
Power analysis of spatial cluster-detection results (SaTScan .gis.dbf output):
for a range of significance levels, count how often the known rural / mixed /
urban hot-spot clusters are detected across 1000 simulated runs, and write a
power summary table to CSV.
'''

def getCurTime():
    """Return the current local time as a '%Y-%m-%d %H:%M:%S' string.

    Used only for begin/end log lines in the main script.
    """
    # strftime cannot fail for this fixed, valid format string, so the
    # original try/bare-except (which also shadowed the builtin `format`)
    # was dead weight and has been removed.
    return datetime.now().strftime('%Y-%m-%d %H:%M:%S')

def build_data_list(inputCSV):
    """Read every column of a CSV file (excel dialect, with header row)
    as floats and return them as a 2-D numpy array.

    Parameters
    ----------
    inputCSV : str
        Path to the CSV file.

    Returns
    -------
    numpy.ndarray of shape (n_rows, n_columns), row order preserved.
    """
    values = []
    # `open` + context manager replaces the Python-2-only `file()` builtin,
    # which also leaked the file handle.
    with open(inputCSV) as fh:
        reader = csv.DictReader(fh, dialect="excel")
        for record in reader:
            for name in reader.fieldnames:
                values.append(float(record[name]))
        n_cols = len(reader.fieldnames)
    data = np.array(values)
    data.shape = (-1, n_cols)
    return data

def build_pvalue_list(inputCSV, pvalue_col=None, threshold=None):
    """Return the 0-based row indices whose p-value is below a threshold.

    Parameters
    ----------
    inputCSV : str
        Path to a CSV file (excel dialect, with header row).
    pvalue_col : int, optional
        Index into the header row locating the p-value column.
        Defaults to the module-level global `pvalueLevel` (original behavior).
    threshold : float, optional
        Significance cutoff; rows with p-value strictly below it are kept.
        Defaults to the module-level global `significanceLevel`.

    Returns
    -------
    numpy.ndarray of int row indices.
    """
    # Fall back to the module globals so existing one-argument callers
    # keep the original behavior.
    if pvalue_col is None:
        pvalue_col = pvalueLevel
    if threshold is None:
        threshold = significanceLevel
    hits = []
    # `open` + context manager replaces the Python-2-only `file()` builtin.
    with open(inputCSV) as fh:
        reader = csv.DictReader(fh, dialect="excel")
        for row_idx, record in enumerate(reader):
            if float(record[reader.fieldnames[pvalue_col]]) < threshold:
                hits.append(row_idx)
    return np.array(hits)

def build_region_list(inputCSV, list):
    """Return the last-column value (as int) of every CSV row whose 0-based
    row index appears in `list`.

    Parameters
    ----------
    inputCSV : str
        Path to a CSV file (excel dialect, with header row).
    list : sequence of int
        Row indices to keep.  NOTE(review): the parameter shadows the
        builtin `list`; name kept for backward compatibility with callers.

    Returns
    -------
    numpy.ndarray of int region ids, in file order.
    """
    wanted = set(list)  # O(1) membership instead of O(n) per row
    regions = []
    # `open` + context manager replaces the Python-2-only `file()` builtin,
    # which also leaked the file handle.
    with open(inputCSV) as fh:
        reader = csv.DictReader(fh, dialect="excel")
        for row_idx, record in enumerate(reader):
            if row_idx in wanted:
                # float() first: the column may be written as e.g. "3.0"
                regions.append(int(float(record[reader.fieldnames[-1]])))
    return np.array(regions)

def build_dbf_list(pvalueDBF, threshold=None):
    """Read a SaTScan .gis.dbf file and return the ids of significant rows.

    Field layout assumed (from the original code): field 0 = location id,
    field 1 = cluster/region id, field 2 = p-value.

    Parameters
    ----------
    pvalueDBF : str
        Path to the .gis.dbf file.
    threshold : float, optional
        Significance cutoff; rows with p-value strictly below it are kept.
        Defaults to the module-level global `significanceLevel`
        (original behavior).

    Returns
    -------
    (pvalue_ids, region_ids) : two parallel lists of ints.
    """
    if threshold is None:
        threshold = significanceLevel
    pvalue_ids = []
    region_ids = []
    db = dbf.Dbf(pvalueDBF)
    try:
        for record in db:
            if float(record[db.fieldNames[2]]) < threshold:
                pvalue_ids.append(int(float(record[db.fieldNames[0]])))
                region_ids.append(int(float(record[db.fieldNames[1]])))
    finally:
        # The original never released the dbf handle (resource leak).
        db.close()
    return pvalue_ids, region_ids


def countID(listA, listB):
    """Count how many items of `listA` also occur in `listB`.

    Duplicates in `listA` are counted each time they appear (matching the
    original loop's behavior).  Items must be hashable.
    """
    # Set lookup turns the original O(len(A)*len(B)) scan into O(A + B).
    lookup = set(listB)
    return sum(1 for item in listA if item in lookup)

def findClusterID(clusterList, area_lists=None):
    """Classify a detected cluster by its largest overlap with the known areas.

    Parameters
    ----------
    clusterList : list of int
        Location ids belonging to one detected cluster.
    area_lists : 3-tuple of lists, optional
        The (rural, mixed, urban) reference id lists.  Defaults to the
        module-level globals `rural`, `mixed`, `urban` (original behavior).

    Returns
    -------
    (area_index, overlap_count) where area_index is 0: rural, 1: mixed,
    2: urban.  Ties go to the lowest index, as in the original loop.
    """
    if area_lists is None:
        area_lists = (rural, mixed, urban)
    counts = [countID(clusterList, area) for area in area_lists]
    # max() returns the first maximal element, matching the original
    # strict-greater-than scan.
    best = max(range(len(counts)), key=lambda i: counts[i])
    return best, counts[best]

#--------------------------------------------------------------------------
#MAIN
if __name__ == "__main__":
    print "begin at " + getCurTime()
    # Older reference id lists kept for comparison (disabled).
    '''
    rural = [8,9,10,11,12,13,14,15,17,19,20,26,28,33,34,37]
    mixed = [91,98,101,104,114,115,119,126,131,142,146,147,154,162,168,172]
    urban = [105,107,112,120,122,125,127,128,130,133,134,141,143,149,152,155]

    hot_1 = [9,130,147]
    hot_2 = hot_1 + [10,133,154]
    hot_4 = hot_2 + [12,17,125,131,141,146]
    hot_8 = hot_4 + [14,19,20,26,114,115,119,120,128,134,149,168]
    hot_16 = mixed + rural + urban
    '''
    
    # True cluster membership: 16 location ids per area type.
    # These are module-level globals read by findClusterID()/countID().
    rural = [9,10,12,13,14,17,20,23,26,27,28,33,34,35,38,41]
    mixed = [95,99,109,110,114,115,117,119,126,131,139,140,142,146,147,237]
    urban = [125,127,128,130,133,134,136,137,138,141,143,149,151,152,156,157]    
    # Union of all three areas (48 ids).
    hot_16 = [9, 10, 12, 13, 14, 17, 20, 23, 26, 27, 28, 33, 34, 35, 38, 41,
              95, 99, 109, 110, 114, 115, 117, 119, 125, 126, 127, 128, 130,
              131, 133, 134, 136, 137, 138, 139, 140, 141, 142, 143, 146, 147,
              149, 151, 152, 156, 157, 237]
    
    #print len(rural), len(mixed), len(urban), len(hot_16)

    dataused = hot_16
    # Hard-coded input paths; alternatives for other method runs are kept
    # commented out below.
    #unitCSV = 'C:/_DATA/CancerData/SatScan/mult6000/three16_format.csv'
    unitCSV = 'C:/_DATA/CancerData/SatScan/mult6000/three16_modify/three16_format_modify.csv'
    #filePath = 'C:/_DATA/CancerData/SatScan/mult6000/redcap/three16/LLR/EBS/constraint/5p_improved/SSD/'
    #filePath = 'C:/_DATA/CancerData/SatScan/mult6000/redcap/three16/LLR/EBS/constraint/SSD/'
    #filePath = 'C:/_DATA/CancerData/SatScan/mult6000/redcap/three16/WARD/EBS/constraint/SSD/'
    #filePath = 'C:/_DATA/CancerData/SatScan/mult6000/redcap/three16/CLK/EBS/constraint/SSD/'
    #filePath = 'C:/_DATA/CancerData/SatScan/mult6000/redcap/three16/ALK/newCTG/EBS/3/SSD/'
    filePath = 'C:/_DATA/CancerData/SatScan/mult6000/three16_modify/satscan/'
        
    #dataMatrix = build_data_list(unitCSV)  # [id, pop, cancer1, cancer2, cancer3]
    #riskarea_pop = calSum(dataMatrix[:,1])  # [mixed, rural, urban, total]

    # Module-level globals consumed by the helper functions above.
    pvalueLevel = -1 # -1: min pvalue, -2: last level
    type = 2 # 0: pop, 1: cancer, 2: count
    saveOutput = []
    # Sweep the significance threshold from 0.05 to 0.95 in 0.05 steps.
    for significanceLevel in range(1, 20):
        significanceLevel = 0.05 * significanceLevel

        #dataCount = np.ones(len(dataMatrix))
        # power:  number of runs with at least one significant location
        # power2: number of (run, area-type) detections, first hit per type
        # power3[r-1]: detections whose best-area overlap exceeds r ids
        power = 0
        power2 = 0
        power3 = np.zeros(15)

        output = np.array([])

        # One .gis.dbf result file per simulated dataset.
        for repeat in range(0,1000):
            regionCSV = filePath + 'Full-Order-ALK_EBS_high_' + str(repeat) + ".csv"
            #regionCSV = filePath + '_Full-Order-ALK_SO' + str(repeat) + ".csv"  
            #regionCSV = filePath + '_Full-Order-CLK_SO' + str(repeat) + ".csv"
            #regionCSV = filePath + 'Full-Order-CLK_EBS_high_' + str(repeat) + ".csv"
            #regionCSV = filePath + '_WARD_SO' + str(repeat) + ".csv"
            #regionCSV = filePath + 'WARD_EBS_high_' + str(repeat) + ".csv"
            #regionCSV = filePath + 'LLR_EBS_high_' + str(repeat) + ".csv"
            #regionCSV = filePath + str(repeat) + ".gis.dbf"
            #pvalueCSV = regionCSV[:-4] + '_whole_high_pvalue.csv'
            pvalueCSV = regionCSV[:-4] + '_pvalue.csv'
            pvalueDBF = filePath + str(repeat) + ".gis.dbf"
            # Parallel lists: significant location ids and their cluster ids.
            pvalueID, regionID = build_dbf_list(pvalueDBF)
            #print pvalueID, regionID
            #regionID = build_region_list(regionCSV, pvalueID)
            if len(pvalueID) > 0:
                power += 1
                #regionID = build_region_list(regionCSV, pvalueID)
                # Group the significant location ids by their cluster id.
                regionIDunique = np.unique(regionID)
                clusterID = []
                for rID in regionIDunique:
                    temp = []
                    i = 0
                    for item in regionID:
                        if rID == item:
                            temp.append(pvalueID[i])
                        i += 1
                    clusterID.append(temp)

                # Credit each area type (rural/mixed/urban) at most once
                # per run, using the cluster's dominant area.
                detectedID = [-1, -1, -1]
                for cluster in clusterID:
                    areaID, areaCount = findClusterID(cluster)
                    if detectedID[areaID] < 0:
                        detectedID[areaID] = areaID
                        power2 += 1
                        #cccc = 0
                        # Tally how large the overlap was (strictly more
                        # than r of the 16 true ids).
                        for r in range(1,16):
                            if areaCount > r:
                                power3[int(r)-1] += 1
                            

        print '------------------------'
        print significanceLevel
        print power, power2, power3

        # One output row per significance level: [power, power2, power3[0..14]]
        saveOutput = np.append(saveOutput, [power, power2])
        saveOutput = np.append(saveOutput, power3)
    saveOutput.shape = (-1, 17)
    np.savetxt(filePath + 'whole_power1.csv', saveOutput, delimiter=',', fmt = '%10.5f')
    print "end at " + getCurTime()
    print "========================================================================"  