#--------------------------------------------------------------------------
# Tool Name:  Outlier Analysis
# Source Name: OutlierAnalysis.py
# Version: 1.0
# Author: Hu Wang(wanghugigi@gmail.com)
#
#--------------------------------------------------------------------------
#ArcGIS 9.2 + PythonWin 2.6.4 + numpy + scipy

#Differences between ArcGIS 9.2 and ArcGIS 9.3:
#1. polygon.centroid is a Point object in ArcGIS 9.3, but a unicode string in ArcGIS 9.2

#--------------------------------------------------------------------------
#Imports
#--------------------------------------------------------------------------
import HelperFunctions as HF
import numpy as np
from datetime import datetime
from time import strftime

def getCurTime():
    """Return the current local time as a string formatted '%Y-%m-%d %H:%M:%S'."""
    # strftime on a freshly-constructed datetime cannot fail for this fixed
    # format string, so the original try/except (a bare except that discarded
    # the real error and re-raised a bare ValueError) served no purpose.
    # Also avoids shadowing the builtin name 'format'.
    return datetime.now().strftime('%Y-%m-%d %H:%M:%S')

def get_inputs():
    """Collect the script inputs and return them as a generic attribute object.

    Returns an HF.Gen object populated with:
        sInputFC       -- input feature class path
        sZField        -- list of attribute field names (split on ';')
        sCluster       -- cluster ID field name
        sOutputFC      -- output feature class path
        fDissimilarity -- dissimilarity fraction as a string (e.g. "0.05")
        lsWeight       -- list of per-field weight strings

    Raises HF.ReportError on malformed parameters.
    """
    try:
        '''
        sInputFC = HF.pGP.GetParameterAsText(0)                     # Input Feature Class
        sZField = HF.pGP.GetParameterAsText(1).upper()              # Attribute Field Name/after Normalization
        sCluster = HF.pGP.GetParameterAsText(2).upper()              # Cluster Field Name
        sOutputFC = HF.pGP.GetParameterAsText(3)                    # Output Feature Class
        tDissimilarity = HF.pGP.GetParameterAsText(4)               # Dissimilarity
        lsField = sZField.split(";")

        if tDissimilarity == "5%":
            fDissimilarity = 0.05
        elif tDissimilarity == "3%":
            fDissimilarity = 0.03
        elif tDissimilarity == "1%":
            fDissimilarity = 0.01
        elif tDissimilarity == "8%":
            fDissimilarity = 0.08
        elif tDissimilarity == "10%":
            fDissimilarity = 0.1
        elif tDissimilarity == "15%":
            fDissimilarity = 0.15
        else:
            raise HF.ReportError (HF.msgParseErr)
        
        '''
        # This block can be used to test the code using the provided datasets.
        # BUG FIX: the paths must be raw strings -- in the original code the
        # "\n" in "...\noxy_..." was interpreted as a newline escape, embedding
        # a literal line break in the middle of both file paths.
        sInputFC = r"F:\Clustering by Python\noxy_0602_01.shp"
        sZField = "ElevMean_N;Temp_N;Rain_N;Yield_N"
        sWeight = "0.15;0.2;0.2;0.25"
        sCluster = "CLUSTER_AD"
        sOutputFC = r"F:\Clustering by Python\noxy_0602_01_O.shp"
        fDissimilarity = "0.05"   #5%
        lsField = sZField.split(";")
        lsWeight = sWeight.split(";")

        # One weight is required per attribute field.
        if len(lsField) != len(lsWeight):
            raise HF.ReportError ("The format of weight does not meet specifications")

    except Exception:
        raise HF.ReportError (HF.msgParseErr)

    # Make a generic object, populate it with our variables, then return it.
    try:
        obj = HF.Gen()
        obj.add('sInputFC', sInputFC)
        obj.add('sZField', lsField)
        obj.add('sCluster', sCluster)
        obj.add('sOutputFC', sOutputFC)
        obj.add('fDissimilarity', fDissimilarity)
        obj.add('lsWeight', lsWeight)
    except Exception:
        raise HF.ReportError (HF.msgInputsErr)

    return obj

def build_value_lists():
    """Read FID, attribute values and cluster ID for every feature.

    Uses the module-level ``inputs`` object for the feature class path and
    field names.  Returns a 2-D numpy array with one row per good record:
    [FID, field_1 .. field_N, clusterID].  Records that fail to read are
    counted, recorded and reported as a warning.
    """
    pRows = HF.pGP.SearchCursor(inputs.sInputFC)
    pRow = pRows.Next()
    sKey = np.array([])
    lsBadRecs = []
    iBadRecCnt = 0
    iRec = 0

    HF.pGP.AddMessage ("--------" + getCurTime() + "--------")
    HF.pGP.AddMessage (HF.sBuildingLists)

    # '!=' replaces the Py2-only '<>' operator (removed in Python 3).
    while pRow != None:
        iRec = iRec + 1
        try:
            dTemp = [pRow.GetValue("FID")]
            for Field in inputs.sZField:
                dTemp.append(pRow.GetValue(Field))
            dTemp.append(pRow.GetValue(inputs.sCluster))
            sKey = np.append(sKey, dTemp)
        except Exception:
            iBadRecCnt += 1
            # BUG FIX: lsBadRecs was warned about below but never populated.
            lsBadRecs.append(iRec)
        pRow = pRows.Next()

    # Report any problems encountered reading feature input data.
    if iBadRecCnt:
        sMessage = HF.msgReadErr % (iBadRecCnt, iRec)
        HF.pGP.AddWarning (sMessage)
        # repr() replaces the Py2-only backtick syntax.
        HF.pGP.AddWarning(repr(lsBadRecs))

    # Reshape the flat buffer: each row is FID + N fields + cluster id.
    sKey.shape = (-1, len(inputs.sZField) + 2)
    return sKey

  
def calculate_certain_average(data, iClusterID):
    """Return the per-column mean of all rows belonging to one cluster.

    data       -- 2-D array; the last column is the cluster id, the others
                  are (weighted) attribute values.  FID must NOT be included.
    iClusterID -- the cluster id whose members are averaged.

    Returns a 1-D array [mean_1, ..., mean_N, iClusterID].
    """
    # Boolean-mask selection replaces the original while loop of np.append
    # calls, which reallocated and copied the buffer on every member row.
    members = data[data[:, -1] == iClusterID, :-1]
    average = np.average(members, axis=0)
    # Tag the averages with the cluster id they describe.
    return np.append(average, iClusterID)
    
def calculate_all_average(data, iCluster):
    """Return a 2-D array of per-cluster averages, one row per cluster id.

    data     -- 2-D array of (weighted) attributes plus a trailing cluster id
                column (no FID).
    iCluster -- 1-D ndarray of cluster ids to average.

    Each output row is [mean_1, ..., mean_N, clusterID], in iCluster order.
    """
    HF.pGP.AddMessage ("--------" + getCurTime() + "--------")
    HF.pGP.AddMessage ("Calculating average...")
    # Collect rows in a list and stack once; the original appended into a
    # flat ndarray inside a manual index loop (quadratic copying).
    rows = [calculate_certain_average(data, cid) for cid in iCluster]
    return np.array(rows)


def calculate_deviation(data, average):
    """Euclidean distance of each record from its cluster's average.

    data    -- 2-D array, one record per row: [attr_1..attr_N, clusterID].
    average -- 2-D array of cluster averages: [mean_1..mean_N, clusterID].

    Returns a 1-D array of distances in the row order of ``data``.  A record
    whose cluster id has no matching row in ``average`` contributes no entry
    (behavior preserved from the original implementation).
    """
    dist = []
    for row in data:
        iClusterID = int(row[-1])
        # Find the average row for this record's cluster.
        for avgRow in average:
            if int(avgRow[-1]) == iClusterID:
                dist.append(np.linalg.norm(row[:-1] - avgRow[:-1]))
                break
    return np.array(dist)
    

def calculate_single_deviation(data, average, iIndex):
    """Flag whether one attribute of a record deviates from its cluster mean.

    data    -- 1-D array for one record: [attr_1..attr_N, clusterID].
    average -- 2-D array of cluster averages with a trailing cluster id.
    iIndex  -- index of the attribute column to test.

    Returns 1 when |data[iIndex] - mean[iIndex]| >= fRange (the module-level
    threshold computed in main), else 0.
    """
    iClusterID = int(data[-1])
    # BUG FIX: default to "no deviation".  The original left ``flag`` unbound
    # and raised UnboundLocalError when no average row matched the cluster id.
    flag = 0
    for avgRow in average:
        if int(avgRow[-1]) == iClusterID:
            if abs(data[iIndex] - avgRow[iIndex]) >= fRange:
                flag = 1
            break
    return flag
           
def output_results(deviation):
    """Write per-record outlier flags to the output feature class.

    deviation -- 1-D array of per-record distances; its row order must match
                 the row order of the output feature class.

    Uses module-level globals set in main: inputs, properties, attri,
    clusterAttri, average, fRange.  Adds a text field "Outlier" (or
    "Outlier_1" when "Outlier" is already taken) plus one "<field>_O" column
    per attribute, each written as "Y"/"N".
    """
    HF.pGP.AddMessage ("--------" + getCurTime() + "--------")
    HF.pGP.AddMessage ("Outputting results...")
    outField = "Outlier"

    # Add the overall result field; fall back to "Outlier_1" when taken.
    # ('key in dict' replaces the Py2-only dict.has_key()).
    if outField.upper() not in properties.dcFields:
        try:
            HF.pGP.AddField(inputs.sOutputFC, outField, "TEXT")
        except Exception:
            HF.pGP.GetMessages(2)
    else:
        outField = "Outlier_1"
        if outField.upper() not in properties.dcFields:
            try:
                HF.pGP.AddField(inputs.sOutputFC, outField, "TEXT")
            except Exception:
                HF.pGP.GetMessages(2)

    # One "<field>_O" flag column per attribute field; base names are
    # truncated to 8 chars to respect the shapefile 10-char field limit.
    addField = []
    for Field in inputs.sZField:
        if len(Field) > 8:
            Field = Field[0:8]
        addField.append(Field + "_O")

    for Field in addField:
        if Field.upper() not in properties.dcFields:
            HF.pGP.AddField(inputs.sOutputFC, Field, "TEXT")

    # Write the results row by row.
    HF.pGP.AddMessage (HF.sWritingResults)
    pRows = HF.pGP.UpdateCursor(inputs.sOutputFC)
    pRow = pRows.Next()

    # Progress-reporting state.
    iError = 0
    iCnt = 0
    fInterval = len(attri) / 5.0
    fMore = fInterval
    iComplete = 20
    iKey = 0

    # '!=' replaces the Py2-only '<>' operator.
    while pRow != None:
        try:
            if deviation[iKey] > fRange:  # record is an overall outlier
                pRow.SetValue(outField, "Y")
                # BUG FIX: enumerate replaces addField.index(Field), which
                # returned the wrong index whenever two truncated field
                # names collided (and rescanned the list on every pass).
                for iIndex, Field in enumerate(addField):
                    if calculate_single_deviation(clusterAttri[iKey, :], average, iIndex):
                        pRow.SetValue(Field, "Y")
                    else:
                        pRow.SetValue(Field, "N")
            else:
                pRow.SetValue(outField, "N")
                for Field in addField:
                    pRow.SetValue(Field, "N")
            pRows.UpdateRow(pRow)
            iCnt = iCnt + 1
            if iCnt > fInterval:
                fInterval, iComplete = HF.check_progress(fInterval, fMore, iComplete)
        except Exception:
            # Best-effort write: count rows that failed to update.
            iError = iError + 1

        pRow = pRows.Next()
        iKey = iKey + 1

    HF.pGP.AddMessage (HF.s100Percent)
    HF.pGP.AddMessage(" ")
    pRows = None



#--------------------------------------------------------------------------
#MAIN

if __name__ == "__main__":
    # Outlier analysis: flag features whose weighted attribute vector lies
    # farther from its cluster average than the top-fDissimilarity fraction.
    # (The original docstring mentioning "Getis and Ord Gi*" was a copy-paste
    # artifact and did not describe this script.)

    inputs = get_inputs()
    iNumRecs = HF.pGP.GetCount(inputs.sInputFC)
    fDissimilarity = float(inputs.fDissimilarity)

    # Parse the weight strings and normalize them to sum to 1.
    fWeight = np.array([float(item) for item in inputs.lsWeight], dtype=float)
    fWeight_sum = fWeight.sum()
    if fWeight_sum > 1.01 or fWeight_sum < 0.99:
        ffWeight = fWeight / fWeight_sum
    else:
        # BUG FIX: ffWeight was previously left undefined when the weights
        # already summed to ~1, crashing with NameError on first use.
        ffWeight = fWeight
    print(ffWeight)

    if iNumRecs < 30:
        # BUG FIX: msgFewRecsWrn lives in the HelperFunctions module.
        HF.pGP.AddWarning (HF.msgFewRecsWrn)

    if HF.pGP.exists(inputs.sOutputFC):
        HF.pGP.delete(inputs.sOutputFC)

    # Copy the input feature class to the output feature class.
    try:
        HF.pGP.QualifiedFieldNames = 0
        HF.pGP.Copyfeatures(inputs.sInputFC, inputs.sOutputFC)
    except Exception:
        sMessage = HF.msgOverwriteErr % (inputs.sOutputFC)
        raise HF.ReportError (sMessage)

    properties = HF.get_featureclass_properties(inputs.sOutputFC)

    attri = build_value_lists()      # rows: [FID, attrs..., cluster id]
    clusterAttri = attri[:, 1:-1]    # attributes only (drop FID and cluster)

    # Re-attach the cluster id as the trailing column.
    cluster = attri[:, -1]
    cluster.shape = (-1, 1)
    clusterAttri = np.append(clusterAttri, cluster, axis=1)
    # Unique cluster ids; np.unique already returns them sorted ascending,
    # so the original extra np.sort call was redundant.
    iCluster = np.unique(cluster.astype(int))

    # Weight the attributes by sqrt(weight) so the squared terms inside the
    # Euclidean norm end up weighted linearly.
    clusterAttri[:, :-1] = clusterAttri[:, :-1] * np.sqrt(ffWeight)

    average = calculate_all_average(clusterAttri, iCluster)
    deviation = calculate_deviation(clusterAttri, average)

    # Threshold at the (1 - fDissimilarity) quantile of the deviations:
    # everything beyond fRange is flagged as an outlier.
    deviation_sort = np.sort(deviation)
    iCount = int(len(deviation_sort) * (1 - fDissimilarity))
    fRange = deviation_sort[iCount - 1]

    output_results(deviation)