import os,csv,sys
from datetime import datetime
import time

# --- Module-level state and I/O handles (Python 2 script) ---

# Array of dicts which contain the distinct values for each attribute as keys
distinctVal=[]
# column header of the attribute
colNames=[]
#colAlphaNum contains 'a' if the attribute is alphanumeric and 'n' if it is numeric
# NOTE(review): hard-coded to 273 columns; findDistinctVal aborts on wider
# rows -- confirm both input files always have exactly 273 columns.
colAlphaNum=['n']*273


# NOTE(review): '\H' is not a recognized escape so this literal survives
# intact, but a raw string (r'C:\Hearst') would be safer for Windows paths.
baseDir='C:\Hearst'
os.chdir(baseDir)
# modeling (training) data, validation data, and a diagnostics/log output file
newModel=open('newModeling100.csv')
validation=open('validation100.csv')
distinctValFile=open('distinctVal100.txt','w')
csvfile=csv.reader(newModel,delimiter=',',quotechar='"')
validationfile=csv.reader(validation,delimiter=',',quotechar='"')

#function to convert string class in to numerical class label
def classToNum(cls):
    """Map the class flag 'Y' to SVM-light label '+1'; anything else to '-1'.

    The original had an unreachable `return '-1'` after an if/else in
    which both branches already returned; the dead statement is removed.
    """
    # 'Y' marks the positive class; every other value (including '')
    # is treated as negative.
    return '+1' if cls=='Y' else '-1'

#function to split categorical attribute header to multiple nominal attribute headers
def cat2NomHeaders(col,colDict):
    """Expand one categorical column header into one header per level.

    colDict maps each distinct value to its 1-based rank; slot rank-1 of
    the result receives the header "col^value".
    """
    headers=['']*len(colDict)
    for value,rank in colDict.items():
        headers[rank-1]=col+'^'+value
    return headers

#function to categorical value to nominal values
def cat2NomVal(key,colDict):
    """One-hot encode *key* using the 1-based rank stored in colDict.

    Raises KeyError for a value absent from colDict (callers treat that
    as a data inconsistency).
    """
    onehot=[0]*len(colDict)
    onehot[colDict[key]-1]=1
    return onehot

#function to find the distinct values,datatype for each attribute
#function to find the distinct values,datatype for each attribute
def findDistinctVal(dataFile):
    """Record every distinct value per column and detect column datatypes.

    Mutates the module-level ``distinctVal`` (one dict of seen values per
    column) and ``colAlphaNum`` (a column is demoted from numeric 'n' to
    alphanumeric 'a' once any non-empty value fails float conversion).
    The ``newidIdx`` column is keyed together with the mailing id, and
    column 272 (a date string) is stored as a unix-timestamp string.
    Exits with status 2 on any row wider than 273 columns.
    """
    for row in dataFile:
        if len(row)>273:
            print(row)
            sys.exit(2)
        for colIdx,cell in enumerate(row):
            seen=distinctVal[colIdx]
            if colIdx==newidIdx:
                # composite key: new_id plus its mailing id
                seen[cell+','+row[mailingidIdx]]=1
            elif colIdx==272:
                # normalize the date column to a unix-timestamp string
                # NOTE(review): "%H" (24-hour) paired with "%p" is unusual;
                # "%I" is the conventional companion of AM/PM -- confirm.
                stamp=time.mktime(datetime.strptime(cell,"%a, %m/%d/%y %H:%M %p").date().timetuple())
                seen[str(int(stamp))]=1
            else:
                seen[cell]=1
            # demote the column to alphanumeric on the first unparsable value
            if colAlphaNum[colIdx]=='n' and cell!='':
                try:
                    float(cell)
                except ValueError:
                    colAlphaNum[colIdx]='a'

def findMinMax():
    """Build {column_index: (min, max)} for later normalization.

    Covers columns from index 2 onward, skipping ``newidIdx``.  The city
    column takes the range of the distances in ``distCenUSA``; other
    numeric columns use their observed distinct values (empty strings
    ignored, falling back to (0, 0) when nothing numeric was seen).
    Alphanumeric columns keep the default (0, 0).
    """
    minMaxMap={}
    for colIdx in range(2,len(colAlphaNum)):
        if colIdx==newidIdx:
            continue
        low,high=0,0
        if colIdx==cityIdx:
            distances=distCenUSA.values()
            low,high=min(distances),max(distances)
        elif colAlphaNum[colIdx]=='n':
            numbers=[float(v) for v in distinctVal[colIdx].keys() if v!='']
            if not numbers:
                numbers=[0]
            low,high=min(numbers),max(numbers)
        minMaxMap[colIdx]=(low,high)
    return minMaxMap

def normalize(val,min,max):
    """Linearly scale *val* into [0, 1] given the column's (min, max).

    Degenerate ranges: returns 0 when min == max == 0 (constant-zero
    column) and 1 when min == max != 0 (constant non-zero column).

    NOTE: the parameter names shadow the builtins min/max; they are kept
    unchanged for backward compatibility with existing callers, but are
    immediately copied into non-shadowing locals.
    """
    lo=float(min)
    hi=float(max)
    if hi==lo:
        return 0 if lo==0 else 1
    # float() above fixes the Python 2 floor-division bug: with all-int
    # arguments the original (val-min)/(max-min) silently truncated to 0.
    return (val-lo)/(hi-lo)
        

def getDistCenUSA(path='c:\Hearst\distFromCentreOfUSA_new.csv'):
    """Load {"CITY,STATE": distance} from a '^'-separated file.

    Each line looks like ``CITY,STATE^123.4``.  *path* defaults to the
    original hard-coded location so existing zero-argument callers are
    unaffected.  Fixes two defects: the file handle was never closed
    (resource leak), and the distance was converted with float() twice.
    """
    distMap={}
    distFile=open(path)
    try:
        for cityDist in distFile:
            parts=cityDist.rstrip('\n').split('^')
            distMap[parts[0]]=float(parts[1])
    finally:
        distFile.close()
    return distMap
  


#Start of the program
#get the program start time
tstart=datetime.now()
#populate the distMap
#Dict which contains the distance in kms between center of the entire USA and each city
distCenUSA=getDistCenUSA()

#populate the column headers from the first (header) row of the modeling file
# (reader.next() is Python 2 only; Python 3 would use next(csvfile))
for colname in csvfile.next():
    colNames.append(colname)
    distinctVal.append({})

#get the index for the columns which need special treatment
newidIdx=colNames.index('new_id')
mailingidIdx=colNames.index('new_mailing_id')
cityIdx=colNames.index('CITY')
# NOTE(review): assumes the state column sits exactly two columns after
# CITY -- confirm against the actual file layout.
stateIdx=cityIdx+2
# merge the two id headers so the combined "new_id,new_mailing_id" key
# produced by findDistinctVal has a matching column name
colNames[newidIdx]=colNames[newidIdx]+','+colNames[mailingidIdx]

#find the distinct values from Modeling data file
findDistinctVal(csvfile);

#ignore the first line containing headers in the validation file
validationfile.next()

#find the distinct values from Validation file
findDistinctVal(validationfile);

#setting the datefield as numeric, as date is converted in to unix timestamp
# (the float() probe in findDistinctVal would have flagged the raw date
# strings as alphanumeric, so the flag is overridden here)
colAlphaNum[272]='n'

# Assign a dense 1-based rank to every distinct value of every column; the
# one-hot encoders (cat2NomVal / cat2NomHeaders) index with this rank.
# Fix: the original reused the name `ctr` for both the outer and the inner
# loop variable; the outer index was never used, so behavior was unchanged,
# but the shadowing was misleading.  The outer enumerate is dropped and the
# inner variable renamed.
for colDict in distinctVal:
    for rank,item in enumerate(sorted(colDict.keys())):
        colDict[item]=rank+1

#Dict {attr_key:(min,max)} which contains the tuple (min,max) for each attribute, will be used for Normalization
minMax=findMinMax()

#write the distinct values for attributes with distinct value count < 1000
for ctr,name in enumerate(colNames):
    distinctValFile.write("#column:"+name+","+colAlphaNum[ctr]+","+str(len(distinctVal[ctr]))+"\n")
    if len(distinctVal[ctr])<1000:
       for item in distinctVal[ctr].keys():
          distinctValFile.write(item+'\n')

## the distinct values for each attribute is now available in memory
## if the number of distinct values is less than 1000 we convert each distinct value in to an attribute i.e nominal to binary
## if the number of distinct values is greater than 1000, we assign an index to each distinct value and convert the nominal attribute in to numerical attribute
## remove the attribute new_id


#reset the file seek position for Training and Validation data to zero
#(the csv readers wrap these handles, so they restart from the header row)
newModel.seek(0)
validation.seek(0)

# create 2 separate training files one for open_flg and one for click_flg
open_flgTrain=open('open_flag.train','w')
click_flgTrain=open('click_flag.train','w')

#ignore header which is first line in the file (handles were rewound with seek(0))
csvfile.next()
validationfile.next()

#write the new header: columns 0-1 (apparently the class flags; see the
#training loop below) are excluded by the slice, new_id is dropped, the two
#columns after CITY are skipped, numeric columns and CITY keep one header,
#and every other categorical column expands to one header per distinct value
newHeader=[]
# NOTE(review): the slice end len(colNames)+1 is redundant -- [2:] is equivalent
for loopctr,name in enumerate(colNames[2:len(colNames)+1]):
    ctr=loopctr+2
    if ctr==newidIdx:
         continue
    elif ctr>cityIdx and ctr<=cityIdx+2:
            continue
    elif colAlphaNum[ctr]=='n' or (ctr==cityIdx):
        newHeader.append(colNames[ctr])
    else:
        newHeader+=(cat2NomHeaders(colNames[ctr],distinctVal[ctr]))
distinctValFile.write("The new header is"+'\n')
distinctValFile.write(','.join(newHeader)+'\n')

#preparing the training files for svm light
# Each data row becomes one sparse feature vector (1-indexed; zeros and
# empty values omitted).  The same vector is written to both training files
# with different labels: row[1] -> open flag, row[0] -> click flag.
# NOTE(review): confirm that columns 0 and 1 really are click_flg/open_flg.
for row in csvfile:
    data=[]
    for loopctr,item in enumerate(row[2:len(row)+1]):
        ctr=loopctr+2
        if ctr==newidIdx:
            continue
        # skip the two columns following CITY (state is folded into the
        # city-distance feature below)
        elif ctr>cityIdx and ctr<=cityIdx+2:
            continue
        elif ctr==cityIdx:
            # replace the city with its normalized distance from the geo
            # center of the USA; unknown "CITY,STATE" keys fall back to 0.99
            try:
               data.append(normalize(distCenUSA[item+','+row[stateIdx]],minMax[ctr][0],minMax[ctr][1]))
            except KeyError:
                 data.append(0.99)
            #add the logic for getting the distance of the city from the geo center of entire U.S.A
        #convert campaign id in to a categorical attribute
        elif ctr==mailingidIdx:
            data+=cat2NomVal(item,distinctVal[ctr])
        elif ctr==272:
            # date column: unix timestamp, then normalized
            # NOTE(review): "%H" (24-hour) with "%p" is unusual; "%I" is the
            # conventional pairing with AM/PM -- confirm the input format
            data.append(normalize(time.mktime(datetime.strptime(item,"%a, %m/%d/%y %H:%M %p").date().timetuple()),minMax[ctr][0],minMax[ctr][1]))
        elif colAlphaNum[ctr]=='n':
            if item=='':
                # empty numeric cell: '' survives here but is filtered out
                # by the sparse-map condition below
                data.append('')
            else:
                data.append(normalize(float(item),minMax[ctr][0],minMax[ctr][1]))
        elif colAlphaNum[ctr]=='a' and len(distinctVal[ctr])<1000:
            # one-hot encode low-cardinality categoricals; a value never seen
            # during findDistinctVal is a fatal data inconsistency
            try:
               data+=cat2NomVal(item,distinctVal[ctr])
            except KeyError:
               distinctValFile.write('the column is '+colNames[ctr]+'\n')
               distinctValFile.write('the Key is '+item+'\n')
               distinctValFile.close()
               sys.exit(-1)
        # NOTE(review): alphanumeric columns with >= 1000 distinct values
        # fall through all branches and are silently dropped -- this matches
        # the intent stated in the ## notes above
           
    # sparse representation: 1-based feature index -> value rounded to 4dp;
    # '' (missing) and 0 entries are omitted
    sparseMap={}
    for ctr,val in enumerate(data):
        if val!='' and val!='0' and val!=0:
           sparseMap[ctr+1]=str(round(val,4))         
            
    # one svm-light line per row: "<label> <idx>:<val> <idx>:<val> ..."
    open_flgTrain.write(classToNum(row[1])+' ')
    for key in sorted(sparseMap.keys()):
        open_flgTrain.write(str(key)+':'+sparseMap[key]+' ')
    open_flgTrain.write('\n')

    click_flgTrain.write(classToNum(row[0])+' ')
    for key in sorted(sparseMap.keys()):
        click_flgTrain.write(str(key)+':'+sparseMap[key]+' ')
    click_flgTrain.write('\n')

open_flgTrain.close()
click_flgTrain.close()    

#preparing the test files for svm light from validation file
# Same feature construction as the training loop above, but unlabeled:
# valid.test receives the sparse vector and valid_id.test the matching
# "new_id,new_mailing_id" key so predictions can be joined back to rows.
    
valid_test=open('valid.test','w')
valid_id=open('valid_id.test','w')

for row in validationfile:
    data=[]
    # NOTE(review): `id` shadows the builtin id(); harmless here but
    # worth renaming in a future cleanup
    id=[]
    for loopctr,item in enumerate(row[2:len(row)+1]):
        ctr=loopctr+2
        if ctr==newidIdx:
            # keep the record key instead of emitting a feature
            # NOTE(review): the slice starts at column 2, so this branch
            # (and id[0] below) assumes newidIdx >= 2 -- confirm
            id.append(row[newidIdx]+','+row[mailingidIdx])
        elif ctr>cityIdx and ctr<=cityIdx+2:
            continue
        elif ctr==cityIdx:
            # city -> normalized distance from USA geo center; 0.99 fallback
            try:
               data.append(normalize(distCenUSA[item+','+row[stateIdx]],minMax[ctr][0],minMax[ctr][1]))
            except KeyError:
                 data.append(0.99)
            #add the logic for getting the distance of the city from the geo center of entire U.S.A
        #convert campaign id in to a categorical attribute
        elif ctr==mailingidIdx:
            data+=cat2NomVal(item,distinctVal[ctr])
        elif ctr==272:
            # date column: unix timestamp, then normalized
            data.append(normalize(time.mktime(datetime.strptime(item,"%a, %m/%d/%y %H:%M %p").date().timetuple()),minMax[ctr][0],minMax[ctr][1]))
        elif colAlphaNum[ctr]=='n':
            if item=='':
                data.append('')
            else:
                data.append(normalize(float(item),minMax[ctr][0],minMax[ctr][1]))
        elif colAlphaNum[ctr]=='a' and len(distinctVal[ctr])<1000:
            # one-hot encode; unseen values are fatal, as in the training loop
            try:
               data+=cat2NomVal(item,distinctVal[ctr])
            except KeyError:
               distinctValFile.write('the column is '+colNames[ctr]+'\n')
               distinctValFile.write('the Key is '+item+'\n')
               distinctValFile.close()
               sys.exit(-1)
           
    # sparse representation, identical filtering to the training loop
    sparseMap={}
    for ctr,val in enumerate(data):
        if val!='' and val!='0' and val!=0:
           sparseMap[ctr+1]=str(round(val,4))
           
    for key in sorted(sparseMap.keys()):
        valid_test.write(str(key)+':'+sparseMap[key]+' ')
    valid_test.write('\n')
    valid_id.write(id[0]+'\n')

    
valid_test.close()
valid_id.close()

distinctValFile.close()
tend=datetime.now()
c=tend-tstart
# NOTE(review): Python 2 print statement; also c.seconds ignores whole days
# and seconds/60 is integer division -- timedelta.total_seconds() would be
# more accurate if this is ever ported
print str(c.seconds/60)+' Minutes to complete'



