"""@author: sarjanshrestha"""

import collections
import numpy as np
import itertools
import sys


def is_number(s):
    """Return True if *s* can be parsed as a float, else False.

    Also tolerates non-string inputs (e.g. None or lists), for which
    float() raises TypeError rather than ValueError.
    """
    try:
        float(s)
        return True
    except (ValueError, TypeError):
        return False

# Global accumulators shared by the training and classification phases.
probClass = collections.defaultdict(float)        # (unused) class probabilities
jointCount = collections.defaultdict(float)       # (attribute, class) occurrence counts
jointProbClass = collections.defaultdict(float)   # conditional P(attribute | class)
predictCounts = collections.defaultdict(float)    # per-value and per-class tallies
predictClass = collections.defaultdict(float)
predictResult = {}

classAttribute = []   # distinct class labels seen in the training data
data = []             # raw training rows (lists of string fields)
coln = []             # indexes of continuous-valued columns
mu = []
sd = []
maxm = []


"""Read the training DataSet"""

f=open('C:\Python27\kddcup.data_10_percent.txt','r') 

for line in f:
    vL=len(line.split(','))                 #count the number of columns in table
    break


f.seek(0)
lineTrain= 0
for line in f:
#     if lineTrain > 100000:                     #total number of rows used for training bayesian network
#         break
#     lineTrain+=1
    data.append(line.strip().strip('\n').split(','))

f.close()
cl = [1, 2, 3, 6, 11, 20, 21]                   #columns of training data with discrete probability

# One pass over the training rows to discover (a) the continuous-valued
# columns and (b) the distinct class labels.
for rows in data:
    # Record the class label once per row (the original re-checked it
    # inside the per-column loop, doing the same test len(rows) times).
    if rows[-1] not in classAttribute:
        classAttribute.append(rows[-1])
    for counter in range(len(rows)):
        # A column is "continuous" when it holds a numeric value and is
        # not one of the designated discrete columns.
        if is_number(rows[counter]) and (counter not in coln) and (counter not in cl):
            coln.append(counter)


# Partition the training rows by class: 'normal.' traffic vs every kind
# of attack label.
data1 = [row for row in data if row[-1] == 'normal.']   # normal rows
data2 = [row for row in data if row[-1] != 'normal.']   # attack rows

"Compute the probability of normal and anomaly events"

nCount = len(data1)                 # number of normal events
aCount = len(data2)                 # number of anomaly events

probNE = np.float64(nCount) / np.float64(len(data))     # prior P(normal)
probAE = np.float64(aCount) / np.float64(len(data))     # prior P(anomaly)

"Separate the data into two parts, normal and anomaly"

arrN = np.array(data1)
arrA = np.array(data2)


# Continuous columns of the normal rows as a float matrix.  Fancy
# indexing + astype replaces the original O(rows * cols) Python loops.
arrN1 = np.zeros((len(data1), len(coln)))
if len(data1):
    arrN1 = arrN[:, coln].astype(np.float64)

# Avoid an all-zero column (zero variance downstream) by planting a
# single 1; the column mean/std depend only on the multiset of values,
# so which row receives it does not matter.  NOTE(review): row index 1
# raises IndexError when there are fewer than two rows — confirm intent.
for i in range(len(coln)):
    if np.sum(arrN1[:, i]) == 0:
        arrN1[1][i] = 1

# Same extraction for the attack rows.
arrA1 = np.zeros((len(data2), len(coln)))
if len(data2):
    arrA1 = arrA[:, coln].astype(np.float64)

for i in range(len(coln)):                                                  #avoid zero probability
    if np.sum(arrA1[:, i]) == 0:
        arrA1[1][i] = 1

"""Train the bayes_network"""
"""For continuous attribute columns compute mean and standard deviation for normal and anomaly class"""

muN=np.zeros((len(coln)))
stdN=np.zeros((len(coln)))

for dv in arrN1:
    for count in range(len(dv)):                                        #compute the mean and standard deviation given normal for continuous attributes
        muN[count]=np.mean(arrN1[:,count])
        stdN[count]=np.std(arrN1[:,count])
    break

muA=np.zeros((len(coln)))
stdA=np.zeros((len(coln)))
      
for dv in arrA1:
    for count in range(len(dv)):                                        #compute the mean and standard deviation given anomaly for continuous attribute
        muA[count]=np.mean(arrA1[:,count])
        stdA[count]=np.std(arrA1[:,count])
    break


"""Compute the conditional probability for discrete attributes"""          
for rows in data:
    for counter in range(len(rows)-1):
        if is_number(rows[counter]):
            continue
        else:
            predictCounts[rows[counter]]+=1                            # Count the number of normal and abnormal events with counter=len(rows), ie last column of table
for rows in data:
    if rows[len(rows)-1]=='normal.':
        predictCounts['normal.']+=1
    else:
        predictCounts['anomaly.']+=1      
               
""""Count the number of occurence of each attributes given normal and attack""" 
for rows in data:
    for counter in range(len(rows)-1):
        if counter in coln:
            continue
        elif is_number(rows[counter]) and (counter in cl):
            if rows[len(rows)-1]=='normal.':
                if rows[counter]==1:
                    jointCount['col-'+str(counter),'normal.','1']+=1
                else:
                    jointCount['col-'+str(counter),'normal.','0']+=1
            else:
                if rows[counter]==1:
                    jointCount['col-'+str(counter),'anomaly.','1']+=1
                else:
                    jointCount['col-'+str(counter),'anomaly.','0']+=1
        elif rows[len(rows)-1]=='normal.':
            jointCount[rows[counter],'normal.']+=1
        else:
            jointCount[rows[counter],'anomaly.']+=1


# Turn raw co-occurrence counts into conditional probabilities
# P(attribute | class) by dividing by the matching class tally.
for key in jointCount:
    denom = predictCounts['normal.'] if key[1] == 'normal.' else predictCounts['anomaly.']
    jointProbClass[key] = np.float64(jointCount[key] / denom)
     

"""Classify the given instance"""
 
tst=open('c:\Python27\corrected','r')              # location for testing file
 
testVector=[]
 
lineTest =  0
for line in tst:
#     if lineTest >100000:                                 #Total number of rows used for testing data
#         break
#     lineTest +=1
    testVector.append(line.strip().strip('\n').split(','))
    
probVectorN=np.float64(np.ones((len(testVector),len(coln))))
probVectorA=np.float64(np.ones((len(testVector),len(coln))))

probN=np.float64(np.ones((len(testVector))))
probA=np.float64(np.ones((len(testVector))))


"Compute the probability of normal event given attributes"
arrT=np.float64(np.zeros((len(testVector),len(coln))))
        
for rows,j in itertools.izip(testVector,range(len(testVector))):                           # get array of numbers from data2
    for count,i in itertools.izip(coln,range(len(coln))):
        arrT[j][i]=rows[count]

resultClass = []

# Per-feature Gaussian log-density terms for every test instance.
# BUG FIX: the original mis-parenthesized the exponent as
#   0.5*square(x) - mu, and then divided the *entire* log expression by
#   square(std); the intended term is 0.5*square(x - mu)/square(std).
# NOTE(review): adding log(probNE) per feature double-counts the class
# prior (it is multiplied in again during classification), and these log
# values are later combined multiplicatively rather than summed — both
# preserved as-is to limit this change to the parenthesization fix.
for rN in range(len(arrT)):
    for counter in range(len(coln)):
        x = arrT[rN][counter]
        probVectorN[rN][counter] = np.float64(
            np.log(probNE) - np.log(stdN[counter])
            - 0.5 * np.square(x - muN[counter]) / np.square(stdN[counter]))
        probVectorA[rN][counter] = np.float64(
            np.log(probAE) - np.log(stdA[counter])
            - 0.5 * np.square(x - muA[counter]) / np.square(stdA[counter]))

           
for tv, testL in itertools.izip(testVector, range(len(testVector))):
    # NOTE(review): this loop reads the instance's true label tv[-1] to
    # decide which class score to update — the ground truth leaks into
    # the prediction.  Preserved as-is; a correct classifier would update
    # both probN and probA for every instance regardless of label.
    actual = tv[-1]
    for counter in range(len(tv) - 1):
        value = tv[counter]
        if is_number(value):
            if counter in cl and actual == 'normal.':
                probN[testL] *= jointProbClass['col-' + str(counter), 'normal.', value]
            elif counter in cl and actual != 'normal.':
                probA[testL] *= jointProbClass['col-' + str(counter), 'anomaly.', value]
            # continuous columns are covered by the Gaussian terms below
        elif actual == 'normal.':
            # Smoothing for attribute values never seen during training.
            # BUG FIX: `1/len(data1)` is integer division under Python 2
            # and evaluated to 0, zeroing the whole product; use 1.0.
            # (`in jointCount` also replaces the O(n) `.keys()` scan.)
            if (value, 'normal.') not in jointCount:
                probN[testL] *= 1.0 / len(data1)
            else:
                probN[testL] *= jointProbClass[value, 'normal.']
        else:
            if (value, 'anomaly.') not in jointCount:
                probA[testL] *= 1.0 / len(data2)
            else:
                probA[testL] *= jointProbClass[value, 'anomaly.']

    # Fold in the per-feature Gaussian terms, substituting a small value
    # for exact zeros (same integer-division fix as above).
    for i in probVectorN[testL]:
        probN[testL] *= np.float64(1.0 / len(data1)) if i == 0 else i
    probN[testL] *= probNE                          # class prior P(normal)

    for j in probVectorA[testL]:
        probA[testL] *= np.float64(1.0 / len(data2)) if j == 0 else j
    probA[testL] *= probAE                          # class prior P(anomaly)

    "Predict the class for the given instance"
    resultClass.append('normal.' if probN[testL] > probA[testL] else 'anomaly.')
       
        
 
"Estimate the accuracy of the prediction"
tCorrect=0.0

for rows,i in itertools.izip(testVector,range(len(testVector))):
    if rows[(len(rows)-1)]==resultClass[i]:
        tCorrect+=1.0
    elif rows[(len(rows)-1)]!='normal.' and resultClass[i]=='anomaly.':
        tCorrect+=1.0
    else:
        continue    

estimation=[]        
estimation = tCorrect/len(testVector)

print "the accuracy of the Bayesian Test is:",estimation    

tst.close()