"""@author: sarjanshrestha"""

import collections
import functools
import itertools
import operator

import numpy as np


def prod(factors):
    """Return the product of every value in *factors* (1 for an empty iterable)."""
    # functools.reduce is available on Python 2.6+ and Python 3; the bare
    # builtin `reduce` used originally was removed in Python 3.
    return functools.reduce(operator.mul, factors, 1)

def is_number(s):
    """Return True if *s* can be parsed as a float, else False.

    Also returns False instead of raising for inputs that float() rejects
    with TypeError (e.g. None or a list), so the predicate never throws.
    """
    try:
        float(s)
        return True
    except (ValueError, TypeError):
        return False
    
def continuous_Prob(x, m, s):
    """Return the Gaussian (normal) pdf N(m, s**2) evaluated at x.

    Parameters: x — value, m — mean, s — standard deviation (s != 0).

    Bug fixes vs. the original:
    * operator precedence: ``-np.square(x-m)/2*np.square(s)`` computed
      ``-(x-m)**2 / 2 * s**2`` (multiplying by the variance) instead of
      dividing by ``2*s**2``;
    * the spurious ``round()`` around the exponential collapsed the
      density to 0 or 1 before the normalisation divide.
    """
    variance = np.square(s)
    return np.exp(-np.square(x - m) / (2.0 * variance)) / np.sqrt(2.0 * np.pi * variance)

    
# def normalize_columns(arr):
#     rows, cols = arr.shape
#     for col in xrange(cols):
#         arr[:,col] = arr[:,col]/abs(arr[:,col]).max()


# --- Training-data handle and accumulator state -----------------------------
# NOTE(review): hard-coded Windows path; the backslashes happen to survive
# because \p and \i are not escape sequences, but a raw string / pathlib path
# would be safer. The file is opened at import time and never closed —
# consider a `with` block.
f=open('c:\python27\intrDetTrainingDataSet.txt')
f.seek(0)
#count={}
# Per-(column, value) occurrence counters and derived probabilities.
columns=collections.defaultdict(int)
probClass=collections.defaultdict(int)
# Joint (value, class) counts and the joint probabilities derived from them.
jointCount=collections.defaultdict(int)
jointProbClass=collections.defaultdict(int)
# Distinct class labels seen in the last column of the training rows.
classAttribute=[]
features={}
featureNameList=[]
featureCounts= collections.defaultdict(int)
featureVectors=[]
predictCounts=collections.defaultdict(int)
predictClass=collections.defaultdict(int)
predictResult={}
# Parsed training rows (list of lists of strings).
data=[]
# Indices of columns detected as continuous-valued.
coln=[]
# Per-column statistics; re-bound to numpy arrays once shapes are known below.
mu=[]
sd=[]
sd1=[]
maxm=[]


"""
# for line in f:
#     if line[0]!='@':
#         featureVectors.append(line.strip('').strip('\n').lower().split(','))
#     else:
#         if line.strip(' ').lower().find('@data')==-1 and (not line.lower().startswith('@relation')):
#             featureNameList.append(line.strip(' ').split()[1])
#             features[featureNameList[len(featureNameList)-1]]=line[line.find('{')+1:line.find('}')].strip(' ').split(',')
#                 
"""
"""Read the training files"""
for line in f:
    vL=len(line.split(','))                 #count the number of columns in table
    break


# for i in range(vL):
#     columns['col-'+str(i)]+=1
f.seek(0)
for line in f:
    data.append(line.strip().strip('\n').split(','))
    featureVectors.append(line.strip().strip('\n').split(','))
    
    
for rows in data:
#     predictCounts[rows[len(rows)-1]]+=1
    for counter in range(len(rows)):
        if is_number(rows[counter]) and (rows[counter]>1) and (counter not in coln):
            coln.append(counter)                                        #find the number of columns with continous values
# #             featureCounts['col-'+str(counter),rows[counter]]+=1
# #         elif is_number(rows[counter]) and (rows[counter])<=1:
# #             print l[counter]
        if (rows[len(rows)-1]) not in classAttribute:                   #get the number of classes to predict the given instance
                classAttribute.append((rows[len(rows)-1]))
#        else:
            
#             predictCounts[rows[counter]]+=1
#             columns['col-'+str(counter),rows[counter]]+=1
#             featureCounts[('col-'+str(counter),rows[counter],rows[(len(rows)-1)])]+=1
#             if counter!=(len(rows)-1):
#                 jointCount[(rows[counter],rows[(len(rows)-1)])]+=1
#arr1=np.zeros((len(data),vL))            
arr1=np.array(data)                         #convert into matrix of data array

"""normalize the data in columns with continous attributes"""                
arr=np.zeros((len(data),vL))



for rows,i in itertools.izip(data,range(len(data))):
    for col,j in itertools.izip(coln,range(len(coln))):
        arr[i][j]=rows[col]

       
        
for rows,i in itertools.izip(data,range(len(data))):
    for col,j in itertools.izip(coln,range(vL)):
        arr1[i][j]=rows[col]

#normalize the value in array
rowz,colz=arr.shape

mu=np.zeros(colz)
sd=np.zeros(colz)
sd1=np.zeros(colz)
maxm=np.zeros(colz)
for i in range(colz-1):
    mu[i]=np.mean(arr[:,i])
    sd[i]=np.std(arr[:,i])
    maxm[i]=np.max(arr[:,i])

 
      
""" computation for continous probability"""
# for rows in data:
#      for counter in coln:
#               conProbClass['col-'+str(counter),rows[len(rows)-1]]=round(np.exp(-np.square(x-mu[counter])/2*np.square(sd[counter])))/np.sqrt(2*np.pi*np.square(sd[counter]))


# 
# for col in xrange(colz):
#     if arr[:,col].max()!=0:                             #avoiding the divide by zero for the column with zero value
#         arr[:,col] = arr[:,col]/abs(arr[:,col].max())
#         
# 
#          
# for rows,i in itertools.izip(data,range(len(data))):
#     for col,j in itertools.izip(coln,range(len(coln))):
#         rows[col]=arr[i][j]
#         if rows[col]<0.5:
#             rows[col]=0
#         else:
#             rows[col]=1
            
"""Normalize the array of data"""
# rw,cl=  arr1.shape
# for col in xrange(cl):
#     if is_number(arr1[1,col]) and (arr1[:,col].max())!=0:
#         arr1[:,col]=arr1[:,col]/abs(arr1[:,col].max())

# for rows in arr1:
#     print rows

 
"""Train the bayes_network"""            
# for rows in data:
# #     predictCounts[rows[len(rows)-1]]+=1
#     for counter in range(len(rows)):
#         if is_number(rows[counter]):
#             if rows[counter]!=0:
#                 predictCounts['col-'+str(counter),'1']+=1
#                 jointCount['col-'+str(counter),'1',rows[(len(rows)-1)]]+=1
#             else:
#                 predictCounts['col'+str(counter),'0']+=1
#                 jointCount['col-'+str(counter),'0',rows[(len(rows)-1)]]+=1
# #             featureCounts['col-'+str(counter),rows[counter]]+=1
# #         elif is_number(rows[counter]) and (rows[counter])<=1:
# #             print l[counter]
#         else:
#             predictCounts[rows[counter]]+=1                            # Count the number of normal and abnormal events with counter=len(rows), ie last column of table
#             columns['col-'+str(counter),rows[counter]]+=1
#             featureCounts[('col-'+str(counter),rows[counter],rows[(len(rows)-1)])]+=1
#             if counter!=(len(rows)-1):                              #excludes the column with attribute decision of normal or attack
#                 jointCount[(rows[counter],rows[(len(rows)-1)])]+=1


countSum=collections.defaultdict(int)
numCount=collections.defaultdict(int)
classCount=collections.defaultdict(int)
meanValue=collections.defaultdict(int)

for rows in data:
    classCount[rows[(len(rows)-1)]]+=1
    for counter in range(len(rows)):
        if is_number(rows[counter]):
            numCount['col-'+str(counter),rows[(len(rows)-1)]]+=1
            countSum['col-'+str(counter),rows[(len(rows)-1)]]+=float(rows[counter])
        else:
            numCount['col-'+str(counter),rows[(len(rows)-1)]]+=1                            
            

for keys in countSum:
    if(countSum[(keys)]==0):
        countSum[(keys)]=1


"""Compute mean value"""

for keys in countSum:
    if is_number(countSum[(keys)]):
        meanValue[(keys)]=round(countSum[(keys)])/round(numCount[(keys)])

        
# for rows in arr1:
# #     predictCounts[rows[len(rows)-1]]+=1
#     for counter in range(len(rows)):
#         if is_number(rows[counter]):
#             if rows[counter]!=0:
#                 predictCounts['col-'+str(counter),'1']+=1
#                 jointCount['col-'+str(counter),'1',rows[(len(rows)-1)]]+=1
#             else:
#                 predictCounts['col'+str(counter),'0']=1
#                 jointCount['col-'+str(counter),'0',rows[(len(rows)-1)]]=1
# #             featureCounts['col-'+str(counter),rows[counter]]+=1
# #         elif is_number(rows[counter]) and (rows[counter])<=1:
# #             print l[counter]
#         else:
#             predictCounts[rows[counter]]+=1                            # Count the number of normal and abnormal events with counter=len(rows), ie last column of table
#             columns['col-'+str(counter),rows[counter]]+=1
#             featureCounts[('col-'+str(counter),rows[counter],rows[(len(rows)-1)])]+=1
#             if counter!=(len(rows)-1):                              #excludes the column with attribute decision of normal or attack
#                 jointCount[(rows[counter],rows[(len(rows)-1)])]+=1


"""Compute the probability values"""
          
# for key in columns:
#     probClass[(key)]=round(columns[(key)])/round(len(data))
    
for rows in data:
    for counter in range(len(rows)):
        if is_number(rows[counter]):
            probClass['col-'+str(counter),'1']=round(predictCounts['col-'+str(counter),'1'])/round(len(data))
            probClass['col-'+str(counter),'0']=round(predictCounts['col-'+str(counter),'0'])/round(len(data))
        else:
            probClass[rows[counter]]=round(predictCounts[rows[counter]])/round(len(data))
            
#             predictCounts[rows[counter]]+=1                            # Count the number of normal and abnormal events with counter=len(rows), ie last column of table
#             columns['col-'+str(counter),rows[counter]]+=1
#             featureCounts[('col-'+str(counter),rows[counter],rows[(len(rows)-1)])]+=1
#             if counter!=(len(rows)-1):                              #excludes the column with attribute decision of normal or attack
#                 jointCount[(rows[counter],rows[(len(rows)-1)])]+=1


"""Compute joint probability"""
for key in jointCount:
    for attr in classAttribute:
        jointProbClass[(key)]=round(jointCount[(key)])/round(predictCounts[(attr)])
        

"""Predict the class for given new instance"""

newInstance=collections.defaultdict(int)

# get the value of the instance
#X=[0,tcp,http,SF,239,968,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,3,3,0,0,0,0,1,0,0,3,239,1,0,0.33,0.03,0,0,0,0]

"""Classify the given instance"""
# for col in range(len(X)):
#     for atr in classAttribute:
#         predictClass[atr,X[col]]*=jointProbClass[X[col],atr]
#         
# predictResult=max(predictClass)


    



# for rows in data:
#     for counter in range(len(rows)):
#         if counter in coln:
#             conProbClass['col-'+str(counter),rows[len(rows)-1]]=round(np.exp(-np.square(x-mu[counter])/2*np.square(sd[counter])))/np.sqrt(2*np.pi*np.square(sd[counter]))
