"""@author: sarjanshrestha"""

import collections
import itertools
import operator
from functools import reduce

import numpy as np


def prod(factors):
    """Return the product of *factors* (1 for an empty iterable).

    FIX: uses functools.reduce (see import at top of file) so the code
    also runs on Python 3, where the bare `reduce` builtin was removed.
    On Python 2 the behavior is identical to the original.
    """
    return reduce(operator.mul, factors, 1)

def is_number(s):
    """Return True if *s* can be converted to a float, else False.

    FIX: float() raises TypeError (not ValueError) for non-string,
    non-numeric inputs such as None or a list; catch both so the helper
    never raises and simply answers False for anything unconvertible.
    """
    try:
        float(s)
        return True
    except (TypeError, ValueError):
        return False
    
    
    
# def normalize_columns(arr):
#     rows, cols = arr.shape
#     for col in xrange(cols):
#         arr[:,col] = arr[:,col]/abs(arr[:,col]).max()


# NOTE(review): hard-coded Windows path; '\p' and '\i' happen not to be escape
# sequences so the literal survives, but a raw string r'...' would be safer.
f=open('c:\python27\intrDetTrainingDataSet.txt')
f.seek(0)                                       # rewind (redundant right after open)
#count={}
# Accumulators for the counting/probability passes below.
columns=collections.defaultdict(int)            # ('col-i', value) -> occurrence count
probClass=collections.defaultdict(int)          # value or ('col-i','0'/'1') -> prior probability
jointCount=collections.defaultdict(int)         # (feature..., class-label) -> joint count
jointProbClass=collections.defaultdict(int)     # (feature..., class-label) -> joint probability
conProbClass=collections.defaultdict(int)       # conditional probabilities (never filled in this chunk)
classAttribute=[]                               # distinct class labels seen in the last column
features={}                                     # (unused here; kept for the commented-out ARFF reader)
featureNameList=[]                              # (unused here; kept for the commented-out ARFF reader)
featureCounts= collections.defaultdict(int)     # ('col-i', value, class-label) -> count
featureVectors=[]                               # untouched copy of the parsed rows
predictCounts=collections.defaultdict(int)      # value (incl. class labels) -> count
predictResult={}
data=[]                                         # parsed rows; mutated in place by the normalization pass
coln=[]                                         # indices of columns detected as continuous-valued
mu=[]                                           # per-continuous-column means
sd=[]                                           # per-continuous-column std deviations (never filled here)


"""
# for line in f:
#     if line[0]!='@':
#         featureVectors.append(line.strip('').strip('\n').lower().split(','))
#     else:
#         if line.strip(' ').lower().find('@data')==-1 and (not line.lower().startswith('@relation')):
#             featureNameList.append(line.strip(' ').split()[1])
#             features[featureNameList[len(featureNameList)-1]]=line[line.find('{')+1:line.find('}')].strip(' ').split(',')
#                 
"""
"""Read the training files"""
for line in f:
    vL=len(line.split(','))                 #count the number of columns in table
    break


# for i in range(vL):
#     columns['col-'+str(i)]+=1
f.seek(0)
for line in f:
    data.append(line.strip().strip('\n').split(','))
    featureVectors.append(line.strip().strip('\n').split(','))
    
    
# Scan the rows: record which columns hold continuous values (numeric and
# exceeding 1 somewhere) and collect the distinct class labels (last column).
for rows in data:
    for counter in range(len(rows)):
        # BUG FIX: the original tested `rows[counter] > 1` on the raw STRING;
        # in Python 2 a str always compares greater than an int, so every
        # numeric column was flagged as continuous.  Compare the numeric value.
        if is_number(rows[counter]) and float(rows[counter]) > 1 and (counter not in coln):
            coln.append(counter)                            #columns with continuous values
        if rows[len(rows)-1] not in classAttribute:         #distinct classes to predict
            classAttribute.append(rows[len(rows)-1])
"""normalize the data in columns with continous attributes"""                
arr=np.zeros((len(data),len(coln)))



for rows,i in itertools.izip(data,range(len(data))):
    for col,j in itertools.izip(coln,range(len(coln))):
        arr[i][j]=rows[col]

#normalize the value in array
rowz,colz=arr.shape

for col in xrange(colz):
    if arr[:,col].max()!=0:                             #avoiding the divide by zero for the column with zero value
        arr[:,col] = arr[:,col]/abs(arr[:,col].max())

for rows,i in itertools.izip(data,range(len(data))):
    for col,j in itertools.izip(coln,range(len(coln))):
        rows[col]=arr[i][j]
        if rows[col]<0.5:
            rows[col]=0
        else:
            rows[col]=1
 
"""Train the bayes_network"""            
for rows in data:
#     predictCounts[rows[len(rows)-1]]+=1
    for counter in range(len(rows)):
        if is_number(rows[counter]):
            if rows[counter]!=0:
                predictCounts['col-'+str(counter),'1']+=1
                jointCount['col-'+str(counter),'1',rows[(len(rows)-1)]]+=1
            else:
                predictCounts['col'+str(counter),'0']=1
                jointCount['col-'+str(counter),'0',rows[(len(rows)-1)]]=1
#             featureCounts['col-'+str(counter),rows[counter]]+=1
#         elif is_number(rows[counter]) and (rows[counter])<=1:
#             print l[counter]
        else:
            predictCounts[rows[counter]]+=1                            # Count the number of normal and abnormal events with counter=len(rows), ie last column of table
            columns['col-'+str(counter),rows[counter]]+=1
            featureCounts[('col-'+str(counter),rows[counter],rows[(len(rows)-1)])]+=1
            if counter!=(len(rows)-1):                              #excludes the column with attribute decision of normal or attack
                jointCount[(rows[counter],rows[(len(rows)-1)])]+=1


"""Compute the probability values"""
          
# for key in columns:
#     probClass[(key)]=round(columns[(key)])/round(len(data))
    
for rows in data:
    for counter in range(len(rows)):
        if is_number(rows[counter]):
            probClass['col-'+str(counter),'1']=round(predictCounts['col-'+str(counter),'1'])/round(len(data))
            probClass['col-'+str(counter),'0']=round(predictCounts['col-'+str(counter),'0'])/round(len(data))
        else:
            probClass[rows[counter]]=round(predictCounts[rows[counter]])/round(len(data))
            
#             predictCounts[rows[counter]]+=1                            # Count the number of normal and abnormal events with counter=len(rows), ie last column of table
#             columns['col-'+str(counter),rows[counter]]+=1
#             featureCounts[('col-'+str(counter),rows[counter],rows[(len(rows)-1)])]+=1
#             if counter!=(len(rows)-1):                              #excludes the column with attribute decision of normal or attack
#                 jointCount[(rows[counter],rows[(len(rows)-1)])]+=1
for i in coln:
    mu[i]=np.mean(data[:,i:(i+1)])

print mu

# for rows in data:
#     for counter in range(len(rows)):
#         if counter in coln:
#             conProbClass['col-'+str(counter),rows[len(rows)-1]]=round(np.exp(-np.square(x-mu[counter])/2*np.square(sd[counter])))/np.sqrt(2*np.pi*np.square(sd[counter]))

"""Compute joint probability"""
for key in jointCount:
    for attr in classAttribute:
        jointProbClass[(key)]=round(jointCount[(key)])/round(predictCounts[(attr)])
        


"""Predict the class for given new instance"""


# for i in X:
#     for counter in range(len(X)-2):
#         prob_predict[(X[len(X)-1],i)]*=prob_features[(X[counter],X[len(X)-1])]
#     #print probClass[(i)]    
#     prob_predict[(X[len(X)-1],i)]=prob_predict[(X[len(X)-1],i)]*probClass[(X[len(X)-1])]/probClass[(i)]   # compute posterior probability
#  
# predictResult=max(prob_predict)    
#  
# print predictResult   
# prob_predict[(X[len(X)-1],i)]*=probClass[X[len(X)-1]]/probClass[(X[counter])]
# print prob_predict


newInstance=collections.defaultdict(int)    # placeholder for the (unfinished) prediction stage below

# X=[0,'icmp','ecr_i','SF',1032,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,511,511,0,0,0,0,1,0,0,255,255,1,0,1,0,0,0,0,0]
# for dt in X:
# #     predictCounts[rows[len(rows)-1]]+=1
#     for counter in range(len(dt)):
#         if is_number(dt[counter]) and dt[counter]!=0:
#                 #normalize the value
#             
#             

                                                    
# #             featureCounts['col-'+str(counter),rows[counter]]+=1
# #         elif is_number(rows[counter]) and (rows[counter])<=1:
# #             print l[counter]

# print probClass
# # for fv in featureVectors:
# #     for a in range(len(fv)):
# #         predictCounts[fv[a]]+=1
# # X=['senior','medium','no','fair','c_yes']  
# X=['youth','medium','yes','fair','c_yes']                                    # given instance of new sample
# 
# # for fi in X:
# #     print featureCounts[(fi,X[len(X)-1])]
#     
# prob_features=collections.defaultdict(int)
#       
# for fi in X:
#     prob_features[(fi,X[len(X)-1])]=round(featureCounts[(fi,X[len(X)-1])])/(predictCounts[(X[len(X)-1])])
#     #print prob_features[(fi,X[len(X)-1])]
# #print predictCounts[X[len(X)-1]]
# 
# prob_predict=collections.defaultdict(int)

# 
# for i in X:
#     prob_predict[(X[len(X)-1],i)]=1
#     
