import perceptron
import perceptron_helpers
from math import sqrt

# --- Experiment configuration ---
# Feature vector width and number of outcome classes fed to every
# perceptron constructed below.
eventSize = 28   #number of features
outcomeSize = 2 #number of outcomes, in our case always =2
#Training Files below
# Previously used training/test file pairs, kept for reference:
#trainingEvent1 = 'perceptron_trainning.txt'
#trainingEvent2 = 'testData/test2.txt'
#testEvent = 'perceptron_learning.txt'
#trainingEvent1 = 'first_999_36_features.txt'
#testEvent = 'second_9999_features.txt'
#testEvent = 'first_999_36_features.txt'
#trainingEvent1 = 'trainx.txt'
#trainingEvent1 = 'traind.txt'
#trainingEvent1 = 'train_eq.txt'
#testEvent = 'test_eq.txt'
trainingEvent1 = 'first_unbiased_features.txt'  # Change this to the training data.
testEvent = 'second_unbiased_features.txt'      # Change this to the testing data.

#create perceptron
percep = perceptron.Perceptron(eventSize, outcomeSize, optimism=0.0)  # Increase optimism to increase the chance of predicting the runner-up.
#do the training!
perceptron_helpers.train(percep, trainingEvent1, eventSize, outcomeSize)
#perceptron_helpers.train(percep, trainingEvent2, eventSize, outcomeSize)
correct, count, tp, fn, fp  = perceptron_helpers.test(percep, testEvent, eventSize, outcomeSize)
tn = count-tp-fn-fp
denom = sqrt((tp+fp)*(tp+fn)*(tn+fp)*(tn+fn))
if denom == 0:
    denom = 1
mcc=(tp*tn-fp*fn)/denom

if tp:
    precision = tp / (tp + fp)
    recall = tp / (tp + fn)
else:
    precision = 0.0
    recall = 0.0
f_score = perceptron_helpers.getFscore(precision, recall)
 
print 'Perceptron Predictions:'
print 'Prediction: ' + str(correct)
print 'Total Correct Predictions: ' + str(sum(correct))
print 'Total Predictions Made   : ' + str(count)
for index, value in enumerate(correct):
    if index is 0:
        print 'Number of Correct "Un-reciprocated" Predictions: ' + str(value)
    if index is 1:
        print 'Number of Correct "Reciprocated"    Predictions: ' + str(value)
print 'True positives: ', tp
print 'False positives:', fp
print 'True negatives: ', count-tp-fp-fn
print 'False negatives:', fn
print 'Perceptron Accuracy: ' + str(100.0 * sum(correct) / count) + '%'
#print 'maxScore = ' + str(maxScore)
print 'F score is: ' + str(f_score)
print 'MCC is:', str(mcc)
print 'Weights are [%s]' % ', '.join(map(str, percep._weights[0]))

# AveragePerceptron
percep2 = perceptron.AveragedPerceptron(eventSize, outcomeSize)

perceptron_helpers.train(percep2, trainingEvent1, eventSize, outcomeSize)

correct2, count2, tp2, fn2, fp2 = perceptron_helpers.test(percep2, testEvent,eventSize,outcomeSize)
tn2 = count2-tp2-fn2-fp2
denom = sqrt((tp2+fp2)*(tp2+fn2)*(tn2+fp2)*(tn2+fn2))
if denom == 0:
    denom = 1
mcc2=(tp2*tn2-fp2*fn2)/denom

if tp2:
    precision2 = tp2 / (tp2 + fp2)
    recall2 = tp2 /(tp2 + fn2)
else:
    precision2 = 0.0
    recall2 = 0.0
f_score2 = perceptron_helpers.getFscore(precision2, recall2)

print ''
print ''
print 'Average Perceptron Predictions:'
print 'Prediction: ' + str(correct2)
print 'Total Correct Predictions: ' + str(sum(correct2))
print 'Total Predictions Made   : ' + str(count2)
for index, value in enumerate(correct2):
    if index is 0:
        print 'Number of Correct "Un-reciprocated" Predictions: ' + str(value)
    if index is 1:
        print 'Number of Correct "Reciprocated"    Predictions: ' + str(value)
print 'True positives: ', tp2
print 'False positives:', fp2
print 'True negatives: ', count2-tp2-fp2-fn2
print 'False negatives:', fn2
print 'Perceptron Accuracy: ' + str(100.0 * sum(correct2) / count2) + '%'
print 'F score is:' + str(f_score2)
print 'MCC is:', str(mcc2)
print 'Weights are [%s]' % ', '.join(map(str, percep2._weights[0]))


# SparseAveragePerceptron
percep3 = perceptron.SparseAveragedPerceptron(eventSize, outcomeSize)

perceptron_helpers.train(percep3, trainingEvent1, eventSize, outcomeSize)

correct3, count3, tp3, fn3, fp3 = perceptron_helpers.test(percep3, testEvent,eventSize,outcomeSize)
tn3 = count3-tp3-fn3-fp3
denom = sqrt((tp3+fp3)*(tp3+fn3)*(tn3+fp3)*(tn3+fn3))
if denom == 0:
    denom = 1
mcc3=(tp3*tn3-fp3*fn3)/denom

if tp3:
    precision3 = tp3 / (tp3 + fp3)
    recall3 = tp3 /(tp3 + fn3)
else:
    precision3 = 0.0
    recall3 = 0.0
f_score3 = perceptron_helpers.getFscore(precision3, recall3)

print ''
print ''
print 'SparseAverage Perceptron Predictions:'
print 'Prediction: ' + str(correct3)
print 'Total Correct Predictions: ' + str(sum(correct3))
print 'Total Predictions Made   : ' + str(count3)
for index, value in enumerate(correct3):
    if index is 0:
        print 'Number of Correct "Un-reciprocated" Predictions: ' + str(value)
    if index is 1:
        print 'Number of Correct "Reciprocated"    Predictions: ' + str(value)
print 'True positives: ', tp3
print 'False positives:', fp3
print 'True negatives: ', count3-tp3-fp3-fn3
print 'False negatives:', fn3
print 'Perceptron Accuracy: ' + str(100.0 * sum(correct3) / count3) + '%'
print 'F score is:' + str(f_score3)
print 'MCC is:', str(mcc3)
print 'Weights are [%s]' % ', '.join(map(str, percep3._weights[0]))