from __future__ import division
from operator import itemgetter, attrgetter
import os
import sys
import math

print "program begins..."
print "Updated on 2013/07/31 night by Wei at school."
print "The sole purpose of this program is to generate the BLUE curve with the combination of the 1st factor"
print "Definition of BLUE curve:NOT yet filled"
print "The probability bucketing here is the 3rd factor in our formula"
# The variable for TOPKLabel
# TOPKLabel can be selected from the following:
#                                  "TOP10000",     "TOP1000",      "TOP100",       "TOP10"
# corresponds with following data: inputFileNameD, inputFileNameC, inputFileNameB, inputFileNameA
TOPKLabel = "TOP10" 
print "This is the model for predicting",TOPKLabel


#########################################################################################Adding component begins...since 2013/07/31
# Load the two auxiliary files that define the 1st factor of the formula:
#   file 1: a Good-Turing estimation table mapping term frequency -> probability
#   file 2: a freq-of-freq table listing, for each frequency, the query terms
#           that occur with that frequency

# key: # of times this object (term) appears in the training queries
# value: the probability that this term will occur in the next query
freq1stFactorProbabilityDict = {}

# key: the terms which have been seen in the training queries
# value: which freq (bucket) the term belongs to
termsWithCorrespondingSpeciesBelongingToDict = {}

inputAuxFileName1 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/effectToMake1StFactor/good_turing_estimation_output_for_terms_0_1_95K_95%_probabilityInQueryAdded_20130731"
inputAuxFileName2 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/effectToMake1StFactor/freqOfFreqInQueries_0_1_95K_95%_with_query_terms_appended_sortedByFreqR"

# File 1 layout: a 4-line header to skip, then space-separated rows with the
# frequency in column 0 and the probability in column 5.
# "with" guarantees the handle is closed even if a row fails to parse.
with open(inputAuxFileName1, "r") as inputAuxFileHandler1:
    # skip 4 not related (header) lines
    for _ in range(4):
        inputAuxFileHandler1.readline()
    # iterate the file lazily instead of materializing it with readlines()
    for line in inputAuxFileHandler1:
        lineElements = line.strip().split(" ")
        currentFreq = int(lineElements[0])
        currentProbability = float(lineElements[5])
        if currentProbability != 0:
            if currentFreq not in freq1stFactorProbabilityDict:
                freq1stFactorProbabilityDict[currentFreq] = currentProbability
            else:
                # each frequency must occur at most once in the estimation file
                print "system error"
                # sys.exit instead of the interactive-only builtin exit()
                sys.exit(1)

# File 2 layout: space-separated rows "freq numOfTerms term1 term2 ...".
numOfFreq = 0
with open(inputAuxFileName2, "r") as inputAuxFileHandler2:
    for line in inputAuxFileHandler2:
        lineElements = line.strip().split(" ")
        freq = int(lineElements[0])
        numOfTerms = int(lineElements[1])
        if numOfTerms != 0:
            numOfFreq += 1
            # sanity check: the declared term count must match the row's tail
            if numOfTerms == len(lineElements[2:]):
                for term in lineElements[2:]:
                    # first occurrence wins; later duplicates are ignored
                    if term not in termsWithCorrespondingSpeciesBelongingToDict:
                        termsWithCorrespondingSpeciesBelongingToDict[term] = freq
            else:
                print "critical error, mark1"
        # rows with numOfTerms == 0 carry no terms and need no processing
#########################################################################################Adding component ends.since 2013/07/31



# Input selection: the active model is chosen by uncommenting exactly one
# step1/step2 pair below. Only the Model19 pair is currently active.

# ------>The following are for the small training data set
# step1: the set of file located at pangolin:
# inputFileNameA = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/MLRelated/Model15/probabilityDistribution_TOP10.txt"
# inputFileNameB = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/MLRelated/Model15/probabilityDistribution_TOP100.txt"
# inputFileNameC = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/MLRelated/Model15/probabilityDistribution_TOP1000.txt"
# inputFileNameD = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/MLRelated/Model15/probabilityDistribution_TOP10000.txt"

# step2: CURRENT set of comparison files at pangolin:
# inputArffTestingFileNameONLYOne = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/MLRelated/Model15/Training_Set_2013_07_15_tail_10000_for_testing.arff"

# ------>The following are for the larger training data set
# Model17 for large data set:
# step1: the set of file located at pangolin:
# inputFileNameA = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/MLRelated/Model17/TOPK_sorted_by_queryID/probabilityDistribution_TOP10_sorted_by_queryID_testing.txt"
# inputFileNameB = "N/A"
# inputFileNameC = "N/A"
# inputFileNameD = "N/A"

# step2: CURRENT set of comparison files at pangolin:
# inputArffTestingFileNameONLYOne = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/MLRelated/Model17/TOPK_sorted_by_queryID/Training_Set_2013_07_19_sorted_by_queryID_middle_DOT1M_testing.arff"

# Model18 for large data set:
# step1: the set of file located at pangolin:
# inputFileNameA = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/MLRelated/Model18/TOPK_sorted_by_queryID/probabilityDistribution_TOP10_sorted_by_queryID_testing.txt"
# inputFileNameB = "N/A"
# inputFileNameC = "N/A"
# inputFileNameD = "N/A"

# step2: CURRENT set of comparison files at pangolin:
# inputArffTestingFileNameONLYOne = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/MLRelated/Model18/TOPK_sorted_by_queryID/Training_Set_2013_07_19_sorted_by_queryID_middle_DOT1M_testing.arff"

# Model19 for large data set (ACTIVE):
# step1: the set of file located at pangolin:
inputFileNameA = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/MLRelated/Model19/TOPK_sorted_by_queryID/probabilityDistribution_TOP10_sorted_by_queryID_testing.txt"
inputFileNameB = "N/A"
inputFileNameC = "N/A"
inputFileNameD = "N/A"

# step2: CURRENT set of comparison files at pangolin:
inputArffTestingFileNameONLYOne = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/MLRelated/Model19/TOPK_sorted_by_queryID/Training_Set_2013_07_19_sorted_by_queryID_middle_DOT1M_testing.arff"




# OLD set of comparison files
# inputArffTestingFileNameA = "/home/diaosi/gov2ClearYourMindAndDoItAgain/MLRelated/Model15/Training_Set_2013_07_15_TOP10_for_programming_testing.arff"
# inputArffTestingFileNameB = "/home/diaosi/gov2ClearYourMindAndDoItAgain/MLRelated/Model15/Training_Set_2013_07_15_TOP100_for_programming_testing.arff"
# inputArffTestingFileNameC = "/home/diaosi/gov2ClearYourMindAndDoItAgain/MLRelated/Model15/Training_Set_2013_07_15_TOP1000_for_programming_testing.arff"
# inputArffTestingFileNameD = "/home/diaosi/gov2ClearYourMindAndDoItAgain/MLRelated/Model15/Training_Set_2013_07_15_TOP10000_for_programming_testing.arff"

# Both handles are consumed line-by-line in the main loop below and closed
# after it finishes; they must stay open for the rest of the script.
inputFileHandler1 = open(inputFileNameA,"r")
inputFileHandler2 = open(inputArffTestingFileNameONLYOne,"r")

################################
# Bucketing state.

# Collects the combined (1st factor * 3rd factor) probability of every
# posting processed by the main loop; sorted and reported at the end.
buckets100ArrayCombineFirstANDSecondFactorProbabilityList = []

# buckets100ArrayThirdFactorCounterDict
# key: the lower bound of each probability bucket as a 2-decimal string,
#      "0.00", "0.01", ..., "0.99" (100 buckets of width 0.01)
# value: a tuple pair where
#   first  element: how many postings are in this bucket
#   second element: how many of those postings carry the positive TOP-K label
buckets100ArrayThirdFactorCounterDict = {}
for bucketIndex in range(0, 100):
    # Derive each key from the integer index rather than repeatedly adding
    # 0.01 to a float accumulator -- index-derived keys cannot drift.
    keyInString = "{0:.2f}".format(bucketIndex / 100.0)
    buckets100ArrayThirdFactorCounterDict[keyInString] = (0, 0)
################################

# Skip the 26-line ARFF header (relation/attribute declarations) of the
# testing file so the handler is positioned at the first data row.
# NOTE(review): assumes the header is exactly 26 lines -- confirm against the
# active .arff file if the model changes.
for i in range(0,26):
    inputFileHandler2.readline()

# for debug
# print len(inputFileHandler1.readlines())
# print len(inputFileHandler2.readlines())


# Let's compare the answer one by one
# Prime the loop: read the first prediction line and its matching ARFF row.
currentLineFromFile1 = inputFileHandler1.readline()
currentLineFromFile2 = inputFileHandler2.readline()

# Confusion-matrix counters, accumulated over every posting in the main loop.
numOfPositiveInstances = 0
numOfNegativeInstances = 0

numOfCorrectlyClassifiedInstances = 0
numOfIncorrectlyClassifiedInstances = 0

numOfCorrectlyClassifiedPositiveInstances = 0
numOfCorrectlyClassifiedNegativeInstances = 0

numOfIncorrectlyClassifiedPositiveInstances = 0
numOfIncorrectlyClassifiedNegativeInstances = 0

while currentLineFromFile1:
    currentLineFromFile1Elements = currentLineFromFile1.strip().split(" ")
    currentLineFromFile2Elements = currentLineFromFile2.strip().split(" ")
    # used for getting the query content
    currentQIDInStringFormat = currentLineFromFile2Elements[1]
    currentRankingIndex = int(currentLineFromFile2Elements[0])
    currentTerm = currentLineFromFile2Elements[4]
    
    # select the right index value.
    # here, if index = 0, the probability will be predicted probability made into the TOP-K
    
    third_factor_probability_value = float(currentLineFromFile1Elements[0])
    first_AND_third_factor_combined_probability_value = 0.0
    first_factor_probability_value = 0.0
    if currentTerm in termsWithCorrespondingSpeciesBelongingToDict:
        first_factor_probability_value = freq1stFactorProbabilityDict[ termsWithCorrespondingSpeciesBelongingToDict[currentTerm] ] 
    else:
        first_factor_probability_value = freq1stFactorProbabilityDict[0]
    first_AND_third_factor_combined_probability_value = first_factor_probability_value * third_factor_probability_value
    
    # for debug
    '''
    print "currentTerm:",currentTerm
    print "first_factor_probability_value:",first_factor_probability_value
    print "third_factor_probability_value:",third_factor_probability_value
    print "first_AND_third_factor_combined_probability_value:",first_AND_third_factor_combined_probability_value
    print
    '''
    buckets100ArrayCombineFirstANDSecondFactorProbabilityList.append(first_AND_third_factor_combined_probability_value)
    
    thePredictedClassValueInFloat = float(currentLineFromFile1Elements[2])
    predictedClassLabelInString = ""
    if TOPKLabel == "TOP10":
        actualClassLabelInString = currentLineFromFile2Elements[-5]
    elif TOPKLabel == "TOP100":
        actualClassLabelInString = currentLineFromFile2Elements[-4]
    elif TOPKLabel == "TOP1000":
        actualClassLabelInString = currentLineFromFile2Elements[-3]
    elif TOPKLabel == "TOP10000":
        actualClassLabelInString = currentLineFromFile2Elements[-2]
    
    if thePredictedClassValueInFloat == 1.0:
        # NOT in TOP10
        predictedClassLabelInString = "False"
    elif thePredictedClassValueInFloat == 0.0:
        # in TOP10
        predictedClassLabelInString = "True"

    # print "actualClassLabelInString:",actualClassLabelInString
    # print "predictedClassLabelInString:",predictedClassLabelInString
    
    if actualClassLabelInString == predictedClassLabelInString:
        numOfCorrectlyClassifiedInstances += 1
        if actualClassLabelInString == "True":
            numOfPositiveInstances += 1
            numOfCorrectlyClassifiedPositiveInstances += 1
        elif actualClassLabelInString == "False":
            numOfNegativeInstances += 1
            numOfCorrectlyClassifiedNegativeInstances += 1
    else:
        numOfIncorrectlyClassifiedInstances += 1
        if actualClassLabelInString == "True":
            numOfPositiveInstances += 1
            numOfIncorrectlyClassifiedPositiveInstances += 1
        elif actualClassLabelInString == "False":
            numOfNegativeInstances += 1
            numOfIncorrectlyClassifiedNegativeInstances += 1        
    
    #######################################################
    # print "third_factor_probability_value:",third_factor_probability_value
    probability_value_left_ONLY_two_digit_in_float_format = float( "{0:.2f}".format(third_factor_probability_value) )
    
    # deal with the rounding problem
    if probability_value_left_ONLY_two_digit_in_float_format > third_factor_probability_value:
        current_posting_key_in_float_format = probability_value_left_ONLY_two_digit_in_float_format - 0.01
    else:
        current_posting_key_in_float_format = probability_value_left_ONLY_two_digit_in_float_format
    
    # print "current_posting_key_in_float_format:",current_posting_key_in_float_format
    current_posting_key_in_string_format = "{0:.2f}".format(current_posting_key_in_float_format)
    (currentBucketCounter,currentBucketTOPKLabelCounter) = buckets100ArrayThirdFactorCounterDict[current_posting_key_in_string_format]
    
    # increase the currentBucketCounter by one cause the predicted probablity belongs to this bucket
    currentBucketCounter += 1
    
    if actualClassLabelInString == "True":
        currentBucketTOPKLabelCounter += 1 
    else:
        # just do nothing if actualClassLabelInString != "True"
        pass
    
    newPairValueTuple = (currentBucketCounter,currentBucketTOPKLabelCounter)
    buckets100ArrayThirdFactorCounterDict[current_posting_key_in_string_format] = newPairValueTuple
    #######################################################
    currentLineFromFile1 = inputFileHandler1.readline()
    currentLineFromFile2 = inputFileHandler2.readline()
    
inputFileHandler1.close()
inputFileHandler2.close()

print "num Of Positive Instances:",numOfPositiveInstances
print "num Of Negative Instances:",numOfNegativeInstances
print "num Of Correctly Classified Instances:",numOfCorrectlyClassifiedInstances
print "num Of Incorrectly Classified Instances:",numOfIncorrectlyClassifiedInstances
print "num Of Correctly Classified Positive Instances:",numOfCorrectlyClassifiedPositiveInstances
print "num Of Correctly Classified Negative Instances:",numOfCorrectlyClassifiedNegativeInstances
print "num Of Incorrectly Classified Positive Instances:",numOfIncorrectlyClassifiedPositiveInstances
print "num Of Incorrectly Classified Negative Instances:",numOfIncorrectlyClassifiedNegativeInstances

##################################
# print "final check begins..."
baseValue = 0
keyInFloat = baseValue
totalNumOfInstancesInTheBuckets = 0
numOfPositiveInstancesInTheBuckets = 0
print "probability_lower_bound num_of_instances_predicted_in_this_bucket num_of_instances_actually_in_TOPK rate"
for i in range(0,100):
    keyInString = "{0:.2f}".format(keyInFloat)
    (value1,value2) = buckets100ArrayThirdFactorCounterDict[keyInString]
    totalNumOfInstancesInTheBuckets += value1
    numOfPositiveInstancesInTheBuckets += value2
    if value1 == 0.0:
        valueRatio = 0.0
    else:
        valueRatio = value2/value1
    print keyInString,value1,value2,valueRatio
    keyInFloat += 0.01

print "Total Number of Instances In The Buckets:",totalNumOfInstancesInTheBuckets
print "number Of Positive Instances(TOPK) In The Buckets:",numOfPositiveInstancesInTheBuckets
# print "final check ends."

buckets100ArrayCombineFirstANDSecondFactorProbabilityList.sort(cmp=None, key=None, reverse=True)
TOP1PercentLowerBoundIndex = int( len(buckets100ArrayCombineFirstANDSecondFactorProbabilityList) * 0.01 )
TOP10PercentLowerBoundIndex = int( len(buckets100ArrayCombineFirstANDSecondFactorProbabilityList) * 0.1 ) 
Top50PercentLowerBoundIndex = int( len(buckets100ArrayCombineFirstANDSecondFactorProbabilityList) * 0.5 )
print
print "len(buckets100ArrayCombineFirstANDSecondFactorProbabilityList):",len(buckets100ArrayCombineFirstANDSecondFactorProbabilityList)
print "max combine 1st AND 3rd factor probability:",buckets100ArrayCombineFirstANDSecondFactorProbabilityList[0]
print "TOP1PercentLowerBound:",buckets100ArrayCombineFirstANDSecondFactorProbabilityList[TOP1PercentLowerBoundIndex]
print "TOP10PercentLowerBound:",buckets100ArrayCombineFirstANDSecondFactorProbabilityList[TOP10PercentLowerBoundIndex]
print "TOP50PercentLowerBound:",buckets100ArrayCombineFirstANDSecondFactorProbabilityList[Top50PercentLowerBoundIndex]
print "min combine 1st AND 3rd factor probability:",buckets100ArrayCombineFirstANDSecondFactorProbabilityList[-1]
##################################
print "program ends."
