# Updated by Wei 2013/07/23 afternoon at school
# The sole purpose of this program is to generate the connected-postings buckets, to show how many queries can survive the pruning operations.

from __future__ import division
from operator import itemgetter, attrgetter
import os
import sys
import math

def get_probablity(valueTuple):
    """Return the model-predicted probability that a posting makes the TOP-K.

    Applies the current TOP10 logistic-regression model: a weighted sum of
    the ten posting features plus an intercept, pushed through the logistic
    (sigmoid) function.

    valueTuple -- a 10-tuple of floats, in this exact order:
        0: partial BM25 score component part 1 (IDF)
        1: partial BM25 score component part 2 (TF)
        2: partial BM25 score
        3: length of the posting's inverted list
        4: f_d_t (term frequency in the document)
        5: doc_len (document length in words)
        6: term frequency in the training head-95K queries
        7: term frequency in the collection
        8: posting rank in doc
        9: posting rank in list

    Returns a float in (0, 1).
    """
    # current TOP10 prediction model weights, one per feature in the same
    # order as the tuple above; the intercept multiplies the constant 1
    intercept = -5.66860601155366
    weights = (
        0.7859618832630518,       # partial BM25 component 1 (IDF)
        1.0561102619894307,       # partial BM25 component 2 (TF)
        -0.06647681560102658,     # partial BM25 score
        -1.1073691594385867E-8,   # length of the inverted index
        -9.313567074454872E-4,    # term freq in doc (f_d_t)
        -1.486537851743183E-5,    # doc length in words
        -3.779843056413164E-5,    # term freq in training head-95K queries
        3.1870676569686953E-12,   # term freq in collection
        8.195911362040283E-6,     # posting rank in doc
        6.528019473067528E-8,     # posting rank in list
    )
    matrixMultiplicationScore = intercept + sum(
        weight * value for (weight, value) in zip(weights, valueTuple))

    # Updated by Wei 2013/07/17 night:
    # For this formula, I need to clearly understand when it is 1 - sth,
    # when it is just sth.
    #
    # 1 - 1/(1 + e^x) == e^x/(1 + e^x) == 1/(1 + e^-x)  (the sigmoid of x).
    # BUGFIX: the original single-branch form called math.exp(x) directly,
    # which raises OverflowError for large positive x (~x > 709).  Pick the
    # algebraically equivalent branch whose math.exp argument is always
    # non-positive, so overflow can never happen.
    if matrixMultiplicationScore >= 0:
        probabilityGivenTheQueryTerms = 1.0 / (1.0 + math.exp(-matrixMultiplicationScore))
    else:
        expScore = math.exp(matrixMultiplicationScore)
        probabilityGivenTheQueryTerms = expScore / (1.0 + expScore)

    return probabilityGivenTheQueryTerms

print "program begins..."
##############################################
# load the p(t) into memory
# key: term t (maybe in the whole universe/or ONLY query terms)
# value: p(t)
# NOTE(review): termProbabilityDict is filled and its size printed, but it is
# never read again anywhere below in this script -- looks like a leftover
# from an earlier version; confirm before removing.

termProbabilityDict = {}

# input format (per the filename's description): space-separated columns,
# column 0 = the query term, column 3 = its "2D" estimated probability
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/queryTermsFrom100KWithTheirTrueProbablityAndOurOwnModelPredictedProbablity1D_2D_GoodTuringProbabilityAdded20130429"
inputFileHandler = open(inputFileName,"r")
# skip the headline
inputFileHandler.readline()


for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    currentTerm = lineElements[0]
    current2DProbability = float(lineElements[3])
    # first occurrence wins: a duplicate term keeps its earlier probability
    if currentTerm not in termProbabilityDict:
        termProbabilityDict[currentTerm] = current2DProbability

print "len(termProbabilityDict):",len(termProbabilityDict)
inputFileHandler.close()
##############################################

# key: queryIDInStringFormat
# value: NO USE
# (serves as a seen-set: presence of a qid means that query's bucket has
# already been updated in the main loop below)
queryWhichHasAlreadyBucketDict = {}
processingLineNumberIndex = 0
##############################################
# preload the connected postings into the dict
# key: trecID_term
# value: ()
# the value is the 10-feature tuple consumed by get_probablity(), built from
# the space-separated training-file columns listed in the triple-quoted note
# inside the loop
connectedPostingsDict = {}
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/MLRelated/Model15/Connected_Postings_Training_Set_2013_07_15_tfqAdded_tfcAdded_postingRankInDocAdd_postingRankInListAdd_percentageForPostingRankInDoc_percentageForPostingRankInList_classLabelsAdded_WITH_headline.train"
inputFileHandler = open(inputFileName,"r")

# skip the headline
inputFileHandler.readline()

for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    externalTrecID = lineElements[2]
    term = lineElements[4]
    key = externalTrecID + "_" + term
    
    '''
    # the valueTuple format
    valueOfPartialBM25ScoreComponentPart1_IDF = float(currentLineFromFile2Elements[5])
    valueOfPartialBM25ScoreComponentPart2_TF = float(currentLineFromFile2Elements[6])
    valueOfPartialBM25Score = float(currentLineFromFile2Elements[7])
    valueOfCurrentPostingLengthOfTheInvertedList = float(currentLineFromFile2Elements[8])
    f_d_t = float(currentLineFromFile2Elements[9])
    doc_len = float(currentLineFromFile2Elements[10])
    valueOfcurrentPostingTermFreqInQueries = float(currentLineFromFile2Elements[13])
    valueOfCurrentPostingTermFreqInCollection = float(currentLineFromFile2Elements[14])
    valueOfPostingRankInDoc = float(currentLineFromFile2Elements[15])
    valueOfPostingRankInList = float(currentLineFromFile2Elements[16])
    '''
    
    # columns 5-10 and 13-16, in the exact order get_probablity() expects
    valueTuple = (float(lineElements[5]), float(lineElements[6]), float(lineElements[7]), float(lineElements[8]), float(lineElements[9]), float(lineElements[10]), float(lineElements[13]), float(lineElements[14]), float(lineElements[15]), float(lineElements[16]))
    # first occurrence wins for a duplicate trecID_term key
    if key not in connectedPostingsDict:
        connectedPostingsDict[key] = valueTuple

print "len(connectedPostingsDict):",len(connectedPostingsDict)
inputFileHandler.close()
##############################################
##############################################
# Let's preload the queryDict
# key: qid in string format
# value: queryContent in string format (lower-cased, punctuation stripped)
queryDict = {}

# option1
# Updated by Wei 2013/02/22
# No need to include the gov2 150 human judge queries, but ONLY consider the 100K efficiency task queries will be enough.
# note: (Old answers)Now, it is NOT only gov2 150 queries but also has the efficiency task queries as well
# inputQueryFileName = "/data5/team/obukai/the_new_trip_of_feature_generation/gov2/final-150Gov2Queries.txt"

# option2
# note: Should contain all the queries in the 100K efficiency task query log
# file location at dodo:
inputQueryFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/100KQueries_head_95K"
inputQueryHandler = open(inputQueryFileName,"r")

# each line has the shape "qid:query content"
# NOTE(review): split(":") + elements[1] silently drops anything after a
# second ':' in the query content -- confirm the query log never contains one
for line in inputQueryHandler.readlines():
    elements = line.strip().split(":")
    queryID = elements[0]

    data = elements[1]
    data = data.lower()

    # sanitize: keep only digits (ord 48-57), letters (65-90 / 97-122) and
    # the space (32); every other character is replaced by a single space.
    # The replacement is same-length, so the index i stays valid while the
    # string is rebuilt in place.  (data was lower-cased above, so the
    # uppercase 65-90 range can no longer match -- harmless.)
    for i in range(0,len(data)):
        # print "data[i]:",ord(data[i])
        if not ( (ord(data[i]) >= 48 and ord(data[i]) < 58) or (ord(data[i]) >= 65 and ord(data[i]) < 91) or (ord(data[i]) >= 97 and ord(data[i]) < 123) or (ord(data[i]) == 32) ):
            # Just replace them with a space.
            data = data[:i] + " " + data[i+1:]

    queryContent = data

    # first occurrence wins for a duplicate query id
    if queryID not in queryDict:
        queryDict[queryID] = queryContent

print "----->","len(queryDict):",len(queryDict)
# sanity check: raises KeyError if qids '1' and '2' are missing from the file
print "queryDict['1']:",queryDict['1']
print "queryDict['2']:",queryDict['2']
##############################################

# The variable for TOPKLabel
# TOPKLabel can be selected from the following:
#                                  "TOP10000",     "TOP1000",      "TOP100",       "TOP10"
# corresponds with following data: inputFileNameD, inputFileNameC, inputFileNameB, inputFileNameA
# NOTE(review): TOPKLabel only selects which actual-label column is read in
# the main loop below; the prediction file opened as inputFileHandler1 is
# hard-wired to inputFileNameA (TOP10).  Keep the two in sync when switching K.
TOPKLabel = "TOP10" 
print "This is the model for predicting",TOPKLabel
# the set of file located at dodo:
# inputFileNameA = "/home/diaosi/gov2ClearYourMindAndDoItAgain/MLRelated/Model15/probabilityDistribution_TOP10.txt"
# inputFileNameB = "/home/diaosi/gov2ClearYourMindAndDoItAgain/MLRelated/Model15/probabilityDistribution_TOP100.txt"
# inputFileNameC = "/home/diaosi/gov2ClearYourMindAndDoItAgain/MLRelated/Model15/probabilityDistribution_TOP1000.txt"
# inputFileNameD = "/home/diaosi/gov2ClearYourMindAndDoItAgain/MLRelated/Model15/probabilityDistribution_TOP10000.txt"

# the set of file located at pangolin:
inputFileNameA = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/MLRelated/Model15/probabilityDistribution_TOP10.txt"
inputFileNameB = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/MLRelated/Model15/probabilityDistribution_TOP100.txt"
inputFileNameC = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/MLRelated/Model15/probabilityDistribution_TOP1000.txt"
inputFileNameD = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/MLRelated/Model15/probabilityDistribution_TOP10000.txt"

# CURRENT set of comparison files at dodo:
# inputArffTestingFileNameONLYOne = "/home/diaosi/gov2ClearYourMindAndDoItAgain/MLRelated/Model15/Training_Set_2013_07_15_tail_10000_for_testing.arff"
# CURRENT set of comparison files at pangolin:
inputArffTestingFileNameONLYOne = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/MLRelated/Model15/Training_Set_2013_07_15_tail_10000_for_testing.arff"

# OLD set of comparison files
# inputArffTestingFileNameA = "/home/diaosi/gov2ClearYourMindAndDoItAgain/MLRelated/Model15/Training_Set_2013_07_15_TOP10_for_programming_testing.arff"
# inputArffTestingFileNameB = "/home/diaosi/gov2ClearYourMindAndDoItAgain/MLRelated/Model15/Training_Set_2013_07_15_TOP100_for_programming_testing.arff"
# inputArffTestingFileNameC = "/home/diaosi/gov2ClearYourMindAndDoItAgain/MLRelated/Model15/Training_Set_2013_07_15_TOP1000_for_programming_testing.arff"
# inputArffTestingFileNameD = "/home/diaosi/gov2ClearYourMindAndDoItAgain/MLRelated/Model15/Training_Set_2013_07_15_TOP10000_for_programming_testing.arff"

# handler1: per-line predicted probabilities/classes for the TOP10 model
# handler2: the matching ARFF testing file with the raw posting features
inputFileHandler1 = open(inputFileNameA,"r")
inputFileHandler2 = open(inputArffTestingFileNameONLYOne,"r")

################################
# init the variable buckets100ArrayCounterForIndividualPostingDict
# key:   probability lower bound as a 2-decimal string: "0.00", "0.01", ... "0.99"
# value: (how many postings landed in this bucket,
#         how many of those carry the positive TOP-K label)
buckets100ArrayCounterForIndividualPostingDict = {}
baseBeginningPoint = 0.0
stepGapInFloat = 0.01
keyInFloat = baseBeginningPoint
tempCounter = 0

# both counters of every bucket start at zero
tupleValuePair = (0,0)
for bucketIndex in range(100):
    tempCounter += 1
    bucketKey = "{0:.2f}".format(keyInFloat)
    # setdefault keeps an existing entry untouched, same as the original
    # "if key not in dict" guard
    buckets100ArrayCounterForIndividualPostingDict.setdefault(bucketKey, tupleValuePair)
    keyInFloat += stepGapInFloat

# (debug aid: tempCounter and len(buckets100ArrayCounterForIndividualPostingDict)
# should both be 100 here)
################################

################################
# init the variable buckets100ArrayCounterForQueriesDictForProbabilityGivenQueryTerms
# key:   probability lower bound as a 2-decimal string: "0.00" ... "0.99"
# value: how many queries whose minimum connected-posting probability falls
#        into this bucket (filled by the main loop)
buckets100ArrayCounterForQueriesDictForProbabilityGivenQueryTerms = {}
baseBeginningPoint = 0.0
stepGapInFloat = 0.01
keyInFloat = baseBeginningPoint
tempCounter = 0

while tempCounter < 100:
    tempCounter += 1
    bucketKey = "{0:.2f}".format(keyInFloat)
    if bucketKey not in buckets100ArrayCounterForQueriesDictForProbabilityGivenQueryTerms:
        buckets100ArrayCounterForQueriesDictForProbabilityGivenQueryTerms[bucketKey] = 0
    keyInFloat += stepGapInFloat
################################

################################
# init the variable buckets100ArrayCounterForQueriesDictForProbabilityNOTGivenQueryTerms
# key:   probability lower bound as a 2-decimal string: "0.00" ... "0.99"
# value: per-bucket query counter, starting at 0
# build all 100 zeroed buckets in one shot; at two decimal places,
# bucketIndex * 0.01 formats to the same key the step-accumulating loop
# style produces
buckets100ArrayCounterForQueriesDictForProbabilityNOTGivenQueryTerms = dict(
    ("{0:.2f}".format(bucketIndex * 0.01), 0) for bucketIndex in range(100)
)
# keep the bucket-walk bookkeeping names defined, as the other init
# sections do
baseBeginningPoint = 0.0
stepGapInFloat = 0.01
tempCounter = 100
keyInFloat = baseBeginningPoint + tempCounter * stepGapInFloat
################################


# skip the 26-line ARFF header of the testing feature file so the handler
# points at the first data line
for i in range(0,26):
    inputFileHandler2.readline()

# for debug
# (NOTE: if re-enabled, these readlines() calls would consume both files
# entirely and leave nothing for the main loop below)
# print len(inputFileHandler1.readlines())
# print len(inputFileHandler2.readlines())


# Let's compare the answer one by one
# the two files are assumed line-aligned: line i of the prediction file
# describes the same posting as line i of the feature file
currentLineFromFile1 = inputFileHandler1.readline()
currentLineFromFile2 = inputFileHandler2.readline()

# confusion-matrix style counters for predicted vs actual class labels
numOfPositiveInstances = 0
numOfNegativeInstances = 0

numOfCorrectlyClassifiedInstances = 0
numOfIncorrectlyClassifiedInstances = 0

numOfCorrectlyClassifiedPositiveInstances = 0
numOfCorrectlyClassifiedNegativeInstances = 0

numOfIncorrectlyClassifiedPositiveInstances = 0
numOfIncorrectlyClassifiedNegativeInstances = 0

# main loop: walk the prediction file (file1) and the feature/ARFF file
# (file2) in lockstep, one posting per line pair
while currentLineFromFile1:
    # print "processingLineNumberIndex:",processingLineNumberIndex
    currentLineFromFile1Elements = currentLineFromFile1.strip().split(" ")
    currentLineFromFile2Elements = currentLineFromFile2.strip().split(" ")
    # used for getting the query content
    currentRankingIndex = int(currentLineFromFile2Elements[0])
    currentQIDInStringFormat = currentLineFromFile2Elements[1]
    currentTrecIDInStringFormat = currentLineFromFile2Elements[2]
    currentTerm = currentLineFromFile2Elements[4]
    # select the right index value.
    # here, if index = 0, the probability will be predicted probability made into the TOP-K
    probability_value = float(currentLineFromFile1Elements[0])
    thePredictedClassValueInFloat = float(currentLineFromFile1Elements[2])
    predictedClassLabelInString = ""
    # the actual class label lives in a different negative-indexed column of
    # the feature file depending on which TOP-K model is evaluated
    if TOPKLabel == "TOP10":
        actualClassLabelInString = currentLineFromFile2Elements[-5]
    elif TOPKLabel == "TOP100":
        actualClassLabelInString = currentLineFromFile2Elements[-4]
    elif TOPKLabel == "TOP1000":
        actualClassLabelInString = currentLineFromFile2Elements[-3]
    elif TOPKLabel == "TOP10000":
        actualClassLabelInString = currentLineFromFile2Elements[-2]
    
    # map the numeric predicted class to the string labels used by the
    # feature file: 1.0 -> "False" (NOT in TOP-K), 0.0 -> "True" (in TOP-K)
    if thePredictedClassValueInFloat == 1.0:
        # NOT in TOP10
        predictedClassLabelInString = "False"
    elif thePredictedClassValueInFloat == 0.0:
        # in TOP10
        predictedClassLabelInString = "True"

    # print "actualClassLabelInString:",actualClassLabelInString
    # print "predictedClassLabelInString:",predictedClassLabelInString
    
    # update the confusion-matrix counters
    if actualClassLabelInString == predictedClassLabelInString:
        numOfCorrectlyClassifiedInstances += 1
        if actualClassLabelInString == "True":
            numOfPositiveInstances += 1
            numOfCorrectlyClassifiedPositiveInstances += 1
        elif actualClassLabelInString == "False":
            numOfNegativeInstances += 1
            numOfCorrectlyClassifiedNegativeInstances += 1
    else:
        numOfIncorrectlyClassifiedInstances += 1
        if actualClassLabelInString == "True":
            numOfPositiveInstances += 1
            numOfIncorrectlyClassifiedPositiveInstances += 1
        elif actualClassLabelInString == "False":
            numOfNegativeInstances += 1
            numOfIncorrectlyClassifiedNegativeInstances += 1        
    
    #######################################################
    # bucket the file-supplied probability into its 0.01-wide bucket, keyed
    # by the bucket's LOWER bound as a 2-decimal string
    # print "probability_value:",probability_value
    probability_value_left_ONLY_two_digit_in_float_format = float( "{0:.2f}".format(probability_value) )
    
    # deal with the rounding problem
    # ("{0:.2f}" rounds to nearest; when it rounded UP past the true value,
    # step one bucket back down so the key stays a true lower bound)
    if probability_value_left_ONLY_two_digit_in_float_format > probability_value:
        current_posting_key_in_float_format = probability_value_left_ONLY_two_digit_in_float_format - 0.01
    else:
        current_posting_key_in_float_format = probability_value_left_ONLY_two_digit_in_float_format
    
    # print "current_posting_key_in_float_format:",current_posting_key_in_float_format
    current_posting_key_in_string_format = "{0:.2f}".format(current_posting_key_in_float_format)
    (currentBucketCounter,currentBucketTOPKLabelCounter) = buckets100ArrayCounterForIndividualPostingDict[current_posting_key_in_string_format]
    
    # for debug ONLY
    # print "currentLineFromFile1:",currentLineFromFile1.strip()
    # print "currentLineFromFile2:",currentLineFromFile2.strip()
    # print "probability_value from file(offline):",probability_value
    
    probabilityGivenTheQueryTerms = 0.0
    
    # recompute the probability online from the raw feature columns 5-10 and
    # 13-16 of the feature file (same layout as connectedPostingsDict values).
    # NOTE(review): this online value is only consulted by the commented-out
    # debug prints below -- the bucket update uses probability_value read
    # from the prediction file.
    valueOfPartialBM25ScoreComponentPart1_IDF = float(currentLineFromFile2Elements[5])
    valueOfPartialBM25ScoreComponentPart2_TF = float(currentLineFromFile2Elements[6])
    valueOfPartialBM25Score = float(currentLineFromFile2Elements[7])
    valueOfCurrentPostingLengthOfTheInvertedList = float(currentLineFromFile2Elements[8])
    f_d_t = float(currentLineFromFile2Elements[9])
    doc_len = float(currentLineFromFile2Elements[10])
    valueOfcurrentPostingTermFreqInQueries = float(currentLineFromFile2Elements[13])
    valueOfCurrentPostingTermFreqInCollection = float(currentLineFromFile2Elements[14])
    valueOfPostingRankInDoc = float(currentLineFromFile2Elements[15])
    valueOfPostingRankInList = float(currentLineFromFile2Elements[16])
    
    valueTuple = (valueOfPartialBM25ScoreComponentPart1_IDF,valueOfPartialBM25ScoreComponentPart2_TF,valueOfPartialBM25Score,valueOfCurrentPostingLengthOfTheInvertedList,f_d_t,doc_len,valueOfcurrentPostingTermFreqInQueries,valueOfCurrentPostingTermFreqInCollection,valueOfPostingRankInDoc,valueOfPostingRankInList)
    probabilityGivenTheQueryTerms = get_probablity(valueTuple)
    
    # for debug ONLY
    # print "probabilityGivenTheQueryTerms(online):",probabilityGivenTheQueryTerms
    # print "queryDict[currentQIDInStringFormat]:",queryDict[currentQIDInStringFormat]
    
    # increase the currentBucketCounter by one cause the predicted probablity belongs to this bucket
    currentBucketCounter += 1
    
    if actualClassLabelInString == "True":
        currentBucketTOPKLabelCounter += 1 
    else:
        # just do nothing if actualClassLabelInString != "True"
        pass
    
    # write the updated pair back (tuples are immutable, so rebuild it)
    newPairValueTuple = (currentBucketCounter,currentBucketTOPKLabelCounter)
    buckets100ArrayCounterForIndividualPostingDict[current_posting_key_in_string_format] = newPairValueTuple
    
    # the first time we see a qid on a positive-label line, bucket that query
    # by the MINIMUM predicted probability over all its connected postings
    if currentQIDInStringFormat not in queryWhichHasAlreadyBucketDict and actualClassLabelInString == "True":
        # do the bucket operation for the current query and update the data structure 
        queryWhichHasAlreadyBucketDict[currentQIDInStringFormat] = 1
        queryTerms = queryDict[currentQIDInStringFormat].strip().split(" ")
        # for debug
        print "positive label term:",currentTerm
        print "queryContent:",queryDict[currentQIDInStringFormat]
        min_probability_value = 1.0
        for term in queryTerms:
            # split(" ") on multi-space content yields empty strings; skip them
            if term.strip() != "":
                lookUpKey = currentTrecIDInStringFormat + "_" + term
                if lookUpKey not in connectedPostingsDict:
                    # a query term with no connected posting is unexpected --
                    # abort.  NOTE(review): prefer sys.exit(1) over the
                    # site-provided exit(1) in non-interactive scripts.
                    print "lookUpKey:",lookUpKey
                    exit(1)
                else:
                    probabilityGivenTheQueryTerms = get_probablity(connectedPostingsDict[lookUpKey])
                    print lookUpKey,probabilityGivenTheQueryTerms
                    if min_probability_value >= probabilityGivenTheQueryTerms:
                        min_probability_value = probabilityGivenTheQueryTerms
            else:
                pass
        print "min_probability_value:",min_probability_value
        print
        # same truncate-to-lower-bound bucketing as for individual postings
        min_probability_value_left_ONLY_two_digit_in_float_format = float( "{0:.2f}".format(min_probability_value) )
    
        # deal with the rounding problem
        if min_probability_value_left_ONLY_two_digit_in_float_format > min_probability_value:
            current_posting_key_in_float_format = min_probability_value_left_ONLY_two_digit_in_float_format - 0.01
        else:
            current_posting_key_in_float_format = min_probability_value_left_ONLY_two_digit_in_float_format
        
        # print "current_posting_key_in_float_format:",current_posting_key_in_float_format
        current_posting_key_in_string_format = "{0:.2f}".format(current_posting_key_in_float_format)
        
        # update this variable
        # print "min_probability_value_left_ONLY_two_digit_in_float_format:",min_probability_value_left_ONLY_two_digit_in_float_format
        # print "min_probability_value:",min_probability_value
        # print "current_posting_key_in_float_format:",current_posting_key_in_float_format
        # print "current_posting_key_in_string_format_for_queries_dict:",current_posting_key_in_string_format
        # print
        buckets100ArrayCounterForQueriesDictForProbabilityGivenQueryTerms[current_posting_key_in_string_format] += 1
        

    #######################################################
    # advance both files together to keep the line alignment
    currentLineFromFile1 = inputFileHandler1.readline()
    currentLineFromFile2 = inputFileHandler2.readline()
           
    processingLineNumberIndex += 1
    

inputFileHandler1.close()
inputFileHandler2.close()

# classification summary (confusion-matrix counts) over all compared lines
print "num Of Positive Instances:",numOfPositiveInstances
print "num Of Negative Instances:",numOfNegativeInstances
print "num Of Correctly Classified Instances:",numOfCorrectlyClassifiedInstances
print "num Of Incorrectly Classified Instances:",numOfIncorrectlyClassifiedInstances
print "num Of Correctly Classified Positive Instances:",numOfCorrectlyClassifiedPositiveInstances
print "num Of Correctly Classified Negative Instances:",numOfCorrectlyClassifiedNegativeInstances
print "num Of Incorrectly Classified Positive Instances:",numOfIncorrectlyClassifiedPositiveInstances
print "num Of Incorrectly Classified Negative Instances:",numOfIncorrectlyClassifiedNegativeInstances

##################################
# report 1: per-bucket posting counts -- for each 0.01-wide probability
# bucket, print how many postings landed there and what fraction of them
# actually carry the positive TOP-K label
# print "final check begins..."
baseValue = 0
keyInFloat = baseValue
totalNumOfInstancesInTheBuckets = 0
numOfPositiveInstancesInTheBuckets = 0
print "probability_lower_bound num_of_instances_predicted_in_this_bucket num_of_instances_actually_in_TOPK rate"
for i in range(0,100):
    keyInString = "{0:.2f}".format(keyInFloat)
    (value1,value2) = buckets100ArrayCounterForIndividualPostingDict[keyInString]
    totalNumOfInstancesInTheBuckets += value1
    numOfPositiveInstancesInTheBuckets += value2
    # guard against dividing by an empty bucket
    if value1 == 0.0:
        valueRatio = 0.0
    else:
        valueRatio = value2/value1
    print keyInString,value1,value2,valueRatio
    keyInFloat += 0.01

print "Total Number of Instances In The Buckets:",totalNumOfInstancesInTheBuckets
print "number Of Positive Instances(TOPK) In The Buckets:",numOfPositiveInstancesInTheBuckets
print
# print "final check ends."
##################################

##################################
# report 2: per-bucket query counts -- queries bucketed by the minimum
# predicted probability over their connected postings (filled in the main loop)
# print "final check begins..."
baseValue = 0
keyInFloat = baseValue
totalNumOfQueriesInTheBuckets = 0
print "probability_lower_bound total_num_of_queries_in_the_specific_bucket"
for i in range(0,100):
    keyInString = "{0:.2f}".format(keyInFloat)
    value1 = buckets100ArrayCounterForQueriesDictForProbabilityGivenQueryTerms[keyInString]
    totalNumOfQueriesInTheBuckets += value1
    print keyInString,value1
    keyInFloat += 0.01
print "Total Number of Queries In The Buckets:",totalNumOfQueriesInTheBuckets
print
# print "final check ends."
##################################

##################################
# report 3: same layout for the "NOT given query terms" dict.
# NOTE(review): that dict is initialized but never incremented anywhere in
# this script, so every bucket prints 0 -- looks like a planned-but-
# unfinished counterpart to report 2.
# print "final check begins..."
baseValue = 0
keyInFloat = baseValue
totalNumOfQueriesInTheBuckets = 0
print "probability_lower_bound total_num_of_queries_in_the_specific_bucket"
for i in range(0,100):
    keyInString = "{0:.2f}".format(keyInFloat)
    value1 = buckets100ArrayCounterForQueriesDictForProbabilityNOTGivenQueryTerms[keyInString]
    totalNumOfQueriesInTheBuckets += value1
    print keyInString,value1
    keyInFloat += 0.01
print "Total Number of Queries In The Buckets:",totalNumOfQueriesInTheBuckets
print
# print "final check ends."
##################################

# at the end, I can finally close this original raw result file
print "program ends."
