# Updated by Wei 2013/08/01 night at school

# Updated by Wei 2013/07/27 night at school
# Note: (1) this ONLY deals with the 3rd probability factor, not the 1st and 2nd probability factors

# Updated by Wei 2013/07/25 afternoon at school
# The sole purpose of this program is to generate the connected postings-related buckets, to show how many queries can survive the pruning operations

from __future__ import division
from operator import itemgetter, attrgetter
import os
import sys
import math

def get_probablity_given_query_terms(valueTuple):
    """Return the 3rd-factor probability for a single posting.

    This is the logistic-regression ("Model19") estimate of the probability
    that the posting makes it into the TOP10 results.  Only the 3rd
    probability factor of the overall formula is computed here; the 1st and
    2nd factors are handled by the caller.

    valueTuple is a 10-tuple of feature values, in this fixed order:
        0  IDF component of the partial BM25 score
        1  TF  component of the partial BM25 score
        2  partial BM25 score
        3  length of the posting's inverted list
        4  f_d_t   -- term frequency inside the document
        5  doc_len -- document length in words
        6  term frequency in the training head-95K queries
        7  term frequency in the collection
        8  posting rank within the document
        9  posting rank within the inverted list
    """
    (valueOfPartialBM25ScoreComponentPart1_IDF,
     valueOfPartialBM25ScoreComponentPart2_TF,
     valueOfPartialBM25Score,
     valueOfCurrentPostingLengthOfTheInvertedList,
     f_d_t,
     doc_len,
     valueOfcurrentPostingTermFreqInQueries,
     valueOfCurrentPostingTermFreqInCollection,
     valueOfPostingRankInDoc,
     valueOfPostingRankInList) = valueTuple

    # Model19: TOP10 prediction weights for the larger training data set
    # (training set of 2013/07/19).  In this model every feature except the
    # intercept and the two BM25 score components was dropped (weight 0.0);
    # the zero terms are kept below so that switching back to an older model
    # (Model15/17/18) is a pure weight substitution.
    intercept_weight_0                             = -5.729427121406548
    partialBM25ScoreComponentPart1_IDF_weight_1    =  0.7311304467682848
    partialBM25ScoreComponentPart2_TF_weight_2     =  0.9032571917744138
    partialBM25_weight_3                           =  0.0
    length_of_the_inverted_index_weight_4          =  0.0
    term_freq_in_doc_weight_5                      =  0.0
    doc_words_weight_6                             =  0.0
    term_freq_in_training_head95K_queries_weight_7 =  0.0
    term_freq_in_collection_weight_8               =  0.0
    posting_rank_in_doc_weight_9                   =  0.0
    posting_rank_in_list_weight_10                 =  0.0

    # Dot product of the weight vector with (1, features...).
    matrixMultiplicationScore  = intercept_weight_0
    matrixMultiplicationScore += partialBM25ScoreComponentPart1_IDF_weight_1    * valueOfPartialBM25ScoreComponentPart1_IDF
    matrixMultiplicationScore += partialBM25ScoreComponentPart2_TF_weight_2     * valueOfPartialBM25ScoreComponentPart2_TF
    matrixMultiplicationScore += partialBM25_weight_3                           * valueOfPartialBM25Score
    matrixMultiplicationScore += length_of_the_inverted_index_weight_4          * valueOfCurrentPostingLengthOfTheInvertedList
    matrixMultiplicationScore += term_freq_in_doc_weight_5                      * f_d_t
    matrixMultiplicationScore += doc_words_weight_6                             * doc_len
    matrixMultiplicationScore += term_freq_in_training_head95K_queries_weight_7 * valueOfcurrentPostingTermFreqInQueries
    matrixMultiplicationScore += term_freq_in_collection_weight_8               * valueOfCurrentPostingTermFreqInCollection
    matrixMultiplicationScore += posting_rank_in_doc_weight_9                   * valueOfPostingRankInDoc
    matrixMultiplicationScore += posting_rank_in_list_weight_10                 * valueOfPostingRankInList

    # Logistic link: 1 - 1/(1 + e^x) is algebraically the sigmoid 1/(1 + e^-x).
    # BUGFIX: the original computed math.exp(matrixMultiplicationScore)
    # directly, which raises OverflowError once the score exceeds ~709.
    # Branch on the sign so the exponent passed to math.exp is never positive.
    if matrixMultiplicationScore >= 0:
        third_factor_probability_value = 1.0 / (1.0 + math.exp(-matrixMultiplicationScore))
    else:
        expOfScore = math.exp(matrixMultiplicationScore)
        third_factor_probability_value = expOfScore / (1.0 + expOfScore)

    return third_factor_probability_value

print "program begins..."
print "Updated on 2013/08/01 night by Wei at school."
print "The sole purpose of this program is to generate the RED curve with the combination of 3 factors"
print "Definition of RED curve:NOT yet filled"
print "The probability bucketing here is the 3rd factor in our formula"

##############################################
# correspond to a bigger training data set(Updated on 2013/07/27 by Wei)
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/MLRelated/Model19/TrainingSetForTOPKMissingResults20130719_tfqAdded_tfcAdded_postingRankInDocAdd_postingRankInListAdd_percentageForPostingRankInDoc_percentageForPostingRankInList_classLabelsAdded_WITH_headline.train"
# correspond to a small training data set
# inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/MLRelated/Model15/TrainingSetForFixingTheMissingTOPKResults20130715_tfqAdded_tfcAdded_postingRankInDocAdd_postingRankInListAdd_percentageForPostingRankInDoc_percentageForPostingRankInList_classLabelsAdded_WITH_headline.train"
inputFileHandler = open(inputFileName,"r")

# skip the headline
inputFileHandler.readline()

'''
# the valueTuple format
valueOfPartialBM25ScoreComponentPart1_IDF = float(currentLineFromFile2Elements[5])
valueOfPartialBM25ScoreComponentPart2_TF = float(currentLineFromFile2Elements[6])
valueOfPartialBM25Score = float(currentLineFromFile2Elements[7])
valueOfCurrentPostingLengthOfTheInvertedList = float(currentLineFromFile2Elements[8])
f_d_t = float(currentLineFromFile2Elements[9])
doc_len = float(currentLineFromFile2Elements[10])
valueOfcurrentPostingTermFreqInQueries = float(currentLineFromFile2Elements[13])
valueOfCurrentPostingTermFreqInCollection = float(currentLineFromFile2Elements[14])
valueOfPostingRankInDoc = float(currentLineFromFile2Elements[15])
valueOfPostingRankInList = float(currentLineFromFile2Elements[16])
'''
# key: as the same key as the TOPKMissingResultsForTestingQueriesDict does
# value: 1 represent already used. 0 means no use
TOPKMissingResultsForTestingQueriesUsingRecordDict = {}
TOPKMissingResultsForTestingQueriesDict = {}

for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    # The key format are as following: selectedRankIndex_term_externalTrecID
    selectedRankIndex = lineElements[0]
    term = lineElements[4]
    queryID = lineElements[1]
    key = selectedRankIndex + "_" + term + "_" + queryID
    
    valueOfPartialBM25ScoreComponentPart1_IDF = float(lineElements[5])
    valueOfPartialBM25ScoreComponentPart2_TF = float(lineElements[6])
    valueOfPartialBM25Score = float(lineElements[7])
    valueOfCurrentPostingLengthOfTheInvertedList = float(lineElements[8])
    f_d_t = float(lineElements[9])
    doc_len = float(lineElements[10])
    valueOfcurrentPostingTermFreqInQueries = float(lineElements[13])
    valueOfCurrentPostingTermFreqInCollection = float(lineElements[14])
    valueOfPostingRankInDoc = float(lineElements[15])
    valueOfPostingRankInList = float(lineElements[16])    
    
    valueTuple = (valueOfPartialBM25ScoreComponentPart1_IDF,valueOfPartialBM25ScoreComponentPart2_TF,valueOfPartialBM25Score,valueOfCurrentPostingLengthOfTheInvertedList,f_d_t,doc_len,valueOfcurrentPostingTermFreqInQueries,valueOfCurrentPostingTermFreqInCollection,valueOfPostingRankInDoc,valueOfPostingRankInList)
    if key not in TOPKMissingResultsForTestingQueriesDict:
        TOPKMissingResultsForTestingQueriesDict[key] = valueTuple

print "len(TOPKMissingResultsForTestingQueriesDict):",len(TOPKMissingResultsForTestingQueriesDict)   
inputFileHandler.close()
##############################################



# key: queryIDInStringFormat
# value: NO USE
queryWhichHasAlreadyBucketDict = {}
processingLineNumberIndex = 0

##############################################
# Let's preload the queryDict
# key: qid in string format
# value: queryContent in string format
queryDict = {}

# option1
# Updated by Wei 2013/02/22
# No need to include the gov2 150 human judge queries, but ONLY consider the 100K efficiency task queries will be enough.
# note: (Old answers)Now, it is NOT only gov2 150 queries but also has the efficiency task queries as well
# inputQueryFileName = "/data5/team/obukai/the_new_trip_of_feature_generation/gov2/final-150Gov2Queries.txt"

# option2
# note: Should contain all the queries in the 100K efficiency task query log
# file location at dodo:
inputQueryFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/100KQueries_head_95K"
inputQueryHandler = open(inputQueryFileName,"r")

for line in inputQueryHandler.readlines():
    elements = line.strip().split(":")
    queryID = elements[0]

    data = elements[1]
    data = data.lower()

    for i in range(0,len(data)):
        # print "data[i]:",ord(data[i])
        if not ( (ord(data[i]) >= 48 and ord(data[i]) < 58) or (ord(data[i]) >= 65 and ord(data[i]) < 91) or (ord(data[i]) >= 97 and ord(data[i]) < 123) or (ord(data[i]) == 32) ):
            # Just replace them with a space.
            data = data[:i] + " " + data[i+1:]

    queryContent = data

    if queryID not in queryDict:
        queryDict[queryID] = queryContent

print "len(queryDict):",len(queryDict)
# print "queryDict['1']:",queryDict['1']
# print "queryDict['2']:",queryDict['2']
##############################################

# The variable for TOPKLabel
# TOPKLabel can be selected from the following:
#                                  "TOP10000",     "TOP1000",      "TOP100",       "TOP10"
# corresponds with following data: inputFileNameD, inputFileNameC, inputFileNameB, inputFileNameA
TOPKLabel = "TOP10" 
print "This is the model for predicting",TOPKLabel

#########################################################################################Adding component begins...since 2013/08/01
# key: queryLength
# value: probabilityOfKTermQuery
queryLengthProbabilityDict = {}
inputAuxFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/effectToMake1StFactor/queryLengthDistributionForHead95KQueries"
inputAuxFileHanlder = open(inputAuxFileName,"r")
# skip the headline
inputAuxFileHanlder.readline()

for line in inputAuxFileHanlder.readlines():
    lineElements = line.strip().split(" ")
    queryLength = int(lineElements[0])
    probabilityOfKTermQuery = float(lineElements[1])
    if queryLength not in queryLengthProbabilityDict:
        queryLengthProbabilityDict[queryLength] = probabilityOfKTermQuery

print "queryLengthProbabilityDict[1]:",queryLengthProbabilityDict[1]
print "len(queryLengthProbabilityDict):",len(queryLengthProbabilityDict)


# key: trecID
# value: tuple with Xdoc values
trecIDWithXdocValues = {}
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/MLRelated/Model19/TOPK_sorted_by_queryID/set_of_documents_with_their_Xdoc_values"
inputFileHandler = open(inputFileName,"r")
for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    trecID = lineElements[0]
    valuesTuple = (float(lineElements[1]),float(lineElements[2]),float(lineElements[3]),float(lineElements[4]))
    if trecID not in trecIDWithXdocValues:
        trecIDWithXdocValues[trecID] = valuesTuple
print "trecIDWithXdocValues['GX000-00-14963368']:",trecIDWithXdocValues['GX000-00-14963368']
inputFileHandler.close()




# key: # of times this object appears
# value: the probability that this term will occur in the next query
freq1stFactorProbabilityDict = {}

# key: the terms which have been seen in the training queries
# value: which freq it belongs to
termsWithCorrespondingSpeciesBelongingToDict = {}

inputAuxFileName1 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/effectToMake1StFactor/good_turing_estimation_output_for_terms_0_1_95K_95%_probabilityInQueryAdded_20130731"
inputAuxFileName2 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/effectToMake1StFactor/freqOfFreqInQueries_0_1_95K_95%_with_query_terms_appended_sortedByFreqR"
inputAuxFileHanlder1 = open(inputAuxFileName1,"r")
inputAuxFileHanlder2 = open(inputAuxFileName2,"r")

# skip 4 not related lines
inputAuxFileHanlder1.readline()
inputAuxFileHanlder1.readline()
inputAuxFileHanlder1.readline()
inputAuxFileHanlder1.readline()

for line in inputAuxFileHanlder1.readlines():
    lineElements = line.strip().split(" ")
    currentFreq = int(lineElements[0])
    currentProbability = float(lineElements[5])
    if currentProbability != 0:
        if currentFreq not in freq1stFactorProbabilityDict:
            freq1stFactorProbabilityDict[currentFreq] = currentProbability
        else:
            print "system error"
            exit(1)
'''
# for debug
print "len(freq1stFactorProbabilityDict):",len(freq1stFactorProbabilityDict)
print "freq1stFactorProbabilityDict[1]:",freq1stFactorProbabilityDict[1]
'''

numOfFreq = 0
for line in inputAuxFileHanlder2.readlines():
    lineElements = line.strip().split(" ")
    freq = int( lineElements[0] )
    numOfTerms = int( lineElements[1] )
    if numOfTerms != 0:
        numOfFreq += 1
        if numOfTerms == len(lineElements[2:]):
            for term in lineElements[2:]:
                if term not in termsWithCorrespondingSpeciesBelongingToDict:
                    termsWithCorrespondingSpeciesBelongingToDict[term] = freq
        else:
            print "critical error, mark1"
    else:
        # just do NOT need to be processed
        pass


# for debug       
# print "termsWithCorrespondingSpeciesBelongingToDict['of']:",termsWithCorrespondingSpeciesBelongingToDict['of']
# print "numOfFreq:",numOfFreq
print "len(freq1stFactorProbabilityDict):",len(freq1stFactorProbabilityDict)
print "len(termsWithCorrespondingSpeciesBelongingToDict):",len(termsWithCorrespondingSpeciesBelongingToDict)
print "termsWithCorrespondingSpeciesBelongingToDict['soalr']:",termsWithCorrespondingSpeciesBelongingToDict["soalr"]

inputAuxFileHanlder1.close()
inputAuxFileHanlder2.close()
#########################################################################################Adding component ends.since 2013/08/01








# Input-file selection.  Older model configurations are kept below as
# commented-out alternatives; the active one is Model19.
# ------>The following are for the small training data set
# step1: the set of file located at pangolin:
# inputFileNameA = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/MLRelated/Model15/probabilityDistribution_TOP10.txt"
# inputFileNameB = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/MLRelated/Model15/probabilityDistribution_TOP100.txt"
# inputFileNameC = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/MLRelated/Model15/probabilityDistribution_TOP1000.txt"
# inputFileNameD = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/MLRelated/Model15/probabilityDistribution_TOP10000.txt"

# step2: CURRENT set of comparison files at pangolin:
# inputArffTestingFileNameONLYOne = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/MLRelated/Model15/Training_Set_2013_07_15_tail_10000_for_testing.arff"

# ------>The following are for the larger training data set
# model17:
# step1: the set of file located at pangolin:
# inputFileNameA = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/MLRelated/Model17/TOPK_sorted_by_queryID/probabilityDistribution_TOP10_sorted_by_queryID_testing.txt"
# inputFileNameB = "N/A"
# inputFileNameC = "N/A"
# inputFileNameD = "N/A"

# step2: CURRENT set of comparison files at pangolin:
# inputArffTestingFileNameONLYOne = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/MLRelated/Model17/TOPK_sorted_by_queryID/Training_Set_2013_07_19_sorted_by_queryID_middle_DOT1M_testing.arff"

# model18:
# step1: the set of file located at pangolin:
# inputFileNameA = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/MLRelated/Model18/TOPK_sorted_by_queryID/probabilityDistribution_TOP10_sorted_by_queryID_testing.txt"
# inputFileNameB = "N/A"
# inputFileNameC = "N/A"
# inputFileNameD = "N/A"

# step2: CURRENT set of comparison files at pangolin:
# inputArffTestingFileNameONLYOne = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/MLRelated/Model18/TOPK_sorted_by_queryID/Training_Set_2013_07_19_sorted_by_queryID_middle_DOT1M_testing.arff"

# model19 (ACTIVE):
# step1: the set of file located at pangolin:
inputFileNameA = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/MLRelated/Model19/TOPK_sorted_by_queryID/probabilityDistribution_TOP10_sorted_by_queryID_testing.txt"
inputFileNameB = "N/A"
inputFileNameC = "N/A"
inputFileNameD = "N/A"

# step2: CURRENT set of comparison files at pangolin:
inputArffTestingFileNameONLYOne = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/MLRelated/Model19/TOPK_sorted_by_queryID/Training_Set_2013_07_19_sorted_by_queryID_middle_DOT1M_testing.arff"


# handler1: the classifier's probability-distribution output
# handler2: the matching ARFF testing rows (features + actual labels)
inputFileHandler1 = open(inputFileNameA,"r")
inputFileHandler2 = open(inputArffTestingFileNameONLYOne,"r")

################################
# init the variable buckets100ArrayThirdFactorCounterDict
# key: the lower bound of a probability bucket as a 2-decimal string:
#      "0.00", "0.01", ... "0.99" (100 buckets of width 0.01)
# value: plain int counter, starts at 0; incremented by the bucketing loop
#        further below (NOTE(review): an older comment described the value as
#        a tuple pair -- the code stores a single counter)
buckets100ArrayThirdFactorCounterDict = {}
baseBeginningPoint = 0.0
stepGapInFloat = 0.01
keyInFloat = baseBeginningPoint
tempCounter = 0

# Build the 100 bucket keys by repeated 0.01 steps, formatted to 2 decimals.
for i in range(0,100):
    tempCounter += 1
    keyInString = "{0:.2f}".format(keyInFloat)
    if keyInString not in buckets100ArrayThirdFactorCounterDict:
        buckets100ArrayThirdFactorCounterDict[keyInString] = 0
    keyInFloat += stepGapInFloat
################################
# Per-(query, rankIndex) minimum combined probabilities, appended by the
# main comparison loop below.
buckets100ArrayCombineFirstANDSecondFactorProbabilityList = []
buckets100ArrayCombineThreeFactorsProbabilityList = []


# The testing file is ARFF-formatted; presumably its first 26 lines are the
# header (relation/attribute declarations) -- skip them to reach the data rows.
for i in range(0,26):
    inputFileHandler2.readline()

# for debug
# print len(inputFileHandler1.readlines())
# print len(inputFileHandler2.readlines())


# Let's compare the answer one by one: file1 (predicted probabilities) and
# file2 (features + actual labels) are walked in lockstep, one line each.
currentLineFromFile1 = inputFileHandler1.readline()
currentLineFromFile2 = inputFileHandler2.readline()

# Confusion-matrix tallies, filled in by the comparison loop below.
numOfPositiveInstances = 0
numOfNegativeInstances = 0

numOfCorrectlyClassifiedInstances = 0
numOfIncorrectlyClassifiedInstances = 0

numOfCorrectlyClassifiedPositiveInstances = 0
numOfCorrectlyClassifiedNegativeInstances = 0

numOfIncorrectlyClassifiedPositiveInstances = 0
numOfIncorrectlyClassifiedNegativeInstances = 0

while currentLineFromFile1:
    # print "processingLineNumberIndex:",processingLineNumberIndex
    currentLineFromFile1Elements = currentLineFromFile1.strip().split(" ")
    currentLineFromFile2Elements = currentLineFromFile2.strip().split(" ")
    # used for getting the query content
    currentRankingIndex = int(currentLineFromFile2Elements[0])
    currentQIDInStringFormat = currentLineFromFile2Elements[1]
    currentTrecIDInStringFormat = currentLineFromFile2Elements[2]
    currentDocIDInStringFormat = currentLineFromFile2Elements[3]
    currentTerm = currentLineFromFile2Elements[4]
    # select the right index value.
    # here, if index = 0, the probability will be predicted probability made into the TOP-K
    probability_value = float(currentLineFromFile1Elements[0])
    thePredictedClassValueInFloat = float(currentLineFromFile1Elements[2])
    predictedClassLabelInString = ""
    if TOPKLabel == "TOP10":
        actualClassLabelInString = currentLineFromFile2Elements[-5]
    elif TOPKLabel == "TOP100":
        actualClassLabelInString = currentLineFromFile2Elements[-4]
    elif TOPKLabel == "TOP1000":
        actualClassLabelInString = currentLineFromFile2Elements[-3]
    elif TOPKLabel == "TOP10000":
        actualClassLabelInString = currentLineFromFile2Elements[-2]
    
    if thePredictedClassValueInFloat == 1.0:
        # NOT in TOP10
        predictedClassLabelInString = "False"
    elif thePredictedClassValueInFloat == 0.0:
        # in TOP10
        predictedClassLabelInString = "True"

    # print "actualClassLabelInString:",actualClassLabelInString
    # print "predictedClassLabelInString:",predictedClassLabelInString
    
    if actualClassLabelInString == predictedClassLabelInString:
        numOfCorrectlyClassifiedInstances += 1
        if actualClassLabelInString == "True":
            numOfPositiveInstances += 1
            numOfCorrectlyClassifiedPositiveInstances += 1
        elif actualClassLabelInString == "False":
            numOfNegativeInstances += 1
            numOfCorrectlyClassifiedNegativeInstances += 1
    else:
        numOfIncorrectlyClassifiedInstances += 1
        if actualClassLabelInString == "True":
            numOfPositiveInstances += 1
            numOfIncorrectlyClassifiedPositiveInstances += 1
        elif actualClassLabelInString == "False":
            numOfNegativeInstances += 1
            numOfIncorrectlyClassifiedNegativeInstances += 1        
    
    #######################################################
    third_factor_probability_value = 0.0
    
    valueOfPartialBM25ScoreComponentPart1_IDF = float(currentLineFromFile2Elements[5])
    valueOfPartialBM25ScoreComponentPart2_TF = float(currentLineFromFile2Elements[6])
    valueOfPartialBM25Score = float(currentLineFromFile2Elements[7])
    valueOfCurrentPostingLengthOfTheInvertedList = float(currentLineFromFile2Elements[8])
    f_d_t = float(currentLineFromFile2Elements[9])
    doc_len = float(currentLineFromFile2Elements[10])
    valueOfcurrentPostingTermFreqInQueries = float(currentLineFromFile2Elements[13])
    valueOfCurrentPostingTermFreqInCollection = float(currentLineFromFile2Elements[14])
    valueOfPostingRankInDoc = float(currentLineFromFile2Elements[15])
    valueOfPostingRankInList = float(currentLineFromFile2Elements[16])
    
    valueTuple = (valueOfPartialBM25ScoreComponentPart1_IDF,valueOfPartialBM25ScoreComponentPart2_TF,valueOfPartialBM25Score,valueOfCurrentPostingLengthOfTheInvertedList,f_d_t,doc_len,valueOfcurrentPostingTermFreqInQueries,valueOfCurrentPostingTermFreqInCollection,valueOfPostingRankInDoc,valueOfPostingRankInList)
    third_factor_probability_value = get_probablity_given_query_terms(valueTuple)
    
    # for debug ONLY
    # print "currentTerm:",currentTerm
    # print "probability_value from file(offline):",probability_value
    # print "third_factor_probability_value(online):",third_factor_probability_value
    # print
    
    if currentQIDInStringFormat not in queryWhichHasAlreadyBucketDict and actualClassLabelInString == "True": 
        queryWhichHasAlreadyBucketDict[currentQIDInStringFormat] = 1
        queryTerms = queryDict[currentQIDInStringFormat].strip().split(" ")
        distinctQueryTermsDict = {}
        for term in queryTerms:
            if term.strip() != "":
                if term.strip() not in distinctQueryTermsDict:
                    distinctQueryTermsDict[ term.strip() ] = 1
                else:
                    pass
        # for debug
        '''
        print "*******************************************************"
        print "currentRankingIndexFromTestingFile:",currentRankingIndex
        print "currentQIDInStringFormatFromTestingFile:",currentQIDInStringFormat
        print "currentDocIDInStringFormatFromTestingFile:",currentDocIDInStringFormat
        print "positive class label term:",currentTerm
        print "queryContent:",queryDict[currentQIDInStringFormat]
        print "************************"
        '''
        
        # add the other missing results as well for this query
        # accessing the more OK data structure called: TOPKMissingResultsForTestingQueriesDict
        loopCounter = 0
        if TOPKLabel == "TOP10":
            loopCounter = 10
        elif TOPKLabel == "TOP100":
            loopCounter = 100
        elif TOPKLabel == "TOP1000":
            loopCounter = 1000
        elif TOPKLabel == "TOP10000":
            loopCounter = 10000
        
        for i in range(0,loopCounter):
            # for debug
            # print "processingRankingIndex:",i
            min_third_factor_probability_value = 1.0
            min_first_AND_third_factor_combined_probability_value = 1.0
            min_three_factors_combined_probability_value = 1.0
            num_of_valid_terms_in_current_query = 0
            doLoopFlag = True
            for term in distinctQueryTermsDict:
                if term.strip() != "":
                    num_of_valid_terms_in_current_query += 1            
                    lookUpKey = str(i) + "_" + term + "_" + currentQIDInStringFormat
                    if lookUpKey not in TOPKMissingResultsForTestingQueriesDict:
                        '''
                        print "--->[Maybe input file missing info error]"
                        print "--->can NOT find the lookUpKey:",lookUpKey
                        print "--->queryDict[currentQIDInStringFormat]:",queryDict[currentQIDInStringFormat]
                        print
                        '''
                        doLoopFlag = False
                    else:
                        if lookUpKey not in TOPKMissingResultsForTestingQueriesUsingRecordDict:
                            TOPKMissingResultsForTestingQueriesUsingRecordDict[lookUpKey] = 1
                        else:
                            # for debug
                            # print "the lookUpKey",lookUpKey,"has been used before."
                            exit(1)
                        
                        # The probability which the term appears in the next query
                        first_factor_probability_value = 0.0
                        # The second factor is independent of the query log
                        second_factor_probability_value = 0.0
                        # The third factor is the machine learned probability
                        third_factor_probability_value = 0.0
                        # compute the first_factor_probability_value
                        if term in termsWithCorrespondingSpeciesBelongingToDict:
                            first_factor_probability_value = freq1stFactorProbabilityDict[ termsWithCorrespondingSpeciesBelongingToDict[term] ] 
                        else:
                            first_factor_probability_value = freq1stFactorProbabilityDict[0]
                         
                        
                        # compute the second_factor_probability_value
                        if currentTrecIDInStringFormat in trecIDWithXdocValues:
                            (_,_,_,goodTuringXDocValue) = trecIDWithXdocValues[currentTrecIDInStringFormat]
                        else:
                            print "System Error, mark3"
                            exit(1)
                        
                        for currentQueryLength in queryLengthProbabilityDict:
                            partialProbability = queryLengthProbabilityDict[currentQueryLength] * math.pow(goodTuringXDocValue,currentQueryLength-1)
                            second_factor_probability_value += partialProbability
                            '''
                            print "currentQueryLength:",currentQueryLength
                            print "partialProbability:",partialProbability
                            print "math.pow(goodTuringXDocValue,currentQueryLength-1):",math.pow(goodTuringXDocValue,currentQueryLength-1)
                            print "second_factor_probability_value:",second_factor_probability_value
                            '''
 
                        # Third probability factor for this posting, scored by the
                        # prediction model over the posting's feature tuple.
                        third_factor_probability_value = get_probablity_given_query_terms(TOPKMissingResultsForTestingQueriesDict[lookUpKey])
                        first_AND_third_factor_combined_probability_value = first_factor_probability_value * third_factor_probability_value
                        three_factors_combined_probability_value = first_factor_probability_value * second_factor_probability_value * third_factor_probability_value
                        # for debug
                        # print lookUpKey,third_factor_probability_value,first_AND_third_factor_combined_probability_value

                        # Keep the running MINIMUM of each probability variant over
                        # the postings seen in this inner loop.
                        if min_third_factor_probability_value >= third_factor_probability_value:
                            min_third_factor_probability_value = third_factor_probability_value

                        if min_first_AND_third_factor_combined_probability_value >= first_AND_third_factor_combined_probability_value:
                            min_first_AND_third_factor_combined_probability_value = first_AND_third_factor_combined_probability_value

                        if min_three_factors_combined_probability_value >= three_factors_combined_probability_value:
                            min_three_factors_combined_probability_value = three_factors_combined_probability_value

            ##############################
            if doLoopFlag:
                # NOTE(review): despite its "...FirstANDSecondFactor..." name, this list
                # receives the combined 1st AND 3rd factor minimum (the final report's
                # labels also say "1st AND 3rd") — the variable name is misleading.
                buckets100ArrayCombineFirstANDSecondFactorProbabilityList.append(min_first_AND_third_factor_combined_probability_value)
                buckets100ArrayCombineThreeFactorsProbabilityList.append(min_three_factors_combined_probability_value)

                # for debug
                '''
                # print "currentTrecIDInStringFormat:",currentTrecIDInStringFormat
                # print "currentDocIDInStringFormat:",currentDocIDInStringFormat
                # print "min_third_factor_probability_value:",min_third_factor_probability_value
                # print "min_first_AND_third_factor_combined_probability_value:",min_first_AND_third_factor_combined_probability_value
                # print
                '''

                # Update the data structure : buckets100ArrayThirdFactorCounterDict
                # Map the minimum 3rd-factor probability to a 2-decimal bucket key.
                min_probability_value_left_ONLY_two_digit_in_float_format = float( "{0:.2f}".format(min_third_factor_probability_value) )

                # deal with the rounding problem: "{0:.2f}" rounds to nearest, so when
                # it rounded UP past the true value, step one bucket (0.01) back down —
                # the net effect is flooring into the bucket's lower bound.
                if min_probability_value_left_ONLY_two_digit_in_float_format > min_third_factor_probability_value:
                    current_posting_key_in_float_format = min_probability_value_left_ONLY_two_digit_in_float_format - 0.01
                else:
                    current_posting_key_in_float_format = min_probability_value_left_ONLY_two_digit_in_float_format

                # print "current_posting_key_in_float_format:",current_posting_key_in_float_format
                current_posting_key_in_string_format = "{0:.2f}".format(current_posting_key_in_float_format)

                # update this variable
                # print "min_probability_value_left_ONLY_two_digit_in_float_format:",min_probability_value_left_ONLY_two_digit_in_float_format
                # print "min_third_factor_probability_value:",min_third_factor_probability_value
                # print "current_posting_key_in_float_format:",current_posting_key_in_float_format
                # print "current_posting_key_in_string_format_for_queries_dict:",current_posting_key_in_string_format
                # print
                # The bucket counter is weighted by the number of valid query terms.
                buckets100ArrayThirdFactorCounterDict[current_posting_key_in_string_format] += 1 * num_of_valid_terms_in_current_query
            ##############################
        # for debug
        # the operation is to traverse all the entries from the 2 dicts
        # These three are only consumed by the commented-out dump below; they are
        # reset here on every pass so the dump can be re-enabled safely.
        baseValue = 0
        keyInFloat = baseValue
        totalNumOfPostingsInTheBuckets = 0

        # for debug (debug priority: high)
        '''
        print "debug begins..."
        for i in range(0,100):
            keyInString = "{0:.2f}".format(keyInFloat)
            value1 = buckets100ArrayThirdFactorCounterDict[keyInString]
            # print keyInString,value1
            if value1 != 0:
                totalNumOfPostingsInTheBuckets += value1
                print keyInString,value1
            keyInFloat += 0.01
        print "currentQIDInStringFormat:",currentQIDInStringFormat
        print "queryDict[currentQIDInStringFormat]:",queryDict[currentQIDInStringFormat]
        print "totalNumOfPostingsInTheBuckets:",totalNumOfPostingsInTheBuckets
        print "debug ends."
        print
        '''
    #######################################################
    # Advance both input streams in lockstep for the next outer-loop iteration.
    currentLineFromFile1 = inputFileHandler1.readline()
    currentLineFromFile2 = inputFileHandler2.readline()

    processingLineNumberIndex += 1

inputFileHandler1.close()
inputFileHandler2.close()

print "num Of Positive Instances:",numOfPositiveInstances
print "num Of Negative Instances:",numOfNegativeInstances
print "num Of Correctly Classified Instances:",numOfCorrectlyClassifiedInstances
print "num Of Incorrectly Classified Instances:",numOfIncorrectlyClassifiedInstances
print "num Of Correctly Classified Positive Instances:",numOfCorrectlyClassifiedPositiveInstances
print "num Of Correctly Classified Negative Instances:",numOfCorrectlyClassifiedNegativeInstances
print "num Of Incorrectly Classified Positive Instances:",numOfIncorrectlyClassifiedPositiveInstances
print "num Of Incorrectly Classified Negative Instances:",numOfIncorrectlyClassifiedNegativeInstances

##################################
# print "final check begins..."
baseValue = 0
keyInFloat = baseValue
totalNumOfPostingsInTheBuckets = 0
print "probability_lower_bound total_num_of_postings_in_the_specific_bucket"
for i in range(0,100):
    keyInString = "{0:.2f}".format(keyInFloat)
    value1 = buckets100ArrayThirdFactorCounterDict[keyInString]
    totalNumOfPostingsInTheBuckets += value1
    print keyInString,value1
    keyInFloat += 0.01
print "Total Number of Postings In The Buckets:",totalNumOfPostingsInTheBuckets
print
# print "final check ends."
##################################

buckets100ArrayCombineFirstANDSecondFactorProbabilityList.sort(cmp=None, key=None, reverse=True)
TOP1PercentLowerBoundIndex = int( len(buckets100ArrayCombineFirstANDSecondFactorProbabilityList) * 0.01 )
TOP5PercentLowerBoundIndex = int( len(buckets100ArrayCombineFirstANDSecondFactorProbabilityList) * 0.05 )
TOP10PercentLowerBoundIndex = int( len(buckets100ArrayCombineFirstANDSecondFactorProbabilityList) * 0.1 )
TOP20PercentLowerBoundIndex = int( len(buckets100ArrayCombineFirstANDSecondFactorProbabilityList) * 0.2 )
TOP30PercentLowerBoundIndex = int( len(buckets100ArrayCombineFirstANDSecondFactorProbabilityList) * 0.3 )
TOP40PercentLowerBoundIndex = int( len(buckets100ArrayCombineFirstANDSecondFactorProbabilityList) * 0.4 ) 
Top50PercentLowerBoundIndex = int( len(buckets100ArrayCombineFirstANDSecondFactorProbabilityList) * 0.5 )
Top60PercentLowerBoundIndex = int( len(buckets100ArrayCombineFirstANDSecondFactorProbabilityList) * 0.6 )
Top70PercentLowerBoundIndex = int( len(buckets100ArrayCombineFirstANDSecondFactorProbabilityList) * 0.7 )
Top80PercentLowerBoundIndex = int( len(buckets100ArrayCombineFirstANDSecondFactorProbabilityList) * 0.8 )
Top90PercentLowerBoundIndex = int( len(buckets100ArrayCombineFirstANDSecondFactorProbabilityList) * 0.9 )

print
print "len(buckets100ArrayCombineFirstANDSecondFactorProbabilityList):",len(buckets100ArrayCombineFirstANDSecondFactorProbabilityList)
print "max combine 1st AND 3rd factor probability:",buckets100ArrayCombineFirstANDSecondFactorProbabilityList[0]
print "TOP1PercentLowerBound:",buckets100ArrayCombineFirstANDSecondFactorProbabilityList[TOP1PercentLowerBoundIndex]
print "TOP5PercentLowerBound:",buckets100ArrayCombineFirstANDSecondFactorProbabilityList[TOP5PercentLowerBoundIndex]
print "TOP10PercentLowerBound:",buckets100ArrayCombineFirstANDSecondFactorProbabilityList[TOP10PercentLowerBoundIndex]
print "TOP20PercentLowerBound:",buckets100ArrayCombineFirstANDSecondFactorProbabilityList[TOP20PercentLowerBoundIndex]
print "TOP30PercentLowerBound:",buckets100ArrayCombineFirstANDSecondFactorProbabilityList[TOP30PercentLowerBoundIndex]
print "TOP40PercentLowerBound:",buckets100ArrayCombineFirstANDSecondFactorProbabilityList[TOP40PercentLowerBoundIndex]
print "TOP50PercentLowerBound:",buckets100ArrayCombineFirstANDSecondFactorProbabilityList[Top50PercentLowerBoundIndex]
print "TOP60PercentLowerBound:",buckets100ArrayCombineFirstANDSecondFactorProbabilityList[Top60PercentLowerBoundIndex]
print "TOP70PercentLowerBound:",buckets100ArrayCombineFirstANDSecondFactorProbabilityList[Top70PercentLowerBoundIndex]
print "TOP80PercentLowerBound:",buckets100ArrayCombineFirstANDSecondFactorProbabilityList[Top80PercentLowerBoundIndex]
print "TOP90PercentLowerBound:",buckets100ArrayCombineFirstANDSecondFactorProbabilityList[Top90PercentLowerBoundIndex]
print "min combine 1st AND 3rd factor probability:",buckets100ArrayCombineFirstANDSecondFactorProbabilityList[-1]

buckets100ArrayCombineThreeFactorsProbabilityList.sort(cmp=None, key=None, reverse=True)
TOP1PercentLowerBoundIndex = int( len(buckets100ArrayCombineThreeFactorsProbabilityList) * 0.01 )
TOP5PercentLowerBoundIndex = int( len(buckets100ArrayCombineThreeFactorsProbabilityList) * 0.05 )
TOP10PercentLowerBoundIndex = int( len(buckets100ArrayCombineThreeFactorsProbabilityList) * 0.1 )
TOP20PercentLowerBoundIndex = int( len(buckets100ArrayCombineThreeFactorsProbabilityList) * 0.2 )
TOP30PercentLowerBoundIndex = int( len(buckets100ArrayCombineThreeFactorsProbabilityList) * 0.3 )
TOP40PercentLowerBoundIndex = int( len(buckets100ArrayCombineThreeFactorsProbabilityList) * 0.4 ) 
Top50PercentLowerBoundIndex = int( len(buckets100ArrayCombineThreeFactorsProbabilityList) * 0.5 )
Top60PercentLowerBoundIndex = int( len(buckets100ArrayCombineThreeFactorsProbabilityList) * 0.6 )
Top70PercentLowerBoundIndex = int( len(buckets100ArrayCombineThreeFactorsProbabilityList) * 0.7 )
Top80PercentLowerBoundIndex = int( len(buckets100ArrayCombineThreeFactorsProbabilityList) * 0.8 )
Top90PercentLowerBoundIndex = int( len(buckets100ArrayCombineThreeFactorsProbabilityList) * 0.9 )

print
print "len(buckets100ArrayCombineThreeFactorsProbabilityList):",len(buckets100ArrayCombineThreeFactorsProbabilityList)
print "max combine 3 factors probability:",buckets100ArrayCombineThreeFactorsProbabilityList[0]
print "TOP1PercentLowerBound:",buckets100ArrayCombineThreeFactorsProbabilityList[TOP1PercentLowerBoundIndex]
print "TOP5PercentLowerBound:",buckets100ArrayCombineThreeFactorsProbabilityList[TOP5PercentLowerBoundIndex]
print "TOP10PercentLowerBound:",buckets100ArrayCombineThreeFactorsProbabilityList[TOP10PercentLowerBoundIndex]
print "TOP20PercentLowerBound:",buckets100ArrayCombineThreeFactorsProbabilityList[TOP20PercentLowerBoundIndex]
print "TOP30PercentLowerBound:",buckets100ArrayCombineThreeFactorsProbabilityList[TOP30PercentLowerBoundIndex]
print "TOP40PercentLowerBound:",buckets100ArrayCombineThreeFactorsProbabilityList[TOP40PercentLowerBoundIndex]
print "TOP50PercentLowerBound:",buckets100ArrayCombineThreeFactorsProbabilityList[Top50PercentLowerBoundIndex]
print "TOP60PercentLowerBound:",buckets100ArrayCombineThreeFactorsProbabilityList[Top60PercentLowerBoundIndex]
print "TOP70PercentLowerBound:",buckets100ArrayCombineThreeFactorsProbabilityList[Top70PercentLowerBoundIndex]
print "TOP80PercentLowerBound:",buckets100ArrayCombineThreeFactorsProbabilityList[Top80PercentLowerBoundIndex]
print "TOP90PercentLowerBound:",buckets100ArrayCombineThreeFactorsProbabilityList[Top90PercentLowerBoundIndex]
print "min combine 3 factors probability:",buckets100ArrayCombineThreeFactorsProbabilityList[-1]
print "program ends."
