# Updated by Wei 2013/08/24 night at school
# Purpose: This is the simulation of how a piece is kept IN or pruned OUT in the context of static index pruning
# This is written mostly from the piece perspective
# Overall, the quality of this piece-level IN/OUT simulation should be better than the term-level IN or OUT

from __future__ import division
from operator import itemgetter, attrgetter
import os
import sys
import gc
import math
import time

# Some testing statistics:
# Just read all the lines(37M) one by one into main memory, it takes 26 seconds.
# Just split the list into elements(37M lines) and it takes 55 seconds.
# Take 630 seconds to assign some variables in memory
# It takes about 13.2 mins only to load the info to the main memory

# index value = 0, 0.01
# index value = 1, 0.05
# index value = 2, 0.1
# index value = 3, 0.2
# index value = 4, 0.3
# index value = 5, 0.4
# index value = 6, 0.5
# index value = 7, 0.6
# index value = 8, 0.7
# index value = 9, 0.8
# index value = 10, 0.9

# This includes every term in the lexicon
TOTAL_NUM_OF_DESIGNED_POSTINGS_IN_THE_INVERTED_INDEX = 6451948010 # 100% of the whole index (all the unseen term lists have been added)
# This do NOT includes the terms in the row1_0 (in the total of 36872359 terms)
# TOTAL_NUM_OF_DESIGNED_POSTINGS_IN_THE_INVERTED_INDEX = 6323944039 # 98% of the whole index (with 4 unseen term list added and NO shortcut exit flag)
# The following option is used for evaluation debug
# TOTAL_NUM_OF_DESIGNED_POSTINGS_IN_THE_INVERTED_INDEX = 5009645906 # 79.22% of the whole index

# index = 0
# TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_1Percent_kept: 64519480
TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_1Percent_kept = int( TOTAL_NUM_OF_DESIGNED_POSTINGS_IN_THE_INVERTED_INDEX * 0.01 )
# index = 1
# TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_5Percent_kept: 322597400
TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_5Percent_kept = int( TOTAL_NUM_OF_DESIGNED_POSTINGS_IN_THE_INVERTED_INDEX * 0.05 )
# index = 2
# TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_10Percent_kept: 645194801
TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_10Percent_kept = int( TOTAL_NUM_OF_DESIGNED_POSTINGS_IN_THE_INVERTED_INDEX * 0.1 )
# index = 3
# TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_20Percent_kept: 1290389602
TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_20Percent_kept = int( TOTAL_NUM_OF_DESIGNED_POSTINGS_IN_THE_INVERTED_INDEX * 0.2 )
# index = 4
# TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_30Percent_kept: 1935584403
TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_30Percent_kept = int( TOTAL_NUM_OF_DESIGNED_POSTINGS_IN_THE_INVERTED_INDEX * 0.3 )
# index = 5
# TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_40Percent_kept: 2580779204
TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_40Percent_kept = int( TOTAL_NUM_OF_DESIGNED_POSTINGS_IN_THE_INVERTED_INDEX * 0.4 )
# index = 6
# TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_50Percent_kept: 3225974005
TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_50Percent_kept = int( TOTAL_NUM_OF_DESIGNED_POSTINGS_IN_THE_INVERTED_INDEX * 0.5 )
# index = 7
# TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_60Percent_kept: 3871168806
TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_60Percent_kept = int( TOTAL_NUM_OF_DESIGNED_POSTINGS_IN_THE_INVERTED_INDEX * 0.6 )
# index = 8
# TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_70Percent_kept 4516363607
TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_70Percent_kept = int( TOTAL_NUM_OF_DESIGNED_POSTINGS_IN_THE_INVERTED_INDEX * 0.7 )
# Note:
# For the query terms which have at least seen once. The total number of postings is: 5009420937
# index = 9
# TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_80Percent_kept: 5161558408
TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_80Percent_kept = int( TOTAL_NUM_OF_DESIGNED_POSTINGS_IN_THE_INVERTED_INDEX * 0.8 )
# index = 10
# TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_90Percent_kept: 5806753209
TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_90Percent_kept = int( TOTAL_NUM_OF_DESIGNED_POSTINGS_IN_THE_INVERTED_INDEX * 0.9 )


print "TOTAL_NUM_OF_DESIGNED_POSTINGS_IN_THE_INVERTED_INDEX:",TOTAL_NUM_OF_DESIGNED_POSTINGS_IN_THE_INVERTED_INDEX
print "TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_1Percent_kept:",TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_1Percent_kept 
print "TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_5Percent_kept:",TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_5Percent_kept 
print "TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_10Percent_kept:",TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_10Percent_kept 
print "TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_20Percent_kept:",TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_20Percent_kept 
print "TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_30Percent_kept:",TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_30Percent_kept 
print "TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_40Percent_kept:",TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_40Percent_kept 
print "TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_50Percent_kept:",TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_50Percent_kept 
print "TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_60Percent_kept:",TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_60Percent_kept 
print "TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_70Percent_kept",TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_70Percent_kept 
print "TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_80Percent_kept:",TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_80Percent_kept 
print "TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_90Percent_kept:",TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_90Percent_kept


LIST_FOR_STORING_THRESHOLD_NUMBERS = []
LIST_FOR_STORING_THRESHOLD_NUMBERS.append(TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_1Percent_kept)
LIST_FOR_STORING_THRESHOLD_NUMBERS.append(TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_5Percent_kept)
LIST_FOR_STORING_THRESHOLD_NUMBERS.append(TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_10Percent_kept)
LIST_FOR_STORING_THRESHOLD_NUMBERS.append(TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_20Percent_kept)
LIST_FOR_STORING_THRESHOLD_NUMBERS.append(TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_30Percent_kept)
LIST_FOR_STORING_THRESHOLD_NUMBERS.append(TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_40Percent_kept)
LIST_FOR_STORING_THRESHOLD_NUMBERS.append(TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_50Percent_kept)
LIST_FOR_STORING_THRESHOLD_NUMBERS.append(TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_60Percent_kept)
LIST_FOR_STORING_THRESHOLD_NUMBERS.append(TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_70Percent_kept)
LIST_FOR_STORING_THRESHOLD_NUMBERS.append(TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_80Percent_kept)
LIST_FOR_STORING_THRESHOLD_NUMBERS.append(TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_90Percent_kept)
LIST_FOR_STORING_THRESHOLD_NUMBERS.append(TOTAL_NUM_OF_DESIGNED_POSTINGS_IN_THE_INVERTED_INDEX)

# for debug ONLY
print "len(LIST_FOR_STORING_THRESHOLD_NUMBERS):",len(LIST_FOR_STORING_THRESHOLD_NUMBERS)

# Working assumption for this pass: only the query terms are handled first.
# termPiecesInfoList is a flat list; each element is a tuple describing one piece.
termPiecesInfoList = []

# Lexicon-wide totals used by the "left unprocessed" bookkeeping further down.
TOTAL_NUM_OF_PIECES_IN_LEXICON = 39873753 # around 39M
TOTAL_NUM_OF_TERMS_IN_LEXICON = 37728619  # around 37M

# Progress counters: nothing has been processed yet, so everything is "left".
total_num_of_terms_processed = 0
total_num_of_pieces_left_unprocessed = TOTAL_NUM_OF_PIECES_IN_LEXICON
total_num_of_terms_left_unprocessed = TOTAL_NUM_OF_TERMS_IN_LEXICON

# Reference counts from earlier analysis of the lexicon and the query log:
# # of terms in the lexicon: 37728619
# # of terms in the table which have freq < 20: 37726113
# # of terms in the table which are shown in the query log and have freq >= 20: 2506
# # of terms in the table which appear at least once and have freq < 20: 32752
# # of terms in the table which are popular query terms with freq >= 20: 2506
# # of terms in the table which appear at least once: 35258
# # of query terms in the 85K queries: 35627 = 35258 (recorded in the table) + 369 (out of the lexicon, intentionally ignored)
# # of query terms which are out of the lexicon: 432 - 369 = 63 (computed 58, 5 terms still unaccounted for)


# classLabelWithPiecesMetaInfoDict maps:
#   key:   class label in int format
#   value: a dict containing pieces info
#       key:   the pieceID (0-based position on the line)
#       value: the probability that the next posting will hit this piece area
# del operation set on 2013/08/24 afternoon
classLabelWithPiecesMetaInfoDict = {}
inputFileNameMinus1 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/classLabelWithPiecesInfo_COMPLETED"
inputFileHandler = open(inputFileNameMinus1,"r")

# Stream the file line by line instead of readlines() so the whole file is
# never held in memory at once.
for line in inputFileHandler:
    lineElements = line.strip().split("\t")
    # print "lineElements:",lineElements
    currentClassLabel = int(lineElements[0])
    # Each class label must appear on exactly one input line.
    if currentClassLabel in classLabelWithPiecesMetaInfoDict:
        print "mark1,error"
        exit(1)
    classLabelWithPiecesMetaInfoDict[currentClassLabel] = {}
    numOfPiecesInCurrentClass = int(lineElements[1])
    pieceProbabilityFields = lineElements[2:]
    # The declared piece count must match the number of probability fields.
    assert numOfPiecesInCurrentClass == len(pieceProbabilityFields)
    for pieceIndex,probabilityString in enumerate(pieceProbabilityFields):
        # enumerate() yields each pieceIndex exactly once per line, so the old
        # duplicate-pieceIndex error branch could never fire; store directly.
        classLabelWithPiecesMetaInfoDict[currentClassLabel][pieceIndex] = float(probabilityString)

print "len(classLabelWithPiecesMetaInfoDict):",len(classLabelWithPiecesMetaInfoDict)
print "classLabelWithPiecesMetaInfoDict[0]:",classLabelWithPiecesMetaInfoDict[0]
print "classLabelWithPiecesMetaInfoDict[4]:",classLabelWithPiecesMetaInfoDict[4]
print "classLabelWithPiecesMetaInfoDict[70]:",classLabelWithPiecesMetaInfoDict[70]
# classLabelWithPiecesMetaInfoDict[0]: {0: 1.0}
# classLabelWithPiecesMetaInfoDict[4]: {0: 0.37657715949530901, 1: 0.62342284050469099}
inputFileHandler.close()


startingTime = time.clock()
# key: term in string format
# value: class label in int format
# del operation set on 2013/08/24 afternoon
termClassLabelDict = {}

# key: term in string format
# value: a dict
    # key: pieceNumber
    # value: numOfPostingsInThisPiece
# del operation set on 2013/08/24 afternoon
termPiecesInfoDict = {}

# This file maybe NOT big enough
# option1: 100K query terms ONLY
# inputFileName0 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/100KQueryTermsWithTermFreqInCollectionANDClassLabelANDNumOfPostingsInEachPieces20130821"
# option2: 100K query terms + some unseen terms for debug
# inputFileName0 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/100KQueryTermsPlusSomeUnseenTermsWithTermFreqInCollectionANDClassLabelANDNumOfPostingsInEachPieces20130821"
# option3: whole lexicon terms for production
inputFileName0 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/wholeLexiconTermsWithLengthOfListANDClassLabelANDNumOfPostingsInEachPieces20130819"

inputFileHandler = open(inputFileName0,"r")
lineCounter = 0 # init

currentLine = inputFileHandler.readline()
lineCounter += 1

while currentLine:
    currentLineElements = currentLine.strip().split(" ")
    currentTerm = currentLineElements[0]
    # ignore currentLineElements[1], cause we will some other sources provide this info about length of the inverted list for this term
    currentTermClassLabelInIntFormat = int( currentLineElements[2] )
    currentTermNumOfPiecesHave = int( currentLineElements[3] )
    
    # CURRENT version
    # fill the variable: termClassLabelDict
    termClassLabelDict[currentTerm] = currentTermClassLabelInIntFormat 
    termPiecesInfoDict[currentTerm] = {}
    
    # fill the variable: termPiecesInfoDict
    baseIndex = 4
    for i in range( 0,len( currentLineElements[4:]),2):
        currentPieceNum = int( currentLineElements[4+i] )
        currentNumOfPostingsInThiePiece = int( currentLineElements[4+i+1])
        termPiecesInfoDict[currentTerm][currentPieceNum] = currentNumOfPostingsInThiePiece
        
    
    '''
    # OLD version
    # fill the variable: termClassLabelDict
    if currentTerm not in termClassLabelDict:
        termClassLabelDict[currentTerm] = currentTermClassLabelInIntFormat 
    
    if currentTerm not in termPiecesInfoDict:
        termPiecesInfoDict[currentTerm] = {}
    
    # fill the variable: termPiecesInfoDict
    baseIndex = 4
    for i in range( 0,len( currentLineElements[4:]),2):
        currentPieceNum = int( currentLineElements[4+i] )
        currentNumOfPostingsInThiePiece = int( currentLineElements[4+i+1])
        if currentPieceNum not in termPiecesInfoDict[currentTerm]:
            termPiecesInfoDict[currentTerm][currentPieceNum] = currentNumOfPostingsInThiePiece
        else:
            print "system error, mark1."
    '''
    
    currentLine = inputFileHandler.readline()
    lineCounter += 1
    
    if lineCounter % 1000000 == 0:
        print "lineCounter:",lineCounter,"processed."
    
inputFileHandler.close()
print "len(termClassLabelDict):",len(termClassLabelDict)
print "len(termPiecesInfoDict):",len(termPiecesInfoDict)
print "termClassLabelDict['0']:",termClassLabelDict['0']
print "termPiecesInfoDict['0']:",termPiecesInfoDict['0']
# print "termClassLabelDict['0120j4']:",termClassLabelDict['0120j4']
# print "termPiecesInfoDict['0120j4']:",termPiecesInfoDict['0120j4']
# len(termClassLabelDict): 38871
# len(termPiecesInfoDict): 38871
# termClassLabelDict['0']: 63
# termPiecesInfoDict['0']: {0: 4200166, 1: 2100083, 2: 1050041, 3: 525020, 4: 262510, 5: 131255, 6: 65627, 7: 32813, 8: 16406, 9: 8203, 10: 4101, 11: 2050, 12: 1025, 13: 512, 14: 256, 15: 128, 16: 64, 17: 73}
# termClassLabelDict['0120j4']: -1
# termPiecesInfoDict['0120j4']: {}
endingTime = (time.clock() - startingTime)
print "endingTime:",endingTime,"seconds."



# allQueryTermsWithTheirTermFreqInCollection maps:
#   key:   the term in string format
#   value: the length of the inverted list (# of postings) for that term, int
# del operation set on 2013/08/24 afternoon
allQueryTermsWithTheirTermFreqInCollection = {}
# 38871 query terms in total for the head 100K queries
inputFileName1 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/100KQueryTermsWithTermFreqInCollection.txt"
inputFileHandler = open(inputFileName1,"r")
# Stream the file directly instead of readlines(); each line is "term freq".
for line in inputFileHandler:
    lineElements = line.strip().split(" ")
    currentProcessingQueryTerm = lineElements[0]
    queryTermFreq = int(lineElements[1])
    # A duplicate term in the input is a fatal data error.
    if currentProcessingQueryTerm in allQueryTermsWithTheirTermFreqInCollection:
        print "error,Mark3"
        exit(1)
    allQueryTermsWithTheirTermFreqInCollection[currentProcessingQueryTerm] = queryTermFreq
# for debug ONLY
print "len(allQueryTermsWithTheirTermFreqInCollection):",len(allQueryTermsWithTheirTermFreqInCollection)
inputFileHandler.close()


# queryTermWithTheirRealFreqIn95KQueriesDict maps:
#   key:   term in string format
#   value: real (unsmoothed) freq of the term in the head 95K queries
# del operation set on 2013/08/24 afternoon
queryTermWithTheirRealFreqIn95KQueriesDict = {}
# The real freq of terms in 95K queries is used for the good turing/ricardo method
inputFileName2 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/realFreqOfTermsIn_100KQueries_0_1_95%"
inputFileHandler = open(inputFileName2,"r")
# Stream the file directly instead of readlines(); each line is "term freq".
for line in inputFileHandler:
    lineElements = line.strip().split(" ")
    currentProcessingQueryTerm = lineElements[0]
    queryTermFreq = int(lineElements[1])
    # A duplicate term in the input is a fatal data error.
    if currentProcessingQueryTerm in queryTermWithTheirRealFreqIn95KQueriesDict:
        print "error,Mark3"
        exit(1)
    queryTermWithTheirRealFreqIn95KQueriesDict[currentProcessingQueryTerm] = queryTermFreq
print "len(queryTermWithTheirRealFreqIn95KQueriesDict):",len(queryTermWithTheirRealFreqIn95KQueriesDict)
inputFileHandler.close()

# queryTermWithTheirRealFreqIn85KQueriesDict maps:
#   key:   term in string format
#   value: real (unsmoothed) freq of the term in the head 85K queries
# del operation set on 2013/08/24 afternoon
queryTermWithTheirRealFreqIn85KQueriesDict = {}
# The real freq of terms in 85K queries is used for our 2D/1D estimation
inputFileName3 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/realFreqOfTermsInQueries_head_85K_0_85%"
inputFileHandler = open(inputFileName3,"r")
# Stream the file directly instead of readlines(); each line is "term freq".
for line in inputFileHandler:
    lineElements = line.strip().split(" ")
    currentProcessingQueryTerm = lineElements[0]
    queryTermFreq = int(lineElements[1])
    # A duplicate term in the input is a fatal data error.
    if currentProcessingQueryTerm in queryTermWithTheirRealFreqIn85KQueriesDict:
        print "error,Mark3"
        exit(1)
    queryTermWithTheirRealFreqIn85KQueriesDict[currentProcessingQueryTerm] = queryTermFreq
# for debug ONLY
print "len(queryTermWithTheirRealFreqIn85KQueriesDict):",len(queryTermWithTheirRealFreqIn85KQueriesDict)
inputFileHandler.close()

# key: the cell identifier like 1_0, 1_1, 1_2 ... 5_16 5_17 5_18 5_19
# value: the probability which this cell of terms belonging to
# the sum of all the cells' corresponding terms should be the WHOLE universe of the terms that the search system can handle 
# del operation set on 2013/08/24 afternoon
cellProbabilityDict = {}
# This is the 1D / 2D final probabilities we want, note that the probability in the 3 tables are NOT the final probability we want, but just a middle step 
# The SUM row, I think is the final 1D probability
# From row1,row2 ... row5, I think this is the final 2D probability
inputFileName4 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/probabilityWeWantFinally20130418"
inputFileHandler = open(inputFileName4,"r")
for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    cellKey = lineElements[0][3:]
    probability = float(lineElements[1])
    if cellKey not in cellProbabilityDict:
        cellProbabilityDict[cellKey] = probability
print "len(cellProbabilityDict):",len(cellProbabilityDict)
# print "cellProbabilityDict:",cellProbabilityDict
# for a specific 2D probability
print "cellProbabilityDict['1_1']:",cellProbabilityDict['1_1'],"(specific cell '1_1')"
# for a specific 1D probability
print "cellProbabilityDict['_1']:",cellProbabilityDict['_1'],"(specific cell '_1' combining the cells '1_1','2_1','3_1','4_1','5_1')"
inputFileHandler.close()


# goodTuringFreqOfFreqProbabilityDict maps:
#   key:   freq value in int format
#   value: the good-turing probability estimated for terms of exactly that freq
# del operation set on 2013/08/24 afternoon
goodTuringFreqOfFreqProbabilityDict = {}
# The good turing probability estimation is applied to the whole 95K queries, NOT the 85K queries.
inputFileName5 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/freqOfFreqInQueries_0_1_95K_95%_good_turing_output"
inputFileHandler = open(inputFileName5,"r")

# Skip the 4 meta headlines at the top of the file.
for _headerIndex in range(4):
    inputFileHandler.readline()

# Only the first 31 data rows are used; this upper bound tracks the real freq
# of terms in the 95K queries and will change with it.
dataRows = inputFileHandler.readlines()[0:31]
for dataRow in dataRows:
    rowFields = dataRow.strip().split(" ")
    freqOfFreq = int(rowFields[0])
    goodTuringProbability = float(rowFields[3])
    # Each freq value must occur at most once in the table.
    if freqOfFreq in goodTuringFreqOfFreqProbabilityDict:
        print "ERROR,mark3"
        exit(1)
    goodTuringFreqOfFreqProbabilityDict[freqOfFreq] = goodTuringProbability

print "len(goodTuringFreqOfFreqProbabilityDict):",len(goodTuringFreqOfFreqProbabilityDict)
print "goodTuringFreqOfFreqProbabilityDict[0]:",goodTuringFreqOfFreqProbabilityDict[0]
print "goodTuringFreqOfFreqProbabilityDict:",goodTuringFreqOfFreqProbabilityDict
inputFileHandler.close()


# smallCellKeyWithItsCorrespondingQueryTermListDict maps:
#   key:   the cell identifier
#   value: a list of terms which belong to this cell
# del operation set on 2013/08/24 afternoon
smallCellKeyWithItsCorrespondingQueryTermListDict = {}
inputFileName6 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/freqOfFreqInQueries_head_85K_0_85%_2D_with_query_terms"
inputFileHandler = open(inputFileName6,"r")
# Stream the file directly; line format: cellKey numOfTerms term1 term2 ...
for line in inputFileHandler:
    lineElements = line.strip().split(" ")
    smallCellKey = lineElements[0]
    numOfQueryTerms = int( lineElements[1] )
    # A duplicate cell key in the input is a fatal data error.
    if smallCellKey in smallCellKeyWithItsCorrespondingQueryTermListDict:
        print "error,Mark1"
        exit(1)
    # Take exactly the declared number of query terms with a single slice
    # (replaces the old element-by-element append loop).
    smallCellKeyWithItsCorrespondingQueryTermListDict[smallCellKey] = lineElements[2:2+numOfQueryTerms]

print "len(smallCellKeyWithItsCorrespondingQueryTermListDict):",len(smallCellKeyWithItsCorrespondingQueryTermListDict)
print "smallCellKeyWithItsCorrespondingQueryTermListDict['3_195']:",smallCellKeyWithItsCorrespondingQueryTermListDict['3_195']
print "smallCellKeyWithItsCorrespondingQueryTermListDict['3_191']:",smallCellKeyWithItsCorrespondingQueryTermListDict['3_191']
inputFileHandler.close()


dataLines = []
# The purpose of loading the info from the "table:denominator:freqOfFreqForTheLexiconTerm" is to get the ranges for each big cell(combine the small cells together)
inputFileName7 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/probabilityTableUsingProfIdea20130420_fixed"
inputFileHandler = open(inputFileName7,"r")
dataLine = inputFileHandler.readline()
while not dataLine.strip().startswith("table:denominator:freqOfFreqForTheLexiconTerm"):
    dataLine = inputFileHandler.readline()
# for debug ONLY
# print dataLine.strip()

inputFileHandler.readline()
inputFileHandler.readline()
inputFileHandler.readline()
inputFileHandler.readline()

row1DataLine = inputFileHandler.readline()
row2DataLine = inputFileHandler.readline()
row3DataLine = inputFileHandler.readline()
row4DataLine = inputFileHandler.readline()
row5DataLine = inputFileHandler.readline()
sumDataLine = inputFileHandler.readline()

dataLines.append(row1DataLine)
dataLines.append(row2DataLine)
dataLines.append(row3DataLine)
dataLines.append(row4DataLine)
dataLines.append(row5DataLine)
inputFileHandler.close()
print "len(dataLines):",len(dataLines)


# Part1: for every query term found in the 2D probability table, compute four
# per-piece "value per posting" ratios (Ricardo / 2D / 1D / good-Turing) and
# collect one tuple per piece into termPiecesInfoList.
print "Part1: Load the query terms which are in the 2D probability estimation table and compute the final compared values."
# dataLines holds row1..row5 of the table; each cell is "[lo,hi]:freq".
for indexOutside,dataLine in enumerate(dataLines):
    for indexInside,cellString in enumerate( dataLine.strip().split(" ")[2:] ):
        currentFreq = int( cellString.split(":")[1] )
        rangeString = cellString.split(":")[0]
        # for debug ONLY
        # print indexOutside+1,indexInside+1,rangeString,currentFreq
        # big-cell key "row_column", e.g. "1_1"; matches cellProbabilityDict keys
        cellKeyFor2D = str(indexOutside+1) + "_" + str(indexInside+1)
        # parse "[lo,hi]" into the inclusive small-cell ID range for this big cell
        smallRangeLowerBoundID = int( rangeString.split("[")[1].split("]")[0].split(",")[0] )
        smallRangeUpperBoundID = int( rangeString.split("[")[1].split("]")[0].split(",")[1] )
        
        for i in range(smallRangeLowerBoundID,smallRangeUpperBoundID+1):
            # NOTE(review): the small-cell key uses indexInside+1 as its first
            # component while cellKeyFor2D uses indexOutside+1 — confirm this
            # asymmetry is intended and matches the small-cell file's key format.
            smallCellKey = str(indexInside+1) + "_" + str(i)
            # before adding the whole thing, we can just have the query term with their designed tuples
            for currentProcessingQueryTerm in smallCellKeyWithItsCorrespondingQueryTermListDict[smallCellKey]:
                # This is where you can put the query term and the tuple in.
                if currentProcessingQueryTerm in queryTermWithTheirRealFreqIn95KQueriesDict and currentProcessingQueryTerm in allQueryTermsWithTheirTermFreqInCollection:
                    total_num_of_terms_processed += 1
                    currentProcessingQueryTermFreqInCollection = allQueryTermsWithTheirTermFreqInCollection[currentProcessingQueryTerm]
                    
                    # 1
                    # queryTermWithTheirRealFreqIn95KQueriesDict[currentProcessingQueryTerm]
                    
                    # 2
                    # Note that the 2D and 1D thing are all based on the 85K training queries, 10K for justification queries
                    currentProcessingQueryTermPredictedProbability2D = cellProbabilityDict[cellKeyFor2D]
                    
                    # 3
                    # tableColumnNum is actually the real freq for the term in a certain set of queries.
                    tableColumnNum = int(cellKeyFor2D.split("_")[1])
                    cellKeyFor1D = "_" + str(tableColumnNum)
                    currentProcessingQueryTermPredictedProbability1D = cellProbabilityDict[cellKeyFor1D]
                    
                    # 4
                    # BUG fixed 2013/08/22 morning by Wei at school
                    currentProcessingQueryTermGoodTuringProbability = goodTuringFreqOfFreqProbabilityDict[ queryTermWithTheirRealFreqIn95KQueriesDict[currentProcessingQueryTerm] ]
                    
                    currentProcessingQueryTermClassLabel = termClassLabelDict[currentProcessingQueryTerm]
                    
                    # The triple-quoted blocks below are disabled debug
                    # scaffolding; they execute as no-op string literals.
                    '''
                    # for debug purpose
                    # the term candidates for DEBUG: '000sites', 'instructors'
                    # debug variables:
                    debugIntSumVariable = 0
                    debugSumVariable1 = 0.0
                    debugSumVariable2 = 0.0
                    debugSumVariable3 = 0.0
                    debugSumVariable4 = 0.0
                    '''
                    
                    '''
                    # for debug purpose
                    if currentProcessingQueryTerm == "instructors":
                        ricardoRatio = queryTermWithTheirRealFreqIn95KQueriesDict[currentProcessingQueryTerm] / currentProcessingQueryTermFreqInCollection
                        our2DProbabilityModelRatio = currentProcessingQueryTermPredictedProbability2D / currentProcessingQueryTermFreqInCollection
                        our1DProbabilityModelRatio = currentProcessingQueryTermPredictedProbability1D / currentProcessingQueryTermFreqInCollection
                        goodTuringProbabilityModelRatio = currentProcessingQueryTermGoodTuringProbability / currentProcessingQueryTermFreqInCollection
                        print "(Before)DEBUG:",currentProcessingQueryTerm,currentProcessingQueryTermFreqInCollection,ricardoRatio,our2DProbabilityModelRatio,our1DProbabilityModelRatio,goodTuringProbabilityModelRatio
                    '''
                    
                    # One output tuple per piece of this term's inverted list.
                    for pieceNumIndex in termPiecesInfoDict[currentProcessingQueryTerm]:
                        pieceIdentifier = currentProcessingQueryTerm + "_" + str(pieceNumIndex)
                        exp_results_of_current_piece_for_one_posting = classLabelWithPiecesMetaInfoDict[currentProcessingQueryTermClassLabel][pieceNumIndex]
                        real_size_of_current_piece = termPiecesInfoDict[currentProcessingQueryTerm][pieceNumIndex]
                        
                        # comparedAlg1 [SIGIR 2007 Ricardo A.Baeza-Yates et al]
                        # 2 parameters:
                        # (1) frequency f for each query term from query log
                        # (2) size s of the associate part of the inverted list
                        # (3) define the ratio f  * exp_num_of_results_for_this_piece / s, and greedily select the posting list with maximum ratio
                        ricardoRatioPiece = queryTermWithTheirRealFreqIn95KQueriesDict[currentProcessingQueryTerm]  * exp_results_of_current_piece_for_one_posting / real_size_of_current_piece
                        
                        # ourProbabilityModel2D
                        # 2 parameters:
                        # (1) probability p2D (currentProcessingQueryTermPredictedProbability2D) 2D settings
                        # (2) size s of the associate part of the inverted list
                        # (3) define the ratio p2D  * exp_num_of_results_for_this_piece / s, and greedily select the posting list with maximum ratio
                        our2DProbabilityModelRatioPiece = currentProcessingQueryTermPredictedProbability2D  * exp_results_of_current_piece_for_one_posting / real_size_of_current_piece
                        
                        # ourProbabilityModel1D
                        # 2 parameters:
                        # (1) probability p1D settings
                        # (2) size s of the associate part of the inverted list
                        # (3) define the ratio p1D  * exp_num_of_results_for_this_piece / s, and greedily select the posting list with maximum ratio
                        our1DProbabilityModelRatioPiece = currentProcessingQueryTermPredictedProbability1D  * exp_results_of_current_piece_for_one_posting / real_size_of_current_piece
    
                        # goodTuringProbabilityModel
                        # 2 parameters:
                        # (1) probability p predicted by good turing
                        # (2) size s of the associate part of the inverted list
                        # (3) define the ratio p  * exp_num_of_results_for_this_piece / s, and greedily select the posting list with maximum ratio
                        goodTuringProbabilityModelRatioPiece = currentProcessingQueryTermGoodTuringProbability  * exp_results_of_current_piece_for_one_posting / real_size_of_current_piece
                        '''
                        # for debug purpose
                        if currentProcessingQueryTerm == "instructors":                        
                            print "(Middle)DEBUG:",pieceIdentifier,real_size_of_current_piece,ricardoRatioPiece,our2DProbabilityModelRatioPiece,our1DProbabilityModelRatioPiece,goodTuringProbabilityModelRatioPiece
                            debugIntSumVariable += real_size_of_current_piece
                            debugSumVariable1 += real_size_of_current_piece * ricardoRatioPiece
                            debugSumVariable2 += real_size_of_current_piece * our2DProbabilityModelRatioPiece
                            debugSumVariable3 += real_size_of_current_piece * our1DProbabilityModelRatioPiece
                            debugSumVariable4 += real_size_of_current_piece * goodTuringProbabilityModelRatioPiece
                        '''
                        # tuple layout: (pieceID, size, ricardo, 2D, 1D, goodTuring)
                        currentProcessingPieceTuple = (pieceIdentifier,real_size_of_current_piece,ricardoRatioPiece,our2DProbabilityModelRatioPiece,our1DProbabilityModelRatioPiece,goodTuringProbabilityModelRatioPiece)
                        termPiecesInfoList.append(currentProcessingPieceTuple)
                    '''
                    # for debug purpose
                    if currentProcessingQueryTerm == "instructors":
                        print "(After)DEBUG:",currentProcessingQueryTerm,debugIntSumVariable,debugSumVariable1/debugIntSumVariable,debugSumVariable2/debugIntSumVariable,debugSumVariable3/debugIntSumVariable,debugSumVariable4/debugIntSumVariable
                    '''
                else:
                    # term listed in a small cell but missing from one of the
                    # frequency dicts: treated as a fatal data inconsistency
                    print "error,Mark4"
                    exit(1)

# for debug ONLY
# print "len(termPiecesInfoList):",len(termPiecesInfoList)
total_num_of_pieces_left_unprocessed = TOTAL_NUM_OF_PIECES_IN_LEXICON - len(termPiecesInfoList)
total_num_of_terms_left_unprocessed  = TOTAL_NUM_OF_TERMS_IN_LEXICON  - total_num_of_terms_processed
print "len(termPiecesInfoList) / # of pieces processed so far:",len(termPiecesInfoList)
print "total_num_of_pieces_left_unprocessed:",total_num_of_pieces_left_unprocessed
print "total_num_of_terms_processed:",total_num_of_terms_processed
print "total_num_of_terms_left_unprocessed:",total_num_of_terms_left_unprocessed
print "termPiecesInfoList[0]:",termPiecesInfoList[0]
print "termPiecesInfoList[1]:",termPiecesInfoList[1]
print "termPiecesInfoList[2]:",termPiecesInfoList[2]
print "termPiecesInfoList[3]:",termPiecesInfoList[3]
print 


# Let's add the popular query words( real freq of this term >=20 ) into the list as well based on the counting of the head 85K queries
popularTermsCounter = 0
# Total query-term positions observed in the two query traces; used as the
# denominators when turning a term's raw trace frequency into a probability.
NUM_QUERY_TERM_POSITIONS_FOR_95K_QUERIES = 392955
NUM_QUERY_TERM_POSITIONS_FOR_85K_QUERIES = 351734
for currentProcessingQueryTerm in queryTermWithTheirRealFreqIn85KQueriesDict:
    # "popular" means: seen at least 20 times in the head 85K queries
    if queryTermWithTheirRealFreqIn85KQueriesDict[currentProcessingQueryTerm] >= 20:
        popularTermsCounter += 1
        total_num_of_terms_processed += 1
        
        currentProcessingQueryTermFreqInCollection = allQueryTermsWithTheirTermFreqInCollection[currentProcessingQueryTerm]
        # the class label selects the per-piece expected-results row in classLabelWithPiecesMetaInfoDict
        currentProcessingQueryTermClassLabel = termClassLabelDict[currentProcessingQueryTerm]
        
        # For freq >= 20 terms both of our models and the good-turing model fall
        # back to the plain maximum-likelihood estimate from the matching trace
        # (term freq / total term positions in that trace).
        ourProbabilityForWhichTermsHaveFreqGreaterOrEqualThan20 = queryTermWithTheirRealFreqIn85KQueriesDict[currentProcessingQueryTerm] / NUM_QUERY_TERM_POSITIONS_FOR_85K_QUERIES
        goodTuringProbabilityForWhichTermsHaveFreqGreaterOrEqualThan20 = queryTermWithTheirRealFreqIn95KQueriesDict[currentProcessingQueryTerm] / NUM_QUERY_TERM_POSITIONS_FOR_95K_QUERIES
        
        '''
        # for debug purpose
        # the term candidates for DEBUG: 'of'
        # debug variables:
        debugIntSumVariable = 0
        debugSumVariable1 = 0.0
        debugSumVariable2 = 0.0
        debugSumVariable3 = 0.0
        debugSumVariable4 = 0.0
        '''
        
        '''
        # for debug purpose
        if currentProcessingQueryTerm == "gov":
            ricardoRatio = queryTermWithTheirRealFreqIn95KQueriesDict[currentProcessingQueryTerm] / currentProcessingQueryTermFreqInCollection
            our2DProbabilityModelRatio = ourProbabilityForWhichTermsHaveFreqGreaterOrEqualThan20 / currentProcessingQueryTermFreqInCollection
            our1DProbabilityModelRatio = ourProbabilityForWhichTermsHaveFreqGreaterOrEqualThan20 / currentProcessingQueryTermFreqInCollection
            assert our2DProbabilityModelRatio == our1DProbabilityModelRatio
            goodTuringProbabilityModelRatio = goodTuringProbabilityForWhichTermsHaveFreqGreaterOrEqualThan20 / currentProcessingQueryTermFreqInCollection
            print "(Before)DEBUG:",currentProcessingQueryTerm,currentProcessingQueryTermFreqInCollection,ricardoRatio,our2DProbabilityModelRatio,our1DProbabilityModelRatio,goodTuringProbabilityModelRatio
        '''
        
        # Score every piece of this popular term's inverted list under all four
        # compared models and append one tuple per piece.
        for pieceNumIndex in termPiecesInfoDict[currentProcessingQueryTerm]:
            pieceIdentifier = currentProcessingQueryTerm + "_" + str(pieceNumIndex)
            exp_results_of_current_piece_for_one_posting = classLabelWithPiecesMetaInfoDict[currentProcessingQueryTermClassLabel][pieceNumIndex]
            real_size_of_current_piece = termPiecesInfoDict[currentProcessingQueryTerm][pieceNumIndex]   
        
        
            # comparedAlg1 [SIGIR 2007 Ricardo A.Baeza-Yates et al]
            # 2 parameters:
            # (1) frequency f for each query term from query log
            # (2) size s of the associate part of the inverted list
            # (3) define the ratio f  * exp_num_of_results_for_this_piece / s, and greedily select the posting list with maximum ratio
            ricardoRatioPiece = queryTermWithTheirRealFreqIn95KQueriesDict[currentProcessingQueryTerm] * exp_results_of_current_piece_for_one_posting / real_size_of_current_piece
            
            
            # ourProbabilityModel2D
            # 2 parameters:
            # (1) probability p2D (For those terms which has freq >= 20, probability is exactly like the ricardo model)
            # (2) size s of the associate part of the inverted list
            # (3) define the ratio p2D * exp_num_of_results_for_this_piece / s , and greedily select the posting list with maximum ratio
            our2DProbabilityModelRatioPiece = ourProbabilityForWhichTermsHaveFreqGreaterOrEqualThan20 * exp_results_of_current_piece_for_one_posting / real_size_of_current_piece
            
            # ourProbabilityModel1D (There is NO difference between 2D model and 1D model when dealing with terms which has freq >= 20)
            # 2 parameters:
            # (1) probability p1D (For those terms which has freq >= 20, probability is exactly like the ricardo model)
            # (2) size s of the associate inverted list(currentProcessingQueryTermFreqInCollection)
            # (3) define the ratio p1D * exp_num_of_results_for_this_piece / s , and greedily select the posting list with maximum ratio
            our1DProbabilityModelRatioPiece = ourProbabilityForWhichTermsHaveFreqGreaterOrEqualThan20 * exp_results_of_current_piece_for_one_posting / real_size_of_current_piece
            
            # for the term which has the real freq >= 20, there is just NO difference between 1D and 2D.
            assert our2DProbabilityModelRatioPiece == our1DProbabilityModelRatioPiece
            
            # goodTuringProbabilityModel (There is NO difference between this good turing model with the ricardo model when dealing with terms which has freq >= 20)
            # 2 parameters:
            # (1) probability p predicted by good turing
            # (2) size s of the associate part of the inverted list
            # (3) define the ratio p * exp_num_of_results_for_this_piece / s, and greedily select the posting list with maximum ratio
            goodTuringProbabilityModelRatioPiece = goodTuringProbabilityForWhichTermsHaveFreqGreaterOrEqualThan20 * exp_results_of_current_piece_for_one_posting / real_size_of_current_piece
            '''
            # for debug purpose
            if currentProcessingQueryTerm == "gov":                        
                print "(Middle)DEBUG:",pieceIdentifier,real_size_of_current_piece,ricardoRatioPiece,our2DProbabilityModelRatioPiece,our1DProbabilityModelRatioPiece,goodTuringProbabilityModelRatioPiece
                debugIntSumVariable += real_size_of_current_piece
                debugSumVariable1 += real_size_of_current_piece * ricardoRatioPiece
                debugSumVariable2 += real_size_of_current_piece * our2DProbabilityModelRatioPiece
                debugSumVariable3 += real_size_of_current_piece * our1DProbabilityModelRatioPiece
                debugSumVariable4 += real_size_of_current_piece * goodTuringProbabilityModelRatioPiece
            ''' 
            # tuple layout: (id, size, ricardo, our2D, our1D, goodTuring)
            currentProcessingPopularPieceTuple = (pieceIdentifier, real_size_of_current_piece, ricardoRatioPiece,our2DProbabilityModelRatioPiece,our1DProbabilityModelRatioPiece,goodTuringProbabilityModelRatioPiece)
            termPiecesInfoList.append(currentProcessingPopularPieceTuple)
        '''
        # for debug purpose
        if currentProcessingQueryTerm == "gov":
            print "(After)DEBUG:",currentProcessingQueryTerm,debugIntSumVariable,debugSumVariable1/debugIntSumVariable,debugSumVariable2/debugIntSumVariable,debugSumVariable3/debugIntSumVariable,debugSumVariable4/debugIntSumVariable
        '''
    else:
        pass # this situation has been handled by the previous case :)


total_num_of_pieces_left_unprocessed = TOTAL_NUM_OF_PIECES_IN_LEXICON - len(termPiecesInfoList)
total_num_of_terms_left_unprocessed  = TOTAL_NUM_OF_TERMS_IN_LEXICON  - total_num_of_terms_processed
print "(DEBUG)popularTermsCounter:",popularTermsCounter
print "len(termPiecesInfoList) / # of pieces processed so far:",len(termPiecesInfoList)
print "total_num_of_pieces_left_unprocessed:",total_num_of_pieces_left_unprocessed
print "total_num_of_terms_processed:",total_num_of_terms_processed
print "total_num_of_terms_left_unprocessed:",total_num_of_terms_left_unprocessed
print "termPiecesInfoList[-1]:",termPiecesInfoList[-1]
print "termPiecesInfoList[-2]:",termPiecesInfoList[-2]
print "termPiecesInfoList[-3]:",termPiecesInfoList[-3]
print "termPiecesInfoList[-4]:",termPiecesInfoList[-4]
print



print "Part2: Load the UNSEEN terms which are NOT in the 2D probability estimation table and compute the final compared values."
# # of query terms in the cell ROW5_0 : about 13296
# # of query terms in the cell ROW4_0 : about 32085
# # of query terms in the cell ROW3_0 : about 87252
# # of query terms in the cell ROW2_0 : about 688369
# # of query terms in the cell ROW1_0 : about 36872359

# The input file format(This input file format design is just NAIVE):
# lineElements[0]: currentProcessingQueryTerm
# lineElements[1]: queryTermRealFreqIn85KQueries
# lineElements[2]: queryTermFreqInCollection
# lineElements[3]: queryTermPredictedProbability
# lineElements[4]: ricardoRatio
fileNameList = []
basePathFileNamePart1 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/CellROW"
basePathFileNamePart2 = "_0QueryTermsWithMetaInfo"

# This argument controls how many files will be in the list
# current # of files:5
for i in range(5,0,-1):
    completedFilePath = basePathFileNamePart1 + str(i) + basePathFileNamePart2
    fileNameList.append(completedFilePath)

print "len(fileNameList):",len(fileNameList)

exitFlagForDebug = False
unseenTermsCounter = 0
for fileName in fileNameList:
    if exitFlagForDebug:
        break
    
    print "fileName:",fileName
    # special logic begins...
    # extract the cell info from the fileName for our1DProbabilityModelRatio
    # example file name:
    # /data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/CellROW2_0QueryTermsWithMetaInfo
    
    # Updated by Wei 2013/08/22 morning
    # The following 2 statements are just WRONG!! 
    # cellNum = str(fileName.strip().split("/")[-1][7])
    # print "cell num for unseen query terms :",cellNum
    
    # special logic ends.
    inputFileHandler = open(fileName,"r")
    for line in inputFileHandler.readlines():
        if exitFlagForDebug:
            break
        
        lineElements = line.strip().split(" ")
        # the file format of the following input files(5 columns):
        # lineElements[0]: currentProcessingQueryTerm
        # lineElements[1]: real term freq in the 85K query trace (Which should be ALL 0s in this case, cause the defination is UNSEEN)
        # lineElements[2]: freq in the collection
        # lineElements[3]: ourProbabilityModel2D
        # lineElements[4]: ricardo ratio (the value in file is super large, maybe we need the super small value. It depends on our sorting)
        
        # The standard format of the tuple:
        # (currentProcessingQueryTerm, ricardoRatio,our2DProbabilityModelRatio,our1DProbabilityModelRatio,goodTuringProbabilityModelRatio)
        # 0 currentProcessingQueryTerm
        # 1 ricardoRatio
        # 2 our2DProbabilityModelRatio
        # 3 our1DProbabilityModelRatio
        # 4 goodTuringProbabilityModelRatio
        currentProcessingQueryTerm = str( lineElements[0] )
        currentProcessingQueryTermFreqInCollection = int(lineElements[2])
        currentProcessingQueryTermClassLabel = termClassLabelDict[currentProcessingQueryTerm]
        unseenTermsCounter += 1
        total_num_of_terms_processed += 1
        
        '''
        # for debug purpose
        # the term candidates for DEBUG: '0000'
        # debug variables:
        debugIntSumVariable = 0
        debugSumVariable1 = 0.0
        debugSumVariable2 = 0.0
        debugSumVariable3 = 0.0
        debugSumVariable4 = 0.0
        '''
        
        '''
        # for debug purpose
        if currentProcessingQueryTerm == "0000":
            if currentProcessingQueryTerm in queryTermWithTheirRealFreqIn95KQueriesDict:
                ricardoRatio = queryTermWithTheirRealFreqIn95KQueriesDict[currentProcessingQueryTerm] / currentProcessingQueryTermFreqInCollection
            else:
                # Updated by Wei 2013/08/22 morning
                # for those terms which are NOT in the head 95K queries, ricardo method just try to pick up the terms at random.
                # That is why I have a -99999999.0 for the ricardoRatio here
                # Remember to pick up the term from the largest value to the smallest value
                ricardoRatio = float(-99999999.0)
    
            # There are 5 unseen probability for the 2D probability estimation, and the values are as following:
            # based on the file: /data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/probabilityWeWantFinally20130418
            
            # ROW5_0 8.031888903939055e-07
            # ROW4_0 3.2578867212223025e-07
            # ROW3_0 1.1538218688794478e-07
            # ROW2_0 1.6040397629079132e-08
            # ROW1_0 2.7962236405996843e-10
            our2DProbabilityModelRatio = float(lineElements[3]) / currentProcessingQueryTermFreqInCollection
            
            # Updated by Wei 2013/08/22 morning
            # The following statements are just WRONG for 1D estimation!!
            # print "cellNum:",cellNum
            # BUG Fixed
            # if cellNum == "5":
            #    our1DProbabilityModelRatio = 1.20772275331e-05
            #elif cellNum == "4":
            #    our1DProbabilityModelRatio = 9.66505251504e-06
            #elif cellNum == "3":
            #    our1DProbabilityModelRatio = 6.56625976833e-06
            #elif cellNum == "2":
            #    our1DProbabilityModelRatio = 4.13057762416e-06
            #elif cellNum == "1":
            #    our1DProbabilityModelRatio = 1.32367943426e-06
            #else:
            #    print "ERROR,mark4"
            #    exit(1)
    
            # I think the correct assignment for 1D estimation
            # There is ONLY 1 unseen probability for the 1D probability estimation, and the value is 1.38700255851e-09
            # based on the file: /data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/probabilityWeWantFinally20130418
            # SUM_0 1.3870025585078873e-09
            our1DProbabilityModelRatio = 1.38700255851e-09 / currentProcessingQueryTermFreqInCollection
    
            if currentProcessingQueryTerm in queryTermWithTheirRealFreqIn95KQueriesDict:
                if queryTermWithTheirRealFreqIn95KQueriesDict[currentProcessingQueryTerm] >= 20:
                    goodTuringProbabilityForWhichTermsHaveFreqGreaterOrEqualThan20 = queryTermWithTheirRealFreqIn95KQueriesDict[currentProcessingQueryTerm] / NUM_QUERY_TERM_POSITIONS_FOR_95K_QUERIES
                    # The following statement should have some problems.
                    goodTuringProbabilityModelRatio = goodTuringProbabilityForWhichTermsHaveFreqGreaterOrEqualThan20 / currentProcessingQueryTermFreqInCollection
                else:
                    goodTuringProbabilityModelRatio = goodTuringFreqOfFreqProbabilityDict[ queryTermWithTheirRealFreqIn95KQueriesDict[currentProcessingQueryTerm] ] / currentProcessingQueryTermFreqInCollection
            else:
                # for the good turning method, the query term is actually NOT seen even in the head 95K queries.
                # at this point, all I need to do is to assign the good turning probability(freq = 0) for this term
                goodTuringProbabilityModelRatio = goodTuringFreqOfFreqProbabilityDict[0] / currentProcessingQueryTermFreqInCollection
            
            print "(Before)DEBUG:",currentProcessingQueryTerm,currentProcessingQueryTermFreqInCollection,ricardoRatio,our2DProbabilityModelRatio,our1DProbabilityModelRatio,goodTuringProbabilityModelRatio 
        '''
        
        
        for pieceNumIndex in termPiecesInfoDict[currentProcessingQueryTerm]:   
            pieceIdentifier = currentProcessingQueryTerm + "_" + str(pieceNumIndex)
            exp_results_of_current_piece_for_one_posting = classLabelWithPiecesMetaInfoDict[currentProcessingQueryTermClassLabel][pieceNumIndex]
            real_size_of_current_piece = termPiecesInfoDict[currentProcessingQueryTerm][pieceNumIndex]
            
            if currentProcessingQueryTerm in queryTermWithTheirRealFreqIn95KQueriesDict:
                ricardoRatioPiece = queryTermWithTheirRealFreqIn95KQueriesDict[currentProcessingQueryTerm] * exp_results_of_current_piece_for_one_posting / real_size_of_current_piece
            else:
                # Updated by Wei 2013/08/22 morning
                # for those terms which are NOT in the head 95K queries, ricardo method just try to pick up the terms at random.
                # That is why I have a -99999999.0 for the ricardoRatio here
                # Remember to pick up the term from the largest value to the smallest value
                ricardoRatioPiece = float(-99999999.0)
            
            our2DProbabilityModelRatioPiece = float(lineElements[3]) * exp_results_of_current_piece_for_one_posting / real_size_of_current_piece
            
            our1DProbabilityModelRatioPiece = 1.38700255851e-09 * exp_results_of_current_piece_for_one_posting / real_size_of_current_piece
            
            if currentProcessingQueryTerm in queryTermWithTheirRealFreqIn95KQueriesDict:
                if queryTermWithTheirRealFreqIn95KQueriesDict[currentProcessingQueryTerm] >= 20:
                    goodTuringProbabilityForWhichTermsHaveFreqGreaterOrEqualThan20 = queryTermWithTheirRealFreqIn95KQueriesDict[currentProcessingQueryTerm] / NUM_QUERY_TERM_POSITIONS_FOR_95K_QUERIES
                    # The following statement should have some problems.
                    goodTuringProbabilityModelRatioPiece = goodTuringProbabilityForWhichTermsHaveFreqGreaterOrEqualThan20 * exp_results_of_current_piece_for_one_posting / real_size_of_current_piece
                else:
                    goodTuringProbabilityModelRatioPiece = goodTuringFreqOfFreqProbabilityDict[ queryTermWithTheirRealFreqIn95KQueriesDict[currentProcessingQueryTerm] ] * exp_results_of_current_piece_for_one_posting / real_size_of_current_piece
            else:
                # for the good turning method, the query term is actually NOT seen even in the head 95K queries.
                # at this point, all I need to do is to assign the good turning probability(freq = 0) for this term
                goodTuringProbabilityModelRatioPiece = goodTuringFreqOfFreqProbabilityDict[0] * exp_results_of_current_piece_for_one_posting / real_size_of_current_piece            
            
            '''
            # for debug purpose
            if currentProcessingQueryTerm == "0000":                        
                print "(Middle)DEBUG:",pieceIdentifier,real_size_of_current_piece,ricardoRatioPiece,our2DProbabilityModelRatioPiece,our1DProbabilityModelRatioPiece,goodTuringProbabilityModelRatioPiece
                debugIntSumVariable += real_size_of_current_piece
                debugSumVariable1 += real_size_of_current_piece * ricardoRatioPiece
                debugSumVariable2 += real_size_of_current_piece * our2DProbabilityModelRatioPiece
                debugSumVariable3 += real_size_of_current_piece * our1DProbabilityModelRatioPiece
                debugSumVariable4 += real_size_of_current_piece * goodTuringProbabilityModelRatioPiece
            '''
            
            currentProcessingPieceTuple = (pieceIdentifier, real_size_of_current_piece, ricardoRatioPiece,our2DProbabilityModelRatioPiece,our1DProbabilityModelRatioPiece,goodTuringProbabilityModelRatioPiece)
            termPiecesInfoList.append(currentProcessingPieceTuple)
        
        '''
        # for debug purpose
        if currentProcessingQueryTerm == "0000":
            print "(After)DEBUG:",currentProcessingQueryTerm,debugIntSumVariable,debugSumVariable1/debugIntSumVariable,debugSumVariable2/debugIntSumVariable,debugSumVariable3/debugIntSumVariable,debugSumVariable4/debugIntSumVariable
        '''
            
        '''
        # for debug purpose
        if unseenTermsCounter == 1:
            exitFlagForDebug = True
        '''
        
    inputFileHandler.close()
    print "len(termPiecesInfoList):",len(termPiecesInfoList)


total_num_of_pieces_left_unprocessed = TOTAL_NUM_OF_PIECES_IN_LEXICON - len(termPiecesInfoList)
total_num_of_terms_left_unprocessed  = TOTAL_NUM_OF_TERMS_IN_LEXICON  - total_num_of_terms_processed
print "(DEBUG)unseenTermsCounter:",unseenTermsCounter
print "len(termPiecesInfoList) / # of pieces processed(FINAL):",len(termPiecesInfoList)
print "total_num_of_pieces_left_unprocessed:",total_num_of_pieces_left_unprocessed
print "total_num_of_terms_processed:",total_num_of_terms_processed
print "total_num_of_terms_left_unprocessed:",total_num_of_terms_left_unprocessed
print "termPiecesInfoList[-1]:",termPiecesInfoList[-1]
print "termPiecesInfoList[-2]:",termPiecesInfoList[-2]
print "termPiecesInfoList[-3]:",termPiecesInfoList[-3]
print "termPiecesInfoList[-4]:",termPiecesInfoList[-4]
endingTime = (time.clock() - startingTime)
print "endingTime:",endingTime,"seconds."
print

# Release the big lookup structures before Part3's memory-hungry sort.
# Rebinding each name to None drops the last reference by itself, so the
# original `del X` immediately followed by `X = None` was redundant; keeping
# only the rebinding has the same effect and leaves the names defined.
classLabelWithPiecesMetaInfoDict = None
termClassLabelDict = None
termPiecesInfoDict = None
allQueryTermsWithTheirTermFreqInCollection = None
# reclaim any reference cycles among the freed structures right away
gc.collect()

# The below can be treated as an whole new program. If it doesn't work this time, I would rather output the middle results into the disk and then load them back :)
# Brief pause so the freed memory is actually returned before the sort starts.
time.sleep(5)

print "Part3: Assume all the related terms have been loaded into the main memory in list, let's sort them and evaluate them"
currentTotalNumOfPosting = 0
for tuple in termPiecesInfoList:
    # unpack the tuple
    (pieceIdentifier, real_size_of_current_piece, _,_,_,_) = tuple
    '''
    # for DEBUG
    if pieceIdentifier == "suckle_0":
        print pieceIdentifier,"is here."
        exit(1)
    else:
        pass
    '''
    currentTotalNumOfPosting += real_size_of_current_piece
TOTAL_NUM_OF_ACTUAL_POSTINGS_IN_THE_INVERTED_INDEX = currentTotalNumOfPosting
print "TOTAL_NUM_OF_ACTUAL_POSTINGS_IN_THE_INVERTED_INDEX:",TOTAL_NUM_OF_ACTUAL_POSTINGS_IN_THE_INVERTED_INDEX
# Updated by Wei 2013/08/22, why do you know what I am thinking about? :) My previous me 3 months ago
# (1)# of terms: 856260 (2)# of postings: 6323944039 (3)percentage of the total postings: 98%

for turnNum in range(0,4):
    print
    print "turnNum:",turnNum
    # Now, let's operate on this termPiecesInfoList
    # current example of the tuple format
    # (currentProcessingQueryTerm, queryTermFreqInCollection, ricardoRatio, our2DProbabilityModelRatio, our1DProbabilityModelRatio, goodTuringProbabilityModelRatio)
    # example: 
    # (0) currentProcessingQueryTerm
    # (1) queryTermFreqInCollection
    # (2) ricardoRatio
    # (3) our2DProbabilityModelRatio
    # (4) our1DProbabilityModelRatio
    # (5) goodTuringProbabilityModelRatio
    sortedKeyNum = turnNum + 2
    if sortedKeyNum == 2:
        print "sort the list by ricardoRatio, greedily select from the largest to smallest"
    elif sortedKeyNum == 3:
        print "sort the list by our2DProbabilityModelRatio, greedily select from largest to smallest"
    elif sortedKeyNum == 4:
        print "sort the list by our1DProbabilityModelRatio, greedily select from largest to smallest"
    elif sortedKeyNum == 5:
        print "sort the list by goodTuringProbabilityModelRatio, greedily select from largest to smallest"      
    else:
        print "Unsupported Operations."
        exit(1)
    
    print "startingTime:",startingTime
    startingTime = time.clock()
    print "Sort the list begins..."
    termPiecesInfoList.sort(cmp=None, key=itemgetter(sortedKeyNum), reverse=True)
    print "Sort the list ends."
    endingTime = (time.clock() - startingTime)
    print "endingTime:",endingTime
    
    print "First 20 term pieces which will be injected."
    for i in range(0,20):
        print "termPiecesInfoList[",i,"]:",termPiecesInfoList[i]
    
    # init some variables
    eachPercentageInfoLinesList = []
    listIndex = 0
    previousTotalNumOfPostingCounter = 0
    currentTotalNumOfPostingCounter = 0
    current_INVERTED_INDEX_PiecesAndRealSizeOfCurrentPieceTuple_list = []
    leftDistance = 0
    rightDistance = 0
    
    # The purpose of looping through the variable termPiecesInfoList is that:
    # I want to classify the pieceIdentifiers based on different percentage rate
    for tupleIndex,tuple in enumerate( termPiecesInfoList ):
        # greedily select the terms from the sorted list
        
        # current version
        # not deal with variables: currentProcessingQueryTerm and currentProcessingQueryTermFreqInCollection
        (pieceIdentifier, real_size_of_current_piece,_,_,_,_) = tuple
        
        previousTotalNumOfPostingCounter = currentTotalNumOfPostingCounter
        currentTotalNumOfPostingCounter += real_size_of_current_piece
        current_INVERTED_INDEX_PiecesAndRealSizeOfCurrentPieceTuple_list.append( (pieceIdentifier,real_size_of_current_piece) )
        
        if currentTotalNumOfPostingCounter >= LIST_FOR_STORING_THRESHOLD_NUMBERS[listIndex]:
            # good, has the left distance and right distance to automatically adjust :)
            # LIKE on 2013/08/23 morning by Wei at school
            
            # If this line of percentage info is NOT the final line of percentage info, then we can play the 
            # leftDistance and rightDistance game
            if listIndex != len(LIST_FOR_STORING_THRESHOLD_NUMBERS) - 1:
                leftDistance = LIST_FOR_STORING_THRESHOLD_NUMBERS[listIndex] - previousTotalNumOfPostingCounter 
                rightDistance = currentTotalNumOfPostingCounter - LIST_FOR_STORING_THRESHOLD_NUMBERS[listIndex]
                if rightDistance >= leftDistance:
                    # for debug ONLY
                    '''
                    print "listIndex:",listIndex
                    print "tupleIndex:",tupleIndex
                    print "currentTotalNumOfPostingCounter(Used):",currentTotalNumOfPostingCounter
                    print "previousTotalNumOfPostingCounter:",previousTotalNumOfPostingCounter
                    print "len(current_INVERTED_INDEX_PiecesAndRealSizeOfCurrentPieceTuple_list):",len(current_INVERTED_INDEX_PiecesAndRealSizeOfCurrentPieceTuple_list)
                    # print "current_INVERTED_INDEX_PiecesAndRealSizeOfCurrentPieceTuple_list:",current_INVERTED_INDEX_PiecesAndRealSizeOfCurrentPieceTuple_list
                    print
                    '''
                    pieceDict = {}
                    
                    for tuple in current_INVERTED_INDEX_PiecesAndRealSizeOfCurrentPieceTuple_list:
                        (pieceIdentifier,real_size_of_current_piece) = tuple
                        if pieceIdentifier not in pieceDict:
                            pieceDict[pieceIdentifier] = real_size_of_current_piece
                        else:
                            print "Error,Mark1"
                            exit(1)
                    
                    eachPercentageInfoLinesList.append( (listIndex,currentTotalNumOfPostingCounter,len(pieceDict),pieceDict) )
                    
                else:
                    # for debug ONLY
                    '''
                    print "listIndex:",listIndex
                    print "tupleIndex:",tupleIndex
                    print "currentTotalNumOfPostingCounter:",currentTotalNumOfPostingCounter
                    print "previousTotalNumOfPostingCounter(Used):",previousTotalNumOfPostingCounter
                    print "len(current_INVERTED_INDEX_PiecesAndRealSizeOfCurrentPieceTuple_list)",len(current_INVERTED_INDEX_PiecesAndRealSizeOfCurrentPieceTuple_list)-1
                    # print "current_INVERTED_INDEX_PiecesAndRealSizeOfCurrentPieceTuple_list - lastElement:",current_INVERTED_INDEX_PiecesAndRealSizeOfCurrentPieceTuple_list[:-1]
                    print
                    '''
                    pieceDict = {}
                    
                    for tuple in current_INVERTED_INDEX_PiecesAndRealSizeOfCurrentPieceTuple_list[:-1]:
                        (pieceIdentifier,real_size_of_current_piece) = tuple
                        if pieceIdentifier not in pieceDict:
                            pieceDict[pieceIdentifier] = real_size_of_current_piece
                        else:
                            print "Error,Mark1"
                            exit(1)
                    
                    eachPercentageInfoLinesList.append( (listIndex,previousTotalNumOfPostingCounter,len(pieceDict),pieceDict) )
                    
                listIndex += 1
            else:
                # for debug ONLY
                '''
                print "listIndex:",listIndex
                print "tupleIndex:",tupleIndex
                print "currentTotalNumOfPostingCounter(Used):",currentTotalNumOfPostingCounter
                print "previousTotalNumOfPostingCounter:",previousTotalNumOfPostingCounter
                print "len(current_INVERTED_INDEX_PiecesAndRealSizeOfCurrentPieceTuple_list):",len(current_INVERTED_INDEX_PiecesAndRealSizeOfCurrentPieceTuple_list)
                # print "current_INVERTED_INDEX_PiecesAndRealSizeOfCurrentPieceTuple_list:",current_INVERTED_INDEX_PiecesAndRealSizeOfCurrentPieceTuple_list
                print
                '''
                pieceDict = {}
                
                for tuple in current_INVERTED_INDEX_PiecesAndRealSizeOfCurrentPieceTuple_list:
                    (pieceIdentifier,real_size_of_current_piece) = tuple
                    if pieceIdentifier not in pieceDict:
                        pieceDict[pieceIdentifier] = real_size_of_current_piece
                    else:
                        print "Error,Mark1"
                        exit(1)
                eachPercentageInfoLinesList.append( (listIndex,currentTotalNumOfPostingCounter,len(pieceDict),pieceDict) )
    
    # If not every threshold in LIST_FOR_STORING_THRESHOLD_NUMBERS was reached,
    # the postings and pieces accumulated since the last recorded threshold have
    # not been flushed into eachPercentageInfoLinesList yet; record them here as
    # one final entry. (When listIndex already points at the last threshold,
    # everything has been counted above and this block is skipped.)
    if listIndex < len(LIST_FOR_STORING_THRESHOLD_NUMBERS)-1:
        pieceDict = {}
        # Map each piece identifier to its real size. A piece may appear only
        # once in the accumulated list, so a duplicate is a fatal error.
        # NOTE: loop variable renamed from "tuple", which shadowed the builtin.
        for pieceSizeTuple in current_INVERTED_INDEX_PiecesAndRealSizeOfCurrentPieceTuple_list:
            (pieceIdentifier,real_size_of_current_piece) = pieceSizeTuple
            if pieceIdentifier in pieceDict:
                print "Error,Mark1"
                exit(1)
            pieceDict[pieceIdentifier] = real_size_of_current_piece
        # Record (threshold index, postings counted, piece count, piece dict).
        eachPercentageInfoLinesList.append( (listIndex,currentTotalNumOfPostingCounter,len(pieceDict),pieceDict) )
           
    # use the thing in eachPercentageInfoLinesList to answer queries.
    # print "Use the thing in eachPercentageInfoLinesList to answer queries"
    # print "len(eachPercentageInfoLinesList):",len(eachPercentageInfoLinesList)
    
    # Let's record those query terms in file first
    print "len(eachPercentageInfoLinesList):",len(eachPercentageInfoLinesList)
    print "--->","id","designed#OfPostingsInjected","percentageOfTOTAL#OfDesignedPostings","actual#OfPostingsInjected","percentageOf#TOTAL#OfActualPostings","#OfPiecesIn"
    for index,tuple in enumerate(eachPercentageInfoLinesList[0:12]):
        (listIndex,currentTotalNumOfPostingCounter,numOfPiecesInDict,pieceDict) = tuple
        
        # check
        # maybe too much cost here?
        tempCheckTotalNumPostings = 0
        for pieceIdentifier in pieceDict:
            tempCheckTotalNumPostings += pieceDict[pieceIdentifier]
        if tempCheckTotalNumPostings != currentTotalNumOfPostingCounter:
            print "tempCheckTotalNumPostings != currentTotalNumOfPostingCounter"
            print "tempCheckTotalNumPostings:",tempCheckTotalNumPostings
            print "currentTotalNumOfPostingCounter:",currentTotalNumOfPostingCounter
            exit(1)
        
        print listIndex,LIST_FOR_STORING_THRESHOLD_NUMBERS[listIndex], LIST_FOR_STORING_THRESHOLD_NUMBERS[listIndex]/TOTAL_NUM_OF_DESIGNED_POSTINGS_IN_THE_INVERTED_INDEX,currentTotalNumOfPostingCounter,currentTotalNumOfPostingCounter/TOTAL_NUM_OF_ACTUAL_POSTINGS_IN_THE_INVERTED_INDEX, numOfPiecesInDict
        
        #if listIndex == 0:
        #    tempList = list(pieceDict)
        #    print "    --->(DEBUG):",sorted(tempList)
        

    # The following logic is for iterate all the testing queries to see whether the pruned index can answer those queries.
    # init for the variable eachPercentageLineIndexWithNumOfResultsCanBeFullyAnsweredDict
    # key: each Percentage Line Index in int format
    # value: # of Queries Can Be Fully Answered in int format
    eachPercentageLineIndexWithNumOfResultsCanBeFullyAnsweredDict = {}
    for i in range(0,len(eachPercentageInfoLinesList)):
        eachPercentageLineIndexWithNumOfResultsCanBeFullyAnsweredDict[i] = 0
    
    # CURRENT version, updated by Wei 2013/08/23 night
    # The input file should be sorted by QID
    inputFileName = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/tail5KResultsWithRelatedPieceNum_sortedByQID"
    inputFileHandler = open(inputFileName,"r")
    previousProcessingQIDInIntFormat = -1
    previousProcessingDocIDInIntFormat = -1
    previousProcessingTerm = ""
    currentProcessingQIDInIntFormat = -1
    currentProcessingDocIDInIntFormat = -1
    currentProcessingTerm = ""
    
    # key: index num in int format
    # value: True or False for this piece maintained
    resultMaintainFlagsDict = {} # assume the world is beautiful :)
    # init step
    for index in range(0,len(eachPercentageInfoLinesList)):
        resultMaintainFlagsDict[index] = True
    
    print "len(resultMaintainFlagsDict):",len(resultMaintainFlagsDict)
    
    # Scan the (QID docID term ? pieceIndex) lines, grouped by docID, and for
    # each result document decide per pruning level whether every piece it
    # needs is still present.
    numOfCurrentDocumentsProcessedInResultsSet = 0
    # BUGFIX: iterate the file lazily instead of readlines(), which
    # materialized the whole file in memory.
    for line in inputFileHandler:
        previousProcessingQIDInIntFormat = currentProcessingQIDInIntFormat
        previousProcessingDocIDInIntFormat = currentProcessingDocIDInIntFormat
        previousProcessingTerm = currentProcessingTerm
        
        lineElements = line.strip().split(" ")
        currentProcessingQIDInIntFormat = int(lineElements[0])
        currentProcessingDocIDInIntFormat = int(lineElements[1])
        currentProcessingTerm = str(lineElements[2])
        pieceNumIndex = int(lineElements[4])
        
        # NOTE: a previousTerm == currentTerm check used to live here but does
        # NOT hold for one-term queries, so it stays removed. The original
        # dead "if QID == previous QID: pass / else: pass" was dropped too.
        
        if currentProcessingDocIDInIntFormat != previousProcessingDocIDInIntFormat:
            # A new result document starts on this line.
            numOfCurrentDocumentsProcessedInResultsSet += 1
            # Wrap the previous document up. BUGFIX: skip the phantom
            # "previous document" before the first real line (sentinel -1);
            # the original counted it as fully answerable at every level.
            if previousProcessingDocIDInIntFormat != -1:
                for index in range(0,len(eachPercentageInfoLinesList) ):
                    if resultMaintainFlagsDict[index] == True:
                        eachPercentageLineIndexWithNumOfResultsCanBeFullyAnsweredDict[index] += 1
            # Reset the flags for the document that starts on this line.
            for index in range(0,len(eachPercentageInfoLinesList) ):
                resultMaintainFlagsDict[index] = True
        
        # Check this line's piece against every reported pruning level.
        # BUGFIX: the original performed this check only when the docID matched
        # the previous line, so the FIRST piece of every result document (and
        # the only piece of one-term results) was never checked at all.
        pieceIdentifier = currentProcessingTerm + "_" + str(pieceNumIndex)
        for index,infoTuple in enumerate(eachPercentageInfoLinesList[0:12]):
            (_,_,_,pieceDict) = infoTuple
            if pieceIdentifier not in pieceDict:
                # One needed piece is pruned away at this level, so the result
                # can no longer be fully answered there.
                resultMaintainFlagsDict[index] = False
    
    # BUGFIX: wrap up the final document, which the original loop never
    # flushed (a document was only counted when a LATER docID appeared).
    if currentProcessingDocIDInIntFormat != -1:
        for index in range(0,len(eachPercentageInfoLinesList) ):
            if resultMaintainFlagsDict[index] == True:
                eachPercentageLineIndexWithNumOfResultsCanBeFullyAnsweredDict[index] += 1
    # some overall statistics:
    # index value = 0, 0.01
    # index value = 1, 0.05
    # index value = 2, 0.1
    # index value = 3, 0.2
    # index value = 4, 0.3
    # index value = 5, 0.4
    # index value = 6, 0.5
    # index value = 7, 0.6
    # index value = 8, 0.7
    # index value = 9, 0.8
    # index value = 10, 0.9
    NUM_OF_DOCUMENTS_IN_RESULTS_SET = numOfCurrentDocumentsProcessedInResultsSet
    print "NUM_OF_DOCUMENTS_IN_RESULTS_SET:",NUM_OF_DOCUMENTS_IN_RESULTS_SET
    print "--->","id","#OfResultsMaintainedUnderANDSemantics","PercentageOfResultsMaintainedUnderANDSemantics"
    for i in range(0,len(eachPercentageLineIndexWithNumOfResultsCanBeFullyAnsweredDict) ):
        print i,eachPercentageLineIndexWithNumOfResultsCanBeFullyAnsweredDict[i],eachPercentageLineIndexWithNumOfResultsCanBeFullyAnsweredDict[i]/NUM_OF_DOCUMENTS_IN_RESULTS_SET
         
    inputFileHandler.close()
    

    

