from __future__ import division
from operator import itemgetter, attrgetter
import os
import sys
import math

# Percentage-of-index-kept thresholds, addressed later by list index:
#   index 0 -> 0.01, 1 -> 0.05, 2 -> 0.1, 3 -> 0.2, 4 -> 0.3, 5 -> 0.4,
#   index 6 -> 0.5,  7 -> 0.6,  8 -> 0.7, 9 -> 0.8, 10 -> 0.9

TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX = 6451948010


def _numOfPostingsKept(fraction):
    # Number of postings retained when only `fraction` of the full inverted
    # index is kept (truncated toward zero, exactly as int() does).
    return int( TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX * fraction )


# index = 0  -> 64519480
TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_1Percent_kept = _numOfPostingsKept(0.01)
# index = 1  -> 322597400
TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_5Percent_kept = _numOfPostingsKept(0.05)
# index = 2  -> 645194801
TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_10Percent_kept = _numOfPostingsKept(0.1)
# index = 3  -> 1290389602
TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_20Percent_kept = _numOfPostingsKept(0.2)
# index = 4  -> 1935584403
TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_30Percent_kept = _numOfPostingsKept(0.3)
# index = 5  -> 2580779204
TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_40Percent_kept = _numOfPostingsKept(0.4)
# index = 6  -> 3225974005
TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_50Percent_kept = _numOfPostingsKept(0.5)
# index = 7  -> 3871168806
TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_60Percent_kept = _numOfPostingsKept(0.6)
# index = 8  -> 4516363607
TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_70Percent_kept = _numOfPostingsKept(0.7)
# Note: for the query terms which have been seen at least once, the total
# number of postings is 5009420937.
# index = 9  -> 5161558408
TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_80Percent_kept = _numOfPostingsKept(0.8)
# index = 10 -> 5806753209
TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_90Percent_kept = _numOfPostingsKept(0.9)

# The next goal: find the query terms hidden behind this table, i.e. get down
# to term level.  Assumption: we only take care of the query terms first.
# Accumulator for per-term info: a list of tuples, one tuple per query term
# (term name plus its pruning-related ratios / metadata).
queryTermInfoList = []

# Bookkeeping counts observed while preparing these inputs:
# # of terms in the lexicon: 37728619
# # of terms in the table which has freq < 20: 37726113
# # of terms in the table which are shown in the query log and has freq >= 20: 2506
# # of terms in the table which appear at least once and has freq < 20: 32752
# # of terms in the table which are popular query terms with freq >= 20: 2506
# # of terms in the table which appear at least once: 35258 
# # of query terms in the 85K queries: 35627 = 35258(has been recorded in the table) + 369(out of the lexicon and we will NOT care)
# # of query terms which are out of the lexicon: 432 - 369 = 63 (Is it? DONE, almost, I compute the value of 58, missing 5 terms :) )

# Map: query term -> its frequency in the collection (= posting-list length),
# for the 100K query-log terms.  File format per line: "<term> <freq>".
allQueryTermsWithTheirTermFreqInCollection = {}
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/100KQueryTermsWithTermFreqInCollection.txt"
inputFileHandler = open(inputFileName,"r")
# Stream the file line by line instead of readlines(), which slurps the whole
# file into memory at once.
for line in inputFileHandler:
    lineElements = line.strip().split(" ")
    queryTerm = lineElements[0]
    queryTermFreq = int(lineElements[1])
    if queryTerm not in allQueryTermsWithTheirTermFreqInCollection:
        allQueryTermsWithTheirTermFreqInCollection[queryTerm] = queryTermFreq
    else:
        # A repeated term means the input file is corrupt; abort immediately.
        # NOTE(review): the "error,Mark3" marker is reused by several loaders,
        # which makes the failure site ambiguous -- consider unique markers.
        print "error,Mark3"
        exit(1)
print "len(allQueryTermsWithTheirTermFreqInCollection):",len(allQueryTermsWithTheirTermFreqInCollection)
inputFileHandler.close()


# Map: query term -> its real frequency in the 85K training queries.
# File format per line: "<term> <freq>".
queryTermWithTheirRealFreqIn85KQueriesDict = {}
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/realFreqOfTermsInQueries_head_85K_0_85%"
inputFileHandler = open(inputFileName,"r")
# Stream lazily rather than readlines() (avoids loading the whole file).
for line in inputFileHandler:
    lineElements = line.strip().split(" ")
    queryTerm = lineElements[0]
    queryTermFreq = int(lineElements[1])
    if queryTerm not in queryTermWithTheirRealFreqIn85KQueriesDict:
        queryTermWithTheirRealFreqIn85KQueriesDict[queryTerm] = queryTermFreq
    else:
        # Duplicate term: treated as fatal input corruption.
        print "error,Mark3"
        exit(1)
print "len(queryTermWithTheirRealFreqIn85KQueriesDict):",len(queryTermWithTheirRealFreqIn85KQueriesDict)
inputFileHandler.close()

# Map: query term -> its real frequency in the 95K (100K * 95%) queries.
# Needed for computing the Ricardo s/f ratio below.
queryTermWithTheirRealFreqIn95KQueriesDict = {}
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/realFreqOfTermsIn_100KQueries_0_1_95%"
inputFileHandler = open(inputFileName,"r")
# Stream lazily rather than readlines() (avoids loading the whole file).
for line in inputFileHandler:
    lineElements = line.strip().split(" ")
    queryTerm = lineElements[0]
    queryTermFreq = int(lineElements[1])
    if queryTerm not in queryTermWithTheirRealFreqIn95KQueriesDict:
        queryTermWithTheirRealFreqIn95KQueriesDict[queryTerm] = queryTermFreq
    else:
        # Duplicate term: treated as fatal input corruption.
        print "error,Mark3"
        exit(1)
print "len(queryTermWithTheirRealFreqIn95KQueriesDict):",len(queryTermWithTheirRealFreqIn95KQueriesDict)
inputFileHandler.close()


cellProbabilityDict = {}
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/probabilityWeWantFinally20130418"
inputFileHandler = open(inputFileName,"r")
for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    cellKey = lineElements[0][3:]
    probability = float(lineElements[1])
    if cellKey not in cellProbabilityDict:
        cellProbabilityDict[cellKey] = probability
print "len(cellProbabilityDict):",len(cellProbabilityDict)
print "cellProbabilityDict['1_0']:",cellProbabilityDict['1_0']
inputFileHandler.close()


# Map: small-cell key -> list of the query terms that fall in that cell.
# File format per line: "<smallCellKey> <numOfTerms> <term1> <term2> ...".
smallCellKeyWithItsCorrespondingQueryTermListDict = {}
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/freqOfFreqInQueries_head_85K_0_85%_2D_with_query_terms"
inputFileHandler = open(inputFileName,"r")
# Stream lazily rather than readlines().
for line in inputFileHandler:
    lineElements = line.strip().split(" ")
    smallCellKey = lineElements[0]
    numOfQueryTerms = int( lineElements[1] )
    if smallCellKey not in smallCellKeyWithItsCorrespondingQueryTermListDict:
        # Fields 2 .. 2+numOfQueryTerms-1 are the cell's query terms; a slice
        # replaces the original element-by-element append loop.
        smallCellKeyWithItsCorrespondingQueryTermListDict[smallCellKey] = lineElements[2:2+numOfQueryTerms]
    else:
        # Duplicate cell key: treated as fatal input corruption.
        print "error,Mark1"
        exit(1)

print "len(smallCellKeyWithItsCorrespondingQueryTermListDict):",len(smallCellKeyWithItsCorrespondingQueryTermListDict)
# Sanity peeks at two known cells (KeyError if missing).
print "smallCellKeyWithItsCorrespondingQueryTermListDict['3_195']:",smallCellKeyWithItsCorrespondingQueryTermListDict['3_195']
print "smallCellKeyWithItsCorrespondingQueryTermListDict['3_191']:",smallCellKeyWithItsCorrespondingQueryTermListDict['3_191']
inputFileHandler.close()

# Purpose: load our 2D probability model (the 5-row denominator table).
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/probabilityTableUsingProfIdea20130420_fixed"
inputFileHandler = open(inputFileName,"r")
# Scan forward to the table's header marker line.
dataLine = inputFileHandler.readline()
while not dataLine.strip().startswith("table:denominator:freqOfFreqForTheLexiconTerm"):
    dataLine = inputFileHandler.readline()
# for debug ONLY
# print dataLine.strip()

# Discard the 4 header/separator lines following the marker.
for _ in range(4):
    inputFileHandler.readline()

# Read the 5 data rows, then the trailing "sum" row (read to keep the file
# cursor consistent, but unused afterwards).
row1DataLine = inputFileHandler.readline()
row2DataLine = inputFileHandler.readline()
row3DataLine = inputFileHandler.readline()
row4DataLine = inputFileHandler.readline()
row5DataLine = inputFileHandler.readline()
sumDataLine = inputFileHandler.readline()

# Only the 5 data rows participate in the verification pass below.
dataLines = [row1DataLine, row2DataLine, row3DataLine, row4DataLine, row5DataLine]
inputFileHandler.close()
# Logic ends.


# Check the correctness of row1
# Walk every cell of the 5-row probability table.  Each cell string encodes a
# contiguous range of small-cell IDs plus a term count ("[lo,hi]:count");
# expanding the small cells yields the individual query terms of that cell.
# For each term we record its Ricardo ratio (s/f) and our model's ratio (p/s)
# into queryTermInfoList, and cross-check the expanded term count.
for indexOutside,dataLine in enumerate(dataLines):
    for indexInside,cellString in enumerate( dataLine.strip().split(" ")[2:] ):
        # cellString looks like "[lo,hi]:count".
        currentFreq = int( cellString.split(":")[1] )
        rangeString = cellString.split(":")[0]
        # for debug ONLY
        # print indexOutside+1,indexInside+1,rangeString,currentFreq
        # Cell key is "row_col", both 1-based.
        cellKey = str(indexOutside+1) + "_" + str(indexInside+1)
        smallRangeLowerBoundID = int( rangeString.split("[")[1].split("]")[0].split(",")[0] )
        smallRangeUpperBoundID = int( rangeString.split("[")[1].split("]")[0].split(",")[1] )
        # Let's operate on the query terms themselves
        tempQueryTermList = []
        for i in range(smallRangeLowerBoundID,smallRangeUpperBoundID+1):
            # NOTE(review): smallCellKey is built from indexInside (the
            # column) while cellKey uses indexOutside for the row -- confirm
            # this row/column orientation is intended.
            smallCellKey = str(indexInside+1) + "_" + str(i)
            # before adding the whole thing, we can just have the query term with their designed tuples
            for currentProcessingQueryTerm in smallCellKeyWithItsCorrespondingQueryTermListDict[smallCellKey]:
                # This is where you can put the query term and the tuple in.
                if currentProcessingQueryTerm in queryTermWithTheirRealFreqIn95KQueriesDict and currentProcessingQueryTerm in allQueryTermsWithTheirTermFreqInCollection:
                    currentProcessingQueryTermFreqInCollection = allQueryTermsWithTheirTermFreqInCollection[currentProcessingQueryTerm]
                    currentProcessingQueryTermPredictedProbability = cellProbabilityDict[cellKey]
                    # comparedAlg1 from [SIGIR 2007 Ricardo A.Baeza-Yates et al]
                    # 2 parameters:
                    # (1) frequency f for each query term from query log
                    # (2) size s of the associate inverted list
                    # (3) define the ratio s/f, and greedily select the posting list with minimum ratio
                    ricardoRatio = currentProcessingQueryTermFreqInCollection / queryTermWithTheirRealFreqIn95KQueriesDict[currentProcessingQueryTerm]
                    # newly added, 2013/05/02
                    ourProbabilityModelRatio = currentProcessingQueryTermPredictedProbability / currentProcessingQueryTermFreqInCollection
                    # NOTE(review): this appends a 3-field tuple, but the
                    # consumers later in this file unpack 6 fields per tuple
                    # (term, realFreq, freqInCollection, predictedProbability,
                    # ricardoRatio, ourProbabilityModelRatio) -- this looks
                    # like a latent ValueError; confirm intended tuple shape.
                    currentProcessingQueryTermTuple = (currentProcessingQueryTerm,ricardoRatio,ourProbabilityModelRatio)
                    queryTermInfoList.append(currentProcessingQueryTermTuple)
                else:
                    # Term missing from either frequency table: fatal.
                    print "error,Mark4"
                    exit(1)
            
            tempQueryTermList += smallCellKeyWithItsCorrespondingQueryTermListDict[smallCellKey]
      
        # The number of expanded terms must match the cell's recorded count.
        if len(tempQueryTermList) != currentFreq:
            print "error,Mark2"
            exit(1)
        else:
            pass

    # for debug ONLY
    # print "len(queryTermInfoList):",len(queryTermInfoList)
    


print "TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX:",TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX
print "TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_1Percent_kept:",TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_1Percent_kept 
print "TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_5Percent_kept:",TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_5Percent_kept 
print "TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_10Percent_kept:",TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_10Percent_kept 
print "TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_20Percent_kept:",TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_20Percent_kept 
print "TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_30Percent_kept:",TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_30Percent_kept 
print "TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_40Percent_kept:",TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_40Percent_kept 
print "TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_50Percent_kept:",TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_50Percent_kept 
print "TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_60Percent_kept:",TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_60Percent_kept 
print "TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_70Percent_kept",TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_70Percent_kept 
print "TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_80Percent_kept:",TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_80Percent_kept 
print "TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_90Percent_kept:",TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_90Percent_kept


LIST_FOR_STORING_THRESHOLD_NUMBERS = []
LIST_FOR_STORING_THRESHOLD_NUMBERS.append(TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_1Percent_kept)
LIST_FOR_STORING_THRESHOLD_NUMBERS.append(TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_5Percent_kept)
LIST_FOR_STORING_THRESHOLD_NUMBERS.append(TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_10Percent_kept)
LIST_FOR_STORING_THRESHOLD_NUMBERS.append(TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_20Percent_kept)
LIST_FOR_STORING_THRESHOLD_NUMBERS.append(TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_30Percent_kept)
LIST_FOR_STORING_THRESHOLD_NUMBERS.append(TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_40Percent_kept)
LIST_FOR_STORING_THRESHOLD_NUMBERS.append(TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_50Percent_kept)
LIST_FOR_STORING_THRESHOLD_NUMBERS.append(TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_60Percent_kept)
LIST_FOR_STORING_THRESHOLD_NUMBERS.append(TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_70Percent_kept)
LIST_FOR_STORING_THRESHOLD_NUMBERS.append(TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_80Percent_kept)
LIST_FOR_STORING_THRESHOLD_NUMBERS.append(TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_90Percent_kept)
LIST_FOR_STORING_THRESHOLD_NUMBERS.append(TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX)

print "len(LIST_FOR_STORING_THRESHOLD_NUMBERS):",len(LIST_FOR_STORING_THRESHOLD_NUMBERS)



# Add the popular query terms (real frequency >= 20 in the 85K training
# queries) into queryTermInfoList as 6-field tuples.
popularTermsCounter = 0
NUM_QUERY_TERM_POSITIONS = 351734
for queryTerm in queryTermWithTheirRealFreqIn85KQueriesDict:
    realFreqIn85KQueries = queryTermWithTheirRealFreqIn85KQueriesDict[queryTerm]
    if realFreqIn85KQueries < 20:
        continue
    popularTermsCounter += 1

    # Every popular term must also appear in the 95K-query frequency table.
    if queryTerm not in queryTermWithTheirRealFreqIn95KQueriesDict:
        print "Error,Mark2"
        exit(1)
    # comparedAlg1 from [SIGIR 2007 Ricardo A.Baeza-Yates et al]:
    # ratio s/f = (posting-list length) / (query-log frequency).
    ricardoRatio = allQueryTermsWithTheirTermFreqInCollection[queryTerm] / queryTermWithTheirRealFreqIn95KQueriesDict[queryTerm]

    # newly added, 2013/05/02: our model's ratio = estimated probability
    # (real freq / total term positions) divided by the posting-list length.
    freqInCollection = allQueryTermsWithTheirTermFreqInCollection[queryTerm]
    estimatedProbability = realFreqIn85KQueries / NUM_QUERY_TERM_POSITIONS
    ourProbabilityModelRatio = estimatedProbability / freqInCollection
    queryTermInfoList.append( (queryTerm, realFreqIn85KQueries, freqInCollection, estimatedProbability, ricardoRatio, ourProbabilityModelRatio) )

print "popularTermsCounter:",popularTermsCounter
print "len(queryTermInfoList):",len(queryTermInfoList)

# The input file format:
# lineElements[0]: queryTerm
# lineElements[1]: queryTermRealFreqIn85KQueries
# lineElements[2]: queryTermFreqInCollection
# lineElements[3]: queryTermPredictedProbability
# lineElements[4]: ricardoRatio

# Let's add the terms in the cell with the key ROW5_0 (about 13296 terms) into the queryTermInfoList as well.
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/CellROW5_0QueryTermsWithMetaInfo"
inputFileHandler = open(inputFileName,"r")
for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    # index value = 0: str(queryTerm)
    # index value = 1: int(queryTermRealFreqIn85KQueries)
    # index value = 2: int(queryTermFreqInCollection)
    # index value = 3: float(queryTermPredictedProbability)
    # index value = 4: float(ricardoRatio)
    # index value = 5: float(ourProbabilityModelRatio)
    # Updated on 2013/05/02
    ourProbabilityModelRatio = float(lineElements[3]) / int(lineElements[2])
    currentProcessingPopularTermTuple = ( str(lineElements[0]), int(lineElements[1]), int(lineElements[2]), float(lineElements[3]), float(lineElements[4]), ourProbabilityModelRatio )
    queryTermInfoList.append(currentProcessingPopularTermTuple)
inputFileHandler.close()
print "len(queryTermInfoList):",len(queryTermInfoList)

# Let's add the terms in the cell with the key ROW4_0 (about 32085 terms) into the queryTermInfoList as well.
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/CellROW4_0QueryTermsWithMetaInfo"
inputFileHandler = open(inputFileName,"r")
for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    # index value = 0: str(queryTerm)
    # index value = 1: int(queryTermRealFreqIn85KQueries)
    # index value = 2: int(queryTermFreqInCollection)
    # index value = 3: float(queryTermPredictedProbability)
    # index value = 4: float(ricardoRatio)
    # index value = 5: float(ourProbabilityModelRatio)
    # Updated on 2013/05/02
    ourProbabilityModelRatio = float(lineElements[3]) / int(lineElements[2])
    currentProcessingPopularTermTuple = ( str(lineElements[0]), int(lineElements[1]), int(lineElements[2]), float(lineElements[3]), float(lineElements[4]), ourProbabilityModelRatio )
    queryTermInfoList.append(currentProcessingPopularTermTuple)
inputFileHandler.close()
print "len(queryTermInfoList):",len(queryTermInfoList)

# Let's add the terms in the cell with the key ROW3_0 (about 87252 terms) into the queryTermInfoList as well.
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/CellROW3_0QueryTermsWithMetaInfo"
inputFileHandler = open(inputFileName,"r")
for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    # index value = 0: str(queryTerm)
    # index value = 1: int(queryTermRealFreqIn85KQueries)
    # index value = 2: int(queryTermFreqInCollection)
    # index value = 3: float(queryTermPredictedProbability)
    # index value = 4: float(ricardoRatio)
    # index value = 5: float(ourProbabilityModelRatio)
    # Updated on 2013/05/02
    ourProbabilityModelRatio = float(lineElements[3]) / int(lineElements[2])
    currentProcessingPopularTermTuple = ( str(lineElements[0]), int(lineElements[1]), int(lineElements[2]), float(lineElements[3]), float(lineElements[4]), ourProbabilityModelRatio )
    queryTermInfoList.append(currentProcessingPopularTermTuple)
inputFileHandler.close()
print "len(queryTermInfoList):",len(queryTermInfoList)

# Let's add the terms in the cell with the key ROW2_0 (about 688369 terms) into the queryTermInfoList as well.
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/CellROW2_0QueryTermsWithMetaInfo"
inputFileHandler = open(inputFileName,"r")
for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    # index value = 0: str(queryTerm)
    # index value = 1: int(queryTermRealFreqIn85KQueries)
    # index value = 2: int(queryTermFreqInCollection)
    # index value = 3: float(queryTermPredictedProbability)
    # index value = 4: float(ricardoRatio)
    # index value = 5: float(ourProbabilityModelRatio)
    # Updated on 2013/05/02
    ourProbabilityModelRatio = float(lineElements[3]) / int(lineElements[2])
    currentProcessingPopularTermTuple = ( str(lineElements[0]), int(lineElements[1]), int(lineElements[2]), float(lineElements[3]), float(lineElements[4]), ourProbabilityModelRatio )
    queryTermInfoList.append(currentProcessingPopularTermTuple)
inputFileHandler.close()
print "len(queryTermInfoList):",len(queryTermInfoList)
exit(1)

'''
# Let's add the terms in the cell with the key ROW1_0 (about 36872359 terms) into the queryTermInfoList as well.
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/CellROW1_0QueryTermsWithMetaInfo"
inputFileHandler = open(inputFileName,"r")
for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    # index value = 0: str(queryTerm)
    # index value = 1: int(queryTermRealFreqIn85KQueries)
    # index value = 2: int(queryTermFreqInCollection)
    # index value = 3: float(queryTermPredictedProbability)
    # index value = 4: float(ricardoRatio)
    # index value = 5: float(ourProbabilityModelRatio)
    # Updated on 2013/05/02
    ourProbabilityModelRatio = float(lineElements[3]) / int(lineElements[2])
    currentProcessingPopularTermTuple = ( str(lineElements[0]), int(lineElements[1]), int(lineElements[2]), float(lineElements[3]), float(lineElements[4]), ourProbabilityModelRatio )
    queryTermInfoList.append(currentProcessingPopularTermTuple)
inputFileHandler.close()
print "len(queryTermInfoList):",len(queryTermInfoList)
'''



totalNumOfPostingInQueryTermInfoList = 0
for tuple in queryTermInfoList:
    # print tuple
    (currentProcessingQueryTerm, currentProcessingQueryTermRealFreqInTrainingQL, currentProcessingQueryTermFreqInCollection, currentProcessingQueryTermPredictedProbability,ricardoRatio,ourProbabilityModelRatio) = tuple
    totalNumOfPostingInQueryTermInfoList += currentProcessingQueryTermFreqInCollection
print "totalNumOfPostingInQueryTermInfoList:",totalNumOfPostingInQueryTermInfoList


# Output file that records the query terms selected at each pruning level.
# NOTE: the handler is written to and closed further down in the file.
outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/term_centric_pruning_simulation/queryTermsRecorded"
outputFileHandler = open(outputFileName,"w")

for turnNum in range(0,2):
    print
    print "turnNum:",turnNum
    # Now, let's operate on this queryTermInfoList
    # example: 
    # (0) currentProcessingQueryTerm
    # (1) currentProcessingQueryTermRealFreqInTrainingQL
    # (2) currentProcessingQueryTermFreqInCollection
    # (3) currentProcessingQueryTermPredictedProbability
    # (4) ricardoRatio (s/f)
    # (5) ourProbabilityModelRatio (ourPredictedProbability / s(the length of the lists) )
    # 2013/05/03 newly added
    # (6) goodTuringProbabilityModelRatio (goodTuringPredictedProbability / s(the length of the lists) )
    # queryTermInfoList[0]: ('000sites', 1, 2, 5.10096995281e-07,2)
    sortedKeyNum = turnNum + 4
    if sortedKeyNum == 4:
        print "sort the list by s/f, from smallest to largest"
        queryTermInfoList.sort(cmp=None, key=itemgetter(sortedKeyNum), reverse=False)
    elif sortedKeyNum == 5:
        print "sort the list by ourProbabilityModelRatio( probability/s ), from largest to smallest"
        # Temp try, sort and pick from smallest to largest
        # The results are terrible
        # 0 0
        # 1 1
        # 2 2
        # 3 5
        # 4 28
        # 5 81
        # 6 171
        # 7 392
        # 8 1131
        # 9 4502
        # 10 35893
        # 11 97164
        queryTermInfoList.sort(cmp=None, key=itemgetter(sortedKeyNum), reverse=True)
    elif sortedKeyNum == 6:
        '''
        # This is NOT a valid method tested. Let's change it to good-turing method for testing
        secondSortedKeyNum = turnNum + 2
        print "first level sorting. Sort the list by probability, from largest to smallest"
        print "Among those terms which has the same predicted probability. Do a second level sorting."
        print "And then sort the list by their list length, this time, from smallest to largest."
        # step1: sort on second key: the length of the list, from smallest to largest.
        queryTermInfoList.sort(cmp=None, key=itemgetter(secondSortedKeyNum) )
        # step2: sort on the first main key: the probability of these query terms, from largest to smallest.
        queryTermInfoList.sort(cmp=None, key=itemgetter(sortedKeyNum), reverse=True)
        '''
        print "sort the list by good-turingProbabilityModelRatio( probability/s ), from largest to smallest"
        # In this setting sortedKeyNum equals to 6
        queryTermInfoList.sort(cmp=None, key=itemgetter(sortedKeyNum), reverse=True)   
    else:
        print "Unsupported Operations."
        exit(1)
    
    print "first 20 query terms which will be injected."
    for i in range(0,20):
        print "queryTermInfoList[",i,"]:",queryTermInfoList[i]
    
    # init some variables
    queryTermDictsForEachPercentageStoringInList = []
    listIndex = 0
    previousTotalNumOfPostingCounter = 0
    currentTotalNumOfPostingCounter = 0
    current_INVERTED_INDEX_queryTermsAndFreqInCollectionTuple_list = []
    leftDistance = 0
    rightDistance = 0
    
    for tupleIndex,tuple in enumerate( queryTermInfoList ):
        (currentProcessingQueryTerm, currentProcessingQueryTermRealFreqInTrainingQL, currentProcessingQueryTermFreqInCollection, currentProcessingQueryTermPredictedProbability,ricardoRatio,ourProbabilityModelRatio) = tuple
        previousTotalNumOfPostingCounter = currentTotalNumOfPostingCounter
        currentTotalNumOfPostingCounter += currentProcessingQueryTermFreqInCollection
        current_INVERTED_INDEX_queryTermsAndFreqInCollectionTuple_list.append( (currentProcessingQueryTerm,currentProcessingQueryTermFreqInCollection) )
        
        if currentTotalNumOfPostingCounter >= LIST_FOR_STORING_THRESHOLD_NUMBERS[listIndex]:
            leftDistance = LIST_FOR_STORING_THRESHOLD_NUMBERS[listIndex] - previousTotalNumOfPostingCounter 
            rightDistance = currentTotalNumOfPostingCounter - LIST_FOR_STORING_THRESHOLD_NUMBERS[listIndex]
            if rightDistance >= leftDistance:
                # for debug ONLY
                '''
                print "listIndex:",listIndex
                print "tupleIndex:",tupleIndex
                print "currentTotalNumOfPostingCounter(Used):",currentTotalNumOfPostingCounter
                print "previousTotalNumOfPostingCounter:",previousTotalNumOfPostingCounter
                print "len(current_INVERTED_INDEX_queryTermsAndFreqInCollectionTuple_list):",len(current_INVERTED_INDEX_queryTermsAndFreqInCollectionTuple_list)
                # print "current_INVERTED_INDEX_queryTermsAndFreqInCollectionTuple_list:",current_INVERTED_INDEX_queryTermsAndFreqInCollectionTuple_list
                print
                '''
                tempDict = {}
                
                for tuple in current_INVERTED_INDEX_queryTermsAndFreqInCollectionTuple_list:
                    (queryTerm,queryTermFreqInCollection) = tuple
                    if queryTerm not in tempDict:
                        tempDict[queryTerm] = queryTermFreqInCollection
                    else:
                        print "Error,Mark1"
                        exit(1)
                
                queryTermDictsForEachPercentageStoringInList.append( (listIndex,currentTotalNumOfPostingCounter,len(tempDict),tempDict) )
                
            else:
                # for debug ONLY
                '''
                print "listIndex:",listIndex
                print "tupleIndex:",tupleIndex
                print "currentTotalNumOfPostingCounter:",currentTotalNumOfPostingCounter
                print "previousTotalNumOfPostingCounter(Used):",previousTotalNumOfPostingCounter
                print "len(current_INVERTED_INDEX_queryTermsAndFreqInCollectionTuple_list)",len(current_INVERTED_INDEX_queryTermsAndFreqInCollectionTuple_list)-1
                # print "current_INVERTED_INDEX_queryTermsAndFreqInCollectionTuple_list - lastElement:",current_INVERTED_INDEX_queryTermsAndFreqInCollectionTuple_list[:-1]
                print
                '''
                tempDict = {}
                
                for tuple in current_INVERTED_INDEX_queryTermsAndFreqInCollectionTuple_list[:-1]:
                    (queryTerm,queryTermFreqInCollection) = tuple
                    if queryTerm not in tempDict:
                        tempDict[queryTerm] = queryTermFreqInCollection
                    else:
                        print "Error,Mark1"
                        exit(1)
                
                queryTermDictsForEachPercentageStoringInList.append( (listIndex,previousTotalNumOfPostingCounter,len(tempDict),tempDict) )
                
            listIndex += 1
    # for debug ONLY
    '''
    print "NOT attain the listIndex:",listIndex
    print "Percentage of the total # of postings in the lexicon:",currentTotalNumOfPostingCounter / TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX
    print "tupleIndex:",tupleIndex 
    print "currentTotalNumOfPostingCounter(Used):",currentTotalNumOfPostingCounter
    print "previousTotalNumOfPostingCounter(NOT Used):",previousTotalNumOfPostingCounter
    print "len(current_INVERTED_INDEX_queryTermsAndFreqInCollectionTuple_list)",len(current_INVERTED_INDEX_queryTermsAndFreqInCollectionTuple_list)
    print
    '''
    tempDict = {}
    for tuple in current_INVERTED_INDEX_queryTermsAndFreqInCollectionTuple_list:
        (queryTerm,queryTermFreqInCollection) = tuple
        if queryTerm not in tempDict:
            tempDict[queryTerm] = queryTermFreqInCollection
        else:
            print "Error,Mark1"
            exit(1)
    
    queryTermDictsForEachPercentageStoringInList.append( (listIndex,currentTotalNumOfPostingCounter,len(tempDict),tempDict) )
    
    
    # use the thing in queryTermDictsForEachPercentageStoringInList to answer queries.
    # print "Use the thing in queryTermDictsForEachPercentageStoringInList to answer queries"
    # print "len(queryTermDictsForEachPercentageStoringInList):",len(queryTermDictsForEachPercentageStoringInList)
    
    # Let's record those query terms in file first
    # Report one line per percentage bucket, after cross-checking its stored
    # posting counter against the sum of the per-term frequencies in its dict.
    print "--->","id","designed#OfPostingsInjected","percentageOfAll","actual#OfPostingsInjected","percentageOfAll","#OfTermsIn"
    for index,tuple in enumerate(queryTermDictsForEachPercentageStoringInList):
        (listIndex,currentTotalNumOfPostingCounter,numOfQueryTermsInDict,queryTermsInDict) = tuple
        
        # Re-derive the posting total from the dict itself; a mismatch means
        # the bucket construction above is broken, so abort immediately.
        tempCheckTotalNumPostings = 0
        for queryTerm in queryTermsInDict:
            tempCheckTotalNumPostings += queryTermsInDict[queryTerm]
        if tempCheckTotalNumPostings != currentTotalNumOfPostingCounter:
            print "tempCheckTotalNumPostings != currentTotalNumOfPostingCounter"
            print "tempCheckTotalNumPostings:",tempCheckTotalNumPostings
            print "currentTotalNumOfPostingCounter:",currentTotalNumOfPostingCounter
            exit(1)
        
        # Designed vs. actual injected postings, both as absolute counts and
        # as fractions of the full index (true division via the __future__
        # import at the top of the file).
        print listIndex,LIST_FOR_STORING_THRESHOLD_NUMBERS[listIndex], LIST_FOR_STORING_THRESHOLD_NUMBERS[listIndex]/TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX,currentTotalNumOfPostingCounter,currentTotalNumOfPostingCounter/TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX, numOfQueryTermsInDict
        
        #if listIndex == 0:
        #    tempList = list(queryTermsInDict)
        #    print "    --->(DEBUG):",sorted(tempList)
        
    # The following logic is for iterate all the 100K queries to see whether the pruned index can answer those queries.
    #
    # Per percentage bucket (indices 0..11: the eleven thresholds 0.01..0.9
    # listed at the top of the file, plus the final remainder bucket) track:
    #   TotalNumOfSolvableQueriesDict  -- count of fully answerable queries
    #   SolvableQueriesIDListDict      -- the IDs of those queries
    # The 24 copy-pasted literal assignments were collapsed into one loop;
    # the resulting dicts are identical (keys 0..11, values 0 / fresh []).
    indexForQueryTermDictsForEachPercentageStoringInListAndCurrentTotalNumOfSolvableQueriesDict = {}
    indexForQueryTermDictsForEachPercentageStoringInListAndCurrentSolvableQueriesIDListDict = {}
    for bucketIndex in range(0,12):
        indexForQueryTermDictsForEachPercentageStoringInListAndCurrentTotalNumOfSolvableQueriesDict[bucketIndex] = 0
        indexForQueryTermDictsForEachPercentageStoringInListAndCurrentSolvableQueriesIDListDict[bucketIndex] = []
    
    
    
    # Stream the 100K-query trace; for every query decide, per percentage
    # bucket, whether ALL of its sanitized, lower-cased, deduplicated terms
    # are present in that bucket's term dict (i.e. AND semantics).
    # option0
    inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/100KQueries_2_3_5%"
    inputFileHandler = open(inputFileName,"r")
    
    for line in inputFileHandler.readlines():
        # Trace line format: "<queryID>: term1 term2 ..."
        queryID = line.strip().split(":")[0]
        queryTermList = line.strip().split(":")[1].strip().split(" ")
        
        # Rebuild the raw query string.  str.join replaces the original
        # quadratic "+=" loop; the trailing space matches the old output and
        # is stripped again below.
        data = " ".join(queryTermList) + " "
        
        # Sanitize: every character that is not an ASCII digit (48-57),
        # upper-case letter (65-90), lower-case letter (97-122) or space (32)
        # becomes a space.  Single-pass join instead of the original O(n^2)
        # slice-and-rebuild loop; the accepted character set is unchanged.
        data = "".join(
            c if (("0" <= c <= "9") or ("A" <= c <= "Z") or ("a" <= c <= "z") or c == " ") else " "
            for c in data
        )
        
        currentNewQueryTermList = data.strip().split(" ")
        currentNewQueryTermDict = {}
        
        # Lower-case and deduplicate the surviving terms.
        for queryTerm in currentNewQueryTermList:
            if queryTerm.strip() != "":
                queryTermLower = queryTerm.lower()
                if queryTermLower not in currentNewQueryTermDict:
                    currentNewQueryTermDict[queryTermLower] = 1
        
        # Assume the query is solvable in every bucket until a missing term
        # proves otherwise (the 12 literal True assignments became one loop).
        indexForQueryTermDictsForEachPercentageStoringInListAndCurrentQuerySolvableFlagDict = {}
        for bucketIndex in range(0,12):
            indexForQueryTermDictsForEachPercentageStoringInListAndCurrentQuerySolvableFlagDict[bucketIndex] = True
        
        # A single term absent from a bucket's dict makes the whole query
        # unanswerable under AND semantics for that bucket.
        for queryTerm in currentNewQueryTermDict:
            for indexForQueryTermDictsForEachPercentageStoringInList,entry in enumerate( queryTermDictsForEachPercentageStoringInList ):
                (listIndex,currentTotalNumOfPostingCounter,_,tempDict) = entry
                if queryTerm not in tempDict:
                    indexForQueryTermDictsForEachPercentageStoringInListAndCurrentQuerySolvableFlagDict[indexForQueryTermDictsForEachPercentageStoringInList] = False
        
        # Credit every bucket that still holds all terms of this query.
        for key in indexForQueryTermDictsForEachPercentageStoringInListAndCurrentQuerySolvableFlagDict:
            if indexForQueryTermDictsForEachPercentageStoringInListAndCurrentQuerySolvableFlagDict[key] == True:
                indexForQueryTermDictsForEachPercentageStoringInListAndCurrentTotalNumOfSolvableQueriesDict[key] += 1
                indexForQueryTermDictsForEachPercentageStoringInListAndCurrentSolvableQueriesIDListDict[key].append(queryID)
    # some overall statistics:
    # index value = 0, 0.01
    # index value = 1, 0.05
    # index value = 2, 0.1
    # index value = 3, 0.2
    # index value = 4, 0.3
    # index value = 5, 0.4
    # index value = 6, 0.5
    # index value = 7, 0.6
    # index value = 8, 0.7
    # index value = 9, 0.8
    # index value = 10, 0.9
    # Per-bucket count of queries answerable with AND semantics.  Iterating
    # range(0, len(dict)) is valid here only because the dict keys are
    # exactly the consecutive integers 0..11 set up above.
    print "--->","id","#OfQueriesAnsweredUsingANDSemantics"
    for i in range(0,len(indexForQueryTermDictsForEachPercentageStoringInListAndCurrentTotalNumOfSolvableQueriesDict) ):
        print i,indexForQueryTermDictsForEachPercentageStoringInListAndCurrentTotalNumOfSolvableQueriesDict[i]
        
        #if i == 0:
        #    print "    --->(DEBUG):",indexForQueryTermDictsForEachPercentageStoringInListAndCurrentSolvableQueriesIDListDict[i]
        
    # Close the query-trace file opened at the top of this section.
    inputFileHandler.close()
# Close the output file (the handle is opened before this section of the
# file -- presumably at the start of the enclosing run; verify upstream).
outputFileHandler.close()

# NOTE(review): everything below is commented-out scratch/checking code kept
# for reference only; the triple-quoted strings are never executed.
'''
print "check begins..."
for tuple in queryTermInfoList:
    print tuple
print "check ends."
'''

# print "queryTermInfoList[0]:",queryTermInfoList[0]
# print "queryTermInfoList[1]:",queryTermInfoList[1]

'''
print "sort the list by real freq in the query trace"
queryTermInfoList.sort(cmp=None, key=itemgetter(1), reverse=True)
print "queryTermInfoList[0]:",queryTermInfoList[0]
print "queryTermInfoList[1]:",queryTermInfoList[1]
print "queryTermInfoList[2]:",queryTermInfoList[2]
print "queryTermInfoList[3]:",queryTermInfoList[3]
'''

'''
# Purpose: another part of checking
# do some logic on the sumDataLine
totalFreq = 0
for cellString in sumDataLine.strip().split(" ")[1:]:
    currentFreq = int( cellString.split(":")[1] )
    totalFreq += currentFreq
print "totalFreq:",totalFreq
inputFileHandler.close()
'''







