# Updated by Wei 2013/08/22 afternoon at school
# Purpose: simulation of how keeping a term IN or pruning it OUT works in the context of static index pruning
# This is mostly from the term's perspective
# Overall, the quality of this term IN/OUT simulation seems good to me.

from __future__ import division
from operator import itemgetter, attrgetter
import os
import sys
import math

# Mapping of pruning index value -> fraction of postings kept:
#   0:0.01  1:0.05  2:0.1  3:0.2  4:0.3  5:0.4
#   6:0.5   7:0.6   8:0.7  9:0.8  10:0.9

# This count includes every term in the lexicon.
TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX = 6451948010
# This count does NOT include the terms in row1_0 (36872359 terms in total):
# TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX = 6323944039 # 98% of the whole index

def _postings_kept(fraction):
    # Number of postings retained when keeping the given fraction of the full index.
    return int( TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX * fraction )

# Thresholds for index values 0..10; the trailing comments record the
# hand-computed expected values from the original notes.
TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_1Percent_kept = _postings_kept(0.01)   # index 0:  64519480
TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_5Percent_kept = _postings_kept(0.05)   # index 1:  322597400
TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_10Percent_kept = _postings_kept(0.1)   # index 2:  645194801
TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_20Percent_kept = _postings_kept(0.2)   # index 3:  1290389602
TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_30Percent_kept = _postings_kept(0.3)   # index 4:  1935584403
TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_40Percent_kept = _postings_kept(0.4)   # index 5:  2580779204
TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_50Percent_kept = _postings_kept(0.5)   # index 6:  3225974005
TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_60Percent_kept = _postings_kept(0.6)   # index 7:  3871168806
TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_70Percent_kept = _postings_kept(0.7)   # index 8:  4516363607
# Note: for the query terms seen at least once, the total number of postings is 5009420937.
TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_80Percent_kept = _postings_kept(0.8)   # index 9:  5161558408
TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_90Percent_kept = _postings_kept(0.9)   # index 10: 5806753209


print "TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX:",TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX
print "TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_1Percent_kept:",TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_1Percent_kept 
print "TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_5Percent_kept:",TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_5Percent_kept 
print "TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_10Percent_kept:",TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_10Percent_kept 
print "TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_20Percent_kept:",TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_20Percent_kept 
print "TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_30Percent_kept:",TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_30Percent_kept 
print "TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_40Percent_kept:",TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_40Percent_kept 
print "TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_50Percent_kept:",TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_50Percent_kept 
print "TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_60Percent_kept:",TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_60Percent_kept 
print "TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_70Percent_kept",TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_70Percent_kept 
print "TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_80Percent_kept:",TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_80Percent_kept 
print "TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_90Percent_kept:",TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_90Percent_kept


LIST_FOR_STORING_THRESHOLD_NUMBERS = []
LIST_FOR_STORING_THRESHOLD_NUMBERS.append(TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_1Percent_kept)
LIST_FOR_STORING_THRESHOLD_NUMBERS.append(TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_5Percent_kept)
LIST_FOR_STORING_THRESHOLD_NUMBERS.append(TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_10Percent_kept)
LIST_FOR_STORING_THRESHOLD_NUMBERS.append(TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_20Percent_kept)
LIST_FOR_STORING_THRESHOLD_NUMBERS.append(TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_30Percent_kept)
LIST_FOR_STORING_THRESHOLD_NUMBERS.append(TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_40Percent_kept)
LIST_FOR_STORING_THRESHOLD_NUMBERS.append(TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_50Percent_kept)
LIST_FOR_STORING_THRESHOLD_NUMBERS.append(TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_60Percent_kept)
LIST_FOR_STORING_THRESHOLD_NUMBERS.append(TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_70Percent_kept)
LIST_FOR_STORING_THRESHOLD_NUMBERS.append(TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_80Percent_kept)
LIST_FOR_STORING_THRESHOLD_NUMBERS.append(TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_90Percent_kept)
LIST_FOR_STORING_THRESHOLD_NUMBERS.append(TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX)

# for debug ONLY
print "len(LIST_FOR_STORING_THRESHOLD_NUMBERS):",len(LIST_FOR_STORING_THRESHOLD_NUMBERS)

# assumption: we only take care of the query terms first.
# queryTermInfoList is a flat list of tuples, one per term:
# (queryTerm, freqInCollection, ricardoRatio, our2DRatio, our1DRatio, goodTuringRatio)
queryTermInfoList = []
TOTAL_NUM_OF_TERMS_IN_LEXICON = 37728619 # around 37M
# running count of lexicon terms not yet pushed into queryTermInfoList
total_num_of_terms_left_unprocessed = TOTAL_NUM_OF_TERMS_IN_LEXICON

# Bookkeeping numbers from earlier analysis runs:
# # of terms in the lexicon: 37728619
# # of terms in the table which has freq < 20: 37726113
# # of terms in the table which are shown in the query log and has freq >= 20: 2506
# # of terms in the table which appear at least once and has freq < 20: 32752
# # of terms in the table which are popular query terms with freq >= 20: 2506
# # of terms in the table which appear at least once: 35258
# # of query terms in the 85K queries: 35627 = 35258(has been recorded in the table) + 369(out of the lexicon and we will NOT care)
# # of query terms which are out of the lexicon: 432 - 369 = 63 (Is it? DONE, almost, I compute the value of 58, missing 5 terms :) )

# key: the query term (string)
# value: the length of the inverted list (# of postings) for that term (int)
allQueryTermsWithTheirTermFreqInCollection = {}
# 38871 query terms in total for the head 100K queries
# File format per line: "<term> <freqInCollection>"
inputFileName1 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/100KQueryTermsWithTermFreqInCollection.txt"
with open(inputFileName1,"r") as inputFileHandler:
    # iterate the file lazily instead of materializing readlines()
    for line in inputFileHandler:
        lineElements = line.strip().split(" ")
        queryTerm = lineElements[0]
        queryTermFreq = int(lineElements[1])
        if queryTerm in allQueryTermsWithTheirTermFreqInCollection:
            # a duplicate term line means the input file is corrupt -- abort
            print "error,Mark3"
            sys.exit(1)
        allQueryTermsWithTheirTermFreqInCollection[queryTerm] = queryTermFreq
# for debug ONLY
print "len(allQueryTermsWithTheirTermFreqInCollection):",len(allQueryTermsWithTheirTermFreqInCollection)

# key: term (string)
# value: real (unsmoothed) freq of the term in the head 95K queries (int)
queryTermWithTheirRealFreqIn95KQueriesDict = {}
# The real freqs of terms in the 95K queries feed the Good-Turing / ricardo methods.
# File format per line: "<term> <realFreqIn95K>"
inputFileName2 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/realFreqOfTermsIn_100KQueries_0_1_95%"
with open(inputFileName2,"r") as inputFileHandler:
    # iterate the file lazily instead of materializing readlines()
    for line in inputFileHandler:
        lineElements = line.strip().split(" ")
        queryTerm = lineElements[0]
        queryTermFreq = int(lineElements[1])
        if queryTerm in queryTermWithTheirRealFreqIn95KQueriesDict:
            # a duplicate term line means the input file is corrupt -- abort
            print "error,Mark3"
            sys.exit(1)
        queryTermWithTheirRealFreqIn95KQueriesDict[queryTerm] = queryTermFreq
print "len(queryTermWithTheirRealFreqIn95KQueriesDict):",len(queryTermWithTheirRealFreqIn95KQueriesDict)

# key: term (string)
# value: real (unsmoothed) freq of the term in the head 85K queries (int)
# (the original comment said 95K; the file name and dict name both say 85K)
queryTermWithTheirRealFreqIn85KQueriesDict = {}
# The real freqs of terms in the 85K queries feed our 2D/1D estimation.
# File format per line: "<term> <realFreqIn85K>"
inputFileName3 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/realFreqOfTermsInQueries_head_85K_0_85%"
with open(inputFileName3,"r") as inputFileHandler:
    # iterate the file lazily instead of materializing readlines()
    for line in inputFileHandler:
        lineElements = line.strip().split(" ")
        queryTerm = lineElements[0]
        queryTermFreq = int(lineElements[1])
        if queryTerm in queryTermWithTheirRealFreqIn85KQueriesDict:
            # a duplicate term line means the input file is corrupt -- abort
            print "error,Mark3"
            sys.exit(1)
        queryTermWithTheirRealFreqIn85KQueriesDict[queryTerm] = queryTermFreq
# for debug ONLY
print "len(queryTermWithTheirRealFreqIn85KQueriesDict):",len(queryTermWithTheirRealFreqIn85KQueriesDict)

# key: the cell identifier like 1_0, 1_1, 1_2 ... 5_16 5_17 5_18 5_19
# value: the probability which this cell of terms belonging to
# the sum of all the cells' corresponding terms should be the WHOLE universe of the terms that the search system can handle 
cellProbabilityDict = {}
# This is the 1D / 2D final probabilities we want, note that the probability in the 3 tables are NOT the final probability we want, but just a middle step 
# The SUM row, I think is the final 1D probability
# From row1,row2 ... row5, I think this is the final 2D probability
inputFileName4 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/probabilityWeWantFinally20130418"
inputFileHandler = open(inputFileName4,"r")
for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    cellKey = lineElements[0][3:]
    probability = float(lineElements[1])
    if cellKey not in cellProbabilityDict:
        cellProbabilityDict[cellKey] = probability
print "len(cellProbabilityDict):",len(cellProbabilityDict)
# print "cellProbabilityDict:",cellProbabilityDict
# for a specific 2D probability
print "cellProbabilityDict['1_1']:",cellProbabilityDict['1_1'],"(specific cell '1_1')"
# for a specific 1D probability
print "cellProbabilityDict['_1']:",cellProbabilityDict['_1'],"(specific cell '_1' combining the cells '1_1','2_1','3_1','4_1','5_1')"
inputFileHandler.close()


# key: freq value (int) -- the real frequency of a term in the 95K queries
# value: the Good-Turing smoothed probability for terms of that frequency
goodTuringFreqOfFreqProbabilityDict = {}
# Good-Turing estimation is applied to the whole 95K queries, NOT the 85K queries.
inputFileName5 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/freqOfFreqInQueries_0_1_95K_95%_good_turing_output"
with open(inputFileName5,"r") as inputFileHandler:
    # skip the 4 meta head lines
    for _ in range(4):
        inputFileHandler.readline()

    # Only the first 31 data lines are used; the 31 upper bound must track the
    # maximum real freq of terms in the 95K queries.
    for line in inputFileHandler.readlines()[0:31]:
        lineElements = line.strip().split(" ")
        freqOfFreq = int(lineElements[0])
        goodTuringProbability = float(lineElements[3])
        if freqOfFreq in goodTuringFreqOfFreqProbabilityDict:
            # a duplicate frequency row means the input file is corrupt -- abort
            print "ERROR,mark3"
            sys.exit(1)
        goodTuringFreqOfFreqProbabilityDict[freqOfFreq] = goodTuringProbability

print "len(goodTuringFreqOfFreqProbabilityDict):",len(goodTuringFreqOfFreqProbabilityDict)
print "goodTuringFreqOfFreqProbabilityDict[0]:",goodTuringFreqOfFreqProbabilityDict[0]
print "goodTuringFreqOfFreqProbabilityDict:",goodTuringFreqOfFreqProbabilityDict


# key: the small cell identifier (e.g. "3_195")
# value: the list of query terms belonging to that cell
smallCellKeyWithItsCorrespondingQueryTermListDict = {}
# File format per line: "<cellKey> <numOfTerms> <term1> <term2> ..."
inputFileName6 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/freqOfFreqInQueries_head_85K_0_85%_2D_with_query_terms"
with open(inputFileName6,"r") as inputFileHandler:
    # iterate the file lazily instead of materializing readlines()
    for line in inputFileHandler:
        lineElements = line.strip().split(" ")
        smallCellKey = lineElements[0]
        numOfQueryTerms = int( lineElements[1] )
        if smallCellKey in smallCellKeyWithItsCorrespondingQueryTermListDict:
            # a duplicate cell key means the input file is corrupt -- abort
            print "error,Mark1"
            sys.exit(1)
        # Slice out exactly the declared number of terms for this cell
        # (equivalent to the original element-by-element append loop).
        smallCellKeyWithItsCorrespondingQueryTermListDict[smallCellKey] = lineElements[2:2 + numOfQueryTerms]

print "len(smallCellKeyWithItsCorrespondingQueryTermListDict):",len(smallCellKeyWithItsCorrespondingQueryTermListDict)
print "smallCellKeyWithItsCorrespondingQueryTermListDict['3_195']:",smallCellKeyWithItsCorrespondingQueryTermListDict['3_195']
print "smallCellKeyWithItsCorrespondingQueryTermListDict['3_191']:",smallCellKeyWithItsCorrespondingQueryTermListDict['3_191']


dataLines = []
# The purpose of loading the info from the "table:denominator:freqOfFreqForTheLexiconTerm" is to get the ranges for each big cell(combine the small cells together)
inputFileName7 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/probabilityTableUsingProfIdea20130420_fixed"
inputFileHandler = open(inputFileName7,"r")
dataLine = inputFileHandler.readline()
while not dataLine.strip().startswith("table:denominator:freqOfFreqForTheLexiconTerm"):
    dataLine = inputFileHandler.readline()
# for debug ONLY
# print dataLine.strip()

inputFileHandler.readline()
inputFileHandler.readline()
inputFileHandler.readline()
inputFileHandler.readline()

row1DataLine = inputFileHandler.readline()
row2DataLine = inputFileHandler.readline()
row3DataLine = inputFileHandler.readline()
row4DataLine = inputFileHandler.readline()
row5DataLine = inputFileHandler.readline()
sumDataLine = inputFileHandler.readline()

dataLines.append(row1DataLine)
dataLines.append(row2DataLine)
dataLines.append(row3DataLine)
dataLines.append(row4DataLine)
dataLines.append(row5DataLine)
inputFileHandler.close()
print "len(dataLines):",len(dataLines)


print "Part1: Load the query terms which are in the 2D probability estimation table and compute the final compared values."
# Walk the 5 data rows of the 2D table; indexOutside+1 is the row number (1..5).
for indexOutside,dataLine in enumerate(dataLines):
    # Each cell string looks like "[lo,hi]:freq": the inclusive range of small-cell
    # IDs merged into this big cell, plus the number of query terms it holds.
    for indexInside,cellString in enumerate( dataLine.strip().split(" ")[2:] ):
        currentFreq = int( cellString.split(":")[1] )
        rangeString = cellString.split(":")[0]
        # for debug ONLY
        # print indexOutside+1,indexInside+1,rangeString,currentFreq
        cellKeyFor2D = str(indexOutside+1) + "_" + str(indexInside+1)
        # Parse "[lo,hi]" into its inclusive integer bounds.
        smallRangeLowerBoundID = int( rangeString.split("[")[1].split("]")[0].split(",")[0] )
        smallRangeUpperBoundID = int( rangeString.split("[")[1].split("]")[0].split(",")[1] )
        # Let's operate on the query terms themselves
        tempQueryTermList = []
        for i in range(smallRangeLowerBoundID,smallRangeUpperBoundID+1):
            # small-cell keys are "<column>_<id>": the table column index comes first
            smallCellKey = str(indexInside+1) + "_" + str(i)
            # before adding the whole thing, we can just have the query term with their designed tuples
            for currentProcessingQueryTerm in smallCellKeyWithItsCorrespondingQueryTermListDict[smallCellKey]:
                # This is where you can put the query term and the tuple in.
                if currentProcessingQueryTerm in queryTermWithTheirRealFreqIn95KQueriesDict and currentProcessingQueryTerm in allQueryTermsWithTheirTermFreqInCollection:
                    
                    # s: the size of this term's inverted list (its collection frequency)
                    currentProcessingQueryTermFreqInCollection = allQueryTermsWithTheirTermFreqInCollection[currentProcessingQueryTerm]
                    
                    # Note that the 2D and 1D thing are all based on the 85K training queries, 10K for justification queries
                    currentProcessingQueryTermPredictedProbability2D = cellProbabilityDict[cellKeyFor2D]
                    # tableColumnNum is actually the real freq for the term in a certain set of queries.
                    tableColumnNum = int(cellKeyFor2D.split("_")[1])
                    # 1D keys are the column key with an empty row part, e.g. "_1"
                    cellKeyFor1D = "_" + str(tableColumnNum)
                    currentProcessingQueryTermPredictedProbability1D = cellProbabilityDict[cellKeyFor1D]
                    
                    # BUG fixed 2013/08/22 morning by Wei at school:
                    # the Good-Turing probability is looked up by the term's REAL freq in the 95K queries.
                    currentProcessingQueryTermGoodTuringProbability = goodTuringFreqOfFreqProbabilityDict[ queryTermWithTheirRealFreqIn95KQueriesDict[currentProcessingQueryTerm] ]
                    
                    # comparedAlg1 [SIGIR 2007 Ricardo A.Baeza-Yates et al]
                    # 2 parameters:
                    # (1) frequency f for each query term from query log
                    # (2) size s of the associate inverted list
                    # (3) define the ratio f/s, and greedily select the posting list with maximum ratio
                    ricardoRatio = queryTermWithTheirRealFreqIn95KQueriesDict[currentProcessingQueryTerm] / currentProcessingQueryTermFreqInCollection
                    
                    # ourProbabilityModel2D
                    # 2 parameters:
                    # (1) probability p2D (currentProcessingQueryTermPredictedProbability2D) 2D settings
                    # (2) size s of the associate inverted list(currentProcessingQueryTermFreqInCollection)
                    # (3) define the ratio p2D/s, and greedily select the posting list with maximum ratio
                    our2DProbabilityModelRatio = currentProcessingQueryTermPredictedProbability2D / currentProcessingQueryTermFreqInCollection
                    
                    # ourProbabilityModel1D
                    # 2 parameters:
                    # (1) probability p1D settings
                    # (2) size s of the associate inverted list(currentProcessingQueryTermFreqInCollection)
                    # (3) define the ratio p1D/s, and greedily select the posting list with maximum ratio
                    our1DProbabilityModelRatio = currentProcessingQueryTermPredictedProbability1D / currentProcessingQueryTermFreqInCollection

                    # goodTuringProbabilityModel
                    # 2 parameters:
                    # (1) probability p predicted by good turing
                    # (2) size s of the associate inverted list(currentProcessingQueryTermFreqInCollection)
                    # (3) define the ratio p/s, and greedily select the posting list with maximum ratio
                    goodTuringProbabilityModelRatio = currentProcessingQueryTermGoodTuringProbability / currentProcessingQueryTermFreqInCollection
                    
                    # tuple layout: (term, collectionFreq, ricardo, our2D, our1D, goodTuring)
                    currentProcessingQueryTermTuple = (currentProcessingQueryTerm,currentProcessingQueryTermFreqInCollection,ricardoRatio,our2DProbabilityModelRatio,our1DProbabilityModelRatio,goodTuringProbabilityModelRatio)
                    queryTermInfoList.append(currentProcessingQueryTermTuple)
                else:
                    # every table term is expected in BOTH dictionaries -- abort otherwise
                    print "error,Mark4"
                    exit(1)
            
            tempQueryTermList += smallCellKeyWithItsCorrespondingQueryTermListDict[smallCellKey]
        
        # sanity check: the merged small cells must hold exactly the declared term count
        if len(tempQueryTermList) != currentFreq:
            print "error,Mark2"
            exit(1)
        else:
            pass

    # for debug ONLY
    # print "len(queryTermInfoList):",len(queryTermInfoList)
total_num_of_terms_left_unprocessed = TOTAL_NUM_OF_TERMS_IN_LEXICON - len(queryTermInfoList)
print "len(queryTermInfoList) / # of query terms processed so far:",len(queryTermInfoList)
print "total_num_of_terms_left_unprocessed:",total_num_of_terms_left_unprocessed
print 

# Add the popular query terms (real freq >= 20 in the head 85K queries) to the list.
# For these terms the predicted probability of every model is just the observed
# relative frequency, so the 2D and 1D models are provably identical here.
popularTermsCounter = 0
NUM_QUERY_TERM_POSITIONS_FOR_95K_QUERIES = 392955
NUM_QUERY_TERM_POSITIONS_FOR_85K_QUERIES = 351734
for queryTerm in queryTermWithTheirRealFreqIn85KQueriesDict:
    if queryTermWithTheirRealFreqIn85KQueriesDict[queryTerm] < 20:
        continue
    popularTermsCounter += 1
    
    # Every popular 85K term must also appear in the 95K trace -- abort otherwise.
    if queryTerm not in queryTermWithTheirRealFreqIn95KQueriesDict:
        print "Error,Mark2"
        sys.exit(1)
    
    # s: the size of this term's inverted list (its collection frequency)
    termFreqInCollection = allQueryTermsWithTheirTermFreqInCollection[queryTerm]
    
    # comparedAlg1 [SIGIR 2007 Ricardo A.Baeza-Yates et al]:
    # ratio f/s -- query-log frequency f over inverted-list size s;
    # greedily select the posting lists with maximum ratio.
    ricardoRatio = queryTermWithTheirRealFreqIn95KQueriesDict[queryTerm] / termFreqInCollection
    
    # ourProbabilityModel2D / ourProbabilityModel1D: for freq >= 20 terms the
    # probability is the observed relative frequency in the 85K queries, so
    # both models yield the same ratio p/s. (The original code computed the
    # identical expression twice and asserted equality; deduplicated here.)
    ourProbabilityForWhichTermsHaveFreqGreaterOrEqualThan20 = queryTermWithTheirRealFreqIn85KQueriesDict[queryTerm] / NUM_QUERY_TERM_POSITIONS_FOR_85K_QUERIES
    our2DProbabilityModelRatio = ourProbabilityForWhichTermsHaveFreqGreaterOrEqualThan20 / termFreqInCollection
    our1DProbabilityModelRatio = our2DProbabilityModelRatio
    
    # goodTuringProbabilityModel: for freq >= 20 terms this also reduces to the
    # observed relative frequency, but measured on the 95K queries.
    goodTuringProbabilityForWhichTermsHaveFreqGreaterOrEqualThan20 = queryTermWithTheirRealFreqIn95KQueriesDict[queryTerm] / NUM_QUERY_TERM_POSITIONS_FOR_95K_QUERIES
    goodTuringProbabilityModelRatio = goodTuringProbabilityForWhichTermsHaveFreqGreaterOrEqualThan20 / termFreqInCollection
    
    # tuple layout: (term, collectionFreq, ricardo, our2D, our1D, goodTuring)
    currentProcessingPopularTermTuple = (queryTerm, termFreqInCollection, ricardoRatio,our2DProbabilityModelRatio,our1DProbabilityModelRatio,goodTuringProbabilityModelRatio)
    queryTermInfoList.append(currentProcessingPopularTermTuple)

print "(DEBUG)popularTermsCounter:",popularTermsCounter
total_num_of_terms_left_unprocessed = TOTAL_NUM_OF_TERMS_IN_LEXICON - len(queryTermInfoList)
print "len(queryTermInfoList) / # of query terms processed so far:",len(queryTermInfoList)
print "total_num_of_terms_left_unprocessed:",total_num_of_terms_left_unprocessed
print



print "Part2: Load the UNSEEN terms which are NOT in the 2D probability estimation table and compute the final compared values."
# # of query terms in the cell ROW5_0 : about 13296
# # of query terms in the cell ROW4_0 : about 32085
# # of query terms in the cell ROW3_0 : about 87252
# # of query terms in the cell ROW2_0 : about 688369
# # of query terms in the cell ROW1_0 : about 36872359

# The input file format(This input file format design is just NAIVE):
# lineElements[0]: queryTerm
# lineElements[1]: queryTermRealFreqIn85KQueries
# lineElements[2]: queryTermFreqInCollection
# lineElements[3]: queryTermPredictedProbability
# lineElements[4]: ricardoRatio
fileNameList = []
basePathFileNamePart1 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/CellROW"
basePathFileNamePart2 = "_0QueryTermsWithMetaInfo"

# This argument controls how many files will be in the list
# current # of files:5
for i in range(5,0,-1):
    completedFilePath = basePathFileNamePart1 + str(i) + basePathFileNamePart2
    fileNameList.append(completedFilePath)

print "len(fileNameList):",len(fileNameList)


for fileName in fileNameList:
    print "fileName:",fileName
    # special logic begins...
    # extract the cell info from the fileName for our1DProbabilityModelRatio
    # example file name:
    # /data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/CellROW2_0QueryTermsWithMetaInfo
    
    # Updated by Wei 2013/08/22 morning
    # The following 2 statements are just WRONG!! 
    # cellNum = str(fileName.strip().split("/")[-1][7])
    # print "cell num for unseen query terms :",cellNum
    
    # special logic ends.
    inputFileHandler = open(fileName,"r")
    for line in inputFileHandler.readlines():
        lineElements = line.strip().split(" ")
        # the file format of the following input files(5 columns):
        # lineElements[0]: queryTerm
        # lineElements[1]: real term freq in the 85K query trace (Which should be ALL 0s in this case, cause the defination is UNSEEN)
        # lineElements[2]: freq in the collection
        # lineElements[3]: ourProbabilityModel2D
        # lineElements[4]: ricardo ratio (the value in file is super large, maybe we need the super small value. It depends on our sorting)
        
        # The standard format of the tuple:
        # (queryTerm, ricardoRatio,our2DProbabilityModelRatio,our1DProbabilityModelRatio,goodTuringProbabilityModelRatio)
        # 0 queryTerm
        # 1 ricardoRatio
        # 2 our2DProbabilityModelRatio
        # 3 our1DProbabilityModelRatio
        # 4 goodTuringProbabilityModelRatio
        queryTerm = str( lineElements[0] )
        
        if queryTerm in queryTermWithTheirRealFreqIn95KQueriesDict:
            ricardoRatio = queryTermWithTheirRealFreqIn95KQueriesDict[queryTerm] / int(lineElements[2])
        else:
            # Updated by Wei 2013/08/22 morning
            # for those terms which are NOT in the head 95K queries, ricardo method just try to pick up the terms at random.
            # That is why I have a -99999999.0 for the ricardoRatio here
            # Remember to pick up the term from the largest value to the smallest value
            ricardoRatio = float(-99999999.0)

        # There are 5 unseen probability for the 2D probability estimation, and the values are as following:
        # based on the file: /data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/probabilityWeWantFinally20130418
        
        # ROW5_0 8.031888903939055e-07
        # ROW4_0 3.2578867212223025e-07
        # ROW3_0 1.1538218688794478e-07
        # ROW2_0 1.6040397629079132e-08
        # ROW1_0 2.7962236405996843e-10
        our2DProbabilityModelRatio = float(lineElements[3]) / int(lineElements[2])
        
        # Updated by Wei 2013/08/22 morning
        # The following statements are just WRONG for 1D estimation!!
        # print "cellNum:",cellNum
        # BUG Fixed
        # if cellNum == "5":
        #    our1DProbabilityModelRatio = 1.20772275331e-05
        #elif cellNum == "4":
        #    our1DProbabilityModelRatio = 9.66505251504e-06
        #elif cellNum == "3":
        #    our1DProbabilityModelRatio = 6.56625976833e-06
        #elif cellNum == "2":
        #    our1DProbabilityModelRatio = 4.13057762416e-06
        #elif cellNum == "1":
        #    our1DProbabilityModelRatio = 1.32367943426e-06
        #else:
        #    print "ERROR,mark4"
        #    exit(1)

        # I think the correct assignment for 1D estimation
        # There is ONLY 1 unseen probability for the 1D probability estimation, and the value is 1.38700255851e-09
        # based on the file: /data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/probabilityWeWantFinally20130418
        # SUM_0 1.3870025585078873e-09
        our1DProbabilityModelRatio = 1.38700255851e-09 / int(lineElements[2])

        if queryTerm in queryTermWithTheirRealFreqIn95KQueriesDict:
            if queryTermWithTheirRealFreqIn95KQueriesDict[queryTerm] >= 20:
                goodTuringProbabilityForWhichTermsHaveFreqGreaterOrEqualThan20 = queryTermWithTheirRealFreqIn95KQueriesDict[queryTerm] / NUM_QUERY_TERM_POSITIONS_FOR_95K_QUERIES
                # The following statement should have some problems.
                goodTuringProbabilityModelRatio = goodTuringProbabilityForWhichTermsHaveFreqGreaterOrEqualThan20 / int(lineElements[2])
            else:
                goodTuringProbabilityModelRatio = goodTuringFreqOfFreqProbabilityDict[ queryTermWithTheirRealFreqIn95KQueriesDict[queryTerm] ] / int(lineElements[2])
        else:
            # for the good turning method, the query term is actually NOT seen even in the head 95K queries.
            # at this point, all I need to do is to assign the good turning probability(freq = 0) for this term
            goodTuringProbabilityModelRatio = goodTuringFreqOfFreqProbabilityDict[0] / int(lineElements[2])
        currentProcessingPopularTermTuple = (queryTerm, int(lineElements[2]), ricardoRatio,our2DProbabilityModelRatio,our1DProbabilityModelRatio,goodTuringProbabilityModelRatio)
        queryTermInfoList.append(currentProcessingPopularTermTuple)
    inputFileHandler.close()
    print "len(queryTermInfoList):",len(queryTermInfoList)


total_num_of_terms_left_unprocessed = TOTAL_NUM_OF_TERMS_IN_LEXICON - len(queryTermInfoList)
print "len(queryTermInfoList) / # of query terms processed so far(FINAL):",len(queryTermInfoList)
print "total_num_of_terms_left_unprocessed:",total_num_of_terms_left_unprocessed
print


currentTotalNumOfPosting = 0
for tuple in queryTermInfoList:
    # unpack the tuple
    (queryTerm, queryTermFreqInCollection, ricardoRatio,our2DProbabilityModelRatio,our1DProbabilityModelRatio,goodTuringProbabilityModelRatio) = tuple
    currentTotalNumOfPosting += queryTermFreqInCollection
print "currentTotalNumOfPosting:",currentTotalNumOfPosting
# Updated by Wei 2013/08/22, why do you know what I am thinking about? :) My previous me 3 months ago
# (1)# of terms: 856260 (2)# of postings: 6323944039 (3)percentage of the total postings: 98%

# Main simulation: for each of the four ranking strategies (tuple fields 2..5)
# in turn, sort the term list by that strategy's ratio, greedily inject terms
# into a simulated pruned index snapshotted at each posting-count threshold,
# then count how many of the 100K queries each pruning level can answer
# under AND semantics (every query term must be present in the pruned index).
for turnNum in range(0,4):
    print
    print "turnNum:",turnNum
    # Now, let's operate on this queryTermInfoList
    # current example of the tuple format
    # (queryTerm, queryTermFreqInCollection, ricardoRatio, our2DProbabilityModelRatio, our1DProbabilityModelRatio, goodTuringProbabilityModelRatio)
    # example:
    # (0) queryTerm
    # (1) queryTermFreqInCollection
    # (2) ricardoRatio
    # (3) our2DProbabilityModelRatio
    # (4) our1DProbabilityModelRatio
    # (5) goodTuringProbabilityModelRatio
    # Map turn 0..3 onto tuple index 2..5, i.e. the four ratio columns above.
    sortedKeyNum = turnNum + 2
    if sortedKeyNum == 2:
        print "sort the list by ricardoRatio, greedily select from the largest to smallest"
    elif sortedKeyNum == 3:
        print "sort the list by our2DProbabilityModelRatio, greedily select from largest to smallest"
    elif sortedKeyNum == 4:
        print "sort the list by our1DProbabilityModelRatio, greedily select from largest to smallest"
    elif sortedKeyNum == 5:
        print "sort the list by goodTuringProbabilityModelRatio, greedily select from largest to smallest"
    else:
        print "Unsupported Operations."
        exit(1)

    # Descending sort: the "most valuable" terms (largest ratio) come first,
    # so the greedy pass below injects them into the pruned index first.
    print "Sort the list begins..."
    queryTermInfoList.sort(cmp=None, key=itemgetter(sortedKeyNum), reverse=True)
    print "Sort the list ends."


    print "First 20 terms which will be injected."
    for i in range(0,20):
        print "queryTermInfoList[",i,"]:",queryTermInfoList[i]

    # init some variables
    # queryTermDictsForEachPercentageStoringInList accumulates one snapshot per
    # threshold level, as (listIndex, #postingsActuallyInjected, #terms, {term: freqInCollection}).
    queryTermDictsForEachPercentageStoringInList = []
    listIndex = 0
    previousTotalNumOfPostingCounter = 0
    currentTotalNumOfPostingCounter = 0
    current_INVERTED_INDEX_queryTermsAndFreqInCollectionTuple_list = []
    leftDistance = 0
    rightDistance = 0

    # NOTE(review): the loop variable 'tuple' shadows the builtin of the same name.
    for tupleIndex,tuple in enumerate( queryTermInfoList ):
        # greedily select the terms from the sorted list

        # current version
        (currentProcessingQueryTerm, currentProcessingQueryTermFreqInCollection, ricardoRatio,our2DProbabilityModelRatio,our1DProbabilityModelRatio,goodTuringProbabilityModelRatio) = tuple

        # old version
        # (currentProcessingQueryTerm, currentProcessingQueryTermRealFreqInTrainingQL, currentProcessingQueryTermFreqInCollection, currentProcessingQueryTermPredictedProbability,ricardoRatio,ourProbabilityModelRatio) = tuple
        previousTotalNumOfPostingCounter = currentTotalNumOfPostingCounter
        currentTotalNumOfPostingCounter += currentProcessingQueryTermFreqInCollection
        current_INVERTED_INDEX_queryTermsAndFreqInCollectionTuple_list.append( (currentProcessingQueryTerm,currentProcessingQueryTermFreqInCollection) )

        # Crossed the next posting-count threshold: take a snapshot.
        # leftDistance  = gap between the threshold and the running total BEFORE this term
        # rightDistance = overshoot of the running total AFTER this term past the threshold
        if currentTotalNumOfPostingCounter >= LIST_FOR_STORING_THRESHOLD_NUMBERS[listIndex]:
            leftDistance = LIST_FOR_STORING_THRESHOLD_NUMBERS[listIndex] - previousTotalNumOfPostingCounter
            rightDistance = currentTotalNumOfPostingCounter - LIST_FOR_STORING_THRESHOLD_NUMBERS[listIndex]
            # NOTE(review): when rightDistance >= leftDistance, the threshold is
            # closer to the PREVIOUS total, yet this branch keeps the current term
            # (and the else branch drops it when the CURRENT total is closer) —
            # the include/exclude branches look swapped relative to a
            # nearest-cut rule; confirm this is the intended behavior.
            if rightDistance >= leftDistance:
                # Snapshot INCLUDES the current term: build {term: freq} from the
                # full injected-so-far list, erroring on any duplicate term.
                # for debug ONLY
                '''
                print "listIndex:",listIndex
                print "tupleIndex:",tupleIndex
                print "currentTotalNumOfPostingCounter(Used):",currentTotalNumOfPostingCounter
                print "previousTotalNumOfPostingCounter:",previousTotalNumOfPostingCounter
                print "len(current_INVERTED_INDEX_queryTermsAndFreqInCollectionTuple_list):",len(current_INVERTED_INDEX_queryTermsAndFreqInCollectionTuple_list)
                # print "current_INVERTED_INDEX_queryTermsAndFreqInCollectionTuple_list:",current_INVERTED_INDEX_queryTermsAndFreqInCollectionTuple_list
                print
                '''
                tempDict = {}

                for tuple in current_INVERTED_INDEX_queryTermsAndFreqInCollectionTuple_list:
                    (queryTerm,queryTermFreqInCollection) = tuple
                    if queryTerm not in tempDict:
                        tempDict[queryTerm] = queryTermFreqInCollection
                    else:
                        # A term appearing twice would mean the sorted list holds duplicates.
                        print "Error,Mark1"
                        exit(1)

                queryTermDictsForEachPercentageStoringInList.append( (listIndex,currentTotalNumOfPostingCounter,len(tempDict),tempDict) )

            else:
                # Snapshot EXCLUDES the current term: use list[:-1] and record
                # the pre-term posting total instead.
                # for debug ONLY
                '''
                print "listIndex:",listIndex
                print "tupleIndex:",tupleIndex
                print "currentTotalNumOfPostingCounter:",currentTotalNumOfPostingCounter
                print "previousTotalNumOfPostingCounter(Used):",previousTotalNumOfPostingCounter
                print "len(current_INVERTED_INDEX_queryTermsAndFreqInCollectionTuple_list)",len(current_INVERTED_INDEX_queryTermsAndFreqInCollectionTuple_list)-1
                # print "current_INVERTED_INDEX_queryTermsAndFreqInCollectionTuple_list - lastElement:",current_INVERTED_INDEX_queryTermsAndFreqInCollectionTuple_list[:-1]
                print
                '''
                tempDict = {}

                for tuple in current_INVERTED_INDEX_queryTermsAndFreqInCollectionTuple_list[:-1]:
                    (queryTerm,queryTermFreqInCollection) = tuple
                    if queryTerm not in tempDict:
                        tempDict[queryTerm] = queryTermFreqInCollection
                    else:
                        print "Error,Mark1"
                        exit(1)

                queryTermDictsForEachPercentageStoringInList.append( (listIndex,previousTotalNumOfPostingCounter,len(tempDict),tempDict) )

            # Advance to the next threshold level.
            listIndex += 1
    # After the loop: one final snapshot covering ALL terms (the level whose
    # threshold was never attained).
    # for debug ONLY
    '''
    print "NOT attain the listIndex:",listIndex
    print "Percentage of the total # of postings in the lexicon:",currentTotalNumOfPostingCounter / TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX
    print "tupleIndex:",tupleIndex
    print "currentTotalNumOfPostingCounter(Used):",currentTotalNumOfPostingCounter
    print "previousTotalNumOfPostingCounter(NOT Used):",previousTotalNumOfPostingCounter
    print "len(current_INVERTED_INDEX_queryTermsAndFreqInCollectionTuple_list)",len(current_INVERTED_INDEX_queryTermsAndFreqInCollectionTuple_list)
    print
    '''
    tempDict = {}
    for tuple in current_INVERTED_INDEX_queryTermsAndFreqInCollectionTuple_list:
        (queryTerm,queryTermFreqInCollection) = tuple
        if queryTerm not in tempDict:
            tempDict[queryTerm] = queryTermFreqInCollection
        else:
            print "Error,Mark1"
            exit(1)

    queryTermDictsForEachPercentageStoringInList.append( (listIndex,currentTotalNumOfPostingCounter,len(tempDict),tempDict) )


    # use the thing in queryTermDictsForEachPercentageStoringInList to answer queries.
    # print "Use the thing in queryTermDictsForEachPercentageStoringInList to answer queries"
    # print "len(queryTermDictsForEachPercentageStoringInList):",len(queryTermDictsForEachPercentageStoringInList)

    # Let's record those query terms in file first
    # Per-level report; also verify each snapshot's stored posting count matches
    # the sum of its per-term frequencies before printing.
    print "--->","id","designed#OfPostingsInjected","percentageOfAll","actual#OfPostingsInjected","percentageOfAll","#OfTermsIn"
    for index,tuple in enumerate(queryTermDictsForEachPercentageStoringInList[0:12]):
        (listIndex,currentTotalNumOfPostingCounter,numOfQueryTermsInDict,queryTermsInDict) = tuple

        tempCheckTotalNumPostings = 0
        for queryTerm in queryTermsInDict:
            tempCheckTotalNumPostings += queryTermsInDict[queryTerm]
        if tempCheckTotalNumPostings != currentTotalNumOfPostingCounter:
            print "tempCheckTotalNumPostings != currentTotalNumOfPostingCounter"
            print "tempCheckTotalNumPostings:",tempCheckTotalNumPostings
            print "currentTotalNumOfPostingCounter:",currentTotalNumOfPostingCounter
            exit(1)

        print listIndex,LIST_FOR_STORING_THRESHOLD_NUMBERS[listIndex], LIST_FOR_STORING_THRESHOLD_NUMBERS[listIndex]/TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX,currentTotalNumOfPostingCounter,currentTotalNumOfPostingCounter/TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX, numOfQueryTermsInDict

        #if listIndex == 0:
        #    tempList = list(queryTermsInDict)
        #    print "    --->(DEBUG):",sorted(tempList)

    # The following logic is for iterate all the 100K queries to see whether the pruned index can answer those queries.
    # init for the variable indexForQueryTermDictsForEachPercentageStoringInListAndCurrentTotalNumOfSolvableQueriesDict
    # One solvable-query counter per pruning level 0..11.
    indexForQueryTermDictsForEachPercentageStoringInListAndCurrentTotalNumOfSolvableQueriesDict = {}
    indexForQueryTermDictsForEachPercentageStoringInListAndCurrentTotalNumOfSolvableQueriesDict[0] = 0
    indexForQueryTermDictsForEachPercentageStoringInListAndCurrentTotalNumOfSolvableQueriesDict[1] = 0
    indexForQueryTermDictsForEachPercentageStoringInListAndCurrentTotalNumOfSolvableQueriesDict[2] = 0
    indexForQueryTermDictsForEachPercentageStoringInListAndCurrentTotalNumOfSolvableQueriesDict[3] = 0
    indexForQueryTermDictsForEachPercentageStoringInListAndCurrentTotalNumOfSolvableQueriesDict[4] = 0
    indexForQueryTermDictsForEachPercentageStoringInListAndCurrentTotalNumOfSolvableQueriesDict[5] = 0
    indexForQueryTermDictsForEachPercentageStoringInListAndCurrentTotalNumOfSolvableQueriesDict[6] = 0
    indexForQueryTermDictsForEachPercentageStoringInListAndCurrentTotalNumOfSolvableQueriesDict[7] = 0
    indexForQueryTermDictsForEachPercentageStoringInListAndCurrentTotalNumOfSolvableQueriesDict[8] = 0
    indexForQueryTermDictsForEachPercentageStoringInListAndCurrentTotalNumOfSolvableQueriesDict[9] = 0
    indexForQueryTermDictsForEachPercentageStoringInListAndCurrentTotalNumOfSolvableQueriesDict[10] = 0
    indexForQueryTermDictsForEachPercentageStoringInListAndCurrentTotalNumOfSolvableQueriesDict[11] = 0

    # init for the variable indexForQueryTermDictsForEachPercentageStoringInListAndCurrentSolvableQueriesIDListDict
    # One list of solvable query IDs per pruning level 0..11.
    indexForQueryTermDictsForEachPercentageStoringInListAndCurrentSolvableQueriesIDListDict = {}
    indexForQueryTermDictsForEachPercentageStoringInListAndCurrentSolvableQueriesIDListDict[0] = []
    indexForQueryTermDictsForEachPercentageStoringInListAndCurrentSolvableQueriesIDListDict[1] = []
    indexForQueryTermDictsForEachPercentageStoringInListAndCurrentSolvableQueriesIDListDict[2] = []
    indexForQueryTermDictsForEachPercentageStoringInListAndCurrentSolvableQueriesIDListDict[3] = []
    indexForQueryTermDictsForEachPercentageStoringInListAndCurrentSolvableQueriesIDListDict[4] = []
    indexForQueryTermDictsForEachPercentageStoringInListAndCurrentSolvableQueriesIDListDict[5] = []
    indexForQueryTermDictsForEachPercentageStoringInListAndCurrentSolvableQueriesIDListDict[6] = []
    indexForQueryTermDictsForEachPercentageStoringInListAndCurrentSolvableQueriesIDListDict[7] = []
    indexForQueryTermDictsForEachPercentageStoringInListAndCurrentSolvableQueriesIDListDict[8] = []
    indexForQueryTermDictsForEachPercentageStoringInListAndCurrentSolvableQueriesIDListDict[9] = []
    indexForQueryTermDictsForEachPercentageStoringInListAndCurrentSolvableQueriesIDListDict[10] = []
    indexForQueryTermDictsForEachPercentageStoringInListAndCurrentSolvableQueriesIDListDict[11] = []



    # option0
    # Query file format (per line): "<queryID>:<term> <term> ..."
    inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/100KQueries_2_3_5%"
    inputFileHandler = open(inputFileName,"r")

    for line in inputFileHandler.readlines():
        # print "line:",line.strip()
        queryID = line.strip().split(":")[0]
        queryTermList = line.strip().split(":")[1].strip().split(" ")
        # print "queryTermList:",queryTermList

        data = ""
        for element in queryTermList:
            data += element + " "

        # print "data(old):",data
        # print "original data:",data

        # Replace every character outside [0-9A-Za-z ] with a space.
        # Each replacement preserves the string length, so indexing by the
        # original range stays valid (though rebuilding the string per hit is
        # quadratic in the worst case).
        for i in range(0,len(data)):
            # print "data[i]:",ord(data[i])
            if not ( (ord(data[i]) >= 48 and ord(data[i]) < 58) or (ord(data[i]) >= 65 and ord(data[i]) < 91) or (ord(data[i]) >= 97 and ord(data[i]) < 123) or (ord(data[i]) == 32) ):
                # Just replace them with a space.
                data = data[:i] + " " + data[i+1:]

        # print "data(new):",data

        # Deduplicate the cleaned, lower-cased query terms via a dict.
        currentNewQueryTermList = data.strip().split(" ")
        currentNewQueryTermDict = {}

        for queryTerm in currentNewQueryTermList:
            if queryTerm.strip() != "":
                queryTermLower = queryTerm.lower()
                if queryTermLower not in currentNewQueryTermDict:
                    currentNewQueryTermDict[queryTermLower] = 1

        # Assume the query is solvable at every level, then falsify below.
        indexForQueryTermDictsForEachPercentageStoringInListAndCurrentQuerySolvableFlagDict = {}
        indexForQueryTermDictsForEachPercentageStoringInListAndCurrentQuerySolvableFlagDict[0] = True
        indexForQueryTermDictsForEachPercentageStoringInListAndCurrentQuerySolvableFlagDict[1] = True
        indexForQueryTermDictsForEachPercentageStoringInListAndCurrentQuerySolvableFlagDict[2] = True
        indexForQueryTermDictsForEachPercentageStoringInListAndCurrentQuerySolvableFlagDict[3] = True
        indexForQueryTermDictsForEachPercentageStoringInListAndCurrentQuerySolvableFlagDict[4] = True
        indexForQueryTermDictsForEachPercentageStoringInListAndCurrentQuerySolvableFlagDict[5] = True
        indexForQueryTermDictsForEachPercentageStoringInListAndCurrentQuerySolvableFlagDict[6] = True
        indexForQueryTermDictsForEachPercentageStoringInListAndCurrentQuerySolvableFlagDict[7] = True
        indexForQueryTermDictsForEachPercentageStoringInListAndCurrentQuerySolvableFlagDict[8] = True
        indexForQueryTermDictsForEachPercentageStoringInListAndCurrentQuerySolvableFlagDict[9] = True
        indexForQueryTermDictsForEachPercentageStoringInListAndCurrentQuerySolvableFlagDict[10] = True
        indexForQueryTermDictsForEachPercentageStoringInListAndCurrentQuerySolvableFlagDict[11] = True

        # AND semantics: a level fails as soon as ANY query term is missing
        # from that level's pruned-index dict.
        for queryTerm in currentNewQueryTermDict:
            for indexForQueryTermDictsForEachPercentageStoringInList,tuple in enumerate( queryTermDictsForEachPercentageStoringInList[0:12] ):
                (listIndex,currentTotalNumOfPostingCounter,_,tempDict) = tuple
                if queryTerm not in tempDict:
                    indexForQueryTermDictsForEachPercentageStoringInListAndCurrentQuerySolvableFlagDict[indexForQueryTermDictsForEachPercentageStoringInList] = False

        # Tally the levels that could answer this query.
        for key in indexForQueryTermDictsForEachPercentageStoringInListAndCurrentQuerySolvableFlagDict:
            if indexForQueryTermDictsForEachPercentageStoringInListAndCurrentQuerySolvableFlagDict[key] == True:
                indexForQueryTermDictsForEachPercentageStoringInListAndCurrentTotalNumOfSolvableQueriesDict[key] += 1
                indexForQueryTermDictsForEachPercentageStoringInListAndCurrentSolvableQueriesIDListDict[key].append(queryID)
    # some overall statistics:
    # index value = 0, 0.01
    # index value = 1, 0.05
    # index value = 2, 0.1
    # index value = 3, 0.2
    # index value = 4, 0.3
    # index value = 5, 0.4
    # index value = 6, 0.5
    # index value = 7, 0.6
    # index value = 8, 0.7
    # index value = 9, 0.8
    # index value = 10, 0.9
    print "--->","id","#OfQueriesAnsweredUsingANDSemantics"
    for i in range(0,len(indexForQueryTermDictsForEachPercentageStoringInListAndCurrentTotalNumOfSolvableQueriesDict) ):
        print i,indexForQueryTermDictsForEachPercentageStoringInListAndCurrentTotalNumOfSolvableQueriesDict[i]

        #if i == 0:
        #    print "    --->(DEBUG):",indexForQueryTermDictsForEachPercentageStoringInListAndCurrentSolvableQueriesIDListDict[i]

    inputFileHandler.close()

'''
print "check begins..."
for tuple in queryTermInfoList:
    print tuple
print "check ends."
'''

# print "queryTermInfoList[0]:",queryTermInfoList[0]
# print "queryTermInfoList[1]:",queryTermInfoList[1]

'''
print "sort the list by real freq in the query trace"
queryTermInfoList.sort(cmp=None, key=itemgetter(1), reverse=True)
print "queryTermInfoList[0]:",queryTermInfoList[0]
print "queryTermInfoList[1]:",queryTermInfoList[1]
print "queryTermInfoList[2]:",queryTermInfoList[2]
print "queryTermInfoList[3]:",queryTermInfoList[3]
'''

'''
# Purpose: another part of checking
# do some logic on the sumDataLine
totalFreq = 0
for cellString in sumDataLine.strip().split(" ")[1:]:
    currentFreq = int( cellString.split(":")[1] )
    totalFreq += currentFreq
print "totalFreq:",totalFreq
inputFileHandler.close()
'''







