from __future__ import division
from operator import itemgetter, attrgetter

import math
import matplotlib
import os
import pylab
import random
import sys
import time
from sets import Set

def step1_producePiecesInfoGivenSetOfTerms():
    # step1: classify each term in the lexicon (37M) into existing classes
    # input: the whole lexicon of the index(37M) with their length of the inverted list
    # output: the whole lexicon of the index(37M) with their length of the inverted list, class label and the # of postings for each piece
    
    # sub_step1
    # key: class label in int format
    # value: LowerBoundForTheCurrentClass in int format, the lower bound is based on the length of the list (Here,I only record the lower bound :) )
    classLabelWithTheirLowerBoundDict = {}
    
    # key: classLabel in int format
    # value: num of pieces in int forat
    classLabelWithTheirNumOfPiecesNeededDict = {}
    
    
    inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/classLabelWithClassLowerBoundANDNumOfPiecesNeeded"
    inputFileHandler = open(inputFileName,"r")
    for currentLine in inputFileHandler.readlines():
        # input file format: 4 columns:
        # column0: class label
        # column1: class lower bound
        # column2: # of query terms
        # column3: # of pieces needed for this class
        
        # for DEBUG
        # print "currentLine",currentLine,
        currentLineElements = currentLine.strip().split("\t")
        # for DEBUG
        # print "currentLineElements:",currentLineElements
        classLabel = int(currentLineElements[0])
        classLowerBound = int(currentLineElements[1])
        # ignore currentLineElements[1]
        numOfPiecesNeededForThisClass = int(currentLineElements[3])
        if classLabel not in classLabelWithTheirLowerBoundDict and classLabel not in classLabelWithTheirNumOfPiecesNeededDict:
            classLabelWithTheirLowerBoundDict[classLabel] = classLowerBound
            classLabelWithTheirNumOfPiecesNeededDict[classLabel] = numOfPiecesNeededForThisClass
    inputFileHandler.close()
    
    print "len(classLabelWithTheirLowerBoundDict):",len(classLabelWithTheirLowerBoundDict)
    print "len(classLabelWithTheirNumOfPiecesNeededDict):",len(classLabelWithTheirNumOfPiecesNeededDict)
    print "classLabelWithTheirLowerBoundDict[0]:",classLabelWithTheirLowerBoundDict[0]
    print "classLabelWithTheirLowerBoundDict[1]:",classLabelWithTheirLowerBoundDict[1]
    print "classLabelWithTheirNumOfPiecesNeededDict[0]:",classLabelWithTheirNumOfPiecesNeededDict[0]
    print "classLabelWithTheirNumOfPiecesNeededDict[1]:",classLabelWithTheirNumOfPiecesNeededDict[1]
    print
    
    # option1:
    # inputFileName2 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/wholeLexiconTermsWithTermFreqInCollection.txt"
    # option2:
    inputFileName2 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/100KQueryTermsWithTermFreqInCollection.txt"
    inputFileHandler = open(inputFileName2,"r")
    
    # option1:
    # outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/wholeLexiconTermsWithLengthOfListANDClassLabelANDNumOfPostingsInEachPieces20130819"
    # option2:
    outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/100KQueryTermsWithTermFreqInCollectionANDClassLabelANDNumOfPostingsInEachPieces20130821"
    outputFileHandler = open(outputFileName,"w")
    
    lineCounter = 0 # init to 0
    
    currentLine = inputFileHandler.readline()
    lineCounter += 1
    while currentLine:
        currentLineElements = currentLine.strip().split(" ")
        currentTerm = currentLineElements[0]
        currentTermLengthOfList = int( currentLineElements[1] )
        # for DEBUG
        # print "currentTerm:",currentTerm
        # print "currentTermLengthOfList:",currentTermLengthOfList
        
        if currentTermLengthOfList != 0:    # make sure that this term also exist in the lexicon, the least length of the list is 1
            # output variables
            actualBelongClassLabel = -1
            
            # sub_step1: decide the class label
            for classLabel in classLabelWithTheirLowerBoundDict:
                # compare the class upperBound with the length of the list for this term
                if classLabelWithTheirLowerBoundDict[classLabel] > currentTermLengthOfList:
                    actualBelongClassLabel = classLabel - 1
                    break
                else:
                    pass # don't need to do anything
            
            # sub_step2: get the # of pieces for this class label
            # key: term in string format
            # value format1 (used currently)
            # value: a list of tuples which have the following format (pieceNum,numOfPostingsInThisPiece)
            # value format2 (NOT used currently)
            # value: a list of integers representing the numOfPostingsInThisPiece in each piece
            currentTermPiecesInfoList = []
            
            # The partitions will ALL based on the pre-set parameter: NUM_OF_PIECES_NEEDED_TO_PARTITION_FOR_THIS_CLASS
            # print "currentTermLengthOfList:",currentTermLengthOfList
            NUM_OF_PIECES_NEEDED_TO_PARTITION_FOR_THIS_CLASS = classLabelWithTheirNumOfPiecesNeededDict[actualBelongClassLabel] 
            currentPieceNumber = 0
            if NUM_OF_PIECES_NEEDED_TO_PARTITION_FOR_THIS_CLASS != 1:
                patitionFactor = 1/2
                patitionStep = 1/2
                numOfPostingsInCurrentPiece = int(currentTermLengthOfList * patitionFactor)
                numOfPostingsLeft = currentTermLengthOfList - numOfPostingsInCurrentPiece
                # print "piece",str(currentPieceNumber),numOfPostingsInCurrentPiece,numOfPostingsLeft
                currentTermPiecesInfoList.append( (currentPieceNumber,numOfPostingsInCurrentPiece) )
                while currentPieceNumber < NUM_OF_PIECES_NEEDED_TO_PARTITION_FOR_THIS_CLASS-2:
                    patitionFactor = patitionFactor * patitionStep
                    numOfPostingsInCurrentPiece = int( currentTermLengthOfList * patitionFactor)
                    numOfPostingsLeft = numOfPostingsLeft - numOfPostingsInCurrentPiece
                    currentPieceNumber += 1
                    # print "piece",str(currentPieceNumber),numOfPostingsInCurrentPiece,numOfPostingsLeft
                    currentTermPiecesInfoList.append( (currentPieceNumber,numOfPostingsInCurrentPiece))
                currentPieceNumber += 1
                # print "piece",str(currentPieceNumber),numOfPostingsLeft,str(0)
                currentTermPiecesInfoList.append( (currentPieceNumber,numOfPostingsLeft) )
            else:
                # print "piece",str(0),str(currentTermLengthOfList),str(0)
                currentTermPiecesInfoList.append( (currentPieceNumber,currentTermLengthOfList) )
        
            if len(currentTermPiecesInfoList) != NUM_OF_PIECES_NEEDED_TO_PARTITION_FOR_THIS_CLASS:
                print "len(currentTermPiecesInfoList):",len(currentTermPiecesInfoList)
                print "NUM_OF_PIECES_NEEDED_TO_PARTITION_FOR_THIS_CLASS:",NUM_OF_PIECES_NEEDED_TO_PARTITION_FOR_THIS_CLASS
                print "currentTerm:",currentTerm
                print "currentTermLengthOfList:",currentTermLengthOfList
                exit(1)
            
            outputLine = currentLine.strip() + " " + str(actualBelongClassLabel) + " " + str(NUM_OF_PIECES_NEEDED_TO_PARTITION_FOR_THIS_CLASS) + " "
            for pieceInfoTuple in currentTermPiecesInfoList:
                (currentPieceNumber,numOfPostingsLeft) = pieceInfoTuple
                outputLine += str(currentPieceNumber) + " " + str(numOfPostingsLeft) + " "
            outputLine =  outputLine.strip() + "\n"
            outputFileHandler.write(outputLine)
            
            currentLine = inputFileHandler.readline()
            lineCounter += 1
            
            if lineCounter % 1000000 == 0:
                print "lineCounter:",lineCounter,"Processed."
            
        else:
            outputLine = currentLine.strip() + " " + "-1" + " " + "0" + "\n"
            outputFileHandler.write(outputLine)
            currentLine = inputFileHandler.readline()
    outputFileHandler.close()
    inputFileHandler.close()
    print "inputFileName:",inputFileName
    print "inputFileName2:",inputFileName2
    print "outputFileName:",outputFileName
    
    # the input and output files:
    # inputFileName: /data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/classLabelWithClassLowerBoundANDNumOfPiecesNeeded
    # inputFileName2: /data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/wholeLexiconTermsWithTermFreqInCollection.txt
    # outputFileName: /data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/wholeLexiconTermsWithLengthOfListANDClassLabelANDNumOfPostingsInEachPieces20130819


def step2_makeClassLabelWithPieceInfoComplete():
    # fix the classLabelWithPiecesInfoIMCOMPLETED file 2013/08/19 night
    
    outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/classLabelWithPiecesInfo_COMPLETED"
    outputFileHandler = open(outputFileName,"w")
    
    # key: classLabel in int format
    # value: num of pieces in int forat
    classLabelWithTheirNumOfPiecesNeededDict = {}
    inputFileName1 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/classLabelWithClassLowerBoundANDNumOfPiecesNeeded"
    inputFileHandler = open(inputFileName1,"r")
    for currentLine in inputFileHandler.readlines():
        # input file format: 4 columns:
        # column0: class label
        # column1: class lower bound
        # column2: # of query terms
        # column3: # of pieces needed for this class
        
        # for DEBUG
        # print "currentLine",currentLine,
        currentLineElements = currentLine.strip().split("\t")
        # for DEBUG
        # print "currentLineElements:",currentLineElements
        classLabel = int(currentLineElements[0])
        # ignore currentLineElements[1]
        # ignore currentLineElements[1]
        numOfPiecesNeededForThisClass = int(currentLineElements[3])
        if classLabel not in classLabelWithTheirNumOfPiecesNeededDict:
            classLabelWithTheirNumOfPiecesNeededDict[classLabel] = numOfPiecesNeededForThisClass
    
    print "len(classLabelWithTheirNumOfPiecesNeededDict):",len(classLabelWithTheirNumOfPiecesNeededDict)
    inputFileHandler.close()
        
    inputFileName2 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/classLabelWithPiecesInfoIMCOMPLETED"
    inputFileHandler = open(inputFileName2,"r")
    for index,currentLine in enumerate( inputFileHandler.readlines() ):
        currentLineElements = currentLine.strip().split("\t")
        if len(currentLineElements) != classLabelWithTheirNumOfPiecesNeededDict[index]:
            print "currentLine:",currentLine,
            print "len(currentLineElements):",len(currentLineElements)
            print "index:",index
            print "classLabelWithTheirNumOfPiecesNeededDict[index]:",classLabelWithTheirNumOfPiecesNeededDict[index]
            exit(1)
        outputLine = str(index) + "\t" + str(classLabelWithTheirNumOfPiecesNeededDict[index]) + "\t" + currentLine.strip() + "\n"
        outputFileHandler.write(outputLine)
    
    inputFileHandler.close()
    outputFileHandler.close()
    
    print "inputFileName1:",inputFileName1
    print "inputFileName2:",inputFileName2
    print "outputFileName:",outputFileName
    
    # the input and output files:
    # inputFileName: /data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/classLabelWithClassLowerBoundANDNumOfPiecesNeeded
    # inputFileName2: /data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/classLabelWithPiecesInfoIMCOMPLETED
    # outputFileName: /data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/classLabelWithPiecesInfo_COMPLETED

def step3_produceCuttingThreshold():
    """step3: score every (term, piece) pair, sort all pieces globally, and pop
    pieces off the front of the sorted list until only the target percentage of
    postings remains, logging the removed pieces to an output file."""
    # index value = 0, 0.01
    # index value = 1, 0.05
    # index value = 2, 0.1
    # index value = 3, 0.2
    # index value = 4, 0.3
    # index value = 5, 0.4
    # index value = 6, 0.5
    # index value = 7, 0.6
    # index value = 8, 0.7
    # index value = 9, 0.8
    # index value = 10, 0.9
    
    # in debug, apply to about the head 1M lines
    TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX = 43506399
    # in production, apply to the WHOLE lexicon
    # TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX = 6451948010
    # NOTE(review): the hard-coded counts in the comments below correspond to the
    # production total (6451948010), not the debug total currently active.
    
    # index = 0
    # TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_1Percent_kept: 64519480
    TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_1Percent_kept = int( TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX * 0.01 )
    # index = 1
    # TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_5Percent_kept: 322597400
    TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_5Percent_kept = int( TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX * 0.05 )
    # index = 2
    # TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_10Percent_kept: 645194801
    TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_10Percent_kept = int( TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX * 0.1 )
    # index = 3
    # TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_20Percent_kept: 1290389602
    TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_20Percent_kept = int( TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX * 0.2 )
    # index = 4
    # TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_30Percent_kept: 1935584403
    TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_30Percent_kept = int( TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX * 0.3 )
    # index = 5
    # TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_40Percent_kept: 2580779204
    TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_40Percent_kept = int( TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX * 0.4 )
    # index = 6
    # TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_50Percent_kept: 3225974005
    TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_50Percent_kept = int( TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX * 0.5 )
    # index = 7
    # TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_60Percent_kept: 3871168806
    TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_60Percent_kept = int( TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX * 0.6 )
    # index = 8
    # TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_70Percent_kept 4516363607
    TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_70Percent_kept = int( TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX * 0.7 )
    # Note:
    # For the query terms which have at least seen once. The total number of postings is: 5009420937
    # index = 9
    # TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_80Percent_kept: 5161558408
    TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_80Percent_kept = int( TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX * 0.8 )
    # index = 10
    # TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_90Percent_kept: 5806753209
    TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_90Percent_kept = int( TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX * 0.9 )
    
    
    print "TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX:",TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX
    print "TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_90Percent_kept:",TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_90Percent_kept
    # print "TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_80Percent_kept:",TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_80Percent_kept
    # print "TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_70Percent_kept:",TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_70Percent_kept
    # print "TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_60Percent_kept:",TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_60Percent_kept
    # print "TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_50Percent_kept:",TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_50Percent_kept
    # print "TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_40Percent_kept:",TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_40Percent_kept
    # print "TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_30Percent_kept:",TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_30Percent_kept
    # print "TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_20Percent_kept:",TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_20Percent_kept
    # print "TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_10Percent_kept:",TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_10Percent_kept
    # print "TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_5Percent_kept:",TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_5Percent_kept
    # print "TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_1Percent_kept:",TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_1Percent_kept 
     
    
    # thresholds to process, one pruning pass per entry; currently only the
    # 90%-kept threshold is enabled
    LIST_FOR_STORING_THRESHOLD_NUMBERS = []
    LIST_FOR_STORING_THRESHOLD_NUMBERS.append(TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_90Percent_kept)
    # LIST_FOR_STORING_THRESHOLD_NUMBERS.append(TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_80Percent_kept)
    # LIST_FOR_STORING_THRESHOLD_NUMBERS.append(TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_70Percent_kept)
    # LIST_FOR_STORING_THRESHOLD_NUMBERS.append(TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_60Percent_kept)
    # LIST_FOR_STORING_THRESHOLD_NUMBERS.append(TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_50Percent_kept)
    # LIST_FOR_STORING_THRESHOLD_NUMBERS.append(TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_40Percent_kept)
    # LIST_FOR_STORING_THRESHOLD_NUMBERS.append(TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_30Percent_kept)
    # LIST_FOR_STORING_THRESHOLD_NUMBERS.append(TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_20Percent_kept)
    # LIST_FOR_STORING_THRESHOLD_NUMBERS.append(TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_10Percent_kept)
    # LIST_FOR_STORING_THRESHOLD_NUMBERS.append(TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_5Percent_kept)
    # LIST_FOR_STORING_THRESHOLD_NUMBERS.append(TOTAL_NUM_OF_POSTINGS_IN_THE_INVERTED_INDEX_1Percent_kept)
    
    # for debug ONLY
    print "len(LIST_FOR_STORING_THRESHOLD_NUMBERS):",len(LIST_FOR_STORING_THRESHOLD_NUMBERS)
    
    # # of terms in the lexicon: 37728619 (37M)
    # # of terms in the table which has freq < 20: 37726113
    # # of terms in the table which are shown in the query log and has freq >= 20 (so called popular query terms): 2506
    # # of terms in the table which appear at least once and has freq < 20: 32752
    # # of terms in the table which appear at least once: 35258 
    # # of query terms in the 85K queries: 35627 = 35258(has been recorded in the table) + 369(out of the lexicon and we will NOT care)
    # # of query terms which are out of the lexicon: 432 - 369 = 63 (Is it? DONE, almost, I compute the value of 58, missing 5 terms :) )
    
    # key: term in string format
    # value: the tuple with following format (goldStandardRealProbability,1D,2D,goodTuring)
    queryTermsWithEstimatedProbabilitiesDict = {}
    inputFileName1 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/queryTermsFrom100KWithTheirTrueProbablityAndOurOwnModelPredictedProbablity1D_2D_GoodTuringProbabilityAdded20130820"
    inputFileHandler = open(inputFileName1,"r")
    
    # skip the headline e.g. queryTerm goldStandardRealProbability 1D 2D goodTuring
    inputFileHandler.readline()
    
    for currentLine in inputFileHandler.readlines():
        lineElements = currentLine.strip().split(" ")
        currentTerm = lineElements[0]
        
        currentTermGoldStandardRealProbability = float(lineElements[1])
        currentTerm1DProbability = float(lineElements[2])
        currentTerm2DProbability = float(lineElements[3])
        currentTermGoodTuringProbability = float(lineElements[4])
        # NOTE(review): probabilitiesTuple is built but never used; the dict
        # insertion below rebuilds the same tuple inline.
        probabilitiesTuple = (currentTermGoldStandardRealProbability,currentTerm1DProbability,currentTerm2DProbability,currentTermGoodTuringProbability)
        
        # keep only the first occurrence of each term
        if currentTerm not in queryTermsWithEstimatedProbabilitiesDict:
            queryTermsWithEstimatedProbabilitiesDict[currentTerm] = (currentTermGoldStandardRealProbability,currentTerm1DProbability,currentTerm2DProbability,currentTermGoodTuringProbability)
    print "len(queryTermsWithEstimatedProbabilitiesDict):",len(queryTermsWithEstimatedProbabilitiesDict)
    inputFileHandler.close()
    
    # key: classLabel in int format
    # value: a dict containing pieces info
        # key: the pieceID
        # value: the probability that the next posting will hit this piece area
    classLabelWithPiecesMetaInfoDict = {}
    inputFileName2 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/classLabelWithPiecesInfo_COMPLETED"
    inputFileHandler = open(inputFileName2,"r")
    for line in inputFileHandler.readlines():
        # row format (tab-separated): classLabel, numOfPieces, then one
        # probability per piece
        lineElements = line.strip().split("\t")
        # print "lineElements:",lineElements
        currentClassLabel = int(lineElements[0])
        if currentClassLabel not in classLabelWithPiecesMetaInfoDict:
            classLabelWithPiecesMetaInfoDict[currentClassLabel] = {}
        else:
            # duplicate class label in the input file is a fatal data error
            print "mark1,error"
            exit(1)
        numOfPiecesInCurrentClass = int(lineElements[1])
        assert numOfPiecesInCurrentClass == len(lineElements[2:])
        for pieceIndex,probabilityThatTheNextPostingWillHitThisPieceAreaInStringFormat in enumerate(lineElements[2:]):
            probabilityThatTheNextPostingWillHitThisPieceAreaInFloatFormat = float(probabilityThatTheNextPostingWillHitThisPieceAreaInStringFormat)
            if pieceIndex not in classLabelWithPiecesMetaInfoDict[currentClassLabel]:
                classLabelWithPiecesMetaInfoDict[currentClassLabel][pieceIndex] = probabilityThatTheNextPostingWillHitThisPieceAreaInFloatFormat
            else:
                # duplicate piece index within a class is a fatal data error
                print "mark2,error"
                exit(1)
    print "classLabelWithPiecesMetaInfoDict[0]:",classLabelWithPiecesMetaInfoDict[0]
    print "classLabelWithPiecesMetaInfoDict[4]:",classLabelWithPiecesMetaInfoDict[4]
    inputFileHandler.close()
    
    
    # actual doing overall sorting thing here
    # the list will contain tuple with the following tuple format (term(string),pieceNum(int),realSizeOfPiece(int),finalCompareValue(float))
    globalTermWithFinalComparedValueList = []
    inputFileName3 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/wholeLexiconTermsWithLengthOfListANDClassLabelANDNumOfPostingsInEachPieces20130819"
    inputFileHandler = open(inputFileName3,"r")
    lineCounter = 0 #init
    
    currentLine = inputFileHandler.readline()
    lineCounter += 1
    
    while currentLine:
        # row format (space-separated, produced by step1): term, listLength,
        # classLabel, numOfPieces, then (pieceNum, numOfPostings) pairs
        currentLineElements = currentLine.strip().split(" ")
        currentTerm = currentLineElements[0]
        currentTermLengthOfInvertedList = int(currentLineElements[1])
        currentTermClassLabel = int(currentLineElements[2])
        currentTermNumOfPieces = int(currentLineElements[3])
        
        '''
        # for DEBUG
        print "currentLineElements:",currentLineElements
        print "len(currentLineElements):",len(currentLineElements)        
        print "currentTerm:",currentTerm
        print "currentTermLengthOfInvertedList:",currentTermLengthOfInvertedList
        print "currentTermClassLabel:",currentTermClassLabel
        print "currentTermNumOfPieces:",currentTermNumOfPieces
        '''
        
        # Updated by Wei 2013/08/20 afternoon. NOT use the currentTermGoldStandardRealProbability,currentTerm1DProbability and currentTerm2DProbability currently
        # Cause this is NOT so KAO PU
        # currentTermGoldStandardRealProbability = 0.0
        # currentTerm1DProbability = 0.0
        # currentTerm2DProbability = 0.0
        currentTermGoodTuringProbability = 0.0
        
        if currentTerm in queryTermsWithEstimatedProbabilitiesDict:
            # the currentTerm is in the 100K queries and we can extract the probabilities from the tuple info
            (_,_,_,currentTermGoodTuringProbability) = queryTermsWithEstimatedProbabilitiesDict[currentTerm]
        else:
            # the currentTerm is NOT in the 100K queries.
            # fall back to the unseen-term good-Turing probability, taken from the file:
            # /data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/freqOfFreqInQueries_0_1_95K_95%_good_turing_output
            currentTermGoodTuringProbability = 1.37838705705e-09


        # sanity check: the remaining elements must form (pieceNum, size) pairs
        # NOTE(review): the label in the error print below says [3:] but the
        # value printed is computed from [4:] — the value is the correct one.
        if currentTermNumOfPieces != int(len(currentLineElements[4:])/2) :
            print "mark3"
            print "currentTermNumOfPieces:",currentTermNumOfPieces
            print "len(currentLineElements[3:])/2:",len(currentLineElements[4:])/2
            exit(1)
        
        # NOTE(review): baseIndex is never used; the loop below hard-codes the
        # offset 4 instead.
        baseIndex = 4
        for i in range(0,(len(currentLineElements[4:])),2):
            # do the computation for each pieces of the term
            pieceNum = int(currentLineElements[4+i])
            realSizeOfPiece = int(currentLineElements[4+i+1])
            # despite the name, the stored value is the per-piece hit
            # probability from classLabelWithPiecesInfo_COMPLETED — presumably
            # interpreted as an expected-results proxy; TODO confirm
            expNumOfResultsInCurrentPiece = classLabelWithPiecesMetaInfoDict[currentTermClassLabel][pieceNum]
            # score = P(term) * 10 * P(hit piece) / pieceSize; larger means the
            # piece is more valuable per posting kept
            finalCompareValue = currentTermGoodTuringProbability * 10 * expNumOfResultsInCurrentPiece / realSizeOfPiece
            
            '''
            # for DEBUG
            print "i:",i
            print "pieceNum:",pieceNum
            print "realSizeOfPiece:",realSizeOfPiece
            print "expNumOfResultsInCurrentPiece:",expNumOfResultsInCurrentPiece
            print "currentTermGoodTuringProbability:",currentTermGoodTuringProbability
            print "finalCompareValue:",finalCompareValue
            print
            '''
            
            currentTermPieceTuple = (currentTerm,pieceNum,realSizeOfPiece,finalCompareValue)
            globalTermWithFinalComparedValueList.append( currentTermPieceTuple )
        
        # for debug
        # print "len(globalTermWithFinalComparedValueList):",len(globalTermWithFinalComparedValueList)
        # print "globalTermWithFinalComparedValueList:",globalTermWithFinalComparedValueList
        
        
        
 
        
        currentLine = inputFileHandler.readline()
        lineCounter += 1
        
        
        # for temp test
        if lineCounter % 1000000 == 0:
            print lineCounter,"lines processed."
            # comment the following line: in production mode
            # NOT comment the following line: in debug mode
            # WARNING(review): this break is currently ACTIVE, so only the first
            # ~1M lines are processed — remember to comment it out in production.
            break
            
    
    print "len(globalTermWithFinalComparedValueList):",len(globalTermWithFinalComparedValueList)
    print "sort begins..."
    # from smallest(at the beginning of the list) to largest(at the end of the list)
    # NOTE(review): key=itemgetter(2) sorts by realSizeOfPiece and reverse=True
    # puts the LARGEST first, which contradicts the comment above; index 3
    # (finalCompareValue) may have been intended — confirm before relying on this.
    globalTermWithFinalComparedValueList.sort(cmp=None, key=itemgetter(2), reverse=True)
    print "globalTermWithFinalComparedValueList[0:5]:",globalTermWithFinalComparedValueList[0:5]
    print "globalTermWithFinalComparedValueList[-5:-1]:",globalTermWithFinalComparedValueList[-5:-1]
    print "sort ends."
    totalNumOfPosting = 0
    for currentTermPieceTuple in globalTermWithFinalComparedValueList:
        (currentTerm,pieceNum,realSizeOfPiece,finalCompareValue) = currentTermPieceTuple
        totalNumOfPosting += realSizeOfPiece
    print "totalNumOfPosting:",totalNumOfPosting
    
    # The getting rid of pieces process
    outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/piecesWhichHaveBeenGettingRidOf"
    outputFileHandler = open(outputFileName,"w")
    
    numOfPostingsLeft = totalNumOfPosting
    for currentNumOfPostingsThreshold in LIST_FOR_STORING_THRESHOLD_NUMBERS:
        # for DEBUG purpose
        # print "currentNumOfPostingsThreshold:",currentNumOfPostingsThreshold
        outputLine = "currentNumOfPostingsLeftThreshold:" + " " + str(currentNumOfPostingsThreshold) + " " + "\n"
        outputFileHandler.write(outputLine)
        # pop pieces from the front of the sorted list until the remaining
        # posting count drops to the threshold
        while currentNumOfPostingsThreshold < numOfPostingsLeft:
            # for DEBUG purpose
            # print "numOfPostingsLeft:",numOfPostingsLeft
            # print "len(globalTermWithFinalComparedValueList):",len(globalTermWithFinalComparedValueList)
            # print
            # PERF(review): list.pop(0) is O(n) per call; a collections.deque or
            # an advancing index would avoid the quadratic cost on big lists.
            currentPoppedTuple = globalTermWithFinalComparedValueList.pop(0)
            (currentTerm,pieceNum,realSizeOfPiece,finalCompareValue) = currentPoppedTuple
            outputLine = str(currentTerm) + " " + str(pieceNum) + " " + str(realSizeOfPiece) + " " + str(finalCompareValue) + "\n"
            outputFileHandler.write(outputLine)
            numOfPostingsLeft -= realSizeOfPiece
        
        print "Overall Statistics:"
        print "len(globalTermWithFinalComparedValueList):",len(globalTermWithFinalComparedValueList)
        print "currentNumOfPostingsThreshold:",currentNumOfPostingsThreshold
        print "percentageOfPostingsIdeallyKept:",currentNumOfPostingsThreshold/totalNumOfPosting
        print "numOfPostingsLeft:",numOfPostingsLeft
        print "percentageOfPostingsActuallyKept:",numOfPostingsLeft/totalNumOfPosting
        print 
        
    print "inputFileName1:",inputFileName1
    print "inputFileName2:",inputFileName2
    print "inputFileName3:",inputFileName3
    print "outputFileName:",outputFileName
    
    inputFileHandler.close()
    outputFileHandler.close()

def middleStep_analyzeMyFaultInDoingThisPruning():
    # key: query term in string format
    # value: length of list in int format
    queryTermsWithTheirLengthOfList = {}
    inputFileName0 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/100KQueryTermsWithTermFreqInCollection.txt"
    inputFileHandler = open(inputFileName0,"r")
    for line in inputFileHandler.readlines():
        lineElements = line.strip().split(" ")
        currentTerm = lineElements[0]
        currentTermLengthOfList = int(lineElements[1])
        if currentTerm not in queryTermsWithTheirLengthOfList:
            queryTermsWithTheirLengthOfList[currentTerm] = currentTermLengthOfList
    print "len(queryTermsWithTheirLengthOfList):",len(queryTermsWithTheirLengthOfList)
    print "queryTermsWithTheirLengthOfList['deficits']:",queryTermsWithTheirLengthOfList['deficits']
    inputFileHandler.close()
    
    
    # key: term in string format
    # value: the tuple with following format (goldStandardRealProbability,1D,2D,goodTuring)
    queryTermsWithEstimatedProbabilitiesDict = {}
    inputFileName1 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/queryTermsFrom100KWithTheirTrueProbablityAndOurOwnModelPredictedProbablity1D_2D_GoodTuringProbabilityAdded20130820"
    inputFileHandler = open(inputFileName1,"r")
    
    # skip the headline e.g. queryTerm goldStandardRealProbability 1D 2D goodTuring
    inputFileHandler.readline()
    
    for currentLine in inputFileHandler.readlines():
        lineElements = currentLine.strip().split(" ")
        currentTerm = lineElements[0]
        
        currentTermGoldStandardRealProbability = float(lineElements[1])
        currentTerm1DProbability = float(lineElements[2])
        currentTerm2DProbability = float(lineElements[3])
        currentTermGoodTuringProbability = float(lineElements[4])
        probabilitiesTuple = (currentTermGoldStandardRealProbability,currentTerm1DProbability,currentTerm2DProbability,currentTermGoodTuringProbability)
        
        if currentTerm not in queryTermsWithEstimatedProbabilitiesDict:
            queryTermsWithEstimatedProbabilitiesDict[currentTerm] = (currentTermGoldStandardRealProbability,currentTerm1DProbability,currentTerm2DProbability,currentTermGoodTuringProbability)
    print "len(queryTermsWithEstimatedProbabilitiesDict):",len(queryTermsWithEstimatedProbabilitiesDict)
    print "queryTermsWithEstimatedProbabilitiesDict['deficits']:",queryTermsWithEstimatedProbabilitiesDict['deficits']
    inputFileHandler.close()
    print "test begins..."
    (currentTermGoldStandardRealProbability,currentTerm1DProbability,currentTerm2DProbability,currentTermGoodTuringProbability) = queryTermsWithEstimatedProbabilitiesDict['deficits']
    print "currentTermGoodTuringProbability/queryTermsWithTheirLengthOfList['deficits']:",currentTermGoodTuringProbability/queryTermsWithTheirLengthOfList['deficits']
    print "test ends."
    exit(1)


def step4_runMainExperiments():
    
    # key: term in string in string format
    # value: another dict
        # key: pieceNum in int format
        # value: no use currently
    termWithPiecesBeenDeletedDict = {}
    
    inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/piecesWhichHaveBeenGettingRidOf"
    inputFileHanlder = open(inputFileName,"r")
    percentageCounter = 0
    currentLine = inputFileHanlder.readline()
    print "currentLine:",currentLine
    if currentLine.startswith("currentNumOfPostingsLeftThreshold:"):
        currentLineElements = currentLine.strip().split(" ")
        percentageOfPostingsActuallyKept = int(currentLineElements[1])
        print "Percentage Hit."
        print "percentageOfPostingsActuallyKept:",percentageOfPostingsActuallyKept
        currentLine = inputFileHanlder.readline()
        percentageCounter += 1
        if percentageCounter == 2:
            exit(1)
    
    while currentLine:
        
        if currentLine.startswith("currentNumOfPostingsLeftThreshold:"):
            currentLineElements = currentLine.strip().split(" ")
            percentageOfPostingsActuallyKept = int(currentLineElements[1])
            print "Percentage Hit."
            print "percentageOfPostingsActuallyKept:",percentageOfPostingsActuallyKept
            percentageCounter += 1
            if percentageCounter == 2:
                print "len(termWithPiecesBeenDeletedDict):",len(termWithPiecesBeenDeletedDict)
                totalNumOfPiecesCut = 0
                for term in termWithPiecesBeenDeletedDict:
                    totalNumOfPiecesCut += len(termWithPiecesBeenDeletedDict[term])
                print "totalNumOfPiecesCut:",totalNumOfPiecesCut
                print "termWithPiecesBeenDeletedDict['gov']:",termWithPiecesBeenDeletedDict['gov']
                # termWithPiecesBeenDeletedDict['gov']: {0: 0, 1: 0, 2: 0}
                
                # key: qid in int format
                # value: num of results in int format
                qidWithNumOfValidResultsDict = {}
                temInputFileName1 = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/tail5KWithNumOfResultsEvaluated"
                tempInputFileHandler = open(temInputFileName1,"r")
                for line in tempInputFileHandler.readlines():
                    lineElements = line.strip().split(" ")
                    qidInIntFormat = int(lineElements[0])
                    numOfResultForCurrentQID = int(lineElements[1])
                    if qidInIntFormat not in qidWithNumOfValidResultsDict:
                        qidWithNumOfValidResultsDict[qidInIntFormat] = numOfResultForCurrentQID
                    else:
                        print "system error"
                        exit(1)
                tempInputFileHandler.close()
                print "len(qidWithNumOfValidResultsDict):",len(qidWithNumOfValidResultsDict)
                
                totalNumOfTOP10Results = 0
                for qid in qidWithNumOfValidResultsDict:
                    totalNumOfTOP10Results += qidWithNumOfValidResultsDict[qid]
                print "totalNumOfTOP10Results(before pruned):",totalNumOfTOP10Results
                
                # key: oneResultKey with str(qidInIntFormat) + "_" + str(docIDInIntFormat) in string format
                # value: NO USE currently
                deletedResultDict = {}
                temInputFileName2 = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/tail5KResultsWithRelatedPieceNum"
                tempInputFileHandler = open(temInputFileName2,"r")
                for line in tempInputFileHandler.readlines():
                    lineElements = line.strip().split(" ")
                    qidInIntFormat = int(lineElements[0])
                    docIDInIntFormat = int(lineElements[1])
                    oneResultKey = str(qidInIntFormat) + "_" + str(docIDInIntFormat)
                    currentTerm = lineElements[2]
                    currentTermPieceInIntFormat = int(lineElements[4])
                    if currentTerm in termWithPiecesBeenDeletedDict and currentTermPieceInIntFormat in termWithPiecesBeenDeletedDict[currentTerm] and oneResultKey not in deletedResultDict:
                        deletedResultDict[oneResultKey] = 0 # prevent double delete of this result
                        qidWithNumOfValidResultsDict[qidInIntFormat] -= 1
                
                totalNumOfTOP10Results = 0
                for qid in qidWithNumOfValidResultsDict:
                    totalNumOfTOP10Results += qidWithNumOfValidResultsDict[qid]
                print "totalNumOfTOP10Results(after pruned):",totalNumOfTOP10Results
                print "len(deletedResultDict):",len(deletedResultDict)
                tempInputFileHandler.close()
                exit(1)
        else:
            currentLineElements = currentLine.strip().split(" ")
            currentTerm = currentLineElements[0]
            currentPieceNum = int(currentLineElements[1])
            
            if currentTerm not in termWithPiecesBeenDeletedDict:
                termWithPiecesBeenDeletedDict[currentTerm] = {}
            if currentPieceNum not in termWithPiecesBeenDeletedDict[currentTerm]:
                termWithPiecesBeenDeletedDict[currentTerm][currentPieceNum] = 0 # the value has no meaning currently
            
            termWithPiecesBeenDeletedDict[currentTerm][currentPieceNum] = 0
        currentLine = inputFileHanlder.readline()
    inputFileHanlder.close()
    

# Module entry point: exactly one pipeline step is enabled per run; the
# others are kept commented out so the next step can be switched on by
# hand.  step3 is the currently active step.
print "Program Begins..."
# step1_producePiecesInfoGivenSetOfTerms()
# step2_makeClassLabelWithPieceInfoComplete()
step3_produceCuttingThreshold()
# middleStep_analyzeMyFaultInDoingThisPruning()
# tonight, need to debug this. Updated on 2013/08/21 night by Wei
# step4_runMainExperiments()
print "Program Ends."







