from __future__ import division
from operator import itemgetter, attrgetter
import random
import os
import sys
import math

# step1
def selectRandomPostings(
        outputFileName="/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/randomlySelectedWithUniversalPostingIDs_20130924Night",
        totalNumOfRandomlySampledPostings=100000,
        totalNumOfPostingsInIndex=6451948010):
    """Sample unique universal posting IDs uniformly at random and write them
    to outputFileName, sorted ascending, one decimal ID per line.

    Defaults reproduce the original run: 100K samples out of the 6,451,948,010
    postings (IDs 0 .. totalNumOfPostingsInIndex-1) in the Gov2 index.
    """
    print("Function Begins...")
    print("The purpose of this function is to select some random postings with their universal postingIDs")

    # A set gives O(1) de-duplication directly; the original kept a parallel
    # list and dict for the same purpose and printed progress on every draw.
    selectedPostingIDs = set()
    while len(selectedPostingIDs) < totalNumOfRandomlySampledPostings:
        # random.randint is inclusive on both ends, hence the -1.
        selectedPostingIDs.add(random.randint(0, totalNumOfPostingsInIndex - 1))

    # 'with' guarantees the handle is closed even if a write fails.
    with open(outputFileName, "w") as outputFileHandler:
        for postingID in sorted(selectedPostingIDs):
            outputFileHandler.write(str(postingID) + "\n")

    print("outputFileName: " + outputFileName)
    print("Function Ends.")


# ONE TIME ONLY computation function
def generatePrefixSumAuxFileOfEachTermInGov2Index(
        inputFileName="/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/wholeLexiconTermsWithTermFreqInCollection.txt",
        outputFileName="/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/wholeLexiconTermsWithTermFreqInCollectionANDPostingRangePrefixSum_20130806.txt"):
    """Append the running prefix sum of posting-list lengths to each lexicon line.

    Each input line is "term numOfPostings"; each output line is the stripped
    input line plus " prefixSum", where prefixSum is the global index of the
    term's first posting (i.e. the cumulative posting count of all earlier
    terms). Defaults are the original hard-coded Gov2 file paths.
    """
    print("Function Begins...")
    runningPrefixSum = 0
    # 'with' guarantees both handles are closed even if parsing fails mid-file.
    with open(inputFileName, "r") as inputFileHandler:
        with open(outputFileName, "w") as outputFileHandler:
            for currentLine in inputFileHandler:
                strippedLine = currentLine.strip()
                if not strippedLine:
                    continue  # tolerate blank lines (the original crashed on them)
                currentTermNumOfPostings = int(strippedLine.split(" ")[1])
                outputFileHandler.write(strippedLine + " " + str(runningPrefixSum) + "\n")
                runningPrefixSum += currentTermNumOfPostings

    print("inputFileName: " + inputFileName)
    print("outputFileName: " + outputFileName)
    print("Function Ends.")

#step3
def addLocalPostingIDsForIRToolkitToUse(
        outputFileName="/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/randomlySelectedWithUniversalAndLocalPostingIDs_20130924Night",
        auxInputFileName1="/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/wholeLexiconTermsWithTermFreqInCollectionANDPostingRangePrefixSum_20130806.txt",
        auxInputFileName2="/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/randomlySelectedWithUniversalPostingIDs_20130924Night"):
    """Translate each sampled global posting ID into (term, local posting ID).

    auxInputFileName1: one line per lexicon term, "term freq prefixSum", in
        ascending prefixSum order (prefixSum = global index of the term's
        first posting).
    auxInputFileName2: sampled global posting IDs, one per line, ascending.
    Output: one line "term localPostingID globalPostingID" per sampled ID.

    Fixes vs. the original: posting IDs that fall in the LAST term's range no
    longer crash (the original called int() on the empty EOF line), and blank
    lines in the posting-ID file no longer re-emit the previous record.
    """
    print("Function Begins...")

    def readNextTermEntry(termFileHandler):
        # Returns (term, baseIndex); at EOF the base is +inf so the final
        # term's half-open range [base, inf) covers all remaining IDs.
        termLine = termFileHandler.readline()
        if not termLine.strip():
            return None, float("inf")
        termLineElements = termLine.strip().split(" ")
        return termLineElements[0], int(termLineElements[2])

    with open(auxInputFileName1, "r") as termFileHandler:
        with open(auxInputFileName2, "r") as postingIDFileHandler:
            with open(outputFileName, "w") as outputFileHandler:
                currentTerm, currentTermBaseIndex = readNextTermEntry(termFileHandler)
                nextTerm, nextTermBaseIndex = readNextTermEntry(termFileHandler)
                for postingIDLine in postingIDFileHandler:
                    strippedLine = postingIDLine.strip()
                    if not strippedLine:
                        continue  # skip blank lines instead of duplicating output
                    globalPostingID = int(strippedLine)
                    # Both inputs are sorted, so advance through the term
                    # ranges until the one containing this global ID is found.
                    while not (currentTermBaseIndex <= globalPostingID < nextTermBaseIndex):
                        currentTerm = nextTerm
                        currentTermBaseIndex = nextTermBaseIndex
                        nextTerm, nextTermBaseIndex = readNextTermEntry(termFileHandler)
                    localPostingID = globalPostingID - currentTermBaseIndex
                    outputFileHandler.write(currentTerm + " " + str(localPostingID) + " " + str(globalPostingID) + "\n")

    print("auxInputFileName1: " + auxInputFileName1)
    print("auxInputFileName2: " + auxInputFileName2)
    print("outputFileName: " + outputFileName)
    print("Function Ends.")

# step4
def _printPruningThresholdTable(tableLabel, scoresSortedDescending):
    """Print one "# threshold keep throw" table for a descending score list."""
    keepThrowPairs = [("0.9", "0.1"), ("0.8", "0.2"), ("0.7", "0.3"),
                      ("0.6", "0.4"), ("0.5", "0.5"), ("0.4", "0.6"),
                      ("0.3", "0.7"), ("0.2", "0.8"), ("0.1", "0.9"),
                      ("0.05", "0.95"), ("0.01", "0.99")]
    print("")
    print("# " + tableLabel)
    print("# uniformPruningThreshold percentageToKeep percentageToThrow")
    print("# 0.0 1.0 0.0")
    for keepStr, throwStr in keepThrowPairs:
        # Keeping the top keepStr fraction means the threshold score sits at
        # index int(len * keep) of the descending-sorted list.
        lowerBoundIndex = int(len(scoresSortedDescending) * float(keepStr))
        print("# %s %s %s" % (scoresSortedDescending[lowerBoundIndex], keepStr, throwStr))


def computePercentageThresholdForGov2Index(
        inputFileName="/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/randomlySampledPostingIDList20130828_with_set_of_probability_added_DEBUG"):
    """Compute uniform pruning thresholds for several scoring signals.

    Reads one sampled posting per line; columns 0-2 are identifiers and are
    ignored, columns 3-9 are: partialBM25, P(1), P(2), P(3), P(2&3), P(1&3),
    P(1&2&3). Prints, for each reported signal, the score threshold that keeps
    the top 90%..1% of sampled postings.

    Fix vs. the original: the 0.05-keep row now prints percentageToThrow 0.95
    (the original printed 0.85 in every table).
    """
    print("Function Begins...")
    partialBM25ScoreList = []
    firstFactorProbabilityList = []
    secondFactorProbabilityList = []
    thirdFactorProbabilityList = []
    second_AND_thirdFactorProbabilityList = []
    first_AND_thirdFactorProbabilityList = []
    first_AND_second_AND_thirdFactorProbabilityList = []
    # One list per score column, in file-column order (columns 3..9).
    scoreLists = [partialBM25ScoreList,
                  firstFactorProbabilityList,
                  secondFactorProbabilityList,
                  thirdFactorProbabilityList,
                  second_AND_thirdFactorProbabilityList,
                  first_AND_thirdFactorProbabilityList,
                  first_AND_second_AND_thirdFactorProbabilityList]

    with open(inputFileName, "r") as inputFileHandler:
        for line in inputFileHandler:
            if not line.strip():
                continue  # tolerate blank lines
            lineElements = line.strip().split(" ")
            for columnOffset, scoreList in enumerate(scoreLists):
                scoreList.append(float(lineElements[3 + columnOffset]))

    for scoreList in scoreLists:
        scoreList.sort(reverse=True)  # descending: highest scores are kept first

    print("len(partialBM25ScoreList): %d" % len(partialBM25ScoreList))
    print("len(firstFactorProbabilityList): %d" % len(firstFactorProbabilityList))
    print("len(secondFactorProbabilityList): %d" % len(secondFactorProbabilityList))
    print("len(thirdFactorProbabilityList): %d" % len(thirdFactorProbabilityList))
    print("len(second_AND_thirdFactorProbabilityList): %d" % len(second_AND_thirdFactorProbabilityList))
    print("len(first_AND_thirdFactorProbabilityList): %d" % len(first_AND_thirdFactorProbabilityList))
    print("len(first_AND_second_AND_thirdFactorProbabilityList): %d" % len(first_AND_second_AND_thirdFactorProbabilityList))

    # NOTE(review): the first- and second-factor lists are collected and
    # sorted but, as in the original run, no table is printed for them.
    _printPruningThresholdTable("partialBM25", partialBM25ScoreList)
    _printPruningThresholdTable("3", thirdFactorProbabilityList)
    _printPruningThresholdTable("2_3", second_AND_thirdFactorProbabilityList)
    _printPruningThresholdTable("1_3", first_AND_thirdFactorProbabilityList)
    _printPruningThresholdTable("1_2_3", first_AND_second_AND_thirdFactorProbabilityList)

    print("Function Ends.")

print("Program Begins...")

# step1: selectRandomPostings()
#   -> /data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/randomlySelectedWithUniversalPostingIDs_20130924Night

# ONE TIME ONLY: generatePrefixSumAuxFileOfEachTermInGov2Index()
#   -> /data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/wholeLexiconTermsWithTermFreqInCollectionANDPostingRangePrefixSum_20130806.txt

# step3: attach local (per-term) posting IDs to the sampled global IDs.
addLocalPostingIDsForIRToolkitToUse()

# step4 (updated by Wei 2013/08/30 afternoon): computePercentageThresholdForGov2Index()

print("Program Ends.")
    
