from __future__ import division
from operator import itemgetter, attrgetter
import random
import os
import sys
import math

# step1
def selectRandomPostings(totalNumOfRandomlySampledPostings=1000000,
                         totalNumOfPostingsInIndex=17075485964,
                         outputFileName="/local_scratch/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/uniformPruningMethods/clueweb09B/randomlySelectedWithUniversalPostingIDs_20140614"):
    """Sample distinct universal posting IDs uniformly at random and write them out.

    Output: one line per sampled posting, "<rank> <postingID>", with ranks
    assigned after sorting the sampled IDs in ascending order.

    Parameters (defaults reproduce the original hard-coded clueweb09B run):
      totalNumOfRandomlySampledPostings -- sample size
                                           (gov2: 100K, clueweb09B: 1M)
      totalNumOfPostingsInIndex -- total postings in the index
                                   (gov2: 6451948010, clueweb09B: 17075485964)
      outputFileName -- destination path for the sampled-ID file
    """
    print("Function Begins...")
    print("The purpose of this function is to select some random postings with their universal postingIDs")

    # A single set replaces the original's parallel list + dict: same O(1)
    # membership test, half the bookkeeping.
    sampledPostingIDs = set()
    while len(sampledPostingIDs) < totalNumOfRandomlySampledPostings:
        # random.randint is inclusive on both ends, hence the -1.
        postingID = random.randint(0, totalNumOfPostingsInIndex - 1)
        if postingID not in sampledPostingIDs:
            sampledPostingIDs.add(postingID)
            # Report progress only when the sample actually grew; the original
            # could print the same milestone repeatedly on duplicate draws.
            if len(sampledPostingIDs) % 10000 == 0:
                print("%d postings gathered." % len(sampledPostingIDs))

    # 'with' guarantees the file is closed even if a write fails.
    with open(outputFileName, "w") as outputFileHandler:
        for index, postingID in enumerate(sorted(sampledPostingIDs)):
            outputFileHandler.write(str(index) + " " + str(postingID) + "\n")

    print("Overall:")
    print("outputFileName: %s" % outputFileName)
    print("Function Ends.")


# ONE TIME ONLY computation function
def generatePrefixSumAuxFileOfEachTermInGov2Index(
        inputFileName="/local_scratch/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/clueweb09BIndexOverallStatistics_20140606",
        outputFileName="/local_scratch/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/clueweb09BIndexOverallStatisticsANDPostingRangePrefixSum_20140614"):
    """Append each term's posting-range prefix sum to its statistics line.

    Input: one line per term; column 2 (zero-based, space-separated) is the
    term's posting count.
    Output: the same line plus one extra column -- the running total of the
    posting counts of all preceding terms, i.e. the universal posting index at
    which this term's posting range begins.

    Despite the historical name, the defaults target the clueweb09B files;
    pass the gov2 paths to process that index instead.
    """
    print("Function Begins...")
    # The original seeded this with 1 before any line was handled, skewing
    # the progress output by one.
    numOfLinesProcessed = 0
    currentTermBeginningPosition = 0
    with open(inputFileName, "r") as inputFileHandler, \
         open(outputFileName, "w") as outputFileHandler:
        for currentLine in inputFileHandler:
            strippedLine = currentLine.strip()
            # Column 2 holds the number of postings in this term's list.
            currentTermNumOfPostingsInTheList = int(strippedLine.split(" ")[2])
            outputFileHandler.write(strippedLine + " " + str(currentTermBeginningPosition) + "\n")
            currentTermBeginningPosition += currentTermNumOfPostingsInTheList
            numOfLinesProcessed += 1
            if numOfLinesProcessed % 1000000 == 0:
                print("numOfLinesProcessed: %d" % numOfLinesProcessed)

    print("inputFileName: %s" % inputFileName)
    print("outputFileName: %s" % outputFileName)
    print("Function Ends.")

#step3
def addLocalPostingIDsForIRToolkitToUse(
        outputFileName="/local_scratch/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/uniformPruningMethods/clueweb09B/randomlySelectedWithUniversalAndLocalPostingIDs_20140614",
        auxInputFileName1="/local_scratch/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/clueweb09BIndexOverallStatisticsANDPostingRangePrefixSum_20140614",
        auxInputFileName2="/local_scratch/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/uniformPruningMethods/clueweb09B/randomlySelectedWithUniversalPostingIDs_20140614"):
    """Translate sampled universal posting IDs into per-term local posting IDs.

    A single linear merge pass over two ascending-sorted inputs:
      auxInputFileName1 -- step2 output; one line per term with column 0 =
                           termID, column 1 = term, column 3 = posting-range
                           base index (prefix sum).
      auxInputFileName2 -- step1 output; "<rank> <globalPostingID>" lines.
    Output: "<termID> <term> <localPostingID> <globalPostingID>" per sample,
    where localPostingID = globalPostingID - term base index.

    Fixes over the original: postings falling in the LAST term no longer
    crash (the original raised IndexError reading past the end of the term
    file), and blank posting lines are skipped instead of re-emitting the
    previous posting ID.
    """
    print("Function Begins...")

    def _readTermEntry(fileHandler):
        # One (termID, term, baseIndex) record, or None at end of file.
        line = fileHandler.readline()
        if line.strip() == "":
            return None
        elements = line.strip().split(" ")
        return elements[0], elements[1], int(elements[3])

    with open(auxInputFileName1, "r") as termFileHandler, \
         open(auxInputFileName2, "r") as postingFileHandler, \
         open(outputFileName, "w") as outputFileHandler:
        # Term cursor: the current term plus the base index of the NEXT term,
        # which is the exclusive upper bound of the current term's range.
        currentTermID, currentTerm, currentTermBaseIndex = _readTermEntry(termFileHandler)
        nextEntry = _readTermEntry(termFileHandler)
        # Sentinel +inf lets postings of the final term match the range test.
        nextTermBaseIndex = nextEntry[2] if nextEntry is not None else float("inf")

        numOfPostingProcessed = 0
        for postingLine in postingFileHandler:
            if postingLine.strip() == "":
                continue  # tolerate trailing blank lines
            currentGlobalPostingIndexID = int(postingLine.strip().split(" ")[1])

            # Advance the term cursor until the posting falls inside
            # [currentTermBaseIndex, nextTermBaseIndex).
            while not (currentTermBaseIndex <= currentGlobalPostingIndexID < nextTermBaseIndex):
                currentTermID, currentTerm, currentTermBaseIndex = nextEntry
                nextEntry = _readTermEntry(termFileHandler)
                nextTermBaseIndex = nextEntry[2] if nextEntry is not None else float("inf")

            # This posting belongs to the current term.
            currentLocalPostingIndexID = currentGlobalPostingIndexID - currentTermBaseIndex
            outputFileHandler.write(currentTermID + " " + currentTerm + " " +
                                    str(currentLocalPostingIndexID) + " " +
                                    str(currentGlobalPostingIndexID) + "\n")
            numOfPostingProcessed += 1
            if numOfPostingProcessed % 1000 == 0:
                print("%d postings processed." % numOfPostingProcessed)

    print("auxInputFileName1: %s" % auxInputFileName1)
    print("auxInputFileName2: %s" % auxInputFileName2)
    print("outputFileName: %s" % outputFileName)
    print("Function Ends.")

# step4 
def computePercentageThresholdForBothDatasets(
        inputFileName="/local_scratch/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/uniformPruningMethods/gov2/randomlySampledPostingIDList20130828_with_set_of_probability_added_DEBUG_1D",
        scoreColumnIndex=3):
    """Derive uniform-pruning score thresholds from a sampled-score file.

    Reads one partial-BM25 score per line from column scoreColumnIndex
    (zero-based; 3 for the gov2 file layout, 4 for clueweb09B), sorts the
    scores in descending order, and reports, for each "fraction of index
    kept", the minimum score a posting must reach to be kept.

    Returns a list of (fractionKept, thresholdScore) pairs in the printed
    order, starting with (1.0, 0.0) for "keep everything".
    """
    print("Function Begins...")
    partialBM25ScoreList = []
    with open(inputFileName, "r") as inputFileHandler:
        for line in inputFileHandler:
            lineElements = line.strip().split(" ")
            partialBM25ScoreList.append(float(lineElements[scoreColumnIndex]))

    # Descending sort: index i then holds the (i+1)-th highest score, so the
    # score at index int(N * f) is the cut-off that keeps fraction f.
    # (The Python-2-only cmp= argument of the original sort call is gone.)
    partialBM25ScoreList.sort(reverse=True)

    print("len(partialBM25ScoreList): %d" % len(partialBM25ScoreList))

    # One table drives what the original spelled out as 19 hand-unrolled
    # TopNPercentKeptLowerBoundIndex variables.
    fractionsKept = [0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.15,
                     0.1, 0.09, 0.08, 0.07, 0.06, 0.05, 0.04, 0.03, 0.02, 0.01]
    thresholds = [(1.0, 0.0)]  # keeping the whole index needs no threshold

    print("")
    print("# impact scores(partialBM25)")
    print("# uniformPruningThreshold, % index kept")
    print("# 0.0 1.0")
    for fractionKept in fractionsKept:
        lowerBoundIndex = int(len(partialBM25ScoreList) * fractionKept)
        thresholdScore = partialBM25ScoreList[lowerBoundIndex]
        thresholds.append((fractionKept, thresholdScore))
        print("# " + str(thresholdScore) + " " + str(fractionKept))
    print("Function Ends.")
    return thresholds

# --- Driver ----------------------------------------------------------------
# Pipeline for computing uniform-pruning thresholds. Steps 1-3 are one-shot
# preprocessing runs, left commented out once their output files exist on
# disk; a normal run executes only step 4.
print "Program Begins..."
# Updated by Wei 2014/06/14 at school
# step1: sample random universal posting IDs.
# selectRandomPostings()
# sample output file
# /local_scratch/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/uniformPruningMethods/clueweb09B/randomlySelectedWithUniversalPostingIDs_20140614

# step2: This is ONE TIME ONLY computation function (per-term prefix sums).
# generatePrefixSumAuxFileOfEachTermInGov2Index()
# sample output file
# /local_scratch/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/clueweb09BIndexOverallStatisticsANDPostingRangePrefixSum_20140614

# step3: map sampled universal posting IDs to per-term local posting IDs.
# addLocalPostingIDsForIRToolkitToUse()
# sample output file (This is the input of the toolkit)
# /local_scratch/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/uniformPruningMethods/clueweb09B/randomlySelectedWithUniversalAndLocalPostingIDs_20140614

# step4: turn the toolkit's scored sample into pruning thresholds.
computePercentageThresholdForBothDatasets()

print "Program Ends."
