# Updated by Wei at school 2013/11/22 afternoon
# Continue to update this program by Wei at school on 2014/02/05 afternoon
# Continue to update this program by Wei at school on 2014/02/09 night
# Take the configuration and apply to the whole gov2 index
# Continue to update this program by Wei at school on 2014/02/10 afternoon
# (1) How can we make it fast?
# (2) How can we fit it entirely in main memory?
# abandoned since 2014/02/10 afternoon for the following reasons:
# (1) difficult to handle the heap in python
# (2) already have some code for the c++ implementation, so I think I can just do it in c++
from __future__ import division
from operator import itemgetter, attrgetter

#import gc
#import math
#import matplotlib
import os
import pylab
import random
import sys
import time
#from sets import Set
#from scipy import stats
#import numpy as np
import heapq
from guppy import hpy

# print "GOD..."
# exit(1)

print "Program Begins..."
# Weight applied to the dynamic-probability component when scoring a posting
# in step1: score = -(static + DYNAMIC_WEIGHT * dynamic).
DYNAMIC_WEIGHT = 1
# Multiplier used to pack a (termID, docID) pair into one integer key:
# packedKey = termID * BIG_NUM_FOR_DIFF + docID. Assumes docID < 100M.
BIG_NUM_FOR_DIFF = 100000000 #100M

# One per-document list of (score, packedKey) tuples; the lists are merged
# in score order with heapq.merge in the evaluation step (merge sort).
iters = []
# h = []


# It is NOT NEEDED when applying to the whole gov2 index
# step-2: load the info for the doc AND docID converter
# Stays empty because the step-2 loader below is disabled; step1 then falls
# back to int(localDocID) for every document.
localDocIDANDUniversalDocIDDict = {}
# NOTE(review): disabled code path, parked inside a module-level string
# literal so it never executes. If re-enabled it would populate
# localDocIDANDUniversalDocIDDict from a trecID/local/universal mapping file.
'''
# the input file format:
# 1st column: trecID
# 2ed column: docID (for the baby index)
# 3th column: docID (for the whole index)
inputFileName1 = "/home/diaosi/gov2ClearYourMindAndDoItAgain/trecID_WholeIndexUniversalDocID_MappingTableForBabyIndex_sortedByTrecID"
inputFileHandler = open(inputFileName1,"r")
for currentLine in inputFileHandler.readlines():
    currentLineElements = currentLine.strip().split(" ") 
    currentLocalDocIDInStringFormat = currentLineElements[1]
    currentUniversalDocIDInIntFormat = currentLineElements[2]
    if currentLocalDocIDInStringFormat not in localDocIDANDUniversalDocIDDict:
        localDocIDANDUniversalDocIDDict[currentLocalDocIDInStringFormat] = currentUniversalDocIDInIntFormat
print "len(localDocIDANDUniversalDocIDDict):",len(localDocIDANDUniversalDocIDDict)
print "localDocIDANDUniversalDocIDDict['0']:",localDocIDANDUniversalDocIDDict['0']
inputFileHandler.close()
# exit(1)
'''

# NOTE(review): disabled code path, parked inside a module-level string
# literal so it never executes. If re-enabled it would load the
# term -> termID lexicon mapping (~1.7M entries) into termANDTermIDDict.
'''
# It is NEEDED when applying to the whole gov2 index
# step-1: load the info for term AND termID converter 
termANDTermIDDict = {}
# the input format of the file should be the following:
# termID
# term
# 1.7M terms in the lexicon
# It takes 3.817s to load
# inputFileName2 = "/home/diaosi/gov2ClearYourMindAndDoItAgain/TermsWithTermIDFor90778ImportantAndFakeDocsFromGov2"
inputFileName2 = "/data/obukai/gov2ClearYourMindAndDoItAgain/wholeLexiconTermsWithTermID"
# It takes 2 mins to load
# inputFileName = "/home/diaosi/gov2ClearYourMindAndDoItAgain/wholeLexiconTermsWithTermID"
inputFileHandler = open(inputFileName2,"r")
currentLine = inputFileHandler.readline()
currentLineNum = 0

while currentLine:
    if currentLineNum % 1000000 == 0:
        print currentLineNum,"lines loaded."
    currentLineElements = currentLine.strip().split(" ")
    currentTermIDInIntFormat = currentLineElements[0]
    currentTerm = currentLineElements[1]
    if currentTerm not in termANDTermIDDict:
        termANDTermIDDict[currentTerm] = currentTermIDInIntFormat
    currentLine = inputFileHandler.readline()
    currentLineNum += 1
print "len(termANDTermIDDict): ",len(termANDTermIDDict)
print "termANDTermIDDict['0000000000000000']: ",termANDTermIDDict['0000000000000000']
inputFileHandler.close()
# exit(1)
'''


# It is NEEDED when applying to the whole gov2 index
# step0: load the info for all the related TOP10 postings of the final 5K queries
# It takes 1s
top10RelatedPostingsDict = {}
# The input format of the file
# queryID
# docID
# term
# (Just guess) rank in the list for this posting
# pieceNumIndex
# inputFileName3 = "/home/diaosi/web-search-engine-wei/polyIRIndexer/tail5KResultsWithRelatedPieceNum_sortedByQID"
# inputFileName3 = "/data/obukai/gov2ClearYourMindAndDoItAgain/tail5KResultsWithRelatedPieceNum_sortedByQID"
inputFileName3 = "/data/obukai/gov2ClearYourMindAndDoItAgain/tail5KResults_sortedByQID_NEW_FORMAT_20140210Afternoon"
inputFileHandler = open(inputFileName3,"r")
currentLine = inputFileHandler.readline()
while currentLine:
    currentLineElements = currentLine.strip().split(" ")
    currentDocIDInIntFormat = int(currentLineElements[1])
    currentTermIDInIntFormat = int(currentLineElements[2])
    currentPostingKeyInIntFormat = currentTermIDInIntFormat * BIG_NUM_FOR_DIFF + currentDocIDInIntFormat 
    if currentPostingKeyInIntFormat not in top10RelatedPostingsDict:
        top10RelatedPostingsDict[currentPostingKeyInIntFormat] = 1
    currentLine = inputFileHandler.readline()
# print "top10RelatedPostingsDict['bronx_21911485']: ",top10RelatedPostingsDict['bronx_21911485']
print "len(top10RelatedPostingsDict):",len(top10RelatedPostingsDict)
# print top10RelatedPostingsDict[1602680521911485]
# print top10RelatedPostingsDict[1602680521911486]
inputFileHandler.close()
# for debug
# h=hpy()
# print h.heap()
# exit(1)



# step1: some very simple popping methods to do it
# Hard cap on how many input documents (lines) to ingest; 26M covers the
# whole gov2 collection so in practice EOF is reached first.
# # of documents included
NUM_OF_DOCUMENTS_INCLUDED = 26000000
# for debug
# NUM_OF_DOCUMENTS_INCLUDED = 3
# 90778
# NUM_OF_DOCUMENTS_INCLUDED = 91000 # the num should be larger than the 90778 document set

# option1:
# inputFileName4 = "/home/diaosi/web-search-engine-wei/polyIRIndexer/selectedDocumentPrefixValuesInfo20131107Night_weight_0_WITH_TIME_COST_Added_2"
# option2:
# inputFileName4 = "/home/diaosi/web-search-engine-wei/polyIRIndexer/selectedDocumentPrefixValuesInfo20131107Night_weight_1_WITH_TIME_COST_Added_2"
# option3:
# inputFileName4 = "/home/diaosi/web-search-engine-wei/polyIRIndexer/selectedDocumentPrefixValuesInfo20131107Night_weight_1000_WITH_TIME_COST_Added_2"
# option4:
# inputFileName4 = "/home/diaosi/web-search-engine-wei/polyIRIndexer/selectedDocumentPrefixValuesInfo20140205Night_weight_0"
# inputFileName4 = "/home/diaosi/web-search-engine-wei/polyIRIndexer/selectedDocumentPrefixValuesInfo20140205Afternoon_weight_1"
# inputFileName4 = "/home/diaosi/web-search-engine-wei/polyIRIndexer/selectedDocumentPrefixValuesInfo20140205Afternoon_weight_10"
# inputFileName4 = "/home/diaosi/web-search-engine-wei/polyIRIndexer/selectedDocumentPrefixValuesInfo20140205Afternoon_weight_50"
# inputFileName4 = "/home/diaosi/web-search-engine-wei/polyIRIndexer/selectedDocumentPrefixValuesInfo20140205Afternoon_weight_100"
# inputFileName4 = "/home/diaosi/web-search-engine-wei/polyIRIndexer/selectedDocumentPrefixValuesInfo20140205Afternoon_weight_1000"
# inputFileName4 = "/home/diaosi/web-search-engine-wei/polyIRIndexer/selectedDocumentPostingValuesInfo20140205Night_weight_0"
# inputFileName4 = "/home/diaosi/web-search-engine-wei/polyIRIndexer/selectedDocumentPostingValuesInfo20140205Night_weight_1"
# inputFileName4 = "/home/diaosi/web-search-engine-wei/polyIRIndexer/selectedDocumentPostingValuesInfo20140205Night_weight_10"
# inputFileName4 = "/home/diaosi/web-search-engine-wei/polyIRIndexer/selectedDocumentPostingValuesInfo20140205Night_weight_50"
# inputFileName4 = "/home/diaosi/web-search-engine-wei/polyIRIndexer/selectedDocumentPostingValuesInfo20140205Night_weight_100"
# inputFileName4 = "/home/diaosi/web-search-engine-wei/polyIRIndexer/selectedDocumentPostingValuesInfo20140205Night_weight_1000"
# inputFileName4 = "/home/diaosi/web-search-engine-wei/polyIRIndexer/selectedDocumentPostingValuesInfo20140205Night_weight_1000"
inputFileName4 = "/data/obukai/workspace/web-search-engine-wei-2014-Feb/polyIRIndexer/selectedDocumentPostingValuesInfo20140208Afternoon_HALF_DOC_weight_1"
inputFileHandler = open(inputFileName4,"r")
lineCounter = 0

# Timers for the progress report printed every 1000 documents below:
# start/end measure the last batch, "absoluate*" (sic) the whole run.
absoluateStartingTime = time.time() 
startTime = absoluateStartingTime
endTime = startTime
absoluateEndingTime = endTime
# Primed read: the while loop below processes currentLine, then reads the
# next line at the bottom of its body.
currentLine = inputFileHandler.readline()
lineCounter += 1

# Main ingest loop: one input line per document, formatted as
#   localDocID numOfTuples [score static dynamic termID] * numOfTuples
# Builds one (score, packedKey) list per document and appends it to iters.
while currentLine:
    currentLineElements = currentLine.strip().split(" ")
    # When applying to the whole gov2 index, the local docID is exactly the universalID, so NO need to do the transfer
    currentLocalDocIDInStringFormat = currentLineElements[0]
    if currentLocalDocIDInStringFormat in localDocIDANDUniversalDocIDDict:
        # NOTE(review): the (disabled) step-2 loader stores STRING values, so
        # this branch would yield a str, not an int — confirm before
        # re-enabling the mapping. With the dict empty, this never fires.
        currentUniversalDocIDInIntFormat = localDocIDANDUniversalDocIDDict[currentLocalDocIDInStringFormat]
    else:
        currentUniversalDocIDInIntFormat = int(currentLocalDocIDInStringFormat) 
    numOfTuplesNeededToRead = int( currentLineElements[1] )
    # fit to the NEW input file format which both record the (1)static probability (2)dynamic probability and the (3)total probability
    # Each tuple occupies 4 fields starting at index 2.
    upperBound = numOfTuplesNeededToRead * 4 + 2
    
    currentDocPostingValueList = []
    tupleIndex = 0
    for i in range(2, upperBound, 4):
        # the input format per 4-field group:
        # 0: finalValueForComparizon (unused here; recomputed below)
        # 1: static probability
        # 2: atomic dynamic probability
        # 3: termID
        currentUniversalTermIDInIntFormat = int(currentLineElements[i])
        staticProbability = float( currentLineElements[i+1] )
        dynamicProbability = float( currentLineElements[i+2] )
        # Negated so that Python's min-heap merge pops the BEST posting first.
        finalValueForComparizon = -(staticProbability + DYNAMIC_WEIGHT * dynamicProbability) 
        currentUniversalPostingKeyInIntFormat = currentUniversalTermIDInIntFormat * BIG_NUM_FOR_DIFF + currentUniversalDocIDInIntFormat
        currentTuple = ( finalValueForComparizon,currentUniversalPostingKeyInIntFormat )
        # print "currentTuple: ",tupleIndex,currentTuple
        tupleIndex += 1
        currentDocPostingValueList.append( currentTuple )
    # exit(1)
    # print "len(currentDocPostingValueList): ",len(currentDocPostingValueList)
    iters.append(currentDocPostingValueList)
    # heapq.heappush(h, currentTuple)
    
    currentLine = inputFileHandler.readline()
    lineCounter += 1
    
    # Progress report every 1000 documents: batch time and running average.
    if lineCounter % 1000 == 0:
        endTime = time.time()
        absoluateEndingTime = endTime
        print "documents processed: ",lineCounter,endTime - startTime,(absoluateEndingTime-absoluateStartingTime)/lineCounter
        startTime = time.time()
        # h=hpy()
        # print h.heap()
        # exit(1)
    if lineCounter == NUM_OF_DOCUMENTS_INCLUDED:
        break

# lineCounter was incremented once past the last processed line (the EOF
# read), so -1 gives the document count on the normal exit path.
print "# of documents included(1st check): ",lineCounter - 1
# print "# of postings included(2ed check): ",len(h)
inputFileHandler.close()

# step2: an evaluation mechanism
current_total_num_of_postings_counted = 0
for currentDocPostingValueList in iters:
    current_total_num_of_postings_counted += len(currentDocPostingValueList)
print "current_total_num_of_postings_counted:",current_total_num_of_postings_counted

NUM_POSTINGS_IN_HEAP = current_total_num_of_postings_counted
NUM_POSTINGS_WANT_TO_POPPED = NUM_POSTINGS_IN_HEAP # this variable can be changed
NUM_POSTINGS_IN_HEAP_90Percent = int(NUM_POSTINGS_IN_HEAP * 0.9)
NUM_POSTINGS_IN_HEAP_80Percent = int(NUM_POSTINGS_IN_HEAP * 0.8)
NUM_POSTINGS_IN_HEAP_70Percent = int(NUM_POSTINGS_IN_HEAP * 0.7)
NUM_POSTINGS_IN_HEAP_60Percent = int(NUM_POSTINGS_IN_HEAP * 0.6)
NUM_POSTINGS_IN_HEAP_50Percent = int(NUM_POSTINGS_IN_HEAP * 0.5)
NUM_POSTINGS_IN_HEAP_40Percent = int(NUM_POSTINGS_IN_HEAP * 0.4)
NUM_POSTINGS_IN_HEAP_30Percent = int(NUM_POSTINGS_IN_HEAP * 0.3)
NUM_POSTINGS_IN_HEAP_20Percent = int(NUM_POSTINGS_IN_HEAP * 0.2)
NUM_POSTINGS_IN_HEAP_15Percent = int(NUM_POSTINGS_IN_HEAP * 0.15)
NUM_POSTINGS_IN_HEAP_10Percent = int(NUM_POSTINGS_IN_HEAP * 0.1)
NUM_POSTINGS_IN_HEAP_5Percent = int(NUM_POSTINGS_IN_HEAP * 0.05)
NUM_POSTINGS_IN_HEAP_3Percent = int(NUM_POSTINGS_IN_HEAP * 0.03)
NUM_POSTINGS_IN_HEAP_1Percent = int(NUM_POSTINGS_IN_HEAP * 0.01)
print "NUM_POSTINGS_IN_HEAP_1Percent:",NUM_POSTINGS_IN_HEAP_1Percent
print "NUM_POSTINGS_IN_HEAP_3Percent:",NUM_POSTINGS_IN_HEAP_3Percent
print "NUM_POSTINGS_IN_HEAP_5Percent:",NUM_POSTINGS_IN_HEAP_5Percent
print "NUM_POSTINGS_IN_HEAP_10Percent:",NUM_POSTINGS_IN_HEAP_10Percent
print "NUM_POSTINGS_IN_HEAP_15Percent:",NUM_POSTINGS_IN_HEAP_15Percent
print "NUM_POSTINGS_IN_HEAP_20Percent:",NUM_POSTINGS_IN_HEAP_20Percent
print "NUM_POSTINGS_IN_HEAP_30Percent:",NUM_POSTINGS_IN_HEAP_30Percent
print "NUM_POSTINGS_IN_HEAP_40Percent:",NUM_POSTINGS_IN_HEAP_40Percent
print "NUM_POSTINGS_IN_HEAP_50Percent:",NUM_POSTINGS_IN_HEAP_50Percent
print "NUM_POSTINGS_IN_HEAP_60Percent:",NUM_POSTINGS_IN_HEAP_60Percent
print "NUM_POSTINGS_IN_HEAP_70Percent:",NUM_POSTINGS_IN_HEAP_70Percent
print "NUM_POSTINGS_IN_HEAP_80Percent:",NUM_POSTINGS_IN_HEAP_80Percent
print "NUM_POSTINGS_IN_HEAP_90Percent:",NUM_POSTINGS_IN_HEAP_90Percent
NUM_POSTINGS_POPPED_ARE_IN_TOP10 = 0
NUM_POSTINGS_CAN_BE_POPPED = 0
if NUM_POSTINGS_WANT_TO_POPPED > NUM_POSTINGS_IN_HEAP:
    NUM_POSTINGS_CAN_BE_POPPED = NUM_POSTINGS_IN_HEAP
else:
    NUM_POSTINGS_CAN_BE_POPPED = NUM_POSTINGS_WANT_TO_POPPED

print "NUM_POSTINGS_CAN_BE_POPPED: ",NUM_POSTINGS_CAN_BE_POPPED
# exit(1)

for index,currentTupleBeingPopped in enumerate( heapq.merge(*iters) ):
    # print index,currentTupleBeingPopped
    ( utilityValue,termIDInStringFormat, docIDInStringFormat ) = currentTupleBeingPopped
    currentKey = termIDInStringFormat + "_" + docIDInStringFormat
    
    #if index % 1000000 == 0:
    #    print "Postings Popped: ",index
    
    if currentKey in top10RelatedPostingsDict:
        # debug
        print "Posting_ID: ",termIDInStringFormat,docIDInStringFormat
        NUM_POSTINGS_POPPED_ARE_IN_TOP10 += 1
    else:
        pass
    
    # print "the currentKey",currentKey,"is NOT in the top10RelatedPostingsDict."
    # exit(1)
    if index == NUM_POSTINGS_IN_HEAP_1Percent - 1:
        print "1%:",NUM_POSTINGS_POPPED_ARE_IN_TOP10
    if index == NUM_POSTINGS_IN_HEAP_3Percent - 1:
        print "3%:",NUM_POSTINGS_POPPED_ARE_IN_TOP10    
    if index == NUM_POSTINGS_IN_HEAP_5Percent - 1:
        print "5%:",NUM_POSTINGS_POPPED_ARE_IN_TOP10
    if index == NUM_POSTINGS_IN_HEAP_10Percent - 1:
        print "10%:",NUM_POSTINGS_POPPED_ARE_IN_TOP10
    if index == NUM_POSTINGS_IN_HEAP_15Percent - 1:
        print "15%:",NUM_POSTINGS_POPPED_ARE_IN_TOP10
    if index == NUM_POSTINGS_IN_HEAP_20Percent - 1:
        print "20%:",NUM_POSTINGS_POPPED_ARE_IN_TOP10
    if index == NUM_POSTINGS_IN_HEAP_30Percent - 1:
        print "30%:",NUM_POSTINGS_POPPED_ARE_IN_TOP10
    if index == NUM_POSTINGS_IN_HEAP_40Percent - 1:
        print "40%:",NUM_POSTINGS_POPPED_ARE_IN_TOP10
    if index == NUM_POSTINGS_IN_HEAP_50Percent - 1:
        print "50%:",NUM_POSTINGS_POPPED_ARE_IN_TOP10
    if index == NUM_POSTINGS_IN_HEAP_60Percent - 1:
        print "60%:",NUM_POSTINGS_POPPED_ARE_IN_TOP10
    if index == NUM_POSTINGS_IN_HEAP_70Percent - 1:
        print "70%:",NUM_POSTINGS_POPPED_ARE_IN_TOP10
    if index == NUM_POSTINGS_IN_HEAP_80Percent - 1:
        print "80%:",NUM_POSTINGS_POPPED_ARE_IN_TOP10
    if index == NUM_POSTINGS_IN_HEAP_90Percent - 1:
        print "90%:",NUM_POSTINGS_POPPED_ARE_IN_TOP10
    if index == NUM_POSTINGS_IN_HEAP - 1:
        print "100%:",NUM_POSTINGS_POPPED_ARE_IN_TOP10
  
print "NUM_POSTINGS_POPPED_ARE_IN_TOP10: ",NUM_POSTINGS_POPPED_ARE_IN_TOP10
print "Program Ends."

