# Updated by Wei on 2014/03/08 afternoon at school
# Note: This may only apply to the case of the AND semantics
# (1) get the qIDs with their top-10 docIDs from the original search engine; there is no better way than doing this
# (2) get the qIDs With their TermIDs based on what the search engine will do with the pre query content processing
# (3) based on (1) and (2), top10 Postings Computed By Search System over the unpruned index can be computed under the AND semantics
from __future__ import division
import os
from struct import *

print "Program Begins..."
trecIDANDDocIDDict = {}
inputFileName0 = "/data/obukai/gov2ClearYourMindAndDoItAgain2014/trecID_docID_MappingTableForGov2Dataset"
inputFileHandler = open(inputFileName0,"r")
currentLine = inputFileHandler.readline()
while currentLine:
    currentLineElements = currentLine.strip().split(" ")
    currentTrecID = currentLineElements[0]
    currentDocID =  currentLineElements[1]
    if currentTrecID not in trecIDANDDocIDDict:
        trecIDANDDocIDDict[currentTrecID] = currentDocID  
    currentLine = inputFileHandler.readline()
inputFileHandler.close()
print "len(trecIDANDDocIDDict):",len(trecIDANDDocIDDict)

numOfDocumentResultReturnedBySearchSystem = 0
numOfRelatedDocumentResultReturned = 0
humanJudgedRelatedPostingsDict = {}
inputFileName3 = "/data/obukai/workspace_USE_SINCE_20140217Night/web-search-engine-wei-2014-March/polyIRIndexer/humanJudgeQueryResults_AND_POSTINGS_20140302Afternoon"
inputFileHandler = open(inputFileName3,"r")
for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    currentTrecID = lineElements[1]
    currentDocID = trecIDANDDocIDDict[currentTrecID]
    currentTermID = lineElements[3]
    postingKey = currentTermID + "_" + currentDocID
    if postingKey not in humanJudgedRelatedPostingsDict:
        humanJudgedRelatedPostingsDict[postingKey] = 1
print "len(humanJudgedRelatedPostingsDict):",len(humanJudgedRelatedPostingsDict)
inputFileHandler.close()


# key: qid
# value: top10 docIDs
qIDsWithTOP10DocIDsDict = {}
# for AND
inputFileName1 = "/data/obukai/gov2ClearYourMindAndDoItAgain/pruningProjectResults/reproduceRomanQueryEffectiveness_20140121Night/AND_Semantics/AND_top10000_100%_tb04-06_final"
# for OR
# inputFileName1 = "/data/obukai/gov2ClearYourMindAndDoItAgain/pruningProjectResults/reproduceRomanQueryEffectiveness_20140121Night/OR_Semantics/OR_top10000_100%_tb04-06_final"
inputFileHandler = open(inputFileName1,"r")
for line in inputFileHandler.readlines():
    lineElements = line.strip().split("\t")
    currentQID = lineElements[0]
    currentTrecID = lineElements[2]
    currentDocID = trecIDANDDocIDDict[currentTrecID]
    currentRank = int(lineElements[3])
    if currentQID not in qIDsWithTOP10DocIDsDict:
        qIDsWithTOP10DocIDsDict[currentQID] = []
    if currentRank <= 9:
        qIDsWithTOP10DocIDsDict[currentQID].append(currentDocID)
print "qIDsWithTOP10DocIDsDict['850']: ",qIDsWithTOP10DocIDsDict['850']
print "qIDsWithTOP10DocIDsDict['849']: ",qIDsWithTOP10DocIDsDict['849']
inputFileHandler.close()

qIDsWithTermIDsDict = {}
inputFileName2 = "/data/obukai/workspace_USE_SINCE_20140217Night/web-search-engine-wei-2014-March/polyIRIndexer/qrels.tb04-tb06.top150_with_docID_added_termid_added_AND"
inputFileHandler = open(inputFileName2,"r")
for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    currentQID = lineElements[0]
    if currentQID not in qIDsWithTermIDsDict:
        qIDsWithTermIDsDict[currentQID] = []
    for currentTermID in lineElements[5:]:
        if currentTermID not in qIDsWithTermIDsDict[currentQID]:
            qIDsWithTermIDsDict[currentQID].append(currentTermID)
print "qIDsWithTermIDsDict['850']: ",qIDsWithTermIDsDict['850']
print "qIDsWithTermIDsDict['849']: ",qIDsWithTermIDsDict['849']
inputFileHandler.close()

for qID in qIDsWithTOP10DocIDsDict:
    numOfDocumentResultReturnedBySearchSystem += len(qIDsWithTOP10DocIDsDict[qID])

print "numOfDocumentResultReturnedBySearchSystem:",numOfDocumentResultReturnedBySearchSystem
print "len(qIDsWithTOP10DocIDsDict): ",len(qIDsWithTOP10DocIDsDict)
print "len(qIDsWithTermIDsDict): ",len(qIDsWithTermIDsDict)

# key: posting key
# value: no use currently (The best thing is to have impact score with this posting)
# for AND and for OR semantics
originalTOP10PostingsComputedBySearchSystemDict = {}
dynamicTOP10PostingsComputedBySearchSystemAtDifferentPruningLevelDict = {}
for currentQID in qIDsWithTermIDsDict:
    for currentTermID in qIDsWithTermIDsDict[currentQID]:
        for currentDocID in qIDsWithTOP10DocIDsDict[currentQID]:
            postingKey = currentTermID + "_" + currentDocID
            if postingKey not in originalTOP10PostingsComputedBySearchSystemDict:
                originalTOP10PostingsComputedBySearchSystemDict[postingKey] = 0
print "len(originalTOP10PostingsComputedBySearchSystemDict):",len(originalTOP10PostingsComputedBySearchSystemDict)
# print "originalTOP10PostingsComputedBySearchSystemDict:",originalTOP10PostingsComputedBySearchSystemDict
'''
# debug section
tempCounter = 0
for postingKey in originalTOP10PostingsComputedBySearchSystemDict:
    print postingKey,str( originalTOP10PostingsComputedBySearchSystemDict[postingKey] )
    tempCounter += 1
    if tempCounter == 10:
        break
'''

TOTAL_NUM_OF_POSTINGS = 6451948010
numOfPostingNeededToBePoppedAt1Percentage = int(TOTAL_NUM_OF_POSTINGS * 0.01)
numOfPostingNeededToBePoppedAt3Percentage = int(TOTAL_NUM_OF_POSTINGS * 0.03)
numOfPostingNeededToBePoppedAt5Percentage = int(TOTAL_NUM_OF_POSTINGS * 0.05)
numOfPostingNeededToBePoppedAt10Percentage = int(TOTAL_NUM_OF_POSTINGS * 0.1)
numOfPostingNeededToBePoppedAt15Percentage = int(TOTAL_NUM_OF_POSTINGS * 0.15)
numOfPostingNeededToBePoppedAt20Percentage = int(TOTAL_NUM_OF_POSTINGS * 0.2)
numOfPostingNeededToBePoppedAt30Percentage = int(TOTAL_NUM_OF_POSTINGS * 0.3)
numOfPostingNeededToBePoppedAt40Percentage = int(TOTAL_NUM_OF_POSTINGS * 0.4)
numOfPostingNeededToBePoppedAt50Percentage = int(TOTAL_NUM_OF_POSTINGS * 0.5)
print "numOfPostingNeededToBePoppedAt1Percentage:",numOfPostingNeededToBePoppedAt1Percentage
print "numOfPostingNeededToBePoppedAt3Percentage:",numOfPostingNeededToBePoppedAt3Percentage
print "numOfPostingNeededToBePoppedAt5Percentage:",numOfPostingNeededToBePoppedAt5Percentage
print "numOfPostingNeededToBePoppedAt10Percentage:",numOfPostingNeededToBePoppedAt10Percentage
print "numOfPostingNeededToBePoppedAt15Percentage:",numOfPostingNeededToBePoppedAt15Percentage
print "numOfPostingNeededToBePoppedAt20Percentage:",numOfPostingNeededToBePoppedAt20Percentage
print "numOfPostingNeededToBePoppedAt30Percentage:",numOfPostingNeededToBePoppedAt30Percentage
print "numOfPostingNeededToBePoppedAt40Percentage:",numOfPostingNeededToBePoppedAt40Percentage
print "numOfPostingNeededToBePoppedAt50Percentage:",numOfPostingNeededToBePoppedAt50Percentage
numOfBytes = 0
numOfPostingPopped = 0
numOfPostingBeInTOP10 = 0


inputFileName3 = "/data/obukai/workspace_USE_SINCE_20140217Night/web-search-engine-wei-2014-March/scripts/src/cScripts/AND_weight_0_20140311Night/resultfile0"
inputFileHandler = open(inputFileName3,"rb")
statinfo = os.stat(inputFileName3)
fileSize = statinfo.st_size
print "file size:",fileSize

while numOfBytes < fileSize:
    # old version
    # each time, just read the info of ONE posting, too few
    # byteString = inputFileHandler.read(4 + 4 + 4)
    # (termID,docID,currentProbability) = unpack( "2I1f", byteString)
    
    # current version
    # each time, read the info of 1M postings
    byteStringBuffer = inputFileHandler.read( 1000000 * 12)
    byteStringBufferIndexPosition = 0
    for i in range(0,1000000):
        byteString = byteStringBuffer[byteStringBufferIndexPosition:byteStringBufferIndexPosition+12]
        byteStringBufferIndexPosition += 12
        (termID,docID,currentProbability) = unpack( "2I1f", byteString)
        postingKey = str(termID) + "_" + str(docID)
        # print "postingKey:",postingKey
        # print termID,docID,currentProbability
        if postingKey not in originalTOP10PostingsComputedBySearchSystemDict:
            pass
        else:
            numOfPostingBeInTOP10 += 1
            dynamicTOP10PostingsComputedBySearchSystemAtDifferentPruningLevelDict[postingKey] = 1
        
        if numOfPostingPopped == numOfPostingNeededToBePoppedAt1Percentage or numOfPostingPopped == numOfPostingNeededToBePoppedAt3Percentage or numOfPostingPopped == numOfPostingNeededToBePoppedAt5Percentage or numOfPostingPopped == numOfPostingNeededToBePoppedAt10Percentage or numOfPostingPopped == numOfPostingNeededToBePoppedAt15Percentage or numOfPostingPopped == numOfPostingNeededToBePoppedAt20Percentage or numOfPostingPopped == numOfPostingNeededToBePoppedAt30Percentage or numOfPostingPopped == numOfPostingNeededToBePoppedAt40Percentage or numOfPostingPopped == numOfPostingNeededToBePoppedAt50Percentage:
            numOfTOP10PostingsReturnedIntersectionSet = set(dynamicTOP10PostingsComputedBySearchSystemAtDifferentPruningLevelDict).intersection( set(humanJudgedRelatedPostingsDict) )
            numOfTOP10PostingsReturnedUnionSet = set(dynamicTOP10PostingsComputedBySearchSystemAtDifferentPruningLevelDict).union( set(humanJudgedRelatedPostingsDict) )
            print "len(numOfTOP10PostingsReturnedIntersectionSet):",len(numOfTOP10PostingsReturnedIntersectionSet)
            print "len(numOfTOP10PostingsReturnedUnionSet):",len(numOfTOP10PostingsReturnedUnionSet)
            print "symmetric difference:",len(numOfTOP10PostingsReturnedIntersectionSet)/len(numOfTOP10PostingsReturnedUnionSet)
            
            # The variable numOfTOP10PostingsReturnedIntersectionSet has ALL the top10 related postings, how many top10 document results can be formed from them?
            numOfRelatedDocumentResultReturned = 0
            inputFileName4 = "/data/obukai/workspace_USE_SINCE_20140217Night/web-search-engine-wei-2014-March/polyIRIndexer/qrels.tb04-tb06.top150_with_docID_added_termid_added_AND"
            inputFileHanlder = open(inputFileName4,"r")
            for line in inputFileHanlder.readlines():
                lineElements = line.strip().split(" ")
                currentTrecID = lineElements[2]
                relevenceLabel = lineElements[3]
                currentDocID = lineElements[4]
                if relevenceLabel != "0":
                    documentResultMaintainedFlag = True
                    for currentTermID in lineElements[5:]:
                        currentPostingKey = currentTermID + "_" + currentDocID
                        if currentPostingKey not in numOfTOP10PostingsReturnedIntersectionSet:
                            documentResultMaintainedFlag = False
                    if documentResultMaintainedFlag:
                        numOfRelatedDocumentResultReturned += 1
            print "len(numOfTOP10PostingsReturnedIntersectionSet):",len(numOfTOP10PostingsReturnedIntersectionSet)
            print "numOfRelatedDocumentResultReturned:",numOfRelatedDocumentResultReturned
            print "numOfPostingPopped:",numOfPostingPopped
            print "numOfPostingBeInTOP10:",numOfPostingBeInTOP10
            inputFileHanlder.close()
        
        numOfBytes += 12
        numOfPostingPopped += 1
        if numOfPostingPopped % 1000000 == 0:
            print str(numOfPostingPopped),"postings have been examined."

inputFileHandler.close()
print "Program Ends."
exit(1)
