# Updated by Wei 2013/08/16 night
# Note: sample command:
# python /data3/obukai/workspace/web-search-engine-wei/scripts/src/pythonScripts/gov2/step10_gov2_compute_symmetric_difference.py 
# /data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/2013AugPruningResults/goldStandardResultsForPruningProject_AND_TOP10_tail5k 
# /data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/2013AugPruningResults/goldStandardResultsForPruningProject_AND_TOP10_tail5k 
# NUM_OF_LINES_NEEDED_TO_SKIP : 0 
# NUM_OF_TOP_RESULTS_TAKEN_INTO_CONSIDERATION : 10 
# PRUNING_METHOD : uniformPartialBM25 
# PERCENTAGE_KEPT : 100 
# CONDITION : AND

# python /data3/obukai/workspace/web-search-engine-wei/scripts/src/pythonScripts/gov2/step10_gov2_compute_symmetric_difference.py /data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/2013AugPruningResults/goldStandardResultsForPruningProject_AND_TOP10_tail5k_qid_95984_STOP /data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/2013AugPruningResults/goldStandardResultsForPruningProject_AND_TOP10_tail5k_qid_95984_STOP 0 10 uniformPartialBM25 100 AND
from __future__ import division
import random
import math
import os
import sys
from sets import Set
# Updated by Wei 2013/08/04 night at school
# optimized for the 3-factor probabilities formula

# Updated by Wei 20130621
# Basically this time, use this program in the machine pangolin to produce some results

# Updated by Wei 20130525
# some raw results produced by the irtk

# Program inputs:
# gold standard file
# inputResultFileName = ""

# compared file
# inputFileName2 = ""

# Program outputs:
# the symmetric_difference

# step2:
# top10ResultsHitInvertedListDistributionAnalysis given the results
def top10ResultsHitInvertedListDistributionAnalysis():
    """Analyze how the TOP10 result postings are distributed over the
    inverted lists that produced them.

    Reads two hard-coded input files:
      * queryTermsWithTheirLengthsOfInvertedList: "term listLength" per line
      * setOfTOP10Postings_postingRankInListAdded: per-posting lines whose
        field 4 is the term and field 5 the posting's rank in its list
    Buckets every term into a geometric list-length class (growth factor
    1.2, capped at 24M), splits each list into geometrically shrinking
    pieces, then counts how many TOP10 hits land in each piece and each
    class.  All results are printed to stdout; nothing is returned.
    """
    UPPER_BOUND_FOR_LENGTH_OF_LIST = 24000000 #24M
    
    # sub_step1: build the list-length classes.
    # key: class label in int format
    # value: lower bound (inclusive) of the list lengths covered by this
    # class; only the lower bound is recorded -- the next class's lower
    # bound acts as the exclusive upper bound.
    classLabelWithTheirLowerBoundDict = {}
    stepFactor = 1.2
    classLabelInIntFormat = 0
    currentLowerBound = int( 1 )
    currentUpperBound = int( 100 )
    # class 0 covers list lengths [1, 100); each later bound grows by 1.2x
    classLabelWithTheirLowerBoundDict[classLabelInIntFormat] = currentLowerBound
    while currentLowerBound < UPPER_BOUND_FOR_LENGTH_OF_LIST:
        currentLowerBound = int( currentUpperBound )
        currentUpperBound = int( currentUpperBound * stepFactor )
        classLabelInIntFormat += 1
        classLabelWithTheirLowerBoundDict[classLabelInIntFormat] = currentLowerBound
    
    print "len(classLabelWithTheirLowerBoundDict):",len(classLabelWithTheirLowerBoundDict)
    print "classLabelWithTheirLowerBoundDict[0]:",classLabelWithTheirLowerBoundDict[0]
    print "classLabelWithTheirLowerBoundDict[1]:",classLabelWithTheirLowerBoundDict[1]
    print
    # for i in range(0, len(classLabelWithTheirLowerBoundDict) ):
    #     print str(i),str(classLabelWithTheirLowerBoundDict[i])
    
    # sub_step2: load each query term's inverted-list length.
    # key: term in string format
    # value: termListLength in int format
    termWithTheirListLengthDict = {}
    inputFileName1 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/queryTermsWithTheirLengthsOfInvertedList"
    inputFileHandler = open(inputFileName1,"r")
    for line in inputFileHandler.readlines():
        lineElements = line.strip().split(" ")
        term = lineElements[0]
        termListLength = int(lineElements[1])
        # first occurrence wins; duplicate terms in the file are ignored
        if term not in termWithTheirListLengthDict:
            termWithTheirListLengthDict[term] = termListLength
    print "len(termWithTheirListLengthDict):",len(termWithTheirListLengthDict)
    # spot-check two known terms (raises KeyError if the input file changed)
    print "termWithTheirListLengthDict['zx2']:",termWithTheirListLengthDict['zx2']
    print "termWithTheirListLengthDict['zoning']:",termWithTheirListLengthDict['zoning']
    print
    inputFileHandler.close()
    
    # sub_step3: partition every term's inverted list into pieces.
    # key: term in string format
    # value format1 (used currently)
    # value: a list of tuples which have the following format (pieceNum,numOfPostingsInThisPiece)
    # value format2 (NOT used currently)
    # value: a list of integers representing the numOfPostingsInThisPiece in each piece
    termWithTheirListPiecesInfoDict = {}
    # tempCounter = 0
    for term in termWithTheirListLengthDict:
        # print "term:",term
        if term not in termWithTheirListPiecesInfoDict:
            termWithTheirListPiecesInfoDict[term] = []
        
        # tempCounter += 1
        # if tempCounter == 10:
        #     exit(1)
        
        
        # Partition Method1: piece i holds int(termListLength * (1/2)^(i+1))
        # postings (true division is active via the __future__ import), until
        # fewer than 100 postings remain; the remainder forms the last piece.
        termListLength = termWithTheirListLengthDict[term]
        # print "termListLength:",termListLength
        currentPieceNumber = 0
        if termListLength > 100:
            patitionFactor = 1/2
            patitionStep = 1/2
            numOfPostingsInCurrentPiece = int(termListLength * patitionFactor)
            numOfPostingsLeft = termListLength - numOfPostingsInCurrentPiece
            # print "piece",str(currentPieceNumber),numOfPostingsInCurrentPiece,numOfPostingsLeft
            termWithTheirListPiecesInfoDict[term].append( (currentPieceNumber,numOfPostingsInCurrentPiece) )
            while numOfPostingsLeft >= 100:
                patitionFactor = patitionFactor * patitionStep
                numOfPostingsInCurrentPiece = int( termListLength * patitionFactor)
                numOfPostingsLeft = numOfPostingsLeft - numOfPostingsInCurrentPiece
                currentPieceNumber += 1
                # print "piece",str(currentPieceNumber),numOfPostingsInCurrentPiece,numOfPostingsLeft
                termWithTheirListPiecesInfoDict[term].append( (currentPieceNumber,numOfPostingsInCurrentPiece))
            currentPieceNumber += 1
            # print "piece",str(currentPieceNumber),numOfPostingsLeft,str(0)
            termWithTheirListPiecesInfoDict[term].append( (currentPieceNumber,numOfPostingsLeft) )
        else:
            # short lists (<= 100 postings) become a single piece 0
            # print "piece",str(0),str(termListLength),str(0)
            termWithTheirListPiecesInfoDict[term].append( (currentPieceNumber,termListLength) )
        # print
    
    # sort the pieces info for each term
    # (reverse=True orders the (pieceNum, size) tuples by descending pieceNum)
    for term in termWithTheirListPiecesInfoDict:
        termWithTheirListPiecesInfoDict[term].sort(cmp=None, key=None, reverse=True)
    
    print "len(termWithTheirListPiecesInfoDict):",len(termWithTheirListPiecesInfoDict)
    print "termWithTheirListPiecesInfoDict['bromelain']:",termWithTheirListPiecesInfoDict['bromelain']
    print "termWithTheirListPiecesInfoDict['francesca']:",termWithTheirListPiecesInfoDict['francesca']
    print "termWithTheirListPiecesInfoDict['copywrite']:",termWithTheirListPiecesInfoDict['copywrite']
    print
    # exit(1) # debug
    
    # key: term in string format
    # value: a list having the hitting positions(rank in the list) sorted
    termWithTheHitsPositionDict = {}
    inputFileName2 = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/setOfTOP10Postings_postingRankInListAdded"
    inputFileHandler = open(inputFileName2,"r")
    for line in inputFileHandler.readlines():
        lineElements = line.strip().split(" ")
        # field layout: term is column 4, its rank within the list column 5
        term = lineElements[4]
        postingRankInList = int(lineElements[5])
        if term not in termWithTheHitsPositionDict:
            termWithTheHitsPositionDict[term] = []
        else:
            pass
        termWithTheHitsPositionDict[term].append(postingRankInList)
    inputFileHandler.close()
    
    # sort the hit positions for each term in the dict called termWithTheHitsPositionDict
    for term in termWithTheHitsPositionDict:
        termWithTheHitsPositionDict[term].sort(cmp=None, key=None, reverse=False)
    
    
    
    # key: term in string format
    # value: a dict
        # key: piece number
        # value: counter of # of postings belonging to this piece
    termWithTheirPiecesCounterDict = {}
    
    # key: classLabel in int format
    # value: Num Of Postings belonging to this class in int format
    classLabelWithTheirNumOfPostingsCounterDict = {}
    
    # key: classLabel in int format
    # value: a list of terms belonging to this class in list format, the term inside is in string format
    classLabelWithTheirBelongingTermsDict = {}
    
    # init the variable classLabelWithTheirNumOfPostingsCounterDict
    for i in range(0,len(classLabelWithTheirLowerBoundDict)):
        classLabelWithTheirNumOfPostingsCounterDict[i] = 0
        classLabelWithTheirBelongingTermsDict[i] = []
    
    termList = []
    termList = termWithTheHitsPositionDict.keys()
    termList.sort(cmp=None, key=None, reverse=False)
    for term in termList:
        print term,len( termWithTheHitsPositionDict[term] ),termWithTheirListLengthDict[term]
        
        # Find the home class for this term: the first class whose lower
        # bound exceeds the list length means the PREVIOUS class matches.
        # NOTE(review): this relies on the dict with consecutive int keys
        # iterating in ascending key order (true in practice for CPython 2
        # small-int keys inserted in order) -- confirm.
        for classLabel in classLabelWithTheirLowerBoundDict:
            # compare the class upperBound with the length of the list for this term
            if classLabelWithTheirLowerBoundDict[classLabel] > termWithTheirListLengthDict[term]:
                actualBelongClassLabel = classLabel - 1
                if actualBelongClassLabel not in classLabelWithTheirNumOfPostingsCounterDict:
                    classLabelWithTheirNumOfPostingsCounterDict[actualBelongClassLabel] = 0
                else:
                    pass # don't need to do anything
                classLabelWithTheirNumOfPostingsCounterDict[actualBelongClassLabel] += len(termWithTheHitsPositionDict[term])
                classLabelWithTheirBelongingTermsDict[actualBelongClassLabel].append(term)
                break
            else:
                pass # don't need to do anything
        
        print "--->numOfHitsPositionForThisTerm:",len( termWithTheHitsPositionDict[term] )
        print "--->numOfPiecesForThisTerm:",len( termWithTheirListPiecesInfoDict[term] )
        # print "--->termWithTheHitsPositionDict[term]:",termWithTheHitsPositionDict[term]
        print "--->termWithTheirListPiecesInfoDict[term]:",termWithTheirListPiecesInfoDict[term]
        
        # Assign every hit position to a piece by accumulating piece sizes.
        # NOTE(review): the pieces list was sorted reverse=True above, so
        # sizes accumulate starting from the highest piece number -- low
        # ranks therefore map to high piece numbers.  Confirm this
        # orientation is the intended one.
        for currentPosition in termWithTheHitsPositionDict[term]:
            currentPieceUpperBound = 0 
            for currentTermPieceTupleInfo in termWithTheirListPiecesInfoDict[term]:
                (currentTermPieceNum,currentNumOfPostingInThatPiece)= currentTermPieceTupleInfo
                currentPieceUpperBound += currentNumOfPostingInThatPiece
                if currentPosition <= currentPieceUpperBound:
                    
                    if term not in termWithTheirPiecesCounterDict:
                        termWithTheirPiecesCounterDict[term] = {}
                    
                    if currentTermPieceNum not in termWithTheirPiecesCounterDict[term]:
                        termWithTheirPiecesCounterDict[term][currentTermPieceNum] = 0 # init the piece counter to 0
                    
                    termWithTheirPiecesCounterDict[term][currentTermPieceNum] += 1
                    break
                else:
                    pass # continue to search for the right piece for that term
        
        print "--->termWithTheirPiecesCounterDict[term]:",termWithTheirPiecesCounterDict[term]
        # sanity check: every hit must have been assigned to exactly one piece
        numOfPostingsCountedIntoPieces = 0
        for pieceNum in termWithTheirPiecesCounterDict[term]:
            numOfPostingsCountedIntoPieces += termWithTheirPiecesCounterDict[term][pieceNum]
        
        assert numOfPostingsCountedIntoPieces == len( termWithTheHitsPositionDict[term] )
        
        print 
    
    # final per-class summary table
    totalNumOfPostings = 0
    totalNumOfTerms = 0
    print "classLabel classLowerBound #OfTerms #OfTOP10Postings"
    for i in range(0,len(classLabelWithTheirLowerBoundDict)):
        totalNumOfPostings += classLabelWithTheirNumOfPostingsCounterDict[i]
        totalNumOfTerms += len(classLabelWithTheirBelongingTermsDict[i])
        print i,classLabelWithTheirLowerBoundDict[i],len(classLabelWithTheirBelongingTermsDict[i]),classLabelWithTheirNumOfPostingsCounterDict[i]
    print "total_num_of_classes:",len(classLabelWithTheirLowerBoundDict)
    print "total_num_of_terms:",totalNumOfTerms
    print "total_num_of_TOP10_postings:",totalNumOfPostings

# step1:
# produce the set of TOP10 postings which needs to add the rank_in_the_list feature, say.
def produceSetOfTOP10PostingsForResultsHitInvertedListDistributionAnalysis():
    """Produce the set of TOP10 postings that still needs the
    rank-in-the-list feature added (step1 of the pipeline).

    Parses the hard-coded raw IRTK result file, keeps each query's top-K
    (K=10) results, associates with every query term the trecIDs of those
    results, and writes one line per (term, trecID) pair to the hard-coded
    output file.  Progress information is printed to stdout; nothing is
    returned.
    """
    inputResultFileName = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/raw_TOP1000_results_for_head_95K_queries_output_from_IRTK_STOP_AT_qid_11811"
    TOPKValueTakenIntoConsideration = 10
    
    # sub_step1: load the results from the raw result file
    # key: queryID 
    # value: a list of tuples with the format (trecID,docID)
    outsideQueryIDAndResultSetDict = {}
    # key: queryID
    # value: the query term dict (term index in int format -> term string)
    outsideQueryIDAndQueryTermsDictDict = {}
    
    # 4 arguments:
    # (1) inputResultFileName
    # (2) NUM_OF_TOP_RESULTS_NEEDED_TO_EVALUATE
    # (3),(4) the two result dicts above, filled in place by the callee
    getQueryIDAndResultSetGivenResultFile(inputResultFileName,TOPKValueTakenIntoConsideration,outsideQueryIDAndResultSetDict,outsideQueryIDAndQueryTermsDictDict)
    
    print "len(outsideQueryIDAndResultSetDict):",len(outsideQueryIDAndResultSetDict)
    print "len(outsideQueryIDAndQueryTermsDictDict):",len(outsideQueryIDAndQueryTermsDictDict)
    # print "outsideQueryIDAndResultSetDict:",outsideQueryIDAndResultSetDict
    # print "outsideQueryIDAndQueryTermsDictDict:",outsideQueryIDAndQueryTermsDictDict
    
    # sub_step2: based on the variables outsideQueryIDAndResultSetDict and outsideQueryIDAndQueryTermsDictDict, output the set of postings need to get the feature value posting_rank_in_inverted_list    
    # key: term -> dict of trecIDs (values are a constant 1; used as a set)
    termsWithTrecIDsDict = {}
    # key: trecID -> docID (first mapping seen wins)
    trecIDANDDocIDMappingDict = {}
    for qid in outsideQueryIDAndQueryTermsDictDict:
        for termIndex in outsideQueryIDAndQueryTermsDictDict[qid]:
            currentTerm = outsideQueryIDAndQueryTermsDictDict[qid][termIndex]
            if currentTerm not in termsWithTrecIDsDict:
                termsWithTrecIDsDict[ currentTerm ] = {}
            else:
                pass
         
            # every kept top-K document of this query is linked to the term
            for documentInfoTuple in outsideQueryIDAndResultSetDict[qid]:
                (trecID,docID) = documentInfoTuple
                if trecID not in termsWithTrecIDsDict[currentTerm]:
                    termsWithTrecIDsDict[currentTerm][trecID] = 1
                if trecID not in trecIDANDDocIDMappingDict:
                    trecIDANDDocIDMappingDict[trecID] = docID
                else:
                    pass # this document info has already been stored
    termList = []
    termList = termsWithTrecIDsDict.keys()
    termList.sort(cmp=None, key=None, reverse=False)
    
    '''
    # for DEBUG ONLY
    for term in termList:
        print term,len(termsWithTrecIDsDict[term])
    
    print "commissioner:",termsWithTrecIDsDict["commissioner"]
    print "of:",termsWithTrecIDsDict["of"]
    '''

    # output line format: "N/A 0 trecID docID term"
    # (the first two columns look like placeholders -- presumably filled by a
    # later step that adds the rank-in-list feature; TODO confirm)
    outputFileName = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/setOfTOP10Postings"
    ouputFileHanlder = open(outputFileName,"w")
    for term in termList:
        for trecID in termsWithTrecIDsDict[term]:
            outputLine = "N/A" + " " + "0" + " " + str(trecID) + " " + str( trecIDANDDocIDMappingDict[trecID] ) + " " + str(term) + "\n"   
            ouputFileHanlder.write(outputLine)
    ouputFileHanlder.close()
    
    print "Overall Processing Statistics:"
    print "len(termList):",len(termList)
    print "inputResultFileName:",inputResultFileName
    print "outputFileName:",outputFileName

def getQueryIDAndResultSetGivenResultFile(inputResultFileName,NUM_OF_TOP_RESULTS_NEEDED_TO_EVALUATE,insideQueryIDAndResultSetDict,insideQueryIDAndQueryTermsDictDict):
    """Parse a raw IRTK result file and fill the two caller-supplied dicts.

    Parameters:
      inputResultFileName -- path of the raw result file to parse
      NUM_OF_TOP_RESULTS_NEEDED_TO_EVALUATE -- only results whose rank
        value is <= this number are kept
      insideQueryIDAndResultSetDict -- out-param: qid (int) -> list of
        (trecID, docID) tuples for the kept results
      insideQueryIDAndQueryTermsDictDict -- out-param: qid (int) -> dict
        mapping term index (int) -> term (string)

    The file is consumed as a line-oriented state machine: a "qid:" line
    opens a query; a "Search:" line carries the query text; then follow a
    skipped status line, the "term:index" line, a threshold line, a
    computation-method line, and finally the per-result lines (exactly 35
    space-separated fields each).  On a format problem the whole process is
    terminated via exit(1).  Progress is printed to stdout; no return value.
    """
    print "getQueryIDAndResultSetGivenResultFile() function called."
    numOfQueriesHavingQID = 0
    numOfQueriesHavingSearchContent = 0
    # NOTE(review): this counter is never incremented anywhere below, so the
    # final report always prints 0 for it -- confirm whether that is intended.
    numOfQueriesHavingSearchResults = 0
    
    inputFileHandler1 = open(inputResultFileName,"r")
    
    
    
    currentLine = inputFileHandler1.readline()
    nextLine = ""
    currentQID = ""
    
    numOfResultsForTheAllQueries = 0
    
    while currentLine:
        # sampling parsing line:
        # qid: 701
        if currentLine.startswith("qid:"):
            currentQID = int( currentLine.strip().split(":")[1].strip() )
            print "currentQID:",str( currentQID )
            # init the variable
            if currentQID not in insideQueryIDAndResultSetDict:
                insideQueryIDAndResultSetDict[currentQID] = []
            numOfQueriesHavingQID += 1
        
        # sample parsing line:
        # Search: u s  oil industry history
        if currentLine.startswith("Search:"):
            elementList = currentLine.strip().split(" ")
            
            currentSearchContent = ""
            
            # collect the non-empty tokens after "Search:" as the query terms
            queryTermList = []
            for element in elementList[1:]:
                if element.strip() != "":
                    queryTermList.append( element.strip() )        
            currentQueryLength = len(queryTermList)
            print "currentQueryLength:",currentQueryLength
    
            for element in queryTermList:
                currentSearchContent += element + " "
            
            print currentSearchContent
            numOfQueriesHavingSearchContent += 1
            
            
            currentQueryTermIndexDict = {}
            nextLine = inputFileHandler1.readline()
            #print nextLine.strip()
            if nextLine.strip() != "":
                # I think I can skip the line: 
                # [2013-05-24 14:20:44] 38871 terms with their lists length have been loaded.
                nextLine = inputFileHandler1.readline()
                nextLineElements = nextLine.strip().split(" ")
                # print "nextLineElements:",nextLineElements
                # sample parsing line: 
                # oil:0 industry:1 history:2 u:3 s:4
                for element in nextLineElements:
                    # NOTE(review): the membership test checks the term string
                    # against keys that are int indices, so it is effectively
                    # always true and duplicates would overwrite -- confirm.
                    if element.split(":")[0] not in currentQueryTermIndexDict:
                        currentQueryTermIndexDict[ int(element.split(":")[1]) ] = element.split(":")[0]
                
                print "currentQueryTermIndexDict:",currentQueryTermIndexDict
                # print "len(currentQueryTermIndexDict):",len(currentQueryTermIndexDict)
                if currentQID not in insideQueryIDAndQueryTermsDictDict:
                    insideQueryIDAndQueryTermsDictDict[currentQID] = currentQueryTermIndexDict

            
                
                # Now, it is time to read the threshold line
                currentLine = inputFileHandler1.readline()
                if len(currentLine.strip().split(":")) == 2:
                    name = currentLine.strip().split(":")[0]
                    value = currentLine.strip().split(":")[1]
                    print name,"::",value
                else:
                    print "format problem"
                    exit(1)
                    
                # Now, it is time to read computation method line
                currentLine = inputFileHandler1.readline()
                if len(currentLine.strip().split(":")) == 2:
                    name = currentLine.strip().split(":")[0]
                    value = currentLine.strip().split(":")[1]
                    print name,"::",value
                else:
                    print "format problem"
                    exit(1)
                
                # skip the line like the following:
                # 3 factor probability formula logic implemented by Wei on 2013/08/02 at school    
                # currentLine = inputFileHandler1.readline()
                
                # Now, it is time to read the actual training example line
                currentLine = inputFileHandler1.readline()
                numOfResultsForTheCurrentQuery = 0
                currentResultLines = []
                # for DEBUG ONLY (IMPORTANT debug setting here)
                # print "len( currentLine.strip().split(" ") ):",len( currentLine.strip().split(" ") )
                # print "currentLine:",currentLine
                # exit(1)
                lineElements = currentLine.strip().split(" ")
                # result lines have exactly 35 space-separated fields; any
                # other field count terminates this query's result section
                while len( lineElements ) == 35:
                    theRank = int(lineElements[0])
                    theDocID = lineElements[-2]
                    theTrecID = lineElements[-1]
                    # keep only the top-K results
                    # (assumes ranks start at 1 -- TODO confirm)
                    if theRank <= NUM_OF_TOP_RESULTS_NEEDED_TO_EVALUATE:
                        insideQueryIDAndResultSetDict[currentQID].append( (theTrecID,theDocID) )
                    
                    numOfResultsForTheCurrentQuery += 1
                    currentResultLines.append(currentLine.strip())
                    currentLine = inputFileHandler1.readline()
                    lineElements = currentLine.strip().split(" ")
                   
                numOfResultsForTheAllQueries += numOfResultsForTheCurrentQuery
                
                # sampling method 1: among all the top10 documents, select those documents, open those documents and extract the corresponding postings.
                # This method makes sure that once the document has been selected, ALL the corresponding postings will also be selected.
                # This sampling method has been developed on 2013/01/09
                
                print "numOfResultsForTheCurrentQuery:",numOfResultsForTheCurrentQuery
                print "len(currentResultLines):",len(currentResultLines)
                print
            
        currentLine = inputFileHandler1.readline()
        
    print "numOfQueriesHavingQID:",numOfQueriesHavingQID
    print "numOfQueriesHavingSearchContent:",numOfQueriesHavingSearchContent
    print "numOfQueriesHavingSearchResults:",numOfQueriesHavingSearchResults
    print "len(insideQueryIDAndResultSetDict):",len(insideQueryIDAndResultSetDict)
    print "len(insideQueryIDAndQueryTermsDictDict):",len(insideQueryIDAndQueryTermsDictDict)
    # for debug ONLY
    # print "insideQueryIDAndResultSetDict[0]:",insideQueryIDAndResultSetDict[0]


print "Program Begins..."

# step1:
# produceSetOfTOP10PostingsForResultsHitInvertedListDistributionAnalysis()

# step2:
top10ResultsHitInvertedListDistributionAnalysis()
print "Program Ends."



