# Updated by Wei 2013/08/16 night
# Note: sample command:
# python /data3/obukai/workspace/web-search-engine-wei/scripts/src/pythonScripts/gov2/step10_gov2_compute_symmetric_difference.py 
# /data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/2013AugPruningResults/goldStandardResultsForPruningProject_AND_TOP10_tail5k 
# /data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/2013AugPruningResults/goldStandardResultsForPruningProject_AND_TOP10_tail5k 
# NUM_OF_LINES_NEEDED_TO_SKIP : 0 
# NUM_OF_TOP_RESULTS_TAKEN_INTO_CONSIDERATION : 10 
# PRUNING_METHOD : uniformPartialBM25 
# PERCENTAGE_KEPT : 100 
# CONDITION : AND

# python /data3/obukai/workspace/web-search-engine-wei/scripts/src/pythonScripts/gov2/step10_gov2_compute_symmetric_difference.py /data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/2013AugPruningResults/goldStandardResultsForPruningProject_AND_TOP10_tail5k_qid_95984_STOP /data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/2013AugPruningResults/goldStandardResultsForPruningProject_AND_TOP10_tail5k_qid_95984_STOP 0 10 uniformPartialBM25 100 AND
from __future__ import division
import random
import math
import os
import sys
from sets import Set
# Updated by Wei 2013/08/04 night at school
# optimized for the 3-factor probabilities formula

# Updated by Wei 20130621
# Basically this time, use this program in the machine pangolin to produce some results

# Updated by Wei 20130525
# some raw results produced by the irtk

# Program inputs:
# gold standard file
# inputResultFileName = ""

# compared file
# inputFileName2 = ""

# Program outputs:
# the symmetric_difference

# step2:
# top10ResultsHitInvertedListDistributionAnalysis given the results
def top10ResultsHitInvertedListDistributionAnalysis():
    """Analyze how TOP-10 result postings are distributed over the inverted lists.

    Terms are grouped into classes by inverted-list length (geometric buckets),
    each term's list is partitioned into successively halved pieces, and the hit
    positions of the TOP-10 postings are counted per (class, piece).  Reads two
    hard-coded input files and prints a per-class summary table to stdout.
    Returns nothing; all output is via print.
    """
    UPPER_BOUND_FOR_LENGTH_OF_LIST = 24000000 #24M
    
    # sub_step1
    # key: class label in int format
    # value: LowerBoundForTheCurrentClass in int format, the lower bound is based on the length of the list (Here,I only record the lower bound :) )
    classLabelWithTheirLowerBoundDict = {}
    stepFactor = 1.2
    classLabelInIntFormat = 0
    currentLowerBound = int( 1 )
    currentUpperBound = int( 100 )
    classLabelWithTheirLowerBoundDict[classLabelInIntFormat] = currentLowerBound
    # Class 0 covers list lengths [1, 100); every later class starts where the
    # previous upper bound ended and the bound grows geometrically by stepFactor.
    # The loop stops once a lower bound reaches 24M, so every list length up to
    # UPPER_BOUND_FOR_LENGTH_OF_LIST falls into some class.
    while currentLowerBound < UPPER_BOUND_FOR_LENGTH_OF_LIST:
        currentLowerBound = int( currentUpperBound )
        currentUpperBound = int( currentUpperBound * stepFactor )
        classLabelInIntFormat += 1
        classLabelWithTheirLowerBoundDict[classLabelInIntFormat] = currentLowerBound
    
    print "len(classLabelWithTheirLowerBoundDict):",len(classLabelWithTheirLowerBoundDict)
    print "classLabelWithTheirLowerBoundDict[0]:",classLabelWithTheirLowerBoundDict[0]
    print "classLabelWithTheirLowerBoundDict[1]:",classLabelWithTheirLowerBoundDict[1]
    print
    # for i in range(0, len(classLabelWithTheirLowerBoundDict) ):
    #     print str(i),str(classLabelWithTheirLowerBoundDict[i])
    
    # sub_step2
    # key: term in string format
    # value: termListLength in int format
    termWithTheirListLengthDict = {}
    inputFileName1 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/queryTermsWithTheirLengthsOfInvertedList"
    inputFileHandler = open(inputFileName1,"r")
    # Each line is expected to be "<term> <listLength>"; only the first
    # occurrence of a term is kept (duplicates are ignored).
    for line in inputFileHandler.readlines():
        lineElements = line.strip().split(" ")
        term = lineElements[0]
        termListLength = int(lineElements[1])
        if term not in termWithTheirListLengthDict:
            termWithTheirListLengthDict[term] = termListLength
    print "len(termWithTheirListLengthDict):",len(termWithTheirListLengthDict)
    print "termWithTheirListLengthDict['zx2']:",termWithTheirListLengthDict['zx2']
    print "termWithTheirListLengthDict['zoning']:",termWithTheirListLengthDict['zoning']
    print
    inputFileHandler.close()
    
    '''
    # Updated on 2013/08/17 night by Wei at school
    # The following logic has been moved caused it needs to depend on the class which the term belonging to
    # sub_step3
    # key: term in string format
    # value format1 (used currently)
    # value: a list of tuples which have the following format (pieceNum,numOfPostingsInThisPiece)
    # value format2 (NOT used currently)
    # value: a list of integers representing the numOfPostingsInThisPiece in each piece
    termWithTheirListPiecesInfoDict = {}
    # tempCounter = 0
    for term in termWithTheirListLengthDict:
        # print "term:",term
        if term not in termWithTheirListPiecesInfoDict:
            termWithTheirListPiecesInfoDict[term] = []
        
        # tempCounter += 1
        # if tempCounter == 10:
        #     exit(1)
        
        
        # Partition Method1
        termListLength = termWithTheirListLengthDict[term]
        # print "termListLength:",termListLength
        currentPieceNumber = 0
        if termListLength > 100:
            patitionFactor = 1/2
            patitionStep = 1/2
            numOfPostingsInCurrentPiece = int(termListLength * patitionFactor)
            numOfPostingsLeft = termListLength - numOfPostingsInCurrentPiece
            # print "piece",str(currentPieceNumber),numOfPostingsInCurrentPiece,numOfPostingsLeft
            termWithTheirListPiecesInfoDict[term].append( (currentPieceNumber,numOfPostingsInCurrentPiece) )
            while numOfPostingsLeft >= 100:
                patitionFactor = patitionFactor * patitionStep
                numOfPostingsInCurrentPiece = int( termListLength * patitionFactor)
                numOfPostingsLeft = numOfPostingsLeft - numOfPostingsInCurrentPiece
                currentPieceNumber += 1
                # print "piece",str(currentPieceNumber),numOfPostingsInCurrentPiece,numOfPostingsLeft
                termWithTheirListPiecesInfoDict[term].append( (currentPieceNumber,numOfPostingsInCurrentPiece))
            currentPieceNumber += 1
            # print "piece",str(currentPieceNumber),numOfPostingsLeft,str(0)
            termWithTheirListPiecesInfoDict[term].append( (currentPieceNumber,numOfPostingsLeft) )
        else:
            # print "piece",str(0),str(termListLength),str(0)
            termWithTheirListPiecesInfoDict[term].append( (currentPieceNumber,termListLength) )
        # print
    
    # sort the pieces info for each term
    for term in termWithTheirListPiecesInfoDict:
        termWithTheirListPiecesInfoDict[term].sort(cmp=None, key=None, reverse=True)
    
    print "len(termWithTheirListPiecesInfoDict):",len(termWithTheirListPiecesInfoDict)
    print "termWithTheirListPiecesInfoDict['bromelain']:",termWithTheirListPiecesInfoDict['bromelain']
    print "termWithTheirListPiecesInfoDict['francesca']:",termWithTheirListPiecesInfoDict['francesca']
    print "termWithTheirListPiecesInfoDict['copywrite']:",termWithTheirListPiecesInfoDict['copywrite']
    print
    # exit(1) # debug
    '''
    
    
    # key: term in string format
    # value: a list having the hitting positions(rank in the list) sorted
    termWithTheHitsPositionDict = {}
    
    # option1 (Updated by Wei 2013/08/19 morning at school)
    # inputFileName2 = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/setOfTOP10Postings_head95K_PART_OF_b_c_d_f_h_i_j_postingRankInListAdded"
    # option2
    # inputFileName2 = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/setOfTOP10Postings_head10K_postingRankInListAdded"
    # option3
    inputFileName2 = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/setOfTOP10Postings_head95K_COMPLETE_postingRankInListAdded"
    
    inputFileHandler = open(inputFileName2,"r")
    # Field layout assumed here: fields[4] = term, fields[5] = the posting's
    # rank inside that term's inverted list (see the "_postingRankInListAdded"
    # file-name suffix) — TODO confirm against the producer of this file.
    for line in inputFileHandler.readlines():
        lineElements = line.strip().split(" ")
        term = lineElements[4]
        postingRankInList = int(lineElements[5])
        if term not in termWithTheHitsPositionDict:
            termWithTheHitsPositionDict[term] = []
        else:
            pass
        termWithTheHitsPositionDict[term].append(postingRankInList)
    inputFileHandler.close()
    
    # sort the hit positions for each term in the dict called termWithTheHitsPositionDict
    for term in termWithTheHitsPositionDict:
        termWithTheHitsPositionDict[term].sort(cmp=None, key=None, reverse=False)
    
    
    
    # key: term in string format
    # value: a dict
        # key: piece number
        # value: counter of # of postings belonging to this piece
    termWithTheirPiecesCounterDict = {}
    
    # key: classLabel in int format
    # value: Num Of Postings belonging to this class in int format
    classLabelWithTheirNumOfPostingsCounterDict = {}
    
    # key: classLabel in int format
    # value: a list of terms belonging to this class in list format, the term inside is in string format
    classLabelWithTheirBelongingTermsDict = {}
    
    # key: term in int format
    # value: class label in int format
    termsWithTheirClassLabelDict = {}
    
    # key: classLabel in int format
    # value: num of pieces in int forat
    classLabelWithTheirNumOfPiecesDict = {}
    
    # init the variable classLabelWithTheirNumOfPostingsCounterDict
    # (class labels are the consecutive ints 0..N-1 built in sub_step1)
    for i in range(0,len(classLabelWithTheirLowerBoundDict)):
        classLabelWithTheirNumOfPostingsCounterDict[i] = 0
        classLabelWithTheirBelongingTermsDict[i] = []
        classLabelWithTheirNumOfPiecesDict[i] = 0 # The init is 0 pieces
    termList = []
    termList = termWithTheHitsPositionDict.keys()
    termList.sort(cmp=None, key=None, reverse=False)
    
    # key: term in string format
    # value format1 (used currently)
    # value: a list of tuples which have the following format (pieceNum,numOfPostingsInThisPiece)
    # value format2 (NOT used currently)
    # value: a list of integers representing the numOfPostingsInThisPiece in each piece
    termWithTheirListPiecesInfoDict = {}
    
    for term in termList:
        print term,len( termWithTheHitsPositionDict[term] ),termWithTheirListLengthDict[term]
        
        # Classify the term: the first class whose lower bound EXCEEDS the list
        # length means the term belongs to the previous class.
        # NOTE(review): this relies on the dict iterating its int keys in
        # ascending order (true in CPython 2 for small non-negative int keys,
        # whose hash equals their value) — confirm before porting.
        for classLabel in classLabelWithTheirLowerBoundDict:
            # compare the class upperBound with the length of the list for this term
            if classLabelWithTheirLowerBoundDict[classLabel] > termWithTheirListLengthDict[term]:
                actualBelongClassLabel = classLabel - 1
                if actualBelongClassLabel not in classLabelWithTheirNumOfPostingsCounterDict:
                    classLabelWithTheirNumOfPostingsCounterDict[actualBelongClassLabel] = 0
                else:
                    pass # don't need to do anything
                classLabelWithTheirNumOfPostingsCounterDict[actualBelongClassLabel] += len(termWithTheHitsPositionDict[term])
                classLabelWithTheirBelongingTermsDict[actualBelongClassLabel].append(term)
                termsWithTheirClassLabelDict[term] = actualBelongClassLabel
                break
            else:
                pass # don't need to do anything
        
        #########################################################Code in test begins...
        # print "term:",term
        if term not in termWithTheirListPiecesInfoDict:
            termWithTheirListPiecesInfoDict[term] = []
        
        # Partition Process Begins...
        # The FIRST term seen in a class fixes the number of pieces for that
        # class (branch below); every later term of the class is forced to the
        # same piece count.
        NUM_OF_PIECES_NEEDED_TO_PARTITION_FOR_THIS_CLASS = 0
        if classLabelWithTheirNumOfPiecesDict[ termsWithTheirClassLabelDict[term] ] == 0:
            pass # The number of partition for this class is NOT set and should be set when dealing with this term
        else:
            # The number of partition for this class has been set and should be produce the same num of pieces for this term based on pre-set parameter
            NUM_OF_PIECES_NEEDED_TO_PARTITION_FOR_THIS_CLASS = classLabelWithTheirNumOfPiecesDict[ termsWithTheirClassLabelDict[term] ]
        
        
        print "Belonging Class Label:",termsWithTheirClassLabelDict[term]
        print "NUM_OF_PIECES_NEEDED_TO_PARTITION_FOR_THIS_CLASS:",NUM_OF_PIECES_NEEDED_TO_PARTITION_FOR_THIS_CLASS
        
        if NUM_OF_PIECES_NEEDED_TO_PARTITION_FOR_THIS_CLASS == 0:
            # Halve the list repeatedly (1/2, 1/4, ... of the ORIGINAL length)
            # until fewer than 100 postings remain; the remainder is the last
            # piece.  1/2 evaluates to 0.5 here because of the module-level
            # `from __future__ import division`.
            termListLength = termWithTheirListLengthDict[term]
            # print "termListLength:",termListLength
            currentPieceNumber = 0
            if termListLength > 100:
                patitionFactor = 1/2
                patitionStep = 1/2
                numOfPostingsInCurrentPiece = int(termListLength * patitionFactor)
                numOfPostingsLeft = termListLength - numOfPostingsInCurrentPiece
                # print "piece",str(currentPieceNumber),numOfPostingsInCurrentPiece,numOfPostingsLeft
                termWithTheirListPiecesInfoDict[term].append( (currentPieceNumber,numOfPostingsInCurrentPiece) )
                while numOfPostingsLeft >= 100:
                    patitionFactor = patitionFactor * patitionStep
                    numOfPostingsInCurrentPiece = int( termListLength * patitionFactor)
                    numOfPostingsLeft = numOfPostingsLeft - numOfPostingsInCurrentPiece
                    currentPieceNumber += 1
                    # print "piece",str(currentPieceNumber),numOfPostingsInCurrentPiece,numOfPostingsLeft
                    termWithTheirListPiecesInfoDict[term].append( (currentPieceNumber,numOfPostingsInCurrentPiece))
                currentPieceNumber += 1
                # print "piece",str(currentPieceNumber),numOfPostingsLeft,str(0)
                termWithTheirListPiecesInfoDict[term].append( (currentPieceNumber,numOfPostingsLeft) )
            else:
                # print "piece",str(0),str(termListLength),str(0)
                termWithTheirListPiecesInfoDict[term].append( (currentPieceNumber,termListLength) )
            NUM_OF_PIECES_NEEDED_TO_PARTITION_FOR_THIS_CLASS = len( termWithTheirListPiecesInfoDict[term] )
            classLabelWithTheirNumOfPiecesDict[ termsWithTheirClassLabelDict[term] ] = NUM_OF_PIECES_NEEDED_TO_PARTITION_FOR_THIS_CLASS 
        else:
            # The partitions will ALL based on the pre-set parameter: NUM_OF_PIECES_NEEDED_TO_PARTITION_FOR_THIS_CLASS
            # Same halving scheme, but the loop is bounded by the class's
            # pre-set piece count instead of the <100 remainder condition.
            termListLength = termWithTheirListLengthDict[term]
            # print "termListLength:",termListLength
            currentPieceNumber = 0
            if NUM_OF_PIECES_NEEDED_TO_PARTITION_FOR_THIS_CLASS != 1:
                patitionFactor = 1/2
                patitionStep = 1/2
                numOfPostingsInCurrentPiece = int(termListLength * patitionFactor)
                numOfPostingsLeft = termListLength - numOfPostingsInCurrentPiece
                # print "piece",str(currentPieceNumber),numOfPostingsInCurrentPiece,numOfPostingsLeft
                termWithTheirListPiecesInfoDict[term].append( (currentPieceNumber,numOfPostingsInCurrentPiece) )
                while currentPieceNumber < NUM_OF_PIECES_NEEDED_TO_PARTITION_FOR_THIS_CLASS-2:
                    patitionFactor = patitionFactor * patitionStep
                    numOfPostingsInCurrentPiece = int( termListLength * patitionFactor)
                    numOfPostingsLeft = numOfPostingsLeft - numOfPostingsInCurrentPiece
                    currentPieceNumber += 1
                    # print "piece",str(currentPieceNumber),numOfPostingsInCurrentPiece,numOfPostingsLeft
                    termWithTheirListPiecesInfoDict[term].append( (currentPieceNumber,numOfPostingsInCurrentPiece))
                currentPieceNumber += 1
                # print "piece",str(currentPieceNumber),numOfPostingsLeft,str(0)
                termWithTheirListPiecesInfoDict[term].append( (currentPieceNumber,numOfPostingsLeft) )
            else:
                # print "piece",str(0),str(termListLength),str(0)
                termWithTheirListPiecesInfoDict[term].append( (currentPieceNumber,termListLength) )            
            
        # print
        
        # sort the pieces info for this term
        termWithTheirListPiecesInfoDict[term].sort(cmp=None, key=None, reverse=True)
        
        print "--->numOfHitsPositionForThisTerm:",len( termWithTheHitsPositionDict[term] )
        print "--->numOfPiecesForThisTerm:",len( termWithTheirListPiecesInfoDict[term] )
        # print "--->termWithTheHitsPositionDict[term]:",termWithTheHitsPositionDict[term]
        print "--->termWithTheirListPiecesInfoDict[term]:",termWithTheirListPiecesInfoDict[term]
        
        #########################################################Code in test ends.
        
        
        # Attribute each hit position to a piece by accumulating piece sizes.
        # NOTE(review): the tuples were just sorted reverse=True, so pieces are
        # visited in DESCENDING piece-number order (smallest pieces first);
        # low hit positions therefore land in the highest-numbered piece.
        # Confirm this reversal is intended.
        for currentPosition in termWithTheHitsPositionDict[term]:
            currentPieceUpperBound = 0 
            for currentTermPieceTupleInfo in termWithTheirListPiecesInfoDict[term]:
                (currentTermPieceNum,currentNumOfPostingInThatPiece)= currentTermPieceTupleInfo
                currentPieceUpperBound += currentNumOfPostingInThatPiece
                if currentPosition <= currentPieceUpperBound:
                    
                    if term not in termWithTheirPiecesCounterDict:
                        termWithTheirPiecesCounterDict[term] = {}
                    
                    if currentTermPieceNum not in termWithTheirPiecesCounterDict[term]:
                        termWithTheirPiecesCounterDict[term][currentTermPieceNum] = 0 # init the piece counter to 0
                    
                    termWithTheirPiecesCounterDict[term][currentTermPieceNum] += 1
                    break
                else:
                    pass # continue to search for the right piece for that term
        
        print "--->termWithTheirPiecesCounterDict[term]:",termWithTheirPiecesCounterDict[term]
        numOfPostingsCountedIntoPieces = 0
        for pieceNum in termWithTheirPiecesCounterDict[term]:
            numOfPostingsCountedIntoPieces += termWithTheirPiecesCounterDict[term][pieceNum]
        
        # sanity check: every hit position was attributed to exactly one piece
        assert numOfPostingsCountedIntoPieces == len( termWithTheHitsPositionDict[term] )
        
        print 
    
    print "len(termWithTheirPiecesCounterDict):",len(termWithTheirPiecesCounterDict)
    print "len(termWithTheirListPiecesInfoDict):",len(termWithTheirListPiecesInfoDict)
    # print "termWithTheirListPiecesInfoDict['0']:",termWithTheirListPiecesInfoDict['0']
    # print "termWithTheirListPiecesInfoDict['1120h']:",termWithTheirListPiecesInfoDict['1120h']
    print "termWithTheirListPiecesInfoDict['zx2']:",termWithTheirListPiecesInfoDict['zx2']
    print
    
    # for correctness check
    # for term in termsWithTheirClassLabelDict:
    #     assert classLabelWithTheirNumOfPiecesDict[ termsWithTheirClassLabelDict[term] ] == len(termWithTheirListPiecesInfoDict[term])
    # print "Pass Check"
    
    # sum up the piece counter of each term in the same class
    # key: classLabel_pieceNum in string format eg. 0_0 means piece0 in class 0
    # value: counter in int format
    postingCounterForEachPiecesAmongAllClassesDict = {}
    
    # init the postingCounterForEachPiecesAmongAllClassesDict
    for i in range(0,len(classLabelWithTheirLowerBoundDict)):
        for j in range(0,classLabelWithTheirNumOfPiecesDict[i]):
            currentKey = str(i) + "_" + str(j)
            if currentKey not in postingCounterForEachPiecesAmongAllClassesDict:
                postingCounterForEachPiecesAmongAllClassesDict[currentKey] = 0
                
    # sum up
    for i in range(0,len(classLabelWithTheirLowerBoundDict)):
        for j in range(0,classLabelWithTheirNumOfPiecesDict[i]):
            currentKey = str(i) + "_" + str(j)
            for term in classLabelWithTheirBelongingTermsDict[i]:
                # for DEBUG
                # print "term:",term
                # print "i:",i
                # print "j:",j
                if j not in termWithTheirPiecesCounterDict[term]:
                    postingCounterForEachPiecesAmongAllClassesDict[currentKey] += 0
                else:
                    postingCounterForEachPiecesAmongAllClassesDict[currentKey] += termWithTheirPiecesCounterDict[term][j]
    
    # check
    # for i in range(0,len(classLabelWithTheirLowerBoundDict)):
    #     for j in range(0,classLabelWithTheirNumOfPiecesDict[i]):
    #         currentKey = str(i) + "_" + str(j)
    #         print currentKey,postingCounterForEachPiecesAmongAllClassesDict[currentKey]
    # print 
    
    # final report: one row per class, then overall totals
    totalNumOfPostings = 0
    totalNumOfTerms = 0
    print "classLabel classLowerBound #OfTerms #OfPieces #OfTOP10PostingsInTotal #OfTOP10PostingsInEachPiece"
    for i in range(0,len(classLabelWithTheirLowerBoundDict)):
        totalNumOfPostings += classLabelWithTheirNumOfPostingsCounterDict[i]
        totalNumOfTerms += len(classLabelWithTheirBelongingTermsDict[i])
        outputLine = str(i) + " " + str(classLabelWithTheirLowerBoundDict[i]) + " " + str( len(classLabelWithTheirBelongingTermsDict[i]) ) + " " + str(classLabelWithTheirNumOfPiecesDict[i]) + " " + str(classLabelWithTheirNumOfPostingsCounterDict[i]) + " "
        for j in range(0,classLabelWithTheirNumOfPiecesDict[i]):
            currentKey = str(i) + "_" + str(j)
            outputLine += str( postingCounterForEachPiecesAmongAllClassesDict[currentKey] ) + " "
        outputLine += "\n"
        # current version
        # (trailing comma suppresses print's own newline; outputLine already ends with "\n")
        print outputLine,
        # original version
        # print i,classLabelWithTheirLowerBoundDict[i],len(classLabelWithTheirBelongingTermsDict[i]),classLabelWithTheirNumOfPiecesDict[i],classLabelWithTheirNumOfPostingsCounterDict[i]
    print "total_num_of_classes:",len(classLabelWithTheirLowerBoundDict)
    print "total_num_of_terms:",totalNumOfTerms
    print "total_num_of_TOP10_postings:",totalNumOfPostings
    print "inputFileName1:",inputFileName1
    print "inputFileName2:",inputFileName2

# step1:
# produce the set of TOP10 postings to which the rank_in_the_list feature still needs to be added.
def produceSetOfTOP10PostingsForResultsHitInvertedListDistributionAnalysis():
    # option0(Updated on 20130820 night)
    inputResultFileName = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/2013AugPruningResults/goldStandardResultsForPruningProject_AND_TOP10_tail5k"
    # option1(Updated on 20130818):
    # inputResultFileName = "/data1/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/raw_TOP1000_results_for_head_95K_queries_output_from_IRTK"
    # option2:
    # inputResultFileName = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/raw_TOP1000_results_for_head_95K_queries_output_from_IRTK_STOP_AT_qid_11811"
    TOPKValueTakenIntoConsideration = 10
    
    # sub_step1: load the results from the raw result file
    # key: queryID 
    # value: a list of tuples with the format (rank,trecID,docID)
    outsideQueryIDAndResultSetDict = {}
    # key: queryID
    # value: the query term dict
    outsideQueryIDAndQueryTermsDictDict = {}
    
    # 3 arguments:
    # (1) inputResultFileName
    # (3) NUM_OF_TOP_RESULTS_NEEDED_TO_EVALUATE
    getQueryIDAndResultSetGivenResultFile(inputResultFileName,TOPKValueTakenIntoConsideration,outsideQueryIDAndResultSetDict,outsideQueryIDAndQueryTermsDictDict)
    
    print "len(outsideQueryIDAndResultSetDict):",len(outsideQueryIDAndResultSetDict)
    print "len(outsideQueryIDAndQueryTermsDictDict):",len(outsideQueryIDAndQueryTermsDictDict)
    # print "outsideQueryIDAndResultSetDict:",outsideQueryIDAndResultSetDict
    # print "outsideQueryIDAndQueryTermsDictDict:",outsideQueryIDAndQueryTermsDictDict
    
    # sub_step2: based on the variables outsideQueryIDAndResultSetDict and outsideQueryIDAndQueryTermsDictDict, output the set of postings need to get the feature value posting_rank_in_inverted_list    
    termsWithTrecIDsDict = {}
    trecIDANDDocIDMappingDict = {}
    for qid in outsideQueryIDAndQueryTermsDictDict:
        for termIndex in outsideQueryIDAndQueryTermsDictDict[qid]:
            currentTerm = outsideQueryIDAndQueryTermsDictDict[qid][termIndex]
            if currentTerm not in termsWithTrecIDsDict:
                termsWithTrecIDsDict[ currentTerm ] = {}
            else:
                pass
         
            for documentInfoTuple in outsideQueryIDAndResultSetDict[qid]:
                (trecID,docID) = documentInfoTuple
                if trecID not in termsWithTrecIDsDict[currentTerm]:
                    termsWithTrecIDsDict[currentTerm][trecID] = 1
                if trecID not in trecIDANDDocIDMappingDict:
                    trecIDANDDocIDMappingDict[trecID] = docID
                else:
                    pass # this document info has already been stored
    termList = []
    termList = termsWithTrecIDsDict.keys()
    termList.sort(cmp=None, key=None, reverse=False)
    
    '''
    # for DEBUG ONLY
    for term in termList:
        print term,len(termsWithTrecIDsDict[term])
    
    print "commissioner:",termsWithTrecIDsDict["commissioner"]
    print "of:",termsWithTrecIDsDict["of"]
    '''

    outputFileName = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/setOfTOP10Postings_tail5K"
    ouputFileHanlder = open(outputFileName,"w")
    for term in termList:
        for trecID in termsWithTrecIDsDict[term]:
            outputLine = "N/A" + " " + "0" + " " + str(trecID) + " " + str( trecIDANDDocIDMappingDict[trecID] ) + " " + str(term) + "\n"   
            ouputFileHanlder.write(outputLine)
    ouputFileHanlder.close()
    
    print "Overall Processing Statistics:"
    print "len(termList):",len(termList)
    print "inputResultFileName:",inputResultFileName
    print "outputFileName:",outputFileName

def getQueryIDAndResultSetGivenResultFile(inputResultFileName,NUM_OF_TOP_RESULTS_NEEDED_TO_EVALUATE,insideQueryIDAndResultSetDict,insideQueryIDAndQueryTermsDictDict):
    """Parse an IRTK raw result file and fill the two caller-supplied dicts.

    Outputs (mutated in place):
      insideQueryIDAndResultSetDict      -- qid -> list of (trecID, docID) tuples
                                            for results ranked <= NUM_OF_TOP_RESULTS_NEEDED_TO_EVALUATE
      insideQueryIDAndQueryTermsDictDict -- qid -> {termIndex: term}

    The file is consumed line by line as a small state machine: a "qid:" line
    opens a query, a "Search:" line triggers reading the term-index line, two
    name:value header lines, one skipped line, and then the block of result rows.
    """
    print "getQueryIDAndResultSetGivenResultFile() function called."
    numOfQueriesHavingQID = 0
    numOfQueriesHavingSearchContent = 0
    # NOTE(review): this counter is never incremented anywhere below, so the
    # summary always prints 0 for it — stale leftover?
    numOfQueriesHavingSearchResults = 0
    
    inputFileHandler1 = open(inputResultFileName,"r")
    
    
    
    currentLine = inputFileHandler1.readline()
    nextLine = ""
    currentQID = ""
    
    numOfResultsForTheAllQueries = 0
    
    while currentLine:
        # sampling parsing line:
        # qid: 701
        if currentLine.startswith("qid:"):
            currentQID = int( currentLine.strip().split(":")[1].strip() )
            print "currentQID:",str( currentQID )
            # init the variable
            if currentQID not in insideQueryIDAndResultSetDict:
                insideQueryIDAndResultSetDict[currentQID] = []
            numOfQueriesHavingQID += 1
        
        # sample parsing line:
        # Search: u s  oil industry history
        if currentLine.startswith("Search:"):
            elementList = currentLine.strip().split(" ")
            
            currentSearchContent = ""
            
            # collect the non-empty tokens after "Search:" (double spaces in
            # the query text produce empty strings that are dropped here)
            queryTermList = []
            for element in elementList[1:]:
                if element.strip() != "":
                    queryTermList.append( element.strip() )        
            currentQueryLength = len(queryTermList)
            print "currentQueryLength:",currentQueryLength
    
            for element in queryTermList:
                currentSearchContent += element + " "
            
            print currentSearchContent
            numOfQueriesHavingSearchContent += 1
            
            
            currentQueryTermIndexDict = {}
            nextLine = inputFileHandler1.readline()
            #print nextLine.strip()
            if nextLine.strip() != "":
                # I think I can skip the line: 
                # [2013-05-24 14:20:44] 38871 terms with their lists length have been loaded.
                nextLine = inputFileHandler1.readline()
                nextLineElements = nextLine.strip().split(" ")
                # print "nextLineElements:",nextLineElements
                # sample parsing line: 
                # oil:0 industry:1 history:2 u:3 s:4
                # builds {index: term}; note the membership test checks the TERM
                # while the key inserted is the INDEX, so a repeated term keeps
                # only its first index entry
                for element in nextLineElements:
                    if element.split(":")[0] not in currentQueryTermIndexDict:
                        currentQueryTermIndexDict[ int(element.split(":")[1]) ] = element.split(":")[0]
                
                print "currentQueryTermIndexDict:",currentQueryTermIndexDict
                # print "len(currentQueryTermIndexDict):",len(currentQueryTermIndexDict)
                if currentQID not in insideQueryIDAndQueryTermsDictDict:
                    insideQueryIDAndQueryTermsDictDict[currentQID] = currentQueryTermIndexDict

            
                
                # Now, it is time to read the threshold line
                currentLine = inputFileHandler1.readline()
                if len(currentLine.strip().split(":")) == 2:
                    name = currentLine.strip().split(":")[0]
                    value = currentLine.strip().split(":")[1]
                    print name,"::",value
                else:
                    print "format problem"
                    exit(1)
                    
                # Now, it is time to read computation method line
                currentLine = inputFileHandler1.readline()
                if len(currentLine.strip().split(":")) == 2:
                    name = currentLine.strip().split(":")[0]
                    value = currentLine.strip().split(":")[1]
                    print name,"::",value
                else:
                    print "format problem"
                    exit(1)
                
                # skip the line like the following:
                # 3 factor probability formula logic implemented by Wei on 2013/08/02 at school    
                currentLine = inputFileHandler1.readline()
                
                # Now, it is time to read the actual training example line
                currentLine = inputFileHandler1.readline()
                numOfResultsForTheCurrentQuery = 0
                currentResultLines = []
                # for DEBUG ONLY(IMPOERTANT debug setting here)
                # print "len( currentLine.strip().split(" ") ):",len( currentLine.strip().split(" ") )
                # print "currentLine:",currentLine
                # exit(1)
                lineElements = currentLine.strip().split(" ")
                # NOTE(review): a result row is recognized purely by having
                # exactly 65 space-separated fields; rank is assumed to be
                # field 0 (1-based), docID and trecID the last two fields —
                # confirm against the IRTK version that wrote the file.
                while len( lineElements ) == 65:
                    theRank = int(lineElements[0])
                    theDocID = lineElements[-2]
                    theTrecID = lineElements[-1]
                    if theRank <= NUM_OF_TOP_RESULTS_NEEDED_TO_EVALUATE:
                        insideQueryIDAndResultSetDict[currentQID].append( (theTrecID,theDocID) )
                    
                    numOfResultsForTheCurrentQuery += 1
                    currentResultLines.append(currentLine.strip())
                    currentLine = inputFileHandler1.readline()
                    lineElements = currentLine.strip().split(" ")
                   
                numOfResultsForTheAllQueries += numOfResultsForTheCurrentQuery
                
                # sampling method 1: among all the top10 documents, select those documents, open those documents and extract the corresponding postings.
                # This method make sure that once the document have been selected, ALL the coresponding postings will also be selected.
                # This sampling method has been developed on 2013/01/09
                
                print "numOfResultsForTheCurrentQuery:",numOfResultsForTheCurrentQuery
                print "len(currentResultLines):",len(currentResultLines)
                print
            
        currentLine = inputFileHandler1.readline()
        
    print "numOfQueriesHavingQID:",numOfQueriesHavingQID
    print "numOfQueriesHavingSearchContent:",numOfQueriesHavingSearchContent
    print "numOfQueriesHavingSearchResults:",numOfQueriesHavingSearchResults
    print "len(insideQueryIDAndResultSetDict):",len(insideQueryIDAndResultSetDict)
    print "len(insideQueryIDAndQueryTermsDictDict):",len(insideQueryIDAndQueryTermsDictDict)
    # for debug ONLY
    # print "insideQueryIDAndResultSetDict[0]:",insideQueryIDAndResultSetDict[0]

def constructQIDANDTOP10ResultsSetANDPostingsSetMappingTable():
    # sub_step1: get the qid with the TOP10 results info, also the related query terms in each query
    
    TOPKValueTakenIntoConsideration = 10
    # key: queryID in int format 
    # value: a list of tuples with the format (rank,trecID,docID)
    outsideQueryIDAndResultSetDict = {}
    # key: queryID in int format
    # value: the query term dict
    outsideQueryIDAndQueryTermsDictDict = {}
    
    # option0(Updated on 20130820 night)
    inputFileName1 = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/2013AugPruningResults/goldStandardResultsForPruningProject_AND_TOP10_tail5k"    
    # 3 arguments:
    # (1) inputFileName1
    # (3) NUM_OF_TOP_RESULTS_NEEDED_TO_EVALUATE
    getQueryIDAndResultSetGivenResultFile(inputFileName1,TOPKValueTakenIntoConsideration,outsideQueryIDAndResultSetDict,outsideQueryIDAndQueryTermsDictDict)
    
    print "outsideQueryIDAndResultSetDict[95001]:",outsideQueryIDAndResultSetDict[95001]
    print "outsideQueryIDAndQueryTermsDictDict[95001]:",outsideQueryIDAndQueryTermsDictDict[95001]
    # The answers are as following:
    # outsideQueryIDAndResultSetDict[95001]: [('GX010-15-15257322', '976712'), ('GX233-29-5284449', '21911485'), ('GX234-28-1118086', '22004233'), ('GX233-63-4787473', '21947108'), ('GX233-69-14919387', '21954048'), ('GX234-50-5683067', '22028285'), ('GX266-48-2119691', '24712022'), ('GX233-29-12155045', '21911907'), ('GX233-42-15104413', '21925644'), ('GX234-38-8663315', '22015584')]
    # outsideQueryIDAndQueryTermsDictDict[95002]: {0: 'registration', 1: 'single', 2: 'state'}
    
    ##############################temp output file begins...
    tempOutputFileName = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/tail5KWithNumOfResultsEvaluated"
    tempOutputFileHanlder = open(tempOutputFileName,"w")
    tempOutputLine = ""
    qidList = []
    qidList = outsideQueryIDAndResultSetDict.keys()
    qidList.sort(cmp=None, key=None, reverse=False)
    for qid in qidList:
        tempOutputLine = str(qid) + " " + str( len(outsideQueryIDAndResultSetDict[qid]) ) + "\n"
        tempOutputFileHanlder.write(tempOutputLine)
    tempOutputFileHanlder.close()
    exit(1)
    ##############################temp output file ends.
    
    
    
    # sub_step2: read the related file for the pieces info for the query terms
    # key: term in string format
    # value format1 (used currently)
    # value: a list of tuples which have the following format (pieceNum,numOfPostingsInThisPiece)
    termWithTheirListPiecesInfoDict = {}
    inputFileName2 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/100KQueryTermsWithTermFreqInCollectionANDClassLabelANDNumOfPostingsInEachPieces20130821"
    inputFileHandler = open(inputFileName2,"r")
    for line in inputFileHandler.readlines():
        lineElements = line.strip().split(" ")
        currentTerm = lineElements[0]
        
        if currentTerm not in termWithTheirListPiecesInfoDict:
            termWithTheirListPiecesInfoDict[currentTerm] = []
        
        numOfPiecesForCurrentTerm = int(lineElements[3])
        base = 4
        for i in range( 0,len(lineElements[base:]),2 ):
            currentPieceNumber = int(lineElements[4+i])
            numOfPostingsInCurrentPiece = int(lineElements[4+i+1])
            termWithTheirListPiecesInfoDict[currentTerm].append( (currentPieceNumber,numOfPostingsInCurrentPiece) )
    inputFileHandler.close()
    
    # sort the pieces info based on pieceNum, from largest to smallest
    for term in termWithTheirListPiecesInfoDict:
        termWithTheirListPiecesInfoDict[term].sort(cmp=None, key=None, reverse=True)
    
    print "len(termWithTheirListPiecesInfoDict):",len(termWithTheirListPiecesInfoDict)
    print "termWithTheirListPiecesInfoDict['0']:",termWithTheirListPiecesInfoDict['0']
    print "termWithTheirListPiecesInfoDict['000sites']:",termWithTheirListPiecesInfoDict['000sites']
    
    # sub_step3: read the rank in the list info of the TOP10 postings
    rankInListDict = {}
    inputFileName3 = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/setOfTOP10Postings_tail5K_COMPLETE_postingRankInListAdded"
    inputFileHandler = open(inputFileName3,"r")
    for line in inputFileHandler.readlines():
        lineElements = line.strip().split(" ")
        docIDInStringFormat = lineElements[3]
        termInStringFormat = lineElements[4]
        rankInListInIntFormat = int(lineElements[5])
        rankInListAccessKey = termInStringFormat + "_" + docIDInStringFormat
        if rankInListAccessKey not in rankInListDict:
            rankInListDict[rankInListAccessKey] = rankInListInIntFormat
    inputFileHandler.close()
    print "len(rankInListDict):",len(rankInListDict)
    
    outputFileName = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/tail5KResultsWithRelatedPieceNum"
    outputFileHandler = open(outputFileName,"w")
    
    # sub_step4: let's output the dependecy graph/file like the following format
    # qid rank(NOT yet provided) docID trecID(provided but NOT used) term relatedPiece
    qidList = []
    qidList = outsideQueryIDAndResultSetDict.keys() # have a lot of qid in int format in the list
    qidList.sort(cmp=None, key=None, reverse=False)
    for qid in outsideQueryIDAndResultSetDict:  # loop through qids
        for docInfoTuple in outsideQueryIDAndResultSetDict[qid]:    # loop through TOP10 document
                (trecIDInStringFormat, docIDInStringFormat) = docInfoTuple
                for termIndex in outsideQueryIDAndQueryTermsDictDict[qid]:   # loop through terms in the query
                    # with the following format: term_docID
                    termInStringFormat = outsideQueryIDAndQueryTermsDictDict[qid][termIndex]
                    # for DEBUG
                    # print "type(termInStringFormat):",type(termInStringFormat)
                    # print "type(docIDInStringFormat):",type(docIDInStringFormat)
                    # print "termInStringFormat:",termInStringFormat
                    # print "docIDInStringFormat:",docIDInStringFormat
                    rankInListAccessKey = termInStringFormat + "_" + docIDInStringFormat
                    if rankInListAccessKey in rankInListDict:
                        currentPosition = rankInListDict[rankInListAccessKey]       
                        currentPieceUpperBound = 0 
                        for currentTermPieceTupleInfo in termWithTheirListPiecesInfoDict[termInStringFormat]:
                            (currentTermPieceNum,currentNumOfPostingInThatPiece)= currentTermPieceTupleInfo
                            currentPieceUpperBound += currentNumOfPostingInThatPiece
                            if currentPosition <= currentPieceUpperBound:
                                # all I want is the currentTermPieceNum, directly output this
                                outputLine = str(qid) + " " + str(docIDInStringFormat) + " " + str(termInStringFormat) + " " + str(currentPosition) + " " + str(currentTermPieceNum) + "\n"
                                outputFileHandler.write(outputLine)
                                
                                '''
                                # old version for reference. Updated by Wei 2013/08/21 night by Wei
                                if term not in termWithTheirPiecesCounterDict:
                                    termWithTheirPiecesCounterDict[term] = {}
                                
                                if currentTermPieceNum not in termWithTheirPiecesCounterDict[term]:
                                    termWithTheirPiecesCounterDict[term][currentTermPieceNum] = 0 # init the piece counter to 0
                                
                                termWithTheirPiecesCounterDict[term][currentTermPieceNum] += 1
                                '''
                                
                                break # stop searching for this hit position
                            else:
                                pass # continue to search for the right piece for that term
                    else:
                        print "error,mark3"
                        exit(1)
    outputFileHandler.close()
    
    print "inputFileName1:",inputFileName1
    print "inputFileName2:",inputFileName2
    print "inputFileName3:",inputFileName3
    print "outputFileName:",outputFileName
    
print "Program Begins..."
# Updated by Wei 2013/08/20 night at school
# There are two major steps in this whole analysis:
# step1: output the set of TOP10 posting results, so that another program can
#        add each posting's rank-in-list to the file
# step2: draw the analysis chart showing the hit distribution for each list
#        and each class of terms

# step1 (currently disabled):
# produceSetOfTOP10PostingsForResultsHitInvertedListDistributionAnalysis()

# The middle step is to generate the posting_rank_in_list for each TOP10 posting instance.
# The program is located here@ /data3/obukai/workspace/web-search-engine-wei/scripts/src/pythonScripts/gov2/step4_gov2-make_posting_rank_in_the_inverted_list.py

# step2 (currently disabled):
# top10ResultsHitInvertedListDistributionAnalysis()

# step3: map each TOP10 (qid, doc, term) hit to its index piece -- the only
#        step that currently runs
constructQIDANDTOP10ResultsSetANDPostingsSetMappingTable()
print "Program Ends."



