# Updated by Wei 2013/08/10 afternoon at school
# Improvement after the meeting
from __future__ import division
from operator import itemgetter, attrgetter
import random
import os
import sys
import math
from sets import Set

# function for parsing the raw results (NOT meant to be called directly)
def getQueryIDAndResultSetGivenResultFileForNewFormatResultFile(inputFileName1,numOfLinesStillNeededToSkip,NUM_OF_TOP_RESULTS_NEEDED_TO_EVALUATE,NUM_OF_TOTAL_QUERIES_NEEDED_TO_EVALUATE):
    print "getQueryIDAndResultSetGivenResultFileForNewFormatResultFile() function begins..."
    
    numOfQueriesHavingQID = 0
    numOfQueriesHavingSearchContent = 0
    numOfQueriesHavingSearchResults = 0
    
    # key:queryID value:a list of tuples with the format (rank,docID)
    insideQueryIDAndResultSetDict = {}
    
    inputFileHandler1 = open(inputFileName1,"r")
    
    
    
    currentLine = inputFileHandler1.readline()
    nextLine = ""
    currentQID = ""
    
    numOfResultsForTheAllQueries = 0
    
    while currentLine and numOfQueriesHavingSearchContent < NUM_OF_TOTAL_QUERIES_NEEDED_TO_EVALUATE:
        # sampling parsing line:
        # qid: 701
        if currentLine.startswith("qid:"):
            currentQID = int( currentLine.strip().split(":")[1].strip() )
            print "currentQID:",str( currentQID )
            if currentQID not in insideQueryIDAndResultSetDict:
                insideQueryIDAndResultSetDict[currentQID] = Set([])
            numOfQueriesHavingQID += 1
        
        # sample parsing line:
        # Search: u s  oil industry history
        if currentLine.startswith("Search:"):
            elementList = currentLine.strip().split(" ")
            
            currentSearchContent = ""
            
            queryTermList = []
            for element in elementList[1:]:
                if element.strip() != "":
                    queryTermList.append( element.strip() )        
            currentQueryLength = len(queryTermList)
            print "currentQueryLength:",currentQueryLength
    
            for element in queryTermList:
                currentSearchContent += element + " "
            
            print currentSearchContent
            numOfQueriesHavingSearchContent += 1
            
            
            currentQueryTermIndexDict = {}
            nextLine = inputFileHandler1.readline()
            #print nextLine.strip()
            if nextLine.strip() != "":
                # I think I can skip the line: 
                # [2013-05-24 14:20:44] 38871 terms with their lists length have been loaded.
                nextLine = inputFileHandler1.readline()
                nextLineElements = nextLine.strip().split(" ")
                # print "nextLineElements:",nextLineElements
                # sample parsing line: 
                # oil:0 industry:1 history:2 u:3 s:4
                for element in nextLineElements:
                    if element.split(":")[0] not in currentQueryTermIndexDict:
                        currentQueryTermIndexDict[ int(element.split(":")[1]) ] = element.split(":")[0]
                
                print "currentQueryTermIndexDict:",currentQueryTermIndexDict
                # print "len(currentQueryTermIndexDict):",len(currentQueryTermIndexDict)
                
                # Now, it is time to read the threshold line
                currentLine = inputFileHandler1.readline()
                if len(currentLine.strip().split(":")) == 2:
                    name = currentLine.strip().split(":")[0]
                    value = currentLine.strip().split(":")[1]
                    print name,"::",value
                else:
                    print "format problem"
                    exit(1)
                    
                # Now, it is time to read computation method line
                currentLine = inputFileHandler1.readline()
                if len(currentLine.strip().split(":")) == 2:
                    name = currentLine.strip().split(":")[0]
                    value = currentLine.strip().split(":")[1]
                    print name,"::",value
                else:
                    print "format problem"
                    exit(1)
                
                if numOfLinesStillNeededToSkip == 0:
                    # do nothing
                    pass
                elif numOfLinesStillNeededToSkip == 1:
                    # do nothing
                    currentLine = inputFileHandler1.readline()
                else:
                    # not yet consider the other situation
                    pass
                
                # skip the line like the following:
                # 3 factor probability formula logic implemented by Wei on 2013/08/02 at school    
                currentLine = inputFileHandler1.readline()
                
                # Now, it is time to read the actual training example line
                currentLine = inputFileHandler1.readline()
                numOfResultsForTheCurrentQuery = 0
                currentResultLines = []
                # for DEBUG ONLY
                # print "len( currentLine.strip().split(" ") ):",len( currentLine.strip().split(" ") )
                # print "currentLine:",currentLine
                # exit(1)
                lineElements = currentLine.strip().split(" ")
                while len( lineElements ) == 14:
                    theRank = int(lineElements[0])
                    theDocID = lineElements[-2]
                    if theRank <= NUM_OF_TOP_RESULTS_NEEDED_TO_EVALUATE:
                        insideQueryIDAndResultSetDict[currentQID].add( theDocID )
                    
                    numOfResultsForTheCurrentQuery += 1
                    currentResultLines.append(currentLine.strip())
                    currentLine = inputFileHandler1.readline()
                    lineElements = currentLine.strip().split(" ")
                   
                numOfResultsForTheAllQueries += numOfResultsForTheCurrentQuery
                
                if numOfResultsForTheCurrentQuery != 0:
                    numOfQueriesHavingSearchResults += 1
                
                print "numOfResultsForTheCurrentQuery:",numOfResultsForTheCurrentQuery
                print "len(currentResultLines):",len(currentResultLines)
                print
            
        currentLine = inputFileHandler1.readline()
        
    print "numOfQueriesHavingQID:",numOfQueriesHavingQID
    print "numOfQueriesHavingSearchContent:",numOfQueriesHavingSearchContent
    print "numOfQueriesHavingSearchResults:",numOfQueriesHavingSearchResults
    print "len(insideQueryIDAndResultSetDict):",len(insideQueryIDAndResultSetDict)
    print "getQueryIDAndResultSetGivenResultFileForNewFormatResultFile() function ends."
    return insideQueryIDAndResultSetDict



# step4:
def groupTheDocumentsBasedOnSthANDComputeTheQueryHitDocumentClusterStatistics():
    print "Function Begins..."
    ########################################
    # key: queryID in int format
    # value: a dict with unique query terms
    queryDict = {}
    inputQueryFileName = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/100KQueries_head_50K"
    inputQueryHandler = open(inputQueryFileName,"r")
    for index,line in enumerate( inputQueryHandler.readlines() ):
        elements = line.strip().split(":")
        queryID = int(elements[0])
        
        data = elements[1]
        data = data.lower()
        
        for i in range(0,len(data)):
            # print "data[i]:",ord(data[i])
            if not ( (ord(data[i]) >= 48 and ord(data[i]) < 58) or (ord(data[i]) >= 65 and ord(data[i]) < 91) or (ord(data[i]) >= 97 and ord(data[i]) < 123) or (ord(data[i]) == 32) ):
                # Just replace them with a space.
                data = data[:i] + " " + data[i+1:]
        
        queryContent = data
        
        uniuqeQueryTermsDict = {}
        queryContentElements = queryContent.strip().split(" ")
        for element in queryContentElements:
            if element.strip() != "":
                if element.strip() not in uniuqeQueryTermsDict:
                    uniuqeQueryTermsDict[element.strip()] = 1
                else:
                    uniuqeQueryTermsDict[element.strip()] += 1
        if queryID not in queryDict:
            queryDict[queryID] = uniuqeQueryTermsDict
    
    '''
    # some debug results:
    # queryDict[1]: {'commissioner': 1, 'revenue': 1, 'of': 1, 'virginia': 1, 'county': 1, 'orange': 1}
    # queryDict[73]: {'cord': 1, 'on': 2, 'neck': 1, 'spinal': 1, 'pressing': 1, 'bone': 1, 'the': 2, 'spurs': 1}
    # len(queryDict): 50000
    '''

    print "for debug ONLY:"
    print "queryDict[1]:",queryDict[1]
    print "queryDict[73]:",queryDict[73]
    print "len(queryDict):",len(queryDict)
    
    ########################################

    # The size can either be measured in (1)doc size in words OR (2)posting constructed for each document
    # This file should be sorted by doc size in words for each document
    # option1:
    # inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/trecID_docID_numOfPostingsRecorded_DocSizeInWords_MappingTableForGov2Dataset_sorted_by_docSizeInWords"
    # option2:
    # inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/trecID_docID_numOfPostingsRecorded_DocSizeInWords_MappingTableForGov2Dataset_with_Xdoc_values_added_sorted_by_XdocValueUsingGoodTurning"
    # option3:
    inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/trecID_docID_numOfPostingsRecorded_DocSizeInWords_MappingTableForGov2Dataset_with_Xdoc_values_added_sorted_by_XdocValueUsingGoodTurningDividedByNumOfPostingsForEachDocument"
    
    inputFileHandler = open(inputFileName,"r")
    # skip the headline
    # inputFileHandler.readline()
    # The following are the variables which is document related
    group0NumOfUniqueDocumentsBeingTouched = 0
    group1NumOfUniqueDocumentsBeingTouched = 0
    group2NumOfUniqueDocumentsBeingTouched = 0
    group3NumOfUniqueDocumentsBeingTouched = 0
    group4NumOfUniqueDocumentsBeingTouched = 0
    
    group0NumOfTimesDocumentsBeingTouched = 0
    group1NumOfTimesDocumentsBeingTouched = 0
    group2NumOfTimesDocumentsBeingTouched = 0
    group3NumOfTimesDocumentsBeingTouched = 0
    group4NumOfTimesDocumentsBeingTouched = 0
    
    # The following are the variables which is posting related
    group0NumOfUniquePostingsBeingTouched = 0
    group1NumOfUniquePostingsBeingTouched = 0
    group2NumOfUniquePostingsBeingTouched = 0
    group3NumOfUniquePostingsBeingTouched = 0
    group4NumOfUniquePostingsBeingTouched = 0
    
    group0NumOfTimesPostingsBeingTouched = 0
    group1NumOfTimesPostingsBeingTouched = 0
    group2NumOfTimesPostingsBeingTouched = 0
    group3NumOfTimesPostingsBeingTouched = 0
    group4NumOfTimesPostingsBeingTouched = 0

    group0DocDict = {}
    group1DocDict = {}
    group2DocDict = {}
    group3DocDict = {}
    group4DocDict = {}
    groupXDocDictListContainer = []
    groupXDocDictListContainer.append(group0DocDict)
    groupXDocDictListContainer.append(group1DocDict)
    groupXDocDictListContainer.append(group2DocDict)
    groupXDocDictListContainer.append(group3DocDict)
    groupXDocDictListContainer.append(group4DocDict)
    
    group0PostingDict = {}
    group1PostingDict = {}
    group2PostingDict = {}
    group3PostingDict = {}
    group4PostingDict = {}
    groupXPostingDictListContainer = []
    groupXPostingDictListContainer.append(group0PostingDict)
    groupXPostingDictListContainer.append(group1PostingDict)
    groupXPostingDictListContainer.append(group2PostingDict)
    groupXPostingDictListContainer.append(group3PostingDict)
    groupXPostingDictListContainer.append(group4PostingDict)    
    
    baseIndexForGroupXDocDictListContainer = 0
    
    rangesUpperBoundKeyDict = {}
    
    # key: either docID / trecID (used in measured in # of postings recorded for each document)
    rangesUpperBoundKeyDict["25161175"] = 1
    rangesUpperBoundKeyDict["8611287"] = 2
    rangesUpperBoundKeyDict["4713214"] = 3
    rangesUpperBoundKeyDict["1669607"] = 4
    
    currentLine = inputFileHandler.readline()
    while currentLine:
        currentLineElements = currentLine.strip().split(" ")
        currentValue = currentLineElements[1]
        currentDocSizeInWords           = int( currentLineElements[3] )
        currentDocumentGoodTurningValue = float( currentLineElements[7] )
        currentDocumentGoodTurningValueDividedByDocSizeInUniqueWords = float( currentLineElements[11] )
        groupXDocDictListContainer[baseIndexForGroupXDocDictListContainer][currentValue] = 0
        if currentValue in rangesUpperBoundKeyDict:
            print "baseIndexForGroupXDocDictListContainer:",baseIndexForGroupXDocDictListContainer
            print "len( groupXDocDictListContainer[baseIndexForGroupXDocDictListContainer] ):",len( groupXDocDictListContainer[baseIndexForGroupXDocDictListContainer] )
            print "currentDocSizeInWords:",currentDocSizeInWords
            print "currentDocumentGoodTurningValue:",currentDocumentGoodTurningValue
            print "currentDocumentGoodTurningValueDividedByDocSizeInUniqueWords:",currentDocumentGoodTurningValueDividedByDocSizeInUniqueWords
            baseIndexForGroupXDocDictListContainer += 1
        else:
            pass
        currentLine = inputFileHandler.readline()
    inputFileHandler.close()
    
    totalDocumentCounted = 0
    for currentDict in groupXDocDictListContainer:
        totalDocumentCounted += len(currentDict)
    
    print "totalDocumentCounted:",totalDocumentCounted
    
    print "len(groupXDocDictListContainer[0]):",len(groupXDocDictListContainer[0])
    print "len(groupXDocDictListContainer[1]):",len(groupXDocDictListContainer[1])
    print "len(groupXDocDictListContainer[2]):",len(groupXDocDictListContainer[2])
    print "len(groupXDocDictListContainer[3]):",len(groupXDocDictListContainer[3])
    print "len(groupXDocDictListContainer[4]):",len(groupXDocDictListContainer[4])
    
    # exit(1)
    
    '''
    # used in 2013/08/11 very early morning
    # The results are here based on sorting by XdocValueUsingGoodTuringMethodOfEachDocumentDividedByDocSizeInUniqueWords:
    cluster 1: 2079538  [4.06043542106e-07   ,0.00024040129683 ]
    cluster 2: 1901986  [0.00024040129683    ,0.000360663461538 ]
    cluster 3: 4091649  [0.000360663461538   ,0.000593205882353 ]
    cluster 4: 6410483  [0.000593205882353   ,0.000824487437186 ]
    cluster 5: 10721523 [0.000824487437186   ,0.00581547142857 ]
    totalDocumentCounted: 25205179    
    
    # used in 2013/08/10 night
    # The results are here based on sorting by XdocValueUsingGoodTuringMethod of each document:
    cluster 1: 11934936 [3.50924e-05,0.127702 ]
    cluster 2: 6191362  [0.127702   ,0.180104 ]
    cluster 3: 4030775  [0.180104   ,0.220922 ]
    cluster 4: 2087691  [0.220922   ,0.260201 ]
    cluster 5: 960415   [0.260201   ,0.6636   ]
    totalDocumentCounted: 25205179
        
    # used in 2013/08/08 night 
    # The results are here based on sorting by doc_size_in_words for each document:
    cluster 1: 13188600: [3,379]
    cluster 2: 5443344:  [379,786]
    cluster 3: 3591372:  [786,1919]
    cluster 4: 2143454:  [1919,3845]
    cluster 5: 838409:   [3845,127152]
    totalDocumentCounted: 25205179
    '''
    
    # NOW, let's continue to analyze the query results
    inputFileName = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/raw_results_complete_result_set_100%_TOP10_AND_20130807_head_50K"
    numOfLinesStillNeededToSkip = 0
    NUM_OF_TOP_RESULTS_NEEDED_TO_EVALUATE = 10
    NUM_OF_TOTAL_QUERIES_NEEDED_TO_EVALUATE = 50000
    outsideQueryIDAndResultSetDict = getQueryIDAndResultSetGivenResultFileForNewFormatResultFile(inputFileName,numOfLinesStillNeededToSkip,NUM_OF_TOP_RESULTS_NEEDED_TO_EVALUATE,NUM_OF_TOTAL_QUERIES_NEEDED_TO_EVALUATE)
    # print "outsideQueryIDAndResultSetDict:",outsideQueryIDAndResultSetDict
    for qid in outsideQueryIDAndResultSetDict:
        # print "qid:",qid
        for docID in outsideQueryIDAndResultSetDict[qid]:
            # print "docID:",docID
            if docID in groupXDocDictListContainer[0]:
                if groupXDocDictListContainer[0][docID] == 0:
                    group0NumOfUniqueDocumentsBeingTouched += 1
                groupXDocDictListContainer[0][docID] += 1
                group0NumOfTimesDocumentsBeingTouched += 1
                
                # newly added component on 2013/08/11 night by Wei at school
                for currentTerm in queryDict[qid]:
                    currentPostingKey = currentTerm + "_" + str(docID)
                    if currentPostingKey not in groupXPostingDictListContainer[0]:
                        groupXPostingDictListContainer[0][currentPostingKey] = 0 # Just make a init value for this key
                        group0NumOfUniquePostingsBeingTouched += 1
                    groupXPostingDictListContainer[0][currentPostingKey] += 1
                    group0NumOfTimesPostingsBeingTouched += 1
                    
            elif docID in groupXDocDictListContainer[1]:
                if groupXDocDictListContainer[1][docID] == 0:
                    group1NumOfUniqueDocumentsBeingTouched += 1
                groupXDocDictListContainer[1][docID] += 1                
                group1NumOfTimesDocumentsBeingTouched += 1
                
                # newly added component on 2013/08/11 night by Wei at school
                for currentTerm in queryDict[qid]:
                    currentPostingKey = currentTerm + "_" + str(docID)
                    if currentPostingKey not in groupXPostingDictListContainer[1]:
                        groupXPostingDictListContainer[1][currentPostingKey] = 0 # Just make a init value for this key
                        group1NumOfUniquePostingsBeingTouched += 1
                    groupXPostingDictListContainer[1][currentPostingKey] += 1
                    group1NumOfTimesPostingsBeingTouched += 1
                
            elif docID in groupXDocDictListContainer[2]:
                if groupXDocDictListContainer[2][docID] == 0:
                    group2NumOfUniqueDocumentsBeingTouched += 1
                groupXDocDictListContainer[2][docID] += 1
                group2NumOfTimesDocumentsBeingTouched += 1

                # newly added component on 2013/08/11 night by Wei at school
                for currentTerm in queryDict[qid]:
                    currentPostingKey = currentTerm + "_" + str(docID)
                    if currentPostingKey not in groupXPostingDictListContainer[2]:
                        groupXPostingDictListContainer[2][currentPostingKey] = 0 # Just make a init value for this key
                        group2NumOfUniquePostingsBeingTouched += 1
                    groupXPostingDictListContainer[2][currentPostingKey] += 1
                    group2NumOfTimesPostingsBeingTouched += 1

                
            elif docID in groupXDocDictListContainer[3]:
                if groupXDocDictListContainer[3][docID] == 0:
                    group3NumOfUniqueDocumentsBeingTouched += 1
                groupXDocDictListContainer[3][docID] += 1
                group3NumOfTimesDocumentsBeingTouched += 1

                # newly added component on 2013/08/11 night by Wei at school
                for currentTerm in queryDict[qid]:
                    currentPostingKey = currentTerm + "_" + str(docID)
                    if currentPostingKey not in groupXPostingDictListContainer[3]:
                        groupXPostingDictListContainer[3][currentPostingKey] = 0 # Just make a init value for this key
                        group3NumOfUniquePostingsBeingTouched += 1
                    groupXPostingDictListContainer[3][currentPostingKey] += 1
                    group3NumOfTimesPostingsBeingTouched += 1
                
            elif docID in groupXDocDictListContainer[4]:
                if groupXDocDictListContainer[4][docID] == 0:
                    group4NumOfUniqueDocumentsBeingTouched += 1
                groupXDocDictListContainer[4][docID] += 1
                group4NumOfTimesDocumentsBeingTouched += 1

                # newly added component on 2013/08/11 night by Wei at school
                for currentTerm in queryDict[qid]:
                    currentPostingKey = currentTerm + "_" + str(docID)
                    if currentPostingKey not in groupXPostingDictListContainer[4]:
                        groupXPostingDictListContainer[4][currentPostingKey] = 0 # Just make a init value for this key
                        group4NumOfUniquePostingsBeingTouched += 1
                    groupXPostingDictListContainer[4][currentPostingKey] += 1
                    group4NumOfTimesPostingsBeingTouched += 1
                                
            # print
    
    print "group0NumOfUniqueDocumentsBeingTouched:",group0NumOfUniqueDocumentsBeingTouched
    print "group1NumOfUniqueDocumentsBeingTouched:",group1NumOfUniqueDocumentsBeingTouched
    print "group2NumOfUniqueDocumentsBeingTouched:",group2NumOfUniqueDocumentsBeingTouched
    print "group3NumOfUniqueDocumentsBeingTouched:",group3NumOfUniqueDocumentsBeingTouched
    print "group4NumOfUniqueDocumentsBeingTouched:",group4NumOfUniqueDocumentsBeingTouched
    print
    print "group0NumOfTimesDocumentsBeingTouched:",group0NumOfTimesDocumentsBeingTouched
    print "group1NumOfTimesDocumentsBeingTouched:",group1NumOfTimesDocumentsBeingTouched
    print "group2NumOfTimesDocumentsBeingTouched:",group2NumOfTimesDocumentsBeingTouched
    print "group3NumOfTimesDocumentsBeingTouched:",group3NumOfTimesDocumentsBeingTouched
    print "group4NumOfTimesDocumentsBeingTouched:",group4NumOfTimesDocumentsBeingTouched
    print
    print "group0NumOfUniquePostingsBeingTouched:",group0NumOfUniquePostingsBeingTouched
    print "group1NumOfUniquePostingsBeingTouched:",group1NumOfUniquePostingsBeingTouched
    print "group2NumOfUniquePostingsBeingTouched:",group2NumOfUniquePostingsBeingTouched
    print "group3NumOfUniquePostingsBeingTouched:",group3NumOfUniquePostingsBeingTouched
    print "group4NumOfUniquePostingsBeingTouched:",group4NumOfUniquePostingsBeingTouched
    print
    print "group0NumOfTimesPostingsBeingTouched:",group0NumOfTimesPostingsBeingTouched
    print "group1NumOfTimesPostingsBeingTouched:",group1NumOfTimesPostingsBeingTouched
    print "group2NumOfTimesPostingsBeingTouched:",group2NumOfTimesPostingsBeingTouched
    print "group3NumOfTimesPostingsBeingTouched:",group3NumOfTimesPostingsBeingTouched
    print "group4NumOfTimesPostingsBeingTouched:",group4NumOfTimesPostingsBeingTouched    
    
    '''
    # 2013/08/10 night.
    group0NumOfUniqueDocumentsBeingTouched: 37994
    group1NumOfUniqueDocumentsBeingTouched: 91160
    group2NumOfUniqueDocumentsBeingTouched: 102846
    group3NumOfUniqueDocumentsBeingTouched: 60195
    group4NumOfUniqueDocumentsBeingTouched: 92047
    
    group0NumOfTimesDocumentsBeingTouched: 41503
    group1NumOfTimesDocumentsBeingTouched: 102901
    group2NumOfTimesDocumentsBeingTouched: 122687
    group3NumOfTimesDocumentsBeingTouched: 74445
    group4NumOfTimesDocumentsBeingTouched: 125897
    numOfQueries: 50000
    numOfQueriesHavingSearchResults: 48559
    
    
    # 2013/08/10 afternoon. Prof said this is still NOT good, Cause what you computed is NOT the total hit time, but the unique number of documents being hit, :)
    # And prof is actually right in this case,
    # I think the following 3rd time result is good. 2013/08/08 night
    group0NumOfUniqueDocumentsBeingTouched: 72801
    group1NumOfUniqueDocumentsBeingTouched: 94175
    group2NumOfUniqueDocumentsBeingTouched: 93867
    group3NumOfUniqueDocumentsBeingTouched: 47298
    group4NumOfUniqueDocumentsBeingTouched: 76101
    numOfQueries: 50000
    numOfQueriesHavingSearchResults: 48559
 
    # I don't think this is the right info Prof want 2013/08/08 night
    # the 2nd time result I get 2013/08/08 night by Wei():
    group0NumOfUniqueDocumentsBeingTouched: 79573
    group1NumOfUniqueDocumentsBeingTouched: 109002
    group2NumOfUniqueDocumentsBeingTouched: 114384
    group3NumOfUniqueDocumentsBeingTouched: 58699
    group4NumOfUniqueDocumentsBeingTouched: 105775
    totalNumOfQueries: 467433 (Please do remember that there are some queries do NOT return any results)
    
    # I don't think this is the right info Prof want 2013/08/08 night
    # the preliminary result I get is the following 2013/08/08 afternoon by Wei:
    group0NumOfUniqueDocumentsBeingTouched: 78914
    group1NumOfUniqueDocumentsBeingTouched: 101187
    group2NumOfUniqueDocumentsBeingTouched: 113768
    group3NumOfUniqueDocumentsBeingTouched: 56647
    group4NumOfUniqueDocumentsBeingTouched: 113326
    totalNumOfQueries: 463842 (Please do remember that there are some queries do NOT return any results)
    '''
    
    print "Function Ends."

# step3:   
def computeTheUpperBoundForEachClassOfDocuments():
    '''
    # Let's sort the documents based on the XdocValue Using Good Turning Method / doc_size_in_unique_words
    # range1
    currentAccumulatedNumOfPostingsInCollection: 1290390171
    currentThreshold: 1290389603
    lineIndex: 2079537
    currentLineElements[0],trecID: GX272-29-5717472
    currentLineElements[1],docID: 25161175
    currentLineElements[7],XdocValueUsingGoodTurning: 0.333677
    currentLineElements[11],XdocValue(goodTurning) / numOfPostingsRecordedForEachDocument: 0.00024040129683
    
    # range2
    currentAccumulatedNumOfPostingsInCollection: 2580779516
    currentThreshold: 2580779206
    lineIndex: 3981523
    currentLineElements[0],trecID: GX077-81-1475871
    currentLineElements[1],docID: 8611287
    currentLineElements[7],XdocValueUsingGoodTurning: 0.225054
    currentLineElements[11],XdocValue(goodTurning) / numOfPostingsRecordedForEachDocument: 0.000360663461538
    
    # range3
    currentAccumulatedNumOfPostingsInCollection: 3871168895
    currentThreshold: 3871168809
    lineIndex: 8073172
    currentLineElements[0],trecID: GX043-88-6966304
    currentLineElements[1],docID: 4713214
    currentLineElements[7],XdocValueUsingGoodTurning: 0.181521
    currentLineElements[11],XdocValue(goodTurning) / numOfPostingsRecordedForEachDocument: 0.000593205882353
    
    # range4
    currentAccumulatedNumOfPostingsInCollection: 5161558499
    currentThreshold: 5161558412
    lineIndex: 14483655
    currentLineElements[0],trecID: GX016-81-7773651
    currentLineElements[1],docID: 1669607
    currentLineElements[7],XdocValueUsingGoodTurning: 0.164073
    currentLineElements[11],XdocValue(goodTurning) / numOfPostingsRecordedForEachDocument: 0.000824487437186
    
    # range5
    (NOT output but can be computed)
    '''
    
    
    '''
    # Let's sort the documents based on the XdocValue Using Good Turning Method 
    # range1:
    currentAccumulatedNumOfPostingsInCollection: 1290389629
    currentThreshold: 1290389603
    lineIndex: 11934935
    currentLineElements[0],trecID: GX242-85-1951808
    currentLineElements[1],docID: 22766688
    currentLineElements[7],XdocValueUsingGoodTurning: 0.127702
    
    # range2:
    currentAccumulatedNumOfPostingsInCollection: 2580779306
    currentThreshold: 2580779206
    lineIndex: 18126297
    currentLineElements[0],trecID: GX142-61-9013645
    currentLineElements[1],docID: 14611603
    currentLineElements[7],XdocValueUsingGoodTurning: 0.180104
    
    # range3:
    currentAccumulatedNumOfPostingsInCollection: 3871168907
    currentThreshold: 3871168809
    lineIndex: 22157072
    currentLineElements[0],trecID: GX238-24-10105755
    currentLineElements[1],docID: 22357883
    currentLineElements[7],XdocValueUsingGoodTurning: 0.220922
    
    # range4:
    currentAccumulatedNumOfPostingsInCollection: 5161558441
    currentThreshold: 5161558412
    lineIndex: 24244763
    currentLineElements[0],trecID: GX001-59-15568457
    currentLineElements[1],docID: 143857
    currentLineElements[7],XdocValueUsingGoodTurning: 0.260201

    # range5:
    (NOT output but can be computed)
    '''
    
    '''
    # We have DONE this experiment on 2013/08/08 night
    # These ranges are based on sort the document by docSizeInWords
    # range1:
    currentAccumulatedNumOfPostingsInCollection: 1290389691
    currentThreshold: 1290389603
    lineIndex: 13188599
    currentLine: GX272-73-7195689 25196435 150 379
    
    # range2:
    currentAccumulatedNumOfPostingsInCollection: 2580779333
    currentThreshold: 2580779206
    lineIndex: 18631943
    currentLine: GX079-97-5134714 8811916 264 786
    
    # range3:
    currentAccumulatedNumOfPostingsInCollection: 3871169374
    currentThreshold: 3871168809
    lineIndex: 22223315
    currentLine: GX272-58-9526684 25184433 771 1919
    
    # range4:
    currentAccumulatedNumOfPostingsInCollection: 5161559341
    currentThreshold: 5161558412
    lineIndex: 24366769
    currentLine: GX251-07-16138666 23602217 1283 3845
    
    # range5:
    (NOT output but can be computed)
    '''
    
    print "Function Begins..."
    # This input file should be sorted by docSizeInWords
    # option1:
    # inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/trecID_docID_numOfPostingsRecorded_DocSizeInWords_MappingTableForGov2Dataset_sorted_by_docSizeInWords"
    # option2:
    # inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/trecID_docID_numOfPostingsRecorded_DocSizeInWords_MappingTableForGov2Dataset_with_Xdoc_values_added_sorted_by_XdocValueUsingGoodTurning"
    # option3:
    inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/trecID_docID_numOfPostingsRecorded_DocSizeInWords_MappingTableForGov2Dataset_with_Xdoc_values_added_sorted_by_XdocValueUsingGoodTurningDividedByNumOfPostingsForEachDocument"
    inputFileHandler = open(inputFileName,"r")
    currentLine = inputFileHandler.readline()
    
    currentAccumulatedNumOfPostingsInCollection = 0
    lineIndex = 0
    currentStep = 1
    
    # divide the 20%, 20%, 20%, 20% and 20% by the amount of postings occupied
    # # of postings recorded for each document
    # The gap will be 6451948010/5 about = 1290389603
    GAP = 1290389603
    currentThreshold = GAP * currentStep
    
    while currentLine:
        currentLineElements = currentLine.strip().split(" ")
        currentNumOfPostingsRecorded = int(currentLineElements[2])
        currentAccumulatedNumOfPostingsInCollection += currentNumOfPostingsRecorded
        if currentAccumulatedNumOfPostingsInCollection > currentThreshold:
            print "currentAccumulatedNumOfPostingsInCollection:",currentAccumulatedNumOfPostingsInCollection
            print "currentThreshold:",currentThreshold
            print "lineIndex:",lineIndex
            print "currentLineElements[0],trecID:",currentLineElements[0]
            print "currentLineElements[1],docID:",currentLineElements[1]
            print "currentLineElements[7],XdocValueUsingGoodTurning:",currentLineElements[7]
            print "currentLineElements[11],XdocValue(goodTurning) / numOfPostingsRecordedForEachDocument:",currentLineElements[11]
            print
        
            currentStep += 1
            currentThreshold = GAP * currentStep
            
        currentLine = inputFileHandler.readline()
        lineIndex += 1
    print "Function Ends."
    
# step2_1:
def combineNumOfPostingsInEachDocumentANDDocSizeInWordsIntoOneFile():
    print "Function Begins..."
    # This file contains two columns and should be sorted by docID:
    # column1: docID
    # column2: num of postings recorded for each document
    inputFileName1 = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/trecID_docID_num_of_postings_recorded_in_index_MappingTableForGov2Dataset"
    inputFileHanlder1 = open(inputFileName1,"r")
    
    # This file contains three columns and should be sorted by docID:
    # column1: trecID
    # column2: docID
    # column3: doc size in words for each document
    inputFileName2 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/trecID_docID_DocSizeInWords_MappingTableForGov2Dataset_sorted_by_docID"
    inputFileHanlder2 = open(inputFileName2,"r")
    
    # This file should contain 4 columns and should be sorted by docID:
    # column1: trecID
    # column2: docID
    # column3: num of postings recorded for each document
    # column4: doc size in words for each document
    outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/trecID_docID_numOfPostingsRecorded_DocSizeInWords_MappingTableForGov2Dataset_sorted_by_docID"
    outputFileHandler = open(outputFileName,"w")
    
    currentLineFromFile1 = inputFileHanlder1.readline()
    currentLineFromFile2 = inputFileHanlder2.readline()
    
    while currentLineFromFile1:
        currentLineElementsFromFile1 = currentLineFromFile1.strip().split(" ")
        currentLineElementsFromFile2 = currentLineFromFile2.strip().split(" ")
        
        if currentLineElementsFromFile1[0] == currentLineElementsFromFile2[1]:
            outputLine = currentLineElementsFromFile2[0] + " " + currentLineElementsFromFile2[1] + " " + currentLineElementsFromFile1[1] + " " + currentLineElementsFromFile2[2] + "\n"
            outputFileHandler.write(outputLine)
        else:
            print "system error.mark1"
            exit(1)
        

        
        currentLineFromFile1 = inputFileHanlder1.readline()
        currentLineFromFile2 = inputFileHanlder2.readline()
               
    inputFileHanlder1.close()
    inputFileHanlder2.close()
    outputFileHandler.close()
    
    print "inputFileName1:",inputFileName1
    print "inputFileName2:",inputFileName2
    print "outputFileName:",outputFileName
    print "Function Ends."

# step2_2:
def combineXdocValueForEachDocumentIntoTheMainFile():
    print "Function Begins..."
    # This file should contain 4 columns and should be sorted by docID:
    # column0: trecID
    # column1: docID
    # column2: num of postings recorded for each document
    # column3: doc size in words for each document
    inputFileName1 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/trecID_docID_numOfPostingsRecorded_DocSizeInWords_MappingTableForGov2Dataset_sorted_by_docID"
    inputFileHandler1 = open(inputFileName1,"r")
    
    # This file contains 6 columns and should be sorted by docID:
    # column0: trecID
    # column1: docID
    # column2: XdocValue(goldStandard)
    # column3: XdocValue(1D)
    # column4: XdocValue(2D)
    # column5: XdocValue(goodTurning)
    inputFileName2 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/gov2DocumentWithTheirXdocValues_for_debug_ONLY_Xdoc_value_FIXED_with_internalDocID_added_20130805"
    inputFileHandler2 = open(inputFileName2,"r")
    
    # This file should contain 8 columns and should be sorted by docID:
    # column0: trecID
    # column1: docID
    # column2: num of postings recorded for each document
    # column3: doc size in words for each document
    # column4: XdocValue(goldStandard)
    # column5: XdocValue(1D)
    # column6: XdocValue(2D)
    # column7: XdocValue(goodTurning)
    # column8: XdocValue(goldStandard) / num of postings recorded for each document
    # column9: XdocValue(1D) / num of postings recorded for each document
    # column10: XdocValue(2D) / num of postings recorded for each document
    # column11: XdocValue(goodTurning) / num of postings recorded for each document
    outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/trecID_docID_numOfPostingsRecorded_DocSizeInWords_MappingTableForGov2Dataset_with_Xdoc_values_added"
    outputFileHandler = open(outputFileName,"w")    
    
    # for the file2, we need to skip the ONE line of headline
    inputFileHandler2.readline()
    
    currentLineFromFile1 = inputFileHandler1.readline()
    currentLineFromFile2 = inputFileHandler2.readline()
    
    while currentLineFromFile1:
        currentLineElementsFromFile1 = currentLineFromFile1.strip().split(" ")
        currentLineElementsFromFile2 = currentLineFromFile2.strip().split(" ")
        
        if currentLineElementsFromFile1[0] == currentLineElementsFromFile2[0] and currentLineElementsFromFile1[1] == currentLineElementsFromFile2[1]:
            outputLine = currentLineFromFile1.strip() + " "
            outputLine += currentLineElementsFromFile2[2] + " " + currentLineElementsFromFile2[3] + " " + currentLineElementsFromFile2[4] + " " + currentLineElementsFromFile2[5] + " "
            outputLine += str( float(currentLineElementsFromFile2[2]) / int(currentLineElementsFromFile1[2]) ) + " "
            outputLine += str( float(currentLineElementsFromFile2[3]) / int(currentLineElementsFromFile1[2]) ) + " "
            outputLine += str( float(currentLineElementsFromFile2[4]) / int(currentLineElementsFromFile1[2]) ) + " "
            outputLine += str( float(currentLineElementsFromFile2[5]) / int(currentLineElementsFromFile1[2]) ) + " "
            outputLine += "\n"
            outputFileHandler.write(outputLine)
        else:
            print "system error.mark1"
            exit(1)
        
        currentLineFromFile1 = inputFileHandler1.readline()
        currentLineFromFile2 = inputFileHandler2.readline()
               
    inputFileHandler1.close()
    inputFileHandler2.close()
    outputFileHandler.close()
    print "inputFileName1:",inputFileName1
    print "inputFileName2:",inputFileName2
    print "outputFileName:",outputFileName
    print "Function Ends."

# step1:
def computeTotalNumOfPostingsRecordedInCollection(recompute=False):
    """Return the total number of postings recorded in the GOV2 index.

    The full scan over the mapping table was run once and yielded
    6,451,948,010 postings, so by default the cached constant is returned
    immediately.  Pass recompute=True to re-run the scan (slow; requires
    the mapping-table file to exist) -- previously that scan was
    unreachable dead code sitting after the early return.

    Returns:
        int: the total posting count (cached, or freshly recomputed).
    """
    if not recompute:
        # One-time computation already performed; return the cached result.
        return 6451948010
    
    sys.stdout.write("Function Begins...\n")
    # Two columns per line: docID, num of postings recorded for that document.
    inputFileName = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/trecID_docID_num_of_postings_recorded_in_index_MappingTableForGov2Dataset"
    inputFileHandler = open(inputFileName,"r")
    accumulatedNumOfPostingsInCollection = 0
    lineIndex = 0
    for currentLine in inputFileHandler:
        # progress marker: one line per million documents processed
        if lineIndex % 1000000 == 0:
            sys.stdout.write("%d\n" % lineIndex)
        
        currentLineElements = currentLine.strip().split(" ")
        accumulatedNumOfPostingsInCollection += int(currentLineElements[1])
        lineIndex += 1
    inputFileHandler.close()
    
    # Note: the result should be 6,451,948,010 postings
    sys.stdout.write("lineIndex: %d\n" % lineIndex)
    sys.stdout.write("accumulatedNumOfPostingsInCollection: %d\n" % accumulatedNumOfPostingsInCollection)
    sys.stdout.write("Function Ends.\n")
    return accumulatedNumOfPostingsInCollection

print "Program Begins..."
# Pipeline driver: exactly one step is enabled at a time; the earlier
# steps are kept commented out as a record of the processing order.
# step1:
# computeTotalNumOfPostingsRecordedInCollection()

# step2_1(Use at 2013/08/08 evening during testing, maybe a little bit OLD):
# This method will help us to combine the two key components:
# (1) Num Of Postings In Each Document
# (2) Doc Size In Words In Each Document 
# into ONE file in order for us to do the sorting :)
# combineNumOfPostingsInEachDocumentANDDocSizeInWordsIntoOneFile()

# step2_2
# This method will help us to combine the new component into our so called big file:
# (1) the raw Xdoc value
# combineXdocValueForEachDocumentIntoTheMainFile()

# step3:
# computeTheUpperBoundForEachClassOfDocuments()

# step4:
# NOTE(review): this function is not defined in this part of the file --
# presumably it appears earlier; verify it exists before running.
groupTheDocumentsBasedOnSthANDComputeTheQueryHitDocumentClusterStatistics()

# The triple-quoted string below is deliberately disabled sanity-check
# code: it verified that the chosen input file is sorted in nondecreasing
# order of column index 11 (XdocValue(goodTurning) / numOfPostings) and
# exited with status 1 on the first out-of-order pair.
'''
print "--->checking logic begins..."
# option1:
# inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/trecID_docID_numOfPostingsRecorded_DocSizeInWords_MappingTableForGov2Dataset_with_Xdoc_values_added_sorted_by_XdocValueUsingGoodTurning"                
# option2:
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/trecID_docID_numOfPostingsRecorded_DocSizeInWords_MappingTableForGov2Dataset_with_Xdoc_values_added_sorted_by_XdocValueUsingGoodTurningDividedByNumOfPostingsForEachDocument"
inputFileHandler = open(inputFileName,"r")
previousValue = -1.0
currentLine = inputFileHandler.readline()
while currentLine:
    currentLineElements = currentLine.strip().split(" ")
    currentValue = float( currentLineElements[11] )
    if previousValue <= currentValue:
        previousValue = currentValue
    else:
        print "previousValue:",previousValue
        print "currentValue:",currentValue
        exit(1)
    currentLine = inputFileHandler.readline()
print "--->Pass the test"
inputFileHandler.close()
print "--->checking logic ends."
'''

print "Program Ends."

