# Updated by Wei on 2014/06/12 at school
# attempt to fit for the clueweb09B dataset
# Updated by Wei on 2014/01/08 at school
from __future__ import division
import random
import math
import os
import sys
from sets import Set


def getQueryIDAndResultSetGivenResultFileForNewFormatResultFile(inputFileHandler,outputFileHandler,numOfLinesStillNeededToSkip,NUM_OF_TOP_RESULTS_NEEDED_TO_EVALUATE):
    """Parse one "new format" raw result file and append per-posting training
    records to the output file.

    The input is consumed as a line-oriented state machine:
      * a line starting with "qid:" carries the current query id;
      * a line starting with "Search:" carries the query terms; the line
        immediately after it maps each term to a column index
        (e.g. "oil:0 industry:1 history:2"), then after an optional skipped
        header line come the result lines, one per retrieved document, each
        with exactly 34 space-separated fields.

    For every result whose rank is <= NUM_OF_TOP_RESULTS_NEEDED_TO_EVALUATE,
    one line per query term (capped at 10 terms) is written to
    outputFileHandler in the form:
        <qid> <term> <docID> <postingRank> <postingListLength> <postingScore>

    Parameters:
      inputFileHandler  -- open readable handle over the raw result file
      outputFileHandler -- open writable handle; records are appended
      numOfLinesStillNeededToSkip -- 0 or 1: number of header lines between
            the term-index line and the first result line (any other value
            is silently treated like 0; see the if/elif below)
      NUM_OF_TOP_RESULTS_NEEDED_TO_EVALUATE -- rank cutoff for recording

    Returns the number of posting records written for this file.
    """
    print "getQueryIDAndResultSetGivenResultFileForNewFormatResultFile() function begins..."
    num_of_postings_recorded = 0
    
    # per-file statistics, reported just before returning
    numOfQueriesHavingQID = 0
    numOfQueriesHavingSearchContent = 0
    # NOTE(review): numOfQueriesHavingSearchResults is printed at the end but
    # never incremented anywhere in this function -- it always reports 0.
    numOfQueriesHavingSearchResults = 0
    
    currentLine = inputFileHandler.readline()
    nextLine = ""
    currentQID = ""
    
    numOfResultsForTheAllQueries = 0
    
    while currentLine:
        # sampling parsing line:
        # qid: 701
        if currentLine.startswith("qid:"):
            currentQID = int( currentLine.strip().split(":")[1].strip() )
            print "currentQID:",str( currentQID )
            numOfQueriesHavingQID += 1
        
        # sample parsing line:
        # Search: u s  oil industry history
        if currentLine.startswith("Search:"):
            elementList = currentLine.strip().split(" ")
            
            currentSearchContent = ""
            
            # collect the non-empty tokens after "Search:" (splitting on a
            # single space can yield "" for runs of spaces, hence the filter)
            queryTermList = []
            for element in elementList[1:]:
                if element.strip() != "":
                    queryTermList.append( element.strip() )        
            currentQueryLength = len(queryTermList)
            print "currentQueryLength:",currentQueryLength
    
            for element in queryTermList:
                currentSearchContent += element + " "
            
            print currentSearchContent
            numOfQueriesHavingSearchContent += 1
            
            
            # maps term index (int) -> term string, filled from the line that
            # immediately follows the "Search:" line
            currentQueryTermIndexDict = {}
            nextLine = inputFileHandler.readline()
            #print nextLine.strip()
            parsedFlag = True
            
            # a term missing from the lexicon aborts parsing of this query
            if nextLine.strip().endswith("is NOT in the lexicon."):
                parsedFlag = False 
            
            if nextLine.strip() != "" and parsedFlag:
                
                # skip the following sampling line: 
                # [2013-05-24 14:20:44] 38871 terms with their lists length have been loaded.
                # nextLine = inputFileHandler.readline()
                
                nextLineElements = nextLine.strip().split(" ")
                # print "nextLineElements:",nextLineElements
                
                # sample parsing line: 
                # oil:0 industry:1 history:2 u:3 s:4
                for element in nextLineElements:
                    # NOTE(review): this membership test compares the term
                    # string against the dict's *keys*, but the keys inserted
                    # below are the integer indices -- so the test is
                    # effectively always True and duplicate terms are not
                    # actually filtered. Confirm whether the intent was to
                    # test against the dict's values instead.
                    if element.split(":")[0] not in currentQueryTermIndexDict:
                        currentQueryTermIndexDict[ int(element.split(":")[1]) ] = element.split(":")[0]
                
                print "currentQueryTermIndexDict:",currentQueryTermIndexDict
                # print "len(currentQueryTermIndexDict):",len(currentQueryTermIndexDict)
                
                '''
                # Now, it is time to read the threshold line
                currentLine = inputFileHandler.readline()
                if len(currentLine.strip().split(":")) == 2:
                    name = currentLine.strip().split(":")[0]
                    value = currentLine.strip().split(":")[1]
                    print name,"::",value
                else:
                    print "format problem"
                    exit(1)
                
                    
                # Now, it is time to read computation method line
                currentLine = inputFileHandler.readline()
                if len(currentLine.strip().split(":")) == 2:
                    name = currentLine.strip().split(":")[0]
                    value = currentLine.strip().split(":")[1]
                    print name,"::",value
                else:
                    print "format problem"
                    exit(1)
                '''
                
                if numOfLinesStillNeededToSkip == 0:
                    # do nothing
                    pass
                elif numOfLinesStillNeededToSkip == 1:
                    # consume exactly one header line before the result lines
                    currentLine = inputFileHandler.readline()
                else:
                    # not yet consider the other situation
                    pass
                
                # skip the line like the following:
                # 3 factor probability formula logic implemented by Wei on 2013/08/02 at school    
                # currentLine = inputFileHandler.readline()
                
                # Now, it is time to read the actual training example line
                currentLine = inputFileHandler.readline()
                numOfResultsForTheCurrentQuery = 0
                currentResultLines = []
                # for DEBUG ONLY
                # print "len( currentLine.strip().split(" ") ):",len( currentLine.strip().split(" ") )
                # print "currentLine:",currentLine
                # exit(1)
                
                # A valid result line has exactly 34 space-separated fields:
                # rank, 10 posting scores, 10 posting ranks, 10 posting-list
                # lengths, and a tail whose second-to-last field is the docID.
                # Any other field count terminates this query's result block.
                lineElements = currentLine.strip().split(" ")
                while len( lineElements ) == 34:
                    theRank = int(lineElements[0])
                    # stepGap is the width of each 10-field group within the line
                    stepGap = 10
                    # fields 1..10: posting scores (kept as strings)
                    postingScoreList = []
                    postingScoreList.append( lineElements[stepGap * 0 + 1] )
                    postingScoreList.append( lineElements[stepGap * 0 + 2] )
                    postingScoreList.append( lineElements[stepGap * 0 + 3] )
                    postingScoreList.append( lineElements[stepGap * 0 + 4] )
                    postingScoreList.append( lineElements[stepGap * 0 + 5] )
                    postingScoreList.append( lineElements[stepGap * 0 + 6] )
                    postingScoreList.append( lineElements[stepGap * 0 + 7] )
                    postingScoreList.append( lineElements[stepGap * 0 + 8] )
                    postingScoreList.append( lineElements[stepGap * 0 + 9] )
                    postingScoreList.append( lineElements[stepGap * 0 + 10] )
                                   
                    # fields 11..20: posting ranks
                    postingRankList = []
                    postingRankList.append( int(lineElements[stepGap * 1 + 1]) )
                    postingRankList.append( int(lineElements[stepGap * 1 + 2]) )
                    postingRankList.append( int(lineElements[stepGap * 1 + 3]) )
                    postingRankList.append( int(lineElements[stepGap * 1 + 4]) )
                    postingRankList.append( int(lineElements[stepGap * 1 + 5]) )
                    postingRankList.append( int(lineElements[stepGap * 1 + 6]) )
                    postingRankList.append( int(lineElements[stepGap * 1 + 7]) )
                    postingRankList.append( int(lineElements[stepGap * 1 + 8]) )
                    postingRankList.append( int(lineElements[stepGap * 1 + 9]) )
                    postingRankList.append( int(lineElements[stepGap * 1 + 10]) )
                    
                    # fields 21..30: posting-list lengths
                    postingListOfLengthList = []
                    postingListOfLengthList.append( int(lineElements[stepGap * 2 + 1]) )
                    postingListOfLengthList.append( int(lineElements[stepGap * 2 + 2]) )
                    postingListOfLengthList.append( int(lineElements[stepGap * 2 + 3]) )
                    postingListOfLengthList.append( int(lineElements[stepGap * 2 + 4]) )
                    postingListOfLengthList.append( int(lineElements[stepGap * 2 + 5]) )
                    postingListOfLengthList.append( int(lineElements[stepGap * 2 + 6]) )
                    postingListOfLengthList.append( int(lineElements[stepGap * 2 + 7]) )
                    postingListOfLengthList.append( int(lineElements[stepGap * 2 + 8]) )
                    postingListOfLengthList.append( int(lineElements[stepGap * 2 + 9]) )
                    postingListOfLengthList.append( int(lineElements[stepGap * 2 + 10]) )
                    
                    theDocID = lineElements[-2]
                    if theRank <= NUM_OF_TOP_RESULTS_NEEDED_TO_EVALUATE:
                        # record one line per query term; the per-line field
                        # groups hold at most 10 slots, so cap at 10 terms
                        upperBound = 0
                        if len(currentQueryTermIndexDict) > 10:
                            upperBound = 10
                        else:
                            upperBound = len(currentQueryTermIndexDict)
                        
                        for k in range(0,upperBound):
                            outputLine = str(currentQID) + " " + str(currentQueryTermIndexDict[k]) + " " + str(theDocID) + " " + str( postingRankList[k] ) + " " + str( postingListOfLengthList[k] ) + " " + str( postingScoreList[k] ) + "\n"
                            outputFileHandler.write(outputLine)
                            num_of_postings_recorded += 1
                        
                    numOfResultsForTheCurrentQuery += 1
                    currentResultLines.append(currentLine.strip())
                    currentLine = inputFileHandler.readline()
                    lineElements = currentLine.strip().split(" ")
                   
                numOfResultsForTheAllQueries += numOfResultsForTheCurrentQuery
                
                # sampling method 1: among all the top10 documents, select those documents, open those documents and extract the corresponding postings.
                # This method make sure that once the document have been selected, ALL the coresponding postings will also be selected.
                # This sampling method has been developed on 2013/01/09
                
                print "numOfResultsForTheCurrentQuery:",numOfResultsForTheCurrentQuery
                print "len(currentResultLines):",len(currentResultLines)
                print
            
        currentLine = inputFileHandler.readline()
        
    print "numOfQueriesHavingQID: ",numOfQueriesHavingQID
    print "numOfQueriesHavingSearchContent: ",numOfQueriesHavingSearchContent
    print "numOfQueriesHavingSearchResults: ",numOfQueriesHavingSearchResults
    print "num_of_postings_recorded: ",num_of_postings_recorded
    print "getQueryIDAndResultSetGivenResultFileForNewFormatResultFile() function ends."
    return num_of_postings_recorded

print "Program Begins..."
# The two triple-quoted blocks below are dead code kept for reference only:
# they hold the input-path configurations previously used for the gov2 dataset
# on the servers "dodo" and "moa".  Only the clueweb09B / vidaserver1 paths
# defined after them are live.
'''
# for gov2 dataset
# on the server dodo
# debug options:
# inputFileNameForDebug1 = "/home/diaosi/web-search-engine-wei_MOVE_FROM_PANGOLIN_20131206/polyIRIndexer/2014JanPruningResults/ANDSemantics/rawResults_100%_TOP1000_AND_20140101Night_xaa_DEBUG"
# inputFileNameForDebug2 = "/home/diaosi/web-search-engine-wei_MOVE_FROM_PANGOLIN_20131206/polyIRIndexer/2014JanPruningResults/ANDSemantics/rawResults_100%_TOP1000_AND_20140101Night_xab_DEBUG"
inputFileName1 = "/home/diaosi/web-search-engine-wei_MOVE_FROM_PANGOLIN_20131206/polyIRIndexer/2014JanPruningResults/ANDSemantics/rawResults_100%_TOP1000_AND_20140101Night_xaa"
inputFileName2 = "/home/diaosi/web-search-engine-wei_MOVE_FROM_PANGOLIN_20131206/polyIRIndexer/2014JanPruningResults/ANDSemantics/rawResults_100%_TOP1000_AND_20140101Night_xab"
inputFileName3 = "/home/diaosi/web-search-engine-wei_MOVE_FROM_PANGOLIN_20131206/polyIRIndexer/2014JanPruningResults/ANDSemantics/rawResults_100%_TOP1000_AND_20140101Night_xac"
inputFileName4 = "/home/diaosi/web-search-engine-wei_MOVE_FROM_PANGOLIN_20131206/polyIRIndexer/2014JanPruningResults/ANDSemantics/rawResults_100%_TOP1000_AND_20140101Night_xad"
inputFileName5 = "/home/diaosi/web-search-engine-wei_MOVE_FROM_PANGOLIN_20131206/polyIRIndexer/2014JanPruningResults/ANDSemantics/rawResults_100%_TOP1000_AND_20140101Night_xae"
inputFileName6 = "/home/diaosi/web-search-engine-wei_MOVE_FROM_PANGOLIN_20131206/polyIRIndexer/2014JanPruningResults/ANDSemantics/rawResults_100%_TOP1000_AND_20140101Night_xaf_Part1"
inputFileName7 = "/home/diaosi/web-search-engine-wei_MOVE_FROM_PANGOLIN_20131206/polyIRIndexer/2014JanPruningResults/ANDSemantics/rawResults_100%_TOP1000_AND_20140101Night_xaf_Part2"
inputFileName8 = "/home/diaosi/web-search-engine-wei_MOVE_FROM_PANGOLIN_20131206/polyIRIndexer/2014JanPruningResults/ANDSemantics/rawResults_100%_TOP1000_AND_20140101Night_xag_Part1"
inputFileName9 = "/home/diaosi/web-search-engine-wei_MOVE_FROM_PANGOLIN_20131206/polyIRIndexer/2014JanPruningResults/ANDSemantics/rawResults_100%_TOP1000_AND_20140101Night_xag_Part2"
inputFileName10 = "/home/diaosi/web-search-engine-wei_MOVE_FROM_PANGOLIN_20131206/polyIRIndexer/2014JanPruningResults/ANDSemantics/rawResults_100%_TOP1000_AND_20140101Night_xah_Part1"
inputFileName11 = "/home/diaosi/web-search-engine-wei_MOVE_FROM_PANGOLIN_20131206/polyIRIndexer/2014JanPruningResults/ANDSemantics/rawResults_100%_TOP1000_AND_20140101Night_xah_Part2"
inputFileName12 = "/home/diaosi/web-search-engine-wei_MOVE_FROM_PANGOLIN_20131206/polyIRIndexer/2014JanPruningResults/ANDSemantics/rawResults_100%_TOP1000_AND_20140101Night_xai_Part1"
inputFileName13 = "/home/diaosi/web-search-engine-wei_MOVE_FROM_PANGOLIN_20131206/polyIRIndexer/2014JanPruningResults/ANDSemantics/rawResults_100%_TOP1000_AND_20140101Night_xai_Part2"
inputFileName14 = "/home/diaosi/web-search-engine-wei_MOVE_FROM_PANGOLIN_20131206/polyIRIndexer/2014JanPruningResults/ANDSemantics/rawResults_100%_TOP1000_AND_20140101Night_xaj_Part1"
inputFileName15 = "/home/diaosi/web-search-engine-wei_MOVE_FROM_PANGOLIN_20131206/polyIRIndexer/2014JanPruningResults/ANDSemantics/rawResults_100%_TOP1000_AND_20140101Night_xaj_Part2"
'''

'''
# for gov2 dataset
# on the server moa
# debug options:
# inputFileNameForDebug1 = "/home/diaosi/workspace/web-search-engine-wei-2014-April/results/rawResults_100%_TOP1000_AND_20140101Night_xaa_DEBUG"
# inputFileNameForDebug2 = "/home/diaosi/workspace/web-search-engine-wei-2014-April/results/rawResults_100%_TOP1000_AND_20140101Night_xab_DEBUG"
# [1,10000]
inputFileName1 = "/home/diaosi/workspace/web-search-engine-wei-2014-April/results/rawResults_100%_TOP1000_AND_20140101Night_xaa"
# [10001,20000]
inputFileName2 = "/home/diaosi/workspace/web-search-engine-wei-2014-April/results/rawResults_100%_TOP1000_AND_20140101Night_xab"
# [20000,30000]
inputFileName3 = "/home/diaosi/workspace/web-search-engine-wei-2014-April/results/rawResults_100%_TOP1000_AND_20140101Night_xac"
# [30001,40000]
inputFileName4 = "/home/diaosi/workspace/web-search-engine-wei-2014-April/results/rawResults_100%_TOP1000_AND_20140101Night_xad"
# [40001,50000]
inputFileName5 = "/home/diaosi/workspace/web-search-engine-wei-2014-April/results/rawResults_100%_TOP1000_AND_20140101Night_xae"
# [50001,54361]
inputFileName6 = "/home/diaosi/workspace/web-search-engine-wei-2014-April/results/rawResults_100%_TOP1000_AND_20140101Night_xaf_Part1"
# [54362,60000]
inputFileName7 = "/home/diaosi/workspace/web-search-engine-wei-2014-April/results/rawResults_100%_TOP1000_AND_20140101Night_xaf_Part2"
# [60001,64394]
inputFileName8 = "/home/diaosi/workspace/web-search-engine-wei-2014-April/results/rawResults_100%_TOP1000_AND_20140101Night_xag_Part1"
# [64395,70000]
inputFileName9 = "/home/diaosi/workspace/web-search-engine-wei-2014-April/results/rawResults_100%_TOP1000_AND_20140101Night_xag_Part2"
# [70001,74644]
inputFileName10 = "/home/diaosi/workspace/web-search-engine-wei-2014-April/results/rawResults_100%_TOP1000_AND_20140101Night_xah_Part1"
# [74645,80000]
inputFileName11 = "/home/diaosi/workspace/web-search-engine-wei-2014-April/results/rawResults_100%_TOP1000_AND_20140101Night_xah_Part2"
# [80001,84532]
inputFileName12 = "/home/diaosi/workspace/web-search-engine-wei-2014-April/results/rawResults_100%_TOP1000_AND_20140101Night_xai_Part1"
# [84533,90000]
inputFileName13 = "/home/diaosi/workspace/web-search-engine-wei-2014-April/results/rawResults_100%_TOP1000_AND_20140101Night_xai_Part2"
# [90001,94242]
inputFileName14 = "/home/diaosi/workspace/web-search-engine-wei-2014-April/results/rawResults_100%_TOP1000_AND_20140101Night_xaj_Part1"
# [94243,95000]
inputFileName15 = "/home/diaosi/workspace/web-search-engine-wei-2014-April/results/rawResults_100%_TOP1000_AND_20140101Night_xaj_Part2"
'''

# for the clueweb09B dataset
# for vidaserver1
# The raw result file was split into 20 sequentially numbered parts,
# ..._Part00 through ..._Part19.  Instead of 20 hand-written assignments plus
# 20 hand-written append calls, generate the ordered path list from the
# shared template.
_CLUEWEB09B_PART_PATH_TEMPLATE = (
    "/local_scratch/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/"
    "rawResults_clueweb09B_2DProbablityTableTraining_20140612_Part%02d"
)
NUM_OF_INPUT_PARTS = 20

# ordered list of raw result part files to process
inputFileList = [ _CLUEWEB09B_PART_PATH_TEMPLATE % partIndex for partIndex in range(NUM_OF_INPUT_PARTS) ]

NUM_OF_LINES_NEEDED_TO_SKIP = 0
TOPKValue = 100

# for moa
# outputFileName = "/home/diaosi/workspace/web-search-engine-wei-2014-April/results/TOP100PostingsWithRanksFrom95K_20140518"
# for vidaserver1
outputFileName = "/local_scratch/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/TOP100PostingsWithRanksANDScores_clueweb09B_trainingQueries_20140612"
outputFileHandler = open(outputFileName,"w")

numOfTrainingPostingsRecorded = 0
# 3 arguments:
# (1) inputFileName
# (2) numOfLinesStillNeededToSkip
# (3) NUM_OF_TOP_RESULTS_NEEDED_TO_EVALUATE
for currentInputFileName in inputFileList:
    inputFileHandler = open(currentInputFileName,"r")
    # continue to write to the disk
    numOfTrainingPostingsRecorded += getQueryIDAndResultSetGivenResultFileForNewFormatResultFile(inputFileHandler,outputFileHandler,NUM_OF_LINES_NEEDED_TO_SKIP,TOPKValue)
    print "inputFileName: ",currentInputFileName,"DONE"
outputFileHandler.close()


print "Overall Processing Statistics:"
print "outputFileName: ",outputFileName
print "# Of Training Postings Recorded:",numOfTrainingPostingsRecorded
print "Program Ends."

















