# Updated by Wei on 2014/01/24 night 
# This is the measure for the OR semantics: the percentage of TOP10 postings that were NOT filtered out, among all the TOP10 postings.
# python /data3/obukai/workspace/web-search-engine-wei/scripts/src/pythonScripts/gov2/step10_gov2_compute_symmetric_difference.py /data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/2013AugPruningResults/goldStandardResultsForPruningProject_AND_TOP10_tail5k_qid_95984_STOP /data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/2013AugPruningResults/goldStandardResultsForPruningProject_AND_TOP10_tail5k_qid_95984_STOP 0 10 uniformPartialBM25 100 AND
from __future__ import division
import random
import math
import os
import sys
from sets import Set

def getQueryIDAndResultSetGivenResultFileForNewFormatResultFile(inputFileName1,numOfLinesStillNeededToSkip,NUM_OF_TOP_RESULTS_NEEDED_TO_EVALUATE,num_of_queries_needed_to_extract):
    print "getQueryIDAndResultSetGivenResultFileForNewFormatResultFile() function begins..."
    # Updated by Wei 2014/01/20 afternoon
    numOfTop10Postings = 0
    numOfTop10PostingsNotFiltered = 0
    
    numOfQueriesHavingQID = 0
    numOfQueriesHavingSearchContent = 0
    numOfQueriesHavingSearchResults = 0
    
    
    
    inputFileHandler1 = open(inputFileName1,"r")
    
    
    
    currentLine = inputFileHandler1.readline()
    nextLine = ""
    currentQID = ""
    
    numOfResultsForTheAllQueries = 0
    
    while currentLine:
        # sampling parsing line:
        # qid: 701
        if currentLine.startswith("qid:"):
            currentQID = int( currentLine.strip().split(":")[1].strip() )
            print "currentQID:",str( currentQID )
            
            numOfQueriesHavingQID += 1
            
            if num_of_queries_needed_to_extract == numOfQueriesHavingQID:
                break
        # sample parsing line:
        # Search: u s  oil industry history
        if currentLine.startswith("Search:"):
            elementList = currentLine.strip().split(" ")
            
            currentSearchContent = ""
            
            queryTermList = []
            for element in elementList[1:]:
                if element.strip() != "":
                    queryTermList.append( element.strip() )        
            currentQueryLength = len(queryTermList)
            print "currentQueryLength:",currentQueryLength
    
            for element in queryTermList:
                currentSearchContent += element + " "
            
            print currentSearchContent
            numOfQueriesHavingSearchContent += 1
            
            
            currentQueryTermIndexDict = {}
            nextLine = inputFileHandler1.readline()
            #print nextLine.strip()
            parsedFlag = True
            
            if nextLine.strip().endswith("is NOT in the lexicon."):
                parsedFlag = False 
            
            if nextLine.strip() != "" and parsedFlag:
                
                # skip the following sampling line: 
                # [2013-05-24 14:20:44] 38871 terms with their lists length have been loaded.
                # nextLine = inputFileHandler1.readline()
                
                nextLineElements = nextLine.strip().split(" ")
                # print "nextLineElements:",nextLineElements
                
                # sample parsing line: 
                # oil:0 industry:1 history:2 u:3 s:4
                for element in nextLineElements:
                    if element.split(":")[0] not in currentQueryTermIndexDict:
                        currentQueryTermIndexDict[ int(element.split(":")[1]) ] = element.split(":")[0]
                
                print "currentQueryTermIndexDict:",currentQueryTermIndexDict
                # print "len(currentQueryTermIndexDict):",len(currentQueryTermIndexDict)
                
                # Now, it is time to read the threshold line
                currentLine = inputFileHandler1.readline()
                if len(currentLine.strip().split(":")) == 2:
                    name = currentLine.strip().split(":")[0]
                    value = currentLine.strip().split(":")[1]
                    print name,"::",value
                else:
                    print "format problem"
                    exit(1)
                    
                # Now, it is time to read computation method line
                currentLine = inputFileHandler1.readline()
                if len(currentLine.strip().split(":")) == 2:
                    name = currentLine.strip().split(":")[0]
                    value = currentLine.strip().split(":")[1]
                    print name,"::",value
                else:
                    print "format problem"
                    exit(1)
                
                if numOfLinesStillNeededToSkip == 0:
                    # do nothing
                    pass
                elif numOfLinesStillNeededToSkip == 1:
                    # do nothing
                    currentLine = inputFileHandler1.readline()
                else:
                    # not yet consider the other situation
                    pass
                
                # skip the line like the following:
                # 3 factor probability formula logic implemented by Wei on 2013/08/02 at school    
                currentLine = inputFileHandler1.readline()
                # currentLine = inputFileHandler1.readline()
                
                # Now, it is time to read the actual training example line
                currentLine = inputFileHandler1.readline()
                numOfResultsForTheCurrentQuery = 0
                
                # for DEBUG ONLY
                # print "len( currentLine.strip().split(" ") ):",len( currentLine.strip().split(" ") )
                # print "currentLine:",currentLine
                # exit(1)
                
                lineElements = currentLine.strip().split(" ")
                
                # CURRENT version
                while len( lineElements ) == 55:
                    theRank = int(lineElements[0])
                    
                    theDocID = lineElements[-3]
                    
                    if theRank <= NUM_OF_TOP_RESULTS_NEEDED_TO_EVALUATE:
                        numOfTop10Postings += len(currentQueryTermIndexDict)
                        baseJ = -24
                        for j in range(0,len(currentQueryTermIndexDict)):
                            if lineElements[baseJ + j] == "1":
                                numOfTop10PostingsNotFiltered += 1
                        numOfResultsForTheCurrentQuery += 1
                    
                    currentLine = inputFileHandler1.readline()
                    lineElements = currentLine.strip().split(" ")                
                

                   
                numOfResultsForTheAllQueries += numOfResultsForTheCurrentQuery
                
                # sampling method 1: among all the top10 documents, select those documents, open those documents and extract the corresponding postings.
                # This method make sure that once the document have been selected, ALL the coresponding postings will also be selected.
                # This sampling method has been developed on 2013/01/09
                
                print "numOfResultsForTheCurrentQuery:",numOfResultsForTheCurrentQuery
                print
            
        currentLine = inputFileHandler1.readline()
        
    print "numOfQueriesHavingQID:",numOfQueriesHavingQID
    print "numOfQueriesHavingSearchContent:",numOfQueriesHavingSearchContent
    print "numOfQueriesHavingSearchResults:",numOfQueriesHavingSearchResults
    print "numOfTop10PostingsNotFiltered:",numOfTop10PostingsNotFiltered
    print "numOfTop10Postings: ",numOfTop10Postings
    print "getQueryIDAndResultSetGivenResultFileForNewFormatResultFile() function ends."

print "Program Begins..."
num_of_queries_needed_to_extract = 5000
numOfLinesStillNeededToSkip = 0
NUM_OF_TOP_RESULTS_NEEDED_TO_EVALUATE = 10
# ONLY deal with the OR semantics 100K query log file
# inputFileName = "/home/diaosi/web-search-engine-wei_MOVE_FROM_PANGOLIN_20131206/polyIRIndexer/2014JanPruningResults/ORSemantics/relRank20140119Night_relRank_stepFactor_point5/rawResults_1%_TOP1000_OR_20140101Night"
# inputFileName = "/home/diaosi/web-search-engine-wei_MOVE_FROM_PANGOLIN_20131206/polyIRIndexer/2014JanPruningResults/ORSemantics/relRank20140119Night_relRank_stepFactor_point5/rawResults_3%_TOP1000_OR_20140101Night"
# inputFileName = "/home/diaosi/web-search-engine-wei_MOVE_FROM_PANGOLIN_20131206/polyIRIndexer/2014JanPruningResults/ORSemantics/relRank20140119Night_relRank_stepFactor_point5/rawResults_5%_TOP1000_OR_20140101Night"
inputFileName = "/home/diaosi/web-search-engine-wei_MOVE_FROM_PANGOLIN_20131206/polyIRIndexer/2014JanPruningResults/ORSemantics/relRank20140119Night_relRank_stepFactor_point5/rawResults_10%_TOP1000_OR_20140101Night"
# inputFileName = "/home/diaosi/web-search-engine-wei_MOVE_FROM_PANGOLIN_20131206/polyIRIndexer/2014JanPruningResults/ORSemantics/relRank20140119Night_relRank_stepFactor_point5/rawResults_15%_TOP1000_OR_20140101Night"
# inputFileName = "/home/diaosi/web-search-engine-wei_MOVE_FROM_PANGOLIN_20131206/polyIRIndexer/2014JanPruningResults/ORSemantics/relRank20140119Night_relRank_stepFactor_point5/rawResults_20%_TOP1000_OR_20140101Night"
# inputFileName = "/home/diaosi/web-search-engine-wei_MOVE_FROM_PANGOLIN_20131206/polyIRIndexer/2014JanPruningResults/ORSemantics/relRank20140119Night_relRank_stepFactor_point5/rawResults_30%_TOP1000_OR_20140101Night"
# inputFileName = "/home/diaosi/web-search-engine-wei_MOVE_FROM_PANGOLIN_20131206/polyIRIndexer/2014JanPruningResults/ORSemantics/relRank20140119Night_relRank_stepFactor_point5/rawResults_50%_TOP1000_OR_20140101Night"
getQueryIDAndResultSetGivenResultFileForNewFormatResultFile(inputFileName,numOfLinesStillNeededToSkip,NUM_OF_TOP_RESULTS_NEEDED_TO_EVALUATE,num_of_queries_needed_to_extract)
print "Program Ends."

















