# Updated by Wei 2013/08/08 morning
# Note: sample command:
# python /data3/obukai/workspace/web-search-engine-wei/scripts/src/pythonScripts/gov2/step10_gov2_compute_symmetric_difference.py 
# /data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/2013AugPruningResults/goldStandardResultsForPruningProject_AND_TOP10_tail5k 
# /data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/2013AugPruningResults/goldStandardResultsForPruningProject_AND_TOP10_tail5k 
# NUM_OF_LINES_NEEDED_TO_SKIP : 0 
# NUM_OF_TOP_RESULTS_FOR_EVALUATION : 10 
# PRUNING_METHOD : uniformPartialBM25 
# PERCENTAGE_KEPT : 100 
# CONDITION : AND

# python /data3/obukai/workspace/web-search-engine-wei/scripts/src/pythonScripts/gov2/step10_gov2_compute_symmetric_difference.py /data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/2013AugPruningResults/goldStandardResultsForPruningProject_AND_TOP10_tail5k_qid_95984_STOP /data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/2013AugPruningResults/goldStandardResultsForPruningProject_AND_TOP10_tail5k_qid_95984_STOP 0 10 uniformPartialBM25 100 AND
from __future__ import division
import random
import math
import os
import sys
from sets import Set
# Updated by Wei 2013/08/04 night at school
# optimized for the 3-factor probabilities formula

# Updated by Wei 20130621
# Basically this time, use this program in the machine pangolin to produce some results

# Updated by Wei 20130525
# some raw results produced by the irtk

# Program inputs:
# gold standard file
# inputFileName1 = ""

# compared file
# inputFileName2 = ""

# Program outputs:
# the symmetric_difference

def getQueryIDAndResultSetGivenResultFileForNewFormatResultFile(inputFileName1,numOfLinesStillNeededToSkip,NUM_OF_TOP_RESULTS_NEEDED_TO_EVALUATE):
    print "getQueryIDAndResultSetGivenResultFileForNewFormatResultFile() function begins..."
    
    numOfQueriesHavingQID = 0
    numOfQueriesHavingSearchContent = 0
    numOfQueriesHavingSearchResults = 0
    
    # key:queryID value:a list of tuples with the format (rank,docID)
    insideQueryIDAndResultSetDict = {}
    
    inputFileHandler1 = open(inputFileName1,"r")
    
    
    
    currentLine = inputFileHandler1.readline()
    nextLine = ""
    currentQID = ""
    
    numOfResultsForTheAllQueries = 0
    
    while currentLine:
        # sampling parsing line:
        # qid: 701
        if currentLine.startswith("qid:"):
            currentQID = int( currentLine.strip().split(":")[1].strip() )
            print "currentQID:",str( currentQID )
            if currentQID not in insideQueryIDAndResultSetDict:
                insideQueryIDAndResultSetDict[currentQID] = Set([])
            numOfQueriesHavingQID += 1
        
        # sample parsing line:
        # Search: u s  oil industry history
        if currentLine.startswith("Search:"):
            elementList = currentLine.strip().split(" ")
            
            currentSearchContent = ""
            
            queryTermList = []
            for element in elementList[1:]:
                if element.strip() != "":
                    queryTermList.append( element.strip() )        
            currentQueryLength = len(queryTermList)
            print "currentQueryLength:",currentQueryLength
    
            for element in queryTermList:
                currentSearchContent += element + " "
            
            print currentSearchContent
            numOfQueriesHavingSearchContent += 1
            
            
            currentQueryTermIndexDict = {}
            nextLine = inputFileHandler1.readline()
            #print nextLine.strip()
            parsedFlag = True
            
            if nextLine.strip().endswith("is NOT in the lexicon."):
                parsedFlag = False 
            
            if nextLine.strip() != "" and parsedFlag:
                
                # skip the following sampling line: 
                # [2013-05-24 14:20:44] 38871 terms with their lists length have been loaded.
                # nextLine = inputFileHandler1.readline()
                
                nextLineElements = nextLine.strip().split(" ")
                # print "nextLineElements:",nextLineElements
                
                # sample parsing line: 
                # oil:0 industry:1 history:2 u:3 s:4
                for element in nextLineElements:
                    if element.split(":")[0] not in currentQueryTermIndexDict:
                        currentQueryTermIndexDict[ int(element.split(":")[1]) ] = element.split(":")[0]
                
                print "currentQueryTermIndexDict:",currentQueryTermIndexDict
                # print "len(currentQueryTermIndexDict):",len(currentQueryTermIndexDict)
                
                # Now, it is time to read the threshold line
                currentLine = inputFileHandler1.readline()
                if len(currentLine.strip().split(":")) == 2:
                    name = currentLine.strip().split(":")[0]
                    value = currentLine.strip().split(":")[1]
                    print name,"::",value
                else:
                    print "format problem"
                    exit(1)
                    
                # Now, it is time to read computation method line
                currentLine = inputFileHandler1.readline()
                if len(currentLine.strip().split(":")) == 2:
                    name = currentLine.strip().split(":")[0]
                    value = currentLine.strip().split(":")[1]
                    print name,"::",value
                else:
                    print "format problem"
                    exit(1)
                
                if numOfLinesStillNeededToSkip == 0:
                    # do nothing
                    pass
                elif numOfLinesStillNeededToSkip == 1:
                    # do nothing
                    currentLine = inputFileHandler1.readline()
                else:
                    # not yet consider the other situation
                    pass
                
                # skip the line like the following:
                # 3 factor probability formula logic implemented by Wei on 2013/08/02 at school    
                currentLine = inputFileHandler1.readline()
                
                # Now, it is time to read the actual training example line
                currentLine = inputFileHandler1.readline()
                numOfResultsForTheCurrentQuery = 0
                currentResultLines = []
                # for DEBUG ONLY
                # print "len( currentLine.strip().split(" ") ):",len( currentLine.strip().split(" ") )
                # print "currentLine:",currentLine
                # exit(1)
                lineElements = currentLine.strip().split(" ")
                while len( lineElements ) == 14:
                    theRank = int(lineElements[0])
                    theDocID = lineElements[-2]
                    if theRank <= NUM_OF_TOP_RESULTS_NEEDED_TO_EVALUATE:
                        insideQueryIDAndResultSetDict[currentQID].add( theDocID )
                    
                    numOfResultsForTheCurrentQuery += 1
                    currentResultLines.append(currentLine.strip())
                    currentLine = inputFileHandler1.readline()
                    lineElements = currentLine.strip().split(" ")
                   
                numOfResultsForTheAllQueries += numOfResultsForTheCurrentQuery
                
                # sampling method 1: among all the top10 documents, select those documents, open those documents and extract the corresponding postings.
                # This method make sure that once the document have been selected, ALL the coresponding postings will also be selected.
                # This sampling method has been developed on 2013/01/09
                
                print "numOfResultsForTheCurrentQuery:",numOfResultsForTheCurrentQuery
                print "len(currentResultLines):",len(currentResultLines)
                print
            
        currentLine = inputFileHandler1.readline()
        
    print "numOfQueriesHavingQID:",numOfQueriesHavingQID
    print "numOfQueriesHavingSearchContent:",numOfQueriesHavingSearchContent
    print "numOfQueriesHavingSearchResults:",numOfQueriesHavingSearchResults
    print "len(insideQueryIDAndResultSetDict):",len(insideQueryIDAndResultSetDict)
    print "getQueryIDAndResultSetGivenResultFileForNewFormatResultFile() function ends."
    return insideQueryIDAndResultSetDict
    '''
    for qid in insideQueryIDAndResultSetDict:
        if qid == "99956":
            for TOP10DocID in insideQueryIDAndResultSetDict[qid]:
                print TOP10DocID
    '''


def getQueryIDAndResultSetGivenResultFileForGoldStandardResultFile(inputFileName1,numOfLinesStillNeededToSkip,NUM_OF_TOP_RESULTS_NEEDED_TO_EVALUATE):
    print "getQueryIDAndResultSetGivenResultFileForGoldStandardResultFile() function begins..."
    numOfQueriesHavingQID = 0
    numOfQueriesHavingSearchContent = 0
    numOfQueriesHavingSearchResults = 0
    
    # key:queryID value:a list of tuples with the format (rank,docID)
    insideQueryIDAndResultSetDict = {}
    
    inputFileHandler1 = open(inputFileName1,"r")
    
    
    
    currentLine = inputFileHandler1.readline()
    nextLine = ""
    currentQID = ""
    
    numOfResultsForTheAllQueries = 0
    
    while currentLine:
        # sampling parsing line:
        # qid: 701
        if currentLine.startswith("qid:"):
            currentQID = int( currentLine.strip().split(":")[1].strip() )
            print "currentQID:",str( currentQID )
            if currentQID not in insideQueryIDAndResultSetDict:
                insideQueryIDAndResultSetDict[currentQID] = Set([])
            numOfQueriesHavingQID += 1
        
        # sample parsing line:
        # Search: u s  oil industry history
        if currentLine.startswith("Search:"):
            elementList = currentLine.strip().split(" ")
            
            currentSearchContent = ""
            
            queryTermList = []
            for element in elementList[1:]:
                if element.strip() != "":
                    queryTermList.append( element.strip() )        
            currentQueryLength = len(queryTermList)
            print "currentQueryLength:",currentQueryLength
    
            for element in queryTermList:
                currentSearchContent += element + " "
            
            print currentSearchContent
            numOfQueriesHavingSearchContent += 1
            
            
            currentQueryTermIndexDict = {}
            nextLine = inputFileHandler1.readline()
            # print nextLine.strip()
            parsedFlag = True
            
            if nextLine.strip().endswith("is NOT in the lexicon."):
                parsedFlag = False
            
            if nextLine.strip() != "" and parsedFlag:
                
                # skip the following sampling line: 
                # [2013-05-24 14:20:44] 38871 terms with their lists length have been loaded.
                # nextLine = inputFileHandler1.readline()
                
                nextLineElements = nextLine.strip().split(" ")
                # print "nextLineElements:",nextLineElements
                
                # sample parsing line: 
                # oil:0 industry:1 history:2 u:3 s:4
                for element in nextLineElements:
                    if element.split(":")[0] not in currentQueryTermIndexDict:
                        currentQueryTermIndexDict[ int(element.split(":")[1]) ] = element.split(":")[0]
                
                print "currentQueryTermIndexDict:",currentQueryTermIndexDict
                # print "len(currentQueryTermIndexDict):",len(currentQueryTermIndexDict)
                
                # Now, it is time to read the threshold line
                currentLine = inputFileHandler1.readline()
                if len(currentLine.strip().split(":")) == 2:
                    name = currentLine.strip().split(":")[0]
                    value = currentLine.strip().split(":")[1]
                    print name,"::",value
                else:
                    print "format problem"
                    exit(1)
                    
                # Now, it is time to read computation method line
                currentLine = inputFileHandler1.readline()
                if len(currentLine.strip().split(":")) == 2:
                    name = currentLine.strip().split(":")[0]
                    value = currentLine.strip().split(":")[1]
                    print name,"::",value
                else:
                    print "format problem"
                    exit(1)
                
                if numOfLinesStillNeededToSkip == 0:
                    # do nothing
                    pass
                elif numOfLinesStillNeededToSkip == 1:
                    # do nothing
                    currentLine = inputFileHandler1.readline()
                else:
                    # not yet consider the other situation
                    pass
                
                # skip the line like the following:
                # 3 factor probability formula logic implemented by Wei on 2013/08/02 at school    
                currentLine = inputFileHandler1.readline()
                
                # Now, it is time to read the actual training example line
                currentLine = inputFileHandler1.readline()
                numOfResultsForTheCurrentQuery = 0
                currentResultLines = []
                # for DEBUG ONLY
                # print "len( currentLine.strip().split(" ") ):",len( currentLine.strip().split(" ") )
                # print "currentLine:",currentLine
                # exit(1)
                lineElements = currentLine.strip().split(" ")
                while len( lineElements ) == 14:
                    theRank = int(lineElements[0])
                    theDocID = lineElements[-2]
                    if theRank <= NUM_OF_TOP_RESULTS_NEEDED_TO_EVALUATE:
                        insideQueryIDAndResultSetDict[currentQID].add( theDocID )
                    
                    numOfResultsForTheCurrentQuery += 1
                    currentResultLines.append(currentLine.strip())
                    currentLine = inputFileHandler1.readline()
                    lineElements = currentLine.strip().split(" ")
                   
                numOfResultsForTheAllQueries += numOfResultsForTheCurrentQuery
                
                # sampling method 1: among all the top10 documents, select those documents, open those documents and extract the corresponding postings.
                # This method make sure that once the document have been selected, ALL the coresponding postings will also be selected.
                # This sampling method has been developed on 2013/01/09
                
                print "numOfResultsForTheCurrentQuery:",numOfResultsForTheCurrentQuery
                print "len(currentResultLines):",len(currentResultLines)
                print
            
        currentLine = inputFileHandler1.readline()
        
    print "numOfQueriesHavingQID:",numOfQueriesHavingQID
    print "numOfQueriesHavingSearchContent:",numOfQueriesHavingSearchContent
    print "numOfQueriesHavingSearchResults:",numOfQueriesHavingSearchResults
    print "len(insideQueryIDAndResultSetDict):",len(insideQueryIDAndResultSetDict)
    # for debug ONLY
    # print "insideQueryIDAndResultSetDict[0]:",insideQueryIDAndResultSetDict[0]
    print "insideQueryIDAndResultSetDict[95085]:",insideQueryIDAndResultSetDict[95085]
    print "insideQueryIDAndResultSetDict[95086]:",insideQueryIDAndResultSetDict[95086]
    print "getQueryIDAndResultSetGivenResultFileForGoldStandardResultFile() function ends."
    return insideQueryIDAndResultSetDict


print "Program Begins..."
# results being used for 2013/06/21
# gold_standard_file_name1
# "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/outputDirForResults/tail5KTestingQueries/ANDSemantics/TOP100Results/GoldStandardResults_2013_JUNE/5KTop100ANDSemanticsResults_UNIVERSAL_1.0Kept_probability_TRUE_PROBABILITY_2013_JUNE"
# "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/outputDirForResults/tail5KTestingQueries/ANDSemantics/TOP100Results/GoldStandardResults_2013_JUNE/5KTop100ANDSemanticsResults_UNIVERSAL_1.0Kept_probability_OUR_1D_2013_JUNE"

# need_to_compare_file_name2
# "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/outputDirForResults/tail5KTestingQueries/ANDSemantics/TOP100Results/UNIVERSAL_PROBABILITY_based_on_True_Query_Term_probability_2013_JUNE/5KTop100ANDSemanticsResults_UNIVERSAL_0.01Kept_probability_TRUE_PROBABILITY_2013_JUNE"
# "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/outputDirForResults/tail5KTestingQueries/ANDSemantics/TOP100Results/UNIVERSAL_PROBABILITY_based_on_True_Query_Term_probability_2013_JUNE/5KTop100ANDSemanticsResults_UNIVERSAL_0.05Kept_probability_TRUE_PROBABILITY_2013_JUNE"
# "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/outputDirForResults/tail5KTestingQueries/ANDSemantics/TOP100Results/UNIVERSAL_PROBABILITY_based_on_True_Query_Term_probability_2013_JUNE/5KTop100ANDSemanticsResults_UNIVERSAL_0.1Kept_probability_TRUE_PROBABILITY_2013_JUNE"
# "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/outputDirForResults/tail5KTestingQueries/ANDSemantics/TOP100Results/UNIVERSAL_PROBABILITY_based_on_True_Query_Term_probability_2013_JUNE/5KTop100ANDSemanticsResults_UNIVERSAL_0.2Kept_probability_TRUE_PROBABILITY_2013_JUNE"
# "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/outputDirForResults/tail5KTestingQueries/ANDSemantics/TOP100Results/UNIVERSAL_PROBABILITY_based_on_True_Query_Term_probability_2013_JUNE/5KTop100ANDSemanticsResults_UNIVERSAL_0.3Kept_probability_TRUE_PROBABILITY_2013_JUNE"
# "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/outputDirForResults/tail5KTestingQueries/ANDSemantics/TOP100Results/UNIVERSAL_PROBABILITY_based_on_True_Query_Term_probability_2013_JUNE/5KTop100ANDSemanticsResults_UNIVERSAL_0.4Kept_probability_TRUE_PROBABILITY_2013_JUNE"
# "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/outputDirForResults/tail5KTestingQueries/ANDSemantics/TOP100Results/UNIVERSAL_PROBABILITY_based_on_True_Query_Term_probability_2013_JUNE/5KTop100ANDSemanticsResults_UNIVERSAL_0.5Kept_probability_TRUE_PROBABILITY_2013_JUNE"
# "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/outputDirForResults/tail5KTestingQueries/ANDSemantics/TOP100Results/UNIVERSAL_PROBABILITY_based_on_True_Query_Term_probability_2013_JUNE/5KTop100ANDSemanticsResults_UNIVERSAL_0.6Kept_probability_TRUE_PROBABILITY_2013_JUNE"
# "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/outputDirForResults/tail5KTestingQueries/ANDSemantics/TOP100Results/UNIVERSAL_PROBABILITY_based_on_True_Query_Term_probability_2013_JUNE/5KTop100ANDSemanticsResults_UNIVERSAL_0.7Kept_probability_TRUE_PROBABILITY_2013_JUNE"
# "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/outputDirForResults/tail5KTestingQueries/ANDSemantics/TOP100Results/UNIVERSAL_PROBABILITY_based_on_True_Query_Term_probability_2013_JUNE/5KTop100ANDSemanticsResults_UNIVERSAL_0.8Kept_probability_TRUE_PROBABILITY_2013_JUNE"
# "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/outputDirForResults/tail5KTestingQueries/ANDSemantics/TOP100Results/UNIVERSAL_PROBABILITY_based_on_True_Query_Term_probability_2013_JUNE/5KTop100ANDSemanticsResults_UNIVERSAL_0.9Kept_probability_TRUE_PROBABILITY_2013_JUNE"

# "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/outputDirForResults/tail5KTestingQueries/ANDSemantics/TOP100Results/UNIVERSAL_PROBABILITY_based_on_OUR_OWN_1D_probability_2013_JUNE/5KTop100ANDSemanticsResults_UNIVERSAL_0.01Kept_probability_OUR_1D_2013_JUNE"
# "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/outputDirForResults/tail5KTestingQueries/ANDSemantics/TOP100Results/UNIVERSAL_PROBABILITY_based_on_OUR_OWN_1D_probability_2013_JUNE/5KTop100ANDSemanticsResults_UNIVERSAL_0.05Kept_probability_OUR_1D_2013_JUNE"
# "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/outputDirForResults/tail5KTestingQueries/ANDSemantics/TOP100Results/UNIVERSAL_PROBABILITY_based_on_OUR_OWN_1D_probability_2013_JUNE/5KTop100ANDSemanticsResults_UNIVERSAL_0.1Kept_probability_OUR_1D_2013_JUNE"
# "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/outputDirForResults/tail5KTestingQueries/ANDSemantics/TOP100Results/UNIVERSAL_PROBABILITY_based_on_OUR_OWN_1D_probability_2013_JUNE/5KTop100ANDSemanticsResults_UNIVERSAL_0.2Kept_probability_OUR_1D_2013_JUNE"
# "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/outputDirForResults/tail5KTestingQueries/ANDSemantics/TOP100Results/UNIVERSAL_PROBABILITY_based_on_OUR_OWN_1D_probability_2013_JUNE/5KTop100ANDSemanticsResults_UNIVERSAL_0.3Kept_probability_OUR_1D_2013_JUNE"
# "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/outputDirForResults/tail5KTestingQueries/ANDSemantics/TOP100Results/UNIVERSAL_PROBABILITY_based_on_OUR_OWN_1D_probability_2013_JUNE/5KTop100ANDSemanticsResults_UNIVERSAL_0.4Kept_probability_OUR_1D_2013_JUNE"
# "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/outputDirForResults/tail5KTestingQueries/ANDSemantics/TOP100Results/UNIVERSAL_PROBABILITY_based_on_OUR_OWN_1D_probability_2013_JUNE/5KTop100ANDSemanticsResults_UNIVERSAL_0.5Kept_probability_OUR_1D_2013_JUNE"
# "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/outputDirForResults/tail5KTestingQueries/ANDSemantics/TOP100Results/UNIVERSAL_PROBABILITY_based_on_OUR_OWN_1D_probability_2013_JUNE/5KTop100ANDSemanticsResults_UNIVERSAL_0.6Kept_probability_OUR_1D_2013_JUNE"
# "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/outputDirForResults/tail5KTestingQueries/ANDSemantics/TOP100Results/UNIVERSAL_PROBABILITY_based_on_OUR_OWN_1D_probability_2013_JUNE/5KTop100ANDSemanticsResults_UNIVERSAL_0.7Kept_probability_OUR_1D_2013_JUNE"
# "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/outputDirForResults/tail5KTestingQueries/ANDSemantics/TOP100Results/UNIVERSAL_PROBABILITY_based_on_OUR_OWN_1D_probability_2013_JUNE/5KTop100ANDSemanticsResults_UNIVERSAL_0.8Kept_probability_OUR_1D_2013_JUNE"
# "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/outputDirForResults/tail5KTestingQueries/ANDSemantics/TOP100Results/UNIVERSAL_PROBABILITY_based_on_OUR_OWN_1D_probability_2013_JUNE/5KTop100ANDSemanticsResults_UNIVERSAL_0.9Kept_probability_OUR_1D_2013_JUNE"



# options for input f1
# option1:
# "/home/diaosi/outputDirForResults/tail5KTestingQueries/ANDSemantics/TOP1000Results/GoldStandardResults/5KTop100ANDSemanticsResults_UNIVERSAL_1.0Kept_probability_TRUE_PROBABILITY"
# option2:
# "/home/diaosi/outputDirForResults/tail5KTestingQueries/ANDSemantics/TOP1000Results/GoldStandardResults/5KTop100ANDSemanticsResults_UNIVERSAL_1.0Kept_probability_OUR_METHOD_Fake"
# option3:
# "/home/diaosi/outputDirForResults/tail5KTestingQueries/ANDSemantics/TOP1000Results/GoldStandardResults/5KTop100ANDSemanticsResults_UNIVERSAL_1.0Kept_partialBM25"
# option4:
# "/home/diaosi/outputDirForResults/tail5KTestingQueries/ANDSemantics/TOP1000Results/GoldStandardResults/5KTop100ANDSemanticsResults_TCP_1.0Kept_partialBM25"

# options for input f2
# corresponding to the above option1
# /home/diaosi/outputDirForResults/tail5KTestingQueries/ANDSemantics/TOP100Results/UNIVERSAL_PROBABILITY_based_on_true_query_term_probability/results_UNIVERSAL_0.01Kept_probability
# /home/diaosi/outputDirForResults/tail5KTestingQueries/ANDSemantics/TOP100Results/UNIVERSAL_PROBABILITY_based_on_true_query_term_probability/results_UNIVERSAL_0.05Kept_probability
# /home/diaosi/outputDirForResults/tail5KTestingQueries/ANDSemantics/TOP100Results/UNIVERSAL_PROBABILITY_based_on_true_query_term_probability/results_UNIVERSAL_0.1Kept_probability
# /home/diaosi/outputDirForResults/tail5KTestingQueries/ANDSemantics/TOP100Results/UNIVERSAL_PROBABILITY_based_on_true_query_term_probability/results_UNIVERSAL_0.2Kept_probability
# /home/diaosi/outputDirForResults/tail5KTestingQueries/ANDSemantics/TOP100Results/UNIVERSAL_PROBABILITY_based_on_true_query_term_probability/results_UNIVERSAL_0.3Kept_probability
# /home/diaosi/outputDirForResults/tail5KTestingQueries/ANDSemantics/TOP100Results/UNIVERSAL_PROBABILITY_based_on_true_query_term_probability/results_UNIVERSAL_0.4Kept_probability
# /home/diaosi/outputDirForResults/tail5KTestingQueries/ANDSemantics/TOP100Results/UNIVERSAL_PROBABILITY_based_on_true_query_term_probability/results_UNIVERSAL_0.5Kept_probability
# /home/diaosi/outputDirForResults/tail5KTestingQueries/ANDSemantics/TOP100Results/UNIVERSAL_PROBABILITY_based_on_true_query_term_probability/results_UNIVERSAL_0.6Kept_probability
# /home/diaosi/outputDirForResults/tail5KTestingQueries/ANDSemantics/TOP100Results/UNIVERSAL_PROBABILITY_based_on_true_query_term_probability/results_UNIVERSAL_0.7Kept_probability
# /home/diaosi/outputDirForResults/tail5KTestingQueries/ANDSemantics/TOP100Results/UNIVERSAL_PROBABILITY_based_on_true_query_term_probability/results_UNIVERSAL_0.8Kept_probability
# /home/diaosi/outputDirForResults/tail5KTestingQueries/ANDSemantics/TOP100Results/UNIVERSAL_PROBABILITY_based_on_true_query_term_probability/results_UNIVERSAL_0.9Kept_probability

# corresponding to the above option2
# /home/diaosi/outputDirForResults/tail5KTestingQueries/ANDSemantics/TOP100Results/UNIVERSAL_PROBABILITY_based_on_OUR_OWN_2D_probability/results_UNIVERSAL_0.01Kept_probability
# /home/diaosi/outputDirForResults/tail5KTestingQueries/ANDSemantics/TOP100Results/UNIVERSAL_PROBABILITY_based_on_OUR_OWN_2D_probability/results_UNIVERSAL_0.05Kept_probability
# /home/diaosi/outputDirForResults/tail5KTestingQueries/ANDSemantics/TOP100Results/UNIVERSAL_PROBABILITY_based_on_OUR_OWN_2D_probability/results_UNIVERSAL_0.1Kept_probability
# /home/diaosi/outputDirForResults/tail5KTestingQueries/ANDSemantics/TOP100Results/UNIVERSAL_PROBABILITY_based_on_OUR_OWN_2D_probability/results_UNIVERSAL_0.2Kept_probability
# /home/diaosi/outputDirForResults/tail5KTestingQueries/ANDSemantics/TOP100Results/UNIVERSAL_PROBABILITY_based_on_OUR_OWN_2D_probability/results_UNIVERSAL_0.3Kept_probability
# /home/diaosi/outputDirForResults/tail5KTestingQueries/ANDSemantics/TOP100Results/UNIVERSAL_PROBABILITY_based_on_OUR_OWN_2D_probability/results_UNIVERSAL_0.4Kept_probability
# /home/diaosi/outputDirForResults/tail5KTestingQueries/ANDSemantics/TOP100Results/UNIVERSAL_PROBABILITY_based_on_OUR_OWN_2D_probability/results_UNIVERSAL_0.5Kept_probability
# /home/diaosi/outputDirForResults/tail5KTestingQueries/ANDSemantics/TOP100Results/UNIVERSAL_PROBABILITY_based_on_OUR_OWN_2D_probability/results_UNIVERSAL_0.6Kept_probability
# /home/diaosi/outputDirForResults/tail5KTestingQueries/ANDSemantics/TOP100Results/UNIVERSAL_PROBABILITY_based_on_OUR_OWN_2D_probability/results_UNIVERSAL_0.7Kept_probability
# /home/diaosi/outputDirForResults/tail5KTestingQueries/ANDSemantics/TOP100Results/UNIVERSAL_PROBABILITY_based_on_OUR_OWN_2D_probability/results_UNIVERSAL_0.8Kept_probability
# /home/diaosi/outputDirForResults/tail5KTestingQueries/ANDSemantics/TOP100Results/UNIVERSAL_PROBABILITY_based_on_OUR_OWN_2D_probability/results_UNIVERSAL_0.9Kept_probability

# corresponding to the above option3
# /home/diaosi/outputDirForResults/tail5KTestingQueries/ANDSemantics/TOP100Results/UNIVERSAL_BM25/results_UNIVERSAL_0.01Kept_partialBM25
# /home/diaosi/outputDirForResults/tail5KTestingQueries/ANDSemantics/TOP100Results/UNIVERSAL_BM25/results_UNIVERSAL_0.05Kept_partialBM25
# /home/diaosi/outputDirForResults/tail5KTestingQueries/ANDSemantics/TOP100Results/UNIVERSAL_BM25/results_UNIVERSAL_0.1Kept_partialBM25
# /home/diaosi/outputDirForResults/tail5KTestingQueries/ANDSemantics/TOP100Results/UNIVERSAL_BM25/results_UNIVERSAL_0.2Kept_partialBM25
# /home/diaosi/outputDirForResults/tail5KTestingQueries/ANDSemantics/TOP100Results/UNIVERSAL_BM25/results_UNIVERSAL_0.3Kept_partialBM25
# /home/diaosi/outputDirForResults/tail5KTestingQueries/ANDSemantics/TOP100Results/UNIVERSAL_BM25/results_UNIVERSAL_0.4Kept_partialBM25
# /home/diaosi/outputDirForResults/tail5KTestingQueries/ANDSemantics/TOP100Results/UNIVERSAL_BM25/results_UNIVERSAL_0.5Kept_partialBM25
# /home/diaosi/outputDirForResults/tail5KTestingQueries/ANDSemantics/TOP100Results/UNIVERSAL_BM25/results_UNIVERSAL_0.6Kept_partialBM25
# /home/diaosi/outputDirForResults/tail5KTestingQueries/ANDSemantics/TOP100Results/UNIVERSAL_BM25/results_UNIVERSAL_0.7Kept_partialBM25
# /home/diaosi/outputDirForResults/tail5KTestingQueries/ANDSemantics/TOP100Results/UNIVERSAL_BM25/results_UNIVERSAL_0.8Kept_partialBM25
# /home/diaosi/outputDirForResults/tail5KTestingQueries/ANDSemantics/TOP100Results/UNIVERSAL_BM25/results_UNIVERSAL_0.9Kept_partialBM25

# corresponding to the above option4
# /home/diaosi/outputDirForResults/tail5KTestingQueries/ANDSemantics/TOP100Results/TCP/results_TCP_0.01Kept_partialBM25
# /home/diaosi/outputDirForResults/tail5KTestingQueries/ANDSemantics/TOP100Results/TCP/results_TCP_0.05Kept_partialBM25
# /home/diaosi/outputDirForResults/tail5KTestingQueries/ANDSemantics/TOP100Results/TCP/results_TCP_0.1Kept_partialBM25
# /home/diaosi/outputDirForResults/tail5KTestingQueries/ANDSemantics/TOP100Results/TCP/results_TCP_0.2Kept_partialBM25
# /home/diaosi/outputDirForResults/tail5KTestingQueries/ANDSemantics/TOP100Results/TCP/results_TCP_0.3Kept_partialBM25
# /home/diaosi/outputDirForResults/tail5KTestingQueries/ANDSemantics/TOP100Results/TCP/results_TCP_0.4Kept_partialBM25
# /home/diaosi/outputDirForResults/tail5KTestingQueries/ANDSemantics/TOP100Results/TCP/results_TCP_0.5Kept_partialBM25
# /home/diaosi/outputDirForResults/tail5KTestingQueries/ANDSemantics/TOP100Results/TCP/results_TCP_0.6Kept_partialBM25
# /home/diaosi/outputDirForResults/tail5KTestingQueries/ANDSemantics/TOP100Results/TCP/results_TCP_0.7Kept_partialBM25
# /home/diaosi/outputDirForResults/tail5KTestingQueries/ANDSemantics/TOP100Results/TCP/results_TCP_0.8Kept_partialBM25
# /home/diaosi/outputDirForResults/tail5KTestingQueries/ANDSemantics/NO_USE_Result_Corruption_results_TCP_0.9Kept_partialBM25
NUM_OF_LINES_NEEDED_TO_SKIP = -1
NUM_OF_TOP_RESULTS_FOR_EVALUATION = -1

if len(sys.argv) != 8:
    # example command for using this program
    print "USAGE:","python pythonProgramName gold_standard_file_name1 need_to_compare_file_name2 NUM_OF_LINES_NEEDED_TO_SKIP NUM_OF_TOP_RESULTS_FOR_EVALUATION PRUNING_METHOD PERCENTAGE_KEPT CONDITION"
    print "len(sys.argv):",len(sys.argv)
    exit(1)
else:
    print "main program begins..."
    print 'Number of arguments:', len(sys.argv), 'arguments.'
    print 'Argument List:', str(sys.argv)
    
    inputFileName1 = str( sys.argv[1] )
    inputFileName2 = str( sys.argv[2] )
    inputFileNameList2 = inputFileName2.strip().split("/")
    OUTPUT_FILE_BASED_PATH = ""
    for element in inputFileNameList2[:-1]:
        OUTPUT_FILE_BASED_PATH += element + "/"
    print "OUTPUT_FILE_BASED_PATH:",OUTPUT_FILE_BASED_PATH
    
    NUM_OF_LINES_NEEDED_TO_SKIP = int( sys.argv[3] )
    TOPKValue = int( sys.argv[4] )
    PRUNING_METHOD = str( sys.argv[5] )
    PERCENTAGE_KEPT = float( sys.argv[6] )
    CONDITION = str( sys.argv[7] )
    
    # 3 arguments:
    # (1) inputFileName1
    # (2) numOfLinesStillNeededToSkip
    # (3) NUM_OF_TOP_RESULTS_NEEDED_TO_EVALUATE
    outsideQueryIDAndResultSetDict1 = getQueryIDAndResultSetGivenResultFileForGoldStandardResultFile(inputFileName1,NUM_OF_LINES_NEEDED_TO_SKIP,TOPKValue)
    
    # 3 arguments:
    # (1) inputFileName2
    # (2) numOfLinesStillNeededToSkip
    # (3) NUM_OF_TOP_RESULTS_NEEDED_TO_EVALUATE
    
    # temp assignment(Just to test that the evaluation function is working)
    # outsideQueryIDAndResultSetDict2 = getQueryIDAndResultSetGivenResultFileForGoldStandardResultFile(inputFileName2,NUM_OF_LINES_NEEDED_TO_SKIP,TOPKValue)
    # should be used.
    outsideQueryIDAndResultSetDict2 = getQueryIDAndResultSetGivenResultFileForNewFormatResultFile(inputFileName2,NUM_OF_LINES_NEEDED_TO_SKIP,TOPKValue)
    
    
    
    # Now the results should be the same
    queryIDList = outsideQueryIDAndResultSetDict1.keys()
    queryIDList.sort()
    
    # temp NOT used currently
    # allQueriesIntersectionSet = Set([])
    # allQueriesUnionSet = Set([])
    
    # evaluation method1:
    all_queries_average_symmetric_difference_value1 = 0.0
    # evaluation method2:
    all_queries_average_symmetric_difference_value2 = 0.0
    # evaluation method3:
    all_queries_average_NON_symmetric_difference_value3 = 0.0
    # evaluation method4:
    all_queries_average_NON_symmetric_difference_value4 = 0.0
    
    currentTotalIntersectionSet = Set([])
    currentTotalUnionSetFor_symmetric_difference_value2 = Set([])
    currentTotalUnionSetForNON_symmetric_difference_value4 = Set([])
    num_queries_returning_results = 0
    num_queries_NOT_returning_results = 0
    
    for queryID in queryIDList:
        if queryID in outsideQueryIDAndResultSetDict1 and queryID in outsideQueryIDAndResultSetDict2:
            intersectionSet = outsideQueryIDAndResultSetDict1[queryID].intersection( outsideQueryIDAndResultSetDict2[queryID] )
            unionSet = outsideQueryIDAndResultSetDict1[queryID].union( outsideQueryIDAndResultSetDict2[queryID] )
            currentDenominatorForNON_symmetric_difference = outsideQueryIDAndResultSetDict1[queryID]
            symmetric_difference_value = 0.0
            NON_symmetric_difference_value = 0.0
            if len(unionSet) == 0:
                print "queryID:",str(queryID),"symmetric_difference:","N/A cause there are NO results"
                num_queries_NOT_returning_results += 1
            else:
                currentTotalIntersectionSet = currentTotalIntersectionSet.union( intersectionSet )
                currentTotalUnionSetFor_symmetric_difference_value2 = currentTotalUnionSetFor_symmetric_difference_value2.union( unionSet )
                currentTotalUnionSetForNON_symmetric_difference_value4 = currentTotalUnionSetForNON_symmetric_difference_value4.union( currentDenominatorForNON_symmetric_difference )
                symmetric_difference_value = len(intersectionSet)/len(unionSet)
                NON_symmetric_difference_value = len(intersectionSet)/len(currentDenominatorForNON_symmetric_difference)
                all_queries_average_symmetric_difference_value1 += symmetric_difference_value
                all_queries_average_NON_symmetric_difference_value3 += NON_symmetric_difference_value
                num_queries_returning_results += 1
                print "queryID:",str(queryID),"symmetric_difference:",symmetric_difference_value
                
                # Just for checking
                '''
                if symmetric_difference_value != 1.0:
                    print "has problem with the queryID:",queryID,symmetric_difference_value
                    exit(1)
                '''
            
            '''
            if queryID in outsideQueryIDAndResultSetDict2:
                pass
            else:
                print "error"
                exit(1)
            '''
    
    # ONLY evaluate the top10 results.
    # This can be set as an parameter to transfer into the function
    all_queries_average_symmetric_difference_value2 = len(currentTotalIntersectionSet) / len(currentTotalUnionSetFor_symmetric_difference_value2)
    all_queries_average_NON_symmetric_difference_value4 = len(currentTotalIntersectionSet) / len(currentTotalUnionSetForNON_symmetric_difference_value4)
    print "num_queries_returning_results:",num_queries_returning_results
    print "num_queries_NOT_returning_results:",num_queries_NOT_returning_results
    print "len(currentTotalIntersectionSet):",len(currentTotalIntersectionSet)
    print "len(currentTotalUnionSetFor_symmetric_difference_value2):",len(currentTotalUnionSetFor_symmetric_difference_value2)
    print "len(currentTotalUnionSetForNON_symmetric_difference_value4):",len(currentTotalUnionSetForNON_symmetric_difference_value4)
    print "all_queries_average_symmetric_difference_value1:",all_queries_average_symmetric_difference_value1/num_queries_returning_results
    print "all_queries_average_symmetric_difference_value2:",all_queries_average_symmetric_difference_value2
    print "all_queries_average_NON_symmetric_difference_value3:",all_queries_average_NON_symmetric_difference_value3/num_queries_returning_results
    print "all_queries_average_NON_symmetric_difference_value4:",all_queries_average_NON_symmetric_difference_value4
    
    outputFileName = OUTPUT_FILE_BASED_PATH + "NEW_symmetric_difference_measure_values" + "_" + PRUNING_METHOD + "_" + CONDITION + "_" + "TOP" + str(TOPKValue)
    print "outputFileName:",outputFileName
    outputFileHandler = open(outputFileName,"a")
    # outputFileHandler.write("all_queries_average_symmetric_difference_value1" + " " + "all_queries_average_symmetric_difference_value2" + " " + "all_queries_average_NON_symmetric_difference_value3" + " " + "all_queries_average_NON_symmetric_difference_value4" + "\n")
    outputFileHandler.write(str(PERCENTAGE_KEPT) + " " + str(all_queries_average_symmetric_difference_value1/num_queries_returning_results) + " " + str(all_queries_average_symmetric_difference_value2) + " " + str(all_queries_average_NON_symmetric_difference_value3/num_queries_returning_results) + " " + str(all_queries_average_NON_symmetric_difference_value4) + "\n")
    outputFileHandler.close()
    print "pass"


print "Program Ends."

















