# Updated by Wei on 2014/08/19
# Task: make the UP, TCP, UP-QV and TCP-QV overlap evaluations work.
from __future__ import division
from operator import itemgetter, attrgetter
from struct import *
import gc
import math
import os
import random
import sys
import time
from sets import Set
import numpy as np

def getUnprunedDocumentResultsForQueries(inputFileName1, \
                                         NUM_OF_TOP_RESULTS_NEEDED_TO_EVALUATE, \
                                         termANDTermIDDict, \
                                         qIDsWithTopKDocumentResultImpactDict, \
                                         top10PostingWithDocumentResultDict, \
                                         top10DocumentResultDict, \
                                         topKPostingWithDocumentResultDict, \
                                         NUM_OF_TOP_RESULTS_CUTOFF, \
                                         qidsToEvaluateDict, \
                                         qIDsWithTopKDocumentResultNumOfPostingsContributedDict, \
                                         qIDWithQueryTermsDict):
    numOfQueriesHavingQID = 0
    numOfQueriesHavingSearchContent = 0
    numOfResultsForAllTheQueries = 0
    
    inputFileHandler1 = open(inputFileName1,"r")
    currentLine = inputFileHandler1.readline()
    nextLine = ""
    currentQID = ""
    
    processFlag = False
    while currentLine:
        # sampling parsing line:
        # qid: 701
        if currentLine.startswith("qid:"):
            currentQID = currentLine.strip().split(":")[1].strip()
            print "currentQID:",currentQID
            print "numOfQueriesHavingQID:",numOfQueriesHavingQID
            numOfQueriesHavingQID += 1
            # debug
            if numOfQueriesHavingQID == 100:
                break
            
            if currentQID in qidsToEvaluateDict:
                processFlag = True
            else:
                processFlag = False
        # sample parsing line:
        # Search: u s  oil industry history
        if currentLine.startswith("Search:") and processFlag:
            elementList = currentLine.strip().split(" ")
            
            currentSearchContent = ""
            
            queryTermList = []
            for element in elementList[1:]:
                if element.strip() != "":
                    queryTermList.append( element.strip() )        
            currentQueryLength = len(queryTermList)
            print "currentQueryLength:",currentQueryLength
    
            for element in queryTermList:
                currentSearchContent += element + " "
            
            print currentSearchContent
            numOfQueriesHavingSearchContent += 1
            
            
            currentQueryTermIndexDict = {}
            nextLine = inputFileHandler1.readline()
            # print nextLine.strip()
            parsedFlag = True
            
            if nextLine.strip().endswith("is NOT in the lexicon."):
                parsedFlag = False
            
            if nextLine.strip() != "" and parsedFlag:
                
                # skip the following sampling line: 
                # [2013-05-24 14:20:44] 38871 terms with their lists length have been loaded.
                # nextLine = inputFileHandler1.readline()
                
                nextLineElements = nextLine.strip().split(" ")
                # print "nextLineElements:",nextLineElements
                
                # sample parsing line: 
                # oil:0 industry:1 history:2 u:3 s:4
                for element in nextLineElements:
                    if element.split(":")[0] not in currentQueryTermIndexDict:
                        currentQueryTermIndexDict[ int(element.split(":")[1]) ] = element.split(":")[0]
                
                print "currentQueryTermIndexDict:",currentQueryTermIndexDict
                # print "len(currentQueryTermIndexDict):",len(currentQueryTermIndexDict)
                
                if currentQID not in qIDWithQueryTermsDict:
                    qIDWithQueryTermsDict[currentQID] = []
                
                for i in range(0,len(currentQueryTermIndexDict)):
                    currentTerm = currentQueryTermIndexDict[i]
                    qIDWithQueryTermsDict[currentQID].append(currentTerm)
                    
                # Now, it is time to read the threshold line
                currentLine = inputFileHandler1.readline()
                if len(currentLine.strip().split(":")) == 2:
                    name = currentLine.strip().split(":")[0]
                    value = currentLine.strip().split(":")[1]
                    print name,"::",value
                else:
                    print "format problem"
                    exit(1)
                    
                # Now, it is time to read computation method line
                currentLine = inputFileHandler1.readline()
                if len(currentLine.strip().split(":")) == 2:
                    name = currentLine.strip().split(":")[0]
                    value = currentLine.strip().split(":")[1]
                    print name,"::",value
                else:
                    print "format problem"
                    exit(1)
                
                # skip one line
                currentLine = inputFileHandler1.readline()
                
                # Now, it is time to read the actual training example line
                currentLine = inputFileHandler1.readline()
                numOfResultsForTheCurrentQuery = 0
                
                # for IMPORTANT DEBUG ONLY
                #print "len( currentLine.strip().split(" ") ):",len( currentLine.strip().split(" ") )
                #print "currentLine:",currentLine
                #exit(1)
                
                lineElements = currentLine.strip().split(" ")
                while len( lineElements ) == 25:
                    theRank = int(lineElements[0])
                    theDocID = lineElements[-3]
                    theScore = float(lineElements[-4])
                    documentResultKey = currentQID + "_" + theDocID
                    
                    if theRank <= NUM_OF_TOP_RESULTS_CUTOFF:
                        if currentQID not in qIDsWithTopKDocumentResultImpactDict:
                            qIDsWithTopKDocumentResultImpactDict[currentQID] = {}
                            qIDsWithTopKDocumentResultNumOfPostingsContributedDict[currentQID] = {}
                        if documentResultKey not in qIDsWithTopKDocumentResultImpactDict:
                            qIDsWithTopKDocumentResultImpactDict[currentQID][documentResultKey] = 0.0
                            qIDsWithTopKDocumentResultNumOfPostingsContributedDict[currentQID][documentResultKey] = 0
                        else:
                            print "duplicated document result."
                            print "documentResultKey:",documentResultKey
                            exit(1)

                    if theRank <= NUM_OF_TOP_RESULTS_NEEDED_TO_EVALUATE:
                        top10DocumentResultDict[documentResultKey] = 1;
                    
                    upperBound = 0
                    if len(currentQueryTermIndexDict) >= 10:
                        upperBound = 10
                    else:
                        upperBound = len(currentQueryTermIndexDict)
                    
                    for i in range(0,upperBound):
                        currentTerm = currentQueryTermIndexDict[i]
                        if currentTerm not in termANDTermIDDict:
                            print currentTerm,"is NOT in the dict."
                            exit(1)
                        else:
                            currentTermID = termANDTermIDDict[currentTerm]
                        # both good for gov2 / clueweb09 although the input format is a little different
                        currentTermScore = float( lineElements[11 + i] )
                        if currentTermScore != 0.0:
                            postingKey = currentTermID + "_" + theDocID
                            
                            if theRank <= NUM_OF_TOP_RESULTS_NEEDED_TO_EVALUATE:
                                if postingKey not in top10PostingWithDocumentResultDict:
                                    top10PostingWithDocumentResultDict[postingKey] = []
                                top10PostingWithDocumentResultDict[postingKey].append(documentResultKey)                            
                            
                            if theRank <= NUM_OF_TOP_RESULTS_CUTOFF:
                                if postingKey not in topKPostingWithDocumentResultDict:
                                    topKPostingWithDocumentResultDict[postingKey] = []
                                topKPostingWithDocumentResultDict[postingKey].append(documentResultKey)                    
                    
                    numOfResultsForTheCurrentQuery += 1
                    
                    currentLine = inputFileHandler1.readline()
                    lineElements = currentLine.strip().split(" ")
                    
                numOfResultsForAllTheQueries += numOfResultsForTheCurrentQuery
                
                # sampling method 1: among all the top10 documents, select those documents, open those documents and extract the corresponding postings.
                # This method make sure that once the document have been selected, ALL the coresponding postings will also be selected.
                # This sampling method has been developed on 2013/01/09
                
                print "numOfResultsForCurrentQuery:",numOfResultsForTheCurrentQuery
                print
            
        currentLine = inputFileHandler1.readline()
    
    print "numOfQueriesHavingQID:",numOfQueriesHavingQID
    print "numOfQueriesHavingSearchContent:",numOfQueriesHavingSearchContent
    print "numOfResultsForAllTheQueries:",numOfResultsForAllTheQueries
    print

print "Program Begins..."
# rank cutoff for the "topK" dictionaries built by the parser
NUM_OF_TOP_RESULTS_CUTOFF = 10
# rank cutoff for the "top10" dictionaries that drive the overlap evaluation
NUM_OF_TOP_RESULTS_NEEDED_TO_EVALUATE = 10
QUERY_LENGTH_TO_EVALUATE = 0
print "len(sys.argv):",len(sys.argv)
if len(sys.argv) != 3:
    print "Illegal # of arguments"
    print "Usage: python programName.py all_postings_being_popped_file QUERY_LENGTH_TO_EVALUATE"
    exit(1)
else:
    # NOTE(review): sys.argv[1] (all_postings_being_popped_file) is accepted
    # but never read; only the query-length filter is used.  0 = all lengths.
    QUERY_LENGTH_TO_EVALUATE = int(sys.argv[2])

# variables inits
# key: termID
# value: # of postings in the list (currently NOT used below)
termIDANDCurrentListLengthDict = {}
# key: qID
# value: dict of topK document results -> impact placeholder (0.0)
qIDsWithTopKDocumentResultImpactDict = {}
# key: qID
# value: dict of topK document results -> # of postings contributed (0)
qIDsWithTopKDocumentResultNumOfPostingsContributedDict = {}

# key: topK posting
# value: a list of document results this posting serves
topKPostingWithDocumentResultDict = {}
# key: document results for TOP10
# value: (currently NOT USED)
top10DocumentResultDict = {}
# key: top10 posting
# value: a list of document results this posting serves
top10PostingWithDocumentResultDict = {}
# key: qID
# value: a list of query terms that formed the current query
qIDWithQueryTermsDict = {}
# key: qID that needs to be considered
# value: (currently NOT used)
qidsToEvaluateDict = {}
# for vidaserver1
# gov2 tail5K testing queries
inputFileName2 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/gov2_tail5K_queries_testing_withQueryLengthAdded"
# clueweb09B testing queries
# inputFileName2 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/clueweb09B_queries_ALL_sortedByRandom_testing_withQueryLengthAdded"
# Build the set of qIDs to evaluate, optionally filtered by query length.
# Expected line format: "<queryLength> <qID>: <query terms>" -- TODO confirm
inputFileHandler = open(inputFileName2,"r")
for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    currentQueryLength = int(lineElements[0])
    if QUERY_LENGTH_TO_EVALUATE == 0:
        # 0 means: evaluate queries of ALL lengths
        currentQID = line.strip().split(":")[0].strip().split(" ")[1]
        qidsToEvaluateDict[currentQID] = 1
    else:
        if currentQueryLength == QUERY_LENGTH_TO_EVALUATE:
            currentQID = line.strip().split(":")[0].strip().split(" ")[1]
            qidsToEvaluateDict[currentQID] = 1
inputFileHandler.close()
print "len(qidsToEvaluateDict):",len(qidsToEvaluateDict)

# for gov2 testing queries
# for vidaserver1:
inputFileName1 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/queryProcessingCostAnalysis/gov2/rawResults_50%_TOP1000_OR_20140126Night"
# for clueweb09B testing queries
# inputFileName1 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_clueweb09B_TOP1000_testingQueries_OR_20140705"

# term <-> termID mappings plus unpruned list lengths, from the lexicon dump
termANDTermIDDict = {}
termIDAndTermDict = {}
termIDANDUnprunedListLengthDict = {}
# for gov2
# for vidaserver1:
inputFileName0 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/queryProcessingCostAnalysis/gov2/partOfLexiconTermsWithTermIDForTail5KQueries"
# for clueweb09B
# inputFileName0 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/partOfLexiconTermsWithTermIDForTestingQueries_clueweb09B"
# Load the lexicon dump.  Expected line format: "<termID> <term> <listLength>"
inputFileHandler = open(inputFileName0,"r")
currentLine = inputFileHandler.readline()
currentLineNum = 0
while currentLine:
    # progress indicator for very large lexicon files
    if currentLineNum % 1000000 == 0:
        print currentLineNum,"lines loaded."
    currentLineElements = currentLine.strip().split(" ")
    currentTermID = currentLineElements[0]
    currentTerm = currentLineElements[1]
    currentTermListLength = int(currentLineElements[2])
    termANDTermIDDict[currentTerm] = currentTermID
    termIDAndTermDict[currentTermID] = currentTerm
    termIDANDUnprunedListLengthDict[currentTermID] = currentTermListLength
    currentLine = inputFileHandler.readline()
    currentLineNum += 1
print "len(termANDTermIDDict): ",len(termANDTermIDDict)
print "len(termIDAndTermDict): ",len(termIDAndTermDict)
print "len(termIDANDUnprunedListLengthDict): ",len(termIDANDUnprunedListLengthDict)
inputFileHandler.close()

# Baseline run: parse the UNPRUNED top-1000 results once; the top10*/topK*
# dictionaries filled here are the reference for the overlap loop below.
getUnprunedDocumentResultsForQueries(inputFileName1,NUM_OF_TOP_RESULTS_NEEDED_TO_EVALUATE,termANDTermIDDict,qIDsWithTopKDocumentResultImpactDict,top10PostingWithDocumentResultDict,top10DocumentResultDict,topKPostingWithDocumentResultDict,NUM_OF_TOP_RESULTS_CUTOFF,qidsToEvaluateDict,qIDsWithTopKDocumentResultNumOfPostingsContributedDict,qIDWithQueryTermsDict)

print "OVERALL:"
print "len(qidsToEvaluateDict):",len(qidsToEvaluateDict)
print "QUERY_LENGTH_TO_EVALUATE:",QUERY_LENGTH_TO_EVALUATE
# total number of distinct top-K document results across all evaluated queries
NUM_OF_TOPK_DOCUMENT_RESULT = 0
for currentQID in qIDsWithTopKDocumentResultImpactDict:
    NUM_OF_TOPK_DOCUMENT_RESULT += len( qIDsWithTopKDocumentResultImpactDict[currentQID] )
print "NUM_OF_TOPK_DOCUMENT_RESULT:",NUM_OF_TOPK_DOCUMENT_RESULT
print "len(topKPostingWithDocumentResultDict):",len(topKPostingWithDocumentResultDict)
print "len(top10DocumentResultDict):",len(top10DocumentResultDict)
print "len(top10PostingWithDocumentResultDict):",len(top10PostingWithDocumentResultDict)
print "NUM_OF_TOP_RESULTS_CUTOFF:",NUM_OF_TOP_RESULTS_CUTOFF
print "NUM_OF_TOP_RESULTS_NEEDED_TO_EVALUATE:",NUM_OF_TOP_RESULTS_NEEDED_TO_EVALUATE
#################################################################################################################
# Pruned-index result files to compare against the unpruned baseline above.
# Exactly one of the configuration groups below should be active at a time;
# the others are kept commented out for quick switching between experiments.
inputFileNameList = []
# FOR TCP:
# for gov2
# 1% - 100%
#inputFileName1 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/TCP_results/gov2_OVERLAP/rawResults_OVERLAP_TCP_1%_GOV2"
#inputFileName2 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/TCP_results/gov2_OVERLAP/rawResults_OVERLAP_TCP_2%_GOV2"
#inputFileName3 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/TCP_results/gov2_OVERLAP/rawResults_OVERLAP_TCP_3%_GOV2"
#inputFileName4 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/TCP_results/gov2_OVERLAP/rawResults_OVERLAP_TCP_4%_GOV2"
#inputFileName5 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/TCP_results/gov2_OVERLAP/rawResults_OVERLAP_TCP_5%_GOV2"
#inputFileName6 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/TCP_results/gov2_OVERLAP/rawResults_OVERLAP_TCP_6%_GOV2"
#inputFileName7 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/TCP_results/gov2_OVERLAP/rawResults_OVERLAP_TCP_7%_GOV2"
#inputFileName8 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/TCP_results/gov2_OVERLAP/rawResults_OVERLAP_TCP_8%_GOV2"
#inputFileName9 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/TCP_results/gov2_OVERLAP/rawResults_OVERLAP_TCP_9%_GOV2"
#inputFileName10 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/TCP_results/gov2_OVERLAP/rawResults_OVERLAP_TCP_10%_GOV2"
#inputFileName11 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/TCP_results/gov2_OVERLAP/rawResults_OVERLAP_TCP_15%_GOV2"
#inputFileName12 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/TCP_results/gov2_OVERLAP/rawResults_OVERLAP_TCP_20%_GOV2"
#inputFileName13 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/TCP_results/gov2_OVERLAP/rawResults_OVERLAP_TCP_30%_GOV2"
#inputFileName14 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/TCP_results/gov2_OVERLAP/rawResults_OVERLAP_TCP_40%_GOV2"
#inputFileName15 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/TCP_results/gov2_OVERLAP/rawResults_OVERLAP_TCP_50%_GOV2"
#inputFileName16 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/TCP_results/gov2_OVERLAP/rawResults_OVERLAP_TCP_60%_GOV2"
#inputFileName17 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/TCP_results/gov2_OVERLAP/rawResults_OVERLAP_TCP_70%_GOV2"
#inputFileName18 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/TCP_results/gov2_OVERLAP/rawResults_OVERLAP_TCP_80%_GOV2"
#inputFileName19 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/TCP_results/gov2_OVERLAP/rawResults_OVERLAP_TCP_90%_GOV2"
#inputFileName20 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/TCP_results/gov2_OVERLAP/rawResults_OVERLAP_TCP_100%_GOV2"

# for clueweb09B
#inputFileName1 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/TCP_results/clueweb09B_OVERLAP/rawResults_OVERLAP_Clueweb09B_TCP_1%"
#inputFileName2 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/TCP_results/clueweb09B_OVERLAP/rawResults_OVERLAP_Clueweb09B_TCP_2%"
#inputFileName3 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/TCP_results/clueweb09B_OVERLAP/rawResults_OVERLAP_Clueweb09B_TCP_3%"
#inputFileName4 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/TCP_results/clueweb09B_OVERLAP/rawResults_OVERLAP_Clueweb09B_TCP_4%"
#inputFileName5 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/TCP_results/clueweb09B_OVERLAP/rawResults_OVERLAP_Clueweb09B_TCP_5%"
#inputFileName6 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/TCP_results/clueweb09B_OVERLAP/rawResults_OVERLAP_Clueweb09B_TCP_6%"
#inputFileName7 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/TCP_results/clueweb09B_OVERLAP/rawResults_OVERLAP_Clueweb09B_TCP_7%"
#inputFileName8 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/TCP_results/clueweb09B_OVERLAP/rawResults_OVERLAP_Clueweb09B_TCP_8%"
#inputFileName9 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/TCP_results/clueweb09B_OVERLAP/rawResults_OVERLAP_Clueweb09B_TCP_9%"
#inputFileName10 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/TCP_results/clueweb09B_OVERLAP/rawResults_OVERLAP_Clueweb09B_TCP_10%"

# FOR TCP-QV-50K  (currently ACTIVE configuration)
# for gov2
# 1% - 100%
inputFileName1 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_OVERLAP_TCP_50K_QV_1%_GOV2"
inputFileName2 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_OVERLAP_TCP_50K_QV_2%_GOV2"
inputFileName3 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_OVERLAP_TCP_50K_QV_3%_GOV2"
inputFileName4 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_OVERLAP_TCP_50K_QV_4%_GOV2"
inputFileName5 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_OVERLAP_TCP_50K_QV_5%_GOV2"
inputFileName6 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_OVERLAP_TCP_50K_QV_6%_GOV2"
inputFileName7 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_OVERLAP_TCP_50K_QV_7%_GOV2"
inputFileName8 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_OVERLAP_TCP_50K_QV_8%_GOV2"
inputFileName9 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_OVERLAP_TCP_50K_QV_9%_GOV2"
inputFileName10 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_OVERLAP_TCP_50K_QV_10%_GOV2"
inputFileName11 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_OVERLAP_TCP_50K_QV_15%_GOV2"
inputFileName12 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_OVERLAP_TCP_50K_QV_20%_GOV2"
inputFileName13 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_OVERLAP_TCP_50K_QV_30%_GOV2"
inputFileName14 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_OVERLAP_TCP_50K_QV_40%_GOV2"
inputFileName15 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_OVERLAP_TCP_50K_QV_50%_GOV2"

# FOR TCP-QV-95K
# 1% - 100%
#inputFileName1 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_OVERLAP_TCP_95K_QV_1%_GOV2"
#inputFileName2 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_OVERLAP_TCP_95K_QV_2%_GOV2"
#inputFileName3 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_OVERLAP_TCP_95K_QV_3%_GOV2"
#inputFileName4 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_OVERLAP_TCP_95K_QV_4%_GOV2"
#inputFileName5 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_OVERLAP_TCP_95K_QV_5%_GOV2"
#inputFileName6 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_OVERLAP_TCP_95K_QV_6%_GOV2"
#inputFileName7 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_OVERLAP_TCP_95K_QV_7%_GOV2"
#inputFileName8 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_OVERLAP_TCP_95K_QV_8%_GOV2"
#inputFileName9 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_OVERLAP_TCP_95K_QV_9%_GOV2"
#inputFileName10 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_OVERLAP_TCP_95K_QV_10%_GOV2"
#inputFileName11 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_OVERLAP_TCP_95K_QV_15%_GOV2"
#inputFileName12 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_OVERLAP_TCP_95K_QV_20%_GOV2"
#inputFileName13 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_OVERLAP_TCP_95K_QV_30%_GOV2"
#inputFileName14 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_OVERLAP_TCP_95K_QV_40%_GOV2"
#inputFileName15 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_OVERLAP_TCP_95K_QV_50%_GOV2"

# FOR UP-QV-95K
# 1% - 100%
#inputFileName1 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_OVERLAP_UP_95K_QV_1%_GOV2"
#inputFileName2 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_OVERLAP_UP_95K_QV_2%_GOV2"
#inputFileName3 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_OVERLAP_UP_95K_QV_3%_GOV2"
#inputFileName4 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_OVERLAP_UP_95K_QV_4%_GOV2"
#inputFileName5 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_OVERLAP_UP_95K_QV_5%_GOV2"
#inputFileName6 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_OVERLAP_UP_95K_QV_6%_GOV2"
#inputFileName7 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_OVERLAP_UP_95K_QV_7%_GOV2"
#inputFileName8 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_OVERLAP_UP_95K_QV_8%_GOV2"
#inputFileName9 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_OVERLAP_UP_95K_QV_9%_GOV2"
#inputFileName10 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_OVERLAP_UP_95K_QV_10%_GOV2"
#inputFileName11 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_OVERLAP_UP_95K_QV_15%_GOV2"
#inputFileName12 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_OVERLAP_UP_95K_QV_20%_GOV2"
#inputFileName13 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_OVERLAP_UP_95K_QV_30%_GOV2"
#inputFileName14 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_OVERLAP_UP_95K_QV_40%_GOV2"
#inputFileName15 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/rawResults_OVERLAP_UP_95K_QV_50%_GOV2"


# For UP-QV:
# further evidence shows that this QV is actually build from 95K queries.
# for gov2:
#inputFileName1 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/UP_QV_results/gov2_OVERLAP/rawResults_OVERLAP_UP_QV_1%_GOV2"
#inputFileName2 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/UP_QV_results/gov2_OVERLAP/rawResults_OVERLAP_UP_QV_2%_GOV2"
#inputFileName3 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/UP_QV_results/gov2_OVERLAP/rawResults_OVERLAP_UP_QV_3%_GOV2"
#inputFileName4 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/UP_QV_results/gov2_OVERLAP/rawResults_OVERLAP_UP_QV_4%_GOV2"
#inputFileName5 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/UP_QV_results/gov2_OVERLAP/rawResults_OVERLAP_UP_QV_5%_GOV2"
#inputFileName6 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/UP_QV_results/gov2_OVERLAP/rawResults_OVERLAP_UP_QV_6%_GOV2"
#inputFileName7 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/UP_QV_results/gov2_OVERLAP/rawResults_OVERLAP_UP_QV_7%_GOV2"
#inputFileName8 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/UP_QV_results/gov2_OVERLAP/rawResults_OVERLAP_UP_QV_8%_GOV2"
#inputFileName9 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/UP_QV_results/gov2_OVERLAP/rawResults_OVERLAP_UP_QV_9%_GOV2"
#inputFileName10 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/UP_QV_results/gov2_OVERLAP/rawResults_OVERLAP_UP_QV_10%_GOV2"
#inputFileName11 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/UP_QV_results/gov2_OVERLAP/rawResults_OVERLAP_UP_QV_15%_GOV2"
#inputFileName12 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/UP_QV_results/gov2_OVERLAP/rawResults_OVERLAP_UP_QV_20%_GOV2"
#inputFileName13 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/UP_QV_results/gov2_OVERLAP/rawResults_OVERLAP_UP_QV_30%_GOV2"
#inputFileName14 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/UP_QV_results/gov2_OVERLAP/rawResults_OVERLAP_UP_QV_40%_GOV2"
#inputFileName15 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/UP_QV_results/gov2_OVERLAP/rawResults_OVERLAP_UP_QV_50%_GOV2"


# For UP:
# for gov2
#inputFileName1 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/UP_results/gov2_OVERLAP/rawResults_OVERLAP_UP_1%_GOV2_Reborn"
#inputFileName2 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/UP_results/gov2_OVERLAP/rawResults_OVERLAP_UP_2%_GOV2_Reborn"
#inputFileName3 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/UP_results/gov2_OVERLAP/rawResults_OVERLAP_UP_3%_GOV2_Reborn"
#inputFileName4 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/UP_results/gov2_OVERLAP/rawResults_OVERLAP_UP_4%_GOV2_Reborn"
#inputFileName5 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/UP_results/gov2_OVERLAP/rawResults_OVERLAP_UP_5%_GOV2_Reborn"
#inputFileName6 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/UP_results/gov2_OVERLAP/rawResults_OVERLAP_UP_6%_GOV2_Reborn"
#inputFileName7 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/UP_results/gov2_OVERLAP/rawResults_OVERLAP_UP_7%_GOV2_Reborn"
#inputFileName8 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/UP_results/gov2_OVERLAP/rawResults_OVERLAP_UP_8%_GOV2_Reborn"
#inputFileName9 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/UP_results/gov2_OVERLAP/rawResults_OVERLAP_UP_9%_GOV2_Reborn"
#inputFileName10 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/UP_results/gov2_OVERLAP/rawResults_OVERLAP_UP_10%_GOV2_Reborn"
#inputFileName11 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/UP_results/gov2_OVERLAP/rawResults_OVERLAP_UP_15%_GOV2_Reborn"
#inputFileName12 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/UP_results/gov2_OVERLAP/rawResults_OVERLAP_UP_20%_GOV2_Reborn"
#inputFileName13 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/UP_results/gov2_OVERLAP/rawResults_OVERLAP_UP_30%_GOV2_Reborn"
#inputFileName14 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/UP_results/gov2_OVERLAP/rawResults_OVERLAP_UP_40%_GOV2_Reborn"
#inputFileName15 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/UP_results/gov2_OVERLAP/rawResults_OVERLAP_UP_50%_GOV2_Reborn"
#inputFileName16 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/UP_results/gov2_OVERLAP/rawResults_OVERLAP_UP_60%_GOV2_Reborn"
#inputFileName17 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/UP_results/gov2_OVERLAP/rawResults_OVERLAP_UP_70%_GOV2_Reborn"
#inputFileName18 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/UP_results/gov2_OVERLAP/rawResults_OVERLAP_UP_80%_GOV2_Reborn"
#inputFileName19 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/UP_results/gov2_OVERLAP/rawResults_OVERLAP_UP_90%_GOV2_Reborn"
#inputFileName20 = "/home/vgc/wei/workspace/NYU_IRTK/polyIRToolkit_Wei/UP_results/gov2_OVERLAP/rawResults_OVERLAP_UP_100%_GOV2_Reborn"


# Only 15 files are active for the TCP-QV-50K configuration; entries 16-20
# belong to the 20-file configurations and stay commented out.
inputFileNameList.append(inputFileName1)
inputFileNameList.append(inputFileName2)
inputFileNameList.append(inputFileName3)
inputFileNameList.append(inputFileName4)
inputFileNameList.append(inputFileName5)
inputFileNameList.append(inputFileName6)
inputFileNameList.append(inputFileName7)
inputFileNameList.append(inputFileName8)
inputFileNameList.append(inputFileName9)
inputFileNameList.append(inputFileName10)
inputFileNameList.append(inputFileName11)
inputFileNameList.append(inputFileName12)
inputFileNameList.append(inputFileName13)
inputFileNameList.append(inputFileName14)
inputFileNameList.append(inputFileName15)
#inputFileNameList.append(inputFileName16)
#inputFileNameList.append(inputFileName17)
#inputFileNameList.append(inputFileName18)
#inputFileNameList.append(inputFileName19)
#inputFileNameList.append(inputFileName20)
counter = 1
# For each pruned-index result file: re-parse it into FRESH dictionaries and
# measure how much of the unpruned baseline top-10 (documents and postings)
# is retained under pruning.
for currentFileName in inputFileNameList:
    # key: qID
    # value: dict of topK document results -> impact placeholder
    qIDsWithTopKDocumentResultImpactDict2 = {}
    # key: qID
    # value: dict of topK document results -> # of postings contributed
    qIDsWithTopKDocumentResultNumOfPostingsContributedDict2 = {}

    # key: topK posting
    # value: a list of document results this posting serves
    topKPostingWithDocumentResultDict2 = {}
    # key: document results for TOP10
    # value: (currently NOT USED)
    top10DocumentResultDict2 = {}
    # key: top10 posting
    # value: a list of document results this posting serves
    top10PostingWithDocumentResultDict2 = {}
    # key: qID
    # value: a list of query terms that formed the current query
    qIDWithQueryTermsDict2 = {}
    getUnprunedDocumentResultsForQueries(currentFileName,NUM_OF_TOP_RESULTS_NEEDED_TO_EVALUATE,termANDTermIDDict,qIDsWithTopKDocumentResultImpactDict2,top10PostingWithDocumentResultDict2,top10DocumentResultDict2,topKPostingWithDocumentResultDict2,NUM_OF_TOP_RESULTS_CUTOFF,qidsToEvaluateDict,qIDsWithTopKDocumentResultNumOfPostingsContributedDict2,qIDWithQueryTermsDict2)

    print "OVERALL:"
    print "len(qidsToEvaluateDict):",len(qidsToEvaluateDict)
    print "QUERY_LENGTH_TO_EVALUATE:",QUERY_LENGTH_TO_EVALUATE
    print "len(top10DocumentResultDict2):",len(top10DocumentResultDict2)
    print "len(top10PostingWithDocumentResultDict2):",len(top10PostingWithDocumentResultDict2)

    # Fractions in [0, 1]; true division via "from __future__ import division".
    # NOTE(review): raises ZeroDivisionError if the baseline dicts are empty.
    percentageOfTopDocsRetained = len(set(top10DocumentResultDict).intersection(top10DocumentResultDict2)) / len(top10DocumentResultDict)
    percentageOfTopPostingsRetained = len(set(top10PostingWithDocumentResultDict).intersection(top10PostingWithDocumentResultDict2)) / len(top10PostingWithDocumentResultDict)
    print "----->",counter,percentageOfTopDocsRetained,percentageOfTopPostingsRetained
    counter += 1
# column legend for the "----->" lines printed above
print "----->",counter,"%OfResults","%OfPostings"












