from __future__ import division
from operator import itemgetter, attrgetter
import os
import sys
import math


# Announce startup: the file scans below can take a while on the full data.
print "Program Begins..."
# step2
def computeAverageTermRelatedIntersectionSize():
    outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/highFreqTermsWithTheirAverageIntersectionSizeFromMachineGeneratedQueriesAndMetaInfo3"
    outputFileHandler = open(outputFileName,"w")
    
    ################################################################################################################################
    # key: int
    # queryID in int format
    # value: int
    # query length (duplicate terms NOT counted)
    queryIDsWIthTheirLengthDict = {}
    # for human generated queries
    # inputQueryFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/100KQueries_head_95K"
    # for machine generated queries
    inputQueryFileName = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/QueriesWhichContainHighFreqTermAmong95KMachineGenerated_IRTK_Compatible_Format3"
    inputQueryHandler = open(inputQueryFileName,"r")
    
    for line in inputQueryHandler.readlines():
        elements = line.strip().split(":")
        queryID = int(elements[0])
        
        data = elements[1]
        data = data.lower()
        
        for i in range(0,len(data)):
            # print "data[i]:",ord(data[i])
            if not ( (ord(data[i]) >= 48 and ord(data[i]) < 58) or (ord(data[i]) >= 65 and ord(data[i]) < 91) or (ord(data[i]) >= 97 and ord(data[i]) < 123) or (ord(data[i]) == 32) ):
                # Just replace them with a space.
                data = data[:i] + " " + data[i+1:]
        
        queryContent = data
        
        queryContentElements = queryContent.strip().split(" ")
        currentQueryTermDict = {}
        for element in queryContentElements:
            if element.strip() != "":
                if element.strip() not in currentQueryTermDict:
                    currentQueryTermDict[element.strip()] = 1
        
        # print "----->",queryID,len(currentQueryTermDict)
        
        if queryID not in queryIDsWIthTheirLengthDict:
            queryIDsWIthTheirLengthDict[queryID] = len(currentQueryTermDict)
    
    totalQueryLength = 0
    avgQueryLength = 0
    for queryID in queryIDsWIthTheirLengthDict:
        totalQueryLength += queryIDsWIthTheirLengthDict[queryID]
    avgQueryLength = totalQueryLength / len(queryIDsWIthTheirLengthDict)
    print "len(queryIDsWIthTheirLengthDict):",len(queryIDsWIthTheirLengthDict)
    # print "--->queryIDsWIthTheirLengthDict[3]:",queryIDsWIthTheirLengthDict[3]
    # print "--->queryIDsWIthTheirLengthDict[4]:",queryIDsWIthTheirLengthDict[4]
    print "--->avgQueryLength:",avgQueryLength
    inputQueryHandler.close()
    ################################################################################################################################
    
    
    ################################################################################################################################
    queryTermsWithLengthOfInvertedListDict = {}
    inputFileName0 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/queryTermsWithTheirLengthsOfInvertedList"
    inputFileHandler = open(inputFileName0,"r")
    for line in inputFileHandler.readlines():
        lineElements = line.strip().split(" ")
        currentTerm = lineElements[0]
        currentTermLengthOfInvertedList = int(lineElements[1]) 
        if currentTerm not in queryTermsWithLengthOfInvertedListDict:
            queryTermsWithLengthOfInvertedListDict[currentTerm] = currentTermLengthOfInvertedList
    print "len(queryTermsWithLengthOfInvertedListDict):",len(queryTermsWithLengthOfInvertedListDict)
    print "--->queryTermsWithLengthOfInvertedListDict['0']:",queryTermsWithLengthOfInvertedListDict['0']
    inputFileHandler.close()
    ################################################################################################################################
    
    ################################################################################################################################
    # key: int
    # qid
    # value: int
    # intersection size
    qidWithIntersectionDict = {}
    # input file format
    # column0: qid
    # column1: intersection size
    # for human generated queries
    # inputFileName1 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/qidANDIntersectionSizeMappingTable"
    # for machine generated queries
    inputFileName1 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/machineGeneratedQIDsANDIntersectionSizeMappingTable3"
    inputFileHandler = open(inputFileName1,"r")
    for line in inputFileHandler.readlines():
        lineElements = line.strip().split(" ")
        currentQID = int(lineElements[0])
        currentQIDIntersectionSize = int(lineElements[1])
        if currentQID not in qidWithIntersectionDict:
            qidWithIntersectionDict[currentQID] = currentQIDIntersectionSize
        else:
            print "system error, mark1"
            exit(1)

    print "len(qidWithIntersectionDict):",len(qidWithIntersectionDict)
    # for DEBUG
    # print "--->qidWithIntersectionDict[3]:",qidWithIntersectionDict[3]
    # print "--->qidWithIntersectionDict[4]:",qidWithIntersectionDict[4]
    # print "--->qidWithIntersectionDict[5]:",qidWithIntersectionDict[5]
    # print "--->qidWithIntersectionDict[7]:",qidWithIntersectionDict[7]
    # print "--->qidWithIntersectionDict[8]:",qidWithIntersectionDict[8]
    inputFileHandler.close()
    ################################################################################################################################
    
    # for human generated queries
    # inputFileName2 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/highFreqTermsWithRelatedQIDs
    # for machine generated queries"
    inputFileName2 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/highFreqTermsWithRelatedQIDsFromMachineGeneratedQueries3_20130818_using_new_fake_queryLog"
    inputFileHandler = open(inputFileName2,"r")
    
    for line in inputFileHandler.readlines():
        lineElements = line.strip().split(" ")
        currentTerm = lineElements[0]
        currentTermNumOfRelatedQIDs = int( lineElements[1] )
        
        assert currentTermNumOfRelatedQIDs == len(lineElements[2:])
        totalIntersectionSizeAmongAllQIDs = 0
        totalQueryLengthAmongAllQIDs = 0
        averageIntersectionSizeForCurrentTerm = 0
        avgrageNumberOfTermsInQuery = 0
        for currentQID in lineElements[2:]:
            currentQIDInIntFormat = int(currentQID)
            totalIntersectionSizeAmongAllQIDs += qidWithIntersectionDict[currentQIDInIntFormat]
            totalQueryLengthAmongAllQIDs += queryIDsWIthTheirLengthDict[currentQIDInIntFormat]
            # for DEBUG
            # print "--->currentQIDInIntFormat:",currentQIDInIntFormat
            # print "--->currentQIDQueryLength:",queryIDsWIthTheirLengthDict[currentQIDInIntFormat]
            # print "--->totalQueryLengthAmongAllQIDs:",totalQueryLengthAmongAllQIDs            
            # print "--->currentQIDIntersectionSize:",qidWithIntersectionDict[currentQIDInIntFormat]
            # print "--->totalIntersectionSizeAmongAllQIDs:",totalIntersectionSizeAmongAllQIDs
            # print
            
        averageIntersectionSizeForCurrentTerm = int(totalIntersectionSizeAmongAllQIDs / currentTermNumOfRelatedQIDs)
        avgrageNumberOfTermsInQuery = float( totalQueryLengthAmongAllQIDs / currentTermNumOfRelatedQIDs )
        # print currentTerm,currentTermNumOfRelatedQIDs,str(averageIntersectionSizeForCurrentTerm)
        outputLine = currentTerm + " " + str( queryTermsWithLengthOfInvertedListDict[currentTerm] ) + " " + str(averageIntersectionSizeForCurrentTerm) + " " + str(currentTermNumOfRelatedQIDs) + " " + str(avgrageNumberOfTermsInQuery) + "\n"
        outputFileHandler.write(outputLine)
    
    print "inputQueryFileName:",inputQueryFileName
    print "inputFileName0:",inputFileName0
    print "inputFileName1:",inputFileName1
    print "inputFileName2:",inputFileName2
    print "outputFileName:",outputFileName
    
    inputFileHandler.close()
    outputFileHandler.close()
    
    
# step1_1
# Updated by Wei 2013/08/13 morning at school
def outputHighFreqTermWithSetOfRelatedQIDs():
    # step1:
    # purpose: get the set of terms which have a high freq in training queries
    outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/highFreqTermsWithRelatedQIDs"
    outputFileHandler = open(outputFileName,"w")
    
    HIGH_FREQ_THRESHOLD = 40 # >= this freq, I care. smaller than this threshold, I just don't care
    # key: term
    # value: a list containing all the queryIDs that I need to issue
    consideredTermDict = {} # :)
    
    inputFileName1 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/effectToMake1StFactor/freqOfFreqInQueries_0_1_95K_95%_with_query_terms_appended_sortedByFreqR"
    inputFileHandler = open(inputFileName1,"r")
    for line in inputFileHandler.readlines():
        lineElements = line.strip().split(" ")
        freqOfFreq = int( lineElements[0] )
        if freqOfFreq < HIGH_FREQ_THRESHOLD:
            # Just ignore those terms
            pass
        else:
            # where freqOfFreq >= 40:
            numOfTermsBelongingToThisGroup = int( lineElements[1] )
            if numOfTermsBelongingToThisGroup != 0:
                # start reading those terms
                for element in lineElements[2:]:
                    if element.strip() not in consideredTermDict:
                        # for DEBUG only
                        '''
                        if element.strip() == "0":
                            print "freqOfFreq:",freqOfFreq
                        '''
                        consideredTermDict[element.strip()] = []
                    else:
                        # print "consideredTermDict:",consideredTermDict
                        print "len(consideredTermDict):",len(consideredTermDict)
                        print "term:",element.strip()
                        print "freqOfFreq:",freqOfFreq
                        print "duplicated terms"
                        print "SYSTEM ERROR, say"
                        exit(1)
    
    print "len(consideredTermDict):",len(consideredTermDict)
    # print "consideredTermDict:",consideredTermDict
    inputFileHandler.close()
    
    # step2:
    # purpose: get the associate list of qids for a specific term in order to compute the intersection size
    # main logic: scan the training queries AGAIN
    involvedQueryIDDict = {}
    inputFileName2 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/100KQueries_0_1_95%"
    inputFileHandler = open(inputFileName2,"r")
    queryTermList = []
    for line in inputFileHandler.readlines():
            # print "line:",line.strip()
            queryIDInIntFormat = int( line.strip().split(":")[0] )
            queryTermList = line.strip().split(":")[1].strip().split(" ")
            # print "queryTermList:",queryTermList
            
            data = ""
            for element in queryTermList:
                data += element + " "
            
            # print "data(old):",data
            # print "original data:",data
            
            for i in range(0,len(data)):
                # print "data[i]:",ord(data[i])
                if not ( (ord(data[i]) >= 48 and ord(data[i]) < 58) or (ord(data[i]) >= 65 and ord(data[i]) < 91) or (ord(data[i]) >= 97 and ord(data[i]) < 123) or (ord(data[i]) == 32) ):
                    # Just replace them with a space.
                    data = data[:i] + " " + data[i+1:]
        
            # print "data(new):",data
            
            currentNewQueryTermList = data.strip().split(" ")
            currentNewQueryTermDict = {}
            
            for queryTerm in currentNewQueryTermList:
                if queryTerm.strip() != "":
                    queryTermLower = queryTerm.lower()
                    if queryTermLower not in currentNewQueryTermDict:
                        currentNewQueryTermDict[queryTermLower] = 1
            
            for queryTerm in currentNewQueryTermDict:
                if queryTerm in consideredTermDict:
                    consideredTermDict[queryTerm].append(queryIDInIntFormat)
                    if queryIDInIntFormat not in involvedQueryIDDict:
                        involvedQueryIDDict[queryIDInIntFormat] = 1
    
    consideredTermList = []
    consideredTermList = consideredTermDict.keys()
    consideredTermList.sort(cmp=None, key=None, reverse=False)
    
    for term in consideredTermList:
        # for DEBUG
        # print term,len( consideredTermDict[term] )
        outputLine = ""
        outputLine = term + " " + str( len(consideredTermDict[term]) ) + " "
        # sort the qids in increasing order
        consideredTermDict[term].sort(cmp=None, key=None, reverse=False)
        for docID in consideredTermDict[term]:
            outputLine += str(docID) + " "
        outputLine.strip()
        outputLine += "\n"
        outputFileHandler.write(outputLine)
    # print "for DEBUG"
    # print 'consideredTermDict["yellowstone"]:',consideredTermDict["yellowstone"]
    # print "len(involvedQueryIDDict):",len(involvedQueryIDDict)
    
    inputFileHandler.close()
    outputFileHandler.close()
    print "inputFileName1:",inputFileName1
    print "inputFileName2:",inputFileName2
    print "outputFileName:",outputFileName

# step1_2
# Updated by Wei 2013/08/14 very early morning at school
def outputRelatedQIDsGivenASetOfHighFreqTerms():

    outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/highFreqTermsWithRelatedQIDsFromMachineGeneratedQueries3_20130818_using_new_fake_queryLog"
    outputFileHandler = open(outputFileName,"w")

    # step1:
    # purpose: get the set of terms which needs to get the set of related queryIDs    
    # key: term
    # value: a list containing all the queryIDs that I need to issue
    consideredTermDict = {} # :)
    
    inputFileName1 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/highFreqTermsWithTheirAverageIntersectionSizeAndMetaInfo"
    inputFileHandler = open(inputFileName1,"r")
    for line in inputFileHandler.readlines():
        lineElements = line.strip().split(" ")
        currentTerm = lineElements[0]
        if currentTerm not in consideredTermDict:
            consideredTermDict[currentTerm] = []
        else:
            print "Duplicated Term:",currentTerm
            exit(1)
    
    print "len(consideredTermDict):",len(consideredTermDict)
    # print "consideredTermDict:",consideredTermDict
    inputFileHandler.close()
    
    # step2:
    # purpose: get the associate list of qids for a specific term in order to compute the intersection size
    # main logic: scan the queries
    involvedQueryIDDict = {}
    # option1
    inputFileName2 = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/QueriesWhichContainHighFreqTermAmong95KMachineGenerated3_20130818_using_new_fake_queryLog"
    # option2
    # inputFileName2 = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/QueriesWhichContainHighFreqTermAmong95KMachineGenerated2"
    inputFileHandler = open(inputFileName2,"r")
    queryTermList = []
    for line in inputFileHandler.readlines():
            # print "line:",line.strip()
            queryIDInIntFormat = int( line.strip().split(":")[1] )
            queryTermList = line.strip().split(":")[2].strip().split(" ")
            # print "queryTermList:",queryTermList
            
            data = ""
            for element in queryTermList:
                data += element + " "
            
            # print "data(old):",data
            # print "original data:",data
            
            for i in range(0,len(data)):
                # print "data[i]:",ord(data[i])
                if not ( (ord(data[i]) >= 48 and ord(data[i]) < 58) or (ord(data[i]) >= 65 and ord(data[i]) < 91) or (ord(data[i]) >= 97 and ord(data[i]) < 123) or (ord(data[i]) == 32) ):
                    # Just replace them with a space.
                    data = data[:i] + " " + data[i+1:]
        
            # print "data(new):",data
            
            currentNewQueryTermList = data.strip().split(" ")
            currentNewQueryTermDict = {}
            
            for queryTerm in currentNewQueryTermList:
                if queryTerm.strip() != "":
                    queryTermLower = queryTerm.lower()
                    if queryTermLower not in currentNewQueryTermDict:
                        currentNewQueryTermDict[queryTermLower] = 1
            
            for queryTerm in currentNewQueryTermDict:
                if queryTerm in consideredTermDict:
                    consideredTermDict[queryTerm].append(queryIDInIntFormat)
                    if queryIDInIntFormat not in involvedQueryIDDict:
                        involvedQueryIDDict[queryIDInIntFormat] = 1
    
    consideredTermList = []
    consideredTermList = consideredTermDict.keys()
    consideredTermList.sort(cmp=None, key=None, reverse=False)
    
    for term in consideredTermList:
        # for DEBUG
        # print term,len( consideredTermDict[term] )
        outputLine = ""
        outputLine = term + " " + str( len(consideredTermDict[term]) ) + " "
        # sort the qids in increasing order
        consideredTermDict[term].sort(cmp=None, key=None, reverse=False)
        for docID in consideredTermDict[term]:
            outputLine += str(docID) + " "
        outputLine.strip()
        outputLine += "\n"
        outputFileHandler.write(outputLine)
    # print "for DEBUG"
    # print 'consideredTermDict["yellowstone"]:',consideredTermDict["yellowstone"]
    print "len(involvedQueryIDDict):",len(involvedQueryIDDict)
    
    inputFileHandler.close()
    outputFileHandler.close()
    print "inputFileName1:",inputFileName1
    print "inputFileName2:",inputFileName2
    print "outputFileName:",outputFileName


# Updated on 2013/08/18 afternoon
# step1_1 and step1_2 are each run independently (uncomment the one needed)
# step1_1:
# This is for the human generated query log
# outputHighFreqTermWithSetOfRelatedQIDs()
# step1_2:
# This is the same function but for the machine generated query log
# The difference is that the set of terms have already been given
# outputRelatedQIDsGivenASetOfHighFreqTerms()

# NOTE: the following triple-quoted string is a DISABLED one-off helper,
# kept for reference; it converts the machine-generated query log into the
# IRTK-compatible "qid:query" format consumed by step2.
'''
# Updated by Wei on 20130818 afternoon
# Some middle steps:
# This logic is responsible for generating the IRTK compatible query log format
inputFileName = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/QueriesWhichContainHighFreqTermAmong95KMachineGenerated3_20130818_using_new_fake_queryLog"
inputFileHandler = open(inputFileName,"r")

outputFileName = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/QueriesWhichContainHighFreqTermAmong95KMachineGenerated_IRTK_Compatible_Format3"
outputFileHandler = open(outputFileName,"w")

for line in inputFileHandler.readlines():
    lineElements = line.strip().split(":")
    newQueryID = int(lineElements[1])
    queryContent =  lineElements[2]
    outputLine = str(newQueryID) + ":" + queryContent + "\n" 
    outputFileHandler.write(outputLine)
inputFileHandler.close()
outputFileHandler.close()
print "inputFileName:",inputFileName
print "outputFileName:",outputFileName
'''

# NOTE: another DISABLED one-off helper, kept for reference; it parses the
# intersection-run output into the (qid, intersectionSize) mapping table
# consumed by step2.
'''
# small logic of building the (qid,intersectionSizeMappingTable)
inputFileName = "/home/diaosi/web-search-engine-wei/polyIRIndexer/intersectionResult_machine_generated_queries_whole3"
inputFileHandler = open(inputFileName,"r")

outputFileName = "/home/diaosi/web-search-engine-wei/polyIRIndexer//machineGeneratedQIDsANDIntersectionSizeMappingTable3"
outputFileHandler = open(outputFileName,"w")

currentQID = 0
currentQueryIntersectionSize = 0
qIDFoundFlag = False
queryIntersectionSizeFoundFlag = False
for line in inputFileHandler.readlines():
    if line.startswith("qid:"):
        currentQID = int( line.strip().split(" ")[1] )
        qIDFoundFlag =True
        # for DEBUG
        # print currentQID
        # print line.strip()
    
    if line.startswith("Showing"):
        currentQueryIntersectionSize = int(line.strip().split(" ")[5][:-1])
        queryIntersectionSizeFoundFlag = True
        # for DEBUG
        # print currentQueryIntersectionSize
        # print line.strip()
        # print
    
    if qIDFoundFlag and queryIntersectionSizeFoundFlag:
        outputFileHandler.write(str(currentQID) + " " + str(currentQueryIntersectionSize) + "\n")
        qIDFoundFlag = False
        queryIntersectionSizeFoundFlag = False
    
print "inputFileName:",inputFileName
print "outputFileName:",outputFileName
inputFileHandler.close()
outputFileHandler.close()
'''


# step2: the only stage currently enabled -- aggregate per-term statistics.
computeAverageTermRelatedIntersectionSize()
print "Program Ends."











