# Output File Analysis:
# The output file freqOfTermsInQueries.txt contains 38871 unique query terms together with their frequencies in the query trace.
# Here "query term frequency" means the total number of times a specific term appears across the whole query trace.
# It can be used as a machine-learned feature.

# Program inputs:
# (1) inputFileName1 = "/home/obukai/workspace/polyIRToolkit/polyIRIndexer/04-06.topics.701-850.polyIRTKCompatibleMode"
# (2) inputFileName2 = "/home/obukai/workspace/polyIRToolkit/polyIRIndexer/06.efficiency_topics.all"

# Program outputs:
# (1) outputFileName = "/data5/team/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/freqOfTermsInQueries.txt"

from struct import *
import sys

print "Begin:"
# the input files should have 2
# one for queries that are human judged, and one for queries that are NOT just for effeciency task for the year 2006
# please add the special char filer for all our project

queryTermFreqDict = {}

inputFileNameList = []

inputFileName1 = "/home/obukai/workspace/polyIRToolkit/polyIRIndexer/04-06.topics.701-850.polyIRTKCompatibleMode"
inputFileName2 = "/home/obukai/workspace/polyIRToolkit/polyIRIndexer/06.efficiency_topics.all"

# This file is used just for testing the filter to normalize the query terms, the only thing is to be consistent with what the polyIRToolkit is originally doing.
# inputFileName3 = "/home/obukai/workspace/polyIRToolkit/polyIRIndexer/difficultQueries.test"

# current output path
outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/freqOfTermsInQueries.txt"

# old output path
# outputFileName = "/data5/team/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/freqOfTermsInQueries.txt"
outputFileHandler = open(outputFileName,"w")


inputFileNameList.append(inputFileName1)
inputFileNameList.append(inputFileName2)
#inputFileNameList.append(inputFileName3)


# Count how often each normalized query term occurs across all input traces.
# Each input line is expected to look like "<topic id> : <query text>".
for inputFileName in inputFileNameList:
    # "with" guarantees every input file is closed; the original code only
    # ever closed the handle of the last file it opened.
    with open(inputFileName, "r") as inputFileHandler:
        # Iterate the file lazily instead of readlines() to avoid loading
        # the whole trace into memory at once.
        for line in inputFileHandler:
            line = line.strip()
            # Skip blank or malformed lines instead of crashing on [1].
            if ":" not in line:
                continue
            # Query text is the segment after the first colon (and before a
            # second colon, if any) -- same as split(":")[1] originally.
            queryText = line.split(":")[1]

            # Normalize in a single O(n) pass: any character that is not an
            # ASCII digit, ASCII letter, or space becomes a space.  Same
            # character classes as the original ord()-range checks, without
            # the O(n^2) slice-and-rebuild per replaced character.
            cleanedText = "".join(
                ch if ("0" <= ch <= "9"
                       or "A" <= ch <= "Z"
                       or "a" <= ch <= "z"
                       or ch == " ") else " "
                for ch in queryText
            )

            # Lowercase each surviving token and tally it.
            for queryTerm in cleanedText.split(" "):
                queryTerm = queryTerm.strip()
                if queryTerm != "":
                    queryTermLower = queryTerm.lower()
                    queryTermFreqDict[queryTermLower] = queryTermFreqDict.get(queryTermLower, 0) + 1


            
# print "queryTermFreqDict:",queryTermFreqDict
overallQueryTermList = queryTermFreqDict.keys()
overallQueryTermList.sort()

for queryTerm in overallQueryTermList:
    outputFileHandler.write(queryTerm + " " + str( queryTermFreqDict[queryTerm] ) + "\n")
    print queryTerm,queryTermFreqDict[queryTerm]


inputFileHandler.close()
outputFileHandler.close()
print "End..."

