from __future__ import division

# Announce the script's current purpose.  Written with parentheses so the
# same line runs under both Python 2 (parens around a single expression)
# and Python 3 (print as a function); the original Python-2-only
# `print "..."` statement is a syntax error on Python 3.
print("This program has now become the program to fix the term in the query trace problem")



# --- DISABLED legacy step (kept as a module-level string so it never runs) ---
# Phase-0 feature rewrite:
#   1. Load a term -> frequency dict from the AOL term-frequency file
#      (hard exit(1) on a duplicate term).
#   2. Load a queryID -> query-content dict from the ClueWeb09 query file.
#   3. For each line of the phase-0 training file, recompute the four
#      query-trace-frequency features (sum / avg / min / max over the query's
#      terms, using the AOL dict) and write the line with the last five
#      fields replaced: the four new features plus the original final field.
# NOTE(review): avg divides by the full query length (true division via the
# __future__ import), not by the count of terms found in the AOL dict; min
# falls back to 0 when no term is found.  Each output line ends with a
# trailing space before the newline.
'''
# load the dict
queryTermFreqFromAOLdict = {}
inputFileName = "/data5/team/weijiang/the_new_trip_of_feature_generation/final-queryTermsFreqFromAOL.txt"
inputFileHandler = open(inputFileName,"r")

for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    queryTerm = lineElements[0]
    queryTermFreqFromAOL = int( lineElements[1] )
    
    if queryTerm not in queryTermFreqFromAOLdict:
        queryTermFreqFromAOLdict[queryTerm] = queryTermFreqFromAOL
    else:
        exit(1)

inputFileHandler.close()    
print "len(queryTermFreqFromAOLdict):",len(queryTermFreqFromAOLdict)
print "queryTermFreqFromAOLdict['obama']:",queryTermFreqFromAOLdict["obama"]
print "queryTermFreqFromAOLdict['family']:",queryTermFreqFromAOLdict["family"]
print "queryTermFreqFromAOLdict['tree']:",queryTermFreqFromAOLdict["tree"]

print "queryTermFreqFromAOLdict['french']:",queryTermFreqFromAOLdict["french"]
print "queryTermFreqFromAOLdict['lick']:",queryTermFreqFromAOLdict["lick"]
print "queryTermFreqFromAOLdict['resort']:",queryTermFreqFromAOLdict["resort"]
print "queryTermFreqFromAOLdict['and']:",queryTermFreqFromAOLdict["and"]
print "queryTermFreqFromAOLdict['casino']:",queryTermFreqFromAOLdict["casino"]



queryIDAndContentDict = {}
inputFileName2 = "/data5/team/weijiang/the_new_trip_of_feature_generation/final-150Clueweb09Queries.txt"
inputFileHandler2 = open(inputFileName2,"r")
for line in inputFileHandler2.readlines():
    queryID = line.strip().split(":")[0]
    queryContent = line.strip().split(":")[1]
    if queryID not in queryIDAndContentDict:
        queryIDAndContentDict[queryID] = queryContent
        
print "len(queryIDAndContentDict):",len(queryIDAndContentDict)
inputFileHandler2.close()


outputFileName = "/data5/team/weijiang/the_new_trip_of_feature_generation/wholeTrainingFileTempleteV3_phase0.txt.input.20121023"
outputFileHandler = open(outputFileName,"w")


# assign the new term freq from the AOL to the training file phase0
inputFileName1 = "/data5/team/weijiang/the_new_trip_of_feature_generation/wholeTrainingFileTempleteV3_phase0.txt.input"
inputFileHandler1 = open(inputFileName1,"r")
for line in inputFileHandler1.readlines():
    lineElements = line.strip().split(" ")
    queryID = lineElements[0]
    relevenceScore = lineElements[-1]
    
    sum_term_trace_freq = lineElements[-5]
    avg_term_trace_freq = lineElements[-4]
    min_term_trace_freq = lineElements[-3]
    max_term_trace_freq = lineElements[-2]
    
    queryContent = queryIDAndContentDict[queryID]
    queryTerms = queryContent.split(" ")
    queryLength = len(queryTerms)
    
    # print queryID,sum_term_trace_freq,avg_term_trace_freq,min_term_trace_freq,max_term_trace_freq,relevenceScore,"test query terms:",queryTerms
    
    # Let's compute the following:
    # sum_term_trace_freq_new 
    # avg_term_trace_freq_new 
    # min_term_trace_freq_new
    # max_term_trace_freq_new
    
    varyLargeNumber = 999999999999999999999
    varySmallNumber = -999999999999999999999
    
    sum_term_trace_freq_new = 0 
    avg_term_trace_freq_new = 0
    min_term_trace_freq_new = varyLargeNumber
    max_term_trace_freq_new = varySmallNumber
    
    
    for queryTerm in queryTerms:
        if queryTerm in queryTermFreqFromAOLdict:
            sum_term_trace_freq_new += queryTermFreqFromAOLdict[queryTerm]
            if queryTermFreqFromAOLdict[queryTerm] < min_term_trace_freq_new:
                min_term_trace_freq_new = queryTermFreqFromAOLdict[queryTerm]
                
            if queryTermFreqFromAOLdict[queryTerm] > max_term_trace_freq_new:
                max_term_trace_freq_new = queryTermFreqFromAOLdict[queryTerm]
                
    # handle special case for min_term_trace_freq_new
    if min_term_trace_freq_new == varyLargeNumber:
        min_term_trace_freq_new = 0
        
    # compute the avg_term_trace_freq_new
    avg_term_trace_freq_new = sum_term_trace_freq_new / queryLength
    
    newOutputLine = ""
    # Let's construct the new output line
    for lineElement in lineElements[:-5]:
        newOutputLine += lineElement + " "
        
    # add the new fix feature for 
    # sum_term_trace_freq_new
    # avg_term_trace_freq_new
    # min_term_trace_freq_new 
    # max_term_trace_freq_new
    
    newOutputLine += str(sum_term_trace_freq_new) + " " + str(avg_term_trace_freq_new) + " " + str(min_term_trace_freq_new) + " " + str(max_term_trace_freq_new) + " "
    
    newOutputLine += lineElements[-1] + " "
    
    outputFileHandler.write(newOutputLine + "\n")
    
    
    
inputFileHandler1.close()
outputFileHandler.close()
'''

# --- DISABLED legacy step (string literal, never executed) ---
# Prints space-delimited word counts (token count minus one) for two specific
# judged ClueWeb09 plain-text pages.  NOTE(review): neither file handle is
# closed; also note max_term_trace_freq_new-style "len - 1" counting assumes
# a trailing separator — TODO confirm against the plain-text format.
'''
inputFileName1 = "/data4/team/weijiang/human_judge_web_pages_plain_text_only_words/enwp01/clueweb09-enwp01-75-20596_78732_plain_text.txt"
inputFileNameHandler1 = open(inputFileName1,"r")
content1 = inputFileNameHandler1.read()
print "len(content1)",len(content1.split(" "))-1

inputFileName2 = "/data4/team/weijiang/human_judge_web_pages_plain_text_only_words/enwp01/clueweb09-enwp01-31-11362_76882_plain_text.txt"
inputFileNameHandler2 = open(inputFileName2,"r")
content2 = inputFileNameHandler2.read()
print "len(content2)",len(content2.split(" "))-1
'''

# --- DISABLED legacy step (string literal, never executed) ---
# Sums the per-document length column (second field) of the judged-webpage
# length file and prints the average.  NOTE(review): the embedded
# `from __future__ import division` would be a SyntaxError if this block were
# ever un-commented mid-file (future imports must appear at the top of the
# module); the live import at L1 already provides true division.  The file
# handle is never closed, and `index` leaks out of the loop to compute the
# line count as index+1.
'''
from __future__ import division

inputFileName = "/data5/team/weijiang/the_new_trip_of_feature_generation/final-all_human_judged_webpage_with_doc_length_in_words.txt"
inputFileHandler = open(inputFileName,"r")

sumDocLength = 0

for index,line in enumerate( inputFileHandler.readlines() ):
    currentDocLength = int( line.strip().split(" ")[1] )
    sumDocLength += currentDocLength

avgDocLength = sumDocLength / (index+1)
print "index:",index
print "avgDocLength:",avgDocLength
'''
 
# --- DISABLED legacy step (string literal, never executed) ---
# One-off check: prints the word count (token count minus one) of a single
# plain-text page.  The file handle is never closed.
'''
inputFileName = "/data4/team/weijiang/human_judge_web_pages_plain_text_only_words/en0000/clueweb09-en0000-00-04866_21_plain_text.txt"
inputFileHandler = open(inputFileName,"r")
print len(inputFileHandler.read().split(" "))-1
'''

# --- DISABLED legacy step (string literal, never executed) ---
# Builds the "<trecID> <word-count>" table: for each page path listed in the
# input file, reads the page, derives the TREC ID from the filename (last
# path component, text before the first underscore), counts words as
# token count minus one, and appends one line per page to the output file.
# Per-page handles are closed inside the loop; the outer handles are closed
# at the end.
'''
print "do some minor stuff, 3"

inputFileName = "/data5/team/weijiang/the_new_trip_of_feature_generation/final-all_human_judged_webpage_paths.txt"
inputFileHandler = open(inputFileName,"r")

outputFileName = "/data5/team/weijiang/the_new_trip_of_feature_generation/final-all_human_judged_webpage_with_doc_length_in_words.txt"
outputFileHandler = open(outputFileName,"w")

for index,line in enumerate(inputFileHandler.readlines()):
    print index
    tempInputFileName = line.strip()
    trecID = tempInputFileName.split("/")[-1].split("_")[0]
    tempInputFileHandler = open(tempInputFileName,"r")
    tempPageContent = tempInputFileHandler.read()
    tempInputFileHandler.close()
    tempPageDocWordsLength = len(tempPageContent.split(" ")) - 1
    outputFileHandler.write(trecID + " " + str(tempPageDocWordsLength) + "\n")
 

inputFileHandler.close()
outputFileHandler.close()
''' 