# Updated by Wei 2013/07/27 afternoon at school
# I need to handle a relatively large training size located in: 
# /data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/Training_Set_2013_07_19.train

# Updated by Wei 2013/07/13 night
# adding the term_freq_in_queries into the main training file
# the feature term_freq_in_queries should be computed based on the head95K queries

# Some notes:
# Updated by Wei 2013/06/10
# Use for adding the term_freq_in_queries into the main training file

# Some notes:
# Updated by Wei 2013/02/21

# Note: I will directly modify the paths BUT not here
# Program inputs:
# option1 for the machine pangolin
# (1) inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/Posting_Oriented_Training_Dataset_2013_01_10.train"
# (2) inputFileName2 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/freqOfTermsInQueries.txt"

# option2 for the machine dodo
# (1) inputFileName = "/home/diaosi/gov2ClearYourMindAndDoItAgain/Posting_Oriented_Training_Dataset_2013_02_21.smallTest.train"
# (2) inputFileName2 = "/home/diaosi/gov2ClearYourMindAndDoItAgain/freqOfTermsInQueries.txt"

# Program outputs:
# option1 for the machine pangolin
# (1) outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/Posting_Oriented_Training_Dataset_2013_01_10.train_tfqAdded"

# option2 for the machine dodo
# (1) outputFileName = "/home/diaosi/gov2ClearYourMindAndDoItAgain/Posting_Oriented_Training_Dataset_2013_02_21.smallTest.train_tfqAdded"

# Program purposes:
# The program takes one training file as input and adds the feature term_freq_in_queries to every training line, then outputs the new training file.

print "Program begins..."
TOTAL_NUMBER_OF_TRAINING_POSTING_CURRENTLY_RECORDED = 0
# variables
termANDFreqInQueryTraceDict = {}

# input files
# inputFileName = "/home/diaosi/gov2ClearYourMindAndDoItAgain/Posting_Oriented_Training_Dataset_2013_06_09.smallTest.Combine.train"
# inputFileName = "/home/diaosi/gov2ClearYourMindAndDoItAgain/Posting_Oriented_Training_Dataset_2013_02_22.smallTest.train"
# inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/Training_Set_2013_07_15.train"
# inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/Training_Set_2013_07_19.train"
# inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/MLRelated/Model15/TrainingSetForFixingTheMissingTOPKResults20130715"
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/MLRelated/Model17/TrainingSetForTOPKMissingResults20130719"
inputFileHandler = open(inputFileName,"r")

# inputFileName2 = ""
inputFileName2 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/freqOfTermsIn100KQueries_head_95K"
inputFileHandler2 = open(inputFileName2,"r")

# output files
# outputFileName = "/home/diaosi/gov2ClearYourMindAndDoItAgain/Posting_Oriented_Training_Dataset_2013_06_09.smallTest.Combine.train_tfqAdded"
# outputFileName = "/home/diaosi/gov2ClearYourMindAndDoItAgain/Posting_Oriented_Training_Dataset_2013_02_22.smallTest.train_tfqAdded"
# outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/Training_Set_2013_07_15_tfqAdded.train"
# outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/Training_Set_2013_07_19_tfqAdded.train"
outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/MLRelated/Model17/TrainingSetForTOPKMissingResults20130719_tfqAdded.train"
outputFileHandler = open(outputFileName,"w")

# Switch ON/OFF for headline
infoHeadLine = inputFileHandler.readline()
newInfoHeadLine = infoHeadLine.strip() + " " + "term_freq_in_training_head95K_queries" + "\n"
outputFileHandler.write(newInfoHeadLine)

for line in inputFileHandler2.readlines():
    lineElements = line.strip().split(" ")
    term = lineElements[0]
    freqInQueries = int(lineElements[1])
    if term not in termANDFreqInQueryTraceDict:
        termANDFreqInQueryTraceDict[term] = freqInQueries
    else:
        print "system error"
        exit()

print "len(termANDFreqInQueryTraceDict):",len(termANDFreqInQueryTraceDict)

for line in inputFileHandler.readlines():
    termLookingFor = line.strip().split()[4]
    newOutputLine = ""
    if termLookingFor in termANDFreqInQueryTraceDict:
        newOutputLine = line.strip() + " " + str( termANDFreqInQueryTraceDict[termLookingFor] ) + "\n"
        outputFileHandler.write(newOutputLine)
        TOTAL_NUMBER_OF_TRAINING_POSTING_CURRENTLY_RECORDED += 1
    else:
        print "can NOT find the term:",termLookingFor
        exit()
        
        
        
inputFileHandler.close()
outputFileHandler.close()
print "Overall Processing Statistics:"
print "TOTAL_NUMBER_OF_TRAINING_POSTING_CURRENTLY_RECORDED:",TOTAL_NUMBER_OF_TRAINING_POSTING_CURRENTLY_RECORDED
print "inputFileName:",inputFileName
print "inputFileName2:",inputFileName2
print "outputFileName:",outputFileName
print "Program ends."



