from __future__ import division
from operator import itemgetter, attrgetter
import os
import sys
import math

def multiply(a, b):
    """Return a * b computed by repeated addition.

    Demonstration helper: adds b to an accumulator |a| times, then
    restores the sign of a. The original version used Python 2-only
    `print` statement syntax and silently returned 0 for negative a
    (because range(0, a) is empty); both are fixed here.

    Args:
        a: integer multiplier (may be negative or zero).
        b: value to add repeatedly (any type supporting + and unary -).

    Returns:
        The product a * b.
    """
    # %-formatting prints the exact same text under Python 2 and 3,
    # unlike `print(x, y)` which prints a tuple under Python 2.
    print("Will compute %s times %s" % (a, b))
    total = 0
    # Iterate |a| times so negative multipliers are handled too.
    for _ in range(abs(a)):
        total = total + b
    # Repeated addition gives |a| * b; negate when a < 0.
    return -total if a < 0 else total

def hello():
    """Print a greeting to stdout; returns None.

    Uses the parenthesized call form, which prints identically under
    Python 2 (single parenthesized expression) and Python 3, instead of
    the Python 2-only `print` statement syntax.
    """
    print("hello from Python")

'''
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/Posting_Oriented_Training_Dataset_2013_01_10.train_tfqAdded_labelsAdded_tail_500K_weiAllPreparedFeaturesAdded_fixed_20130116"
inputFileHandler = open(inputFileName,"r")
infoHeadLine = inputFileHandler.readline()

outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/Posting_Oriented_Training_Dataset_2013_01_10.train_tfqAdded_labelsAdded_tail_500K_weiAllPreparedFeaturesAdded_fixed_20130116_sorted_by_rank_in_list"
outputFileHandler = open(outputFileName,"w")
outputFileHandler.write(infoHeadLine)

allLinesList = []

for line in inputFileHandler.readlines():
    currentLineElements = line.strip().split(" ")
    modifiedCurrentLineElementsList = []
    modifiedCurrentLineElementsList.append( int( currentLineElements[0] ) )
    modifiedCurrentLineElementsList += currentLineElements[1:]
    # print "modifiedCurrentLineElementsList:",modifiedCurrentLineElementsList
    allLinesList.append(modifiedCurrentLineElementsList)

print "len(allLinesList):",len(allLinesList)

# make sure using the same sorting method
allLinesList.sort(cmp=None, key=itemgetter(21), reverse=False)
# allLinesList.sort(cmp=None, key=itemgetter(1), reverse=False)
# allLinesList.sort(cmp=None, key=itemgetter(0), reverse=False)

for lineElements in allLinesList:
    outputLine = ""
    outputLine += str( lineElements[0] ) + " "
    for element in lineElements[1:]:
        outputLine += element + " "
    outputLine = outputLine.strip() + "\n"
    outputFileHandler.write( outputLine )

print "OK"
inputFileHandler.close()
outputFileHandler.close()
'''

'''
inputDataSourceFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/rawTermInvertedIndexOutput_with_trecID_added.txt"
inputDataSourceFileHandler = open(inputDataSourceFileName,"r")
inputDataSourceFileHandler.seek(115362260628)

currentLine = inputDataSourceFileHandler.readline()
while currentLine.strip().split(" ")[0] != "GX019-35-1502414":
    # print currentLine.strip().split(" ")[0]
    currentLine = inputDataSourceFileHandler.readline()
print "currentLine:",currentLine
'''

'''
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/Posting_Oriented_Training_Dataset_2013_01_10.train_tfqAdded_labelsAdded_tail_500K_weiAllPreparedFeaturesAdded.OLD"
inputFileHandler = open(inputFileName,"r")
oldInfoHeadLine = inputFileHandler.readline()
oldInfoHeadLineElements = oldInfoHeadLine.strip().split(" ")
for index,element in enumerate(oldInfoHeadLineElements):
    print index,element
    
print

newInfoHeadLine = ""
for element in oldInfoHeadLineElements[:20]:
    newInfoHeadLine += element + " "

for element in oldInfoHeadLineElements[21:]:
    newInfoHeadLine += element + " "

newInfoHeadLine += "TOP10Label" + " " + "TOP50Label" + " " + "TOP100Label" + " " + "TOP11To50Label" + " " + "TOP51To100Label" + " " + "\n"

newInfoHeadLine.strip()

newInfoHeadLineElements = newInfoHeadLine.strip().split(" ")
for index,element in enumerate(newInfoHeadLineElements):
    print index,element

outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/Posting_Oriented_Training_Dataset_2013_01_10.train_tfqAdded_labelsAdded_tail_500K_weiAllPreparedFeaturesAdded"
outputFileHandler = open(outputFileName,"w")
outputFileHandler.write(newInfoHeadLine)

for line in inputFileHandler.readlines():
    newOutputLine = ""
    lineElements = line.strip().split(" ")
    for element in lineElements[:20]:
        newOutputLine += element + " "
    
    for element in lineElements[21:]:
        newOutputLine += element + " "
    
    # when I sample, should have more. 
    if lineElements[20] == "TOP10":
        newOutputLine += "True" + " " + "True" + " " + "True" + " " + "False" + " " + "False"
    elif lineElements[20] == "TOP11To50":
        newOutputLine += "False" + " " + "True" + " " + "True" + " " + "True" + " " + "False"
    elif lineElements[20] == "TOP51To100":
        newOutputLine += "False" + " " + "False" + " " + "True" + " " + "False" + " " + "True"
    
    newOutputLine += "\n"

    outputFileHandler.write(newOutputLine)

inputFileHandler.close()
outputFileHandler.close()
'''

'''
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/Posting_Oriented_Training_Dataset_2013_01_10.train_tfqAdded_labelsAdded_tail_500K_weiAllPreparedFeaturesAdded"
inputFileHandler = open(inputFileName,"r")
infoHeadLine = inputFileHandler.readline()
infoHeadLineElements = infoHeadLine.strip().split(" ")

dataLine = inputFileHandler.readline()
dataLineElements = dataLine.strip().split(" ")

for index,element in enumerate( infoHeadLineElements ):
    print index,element,dataLineElements[index] 
''' 
 


'''
for index,line in enumerate( inputFileHandler.readlines() ):
    if len( line.strip().split(" ") ) == 23:
        pass
    else:
        print "index:",index

print "Test Pass"
'''



'''
outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/Posting_Oriented_Training_Dataset_2013_01_10.train_tfqAdded_labelsAdded_tail_500K_sorted_by_term_rankInInvertedListAdded"
outputFileHandler = open(outputFileName,"w")

inputBasePath = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/parallel_work_for_feature_Rank_in_the_inverted_list/"
for currentPartNumber in range(1,11):
    fileName = "Posting_Oriented_Training_Dataset_2013_01_10.train_tfqAdded_labelsAdded_tail_500K_sorted_by_term_part" + "%02d" % currentPartNumber + "_rankInInvertedListAdded"
    absoluteFileName = inputBasePath + fileName
    currentInputFileHandler = open(absoluteFileName,"r")
    currentInputFileHandler.readline()
    
    for line in currentInputFileHandler.readlines():
        outputFileHandler.write(line)
    
    currentInputFileHandler.close()


outputFileHandler.close()
'''


'''
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/Posting_Oriented_Training_Dataset_2013_01_10.train_tfqAdded_labelsAdded_tail_500K_weiHLFeaturesAdded_OLD"
inputFileHandler = open(inputFileName,"r")
infoHeadLine = inputFileHandler.readline()

outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/Posting_Oriented_Training_Dataset_2013_01_10.train_tfqAdded_labelsAdded_tail_500K_weiHLFeaturesAdded"
outputFileHandler = open(outputFileName,"w")
outputFileHandler.write(infoHeadLine)

for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    newRankInDoc = int( lineElements[-1] ) + 1
    newLine = ""
    for element in lineElements[:-1]:
        newLine += element + " "
    newLine += str(newRankInDoc) + " "
    newLine.strip()
    outputFileHandler.write(newLine + "\n")
'''

'''
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/Posting_Oriented_Training_Dataset_2013_01_10.train_tfqAdded_labelsAdded_tail_500K_tail_1K_verify_weiHLFeaturesAdded_verify"
inputFileHandler = open(inputFileName,"r")
inputFileHandler.readline()

for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    queryID = lineElements[0]
    trecID = lineElements[1]
    term = lineElements[2]
    rank_in_the_doc_from_High_Level = lineElements[21]
    rank_in_the_doc_from_polyIRToolkit = lineElements[23]
    print queryID,trecID,term,rank_in_the_doc_from_High_Level,rank_in_the_doc_from_polyIRToolkit
    # print line.strip()
    # print len( line.strip().split(" ") )
'''  


'''
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/Posting_Oriented_Training_Dataset_2013_01_10.train_tfqAdded_labelsAdded_tail_500K_weiHLFeaturesAdded"
inputFileHandler = open(inputFileName,"r")
inputFileHandler.readline()

inputFileName2 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/Posting_Oriented_Training_Dataset_2013_01_10.train_tfqAdded_labelsAdded_tail_500K"
inputFileHandler2 = open(inputFileName2,"r")
inputFileHandler2.readline()

for index,line in enumerate( inputFileHandler2.readlines() ):
    comparedLine = inputFileHandler.readline()
    lineElements = line.strip().split(" ")
    
    modifyLine = ""
    for element in lineElements[:-1]:
        modifyLine += element + " "
    modifyLine = modifyLine.strip()
    
    if comparedLine.startswith(modifyLine):
        pass
    else:
        print "index:",index
        print "newLine:",comparedLine.strip()
        print "oldLine:",line.strip()
        print
        exit()

print "Test Pass"
'''    


'''
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/freqOfQueryTermsInCollection.txt"
inputFileHandler = open(inputFileName,"r")

outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/queryTermsONLY.txt"
outputFileHandler = open(outputFileName,"w")

for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    term = lineElements[0]
    outputFileHandler.write(term + "\n")

inputFileHandler.close()
outputFileHandler.close()
'''


'''
# The purpose of this part of the code is to look up each query term's
# frequency in the collection from the full lexicon dump and write out
# "term freq" pairs (0 when the term is missing from the lexicon).
lexiconTermDict = {}

outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/freqOfQueryTermsInCollection.txt"
outputFileHandler = open(outputFileName,"w")
  
inputFileName2 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/wholeLexiconTermsWithTermFreqInCollection.txt"
inputFileHandler2 = open(inputFileName2,"r")
for line in inputFileHandler2.readlines():
    lineElements = line.strip().split(" ")
    term = lineElements[0]
    freqInCollection = int( lineElements[1] )
    if term not in lexiconTermDict:
        lexiconTermDict[term] = freqInCollection        
print "len(lexiconTermDict):",len(lexiconTermDict)

inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/freqOfTermsInQueries.txt"
inputFileHandler = open(inputFileName,"r")
for line in inputFileHandler.readlines():
    newLine = ""
    term = line.strip().split(" ")[0]
    if term in lexiconTermDict:
        newLine = term + " " + str( lexiconTermDict[term] ) + "\n"
    else:
        print "lexicon do NOT have the term:",term
        newLine = term + " " + "0" + "\n"
    outputFileHandler.write(newLine)
inputFileHandler.close()
inputFileHandler2.close()
outputFileHandler.close()
'''




'''
numOfDatalinesForEachFile = 50000
numberOfFiles = 10

inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/Posting_Oriented_Training_Dataset_2013_01_10.train_tfqAdded_labelsAdded_tail_500K_sorted_by_term"
inputFileHandler = open(inputFileName,"r")
oldInfoHeadLine = inputFileHandler.readline()

currentPartNumber = 1

for index,line in enumerate( inputFileHandler.readlines() ):
    if index % 50000 == 0:
        currentOutputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/parallel_work_for_feature_Rank_in_the_inverted_list/" + "Posting_Oriented_Training_Dataset_2013_01_10.train_tfqAdded_labelsAdded_tail_500K_sorted_by_term" + "_part" + "%02d" % currentPartNumber
        currentOutputFileHandler = open(currentOutputFileName,"w")
        currentOutputFileHandler.write( oldInfoHeadLine.strip() + " (Part%02dOutof10)" % currentPartNumber + "\n")
        currentPartNumber += 1
    currentOutputFileHandler.write( line )
'''

'''
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/Posting_Oriented_Training_Dataset_2013_01_10.train_tfqAdded_labelsAdded_tail_500K"
inputFileHandler = open(inputFileName,"r")
infoHeadLine = inputFileHandler.readline()

outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/Posting_Oriented_Training_Dataset_2013_01_10.train_tfqAdded_labelsAdded_tail_500K" + "_sorted_by_term"
outputFileHandler = open(outputFileName,"w")
outputFileHandler.write(infoHeadLine)

allLinesList = []

for line in inputFileHandler.readlines():
    currentLineElements = line.strip().split(" ")
    allLinesList.append(currentLineElements)

print "len(allLinesList):",len(allLinesList)

allLinesList.sort(cmp=None, key=itemgetter(2), reverse=False)
for lineElements in allLinesList:
    outputLine = ""
    for element in lineElements:
        outputLine += element + " "
    outputLine = outputLine.strip() + "\n"
    outputFileHandler.write( outputLine )

print "OK"
inputFileHandler.close()
outputFileHandler.close()
'''

'''
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/Posting_Oriented_Training_Dataset_2013_01_10.train_tfqAdded_labelsAdded"
inputFileHandler = open(inputFileName,"r")
infoHeadLine = inputFileHandler.readline()

outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/Posting_Oriented_Training_Dataset_2013_01_10.train_tfqAdded_labelsAdded" + "_sorted_by_trecID"
outputFileHandler = open(outputFileName,"w")
outputFileHandler.write(infoHeadLine)

allLinesList = []

for line in inputFileHandler.readlines():
    currentLineElements = line.strip().split(" ")
    allLinesList.append(currentLineElements)

print "len(allLinesList):",len(allLinesList)

allLinesList.sort(cmp=None, key=itemgetter(1), reverse=False)
for lineElements in allLinesList:
    outputLine = ""
    for element in lineElements:
        outputLine += element + " "
    outputLine = outputLine.strip() + "\n"
    outputFileHandler.write( outputLine )

print "OK"
inputFileHandler.close()
outputFileHandler.close()
'''

'''
inputFileName = "/data5/team/obukai/the_new_trip_of_feature_generation/gov2/20121215Posting_Oriented_Balanced_Training_Dataset.txt.input"
inputFileHandler = open(inputFileName,"r")

numTop10Count = 0
numNotTop10Count = 0


for line in inputFileHandler.readlines():
    if line.strip().split(" ")[9] == "TOP10":
        numTop10Count += 1
    elif line.strip().split(" ")[9] == "NOTTOP10":
        numNotTop10Count += 1

print "numTop10Count:",numTop10Count
print "numNotTop10Count:",numNotTop10Count
'''

'''
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/alreadyFinishedWork20121202/rankInListFeature/allInfoAboutRankInList"
inputFileHandler = open(inputFileName,"r")

postingRankDict = {}
tempCounter = 0

for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    key = lineElements[0] + "_" + lineElements[1] + "_" + lineElements[2]
    if key not in postingRankDict:
        postingRankDict[key] = lineElements[3]
    else:
        print "Duplicated posting, key shown:",key
        tempCounter += 1

print "len(postingRankDict):",len(postingRankDict)
print "tempCounter:",tempCounter

outputFileName = "/data5/team/obukai/the_new_trip_of_feature_generation/gov2/trainingIntermediateFile20121205.txt"
outputFileHandler = open(outputFileName,"w")

inputFileName2 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/alreadyFinishedWork20121202/rankInDocFeature/allInfoAboutRankInDoc"
inputFileHandler2 = open(inputFileName2,"r")
for line in inputFileHandler2.readlines():
    lineElements = line.strip().split(" ")
    accessKey = lineElements[0] + "_" + lineElements[1] + "_" + lineElements[2]
    outputFileHandler.write( line.strip() + " " + postingRankDict[accessKey] + "\n")

outputFileHandler.close()
'''

'''
outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/alreadyFinishedWork20121202/rankInListFeature/allInfoAboutRankInList"
outputFileHandler = open(outputFileName,"w")

inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/alreadyFinishedWork20121202/rankInListFeature/wholeTrainingFileTempleteGov2V1_phase0_posting_oriented_term_freq_inCollection_added_ordered_by_query_term_20121130_simplified.txt.input_beginning_part0_add_rank_in_list"
print inputFileName
inputFileHandler = open(inputFileName,"r")

for line in inputFileHandler.readlines():
    outputFileHandler.write(line)

for i in range(1,21):
    currentInputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/alreadyFinishedWork20121202/rankInListFeature/wholeTrainingFileTempleteGov2V1_phase0_posting_oriented_term_freq_inCollection_added_ordered_by_query_term_20121130_simplified.txt.input_left_part" + "%02d" % i + "_add_rank_in_list"
    # print currentInputFileName
    
    currentInputFileHandler = open(currentInputFileName,"r")
    for line in currentInputFileHandler.readlines():
        outputFileHandler.write(line)
    
outputFileHandler.close()
'''

'''
outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/alreadyFinishedWork20121202/rankInDocFeature/allInfoAboutRankInDoc"
outputFileHandler = open(outputFileName,"w")

inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/alreadyFinishedWork20121202/rankInDocFeature/wholeTrainingFileTempleteGov2V1_phase0_posting_oriented_term_freq_inCollection_added.txt.input_beginning_part00_rank_in_document"
inputFileHandler = open(inputFileName,"r")

for line in inputFileHandler.readlines():
    outputFileHandler.write(line)

for i in range(1,11):
    currentInputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/alreadyFinishedWork20121202/rankInDocFeature/wholeTrainingFileTempleteGov2V1_phase0_posting_oriented_term_freq_inCollection_added.txt.input_left_part" + "%02d" % i + "_rank_in_document"
    currentInputFileHandler = open(currentInputFileName,"r")
    for line in currentInputFileHandler.readlines():
        outputFileHandler.write(line)

outputFileHandler.close()
'''


'''
inputFileName1 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/alreadyFinishedWork20121202/rankInDocFeature/wholeTrainingFileTempleteGov2V1_phase0_posting_oriented_term_freq_inCollection_added.txt.input_left_part10"
inputFileHandler1 = open(inputFileName1,"r")

inputFileName2 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/alreadyFinishedWork20121202/rankInDocFeature/wholeTrainingFileTempleteGov2V1_phase0_posting_oriented_term_freq_inCollection_added.txt.input_left_part10_rank_in_document"
inputFileHandler2 = open(inputFileName2,"r")

file1Lines = inputFileHandler1.readlines()
file2Lines = inputFileHandler2.readlines()

for index,line in enumerate(file1Lines):
    if line.strip().split(" ")[0] == file2Lines[index].strip().split(" ")[0] and line.strip().split(" ")[1] == file2Lines[index].strip().split(" ")[1] and line.strip().split(" ")[2] == file2Lines[index].strip().split(" ")[2]:
        pass
    else:
        print "sth wrong."
        print "len(file1Lines):",len(file1Lines)
        print "len(file2Lines):",len(file2Lines)
        print "index:",index
        print "line from file1:",line.strip()
        print "line from file2:",file2Lines[index].strip()
        exit(1)
        
print "all pass"
'''



'''
# The purpose of this script is to split the file into # of parts which have been specified.

print "len(sys.argv):",len(sys.argv)

if len(sys.argv) != 2:
    print "Illegal # of arguments"
    print "Usage: python programName.py #ofFilesWantToSplit"
    exit(1)
   
numberOfFilesWantToSplit = int( sys.argv[1] )

#option1
#inputFileName1 = "/data5/team/obukai/the_new_trip_of_feature_generation/gov2/wholeTrainingFileTempleteGov2V1_phase0_posting_oriented_term_freq_inCollection_added.txt.input_left"

#option2
inputFileName1 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/wholeTrainingFileTempleteGov2V1_phase0_posting_oriented_term_freq_inCollection_added_ordered_by_query_term_20121130_simplified.txt.input_left"

inputFileHandler1 = open(inputFileName1,"r")

totalNumberOfLines = len( inputFileHandler1.readlines() )
print "totalNumberOfLines:",totalNumberOfLines
numberOfLinesPerFile = math.ceil( totalNumberOfLines/numberOfFilesWantToSplit )
print "numberOfLinesPerFile:",numberOfLinesPerFile

inputFileHandler1.close()

inputFileHandler1 = open(inputFileName1,"r")

i = 0

for index,line in enumerate( inputFileHandler1.readlines() ):
    if index % numberOfLinesPerFile == 0:
        i += 1
        currentOutputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/wholeTrainingFileTempleteGov2V1_phase0_posting_oriented_term_freq_inCollection_added_ordered_by_query_term_20121130_simplified.txt.input_left" + "_part%02d" % i
        currentOutputFileHandler = open(currentOutputFileName,"w")        
    
    currentOutputFileHandler.write(line)

inputFileHandler1.close()
'''

'''
inputFileName1 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/alreadyFinishedWork20121202/wholeTrainingFileTempleteGov2V1_phase0_posting_oriented_term_freq_inCollection_added.txt.input_part0"
inputFileHandler1 = open(inputFileName1,"r")

inputFileName2 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/alreadyFinishedWork20121202/wholeTrainingFileTempleteGov2V1_phase0_posting_oriented_term_freq_inCollection_added_rank_in_document_added.txt.input_part0"
inputFileHandler2 = open(inputFileName2,"r")

lineNumber = 0

currentLineForFile1 = inputFileHandler1.readline()
currentLineForFile2 = inputFileHandler2.readline()
lineNumber += 1

while currentLineForFile1.strip().split(" ")[0] == currentLineForFile2.strip().split(" ")[0] and currentLineForFile1.strip().split(" ")[1] == currentLineForFile2.strip().split(" ")[1] and currentLineForFile1.strip().split(" ")[2] == currentLineForFile2.strip().split(" ")[2]:
    currentLineForFile1 = inputFileHandler1.readline()
    currentLineForFile2 = inputFileHandler2.readline()
    
    if currentLineForFile1.strip() == "" and currentLineForFile2.strip() == "":
        break

    lineNumber += 1

print "lineNumber:",lineNumber
print "currentLineForFile1:",currentLineForFile1
print "currentLineForFile2:",currentLineForFile2


inputFileHandler1.close()
inputFileHandler2.close()
'''



'''
duplicatedTupleCheckDict = {}

inputFileName = "/data5/team/obukai/the_new_trip_of_feature_generation/gov2/wholeTrainingFileTempleteGov2V1_phase0_posting_oriented_term_freq_inCollection_added.txt.input"
inputFileHandler = open(inputFileName,"r")

counter = 0

for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    queryID = lineElements[0]
    trecID = lineElements[1]
    term = lineElements[2]
    tuple = (queryID, trecID, term)
    if tuple not in duplicatedTupleCheckDict:
        duplicatedTupleCheckDict[tuple] = 1
    else:
        duplicatedTupleCheckDict[tuple] += 1
        print "shit, this is duplicate:",tuple
        counter += 1

print "counter:",counter
inputFileHandler.close()
'''


'''
traingTupleList = []

inputFileName = "/data5/team/obukai/the_new_trip_of_feature_generation/gov2/wholeTrainingFileTempleteGov2V1_phase0_posting_oriented_term_freq_inCollection_added.txt.input"
inputFileHandler = open(inputFileName,"r")


# This is for generating the features of rank_in_inverted_index
outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/wholeTrainingFileTempleteGov2V1_phase0_posting_oriented_term_freq_inCollection_added_ordered_by_query_term_20121130_simplified.txt.input"
outputFileHandler = open(outputFileName,"w")

for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    simplifiedTuple = (lineElements[0],lineElements[1],lineElements[2])
    traingTupleList.append(simplifiedTuple)

print "len(traingTupleList):",len(traingTupleList)

traingTupleList_sorted = sorted(traingTupleList, key=itemgetter(2))

for tuple in traingTupleList_sorted:
    # print tuple
    (queryID, trecID, term) = tuple
    # print queryID,trecID,term
    outputFileHandler.write(queryID + " " + trecID + " " + term + "\n")

inputFileHandler.close()
outputFileHandler.close()

print "DONE"
'''


'''
inputFileName = "/home/obukai/workspace/polyIRToolkit/polyIRIndexer/allQueriesIncludingHumanJudgedAndEffeciencyTask"
inputFileHandler = open(inputFileName,"r")

outputFileName = "/home/obukai/workspace/polyIRToolkit/polyIRIndexer/allQueriesIncludingHumanJudgedAndEffeciencyTask_NEW20121128.txt"
outputFileHandler = open(outputFileName,"w")

totalNumberOfLine = 147

for line in inputFileHandler.readlines():
    if totalNumberOfLine > 0:
        lineElements = line.strip().split(":")
        
        originalQueryID = int( lineElements[0] )
        newQueryID = originalQueryID + 100000
        
        print originalQueryID,lineElements[1]
        print newQueryID,lineElements[1]
        
        totalNumberOfLine -= 1
        outputFileHandler.write( str( newQueryID ) + ":" + lineElements[1] + "\n")
        
    else:
        outputFileHandler.write( line.strip() + "\n" )

inputFileHandler.close()
outputFileHandler.close()
'''



'''
# write a program that make the query related lexicon term NOT fake anymore
# the involved file includes the following file: "/data5/team/obukai/the_new_trip_of_feature_generation/gov2/final-fake-queryTermsCollectionFreqs.txt" 

lexiconTermFreqInCollectionDict = {}

inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/wholeLexiconTermsWithTermFreqInCollection.txt"
inputFileHandler = open(inputFileName,"r")

currentLine = inputFileHandler.readline()

while currentLine:
    lineElements = currentLine.strip().split(" ")
    
    if len( lineElements ) == 2:
        if lineElements[0].strip() not in lexiconTermFreqInCollectionDict:
            lexiconTermFreqInCollectionDict[lineElements[0].strip()] = int( lineElements[1].strip() )
    else:
        exit(1)
    
    currentLine = inputFileHandler.readline()

print "All passed"
print "lexiconTermFreqInCollectionDict['0']:",lexiconTermFreqInCollectionDict['0']
print "len( lexiconTermFreqInCollectionDict ):",len( lexiconTermFreqInCollectionDict )
'''

'''
print "make the corresponding folders."
for i in range(0,273):
    directory = "/data3/obukai/human_judge_web_pages_gov2_ALL/" + "GX%03d" % i
    print directory
    os.mkdir(directory)
'''


'''
allRelevenceDocs = {}
allRelevenceDocsList = []

inputFileNameList = []
inputFileName1 = "/data5/team/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/priorityTrecDocumentsIDs_all_sorted_with_index_number_BM25_oriented.txt"
inputFileName2 = "/data1/team/obukai/machine-learning-project-related/trec-related/trecCombine04_06/priorityTrecDocumentsIDs_all_sorted_with_index_number.txt"

outputFileName = "/data1/team/obukai/machine-learning-project-related/trec-related/trecCombine04_06/priorityTrecDocumentsIDs_include_approximating_BM25_AND_human_judged.txt"
outputFileHandler = open(outputFileName,"w")

inputFileNameList.append(inputFileName1)
inputFileNameList.append(inputFileName2)

for name in inputFileNameList:
    inputFileHandler = open(name,"r")
    for line in inputFileHandler.readlines():
        lineElements = line.strip().split(" ")
        if lineElements[1] not in allRelevenceDocs:
            allRelevenceDocs[lineElements[1]] = 1
        else:
            allRelevenceDocs[lineElements[1]] += 1
            
print "len(allRelevenceDocs):",len(allRelevenceDocs)
'''



'''
for docID in allRelevenceDocs:
    if docID.startswith("GX"):
        pass
    else:
        print "NOT passed:",docID
'''


'''
allRelevenceDocsList = allRelevenceDocs.keys()
allRelevenceDocsList.sort(cmp=None, key=None, reverse=False)

for index,docID in enumerate( allRelevenceDocsList ):
    outputFileHandler.write( str( index ) + " " + docID + " " + "-1" + "\n")


inputFileHandler.close()
outputFileHandler.close()
'''


'''
inputFileName = "/data5/team/obukai/the_new_trip_of_feature_generation/gov2/wholeTrainingFileTempleteGov2V1_phase0_posting_oriented.txt.output"
inputFileHandler = open(inputFileName,"r")

inputFileName2 = "/data1/team/obukai/machine-learning-project-related/learningToPrune/qrels.tb04-tb06.top150_CONTAINS_ONLY_147"
inputFileHandler2 = open(inputFileName2,"r")

outputFileName = "/data5/team/obukai/the_new_trip_of_feature_generation/gov2/wholeTrainingFileTempleteGov2V1_phase0_posting_oriented_with_label.txt.output"
outputFileHandler = open(outputFileName,"w")

judgeDict = {}

for line in inputFileHandler2.readlines():
    lineElements = line.strip().split(" ")
    queryID = lineElements[0]
    trecID = lineElements[2]
    relevenceScore = lineElements[3]
    
    key = queryID + "_" + trecID
    if key not in judgeDict:
        judgeDict[key] = relevenceScore

print "len(judgeDict):",len(judgeDict)
# print "judgeDict['850_GX265-48-6314208']:",judgeDict['850_GX265-48-6314208']
    
for line in inputFileHandler.readlines():
    outputLine = ""
    lineElements = line.strip().split(" ")
    queryID = lineElements[0]
    trecID = lineElements[1]
    key = queryID + "_" + trecID 
    outputLine = line.strip() + " " + judgeDict[key]
    outputFileHandler.write(outputLine + "\n")
    # print "outputLine:",outputLine

inputFileHandler.close()
inputFileHandler2.close()
outputFileHandler.close()
'''

'''
inputFileName = "/data5/team/obukai/the_new_trip_of_feature_generation/gov2/wholeTrainingFileTempleteGov2V1_phase0_posting_oriented_with_label.txt.output"
inputFileHandler = open(inputFileName,"r")

line = inputFileHandler.readline()
lineElements = line.strip().split(" ")
print "len(lineElements):",len(lineElements)
for index,element in enumerate(lineElements):
    if index == 0:
        print index,"queryID:",element
    elif index == 1:
        print index,"trecID:",element
    elif index == 2:
        print index,"postingTerm:",element
    elif index == 3:
        print index,"doc_words:",element
    elif index == 4:
        print index,"doc_distinct_words:",element
    elif index == 5:
        print index,"text_size:",element
    elif index == 6:
        print index,"script_size:",element
    elif index == 7:
        print index,"script_text_ratio:",element
    elif index == 8:
        print index,"doc_outlinks:",element
    elif index == 9:
        print index,"current_term_col_freq:",element
    elif index == 10:
        print index,"whether_current_term_in_header:",element
    elif index == 11:
        print index,"whether_current_term_in_title:",element
    elif index == 12:
        print index,"whether_current_term_in_bold:",element
    elif index == 13:
        print index,"whether_current_term_in_url:",element
    elif index == 14:
        print index,"whether_current_term_in_italic:",element
    elif index == 15:
        print index,"current_term_freq_in_doc:",element
    elif index == 16:
        print index,"current_term_rel_freq_in_doc:",element   
    elif index == 17:
        print index,"current_term_BM25(BM25 Score for this term):",element
    elif index == 18:
        print index,"current_term_QL(Language Model Score for this term):",element
    elif index == 19:
        print index,"current_term_distribution(Term Distribution Score for this term):",element
    elif index == 20:
        print index,"relevence score label:",element
    else:
        print index,element
'''


'''
inputFileName = "/home/obukai/workspace/polyIRToolkit/scripts/src/pythonScripts/gov2/tempErrorMessage_need_to_handled.txt"
inputFileHandler = open(inputFileName,"r")

while True:
    indexLine = inputFileHandler.readline()
    documentSourceLine = inputFileHandler.readline()
    parsedStateLine = inputFileHandler.readline()
    docLengthLine = inputFileHandler.readline()
    emptyLine = inputFileHandler.readline()
    
    print "doc length:",docLengthLine
'''    



'''
import random
import math
import os
from HTMLParser import HTMLParser
from HTMLParser import HTMLParseError

###########################################################################################################################class begin...
class MyHTMLParser(HTMLParser):
    def handle_starttag(self, tag, attrs):
        # print "Start tag:", tag
        # set the current_tag to tag
        self.current_tag = tag
        
        if hasattr(self, 'doc_outlinks'):
            pass
        else:
            self.doc_outlinks = 0
        
        for attr in attrs:
            (attr_name, attr_value) = attr
            if attr_name == "href":
                #print "     attr:", attr
                self.doc_outlinks += 1
    def handle_endtag(self, tag):
        #print "End tag  :", tag
        # set back the current_tag to ""
        self.current_tag = ""
        
    def handle_data(self, data):
        if hasattr(self, 'current_tag'):
            # step0: pre-processing
            for i in range(0,len(data)):
                # print "data[i]:",ord(data[i])
                if not ( (ord(data[i]) >= 48 and ord(data[i]) < 58) or (ord(data[i]) >= 65 and ord(data[i]) < 91) or (ord(data[i]) >= 97 and ord(data[i]) < 123) or (ord(data[i]) == 32) ):
                    # Just replace them with a space.
                    data = data[:i] + " " + data[i+1:]
            
            if hasattr(self, 'header_words'):
                pass
            else:
                self.header_words = []
                self.header_words_dict = {}

            if hasattr(self, 'title_words'):
                pass
            else:
                self.title_words = []
                self.title_words_dict = {}
            
            if hasattr(self, 'b_or_strong_words'):
                pass
            else:
                self.b_or_strong_words = []
                self.b_or_strong_words_dict = {}          
            
            if hasattr(self, 'a_words'):
                pass
            else:
                self.a_words = []
                self.a_words_dict = {}
            
            if hasattr(self, 'i_or_em_words'):
                pass
            else:
                self.i_or_em_words = []
                self.i_or_em_words_dict = {}                       
            
                        
            # step1: some context feature
            if self.current_tag == "head":
                for word in data.split(" "):
                    if word != "":
                        lowerCaseWord = word.lower()
                        self.header_words.append(lowerCaseWord)
                        if lowerCaseWord not in self.header_words_dict:
                            self.header_words_dict[lowerCaseWord] = 1
                        else:
                            self.header_words_dict[lowerCaseWord] += 1
            
            if self.current_tag == "title":
                for word in data.split(" "):
                    if word != "":
                        lowerCaseWord = word.lower()
                        self.title_words.append(lowerCaseWord)
                        if lowerCaseWord not in self.title_words_dict:
                            self.title_words_dict[lowerCaseWord] = 1
                        else:
                            self.title_words_dict[lowerCaseWord] += 1
            
            if self.current_tag == "b" or self.current_tag == "strong":
                for word in data.split(" "):
                    if word != "":
                        lowerCaseWord = word.lower()
                        self.b_or_strong_words.append(lowerCaseWord)
                        if lowerCaseWord not in self.b_or_strong_words_dict:
                            self.b_or_strong_words_dict[lowerCaseWord] = 1
                        else:
                            self.b_or_strong_words_dict[lowerCaseWord] += 1
                                     
            if self.current_tag == "a":
                for word in data.split(" "):
                    if word != "":
                        lowerCaseWord = word.lower()
                        self.a_words.append(lowerCaseWord)
                        if lowerCaseWord not in self.a_words_dict:
                            self.a_words_dict[lowerCaseWord] = 1
                        else:
                            self.a_words_dict[lowerCaseWord] += 1
            
            if self.current_tag == "i" or self.current_tag == "em":
                for word in data.split(" "):
                    if word != "":
                        lowerCaseWord = word.lower()
                        self.i_or_em_words.append(lowerCaseWord)
                        if lowerCaseWord not in self.i_or_em_words_dict:
                            self.i_or_em_words_dict[lowerCaseWord] = 1
                        else:
                            self.i_or_em_words_dict[lowerCaseWord] += 1
            
       
            # step2: text or script
            if self.current_tag == "script" or self.current_tag == "style":
                #print "The following content should NOT be indexed."
                #print "script content     :", data
                #print "script length     :",len(data)
                if hasattr(self, 'script_size'):
                    self.script_size += len(data)
                else:
                    self.script_size = len(data)
            else:
                if hasattr(self, 'text_size'):
                    self.text_size += len(data)
                else:
                    self.text_size = len(data)
                # print "text content:*",data,"*"

                for i in range(0,len(data)):
                    # print "data[i]:",ord(data[i])
                    if not ( (ord(data[i]) >= 48 and ord(data[i]) < 58) or (ord(data[i]) >= 65 and ord(data[i]) < 91) or (ord(data[i]) >= 97 and ord(data[i]) < 123) or (ord(data[i]) == 32) ):
                        # Just replace them with a space.
                        data = data[:i] + " " + data[i+1:]
                
                
                if hasattr(self, 'doc_words'):
                    pass
                else:
                    self.doc_words = []
                
                if hasattr(self, 'doc_distinct_words'):
                    pass
                else:
                    self.doc_distinct_words = []               

                
                if hasattr(self, 'doc_words_dict'):
                    pass
                else:
                    self.doc_words_dict = {}
                                    
                #print "raw word list:",data.split(" ")
                
                for word in data.split(" "):
                    if word != "":
                        # for later use.
                        lowerCaseWord = word.lower()
                        
                        # for current use
                        # lowerCaseWord = word
                        self.doc_words.append(lowerCaseWord)
                        if lowerCaseWord not in self.doc_words_dict:
                            self.doc_words_dict[lowerCaseWord] = 1
                        else:
                            self.doc_words_dict[lowerCaseWord] += 1
                        
                        
                        if lowerCaseWord not in self.doc_distinct_words:
                            self.doc_distinct_words.append(lowerCaseWord)
                        
                #print "doc_words:",self.doc_words
                #print "doc_distinct_words:",self.doc_distinct_words
                  
                #print "text length      :", len(data)
        else:
            pass
    def handle_comment(self, data):
        pass
        #print "Comment  :", data
    def handle_entityref(self, name):
        pass
        #c = unichr(name2codepoint[name])
        #print "Named ent:", c
    def handle_charref(self, name):
        pass
        #if name.startswith('x'):
        #    c = unichr(int(name[1:], 16))
        #else:
        #    c = unichr(int(name))
        #print "Num ent  :", c
    def handle_decl(self, data):
        pass
        #print "Decl     :", data
    
    def generate_statistics_report(self,status):
        
        print "-----> completed_parsed:",status
        
        # attrs check or init
        if hasattr(self, 'doc_words'):
            pass
        else:
            self.doc_words = []

        if hasattr(self, 'doc_distinct_words'):
            pass
        else:
            self.doc_distinct_words = []
        
        if hasattr(self, 'text_size'):
            pass
        else:
            self.text_size = 0
            
        if hasattr(self, 'script_size'):
            pass
        else:
            self.script_size = 0       

        if hasattr(self, 'doc_outlinks'):
            pass
        else:
            self.doc_outlinks = 0
            
        if hasattr(self, 'doc_waterloo_spam'):
            pass
        else:
            self.doc_waterloo_spam = 0
                
        #print "len(doc_words):",len(self.doc_words)
        #print "len(doc_distinct_words):",len(self.doc_distinct_words)
        #print "text_size:",self.text_size
        #print "script_size:",self.script_size
        if self.text_size != 0:
            script_text_ratio = self.script_size / self.text_size
        else:
            script_text_ratio = 0.0
        #print "script_text_ratio:",script_text_ratio
        #print "doc_outlinks:",self.doc_outlinks
        
        #outputFileFeaturePart1Handler.write(str( len(self.doc_words) ) + " ")
        #outputFileFeaturePart1Handler.write(str( len(self.doc_distinct_words) )  + " ")
        #outputFileFeaturePart1Handler.write(str(self.text_size)  + " ")
        #outputFileFeaturePart1Handler.write(str(self.script_size)  + " ")
        #outputFileFeaturePart1Handler.write(str(script_text_ratio)  + " ")
        #outputFileFeaturePart1Handler.write(str(self.doc_outlinks)  + " ")
        
        
        
        #print "self.header_words:",self.header_words
        #print "self.header_words_dict:",self.header_words_dict

        #print
        #print "self.title_words:",self.title_words
        #print
        #print "self.title_words_dict:",self.title_words_dict
    
        #print
        #print "self.b_or_strong_words:",self.b_or_strong_words
        #print
        #print "self.b_or_strong_words_dict:",self.b_or_strong_words_dict
    
        #print
        #print "self.a_words:",self.a_words
        #print
        #print "self.a_words_dict:",self.a_words_dict
    
        #print
        #print "self.i_or_em_words:",self.i_or_em_words
        #print
        #print "self.i_or_em_words_dict:",self.i_or_em_words_dict 
        
        #print
        #print "self.doc_words:",self.doc_words
        
        #print
        #print "self.doc_distinct_words:",self.doc_distinct_words
        
        #print 
        #print "self.doc_words_dict:",self.doc_words_dict
###########################################################################################################################class end.

inputFileName = "/data1/team/obukai/machine-learning-project-related/trec-related/trecCombine04_06/priorityTrecDocumentsIDs_all_sorted_with_index_number.txt"
inputFileHandler = open(inputFileName,"r")

outputFileName = "/home/obukai/workspace/polyIRToolkit/scripts/src/pythonScripts/gov2/tempErrorMessage.txt"
outputFileErrorHandler = open(outputFileName, "w")

human_judge_query_location_base_path = "/data5/team/obukai/human_judge_web_pages_gov2/"

for index,line in enumerate( inputFileHandler.readlines() ):
    print "index:",index
    outputFileErrorHandler.write("index:" + str(index) + "\n")
    #if index == 2:
    #    break
    
    trecID = line.strip().split(" ")[1]
    
    trecIDElements = trecID.split("-")
    
    segmentNumber = trecIDElements[0]
    
    fileNamePrefixLookingFor = trecID
    
    pathLookFor = human_judge_query_location_base_path + segmentNumber
    
    foundTag = False
    
    for dirname, dirnames, filenames in os.walk(pathLookFor):
        for filename in filenames:
            if filename.startswith(fileNamePrefixLookingFor):
                foundTag = True
                # print filename
                absolutePathForWebPageFileName = os.path.join(dirname, filename)
                print "-----> ",absolutePathForWebPageFileName
                outputFileErrorHandler.write("-----> " + absolutePathForWebPageFileName + "\n")
                
                absolutePathForWebPageFileHandler = open(absolutePathForWebPageFileName,"r")
                file_content = absolutePathForWebPageFileHandler.read()
                absolutePathForWebPageFileHandler.close()
                
                web_page_content = file_content
                parser = MyHTMLParser()
                
                try:
                    parser.feed(web_page_content)
                    parser.generate_statistics_report(1)
                    
                    outputFileErrorHandler.write("-----> completed_parsed: 1" + "\n") 
                    outputFileErrorHandler.write("-----> doc length:" + str( len(parser.doc_words) ) + "\n") 
                except HTMLParseError,e:
                    print "----->",absolutePathForWebPageFileName
                    print "----->",trecID,":",e.msg,":",e.lineno,":",e.offset
                    outputFileErrorHandler.write("----->" + trecID + ":" + e.msg + ":" + str(e.lineno) + ":" + str(e.offset) + "\n")
                    #outputErrorMessageFileHandle.write("----->" + trecID + ":" + e.msg + ":" + str(e.lineno) + ":" + str(e.offset) + "\n")
                    # do not handle the error message.
                    parser.generate_statistics_report(0)
                    
                    outputFileErrorHandler.write("-----> completed_parsed: 0" + "\n")
                    outputFileErrorHandler.write("-----> doc length:" + str( len(parser.doc_words) ) + "\n")
                except UnicodeDecodeError,e:
                    print "----->",absolutePathForWebPageFileName
                    print "----->",e
                    outputFileErrorHandler.write("----->" + trecID + " UnicodeDecodeError Error." "\n")
                    #outputErrorMessageFileHandle.write("----->" + trecID + " UnicodeDecodeError Error." "\n")
                    # do not handle the error message.
                    parser.generate_statistics_report(0)
                    
                    outputFileErrorHandler.write("-----> completed_parsed: 0" + "\n")
                    outputFileErrorHandler.write("-----> doc length:" + str( len(parser.doc_words) ) + "\n")
                
                print
                outputFileErrorHandler.write("\n")
                
                
    

inputFileHandler.close()
outputFileErrorHandler.close()
'''



    

# for line in inputFileHandler.readlines():
#    print len(line.strip().split(" "))

'''
queriesDict = {}

#the whole purpose of this training file is to build the wholeTrainingFileTempleteGov2V1.txt.input

# the file for me to compare:
# /data5/team/weijiang/the_new_trip_of_feature_generation/wholeTrainingFileTempleteV3_phase0.txt.input

inputFileName1 = "/data1/team/obukai/machine-learning-project-related/learningToPrune/qrels.tb04-tb06.top150_CONTAINS_ONLY_147"
inputFileHandler1 = open(inputFileName1,"r")

inputFileName2 = "/data1/team/weijiang/workspace/polyIRToolkit/polyIRIndexer/04-06.topics.701-850.polyIRTKCompatibleMode"
inputFileHandler2 = open(inputFileName2,"r")

outputFileName = "/data5/team/obukai/the_new_trip_of_feature_generation/gov2/wholeTrainingFileTempleteGov2V1.txt.input"
outputFileHandler = open(outputFileName,"w")

for line in inputFileHandler2.readlines():
    queryID = int( line.strip().split(":")[0] )
    
    data = line.strip().split(":")[1]
    data = data.lower()
    
    for i in range(0,len(data)):
        # print "data[i]:",ord(data[i])
        if not ( (ord(data[i]) >= 48 and ord(data[i]) < 58) or (ord(data[i]) >= 65 and ord(data[i]) < 91) or (ord(data[i]) >= 97 and ord(data[i]) < 123) or (ord(data[i]) == 32) ):
            # Just replace them with a space.
            data = data[:i] + " " + data[i+1:]
    
    queryContent = data
    
    if queryID not in queriesDict:
        queriesDict[queryID] = queryContent    
    # print 

for line in inputFileHandler1.readlines():
    # example line: 850 0 GX272-67-14117174 0
    queryID = int(line.strip().split(" ")[0])
    trecID = line.strip().split(" ")[2]
    relevanceScore = line.strip().split(" ")[3]
    outputFileHandler.write( str(queryID) + " " + trecID + " '" + queriesDict[queryID] + "' " + relevanceScore + "\n")

print "len(queriesDict):",len(queriesDict)
print "Done."

inputFileHandler1.close()
inputFileHandler2.close()
outputFileHandler.close()
'''

'''
inputFileName = "/data1/team/weijiang/workspace/polyIRToolkit/polyIRIndexer/04-06.topics.701-850.polyIRTKCompatibleMode"
inputFileHandler = open(inputFileName,"r")

outputFileName = "/data5/team/obukai/the_new_trip_of_feature_generation/gov2/final-fake-queryTermsCollectionFreqs.txt"
outputFileHandler = open(outputFileName,"w")

queryTermList = []

for line in inputFileHandler.readlines():
    data = line.strip().split(":")[1]
    
    for i in range(0,len(data)):
        # print "data[i]:",ord(data[i])
        if not ( (ord(data[i]) >= 48 and ord(data[i]) < 58) or (ord(data[i]) >= 65 and ord(data[i]) < 91) or (ord(data[i]) >= 97 and ord(data[i]) < 123) or (ord(data[i]) == 32) ):
            # Just replace them with a space.
            data = data[:i] + " " + data[i+1:]

    queryTerms = data.split(" ")
    
    for queryTerm in queryTerms:
        if queryTerm.lower() != "" and queryTerm.lower() not in queryTermList:
            queryTermList.append(queryTerm.lower())

queryTermList.sort(cmp=None, key=None, reverse=False)

for queryTerm in queryTermList:
    outputFileHandler.write(queryTerm + " " + str(random.randint(1, 100000000)) + "\n")


inputFileHandler.close()
outputFileHandler.close()
'''


'''
for dirname, dirnames, filenames in os.walk('/data5/team/obukai/human_judge_web_pages_gov2'):
    # for subdirname in dirnames:
    #     print os.path.join(dirname, subdirname)
    for filename in filenames:
        print os.path.join(dirname, filename)
'''

'''
basedPath = "/data5/team/obukai/human_judge_web_pages_gov2/GX"

for i in range(0,273):
    directory = basedPath + "%03d" % i
    #print directory
    os.mkdir(directory)
'''
