from __future__ import division
from operator import itemgetter, attrgetter

import gc
import math
import matplotlib
import os
import pylab
import random
import sys
import time
from sets import Set
from scipy import stats
import numpy as np

'''
sizeDistributionDict = {}
# the pre-defined gaps(lower bounds and upper bounds) are as following:
# gaps:
# 0 [0,10)
# 1 [10,50)
# 2 [50,100)
# 3 [100,200)
# 4 [200,500)
# 5 [500,MAX BIG)

inputFileName = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/docID_num_of_postings_recorded_in_index_MappingTableForGov2Dataset_head_1092"
inputFileHandler = open(inputFileName,"r")
lineCounter = 0
sumNumOfPostings = 0
for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    currentNumOfPostings = int( lineElements[1] )
    if currentNumOfPostings >= 0 and currentNumOfPostings < 10:
        if 0 not in sizeDistributionDict:
            sizeDistributionDict[0] = 1
        else:
            sizeDistributionDict[0] += 1
    if currentNumOfPostings >= 10 and currentNumOfPostings < 50:
        if 1 not in sizeDistributionDict:
            sizeDistributionDict[1] = 1
        else:
            sizeDistributionDict[1] += 1
    if currentNumOfPostings >= 50 and currentNumOfPostings < 100:
        if 2 not in sizeDistributionDict:
            sizeDistributionDict[2] = 1
        else:
            sizeDistributionDict[2] += 1
    if currentNumOfPostings >= 100 and currentNumOfPostings < 200:
        if 3 not in sizeDistributionDict:
            sizeDistributionDict[3] = 1
        else:
            sizeDistributionDict[3] += 1
    if currentNumOfPostings >= 200 and currentNumOfPostings < 500:
        if 4 not in sizeDistributionDict:
            sizeDistributionDict[4] = 1
        else:
            sizeDistributionDict[4] += 1
    if currentNumOfPostings >= 500:
        if 5 not in sizeDistributionDict:
            sizeDistributionDict[5] = 1
        else:
            sizeDistributionDict[5] += 1    
    sumNumOfPostings += currentNumOfPostings
    lineCounter += 1
print "sumNumOfPostingsForALLDocs:",sumNumOfPostings
print "avgNumOfPostingsPerDoc:",sumNumOfPostings/lineCounter
print "# 0 [0,10)",sizeDistributionDict[0]
print "# 1 [10,50)",sizeDistributionDict[1]
print "# 2 [50,100)",sizeDistributionDict[2]
print "# 3 [100,200)",sizeDistributionDict[3]
print "# 4 [200,500)",sizeDistributionDict[4]
print "# 5 [500,MAX BIG)",sizeDistributionDict[5]
inputFileHandler.close()
exit(1)
'''


# step2:
termDict = {}
# inputFileName1 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/wholeLexiconTermsWithLengthOfListANDClassLabelANDNumOfPostingsInEachPieces20130819_25714_DEBUG_For_1_doc_file"
inputFileName1 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/wholeLexiconTermsWithLengthOfListANDClassLabelANDNumOfPostingsInEachPieces20130819_25714_DEBUG_For_Head2CompressedFileImportantDocsFromGov2"
inputFileHandler = open(inputFileName1,"r")
for line in inputFileHandler.readlines():
    currentTerm = line.strip().split(" ")[0]
    if currentTerm not in termDict:
        termDict[currentTerm] = 1
inputFileHandler.close()
print "len(termDict):",len(termDict)

outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/TermsWithTermIDForHead2CompressedFileImportantDocsFromGov2"
outputFileHandler = open(outputFileName,"w")

inputFileName2 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/wholeLexiconTermsWithTermID"
inputFileHandler = open(inputFileName2,"r")
currentLine = inputFileHandler.readline()
while currentLine:
    currentTerm = currentLine.strip().split(" ")[1]
    if currentTerm in termDict:
        outputFileHandler.write(currentLine)
    else:
        pass
    currentLine = inputFileHandler.readline()
inputFileHandler.close()
outputFileHandler.close()
print "inputFileName1:",inputFileName1
print "inputFileName2:",inputFileName2
print "outputFileName:",outputFileName
exit(1)


'''
# step1:
termDict = {}
# Each line, the file format should be: "traverse the team: 0"
inputFileName1 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/TermsWithTermIDForHead2CompressedFilImportantDocsFromGov2"
inputFileHandler = open(inputFileName1,"r")
for line in inputFileHandler.readlines():
    lineElements = line.strip().split(":")
    currentTerm = lineElements[1].strip()
    if currentTerm not in termDict:
        termDict[currentTerm] = 1
    else:
        print "critical error."
print "len(termDict):",len(termDict)
# print "termDict['00002']:",termDict['00002']
inputFileHandler.close()

outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/wholeLexiconTermsWithLengthOfListANDClassLabelANDNumOfPostingsInEachPieces20130819_25714_DEBUG_For_Head2CompressedFileImportantDocsFromGov2"
outputFileHandler = open(outputFileName,"w")

inputFileName2 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/wholeLexiconTermsWithLengthOfListANDClassLabelANDNumOfPostingsInEachPieces20130819"
inputFileHandler = open(inputFileName2,"r")
currentLine = inputFileHandler.readline()
while currentLine:
    currentTerm = currentLine.strip().split(" ")[0]
    if currentTerm not in termDict:
        pass
    else:
        outputFileHandler.write(currentLine)
    currentLine = inputFileHandler.readline()

inputFileHandler.close()
outputFileHandler.close()
print "inputFileName1:",inputFileName1
print "inputFileName2:",inputFileName2
print "outputFileName:",outputFileName
exit(1)
'''


'''
# code logic currently under construction 2013/09/30 afternoon
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2DocumentWithTheirNumOfPostingsRecordedAndTheDistinctSetOfTerms_20130926Night_xaa"
inputFileHandler = open(inputFileName,"r")
inputFileHandler.close()
'''

'''
randomlySelectedIDDict = {}
totalNumOfPostingsSelected = 1000 # 1K
totalNumOfSamples = 100000 # 100K
# assume that the selectedID starts from 0 to 25205179 in the total of 25,205,179 (25M)
while len(randomlySelectedIDDict) != totalNumOfPostingsSelected:
    # Return a random integer N such that a <= N <= b
    selectedID = random.randint(0, totalNumOfSamples-1)
    if selectedID not in randomlySelectedIDDict:
        randomlySelectedIDDict[selectedID] = 1

print "randomlySelectedIDDict:",randomlySelectedIDDict
print "len(randomlySelectedIDDict):",len(randomlySelectedIDDict)

inputFileName = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/randomlySelectedWithLocalPostingIDsWithStaticANDDynamicPart_20130928Afternoon"
inputFileHandler = open(inputFileName,"r")

outputFileName = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/randomlySelectedWithLocalPostingIDsWithStaticANDDynamicPart_20130928Afternoon_random_1000.csv"
outputFileHandler = open(outputFileName,"w")

for index,line in enumerate( inputFileHandler.readlines() ):
    if index in randomlySelectedIDDict:
        outputFileHandler.write(line)
    else:
        pass

print "inputFileName:",inputFileName
print "outputFileName:",outputFileName
inputFileHandler.close()
outputFileHandler.close()
exit(1)        
'''



'''
inputFileNameList = []
inputFileName_a = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/xaa"
inputFileName_b = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/xab"
inputFileName_c = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/xac"
inputFileName_d = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/xad"
inputFileName_e = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/xae"
inputFileName_f = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/xaf"
inputFileName_g = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/xag"
inputFileName_h = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/xah"
inputFileName_i = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/xai"
inputFileName_j = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/xaj"

inputFileNameList.append(inputFileName_a)
inputFileNameList.append(inputFileName_b)
inputFileNameList.append(inputFileName_c)
inputFileNameList.append(inputFileName_d)
inputFileNameList.append(inputFileName_e)
inputFileNameList.append(inputFileName_f)
inputFileNameList.append(inputFileName_g)
inputFileNameList.append(inputFileName_h)
inputFileNameList.append(inputFileName_i)
inputFileNameList.append(inputFileName_j)

firstPart = "/data/jhe/trecdata/"
lastPart = ".gz"
for currentInputFileName in inputFileNameList:
    outputFileName = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/" + "gov2_files_" + currentInputFileName[-3:] + "_with_command_19_added"
    outputFileHandler = open(outputFileName,"w")
    outputFileHandler.write("19" + "\n")
    # key: compressFilePath
    # value: (NO USE currently)
    compressFilePathDict = {}
    compressFilePathList = []
    currentInputFileHanlder = open(currentInputFileName,"r")
    for line in currentInputFileHanlder.readlines():
        currentLineElements = line.strip().split("-")
        currentCompressedFileCompletedPath = firstPart + currentLineElements[0] + "/" + currentLineElements[1] + lastPart
        if currentCompressedFileCompletedPath not in compressFilePathDict:
            compressFilePathDict[currentCompressedFileCompletedPath] = 1
        else:
            pass
    currentInputFileHanlder.close()
    print "len(compressFilePathDict):",len(compressFilePathDict)
    compressFilePathList = compressFilePathDict.keys()
    compressFilePathList.sort(cmp=None, key=None, reverse=False)
    
    for currentCompressedFileCompletedPath in compressFilePathList:
         outputFileHandler.write(currentCompressedFileCompletedPath + "\n")
    print "outputFileName:",outputFileName,"DONE"
    outputFileHandler.close()
exit(1)
'''

'''
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2DocumentWithTheirNumOfPostingsRecordedAndTheDistinctSetOfTermsONLY_For_DEBUG_20130926Night"
inputFileHandler = open(inputFileName,"r")

# skip the headline
currentLine = inputFileHandler.readline()

# the first data line
currentLine = inputFileHandler.readline()
currentLineElements = currentLine.strip().split(" ")
print len(currentLineElements)
inputFileHandler.close()
exit(1)
'''


# Updated by Wei on 2013/09/26 afternoon at school
# data analysis about the correctness of the output file
# analyze the following files:(after the break)
    # /data1/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/gov2DocumentWithTheirConnectedEdges_for_DEBUG_20130926Night
    # /data1/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/gov2DocumentWithTheirForwardIndex_for_DEBUG_20130926Night
    # /data1/obukai/the_new_trip_of_feature_generation/gov2DocumentWithTheirCompletedPostingSet_for_DEBUG_20130926Night
    # /data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/gov2DocumentWithTheirXdocValues_for_DEBUG_20130926Night (not used)

'''
inputFileName = "/data1/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/gov2DocumentWithTheirForwardIndex_for_DEBUG_20130926Night"
inputFileHandler = open(inputFileName,"r")
# skip the headline
currentLine = inputFileHandler.readline()
# the first data line
currentLine = inputFileHandler.readline()
currentLineElements = currentLine.strip().split(" ")

currentTrecID = currentLineElements[0]
currentDocSizeInWords = int( currentLineElements[1] )
currentNumOfPostingsRecorded = int( currentLineElements[2] )
print "currentNumOfPostingsRecorded: ",currentNumOfPostingsRecorded

if currentNumOfPostingsRecorded == len(currentLineElements[3:]):
    print "Pass"
else:
    print "Problem"
    exit(1)


inputFileHandler.close()
exit(1)
'''

'''
tempCounter = 0
inputFileName = "/data1/obukai/the_new_trip_of_feature_generation/gov2DocumentWithTheirCompletedPostingSet_for_DEBUG_20130926Night"
inputFileHandler = open(inputFileName,"r")
# skip the headline
currentLine = inputFileHandler.readline()
# the first data line
currentLine = inputFileHandler.readline()
currentLineElements = currentLine.strip().split(" ")
currentTrecID = currentLineElements[0]
currentDocSizeInWords = int( currentLineElements[1] )
currentDocSizeInWords2 = int( currentLineElements[2] )
if currentDocSizeInWords2 == len(currentLineElements[3:]):
    for tupleInStringFormat in currentLineElements[3:]:
        term = tupleInStringFormat.strip().split(",")[0].split("(")[1]
        print "term:",term
        if term == "the":
            tempCounter += 1
else:
    print "Problem"
    exit(1)

print "tempCounter:",tempCounter
inputFileHandler.close()
exit(1)
'''

'''
# Updated by Wei on 2013/09/24 morning at school
inputFileName = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/docID_num_of_postings_recorded_in_index_MappingTableForGov2Dataset"
inputFileHandler = open(inputFileName,"r")

currentNumOfLines = 0
currentNumOfLines += 1
currentLine = inputFileHandler.readline()

totalNumOfPostingsRecorded = 0
maxNumOfPostingsRecordedForIndividualDocument = 0
minNumOfPostingsRecordedForIndividualDocument = 99999
averageNumOfPostingsRecordedForIndividualDocument = 0
currentNumOfPostingsRecorded = 0

while currentLine:
    currentNumOfPostingsRecorded = int( currentLine.strip().split(" ")[1] ) 
    totalNumOfPostingsRecorded += currentNumOfPostingsRecorded
    if currentNumOfPostingsRecorded > maxNumOfPostingsRecordedForIndividualDocument:
        maxNumOfPostingsRecordedForIndividualDocument = currentNumOfPostingsRecorded
    
    if currentNumOfPostingsRecorded < minNumOfPostingsRecordedForIndividualDocument:
        minNumOfPostingsRecordedForIndividualDocument = currentNumOfPostingsRecorded 

    currentNumOfLines += 1    
    currentLine = inputFileHandler.readline()
    
print "totalNumOfPostingsRecorded:",totalNumOfPostingsRecorded
print "maxNumOfPostingsRecordedForIndividualDocument:",maxNumOfPostingsRecordedForIndividualDocument
print "minNumOfPostingsRecordedForIndividualDocument:",minNumOfPostingsRecordedForIndividualDocument
print "averageNumOfPostingsRecordedForIndividualDocument:",totalNumOfPostingsRecorded / currentNumOfLines
print "current Num Of Lines:",currentNumOfLines
inputFileHandler.close()
exit(1);
'''


'''
# key: doc
# value: no use
docDict = {}
inputFileName = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/tail5KResultsWithRelatedPieceNum_sortedByQID"
inputFileHandler = open(inputFileName,"r")
for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    currentTerm = lineElements[2]
    currentDoc = lineElements[3]
    if currentTerm == "so":
        if currentDoc not in docDict:
            docDict[currentDoc] = 1
print "len(docDict):",len(docDict)
inputFileHandler.close()
exit(1)
'''

'''
allTermsDict = {}
inputFileName1 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/queryTermsONLY.txt"
inputFileHandler = open(inputFileName1,"r")
for line in inputFileHandler.readlines():
    currentTerm = line.strip()
    if currentTerm not in allTermsDict:
        allTermsDict[currentTerm] = 1
    else:
        exit(1)
print "len(allTermsDict):",len(allTermsDict)
inputFileHandler.close()

head95KTermsDict = {}
inputFileName2 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/queryTermsONLY_from_head_95K_queries_ALL"
inputFileHandler = open(inputFileName2,"r")
for line in inputFileHandler.readlines():
    currentTerm = line.strip()
    if currentTerm not in head95KTermsDict:
        head95KTermsDict[currentTerm] = 1
    else:
        exit(1)
print "len(head95KTermsDict):",len(head95KTermsDict)
inputFileHandler.close()

tail5KTermsDict = {}
outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/queryTermsONLY_from_tail_5K_queries_ALL"
outputFileHanlder = open(outputFileName,"w")
for term in allTermsDict:
    if term in head95KTermsDict:
        pass
    else:
        tail5KTermsDict[term] = 1
        
termList = []
termList = tail5KTermsDict.keys()
termList.sort(cmp=None, key=None, reverse=False)
for term in termList:
    outputFileHanlder.write(term + "\n")

outputFileHanlder.close()

print "len(allTermsDict): ",len(allTermsDict)
print "len(head95KTermsDict): ",len(head95KTermsDict)
print "len(tail5KTermsDict): ",len(tail5KTermsDict)

print "inputFileName1:",inputFileName1
print "inputFileName2:",inputFileName2
print "outputFileName:",outputFileName
exit(1)
'''

'''
print "Let's verify some info first"
# key: docID
# value: term
docIDDict = {}
postingDict = {}
inputFileName = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/tail5KResultsWithRelatedPieceNum_sortedByQID"
inputFileHanlder = open(inputFileName,"r")
for line in inputFileHanlder.readlines():
    lineElements = line.strip().split(" ")
    currentDocID = lineElements[1]
    currentTerm = lineElements[2]
    currentPostingKey = currentTerm + "_" + currentDocID
    
    if currentDocID not in docIDDict:
        docIDDict[currentDocID] = 1
    else:
        docIDDict[currentDocID] += 1
    
    if currentPostingKey not in postingDict:
        postingDict[currentPostingKey] = 1
    else:
        postingDict[currentPostingKey] += 1

print "# of unique docIDs: ",len(docIDDict)
print "# of document results: ","46772"
print "# of unique postings: ",len(postingDict)
inputFileHanlder.close()
exit(1)
'''



'''
termWithTermIDDict = {}

inputFileName1 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/wholeLexiconTermsWithTermID"
inputFileHandler = open(inputFileName1,"r")
currentLine = inputFileHandler.readline()
while currentLine:
    currentLineElements = currentLine.strip().split(" ")
    currentTermIDInStringFormat = currentLineElements[0]
    currentTermInStringFormat = currentLineElements[1]
    if currentTermInStringFormat not in termWithTermIDDict:
        termWithTermIDDict[currentTermInStringFormat] = currentTermIDInStringFormat
    else:
        print "Duplicate terms"
        exit(1)
    currentLine = inputFileHandler.readline()
inputFileHandler.close()
print "len(termWithTermIDDict):",len(termWithTermIDDict)
print "termWithTermIDDict['000']:",termWithTermIDDict['000']

inputFileName2 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/TermsBothAppearIn100KANDLexicionWithLengthOfListANDClassLabelANDNumOfPostingsInEachPieces20130819"
inputFileHandler = open(inputFileName2,"r")
outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/TermsBothAppearIn100KANDLexicionWithTermID"
outputFileHandler = open(outputFileName,"w")

for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    currentTerm = lineElements[0]
    outputFileHandler.write( termWithTermIDDict[currentTerm] + " " + currentTerm + "\n")

inputFileHandler.close()
outputFileHandler.close()

print "inputFileName1: ",inputFileName1
print "inputFileName2: ",inputFileName2
print "outputFileName: ",outputFileName
exit(1)
'''

'''
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/wholeLexiconTermsWithLengthOfListANDClassLabelANDNumOfPostingsInEachPieces20130819"
inputFileHandler = open(inputFileName,"r")

outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/wholeLexiconTermsWithTermID"
outputFileHandler = open(outputFileName,"w")

currentLine = inputFileHandler.readline()
lineIndex = 0
while currentLine:
    currentLineElements = currentLine.strip().split(" ")
    currentTerm = currentLineElements[0]
    outputFileHandler.write(str(lineIndex) + " " + currentTerm + "\n")
    
    lineIndex += 1
    currentLine = inputFileHandler.readline()
    
inputFileHandler.close()
outputFileHandler.close()
exit(1)
'''

'''
inputFileName = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/trecID_docID_num_of_postings_recorded_in_index_MappingTableForGov2Dataset"
inputFileHandler = open(inputFileName,"r")
currentLine = inputFileHandler.readline()
totalNumOfPostings = 0
while currentLine:
    currentLineElements = currentLine.strip().split(" ")
    currentNumOfPostings = int( currentLineElements[1] )
    totalNumOfPostings += currentNumOfPostings
    currentLine = inputFileHandler.readline()
inputFileHandler.close()
print "totalNumOfPostings:",totalNumOfPostings
exit(1)
'''

'''
termDict = {}

inputFileName = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/tempFile"
inputFileHandler = open(inputFileName,"r")
for line in inputFileHandler.readlines():
    term = line.strip()
    if term not in termDict:
        termDict[term] = 1
    else:
        print "critical error"
        exit(1)
inputFileHandler.close()
print "len(termDict):",len(termDict)

inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/wholeLexiconTermsWithLengthOfListANDClassLabelANDNumOfPostingsInEachPieces20130819"
inputFileHandler = open(inputFileName,"r")

outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/TermsBothAppearIn100KANDLexicionWithLengthOfListANDClassLabelANDNumOfPostingsInEachPieces20130819"
outputFileHanlder = open(outputFileName,"w")

currentLine = inputFileHandler.readline()
while currentLine:
    currentTerm = currentLine.strip().split(" ")[0]
    if currentTerm in termDict:
        outputFileHanlder.write(currentLine)
    else:
        pass
    currentLine = inputFileHandler.readline()

inputFileHandler.close()
outputFileHanlder.close()
exit(1)
'''

'''
inputFileName = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/tempSetOfTermsForDebuggingMultipleExternalIndexAccess"
inputFileHandler = open(inputFileName,"r")
for line in inputFileHandler.readlines():
    print len(line.strip().split(" "))
inputFileHandler.close()
exit(1)
'''

'''
inputFileName = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/forwardIndexExperimentalRepresentation20130909_OLD"
inputFileHandler = open(inputFileName,"r")

outputFileName2 = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/tempSetOfTermsForDebuggingMultipleExternalIndexAccess"
outputFileHanlder2 = open(outputFileName2,"w")

totalNumOfPostingsCounted = 0
currentOutputLine = ""
currentDocTermProbabilityDict = {}

currentLine = inputFileHandler.readline()
while currentLine:
    
    currentLineElements = currentLine.strip().split(" ")
    currentDocTrecID = currentLineElements[0]
    
    print "currentDocTrecID:",currentDocTrecID
    print "# of words in doc:",int( len(currentLineElements[2:])/3 )
    # Updated by Wei 2013/09/09 night
    # This document identifier can be as the trecID external, or the docID internal, OR simply just the docIndex
    currentDocIdentifier = currentDocTrecID
    numOfPostingsRecordedForCurrentDocument = int(currentLineElements[1])
    
    
    
    ##############################################
    
    
    if numOfPostingsRecordedForCurrentDocument != len(currentLineElements[2:])/3:
        print "critical format problem in the input file"
        exit(1)
    else:
        # OK for passing
        pass
    
    baseIndex = 2
    step = 3
    for i in range(0,numOfPostingsRecordedForCurrentDocument):
        currentTerm = currentLineElements[baseIndex + step * i]
        currentTermScore = float( currentLineElements[baseIndex + step * i + 1] )
        currentTermFakePart1Probability = -1.0
        if currentTerm not in currentDocTermProbabilityDict:
            currentTermFakePart1Probability = random.random()
            currentDocTermProbabilityDict[currentTerm] = currentTermFakePart1Probability
        else:
            currentTermFakePart1Probability = currentDocTermProbabilityDict[currentTerm]
        
        
        totalNumOfPostingsCounted += 1
    
    for term in currentDocTermProbabilityDict:
        outputFileHanlder2.write(term + "\n")
    
    print "# of postings in doc:",len(currentDocTermProbabilityDict)
    print
    
    currentLine = inputFileHandler.readline()
    
    # for DEBUG only
    break

print "Overall Processing Stats:"
print "totalNumOfPostingsCounted:",totalNumOfPostingsCounted
print "inputFileName:",inputFileName
print "outputFileName:",outputFileName
inputFileHandler.close()
outputFileHanlder2.close()
exit(1)
'''



'''
inputFileName = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/forwardIndexExperimentalRepresentation20130909_OLD"
inputFileHandler = open(inputFileName,"r")

outputFileName = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/forwardIndexExperimentalRepresentation20130909_NEW"
outputFileHanlder = open(outputFileName,"w")

totalNumOfPostingsCounted = 0
currentOutputLine = ""
currentDocTermProbabilityDict = {}

currentLine = inputFileHandler.readline()
while currentLine:
    currentOutputLine = ""
    currentLineElements = currentLine.strip().split(" ")
    currentDocTrecID = currentLineElements[0]
    
    print "currentDocTrecID:",currentDocTrecID
    print "# of words in doc:",int( len(currentLineElements[2:])/3 )
    # Updated by Wei 2013/09/09 night
    # This document identifier can be as the trecID external, or the docID internal, OR simply just the docIndex
    currentDocIdentifier = currentDocTrecID
    numOfPostingsRecordedForCurrentDocument = int(currentLineElements[1])
    
    # column0: currentDocTrecID (external)
    # column1: currentDocDicID (internal)
    # column2: numOfPostingsRecordedForCurrentDocument
    currentOutputLine += currentDocTrecID + " " + "N/A" + " " + str(numOfPostingsRecordedForCurrentDocument) + " "
    
    ##############################################
    
    
    if numOfPostingsRecordedForCurrentDocument != len(currentLineElements[2:])/3:
        print "critical format problem in the input file"
        exit(1)
    else:
        # OK for passing
        pass
    
    baseIndex = 2
    step = 3
    for i in range(0,numOfPostingsRecordedForCurrentDocument):
        currentTerm = currentLineElements[baseIndex + step * i]
        currentTermScore = float( currentLineElements[baseIndex + step * i + 1] )
        currentTermFakePart1Probability = -1.0
        if currentTerm not in currentDocTermProbabilityDict:
            currentTermFakePart1Probability = random.random()
            currentDocTermProbabilityDict[currentTerm] = currentTermFakePart1Probability
        else:
            currentTermFakePart1Probability = currentDocTermProbabilityDict[currentTerm]
        
        currentOutputLine += currentTerm + " " + str(currentTermScore) + " " + str(currentTermFakePart1Probability) + " "
        totalNumOfPostingsCounted += 1
    
    currentOutputLine = currentOutputLine.strip() + "\n"
    outputFileHanlder.write(currentOutputLine)
    
    print "# of postings in doc:",len(currentDocTermProbabilityDict)
    print
        
    currentLine = inputFileHandler.readline()

print "Overall Processing Stats:"
print "totalNumOfPostingsCounted:",totalNumOfPostingsCounted
print "inputFileName:",inputFileName
print "outputFileName:",outputFileName
inputFileHandler.close()
outputFileHanlder.close()
exit(1)
'''

'''
print "sys.float_info.max:",sys.float_info.max
print "sys.float_info.min:",sys.float_info.min
exit(1)

d = {
     "int": 0,
     "float": 0.0,
     "dict": dict(),
     "set": set(),
     "tuple": tuple(),
     "list": list(),
     "str": "a"
}

for k, v in sorted(d.iteritems()):
    print k, sys.getsizeof(v)

exit(1)
'''


'''
for i in range(0,10):
    print random.random()
exit(1)
'''

'''
for i in range(0,10):
    print i
exit(1)
'''

'''
# logic of finding the largest element in the list
queryTermsIn100KQueriesDict = {}
inputFileName0 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/100KQueryTermsWithTermFreqInCollection.txt"
inputFileHandler = open(inputFileName0,"r")
for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    currentTerm = lineElements[0]
    # ignore lineElements[1]
    if currentTerm not in queryTermsIn100KQueriesDict:
        queryTermsIn100KQueriesDict[currentTerm] = 0
print "len(queryTermsIn100KQueriesDict):",len(queryTermsIn100KQueriesDict)
inputFileHandler.close()


inputFileName1 = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/randomlySampledPostingIDList20130828_with_set_of_probability_added_DEBUG"
inputFileHandler = open(inputFileName1,"r")

currentMaxScore = 0.0
currentMaxScoreLine = ""

for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    currentTerm = lineElements[0]
    if currentTerm in queryTermsIn100KQueriesDict:
        currentLineScore = float(lineElements[3])
        if currentLineScore > currentMaxScore:
            currentMaxScore = currentLineScore
            currentMaxScoreLine = line
    else:
        pass

print "currentMaxScoreLine:",currentMaxScoreLine.strip()
inputFileHandler.close()
exit(1)
'''

'''
inputFileName = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/randomlySampledPostingIDList20130828_with_local_index_for_each_term_added"
inputFileHandler = open(inputFileName,"r")

outputFileName = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/randomlySampledPostingIDList20130828_with_local_index_for_each_term_added_for_DEBUG_using_term_gov"
outputFileHandler = open(outputFileName,"w")

for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    currentTerm = lineElements[0]
    if currentTerm != "gov":
        pass
    else:
        outputFileHandler.write(line)

print "inputFileName:",inputFileName
print "outputFileName:",outputFileName

inputFileHandler.close()
outputFileHandler.close()
exit(1)
'''

'''
x = np.random.random(10)
y = np.random.random(10)

slope, intercept, r_value, p_value, std_err = stats.linregress(x,y)
print "slope:",slope
print "intercept:",intercept
print "r-squared:", r_value**2
print "p_value:",p_value
print "std_err:",std_err
exit(1)
'''


# NOTE(review): dead experiment, kept as a module-level string literal so it is
# never executed (the file's runtime behavior is unchanged by edits inside it).
# The archived version had concrete bugs (the author's own comment admits it);
# they are fixed below so the snippet is usable if revived:
#   * the numerator was read from column 2, same as the denominator
#     (assumed to live in column 3 -- TODO confirm against the raw CSV),
#   * the accumulated numerator/denominator were never reset after a merged
#     row was written, so every row after the first was cumulative,
#   * combineClassLowerBoundValue / combineClassUpperBoundValue were never
#     assigned, so every output row reported its bounds as 0_0.
'''
# Combine consecutive score-bucket rows until the accumulated numerator
# exceeds 200, then emit one merged row in the format:
#   "<loLabel>_<hiLabel> <loBound>_<hiBound> <denominator> <numerator>"
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/partialBM25ScoreBucketingPrelimanryResultsV2_RAW.csv"
inputFileHandler = open(inputFileName,"r")

outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/partialBM25ScoreBucketingPrelimanryResultsV2_Combined.csv"
outputFileHandler = open(outputFileName,"w")

currentAccumulatedDenominator = 0
currentAccumulatedNumerator = 0

combineClassLowerBoundValue = 0
combineClassUpperBoundValue = 0

combineClassLowerBoundLabel = 0
combineClassUpperBoundLabel = 0

newCombineClassFlag = True
for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    currentClassLabel = int(lineElements[0])
    classLowerBound = float(lineElements[1])
    if newCombineClassFlag:
        # first row of a new combined class: remember its label and bound
        combineClassLowerBoundLabel = currentClassLabel
        combineClassLowerBoundValue = classLowerBound
        newCombineClassFlag = False
    currentDenominator = int(lineElements[2])
    currentNumerator = int(lineElements[3])  # bug fix: was lineElements[2]
    currentAccumulatedDenominator += currentDenominator
    currentAccumulatedNumerator += currentNumerator
    if currentAccumulatedNumerator > 200:
        # it is time to combine those lines into one output row
        newCombineClassFlag = True
        combineClassUpperBoundLabel = currentClassLabel
        combineClassUpperBoundValue = classLowerBound
        outputLine =  str(combineClassLowerBoundLabel) + "_" + str(combineClassUpperBoundLabel) + " "
        outputLine += str(combineClassLowerBoundValue) + "_" + str(combineClassUpperBoundValue) + " "
        outputLine += str(currentAccumulatedDenominator) + " "
        outputLine += str(currentAccumulatedNumerator) + " "
        outputLine += "\n"
        outputFileHandler.write(outputLine)
        # bug fix: restart accumulation for the next combined class
        currentAccumulatedDenominator = 0
        currentAccumulatedNumerator = 0

# NOTE(review): a trailing partial class (numerator <= 200 at EOF) is still
# silently dropped, same as the original.
inputFileHandler.close()
outputFileHandler.close()
'''


# Dead snippet (string literal, never run): sums the frequency column
# (field 1) of the head-95K bucketing file and prints the total.
# NOTE(review): handle name "inputFileHanlder" is a typo kept from the original.
'''
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/ALLRelatedPostingsBucketingIntoClassesFromHead95KQueries"
inputFileHanlder = open(inputFileName,"r")
currentAccumulatedFreq = 0
for line in inputFileHanlder.readlines():
    lineElements = line.strip().split(" ")
    currentAccumulatedFreq += int( lineElements[1] )
print "currentAccumulatedFreq:",currentAccumulatedFreq
inputFileHanlder.close()
exit(1)
'''

# Dead snippet (string literal, never run): walks a debug log and, for each
# (term, real-freq-in-95K-queries, inverted-list-length) triple it can parse,
# accumulates freq * listLength into totalNumOfPostingsCounted.
# NOTE(review): currentTerm / freq / length persist across loop iterations, so
# a record that is missing one of the three fields silently reuses the value
# parsed for a previous term -- confirm the log format guarantees all three.
'''
print "Program Begins..."

inputFileName = "/data3/obukai/workspace/web-search-engine-wei/scripts/src/pythonScripts/gov2/debug"
inputFileHandler = open(inputFileName,"r")

totalNumOfPostingsCounted = 0

currentTerm = ""
currentRealFreqOfTermInHead95KQueries = 0
currentTermLengthOfList = 0
tripleCounter = 0

line = inputFileHandler.readline()

while line:
    if line.strip().startswith("totalNumOfPostingsCounted:"):
        break
    
    if line.strip().startswith("------>term:"):
        lineElements = line.strip().split(" ")
        currentTerm = lineElements[-1] 
        line = inputFileHandler.readline()
    
    if line.strip().startswith("------>real freq of term in head 95K queries:"):
        lineElements = line.strip().split(" ")
        currentRealFreqOfTermInHead95KQueries = int(lineElements[-1])
        line = inputFileHandler.readline()
    
    if line.strip().startswith("------>currentTermInvertedIndexCorrectLength:"):
        lineElements = line.strip().split(" ")
        currentTermLengthOfList = int(lineElements[1])
    
    if currentTerm.strip() != "":
        if currentRealFreqOfTermInHead95KQueries != 0:
            if currentTermLengthOfList != 0:
                totalNumOfPostingsCounted += currentRealFreqOfTermInHead95KQueries * currentTermLengthOfList
                tripleCounter += 1
                print "Processing term:",currentTerm
                print "currentRealFreqOfTermInHead95KQueries:",currentRealFreqOfTermInHead95KQueries
                print "currentTermLengthOfList:",currentTermLengthOfList
                print
            else:
                line = inputFileHandler.readline()
                if line.strip().endswith("is NOT in the lexicon."):
                    pass
                else:
                    print "DEBUG mode:"
                    print "currentTerm:",currentTerm
                    print "currentRealFreqOfTermInHead95KQueries:",currentRealFreqOfTermInHead95KQueries
                    print "currentTermLengthOfList:",currentTermLengthOfList
                    exit(1)                    
        else:
            print "DEBUG mode:"
            print "currentTerm:",currentTerm
            print "currentRealFreqOfTermInHead95KQueries:",currentRealFreqOfTermInHead95KQueries
            print "currentTermLengthOfList:",currentTermLengthOfList
            exit(1)
    else:
        pass
        #print "currentTerm:",currentTerm
        #print "currentRealFreqOfTermInHead95KQueries:",currentRealFreqOfTermInHead95KQueries
        #print "currentTermLengthOfList:",currentTermLengthOfList
        #print "some problems happened :)"
        #exit(1)
    
    line = inputFileHandler.readline()
    
print "totalNumOfPostingsCounted:",totalNumOfPostingsCounted
print "tripleCounter:",tripleCounter
print "Program Ends."
exit(1)
'''

# Dead snippet (string literal, never run): buckets ALL / TOP10 related
# postings into geometric score classes whose lower bounds are sMin * 1.2^k,
# then prints per-class TOP10/ALL ratios and verifies the grand totals.
# NOTE(review): the class-bound loop hard-codes 1.2 instead of using
# stepFactor; the TOP10 pass repeats the condition "classLabel != 0" twice
# (the first occurrence is a pass-only branch, presumably meant to mirror the
# "== 0" branch of the ALL pass); values below the class-0 lower bound are
# never counted -- only the ALL pass detects that via beenCountedFlag.
'''
# This part of logic has been copied to the program called: 
# /data3/obukai/workspace/web-search-engine-wei/scripts/src/pythonScripts/gov2/stepX_gov2_partialBM25_hit_list_distribution_analysis.py
print "Program Begins..."

# key: class label in int format
# value: currentLowerBound in float format
classLabelsWithTheirLowerBoundDict = {}

# key: class label in int format
# value: # of ALL related postings belonging to this class
classLabelsWithNumOfALLRelatedPostingsDict = {}

# key: class label in int format
# value: # of TOP10 related postings belonging to this class
classLabelsWithNumOfTOP10RelatedPostingsDict = {}

sMin = 0.001
sMax = 19.746
stepFactor = 1.2

currentLowerBound = sMin
classLabel = 0
while currentLowerBound <= 25:
    # print classLabel,currentLowerBound
    if classLabel not in classLabelsWithTheirLowerBoundDict:
        classLabelsWithTheirLowerBoundDict[classLabel] = currentLowerBound
        classLabelsWithNumOfALLRelatedPostingsDict[classLabel] = 0
        classLabelsWithNumOfTOP10RelatedPostingsDict[classLabel] = 0
    currentLowerBound = currentLowerBound * 1.2
    classLabel += 1

print "len(classLabelsWithTheirLowerBoundDict):",len(classLabelsWithTheirLowerBoundDict)

inputFileName0 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/ALLRelatedPostingsBucketingIntoClassesFromHead95KQueries"
inputFileHandler = open(inputFileName0,"r")
for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    currentOriginalRoundingClassInFloatFormat = float(lineElements[0])
    numOfPostingsInCurrentRoundingClass = int( lineElements[1] )
    
    if numOfPostingsInCurrentRoundingClass != 0:
        beenCountedFlag = False
        for classLabel in classLabelsWithTheirLowerBoundDict:
            # for DEBUG
            # print "currentOriginalRoundingClassInFloatFormat:",currentOriginalRoundingClassInFloatFormat
            # print "classLabelsWithTheirLowerBoundDict[classLabel]:",classLabelsWithTheirLowerBoundDict[classLabel]
            if classLabel == 0:
                pass
                # for DEBUG
                # print "classLabelsWithTheirLowerBoundDict[classLabel-1]:",classLabelsWithTheirLowerBoundDict[classLabel-1]
            elif classLabel != 0:
                if currentOriginalRoundingClassInFloatFormat < classLabelsWithTheirLowerBoundDict[classLabel] and currentOriginalRoundingClassInFloatFormat >= classLabelsWithTheirLowerBoundDict[classLabel-1]:
                    classLabelsWithNumOfALLRelatedPostingsDict[classLabel-1] += numOfPostingsInCurrentRoundingClass
                    beenCountedFlag = True
                else:
                    pass
            else:
                pass # just don't care
        if beenCountedFlag:
            pass
        else:
            print "currentOriginalRoundingClassInFloatFormat:",currentOriginalRoundingClassInFloatFormat
            print "numOfPostingsInCurrentRoundingClass:",numOfPostingsInCurrentRoundingClass
            exit(1)
inputFileHandler.close()

inputFileName1 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/TOP10RelatedPostingsBucketingIntoClassesFromHead95KQueries"
inputFileHandler = open(inputFileName1,"r")
for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    currentOriginalRoundingClassInFloatFormat = float(lineElements[0])
    numOfPostingsInCurrentRoundingClass = int( lineElements[1] )
    if numOfPostingsInCurrentRoundingClass != 0:
        for classLabel in classLabelsWithTheirLowerBoundDict:
            # for DEBUG
            # print "currentOriginalRoundingClassInFloatFormat:",currentOriginalRoundingClassInFloatFormat
            # print "classLabelsWithTheirLowerBoundDict[classLabel]:",classLabelsWithTheirLowerBoundDict[classLabel]
            if classLabel != 0:
                pass
                # for DEBUG
                # print "classLabelsWithTheirLowerBoundDict[classLabel-1]:",classLabelsWithTheirLowerBoundDict[classLabel-1]
            
            if classLabel != 0:
                if currentOriginalRoundingClassInFloatFormat < classLabelsWithTheirLowerBoundDict[classLabel] and currentOriginalRoundingClassInFloatFormat >= classLabelsWithTheirLowerBoundDict[classLabel-1]:
                    classLabelsWithNumOfTOP10RelatedPostingsDict[classLabel-1] += numOfPostingsInCurrentRoundingClass
                else:
                    pass
            else:
                pass # just don't care
inputFileHandler.close()

print "len(classLabelsWithTheirLowerBoundDict):",len(classLabelsWithTheirLowerBoundDict)
print "len(classLabelsWithNumOfALLRelatedPostingsDict):",len(classLabelsWithNumOfALLRelatedPostingsDict)
print "len(classLabelsWithNumOfTOP10RelatedPostingsDict):",len(classLabelsWithNumOfTOP10RelatedPostingsDict)
assert len(classLabelsWithTheirLowerBoundDict) == len(classLabelsWithNumOfALLRelatedPostingsDict)

for i in range( 0,len(classLabelsWithTheirLowerBoundDict) ):
    if classLabelsWithNumOfALLRelatedPostingsDict[i] != 0:
        print i,classLabelsWithTheirLowerBoundDict[i],classLabelsWithNumOfALLRelatedPostingsDict[i],classLabelsWithNumOfTOP10RelatedPostingsDict[i],classLabelsWithNumOfTOP10RelatedPostingsDict[i]/classLabelsWithNumOfALLRelatedPostingsDict[i]
    else:
        print i,classLabelsWithTheirLowerBoundDict[i],classLabelsWithNumOfALLRelatedPostingsDict[i],classLabelsWithNumOfTOP10RelatedPostingsDict[i],0

# check
# total Num Of ALL Related Postings Counted: 5054184114
# total Num Of TOP10 Related Postings Counted: 3632263
tempSumValue1 = 0
tempSumValue2 = 0
for i in range( 0,len(classLabelsWithTheirLowerBoundDict) ):
    tempSumValue1 += classLabelsWithNumOfALLRelatedPostingsDict[i]
    tempSumValue2 += classLabelsWithNumOfTOP10RelatedPostingsDict[i]
print "ALLRelatedPostings:",tempSumValue1
print "TOP10RelatedPostings:",tempSumValue2
print "Probability for a random posting to be in the TOP10:",tempSumValue2/tempSumValue1
print "sMin:",sMin
print "sMax:",sMax
print "stepFactor:",stepFactor
assert tempSumValue1 == 5054184114
assert tempSumValue2 == 3632263
print "Program Ends."
exit(1)
'''


# Dead snippet (string literal, never run): scans the same debug log for
# "------>currentTermInvertedIndexCorrectLength:" lines and sums field 1
# to get the total number of postings.
'''
print "Program Begins..."
# 2 Billion so far, still need to wait for the program to run. The program itself is called:
# /data3/obukai/workspace/web-search-engine-wei/scripts/src/pythonScripts/gov2/govX_gov2_partialBM25_hit_list_distribution_analysis.py
inputFileName = "/data3/obukai/workspace/web-search-engine-wei/scripts/src/pythonScripts/gov2/debug"
inputFileHandler = open(inputFileName,"r")

totalNumOfPostings = 0
for index,line in enumerate( inputFileHandler.readlines() ):
    if line.strip().startswith("------>currentTermInvertedIndexCorrectLength:"):
        lineElements = line.strip().split(" ")
        currentNumOfPostings = int(lineElements[1])
        totalNumOfPostings += currentNumOfPostings
print "totalNumOfPostings:",totalNumOfPostings
print "Program Ends."
exit(1)
'''

# Dead snippet (string literal, never run): walks query terms sorted by real
# frequency (decreasing) and prints the cumulative frequency and its share of
# all term occurrences at TOP-K cutoffs K = 5, 10, 20, ... (5 * 2^i, i<14),
# relying on float division from "from __future__ import division".
'''
print "Program Begins..."
# This task is very simple, answer prof's question, get that answer done and direct reply. Very simple.
# This logic is going to answer the question4 by prof
TOTAL_NUM_OF_TERM_OCCURRENCES_IN_100K_QUERIES = 413533

inputFileName0 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/realFreqOfTermsIn_100KQueries_sorted_by_real_freq_decreasing_order"
inputFileHandler = open(inputFileName0,"r")
# tempCounter = 0

# init the variable: TOPKNumbersOutputingList 
TOPKNumbersOutputingList = []

# fill the variable: TOPKNumbersOutputingList
currentTOPKValue = 5
step = 2
for i in range(0,14):
    TOPKNumbersOutputingList.append(currentTOPKValue)
    currentTOPKValue = currentTOPKValue * step
# TOPKNumbersOutputingList.append(1000)
# check the variable: TOPKNumbersOutputingList
print "TOPKNumbersOutputingList:",TOPKNumbersOutputingList
accumulatedTermRealFreqIn100KQueries = 0

for index,line in enumerate( inputFileHandler.readlines() ):
    lineElements = line.strip().split(" ")
    currentTerm = lineElements[0]
    currentTermRealFreqIn100KQueries = int(lineElements[1])
    accumulatedTermRealFreqIn100KQueries += currentTermRealFreqIn100KQueries
    
    if index+1 in TOPKNumbersOutputingList:
        print index+1,accumulatedTermRealFreqIn100KQueries,accumulatedTermRealFreqIn100KQueries / TOTAL_NUM_OF_TERM_OCCURRENCES_IN_100K_QUERIES

print index+1,accumulatedTermRealFreqIn100KQueries,accumulatedTermRealFreqIn100KQueries / TOTAL_NUM_OF_TERM_OCCURRENCES_IN_100K_QUERIES
inputFileHandler.close()
print "Program Ends."
exit(1)
'''

# Dead snippet (string literal, never run): loads term->real-frequency for the
# 100K queries, then walks the whole lexicon sorted by list length (decreasing)
# and prints, at TOP-K cutoffs K = 5 * 2^i (i<24), how much of the total term
# occurrences the first K lexicon terms cover.
# NOTE(review): the final print reuses percentageOfTotalLengthOfListOfALL from
# the last cutoff hit, so it raises NameError if the file is shorter than the
# smallest cutoff; "inputFileHanlder" typo kept from the original.
'''
print "Program Begins..."
TOTAL_NUM_OF_TERM_OCCURRENCES_IN_100K_QUERIES = 413533

# This task is very simple, answer prof's question, get that answer done and direct reply. Very simple.
# The file TOP1000 lines: 
# /data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/wholeLexiconTermsWithTermFreqInCollection_TermFreqInQLAdded_ProbabilityInQLAdded_DesidingFactorAdded_sortedByListLength_tail_1000.txt


# init the variable: TOPKNumbersOutputingList 
TOPKNumbersOutputingList = []

# fill the variable: TOPKNumbersOutputingList
currentTOPKValue = 5
step = 2
for i in range(0,24):
    TOPKNumbersOutputingList.append(currentTOPKValue)
    currentTOPKValue = currentTOPKValue * step
# TOPKNumbersOutputingList.append(1000)
# check the variable: TOPKNumbersOutputingList
print "TOPKNumbersOutputingList:",TOPKNumbersOutputingList

queryTermsWithRealFreqIn100KQueriesDict = {}
# num of term occurrences in this 100K queries: 413533
inputFileName0 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/realFreqOfTermsIn_100KQueries_sorted_by_real_freq_decreasing_order"
inputFileHandler = open(inputFileName0,"r")
# tempCounter = 0
for index,line in enumerate( inputFileHandler.readlines() ):
    lineElements = line.strip().split(" ")
    currentTerm = lineElements[0]
    currentTermRealFreqIn100KQueries = int(lineElements[1])
    if currentTerm not in queryTermsWithRealFreqIn100KQueriesDict:
        queryTermsWithRealFreqIn100KQueriesDict[currentTerm] = currentTermRealFreqIn100KQueries
    else:
        print "duplicated terms found"
        exit(1)
    # tempCounter += int( lineElements[1] )
# print "tempCounter:",tempCounter
inputFileHandler.close()
print "len(queryTermsWithRealFreqIn100KQueriesDict):",len(queryTermsWithRealFreqIn100KQueriesDict)
print "queryTermsWithRealFreqIn100KQueriesDict['of']:",queryTermsWithRealFreqIn100KQueriesDict['of']


# for production
inputFileName1 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/wholeLexiconTermsWithTermFreqInCollection_TermFreqInQLAdded_ProbabilityInQLAdded_DesidingFactorAdded_sortedByListLength_decreasing_order.txt"
# for debug
# inputFileName1 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/wholeLexiconTermsWithTermFreqInCollection_TermFreqInQLAdded_ProbabilityInQLAdded_DesidingFactorAdded_sortedByListLength_decreasing_order_tail_1000.txt"
inputFileHanlder = open(inputFileName1,"r")

# outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/TOP1000TermsInTermsOfLengthOfListForGOV2Index_by_BAGS_OF_TERMS.csv"
# outputFileHandler = open(outputFileName,"w")

currentAccumulatedNumOfOccurencesCounted = 0
currentLine = 0 # init 
lineCounter = 0

currentLine = inputFileHanlder.readline() # read the 1st line
lineCounter += 1

while currentLine:
    lineElements = currentLine.strip().split(" ")
    currentTerm = lineElements[0]
    if currentTerm in queryTermsWithRealFreqIn100KQueriesDict:
        currentAccumulatedNumOfOccurencesCounted += queryTermsWithRealFreqIn100KQueriesDict[currentTerm]
    
    if lineCounter in TOPKNumbersOutputingList: 
        percentageOfTotalLengthOfListOfALL = currentAccumulatedNumOfOccurencesCounted / TOTAL_NUM_OF_TERM_OCCURRENCES_IN_100K_QUERIES 
        print lineCounter,currentAccumulatedNumOfOccurencesCounted,percentageOfTotalLengthOfListOfALL
    
    currentLine = inputFileHanlder.readline()
    lineCounter += 1
print lineCounter,currentAccumulatedNumOfOccurencesCounted,percentageOfTotalLengthOfListOfALL

print "inputFileName0:",inputFileName0
print "inputFileName1:",inputFileName1
inputFileHanlder.close()
# outputFileHandler.close()
print "Program Ends."
exit(1)
'''


# Dead snippet (string literal, never run): loads term->list-length, then walks
# query terms sorted by real frequency (decreasing) and prints, at TOP-K
# cutoffs K = 5 * 2^i (i<14), the cumulative number of postings those terms'
# lists cover and its fraction of the whole designed index.
'''
TOTAL_NUM_OF_DESIGNED_POSTINGS_IN_THE_INVERTED_INDEX = 6451948010 # 100% of the whole index (all the unseen term lists have been added)

# init the variable: TOPKNumbersOutputingList 
TOPKNumbersOutputingIndexList = []

# fill the variable: TOPKNumbersOutputingList
currentTOPKValue = 5
step = 2
for i in range(0,14):
    TOPKNumbersOutputingIndexList.append(currentTOPKValue)
    currentTOPKValue = currentTOPKValue * step
print "TOPKNumbersOutputingIndexList:",TOPKNumbersOutputingIndexList

# key: term in string format
# value: length of the list in int format
queryTermWithLengthOfListDict = {}
inputFileName1 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/100KQueryTermsWithTermFreqInCollection.txt"
inputFileHanlder = open(inputFileName1,"r")
for line in inputFileHanlder.readlines():
    lineElements = line.strip().split(" ")
    currentTerm = lineElements[0]
    currentTermLengthOfList = int(lineElements[1])
    if currentTerm not in queryTermWithLengthOfListDict:
        queryTermWithLengthOfListDict[currentTerm] = currentTermLengthOfList
    else:
        print "duplicated terms."
        exit(1)

# the top 5 query terms which have been searched for many times
# (1) of 11587
# (2) in 8006
# (3) and 6066
# (4) for 5705
# (5) the 4542


print "len(queryTermWithLengthOfListDict):",len(queryTermWithLengthOfListDict)
print "queryTermWithLengthOfListDict['of']:",queryTermWithLengthOfListDict['of']
print "queryTermWithLengthOfListDict['in']:",queryTermWithLengthOfListDict['in']
print "queryTermWithLengthOfListDict['and']:",queryTermWithLengthOfListDict['and']
print "queryTermWithLengthOfListDict['for']:",queryTermWithLengthOfListDict['for']
print "queryTermWithLengthOfListDict['the']:",queryTermWithLengthOfListDict['the']
inputFileHanlder.close()


inputFileName2 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/realFreqOfTermsIn_100KQueries_sorted_by_real_freq_decreasing_order"
inputFileHandler = open(inputFileName2,"r")
accumulatedNumOfPostings = 0
for index,line in enumerate( inputFileHandler.readlines() ):
    lineElements = line.strip().split(" ")
    currentTerm = lineElements[0]
    # get the length of list from another dict
    if currentTerm not in queryTermWithLengthOfListDict:
        print "system error."
        exit(1)
    else:
        accumulatedNumOfPostings += queryTermWithLengthOfListDict[currentTerm]

    if index+1 in TOPKNumbersOutputingIndexList:
        print index+1,accumulatedNumOfPostings,accumulatedNumOfPostings/TOTAL_NUM_OF_DESIGNED_POSTINGS_IN_THE_INVERTED_INDEX
print index+1,accumulatedNumOfPostings,accumulatedNumOfPostings/TOTAL_NUM_OF_DESIGNED_POSTINGS_IN_THE_INVERTED_INDEX
inputFileHandler.close()
exit(1)
'''

# Dead snippet (string literal, never run): walks the whole lexicon sorted by
# list length (decreasing) and prints, at TOP-K cutoffs K = 10 * 2^i (i<24),
# the cumulative postings count and its fraction of the designed index size.
'''
print "Program Begins..."
TOTAL_NUM_OF_DESIGNED_POSTINGS_IN_THE_INVERTED_INDEX = 6451948010 # 100% of the whole index (all the unseen term lists have been added)

# This task is very simple, answer prof's question, get that answer done and direct reply. Very simple.
# The file TOP1000 lines: 
# /data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/wholeLexiconTermsWithTermFreqInCollection_TermFreqInQLAdded_ProbabilityInQLAdded_DesidingFactorAdded_sortedByListLength_tail_1000.txt


# init the variable: TOPKNumbersOutputingList 
TOPKNumbersOutputingList = []

# fill the variable: TOPKNumbersOutputingList
currentTOPKValue = 10
step = 2
for i in range(0,24):
    TOPKNumbersOutputingList.append(currentTOPKValue)
    currentTOPKValue = currentTOPKValue * step
# TOPKNumbersOutputingList.append(1000)
# check the variable: TOPKNumbersOutputingList
print "TOPKNumbersOutputingList:",TOPKNumbersOutputingList

# for production
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/wholeLexiconTermsWithTermFreqInCollection_TermFreqInQLAdded_ProbabilityInQLAdded_DesidingFactorAdded_sortedByListLength_decreasing_order.txt"
# for debug
# inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/wholeLexiconTermsWithTermFreqInCollection_TermFreqInQLAdded_ProbabilityInQLAdded_DesidingFactorAdded_sortedByListLength_decreasing_order_tail_1000.txt"
inputFileHanlder = open(inputFileName,"r")

# outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/TOP1000TermsInTermsOfLengthOfListForGOV2Index_by_BAGS_OF_TERMS.csv"
# outputFileHandler = open(outputFileName,"w")

currentAccumulatedNumOfPostingsCounted = 0
currentLine = 0 # init 
lineCounter = 0

currentLine = inputFileHanlder.readline() # read the 1st line
lineCounter += 1

while currentLine:
    lineElements = currentLine.strip().split(" ")
    currentTermLengthOfList = int(lineElements[1])
    currentAccumulatedNumOfPostingsCounted += currentTermLengthOfList
    
    if lineCounter in TOPKNumbersOutputingList: 
        percentageOfTotalLengthOfListOfALL = currentAccumulatedNumOfPostingsCounted / TOTAL_NUM_OF_DESIGNED_POSTINGS_IN_THE_INVERTED_INDEX 
        print lineCounter,currentAccumulatedNumOfPostingsCounted,percentageOfTotalLengthOfListOfALL
    
    currentLine = inputFileHanlder.readline()
    lineCounter += 1

print "inputFileName:",inputFileName
# print "outputFileName:",outputFileName
inputFileHanlder.close()
# outputFileHandler.close()
print "Program Ends."
exit(1)
'''


# NOTE(review): dead memory-release experiment, kept as a string literal so it
# is never executed.  Rewritten to stop shadowing the builtin `dict`, which
# the archived version clobbered, and to drop the pointless `= None` rebinding
# that immediately followed `del`.
'''
print "Program Begins..."
# build a small mapping, drop the only reference, then force a GC pass
scratchDict = {}
scratchDict[0] = 0
scratchDict[1] = 1
del scratchDict
gc.collect()
print "Program Ends."
'''


# Dead snippet (string literal, never run): sums the per-query result counts
# (field 1) of the tail-5K evaluation file and prints the total.
'''
inputFileName = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/tail5KWithNumOfResultsEvaluated"
inputFileHanlder = open(inputFileName,"r")
totalNumOfResults = 0
for line in inputFileHanlder.readlines():
    lineElements = line.strip().split(" ")
    currentNumOfResults = int(lineElements[1])
    totalNumOfResults += currentNumOfResults
print "totalNumOfResults:",totalNumOfResults
inputFileHanlder.close()
exit(1)
'''

# Dead snippet (string literal, never run): loads per-term class labels and
# per-piece posting counts from the whole-lexicon file into two dicts, with
# periodic progress/timing prints.  Line format (by the parsing below):
#   <term> <listLength> <classLabel> <numPieces> [<pieceNum> <count>]...
# NOTE(review): time.clock() measures CPU time on Unix and is removed in
# Python 3.8 -- a revived version should use time.perf_counter(); the first
# endingTime print measures essentially zero (computed right after start).
'''
# Some numbers:
# Just read all the lines(37M) one by one into main memory, it takes 26 seconds.
# Just split the list into elements(37M lines) and it takes 55 seconds.
# Take 630 seconds to assign some variables in memory
# It takes about 13.2 mins only to load the info to the main memory


startingTime = time.clock()
endingTime = (time.clock() - startingTime)*1000
print "endingTime:",endingTime
   
# code part in test begins.
# key: term in string format
# value: class label in int format
termClassLabelDict = {}

# key: term in string format
# value: a dict
    # key: pieceNumber
    # value: numOfPostingsInThisPiece
termPiecesInfoDict = {}

# This file maybe NOT big enough
# option1: 100K query terms ONLY
# inputFileName0 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/100KQueryTermsWithTermFreqInCollectionANDClassLabelANDNumOfPostingsInEachPieces20130821"
# option2: 100K query terms + some unseen terms for debug
# inputFileName0 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/100KQueryTermsPlusSomeUnseenTermsWithTermFreqInCollectionANDClassLabelANDNumOfPostingsInEachPieces20130821"
# option3: whole lexicon terms for production
inputFileName0 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/wholeLexiconTermsWithLengthOfListANDClassLabelANDNumOfPostingsInEachPieces20130819"
inputFileHandler = open(inputFileName0,"r")

lineCounter = 0 # init

currentLine = inputFileHandler.readline()
lineCounter += 1

while currentLine:
    
    currentLineElements = currentLine.strip().split(" ")
    
    
    currentTerm = currentLineElements[0]
    # ignore currentLineElements[1], cause we will some other sources provide this info about length of the inverted list for this term
    currentTermClassLabelInIntFormat = int( currentLineElements[2] )
    currentTermNumOfPiecesHave = int( currentLineElements[3] )
    
    # fill the variable: termClassLabelDict
    if currentTerm not in termClassLabelDict:
        termClassLabelDict[currentTerm] = currentTermClassLabelInIntFormat 
    
    if currentTerm not in termPiecesInfoDict:
        termPiecesInfoDict[currentTerm] = {}
    
    
    # fill the variable: termPiecesInfoDict
    baseIndex = 4
    for i in range( 0,len( currentLineElements[4:]),2):
        currentPieceNum = int( currentLineElements[4+i] )
        currentNumOfPostingsInThiePiece = int( currentLineElements[4+i+1])
        if currentPieceNum not in termPiecesInfoDict[currentTerm]:
            termPiecesInfoDict[currentTerm][currentPieceNum] = currentNumOfPostingsInThiePiece
        else:
            print "system error, mark1."
    
    
    currentLine = inputFileHandler.readline()
    lineCounter += 1
    if lineCounter % 10000 == 0:
        print "lineCounter:",lineCounter
        print "len(termClassLabelDict):",len(termClassLabelDict)
        print "len(termPiecesInfoDict):",len(termPiecesInfoDict) 
        endingTime = (time.clock() - startingTime)*1000
        print "endingTime:",endingTime
    

inputFileHandler.close()
print "len(termClassLabelDict):",len(termClassLabelDict)
print "len(termPiecesInfoDict):",len(termPiecesInfoDict)
print "termClassLabelDict['0']:",termClassLabelDict['0']
print "termPiecesInfoDict['0']:",termPiecesInfoDict['0']
# print "termClassLabelDict['0120j4']:",termClassLabelDict['0120j4']
# print "termPiecesInfoDict['0120j4']:",termPiecesInfoDict['0120j4']
# len(termClassLabelDict): 38871
# len(termPiecesInfoDict): 38871
# termClassLabelDict['0']: 63
# termPiecesInfoDict['0']: {0: 4200166, 1: 2100083, 2: 1050041, 3: 525020, 4: 262510, 5: 131255, 6: 65627, 7: 32813, 8: 16406, 9: 8203, 10: 4101, 11: 2050, 12: 1025, 13: 512, 14: 256, 15: 128, 16: 64, 17: 73}
# termClassLabelDict['0120j4']: -1
# termPiecesInfoDict['0120j4']: {}
# code part in test ends...
exit(1)
'''





# Dead snippet (string literal, never run): counts distinct (docID, term)
# pairs in the tail-5K results file; the dict is used as a set (values are
# always 1), keyed by "docID_term".
'''
testDict = {}
inputFileName = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/tail5KResultsWithRelatedPieceNum_sortedByQID"
inputFileHandler = open(inputFileName,"r")
for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    docIDInStringFormat = lineElements[1]
    termInStringFormat = lineElements[2]
    key = docIDInStringFormat + "_" + termInStringFormat
    if key not in testDict:
        testDict[key] = 1

print "len(testDict):",len(testDict)
inputFileHandler.close()
'''

# Dead snippet (string literal, never run): remaps old Good-Turing
# probabilities to new ones via two lookup tables (old-probability-string ->
# freq-of-freq from the 85K run, then freq-of-freq -> new probability string
# from the 95K run), rewriting column 4 of the query-terms file.
# NOTE(review): both loaders skip 4 header lines and then break on
# "lineCounter % 21 == 0", i.e. after 20 data rows -- this silently assumes
# exactly 20 Good-Turing rows of interest in each file; rows whose old
# probability is not in the first table are passed through unchanged.
'''
outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/queryTermsFrom100KWithTheirTrueProbablityAndOurOwnModelPredictedProbablity1D_2D_GoodTuringProbabilityAdded20130820"
outputFileHandler = open(outputFileName,"w")

# key: oldGoodTuringProbability In String Format
# value: freqOfFreq in int format
oldGoodTuringProbabilityInStringFormatANDfreqOfFreqDict = {}
inputFileName1 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/freqOfFreqInQueries_head_85K_0_85%_good_turing_output"
inputFileHandler = open(inputFileName1,"r")
inputFileHandler.readline()
inputFileHandler.readline()
inputFileHandler.readline()
inputFileHandler.readline()
lineCounter = 1
currentLine = inputFileHandler.readline()
while currentLine:
    lineElements = currentLine.strip().split(" ")
    freqOfFreq = int(lineElements[0])
    oldGoodTuringProbabilityInStringFormat = lineElements[3]
    if oldGoodTuringProbabilityInStringFormat not in oldGoodTuringProbabilityInStringFormatANDfreqOfFreqDict:
        oldGoodTuringProbabilityInStringFormatANDfreqOfFreqDict[oldGoodTuringProbabilityInStringFormat] = freqOfFreq
    currentLine = inputFileHandler.readline()
    lineCounter += 1
    if lineCounter % 21 == 0:
        break
print "len(oldGoodTuringProbabilityInStringFormatANDfreqOfFreqDict):",len(oldGoodTuringProbabilityInStringFormatANDfreqOfFreqDict)
print "oldGoodTuringProbabilityInStringFormatANDfreqOfFreqDict:",oldGoodTuringProbabilityInStringFormatANDfreqOfFreqDict
inputFileHandler.close()

# key: freqOfFreq in int format
# value: newGoodTuringProbability In String Format Dict
freqOfFreqANDnewGoodTuringProbabilityInStringFormatDict = {}
inputFileName2 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/freqOfFreqInQueries_0_1_95K_95%_good_turing_output"
inputFileHandler = open(inputFileName2,"r")
inputFileHandler.readline()
inputFileHandler.readline()
inputFileHandler.readline()
inputFileHandler.readline()
lineCounter = 1
currentLine = inputFileHandler.readline()
while currentLine:
    lineElements = currentLine.strip().split(" ")
    freqOfFreq = int(lineElements[0])
    newGoodTuringProbabilityInStringFormat = lineElements[3]
    if freqOfFreq not in freqOfFreqANDnewGoodTuringProbabilityInStringFormatDict:
        freqOfFreqANDnewGoodTuringProbabilityInStringFormatDict[freqOfFreq] = newGoodTuringProbabilityInStringFormat
    currentLine = inputFileHandler.readline()
    lineCounter += 1
    if lineCounter % 21 == 0:
        break
print "len(freqOfFreqANDnewGoodTuringProbabilityInStringFormatDict):",len(freqOfFreqANDnewGoodTuringProbabilityInStringFormatDict)
print "freqOfFreqANDnewGoodTuringProbabilityInStringFormatDict:",freqOfFreqANDnewGoodTuringProbabilityInStringFormatDict
inputFileHandler.close()


inputFileName3 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/queryTermsFrom100KWithTheirTrueProbablityAndOurOwnModelPredictedProbablity1D_2D_GoodTuringProbabilityAdded20130429"
inputFileHandler = open(inputFileName3,"r")

outputFileHandler.write(inputFileHandler.readline())

for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    oldGoodTuringProbabilityInStringFormat = lineElements[4]
    outputLine = ""
    if oldGoodTuringProbabilityInStringFormat in oldGoodTuringProbabilityInStringFormatANDfreqOfFreqDict:
        outputLine = lineElements[0] + " " + lineElements[1] + " " + lineElements[2] + " " + lineElements[3] + " " + freqOfFreqANDnewGoodTuringProbabilityInStringFormatDict[ oldGoodTuringProbabilityInStringFormatANDfreqOfFreqDict[oldGoodTuringProbabilityInStringFormat] ] + "\n"
    else:
        outputLine = line
    outputFileHandler.write(outputLine)
    
print "inputFileName1:",inputFileName1
print "inputFileName2:",inputFileName2
print "inputFileName3:",inputFileName3
print "outputFileName:",outputFileName
    
inputFileHandler.close()
outputFileHandler.close()
'''


# Dead snippet (string literal, never run): counts the number of DISTINCT
# 1D and 2D probability strings (columns 2 and 3) in the query-terms file.
# Both dicts are used as sets -- the value is always 1, never incremented.
'''
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/queryTermsFrom100KWithTheirTrueProbablityAndOurOwnModelPredictedProbablity1D_2D_GoodTuringProbabilityAdded20130429"
inputFileHandler = open(inputFileName,"r")
inputFileHandler.readline()

# key: probability1D in string format
# value: counter in int format
probabilityFor1DDict = {}

# key: probability2D in string format
# value: counter in int format
probabilityFor2DDict = {}

for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    current1DProbabilityInStringFormat = lineElements[2]
    current2DProbabilityInStringFormat = lineElements[3]
    if current1DProbabilityInStringFormat not in probabilityFor1DDict:
        probabilityFor1DDict[current1DProbabilityInStringFormat] = 1
    else:
        pass
    
    if current2DProbabilityInStringFormat not in probabilityFor2DDict:
        probabilityFor2DDict[current2DProbabilityInStringFormat] = 1
    else:
        pass

print "len(probabilityFor1DDict):",len(probabilityFor1DDict)
print "len(probabilityFor2DDict):",len(probabilityFor2DDict)
inputFileHandler.close()
exit(1)
'''

# Dead snippet (string literal, never run): prints i and 1/2^(i+1) for
# i = 0..18 (float division, courtesy of "from __future__ import division").
'''
for i in range(0,19):
    print i, 1 / pow(2,i+1)

exit(1)
'''

'''
# What is the query length distribution for the fake one, it seems that the fake one is too fake for me
# key: queryLength in int format
# value: freq in int format
queryLengthDistributionDict = {}
# option1
# inputFileName = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/100KQueries_head_95K"
# option2
# inputFileName = "/data/jrodri04/lm/10M.95KQ.5gram.ModKN.txt"
# option3
# inputFileName = "/data/jrodri04/lm/30M.95KQ.ModKN.txt"
# option4
inputFileName = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/95KQueriesMachineGenerated3_20130818_using_new_fake_queryLog"

inputFileHandler = open(inputFileName,"r")
currentLine = inputFileHandler.readline()
lineCounter = 1
while currentLine:
    # option1
    # data = currentLine.strip().split(":")[1]
    # option2
    # data = currentLine.strip().lower()
    # option3
    data = currentLine.strip().split(":")[2]
    
    
    # processing option1 (the ORIGINAL one)
    # for i in range(0,len(data)):
    #    if not ( (ord(data[i]) >= 48 and ord(data[i]) < 58) or (ord(data[i]) >= 65 and ord(data[i]) < 91) or (ord(data[i]) >= 97 and ord(data[i]) < 123) or (ord(data[i]) == 32) ):
    #        data = data[:i] + " " + data[i+1:]
    # queryContent = data
    
    
    # processing option2 (the simple split one)
    queryContent = data
    
    
    queryContentElements = queryContent.strip().split(" ") 
    
    currentQueryTermDict = {}
    for element in queryContentElements:
        if element.strip() != "":
            if element.strip() not in currentQueryTermDict:
                currentQueryTermDict[element.strip()] = 1
    currentQueryLength = len(currentQueryTermDict)
    if currentQueryLength not in queryLengthDistributionDict:
        if currentQueryLength == 0:
            print "lineCounter:",lineCounter,"*",currentLine.strip(),"*"
            # print currentQueryTermDict,"*"
            # print "*",currentLine.strip(),"*"
            # print
        queryLengthDistributionDict[currentQueryLength] = 1
    else:
        if currentQueryLength == 0:
            print "lineCounter:",lineCounter,"*",currentLine.strip(),"*"
            # print currentQueryTermDict,"*"
            # print "*",currentLine.strip(),"*"
            # print
        queryLengthDistributionDict[currentQueryLength] += 1
    currentLine = inputFileHandler.readline()     
    lineCounter += 1
    # print lineCounter
    if lineCounter % 1000001 == 0:
        print lineCounter
        break

queryLengthList = []
queryLengthList = queryLengthDistributionDict.keys()
queryLengthList.sort(cmp=None, key=None, reverse=False)
# option1
TOTAL_NUM_OF_QUERIES = 95000
# option2
# TOTAL_NUM_OF_QUERIES = 1000000
totalQueryLength = 0
averageQueryLength = 0
for queryLength in queryLengthDistributionDict:
    print queryLength,queryLengthDistributionDict[queryLength],queryLengthDistributionDict[queryLength]/TOTAL_NUM_OF_QUERIES
    totalQueryLength += queryLengthDistributionDict[queryLength] * queryLength
averageQueryLength = totalQueryLength / TOTAL_NUM_OF_QUERIES
print "total # of queries counted:",lineCounter-1
print "average Query Length:",averageQueryLength
print "inputFileName:",inputFileName
exit(1)
'''

'''
inputFileName = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/debugForSimulationTest"
inputFileHandler = open(inputFileName,"r")
valueCounter = 0
for line in inputFileHandler.readlines():
    # option1
    if line.strip().startswith("Showing 0 results out of 0."):
        print line.strip()
    
    # option2
    #if line.strip().startswith("qid:"):
    #    print line.strip()
    #if line.strip().startswith("Search:"):
    #    print line.strip()
    #if line.strip().startswith("the term:"):
    #    print line.strip()
    #if line.strip().startswith("This query has query terms"):
    #    print line.strip()
    #    valueCounter += 1
    #    print "numOfOutOfLexiconQueries:",valueCounter
    #    print
    
inputFileHandler.close()
exit(1)
'''

'''
termDict = {}
inputFileName1 = "/data/jrodri04/lm/models/95KQ.5gram.ModKN.vocab"
inputFileHandler1 = open(inputFileName1,"r")
for line in inputFileHandler1.readlines():
    currentTerm = line.strip()
    if currentTerm not in termDict:
        termDict[currentTerm] = False
    else:
        print "duplicated term:",currentTerm
        exit(1)
print "len(termDict):",len(termDict)
inputFileHandler1.close()

inputFileName2 = "/data/jrodri04/lm/10M.95KQ.5gram.ModKN.txt"
inputFileHandler2 = open(inputFileName2,"r")
currentLine = inputFileHandler2.readline()
lineCounter = 1
while currentLine:
    currentLineElements = currentLine.strip().split(" ")
    for element in currentLineElements:
        if element.strip() in termDict:
            termDict[ element.strip() ] = True
            # for DEBUG ONLY
            # print "element.strip():",element.strip()
            pass
        else:
            if element.strip() != "":
                termWithBeginningAndEndingTag = "*" + element.strip() + "*" 
                print "query term:",termWithBeginningAndEndingTag,"does NOT appear in the lexicon"
                exit(1)
            else:
                pass # Just an empty string

    currentLine = inputFileHandler2.readline()
    lineCounter += 1
    if lineCounter % 1000000 == 0:
        print "lineCounter:",lineCounter

print "The following terms have NOT been generated in the fake query trace"
for term in termDict:
    if termDict[term] == False:
        print term

inputFileHandler2.close()
exit(1)
'''

'''
# 37827 query terms from Juan
# 38871 query terms from Wei
set1 = Set([])
set2 = Set([])
intersectionSet = Set([])
unionSet = Set([])

inputFileName1 = "/data/jrodri04/lm/models/95KQ.5gram.ModKN.vocab"
inputFileHandler1 = open(inputFileName1,"r")
for line in inputFileHandler1.readlines():
    set1.add(line.strip())
inputFileHandler1.close()
print "len(set1):",len(set1)

inputFileName2 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/queryTermsWithTheirLengthsOfInvertedList"
inputFileHandler2 = open(inputFileName2,"r")
for line in inputFileHandler2.readlines():
    set2.add(line.strip().split(" ")[0])
inputFileHandler2.close()
print "len(set2):",len(set2)
# print "set1:",set1
# print "set2:",set2
intersectionSet = set1.intersection( set2 )
unionSet = set1.union( set2 )
print "length of intersection Set:",len(intersectionSet)
print "length of union set:",len(unionSet)
print "Jaccard similarity (intersection size / union size):",len(intersectionSet)/len(unionSet)
'''

'''
# Join the result set together in order to upload them to the google shared doc for prof to see
print "Program Begins..."
outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/highFreqTermsWithTheirAverageIntersectionSizeAndMetaInfoBothFor1HumanAND2MachineQueries"
outputFileHandler = open(outputFileName,"w")

inputFileName1 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/highFreqTermsWithTheirAverageIntersectionSizeAndMetaInfo"
inputFileName2 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/highFreqTermsWithTheirAverageIntersectionSizeFromMachineGeneratedQueriesAndMetaInfo"
inputFileName3 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/highFreqTermsWithTheirAverageIntersectionSizeFromMachineGeneratedQueriesAndMetaInfo2"

inputFileHandler1 = open(inputFileName1,"r")
inputFileHandler2 = open(inputFileName2,"r")
inputFileHandler3 = open(inputFileName3,"r")
currentLineFromFile1 = inputFileHandler1.readline()
while currentLineFromFile1:
    currentLineElementsFromFile1 = currentLineFromFile1.strip().split(" ")
    currentLineFromFile2 = inputFileHandler2.readline()
    currentLineFromFile3 = inputFileHandler3.readline()
    currentLineElementsFromFile2 = currentLineFromFile2.strip().split(" ")
    currentLineElementsFromFile3 = currentLineFromFile3.strip().split(" ")
    
    outputLine = ""
    if currentLineElementsFromFile1[0] == currentLineElementsFromFile2[0] and currentLineElementsFromFile1[2] == currentLineElementsFromFile2[2] and currentLineElementsFromFile1[0] == currentLineElementsFromFile3[0] and currentLineElementsFromFile1[2] == currentLineElementsFromFile3[2]:
        outputLine = currentLineElementsFromFile1[0] + " " + currentLineElementsFromFile1[2] + " "
        outputLine += currentLineElementsFromFile1[1] + " " + currentLineElementsFromFile1[3] + " " + currentLineElementsFromFile1[4] + " "
        outputLine += currentLineElementsFromFile2[1] + " " + currentLineElementsFromFile2[3] + " " + currentLineElementsFromFile2[4] + " "
        outputLine += currentLineElementsFromFile3[1] + " " + currentLineElementsFromFile3[3] + " " + currentLineElementsFromFile3[4] + " "
    outputLine = outputLine.strip() + "\n"
    outputFileHandler.write(outputLine)
    
    currentLineFromFile1 = inputFileHandler1.readline()

inputFileHandler1.close()
inputFileHandler2.close()
inputFileHandler3.close()
outputFileHandler.close()
print "inputFileName1:",inputFileName1
print "inputFileName2:",inputFileName2
print "inputFileName3:",inputFileName3
print "outputFileName:",outputFileName
print "Program Ends."
'''


'''
# small logic test of how many zero results returned for a set of queries.
# human generated query log
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/qidANDIntersectionSizeMappingTable"
# machine generated query log
# inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/machineGeneratedQIDsANDIntersectionSizeMappingTable"
inputFileHandler = open(inputFileName,"r")
numOfQueriesReturnZeroResults = 0
for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    numOfResultsForCurrentQuery = int( lineElements[1] )
    if numOfResultsForCurrentQuery == 0:
        numOfQueriesReturnZeroResults += 1
print "numOfQueriesReturnZeroResults:",numOfQueriesReturnZeroResults
inputFileHandler.close()
exit(1)
'''

'''
inputFileName = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/QueriesWhichContainHighFreqTermAmong95KMachineGenerated2"
inputFileHandler = open(inputFileName,"r")

outputFileName = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/QueriesWhichContainHighFreqTermAmong95KMachineGenerated_IRTK_Compatible_Format2"
outputFileHandler = open(outputFileName,"w")

for line in inputFileHandler.readlines():
    lineElements = line.strip().split(":")
    newQueryID = int(lineElements[1])
    queryContent =  lineElements[2]
    outputLine = str(newQueryID) + ":" + queryContent + "\n" 
    outputFileHandler.write(outputLine)
inputFileHandler.close()
outputFileHandler.close()
print "inputFileName:",inputFileName
print "outputFileName:",outputFileName
exit(1)
'''

'''
# step1: random select the queryIDs from the range [1,10000000]
randomlySelectedIDList = []
randomlySelectedIDDict = {}

# in debug
# totalNumOfRandomlySelectedSamples = 10
# in production
totalNumOfRandomlySelectedSamples = 95000
totalNumOfSamples = 10000000
# NOTE(review): the previous comment here ("0 to 25205179 in the total of 25,205,179")
# was stale, copied from the 25M-document snippet further down; this snippet actually
# draws IDs from [1, 10000000] inclusive (see randint call below).
while len(randomlySelectedIDDict) != totalNumOfRandomlySelectedSamples:
    # Return a random integer N such that a <= N <= b
    selectedID = random.randint(1, totalNumOfSamples)
    if selectedID not in randomlySelectedIDDict:
        randomlySelectedIDList.append(selectedID)
        randomlySelectedIDDict[selectedID] = 1

randomlySelectedIDList.sort(cmp=None, key=None, reverse=False)
# print "randomlySelectedIDList:",randomlySelectedIDList
print "len(randomlySelectedIDList):",len(randomlySelectedIDList)
print "len(randomlySelectedIDDict):",len(randomlySelectedIDDict)

# step2:
# extract those queries from the machine generated query log and form a machine generated query log compared to the human made query log 
outputFileName = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/95KQueriesMachineGenerated"
outputFileHandler = open(outputFileName,"w")

inputFileName = "/data/jrodri04/lm/10M.95KQ.5gram.ModKN.txt"
inputFileHandler = open(inputFileName,"r")
currentLine = inputFileHandler.readline()
originalLineCounter = 1
newQueryIDLineCounter = 1
while currentLine:
    if originalLineCounter in randomlySelectedIDDict:
        # for debug ONLY
        # print "currentLine:",currentLine.strip()
        queryContent = currentLine.strip()
        outputFileHandler.write(str(originalLineCounter) + " " + str(newQueryIDLineCounter) + " " + queryContent + "\n")
        newQueryIDLineCounter += 1
    currentLine = inputFileHandler.readline()
    originalLineCounter += 1
inputFileHandler.close()
outputFileHandler.close()
print "inputFileName:",inputFileName
print "outputFileName:",outputFileName
'''

'''
print "Program Begins..."
# key: int
# queryID in int format
# value: int
# query length (duplicate terms NOT counted)
queryIDsWIthTheirLengthDict = {}
inputQueryFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/100KQueries_head_95K"
inputQueryHandler = open(inputQueryFileName,"r")


for line in inputQueryHandler.readlines():
    elements = line.strip().split(":")
    queryID = int(elements[0])
    
    data = elements[1]
    data = data.lower()
    
    for i in range(0,len(data)):
        # print "data[i]:",ord(data[i])
        if not ( (ord(data[i]) >= 48 and ord(data[i]) < 58) or (ord(data[i]) >= 65 and ord(data[i]) < 91) or (ord(data[i]) >= 97 and ord(data[i]) < 123) or (ord(data[i]) == 32) ):
            # Just replace them with a space.
            data = data[:i] + " " + data[i+1:]
    
    queryContent = data
    
    queryContentElements = queryContent.strip().split(" ")
    currentQueryTermDict = {}
    for element in queryContentElements:
        if element.strip() != "":
            if element.strip() not in currentQueryTermDict:
                currentQueryTermDict[element.strip()] = 1
    
    # print "----->",queryID,len(currentQueryTermDict)
    
    if queryID not in queryIDsWIthTheirLengthDict:
        queryIDsWIthTheirLengthDict[queryID] = len(currentQueryTermDict)

print "len(queryIDsWIthTheirLengthDict):",len(queryIDsWIthTheirLengthDict)
print "queryIDsWIthTheirLengthDict[94996]:",queryIDsWIthTheirLengthDict[94996]
inputQueryHandler.close()
print "Program Ends."
'''


'''
# small logic of building the (qid,intersectionSizeMappingTable)
print "Program Begins..."
inputFileName = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/intersectionResult_machine_generated_queries_whole2"
inputFileHandler = open(inputFileName,"r")

outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/machineGeneratedQIDsANDIntersectionSizeMappingTable2"
outputFileHandler = open(outputFileName,"w")

currentQID = 0
currentQueryIntersectionSize = 0
qIDFoundFlag = False
queryIntersectionSizeFoundFlag = False
for line in inputFileHandler.readlines():
    if line.startswith("qid:"):
        currentQID = int( line.strip().split(" ")[1] )
        qIDFoundFlag =True
        # for DEBUG
        # print currentQID
        # print line.strip()
    
    if line.startswith("Showing"):
        currentQueryIntersectionSize = int(line.strip().split(" ")[5][:-1])
        queryIntersectionSizeFoundFlag = True
        # for DEBUG
        # print currentQueryIntersectionSize
        # print line.strip()
        # print
    
    if qIDFoundFlag and queryIntersectionSizeFoundFlag:
        outputFileHandler.write(str(currentQID) + " " + str(currentQueryIntersectionSize) + "\n")
        qIDFoundFlag = False
        queryIntersectionSizeFoundFlag = False
    
print "inputFileName:",inputFileName
print "outputFileName:",outputFileName
inputFileHandler.close()
outputFileHandler.close()
print "Program Ends."
'''

'''
# small logic for computing the average query cost
inputFileName = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/intersectionResultPart1"
inputFileHandler = open(inputFileName,"r")
totalQueryCost = 0.0
currentQueryCost = 0.0
averageQueryCost = 0.0
queryCounter = 0
for line in inputFileHandler.readlines():
    if line.startswith("Showing"):
        currentQueryCost = float(line.strip().split("(")[1].split(")")[0].split(" ")[0])
        totalQueryCost += currentQueryCost
        queryCounter += 1
averageQueryCost = totalQueryCost/queryCounter
print "average query cost:",averageQueryCost
inputFileHandler.close()
'''

'''
# working example in both local computer and remote pangolin server
# step1: select the random samples

randomlySelectedIDList = []
randomlySelectedIDDict = {}

# in debug
# totalNumOfRandomlySelectedSamples = 10
# in production
totalNumOfRandomlySelectedSamples = 1000000
totalNumOfSamples = 25205179
# assume that the selectedID starts from 0 to 25205179 in the total of 25,205,179 (25M)
while len(randomlySelectedIDDict) != totalNumOfRandomlySelectedSamples:
    # Return a random integer N such that a <= N <= b
    selectedID = random.randint(0, totalNumOfSamples-1)
    if selectedID not in randomlySelectedIDDict:
        randomlySelectedIDDict[selectedID] = 1
        randomlySelectedIDList.append(selectedID)
print "len(randomlySelectedIDDict):",len(randomlySelectedIDDict)
print "len(randomlySelectedIDList):",len(randomlySelectedIDList)
randomlySelectedIDList.sort(cmp=None, key=None, reverse=False)
print "randomlySelectedIDList[-2]:",randomlySelectedIDList[-2]
print "randomlySelectedIDList[-1]:",randomlySelectedIDList[-1]
time.sleep(10) # delays for 10 seconds
# print randomlySelectedIDDict

NUM_OF_LINES_NEEDED = 26000000
x = []
y = []

inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/trecID_docID_numOfPostingsRecorded_DocSizeInWords_MappingTableForGov2Dataset_with_Xdoc_values_added_sorted_by_XdocValueUsingGoodTurningDividedByNumOfPostingsForEachDocument"
inputFileHandler = open(inputFileName,"r")


currentLine = inputFileHandler.readline()
currentLineIndex = 0

while currentLine and currentLineIndex < NUM_OF_LINES_NEEDED:
    currentLineElements = currentLine.strip().split(" ")
    currentNumOfPostingsRecorded = int(currentLineElements[2])
    currentXdocValue = float(currentLineElements[7])
    
    if currentLineIndex in randomlySelectedIDDict:
        x.append(currentNumOfPostingsRecorded)
        y.append(currentXdocValue)
        print "len(x):",len(x)
        print "currentLineIndex:",currentLineIndex
        print
        
    
    currentLine = inputFileHandler.readline() 
    currentLineIndex += 1

matplotlib.pyplot.scatter(x,y)
matplotlib.pyplot.show()
inputFileHandler.close()
'''


'''
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/trecID_docID_numOfPostingsRecorded_DocSizeInWords_MappingTableForGov2Dataset_with_Xdoc_values_added_sorted_by_XdocValueUsingGoodTurningDividedByNumOfPostingsForEachDocument"
inputFileHandler = open(inputFileName,"r")

outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/docID_numOfPostingsRecorded_XdocValueUsingGoodTuringMethod"
outputFileHandler = open(outputFileName,"w")

currentLine = inputFileHandler.readline()
while currentLine:
    # NOTE(review): unfinished stub -- the line is never parsed (no
    # .strip().split()) and nothing is ever written to outputFileHandler,
    # so this snippet produces an empty output file.
    currentLineElements = currentLine
    currentLine = inputFileHandler.readline()


outputFileHandler.close()
inputFileHandler.close()
'''



'''
inputFileName1 = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/randomlySampledPostingIDList_with_local_index_for_each_term_added"
inputFileName2 = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/randomlySampledPostingIDList_with_set_of_probability_added"
inputFileHandler1 = open(inputFileName1,"r")
inputFileHandler2 = open(inputFileName2,"r")

for index,line1 in enumerate(inputFileHandler1.readlines()):
    line1Elements = line1.strip().split(" ")
    line2 = inputFileHandler2.readline()
    line2Elements = line2.strip().split(" ")
    if line1Elements[0] == line2Elements[0]:
        pass
    else:
        print "index:",index
        print "line1:",line1.strip()
        print "line2:",line2.strip()
        exit(1)
inputFileHandler1.close()
inputFileHandler2.close()
'''

'''
termsDict ={}
inputFileName = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/randomlySampledPostingIDList_with_local_index_for_each_term_added"
inputFileHandler = open(inputFileName,"r")
for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    currentTerm = lineElements[0]
    if currentTerm not in termsDict:
        termsDict[currentTerm] = 1
    else:
        pass

print "len(termsDict):",len(termsDict)
inputFileHandler.close()
'''

'''
print "Program Begins..."
outputFileName = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/randomlySampledPostingIDList"
outputFileHandler = open(outputFileName,"w")

postingIDList = []
postingIDDict = {}

# in debug
# totalNumOfRandomlySampledPostings = 10
# in production
totalNumOfRandomlySampledPostings = 10000
totalNumOfPostingsInIndex = 6451948010
# assume that the posting ID starts from 0 to 6451948009 in the total of 6,451,948,010 (6.5B)
while len(postingIDList) != totalNumOfRandomlySampledPostings:
    # Return a random integer N such that a <= N <= b
    postingID = random.randint(0, totalNumOfPostingsInIndex-1)
    if postingID not in postingIDDict:
        postingIDList.append(postingID)
        postingIDDict[postingID] = 1
    print len(postingIDList)

postingIDList.sort(cmp=None, key=None, reverse=False)
for postingID in postingIDList:
    # print postingIDList
    outputFileHandler.write(str(postingID) + "\n")

print "Program Ends."
outputFileHandler.close()
exit(1)
'''

'''
print "Program Begins..."
# The following answer ONE question, how many postings in total of our whole inverted index?
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/wholeLexiconTermsWithTermFreqInCollection.txt"
inputFileHandler = open(inputFileName,"r")
currentLine = inputFileHandler.readline()
totalNumOfPostings = 0
while currentLine:
    currentLineElements = currentLine.strip().split(" ")
    currentTerm = currentLineElements[0]
    currentTermNumOfPostingsInList = int(currentLineElements[1])
    totalNumOfPostings += currentTermNumOfPostingsInList
    currentLine = inputFileHandler.readline()
print "totalNumOfPostings:",totalNumOfPostings
inputFileHandler.close()
print "Program Ends."
exit(1)
'''

'''
print "Program Begins..."
docIDsNeededDict = {}
docIDsNeededDict["GX003-61-0529973"] = 1
docIDsNeededDict["GX021-66-11626454"] = 1
docIDsNeededDict["GX023-10-14566633"] = 1
docIDsNeededDict["GX023-98-2547874"] = 1
docIDsNeededDict["GX027-31-12566721"] = 1
docIDsNeededDict["GX057-81-12345226"] = 1
docIDsNeededDict["GX200-48-4132914"] = 1
docIDsNeededDict["GX229-55-7098182"] = 1
docIDsNeededDict["GX251-96-13027981"] = 1
docIDsNeededDict["GX265-40-16483965"] = 1
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/MLRelated/Model17/TOPK_sorted_by_queryID/distributedWorkForFeatureGenerationPostingRankInDoc20130727/gov2DocumentWithTheirXdocValues_for_debug_ONLY_Xdoc_value_FIXED_with_internalDocID_added_20130805"
inputFileHandler = open(inputFileName,"r")
# skip this headline
inputFileHandler.readline()
currentLine = inputFileHandler.readline()
while currentLine:
    currentLineElements = currentLine.strip().split(" ")
    currentLineTrecID = currentLineElements[0]
    currentLineDocID = currentLineElements[1]
    if currentLineTrecID not in docIDsNeededDict:
        pass
    else:
        print currentLineTrecID,currentLineDocID
    currentLine = inputFileHandler.readline()
inputFileHandler.close()
print "Program Ends."
exit(1)
'''

'''
print "Program Begins..."
inputGoldStandardFileName = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/debugTrecIDANDDocIDPair"
inputComparedFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/MLRelated/Model17/TOPK_sorted_by_queryID/distributedWorkForFeatureGenerationPostingRankInDoc20130727/gov2DocumentWithTheirXdocValues_for_debug_ONLY_Xdoc_value_FIXED_with_internalDocID_added_20130805"

inputGoldStandardFileHandler = open(inputGoldStandardFileName,"r")
inputComparedFileHandler = open(inputComparedFileName,"r")

# skip the headline
inputComparedFileHandler.readline()

currentLineFromComparedFile = inputComparedFileHandler.readline()
while currentLineFromComparedFile:
    currentLineFromComparedFileElements = currentLineFromComparedFile.strip().split(" ")
    trecIDFromComparedFile = currentLineFromComparedFileElements[0]
    docIDFromComparedFile = currentLineFromComparedFileElements[1]
    
    currentLineFromGoldStandardFileElements = inputGoldStandardFileHandler.readline().strip().split(" ")
    trecIDFromGoldStandardFile = currentLineFromGoldStandardFileElements[0]
    docIDFromGoldStandardFile = currentLineFromGoldStandardFileElements[1]
    
    if trecIDFromComparedFile == trecIDFromGoldStandardFile and docIDFromComparedFile == docIDFromGoldStandardFile:
        pass
    else:
        print "critical error"
        print "trecIDFromComparedFile:",trecIDFromComparedFile
        print "trecIDFromGoldStandardFile:",trecIDFromGoldStandardFile
        print "docIDFromComparedFile:",docIDFromComparedFile
        print "docIDFromGoldStandardFile:",docIDFromGoldStandardFile
    
    currentLineFromComparedFile = inputComparedFileHandler.readline()

inputGoldStandardFileHandler.close()
inputComparedFileHandler.close()
print "Program Ends."
exit(1)
'''

'''
print "Program Begins..."
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/MLRelated/Model17/TOPK_sorted_by_queryID/distributedWorkForFeatureGenerationPostingRankInDoc20130727/gov2DocumentWithTheirXdocValues_for_debug_ONLY_Xdoc_value"
outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/MLRelated/Model17/TOPK_sorted_by_queryID/distributedWorkForFeatureGenerationPostingRankInDoc20130727/gov2DocumentWithTheirXdocValues_for_debug_ONLY_Xdoc_value_FIXED_with_internalDocID_added_20130805"
inputFileHandler = open(inputFileName,"r")
outputFileHandler = open(outputFileName,"w")

# Copy the headline
currentLine = inputFileHandler.readline()
outputFileHandler.write(currentLine)

currentLine = inputFileHandler.readline()
docIDCounter = 0
while currentLine:
    currentLineElements = currentLine.strip().split(" ")
    newCurrentLine = currentLineElements[0] + " " + str(docIDCounter) + " " + currentLineElements[1] + " " + currentLineElements[2] + " " + currentLineElements[3] + " " + currentLineElements[4] + "\n"
    outputFileHandler.write(newCurrentLine)
    docIDCounter += 1
    currentLine = inputFileHandler.readline()
    
outputFileHandler.close()
inputFileHandler.close()
print "Program Ends."
exit(1)
'''

'''
# This part of logic is just to compute the average time cost/per query under the AND semantics
inputFileName = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/uniform_pruning_probability_90%Kept_PART_OF"
inputFileHandler = open(inputFileName,"r")
numOfQueriesEncountered = 0
totalMilliSeconds = 0
for line in inputFileHandler.readlines():
    if line.startswith("Showing"):
        numOfQueriesEncountered += 1
        print line.strip()
        lineElements = line.strip().split("(")
        totalMilliSeconds += float( lineElements[1].strip().split(")")[0].split(" ")[0] )
        print

print "totalMilliSeconds:",totalMilliSeconds,"ms"
print "numOfQueriesEncountered:",numOfQueriesEncountered
print "average query cost:",totalMilliSeconds/numOfQueriesEncountered,"ms"
    
inputFileHandler.close()
exit(1)
'''

'''
# small scripts for generating the 2ed factor needed documents info
outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/MLRelated/Model19/TOPK_sorted_by_queryID/set_of_documents_with_their_Xdoc_values_for_DEBUG_soalr"
outputFileHanlder = open(outputFileName,"w")

neededTrecIDsDict = {}


# option1
# fill the neededTrecIDsDict via a file
# inputAuxFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/MLRelated/Model19/TOPK_sorted_by_queryID/set_of_documents_needed_for_second_factor"
# inputAuxFileHandler = open(inputAuxFileName,"r")
# for line in inputAuxFileHandler.readlines():
#     currentTrecID = line.strip()
#    if currentTrecID not in neededTrecIDsDict:
#        neededTrecIDsDict[currentTrecID] = 1
#    else:
#        pass
        # print "error, mark1"
        # exit(1)
# inputAuxFileHandler.close()  # NOTE(review): commented out -- this close() was left
# active while its matching open() above (option1) is commented out, so re-enabling
# the snippet as written would raise a NameError before reaching option2.


# option2
# fill the neededTrecIDsDict manually. :)
# for the term soalr
neededTrecIDsDict["GX003-61-0529973"] = 1
neededTrecIDsDict["GX021-66-11626454"] = 1
neededTrecIDsDict["GX023-10-14566633"] = 1
neededTrecIDsDict["GX023-98-2547874"] = 1
neededTrecIDsDict["GX027-31-12566721"] = 1
neededTrecIDsDict["GX057-81-12345226"] = 1
neededTrecIDsDict["GX200-48-4132914"] = 1
neededTrecIDsDict["GX229-55-7098182"] = 1
neededTrecIDsDict["GX251-96-13027981"] = 1
neededTrecIDsDict["GX265-40-16483965"] = 1

print "len(neededTrecIDsDict):",len(neededTrecIDsDict)


inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/MLRelated/Model17/TOPK_sorted_by_queryID/distributedWorkForFeatureGenerationPostingRankInDoc20130727/gov2DocumentWithTheirXdocValues_for_debug_ONLY_Xdoc_value"
inputFileHandler = open(inputFileName,"r")

# skip the headline
inputFileHandler.readline()

currentLine = inputFileHandler.readline()
while currentLine:
    currentLineElements = currentLine.strip().split(" ")
    currentTrecID = currentLineElements[0]
    if currentTrecID in neededTrecIDsDict:
        outputFileHanlder.write(currentLine)
    currentLine = inputFileHandler.readline()
inputFileHandler.close()
outputFileHanlder.close()
exit(1)
'''

'''
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/queryTermsFrom100KWithTheirTrueProbablityAndOurOwnModelPredictedProbablity1D_2D_GoodTuringProbabilityAdded20130429"
inputFileHandler = open(inputFileName,"r")
# skip the headline
inputFileHandler.readline()
accumulatedProbability = 0.0
for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    currentCountingProbability = float( lineElements[4] ) 
    accumulatedProbability += currentCountingProbability
# The result will be: 0.951587450089
# of terms in the lexicon: 37728619
# of query terms in the 100K queries: 38871
# of unseen words: 37689748
# probablity mass for the unseen words: 1 - 0.951587450089 = 0.04841255
# probability for each unseen word: 0.04841255 / 37689748 = 0.0000000012845
print "accumulatedProbability:",accumulatedProbability
inputFileHandler.close()
exit(1)
'''

'''
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/prels"
inputFileHandler = open(inputFileName,"r")
trecIDDict = {}
for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    trecID = lineElements[0]
    if trecID not in trecIDDict:
        trecIDDict[trecID] = 1
    else:
        pass
print "len(trecIDDict):",len(trecIDDict) 
inputFileHandler.close()
'''

# --- Disabled scratch block (string literal; never executed).
# For a hard-coded list of query IDs, seeks into a raw-result file using
# (begin,end) byte offsets from an aux file, parses the TOP-K result lines
# (65 columns expected) and writes one training example per (result, term).
# NOTE(review): inside, `if term not in currentQueryTermIndexDict` tests the
# term but the dict is keyed by termIndex — looks like a latent bug; left
# verbatim since the snippet is disabled.
'''
# extract the TOPK results, here is setting the K
# K can be set to 10,100,1000,10000
K = 10

outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/MLRelated/Model15/TrainingSetForFixingTheMissingTOPKResults20130715"
outputFileHandler = open(outputFileName,"w")

# This simple part of logic is to extract those missing results in order to satisfy the prof
queryIDList = ["1","21","25","27","40","41","53","61","62","64","65","68","97","99","102","111","121","147","161","165","170","172","178","191","196","197","198","200","202","210","217","218","219","227","230","232","242","246","270","273","275","286","300","302","305"]
originalRawResultFileName = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/rawResultsHead10KANDSemanticsTOP2MResults"
originalRawResultFileHandler = open(originalRawResultFileName,"r")

queryAuxDict = {}
inputFileName = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/rawResultsHead10KANDSemanticsTOP2MResultsAccessAuxFile_20130720"
inputFileHandler = open(inputFileName,"r")

for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    qidInStringFormat = lineElements[0]
    # num of query results which we don't need. (lineElements[1])
    beginningPositionForThisQuery = int(lineElements[2])
    endingPositionForThisQuery = int(lineElements[3])
    currentTuple = (beginningPositionForThisQuery,endingPositionForThisQuery)
    queryAuxDict[qidInStringFormat] = currentTuple
    
print "----->","len(queryAuxDict):",len(queryAuxDict)
print "queryAuxDict['1']:",queryAuxDict['1']
print "queryAuxDict['20']:",queryAuxDict['20']

for currentQIDInStringFormat in queryIDList:
    (beginningPositionInIntFormatInOriginalRawResultFile,endingPositionInIntFormatInOriginalRawResultFile) = queryAuxDict[currentQIDInStringFormat]
    if beginningPositionInIntFormatInOriginalRawResultFile != -1 and endingPositionInIntFormatInOriginalRawResultFile != -1:
        originalRawResultFileHandler.seek(beginningPositionInIntFormatInOriginalRawResultFile)
        
        for j in range(0,3):
            originalRawResultFileHandler.readline()
        
        # print "(right line for dict):"
        # key: term index
        # value: term
        currentQueryTermIndexDict = {}
        currentLine = originalRawResultFileHandler.readline()
        currentLineElements = currentLine.strip().split(" ")
        for element in currentLineElements:
            term = element.split(":")[0]
            termIndex = int(element.split(":")[1])
            if term not in currentQueryTermIndexDict:
                currentQueryTermIndexDict[termIndex] = term
        # print "currentQueryTermIndexDict:",currentQueryTermIndexDict
        
        for j in range(4,7):
            originalRawResultFileHandler.readline()            
        
        for j in range(0,K):
            currentLineElements = originalRawResultFileHandler.readline().strip().split(" ")
            if len(currentLineElements) == 65:
                base = 1
                for i in range(0,len(currentQueryTermIndexDict)):
                    term = str( currentQueryTermIndexDict[i] )
                    internal_doc_id = str( currentLineElements[63] )
                    external_trec_id = str( currentLineElements[64] )
                    totalBM25_score = float( currentLineElements[62] )
                    # newly updated for each training posting instance
                    partialBM25_score_component_part1 = float( currentLineElements[base + 10 + i] )
                    partialBM25_score_component_part2 = float( currentLineElements[base + 10 + 10 + i] )
                    partialBM25_score = float( currentLineElements[base + 10 + 10 + 10 + i] )
                    freq_in_collection = int( currentLineElements[base + 10 + 10 + 10 + i + 10] )
                    freq_in_doc = int( currentLineElements[base + 10 + 10 + 10 + i + 10 + 10] )
                    doc_words = int( currentLineElements[61] )
                    result_rank_for_this_posting = int( currentLineElements[0] )
                    
                    outputTrainingExample = str(result_rank_for_this_posting-1) + " " + currentQIDInStringFormat + " " + external_trec_id + " " + internal_doc_id + " " + term + " " + str(partialBM25_score_component_part1) + " " + str(partialBM25_score_component_part2) + " " + str(partialBM25_score) + " " + str(freq_in_collection) + " " + str(freq_in_doc) + " " + str(doc_words) + " " + str(totalBM25_score) + " " + str(result_rank_for_this_posting)
                    # for debug
                    print outputTrainingExample
                    outputFileHandler.write(outputTrainingExample + "\n")
            else:
                pass

inputFileHandler.close()
outputFileHandler.close()
originalRawResultFileHandler.close()
exit(1)
'''

# --- Disabled scratch block (string literal; never executed).
# Skips the header line, then sums column 4 ("2D probability") over the file
# and prints the total — a sanity check that the distribution sums near 1.
'''
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/queryTermsFrom100KWithTheirTrueProbablityAndOurOwnModelPredictedProbablity1D_2D_GoodTuringProbabilityAdded20130429"
inputFileHandler = open(inputFileName,"r")
inputFileHandler.readline()

total2DProbability = 0
for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    current2DProbability = float(lineElements[3])
    total2DProbability += current2DProbability
print total2DProbability

inputFileHandler.close()
'''

# --- Disabled scratch block (string literal; never executed).
# Skips 26 ARFF header lines, then counts how many distinct query IDs
# (column 1) have at least one row whose 5th-from-last field is "True".
'''
queryIDDict = {}

inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/MLRelated/Model15/Training_Set_2013_07_15_tail_10000_for_testing.arff"
inputFileHandler = open(inputFileName,"r")
for i in range(0,26):
    inputFileHandler.readline()

for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    queryID = lineElements[1]
    TOP10ClassLabel = lineElements[-5]
    if TOP10ClassLabel == "True":
        if queryID not in queryIDDict:
            queryIDDict[queryID] = 1

print "len(queryIDDict):",len(queryIDDict)
inputFileHandler.close()
'''

# --- Disabled scratch block (string literal; never executed).
# Skips the headline, then counts the distinct query IDs (column 1) in the
# connected-postings training file.
'''
# key: trecID
# value: none
queryIDDict ={}
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/MLRelated/Model15/Connected_Postings_Training_Set_2013_07_15_tfqAdded_tfcAdded_postingRankInDocAdd_postingRankInListAdd_percentageForPostingRankInDoc_percentageForPostingRankInList_classLabelsAdded_WITH_headline.train"
inputFileHandler = open(inputFileName,"r")
# skip the headline
inputFileHandler.readline()
for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    queryID = lineElements[1]
    if queryID not in queryIDDict:
        queryIDDict[queryID] = 1
    else:
        pass

print "len(queryIDDict):",len(queryIDDict)
inputFileHandler.close()
exit(1)
'''

# --- Disabled scratch block (string literal; never executed).
# Sanity-checks each record of the completed-posting-set debug file: column 1
# must equal column 2 (doc size in words) and must equal the number of
# posting fields actually present from column 3 onward.
'''
# small check on the file /data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/gov2DocumentWithTheirCompletedPostingSet_for_debug_ONLY
print "program begins..."
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/gov2DocumentWithTheirCompletedPostingSet_for_debug_ONLY"
inputFileHandler = open(inputFileName,"r")
inputFileHandler.readline()

for index,line in enumerate(inputFileHandler.readlines()):
    lineElements = line.strip().split(" ")
    
    # step1: check whether the docSizeInWords has been computed correctly.
    value1InInt = int( lineElements[1] )
    value2InInt = int( lineElements[2] )
    if value1InInt == value2InInt:
        pass
    else:
        print "mark1:",value1InInt,value2InInt
    
    # step2: check whether we have enough postings actually recorded
    if value1InInt == len(lineElements[3:]):
        pass
    else:
        print "mark2:",value1InInt,len(lineElements[3:])
    
    print index,len(lineElements)

print "ALL Passed"
inputFileHandler.close()
print "program ends."
exit(1)
'''

# --- Disabled scratch block (string literal; never executed).
# Joins two per-term tables (inverted-index length + collection frequency,
# and 95K-query frequency) and writes one 4-column feature line per term,
# emitting zeros for terms absent from the collection table.
'''
# This logic is to create the file called: /data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/threeFeatureValuesForTrainingIn95KQueries
# This file contains the 3 feature values for the machine learned training purposes in head 95K Queries.
# This three features for the head 95K queries are:
# column1: query term
# column2: length_of_the_inverted_index (feature index 2)
# column3: term_freq_in_collection (feature index 3)
# column4: term_freq_in_queries (feature index 6)

queryTermLengthOfTheInvertedIndexDict = {}
queryTermTermFreqInCollectionDict = {}
queryTermTermFreqIn95KQueriesDict = {}
queryTermList = []

inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/freqOfQueryTermsInCollection.txt"
inputFileHandler = open(inputFileName,"r")
for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    queryTerm = lineElements[0]
    lengthOfTheInvertedIndex = int( lineElements[1] )
    termFreqInCollection = int( lineElements[2] )
    if queryTerm not in queryTermLengthOfTheInvertedIndexDict and queryTerm not in queryTermTermFreqInCollectionDict:
        queryTermLengthOfTheInvertedIndexDict[queryTerm] = lengthOfTheInvertedIndex
        queryTermTermFreqInCollectionDict[queryTerm] = termFreqInCollection
inputFileHandler.close()

inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/freqOfTermsIn100KQueries_head_95K"
inputFileHandler = open(inputFileName,"r")

for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    term = lineElements[0]
    freqIn95KQueries = int(lineElements[1])
    if term not in queryTermTermFreqIn95KQueriesDict:
        queryTermTermFreqIn95KQueriesDict[term] = freqIn95KQueries

print "len(queryTermLengthOfTheInvertedIndexDict):",len(queryTermLengthOfTheInvertedIndexDict)
print "len(queryTermTermFreqInCollectionDict):",len(queryTermTermFreqInCollectionDict)
print "len(queryTermTermFreqIn95KQueriesDict):",len(queryTermTermFreqIn95KQueriesDict) 
inputFileHandler.close()


outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/threeFeatureValuesForTrainingIn95KQueries"
outputFileHandler = open(outputFileName,"w")
queryTermList = queryTermTermFreqIn95KQueriesDict.keys()
queryTermList.sort(cmp=None, key=None, reverse=False)
for term in queryTermList:
    if term in queryTermTermFreqInCollectionDict:
        outputFileHandler.write( term + " " + str( queryTermLengthOfTheInvertedIndexDict[term] ) + " " + str( queryTermTermFreqInCollectionDict[term] ) + " " + str( queryTermTermFreqIn95KQueriesDict[term] ) + "\n")
    else:
        outputFileHandler.write( term + " " + "0" + " " + "0" + " " + str( queryTermTermFreqIn95KQueriesDict[term] ) + "\n")
outputFileHandler.close()
'''

# --- Disabled scratch block (string literal; never executed).
# Lower-cases each query, replaces any non-alphanumeric/non-space character
# with a space, collects the unique terms and writes them out sorted.
'''
queryTermDict = {}
queryTermList = []

# option1
# Updated by Wei 2013/02/22
# No need to include the gov2 150 human judge queries, but ONLY consider the 100K efficiency task queries will be enough.
# note: (Old answers)Now, it is NOT only gov2 150 queries but also has the efficiency task queries as well
# inputQueryFileName = "/data5/team/obukai/the_new_trip_of_feature_generation/gov2/final-150Gov2Queries.txt"

# option2
# note: Should contain all the queries in the 100K efficiency task query log
inputQueryFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/100KQueries_head_95K"
inputQueryHandler = open(inputQueryFileName,"r")

outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/queryTermsONLY_head_95K"
outputFileHandler = open(outputFileName,"w")

for line in inputQueryHandler.readlines():
    elements = line.strip().split(":")
    queryID = int(elements[0])
    
    data = elements[1]
    data = data.lower()
    
    for i in range(0,len(data)):
        # print "data[i]:",ord(data[i])
        if not ( (ord(data[i]) >= 48 and ord(data[i]) < 58) or (ord(data[i]) >= 65 and ord(data[i]) < 91) or (ord(data[i]) >= 97 and ord(data[i]) < 123) or (ord(data[i]) == 32) ):
            # Just replace them with a space.
            data = data[:i] + " " + data[i+1:]
    
    queryContent = data

    queryContentElements = queryContent.strip().split(" ")
    
    for element in queryContentElements:
        if element.strip() != "":
            if element.strip() not in queryTermDict:
                queryTermDict[element.strip()] = 1

print "----->","len(queryTermDict):",len(queryTermDict)

queryTermList = []
queryTermList = list(queryTermDict)
queryTermList.sort(cmp=None, key=None, reverse=False)
for queryTerm in queryTermList:
    outputFileHandler.write(queryTerm + "\n")
     
inputQueryHandler.close()
outputFileHandler.close()
'''


# --- Disabled scratch block (string literal; never executed).
# Rebuilds each 16-column training line, replacing column index 11 with the
# query-term frequency looked up from the tail-95K frequency table.
'''
termFreqDict = {}
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/freqOfTermsIn100KQueries_tail_95K"
inputFileHandler = open(inputFileName,"r")

for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    queryTerm = lineElements[0]
    queryTermFreqInStringFormat = lineElements[1]
    if queryTerm not in termFreqDict:
        termFreqDict[queryTerm] = queryTermFreqInStringFormat

print "len(termFreqDict):",len(termFreqDict)
inputFileHandler.close()

inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/TrainingSet20130609"
inputFileHandler = open(inputFileName,"r")

outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/TrainingSet20130609_fixFreq"
outputFileHandler = open(outputFileName,"w")
for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    # length = 16
    # print "len(lineElements):",len(lineElements)
    currentQueryTerm = lineElements[4]
    
    newOutputLine = ""
    for i in range(0,11):
        newOutputLine += lineElements[i] + " "
    newOutputLine += termFreqDict[currentQueryTerm] + " "
    for i in range(12,16):
        newOutputLine += lineElements[i] + " "
    newOutputLine = newOutputLine.strip() + "\n"
    
    outputFileHandler.write(newOutputLine)
outputFileHandler.close()
inputFileHandler.close()
'''

# --- Disabled scratch block (string literal; never executed).
# From a forward-index debug file, parses each doc's "(term,partialBM25)"
# tuples, sorts them by descending partial BM25, and records each term's
# 1-based rank within the doc in a two-level dict keyed by TREC ID then term.
'''
print "Program Begins..."
# 1st level
# key: trecID
# value: another dict
    # 2ed level
    # key: term
    # value: posting_rank_in_doc
docDictWithTermDict = {}

inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/gov2DocumentWithTheirForwardIndex_for_debug_ONLY"
inputFileHandler = open(inputFileName,"r")
# ignore the headerLine
# headerLine = inputFileHandler.readline()
# print "headerLine:",headerLine

for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    externalTrecID = lineElements[0]
    docWords = int( lineElements[1] )
    docPostingRecorded = int( lineElements[2] )
    currentDocPostingsList = []
    
    if externalTrecID not in docDictWithTermDict:
        docDictWithTermDict[externalTrecID] = {}
    else:
        print "mark1"
        exit(1)

    # checking mechanism
    if docPostingRecorded == len(lineElements[3:]):
        pass
    else:
        print "docPostingRecorded:",docPostingRecorded,type(docPostingRecorded)
        print "len(lineElements[2:]):",len(lineElements[3:]),type( len(lineElements[3:]) )
        exit(1)
    
    # loading mechanism
    for posingInfoTupleInStringFormat in lineElements[3:]:
        # print "posingInfoTupleInStringFormat:",posingInfoTupleInStringFormat,type(posingInfoTupleInStringFormat)
        tupleElements = posingInfoTupleInStringFormat.split("(")[1].split(")")[0].split(",")
        term = tupleElements[0]
        partialBM25InFloatFormat = float(tupleElements[1])
        currentDocPostingsList.append( (term,partialBM25InFloatFormat) )
            
    currentDocPostingsList.sort(cmp=None, key=itemgetter(1), reverse=True)
    
    
    # checking mechanism
    # if externalTrecID == "GX000-01-8658041":
    #    print "currentDocPostingsList:",currentDocPostingsList
    
    
    for index,postingTuple in enumerate(currentDocPostingsList):
        (term,_) = postingTuple
        docDictWithTermDict[externalTrecID][term] = index + 1
    
    
    # checking mechanism
    # if externalTrecID == "GX000-01-8658041":
    #    print "docDictWithTermDict[externalTrecID]:",docDictWithTermDict[externalTrecID]
    
    
    
    # check point
    # print "len(docDictWithTermDict[",externalTrecID,"]):",len(docDictWithTermDict[externalTrecID])
    # if len(docDictWithTermDict) % 10000 == 0:
    #    print "len(docDictWithTermDict):",len(docDictWithTermDict)
    #    break
    

print "len(docDictWithTermDict):",len(docDictWithTermDict)
print "docDictWithTermDict['GX235-40-13384592']['dssr']:",docDictWithTermDict['GX235-40-13384592']['dssr']
print "docDictWithTermDict['GX235-40-13384592']['pdf']:",docDictWithTermDict['GX235-40-13384592']['pdf']
inputFileHandler.close()
print "Program Ends."
'''

# --- Disabled scratch block (string literal; never executed).
# Loads a blacklist of TREC IDs (docs with missing postings), then copies the
# forward-index file through, dropping every line whose TREC ID is listed.
'''
docsDict = {}
inputFileName = "/data3/obukai/workspace/web-search-engine-wei/that_kind_of_missing_posting_documents_for_1_term_query_sortedByTrecID"
inputFileHandler = open(inputFileName,"r")
for line in inputFileHandler.readlines():
    trecID = line.strip()
    if trecID not in docsDict:
        docsDict[trecID] = 1
    else:
        print "mark1"
        exit(1)

print "len(docsDict):",len(docsDict)

inputFileHandler.close()

inputFileName = "/data3/obukai/workspace/web-search-engine-wei/gov2DocumentWithTheirForwardIndex_Combine_final_20130612_sortedByExternalTrecID"
inputFileHandler = open(inputFileName,"r")

outputFileName = "/data3/obukai/workspace/web-search-engine-wei/gov2DocumentWithTheirForwardIndex_Combine_final_20130612_without_postings_missing_documents"
outputFileHandler = open(outputFileName,"w")

for line in inputFileHandler.readlines():
    trecID = line.strip().split(" ")[0]
    if trecID not in docsDict:
        outputFileHandler.write(line)
    else:
        pass

inputFileHandler.close()
outputFileHandler.close()
'''


# --- Disabled scratch block (string literal; never executed).
# Copies column 3 (the external TREC ID) of each input line into a new file,
# one ID per line.
'''
inputFileName = "/data3/obukai/workspace/web-search-engine-wei/document_set_needed_to_be_REPARSED_again_for_the_1_term_query_problem"
inputFileHandler = open(inputFileName,"r")

outputFileName = "/data3/obukai/workspace/web-search-engine-wei/that_kind_of_missing_posting_documents_for_1_term_query"
outputFileHanlder = open(outputFileName,"w")

for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    externalTrecID = lineElements[2]
    outputFileHanlder.write(externalTrecID + "\n")

inputFileHandler.close()
outputFileHanlder.close()
'''

# --- Disabled scratch block (string literal; never executed).
# Tallies the distinct query IDs, query terms, and TREC IDs (columns 1-3) in
# the reparse-needed document list and prints the counts.
'''
qIDsDict = {}
queryTermsDict = {}
externalTrecIDsDict = {}


inputFileName = "/data3/obukai/workspace/web-search-engine-wei/document_set_needed_to_be_REPARSED_again_for_the_1_term_query_problem"
inputFileHandler = open(inputFileName,"r")
for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    qID = lineElements[0]
    queryTerm = lineElements[1]
    externalTrecID = lineElements[2]
    
    if qID not in qIDsDict:
        qIDsDict[qID] = 1
    else:
        pass
    
    if queryTerm not in queryTermsDict:
        queryTermsDict[queryTerm] = 1
    else:
        pass
    
    if externalTrecID not in externalTrecIDsDict:
        externalTrecIDsDict[externalTrecID] = 1
    else:
        pass

print "len(qIDsDict):",len(qIDsDict)
print "qIDsDict:",qIDsDict
print "len(queryTermsDict):",len(queryTermsDict)
print "queryTermsDict:",queryTermsDict
print "len(externalTrecIDsDict):",len(externalTrecIDsDict)
inputFileHandler.close()
'''

# --- Disabled scratch block (string literal; never executed).
# Linear-scans the forward-index file for one specific TREC ID and prints
# its whole line, then exits.
'''
# testing: GX000-32-3662810
inputFileName = "/data3/obukai/workspace/web-search-engine-wei/gov2DocumentWithTheirForwardIndex_Combine_final_20130612_sortedByExternalTrecID"
inputFileHandler1 = open(inputFileName,"r")
for line in inputFileHandler1.readlines():
    lineElements = line.strip().split(" ")
    externalTrecID = lineElements[0]
    if externalTrecID == "GX000-32-3662810":
        print line
        exit(1)

inputFileHandler1.close()
'''

# --- Disabled scratch block (string literal; never executed).
# Verifies the file is sorted by TREC ID using lexicographic string
# comparison against the previously seen ID.
'''
# checking logic for make sure that all the trecIDs has been sorted.
# Sorted
inputFileName = "/data3/obukai/workspace/web-search-engine-wei/gov2DocumentWithTheirForwardIndex_Combine_final_20130613_sortedByTrecID"
# Not sorted
# inputFileName = "/data3/obukai/workspace/web-search-engine-wei/gov2DocumentWithTheirForwardIndex_Combine_final_20130612"
inputFileHandler = open(inputFileName,"r")
previousTrecID = "GX000-00-0000000"

for index,line in enumerate( inputFileHandler.readlines() ):
    currentTrecID = line.strip().split(" ")[0]
    if currentTrecID >= previousTrecID:
        # that is normal
        previousTrecID = currentTrecID
    else:
        print "NOT good"
        print "index:",index

print "Passed"
inputFileHandler.close()
'''

# --- Disabled scratch block (string literal; never executed).
# Maps each TREC ID "FOLDER-SEGMENT-..." to its gov2 source path
# "/data/jhe/trecdata/FOLDER/SEGMENT.gz" and writes each path once,
# after an initial "19" header line.
'''
outputLineDict = {}

inputFileName = "/data3/obukai/workspace/web-search-engine-wei/that_kind_of_missing_posting_documents_for_1_term_query_sortedByTrecID"
inputFileHandler = open(inputFileName,"r")

outputFileName = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/gov2_files_with_CAT_command_for_the_missing_posting_documents_for_1_term_query"
outputFileHandler = open(outputFileName,"w")

outputFileHandler.write("19" + "\n")
for line in inputFileHandler.readlines():
    trecIDElements = line.strip().split("-")
    gov2FolderName = trecIDElements[0]
    gov2SegmentName = trecIDElements[1]
    outputLine = "/data/jhe/trecdata/" + gov2FolderName + "/" + gov2SegmentName + ".gz" + "\n"
    if outputLine not in outputLineDict:
        outputLineDict[outputLine] = 1
        outputFileHandler.write(outputLine)
    else:
        # already write to the file
        pass

print "len(outputLineDict):",len(outputLineDict)
inputFileHandler.close()
outputFileHandler.close()
'''

# --- Disabled scratch block (string literal; never executed).
# Collects the TREC IDs referenced by the training set and those present in
# the forward index, then prints every referenced ID missing from the index.
'''
print "Program Begins..."
trecIDsDictFromNeeded = {}
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/Posting_Oriented_Training_Dataset_2013_06_09.smallTest.Combine.train_tfqAdded_labelsAdded_rankInListAdded_sortedByExternalTrecID"
inputFileHandler = open(inputFileName,"r")

for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    trecID = lineElements[2]
    if trecID not in trecIDsDictFromNeeded:
        trecIDsDictFromNeeded[trecID] = 1
    else:
        pass

print "len(trecIDsDictFromNeeded):",len(trecIDsDictFromNeeded)
# print "trecIDsDictFromNeeded:",trecIDsDictFromNeeded
inputFileHandler.close()

trecIDsDictFromActual = {}
inputFileName = "/data3/obukai/workspace/web-search-engine-wei/gov2DocumentWithTheirForwardIndex_Combine_final_20130612_sortedByExternalTrecID"
inputFileHandler = open(inputFileName,"r")

# ignore the headline
headerLine = inputFileHandler.readline()

for line in inputFileHandler.readlines():
    if line.startswith("trecID"):
        print "Encounter the headline"
    else:
        lineElements = line.strip().split(" ")
        trecID = lineElements[0]
        if trecID not in trecIDsDictFromActual:
            trecIDsDictFromActual[trecID] = 1
        else:
            pass

print "len(trecIDsDictFromActual):",len(trecIDsDictFromActual)
# print "trecIDsDictFromActual:",trecIDsDictFromActual
inputFileHandler.close()


missingDocumentsCount = 0
for trecID in trecIDsDictFromNeeded:
    if trecID not in trecIDsDictFromActual:
        missingDocumentsCount += 1
        print "missing:",trecID
    else:
        # It is in the trecIDsDictFromActual, so that is OK
        pass

print "missingDocumentsCount:",missingDocumentsCount
print "Program Ends."
'''




# --- Disabled scratch block (string literal; never executed).
# Sums column 2 of the query-term probability distribution file and prints
# the total — a check that the probabilities sum near 1.
'''
# This part of logic is to verify the file located and called: /data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/queryTermProbabilityDistribution_sortedByTerm
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/queryTermProbabilityDistribution_sortedByTerm"
inputFileHanlder = open(inputFileName,"r")
totalProbability = 0.0
for line in inputFileHanlder.readlines():
    lineElements = line.strip().split(" ")
    currentProbability = float(lineElements[1])
    totalProbability += currentProbability
print "totalProbability:",totalProbability
inputFileHanlder.close()
exit(1)
'''

# --- Disabled scratch block (string literal; never executed).
# Explicitly marked unfinished by its author: iterates five CellROW*
# files and splits each line, but the per-line processing was never written.
'''
# UNDER CONSTRUCTION, NOT Finished and please do NOT USE.
# This part of the logic is to fix the ricardo thing
basePath = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/"
fileName1 = basePath + "CellROW1_0QueryTermsWithMetaInfo"
fileName2 = basePath + "CellROW2_0QueryTermsWithMetaInfo"
fileName3 = basePath + "CellROW3_0QueryTermsWithMetaInfo"
fileName4 = basePath + "CellROW4_0QueryTermsWithMetaInfo"
fileName5 = basePath + "CellROW5_0QueryTermsWithMetaInfo"

processingFileNameList = []
processingFileNameList.append(fileName1)
processingFileNameList.append(fileName2)
processingFileNameList.append(fileName3)
processingFileNameList.append(fileName4)
processingFileNameList.append(fileName5)

for fileName in processingFileNameList:
    inputFileHandler = open(fileName,"r")
    newOutputLine = ""
    for line in inputFileHandler.readlines():
        lineElements = line.strip().split(" ")
        
    inputFileHandler.close()
'''
    


# find those 36872359 terms with the key ROW1_0 in that cell.
# ROW1_0 [0,4]:36872359
# the length of the inverted list: [1,100)

# find those 688369 terms with the key ROW2_0 in that cell.
# ROW2_0 [5,9]:688369
# the length of the inverted list: [100,665)

# find those 87252 terms with the key ROW3_0 in that cell.
# ROW3_0 [10,25]:87252
# the length of the inverted list: [665,2473)

# find those 32085 terms with the key ROW4_0 in that cell.
# ROW4_0 [26,64]:32085
# the length of the inverted list: [2473,9964)

# find those 13296 terms with the key ROW5_0 in that cell.
# ROW5_0 [65,999]:13296
# the length of the inverted list: [9964,25205179]

# --- Disabled scratch block (string literal; never executed).
# Builds the (row key, lower bound, upper bound) list matching the
# inverted-list-length ranges described in the comments above.
'''
lowerUpperBoundTupleList = []
lowerUpperBoundTupleList.append( ("ROW1_0",1,100) )
lowerUpperBoundTupleList.append( ("ROW2_0",100,665) )
lowerUpperBoundTupleList.append( ("ROW3_0",665,2473) )
lowerUpperBoundTupleList.append( ("ROW4_0",2473,9964) )
lowerUpperBoundTupleList.append( ("ROW5_0",9964,25205179) )
print "len(lowerUpperBoundTupleList):",len(lowerUpperBoundTupleList)
'''

# --- Disabled scratch block (string literal; never executed).
# For every lexicon term NOT seen in the 85K-query frequency table, buckets
# it by inverted-list length into CellROW1_0..CellROW4_0 output files with a
# fixed per-cell probability and a VERY_BIG_NUMBER placeholder ratio.
# NOTE(review): the comments above describe five cells (ROW1_0..ROW5_0) but
# only four output files/branches exist here — ROW5_0 appears unhandled;
# left verbatim since the snippet is disabled.
'''
# This part of logic is to output the query terms which are in the CellROW1_0, CellROW2_0, CellROW3_0, CellROW4_0, CellROW5_0
queryTermWithTheirRealFreqIn85KQueriesDict = {}
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/realFreqOfTermsInQueries_head_85K_0_85%"
inputFileHandler = open(inputFileName,"r")
for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    queryTerm = lineElements[0]
    queryTermFreq = int(lineElements[1])
    if queryTerm not in queryTermWithTheirRealFreqIn85KQueriesDict:
        queryTermWithTheirRealFreqIn85KQueriesDict[queryTerm] = queryTermFreq
    else:
        print "error,Mark3"
        exit(1)
print "len(queryTermWithTheirRealFreqIn85KQueriesDict):",len(queryTermWithTheirRealFreqIn85KQueriesDict)
inputFileHandler.close()

outputFileName1 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/CellROW1_0QueryTermsWithMetaInfo"
outputFileHandler1 = open(outputFileName1,"w")

outputFileName2 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/CellROW2_0QueryTermsWithMetaInfo"
outputFileHandler2 = open(outputFileName2,"w")

outputFileName3 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/CellROW3_0QueryTermsWithMetaInfo"
outputFileHandler3 = open(outputFileName3,"w")

outputFileName4 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/CellROW4_0QueryTermsWithMetaInfo"
outputFileHandler4 = open(outputFileName4,"w")



# add the file called: 
numOfCurrentQueryTermsInCellROW1_0 = 0
numOfCurrentQueryTermsInCellROW2_0 = 0
numOfCurrentQueryTermsInCellROW3_0 = 0
numOfCurrentQueryTermsInCellROW4_0 = 0
THE_CELL_ROW1_0_PROBABILITY = 2.7962236405996843e-10
THE_CELL_ROW2_0_PROBABILITY = 1.6040397629079132e-08
THE_CELL_ROW3_0_PROBABILITY = 1.1538218688794478e-07
THE_CELL_ROW4_0_PROBABILITY = 3.2578867212223025e-07
VERY_BIG_NUMBER = 99999999

inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/wholeLexiconTermsWithTermFreqInCollection.txt"
inputFileHandler = open(inputFileName,"r")
for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    queryTerm = lineElements[0]
    queryTermListLength = int( lineElements[1] )
    
    #lowerUpperBoundTupleList.append( ("ROW1_0",1,100) )
    #lowerUpperBoundTupleList.append( ("ROW2_0",100,665) )
    #lowerUpperBoundTupleList.append( ("ROW3_0",665,2473) )
    #lowerUpperBoundTupleList.append( ("ROW4_0",2473,9964) )
    
    # for ROW1_0
    if queryTermListLength >= 1 and queryTermListLength < 100 and queryTerm not in queryTermWithTheirRealFreqIn85KQueriesDict:
        numOfCurrentQueryTermsInCellROW1_0 += 1
        print "queryTerm:",queryTerm,"queryTermListLength:",queryTermListLength
        # For these terms, they do NOT have RealFreqIn85KQueriesDict
        # I can ONLY assign the VERY_BIG_NUMBER to the ricardoRatio
        # ricardoRatio = queryTermListLength / queryTermWithTheirRealFreqIn85KQueriesDict[queryTerm]
        # current version
        ricardoRatio = VERY_BIG_NUMBER
        outputFileHandler1.write(queryTerm + " " + str( 0 ) + " " + str(queryTermListLength) + " " + str(THE_CELL_ROW1_0_PROBABILITY) + " " + str(ricardoRatio) + "\n")
        # break
    
    # for ROW2_0
    if queryTermListLength >= 100 and queryTermListLength < 665 and queryTerm not in queryTermWithTheirRealFreqIn85KQueriesDict:
        numOfCurrentQueryTermsInCellROW2_0 += 1
        print "queryTerm:",queryTerm,"queryTermListLength:",queryTermListLength
        # For these terms, they do NOT have RealFreqIn85KQueriesDict
        # I can ONLY assign the VERY_BIG_NUMBER to the ricardoRatio
        # ricardoRatio = queryTermListLength / queryTermWithTheirRealFreqIn85KQueriesDict[queryTerm]
        # current version
        ricardoRatio = VERY_BIG_NUMBER
        outputFileHandler2.write(queryTerm + " " + str( 0 ) + " " + str(queryTermListLength) + " " + str(THE_CELL_ROW2_0_PROBABILITY) + " " + str(ricardoRatio) + "\n")
        # break
        
    # for ROW3_0
    if queryTermListLength >= 665 and queryTermListLength < 2473 and queryTerm not in queryTermWithTheirRealFreqIn85KQueriesDict:
        numOfCurrentQueryTermsInCellROW3_0 += 1
        print "queryTerm:",queryTerm,"queryTermListLength:",queryTermListLength
        # For these terms, they do NOT have RealFreqIn85KQueriesDict
        # I can ONLY assign the VERY_BIG_NUMBER to the ricardoRatio
        # ricardoRatio = queryTermListLength / queryTermWithTheirRealFreqIn85KQueriesDict[queryTerm]
        # current version
        ricardoRatio = VERY_BIG_NUMBER
        outputFileHandler3.write(queryTerm + " " + str( 0 ) + " " + str(queryTermListLength) + " " + str(THE_CELL_ROW3_0_PROBABILITY) + " " + str(ricardoRatio) + "\n")
        # break
    
    # for ROW4_0
    if queryTermListLength >= 2473 and queryTermListLength < 9964 and queryTerm not in queryTermWithTheirRealFreqIn85KQueriesDict:
        numOfCurrentQueryTermsInCellROW4_0 += 1
        print "queryTerm:",queryTerm,"queryTermListLength:",queryTermListLength
        # For these terms, they do NOT have RealFreqIn85KQueriesDict
        # I can ONLY assign the VERY_BIG_NUMBER to the ricardoRatio
        # ricardoRatio = queryTermListLength / queryTermWithTheirRealFreqIn85KQueriesDict[queryTerm]
        # current version
        ricardoRatio = VERY_BIG_NUMBER
        outputFileHandler4.write(queryTerm + " " + str( 0 ) + " " + str(queryTermListLength) + " " + str(THE_CELL_ROW4_0_PROBABILITY) + " " + str(ricardoRatio) + "\n")
        # break 

print "numOfCurrentQueryTermsInCellROW4_0:",numOfCurrentQueryTermsInCellROW4_0
print "numOfCurrentQueryTermsInCellROW3_0:",numOfCurrentQueryTermsInCellROW3_0
print "numOfCurrentQueryTermsInCellROW2_0:",numOfCurrentQueryTermsInCellROW2_0
print "numOfCurrentQueryTermsInCellROW1_0:",numOfCurrentQueryTermsInCellROW1_0

outputFileHandler1.close()
outputFileHandler2.close()
outputFileHandler3.close()
outputFileHandler4.close()    
inputFileHandler.close()
'''

# ---------------------------------------------------------------------------
# NOTE(review): the triple-quoted blocks below are DEAD CODE — module-level
# string literals that are built and discarded at import time; nothing in
# them ever runs.  They are historical one-off analyses kept verbatim.
# They use Python 2 syntax (print statements) and would not run on Python 3.
# Prefer deleting them and relying on version control over string-quoting.
# ---------------------------------------------------------------------------
# Disabled snippet #1: loads the "terms not in the lexicon" list, then scans
# the three held-out query files (10%/4%/1% splits), sanitizes each query to
# [A-Za-z0-9 space], lower-cases and de-duplicates terms per query, and
# counts how many query-term occurrences fall outside the lexicon.
'''
# This part of logic:
# compute the num Of Query Terms Out Of Lexicon Beside 85K Queries
numOfQueryTermsOutOfLexiconBeside85KQueries = 0

queryTermNOTExistedInLexiconDict = {}
inputAuxFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/queryTermsWhichNOTExistedInTheCollection.txt"
inputAuxFileHandler = open(inputAuxFileName,"r")
for line in inputAuxFileHandler.readlines():
    lineElements = line.strip().split(" ")
    queryTerm = lineElements[-1]
    if queryTerm not in queryTermNOTExistedInLexiconDict:
        queryTermNOTExistedInLexiconDict[queryTerm] = 1

print "len(queryTermNOTExistedInLexiconDict):",len(queryTermNOTExistedInLexiconDict)
inputAuxFileHandler.close()

# check the number 63(out of lexicon terms in the last 15% Queries)
basePath = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/"
file1Path = basePath + "100KQueries_1_10%"
file2Path = basePath + "100KQueries_2_4%"
file3Path = basePath + "100KQueries_3_1%"
fileNameList = []
fileNameList.append(file1Path)
fileNameList.append(file2Path)
fileNameList.append(file3Path)
for fileName in fileNameList:
    fileHandler = open(fileName,"r")
    for line in fileHandler.readlines():
        
        queryID = line.strip().split(":")[0]
        queryTermList = line.strip().split(":")[1].strip().split(" ")
        # print "queryTermList:",queryTermList
        
        data = ""
        for element in queryTermList:
            data += element + " "
        
        # print "data(old):",data
        # print "original data:",data
        
        for i in range(0,len(data)):
            # print "data[i]:",ord(data[i])
            if not ( (ord(data[i]) >= 48 and ord(data[i]) < 58) or (ord(data[i]) >= 65 and ord(data[i]) < 91) or (ord(data[i]) >= 97 and ord(data[i]) < 123) or (ord(data[i]) == 32) ):
                # Just replace them with a space.
                data = data[:i] + " " + data[i+1:]
    
        # print "data(new):",data
        
        currentNewQueryTermList = data.strip().split(" ")
        currentNewQueryTermDict = {}
        
        for queryTerm in currentNewQueryTermList:
            if queryTerm.strip() != "":
                queryTermLower = queryTerm.lower()
                if queryTermLower not in currentNewQueryTermDict:
                    currentNewQueryTermDict[queryTermLower] = 1
    
        for queryTerm in currentNewQueryTermDict:
            # do sth.
            if queryTerm in queryTermNOTExistedInLexiconDict:
                numOfQueryTermsOutOfLexiconBeside85KQueries += 1

    fileHandler.close()

print "numOfQueryTermsOutOfLexiconBeside85KQueries:",numOfQueryTermsOutOfLexiconBeside85KQueries
'''

# Disabled snippet #2: same sanitization pipeline over the full 100K query
# file; counts whole queries containing at least one out-of-lexicon term
# (i.e. queries that cannot be answered), breaking per query on first hit.
'''
# This part of logic is to compute the following:
# num Of Queries Can NOT Be Answered :)
queryTermWhichNOTExistedInLexiconDict = {}
inputAuxFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/queryTermsWhichNOTExistedInTheCollection.txt"
inputAuxFileHandler = open(inputAuxFileName,"r")

for line in inputAuxFileHandler.readlines():
    lineElements = line.strip().split(" ")
    queryTerm = lineElements[-1]
    if queryTerm not in queryTermWhichNOTExistedInLexiconDict:
        queryTermWhichNOTExistedInLexiconDict[queryTerm] = 1

print "len(queryTermWhichNOTExistedInLexiconDict):",len(queryTermWhichNOTExistedInLexiconDict)
# print "queryTermWhichNOTExistedInLexiconDict:",queryTermWhichNOTExistedInLexiconDict
inputAuxFileHandler.close()



inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/100KQueries"
inputFileHandler = open(inputFileName,"r")
numOfQueriesCanNOTBeAnswered = 0
queriesCanNOTBeAnsweredList = []
for line in inputFileHandler.readlines():
    # print "line:",line.strip()
    queryID = line.strip().split(":")[0]
    queryTermList = line.strip().split(":")[1].strip().split(" ")
    # print "queryTermList:",queryTermList
    
    data = ""
    for element in queryTermList:
        data += element + " "
    
    # print "data(old):",data
    # print "original data:",data
    
    for i in range(0,len(data)):
        # print "data[i]:",ord(data[i])
        if not ( (ord(data[i]) >= 48 and ord(data[i]) < 58) or (ord(data[i]) >= 65 and ord(data[i]) < 91) or (ord(data[i]) >= 97 and ord(data[i]) < 123) or (ord(data[i]) == 32) ):
            # Just replace them with a space.
            data = data[:i] + " " + data[i+1:]

    # print "data(new):",data
    
    currentNewQueryTermList = data.strip().split(" ")
    currentNewQueryTermDict = {}
    
    for queryTerm in currentNewQueryTermList:
        if queryTerm.strip() != "":
            queryTermLower = queryTerm.lower()
            if queryTermLower not in currentNewQueryTermDict:
                currentNewQueryTermDict[queryTermLower] = 1

    for queryTerm in currentNewQueryTermDict:
        # whether it is in the dict or not
        if queryTerm in queryTermWhichNOTExistedInLexiconDict:
            numOfQueriesCanNOTBeAnswered += 1
            queriesCanNOTBeAnsweredList.append( line.strip() )
            break

print "numOfQueriesCanNOTBeAnswered:",numOfQueriesCanNOTBeAnswered
# print "queriesCanNOTBeAnsweredList:",queriesCanNOTBeAnsweredList
inputFileHandler.close()
'''


# NOTE(review): dead code — this whole block is a module-level string literal
# and never executes (Python 2 syntax; `sort(cmp=...)` is Python-2-only).
# Disabled snippet: parses cellKey/probability pairs from the final
# probability file (skipping the "SUM" line), sorts ascending by probability
# and prints the tuples.
'''
# This part of logic is to produce the cellProbabilityTupleList
cellProbabilityTupleList = []
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/probabilityWeWantFinally20130418"
inputFileHandler = open(inputFileName,"r")
for line in inputFileHandler.readlines():
    if not line.strip().startswith("SUM"):
        lineElements = line.strip().split(" ")
        cellKey = lineElements[0][3:]
        probability = float(lineElements[1])
        cellKeyProbabilityTuple = (cellKey,probability)
        cellProbabilityTupleList.append(cellKeyProbabilityTuple)
    else:
        # cause line starts with "SUM", so just ignore that line
        pass



cellProbabilityTupleList.sort(cmp=None, key=itemgetter(1), reverse=False)
for tuple in cellProbabilityTupleList:
    print tuple

print "len(cellProbabilityTupleList):",len(cellProbabilityTupleList)
inputFileHandler.close()
'''




# NOTE(review): dead code — module-level string literal, never executes.
# Disabled snippet: sums column 2 of the whole-lexicon term-frequency file to
# get the total number of postings, and counts lines to get the number of
# lexicon terms.
'''
# This part of logic is to count:
# the total number of postings
# the total number of terms in the lexicon
print "program begins..."
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/wholeLexiconTermsWithTermFreqInCollection.txt"
inputFileHandler = open(inputFileName,"r")
dataLine = inputFileHandler.readline()
totalNumPostings = 0
originalLineCounter = 0
while dataLine:
    totalNumPostings += int( dataLine.strip().split(" ")[1] )
    originalLineCounter += 1
    dataLine = inputFileHandler.readline()
    # if originalLineCounter == 2:
    #    break

print "totalNumPostings:",totalNumPostings
print "originalLineCounter:",originalLineCounter

inputFileHandler.close()
print "program ends."
'''

# NOTE(review): dead code — module-level string literal, never executes.
# Disabled snippet: zips two files line-by-line (1D and 2D model predictions)
# and, when the query terms match, writes the 1D line plus the 2D value into
# one combined file.  Note the variable names are (consistently) misspelled
# "Hanlder" — harmless inside dead code.
'''
# This temp logic is to combine 2D and 1D probability together
inputFileName1 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/queryTermsFrom100KWithTheirTrueProbablityAndOurOwnModelPredictedProbablity1D"
inputFileName2 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/queryTermsFrom100KWithTheirTrueProbablityAndOurOwnModelPredictedProbablity2D"
outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/queryTermsFrom100KWithTheirTrueProbablityAndOurOwnModelPredictedProbablity1D_2D"

inputFileHanlder1 = open(inputFileName1,"r")
inputFileHanlder2 = open(inputFileName2,"r")
outputFileHandler = open(outputFileName,"w")

lineFromFile1 = inputFileHanlder1.readline()
lineFromFile2 = inputFileHanlder2.readline()

outputFileHandler.write("queryTerm goldStandardRealProbability 1D 2D" + "\n")


while lineFromFile1:
    lineElementsFromFile1 = lineFromFile1.strip().split(" ")
    lineElementsFromFile2 = lineFromFile2.strip().split(" ")
    if lineElementsFromFile1[0] == lineElementsFromFile2[0]:
        outputFileHandler.write( lineFromFile1.strip() + " " + lineElementsFromFile2[2] + "\n")
    lineFromFile1 = inputFileHanlder1.readline()
    lineFromFile2 = inputFileHanlder2.readline()
inputFileHanlder1.close()
inputFileHanlder2.close()
outputFileHandler.close()
'''


# NOTE(review): dead code — both blocks below are module-level string
# literals and never execute.
# Disabled snippet: appends a gold-standard probability column (freq divided
# by 413533 — presumably the total term-position count; true division thanks
# to the `from __future__ import division` at file top).
'''
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/realFreqOfTermsIn_100KQueries"
outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/realFreqOfTermsIn_100KQueries_withProbablityAdded"

inputFileHandler = open(inputFileName,"r")
outputFileHandler = open(outputFileName,"w")

for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    goldStandardProbability = int( lineElements[1] ) / 413533
    outputFileHandler.write(line.strip() + " " + str(goldStandardProbability) + "\n")

inputFileHandler.close()    
outputFileHandler.close()
'''

# Disabled snippet: quick sanity check — sums the frequency column of the
# 1%-split real-frequency file.
'''
# easy check of the totalFreq
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/realFreqOfTermsIn_100KQueries_3_1%"
inputFileHandler = open(inputFileName,"r")

totalFreq = 0
for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    totalFreq += int( lineElements[1] )
print "totalFreq:",totalFreq

inputFileHandler.close()
'''

# NOTE(review): dead code — both blocks below are module-level string
# literals and never execute.
# Disabled snippet: skips a 4-line header of the Good-Turing output file and
# accumulates probability * occurrence-count to check total probability mass.
'''
# Unknown usage knowledge
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/freqOfFreqInQueries_head_85K_0_85%_good_turing_output"
inputFileHandler = open(inputFileName,"r")

# ignore the file headlines
inputFileHandler.readline()
inputFileHandler.readline()
inputFileHandler.readline()
inputFileHandler.readline()

currentTotalProbability = 0.0

for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    num_of_occurences = int( lineElements[1] )
    probability = float( lineElements[3] )
    currentTotalProbability += probability * num_of_occurences

print "currentTotalProbability:",currentTotalProbability
inputFileHandler.close()
exit(1)
'''

# Disabled snippet: sums the second column of the freq-of-freq file to count
# unique terms seen.
'''
# This part of logic is to compute # of UNIQUE TERMS SEEN:
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/freqOfFreqInQueries_head_85K_0_85%_sortedByFreqR"
inputFileHandler = open(inputFileName,"r")
totalFreq = 0

for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    currentFreq = int( lineElements[1] )
    totalFreq += currentFreq

print "totalFreq:",totalFreq
inputFileHandler.close()
'''

# NOTE(review): dead code — both blocks below are module-level string
# literals and never execute.
# Disabled snippet: histograms the distinct values of probability column 2
# (float keys — exact-equality binning only works because the values come
# from identical string parses).
'''
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/queryTermsFrom4KWithTheLatestProbabilitySettings1DProbabilityAdded"
inputFileHandler = open(inputFileName,"r")
inputFileHandler.readline()
lastColumnProbabilityDict = {}
for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    # probability = float( lineElements[-1] )
    probability = float( lineElements[2] )
    if probability not in lastColumnProbabilityDict:
        lastColumnProbabilityDict[probability] = 1
    else:
        lastColumnProbabilityDict[probability] += 1

print "len(lastColumnProbabilityDict):",len(lastColumnProbabilityDict)
# print "lastColumnProbabilityDict:",lastColumnProbabilityDict

# print "lastColumnProbabilityDict['1.3870025585078873e-09']:",lastColumnProbabilityDict['1.3870025585078873e-09']
# print "lastColumnProbabilityDict['1.32367943426e-06']:",lastColumnProbabilityDict['1.32367943426e-06']
# print "lastColumnProbabilityDict['4.13057762416e-06']:",lastColumnProbabilityDict['4.13057762416e-06']
# print "lastColumnProbabilityDict['6.56625976833e-06']:",lastColumnProbabilityDict['6.56625976833e-06']
# print "..."
# print "lastColumnProbabilityDict['4.70009360308e-05']:",lastColumnProbabilityDict['4.70009360308e-05']

inputFileHandler.close()
exit(1)
'''


# Disabled snippet: one-line check that 1-(1-z1)^41221 matches an expected
# constant (0.182565789474).
'''
# The part of logic is for checking correctness of the final probability
z1 = 4.89033137093e-06
print 1 - math.pow((1 - z1),41221),0.182565789474
exit(1)
'''

# NOTE(review): dead code — both blocks below are module-level string
# literals and never execute.  The first block's opening line is free text
# (not a comment) and would be a SyntaxError if the block were ever
# re-enabled verbatim.
# Disabled snippet: sums unnormalized probability column 4 to derive the
# add-one DIVIDING_FACTOR (0.0072427900025).
'''
DIVIDING_FACTOR(add one) is: 0.0072427900025
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/queryTermsFrom4KWithTheirTrueProbablityAndPredictedProbablity_withNormalizedPredictedProbabilityAdded_withAddONEUnnormalizedProbabilityAdded"
inputFileHanlder = open(inputFileName,"r")
inputFileHanlder.readline()
currentTotalProbablity = 0.0
for line in inputFileHanlder.readlines():
    lineElements = line.strip().split(" ")
    currentProbability = float(lineElements[4])
    currentTotalProbablity += currentProbability
print "currentTotalProbablity:",currentTotalProbablity
inputFileHanlder.close()
'''

# Disabled snippet: sums normalized probability column 5 to verify the mass
# used by the KL measure totals ~1.
'''
# This part of logic is to check the correctness of the normalized probability for the KL measure.
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/queryTermsFrom4KWithTheirTrueProbablityAndPredictedProbablity_withNormalizedPredictedProbabilityAdded_withAddONEUnnormalizedProbabilityAdded_withNormalizedPredictedProbabilityAdded"
inputFileHanlder = open(inputFileName,"r")
# ignore the head line
inputFileHanlder.readline()
currentTotalProbability = 0.0
for line in inputFileHanlder.readlines():
    lineElements = line.strip().split(" ")
    normalizedProbability = float(lineElements[5])
    currentTotalProbability += normalizedProbability

print "currentTotalProbability:",currentTotalProbability
inputFileHanlder.close()
'''

# NOTE(review): dead code — module-level string literal, never executes.
# Disabled snippet: divides unnormalized probability column 4 by the add-one
# DIVIDING_FACTOR and appends it as a new "addOneNormalizedProbablity"
# column (typo preserved from the original output header).
'''
# DIVIDING_FACTOR = 0.749559731735
DIVIDING_FACTOR = 0.0072427900025
# This part of logic is to normalize the estimated probability in order to fit into the KL measure.
outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/queryTermsFrom4KWithTheirTrueProbablityAndPredictedProbablity_withNormalizedPredictedProbabilityAdded_withAddONEUnnormalizedProbabilityAdded_withNormalizedPredictedProbabilityAdded"
outputFileHanlder = open(outputFileName,"w")

inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/queryTermsFrom4KWithTheirTrueProbablityAndPredictedProbablity_withNormalizedPredictedProbabilityAdded_withAddONEUnnormalizedProbabilityAdded"
inputFileHanlder = open(inputFileName,"r")
oldHeadLine = inputFileHanlder.readline().strip()
newHeadLine = oldHeadLine + " " + "addOneNormalizedProbablity" + "\n"
outputFileHanlder.write(newHeadLine)
for line in inputFileHanlder.readlines():
    lineElements = line.strip().split(" ")
    unNormalizedProbability = float(lineElements[4])
    newNormalizedProbability = unNormalizedProbability / DIVIDING_FACTOR
    outputFileHanlder.write(line.strip() + " " + str(newNormalizedProbability) + "\n")

inputFileHanlder.close()
outputFileHanlder.close()
'''

# NOTE(review): dead code — module-level string literal, never executes.
# Disabled snippet: loads cellKey -> query-term lists, groups row-0 cells
# into five bucket ranges ([0,4], [5,9], [10,25], [26,64], [65,999]), loads
# the "exclude" lists from the mis-counting fix file, and prints set sizes
# before/after subtracting the intersection with the exclusions.
'''
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/100KQueries_1_10%_with_query_terms"
inputFileHandler = open(inputFileName,"r")
cellKeyWithQueryTermListDict = {}
for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    cellKey = lineElements[0]
    numOfQueryTerms = int( lineElements[1] )
    if cellKey not in cellKeyWithQueryTermListDict:
        cellKeyWithQueryTermListDict[cellKey] = []
        for i in range(0+2,0+2+numOfQueryTerms):
             cellKeyWithQueryTermListDict[cellKey].append( lineElements[i] )
    else:
        print "Unexpected Behaviour."
        exit(1)
inputFileHandler.close()
print "len(cellKeyWithQueryTermListDict):",len(cellKeyWithQueryTermListDict)
# print "cellKeyWithQueryTermListDict:",cellKeyWithQueryTermListDict

list1 = []
for i in range(0,5):
    cellKey = "0" + "_" + str(i)
    list1 += cellKeyWithQueryTermListDict[cellKey]

list2 = []
for i in range(5,10):
    cellKey = "0" + "_" + str(i)
    list2 += cellKeyWithQueryTermListDict[cellKey]
    
list3 = []
for i in range(10,26):
    cellKey = "0" + "_" + str(i)
    list3 += cellKeyWithQueryTermListDict[cellKey]

list4 = []
for i in range(26,65):
    cellKey = "0" + "_" + str(i)
    list4 += cellKeyWithQueryTermListDict[cellKey]

list5 = []
for i in range(65,1000):
    cellKey = "0" + "_" + str(i)
    list5 += cellKeyWithQueryTermListDict[cellKey]

set1Original = set(list1)
set2Original = set(list2)
set3Original = set(list3)
set4Original = set(list4)
set5Original = set(list5)

inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/fixingTheMisCountingProblemInfo20130420"
inputFileHandler = open(inputFileName,"r")

listsDict = {}
for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    key = lineElements[0]
    number = int(lineElements[1])
    if key not in listsDict:
        listsDict[key] = []
        for i in range(2,2+number):
            listsDict[key].append(lineElements[i])

print "original:"
print "ROW1_0 [0,4]:",len(set1Original)
print "ROW2_0 [5,9]:",len(set2Original)
print "ROW3_0 [10,25]:",len(set3Original)
print "ROW4_0 [26,64]:",len(set4Original)
print "ROW5_0 [65,999]:",len(set5Original)

set1Exclude = set(listsDict["ROW1_0"])
set2Exclude = set(listsDict["ROW2_0"])
set3Exclude = set(listsDict["ROW3_0"])
set4Exclude = set(listsDict["ROW4_0"])
set5Exclude = set(listsDict["ROW5_0"])

print "exclude:"
print "ROW1_0 [0,4]:",len(set1Exclude)
print "ROW2_0 [5,9]:",len(set2Exclude)
print "ROW3_0 [10,25]:",len(set3Exclude)
print "ROW4_0 [26,64]:",len(set4Exclude)
print "ROW5_0 [65,999]:",len(set5Exclude)

print "rest:"
print "ROW1_0 [0,4]:",len(set1Original) - len( set1Original.intersection(set1Exclude) )
print "ROW2_0 [5,9]:",len(set2Original) - len( set2Original.intersection(set2Exclude) )
print "ROW3_0 [10,25]:",len(set3Original) - len( set3Original.intersection(set3Exclude) )
print "ROW4_0 [26,64]:",len(set4Original) - len( set4Original.intersection(set4Exclude) )
print "ROW5_0 [65,999]:",len(set5Original) - len( set5Original.intersection(set5Exclude) )

print "listsDict['ROW3_0']:",listsDict["ROW3_0"]
print "set5Original:",set5Original
print "set5Exclude:",set5Exclude
inputFileHandler.close()
'''

# NOTE(review): dead code — module-level string literal, never executes.
# Disabled snippet: for every query term with real frequency >= 20, buckets
# it by its collection frequency into the five ROW ranges and writes the
# per-row term lists to the mis-counting fix file.  Historical run results
# are recorded in the leading comments of the snippet itself.
'''
# Purpose: this part of logic is to compute the set of query terms which have been miscounted in the unseen columns :)
# The following are the results which have been mis-counted:
# len(queryTermWithFreqInCollectionDict): 38871
# numForCellKeyROW1_0: 0
# numForCellKeyROW2_0: 0
# numForCellKeyROW3_0: 3
# numForCellKeyROW4_0: 28
# numForCellKeyROW5_0: 2475

outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/fixingTheMisCountingProblemInfo20130420"
outputFileHandler = open(outputFileName,"w")


queryTermWithFreqInCollectionDict = {}
inputAuxFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/100KQueryTermsWithTermFreqInCollection.txt"
inputAuxFileHandler = open(inputAuxFileName,"r")

for line in inputAuxFileHandler.readlines():
    lineElements = line.strip().split(" ")
    queryTerm = lineElements[0]
    freqInCollection = int( lineElements[1] )
    if queryTerm not in queryTermWithFreqInCollectionDict:
        queryTermWithFreqInCollectionDict[queryTerm] = freqInCollection

print "len(queryTermWithFreqInCollectionDict):",len(queryTermWithFreqInCollectionDict)
inputAuxFileHandler.close()

inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/realFreqOfTermsInQueries_head_85K_0_85%_withSomeProbabilityAdded_sortedByRealProbability"
inputFileHandler = open(inputFileName,"r")
totalNumOfQueryTermsProcessed = 0

numForCellKeyROW1_0 = 0
numForCellKeyROW2_0 = 0
numForCellKeyROW3_0 = 0
numForCellKeyROW4_0 = 0
numForCellKeyROW5_0 = 0

queryTermListForCellKeyROW1_0 = []
queryTermListForCellKeyROW2_0 = []
queryTermListForCellKeyROW3_0 = []
queryTermListForCellKeyROW4_0 = []
queryTermListForCellKeyROW5_0 = []


for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    queryTerm = lineElements[0]
    freq = int( lineElements[1] ) 
    if freq >= 20:
       totalNumOfQueryTermsProcessed += 1
       # ROW1 [0,4] [1,100)
       # ROW2 [5,9] [100,665)
       # ROW3 [10,25] [665,2473)
       # ROW4 [26,64] [2473,9964)
       # ROW5 [65,999] [9964,25205179)
       if queryTermWithFreqInCollectionDict[queryTerm] >= 1 and queryTermWithFreqInCollectionDict[queryTerm] < 100:
           numForCellKeyROW1_0 += 1
           queryTermListForCellKeyROW1_0.append(queryTerm)
       elif queryTermWithFreqInCollectionDict[queryTerm] >= 100 and queryTermWithFreqInCollectionDict[queryTerm] < 665:
           numForCellKeyROW2_0 += 1
           queryTermListForCellKeyROW2_0.append(queryTerm)
       elif queryTermWithFreqInCollectionDict[queryTerm] >= 665 and queryTermWithFreqInCollectionDict[queryTerm] < 2473:
           numForCellKeyROW3_0 += 1
           queryTermListForCellKeyROW3_0.append(queryTerm)
       elif queryTermWithFreqInCollectionDict[queryTerm] >= 2473 and queryTermWithFreqInCollectionDict[queryTerm] < 9964:
           numForCellKeyROW4_0 += 1
           queryTermListForCellKeyROW4_0.append(queryTerm)
       elif queryTermWithFreqInCollectionDict[queryTerm] >= 9964 and queryTermWithFreqInCollectionDict[queryTerm] <= 25205179:
           numForCellKeyROW5_0 += 1
           queryTermListForCellKeyROW5_0.append(queryTerm)

print "numForCellKeyROW1_0:",numForCellKeyROW1_0
print "numForCellKeyROW2_0:",numForCellKeyROW2_0
print "numForCellKeyROW3_0:",numForCellKeyROW3_0
print "numForCellKeyROW4_0:",numForCellKeyROW4_0
print "numForCellKeyROW5_0:",numForCellKeyROW5_0


# print "queryTermListForCellKeyROW1_0:",queryTermListForCellKeyROW1_0
# print "queryTermListForCellKeyROW2_0:",queryTermListForCellKeyROW2_0
# print "queryTermListForCellKeyROW3_0:",queryTermListForCellKeyROW3_0
# print "queryTermListForCellKeyROW4_0:",queryTermListForCellKeyROW4_0
# print "queryTermListForCellKeyROW5_0:",queryTermListForCellKeyROW5_0


tempList = []
tempList.append(queryTermListForCellKeyROW1_0)
tempList.append(queryTermListForCellKeyROW2_0)
tempList.append(queryTermListForCellKeyROW3_0)
tempList.append(queryTermListForCellKeyROW4_0)
tempList.append(queryTermListForCellKeyROW5_0)

for i in range(0,5):
    outputLine = "ROW" + str(i) + "_0" + " "
    outputLine += str( len( tempList[i] ) ) + " "
    for queryTerm in tempList[i]:
        outputLine += queryTerm + " "
    outputLine = outputLine.strip() + "\n"
    outputFileHandler.write(outputLine)

inputFileHandler.close()
outputFileHandler.close()
'''

# NOTE(review): dead code — module-level string literal, never executes.
# Disabled snippet (4 steps): step0 sums the probability mass of popular
# terms (freq >= 20); step1 loads per-cell probabilities; step2 parses the
# 5x20 range:freq table out of the fixed probability-table file; step3 sums
# cell probability * cell frequency and reports the combined mass and term
# counts.  The leading TODO about double counting was never resolved here.
'''
# TODO: need to subtract the part query terms which have been double counted.
# This part of logic is to compute the final probability mass for the whole predicted area
# This step0 is to add the missing but popular query term probability mass into consideration.
print "step0"
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/realFreqOfTermsInQueries_head_85K_0_85%_withSomeProbabilityAdded_sortedByRealProbability"
inputFileHandler = open(inputFileName,"r")
part2TotalProbabilityShouldBeAdded = 0.0
part2NumberOfQueryTerms = 0
for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    queryTerm = lineElements[0]
    freq = int( lineElements[1] )
    probability = float( lineElements[2] )
    if freq >= 20:
        part2TotalProbabilityShouldBeAdded += probability
        part2NumberOfQueryTerms += 1
print "part2TotalProbabilityShouldBeAdded:",part2TotalProbabilityShouldBeAdded

inputFileHandler.close()

print "step1"
cellKeyWithItsProbabilityDict = {}
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/probabilityWeWantFinally20130418"
inputFileHandler = open(inputFileName,"r")
for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    cellKey = lineElements[0]
    cellProbabilityValue = float(lineElements[1])
    if cellKey not in cellKeyWithItsProbabilityDict:
        cellKeyWithItsProbabilityDict[cellKey] = cellProbabilityValue
    else:
        pass
print "len(cellKeyWithItsProbabilityDict):",len(cellKeyWithItsProbabilityDict)
inputFileHandler.close()

print "step2"
classLabelList = ["ROW1","ROW2","ROW3","ROW4","ROW5"]
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/probabilityTableUsingProfIdea20130420_fixed"
inputFileHandler = open(inputFileName,"r")
dataLine = inputFileHandler.readline()
while not dataLine.strip().startswith("table:denominator:freqOfFreqForTheLexiconTerm"):
    dataLine = inputFileHandler.readline()
# print "mark1:"
# print dataLine
# print "mark2:"
inputFileHandler.readline()
inputFileHandler.readline()
inputFileHandler.readline()
inputFileHandler.readline()

cellCorrespondingRangesDict = {}
cellCorrespondingFreqDict = {}

for classLabel in classLabelList:
    rowLineData = inputFileHandler.readline().strip()
    rowLineDataElements = rowLineData.split(" ")
    for i in range(0,20):
        cellKey = classLabel + "_" + str(i)
        if cellKey not in cellCorrespondingRangesDict:
            cellCorrespondingRangesDict[cellKey] = rowLineDataElements[i+1].split(":")[0]
            cellCorrespondingFreqDict[cellKey] = int( rowLineDataElements[i+1].split(":")[1] )

print "example key:ROW1_6 value:[0,73]"
print "len(cellCorrespondingRangesDict):",len(cellCorrespondingRangesDict)
print 

print "example key:ROW1_6 value:0.38235294117599999,"
print "len(cellCorrespondingFreqDict):",len(cellCorrespondingFreqDict)
print 

# print "cellCorrespondingRangesDict:",cellCorrespondingRangesDict
# print "cellCorrespondingFreqDict:",cellCorrespondingFreqDict

inputFileHandler.close()

print "step3: the nervous time is coming(Passed)"
part1NumOfQueryTermsCounted = 0
originalCellsTotalProbability = 0.0
for classLabel in classLabelList:
    for i in range(0,20):
        cellKey = classLabel + "_" + str(i)
        currentCellTotalProbability = cellKeyWithItsProbabilityDict[cellKey] * cellCorrespondingFreqDict[cellKey]
        part1NumOfQueryTermsCounted += cellCorrespondingFreqDict[cellKey]
        
        # for debug ONLY section
        # print "cellKey:",cellKey
        # print "cellKeyWithItsProbabilityDict[cellKey]:",cellKeyWithItsProbabilityDict[cellKey]
        # print "cellCorrespondingFreqDict[cellKey]:",cellCorrespondingFreqDict[cellKey]
        # print "currentCellTotalProbability:",currentCellTotalProbability
        
        originalCellsTotalProbability += currentCellTotalProbability

print
print "part1CellsTotalProbability:",originalCellsTotalProbability
print "part2TotalProbabilityShouldBeAdded:",part2TotalProbabilityShouldBeAdded
print "part1CellsTotalProbability+part2TotalProbabilityShouldBeAdded:",originalCellsTotalProbability + part2TotalProbabilityShouldBeAdded
print "part1NumOfQueryTermsCounted:",part1NumOfQueryTermsCounted
print "part2NumOfQueryTermsCounted:",part2NumberOfQueryTerms
print "total number of query terms processed:",part1NumOfQueryTermsCounted + part2NumberOfQueryTerms
'''

# NOTE(review): dead code — module-level string literal, never executes.
# Disabled snippet: sums the real-frequency column of one of three query-set
# files (the two alternatives are left commented inside) to get the total
# number of term positions.
'''
# Purpose: This part of logic is to compute total # of unique term positions for a specific set of queries.
# input option1
# inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/realFreqOfTermsIn_100KQueries_2_4%_sortedByQueryTermFreq"

# input option2
# inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/realFreqOfTermsIn_100KQueries_1_10%_sortedByQueryTermFreq"

# input option3
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/realFreqOfTermsInQueries_head_85K_0_85%_sortedByQueryTermFreq"

inputFileHandler = open(inputFileName,"r")

totalFreq = 0
for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    queryTerm = lineElements[0]
    realFreq = int( lineElements[1] )
    totalFreq += realFreq
print "totalFreq:",totalFreq

inputFileHandler.close()
'''



# NOTE(review): dead code — module-level string literal, never executes.
# Disabled snippet: loads cellKey -> probability and rewrites it as a 5x20
# table (one line per ROW label, 20 space-separated cell probabilities)
# with a three-line header.
'''
# Purpose: make the final probability into 2D dimension format
classLabelList = ["ROW1","ROW2","ROW3","ROW4","ROW5"]
queryTermWithProbabilityDict = {}
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/probabilityWeWantFinally20130418"
inputFileHandler = open(inputFileName,"r")
for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    cellkey = lineElements[0]
    cellkeyProbablity = float( lineElements[1] )
    if cellkey not in queryTermWithProbabilityDict:
        queryTermWithProbabilityDict[cellkey] = cellkeyProbablity
print "len(queryTermWithProbabilityDict):",len(queryTermWithProbabilityDict)
inputFileHandler.close()

outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/probabilityWeWantFinally20130418_2D_table_format"
outputFileHandler = open(outputFileName,"w")
outputFileHandler.write("table:probability(final)" + "\n")
outputFileHandler.write("Freq 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19" + "\n")
outputFileHandler.write("********************" + "\n")
for classLabel in classLabelList:
    outputLine = classLabel + " "
    
    for i in range(0,20):
        cellKey = classLabel + "_" + str(i)
        outputLine += str( queryTermWithProbabilityDict[cellKey] ) + " "
        
    outputLine = outputLine.strip() + "\n"
    outputFileHandler.write(outputLine)

outputFileHandler.close()
'''

# Disabled scratch script (module-level string literal, never executed): appends
# freq / totalNumberOfQueryTermPositionFor85KQueries to each line whose freq >= 20,
# and -1.0 otherwise. Relies on "from __future__ import division" (see file head)
# for true division.
'''
# Purpose: This part of code logic is to easily compute the real probability for the query terms which greater or equal to 20
# I will NOT delete the following statement cause I have made it wrong.
# And I want to leave to remind me NOT to make such mistakes again.
# (This is the wrong statement) totalNumOfQueries = 10000
totalNumberOfQueryTermPositionFor85KQueries = 351734

outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/realFreqOfTermsInQueries_head_85K_0_85%_withSomeProbabilityAdded_sortedByQueryTermFreq"
outputFileHandler = open(outputFileName,"w")

inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/realFreqOfTermsInQueries_head_85K_0_85%_sortedByQueryTermFreq"
inputFileHandler = open(inputFileName,"r")
for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    freq = int( lineElements[1] )
    if freq >= 20:
        # The following is an wrong statement
        # (This is the wrong statement) newProbability = freq / totalNumOfQueries
        newProbability = freq / totalNumberOfQueryTermPositionFor85KQueries
        outputFileHandler.write(line.strip() + " " + str( newProbability ) + "\n")
    else:
        newProbability = -1.0
        outputFileHandler.write(line.strip() + " " + str( newProbability ) + "\n")

inputFileHandler.close()
outputFileHandler.close()
'''

# Disabled scratch script (module-level string literal, never executed): numerically
# brackets the root z of (1-z)^41221 == trueProbability in two passes — a coarse
# pass halving z each step, then a fine pass shrinking by 1/STEP_PARAMETER_for_LOOP2 —
# printing the bounding (z, value) pairs after each round.
'''
# Solve[(1 - z)^41221 == 0.85593220339, z]
print "Let's solve this problem here."
# set the init value to z = 0.5
# init value:
z = 0.5
powerNumber = 41221
trueProbability = 0.710526315789
previousValueOfZ = 0.0
previousPredictivedValue = 0.0
currentPredictivedValue = math.pow((1-z),powerNumber)
STEP_PARAMETER_for_LOOP1 = 2

# The most beautiful bound with the STEP_PARAMETER_for_LOOP2 set to be 1000000
STEP_PARAMETER_for_LOOP2 = 10000000

while currentPredictivedValue < trueProbability:
    # backup the old things
    previousValueOfZ = z
    previousPredictivedValue = currentPredictivedValue
    
    # update the new things
    z = z - z / STEP_PARAMETER_for_LOOP1
    currentPredictivedValue = math.pow((1-z),powerNumber)
    # print z,previousPredictivedValue,currentPredictivedValue,trueProbability

print "bounds produced for the round1:"
print previousValueOfZ,previousPredictivedValue,trueProbability
print z,currentPredictivedValue,trueProbability

print "**********"
# let's try the effects of make things more precise
while previousPredictivedValue < trueProbability:
    # backup the old things
    previousValueOfZ2 = previousValueOfZ
    previousPredictivedValue2 = previousPredictivedValue
    
    # update the new things
    previousValueOfZ = previousValueOfZ - previousValueOfZ / STEP_PARAMETER_for_LOOP2
    previousPredictivedValue = math.pow((1 - previousValueOfZ),powerNumber)
    # print previousValueOfZ,previousPredictivedValue2,previousPredictivedValue,trueProbability

print "bounds produced for the round2:"
print previousValueOfZ2,previousPredictivedValue2,trueProbability
print previousValueOfZ,previousPredictivedValue,trueProbability
print "**********"
'''

# Disabled scratch script (module-level string literal, never executed): parses a
# probability table (skipping to the "table:probability" header), loads per-cell
# ranges and probabilities, then writes one Wolfram "Solve[(1 - z)^N == p, z]"
# equation per cell for external solving. Note the warning below: the probabilities
# here should have been 1 - probability.
'''
# Updated by Wei 2013/04/18,THE PROBABILITY PROVIDED HERE IS NOT RIGHT and SHOULD BE 1 - probability
# This is OBSERVERED again by my dear prof, Torsten !
# This part of logic is to produce equations for Juan to compute.
classLabelList = ["ROW1","ROW2","ROW3","ROW4","ROW5","SUM"]
cellCorrespondingRangesDict = {}
cellCorrespondingProbabilityDict = {}

# This part of logic is to prepare some high order equations with one variable for Juan to compute
# step1: load the probability into the dict first.
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/probabilityTableUsingProfIdea20130413"
inputFileHandler = open(inputFileName,"r")
dataLine = inputFileHandler.readline()
while not dataLine.strip().startswith("table:probability"):
    dataLine = inputFileHandler.readline()
# print "mark1:"
# print dataLine
# print "mark2:"
inputFileHandler.readline()
inputFileHandler.readline()

for classLabel in classLabelList:
    rowLineData = inputFileHandler.readline().strip()
    rowLineDataElements = rowLineData.split(" ")
    for i in range(0,20):
        cellKey = classLabel + "_" + str(i)
        if cellKey not in cellCorrespondingRangesDict:
            cellCorrespondingRangesDict[cellKey] = rowLineDataElements[i+1].split(":")[0]
            cellCorrespondingProbabilityDict[cellKey] = float( rowLineDataElements[i+1].split(":")[1] )

print "example key:ROW1_6 value:[0,73]"
print "len(cellCorrespondingRangesDict):",len(cellCorrespondingRangesDict)
print 

print "example key:ROW1_6 value:0.38235294117599999"
print "len(cellCorrespondingProbabilityDict):",len(cellCorrespondingProbabilityDict)
print 
# print "cellCorrespondingRangesDict:",cellCorrespondingRangesDict

inputFileHandler.close()

# step2: produce the equations for Juan to compute
NUM_OF_QUERY_TERM_POSITIONS = 41221
outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/equationsForJuanToSolve"
outputFileHandler = open(outputFileName,"w")
equationInWolframFormatPart1 = "Solve[(1 - z)^" + str(NUM_OF_QUERY_TERM_POSITIONS) + " == "
equationInWolframFormatPart2 = ", z]"
wholeCompleteEquationInWolframFormat = ""
for classLabel in classLabelList:
    for i in range(0,20):
        cellKey = classLabel + "_" + str(i)
        print cellKey
        outputFileHandler.write(cellKey + "\n")
        
        wholeCompleteEquationInWolframFormat = equationInWolframFormatPart1 + str(cellCorrespondingProbabilityDict[cellKey]) + equationInWolframFormatPart2
        print wholeCompleteEquationInWolframFormat
        outputFileHandler.write(wholeCompleteEquationInWolframFormat + "\n")
        
        print
        outputFileHandler.write("\n")
        
outputFileHandler.close()
'''

# Disabled scratch script (module-level string literal, never executed): sums the
# second column (frequency) of an input file and prints the total.
'''
# Note: the # of unique query terms among one query(prof's query term positions) are 
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/realFreqOfTermsIn_100KQueries_1_10%"
inputFileHandler = open(inputFileName,"r")
totalFreq = 0
for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    currentFreq = int(lineElements[1])
    totalFreq += currentFreq
print "totalFreq:",totalFreq
inputFileHandler.close()
'''

# Disabled scratch script (module-level string literal, never executed): rewrites
# each "term freq" line with probability freq/16442 appended (true division via the
# __future__ import in the file head).
'''
# Note: the # of unique query terms among one query(prof's query term positions) are 16442
# This part of logic is to fix the real probability distribution for the 4K queries.
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/realFreqOfTermsIn_100KQueries_2_4%_sortedByQueryTerm_withProbablityAdded_OLD"
inputFileHandler = open(inputFileName,"r")

outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/realFreqOfTermsIn_100KQueries_2_4%_sortedByQueryTerm_withProbablityAdded"
outputFileHandler = open(outputFileName,"w")

for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    currentFreq = int(lineElements[1])
    newProbability = currentFreq / 16442
    newOutputLine = lineElements[0] + " " + str(currentFreq) + " " + str(newProbability) + "\n"
    outputFileHandler.write(newOutputLine)

inputFileHandler.close()
outputFileHandler.close()
'''

# Disabled scratch script (module-level string literal, never executed): sums the
# real and predicted probability columns of the normalized file and prints both totals.
'''
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/queryTermsFrom4KWithTheirTrueProbablityAndPredictedProbablity_probability_normalized"
inputFileHandler = open(inputFileName,"r")


totalRealProbability = 0
totalPredictedprobability = 0

for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    queryTerm = lineElements[0]
    realProbability = float( lineElements[1] )
    predictedProbability = float( lineElements[2] )
    
    totalRealProbability += realProbability
    totalPredictedprobability += predictedProbability

print "totalRealProbability:",totalRealProbability
print "totalPredictedprobability:",totalPredictedprobability

inputFileHandler.close()
'''

# Disabled note (string literal): the normalization constants used by the
# script immediately below.
'''
# IMPORTANT INFO for the probability normalization:
dividing factor for Real Probability: 0.82125
dividing factor for Predicted Probability: 794.887275577
'''
# Disabled scratch script (module-level string literal, never executed): divides
# each real/predicted probability by the constants above, writes the normalized
# file, and prints the (unnormalized) totals.
'''
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/queryTermsFrom4KWithTheirTrueProbablityAndPredictedProbablity_probability_unnormalized"
inputFileHandler = open(inputFileName,"r")

outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/queryTermsFrom4KWithTheirTrueProbablityAndPredictedProbablity_probability_normalized"
outputFileHandler = open(outputFileName,"w")


totalRealProbability = 0
totalPredictedprobability = 0

for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    queryTerm = lineElements[0]
    realProbability = float( lineElements[1] )
    predictedProbability = float( lineElements[2] )
    
    new_realProbability = realProbability / 0.82125
    new_predictedProbability = predictedProbability / 794.887275577
    
    outputFileHandler.write(queryTerm + " " + str(new_realProbability) + " " + str(new_predictedProbability) + "\n")
    
    totalRealProbability += realProbability
    totalPredictedprobability += predictedProbability

print "totalRealProbability:",totalRealProbability
print "totalPredictedprobability:",totalPredictedprobability

inputFileHandler.close()
outputFileHandler.close()
'''

# Disabled scratch script (module-level string literal, never executed): appends
# freq / TOTAL_NUM_OF_QUERIES to each input line (true division via the __future__
# import in the file head).
'''
TOTAL_NUM_OF_QUERIES = 4000

inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/realFreqOfTermsIn_100KQueries_2_4%_sortedByQueryTerm"
outputFileName = inputFileName + "_withProbablityAdded"

inputFileHandler = open(inputFileName,"r")
outputFileHandler = open(outputFileName,"w")

for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    freq = int(lineElements[1])
    probablity = freq / TOTAL_NUM_OF_QUERIES
    outputFileHandler.write(line.strip() + " " + str(probablity) + "\n")

inputFileHandler.close()
outputFileHandler.close()
'''

# Disabled scratch script (module-level string literal, never executed): verifies a
# "rangesForEachCell..." guide file against a per-cell counts file. It first loads,
# per frequency, 5 (beginRangeID, endRangeID, totalFreq) tuples from the guide file,
# then walks the counts file accumulating cell values per big range and printing
# PASS / error diagnostics when an accumulated sum matches / mismatches the expected
# total. Exact readline ordering is load-bearing; kept byte-identical.
'''
# This part of logic is to verify the correctness of the output file called: rangesForEachCellWithEvenValueFor100KQueries_1_10%
freqMetaInfoDict = {}
inputGuideFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/rangesForEachCellWithEvenValueFor100KQueries_1_10%"
inputGuideFileHandler = open(inputGuideFileName,"r")
dataLine = inputGuideFileHandler.readline()
while dataLine:
    headInfoLine = dataLine
    headInfoLineElements = headInfoLine.strip().split(" ")
    freq = int(headInfoLineElements[0])
    totalNumOfQueryTerms = int(headInfoLineElements[1])
    # ignore the number of ranges: 5
    # averageGap = int( headInfoLineElements[3] )

    if freq not in freqMetaInfoDict:
        freqMetaInfoDict[freq] = []
            
    for i in range(0,5):
        # do 5 times
        dataInfoLine = inputGuideFileHandler.readline()
        dataInfoLineElements = dataInfoLine.strip().split(" ")
        (beginningRangeID,endingRangeID,totalFreqForThisBigRange) = (int(dataInfoLineElements[0]),int(dataInfoLineElements[1]),int(dataInfoLineElements[2]) )
        freqMetaInfoDict[freq].append( (beginningRangeID,endingRangeID,totalFreqForThisBigRange) )
    
    # ignore the empty line
    dataLine = inputGuideFileHandler.readline()
    # get the header line
    dataLine = inputGuideFileHandler.readline()
inputGuideFileHandler.close()

print "len(freqMetaInfoDict):",len(freqMetaInfoDict)
for i in range(0,20):
    print i,freqMetaInfoDict[i]

inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/100KQueries_1_10%_without_query_terms_sortedByMyOwnWay"
inputFileHandler = open(inputFileName,"r")

smallRangeCellValue = -1
currentFreqAccumulateCounter = 0
smallRangeCellKeyFreq = -1
smallRangeCellKeyRangeID = -1
for i in range(0,20):
    for tuple in freqMetaInfoDict[i]:
        (beginningRangeID,endingRangeID,totalFreqForThisBigRange) = tuple
        
        dataLine = inputFileHandler.readline()
        dataLineElements = dataLine.strip().split(" ")
        smallRangeCellKey = dataLineElements[0]
        smallRangeCellValue = int(dataLineElements[1])
        smallRangeCellKeyElements = smallRangeCellKey.split("_")
        smallRangeCellKeyFreq = int(smallRangeCellKeyElements[0])
        smallRangeCellKeyRangeID = int(smallRangeCellKeyElements[1])
        
        while smallRangeCellKeyFreq == i and smallRangeCellKeyRangeID >= beginningRangeID and smallRangeCellKeyRangeID <= endingRangeID:
            # for debug ONLY
            # print "smallRangeCellValue:",smallRangeCellValue,"smallRangeCellKeyFreq:",smallRangeCellKeyFreq,"smallRangeCellKeyRangeID:",smallRangeCellKeyRangeID
            currentFreqAccumulateCounter += smallRangeCellValue
            dataLine = inputFileHandler.readline()
            if dataLine.strip() != "":
                dataLineElements = dataLine.strip().split(" ")
                smallRangeCellKey = dataLineElements[0]
                smallRangeCellValue = int(dataLineElements[1])
                smallRangeCellKeyElements = smallRangeCellKey.split("_")
                smallRangeCellKeyFreq = int(smallRangeCellKeyElements[0])
                smallRangeCellKeyRangeID = int(smallRangeCellKeyElements[1])
            else:
                if currentFreqAccumulateCounter == totalFreqForThisBigRange:
                    print "PASS","Value:",currentFreqAccumulateCounter
                    exit(1)
                else:
                    print "mark3,error"
                    exit(1)
                
                print "EOF"

        if currentFreqAccumulateCounter == totalFreqForThisBigRange:
            print "PASS","Value:",currentFreqAccumulateCounter
            # reset to be 0
            currentFreqAccumulateCounter = 0
            # add the missing smallRangeCellValue
            currentFreqAccumulateCounter += smallRangeCellValue
            # for debug ONLY
            # print "smallRangeCellValue:",smallRangeCellValue,"smallRangeCellKeyFreq:",smallRangeCellKeyFreq,"smallRangeCellKeyRangeID:",smallRangeCellKeyRangeID 
        else:
            print "currentFreqAccumulateCounter:",currentFreqAccumulateCounter
            print "totalFreqForThisBigRange:",totalFreqForThisBigRange
            print "Mark2,Unexpected Behaviour"
            exit(1)

inputFileHandler.close()
'''

# Disabled scratch script (module-level string literal, never executed): loads
# "key value" cell counts, then for each of 1000 numeric class labels sums the
# 20 cells "i_label" (i = 0..19) and prints "label total".
'''
classFreqDict = {}
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/freqOfFreqInQueries_head_85K_0_85%_2D_without_query_terms_fix_zerop_freq_sortedByMyOwnWay"
inputFileHandler = open(inputFileName,"r")
for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    key = lineElements[0]
    value = int(lineElements[1])
    if key not in classFreqDict:
        classFreqDict[key] = value
    else:
        print "unexpected behaviour"
        exit(1)

print "len(classFreqDict):",len(classFreqDict)

# modified values
totalFreqExceptZero = 0

classLabelList = []
for i in range(0,1000):
    classLabelList.append( str(i) )
print "len(classLabelList):",len(classLabelList)

for classLabel in classLabelList:
    # horizontal computations
    totalFreqExceptZero = 0
    for i in range(0,20):
        key = str(i) + "_" + classLabel
        totalFreqExceptZero += classFreqDict[key]
    
    # for easy application
    print classLabel,totalFreqExceptZero
 
inputFileHandler.close()
'''

# Disabled scratch script (module-level string literal, never executed): sanity
# check — sums column 4 of the 1000-small-ranges file and prints the total.
'''
# This part is just check the correctness of the file 1000SmallRangesWithNumOfTermsBelongingTo
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/1000SmallRangesWithNumOfTermsBelongingTo"
inputFileHandler = open(inputFileName,"r")
totalNumOfQueryTerms = 0

for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    value = int( lineElements[3] )
    totalNumOfQueryTerms += value

print "totalNumOfQueryTerms:",totalNumOfQueryTerms
inputFileHandler.close()
'''

# Disabled scratch script (module-level string literal, never executed): for each of
# two table files, groups consecutive lines by the prefix before "_" in the key and
# prints "prefix runningTotal" at every prefix change (denominator table first, then
# the "molecular" — i.e. numerator — table).
'''
# check the correctness of the small buckets for the two table files.
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/freqOfFreqInQueries_head_85K_0_85%_2D_without_query_terms_sortedByMyOwnWay"
inputFileHandler = open(inputFileName,"r")

print "forDenominatorTable"

currentPrefix = "N/A"
currentFreqTotalSum = 0

for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    key = lineElements[0]
    value = int( lineElements[1] )
    if currentPrefix == key.split("_")[0]:
        currentFreqTotalSum += value
    else:
        print currentPrefix, currentFreqTotalSum
        currentPrefix = key.split("_")[0]
        currentFreqTotalSum = 0
        currentFreqTotalSum += value
print currentPrefix, currentFreqTotalSum
inputFileHandler.close()

# check the correctness of the small buckets for the two table files.
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/100KQueries_1_10%_without_query_terms_sortedByMyOwnWay"
inputFileHandler = open(inputFileName,"r")

print "forMolecularTable"

currentPrefix = "N/A"
currentFreqTotalSum = 0

for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    key = lineElements[0]
    value = int( lineElements[1] )
    if currentPrefix == key.split("_")[0]:
        currentFreqTotalSum += value
    else:
        print currentPrefix, currentFreqTotalSum
        currentPrefix = key.split("_")[0]
        currentFreqTotalSum = 0
        currentFreqTotalSum += value
print currentPrefix, currentFreqTotalSum
inputFileHandler.close()
'''

# Disabled scratch script (module-level string literal, never executed): copies a
# file, prefixing each line with its 0-based line index as a unique ID.
'''
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/smallBucketsRangesForQueryTermOverGov2Dataset"
inputFileHandler = open(inputFileName,"r")

outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/smallBucketsRangesForQueryTermOverGov2Dataset_withUniqueIDAdded"
outputFileHandler = open(outputFileName,"w")

for index,line in enumerate( inputFileHandler.readlines() ):
    outputFileHandler.write( str(index) + " " + line.strip() + "\n")

inputFileHandler.close()
outputFileHandler.close()
'''

# Disabled scratch script (module-level string literal, never executed): sums the
# cell counts for all keys except the five "0_*" (zero-frequency) cells, to get the
# number of query terms seen with frequency below 20.
'''
# this part of logic is to compute NUM_OF_QUERY_TERMS_SEEN_WHICH_FREQ_LESS_THAN_20
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/freqOfFreqInQueries_head_85K_0_85%_2D_without_query_terms_sortedByMyOwnWay"
inputFileHandler = open(inputFileName,"r")
totalNumOfSeenWrods = 0
blackKeyList = ["0_VF","0_F","0_M","0_NF","0_VR"]
for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    key = lineElements[0]
    value = int(lineElements[1])
    if key not in blackKeyList:
        totalNumOfSeenWrods += value
    else:
        print key,"NOT included."

print "totalNumOfSeenWrods:",totalNumOfSeenWrods
'''

# Disabled scratch script (module-level string literal, never executed): buckets
# lexicon terms by inverted-list length into five half-open ranges
# VR [1,100), NF [100,5000), M [5000,80000), F [80000,600000), VF [600000,inf)
# and prints the count per bucket.
'''
# ["VR","NF","M","F","VF"]
# [1 - 100)
# [100 - 5000)
# [5000 - 80,000)
# [80,000 - 600,000)
# [600,000 - ending)

UPPER_BOUND_FOR_RANGE1 = 100
UPPER_BOUND_FOR_RANGE2 = 5000
UPPER_BOUND_FOR_RANGE3 = 80000
UPPER_BOUND_FOR_RANGE4 = 600000

num_terms_in_VR_counter = 0
num_terms_in_NF_counter = 0
num_terms_in_M_counter = 0
num_terms_in_F_counter = 0
num_terms_in_VF_counter = 0



# the purpose of this program is to count how many terms belong to the ranges.
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/wholeLexiconTermsWithTermFreqInCollection_TermFreqInQLAdded_ProbabilityInQLAdded_DesidingFactorAdded_sortedByListLength.txt"
inputFileHandler = open(inputFileName,"r")
for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    lengthOfListForLexiconTerm = int(lineElements[1])
    
    if lengthOfListForLexiconTerm >= 1 and lengthOfListForLexiconTerm < UPPER_BOUND_FOR_RANGE1:
        # it is very rare
        # example:
        # (freqOfFreqNr,modifiedFreqRStar,VFProbability,FProbability,MProbability,NFProbability,VRProbability)
        num_terms_in_VR_counter += 1

    elif lengthOfListForLexiconTerm >= UPPER_BOUND_FOR_RANGE1 and lengthOfListForLexiconTerm < UPPER_BOUND_FOR_RANGE2:
        # it is not frequent
        # example:
        # (freqOfFreqNr,modifiedFreqRStar,VFProbability,FProbability,MProbability,NFProbability,VRProbability)
        num_terms_in_NF_counter += 1

    elif lengthOfListForLexiconTerm >= UPPER_BOUND_FOR_RANGE2 and lengthOfListForLexiconTerm < UPPER_BOUND_FOR_RANGE3:
        # it is medium
        # example:
        # (freqOfFreqNr,modifiedFreqRStar,VFProbability,FProbability,MProbability,NFProbability,VRProbability)
        num_terms_in_M_counter += 1

    elif lengthOfListForLexiconTerm >= UPPER_BOUND_FOR_RANGE3 and lengthOfListForLexiconTerm < UPPER_BOUND_FOR_RANGE4:
        # it is frequent
        # example:
        # (freqOfFreqNr,modifiedFreqRStar,VFProbability,FProbability,MProbability,NFProbability,VRProbability)
        num_terms_in_F_counter += 1

    elif lengthOfListForLexiconTerm >= UPPER_BOUND_FOR_RANGE4:
        # it is very frequent
        # example:
        # (freqOfFreqNr,modifiedFreqRStar,VFProbability,FProbability,MProbability,NFProbability,VRProbability)
        num_terms_in_VF_counter += 1
    
inputFileHandler.close()

print "Begins..."
print "num of lexicons belonging to each range"
print "num_terms_in_VR_counter:",num_terms_in_VR_counter
print "num_terms_in_NF_counter:",num_terms_in_NF_counter
print "num_terms_in_M_counter:",num_terms_in_M_counter
print "num_terms_in_F_counter:",num_terms_in_F_counter
print "num_terms_in_VF_counter:",num_terms_in_VF_counter
print "Ends."
'''

# Disabled scratch script (module-level string literal, never executed; superseded
# per its own note by gov2_Phase2_make_easy_looking_table_for_prof_to_check.py):
# loads denominator and "molecular" (numerator) cell counts, smooths zero-valued
# denominator cells by averaging the two neighbouring cells, then writes one file
# containing three 5x20 tables: denominator, molecular, and their element-wise ratio.
'''
# DO NOT USE THIS PART OF LOGIC
# This part of logic has been moved to a seperate program called: gov2_Phase2_make_easy_looking_table_for_prof_to_check.py
# Also note that this part of logic will NOT longer be updated
# make the easy looking table for prof to check
# also with some numbers in the denominator smoothing(average the neighbours)
classLabelList = ["VF","F","M","NF","VR"]
denominatorDict = {}
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/freqOfFreqInQueries_head_85K_0_85%_2D_without_query_terms_sortedByMyOwnWay"
inputFileHandler = open(inputFileName,"r")
for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    key = lineElements[0]
    value = int(lineElements[1])
    if key not in denominatorDict:
        denominatorDict[key] = value
    else:
        print "mark1"
        exit(1)

print "len(denominatorDict):",len(denominatorDict)
# do a smoothing for the value of 0
# the smoothing is very simple, just average the two sides.
print "smoothing in..."
for classLabel in classLabelList:
    outputLine = classLabel + " "
    for i in range(0,20):
        key = str(i) + "_" + classLabel
        if denominatorDict[key] == 0:
            print "original:",key,denominatorDict[key]
            previousAuxSmoothingKey = str(i-1) + "_" + classLabel
            nextAuxSmoothingKey = str(i+1) + "_" + classLabel
            denominatorDict[key] = (denominatorDict[previousAuxSmoothingKey] + denominatorDict[nextAuxSmoothingKey])/2
            print "     new:",key,denominatorDict[key]
print "smoothing out."        
        
# this is just for checking
for key in denominatorDict:
    if denominatorDict[key] == 0:
        print key,denominatorDict[key]
        print "not expected"
        exit(1)

inputFileHandler.close()

molecularDict = {}
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/100KQueries_1_10%_without_query_terms_sortedByMyOwnWay"
inputFileHandler = open(inputFileName,"r")
for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    key = lineElements[0]
    value = int(lineElements[1])
    if key not in molecularDict:
        molecularDict[key] = value
    else:
        print "mark2"
        exit(1)

print "len(molecularDict):",len(molecularDict)
inputFileHandler.close()

# One output file containing 3 tables
outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/probabilityTableUsingProfIdea20130410"
outputFileHandler = open(outputFileName,"w")

outputFileHandler.write("table:denominator" + "\n")
outputFileHandler.write("NUM_OF_QUERY_TERMS_SEEN_WHICH_FREQ_LESS_THAN_20:?" + "\n")
outputFileHandler.write("NUM_OF_QUERIS_COUNT:Head_85K" + "\n")
outputFileHandler.write("Freq 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19" + "\n")
outputFileHandler.write("********************" + "\n")
for classLabel in classLabelList:
    outputLine = classLabel + " "
    for i in range(0,20):
        key = str(i) + "_" + classLabel
        outputLine += str(denominatorDict[key]) + " "
    outputLine = outputLine.strip() + "\n"
    outputFileHandler.write(outputLine)

outputFileHandler.write("\n")
outputFileHandler.write("table:molecular" + "\n")
outputFileHandler.write("NUM_OF_QUERY_TERMS_NEW:?" + "\n")
outputFileHandler.write("NUM_OF_QUERIS_COUNT:FROM_85K_to_95K" + "\n")
outputFileHandler.write("Freq 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19" + "\n")
outputFileHandler.write("********************" + "\n")
for classLabel in classLabelList:
    outputLine = classLabel + " "
    for i in range(0,20):
        key = str(i) + "_" + classLabel
        outputLine += str(molecularDict[key]) + " "
    outputLine = outputLine.strip() + "\n"
    outputFileHandler.write(outputLine)


outputFileHandler.write("\n")
outputFileHandler.write("table:probability" + "\n")
outputFileHandler.write("Freq 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19" + "\n")
outputFileHandler.write("********************" + "\n")
for classLabel in classLabelList:
    outputLine = classLabel + " "
    for i in range(0,20):
        key = str(i) + "_" + classLabel
        outputLine += str(molecularDict[key]/denominatorDict[key]) + " "
    outputLine = outputLine.strip() + "\n"
    outputFileHandler.write(outputLine)
    
outputFileHandler.close()
'''

# Disabled scratch script (module-level string literal, never executed): histograms
# inverted-list lengths over the Gov2 lexicon (frequency-of-frequency counts) and
# writes "length count" pairs.
'''
# do the histogram thing for the length of the inverted list over the gov2 dataset
freqOfFreqDict = {}

inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/wholeLexiconTermsWithTermFreqInCollection.txt"
inputFileHandler = open(inputFileName,"r")

outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/histogramForTheLengthOfTheListOverGov2Dataset"
outputFileHandler = open(outputFileName,"w")


dataLine = inputFileHandler.readline()
while dataLine:
    dataLineElements = dataLine.strip().split(" ")
    length = int(dataLineElements[1])
    
    if length not in freqOfFreqDict:
        freqOfFreqDict[length] = 1
    else:
        freqOfFreqDict[length] += 1
    
    dataLine = inputFileHandler.readline()

#output the freqOfFreqDict
for freq in freqOfFreqDict:
    outputFileHandler.write(str(freq) + " " + str(freqOfFreqDict[freq]) + "\n")

inputFileHandler.close()
outputFileHandler.close()
'''

# Disabled scratch script (module-level string literal, never executed): walks two
# cell-count files in lockstep, requiring identical keys on each line and
# value1 >= value2; exits with a diagnostic on the first violation, otherwise
# prints "Final Pass".
'''
# checking the correctness of each cell.
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/freqOfFreqInQueries_head_85K_0_85%_2D_without_query_terms_fix_zerop_freq_sortedByMyOwnWay"
inputFileHandler = open(inputFileName,"r")

inputFileName2 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/100KQueries_1_10%_without_query_terms_sortedByMyOwnWay"
inputFileHandler2 = open(inputFileName2,"r")

lineFromFile1 = inputFileHandler.readline()
lineFromFile2 = inputFileHandler2.readline()

while lineFromFile1:
    line1Elements = lineFromFile1.strip().split(" ")
    line2Elements = lineFromFile2.strip().split(" ")
    # print line1Elements,line2Elements
    
    key1 = line1Elements[0]
    value1 = int(line1Elements[1])

    key2 = line2Elements[0]
    value2 = int(line2Elements[1])
    
    # print "key1:",key1,"key2:",key2
    # print value1,value2
    
    if key1 != key2:
        print key1,key2
        print "Unexpected Behavior"
        exit(1)
    else:
        if value1 < value2:
            print key1,value1,key2,value2
            print "Unexpected Behavior"
            exit(1)
        else:
            pass
    
    print value1,value2
    
    lineFromFile1 = inputFileHandler.readline()
    lineFromFile2 = inputFileHandler2.readline()    

print "Final Pass"
inputFileHandler.close()
inputFileHandler2.close()
'''

# Disabled scratch script (module-level string literal, never executed): rewrites the
# freq-of-freq file, replacing the value of every "0_<rangeID>" cell with the freq
# loaded from the per-small-range aux file; all other lines are copied through.
'''
# The following code part is used for fixing zero freq problem in the file called: freqOfFreqInQueries_head_85K_0_85%_2D_without_query_terms_sortedByMyOwnWay
smallRangeIDAndZeroFreqDict = {}
inputAuxFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/eachSmallRangeInfo20130412_sortedBySmallRangeID"
inputAuxFileHandler = open(inputAuxFileName,"r")
for line in inputAuxFileHandler.readlines():
    lineElements = line.strip().split(" ")
    smallRangeID = lineElements[0]
    freq = int(lineElements[3])
    if smallRangeID not in smallRangeIDAndZeroFreqDict:
        smallRangeIDAndZeroFreqDict[smallRangeID] = freq
    else:
        print "Unexpected Behaviour"
        exit(1)

print "len(smallRangeIDAndZeroFreqDict):",len(smallRangeIDAndZeroFreqDict)
inputAuxFileHandler.close()

inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/freqOfFreqInQueries_head_85K_0_85%_2D_without_query_terms_sortedByMyOwnWay"
inputFileHandler = open(inputFileName,"r")

outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/freqOfFreqInQueries_head_85K_0_85%_2D_without_query_terms_fix_zerop_freq_sortedByMyOwnWay"
outputFileHandler = open(outputFileName,"w")

for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    if lineElements[0].startswith("0_"):
        # need to fix the corresponding value
        smallRangeID = lineElements[0].split("_")[1]
        cellKey = "0_" + smallRangeID
        newZeroFreqValue = smallRangeIDAndZeroFreqDict[smallRangeID]
        
        outputFileHandler.write(cellKey + " " + str(newZeroFreqValue) + "\n")
    else:
        outputFileHandler.write(line)

inputFileHandler.close()
outputFileHandler.close()
'''

'''
# We need to extend this part of logic a little bit.
# This part of script just show how many terms left for the zero probability.
# Latest results for the head 85K queries

# extended solution:

# OLD solution: manually change the value for (1)0_VF, (2)0_F, (3)0_M, (4)0_NF, (5)0_VR
#    except zero appearance total      left for zero appearance
#VF  905                    2141       1236
#F   4712                   6837       2125
#M   16787                  36148      19361
#NF  29144                  807526     778382
#VR  32752                  36875967   36843215           

eachSmallRangeIDWithTotalFreqDict = {}
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/1000SmallRangesWithNumOfTermsBelongingTo"
inputFileHandler = open(inputFileName,"r")
for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    smallRangeID = lineElements[0]
    totalFreqForThisRange = int( lineElements[3] )
    
    if smallRangeID not in eachSmallRangeIDWithTotalFreqDict:
        eachSmallRangeIDWithTotalFreqDict[smallRangeID] = totalFreqForThisRange
    else:
        print "Unexpected Behaviour"
        exit(1)

inputFileHandler.close()

eachSmallRangeIDWithTotalFreqExceptZeroFreqDict = {}
classFreqDict = {}
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/freqOfFreqInQueries_head_85K_0_85%_2D_without_query_terms_sortedByMyOwnWay"
inputFileHandler = open(inputFileName,"r")
for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    key = lineElements[0]
    value = int(lineElements[1])
    if key not in classFreqDict:
        classFreqDict[key] = value
    else:
        print "unexpected behaviour"
        exit(1)

print "len(classFreqDict):",len(classFreqDict)

# modified values
totalFreqExceptZero = 0

classLabelList = []
for i in range(0,1000):
    classLabelList.append( str(i) )
print "len(classLabelList):",len(classLabelList)

for classLabel in classLabelList:
    # horizontal computations
    totalFreqExceptZero = 0
    for i in range(1,20):
        key = str(i) + "_" + classLabel
        totalFreqExceptZero += classFreqDict[key]
    
    if classLabel not in eachSmallRangeIDWithTotalFreqExceptZeroFreqDict:
        eachSmallRangeIDWithTotalFreqExceptZeroFreqDict[classLabel] = totalFreqExceptZero
    
    # for easy application
    # print classLabel,totalFreqExceptZero
 
inputFileHandler.close()

print "len( eachSmallRangeIDWithTotalFreqDict ):",len(eachSmallRangeIDWithTotalFreqDict)
print "len( eachSmallRangeIDWithTotalFreqExceptZeroFreqDict ):",len(eachSmallRangeIDWithTotalFreqExceptZeroFreqDict)

outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/eachSmallRangeInfo20130412"
outputFileHandler = open(outputFileName,"w")
# Or I can record this info from my feeling document
# outputFileHandler.write("smallRangeID" + " " + "totalFreqForThisRow" + " " + "totalFreqForThisRowExceptZeroFreq" + " " + "zeroFreqForEachSmallRow")
for key in eachSmallRangeIDWithTotalFreqDict:
    zeroFreqForCurrentSmallRange = eachSmallRangeIDWithTotalFreqDict[key] - eachSmallRangeIDWithTotalFreqExceptZeroFreqDict[key]
    outputFileHandler.write(str(key) + " " + str( eachSmallRangeIDWithTotalFreqDict[key] ) + " " + str( eachSmallRangeIDWithTotalFreqExceptZeroFreqDict[key] ) + " " + str(zeroFreqForCurrentSmallRange) + "\n")
outputFileHandler.close()
'''



'''
# This part of the logic helps to compute the # of query terms belonging to each range (both small ranges and big ranges)
print "program begins..."

tupleRangeWithFreqList = []
tupleRangeWithCounterDict = {}
inputAuxFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/smallBucketsRangesForQueryTermOverGov2Dataset_withUniqueIDAdded"
inputAuxFileHandler = open(inputAuxFileName,"r")
for line in inputAuxFileHandler.readlines():
    lineElements = line.strip().split(" ")
    currentLowerBound = int(lineElements[3])
    currentUpperBound = int(lineElements[4])
    keyTuple = (currentLowerBound,currentUpperBound)
    if keyTuple not in tupleRangeWithFreqList:
        tupleRangeWithFreqList.append(keyTuple)
        tupleRangeWithCounterDict[keyTuple] = 0
    else:
        print "Unexpected Behaviour"
        exit(1)

print "len(tupleRangeWithFreqList):",len(tupleRangeWithFreqList)
inputAuxFileHandler.close()

# in debugging
# inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/wholeLexiconTermsWithTermFreqInCollection_TermFreqInQLAdded_ProbabilityInQLAdded_DesidingFactorAdded_sortedByListLength_tail_8700.txt"
# in production 
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/wholeLexiconTermsWithTermFreqInCollection_TermFreqInQLAdded_ProbabilityInQLAdded_DesidingFactorAdded_sortedByListLength.txt"
inputFileHandler = open(inputFileName,"r")
dataLine = inputFileHandler.readline()
counterForTest = 0
currentRangeIndex = 0
(currentRangeLowerBound,currentRangeUpperBound) = tupleRangeWithFreqList[currentRangeIndex]

while dataLine:    
    if counterForTest % 10000 == 0:
        print "Process:",counterForTest
        
    counterForTest += 1
    # do some logic here.
    dataLineElements = dataLine.strip().split(" ")
    lengthOfListForLexiconTerm = int( dataLineElements[1] )
    
    if lengthOfListForLexiconTerm >= currentRangeLowerBound and lengthOfListForLexiconTerm < currentRangeUpperBound:
        tupleRangeWithCounterDict[tupleRangeWithFreqList[currentRangeIndex]] += 1
    else:
        while not (lengthOfListForLexiconTerm >= currentRangeLowerBound and lengthOfListForLexiconTerm < currentRangeUpperBound):
            if currentRangeIndex == 999:
                break
            currentRangeIndex += 1
            print "lengthOfListForLexiconTerm:",lengthOfListForLexiconTerm
            print "currentRangeIndex:",currentRangeIndex
            print "currentRangeLowerBound:",currentRangeLowerBound
            print "currentRangeUpperBound:",currentRangeUpperBound
            (currentRangeLowerBound,currentRangeUpperBound) = tupleRangeWithFreqList[currentRangeIndex]
        
        tupleRangeWithCounterDict[tupleRangeWithFreqList[currentRangeIndex]] += 1
        
          
    dataLine = inputFileHandler.readline()

outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/1000SmallRangesWithNumOfTermsBelongingTo"
outputFileHandler = open(outputFileName,"w")

for index,tuple in enumerate(tupleRangeWithFreqList):
    (lowerBoundScore,upperBoundScore) = tuple
    outputFileHandler.write(str(index) + " " + str(lowerBoundScore) + " " + str(upperBoundScore) + " " + str( tupleRangeWithCounterDict[tuple] ) + "\n")

outputFileHandler.close()
print "program ends."
'''

'''
# This part of logic is to order the list in my thinking order
orderJustificationDictForFile1 = {}
orderJustificationDictForFile2 = {}

inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/freqOfFreqInQueries_head_85K_0_85%_2D_without_query_terms"
inputFileHandler = open(inputFileName,"r")
for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    key = lineElements[0]
    value = int(lineElements[1])
    if key not in orderJustificationDictForFile1:
        orderJustificationDictForFile1[key] = value
    else:
        print "mark1, unexpected behaviour"
        exit(1)

print "len(orderJustificationDictForFile1):",len(orderJustificationDictForFile1)
inputFileHandler.close()



inputFileName2 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/100KQueries_1_10%_without_query_terms"
inputFileHandler2 = open(inputFileName2,"r")
for line in inputFileHandler2.readlines():
    lineElements = line.strip().split(" ")
    key = lineElements[0]
    value = int(lineElements[1])
    if key not in orderJustificationDictForFile2:
        orderJustificationDictForFile2[key] = value
    else:
        print "mark2, unexpected behaviour"
        exit(1)
print "len(orderJustificationDictForFile2):",len(orderJustificationDictForFile2)
inputFileHandler2.close()

classLabelList = []
for i in range(0,1000):
    classLabelList.append( str(i) )
print "len(classLabelList):",len(classLabelList)

outputFileName = inputFileName + "_sortedByMyOwnWay"
outputFileHandler = open(outputFileName,"w")

for i in range(0,20):
    for freqLevelLabel in classLabelList:
        key = str(i) + "_" + freqLevelLabel
        outputFileHandler.write(key + " " + str( orderJustificationDictForFile1[key] ) + "\n")

outputFileHandler.close()

outputFileName2 = inputFileName2 + "_sortedByMyOwnWay"
outputFileHandler2 = open(outputFileName2,"w")

for i in range(0,20):
    for freqLevelLabel in classLabelList:
        key = str(i) + "_" + freqLevelLabel
        outputFileHandler2.write(key + " " + str( orderJustificationDictForFile2[key] ) + "\n")

outputFileHandler2.close()
'''

##################################################################################################################################
'''
totalFreq = 0
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/freqOfFreqInQueries_head_95K_sortedByFreqR_2D_without_query_terms_sortedByKey.txt"
inputFileHandler = open(inputFileName,"r")

for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    totalFreq += int(lineElements[1])
print "totalFreq:",totalFreq

inputFileHandler.close()
'''

'''
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/freqOfFreqInQueries_head_95K_sortedByFreqR.txt"
inputFileHandler = open(inputFileName,"r")

numOfKindsWhichHasFreqGreaterOrEqualTo20 = 0

for line in inputFileHandler.readlines()[19:]:
    lineElements = line.strip().split(" ")
    freqR = int(lineElements[0])
    freqOfFreqNr = int(lineElements[1])
    numOfKindsWhichHasFreqGreaterOrEqualTo20 += freqOfFreqNr

print "numOfKindsWhichHasFreqGreaterOrEqualTo20:",numOfKindsWhichHasFreqGreaterOrEqualTo20
inputFileHandler.close()
'''


'''
# the sum value of column3 is: 413533
# But I am not going to use this value.
# Instead, the DENOMINATOR will be set to be 100K
DENOMINATOR = 100000

inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/freqOfTermsInQueries_extended_sortedByQueryTermFreq.txt"
inputFileHandler = open(inputFileName,"r")
queryTermDictWithInfoTuple = {}

for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    queryTerm = lineElements[0]
    realFreqInQueries = int( lineElements[2] )
    if realFreqInQueries > 20:
        print line.strip()
        break

    if queryTerm not in queryTermDictWithInfoTuple:
        originalProbability = realFreqInQueries/ DENOMINATOR
        modifiedProbability = originalProbability
        valueTuple = (realFreqInQueries, originalProbability,modifiedProbability)
        queryTermDictWithInfoTuple[queryTerm] = valueTuple 

print "len(queryTermDictWithInfoTuple):",len(queryTermDictWithInfoTuple)
 
inputFileHandler.close()
'''

'''
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/freqOfTermsInQueries_extended_sortedByQueryTermFreq.txt"
inputFileHandler = open(inputFileName,"r")

totalFreq = 0
for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    print lineElements
    totalFreq += int( lineElements[2] )

print "totalFreq:",totalFreq
inputFileHandler.close()
'''

# Let's do the simulation here
# step1: load all the necessary tuple into the main memory
# step2: sort the list based on a specific factor. In memory will have the tuple (0,8400333,25,1.63844456821e-05,percentageBelongingTo,threshold)

'''
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/wholeLexiconTermsWithTermFreqInCollectionANDTermFreqANDProbabilityInQueryTrace.txt"
inputFileHandler = open(inputFileName,"r")

outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/wholeLexiconTermsWithTermFreqInCollection_TermFreqInQLAdded_ProbabilityInQLAdded_DesidingFactorAdded.txt"
outputFileHandler = open(outputFileName,"w")

for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    decidingValue = int( lineElements[2] ) * float( lineElements[3] )
    outputFileHandler.write(line.strip() + " " + str(decidingValue) + "\n")

outputFileHandler.close()
inputFileHandler.close()
'''

'''
# number of terms in lexicon: 37728619
# number of terms in queries: 38871
# increase freq beyond my imagination: 37728619
# the new total freq(including my imagination: 417316 + 37728619 = 38145935)

queryTermWithFreqInQueriesDict = {}
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/freqOfTermsInQueries.txt"
inputFileHandler = open(inputFileName,"r")
totalFreq = 0
for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    queryTerm = lineElements[0]
    queryTermFreqInQueries = int(lineElements[1])
    totalFreq += queryTermFreqInQueries
    if queryTerm not in queryTermWithFreqInQueriesDict:
        queryTermWithFreqInQueriesDict[queryTerm] = queryTermFreqInQueries
print "totalFreq:",totalFreq
inputFileHandler.close()

inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/wholeLexiconTermsWithTermFreqInCollection.txt"
inputFileHandler = open(inputFileName,"r")

outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/wholeLexiconTermsWithTermFreqInCollectionANDTermFreqANDProbabilityInQueryTrace.txt"
outputFileHandler = open(outputFileName,"w")

# This is exactly the add-one smoothing method, but prof knows that is horrible
for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    # I need only the queryTerm
    queryTerm = lineElements[0]
    if queryTerm not in queryTermWithFreqInQueriesDict:
        newFreq = 1
        outputFileHandler.write(line.strip() + " " + str(newFreq) + " " + str( newFreq/38145935 ) + "\n")
    else:
        newFreq = queryTermWithFreqInQueriesDict[queryTerm] + 1
        outputFileHandler.write(line.strip() + " " + str(newFreq) + " " + str( newFreq/38145935 ) + "\n")

inputFileHandler.close()
outputFileHandler.close()
'''



'''
# step1: load all the 100KQueryTerms with their term freq in collection to the main memory using a dict structure
# the size of this dict will be: 38871
# key: term 
# value: term_freq_in_collection

newTermPopUp = 0

queryTermBeingTouchCurrentlyDict = {}
queryTermBeingTouchCurrentlyList = []

numOfUniquePostingsBeingTouchedCurrently = 0

queryTermDictWithFreqInCollection = {}

inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/100KQueryTermsWithTermFreqInCollection.txt"
inputFileHandler = open(inputFileName,"r")

dataLine = inputFileHandler.readline()

while dataLine:
    dataLineElements = dataLine.strip().split(" ")
    queryTerm = dataLineElements[0]
    queryTermFreqInCollection = int(dataLineElements[1])
    if queryTerm not in queryTermDictWithFreqInCollection:
        queryTermDictWithFreqInCollection[queryTerm] = queryTermFreqInCollection
    else:
        print "error, mark1"
    
    # print "weiIndexPostingCount:",weiIndexPostingCount
    dataLine = inputFileHandler.readline()

print "len(queryTermDictWithFreqInCollection):",len(queryTermDictWithFreqInCollection)
inputFileHandler.close()

# step2: load a certain fraction of queries in the 100K to see how many postings have been covered.
queryDict = {}
inputQueryFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/100KQueries"
inputQueryHandler = open(inputQueryFileName,"r")
for index,line in enumerate( inputQueryHandler.readlines() ):
    elements = line.strip().split(":")
    queryID = int(elements[0])
    
    data = elements[1]
    data = data.lower()
    
    for i in range(0,len(data)):
        # print "data[i]:",ord(data[i])
        if not ( (ord(data[i]) >= 48 and ord(data[i]) < 58) or (ord(data[i]) >= 65 and ord(data[i]) < 91) or (ord(data[i]) >= 97 and ord(data[i]) < 123) or (ord(data[i]) == 32) ):
            # Just replace them with a space.
            data = data[:i] + " " + data[i+1:]
    
    queryContent = data

    if queryID not in queryDict:
        queryDict[queryID] = queryContent
        # Let's do some content analysis on this
        queryContentElements = queryContent.strip().split(" ")
        for term in queryContentElements:
            if term.strip() != "" and term.strip() in queryTermDictWithFreqInCollection:
                if term.strip() not in queryTermBeingTouchCurrentlyDict:
                    queryTermBeingTouchCurrentlyDict[ term.strip() ] = 1
                    numOfUniquePostingsBeingTouchedCurrently += queryTermDictWithFreqInCollection[term.strip()]
                    newTermPopUp += 1
                else:
                    queryTermBeingTouchCurrentlyDict[ term.strip() ] += 1
            else:
                if term.strip() != "":
                    print "the term:",term.strip(),"is NOT in the lexicon"
    
    if (index+1) == 2:
        print index+1,numOfUniquePostingsBeingTouchedCurrently,newTermPopUp
        newTermPopUp = 0
    
    if (index+1) % 10000 == 0:  
        print index+1,numOfUniquePostingsBeingTouchedCurrently,newTermPopUp
        newTermPopUp = 0

    
    #print "queryTermBeingTouchCurrentlyDict:",queryTermBeingTouchCurrentlyDict
    #tempCheckPostingCounter = 0
    #for element in queryTermBeingTouchCurrentlyDict:
    #    tempCheckPostingCounter += queryTermDictWithFreqInCollection[element]
    #    print element," ",queryTermDictWithFreqInCollection[element]
    #print "tempCheckPostingCounter:",tempCheckPostingCounter
    

for queryTerm in queryTermBeingTouchCurrentlyDict:
    queryTermBeingTouchCurrentlyList.append( (queryTerm,queryTermBeingTouchCurrentlyDict[queryTerm]) )

queryTermBeingTouchCurrentlyList.sort(cmp=None, key=itemgetter(1), reverse=True)

for tuple in queryTermBeingTouchCurrentlyList[0:10]:
    print tuple
    
print "len(queryTermBeingTouchCurrentlyDict):",len(queryTermBeingTouchCurrentlyDict)
   
# print "----->","len(queryDict):",len(queryDict)
inputQueryHandler.close()
'''


'''
# key: trecID value: dict 
    # subKey: queryTerm subValue: a tuple with the following format(4.45178,718986,11.9503)
    # the meaning of the tuple is (4.45178,718986,11.9503)
    # 4.45178: BM25
    # 718986: postingProbabilityGivenTheQueryTimesBigNum1000000(I am NOT sure what is the value of the big number???)
    # 11.9503: postingProbabilityInGeneralTimesBigNum1000000(I am NOT sure what is the value of the big number???)
forwardIndexInMainMemory = {}

inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/gov2DocumentWithTheirForwardIndex_for_debug_ONLY"
inputFileHandler = open(inputFileName,"r")

# This is the headline info
headLine = inputFileHandler.readline()

dataLine = inputFileHandler.readline()
while dataLine:
    data = dataLine.strip()
    dataElements = data.split(" ")
    trecID = dataElements[0]
    docSizeInWords = int( dataElements[1] ) # This field will be ignored
    docPostingsRecorded = int( dataElements[2] )
    
    if trecID not in forwardIndexInMainMemory:
        forwardIndexInMainMemory[trecID] = {}
    else:
        # the word dict for the current document has already been built,
        # so there is NO logic needed for that
        pass
    
    for i in range(0+3,docPostingsRecorded+3):
        tupleElements = dataElements[i].split("(")[1].split(")")[0].split(",")
        # the tuple element will have the following forms:
        # ['www', '0.435614', '262071', '3.73365']
        # subKey: tupleElements[0]
        # subValue: (0.435614,262071,3.73365)
        if tupleElements[0] not in forwardIndexInMainMemory[trecID]:
            valueTuple = (tupleElements[1],tupleElements[2],tupleElements[3])
            forwardIndexInMainMemory[trecID][ tupleElements[0] ] = valueTuple
    # for debug
    # print "forwardIndexInMainMemory:",forwardIndexInMainMemory
    # print "len(forwardIndexInMainMemory):",len(forwardIndexInMainMemory)
    dataLine = inputFileHandler.readline()
inputFileHandler.close()
# print "len(forwardIndexInMainMemory):",len(forwardIndexInMainMemory)
# exit(1)
'''

'''
# build the histogram for X-Axis
freqOfTermPairFreqDict = {}

inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/queryTermPairProbabilityOfTheQueryTrace_sorted_by_query_term_pair_sorted_by_term_pair_freq"
inputFileHandler = open(inputFileName,"r")

# example line: 0 0001_3064 1
# ...
# example line: 401482 of_the 1863

for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    termPairFreq = int( lineElements[2] )
    # hold
    if termPairFreq not in freqOfTermPairFreqDict:
        freqOfTermPairFreqDict[termPairFreq] = 1
    else:
        freqOfTermPairFreqDict[termPairFreq] += 1

outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/queryTermPairIThinkCorrectHistogram20130318_NOT_sorted"
outputFileHandler = open(outputFileName,"w")
for termPairFreq in freqOfTermPairFreqDict:
    outputFileHandler.write(str(termPairFreq) + " " + str( freqOfTermPairFreqDict[termPairFreq] ) + "\n")

inputFileHandler.close()
outputFileHandler.close()
'''

'''
print "calculate the overall statistics"
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/queryTermPairProbabilityOfTheQueryTrace_sorted_by_query_term_pair_sorted_by_uniqueID_termPairProbabilityFromQueryTraceAdded"
inputFileHandler = open(inputFileName,"r")
originalLineCounter = 0
overallCheckFreq = 0
overallCheckProbability = 0.0
for line in inputFileHandler.readlines():
    originalLineCounter += 1
    lineElements = line.strip().split(" ")
    termPairFreq = int( lineElements[2] )
    termPairProbability = float( lineElements[3] )
    
    overallCheckFreq += termPairFreq
    overallCheckProbability += termPairProbability

print "originalLineCounter:",originalLineCounter
print "overallCheckFreq:",overallCheckFreq
print "overallCheckProbability:",overallCheckProbability

inputFileHandler.close()
'''

'''
# doing this in 2 passes
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/queryTermPairProbabilityOfTheQueryTrace_sorted_by_query_term_pair_sorted_by_uniqueID"
inputFileHandler = open(inputFileName,"r")
totalTermPairFreq = 0

for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    indexNumber = lineElements[0]
    termPairContent = lineElements[1]
    termPairFreq = int( lineElements[2] )
    totalTermPairFreq += termPairFreq

inputFileHandler.close()

print "totalTermPairFreq:",totalTermPairFreq

inputFileName1 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/queryTermPairProbabilityOfTheQueryTrace_sorted_by_query_term_pair_sorted_by_uniqueID"
inputFileHandler1 = open(inputFileName1,"r")

outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/queryTermPairProbabilityOfTheQueryTrace_sorted_by_query_term_pair_sorted_by_uniqueID_termPairProbabilityFromQueryTraceAdded"
outputFileHandler = open(outputFileName,"w")


for line in inputFileHandler1.readlines():
    lineElements = line.strip().split(" ")
    termPairFreq = int( lineElements[2] )
    termPairProbability = termPairFreq / totalTermPairFreq 
    outputFileHandler.write( line.strip() + " " + str(termPairProbability) + "\n")


inputFileHandler1.close()
outputFileHandler.close()
'''

'''
# step1:
termPairUniqueIDWithTermPairAndFreqDict = {}

inputAuxFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/queryTermPairProbabilityOfTheQueryTrace_sorted_by_query_term_pair_sorted_by_uniqueID"
inputAuxFileHandler = open(inputAuxFileName,"r")
for line in inputAuxFileHandler.readlines():
    lineElements = line.strip().split(" ")
    uniqueID = lineElements[0]
    termPair = lineElements[1]
    termPairFreq = lineElements[2]
    tuple = (termPair,termPairFreq)
    if uniqueID not in termPairUniqueIDWithTermPairAndFreqDict:
        termPairUniqueIDWithTermPairAndFreqDict[uniqueID] = tuple
inputAuxFileHandler.close()

print "len(termPairUniqueIDWithTermPairAndFreqDict):",len(termPairUniqueIDWithTermPairAndFreqDict)

# step2:
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/gov2DocumentWithTheirConnectedEdges_for_debug_ONLY"
inputFileHandler = open(inputFileName,"r")
inputFileHandler.readline()

dataLine = inputFileHandler.readline()
dataLineElements = dataLine.strip().split(" ")
 
docID = dataLineElements[0]
NUM_OF_EDGES = int(dataLineElements[1])

for i in range(2,2 + NUM_OF_EDGES):
    uniqueID = dataLineElements[i]
    if uniqueID not in termPairUniqueIDWithTermPairAndFreqDict:
        print "system error"
        exit(1)
    else:
        (termPair,termPairFreq) = termPairUniqueIDWithTermPairAndFreqDict[uniqueID]
        print uniqueID,termPair,termPairFreq
'''  
    

'''
outputLineList = []

inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/Posting_Oriented_Training_Dataset_2013_02_22.train_tfqAdded_labelsAdded_sortedByTerm_rankInListAdded_sortedByDocID_rankInDocAdded_freqInCollectionAdded_sortedByQueryID_fixed"
inputFileHandler = open(inputFileName,"r")

outputFileName = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/small_set_of_gov2_documents_needed_to_be_extracted_for_training_queries_sorted_by_qid"
outputFileHandler = open(outputFileName,"w")

# ignore the header line
inputFileHandler.readline()

for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    # for debug ONLY
    # print len(lineElements)
    
    # index directory listing
    # queryID 0
    # trecID 1
    # term 2
    # partialBM25 3 
    # length_of_the_inverted_index 4 
    # term_freq_in_doc 5
    # doc_words 6
    # overallBM25Score 7 
    # rank_in_this_results_list_for_this_query 8 
    # term_freq_in_queries 9
    # TOP10Label 10
    # posting_rank_in_list 11 
    # posting_rank_in_doc 12
    # term_freq_in_collection 13
    
    
    queryID = lineElements[0]
    trecID =  lineElements[1]
    overallBM25Score = lineElements[7]
    rank_in_this_results_list_for_this_query = lineElements[8]
    outputLine = queryID + " " + rank_in_this_results_list_for_this_query + " " + overallBM25Score + " " + "N/A" + " " + trecID + "\n"
    if outputLine not in outputLineList:
        outputLineList.append(outputLine)

for line in outputLineList:
    outputFileHandler.write(line)

inputFileHandler.close()
outputFileHandler.close()
'''

'''
outputFilePathList = []
outputFilePathDict = {}

# for the machine pangolin
basePath = "/data/jhe/trecdata"

inputFileName = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/small_set_of_gov2_documents_needed_to_be_extracted_for_training_queries_sorted_by_trecID"
inputFileHandler = open(inputFileName,"r")

outputFileName = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/small_set_of_document_collection_files_for_training_queries_needed_to_be_processed"
outputFileHandler = open(outputFileName,"w")

# sample output line
# /data/jhe/trecdata/GX000/00.gz

for line in inputFileHandler.readlines():
    trecID = line.strip().split(" ")[4]
    trecIDElements = trecID.strip().split("-")
    folderName = trecIDElements[0]
    segmentName = trecIDElements[1]
    finalPath = basePath + "/" + folderName + "/" + segmentName + ".gz"
    if finalPath not in outputFilePathDict:
        outputFilePathDict[finalPath] = 1
        outputFilePathList.append(finalPath)
    else:
        # just do nothing
        pass

print "len(outputFilePathList):",len(outputFilePathList)
print "len(outputFilePathDict):",len(outputFilePathDict)

for filePath in outputFilePathList:
    outputFileHandler.write(filePath + "\n")
    # print filePath 


inputFileHandler.close()
outputFileHandler.close()
'''


'''
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/gov2DocumentWithTheirConnectedEdges"
inputFileHandler = open(inputFileName,"r")
# ignore the header line
inputFileHandler.readline()

dataLine = inputFileHandler.readline()
dataLineElements = dataLine.strip().split(" ")
docID = dataLineElements[0]
NUM_OF_EDGES = int( dataLineElements[1] )
for i in range(0,NUM_OF_EDGES,2):
    print "i:",i
    if dataLineElements[2+i] == dataLineElements[2 + i + 1]:
        print dataLineElements[2+i],dataLineElements[2 + i + 1]
    else:
        print "result NOT expected"
        print "dataLineElements[2+i]:",i,dataLineElements[2+i]
        print "dataLineElements[2 + i + 1]:",i,dataLineElements[2 + i + 1]
        exit(1)
'''

'''
# Rewrite this part 
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/queryTermPairNoDirectionProbabilityFor100KQueries"
inputFileHandler = open(inputFileName,"r")
oldHeadLine = inputFileHandler.readline()

outputFileName1 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/queryTermPairNoDirectionProbabilityFor100KQueries_NEW_sorted_by_term_pair"
outputFileHandler1 = open(outputFileName1,"w")

# do NOT write the header line because I need to use the command sort
# newHeadLine1 = "term_pair(default_sorted)" + " " + "freq_from_query_trace" + " " + "probability_from_query_trace" + "\n"
# outputFileHandler1.write(newHeadLine1)


indexCounter = 0
newTermPairList = []
newTermPairDict = {}

for oldLine in inputFileHandler.readlines():
    lineElements = oldLine.strip().split(" ")
    
    termPair = lineElements[0]
    termPairFreq = lineElements[1]
    termPairProbability = lineElements[2]
        
    termPairElements = termPair.split("_")
    term1 = termPairElements[0]
    term2 = termPairElements[1]
    
    testedKey1 = term1 + "_" + term2
    testedKey2 = term2 + "_" + term1
    if testedKey1 not in newTermPairDict and testedKey2 not in newTermPairDict:
        if term1 <= term2:
            newTermPairDict[testedKey1] = True
            tuple = (testedKey1,termPairFreq,termPairProbability)
            newTermPairList.append(tuple)
        else:
            newTermPairDict[testedKey2] = True
            tuple = (testedKey2,termPairFreq,termPairProbability)
            newTermPairList.append(tuple)
    else:
        pass

# write the info from the newTermPairList out.
newTermPairList.sort(cmp=None, key=itemgetter(0), reverse=False)
for index,tuple in enumerate(newTermPairList):
    (termPair, termPairFreq, termPairProbability) = tuple
    outputFileHandler1.write(str(index) + " " + termPair + " " + termPairFreq + " " + termPairProbability + "\n")

inputFileHandler.close()
outputFileHandler1.close()
'''



'''
# Rewrite this part 
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/queryTermPairNoDirectionProbabilityFor100KQueries"
inputFileHandler = open(inputFileName,"r")
oldHeadLine = inputFileHandler.readline()

outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/queryTermPairNoDirectionProbabilityFor100KQueries_NEW"
outputFileHandler = open(outputFileName,"w")
newHeadLine = oldHeadLine.strip() + " " + "popular_term_pair_without_order_unique_id" + "\n"
outputFileHandler.write(newHeadLine)

indexCounter = 0
for oldLine in inputFileHandler.readlines():
    outputFileHandler.write(oldLine.strip() + " " + str(indexCounter) + "\n")
    indexCounter += 1

inputFileHandler.close()
outputFileHandler.close()
'''

'''
# temp fix the problem in the file /home/diaosi/gov2ClearYourMindAndDoItAgain/queryTermThresholdsKeptBasedOnPercentage.txt. Add another column for the terms
inputFileName = "/home/diaosi/gov2ClearYourMindAndDoItAgain/queryTermThresholdsKeptBasedOnPercentage.txt"
inputFileHandler = open(inputFileName,"r")
oldHeaderLine = inputFileHandler.readline()
newHeaderLine = oldHeaderLine.strip() + " " + "1.0Kept" + "\n"

outputFileName = "/home/diaosi/gov2ClearYourMindAndDoItAgain/queryTermThresholdsKeptBasedOnPercentage_NEW.txt"
outputFileHandler = open(outputFileName,"w")
outputFileHandler.write(newHeaderLine) 

for line in inputFileHandler.readlines():
    outputFileHandler.write(line.strip() + " " + "0" + "\n")

inputFileHandler.close()
outputFileHandler.close()
'''


'''
inputFileName1 = "/home/diaosi/gov2ClearYourMindAndDoItAgain/queryTermsONLY.txt"
inputFileHandler1 = open(inputFileName1,"r")

inputFileName2 = "/home/diaosi/gov2ClearYourMindAndDoItAgain/queryTermProbabilityDistribution_sortedByTerm"
inputFileHandler2 = open(inputFileName2,"r")

for line in inputFileHandler1.readlines():
    termFromFile2 = inputFileHandler2.readline().strip().split(" ")[0]
    termFromFile1 = line.strip()
    if termFromFile2 == termFromFile1:
        pass
    else:
        print "NOT good"
        print "termFromFile1:",termFromFile1
        print "termFromFile2:",termFromFile2

print "pass"
'''


'''
inputFileName = "/home/diaosi/gov2ClearYourMindAndDoItAgain/Posting_Oriented_Training_Dataset_2013_01_10.train_tfqAdded_labelsAdded_sortedByTerm_rankInListAdded_sortedByDocID_rankInDocAdded_freqInCollectionAdded_sortedByQueryID"
inputFileHandler = open(inputFileName,"r")
oldHeadLine = inputFileHandler.readline()
newHeadLine = oldHeadLine

outputFileName = "/home/diaosi/gov2ClearYourMindAndDoItAgain/Posting_Oriented_Training_Dataset_2013_01_10.train_tfqAdded_labelsAdded_sortedByTerm_rankInListAdded_sortedByDocID_rankInDocAdded_freqInCollectionAdded_sortedByQueryID_fixed"
outputFileHandler = open(outputFileName,"w")
outputFileHandler.write(newHeadLine)

for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    newLine = ""
    for element in lineElements[0:10]:
        newLine += element + " "
    
    if lineElements[10] == "TOP10":
        newLine += "True" + " "
    elif lineElements[10] == "NotTOP10":
        newLine += "False" + " "
    
    for element in lineElements[11:14]:
        newLine += element + " "
    
    newLine += "\n"
    
    outputFileHandler.write( newLine )


inputFileHandler.close()
outputFileHandler.close()
'''

'''
inputFileName = "/home/diaosi/gov2ClearYourMindAndDoItAgain/wholeLexiconTermsWithTermFreqInCollection.txt"
inputFileHandler = open(inputFileName,"r")

outputFileName = "/home/diaosi/gov2ClearYourMindAndDoItAgain/lexiconTermsONLY.txt"
outputFileHandler = open(outputFileName,"w")

currentInputLine = inputFileHandler.readline()

while currentInputLine:
    term = currentInputLine.strip().split(" ")[0]
    outputFileHandler.write(term + "\n")
    currentInputLine = inputFileHandler.readline()

inputFileHandler.close()
outputFileHandler.close()
'''

'''
prefixLetterExistingDict = {}

basePath = "/home/diaosi/outputDirForTermScores"

print "give you some time to process"
# option1
inputFileName = "/home/diaosi/gov2ClearYourMindAndDoItAgain/wholeLexiconTermsWithTermFreqInCollection.txt"

# option2
# inputFileName = "/home/diaosi/gov2ClearYourMindAndDoItAgain/freqOfQueryTermsInCollection.txt"

inputFileHandler = open(inputFileName,"r")

# Read the file line by line (streaming) instead of loading it all at once.
currentLine = inputFileHandler.readline()

originalLineCounter = 0
while currentLine:
    originalLineCounter += 1
    prefixLetter = currentLine.strip()[0]
    neededToCheckPath = basePath + "/" + prefixLetter
    if prefixLetter in prefixLetterExistingDict:
        # do NOT need to check anything
        pass
    else:
        if os.path.exists(neededToCheckPath):
            prefixLetterExistingDict[prefixLetter] = True
        else:
            print neededToCheckPath
            exit(1)
    currentLine = inputFileHandler.readline()


inputFileHandler.close()
print "originalLineCounter:",originalLineCounter
print "prefixLetterExistingDict:",prefixLetterExistingDict
print "len(prefixLetterExistingDict):",len(prefixLetterExistingDict)
'''

'''
#step0
def makeTheCorrespondingFolders():
    letterList = ["0","1","2","3","4","5","6","7","8","9","a","b","c","d","e","f","g","h","i","j","k","l","m","n","o","p","q","r","s","t","u","v","w","x","y","z"]
    print "step0"
    print "make the corresponding folders."
    for letter in letterList:
        directory = "/home/diaosi/outputDirForTermScores/" + letter 
        print directory
        os.mkdir(directory)

makeTheCorrespondingFolders()
'''

'''
def multiply(a,b):
    print "Will compute", a, "times", b
    c = 0
    for i in range(0, a):
        c = c + b
    return c

def hello():
    print "hello from Python"
'''

'''
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/Posting_Oriented_Training_Dataset_2013_01_10.train_tfqAdded_labelsAdded_tail_500K_weiAllPreparedFeaturesAdded_fixed_20130116"
inputFileHandler = open(inputFileName,"r")
infoHeadLine = inputFileHandler.readline()

outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/Posting_Oriented_Training_Dataset_2013_01_10.train_tfqAdded_labelsAdded_tail_500K_weiAllPreparedFeaturesAdded_fixed_20130116_sorted_by_rank_in_list"
outputFileHandler = open(outputFileName,"w")
outputFileHandler.write(infoHeadLine)

allLinesList = []

for line in inputFileHandler.readlines():
    currentLineElements = line.strip().split(" ")
    modifiedCurrentLineElementsList = []
    modifiedCurrentLineElementsList.append( int( currentLineElements[0] ) )
    modifiedCurrentLineElementsList += currentLineElements[1:]
    # print "modifiedCurrentLineElementsList:",modifiedCurrentLineElementsList
    allLinesList.append(modifiedCurrentLineElementsList)

print "len(allLinesList):",len(allLinesList)

# Make sure the same sorting method is used everywhere.
# NOTE(review): itemgetter(21) sorts on a string field, so the order is
# lexicographic, not numeric -- only element [0] was converted to int above.
allLinesList.sort(cmp=None, key=itemgetter(21), reverse=False)
# allLinesList.sort(cmp=None, key=itemgetter(1), reverse=False)
# allLinesList.sort(cmp=None, key=itemgetter(0), reverse=False)

for lineElements in allLinesList:
    outputLine = ""
    outputLine += str( lineElements[0] ) + " "
    for element in lineElements[1:]:
        outputLine += element + " "
    outputLine = outputLine.strip() + "\n"
    outputFileHandler.write( outputLine )

print "OK"
inputFileHandler.close()
outputFileHandler.close()
'''

'''
inputDataSourceFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/rawTermInvertedIndexOutput_with_trecID_added.txt"
inputDataSourceFileHandler = open(inputDataSourceFileName,"r")
inputDataSourceFileHandler.seek(115362260628)

currentLine = inputDataSourceFileHandler.readline()
while currentLine.strip().split(" ")[0] != "GX019-35-1502414":
    # print currentLine.strip().split(" ")[0]
    currentLine = inputDataSourceFileHandler.readline()
print "currentLine:",currentLine
'''

'''
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/Posting_Oriented_Training_Dataset_2013_01_10.train_tfqAdded_labelsAdded_tail_500K_weiAllPreparedFeaturesAdded.OLD"
inputFileHandler = open(inputFileName,"r")
oldInfoHeadLine = inputFileHandler.readline()
oldInfoHeadLineElements = oldInfoHeadLine.strip().split(" ")
for index,element in enumerate(oldInfoHeadLineElements):
    print index,element
    
print

newInfoHeadLine = ""
for element in oldInfoHeadLineElements[:20]:
    newInfoHeadLine += element + " "

for element in oldInfoHeadLineElements[21:]:
    newInfoHeadLine += element + " "

newInfoHeadLine += "TOP10Label" + " " + "TOP50Label" + " " + "TOP100Label" + " " + "TOP11To50Label" + " " + "TOP51To100Label" + " " + "\n"

# NOTE(review): str.strip() returns a new string; this call discards its
# result and leaves newInfoHeadLine unchanged (it still ends with " \n").
newInfoHeadLine.strip()

newInfoHeadLineElements = newInfoHeadLine.strip().split(" ")
for index,element in enumerate(newInfoHeadLineElements):
    print index,element

outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/Posting_Oriented_Training_Dataset_2013_01_10.train_tfqAdded_labelsAdded_tail_500K_weiAllPreparedFeaturesAdded"
outputFileHandler = open(outputFileName,"w")
outputFileHandler.write(newInfoHeadLine)

for line in inputFileHandler.readlines():
    newOutputLine = ""
    lineElements = line.strip().split(" ")
    for element in lineElements[:20]:
        newOutputLine += element + " "
    
    for element in lineElements[21:]:
        newOutputLine += element + " "
    
    # NOTE(review): a row whose lineElements[20] matches none of the three
    # labels below falls through and gets NO label columns appended -- when
    # sampling adds more label values, an else branch is needed here.
    if lineElements[20] == "TOP10":
        newOutputLine += "True" + " " + "True" + " " + "True" + " " + "False" + " " + "False"
    elif lineElements[20] == "TOP11To50":
        newOutputLine += "False" + " " + "True" + " " + "True" + " " + "True" + " " + "False"
    elif lineElements[20] == "TOP51To100":
        newOutputLine += "False" + " " + "False" + " " + "True" + " " + "False" + " " + "True"
    
    newOutputLine += "\n"

    outputFileHandler.write(newOutputLine)

inputFileHandler.close()
outputFileHandler.close()
'''

'''
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/Posting_Oriented_Training_Dataset_2013_01_10.train_tfqAdded_labelsAdded_tail_500K_weiAllPreparedFeaturesAdded"
inputFileHandler = open(inputFileName,"r")
infoHeadLine = inputFileHandler.readline()
infoHeadLineElements = infoHeadLine.strip().split(" ")

dataLine = inputFileHandler.readline()
dataLineElements = dataLine.strip().split(" ")

for index,element in enumerate( infoHeadLineElements ):
    print index,element,dataLineElements[index] 
''' 
 


'''
for index,line in enumerate( inputFileHandler.readlines() ):
    if len( line.strip().split(" ") ) == 23:
        pass
    else:
        print "index:",index

print "Test Pass"
'''



'''
outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/Posting_Oriented_Training_Dataset_2013_01_10.train_tfqAdded_labelsAdded_tail_500K_sorted_by_term_rankInInvertedListAdded"
outputFileHandler = open(outputFileName,"w")

inputBasePath = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/parallel_work_for_feature_Rank_in_the_inverted_list/"
for currentPartNumber in range(1,11):
    fileName = "Posting_Oriented_Training_Dataset_2013_01_10.train_tfqAdded_labelsAdded_tail_500K_sorted_by_term_part" + "%02d" % currentPartNumber + "_rankInInvertedListAdded"
    absoluteFileName = inputBasePath + fileName
    currentInputFileHandler = open(absoluteFileName,"r")
    currentInputFileHandler.readline()
    
    for line in currentInputFileHandler.readlines():
        outputFileHandler.write(line)
    
    currentInputFileHandler.close()


outputFileHandler.close()
'''


'''
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/Posting_Oriented_Training_Dataset_2013_01_10.train_tfqAdded_labelsAdded_tail_500K_weiHLFeaturesAdded_OLD"
inputFileHandler = open(inputFileName,"r")
infoHeadLine = inputFileHandler.readline()

outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/Posting_Oriented_Training_Dataset_2013_01_10.train_tfqAdded_labelsAdded_tail_500K_weiHLFeaturesAdded"
outputFileHandler = open(outputFileName,"w")
outputFileHandler.write(infoHeadLine)

for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    newRankInDoc = int( lineElements[-1] ) + 1
    newLine = ""
    for element in lineElements[:-1]:
        newLine += element + " "
    newLine += str(newRankInDoc) + " "
    # NOTE(review): no-op -- strip() returns a new string that is discarded;
    # newLine keeps its trailing space.
    newLine.strip()
    outputFileHandler.write(newLine + "\n")
'''

'''
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/Posting_Oriented_Training_Dataset_2013_01_10.train_tfqAdded_labelsAdded_tail_500K_tail_1K_verify_weiHLFeaturesAdded_verify"
inputFileHandler = open(inputFileName,"r")
inputFileHandler.readline()

for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    queryID = lineElements[0]
    trecID = lineElements[1]
    term = lineElements[2]
    rank_in_the_doc_from_High_Level = lineElements[21]
    rank_in_the_doc_from_polyIRToolkit = lineElements[23]
    print queryID,trecID,term,rank_in_the_doc_from_High_Level,rank_in_the_doc_from_polyIRToolkit
    # print line.strip()
    # print len( line.strip().split(" ") )
'''  


'''
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/Posting_Oriented_Training_Dataset_2013_01_10.train_tfqAdded_labelsAdded_tail_500K_weiHLFeaturesAdded"
inputFileHandler = open(inputFileName,"r")
inputFileHandler.readline()

inputFileName2 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/Posting_Oriented_Training_Dataset_2013_01_10.train_tfqAdded_labelsAdded_tail_500K"
inputFileHandler2 = open(inputFileName2,"r")
inputFileHandler2.readline()

for index,line in enumerate( inputFileHandler2.readlines() ):
    comparedLine = inputFileHandler.readline()
    lineElements = line.strip().split(" ")
    
    modifyLine = ""
    for element in lineElements[:-1]:
        modifyLine += element + " "
    modifyLine = modifyLine.strip()
    
    if comparedLine.startswith(modifyLine):
        pass
    else:
        print "index:",index
        print "newLine:",comparedLine.strip()
        print "oldLine:",line.strip()
        print
        exit()

print "Test Pass"
'''    


'''
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/freqOfQueryTermsInCollection.txt"
inputFileHandler = open(inputFileName,"r")

outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/queryTermsONLY.txt"
outputFileHandler = open(outputFileName,"w")

for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    term = lineElements[0]
    outputFileHandler.write(term + "\n")

inputFileHandler.close()
outputFileHandler.close()
'''


'''
# The purpose of this part of the code: for every term in the query trace,
# look up its frequency in the collection lexicon and write "term freq" lines
# (freq 0, with a warning, when the lexicon lacks the term).
lexiconTermDict = {}

outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/freqOfQueryTermsInCollection.txt"
outputFileHandler = open(outputFileName,"w")
  
inputFileName2 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/wholeLexiconTermsWithTermFreqInCollection.txt"
inputFileHandler2 = open(inputFileName2,"r")
for line in inputFileHandler2.readlines():
    lineElements = line.strip().split(" ")
    term = lineElements[0]
    freqInCollection = int( lineElements[1] )
    if term not in lexiconTermDict:
        lexiconTermDict[term] = freqInCollection        
print "len(lexiconTermDict):",len(lexiconTermDict)

inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/freqOfTermsInQueries.txt"
inputFileHandler = open(inputFileName,"r")
for line in inputFileHandler.readlines():
    newLine = ""
    term = line.strip().split(" ")[0]
    if term in lexiconTermDict:
        newLine = term + " " + str( lexiconTermDict[term] ) + "\n"
    else:
        print "lexicon do NOT have the term:",term
        newLine = term + " " + "0" + "\n"
    outputFileHandler.write(newLine)
inputFileHandler.close()
inputFileHandler2.close()
outputFileHandler.close()
'''




'''
numOfDatalinesForEachFile = 50000
numberOfFiles = 10

inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/Posting_Oriented_Training_Dataset_2013_01_10.train_tfqAdded_labelsAdded_tail_500K_sorted_by_term"
inputFileHandler = open(inputFileName,"r")
oldInfoHeadLine = inputFileHandler.readline()

currentPartNumber = 1

for index,line in enumerate( inputFileHandler.readlines() ):
    # NOTE(review): 50000 is hard-coded here although numOfDatalinesForEachFile
    # is defined above for this purpose -- keep the two in sync.
    if index % 50000 == 0:
        currentOutputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/parallel_work_for_feature_Rank_in_the_inverted_list/" + "Posting_Oriented_Training_Dataset_2013_01_10.train_tfqAdded_labelsAdded_tail_500K_sorted_by_term" + "_part" + "%02d" % currentPartNumber
        currentOutputFileHandler = open(currentOutputFileName,"w")
        currentOutputFileHandler.write( oldInfoHeadLine.strip() + " (Part%02dOutof10)" % currentPartNumber + "\n")
        currentPartNumber += 1
    currentOutputFileHandler.write( line )
'''

'''
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/Posting_Oriented_Training_Dataset_2013_01_10.train_tfqAdded_labelsAdded_tail_500K"
inputFileHandler = open(inputFileName,"r")
infoHeadLine = inputFileHandler.readline()

outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/Posting_Oriented_Training_Dataset_2013_01_10.train_tfqAdded_labelsAdded_tail_500K" + "_sorted_by_term"
outputFileHandler = open(outputFileName,"w")
outputFileHandler.write(infoHeadLine)

allLinesList = []

for line in inputFileHandler.readlines():
    currentLineElements = line.strip().split(" ")
    allLinesList.append(currentLineElements)

print "len(allLinesList):",len(allLinesList)

allLinesList.sort(cmp=None, key=itemgetter(2), reverse=False)
for lineElements in allLinesList:
    outputLine = ""
    for element in lineElements:
        outputLine += element + " "
    outputLine = outputLine.strip() + "\n"
    outputFileHandler.write( outputLine )

print "OK"
inputFileHandler.close()
outputFileHandler.close()
'''

'''
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/Posting_Oriented_Training_Dataset_2013_01_10.train_tfqAdded_labelsAdded"
inputFileHandler = open(inputFileName,"r")
infoHeadLine = inputFileHandler.readline()

outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/Posting_Oriented_Training_Dataset_2013_01_10.train_tfqAdded_labelsAdded" + "_sorted_by_trecID"
outputFileHandler = open(outputFileName,"w")
outputFileHandler.write(infoHeadLine)

allLinesList = []

for line in inputFileHandler.readlines():
    currentLineElements = line.strip().split(" ")
    allLinesList.append(currentLineElements)

print "len(allLinesList):",len(allLinesList)

allLinesList.sort(cmp=None, key=itemgetter(1), reverse=False)
for lineElements in allLinesList:
    outputLine = ""
    for element in lineElements:
        outputLine += element + " "
    outputLine = outputLine.strip() + "\n"
    outputFileHandler.write( outputLine )

print "OK"
inputFileHandler.close()
outputFileHandler.close()
'''

'''
inputFileName = "/data5/team/obukai/the_new_trip_of_feature_generation/gov2/20121215Posting_Oriented_Balanced_Training_Dataset.txt.input"
inputFileHandler = open(inputFileName,"r")

numTop10Count = 0
numNotTop10Count = 0


for line in inputFileHandler.readlines():
    if line.strip().split(" ")[9] == "TOP10":
        numTop10Count += 1
    elif line.strip().split(" ")[9] == "NOTTOP10":
        numNotTop10Count += 1

print "numTop10Count:",numTop10Count
print "numNotTop10Count:",numNotTop10Count
'''

'''
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/alreadyFinishedWork20121202/rankInListFeature/allInfoAboutRankInList"
inputFileHandler = open(inputFileName,"r")

postingRankDict = {}
tempCounter = 0

for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    key = lineElements[0] + "_" + lineElements[1] + "_" + lineElements[2]
    if key not in postingRankDict:
        postingRankDict[key] = lineElements[3]
    else:
        print "Duplicated posting, key shown:",key
        tempCounter += 1

print "len(postingRankDict):",len(postingRankDict)
print "tempCounter:",tempCounter

outputFileName = "/data5/team/obukai/the_new_trip_of_feature_generation/gov2/trainingIntermediateFile20121205.txt"
outputFileHandler = open(outputFileName,"w")

inputFileName2 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/alreadyFinishedWork20121202/rankInDocFeature/allInfoAboutRankInDoc"
inputFileHandler2 = open(inputFileName2,"r")
for line in inputFileHandler2.readlines():
    lineElements = line.strip().split(" ")
    accessKey = lineElements[0] + "_" + lineElements[1] + "_" + lineElements[2]
    outputFileHandler.write( line.strip() + " " + postingRankDict[accessKey] + "\n")

outputFileHandler.close()
'''

'''
outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/alreadyFinishedWork20121202/rankInListFeature/allInfoAboutRankInList"
outputFileHandler = open(outputFileName,"w")

inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/alreadyFinishedWork20121202/rankInListFeature/wholeTrainingFileTempleteGov2V1_phase0_posting_oriented_term_freq_inCollection_added_ordered_by_query_term_20121130_simplified.txt.input_beginning_part0_add_rank_in_list"
print inputFileName
inputFileHandler = open(inputFileName,"r")

for line in inputFileHandler.readlines():
    outputFileHandler.write(line)

for i in range(1,21):
    currentInputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/alreadyFinishedWork20121202/rankInListFeature/wholeTrainingFileTempleteGov2V1_phase0_posting_oriented_term_freq_inCollection_added_ordered_by_query_term_20121130_simplified.txt.input_left_part" + "%02d" % i + "_add_rank_in_list"
    # print currentInputFileName
    
    currentInputFileHandler = open(currentInputFileName,"r")
    for line in currentInputFileHandler.readlines():
        outputFileHandler.write(line)
    
outputFileHandler.close()
'''

'''
outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/alreadyFinishedWork20121202/rankInDocFeature/allInfoAboutRankInDoc"
outputFileHandler = open(outputFileName,"w")

inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/alreadyFinishedWork20121202/rankInDocFeature/wholeTrainingFileTempleteGov2V1_phase0_posting_oriented_term_freq_inCollection_added.txt.input_beginning_part00_rank_in_document"
inputFileHandler = open(inputFileName,"r")

for line in inputFileHandler.readlines():
    outputFileHandler.write(line)

for i in range(1,11):
    currentInputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/alreadyFinishedWork20121202/rankInDocFeature/wholeTrainingFileTempleteGov2V1_phase0_posting_oriented_term_freq_inCollection_added.txt.input_left_part" + "%02d" % i + "_rank_in_document"
    currentInputFileHandler = open(currentInputFileName,"r")
    for line in currentInputFileHandler.readlines():
        outputFileHandler.write(line)

outputFileHandler.close()
'''


'''
inputFileName1 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/alreadyFinishedWork20121202/rankInDocFeature/wholeTrainingFileTempleteGov2V1_phase0_posting_oriented_term_freq_inCollection_added.txt.input_left_part10"
inputFileHandler1 = open(inputFileName1,"r")

inputFileName2 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/alreadyFinishedWork20121202/rankInDocFeature/wholeTrainingFileTempleteGov2V1_phase0_posting_oriented_term_freq_inCollection_added.txt.input_left_part10_rank_in_document"
inputFileHandler2 = open(inputFileName2,"r")

file1Lines = inputFileHandler1.readlines()
file2Lines = inputFileHandler2.readlines()

for index,line in enumerate(file1Lines):
    if line.strip().split(" ")[0] == file2Lines[index].strip().split(" ")[0] and line.strip().split(" ")[1] == file2Lines[index].strip().split(" ")[1] and line.strip().split(" ")[2] == file2Lines[index].strip().split(" ")[2]:
        pass
    else:
        print "sth wrong."
        print "len(file1Lines):",len(file1Lines)
        print "len(file2Lines):",len(file2Lines)
        print "index:",index
        print "line from file1:",line.strip()
        print "line from file2:",file2Lines[index].strip()
        exit(1)
        
print "all pass"
'''



'''
# Split the input file into the number of parts specified on the command line.

print "len(sys.argv):",len(sys.argv)

if len(sys.argv) != 2:
    print "Illegal # of arguments"
    print "Usage: python programName.py #ofFilesWantToSplit"
    exit(1)
   
numberOfFilesWantToSplit = int( sys.argv[1] )

#option1
#inputFileName1 = "/data5/team/obukai/the_new_trip_of_feature_generation/gov2/wholeTrainingFileTempleteGov2V1_phase0_posting_oriented_term_freq_inCollection_added.txt.input_left"

#option2
inputFileName1 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/wholeTrainingFileTempleteGov2V1_phase0_posting_oriented_term_freq_inCollection_added_ordered_by_query_term_20121130_simplified.txt.input_left"

inputFileHandler1 = open(inputFileName1,"r")

totalNumberOfLines = len( inputFileHandler1.readlines() )
print "totalNumberOfLines:",totalNumberOfLines
numberOfLinesPerFile = math.ceil( totalNumberOfLines/numberOfFilesWantToSplit )
print "numberOfLinesPerFile:",numberOfLinesPerFile

inputFileHandler1.close()

inputFileHandler1 = open(inputFileName1,"r")

i = 0

for index,line in enumerate( inputFileHandler1.readlines() ):
    if index % numberOfLinesPerFile == 0:
        i += 1
        currentOutputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/wholeTrainingFileTempleteGov2V1_phase0_posting_oriented_term_freq_inCollection_added_ordered_by_query_term_20121130_simplified.txt.input_left" + "_part%02d" % i
        currentOutputFileHandler = open(currentOutputFileName,"w")        
    
    currentOutputFileHandler.write(line)

inputFileHandler1.close()
'''

'''
inputFileName1 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/alreadyFinishedWork20121202/wholeTrainingFileTempleteGov2V1_phase0_posting_oriented_term_freq_inCollection_added.txt.input_part0"
inputFileHandler1 = open(inputFileName1,"r")

inputFileName2 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/alreadyFinishedWork20121202/wholeTrainingFileTempleteGov2V1_phase0_posting_oriented_term_freq_inCollection_added_rank_in_document_added.txt.input_part0"
inputFileHandler2 = open(inputFileName2,"r")

lineNumber = 0

currentLineForFile1 = inputFileHandler1.readline()
currentLineForFile2 = inputFileHandler2.readline()
lineNumber += 1

while currentLineForFile1.strip().split(" ")[0] == currentLineForFile2.strip().split(" ")[0] and currentLineForFile1.strip().split(" ")[1] == currentLineForFile2.strip().split(" ")[1] and currentLineForFile1.strip().split(" ")[2] == currentLineForFile2.strip().split(" ")[2]:
    currentLineForFile1 = inputFileHandler1.readline()
    currentLineForFile2 = inputFileHandler2.readline()
    
    if currentLineForFile1.strip() == "" and currentLineForFile2.strip() == "":
        break

    lineNumber += 1

print "lineNumber:",lineNumber
print "currentLineForFile1:",currentLineForFile1
print "currentLineForFile2:",currentLineForFile2


inputFileHandler1.close()
inputFileHandler2.close()
'''



'''
duplicatedTupleCheckDict = {}

inputFileName = "/data5/team/obukai/the_new_trip_of_feature_generation/gov2/wholeTrainingFileTempleteGov2V1_phase0_posting_oriented_term_freq_inCollection_added.txt.input"
inputFileHandler = open(inputFileName,"r")

counter = 0

for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    queryID = lineElements[0]
    trecID = lineElements[1]
    term = lineElements[2]
    tuple = (queryID, trecID, term)
    if tuple not in duplicatedTupleCheckDict:
        duplicatedTupleCheckDict[tuple] = 1
    else:
        duplicatedTupleCheckDict[tuple] += 1
        print "shit, this is duplicate:",tuple
        counter += 1

print "counter:",counter
inputFileHandler.close()
'''


'''
traingTupleList = []

inputFileName = "/data5/team/obukai/the_new_trip_of_feature_generation/gov2/wholeTrainingFileTempleteGov2V1_phase0_posting_oriented_term_freq_inCollection_added.txt.input"
inputFileHandler = open(inputFileName,"r")


# This is for generating the features of rank_in_inverted_index
outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/wholeTrainingFileTempleteGov2V1_phase0_posting_oriented_term_freq_inCollection_added_ordered_by_query_term_20121130_simplified.txt.input"
outputFileHandler = open(outputFileName,"w")

for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    simplifiedTuple = (lineElements[0],lineElements[1],lineElements[2])
    traingTupleList.append(simplifiedTuple)

print "len(traingTupleList):",len(traingTupleList)

traingTupleList_sorted = sorted(traingTupleList, key=itemgetter(2))

for tuple in traingTupleList_sorted:
    # print tuple
    (queryID, trecID, term) = tuple
    # print queryID,trecID,term
    outputFileHandler.write(queryID + " " + trecID + " " + term + "\n")

inputFileHandler.close()
outputFileHandler.close()

print "DONE"
'''


'''
inputFileName = "/home/obukai/workspace/polyIRToolkit/polyIRIndexer/allQueriesIncludingHumanJudgedAndEffeciencyTask"
inputFileHandler = open(inputFileName,"r")

outputFileName = "/home/obukai/workspace/polyIRToolkit/polyIRIndexer/allQueriesIncludingHumanJudgedAndEffeciencyTask_NEW20121128.txt"
outputFileHandler = open(outputFileName,"w")

totalNumberOfLine = 147

for line in inputFileHandler.readlines():
    if totalNumberOfLine > 0:
        lineElements = line.strip().split(":")
        
        originalQueryID = int( lineElements[0] )
        newQueryID = originalQueryID + 100000
        
        print originalQueryID,lineElements[1]
        print newQueryID,lineElements[1]
        
        totalNumberOfLine -= 1
        outputFileHandler.write( str( newQueryID ) + ":" + lineElements[1] + "\n")
        
    else:
        outputFileHandler.write( line.strip() + "\n" )

inputFileHandler.close()
outputFileHandler.close()
'''



'''
# Make the query-related lexicon term frequencies real (no longer fake) by
# loading the actual per-term collection frequencies from the lexicon file.
# The involved file includes the following file: "/data5/team/obukai/the_new_trip_of_feature_generation/gov2/final-fake-queryTermsCollectionFreqs.txt" 

lexiconTermFreqInCollectionDict = {}

inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/wholeLexiconTermsWithTermFreqInCollection.txt"
inputFileHandler = open(inputFileName,"r")

currentLine = inputFileHandler.readline()

while currentLine:
    lineElements = currentLine.strip().split(" ")
    
    if len( lineElements ) == 2:
        if lineElements[0].strip() not in lexiconTermFreqInCollectionDict:
            lexiconTermFreqInCollectionDict[lineElements[0].strip()] = int( lineElements[1].strip() )
    else:
        exit(1)
    
    currentLine = inputFileHandler.readline()

print "All passed"
print "lexiconTermFreqInCollectionDict['0']:",lexiconTermFreqInCollectionDict['0']
print "len( lexiconTermFreqInCollectionDict ):",len( lexiconTermFreqInCollectionDict )
'''

'''
print "make the corresponding folders."
for i in range(0,273):
    directory = "/data3/obukai/human_judge_web_pages_gov2_ALL/" + "GX%03d" % i
    print directory
    os.mkdir(directory)
'''


# DISABLED — dead code in a triple-quoted string; never executed.
# Merges two priorityTrecDocumentsIDs files, counting occurrences of the second
# space-separated field (the TREC doc ID) in allRelevenceDocs.  Also opens the
# output file handle that the later (also disabled) sort-and-write snippet uses.
'''
allRelevenceDocs = {}
allRelevenceDocsList = []

inputFileNameList = []
inputFileName1 = "/data5/team/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/priorityTrecDocumentsIDs_all_sorted_with_index_number_BM25_oriented.txt"
inputFileName2 = "/data1/team/obukai/machine-learning-project-related/trec-related/trecCombine04_06/priorityTrecDocumentsIDs_all_sorted_with_index_number.txt"

outputFileName = "/data1/team/obukai/machine-learning-project-related/trec-related/trecCombine04_06/priorityTrecDocumentsIDs_include_approximating_BM25_AND_human_judged.txt"
outputFileHandler = open(outputFileName,"w")

inputFileNameList.append(inputFileName1)
inputFileNameList.append(inputFileName2)

for name in inputFileNameList:
    inputFileHandler = open(name,"r")
    for line in inputFileHandler.readlines():
        lineElements = line.strip().split(" ")
        if lineElements[1] not in allRelevenceDocs:
            allRelevenceDocs[lineElements[1]] = 1
        else:
            allRelevenceDocs[lineElements[1]] += 1
            
print "len(allRelevenceDocs):",len(allRelevenceDocs)
'''



# DISABLED — dead code in a triple-quoted string; never executed.
# Sanity check over allRelevenceDocs (built by the disabled merge snippet above
# it in the original script): flags any doc ID that does not start with "GX".
'''
for docID in allRelevenceDocs:
    if docID.startswith("GX"):
        pass
    else:
        print "NOT passed:",docID
'''


# DISABLED — dead code in a triple-quoted string; never executed.
# Sorts the merged doc IDs and writes "index docID -1" lines.  Uses the
# Python 2-only list.sort(cmp=...) keyword, which no longer exists in Python 3.
'''
allRelevenceDocsList = allRelevenceDocs.keys()
allRelevenceDocsList.sort(cmp=None, key=None, reverse=False)

for index,docID in enumerate( allRelevenceDocsList ):
    outputFileHandler.write( str( index ) + " " + docID + " " + "-1" + "\n")


inputFileHandler.close()
outputFileHandler.close()
'''


# DISABLED — dead code in a triple-quoted string; never executed.
# Joins each feature line (queryID trecID ...) with its relevance score from the
# qrels file, keyed by "queryID_trecID", and appends the score as a final column.
# Note (dead code): judgeDict[key] would raise KeyError for any feature line
# whose (queryID, trecID) pair is absent from the qrels file.
'''
inputFileName = "/data5/team/obukai/the_new_trip_of_feature_generation/gov2/wholeTrainingFileTempleteGov2V1_phase0_posting_oriented.txt.output"
inputFileHandler = open(inputFileName,"r")

inputFileName2 = "/data1/team/obukai/machine-learning-project-related/learningToPrune/qrels.tb04-tb06.top150_CONTAINS_ONLY_147"
inputFileHandler2 = open(inputFileName2,"r")

outputFileName = "/data5/team/obukai/the_new_trip_of_feature_generation/gov2/wholeTrainingFileTempleteGov2V1_phase0_posting_oriented_with_label.txt.output"
outputFileHandler = open(outputFileName,"w")

judgeDict = {}

for line in inputFileHandler2.readlines():
    lineElements = line.strip().split(" ")
    queryID = lineElements[0]
    trecID = lineElements[2]
    relevenceScore = lineElements[3]
    
    key = queryID + "_" + trecID
    if key not in judgeDict:
        judgeDict[key] = relevenceScore

print "len(judgeDict):",len(judgeDict)
# print "judgeDict['850_GX265-48-6314208']:",judgeDict['850_GX265-48-6314208']
    
for line in inputFileHandler.readlines():
    outputLine = ""
    lineElements = line.strip().split(" ")
    queryID = lineElements[0]
    trecID = lineElements[1]
    key = queryID + "_" + trecID 
    outputLine = line.strip() + " " + judgeDict[key]
    outputFileHandler.write(outputLine + "\n")
    # print "outputLine:",outputLine

inputFileHandler.close()
inputFileHandler2.close()
outputFileHandler.close()
'''

# DISABLED — dead code in a triple-quoted string; never executed.
# Reads the first line of the labeled training file and prints the human-readable
# name of each of its 21 space-separated fields (queryID, trecID, 19 features /
# scores, and the trailing relevance label).  Serves as inline documentation of
# the training-file column layout.
'''
inputFileName = "/data5/team/obukai/the_new_trip_of_feature_generation/gov2/wholeTrainingFileTempleteGov2V1_phase0_posting_oriented_with_label.txt.output"
inputFileHandler = open(inputFileName,"r")

line = inputFileHandler.readline()
lineElements = line.strip().split(" ")
print "len(lineElements):",len(lineElements)
for index,element in enumerate(lineElements):
    if index == 0:
        print index,"queryID:",element
    elif index == 1:
        print index,"trecID:",element
    elif index == 2:
        print index,"postingTerm:",element
    elif index == 3:
        print index,"doc_words:",element
    elif index == 4:
        print index,"doc_distinct_words:",element
    elif index == 5:
        print index,"text_size:",element
    elif index == 6:
        print index,"script_size:",element
    elif index == 7:
        print index,"script_text_ratio:",element
    elif index == 8:
        print index,"doc_outlinks:",element
    elif index == 9:
        print index,"current_term_col_freq:",element
    elif index == 10:
        print index,"whether_current_term_in_header:",element
    elif index == 11:
        print index,"whether_current_term_in_title:",element
    elif index == 12:
        print index,"whether_current_term_in_bold:",element
    elif index == 13:
        print index,"whether_current_term_in_url:",element
    elif index == 14:
        print index,"whether_current_term_in_italic:",element
    elif index == 15:
        print index,"current_term_freq_in_doc:",element
    elif index == 16:
        print index,"current_term_rel_freq_in_doc:",element   
    elif index == 17:
        print index,"current_term_BM25(BM25 Score for this term):",element
    elif index == 18:
        print index,"current_term_QL(Language Model Score for this term):",element
    elif index == 19:
        print index,"current_term_distribution(Term Distribution Score for this term):",element
    elif index == 20:
        print index,"relevence score label:",element
    else:
        print index,element
'''


# DISABLED — dead code in a triple-quoted string; never executed.
# Reads the error-message file in fixed 5-line records and prints the doc-length
# line of each record.  Note (dead code): the `while True` has no EOF break, so
# once input is exhausted it would loop forever printing empty strings.
'''
inputFileName = "/home/obukai/workspace/polyIRToolkit/scripts/src/pythonScripts/gov2/tempErrorMessage_need_to_handled.txt"
inputFileHandler = open(inputFileName,"r")

while True:
    indexLine = inputFileHandler.readline()
    documentSourceLine = inputFileHandler.readline()
    parsedStateLine = inputFileHandler.readline()
    docLengthLine = inputFileHandler.readline()
    emptyLine = inputFileHandler.readline()
    
    print "doc length:",docLengthLine
'''    



# DISABLED — dead code in a triple-quoted string; never executed.
# Python 2 feature-extraction script in two parts:
#   1) MyHTMLParser(HTMLParser): accumulates, per document, word lists and count
#      dicts for head/title/b|strong/a/i|em tag content, overall word lists,
#      text vs. script character sizes, and a count of href attributes
#      (doc_outlinks); generate_statistics_report() lazily initialises the
#      accumulators and computes script/text ratio.
#   2) Driver: for each TREC doc ID in the priority list, os.walk()s the
#      human_judge_web_pages_gov2/<segment> directory for files whose names
#      start with that ID, feeds each file to a fresh parser, and logs
#      HTMLParseError / UnicodeDecodeError cases to tempErrorMessage.txt.
# Relies on Python 2-only features: `print` statements, `except X,e` syntax,
# and HTMLParser.HTMLParseError (removed in Python 3.5).
'''
import random
import math
import os
from HTMLParser import HTMLParser
from HTMLParser import HTMLParseError

###########################################################################################################################class begin...
class MyHTMLParser(HTMLParser):
    def handle_starttag(self, tag, attrs):
        # print "Start tag:", tag
        # set the current_tag to tag
        self.current_tag = tag
        
        if hasattr(self, 'doc_outlinks'):
            pass
        else:
            self.doc_outlinks = 0
        
        for attr in attrs:
            (attr_name, attr_value) = attr
            if attr_name == "href":
                #print "     attr:", attr
                self.doc_outlinks += 1
    def handle_endtag(self, tag):
        #print "End tag  :", tag
        # set back the current_tag to ""
        self.current_tag = ""
        
    def handle_data(self, data):
        if hasattr(self, 'current_tag'):
            # step0: pre-processing
            for i in range(0,len(data)):
                # print "data[i]:",ord(data[i])
                if not ( (ord(data[i]) >= 48 and ord(data[i]) < 58) or (ord(data[i]) >= 65 and ord(data[i]) < 91) or (ord(data[i]) >= 97 and ord(data[i]) < 123) or (ord(data[i]) == 32) ):
                    # Just replace them with a space.
                    data = data[:i] + " " + data[i+1:]
            
            if hasattr(self, 'header_words'):
                pass
            else:
                self.header_words = []
                self.header_words_dict = {}

            if hasattr(self, 'title_words'):
                pass
            else:
                self.title_words = []
                self.title_words_dict = {}
            
            if hasattr(self, 'b_or_strong_words'):
                pass
            else:
                self.b_or_strong_words = []
                self.b_or_strong_words_dict = {}          
            
            if hasattr(self, 'a_words'):
                pass
            else:
                self.a_words = []
                self.a_words_dict = {}
            
            if hasattr(self, 'i_or_em_words'):
                pass
            else:
                self.i_or_em_words = []
                self.i_or_em_words_dict = {}                       
            
                        
            # step1: some context feature
            if self.current_tag == "head":
                for word in data.split(" "):
                    if word != "":
                        lowerCaseWord = word.lower()
                        self.header_words.append(lowerCaseWord)
                        if lowerCaseWord not in self.header_words_dict:
                            self.header_words_dict[lowerCaseWord] = 1
                        else:
                            self.header_words_dict[lowerCaseWord] += 1
            
            if self.current_tag == "title":
                for word in data.split(" "):
                    if word != "":
                        lowerCaseWord = word.lower()
                        self.title_words.append(lowerCaseWord)
                        if lowerCaseWord not in self.title_words_dict:
                            self.title_words_dict[lowerCaseWord] = 1
                        else:
                            self.title_words_dict[lowerCaseWord] += 1
            
            if self.current_tag == "b" or self.current_tag == "strong":
                for word in data.split(" "):
                    if word != "":
                        lowerCaseWord = word.lower()
                        self.b_or_strong_words.append(lowerCaseWord)
                        if lowerCaseWord not in self.b_or_strong_words_dict:
                            self.b_or_strong_words_dict[lowerCaseWord] = 1
                        else:
                            self.b_or_strong_words_dict[lowerCaseWord] += 1
                                     
            if self.current_tag == "a":
                for word in data.split(" "):
                    if word != "":
                        lowerCaseWord = word.lower()
                        self.a_words.append(lowerCaseWord)
                        if lowerCaseWord not in self.a_words_dict:
                            self.a_words_dict[lowerCaseWord] = 1
                        else:
                            self.a_words_dict[lowerCaseWord] += 1
            
            if self.current_tag == "i" or self.current_tag == "em":
                for word in data.split(" "):
                    if word != "":
                        lowerCaseWord = word.lower()
                        self.i_or_em_words.append(lowerCaseWord)
                        if lowerCaseWord not in self.i_or_em_words_dict:
                            self.i_or_em_words_dict[lowerCaseWord] = 1
                        else:
                            self.i_or_em_words_dict[lowerCaseWord] += 1
            
       
            # step2: text or script
            if self.current_tag == "script" or self.current_tag == "style":
                #print "The following content should NOT be indexed."
                #print "script content     :", data
                #print "script length     :",len(data)
                if hasattr(self, 'script_size'):
                    self.script_size += len(data)
                else:
                    self.script_size = len(data)
            else:
                if hasattr(self, 'text_size'):
                    self.text_size += len(data)
                else:
                    self.text_size = len(data)
                # print "text content:*",data,"*"

                for i in range(0,len(data)):
                    # print "data[i]:",ord(data[i])
                    if not ( (ord(data[i]) >= 48 and ord(data[i]) < 58) or (ord(data[i]) >= 65 and ord(data[i]) < 91) or (ord(data[i]) >= 97 and ord(data[i]) < 123) or (ord(data[i]) == 32) ):
                        # Just replace them with a space.
                        data = data[:i] + " " + data[i+1:]
                
                
                if hasattr(self, 'doc_words'):
                    pass
                else:
                    self.doc_words = []
                
                if hasattr(self, 'doc_distinct_words'):
                    pass
                else:
                    self.doc_distinct_words = []               

                
                if hasattr(self, 'doc_words_dict'):
                    pass
                else:
                    self.doc_words_dict = {}
                                    
                #print "raw word list:",data.split(" ")
                
                for word in data.split(" "):
                    if word != "":
                        # for later use.
                        lowerCaseWord = word.lower()
                        
                        # for current use
                        # lowerCaseWord = word
                        self.doc_words.append(lowerCaseWord)
                        if lowerCaseWord not in self.doc_words_dict:
                            self.doc_words_dict[lowerCaseWord] = 1
                        else:
                            self.doc_words_dict[lowerCaseWord] += 1
                        
                        
                        if lowerCaseWord not in self.doc_distinct_words:
                            self.doc_distinct_words.append(lowerCaseWord)
                        
                #print "doc_words:",self.doc_words
                #print "doc_distinct_words:",self.doc_distinct_words
                  
                #print "text length      :", len(data)
        else:
            pass
    def handle_comment(self, data):
        pass
        #print "Comment  :", data
    def handle_entityref(self, name):
        pass
        #c = unichr(name2codepoint[name])
        #print "Named ent:", c
    def handle_charref(self, name):
        pass
        #if name.startswith('x'):
        #    c = unichr(int(name[1:], 16))
        #else:
        #    c = unichr(int(name))
        #print "Num ent  :", c
    def handle_decl(self, data):
        pass
        #print "Decl     :", data
    
    def generate_statistics_report(self,status):
        
        print "-----> completed_parsed:",status
        
        # attrs check or init
        if hasattr(self, 'doc_words'):
            pass
        else:
            self.doc_words = []

        if hasattr(self, 'doc_distinct_words'):
            pass
        else:
            self.doc_distinct_words = []
        
        if hasattr(self, 'text_size'):
            pass
        else:
            self.text_size = 0
            
        if hasattr(self, 'script_size'):
            pass
        else:
            self.script_size = 0       

        if hasattr(self, 'doc_outlinks'):
            pass
        else:
            self.doc_outlinks = 0
            
        if hasattr(self, 'doc_waterloo_spam'):
            pass
        else:
            self.doc_waterloo_spam = 0
                
        #print "len(doc_words):",len(self.doc_words)
        #print "len(doc_distinct_words):",len(self.doc_distinct_words)
        #print "text_size:",self.text_size
        #print "script_size:",self.script_size
        if self.text_size != 0:
            script_text_ratio = self.script_size / self.text_size
        else:
            script_text_ratio = 0.0
        #print "script_text_ratio:",script_text_ratio
        #print "doc_outlinks:",self.doc_outlinks
        
        #outputFileFeaturePart1Handler.write(str( len(self.doc_words) ) + " ")
        #outputFileFeaturePart1Handler.write(str( len(self.doc_distinct_words) )  + " ")
        #outputFileFeaturePart1Handler.write(str(self.text_size)  + " ")
        #outputFileFeaturePart1Handler.write(str(self.script_size)  + " ")
        #outputFileFeaturePart1Handler.write(str(script_text_ratio)  + " ")
        #outputFileFeaturePart1Handler.write(str(self.doc_outlinks)  + " ")
        
        
        
        #print "self.header_words:",self.header_words
        #print "self.header_words_dict:",self.header_words_dict

        #print
        #print "self.title_words:",self.title_words
        #print
        #print "self.title_words_dict:",self.title_words_dict
    
        #print
        #print "self.b_or_strong_words:",self.b_or_strong_words
        #print
        #print "self.b_or_strong_words_dict:",self.b_or_strong_words_dict
    
        #print
        #print "self.a_words:",self.a_words
        #print
        #print "self.a_words_dict:",self.a_words_dict
    
        #print
        #print "self.i_or_em_words:",self.i_or_em_words
        #print
        #print "self.i_or_em_words_dict:",self.i_or_em_words_dict 
        
        #print
        #print "self.doc_words:",self.doc_words
        
        #print
        #print "self.doc_distinct_words:",self.doc_distinct_words
        
        #print 
        #print "self.doc_words_dict:",self.doc_words_dict
###########################################################################################################################class end.

inputFileName = "/data1/team/obukai/machine-learning-project-related/trec-related/trecCombine04_06/priorityTrecDocumentsIDs_all_sorted_with_index_number.txt"
inputFileHandler = open(inputFileName,"r")

outputFileName = "/home/obukai/workspace/polyIRToolkit/scripts/src/pythonScripts/gov2/tempErrorMessage.txt"
outputFileErrorHandler = open(outputFileName, "w")

human_judge_query_location_base_path = "/data5/team/obukai/human_judge_web_pages_gov2/"

for index,line in enumerate( inputFileHandler.readlines() ):
    print "index:",index
    outputFileErrorHandler.write("index:" + str(index) + "\n")
    #if index == 2:
    #    break
    
    trecID = line.strip().split(" ")[1]
    
    trecIDElements = trecID.split("-")
    
    segmentNumber = trecIDElements[0]
    
    fileNamePrefixLookingFor = trecID
    
    pathLookFor = human_judge_query_location_base_path + segmentNumber
    
    foundTag = False
    
    for dirname, dirnames, filenames in os.walk(pathLookFor):
        for filename in filenames:
            if filename.startswith(fileNamePrefixLookingFor):
                foundTag = True
                # print filename
                absolutePathForWebPageFileName = os.path.join(dirname, filename)
                print "-----> ",absolutePathForWebPageFileName
                outputFileErrorHandler.write("-----> " + absolutePathForWebPageFileName + "\n")
                
                absolutePathForWebPageFileHandler = open(absolutePathForWebPageFileName,"r")
                file_content = absolutePathForWebPageFileHandler.read()
                absolutePathForWebPageFileHandler.close()
                
                web_page_content = file_content
                parser = MyHTMLParser()
                
                try:
                    parser.feed(web_page_content)
                    parser.generate_statistics_report(1)
                    
                    outputFileErrorHandler.write("-----> completed_parsed: 1" + "\n") 
                    outputFileErrorHandler.write("-----> doc length:" + str( len(parser.doc_words) ) + "\n") 
                except HTMLParseError,e:
                    print "----->",absolutePathForWebPageFileName
                    print "----->",trecID,":",e.msg,":",e.lineno,":",e.offset
                    outputFileErrorHandler.write("----->" + trecID + ":" + e.msg + ":" + str(e.lineno) + ":" + str(e.offset) + "\n")
                    #outputErrorMessageFileHandle.write("----->" + trecID + ":" + e.msg + ":" + str(e.lineno) + ":" + str(e.offset) + "\n")
                    # do not handle the error message.
                    parser.generate_statistics_report(0)
                    
                    outputFileErrorHandler.write("-----> completed_parsed: 0" + "\n")
                    outputFileErrorHandler.write("-----> doc length:" + str( len(parser.doc_words) ) + "\n")
                except UnicodeDecodeError,e:
                    print "----->",absolutePathForWebPageFileName
                    print "----->",e
                    outputFileErrorHandler.write("----->" + trecID + " UnicodeDecodeError Error." "\n")
                    #outputErrorMessageFileHandle.write("----->" + trecID + " UnicodeDecodeError Error." "\n")
                    # do not handle the error message.
                    parser.generate_statistics_report(0)
                    
                    outputFileErrorHandler.write("-----> completed_parsed: 0" + "\n")
                    outputFileErrorHandler.write("-----> doc length:" + str( len(parser.doc_words) ) + "\n")
                
                print
                outputFileErrorHandler.write("\n")
                
                
    

inputFileHandler.close()
outputFileErrorHandler.close()
'''



    

# for line in inputFileHandler.readlines():
#    print len(line.strip().split(" "))

# DISABLED — dead code in a triple-quoted string; never executed.
# Builds wholeTrainingFileTempleteGov2V1.txt.input: loads each query (lower-cased,
# with every non-alphanumeric/non-space character replaced by a space) into
# queriesDict, then emits one "queryID trecID 'query text' relevanceScore" line
# per qrels entry.
'''
queriesDict = {}

#the whole purpose of this training file is to build the wholeTrainingFileTempleteGov2V1.txt.input

# the file for me to compare:
# /data5/team/weijiang/the_new_trip_of_feature_generation/wholeTrainingFileTempleteV3_phase0.txt.input

inputFileName1 = "/data1/team/obukai/machine-learning-project-related/learningToPrune/qrels.tb04-tb06.top150_CONTAINS_ONLY_147"
inputFileHandler1 = open(inputFileName1,"r")

inputFileName2 = "/data1/team/weijiang/workspace/polyIRToolkit/polyIRIndexer/04-06.topics.701-850.polyIRTKCompatibleMode"
inputFileHandler2 = open(inputFileName2,"r")

outputFileName = "/data5/team/obukai/the_new_trip_of_feature_generation/gov2/wholeTrainingFileTempleteGov2V1.txt.input"
outputFileHandler = open(outputFileName,"w")

for line in inputFileHandler2.readlines():
    queryID = int( line.strip().split(":")[0] )
    
    data = line.strip().split(":")[1]
    data = data.lower()
    
    for i in range(0,len(data)):
        # print "data[i]:",ord(data[i])
        if not ( (ord(data[i]) >= 48 and ord(data[i]) < 58) or (ord(data[i]) >= 65 and ord(data[i]) < 91) or (ord(data[i]) >= 97 and ord(data[i]) < 123) or (ord(data[i]) == 32) ):
            # Just replace them with a space.
            data = data[:i] + " " + data[i+1:]
    
    queryContent = data
    
    if queryID not in queriesDict:
        queriesDict[queryID] = queryContent    
    # print 

for line in inputFileHandler1.readlines():
    # example line: 850 0 GX272-67-14117174 0
    queryID = int(line.strip().split(" ")[0])
    trecID = line.strip().split(" ")[2]
    relevenceScore = line.strip().split(" ")[3]
    outputFileHandler.write( str(queryID) + " " + trecID + " '" + queriesDict[queryID] + "' " + relevenceScore + "\n")

print "len(queriesDict):",len(queriesDict)
print "Done."

inputFileHandler1.close()
inputFileHandler2.close()
outputFileHandler.close()
'''

# DISABLED — dead code in a triple-quoted string; never executed.
# Collects every distinct lower-cased query term (after replacing non-alphanumeric
# characters with spaces), sorts them (Python 2-only list.sort(cmp=...)), and
# writes each with a random integer as a FAKE collection frequency — a later
# (also disabled) snippet in this file replaces these with real frequencies.
'''
inputFileName = "/data1/team/weijiang/workspace/polyIRToolkit/polyIRIndexer/04-06.topics.701-850.polyIRTKCompatibleMode"
inputFileHandler = open(inputFileName,"r")

outputFileName = "/data5/team/obukai/the_new_trip_of_feature_generation/gov2/final-fake-queryTermsCollectionFreqs.txt"
outputFileHandler = open(outputFileName,"w")

queryTermList = []

for line in inputFileHandler.readlines():
    data = line.strip().split(":")[1]
    
    for i in range(0,len(data)):
        # print "data[i]:",ord(data[i])
        if not ( (ord(data[i]) >= 48 and ord(data[i]) < 58) or (ord(data[i]) >= 65 and ord(data[i]) < 91) or (ord(data[i]) >= 97 and ord(data[i]) < 123) or (ord(data[i]) == 32) ):
            # Just replace them with a space.
            data = data[:i] + " " + data[i+1:]

    queryTerms = data.split(" ")
    
    for queryTerm in queryTerms:
        if queryTerm.lower() != "" and queryTerm.lower() not in queryTermList:
            queryTermList.append(queryTerm.lower())

queryTermList.sort(cmp=None, key=None, reverse=False)

for queryTerm in queryTermList:
    outputFileHandler.write(queryTerm + " " + str(random.randint(1, 100000000)) + "\n")


inputFileHandler.close()
outputFileHandler.close()
'''


# DISABLED — dead code in a triple-quoted string; never executed.
# Prints the absolute path of every file under human_judge_web_pages_gov2.
'''
for dirname, dirnames, filenames in os.walk('/data5/team/obukai/human_judge_web_pages_gov2'):
    # for subdirname in dirnames:
    #     print os.path.join(dirname, subdirname)
    for filename in filenames:
        print os.path.join(dirname, filename)
'''

# DISABLED — dead code in a triple-quoted string; never executed.
# Creates directories GX000 .. GX272 under human_judge_web_pages_gov2
# (same pattern as the earlier "_ALL" mkdir snippet, different base path).
'''
basedPath = "/data5/team/obukai/human_judge_web_pages_gov2/GX"

for i in range(0,273):
    directory = basedPath + "%03d" % i
    #print directory
    os.mkdir(directory)
'''
