from __future__ import division
from operator import itemgetter, attrgetter

import gc
import math
import matplotlib
import os
import pylab
import random
import sys
import time
from scipy import stats
import numpy as np

# Updated by Wei 2014/01/10 afternoon
def getQueryIDAndResultSetGivenResultFileForDebug(inputResultFileName,NUM_OF_TOP_RESULTS_NEEDED_TO_EVALUATE):
    print "getQueryIDAndResultSetGivenResultFile(...) function called."
    num_of_postings_recorded = 0
      
    # pre-set variables
    NUM_OF_QUERIES_WANT_TO_GET_IN_TOTAL = 95001
    num_of_queries_already_got = -1
    numOfQueriesHavingQID = 0
    numOfQueriesHavingSearchContent = 0
    
    
    inputFileHandler1 = open(inputResultFileName,"r")
    
    
    
    currentLine = inputFileHandler1.readline()
    nextLine = ""
    currentQID = ""
    
    numOfResultsForTheAllQueries = 0
    
    while currentLine:
        # condition statement for exiting the loop
        # Updated by Wei 2013/12/01 afternoon at school
        if NUM_OF_QUERIES_WANT_TO_GET_IN_TOTAL == num_of_queries_already_got:
            print "# OF QUERIES WANT TO GET IN TOTAL:",NUM_OF_QUERIES_WANT_TO_GET_IN_TOTAL
            print "It is time to break"
            break
        
        # sampling parsing line:
        # qid: 701
        if currentLine.startswith("qid:"):
            num_of_queries_already_got += 1
            
            currentQID = int( currentLine.strip().split(":")[1].strip() )
            print "currentQID:",str( currentQID )

            if currentQID == 10001 or currentQID == 20001 or currentQID == 30001 or currentQID == 40001 or currentQID == 50001 or currentQID == 60001 or currentQID == 70001 or currentQID == 80001 or currentQID == 90001 or currentQID == 95001:
                print currentQID,num_of_postings_recorded
            
            numOfQueriesHavingQID += 1
        
        # sample parsing line:
        # Search: u s  oil industry history
        if currentLine.startswith("Search:"):
            elementList = currentLine.strip().split(" ")
            
            currentSearchContent = ""
            
            queryTermList = []
            for element in elementList[1:]:
                if element.strip() != "":
                    queryTermList.append( element.strip() )        
            currentQueryLength = len(queryTermList)
            print "currentQueryLength:",currentQueryLength
    
            for element in queryTermList:
                currentSearchContent += element + " "
            
            print currentSearchContent
            numOfQueriesHavingSearchContent += 1
            
            
            currentQueryTermIndexDict = {}
            nextLine = inputFileHandler1.readline()
            #print nextLine.strip()
            if nextLine.strip() != "":
                # I think I can skip the line: 
                # [2013-05-24 14:20:44] 38871 terms with their lists length have been loaded.
                nextLine = inputFileHandler1.readline()
                nextLineElements = nextLine.strip().split(" ")
                # print "nextLineElements:",nextLineElements
                # sample parsing line: 
                # oil:0 industry:1 history:2 u:3 s:4
                for element in nextLineElements:
                    if element.split(":")[0] not in currentQueryTermIndexDict:
                        currentQueryTermIndexDict[ int(element.split(":")[1]) ] = element.split(":")[0]
                
                print "currentQueryTermIndexDict:",currentQueryTermIndexDict
                # print "len(currentQueryTermIndexDict):",len(currentQueryTermIndexDict)
            
                
                # Now, it is time to read the threshold line
                currentLine = inputFileHandler1.readline()
                if len(currentLine.strip().split(":")) == 2:
                    name = currentLine.strip().split(":")[0]
                    value = currentLine.strip().split(":")[1]
                    print name,"::",value
                else:
                    print "format problem"
                    exit(1)
                    
                # Now, it is time to read computation method line
                currentLine = inputFileHandler1.readline()
                if len(currentLine.strip().split(":")) == 2:
                    name = currentLine.strip().split(":")[0]
                    value = currentLine.strip().split(":")[1]
                    print name,"::",value
                else:
                    print "format problem"
                    exit(1)
                
                # skip the line like the following:
                # 3 factor probability formula logic implemented by Wei on 2013/08/02 at school    
                # currentLine = inputFileHandler1.readline()
                
                # Now, it is time to read the actual training example line
                currentLine = inputFileHandler1.readline()
                numOfResultsForTheCurrentQuery = 0
                
                # for DEBUG ONLY(IMPOERTANT debug setting here)
                # print "len( currentLine.strip().split(" ") ):",len( currentLine.strip().split(" ") )
                # print "currentLine:",currentLine
                # exit(1)
                lineElements = currentLine.strip().split(" ")
                while len( lineElements ) == 35:
                    theRank = int(lineElements[0])
                    theDocID = lineElements[-2]
                    theTrecID = lineElements[-1]

                    if theRank <= NUM_OF_TOP_RESULTS_NEEDED_TO_EVALUATE:
                        
                        # pick up the BM25 scores if the rank <= NUM_OF_TOP_RESULTS_NEEDED_TO_EVALUATE
                        base = 1
                        LENGTH = -1
                        if len(currentQueryTermIndexDict) <= 10:
                            LENGTH = len(currentQueryTermIndexDict)
                        else:
                            LENGTH = 10
                        
                        for i in range(0,LENGTH):
                            # the two files are as following: dict1_for_list_length and dict2_for_impact_score
                            num_of_postings_recorded += 1
                            
                            
                    
                    numOfResultsForTheCurrentQuery += 1
                    
                    currentLine = inputFileHandler1.readline()
                    lineElements = currentLine.strip().split(" ")
                   
                numOfResultsForTheAllQueries += numOfResultsForTheCurrentQuery
                
                # sampling method 1: among all the top10 documents, select those documents, open those documents and extract the corresponding postings.
                # This method make sure that once the document have been selected, ALL the coresponding postings will also be selected.
                # This sampling method has been developed on 2013/01/09
                
                print "numOfResultsForTheCurrentQuery:",numOfResultsForTheCurrentQuery
                print
            
        currentLine = inputFileHandler1.readline()
    
    print "num_of_queries_already_got: ",num_of_queries_already_got
    print "numOfQueriesHavingQID: ",numOfQueriesHavingQID
    print "numOfQueriesHavingSearchContent: ",numOfQueriesHavingSearchContent
    print "num_of_postings_recorded: ",num_of_postings_recorded
    print "numOfResultsForTheCurrentQuery: ",numOfResultsForTheCurrentQuery
    # debug
    # exit(1)



# Updated by Wei 2013/12/14 night
def getQueryIDAndResultSetGivenResultFile(inputResultFileName,NUM_OF_TOP_RESULTS_NEEDED_TO_EVALUATE,queryTermInvertedIndexInfo,dict1_for_list_length,dict2_for_impact_score,twoDTableWithNumOfPostingsDict):
    """Scan a search-result dump and bin the postings of top-ranked results.

    For every query section in the file ("qid:" line, "Search:" line, a
    term:index line, a threshold line, a computation-method line, then
    35-field result rows) this walks the result rows and, for each result
    ranked <= NUM_OF_TOP_RESULTS_NEEDED_TO_EVALUATE, takes each query
    term's per-result score plus the term's inverted-list length
    (queryTermInvertedIndexInfo[term][1]), maps both onto micro-cell class
    labels via dict1_for_list_length / dict2_for_impact_score (int label ->
    lower bound), and increments the "<lengthClass>_<scoreClass>" counter
    in twoDTableWithNumOfPostingsDict (mutated in place).  Prints progress
    and a final postings count; returns nothing.

    NOTE(review): inputFileHandler1 is never closed.
    """
    print "getQueryIDAndResultSetGivenResultFile(...) function called."
    num_of_postings_recorded = 0
      
    # pre-set variables
    NUM_OF_QUERIES_WANT_TO_GET_IN_TOTAL = 95001
    num_of_queries_already_got = -1      # -1 so the first "qid:" line makes it 0
    numOfQueriesHavingQID = 0
    numOfQueriesHavingSearchContent = 0
    numOfQueriesHavingSearchResults = 0
    
    inputFileHandler1 = open(inputResultFileName,"r")
    
    
    
    currentLine = inputFileHandler1.readline()
    nextLine = ""
    currentQID = ""
    
    numOfResultsForTheAllQueries = 0
    
    while currentLine:
        # condition statement for exiting the loop
        # Updated by Wei 2013/12/01 afternoon at school
        if NUM_OF_QUERIES_WANT_TO_GET_IN_TOTAL == num_of_queries_already_got:
            print "# OF QUERIES WANT TO GET IN TOTAL:",NUM_OF_QUERIES_WANT_TO_GET_IN_TOTAL
            print "It is time to break"
            break
        
        # sampling parsing line:
        # qid: 701
        if currentLine.startswith("qid:"):
            num_of_queries_already_got += 1
            
            currentQID = int( currentLine.strip().split(":")[1].strip() )
            print "currentQID:",str( currentQID )
            # init the variable
            
            numOfQueriesHavingQID += 1
        
        # sample parsing line:
        # Search: u s  oil industry history
        if currentLine.startswith("Search:"):
            elementList = currentLine.strip().split(" ")
            
            currentSearchContent = ""
            
            # keep only non-empty tokens (queries may contain runs of spaces)
            queryTermList = []
            for element in elementList[1:]:
                if element.strip() != "":
                    queryTermList.append( element.strip() )        
            currentQueryLength = len(queryTermList)
            print "currentQueryLength:",currentQueryLength
    
            for element in queryTermList:
                currentSearchContent += element + " "
            
            print currentSearchContent
            numOfQueriesHavingSearchContent += 1
            
            
            # maps term position (int) -> term string for this query
            currentQueryTermIndexDict = {}
            nextLine = inputFileHandler1.readline()
            #print nextLine.strip()
            if nextLine.strip() != "":
                # I think I can skip the line: 
                # [2013-05-24 14:20:44] 38871 terms with their lists length have been loaded.
                nextLine = inputFileHandler1.readline()
                nextLineElements = nextLine.strip().split(" ")
                # print "nextLineElements:",nextLineElements
                # sample parsing line: 
                # oil:0 industry:1 history:2 u:3 s:4
                for element in nextLineElements:
                    # NOTE(review): the dict's keys are ints while this tests the
                    # term *string* for membership, so the condition is always
                    # true and a duplicated index silently overwrites its entry.
                    if element.split(":")[0] not in currentQueryTermIndexDict:
                        currentQueryTermIndexDict[ int(element.split(":")[1]) ] = element.split(":")[0]
                
                print "currentQueryTermIndexDict:",currentQueryTermIndexDict
                # print "len(currentQueryTermIndexDict):",len(currentQueryTermIndexDict)
            
                
                # Now, it is time to read the threshold line ("name:value")
                currentLine = inputFileHandler1.readline()
                if len(currentLine.strip().split(":")) == 2:
                    name = currentLine.strip().split(":")[0]
                    value = currentLine.strip().split(":")[1]
                    print name,"::",value
                else:
                    print "format problem"
                    exit(1)
                    
                # Now, it is time to read computation method line ("name:value")
                currentLine = inputFileHandler1.readline()
                if len(currentLine.strip().split(":")) == 2:
                    name = currentLine.strip().split(":")[0]
                    value = currentLine.strip().split(":")[1]
                    print name,"::",value
                else:
                    print "format problem"
                    exit(1)
                
                # skip the line like the following:
                # 3 factor probability formula logic implemented by Wei on 2013/08/02 at school    
                # currentLine = inputFileHandler1.readline()
                
                # Now, it is time to read the actual training example line
                currentLine = inputFileHandler1.readline()
                numOfResultsForTheCurrentQuery = 0
                
                # for DEBUG ONLY (IMPORTANT debug setting here)
                # print "len( currentLine.strip().split(" ") ):",len( currentLine.strip().split(" ") )
                # print "currentLine:",currentLine
                # exit(1)
                # result rows have exactly 35 space-separated fields;
                # anything else terminates the per-query block
                lineElements = currentLine.strip().split(" ")
                while len( lineElements ) == 35:
                    theRank = int(lineElements[0])
                    theDocID = lineElements[-2]
                    theTrecID = lineElements[-1]

                    if theRank <= NUM_OF_TOP_RESULTS_NEEDED_TO_EVALUATE:
                        
                        # pick up the BM25 scores if the rank <= NUM_OF_TOP_RESULTS_NEEDED_TO_EVALUATE
                        # scores start at field 1 (field 0 is the rank); at most 10
                        # per-term scores are consumed per row
                        base = 1
                        LENGTH = -1
                        if len(currentQueryTermIndexDict) <= 10:
                            LENGTH = len(currentQueryTermIndexDict)
                        else:
                            LENGTH = 10
                        
                        for i in range(0,LENGTH):
                            # the two files are as following: dict1_for_list_length and dict2_for_impact_score
                            
                            postingScoreInStringFormat = lineElements[base + i]
                            postingScoreInFloatFormat = float(postingScoreInStringFormat)
                            currentTerm20131201 = currentQueryTermIndexDict[i]
                            classLabelForListLength = "None"
                            classLabelForImpactScore = "None"
                            classLabelFor2D = "None_None"
                            # inverted-list length is slot 1 of the aux-info record
                            currentTermInvertedIndexCorrectLength = queryTermInvertedIndexInfo[currentTerm20131201][1]
                            
                            # TODO on 2013/12/14 night, I have the 2 values here:
                            # postingScoreInFloatFormat and currentTermInvertedIndexCorrectLength (And I think I can use the whole Sun to do this)
                            # Use the following dicts to decide the cells.
                            # dict1_for_list_length,dict2_for_impact_score
                            
                            # assign the value of classLabelForListLength:
                            # scan labels from highest to lowest and take the first
                            # whose lower bound <= the list length.
                            # NOTE(review): this inner loop reuses the outer loop
                            # variable i; harmless in Python (the outer for re-binds
                            # i from its own iterator each pass) but confusing.
                            # CURRENT version
                            for i in range(len(dict1_for_list_length)-1,-1,-1):
                                # print i,classLabelWithClassLowerBoundDict[ str(i) ]
                                if dict1_for_list_length[ i ] <= currentTermInvertedIndexCorrectLength:
                                    classLabelForListLength = str(i)
                                    break
                            
                            
                            '''
                            # OLD version being dumped since 2013/12/15 night
                            for i in range(70,-1,-1):
                                # print i,classLabelWithClassLowerBoundDict[ str(i) ]
                                if classLabelWithClassLowerBoundDict[ str(i) ] <= currentTermInvertedIndexCorrectLength:
                                    classLabelForListLength = str(i)
                                    break
                            '''
                            
                            if classLabelForListLength == "None":
                                print "critical error: the term can NOT found the belonging class"
                                print "term: ",currentTerm20131201
                                print "currentTermInvertedIndexCorrectLength: ",currentTermInvertedIndexCorrectLength
                                # NOTE(review): the two variables printed below are
                                # never defined in this function -- this branch would
                                # raise NameError before reaching exit(1).
                                print "currentTermInvertedIndexBeginningPosition: ",currentTermInvertedIndexBeginningPosition
                                print "currentTermInvertedIndexEndingPosition: ",currentTermInvertedIndexEndingPosition
                                # weak solution
                                # continue
                                # strong solution
                                exit(1)
                            
                            # debug
                            # print "------>term:",currentTerm20131201
                            # print "------>currentTermInvertedIndexCorrectLength:",currentTermInvertedIndexCorrectLength
                            # print "------>classLabelForListLength:",classLabelForListLength
                            # print "------>postingScoreInFloatFormat:",postingScoreInFloatFormat
                            # print "------>postingScoreInStringFormat:",postingScoreInStringFormat
                            # print
                            
                            # assign the value of classLabelForImpactScore the same
                            # way: highest label whose lower bound <= the score
                            for i in range(len(dict2_for_impact_score)-1,-1,-1):
                                # print i,dict2_for_impact_score[ i ]
                                if dict2_for_impact_score[ i ] <= postingScoreInFloatFormat:
                                    classLabelForImpactScore = str(i)
                                    break
                                
                            
                            
                            if classLabelForImpactScore == "None":
                                print "critical error: the term can NOT found the belonging class"
                                print "term: ",currentTerm20131201
                                print "postingScoreInFloatFormat: ",postingScoreInFloatFormat
                                # weak solution
                                # continue
                                # strong solution
                                exit(1)
                            
                            classLabelFor2D = classLabelForListLength + "_" + classLabelForImpactScore
                            if classLabelFor2D in twoDTableWithNumOfPostingsDict:
                                # debug section
                                '''
                                print "term:",currentTerm20131201
                                print "list length:",currentTermInvertedIndexCorrectLength
                                print "score:",postingScoreInFloatFormat
                                print "classLabelFor2D:",classLabelFor2D
                                print
                                '''
                                twoDTableWithNumOfPostingsDict[classLabelFor2D] += 1
                                num_of_postings_recorded += 1
                            else:
                                print "critical error."
                                exit(1)
                    
                    numOfResultsForTheCurrentQuery += 1
                    
                    currentLine = inputFileHandler1.readline()
                    lineElements = currentLine.strip().split(" ")
                   
                numOfResultsForTheAllQueries += numOfResultsForTheCurrentQuery
                
                # sampling method 1: among all the top10 documents, select those documents, open those documents and extract the corresponding postings.
                # This method make sure that once the document have been selected, ALL the coresponding postings will also be selected.
                # This sampling method has been developed on 2013/01/09
                
                print "numOfResultsForTheCurrentQuery:",numOfResultsForTheCurrentQuery
                print
            
        currentLine = inputFileHandler1.readline()
    
    print "num_of_postings_recorded: ",num_of_postings_recorded
    # debug
    # exit(1)




# Announce start-up on stdout as soon as the script reaches this point.
sys.stdout.write("program begins...\n")
def step1_micro_cells_creation_for_list_length():
    outputFileName = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/micro_cells_for_list_length"
    outputFileHandler = open(outputFileName,"w")
    
    # This is for the list length
    startValue = 1
    step = 1.01
    
    currentLowerBound = 0
    currentUpperBound = 0
    
    currentLowerBound = startValue
    currentUpperBound = int( currentLowerBound * step ) 
    
    for i in range(0,50000):
        outputLine = str(i) + " " + "[" + " " + str(currentLowerBound) + " " + "," + " " + str(currentUpperBound) + " " + "]" + "\n"
        # print outputLine.strip()
        outputFileHandler.write(outputLine)
        currentLowerBound = currentUpperBound + 1
        currentUpperBound = int( currentLowerBound * step )
        if currentUpperBound >= 25205179:
            outputLine = str(i+1) + " " + "[" + " " + str(currentLowerBound) + " " + "," + " " + str(currentUpperBound) + " " + "]"  + "\n"
            # print outputLine.strip()
            outputFileHandler.write(outputLine)
            break
    outputFileHandler.close()
    print "Overall Processing Statistics:"
    print "outputFileName:",outputFileName

def step2_micro_cells_creation_for_impact_scores():
    outputFileName = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/micro_cells_for_impact_score"
    outputFileHandler = open(outputFileName,"w")
    
    # This is for the impact score
    startValue = 0.0001
    step = 1.01
    
    currentLowerBound = 0
    currentUpperBound = 0
    
    currentLowerBound = startValue
    currentUpperBound = currentLowerBound * step 
    
    for i in range(0,50000):
        outputLine = str(i) + " " + "[" + " " + str(currentLowerBound) + " " + "," + " " + str(currentUpperBound) + " " + "]" + "\n"
        # print outputLine.strip()
        outputFileHandler.write(outputLine)
        
        currentLowerBound = currentUpperBound + 0.0001
        currentUpperBound = currentLowerBound * step
        if currentUpperBound >= 20:
            outputLine = str(i+1) + " " + "[" + " " + str(currentLowerBound) + " " + "," + " " + str(currentUpperBound) + " " + "]" + "\n"
            # print outputLine.strip()
            outputFileHandler.write(outputLine)
            break
    
    outputFileHandler.close()
    print "Overall Processing Statistics:"
    print "outputFileName:",outputFileName
    
# load the list length info into the main memory
# file1: 
# 10717 lines
# /data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/micro_cells_for_list_length
def step3_load_micro_cell_info_for_list_length(dict):
    inputFileName = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/micro_cells_for_list_length"
    inputFileHandler = open(inputFileName,"r")
    for line in inputFileHandler.readlines():
        lineElements = line.strip().split(" ")
        classLabel = int(lineElements[0])
        classLowerBound = int(lineElements[2])
        if classLabel not in dict:
            dict[classLabel] = classLowerBound
    print "list length len(dict):",len(dict)
    inputFileHandler.close()

# load the impact score info into the main memory 
# file2:
# 30446 lines
# /data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/micro_cells_for_impact_score
def step4_load_micro_cell_info_for_impact_score(dict):
    inputFileName = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/micro_cells_for_impact_score"
    inputFileHandler = open(inputFileName,"r")
    for line in inputFileHandler.readlines():
        lineElements = line.strip().split(" ")
        classLabel = int(lineElements[0])
        classLowerBound = float(lineElements[2])
        if classLabel not in dict:
            dict[classLabel] = classLowerBound
    print "impact score len(dict):",len(dict)
        
    inputFileHandler.close()

def step6_load_related_postings_AND_bin_them_into_micro_cells_optimized_version(dict1_for_list_length,dict2_for_impact_score):
    """Count, per micro-cell, the postings of every head-95K query term.

    Steps: (1) build a zeroed 2-D counter table keyed "<i>_<j>" over all
    (list-length class, impact-score class) pairs; (2) read the query-term
    frequency file; (3) read the per-term inverted-index aux file (index
    number, list length, begin/end byte offsets); (4) for each query term,
    seek into the raw inverted-index file, load all its posting scores,
    sort them descending and bin each score into its impact-score class,
    weighting each count by the term's query frequency; (5) dump the full
    table to "<inputFileName1>outputing".  Prints progress; returns nothing.

    dict1_for_list_length / dict2_for_impact_score -- int class label ->
        lower bound, as loaded by step3/step4.

    NOTE(review): none of the three input file handles is ever closed.
    """
    print "optimized_version"
    
    # NUM_OF_TERMS_NEEDED_TO_BE_PROCESSED
    
    print "Begins..."
    # counter table: "<lengthClass>_<scoreClass>" -> number of postings
    twoDTableWithNumOfPostingsDict = {}
    for i in range( 0,len(dict1_for_list_length) ):
        for j in range( 0,len(dict2_for_impact_score) ):
            keyInStringFormat = str(i) + "_" + str(j)
            if keyInStringFormat not in twoDTableWithNumOfPostingsDict:
                twoDTableWithNumOfPostingsDict[keyInStringFormat] = 0
    print "len(twoDTableWithNumOfPostingsDict):",len(twoDTableWithNumOfPostingsDict)

    ################################################
    # step1: get the set of query terms from 95K queries which need to analyze
    # # of query terms included in the head95K: 37817
    # term -> real frequency of the term in the head-95K query set
    queryTermHead95KQueriesDict = {}
    # for Production
    # from xa{a - j}
    inputFileName1 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/xaj"
    # inputFileName1 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/realFreqOfTermsIn_100KQueries_0_1_95%"
    # for DEBUG
    # inputFileName1 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/realFreqOfTermsIn_100KQueries_0_1_95%_DEBUG"
    inputFileHanlder = open(inputFileName1,"r")
    
    # line format: "<term> <realFreq> ..."
    for index,line in enumerate( inputFileHanlder.readlines() ):
        lineElements = line.strip().split(" ")
        currentTerm = lineElements[0]
        currentTermRealFreqIn95KQueires = int(lineElements[1])
        # ignore the lineElements[1] cause I don't need this info
        if currentTerm not in queryTermHead95KQueriesDict:
            queryTermHead95KQueriesDict[currentTerm] = currentTermRealFreqIn95KQueires
        else:
            print "duplicated terms found."
            exit(1)
    print "len(queryTermHead95KQueriesDict):",len(queryTermHead95KQueriesDict)
    # print "queryTermHead95KQueriesDict['of']:",queryTermHead95KQueriesDict['of']
    # print "queryTermHead95KQueriesDict['fawn']:",queryTermHead95KQueriesDict['fawn']
    inputFileHanlder.close()
    
    ################################################
    # step2: Load the aux file for the terms and ready to count all the related postings to form the denominator 
    # key: query term in string format
    # value: a list contains 4 items as following:
        # termIndexNumber
        # invertedIndexLength
        # invertedIndexBeginningPosition
        # invertedIndexEndingPosition
    queryTermInvertedIndexInfo = {}
    
    inputDataSourceFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/rawTermInvertedIndexOutput_with_trecID_added.txt"
    inputDataSourceFileHandler = open(inputDataSourceFileName,"r")
    
    inputAuxSourceFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/rawTermInvertedIndexOutputAccessAuxFile_NEW_20121128.txt"
    inputAuxSourceFileHandler = open(inputAuxSourceFileName,"r")
    
    # aux line format: "<indexNum> <term> <listLength> <beginPos> <endPos>"
    for line in inputAuxSourceFileHandler.readlines():
        lineElements = line.strip().split(" ")
        
        termIndexNumber = int(lineElements[0])
        term = lineElements[1]
        invertedIndexLength = int(lineElements[2])
        invertedIndexBeginningPosition = int(lineElements[3])
        invertedIndexEndingPosition = int(lineElements[4])
        
        if term not in queryTermInvertedIndexInfo:
            queryTermInvertedIndexInfo[term] = []
            # index 0
            queryTermInvertedIndexInfo[term].append(termIndexNumber)
            # index 1
            queryTermInvertedIndexInfo[term].append(invertedIndexLength)
            # index 2
            queryTermInvertedIndexInfo[term].append(invertedIndexBeginningPosition)
            # index 3
            queryTermInvertedIndexInfo[term].append(invertedIndexEndingPosition)   
        else:
            print "unnormal, mark1"
            exit(1)
    
    print "len(queryTermInvertedIndexInfo):",len(queryTermInvertedIndexInfo)
    
    
    ################################################
    # This loop takes the most of the time
    # step3: Assign and do the counting NOW, and that I think it is the exciting part
    for index,term in enumerate(queryTermHead95KQueriesDict):
        print "------>index:",index,"/",len(queryTermHead95KQueriesDict)
        # if index == 3:
        #    break
        currentTermInvertedIndexCorrectLength = queryTermInvertedIndexInfo[term][1]
        currentTermInvertedIndexBeginningPosition = queryTermInvertedIndexInfo[term][2]
        currentTermInvertedIndexEndingPosition = queryTermInvertedIndexInfo[term][3]
        classLabelForListLength = "None"
        classLabelForImpactScore = "None"
        classLabelFor2D = "None_None"
        # pick the highest class label whose lower bound <= the list length
        for i in range(len(dict1_for_list_length)-1,-1,-1):
            # print i,classLabelWithClassLowerBoundDict[ str(i) ]
            if dict1_for_list_length[ i ] <= currentTermInvertedIndexCorrectLength:
                classLabelForListLength = str(i)
                break
        
        if classLabelForListLength == "None":
            print "critical error: the term can NOT found the belonging class"
            print "term: ",term
            print "currentTermInvertedIndexCorrectLength: ",currentTermInvertedIndexCorrectLength
            print "currentTermInvertedIndexBeginningPosition: ",currentTermInvertedIndexBeginningPosition
            print "currentTermInvertedIndexEndingPosition: ",currentTermInvertedIndexEndingPosition
            # weak solution
            continue
            # strong solution
            # exit(1)
            
        print "------>term:",term
        print "------>real freq of term in head 95K queries:",queryTermHead95KQueriesDict[term]
        print "------>currentTermInvertedIndexCorrectLength:",currentTermInvertedIndexCorrectLength
        print "------>classLabelForListLength:",classLabelForListLength

        # print "------>currentTermInvertedIndexBeginningPosition:",currentTermInvertedIndexBeginningPosition
        # print "------>currentTermInvertedIndexEndingPosition:",currentTermInvertedIndexEndingPosition
        
        # optimized logic: load the postings and then assign the value all at once
        # (a zero length means the term is absent from the lexicon)
        if currentTermInvertedIndexCorrectLength != 0:
            # jump straight to this term's byte range in the raw index file
            inputDataSourceFileHandler.seek(currentTermInvertedIndexBeginningPosition)
            
            currentDataLine = inputDataSourceFileHandler.readline()
            # print "first line:",currentDataLine
            
            postingScoreInFloatList = []
            
            startingTime = time.time()
            # read until the file offset passes the term's ending position;
            # the line that crosses the boundary is handled after the loop
            while inputDataSourceFileHandler.tell() <= currentTermInvertedIndexEndingPosition:
                
                currentDataLineElements = currentDataLine.strip().split(" ")
                
                # get the posting score and ready to bucket them
                # (the score is field 2 of each posting line)
                postingScoreInStringFormat = currentDataLineElements[2]
                postingScoreInFloatFormat = float(postingScoreInStringFormat)
                postingScoreInFloatList.append(postingScoreInFloatFormat)
                                
                currentDataLine = inputDataSourceFileHandler.readline()
            
            # dealing with the LAST line
            # print "last line:",currentDataLine
            currentDataLineElements = currentDataLine.strip().split(" ")
            
            # get the posting score and ready to bucket them
            postingScoreInStringFormat = currentDataLineElements[2]
            postingScoreInFloatFormat = float(postingScoreInStringFormat)
            postingScoreInFloatList.append(postingScoreInFloatFormat)
            
            elapsedTime = time.time() - startingTime
            print "len(postingScoreInFloatList):",len(postingScoreInFloatList)
            print "finish loading...",elapsedTime,"secs"
            
            
            # sort the posting scores (descending; the cmp= keyword is
            # Python 2 only)
            startingTime = time.time()
            postingScoreInFloatList.sort(cmp=None, key=None, reverse=True)
            elapsedTime = time.time() - startingTime
            print "finish sorting...",elapsedTime,"secs"
            
            # debug
            # for currentPostingScore in postingScoreInFloatList:
            #    print currentPostingScore
            
            # init the value
            startingTime = time.time()
            # Because the scores are sorted descending, counter only ever
            # moves down: each score's class is the highest label whose
            # lower bound <= the score, found by continuing from the
            # previous score's class instead of rescanning all labels.
            counter = len(dict2_for_impact_score) - 1
            for currentPostingScore in postingScoreInFloatList:
                # NOTE(review): if a score were below dict2_for_impact_score[0],
                # counter would go negative and raise KeyError -- assumed not
                # to happen with the generated bucket table; confirm.
                while dict2_for_impact_score[ counter ] > currentPostingScore:
                    counter -= 1
                classLabelForImpactScore = str(counter)
                
                # OLD version
                # assign the value of currentPostingScore
                # for i in range(len(dict2_for_impact_score)-1,-1,-1):
                     # print i,dict2_for_impact_score[ i ]
                #    if dict2_for_impact_score[ i ] <= currentPostingScore:
                #         classLabelForImpactScore = str(i)
                #         break
                
                # NOTE(review): classLabelForImpactScore is always set to
                # str(counter) above, so this branch is unreachable here.
                if classLabelForImpactScore == "None":
                    print "critical error: the term can NOT found the belonging class"
                    print "term: ",term
                    print "currentPostingScore: ",currentPostingScore
                    # weak solution
                    # continue
                    # strong solution
                    exit(1)

                classLabelFor2D = classLabelForListLength + "_" + classLabelForImpactScore
                if classLabelFor2D in twoDTableWithNumOfPostingsDict:
                    # debug section
                    # print "term:",term
                    # print "list length:",currentTermInvertedIndexCorrectLength
                    # print "score:",currentPostingScore
                    # print "classLabelFor2D:",classLabelFor2D
                    # print
                    # weight each posting by the term's query frequency
                    twoDTableWithNumOfPostingsDict[classLabelFor2D] += queryTermHead95KQueriesDict[term]
                else:
                    print "critical error."
                    exit(1)
            elapsedTime = time.time() - startingTime
            print "finish counting...",elapsedTime,"secs"
            print
                  
        else:
            print "query term:",term,"is NOT in the lexicon."        

    ################################################
    # step4: output the info from the memory to the disk
    outputFileName = inputFileName1 + "outputing"
    # outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/impactWithLengthAnalyze20131130Night/microCellsGenerationOfRelatedPostingsDenominator20131216Afternoon"
    outputFileHandler = open(outputFileName,"w")    

    classLabelFor2D = "None_None"
    totalNumOfPostingsCounted = 0
    # one line per cell: "<i>_<j> <count>"
    for i in range( 0,len(dict1_for_list_length) ):
        for j in range( 0,len(dict2_for_impact_score) ):
            classLabelFor2D = str(i) + "_" + str(j)
            outputFileHandler.write( classLabelFor2D + " " + str(twoDTableWithNumOfPostingsDict[classLabelFor2D]) + "\n")
            totalNumOfPostingsCounted += twoDTableWithNumOfPostingsDict[classLabelFor2D]
    print "totalNumOfPostingsCounted:",totalNumOfPostingsCounted
    outputFileHandler.close()      
    
    print "Overall Processing Statistics:"
    print "outputFileName: ",outputFileName
    print "Ends."


def step6_load_related_postings_AND_bin_them_into_micro_cells_unoptimized_version(dict1_for_list_length,dict2_for_impact_score):
    print "unoptimized_version"
    # NUM_OF_TERMS_NEEDED_TO_BE_PROCESSED
    
    print "Begins..."
    twoDTableWithNumOfPostingsDict = {}
    for i in range( 0,len(dict1_for_list_length) ):
        for j in range( 0,len(dict2_for_impact_score) ):
            keyInStringFormat = str(i) + "_" + str(j)
            if keyInStringFormat not in twoDTableWithNumOfPostingsDict:
                twoDTableWithNumOfPostingsDict[keyInStringFormat] = 0
    print "len(twoDTableWithNumOfPostingsDict):",len(twoDTableWithNumOfPostingsDict)

    ################################################
    # step1: get the set of query terms from 95K queries which need to analyze
    # # of query terms included in the head95K: 37817
    queryTermHead95KQueriesDict = {}
    # for Production
    inputFileName1 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/realFreqOfTermsIn_100KQueries_0_1_95%"
    # for DEBUG
    # inputFileName1 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/realFreqOfTermsIn_100KQueries_0_1_95%_DEBUG"
    inputFileHanlder = open(inputFileName1,"r")
    
    for index,line in enumerate( inputFileHanlder.readlines() ):
        lineElements = line.strip().split(" ")
        currentTerm = lineElements[0]
        currentTermRealFreqIn95KQueires = int(lineElements[1])
        # ignore the lineElements[1] cause I don't need this info
        if currentTerm not in queryTermHead95KQueriesDict:
            queryTermHead95KQueriesDict[currentTerm] = currentTermRealFreqIn95KQueires
        else:
            print "duplicated terms found."
            exit(1)
    print "len(queryTermHead95KQueriesDict):",len(queryTermHead95KQueriesDict)
    # print "queryTermHead95KQueriesDict['of']:",queryTermHead95KQueriesDict['of']
    # print "queryTermHead95KQueriesDict['fawn']:",queryTermHead95KQueriesDict['fawn']
    inputFileHanlder.close()
    
    ################################################
    # step2: Load the aux file for the terms and ready to count all the related postings to form the denominator 
    # key: query term in string format
    # value: a list contains 4 items as following:
        # termIndexNumber
        # invertedIndexLength
        # invertedIndexBeginningPosition
        # invertedIndexEndingPosition
    queryTermInvertedIndexInfo = {}
    
    inputDataSourceFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/rawTermInvertedIndexOutput_with_trecID_added.txt"
    inputDataSourceFileHandler = open(inputDataSourceFileName,"r")
    
    inputAuxSourceFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/rawTermInvertedIndexOutputAccessAuxFile_NEW_20121128.txt"
    inputAuxSourceFileHandler = open(inputAuxSourceFileName,"r")
    
    for line in inputAuxSourceFileHandler.readlines():
        lineElements = line.strip().split(" ")
        
        termIndexNumber = int(lineElements[0])
        term = lineElements[1]
        invertedIndexLength = int(lineElements[2])
        invertedIndexBeginningPosition = int(lineElements[3])
        invertedIndexEndingPosition = int(lineElements[4])
        
        if term not in queryTermInvertedIndexInfo:
            queryTermInvertedIndexInfo[term] = []
            # index 0
            queryTermInvertedIndexInfo[term].append(termIndexNumber)
            # index 1
            queryTermInvertedIndexInfo[term].append(invertedIndexLength)
            # index 2
            queryTermInvertedIndexInfo[term].append(invertedIndexBeginningPosition)
            # index 3
            queryTermInvertedIndexInfo[term].append(invertedIndexEndingPosition)   
        else:
            print "unnormal, mark1"
            exit(1)
    
    print "len(queryTermInvertedIndexInfo):",len(queryTermInvertedIndexInfo)
    
    
    ################################################
    # This loop takes the most of the time
    # step3: Assign and do the counting NOW, and that I think it is the exciting part
    for index,term in enumerate(queryTermHead95KQueriesDict):
        if index == 1:
            break
        currentTermInvertedIndexCorrectLength = queryTermInvertedIndexInfo[term][1]
        currentTermInvertedIndexBeginningPosition = queryTermInvertedIndexInfo[term][2]
        currentTermInvertedIndexEndingPosition = queryTermInvertedIndexInfo[term][3]
        classLabelForListLength = "None"
        classLabelForImpactScore = "None"
        classLabelFor2D = "None_None"
        for i in range(len(dict1_for_list_length)-1,-1,-1):
            # print i,classLabelWithClassLowerBoundDict[ str(i) ]
            if dict1_for_list_length[ i ] <= currentTermInvertedIndexCorrectLength:
                classLabelForListLength = str(i)
                break
        
        if classLabelForListLength == "None":
            print "critical error: the term can NOT found the belonging class"
            print "term: ",term
            print "currentTermInvertedIndexCorrectLength: ",currentTermInvertedIndexCorrectLength
            print "currentTermInvertedIndexBeginningPosition: ",currentTermInvertedIndexBeginningPosition
            print "currentTermInvertedIndexEndingPosition: ",currentTermInvertedIndexEndingPosition
            # weak solution
            continue
            # strong solution
            # exit(1)
            
        print "------>term:",term
        print "------>real freq of term in head 95K queries:",queryTermHead95KQueriesDict[term]
        print "------>currentTermInvertedIndexCorrectLength:",currentTermInvertedIndexCorrectLength
        print "------>classLabelForListLength:",classLabelForListLength

        # print "------>currentTermInvertedIndexBeginningPosition:",currentTermInvertedIndexBeginningPosition
        # print "------>currentTermInvertedIndexEndingPosition:",currentTermInvertedIndexEndingPosition
        
        if currentTermInvertedIndexCorrectLength != 0:
            inputDataSourceFileHandler.seek(currentTermInvertedIndexBeginningPosition)
            
            currentDataLine = inputDataSourceFileHandler.readline()
            # print "first line:",currentDataLine
            
            while inputDataSourceFileHandler.tell() <= currentTermInvertedIndexEndingPosition:
                
                currentDataLineElements = currentDataLine.strip().split(" ")
                
                # get the posting score and ready to bucket them
                postingScoreInStringFormat = currentDataLineElements[2]
                postingScoreInFloatFormat = float(postingScoreInStringFormat)
                
                # assign the value of postingScoreInFloatFormat
                for i in range(len(dict2_for_impact_score)-1,-1,-1):
                    # print i,dict2_for_impact_score[ i ]
                    if dict2_for_impact_score[ i ] <= postingScoreInFloatFormat:
                        classLabelForImpactScore = str(i)
                        break
                
                if classLabelForImpactScore == "None":
                    print "critical error: the term can NOT found the belonging class"
                    print "term: ",term
                    print "postingScoreInFloatFormat: ",postingScoreInFloatFormat
                    # weak solution
                    # continue
                    # strong solution
                    exit(1)
                   
                classLabelFor2D = classLabelForListLength + "_" + classLabelForImpactScore
                if classLabelFor2D in twoDTableWithNumOfPostingsDict:
                    # debug section
                    print "term:",term
                    print "list length:",currentTermInvertedIndexCorrectLength
                    print "score:",postingScoreInFloatFormat
                    print "classLabelFor2D:",classLabelFor2D
                    print
                    twoDTableWithNumOfPostingsDict[classLabelFor2D] += 1
                else:
                    print "critical error."
                    exit(1)
                
                currentDataLine = inputDataSourceFileHandler.readline()
            
            # dealing with the LAST line
            # print "last line:",currentDataLine
            currentDataLineElements = currentDataLine.strip().split(" ")
            
            # get the posting score and ready to bucket them
            postingScoreInStringFormat = currentDataLineElements[2]
            postingScoreInFloatFormat = float(postingScoreInStringFormat)
            
            # assign the value of postingScoreInFloatFormat
            for i in range(len(dict2_for_impact_score)-1,-1,-1):
                # print i,dict2_for_impact_score[ i ]
                if dict2_for_impact_score[ i ] <= postingScoreInFloatFormat:
                    classLabelForImpactScore = str(i)
                    break
            
            if classLabelForImpactScore == "None":
                print "critical error: the term can NOT found the belonging class"
                print "term: ",term
                print "postingScoreInFloatFormat: ",postingScoreInFloatFormat
                # weak solution
                # continue
                # strong solution
                exit(1)
               
            classLabelFor2D = classLabelForListLength + "_" + classLabelForImpactScore
            if classLabelFor2D in twoDTableWithNumOfPostingsDict:
                # debug section
                '''
                print "term:",currentTerm20131201
                print "list length:",currentTermInvertedIndexCorrectLength
                print "score:",postingScoreInFloatFormat
                print "classLabelFor2D:",classLabelFor2D
                print
                '''
                twoDTableWithNumOfPostingsDict[classLabelFor2D] += 1
            else:
                print "critical error."
                exit(1)
        else:
            print "query term:",term,"is NOT in the lexicon."
    ################################################
    # step4: output the info from the memory to the disk
    outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/impactWithLengthAnalyze20131130Night/microCellsGenerationOfRelatedPostingsDenominator20131216Night"
    outputFileHandler = open(outputFileName,"w")    

    classLabelFor2D = "None_None"
    totalNumOfPostingsCounted = 0
    for i in range( 0,len(dict1_for_list_length) ):
        for j in range( 0,len(dict2_for_impact_score) ):
            classLabelFor2D = str(i) + "_" + str(j)
            outputFileHandler.write( classLabelFor2D + " " + str(twoDTableWithNumOfPostingsDict[classLabelFor2D]) + "\n")
            totalNumOfPostingsCounted += twoDTableWithNumOfPostingsDict[classLabelFor2D]
    print "totalNumOfPostingsCounted:",totalNumOfPostingsCounted
    outputFileHandler.close()      
    
    print "Overall Processing Statistics:"
    print "outputFileName: ",outputFileName
    print "Ends."

def step5_load_TOP10_postings_AND_bin_them_into_micro_cells(dict1_for_list_length,dict2_for_impact_score):
    twoDTableWithNumOfPostingsDict = {}
    for i in range( 0,len(dict1_for_list_length) ):
        for j in range( 0,len(dict2_for_impact_score) ):
            keyInStringFormat = str(i) + "_" + str(j)
            if keyInStringFormat not in twoDTableWithNumOfPostingsDict:
                twoDTableWithNumOfPostingsDict[keyInStringFormat] = 0
    print "len(twoDTableWithNumOfPostingsDict):",len(twoDTableWithNumOfPostingsDict)
    
    # key: query term in string format
    # value: a list contains 4 items as following:
        # termIndexNumber
        # invertedIndexLength
        # invertedIndexBeginningPosition
        # invertedIndexEndingPosition
    queryTermInvertedIndexInfo = {}
    
    inputDataSourceFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/rawTermInvertedIndexOutput_with_trecID_added.txt"
    inputDataSourceFileHandler = open(inputDataSourceFileName,"r")
    
    inputAuxSourceFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/rawTermInvertedIndexOutputAccessAuxFile_NEW_20121128.txt"
    inputAuxSourceFileHandler = open(inputAuxSourceFileName,"r")
    
    for line in inputAuxSourceFileHandler.readlines():
        lineElements = line.strip().split(" ")
        
        termIndexNumber = int(lineElements[0])
        term = lineElements[1]
        invertedIndexLength = int(lineElements[2])
        invertedIndexBeginningPosition = int(lineElements[3])
        invertedIndexEndingPosition = int(lineElements[4])
        
        if term not in queryTermInvertedIndexInfo:
            queryTermInvertedIndexInfo[term] = []
            # index 0
            queryTermInvertedIndexInfo[term].append(termIndexNumber)
            # index 1
            queryTermInvertedIndexInfo[term].append(invertedIndexLength)
            # index 2
            queryTermInvertedIndexInfo[term].append(invertedIndexBeginningPosition)
            # index 3
            queryTermInvertedIndexInfo[term].append(invertedIndexEndingPosition)   
        else:
            print "unnormal, mark1"
            exit(1)
    
    print "len(queryTermInvertedIndexInfo):",len(queryTermInvertedIndexInfo)

    # for production
    inputResultFileName = "/data1/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/raw_TOP1000_results_for_head_95K_queries_output_from_IRTK"
    # for debug
    # inputResultFileName = "/data1/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/raw_TOP1000_results_for_head_95K_queries_output_from_IRTK_head_2_queries"               
    
    TOPKValueTakenIntoConsideration = 10
    
    # 3 arguments:
    # (1) inputResultFileName
    # (3) NUM_OF_TOP_RESULTS_NEEDED_TO_EVALUATE
    getQueryIDAndResultSetGivenResultFile(inputResultFileName,TOPKValueTakenIntoConsideration,queryTermInvertedIndexInfo,dict1_for_list_length,dict2_for_impact_score,twoDTableWithNumOfPostingsDict)
    
    # CURRENT version
    # step4: output the info from the memory to the disk
    outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/impactWithLengthAnalyze20131130Night/microCellsGenerationOfTOP10PostingsNumerator20131215Afternoon"
    outputFileHandler = open(outputFileName,"w")    

    classLabelFor2D = "None_None"
    totalNumOfPostingsCounted = 0
    for i in range( 0,len(dict1_for_list_length) ):
        for j in range( 0,len(dict2_for_impact_score) ):
            classLabelFor2D = str(i) + "_" + str(j)
            outputFileHandler.write( classLabelFor2D + " " + str(twoDTableWithNumOfPostingsDict[classLabelFor2D]) + "\n")
            totalNumOfPostingsCounted += twoDTableWithNumOfPostingsDict[classLabelFor2D]
    print "totalNumOfPostingsCounted:",totalNumOfPostingsCounted
    outputFileHandler.close()      
    
    print "Overall Processing Statistics:"
    print "inputResultFileName: ",inputResultFileName
    print "outputFileName: ",outputFileName

def stepX_check_num_of_postings_counted():
    inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/impactWithLengthAnalyze20131130Night/microCellsGenerationOfTOP10PostingsNumerator20131215Afternoon"
    inputFileHandler = open(inputFileName,"r")
    totalNumOfPostingsCounted = 0
    for line in inputFileHandler.readlines():
        lineElements = line.strip().split(" ")
        totalNumOfPostingsCounted += int(lineElements[1])
    print "totalNumOfPostingsCounted:",totalNumOfPostingsCounted
    inputFileHandler.close()

def stepX_check_correctness_of_the_parallel_files():
    print "the function stepX_check_correctness_of_the_parallel_files() called."
    inputFileName1 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/xaaoutputing"
    inputFileName2 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/xaboutputing"
    inputFileName3 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/xacoutputing"
    inputFileName4 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/xadoutputing"
    inputFileName5 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/xaeoutputing"
    inputFileName6 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/xafoutputing"
    inputFileName7 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/xagoutputing"
    inputFileName8 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/xahoutputing"
    inputFileName9 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/xaioutputing"
    inputFileName10 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/xajoutputing"
    
    outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/impactWithLengthAnalyze20131130Night/microCellsGenerationOfRelatedPostingsDenominator20131216Night"
    outputFileHandler = open(outputFileName,"w")
    
    inputFileNameList = []
    inputFileNameList.append(inputFileName1)
    inputFileNameList.append(inputFileName2)
    inputFileNameList.append(inputFileName3)
    inputFileNameList.append(inputFileName4)
    inputFileNameList.append(inputFileName5)
    inputFileNameList.append(inputFileName6)
    inputFileNameList.append(inputFileName7)
    inputFileNameList.append(inputFileName8)
    inputFileNameList.append(inputFileName9)
    inputFileNameList.append(inputFileName10)
    
    totalNumOfPostingsCounted = 0
    twoDTableWithNumOfPostingsDict = {}
    
    for currentInputFileName in inputFileNameList:
        print "open the file:",currentInputFileName
        inputFileHandler = open(currentInputFileName,"r")
        for line in inputFileHandler.readlines():
            lineElements = line.strip().split(" ")
            currentClassLabel = lineElements[0]
            currentNumOfPostings = int(lineElements[1])
            
            if currentClassLabel not in twoDTableWithNumOfPostingsDict:
                twoDTableWithNumOfPostingsDict[currentClassLabel] = currentNumOfPostings
            else:
                twoDTableWithNumOfPostingsDict[currentClassLabel] += currentNumOfPostings
            
            totalNumOfPostingsCounted += currentNumOfPostings
    print "Overall Processing Stastics:"
    print "totalNumOfPostingsCounted:",totalNumOfPostingsCounted
    
    for i in range( 0,1306 ):
        for j in range( 0,763 ):
            classLabelFor2D = str(i) + "_" + str(j)
            outputFileHandler.write( classLabelFor2D + " " + str(twoDTableWithNumOfPostingsDict[classLabelFor2D]) + "\n")

    outputFileHandler.close()

    print "Overall Processing Statstics:"
    print "outputFileName:",outputFileName
    print "totalNumOfPostingsCounted:",totalNumOfPostingsCounted

# Micro-cell boundary tables, filled in by the step3/step4 loaders when those
# steps are enabled below:
#   dict1_for_list_length  : class index -> lower bound on inverted-list length
#   dict2_for_impact_score : class index -> lower bound on impact score
# NOTE(review): both are left empty in the current debug driver, which is fine
# because getQueryIDAndResultSetGivenResultFileForDebug does not use them.
dict1_for_list_length = {}
dict2_for_impact_score = {}

# The following functions can be used.
# step1_micro_cells_creation_for_list_length()
# step2_micro_cells_creation_for_impact_scores()
# step3_load_micro_cell_info_for_list_length(dict1_for_list_length)
# step4_load_micro_cell_info_for_impact_score(dict2_for_impact_score)
# step5_load_TOP10_postings_AND_bin_them_into_micro_cells(dict1_for_list_length,dict2_for_impact_score)
# step6_load_related_postings_AND_bin_them_into_micro_cells_unoptimized_version(dict1_for_list_length,dict2_for_impact_score)
# step6_load_related_postings_AND_bin_them_into_micro_cells_optimized_version(dict1_for_list_length,dict2_for_impact_score)
# stepX_check_num_of_postings_counted()
# stepX_check_correctness_of_the_parallel_files()




# Updated on 2014/01/10 afternoon for debug purpose
# Debug driver: parse each raw TOP-1000 result file, evaluating only the top
# NUM_OF_TOP_RESULTS_NEEDED_TO_EVALUATE results per query.
# NOTE(review): this runs unconditionally at import time (no
# `if __name__ == "__main__"` guard) and reads a hard-coded absolute path.
NUM_OF_TOP_RESULTS_NEEDED_TO_EVALUATE = 10
inputResultList = []

inputFileName1 = "/data1/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/raw_TOP1000_results_for_head_95K_queries_output_from_IRTK"
inputResultList.append(inputFileName1)

for currentFileName in inputResultList:
    getQueryIDAndResultSetGivenResultFileForDebug(currentFileName,NUM_OF_TOP_RESULTS_NEEDED_TO_EVALUATE)

print "program ends."








