from __future__ import division
from operator import itemgetter, attrgetter
import os
import sys
import math

# Notes:
# Updated by Wei 2013/07/31 night at school
# This program performs the Good-Turing smoothing estimation
# in order to compute the 1st factor of our formula, and then recompute the BLUE curve and RED curve.
# Updated by Wei 2013/05/03
# We need to regard Good-Turing as the baseline.
# Updated by Wei 2013/04/11
# step3 and the temp step are NOT classic Good-Turing; we only borrow the idea from it.

# Updated by Wei 2013/07/31 evening
def step6_computeTheProbabilityOfASpecifcTermAppearedInTheQuery(inputAuxFileName,inputFileName,outputFileName):
    """Append a 6th column to the good-turing estimation file: the probability
    that the term appears anywhere in the next query (not just in one slot).

    inputAuxFileName -- query-length distribution file written by step5
                        (one header line, then "queryLength probability" rows).
    inputFileName    -- good-turing estimation file written by step3
                        (4 header lines, then "r Nr r* unsmoothProb r*/N2" rows).
    outputFileName   -- copy of inputFileName with a new column named
                        "probabilityOfTheTermAppearedInTheNextQuery" appended.
    """
    # queryLength (int) -> probability of a query having that many terms
    queryLengthProbabilityDict = {}
    
    outputFileHandler = open(outputFileName,"w")
    
    inputAuxFileHanlder = open(inputAuxFileName,"r")
    # skip the headline
    inputAuxFileHanlder.readline()
    
    for line in inputAuxFileHanlder.readlines():
        lineElements = line.strip().split(" ")
        queryLength = int(lineElements[0])
        probabilityOfKTermQuery = float(lineElements[1])
        # keep only the first occurrence of each query length
        if queryLength not in queryLengthProbabilityDict:
            queryLengthProbabilityDict[queryLength] = probabilityOfKTermQuery

    # Expected query length under the distribution read above.
    # NOTE(review): the second column holds probabilities, so
    # NUM_OF_QUERIS_unknown_but_right_explanation should sum to ~1.0 and
    # AVG_QUERY_LENGTH is then sum(k * P(k)) -- confirm against step5 output.
    TOTAL_QUERY_LENGTH_unknown_but_right_explanation = 0
    AVG_QUERY_LENGTH   = 0
    NUM_OF_QUERIS_unknown_but_right_explanation = 0
    print "len(queryLengthProbabilityDict):",len(queryLengthProbabilityDict)
    for key in queryLengthProbabilityDict:
        NUM_OF_QUERIS_unknown_but_right_explanation += queryLengthProbabilityDict[key]
        TOTAL_QUERY_LENGTH_unknown_but_right_explanation += key * queryLengthProbabilityDict[key]
    AVG_QUERY_LENGTH = TOTAL_QUERY_LENGTH_unknown_but_right_explanation / NUM_OF_QUERIS_unknown_but_right_explanation
    
    print "TOTAL_QUERY_LENGTH_unknown_but_right_explanation:",TOTAL_QUERY_LENGTH_unknown_but_right_explanation
    print "NUM_OF_QUERIS_unknown_but_right_explanation:",NUM_OF_QUERIS_unknown_but_right_explanation
    print "AVG_QUERY_LENGTH:",AVG_QUERY_LENGTH
    
    inputAuxFileHanlder.close()
    
    inputFileHandler = open(inputFileName,"r")
    # copy the headline through unchanged: N2 (# of observed objects): 392955
    outputFileHandler.write( inputFileHandler.readline() )
    # copy the headline through unchanged: # of UNIQUE TERMS SEEN: 37817
    outputFileHandler.write( inputFileHandler.readline() )
    # copy the headline through unchanged: # OF UNIQUE TERMS IN GOV2 LEXICON: 37728619
    outputFileHandler.write( inputFileHandler.readline() )
    # copy the column headline through, with the new column name appended
    outputFileHandler.write( inputFileHandler.readline().strip() + " " + "probabilityOfTheTermAppearedInTheNextQuery" + "\n")
    
    for line in inputFileHandler.readlines():
        lineElements = line.strip().split(" ")
        freq = lineElements[0]
        # column 4 is the per-slot probability (r*/N2) written by step3
        probabilityOfThisTermAppearedInNextQuerySlot = float(lineElements[4])
        probabilityOfThisTermAppearedInNextQuery = 0.0
        
        '''
        # CURRENTLY NOT USED
        # formula alternative 1: Updated by Wei 2013/07/31
        # I think this formula has some problems and please just don't use
        for currentQueryLength in queryLengthProbabilityDict:
            # This formula will again, has some problems since 2013/08/01
            partialProbability = queryLengthProbabilityDict[currentQueryLength] * currentQueryLength * probabilityOfThisTermAppearedInNextQuerySlot * math.pow(1-probabilityOfThisTermAppearedInNextQuerySlot,currentQueryLength-1)
            
            probabilityOfThisTermAppearedInNextQuery += partialProbability
            print "currentQueryLength:",currentQueryLength
            print "probabilityOfThisTermAppearedInNextQuerySlot:",probabilityOfThisTermAppearedInNextQuerySlot
            print "math.pow(1-probabilityOfThisTermAppearedInNextQuerySlot,currentQueryLength-1):",math.pow(1-probabilityOfThisTermAppearedInNextQuerySlot,currentQueryLength-1)
            print "partialProbability:",partialProbability
            print "probabilityOfThisTermAppearedInNextQuery:",probabilityOfThisTermAppearedInNextQuery
        '''
        
        # CURRENTLY USED VERSION
        # formula alternative 2: Updated by Wei 2013/07/31
        # use the AVG_QUERY_LENGTH as an estimation first
        # This formula can be divided into 3 parts.
        # part1: AVG_QUERY_LENGTH
        # part2: probabilityOfThisTermAppearedInNextQuerySlot
        # part3: math.pow(1-probabilityOfThisTermAppearedInNextQuerySlot,AVG_QUERY_LENGTH-1)
        # NOTE(review): this is L * p * (1-p)^(L-1) with L = average query length;
        # it looks like a binomial "exactly one slot" term, not a strict
        # "appears at least once" probability -- confirm this is intended.
        probabilityOfThisTermAppearedInNextQuery = AVG_QUERY_LENGTH * probabilityOfThisTermAppearedInNextQuerySlot * math.pow(1-probabilityOfThisTermAppearedInNextQuerySlot,AVG_QUERY_LENGTH-1)
        # for debug
        '''
        print "AVG_QUERY_LENGTH:",AVG_QUERY_LENGTH
        print "probabilityOfThisTermAppearedInNextQuerySlot:",probabilityOfThisTermAppearedInNextQuerySlot
        print "math.pow(1-probabilityOfThisTermAppearedInNextQuerySlot,AVG_QUERY_LENGTH-1):",math.pow(1-probabilityOfThisTermAppearedInNextQuerySlot,AVG_QUERY_LENGTH-1)
        print "probabilityOfThisTermAppearedInNextQuery:",probabilityOfThisTermAppearedInNextQuery        
        '''
        
        # for debug ONLY
        '''
        if probabilityOfThisTermAppearedInNextQuerySlot != 0.0:
            print "freq:",freq
            print "probabilityOfThisTermAppearedInNextQuerySlot:",probabilityOfThisTermAppearedInNextQuerySlot
            print "probabilityOfThisTermAppearedInNextQuery:",probabilityOfThisTermAppearedInNextQuery
            if probabilityOfThisTermAppearedInNextQuerySlot > probabilityOfThisTermAppearedInNextQuery:
                print "computation of probabilityOfThisTermAppearedInNextQuery ERROR."
                exit(1)
            print
        '''
        
        # original row plus the newly computed column
        outputFileHandler.write(line.strip() + " " + str(probabilityOfThisTermAppearedInNextQuery) + "\n")
    
    inputFileHandler.close()
    outputFileHandler.close()
    
    print "inputAuxFileName:",inputAuxFileName
    print "inputFileName:",inputFileName
    print "outputFileName:",outputFileName

# Updated by Wei 2013/07/31 afternoon
def step5_computeLengthDistributionForAQuerySet(inputFileName,outputFileName):
    print "the function step5_computeLengthDistributionForAQuerySet(...) called."
    inputFileHandler = open(inputFileName,"r")
    outputFileHandler = open(outputFileName,"w")
    
    queryTermList = []
    queryLengthDistributionDict = {}
    queryLengthDistributionDictKeyList = []
    
    for line in inputFileHandler.readlines():
            # print "line:",line.strip()
            queryTermList = line.strip().split(":")[1].strip().split(" ")
            # print "queryTermList:",queryTermList
            
            data = ""
            for element in queryTermList:
                data += element + " "
            
            # print "data(old):",data
            # print "original data:",data
            
            for i in range(0,len(data)):
                # print "data[i]:",ord(data[i])
                if not ( (ord(data[i]) >= 48 and ord(data[i]) < 58) or (ord(data[i]) >= 65 and ord(data[i]) < 91) or (ord(data[i]) >= 97 and ord(data[i]) < 123) or (ord(data[i]) == 32) ):
                    # Just replace them with a space.
                    data = data[:i] + " " + data[i+1:]
        
            # print "data(new):",data
            
            currentNewQueryTermList = data.strip().split(" ")
            # using this dict can have two good things.
            # (1) distinct terms
            # (2) no EMPTY string
            currentNewQueryTermDict = {}
            
            for queryTerm in currentNewQueryTermList:
                if queryTerm.strip() != "":
                    queryTermLower = queryTerm.lower()
                    if queryTermLower not in currentNewQueryTermDict:
                        currentNewQueryTermDict[queryTermLower] = 1
            
            key = len(currentNewQueryTermDict)
            '''
            if key == 34:
                print data
                print currentNewQueryTermDict
                print len(currentNewQueryTermDict)
            '''
            if key not in queryLengthDistributionDict:
                queryLengthDistributionDict[key] = 1
            else:
                queryLengthDistributionDict[key] += 1
    
    queryLengthDistributionDictKeyList = queryLengthDistributionDict.keys()
    queryLengthDistributionDictKeyList.sort(cmp=None, key=None, reverse=False)
    
    outputLine = "queryLength" + " " + "probability(k term query)" + "\n"
    outputFileHandler.write(outputLine)
    
    # totalNumOfQueries = 0
    TOTAL_NUM_OF_QUERIES = 95000
    for key in queryLengthDistributionDictKeyList:
        currentProbability = queryLengthDistributionDict[key]/TOTAL_NUM_OF_QUERIES
        print key,queryLengthDistributionDict[key],currentProbability
        outputLine = str(key) + " " + str(currentProbability) + "\n"
        outputFileHandler.write(outputLine)
        # totalNumOfQueries += queryLengthDistributionDict[key]
    # print "totalNumOfQueries:",totalNumOfQueries
    
    print "inputFileName:",inputFileName
    print "outputFileName:",outputFileName
    
    inputFileHandler.close()
    outputFileHandler.close()

# Updated by Wei 2013/07/31 afternoon
def step4_checkCorrectnessOfTheProbabilityDistribution(inputFileName):
    # temp step for checking the correctness of the probability
    print "check whether the probability will be summing up to ONE"
    inputFileHandler = open(inputFileName,"r")
    
    # ignore the meta headlines
    inputFileHandler.readline()
    inputFileHandler.readline()
    inputFileHandler.readline()
    inputFileHandler.readline()
    
    totalProb3 = 0.0
    totalProb2 = 0.0
    
    for line in inputFileHandler.readlines():
        # print line.strip()
        lineElements = line.strip().split(" ")
        numOfspecies = int( lineElements[1] )
        
        probabilityForThisSpecies3 = float( lineElements[3] )
        probabilityForThisSpecies2 = float( lineElements[4] )
        
        totalProb3 += probabilityForThisSpecies3 * numOfspecies
        totalProb2 += probabilityForThisSpecies2 * numOfspecies
        
    
    print "total probability before using good turning smoothing method:",totalProb3
    print "total probability after using good turning smoothing method:",totalProb2
    inputFileHandler.close()


# Updated by Wei on 2013/07/31 afternoon
# step3_compute the new r* 
def step3_computeTheNewRStar(inputFileName,outputFileName):
    """Compute the smoothed frequencies r* = (r+1) * N_{r+1} / N_r
    (the Good-Turing estimate) for r = 1..20 and write a 5-column
    probability table covering r = 0..11472.

    inputFileName  -- freq-of-freq file from step2 ("r Nr term term ..." rows).
    outputFileName -- 3 metadata headlines, a column headline, then rows of
                      "r Nr r* unsmoothProbability r*/N2".
    """
    print "function do step3_computeTheNewRStar(...) called"
    # key: original freq
    # value: modified freq
    originalFreqAndModifiedFreqDict = {}
    # r -> Nr (number of distinct terms seen exactly r times)
    originalFreq_RAndoriginalFreqOfFreq_NrDict = {}
    
    inputFileHandler = open(inputFileName,"r")
    
    # legacy number here: 11473
    # NOTE(review): range(1,11473) reads only 11472 lines, while the step2
    # file has 11473 rows -- confirm whether the last row is intentionally
    # dropped.  Also note the loop variable "line" is immediately rebound
    # to the line text read from the file.
    for line in range(1,11473):
        line = inputFileHandler.readline()
        lineElements = line.strip().split(" ")
        originalFreq_R = int(lineElements[0])
        originalFreqOfFreq_Nr = int(lineElements[1])
        if originalFreq_R not in originalFreq_RAndoriginalFreqOfFreq_NrDict:
            originalFreq_RAndoriginalFreqOfFreq_NrDict[originalFreq_R] = originalFreqOfFreq_Nr
        else:
            # duplicate r value in the input file: data corruption, bail out
            print "Unexpected Behavior"
            exit(1)
    
    # Good-Turing: r* = (r+1) * N_{r+1} / N_r, computed only for r = 1..20
    # (true division is in effect via the __future__ import at module top)
    for i in range(1,21):
        modifiedFreqRStar = (i+1) * originalFreq_RAndoriginalFreqOfFreq_NrDict[i+1] / originalFreq_RAndoriginalFreqOfFreq_NrDict[i]
        originalFreqAndModifiedFreqDict[i] = modifiedFreqRStar
    
    print "originalFreq","newModifiedFreq"
    for r in originalFreqAndModifiedFreqDict:
        print r,originalFreqAndModifiedFreqDict[r]
    inputFileHandler.close()
    
    # Output columns written by THIS function (column5 is appended later by step6):
    # column0: original freq
    # column1: # of query terms belonging to that kind/species 
    # column2: modified freq using the good turing method
    # column3: probability of this term appear in the NEXT query slot (concept 0 the original unmodifed one)
    # column4: probability of this term appear in the NEXT query slot (concept 2 used in our 2ed probability factor)
    # column5: probability of this term appear in the NEXT query (concept 1 used in our 1st probability factor)
    
    # Updated by Wei on 2013/04/25
    # The following N2 value is wrong cause N2 here means, I think, # of objects observed. NOT the # of queries in the training set
    # (Wrong selection)N2 = 85000
    
    # I think the correct solution
    N1 = 85000 # N1 means how many training queries in total
    N2 = 392955 # N2 means how many training query slots we have observed in total
    NUM_OF_UNIQUE_TERMS_SEEN = 37817
    NUM_OF_UNIQUE_TERMS_IN_GOV2_LEXICON = 37728619

    
    probabilityConcept2 = 0.0
    probabilityConcept3 = 0.0
    
    outputFileHandler = open(outputFileName,"w")
    outputFileHandler.write("N2 (# of observed objects): " + str(N2) + "\n")
    outputFileHandler.write("# of UNIQUE TERMS SEEN: " + str(NUM_OF_UNIQUE_TERMS_SEEN) + "\n")
    outputFileHandler.write("# OF UNIQUE TERMS IN GOV2 LEXICON: " + str(NUM_OF_UNIQUE_TERMS_IN_GOV2_LEXICON) + "\n")
    outputFileHandler.write("r" + " " + "Nr" + " " + "r*" + " " + "unsmoothProbability" + " " + "r*/N2(VF_1.0)" + "\n")
    for i in range(0,11473):
        if i == 0:
            # r == 0: unseen terms.  Their total mass N1/N2-style comes from
            # N_1 and is spread evenly over the lexicon terms never observed.
            # question from me(2013/07/31)
            # Is it in this case, the same probability as the probabilityConcept2 do?
            
            probabilityConcept2 = originalFreq_RAndoriginalFreqOfFreq_NrDict[i+1]/N2/(NUM_OF_UNIQUE_TERMS_IN_GOV2_LEXICON - NUM_OF_UNIQUE_TERMS_SEEN)
            probabilityConcept3 = 0.0
            outputFileHandler.write(str(i) + " " + str(NUM_OF_UNIQUE_TERMS_IN_GOV2_LEXICON - NUM_OF_UNIQUE_TERMS_SEEN) + " " + "N/A" + " " + str(probabilityConcept3) + " " + str(probabilityConcept2) + "\n")
        elif i in originalFreqAndModifiedFreqDict:
            # r in 1..20: use the good-turing smoothed r*
            probabilityConcept2 = originalFreqAndModifiedFreqDict[i]/N2
            probabilityConcept3 = i/N2
            # outputFileHandler.write("mark: "+ str(i) + " " + str(originalFreqAndModifiedFreqDict[i]) + "\n")
            outputFileHandler.write(str(i) + " " + str(originalFreq_RAndoriginalFreqOfFreq_NrDict[i]) + " " + str( originalFreqAndModifiedFreqDict[i] ) + " " + str(probabilityConcept3) + " " + str(probabilityConcept2) + "\n")
        else:
            if originalFreq_RAndoriginalFreqOfFreq_NrDict[i] == 0:
                # This species has no probability
                
                probabilityConcept2 = 0.0
                probabilityConcept3 = 0.0
                outputFileHandler.write(str(i) + " " + str(originalFreq_RAndoriginalFreqOfFreq_NrDict[i]) + " " + str(i) + " " + str(probabilityConcept3) + " " + str(probabilityConcept2) + "\n")
            else:
                # r > 20 with a non-empty species: keep the raw frequency r
                probabilityConcept2 = i/N2
                probabilityConcept3 = i/N2
                outputFileHandler.write(str(i) + " " + str(originalFreq_RAndoriginalFreqOfFreq_NrDict[i]) + " " + str(i) + " " + str(probabilityConcept3) + " " + str(probabilityConcept2) + "\n")
    
    outputFileHandler.close()
    
    print "inputFileName:",inputFileName
    print "outputFileName:",outputFileName

# Updated by Wei on 2013/07/31 morning
# step2_2_computeFreqOfFreqForTermsInQueries 
# The only purpose of this function is to compute the freq of freq statistics in the context of smoothing 
def step2_2_computeFreqOfFreqForTermsInQueries(inputFileName,outputFileName):
    print "function do step2_2_computeFreqOfFreqForTermsInQueries(...) called"
    inputFileHandler = open(inputFileName,"r")
    outputFileHandler = open(outputFileName,"w")
    
    freqOfFreqCounterDict = {}
    freqOfFreqDictWithQueryTerms = {}
    
    
    for line in inputFileHandler.readlines():
        lineElements = line.strip().split(" ")
        queryTerm = lineElements[0]
        freq = int(lineElements[1])
        
        if freq not in freqOfFreqCounterDict or freq not in freqOfFreqDictWithQueryTerms:
            # extra init step
            freqOfFreqCounterDict[freq] = 0
            freqOfFreqDictWithQueryTerms[freq] = []
            # increase the counter by 1 and append the corresponding queryTerm to the corresponding list
            freqOfFreqCounterDict[freq] += 1
            freqOfFreqDictWithQueryTerms[freq].append(queryTerm)
        else:
            freqOfFreqCounterDict[freq] += 1
            freqOfFreqDictWithQueryTerms[freq].append(queryTerm)            
    
    # output freq of freq statistics 
    for currentFreq in range(1,11474):
        currentOutputLine = ""
        if currentFreq in freqOfFreqCounterDict and currentFreq in freqOfFreqDictWithQueryTerms:
            currentOutputLine = str(currentFreq) + " " + str( freqOfFreqCounterDict[currentFreq] ) + " "
            freqOfFreqDictWithQueryTerms[currentFreq].sort(cmp=None, key=None, reverse=False)
            # print currentFreq,len(freqOfFreqDictWithQueryTerms[currentFreq])
            for queryTerm in freqOfFreqDictWithQueryTerms[currentFreq]:
                currentOutputLine += queryTerm + " "
            currentOutputLine = currentOutputLine.strip() + "\n"
        else:
            currentOutputLine = str(currentFreq) + " " + "0" + " " + "0" + "\n"
        outputFileHandler.write( currentOutputLine )
    
    print "inputFileName:",inputFileName
    print "outputFileName:",outputFileName
    
    inputFileHandler.close()
    outputFileHandler.close()



# step2_1: 
# Updated by Wei on 2013/07/31. The main purpose seems to be clear and seems heavy in this logic. I will have a new function called step2_2_computeFreqOfFreqForTermsInQueries(...) to satisfy my light weight needs. 
# compute the freq of freq for this distribution
# Updated by Wei 2013/04/11 night
# CURRENT applied method2.
# Here, the alg. is to read the ranges from the plaintext file. :)
def step2_1():
    """Compute freq-of-freq statistics for query terms: a plain 1D version,
    plus a 2D version that additionally buckets each low-frequency term
    (freq < 20) by its collection-frequency range.  All input and output
    paths are hard-coded below.
    """
    print "do step2_1"
    # (lowerBound, upperBound) -> bucket uniqueID, read from the ranges file
    tupleRangeWithIDDict = {}
    inputAuxFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/smallBucketsRangesForQueryTermOverGov2Dataset_withUniqueIDAdded"
    inputAuxFileHandler = open(inputAuxFileName,"r")
    for line in inputAuxFileHandler.readlines():
        lineElements = line.strip().split(" ")
        uniqueID = lineElements[0]
        currentLowerBound = int(lineElements[3])
        currentUpperBound = int(lineElements[4])
        keyTuple = (currentLowerBound,currentUpperBound)
        if keyTuple not in tupleRangeWithIDDict:
            tupleRangeWithIDDict[keyTuple] = uniqueID
        else:
            # duplicate bucket range in the ranges file: data corruption, bail out
            print "Unexpected Behaviour"
            exit(1)
    
    print "len(tupleRangeWithIDDict):",len(tupleRangeWithIDDict)
    inputAuxFileHandler.close()
    # Now the variable here extends from 5 small buckets to 1000 buckets based on the length of the list
    currentTwoDClass = "N/A"
    
    # freq -> Nr, and freq -> list of terms (1D statistics)
    freqOfFreqDict = {}
    freqOfFreqDictWithQueryTerms = {}
    # "freq_bucketID" -> count, and -> list of terms (2D statistics)
    freqOfFreqDict2DAlsoBasedOnTermDocumentDistribution = {}
    freqOfFreqDictWithQueryTermList2DAlsoBasedOnTermDocumentDistribution = {}
    
    # pre-build the 20 x 1000 grid of "freq_bucket" keys, all zeroed
    classLabelList = []
    for i in range(0,1000):
        classLabelList.append( str(i) )
    print "len(classLabelList):",len(classLabelList)
    
    for i in range(0,20):
        for classLabel in classLabelList:
            key = str(i) + "_" + classLabel
            freqOfFreqDict2DAlsoBasedOnTermDocumentDistribution[key] = 0
    
    # term -> its frequency in the whole GOV2 collection
    queryTermsWithFreqInCollectionDict = {}
    inputAuxFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/100KQueryTermsWithTermFreqInCollection.txt"
    inputAuxFileHandler = open(inputAuxFileName,"r")
    for line in inputAuxFileHandler.readlines():
        lineElements = line.strip().split(" ")
        queryTerm = lineElements[0]
        freqInCollection = int(lineElements[1])
        if queryTerm not in queryTermsWithFreqInCollectionDict:
            queryTermsWithFreqInCollectionDict[queryTerm] = freqInCollection
        else:
            # duplicate term in the collection-frequency file; first value wins
            print "Unexpected Behaviour"
    
    print "len(queryTermsWithFreqInCollectionDict):",len(queryTermsWithFreqInCollectionDict)
    inputAuxFileHandler.close()
    
    
    inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/realFreqOfTermsIn_100KQueries_0_1_95%"
    inputFileHandler = open(inputFileName,"r")
    
    outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/freqOfFreqInQueries_0_1_95K_95%_with_query_terms_sortedByFreqR"
    outputFileHandler = open(outputFileName,"w")
    
    outputFileName2 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/freqOfFreqInQueries_0_1_95K_95%_2D_without_query_terms.txt_NO_USE"
    outputFileHandler2 = open(outputFileName2,"w")
    
    # pre-initialize every frequency row 1..11472 so the output is dense
    for i in range(1,11473):
        freqOfFreqDict[i] = 0
        freqOfFreqDictWithQueryTerms[i] = []
    
    print "len(freqOfFreqDict):",len(freqOfFreqDict)
    
    for line in inputFileHandler.readlines():
        lineElements = line.strip().split(" ")
        queryTerm = lineElements[0]
        freq = int(lineElements[1])
        # the 2D bucketing is only done for low-frequency terms (freq < 20)
        if freq < 20:
            if queryTerm in queryTermsWithFreqInCollectionDict:
                lengthOfListForLexiconTerm = queryTermsWithFreqInCollectionDict[queryTerm]
                if lengthOfListForLexiconTerm < 1:
                    print "The query term:",queryTerm,"does NOT appear in the lexicon."
                else:
                    # find the collection-frequency bucket [lower, upper) this term falls in
                    # NOTE(review): "tuple" shadows the builtin here, and if no
                    # range matches, currentTwoDClass silently keeps its value
                    # from the previous term -- confirm the ranges file covers
                    # all possible collection frequencies.
                    for tuple in tupleRangeWithIDDict:
                        (currentLowerBound,currentUpperBound) = tuple
                        if lengthOfListForLexiconTerm >= currentLowerBound and lengthOfListForLexiconTerm < currentUpperBound:
                            currentTwoDClass = tupleRangeWithIDDict[tuple]
                            break
                    
                    currentTwoDClass = str(freq) + "_" + currentTwoDClass
                    
                    print queryTerm,freq,lengthOfListForLexiconTerm,currentTwoDClass
                    
                    freqOfFreqDict2DAlsoBasedOnTermDocumentDistribution[currentTwoDClass] += 1
                    if currentTwoDClass not in freqOfFreqDictWithQueryTermList2DAlsoBasedOnTermDocumentDistribution:
                        freqOfFreqDictWithQueryTermList2DAlsoBasedOnTermDocumentDistribution[currentTwoDClass] = []
                        freqOfFreqDictWithQueryTermList2DAlsoBasedOnTermDocumentDistribution[currentTwoDClass].append(queryTerm)
                    else:
                        freqOfFreqDictWithQueryTermList2DAlsoBasedOnTermDocumentDistribution[currentTwoDClass].append(queryTerm)
            else:
                print "The query term:",queryTerm,"does NOT appear in the lexicon."
        # 1D statistics are updated for EVERY term, regardless of freq
        freqOfFreqDict[freq] += 1
        freqOfFreqDictWithQueryTerms[freq].append(queryTerm)
    
    
    # output the freq of freq
    for i in range(1,11473):
        currentOutputLine = str(i) + " " + str( freqOfFreqDict[i] ) + " "
        
        freqOfFreqDictWithQueryTerms[i].sort(cmp=None, key=None, reverse=False)
        
        print i,len(freqOfFreqDictWithQueryTerms[i])
        
        for queryTerm in freqOfFreqDictWithQueryTerms[i]:
            currentOutputLine += queryTerm + " "
        
        currentOutputLine = currentOutputLine.strip() + "\n"
        
        outputFileHandler.write( currentOutputLine )
    
    
    # print "freqOfFreqDict2DAlsoBasedOnTermDocumentDistribution:",freqOfFreqDict2DAlsoBasedOnTermDocumentDistribution
    print "len(freqOfFreqDict2DAlsoBasedOnTermDocumentDistribution):",len(freqOfFreqDict2DAlsoBasedOnTermDocumentDistribution)
    
    # write the 2D counts (terms themselves deliberately omitted)
    for key in freqOfFreqDict2DAlsoBasedOnTermDocumentDistribution:
        outputLine = key + " " + str(freqOfFreqDict2DAlsoBasedOnTermDocumentDistribution[key]) + " "
        
        #if key in freqOfFreqDictWithQueryTermList2DAlsoBasedOnTermDocumentDistribution:
        #    for queryTerm in freqOfFreqDictWithQueryTermList2DAlsoBasedOnTermDocumentDistribution[key]:
        #        outputLine += queryTerm + " "
        
        outputLine += "\n"
        outputFileHandler2.write(outputLine)
    
    inputFileHandler.close()
    outputFileHandler.close()
    outputFileHandler2.close()


# step1_computeFreqOfTermsInQueries
# program input: the input will be a specific set of queries.
# program output: the real freq of terms for a specific set of queries.
# Notes: 
    # for the head 95K queries, there are 37817 query terms
    # for the whole 100K queries, there are 38871 query terms
def step1_computeFreqOfTermsInQueries(inputFileName,outputFileName):
    print "the function step1_computeFreqOfTermsInQueries(...) called."
    
    inputFileHandler = open(inputFileName,"r")
    outputFileHandler = open(outputFileName,"w")
    
    queryTermDictWithRealFreqInQueries = {}
    queryTermList = []
    
    for line in inputFileHandler.readlines():
            # print "line:",line.strip()
            # Updated by Wei on 2013/08/13 night at school 
            # This 1 or 2 should set according to the different format of the input file
            queryTermList = line.strip().split(":")[1].strip().split(" ")
            # print "queryTermList:",queryTermList
            
            data = ""
            for element in queryTermList:
                data += element + " "
            
            # print "data(old):",data
            # print "original data:",data
            
            for i in range(0,len(data)):
                # print "data[i]:",ord(data[i])
                if not ( (ord(data[i]) >= 48 and ord(data[i]) < 58) or (ord(data[i]) >= 65 and ord(data[i]) < 91) or (ord(data[i]) >= 97 and ord(data[i]) < 123) or (ord(data[i]) == 32) ):
                    # Just replace them with a space.
                    data = data[:i] + " " + data[i+1:]
        
            # print "data(new):",data
            
            currentNewQueryTermList = data.strip().split(" ")
            currentNewQueryTermDict = {}
            
            for queryTerm in currentNewQueryTermList:
                if queryTerm.strip() != "":
                    queryTermLower = queryTerm.lower()
                    if queryTermLower not in currentNewQueryTermDict:
                        currentNewQueryTermDict[queryTermLower] = 1
            
            for queryTerm in currentNewQueryTermDict:
                if queryTerm not in queryTermDictWithRealFreqInQueries:
                    queryTermDictWithRealFreqInQueries[queryTerm] = 1
                else:
                    queryTermDictWithRealFreqInQueries[queryTerm] += 1
               
    
    queryTermList = queryTermDictWithRealFreqInQueries.keys()
    # This makes it into sorted order
    queryTermList.sort(cmp=None, key=None, reverse=False)
    for queryTerm in queryTermList:
        outputFileHandler.write(queryTerm + " " + str(queryTermDictWithRealFreqInQueries[queryTerm]) + "\n")
    
    print "inputFileName:",inputFileName
    print "outputFileName:",outputFileName
    
    inputFileHandler.close()
    outputFileHandler.close()


print "Program Begins..."

# Driver: steps 2-6 below are currently disabled (their bodies are kept
# inside triple-quoted strings); only the step1 call actually runs.

# main logic for step1 begins...
# option1:
# the format for the human generated query log
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/100KQueries_0_1_95%"
outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/effectToMake1StFactor/realFreqOfTermsIn_100KQueries_0_1_95%"
# option2:
# the format for the machine generated query log
# inputFileName = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/QueriesWhichContainHighFreqTermAmong95KMachineGenerated"
# outputFileName = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/realFreqOfTermsIn_QueriesWhichContainHighFreqTermAmong95KMachineGenerated"
step1_computeFreqOfTermsInQueries(inputFileName,outputFileName)
# main logic for step1 ends.


'''
# main logic for step2 begins...
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/effectToMake1StFactor/realFreqOfTermsIn_100KQueries_0_1_95%"
outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/effectToMake1StFactor/freqOfFreqInQueries_0_1_95K_95%_with_query_terms_appended_sortedByFreqR"
step2_2_computeFreqOfFreqForTermsInQueries(inputFileName,outputFileName)
# main logic for step2 ends.
'''

'''
# main logic for step3 begins...
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/effectToMake1StFactor/freqOfFreqInQueries_0_1_95K_95%_with_query_terms_appended_sortedByFreqR"
outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/effectToMake1StFactor/good_turing_estimation_output_for_terms_0_1_95K_95%_20130731"
step3_computeTheNewRStar(inputFileName,outputFileName)
# main logic for step3 ends.
'''

'''
# main logic for step4 begins...
# old compared file from months ago
# inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/smallBucketsEquallizationMethodByProf20130411/freqOfFreqInQueries_0_1_95K_95%_good_turing_output"
# current testing file
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/effectToMake1StFactor/good_turing_estimation_output_for_terms_0_1_95K_95%_20130731"
step4_checkCorrectnessOfTheProbabilityDistribution(inputFileName)
# main logic for step4 ends.
'''

'''
# main logic for step5 begins...
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/probabilityDistributionEstimationByProf/fourSetOfQueriesByProf20130410/100KQueries_0_1_95%"
outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/effectToMake1StFactor/queryLengthDistributionForHead95KQueries"
step5_computeLengthDistributionForAQuerySet(inputFileName,outputFileName)
# main logic for step5 ends.
'''

'''
# main logic for step6 begins...
inputAuxFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/effectToMake1StFactor/queryLengthDistributionForHead95KQueries"
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/effectToMake1StFactor/good_turing_estimation_output_for_terms_0_1_95K_95%_20130731"
outputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/effectToMake1StFactor/good_turing_estimation_output_for_terms_0_1_95K_95%_probabilityInQueryAdded_20130731"
step6_computeTheProbabilityOfASpecifcTermAppearedInTheQuery(inputAuxFileName,inputFileName,outputFileName)
# main logic for step6 ends.
'''

print "Program Ends."












