from __future__ import division
from operator import itemgetter, attrgetter
import os
import sys
import math

# Startup banner: records the revision date and the purpose of this run.
# NOTE: "soalr" is the deliberately misspelled query term under investigation
# (see currentTerm below) -- do not "fix" the spelling in these messages.
print "program begins..."
print "Updated on 2013/08/04 night by Wei at school."
print "The sole purpose of this program is check the correctness of the 3 factors using the term 'soalr'"


def get_probablity_given_query_terms(valueTuple):
    """Return the third-factor probability: P(posting is TOP-10 | query terms).

    valueTuple is the 10-feature description of one posting, in order:
        (partial_BM25_IDF_component, partial_BM25_TF_component,
         partial_BM25_score, inverted_list_length, f_d_t, doc_len,
         term_freq_in_training_head95K_queries, term_freq_in_collection,
         posting_rank_in_doc, posting_rank_in_list)

    The value is the logistic sigmoid of the weighted feature sum under
    "Model19", trained on the larger 2013/07/19 training set.  Earlier
    weight sets (the 2013/07/15 small-training-set model, Model17 and
    Model18) lived here as commented-out code in previous revisions.

    Raises ValueError if valueTuple does not have exactly 10 elements
    (from the zip length check implied by the weight vector).
    """
    # Model19 weights.  Index 0 is the intercept (paired with a constant
    # 1.0 feature); the remaining entries line up with valueTuple above.
    # Only the two BM25 component features carry non-zero weight.
    model19_weights = (
        -5.729427121406548,    # intercept
         0.7311304467682848,   # partial BM25 score component 1 (IDF)
         0.9032571917744138,   # partial BM25 score component 2 (TF)
         0.0,                  # partial BM25 score
         0.0,                  # length of the inverted list
         0.0,                  # f_d_t (term freq in doc)
         0.0,                  # doc length in words
         0.0,                  # term freq in training head-95K queries
         0.0,                  # term freq in collection
         0.0,                  # posting rank in doc
         0.0,                  # posting rank in list
    )

    features = (1.0,) + tuple(valueTuple)
    matrixMultiplicationScore = sum(w * x for w, x in zip(model19_weights, features))

    # sigmoid(s) = 1 - 1/(1 + e^s) = 1/(1 + e^-s).  Branch on the sign so
    # math.exp is only ever called on a non-positive argument -- the old
    # single-formula version raised OverflowError for large positive scores.
    if matrixMultiplicationScore >= 0:
        return 1.0 / (1.0 + math.exp(-matrixMultiplicationScore))
    return 1.0 - 1.0 / (1.0 + math.exp(matrixMultiplicationScore))

#########################################################################################Adding component begins...since 2013/08/01
queryLengthProbabilityDict = {}
inputAuxFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/effectToMake1StFactor/queryLengthDistributionForHead95KQueries"
inputAuxFileHanlder = open(inputAuxFileName,"r")
# skip the headline
inputAuxFileHanlder.readline()

for line in inputAuxFileHanlder.readlines():
    lineElements = line.strip().split(" ")
    queryLength = int(lineElements[0])
    probabilityOfKTermQuery = float(lineElements[1])
    if queryLength not in queryLengthProbabilityDict:
        queryLengthProbabilityDict[queryLength] = probabilityOfKTermQuery

print "queryLengthProbabilityDict[1]:",queryLengthProbabilityDict[1]
print "len(queryLengthProbabilityDict):",len(queryLengthProbabilityDict)

# key: trecID
# value: tuple with Xdoc values
trecIDWithXdocValues = {}
inputFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/MLRelated/Model19/TOPK_sorted_by_queryID/set_of_documents_with_their_Xdoc_values_for_DEBUG_soalr"
inputFileHandler = open(inputFileName,"r")

# skip the headline
inputFileHandler.readline()

for line in inputFileHandler.readlines():
    lineElements = line.strip().split(" ")
    trecID = lineElements[0]
    valuesTuple = (float(lineElements[1]),float(lineElements[2]),float(lineElements[3]),float(lineElements[4]))
    if trecID not in trecIDWithXdocValues:
        trecIDWithXdocValues[trecID] = valuesTuple
print "trecIDWithXdocValues['GX003-61-0529973']:",trecIDWithXdocValues['GX003-61-0529973']
inputFileHandler.close()
#########################################################################################Adding component ends.since 2013/08/01

#########################################################################################Adding component begins...since 2013/07/31
# key: # of times this object appears
# value: the probability that this term will occur in the next query
freq1stFactorProbabilityDict = {}

# key: the terms which have been seen in the training queries
# value: which freq (Good-Turing frequency class) it belongs to
termsWithCorrespondingSpeciesBelongingToDict = {}

inputAuxFileName1 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/effectToMake1StFactor/good_turing_estimation_output_for_terms_0_1_95K_95%_probabilityInQueryAdded_20130731"
inputAuxFileName2 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/effectToMake1StFactor/freqOfFreqInQueries_0_1_95K_95%_with_query_terms_appended_sortedByFreqR"
inputAuxFileHanlder1 = open(inputAuxFileName1,"r")
inputAuxFileHanlder2 = open(inputAuxFileName2,"r")

# skip 4 not related lines
for _ in range(4):
    inputAuxFileHanlder1.readline()

# Each data line: column 0 is the frequency class, column 5 the estimated
# probability.  Each class must appear at most once; a duplicate means the
# Good-Turing table is corrupt, so bail out.
for line in inputAuxFileHanlder1:
    lineElements = line.strip().split(" ")
    currentFreq = int(lineElements[0])
    currentProbability = float(lineElements[5])
    if currentProbability != 0:
        if currentFreq not in freq1stFactorProbabilityDict:
            freq1stFactorProbabilityDict[currentFreq] = currentProbability
        else:
            print "system error"
            # BUGFIX: sys.exit instead of the site-injected exit(), which is
            # not guaranteed to exist in all interpreter setups.
            sys.exit(1)

# Each data line: "<freq> <numOfTerms> <term> <term> ...".  Map every term
# to its frequency class; the declared term count must match the listed
# terms, otherwise flag the line and continue.
numOfFreq = 0
for line in inputAuxFileHanlder2:
    lineElements = line.strip().split(" ")
    freq = int( lineElements[0] )
    numOfTerms = int( lineElements[1] )
    if numOfTerms != 0:
        numOfFreq += 1
        if numOfTerms == len(lineElements[2:]):
            for term in lineElements[2:]:
                if term not in termsWithCorrespondingSpeciesBelongingToDict:
                    termsWithCorrespondingSpeciesBelongingToDict[term] = freq
        else:
            print "critical error, mark1"
    # rows with zero terms carry no data and are skipped

inputAuxFileHanlder1.close()
inputAuxFileHanlder2.close()
#########################################################################################Adding component ends.since 2013/07/31
inputFileName = "/data3/obukai/workspace/web-search-engine-wei/polyIRIndexer/theTermSolarRawResults"
inputFileHanlder = open(inputFileName,"r")

# The (deliberately misspelled) query term whose 3-factor probability is
# being checked by this whole program.
currentTerm = "soalr"

# Column layout of each raw-results line: the BM25 IDF component sits at
# index `base`, the TF component `step` columns after it, and the trecID is
# the last field.  Loop-invariant, so hoisted out of the loop.
base = 11
step = 10

for line in inputFileHanlder:
    lineElements = line.strip().split(" ")
    currentTrecID = lineElements[-1]

    # Factor 1: P(the term occurs in a future query), looked up via the
    # term's Good-Turing frequency class (class 0 = unseen in training).
    if currentTerm in termsWithCorrespondingSpeciesBelongingToDict:
        first_factor_probability_value = freq1stFactorProbabilityDict[ termsWithCorrespondingSpeciesBelongingToDict[currentTerm] ]
    else:
        first_factor_probability_value = freq1stFactorProbabilityDict[0]

    # Factor 2: marginalize over the query length k:
    #   sum_k P(len = k) * goodTuringXDocValue^(k-1)
    if currentTrecID in trecIDWithXdocValues:
        (_,_,_,goodTuringXDocValue) = trecIDWithXdocValues[currentTrecID]
    else:
        print "System Error, mark3"
        # BUGFIX: sys.exit instead of the site-injected exit().
        sys.exit(1)

    second_factor_probability_value = 0.0
    for currentQueryLength in queryLengthProbabilityDict:
        second_factor_probability_value += queryLengthProbabilityDict[currentQueryLength] * math.pow(goodTuringXDocValue,currentQueryLength-1)

    # Factor 3: logistic TOP-10 prediction.  Only the two BM25 components
    # are read from the input line; the remaining Model19 features all
    # carry zero weight, so they are passed as 0.0.
    valueOfPartialBM25ScoreComponentPart1_IDF = float(lineElements[base])
    valueOfPartialBM25ScoreComponentPart2_TF = float(lineElements[base + step])
    valueTuple = (valueOfPartialBM25ScoreComponentPart1_IDF,valueOfPartialBM25ScoreComponentPart2_TF,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0)
    third_factor_probability_value = get_probablity_given_query_terms(valueTuple)

    three_factors_combined_probability_value = first_factor_probability_value * second_factor_probability_value * third_factor_probability_value

    print "current_term:",currentTerm
    print "current_TrecID:",currentTrecID
    print "first_factor_probability_value:",first_factor_probability_value
    print "second_factor_probability_value:",second_factor_probability_value
    print "third_factor_probability_value:",third_factor_probability_value
    print "final_3_factors_probability_combined_value:",three_factors_combined_probability_value
    print

inputFileHanlder.close()
##################################
print "program ends."
