# Program inputs:
# (1) the identifiers from the c++ side

# main source from getting the rank
# (2) inputDataSourceFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/rawTermInvertedIndexOutput_with_trecID_added.txt"

# the aux file to help getting the rank
# (3) inputAuxSourceFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/rawTermInvertedIndexOutputAccessAuxFile_NEW_20121128.txt"

# Program outputs:
# output the rank and send it back to the c++ program

# Output File Analysis:
# this program should be called from a c++ side, bascailly there are two functions. One for pre-loading the materials, and one for assigning the rank to the input.
# It is that easy.

from __future__ import division
from operator import itemgetter, attrgetter
from HTMLParser import HTMLParser
from HTMLParser import HTMLParseError
import math
import sys
import os

# 2013/01/21 added
# fill the dict for aux info for the list
# Aux inverted-index info per query term (populated elsewhere; its len() is
# what pythonModuleForCallingFromC_test reports back to the C++ side).
queryTermInvertedIndexInfo = {}
# currentTermTrecIDScorePairList = []
# currentTermTrecIDScorePair_sorted = []


# Raw and sorted (docID, score) pairs for the posting list being processed.
# NOTE(review): not populated in this chunk — confirm usage in the rest of
# the file.
currentTermDocIDScorePairList = []
currentTermDocIDScorePair_sorted = []

# Updated by Wei 2013/01/27
# This is originally a list, let's change it into a dict
# I do NOT want to support the trecID, cause it is slow.
# I only want to support the docID, cause it is internal and fast
# trecIDANDRankInDocDict = {}
postingDocIDANDRankInDocDict = {}

# 2013/01/22 added
# term -> frequency in the collection; filled by the load routine below.
queryTermCollectionFreqDict = {}
# queryID -> sanitized query text; filled by the load routine below.
queryDict = {}

# This is the newly added posting feature vector dict (with respect to Jimmy)
# identified by postingTerm_trecID
postingFeatureVectorDict = {}

# 2013/01/23 added
# postingFeatureRankInTheListDict = {}
freqOfTermsInQueriesDict = {}

# Both handlers below are opened at import time and kept open for the whole
# process lifetime; nothing in this chunk closes them — flushing relies on
# process exit.  NOTE(review): consider closing them explicitly at shutdown.
outputFileName = "/home/diaosi/gov2ClearYourMindAndDoItAgain/documentsNeededToBeAdded.txt"
outputFileHandler = open(outputFileName,"w")

# Parse / Unicode errors hit while processing web pages are appended here.
outputErrorMessageFileName = "/home/diaosi/gov2ClearYourMindAndDoItAgain/errorMessage.txt"
outputErrorMessageFileHandler = open(outputErrorMessageFileName,"w")

###########################################################################################################################class begin...
class MyHTMLParser(HTMLParser):
    """Accumulates per-document statistics while an HTML page is fed in.

    All state attributes are created lazily behind hasattr() guards because
    feed() can raise mid-document; generate_statistics_report() backfills
    defaults for whatever was never initialised.  Collected state:
      * header/title/b_or_strong/a/i_or_em word lists + frequency dicts,
      * doc_words / doc_distinct_words / doc_words_dict for body text,
      * text_size and script_size character counts,
      * doc_outlinks: count of href attributes seen.
    """
    def handle_starttag(self, tag, attrs):
        """Remember the tag being entered; count each href attr as an outlink."""
        # print "Start tag:", tag
        # set the current_tag to tag
        self.current_tag = tag
        
        if hasattr(self, 'doc_outlinks'):
            pass
        else:
            self.doc_outlinks = 0
        
        for attr in attrs:
            (attr_name, attr_value) = attr
            if attr_name == "href":
                #print "     attr:", attr
                self.doc_outlinks += 1
    def handle_endtag(self, tag):
        """Clear the current-tag marker when any tag closes."""
        #print "End tag  :", tag
        # set back the current_tag to ""
        self.current_tag = ""
        
    def handle_data(self, data):
        """Sanitise text content and bucket lower-cased words by enclosing tag.

        Every character outside [0-9A-Za-z ] is replaced with a space, then
        the data is split on spaces.  Words are recorded per-tag for head,
        title, b/strong, a and i/em.  <script>/<style> content only grows
        script_size; all other content grows text_size and feeds doc_words,
        doc_words_dict and doc_distinct_words.  Data seen before any start
        tag (no current_tag attribute yet) is ignored.
        """
        if hasattr(self, 'current_tag'):
            # step0: pre-processing
            # NOTE(review): each replacement rebuilds the string by slicing,
            # so this pass is O(n^2) in the data length.
            for i in range(0,len(data)):
                # print "data[i]:",ord(data[i])
                if not ( (ord(data[i]) >= 48 and ord(data[i]) < 58) or (ord(data[i]) >= 65 and ord(data[i]) < 91) or (ord(data[i]) >= 97 and ord(data[i]) < 123) or (ord(data[i]) == 32) ):
                    # Just replace them with a space.
                    data = data[:i] + " " + data[i+1:]
            
            # Lazily create the per-tag accumulators on first use.
            if hasattr(self, 'header_words'):
                pass
            else:
                self.header_words = []
                self.header_words_dict = {}

            if hasattr(self, 'title_words'):
                pass
            else:
                self.title_words = []
                self.title_words_dict = {}
            
            if hasattr(self, 'b_or_strong_words'):
                pass
            else:
                self.b_or_strong_words = []
                self.b_or_strong_words_dict = {}
            
            if hasattr(self, 'a_words'):
                pass
            else:
                self.a_words = []
                self.a_words_dict = {}
            
            if hasattr(self, 'i_or_em_words'):
                pass
            else:
                self.i_or_em_words = []
                self.i_or_em_words_dict = {}
            
                        
            # step1: some context feature
            # Each branch appends lower-cased words to the tag's list and
            # increments its frequency dict.
            if self.current_tag == "head":
                for word in data.split(" "):
                    if word != "":
                        lowerCaseWord = word.lower()
                        self.header_words.append(lowerCaseWord)
                        if lowerCaseWord not in self.header_words_dict:
                            self.header_words_dict[lowerCaseWord] = 1
                        else:
                            self.header_words_dict[lowerCaseWord] += 1
            
            if self.current_tag == "title":
                for word in data.split(" "):
                    if word != "":
                        lowerCaseWord = word.lower()
                        self.title_words.append(lowerCaseWord)
                        if lowerCaseWord not in self.title_words_dict:
                            self.title_words_dict[lowerCaseWord] = 1
                        else:
                            self.title_words_dict[lowerCaseWord] += 1
            
            if self.current_tag == "b" or self.current_tag == "strong":
                for word in data.split(" "):
                    if word != "":
                        lowerCaseWord = word.lower()
                        self.b_or_strong_words.append(lowerCaseWord)
                        if lowerCaseWord not in self.b_or_strong_words_dict:
                            self.b_or_strong_words_dict[lowerCaseWord] = 1
                        else:
                            self.b_or_strong_words_dict[lowerCaseWord] += 1
                                     
            if self.current_tag == "a":
                for word in data.split(" "):
                    if word != "":
                        lowerCaseWord = word.lower()
                        self.a_words.append(lowerCaseWord)
                        if lowerCaseWord not in self.a_words_dict:
                            self.a_words_dict[lowerCaseWord] = 1
                        else:
                            self.a_words_dict[lowerCaseWord] += 1
            
            if self.current_tag == "i" or self.current_tag == "em":
                for word in data.split(" "):
                    if word != "":
                        lowerCaseWord = word.lower()
                        self.i_or_em_words.append(lowerCaseWord)
                        if lowerCaseWord not in self.i_or_em_words_dict:
                            self.i_or_em_words_dict[lowerCaseWord] = 1
                        else:
                            self.i_or_em_words_dict[lowerCaseWord] += 1
            
       
            # step2: text or script
            if self.current_tag == "script" or self.current_tag == "style":
                #print "The following content should NOT be indexed."
                #print "script content     :", data
                #print "script length     :",len(data)
                if hasattr(self, 'script_size'):
                    self.script_size += len(data)
                else:
                    self.script_size = len(data)
            else:
                if hasattr(self, 'text_size'):
                    self.text_size += len(data)
                else:
                    self.text_size = len(data)
                # print "text content:*",data,"*"

                # NOTE(review): data was already sanitised by the step0 loop
                # above, so this second pass is normally a no-op.
                for i in range(0,len(data)):
                    # print "data[i]:",ord(data[i])
                    if not ( (ord(data[i]) >= 48 and ord(data[i]) < 58) or (ord(data[i]) >= 65 and ord(data[i]) < 91) or (ord(data[i]) >= 97 and ord(data[i]) < 123) or (ord(data[i]) == 32) ):
                        # Just replace them with a space.
                        data = data[:i] + " " + data[i+1:]
                
                
                if hasattr(self, 'doc_words'):
                    pass
                else:
                    self.doc_words = []
                
                if hasattr(self, 'doc_distinct_words'):
                    pass
                else:
                    self.doc_distinct_words = []

                
                if hasattr(self, 'doc_words_dict'):
                    pass
                else:
                    self.doc_words_dict = {}
                                    
                #print "raw word list:",data.split(" ")
                
                for word in data.split(" "):
                    if word != "":
                        # for later use.
                        lowerCaseWord = word.lower()
                        
                        # for current use
                        # lowerCaseWord = word
                        self.doc_words.append(lowerCaseWord)
                        if lowerCaseWord not in self.doc_words_dict:
                            self.doc_words_dict[lowerCaseWord] = 1
                        else:
                            self.doc_words_dict[lowerCaseWord] += 1
                        
                        # NOTE(review): list membership test makes this
                        # O(words * distinct) overall; a set would be O(1)
                        # per word.
                        if lowerCaseWord not in self.doc_distinct_words:
                            self.doc_distinct_words.append(lowerCaseWord)
                        
                #print "doc_words:",self.doc_words
                #print "doc_distinct_words:",self.doc_distinct_words
                  
                #print "text length      :", len(data)
        else:
            pass
    def handle_comment(self, data):
        """Comments are ignored."""
        pass
        #print "Comment  :", data
    def handle_entityref(self, name):
        """Named entities are ignored."""
        pass
        #c = unichr(name2codepoint[name])
        #print "Named ent:", c
    def handle_charref(self, name):
        """Numeric character references are ignored."""
        pass
        #if name.startswith('x'):
        #    c = unichr(int(name[1:], 16))
        #else:
        #    c = unichr(int(name))
        #print "Num ent  :", c
    def handle_decl(self, data):
        """Doctype/declarations are ignored."""
        pass
        #print "Decl     :", data
    
    def generate_statistics_report(self,status):
        """Print the parse status and backfill any never-initialised attrs.

        status: 1 when feed() completed cleanly, 0 when parsing was aborted
        by an exception (see the caller's except branches).  Ensures
        doc_words, doc_distinct_words, text_size, script_size, doc_outlinks
        and doc_waterloo_spam exist so later reads cannot raise
        AttributeError.  The detailed per-field prints are commented out;
        the computed script_text_ratio is currently unused here.
        """
        print "-----> completed_parsed:",status
        # attrs check or init
        if hasattr(self, 'doc_words'):
            pass
        else:
            self.doc_words = []

        if hasattr(self, 'doc_distinct_words'):
            pass
        else:
            self.doc_distinct_words = []
        
        if hasattr(self, 'text_size'):
            pass
        else:
            self.text_size = 0
            
        if hasattr(self, 'script_size'):
            pass
        else:
            self.script_size = 0

        if hasattr(self, 'doc_outlinks'):
            pass
        else:
            self.doc_outlinks = 0
            
        if hasattr(self, 'doc_waterloo_spam'):
            pass
        else:
            self.doc_waterloo_spam = 0
                
        #print "len(doc_words):",len(self.doc_words)
        #print "len(doc_distinct_words):",len(self.doc_distinct_words)
        #print "text_size:",self.text_size
        #print "script_size:",self.script_size
        # true division is active (from __future__ import division at the top
        # of the file), so this ratio is a float
        if self.text_size != 0:
            script_text_ratio = self.script_size / self.text_size
        else:
            script_text_ratio = 0.0
        #print "script_text_ratio:",script_text_ratio
        #print "doc_outlinks:",self.doc_outlinks

        
        #print "self.header_words:",self.header_words
        #print "self.header_words_dict:",self.header_words_dict

        #print
        #print "self.title_words:",self.title_words
        #print
        #print "self.title_words_dict:",self.title_words_dict
    
        #print
        #print "self.b_or_strong_words:",self.b_or_strong_words
        #print
        #print "self.b_or_strong_words_dict:",self.b_or_strong_words_dict
    
        #print
        #print "self.a_words:",self.a_words
        #print
        #print "self.a_words_dict:",self.a_words_dict
    
        #print
        #print "self.i_or_em_words:",self.i_or_em_words
        #print
        #print "self.i_or_em_words_dict:",self.i_or_em_words_dict 
        
        #print
        #print "self.doc_words:",self.doc_words
        
        #print
        #print "self.doc_distinct_words:",self.doc_distinct_words
        
        #print 
        #print "self.doc_words_dict:",self.doc_words_dict
###########################################################################################################################class end.

def pythonModuleForCallingFromC_hello():
    """Smoke-test entry point for the C++ embedding: emit a greeting to stdout."""
    sys.stdout.write("hello\n")

def pythonModuleForCallingFromC_test():
    """Report how many query terms currently have aux inverted-index info.

    Used by the C++ side as a quick check that pre-loading happened.
    """
    loadedEntryCount = len(queryTermInvertedIndexInfo)
    return loadedEntryCount

def pythonModuleForCallingFromC_loadTheAuxInfoIntoMemoryForHighLevelFeaturesIncludingRankInTheDoc():
    
    # *******************step1: load the queryTermCollectionFreqDict
    # notes:
    # updated by Wei 2013/01/12.
    
    # current answer
    # after we load the whole dict, it will occupy around 8% of the total memory in pangolin
    # we need to load this dict cause when doing rank_in_the_doc, we need this dict for help
    
    # old answers
    # we do NOT need to use the queryTermCollectionFreqDict cause we have that data output from the polyIRToolkit
    # we do NOT need to run this part of logic and this can save time.
    # currently, queryTermCollectionFreqDict is just empty and still being used and checked. Furture development should eliminate the logic belonging to queryTermCollectionFreqDict
    
    # load the wholeLexiconTermsWithTermFreqInCollection.txt into the main memory, cause it is needed to compute the posting_rank_in_the_doc
    
    # option 1
    # in production
    # inputQueryTermFreqFileName = "/home/diaosi/gov2ClearYourMindAndDoItAgain/wholeLexiconTermsWithTermFreqInCollection.txt"
    
    # in debug (ONLY contains the words in the query log)
    inputQueryTermFreqFileName = "/home/diaosi/gov2ClearYourMindAndDoItAgain/freqOfQueryTermsInCollection.txt"
    
    # option 2
    # NULL
    
    inputQueryTermFreqHandler = open(inputQueryTermFreqFileName,"r")
    
    for line in inputQueryTermFreqHandler.readlines():
        elements = line.strip().split(" ")
        queryTerm = elements[0]
        queryTermIDF = int(elements[1])
        if queryTerm not in queryTermCollectionFreqDict:
            queryTermCollectionFreqDict[queryTerm] = queryTermIDF
    
    print "----->","len(queryTermCollectionFreqDict):",len(queryTermCollectionFreqDict)
    inputQueryTermFreqHandler.close()
    
    # *******************step2: load the queryDict
    # note: Now, it is NOT only gov2 150 queries but also has the effeciency task queries as well
    
    # option1
    # human judged 150 queries ONLY
    # data5 is currently NOT accessable. 2013/01/21
    # inputQueryFileName = "/data5/team/obukai/the_new_trip_of_feature_generation/gov2/final-150Gov2Queries.txt"
    
    # option2
    # note: This file Should contain all the queries in the gov2 dataset being evaluated
    inputQueryFileName = "/home/diaosi/gov2ClearYourMindAndDoItAgain/allQueriesIncludingHumanJudgedAndEffeciencyTask_with_reordered_queryID"
    
    inputQueryHandler = open(inputQueryFileName,"r")
    for line in inputQueryHandler.readlines():
        elements = line.strip().split(":")
        queryID = int(elements[0])
        
        data = elements[1]
        data = data.lower()
        
        for i in range(0,len(data)):
            # print "data[i]:",ord(data[i])
            if not ( (ord(data[i]) >= 48 and ord(data[i]) < 58) or (ord(data[i]) >= 65 and ord(data[i]) < 91) or (ord(data[i]) >= 97 and ord(data[i]) < 123) or (ord(data[i]) == 32) ):
                # Just replace them with a space.
                data = data[:i] + " " + data[i+1:]
        
        queryContent = data
    
        if queryID not in queryDict:
            queryDict[queryID] = queryContent
    
    print "----->","len(queryDict):",len(queryDict)
    
def pythonModuleForCallingFromC_generateHighLevelFeaturesIncludingRankInTheDoc(postingTerm,trecID):
    # print "Begins...**********"
    current_posting_rank_in_docWei = -1
    human_judge_query_location_base_path = "/home/diaosi/gov2ClearYourMindAndDoItAgain/human_judge_web_pages_gov2_ALL/"
    previousAbsolutePathForWebPageFileName = ""
    absolutePathForWebPageFileName = ""
    
    
    # yes, the IR system can provide the trecID
    # extra new line
    print
    print "----->","postingTerm:",postingTerm,"trecID:",trecID
    
    trecIDElements = trecID.split("-")
    # cluewebTag = trecIDElements[0]
    segmentNumber = trecIDElements[0]
    compressedFileNumber = trecIDElements[1]
    sequenceNumber = trecIDElements[2]
    
    pathLookFor = human_judge_query_location_base_path + segmentNumber
    # print "----->pathLookFor:",pathLookFor
    
    fileNamePrefixLookingFor = trecID
    
    currentDocumentFeatureVectorDictIdentifier = postingTerm+"_"+trecID
    
    if currentDocumentFeatureVectorDictIdentifier not in postingFeatureVectorDict:
        # print "pathLookFor:",pathLookFor
        foundTag = False    
        for dirname, dirnames, filenames in os.walk(pathLookFor):
            for filename in filenames:
                if filename.startswith(fileNamePrefixLookingFor):
                    foundTag = True
                    # print filename
                    absolutePathForWebPageFileName = os.path.join(dirname, filename)
                    # print "absolutePathForWebPageFileName:",absolutePathForWebPageFileName
                    if absolutePathForWebPageFileName != previousAbsolutePathForWebPageFileName:
                        print "-----> open",absolutePathForWebPageFileName
                        absolutePathForWebPageFileHandler = open(absolutePathForWebPageFileName,"r")
                        
                        # new statement
                        previousAbsolutePathForWebPageFileName = absolutePathForWebPageFileName
                        
                        file_content = absolutePathForWebPageFileHandler.read()
                        absolutePathForWebPageFileHandler.close()
                        # print "**********"
                        # print "check file_content...PASSED"
                        # print file_content
                        # print "**********"
                        
                        web_page_content = file_content
                        
                        parser = MyHTMLParser()
                        
                        try:
                            parser.feed(web_page_content)
                            parser.generate_statistics_report(1)
                        except HTMLParseError,e:
                            print "----->",trecID,":",e.msg,":",e.lineno,":",e.offset
                            outputErrorMessageFileHandler.write("----->" + trecID + ":" + e.msg + ":" + str(e.lineno) + ":" + str(e.offset) + "\n")
                            # do not handle the error message.
                            parser.generate_statistics_report(0)
                        except UnicodeDecodeError,e:
                            print "----->",e
                            outputErrorMessageFileHandler.write("----->" + trecID + " UnicodeDecodeError Error." "\n")
                            # do not handle the error message.
                            parser.generate_statistics_report(0)
    
                        if len(parser.doc_words) != 0:
                            pass
                        else:
                            # Let's do the most common filter
                            # step1: filter out all the special chars.
                            data = web_page_content
                            data = data.lower()
                            
                            for i in range(0,len(data)):
                                # print "data[i]:",ord(data[i])
                                if not ( (ord(data[i]) >= 48 and ord(data[i]) < 58) or (ord(data[i]) >= 65 and ord(data[i]) < 91) or (ord(data[i]) >= 97 and ord(data[i]) < 123) or (ord(data[i]) == 32) ):
                                    # Just replace them with a space.
                                    data = data[:i] + " " + data[i+1:]
                            
                            web_page_content_refined = data
                            
                            # print web_page_content_refined
                            
                            documentWordList = []
                            documentUniqueTermList = []
                            documentWordsDict = {}
                            for documentTerm in web_page_content_refined.strip().split(" "):
                                if documentTerm.strip() != "":
                                    documentWordList.append(documentTerm)
                                    
                                    if documentTerm not in documentWordsDict:
                                        documentWordsDict[documentTerm] = 1
                                    else:
                                        documentWordsDict[documentTerm] += 1
                                
                                    if documentTerm not in documentUniqueTermList:
                                        documentUniqueTermList.append(documentTerm)
                            
                            # assignment
                            parser.doc_words = documentWordList
                            parser.doc_distinct_words = documentUniqueTermList
                            parser.doc_words_dict = documentWordsDict
                            
                            # print "parser.doc_words_dict:",parser.doc_words_dict
                            
                            for documentWord in documentWordList:
                                parser.text_size += len(documentWord)
                            
                    else:
                        print "----->reuse",absolutePathForWebPageFileName,"in memory"
    
                    doc_wordsWei = len(parser.doc_words)
                    doc_distinct_wordsWei = len(parser.doc_distinct_words)
                    text_sizeWei = parser.text_size
                    script_sizeWei = parser.script_size
                    
                    # index 3 This program generated global NONE
                    # outputFileFeaturePart1Handler.write(str( doc_wordsWei ) + " ")
                    
                    # index 4 This program generated global 10
                    # outputFileFeaturePart1Handler.write(str( doc_distinct_wordsWei )  + " ")
                    
                    # index 5 This program generated global 11
                    # outputFileFeaturePart1Handler.write(str( text_sizeWei )  + " ")
                    
                    # index 6 This program generated global 12
                    # outputFileFeaturePart1Handler.write(str( script_sizeWei )  + " ")
                        
                    
                    if parser.text_size != 0:
                        script_text_ratio = parser.script_size / parser.text_size
                    else:
                        script_text_ratio = 0.0
                    
                    
                    script_text_ratioWei = script_text_ratio
                    doc_outlinksWei = parser.doc_outlinks
                    
                    # index 7 Roman global 13
                    # outputFileFeaturePart1Handler.write(str( script_text_ratioWei )  + " ")
                    
                    # index 8 Roman global 14
                    # outputFileFeaturePart1Handler.write(str( doc_outlinksWei )  + " ")
                    
                    
                    
                    # collection freq for the query terms.
                    veryLargeNumber = 999999999
                    verySmallNumber = -999999999
                    current_term_col_freq = 0
                    
                    if hasattr(parser, 'header_words_dict'):
                        pass
                    else:
                        parser.header_words_dict = {}
    
    
                    if hasattr(parser, 'title_words_dict'):
                        pass
                    else:
                        parser.title_words_dict = {}
    
                    if hasattr(parser, 'b_or_strong_words_dict'):
                        pass
                    else:
                        parser.b_or_strong_words_dict = {}                                  
    
                    if hasattr(parser, 'a_words_dict'):
                        pass
                    else:
                        parser.a_words_dict = {}
    
                    if hasattr(parser, 'i_or_em_words'):
                        pass
                    else:
                        parser.i_or_em_words = []
    
                    if hasattr(parser, 'doc_words_dict'):
                        pass
                    else:
                        parser.doc_words_dict = {}
                    
                    # Let's do the LM feature(4).
                    MIOU = 2000
                    
                    
                    
                    
                    
                    # BM25 parameters: see 'http://en.wikipedia.org/wiki/Okapi_BM25'.
                    kBm25K1 =  2.0  # k1
                    kBm25B = 0.75   #b
                    # We can precompute a few of the BM25 values here.
                    kBm25NumeratorMul = kBm25K1 + 1
                    kBm25DenominatorAdd = kBm25K1 * (1 - kBm25B)
                    collection_average_doc_len = 1102 # This number comes from the cluweb2009 CatB index info
                    collection_total_num_docs = 50220420 # This number comes from the cluweb2009 CatB index info
                    kBm25DenominatorDocLenMul = kBm25K1 * kBm25B / collection_average_doc_len
                    
                    
                    idf_t_dict = {} # This dict is for storing the idf value for each query term
                    
                    #####################################################################################
                    '''
                    # feature generation: current_term_col_freq
                    if postingTerm in queryTermCollectionFreqDict:
                        current_term_col_freq = queryTermCollectionFreqDict[postingTerm]
                    else:
                        current_term_col_freq = 0
                    
                    # index 9 Roman global NONE
                    current_term_col_freqWei = current_term_col_freq
                    # outputFileFeaturePart1Handler.write(str( current_term_col_freqWei ) + " ")
                    '''
                    #####################################################################################
                    
                    # feature generation: whether_current_term_in_header
                    # feature generation: whether_current_term_in_title
                    # feature generation: whether_current_term_in_bold
                    # feature generation: whether_current_term_in_header
                    # feature generation: whether_current_term_in_url
                    # feature generation: whether_current_term_in_italic
                    
                    if postingTerm in parser.header_words_dict:
                        whether_current_term_in_header = True
                    else:
                        whether_current_term_in_header = False
    
                    if postingTerm in parser.title_words_dict:
                        whether_current_term_in_title = True
                    else:
                        whether_current_term_in_title = False
    
                    if postingTerm in parser.b_or_strong_words_dict:
                        whether_current_term_in_bold = True
                    else:
                        whether_current_term_in_bold = False
    
                    if postingTerm in parser.a_words_dict:
                        whether_current_term_in_url = True
                    else:
                        whether_current_term_in_url = False             
                                   
                    if postingTerm in parser.i_or_em_words:
                        whether_current_term_in_italic = True
                    else:
                        whether_current_term_in_italic = False               
    
                        
                    whether_current_term_in_headerWei = whether_current_term_in_header
                    whether_current_term_in_titleWei = whether_current_term_in_title
                    whether_current_term_in_boldWei = whether_current_term_in_bold
                    whether_current_term_in_urlWei = whether_current_term_in_url
                    whether_current_term_in_italicWei = whether_current_term_in_italic
                    
                    # index 10 This program generated global 15
                    # index 11 This program generated global 16
                    # index 12 This program generated global 17
                    # index 13 This program generated global 18
                    # index 14 This program generated global 19
                    
                    # outputFileFeaturePart1Handler.write(str( whether_current_term_in_headerWei ) + " ")
                    # outputFileFeaturePart1Handler.write(str( whether_current_term_in_titleWei ) + " ")
                    # outputFileFeaturePart1Handler.write(str( whether_current_term_in_boldWei ) + " ")
                    # outputFileFeaturePart1Handler.write(str( whether_current_term_in_urlWei ) + " ")
                    # outputFileFeaturePart1Handler.write(str( whether_current_term_in_italicWei ) + " ")
                    
                    #####################################################################################
                    '''
                    # feature generation: current_term_freq_in_doc
                    current_term_freq_in_doc = 0
                    if postingTerm in parser.doc_words_dict:
                        current_term_freq_in_doc = parser.doc_words_dict[postingTerm]
                    
                    # index 15 This program generated global NONE    
                    current_term_freq_in_docWei = current_term_freq_in_doc
                    # outputFileFeaturePart1Handler.write(str( current_term_freq_in_docWei ) + " ")
                    '''
                    #####################################################################################
                    
                    
                    
                    #####################################################################################
                    '''
                    # feature generation: current_term_rel_freq_in_doc
                    current_term_rel_freq_in_doc = 0
                    if len(parser.doc_words) != 0:
                        current_term_rel_freq_in_doc = current_term_freq_in_doc / len(parser.doc_words)
                    
                    # index 16 This program generated global NONE  
                    current_term_rel_freq_in_docWei = current_term_rel_freq_in_doc
                    # outputFileFeaturePart1Handler.write( str( current_term_rel_freq_in_docWei ) + " ")
                    '''
                    #####################################################################################
                    
                    
                    #####################################################################################
                    
                    # feature generation: current_term_BM25
                    # This part is doing the BM25 feature
                    current_term_BM25 = 0
                    idf_t = math.log10(1 + (collection_total_num_docs - queryTermCollectionFreqDict[postingTerm] + 0.5) / (queryTermCollectionFreqDict[postingTerm] + 0.5))
                    idf_t_dict[postingTerm] = idf_t
                    if postingTerm in parser.doc_words_dict:
                        partial_BM25 = idf_t_dict[postingTerm] * (parser.doc_words_dict[postingTerm] * kBm25NumeratorMul) / (parser.doc_words_dict[postingTerm] + kBm25DenominatorAdd + kBm25DenominatorDocLenMul * len(parser.doc_words))
                        current_term_BM25 = partial_BM25
                    
                    # index 17 This program generated global NONE 
                    current_term_BM25Wei = current_term_BM25
                    # outputFileFeaturePart1Handler.write(str( current_term_BM25Wei ) + " ")
                    
                    #####################################################################################
                    
                    
                    #####################################################################################
                    '''
                    # feature generation: current_term_QL
                    # This part is doing the LM feature
                    if postingTerm in parser.doc_words_dict:
                        partial_LM_component_1 = parser.doc_words_dict[postingTerm] + MIOU * (queryTermCollectionFreqDict[postingTerm] / collection_total_num_docs)
                    if postingTerm not in parser.doc_words_dict:
                        partial_LM_component_1 = MIOU * (queryTermCollectionFreqDict[postingTerm] / collection_total_num_docs)
                    
                    partial_LM_component_2 = len(parser.doc_words) + MIOU
                    
                    # Here, the base is 10
                    partial_LM_score = math.log(partial_LM_component_1 / partial_LM_component_2)
                    current_term_QL = partial_LM_score
                    
                    # index 18 This program generated global None
                    current_term_QLWei = current_term_QL
                    # outputFileFeaturePart1Handler.write(str( current_term_QLWei ) + " ")
                    '''                          
                    #####################################################################################
                    
                    
                    #####################################################################################
                    '''
                    # This is a try to generate distribution score among all query terms.
                    onlyOneQueryTermList = []
                    onlyOneQueryTermList.append(postingTerm)
                    generate_distribution_features(onlyOneQueryTermList,parser.doc_words,outputFileFeaturePart1Handler)
                    '''
                    #####################################################################################
    
    
    
                    ###############################################################################################
                    
                    # Notes:
                    # Updated by Wei, 2013/01/10 night
                    # I can COPY this part of logic into the new version of this file, in order to do ONE scan of each document, all the data needed will be generated.
                    # Now come to the most exciting part of tonight, let's get the rank DONE 2012/11/28
                    # print "parser.doc_words:",parser.doc_words
                    
                    
                    # special part of logic implemented for debugging
                    # specialOutputFileName = "/home/obukai/workspace/polyIRToolkit/scripts/src/pythonScripts/gov2/specialTempLexiconGenerator.txt"
                    # specialOutputFileHandler = open(specialOutputFileName,"w")
                    
                    # for compared_word in parser.doc_words:
                    #    specialOutputFileHandler.write(compared_word + " " + str( random.randint(1, 10000000) ) + "\n")
                    # specialOutputFileHandler.close()
                    
    
                    # Originally, I am NO.1 in the list, this value should be set to 1, NOT 0
                    current_posting_rank_in_doc = 1
                    
                    
                    for compared_word in parser.doc_words:
                        if compared_word in queryTermCollectionFreqDict:
                            compared_term_BM25 = 0
                            idf_t = math.log10(1 + (collection_total_num_docs - queryTermCollectionFreqDict[compared_word] + 0.5) / (queryTermCollectionFreqDict[compared_word] + 0.5))
                            idf_t_dict[compared_word] = idf_t
                            if compared_word in parser.doc_words_dict:
                                partial_BM25 = idf_t_dict[compared_word] * (parser.doc_words_dict[compared_word] * kBm25NumeratorMul) / (parser.doc_words_dict[compared_word] + kBm25DenominatorAdd + kBm25DenominatorDocLenMul * len(parser.doc_words))
                                compared_term_BM25 = partial_BM25
                            
                                # index 17 Roman global NONE 
                                compared_term_BM25Wei = compared_term_BM25
                                
                                # print compared_word,":",compared_term_BM25Wei
                                
                                
                                if compared_term_BM25Wei > current_term_BM25Wei:
                                    current_posting_rank_in_doc += 1
                                
                                
                            
                        else:
                            # the term is NOT in queryTermCollectionFreqDict, so we do NOT have enough info to count the rank in the inverted index
                            pass
                    
                    # put the actual write feature into the file last in order to varify well.
                    current_posting_rank_in_docWei = current_posting_rank_in_doc
                    
                    returnFeatureStringWei = str(doc_distinct_wordsWei) + " " + str(text_sizeWei) + " " + str(script_sizeWei) + " " + str(script_text_ratioWei) + " " + str(doc_outlinksWei) + " " + str(whether_current_term_in_headerWei) + " " + str(whether_current_term_in_titleWei) + " " + str(whether_current_term_in_boldWei) + " " + str(whether_current_term_in_urlWei) + " " + str(whether_current_term_in_italicWei) + " " + str(current_posting_rank_in_docWei)
                    
                    # for testing
                    print "returnFeatureStringWei:",returnFeatureStringWei
                    postingFeatureVectorDict[currentDocumentFeatureVectorDictIdentifier] = returnFeatureStringWei
                    
                    print "-----> compute and return the HIGH level features"
                    
                    # print "current_posting_rank_in_docWei:",current_posting_rank_in_docWei
                    
                    ###############################################################################################
                    # index 10 This program generated global 20
                    # outputFileFeaturePart1Handler.write( str(labelRoman) + " " )
                    # outputFileFeaturePart1Handler.write( str(current_posting_rank_in_docWei) + " " )
                    
                    # Notes:
                    # updated 2013/01/13 by Wei. Test whether these two BM25(one from polyIRToolkit and one from High Level Python)
                    # This part just for testing.
                    # outputFileFeaturePart1Handler.write( str(current_term_BM25Wei) + " " )
                    # outputFileFeaturePart1Handler.write( str(current_posting_rank_in_doc_for_testWei) + " " )
                    # outputFileFeaturePart1Handler.write( str(partialBM25Roman) + " " )
                    
                    # outputFileFeaturePart1Handler.write("\n")
            
                    #####################################################################################
                    '''
                    # The part of logic is for outputing the term vector of this document given to Juan.
                    outputTermVectorFileName = "/data4/team/weijiang/human_judge_web_pages_plain_text_only_words/" + trecID.split("-")[1] + "/" + filename.split(".")[0] + "_plain_text.txt"
                    outputTermVectorFileHandler = open(outputTermVectorFileName,"w")
                    for word in parser.doc_words:
                        outputTermVectorFileHandler.write(word + " ")
                    outputTermVectorFileHandler.close()
                    '''
                    #####################################################################################
        if not foundTag:
            print "-----> document NOT found and return the fake HIGH level features"
            outputFileHandler.write(fileNamePrefixLookingFor + "\n")
            returnFeatureStringWei = "-1 -1 -1 -1 -1 False False False False False -1"
            postingFeatureVectorDict[currentDocumentFeatureVectorDictIdentifier] = returnFeatureStringWei
    else:
        print "-----> direct return the HIGH level features"
        returnFeatureStringWei = postingFeatureVectorDict[currentDocumentFeatureVectorDictIdentifier]

    # print "current_posting_rank_in_docWei(in python):",current_posting_rank_in_docWei
    # print "Ends.**********"
    
    # Updated on 2013/01/23 by Wei
    # return the whole set of RICH features

    
    return returnFeatureStringWei
    

def pythonModuleForCallingFromC_loadTheAuxInfoIntoMemoryForFeatureRankInTheList():
    """Load the inverted-index aux file into queryTermInvertedIndexInfo.

    Each line of the aux file has the form:
        termIndexNumber term invertedIndexLength beginPos endPos
    For every term a 4-element list is stored in the module-level dict:
        [0] termIndexNumber
        [1] invertedIndexLength
        [2] invertedIndexBeginningPosition
        [3] invertedIndexEndingPosition

    Returns the number of distinct terms loaded.
    Exits the process if a duplicate term is seen (corrupt aux file).
    """
    # the aux file for look up the source
    inputAuxSourceFileName = "/home/diaosi/gov2ClearYourMindAndDoItAgain/rawTermInvertedIndexOutputAccessAuxFile_NEW_20121128.txt"
    inputAuxSourceFileHandler = open(inputAuxSourceFileName, "r")
    try:
        # iterate the file lazily instead of materializing readlines()
        for line in inputAuxSourceFileHandler:
            lineElements = line.strip().split(" ")

            termIndexNumber = int(lineElements[0])
            term = lineElements[1]
            invertedIndexLength = int(lineElements[2])
            invertedIndexBeginningPosition = int(lineElements[3])
            invertedIndexEndingPosition = int(lineElements[4])

            if term in queryTermInvertedIndexInfo:
                # a duplicate term means the aux file is corrupt; abort hard,
                # as the original did (sys.exit instead of the interactive
                # helper exit())
                print("unnormal, mark1")
                sys.exit(1)

            # indices 0-3 as documented in the docstring above
            queryTermInvertedIndexInfo[term] = [termIndexNumber,
                                                invertedIndexLength,
                                                invertedIndexBeginningPosition,
                                                invertedIndexEndingPosition]
    finally:
        # the original leaked this handle; always close it
        inputAuxSourceFileHandler.close()

    print("-----> len(queryTermInvertedIndexInfo): %d" % len(queryTermInvertedIndexInfo))
    return len(queryTermInvertedIndexInfo)
  
def pythonModuleForCallingFromC_loadTheActualQueryTermListDataIntoMemory(term):
    """Load the posting list for *term* into the module-level caches.

    Reads the byte range recorded in queryTermInvertedIndexInfo[term] from
    the raw posting file, collects (docID, score) pairs, sorts them by score
    descending and fills postingDocIDANDRankInDocDict with
    "<term>_<docID>" -> 1-based rank.

    Returns the number of ranks built, or 0 when the term is unknown or has
    an empty posting list.
    """
    # The init procedure: reset the module-level caches for the new term.
    global currentTermDocIDScorePairList
    global currentTermDocIDScorePair_sorted
    global postingDocIDANDRankInDocDict

    del currentTermDocIDScorePairList[:]
    del currentTermDocIDScorePair_sorted[:]
    postingDocIDANDRankInDocDict.clear()

    # Look up where this term's posting list lives in the main source file.
    # Only a missing term is expected here, so catch KeyError specifically
    # (the original bare except could mask unrelated bugs).
    try:
        currentTermInvertedIndexCorrectLength = queryTermInvertedIndexInfo[term][1]
        currentTermInvertedIndexBeginningPosition = queryTermInvertedIndexInfo[term][2]
        currentTermInvertedIndexEndingPosition = queryTermInvertedIndexInfo[term][3]
    except KeyError:
        currentTermInvertedIndexCorrectLength = 0
        currentTermInvertedIndexBeginningPosition = -1
        currentTermInvertedIndexEndingPosition = -1

    print("-----> currentTermInvertedIndexCorrectLength: %s" % currentTermInvertedIndexCorrectLength)

    if currentTermInvertedIndexCorrectLength == 0:
        # nothing to read for this term
        return 0

    # the main source file; only opened when there is actually data to read
    # (the original opened it unconditionally and never closed it)
    inputDataSourceFileName = "/home/diaosi/gov2ClearYourMindAndDoItAgain/rawTermInvertedIndexOutput_with_trecID_added.txt"
    inputDataSourceFileHandler = open(inputDataSourceFileName, "r")
    try:
        inputDataSourceFileHandler.seek(currentTermInvertedIndexBeginningPosition)

        # Consume every line of the byte range, including the line on which
        # tell() first moves past the ending position -- this reproduces the
        # original loop + "last line" handling in a single loop.
        # Each line is "<trecID> <docID> <score>"; trecID is ignored, only
        # the internal docID (fast) and the score are kept.
        currentDataLine = inputDataSourceFileHandler.readline()
        while True:
            currentDataLineElements = currentDataLine.strip().split(" ")
            docIDInIRSystem = currentDataLineElements[1]
            postingScore = float(currentDataLineElements[2])
            currentTermDocIDScorePairList.append((docIDInIRSystem, postingScore))

            if inputDataSourceFileHandler.tell() > currentTermInvertedIndexEndingPosition:
                break
            currentDataLine = inputDataSourceFileHandler.readline()
    finally:
        # the original leaked this handle
        inputDataSourceFileHandler.close()

    print("-----> read the list from disk:DONE")

    # Rank by score, highest first; ties keep file order (sorted is stable).
    currentTermDocIDScorePair_sorted = sorted(currentTermDocIDScorePairList, key=itemgetter(1), reverse=True)
    print("-----> sort the list in memory:DONE")
    for index, pairTuple in enumerate(currentTermDocIDScorePair_sorted):
        (currentDocID, score) = pairTuple
        currentDocumentFeatureVectorDictIdentifier = term + "_" + currentDocID
        if currentDocumentFeatureVectorDictIdentifier not in postingDocIDANDRankInDocDict:
            # ranks are 1-based
            postingDocIDANDRankInDocDict[currentDocumentFeatureVectorDictIdentifier] = index + 1
        else:
            print("system error. Mark: duplicated docID")
    print("-----> build the dict in memory:DONE")
    # print "postingDocIDANDRankInDocDict:",postingDocIDANDRankInDocDict
    lengthOfResults = len(postingDocIDANDRankInDocDict)
    print("-----> # Of posting rank in dict: %d" % lengthOfResults)
    return lengthOfResults

'''
# currently, do NOT need this function.
# use the trecID as the unified identifier
def pythonModuleForCallingFromC_assignTheRankByTrecID(postingTerm,trecID):
    # currently, the term argument is NOT used and may can be used for the varification purpose.
    currentPostingOutputRankInList = 0
    currentDocumentFeatureVectorDictIdentifier = postingTerm+"_"+trecID
    
    if currentDocumentFeatureVectorDictIdentifier not in postingFeatureRankInTheListDict:
        print "-----> compute and return the rank_in_the_list"
        
        try:
            currentPostingOutputRankInList = currentPostingOutputRankInList = trecIDANDRankInDocDict.index(trecID) + 1
            # print "currentPostingOutputRankInList:",currentPostingOutputRankInList
        except:
            currentPostingOutputRankInList = -1
        
        postingFeatureRankInTheListDict[currentDocumentFeatureVectorDictIdentifier] = currentPostingOutputRankInList
    else:
        print "-----> direct return the rank_in_the_list"
        currentPostingOutputRankInList = postingFeatureRankInTheListDict[currentDocumentFeatureVectorDictIdentifier]
        
    return currentPostingOutputRankInList
'''


def pythonModuleForCallingFromC_assignTheRankByDocID(postingTerm, docID):
    """Return the 1-based rank of (postingTerm, docID) in the current
    posting list, or -1 when the pair is unknown.

    Looks up "<postingTerm>_<docID>" in postingDocIDANDRankInDocDict, which
    pythonModuleForCallingFromC_loadTheActualQueryTermListDataIntoMemory
    must have filled beforehand for this term.
    """
    currentDocumentFeatureVectorDictIdentifier = postingTerm + "_" + docID
    # print "currentDocumentFeatureVectorDictIdentifier:",currentDocumentFeatureVectorDictIdentifier
    # single dict lookup with a default instead of membership test + read
    return postingDocIDANDRankInDocDict.get(currentDocumentFeatureVectorDictIdentifier, -1)


def pythonModuleForCallingFromC_loadTheAuxFileForFreqOfTermsInQueries():
    """Load "term frequency" pairs into freqOfTermsInQueriesDict.

    Each line of the input file is "<term> <frequency>".  If a term appears
    more than once, the first occurrence wins (same as the original logic).
    Returns nothing; the result is left in the module-level dict.
    """
    inputFileName = "/home/diaosi/gov2ClearYourMindAndDoItAgain/freqOfTermsInQueries.txt"
    inputFileHandler = open(inputFileName, "r")
    try:
        # iterate the file lazily instead of materializing readlines()
        for line in inputFileHandler:
            lineElements = line.strip().split(" ")
            term = lineElements[0]
            freqOfTermsInQueries = int(lineElements[1])

            # keep the first frequency seen for a term
            if term not in freqOfTermsInQueriesDict:
                freqOfTermsInQueriesDict[term] = freqOfTermsInQueries
    finally:
        # close even if a malformed line raises (original leaked on error)
        inputFileHandler.close()

    print("-----> len(freqOfTermsInQueriesDict): %d" % len(freqOfTermsInQueriesDict))
    # print freqOfTermsInQueriesDict
    
def pythonModuleForCallingFromC_getTermFreqInQueries(term):
    """Return how often *term* occurs across the query set, or -2 when the
    term is absent from freqOfTermsInQueriesDict (the "not loaded / not
    found" sentinel expected by the C++ caller).
    """
    # print "pythonModuleForCallingFromC_getTermFreqInQueries called."
    # single dict lookup with a default instead of membership test + read
    return freqOfTermsInQueriesDict.get(term, -2)

def multiply(a, b):
    """Return the product of a and b computed as repeated addition.

    b is added a times, so any a <= 0 yields 0 rather than a negative
    product (identical to the original loop over range(0, a)).
    """
    print("Will compute %s times %s" % (a, b))
    # [b] * a is empty for a <= 0, so sum() starts from 0 just like the
    # original accumulator did
    return sum([b] * a)


