# Some notes:
# Updated by Wei 2012/01/10
# (0) PLEASE DO NOT USE this program; the reasons are as follows.
# (1) Computing the rank here takes too much time (maybe not enough optimization).
# (2) There already exist some OTHER programs to generate the rank_in_the_doc and the rank_in_the_inverted_list.
# (3) That's all for now, and this program will NOT be updated.

# Updated by Wei 2012/11/28
# (1) This is a posting-oriented intermediate training file.
# (2) Use python2.7 rather than python2.6, because the HTML parser is not good in python2.6.
# (3) This program generates basic features for the edge graph; an edge captures the relationship between query, termID (term) and docID (trecID).
# (4) More advanced features, such as position-sensitive features, are generated by another program called: *.complex.py
# (5) Probably the most important feature to add now is the rank in the document — I really need this!
# (6) Let's verify this is correct or NOT, then do a further review and run the long-running job.

from __future__ import division
import random
import gzip
from HTMLParser import HTMLParser
from HTMLParser import HTMLParseError
from htmlentitydefs import name2codepoint
import os
import math
import sys
from urlparse import urlparse

# the variable should be the ONLY useful variable in this function: current_term_distribution
def generate_distribution_features(queryTermList,docWordList,outputFileHandler):
    """Write one "term distribution" feature for this query/document pair.

    Splits docWordList into consecutive 100-word chunks and, for each
    distinct query term, accumulates -p*log(p) over the chunks containing
    the term (p = within-chunk count / 100) -- an entropy-style measure of
    how evenly the term is spread through the document.

    NOTE(review): only ONE value is actually written to outputFileHandler
    (see the "index 19" comment near the bottom): current_term_distribution,
    which holds the entropy of whichever term came LAST when iterating
    queryTermPartialDistributionDict.  That is CPython-2 dict iteration
    order, not query order -- confirm this is the intended feature.
    """
    # print "generate_distribution_features(...) called"
    veryLargeNumber = 999999999
    verySmallNumber = -999999999
    
    # Aggregates over all query terms.  Only current_term_distribution is
    # written out below; the sum/min/max/avg write() calls are commented out.
    sum_term_distribution = 0.0
    avg_term_distribution = 0.0
    max_term_distribution = verySmallNumber
    min_term_distribution = veryLargeNumber
    current_term_distribution = 0.0
    
    
    # One entropy accumulator per distinct query term (duplicates collapse).
    queryTermPartialDistributionDict = {}
    for queryTerm in queryTermList:
        if queryTerm not in queryTermPartialDistributionDict:
            queryTermPartialDistributionDict[queryTerm] = 0
        else:
            pass
    
    print "doc_size:",len(docWordList)
    # Number of full 100-word chunks; the +1 in the range() below also
    # visits the trailing partial chunk (an empty final slice is harmless).
    upperBoundOfCounter = int(len(docWordList)/100)
    #print upperBoundOfCounter
    
    for i in range(0,upperBoundOfCounter+1):
        partOfDocWordList = []
        partOfDocWordDict = {}
        # The i-th 100-word window of the document.
        partOfDocWordList = docWordList[i*100: i*100 +100]
        # Per-chunk term frequencies.
        for word in partOfDocWordList:
            if word not in partOfDocWordDict:
                partOfDocWordDict[word] = 1
            else:
                partOfDocWordDict[word] += 1
        
        for queryTerm in queryTermPartialDistributionDict:
            if queryTerm in partOfDocWordDict:
                # p uses a FIXED denominator of 100 even for a short final
                # chunk (true division via the __future__ import at the top
                # of the file); accumulate -p*log(p).
                component_1 = partOfDocWordDict[queryTerm] / 100
                component_2 = math.log(component_1)
                queryTermPartialDistributionDict[queryTerm] -= component_1 * component_2
    
    # check
    # Fold per-term entropies into sum/min/max.  current_term_distribution
    # ends up holding the value of the last term in dict iteration order.
    for queryTerm in queryTermPartialDistributionDict:
        sum_term_distribution += queryTermPartialDistributionDict[queryTerm]
        current_term_distribution = queryTermPartialDistributionDict[queryTerm]
        if queryTermPartialDistributionDict[queryTerm] < min_term_distribution:
            min_term_distribution = queryTermPartialDistributionDict[queryTerm]
        if queryTermPartialDistributionDict[queryTerm] > max_term_distribution:
            max_term_distribution = queryTermPartialDistributionDict[queryTerm]
    
    # NOTE(review): raises ZeroDivisionError when queryTermList is empty.
    avg_term_distribution = sum_term_distribution / len(queryTermList)
    
    #outputFileHandler.write(str(sum_term_distribution) + " ")
    #outputFileHandler.write(str(min_term_distribution) + " ")
    #outputFileHandler.write(str(max_term_distribution) + " ")
    #outputFileHandler.write(str(avg_term_distribution) + " ")
    
    # index 19 Roman global 21
    current_term_distributionWei = current_term_distribution
    outputFileHandler.write(str( current_term_distributionWei ) + " ")
    
    #print "sum_term_distribution:",sum_term_distribution
    #print "min_term_distribution:",min_term_distribution
    #print "max_term_distribution:",max_term_distribution
    #print "avg_term_distribution:",avg_term_distribution
    
    

###########################################################################################################################class begin...
class MyHTMLParser(HTMLParser):
    """Accumulates per-tag word statistics while parsing one HTML page.

    All state attributes are created lazily via hasattr checks (there is
    no __init__ override):
      current_tag    -- name of the most recently opened tag; reset to ""
                        by ANY end tag, so nesting is not tracked
      doc_outlinks   -- count of href attributes seen (on any tag)
      header_words / title_words / b_or_strong_words / a_words /
      i_or_em_words  -- lowercased words seen inside <head>, <title>,
                        <b>/<strong>, <a>, <i>/<em>, each with a matching
                        *_dict word-frequency counter
      doc_words / doc_distinct_words / doc_words_dict
                     -- words of all non-<script>/<style> text
      text_size / script_size -- raw character counts of text vs. script
    """
    def handle_starttag(self, tag, attrs):
        # print "Start tag:", tag
        # set the current_tag to tag
        self.current_tag = tag
        
        # Lazy init of the outlink counter.
        if hasattr(self, 'doc_outlinks'):
            pass
        else:
            self.doc_outlinks = 0
        
        # NOTE(review): every href attribute counts as an outlink, on ANY
        # tag (e.g. <link href=...> too, not just <a>) -- confirm intended.
        for attr in attrs:
            (attr_name, attr_value) = attr
            if attr_name == "href":
                #print "     attr:", attr
                self.doc_outlinks += 1
    def handle_endtag(self, tag):
        #print "End tag  :", tag
        # set back the current_tag to "".  NOTE(review): no tag stack, so
        # text following a closed nested child is attributed to no tag.
        self.current_tag = ""
        
    def handle_data(self, data):
        """Sanitize a text chunk and file its words under the open tag."""
        if hasattr(self, 'current_tag'):
            # step0: pre-processing -- replace every char outside
            # [0-9A-Za-z ] with a space.  Rebuilding the string per
            # character makes this pass O(len(data)^2).
            for i in range(0,len(data)):
                # print "data[i]:",ord(data[i])
                if not ( (ord(data[i]) >= 48 and ord(data[i]) < 58) or (ord(data[i]) >= 65 and ord(data[i]) < 91) or (ord(data[i]) >= 97 and ord(data[i]) < 123) or (ord(data[i]) == 32) ):
                    # Just replace them with a space.
                    data = data[:i] + " " + data[i+1:]
            
            # Lazy init of the per-tag word lists and frequency dicts.
            if hasattr(self, 'header_words'):
                pass
            else:
                self.header_words = []
                self.header_words_dict = {}

            if hasattr(self, 'title_words'):
                pass
            else:
                self.title_words = []
                self.title_words_dict = {}
            
            if hasattr(self, 'b_or_strong_words'):
                pass
            else:
                self.b_or_strong_words = []
                self.b_or_strong_words_dict = {}          
            
            if hasattr(self, 'a_words'):
                pass
            else:
                self.a_words = []
                self.a_words_dict = {}
            
            if hasattr(self, 'i_or_em_words'):
                pass
            else:
                self.i_or_em_words = []
                self.i_or_em_words_dict = {}                       
            
                        
            # step1: some context feature -- collect lowercased words into
            # the store matching the open tag.  Note: "header_words" holds
            # <head> content (the tag checked is "head", not <h1>-<h6>).
            if self.current_tag == "head":
                for word in data.split(" "):
                    if word != "":
                        lowerCaseWord = word.lower()
                        self.header_words.append(lowerCaseWord)
                        if lowerCaseWord not in self.header_words_dict:
                            self.header_words_dict[lowerCaseWord] = 1
                        else:
                            self.header_words_dict[lowerCaseWord] += 1
            
            if self.current_tag == "title":
                for word in data.split(" "):
                    if word != "":
                        lowerCaseWord = word.lower()
                        self.title_words.append(lowerCaseWord)
                        if lowerCaseWord not in self.title_words_dict:
                            self.title_words_dict[lowerCaseWord] = 1
                        else:
                            self.title_words_dict[lowerCaseWord] += 1
            
            if self.current_tag == "b" or self.current_tag == "strong":
                for word in data.split(" "):
                    if word != "":
                        lowerCaseWord = word.lower()
                        self.b_or_strong_words.append(lowerCaseWord)
                        if lowerCaseWord not in self.b_or_strong_words_dict:
                            self.b_or_strong_words_dict[lowerCaseWord] = 1
                        else:
                            self.b_or_strong_words_dict[lowerCaseWord] += 1
                                     
            if self.current_tag == "a":
                for word in data.split(" "):
                    if word != "":
                        lowerCaseWord = word.lower()
                        self.a_words.append(lowerCaseWord)
                        if lowerCaseWord not in self.a_words_dict:
                            self.a_words_dict[lowerCaseWord] = 1
                        else:
                            self.a_words_dict[lowerCaseWord] += 1
            
            if self.current_tag == "i" or self.current_tag == "em":
                for word in data.split(" "):
                    if word != "":
                        lowerCaseWord = word.lower()
                        self.i_or_em_words.append(lowerCaseWord)
                        if lowerCaseWord not in self.i_or_em_words_dict:
                            self.i_or_em_words_dict[lowerCaseWord] = 1
                        else:
                            self.i_or_em_words_dict[lowerCaseWord] += 1
            
       
            # step2: text or script -- script/style content is only size-
            # counted; all other text is size-counted AND tokenized.
            if self.current_tag == "script" or self.current_tag == "style":
                #print "The following content should NOT be indexed."
                #print "script content     :", data
                #print "script length     :",len(data)
                if hasattr(self, 'script_size'):
                    self.script_size += len(data)
                else:
                    self.script_size = len(data)
            else:
                if hasattr(self, 'text_size'):
                    self.text_size += len(data)
                else:
                    self.text_size = len(data)
                # print "text content:*",data,"*"

                # NOTE(review): data was already sanitized in step0 and is
                # not modified in between, so this second pass is a no-op.
                for i in range(0,len(data)):
                    # print "data[i]:",ord(data[i])
                    if not ( (ord(data[i]) >= 48 and ord(data[i]) < 58) or (ord(data[i]) >= 65 and ord(data[i]) < 91) or (ord(data[i]) >= 97 and ord(data[i]) < 123) or (ord(data[i]) == 32) ):
                        # Just replace them with a space.
                        data = data[:i] + " " + data[i+1:]
                
                
                # Lazy init of the whole-document word stores.
                if hasattr(self, 'doc_words'):
                    pass
                else:
                    self.doc_words = []
                
                if hasattr(self, 'doc_distinct_words'):
                    pass
                else:
                    self.doc_distinct_words = []               

                
                if hasattr(self, 'doc_words_dict'):
                    pass
                else:
                    self.doc_words_dict = {}
                                    
                #print "raw word list:",data.split(" ")
                
                for word in data.split(" "):
                    if word != "":
                        # for later use.
                        lowerCaseWord = word.lower()
                        
                        # for current use
                        # lowerCaseWord = word
                        self.doc_words.append(lowerCaseWord)
                        if lowerCaseWord not in self.doc_words_dict:
                            self.doc_words_dict[lowerCaseWord] = 1
                        else:
                            self.doc_words_dict[lowerCaseWord] += 1
                        
                        
                        # NOTE(review): linear membership scan of a list --
                        # O(words^2) overall on large documents.
                        if lowerCaseWord not in self.doc_distinct_words:
                            self.doc_distinct_words.append(lowerCaseWord)
                        
                #print "doc_words:",self.doc_words
                #print "doc_distinct_words:",self.doc_distinct_words
                  
                #print "text length      :", len(data)
        else:
            # No start tag seen yet (e.g. leading whitespace): ignore.
            pass
    def handle_comment(self, data):
        # HTML comments are ignored.
        pass
        #print "Comment  :", data
    def handle_entityref(self, name):
        # Named entities (&amp; etc.) are ignored.
        pass
        #c = unichr(name2codepoint[name])
        #print "Named ent:", c
    def handle_charref(self, name):
        # Numeric character references are ignored.
        pass
        #if name.startswith('x'):
        #    c = unichr(int(name[1:], 16))
        #else:
        #    c = unichr(int(name))
        #print "Num ent  :", c
    def handle_decl(self, data):
        # Doctype/declarations are ignored.
        pass
        #print "Decl     :", data
    
    def generate_statistics_report(self,status):
        """Default any summary attribute never touched during parsing.

        status: 1 for a clean parse, 0 when the caller caught a parse
        error; it is only printed.  Called after feed() so callers can
        safely read doc_words, text_size, etc. even for empty/broken
        pages.  The report prints themselves are commented out.
        """
        print "completed_parsed:",status
        # attrs check or init
        if hasattr(self, 'doc_words'):
            pass
        else:
            self.doc_words = []

        if hasattr(self, 'doc_distinct_words'):
            pass
        else:
            self.doc_distinct_words = []
        
        if hasattr(self, 'text_size'):
            pass
        else:
            self.text_size = 0
            
        if hasattr(self, 'script_size'):
            pass
        else:
            self.script_size = 0       

        if hasattr(self, 'doc_outlinks'):
            pass
        else:
            self.doc_outlinks = 0
            
        # NOTE(review): doc_waterloo_spam is never assigned anywhere else
        # in this class, so it is always 0 after this call.
        if hasattr(self, 'doc_waterloo_spam'):
            pass
        else:
            self.doc_waterloo_spam = 0
                
        #print "len(doc_words):",len(self.doc_words)
        #print "len(doc_distinct_words):",len(self.doc_distinct_words)
        #print "text_size:",self.text_size
        #print "script_size:",self.script_size
        # Computed here but currently unused (all report prints disabled).
        if self.text_size != 0:
            script_text_ratio = self.script_size / self.text_size
        else:
            script_text_ratio = 0.0
        #print "script_text_ratio:",script_text_ratio
        #print "doc_outlinks:",self.doc_outlinks

        
        #print "self.header_words:",self.header_words
        #print "self.header_words_dict:",self.header_words_dict

        #print
        #print "self.title_words:",self.title_words
        #print
        #print "self.title_words_dict:",self.title_words_dict
    
        #print
        #print "self.b_or_strong_words:",self.b_or_strong_words
        #print
        #print "self.b_or_strong_words_dict:",self.b_or_strong_words_dict
    
        #print
        #print "self.a_words:",self.a_words
        #print
        #print "self.a_words_dict:",self.a_words_dict
    
        #print
        #print "self.i_or_em_words:",self.i_or_em_words
        #print
        #print "self.i_or_em_words_dict:",self.i_or_em_words_dict 
        
        #print
        #print "self.doc_words:",self.doc_words
        
        #print
        #print "self.doc_distinct_words:",self.doc_distinct_words
        
        #print 
        #print "self.doc_words_dict:",self.doc_words_dict
###########################################################################################################################class end.

print "Begin the program..."
print "Updated 2012/12/02 morning by Wei, Let's continue to make it better."

# wholeTrainingFileTempleteGov2V1.txt.input
# new input example: 
# 701 GX000-00-13923627 u 0

print "len(sys.argv):",len(sys.argv)

if len(sys.argv) != 2:
    print "Illegal # of arguments"
    print "Usage: python programName.py theInputFileName"
    exit(1)

# in production mode
# option1:
# The single required command-line argument: the posting-oriented
# intermediate training file to be augmented with rank-in-document features.
inputFileName1 = sys.argv[1]
# inputFileName1 = "/data5/team/obukai/the_new_trip_of_feature_generation/gov2/wholeTrainingFileTempleteGov2V1_phase0_posting_oriented_term_freq_inCollection_added.txt.input"

# option2:
# inputFileName1 = "/data5/team/obukai/the_new_trip_of_feature_generation/gov2/wholeTrainingFileTempleteGov2V1_phase0_posting_oriented.txt.input"

# in debug mode, let's debug this
# option1:
# inputFileName1 = "/data5/team/obukai/the_new_trip_of_feature_generation/gov2/wholeTrainingFileTempleteGov2V1_phase0_posting_oriented_term_freq_inCollection_added.txt.input_test"

# option2:
# inputFileName1 = ""

# old input example:
# inputFileName1 = "/data5/team/obukai/the_new_trip_of_feature_generation/gov2/wholeTrainingFileTempleteGov2V1_phase0.txt.input"

inputFileHandler1 = open(inputFileName1,"r")

# in production mode
# outputFileNameFeaturePart1 = "/data5/team/obukai/the_new_trip_of_feature_generation/gov2/wholeTrainingFileTempleteGov2V1_phase0_posting_oriented_term_freq_inCollection_added_rank_in_document_added.txt.input"

# outputFileNameFeaturePart1 = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/wholeTrainingFileTempleteGov2V1_phase0_posting_oriented_term_freq_inCollection_added_rank_in_document_added.txt.input"
# Output name is derived from the input name so parallel runs don't collide.
outputFileNameFeaturePart1 = inputFileName1 + "_rank_in_document"
# in debug mode, let's debug this
# outputFileNameFeaturePart1 = "/data5/team/obukai/the_new_trip_of_feature_generation/gov2/wholeTrainingFileTempleteGov2V1_phase0_posting_oriented_term_freq_inCollection_added_rank_in_document_added.txt.input_test"

outputFileFeaturePart1Handler = open(outputFileNameFeaturePart1,"w")

# Write the headline file first
# feature list
# number of features: 42

# outputFileFeaturePart1Handler.write("queryID" + " " + "trecID" + " " + "doc_words" + " " + "doc_distinct_words" + " " + "text_size" + " " + "script_size" + " " + "script_text_ratio" + " " + "doc_outlinks" + " " + "\n")

# Per-document parser / decoding errors are recorded in a sidecar file.
# outputErrorMessageFileName = "/data5/team/obukai/the_new_trip_of_feature_generation/errorMessage.txt"
# outputErrorMessageFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/errorMessage.txt"
outputErrorMessageFileName = inputFileName1 + "_rank_in_document" + "_errorMessage.txt"
outputErrorMessageFileHandle = open(outputErrorMessageFileName,"w")
# NOTE(review): these handlers are not closed in the visible part of the
# script -- confirm they are closed/flushed before the program exits.



###########################################################################################################################
# Maps query term -> its total occurrence count in the whole collection,
# loaded from a lexicon dump ("<term> <collectionFreq>" per line).
queryTermCollectionFreqDict = {}
# have NOT been extracted from the toolkit. 20121117

# option 1
inputQueryTermFreqFileName = "/data3/obukai/the_new_trip_of_feature_generation/gov2ClearYourMindAndDoItAgain/wholeLexiconTermsWithTermFreqInCollection.txt"

# option 2
# inputQueryTermFreqFileName = "/home/obukai/workspace/polyIRToolkit/scripts/src/pythonScripts/gov2/final-queryTermsCollectionFreqs.txt_test"
# inputQueryTermFreqFileName =  "/home/obukai/workspace/polyIRToolkit/scripts/src/pythonScripts/gov2/final-queryTermsCollectionFreqs.txt_test_additional_doc_words_added"
# inputQueryTermFreqFileName = "/home/obukai/workspace/polyIRToolkit/scripts/src/pythonScripts/gov2/final-queryTermsCollectionFreqs.txt_test_additional_doc_words_added_second_time.txt"

inputQueryTermFreqHandler = open(inputQueryTermFreqFileName,"r")

for line in inputQueryTermFreqHandler.readlines():
    elements = line.strip().split(" ")
    queryTerm = elements[0]
    # NOTE(review): despite the name, this holds the raw collection
    # frequency (see the source file's name), not an IDF value.
    queryTermIDF = int(elements[1])
    # First occurrence wins; duplicate terms are silently ignored.
    if queryTerm not in queryTermCollectionFreqDict:
        queryTermCollectionFreqDict[queryTerm] = queryTermIDF

print "----->","len(queryTermCollectionFreqDict):",len(queryTermCollectionFreqDict)
inputQueryTermFreqHandler.close()

###########################################################################################################################

###########################################################################################################################
# load the 150 queries into a dict for lookup
# queryDict: queryID (int) -> sanitized, lowercased query text.
queryDict = {}
# need to fix tomorrow !!!

# option1
# note: Now, it is NOT only gov2 150 queries but also has the effeciency task queries as well
# inputQueryFileName = "/data5/team/obukai/the_new_trip_of_feature_generation/gov2/final-150Gov2Queries.txt"

# option2
# note: Should contain all the queries in the gov2 being evaluated
inputQueryFileName = "/home/obukai/workspace/polyIRToolkit/polyIRIndexer/allQueriesIncludingHumanJudgedAndEffeciencyTask_NEW20121128.txt"

inputQueryHandler = open(inputQueryFileName,"r")
# Input lines look like "<queryID>:<query text>".
for line in inputQueryHandler.readlines():
    elements = line.strip().split(":")
    queryID = int(elements[0])
    
    data = elements[1]
    data = data.lower()
    
    # Replace every char outside [0-9a-z ] with a space.  data is already
    # lowercased above, so the uppercase range (65-90) can never match --
    # harmless dead branch kept for symmetry with the parser code.
    for i in range(0,len(data)):
        # print "data[i]:",ord(data[i])
        if not ( (ord(data[i]) >= 48 and ord(data[i]) < 58) or (ord(data[i]) >= 65 and ord(data[i]) < 91) or (ord(data[i]) >= 97 and ord(data[i]) < 123) or (ord(data[i]) == 32) ):
            # Just replace them with a space.
            data = data[:i] + " " + data[i+1:]
    
    queryContent = data

    # First occurrence of a queryID wins; duplicates are silently dropped.
    if queryID not in queryDict:
        queryDict[queryID] = queryContent

print "----->","len(queryDict):",len(queryDict)
# NOTE(review): inputQueryHandler is never closed -- harmless for a
# read-only handle in a short script, but inconsistent with the term-freq
# loader; confirm.
###########################################################################################################################

# Root of the pre-fetched judged web pages; documents live under
# per-segment subdirectories (the part of the trecID before the first "-").
human_judge_query_location_base_path = "/data3/obukai/human_judge_web_pages_gov2_ALL/"

# NOTE(review): declared but never populated in the visible part of the
# script -- confirm it is actually used further down.
queryIDWithItsDocs = {}

#inputFileHandler1.readline()

for index,line in enumerate(inputFileHandler1.readlines()):
    
    #if index == 100:
    #    break

    elements = line.strip().split(" ")
    if len(elements) == 11:
        queryID = int(elements[0])
        trecID = elements[1]
        postingTerm = elements[2]
        # Do STH, record all the useful info here.
        partialBM25Roman = elements[3]
        term_freq_in_collectionRoman = elements[4]
        term_freq_in_docRoman = elements[5]
        doc_wordsRoman = elements[6]
        overallBM25ScoreRoman = elements[7]
        rank_in_this_results_list_for_this_queryRoman = elements[8]
        term_freq_in_queriesRoman = elements[9]
        labelRoman = elements[10]
        
        
        
        print "----->","queryID:",queryID,"trecID:",trecID,"postingTerm:",postingTerm
        
        # index 0 1 2 Roman global 0,1,2
        outputFileFeaturePart1Handler.write(str(queryID) + " " + str(trecID) + " " + str(postingTerm) + " ")
        # index 3 Roman global 3
        outputFileFeaturePart1Handler.write(str(partialBM25Roman) + " ")
        # index 4 Roman global 4
        outputFileFeaturePart1Handler.write(str(term_freq_in_collectionRoman) + " ")
        # index 5 Roman global 5
        outputFileFeaturePart1Handler.write(str(term_freq_in_docRoman) + " ")
        # index 6 Roman global 6
        outputFileFeaturePart1Handler.write(str(doc_wordsRoman) + " ")
        # index 7 Roman global 7
        outputFileFeaturePart1Handler.write(str(overallBM25ScoreRoman) + " ")
        # index 8 Roman global 8
        outputFileFeaturePart1Handler.write(str(rank_in_this_results_list_for_this_queryRoman) + " ")
        # index 9 Roman global 9
        outputFileFeaturePart1Handler.write(str(term_freq_in_queriesRoman) + " ")
        
        
        
        trecIDElements = trecID.split("-")
        # cluewebTag = trecIDElements[0]
        segmentNumber = trecIDElements[0]
        compressedFileNumber = trecIDElements[1]
        sequenceNumber = trecIDElements[2]
        
        pathLookFor = human_judge_query_location_base_path + segmentNumber
        # print "----->pathLookFor:",pathLookFor
        
        fileNamePrefixLookingFor = trecID
    
        foundTag = False
        
        for dirname, dirnames, filenames in os.walk(pathLookFor):
            for filename in filenames:
                if filename.startswith(fileNamePrefixLookingFor):
                    foundTag = True
                    # print filename
                    absolutePathForWebPageFileName = os.path.join(dirname, filename)
                    print "----->",absolutePathForWebPageFileName
                    absolutePathForWebPageFileHandler = open(absolutePathForWebPageFileName,"r")
                    file_content = absolutePathForWebPageFileHandler.read()
                    absolutePathForWebPageFileHandler.close()
                    # print "**********"
                    # print "check file_content...PASSED"
                    # print file_content
                    # print "**********"
                    
                    web_page_content = file_content
                    parser = MyHTMLParser()
                    try:
                        parser.feed(web_page_content)
                        parser.generate_statistics_report(1)
                    except HTMLParseError,e:
                        print "----->",trecID,":",e.msg,":",e.lineno,":",e.offset
                        outputErrorMessageFileHandle.write("----->" + trecID + ":" + e.msg + ":" + str(e.lineno) + ":" + str(e.offset) + "\n")
                        # do not handle the error message.
                        parser.generate_statistics_report(0)
                    except UnicodeDecodeError,e:
                        print "----->",e
                        outputErrorMessageFileHandle.write("----->" + trecID + " UnicodeDecodeError Error." "\n")
                        # do not handle the error message.
                        parser.generate_statistics_report(0)
                    
                    # output some general info for the whole document
                    if len(parser.doc_words) != 0:
                        pass
                    else:
                        # Let's do the most common filter
                        # step1: filter out all the special chars.
                        data = web_page_content
                        data = data.lower()
                        
                        for i in range(0,len(data)):
                            # print "data[i]:",ord(data[i])
                            if not ( (ord(data[i]) >= 48 and ord(data[i]) < 58) or (ord(data[i]) >= 65 and ord(data[i]) < 91) or (ord(data[i]) >= 97 and ord(data[i]) < 123) or (ord(data[i]) == 32) ):
                                # Just replace them with a space.
                                data = data[:i] + " " + data[i+1:]
                        
                        web_page_content_refined = data
                        print "**************************************"
                        # print web_page_content_refined
                        
                        documentWordList = []
                        documentUniqueTermList = []
                        documentWordsDict = {}
                        for documentTerm in web_page_content_refined.strip().split(" "):
                            if documentTerm.strip() != "":
                                documentWordList.append(documentTerm)
                                
                                if documentTerm not in documentWordsDict:
                                    documentWordsDict[documentTerm] = 1
                                else:
                                    documentWordsDict[documentTerm] += 1
                            
                                if documentTerm not in documentUniqueTermList:
                                    documentUniqueTermList.append(documentTerm)
                        
                        # assignment
                        parser.doc_words = documentWordList
                        parser.doc_distinct_words = documentUniqueTermList
                        parser.doc_words_dict = documentWordsDict
                        
                        # print "parser.doc_words_dict:",parser.doc_words_dict
                        
                        for documentWord in documentWordList:
                            parser.text_size += len(documentWord)
                            
    
                        
                        print "**************************************"
    
                    doc_wordsWei = len(parser.doc_words)
                    doc_distinct_wordsWei = len(parser.doc_distinct_words)
                    text_sizeWei = parser.text_size
                    script_sizeWei = parser.script_size
                    
                    # index 3 Roman global NONE
                    # outputFileFeaturePart1Handler.write(str( doc_wordsWei ) + " ")
                    
                    # index 4 Roman global 10
                    outputFileFeaturePart1Handler.write(str( doc_distinct_wordsWei )  + " ")
                    # index 5 Roman global 11
                    outputFileFeaturePart1Handler.write(str( text_sizeWei )  + " ")
                    # index 6 Roman global 12
                    outputFileFeaturePart1Handler.write(str( script_sizeWei )  + " ")
                        
                    
                    if parser.text_size != 0:
                        script_text_ratio = parser.script_size / parser.text_size
                    else:
                        script_text_ratio = 0.0
                    
                    
                    script_text_ratioWei = script_text_ratio
                    doc_outlinksWei = parser.doc_outlinks
                    
                    # index 7 Roman global 13
                    outputFileFeaturePart1Handler.write(str( script_text_ratioWei )  + " ")
                    # index 8 Roman global 14
                    outputFileFeaturePart1Handler.write(str( doc_outlinksWei )  + " ")
                    
                    queryContent = queryDict[queryID]
                    
                    # collection freq for the query terms.
                    veryLargeNumber = 999999999
                    verySmallNumber = -999999999
                    current_term_col_freq = 0
                    
                    
                    
                    queryTermsOriginal = queryContent.split(" ")
                    queryTerms = []
                    for element in queryTermsOriginal:
                        if element.strip() != "":
                            queryTerms.append( element.strip() )
                    
                    queryTermsLength = len(queryTerms)
                    
                    if hasattr(parser, 'header_words_dict'):
                        pass
                    else:
                        parser.header_words_dict = {}
    
    
                    if hasattr(parser, 'title_words_dict'):
                        pass
                    else:
                        parser.title_words_dict = {}
    
                    if hasattr(parser, 'b_or_strong_words_dict'):
                        pass
                    else:
                        parser.b_or_strong_words_dict = {}                                  
    
                    if hasattr(parser, 'a_words_dict'):
                        pass
                    else:
                        parser.a_words_dict = {}
    
                    if hasattr(parser, 'i_or_em_words'):
                        pass
                    else:
                        parser.i_or_em_words = []
    
                    if hasattr(parser, 'doc_words_dict'):
                        pass
                    else:
                        parser.doc_words_dict = {}
                    
                    # Let's do the LM feature(4).
                    MIOU = 2000
                    
                    
                    
                    
                    
                    # BM25 parameters: see 'http://en.wikipedia.org/wiki/Okapi_BM25'.
                    kBm25K1 =  2.0  # k1
                    kBm25B = 0.75   #b
                    # We can precompute a few of the BM25 values here.
                    kBm25NumeratorMul = kBm25K1 + 1
                    kBm25DenominatorAdd = kBm25K1 * (1 - kBm25B)
                    collection_average_doc_len = 1102 # This number comes from the cluweb2009 CatB index info
                    collection_total_num_docs = 50220420 # This number comes from the cluweb2009 CatB index info
                    kBm25DenominatorDocLenMul = kBm25K1 * kBm25B / collection_average_doc_len
                    
                    
                    idf_t_dict = {} # This dict is for storing the idf value for each query term
                                                                                                                                       
                    
                        
                        
                                                    
                                              
                    
                    
                    # feature generation: current_term_col_freq
                    # Collection-level count for the posting term.  NOTE(review):
                    # judging by the BM25 idf formula below, queryTermCollectionFreqDict
                    # appears to hold DOCUMENT frequencies rather than collection term
                    # frequencies -- confirm which statistic it really contains.
                    current_term_col_freq = queryTermCollectionFreqDict[postingTerm]
                    
                    # index 9 Roman global NONE
                    current_term_col_freqWei = current_term_col_freq
                    # outputFileFeaturePart1Handler.write(str( current_term_col_freqWei ) + " ")
                    
                    # feature generation: whether_current_term_in_header
                    # feature generation: whether_current_term_in_title
                    # feature generation: whether_current_term_in_bold
                    # feature generation: whether_current_term_in_header  (NOTE(review): duplicate of the first line above)
                    # feature generation: whether_current_term_in_url
                    # feature generation: whether_current_term_in_italic
                    
                    
                    # Binary tag-context features: does the posting term occur inside
                    # header / title / bold-or-strong / anchor / italic-or-em markup
                    # of this document?  A plain membership test already yields the
                    # True/False value the feature file expects.
                    whether_current_term_in_header = postingTerm in parser.header_words_dict
                    whether_current_term_in_title = postingTerm in parser.title_words_dict
                    whether_current_term_in_bold = postingTerm in parser.b_or_strong_words_dict
                    whether_current_term_in_url = postingTerm in parser.a_words_dict
                    whether_current_term_in_italic = postingTerm in parser.i_or_em_words
    
                    # Keep the *Wei aliases -- they mirror the naming convention used
                    # for every other feature in this file.
                    whether_current_term_in_headerWei = whether_current_term_in_header
                    whether_current_term_in_titleWei = whether_current_term_in_title
                    whether_current_term_in_boldWei = whether_current_term_in_bold
                    whether_current_term_in_urlWei = whether_current_term_in_url
                    whether_current_term_in_italicWei = whether_current_term_in_italic
    
                    # Feature indices 10-14 (Roman global 15-19), emitted in this
                    # exact order as the strings "True"/"False" followed by a space.
                    for tag_flag in (whether_current_term_in_headerWei,
                                     whether_current_term_in_titleWei,
                                     whether_current_term_in_boldWei,
                                     whether_current_term_in_urlWei,
                                     whether_current_term_in_italicWei):
                        outputFileFeaturePart1Handler.write(str(tag_flag) + " ")
                    
                    # feature generation: current_term_freq_in_doc
                    # Raw occurrence count of the posting term in this document
                    # (0 when the term does not appear).
                    current_term_freq_in_doc = parser.doc_words_dict.get(postingTerm, 0)
    
                    # index 15 Roman global NONE -- computed but currently not
                    # written to the feature file.
                    current_term_freq_in_docWei = current_term_freq_in_doc
    
                    # feature generation: current_term_rel_freq_in_doc
                    # Term frequency normalised by document length; guarded so an
                    # empty document yields 0 instead of dividing by zero.
                    if parser.doc_words:
                        current_term_rel_freq_in_doc = current_term_freq_in_doc / len(parser.doc_words)
                    else:
                        current_term_rel_freq_in_doc = 0
    
                    # index 16 Roman global NONE -- computed but currently not
                    # written to the feature file.
                    current_term_rel_freq_in_docWei = current_term_rel_freq_in_doc
                    
                    # feature generation: current_term_BM25
                    # This part is doing the BM25 feature
                    # idf variant: log10(1 + (N - n + 0.5) / (n + 0.5)).  This is a
                    # smoothed, always-positive form of the classic Okapi idf (which
                    # uses the natural log and no "1 +") -- presumably intentional;
                    # confirm against the companion rank-generation programs.
                    current_term_BM25 = 0
                    idf_t = math.log10(1 + (collection_total_num_docs - queryTermCollectionFreqDict[postingTerm] + 0.5) / (queryTermCollectionFreqDict[postingTerm] + 0.5))
                    idf_t_dict[postingTerm] = idf_t  # cache the idf for reuse
                    if postingTerm in parser.doc_words_dict:
                        # tf * (k1 + 1) / (tf + k1*(1 - b) + k1*b*doclen/avg_doclen)
                        partial_BM25 = idf_t_dict[postingTerm] * (parser.doc_words_dict[postingTerm] * kBm25NumeratorMul) / (parser.doc_words_dict[postingTerm] + kBm25DenominatorAdd + kBm25DenominatorDocLenMul * len(parser.doc_words))
                        current_term_BM25 += partial_BM25
                    
                    # index 17 Roman global NONE 
                    current_term_BM25Wei = current_term_BM25
                    # outputFileFeaturePart1Handler.write(str( current_term_BM25Wei ) + " ")
                    
                    ###############################################################################################
                    # Rank of the current posting within its document: count how many
                    # word OCCURRENCES in parser.doc_words score a strictly higher
                    # BM25 than the posting term.  Only words present in
                    # queryTermCollectionFreqDict can be scored; the rest are skipped
                    # because we lack their collection statistics.
                    #
                    # Performance fix: the BM25 score depends only on the word, not
                    # on the occurrence, so compute it once per DISTINCT word and
                    # reuse it for every repeat occurrence (the original recomputed
                    # log10 + the full BM25 expression on every occurrence, which is
                    # a large part of why this program was flagged as too slow).
                    # idf_t_dict ends up with exactly the same entries as before, and
                    # the occurrence-based counting is preserved.
                    compared_bm25_cache = {}
                    current_posting_rank_in_doc = 0
                    
                    for compared_word in parser.doc_words:
                        if compared_word in queryTermCollectionFreqDict:
                            if compared_word not in compared_bm25_cache:
                                # Same smoothed log10 idf variant as the posting term above.
                                idf_t = math.log10(1 + (collection_total_num_docs - queryTermCollectionFreqDict[compared_word] + 0.5) / (queryTermCollectionFreqDict[compared_word] + 0.5))
                                idf_t_dict[compared_word] = idf_t
                                compared_term_BM25 = 0
                                if compared_word in parser.doc_words_dict:
                                    partial_BM25 = idf_t_dict[compared_word] * (parser.doc_words_dict[compared_word] * kBm25NumeratorMul) / (parser.doc_words_dict[compared_word] + kBm25DenominatorAdd + kBm25DenominatorDocLenMul * len(parser.doc_words))
                                    compared_term_BM25 += partial_BM25
                                compared_bm25_cache[compared_word] = compared_term_BM25
                            # Every occurrence of a higher-scoring word bumps the rank.
                            if compared_bm25_cache[compared_word] > current_term_BM25Wei:
                                current_posting_rank_in_doc += 1
                        else:
                            # The term is NOT in queryTermCollectionFreqDict, so we do
                            # NOT have enough info to count the rank in the inverted index.
                            pass
                    
                    # Put the actual feature write last (below) so it can be verified.
                    current_posting_rank_in_docWei = current_posting_rank_in_doc
                    ###############################################################################################
                    
                    
                    # feature generation: current_term_QL
                    # This part is doing the LM feature
                    if postingTerm in parser.doc_words_dict:
                        partial_LM_component_1 = parser.doc_words_dict[postingTerm] + MIOU * (queryTermCollectionFreqDict[postingTerm] / collection_total_num_docs)
                    if postingTerm not in parser.doc_words_dict:
                        partial_LM_component_1 = MIOU * (queryTermCollectionFreqDict[postingTerm] / collection_total_num_docs)
                    
                    partial_LM_component_2 = len(parser.doc_words) + MIOU
                    
                    # Here, the base is 10
                    partial_LM_score = math.log(partial_LM_component_1 / partial_LM_component_2)
                    current_term_QL = partial_LM_score
                    
                    # index 18 Roman global 20
                    current_term_QLWei = current_term_QL
                    outputFileFeaturePart1Handler.write(str( current_term_QLWei ) + " ")                               
                    
                    
                    # This is a try to generate distribution score among all query terms.
                    onlyOneQueryTermList = []
                    onlyOneQueryTermList.append(postingTerm)
                    generate_distribution_features(onlyOneQueryTermList,parser.doc_words,outputFileFeaturePart1Handler)
                    
                    # index 10 Roman global 22 (21 is in the function generate_distribution_features(...))
                    outputFileFeaturePart1Handler.write( str(labelRoman) + " " )
                    
                    outputFileFeaturePart1Handler.write(str( current_posting_rank_in_docWei ) + " ")
                    
                    outputFileFeaturePart1Handler.write("\n")
                    
                    # And finally, output the term vector of this document given to Juan.
                    # outputTermVectorFileName = "/data4/team/weijiang/human_judge_web_pages_plain_text_only_words/" + trecID.split("-")[1] + "/" + filename.split(".")[0] + "_plain_text.txt"
                    # outputTermVectorFileHandler = open(outputTermVectorFileName,"w")
                    # for word in parser.doc_words:
                    #     outputTermVectorFileHandler.write(word + " ")
                    # outputTermVectorFileHandler.close()
                    
                    print
                
    # Report documents whose tag was never found during the scan, both to
    # stdout and to the error log, so missing trecIDs can be audited later.
    if not foundTag:
        print "----->",trecID,"NOT Found."
        outputErrorMessageFileHandle.write("----->" + trecID + " is NOT Found." + "\n")

# Top-level cleanup: close the feature output file, the input posting file,
# and the error log before exiting.
outputFileFeaturePart1Handler.close()
inputFileHandler1.close()
outputErrorMessageFileHandle.close()
print "End Processing Documents, updated by Wei 2012/11/28"
