# 2012/10/07
# This program generates features between queries and documents.
# It covers the pink-highlighted area of the feature spreadsheet (xls), except the language-model (LM) related features.
from __future__ import division
import gzip
from HTMLParser import HTMLParser
from HTMLParser import HTMLParseError
from htmlentitydefs import name2codepoint
import os
import math
from urlparse import urlparse

###########################################################################################################################
class MyHTMLParser(HTMLParser):
    """Streaming HTML parser that accumulates per-document statistics.

    While a page is fed in it collects:
      * word lists and word-frequency dicts for text inside <head>,
        <title>, <b>/<strong>, <a> and <i>/<em> tags,
      * the overall word list, distinct-word list and word-frequency dict,
      * total text size vs. <script>/<style> content size,
      * the number of outgoing links (href attributes seen).

    No __init__ override is defined: every state attribute is created
    lazily with hasattr() checks, so an attribute exists only once the
    markup that produces it has been seen.  generate_statistics_report()
    re-checks and default-initialises them before reporting.
    """
    def handle_starttag(self, tag, attrs):
        """Record the tag we are entering and count outgoing links."""
        # print "Start tag:", tag
        # set the current_tag to tag
        # handle_data() uses current_tag to know which context (head,
        # title, b, a, ...) the text it receives belongs to.
        self.current_tag = tag
        
        # Lazily create the outlink counter on the first start tag.
        if hasattr(self, 'doc_outlinks'):
            pass
        else:
            self.doc_outlinks = 0
        
        # Every href attribute on any tag counts as one outgoing link.
        for attr in attrs:
            (attr_name, attr_value) = attr
            if attr_name == "href":
                #print "     attr:", attr
                self.doc_outlinks += 1
    def handle_endtag(self, tag):
        """Leave the current tag context."""
        #print "End tag  :", tag
        # set back the current_tag to ""
        self.current_tag = ""
        
    def handle_data(self, data):
        """Sanitise a text chunk and fold it into the per-context stats.

        Text seen before the first start tag (no current_tag attribute
        yet) is ignored.  <script>/<style> content only contributes to
        script_size; everything else is tokenised on spaces and counted.
        """
        if hasattr(self, 'current_tag'):
            # step0: pre-processing
            # Replace every character outside [0-9A-Za-z ] with a space.
            # Each replacement keeps the string the same length, so the
            # index range computed up front stays valid throughout.
            for i in range(0,len(data)):
                # print "data[i]:",ord(data[i])
                if not ( (ord(data[i]) >= 48 and ord(data[i]) < 58) or (ord(data[i]) >= 65 and ord(data[i]) < 91) or (ord(data[i]) >= 97 and ord(data[i]) < 123) or (ord(data[i]) == 32) ):
                    # Just replace them with a space.
                    data = data[:i] + " " + data[i+1:]
            
            # Lazily create the per-context word lists / frequency dicts.
            if hasattr(self, 'header_words'):
                pass
            else:
                self.header_words = []
                self.header_words_dict = {}

            if hasattr(self, 'title_words'):
                pass
            else:
                self.title_words = []
                self.title_words_dict = {}
            
            if hasattr(self, 'b_or_strong_words'):
                pass
            else:
                self.b_or_strong_words = []
                self.b_or_strong_words_dict = {}          
            
            if hasattr(self, 'a_words'):
                pass
            else:
                self.a_words = []
                self.a_words_dict = {}
            
            if hasattr(self, 'i_or_em_words'):
                pass
            else:
                self.i_or_em_words = []
                self.i_or_em_words_dict = {}                       
            
                        
            # step1: some context feature
            # For each interesting context, tokenise the (lowercased)
            # text on spaces and update both the ordered word list and
            # the word -> count dict for that context.
            if self.current_tag == "head":
                for word in data.split(" "):
                    if word != "":
                        lowerCaseWord = word.lower()
                        self.header_words.append(lowerCaseWord)
                        if lowerCaseWord not in self.header_words_dict:
                            self.header_words_dict[lowerCaseWord] = 1
                        else:
                            self.header_words_dict[lowerCaseWord] += 1
            
            if self.current_tag == "title":
                for word in data.split(" "):
                    if word != "":
                        lowerCaseWord = word.lower()
                        self.title_words.append(lowerCaseWord)
                        if lowerCaseWord not in self.title_words_dict:
                            self.title_words_dict[lowerCaseWord] = 1
                        else:
                            self.title_words_dict[lowerCaseWord] += 1
            
            if self.current_tag == "b" or self.current_tag == "strong":
                for word in data.split(" "):
                    if word != "":
                        lowerCaseWord = word.lower()
                        self.b_or_strong_words.append(lowerCaseWord)
                        if lowerCaseWord not in self.b_or_strong_words_dict:
                            self.b_or_strong_words_dict[lowerCaseWord] = 1
                        else:
                            self.b_or_strong_words_dict[lowerCaseWord] += 1
                                     
            if self.current_tag == "a":
                for word in data.split(" "):
                    if word != "":
                        lowerCaseWord = word.lower()
                        self.a_words.append(lowerCaseWord)
                        if lowerCaseWord not in self.a_words_dict:
                            self.a_words_dict[lowerCaseWord] = 1
                        else:
                            self.a_words_dict[lowerCaseWord] += 1
            
            if self.current_tag == "i" or self.current_tag == "em":
                for word in data.split(" "):
                    if word != "":
                        lowerCaseWord = word.lower()
                        self.i_or_em_words.append(lowerCaseWord)
                        if lowerCaseWord not in self.i_or_em_words_dict:
                            self.i_or_em_words_dict[lowerCaseWord] = 1
                        else:
                            self.i_or_em_words_dict[lowerCaseWord] += 1
            
       
            # step2: text or script
            # Script/style bytes are counted separately and NOT indexed;
            # everything else counts toward text_size and the document
            # word statistics.
            if self.current_tag == "script" or self.current_tag == "style":
                #print "The following content should NOT be indexed."
                #print "script content     :", data
                #print "script length     :",len(data)
                if hasattr(self, 'script_size'):
                    self.script_size += len(data)
                else:
                    self.script_size = len(data)
            else:
                if hasattr(self, 'text_size'):
                    self.text_size += len(data)
                else:
                    self.text_size = len(data)
                # print "text content:*",data,"*"

                # NOTE(review): data was already sanitised in step0, so
                # this second pass finds nothing to replace; kept as-is.
                for i in range(0,len(data)):
                    # print "data[i]:",ord(data[i])
                    if not ( (ord(data[i]) >= 48 and ord(data[i]) < 58) or (ord(data[i]) >= 65 and ord(data[i]) < 91) or (ord(data[i]) >= 97 and ord(data[i]) < 123) or (ord(data[i]) == 32) ):
                        # Just replace them with a space.
                        data = data[:i] + " " + data[i+1:]
                
                
                if hasattr(self, 'doc_words'):
                    pass
                else:
                    self.doc_words = []
                
                if hasattr(self, 'doc_distinct_words'):
                    pass
                else:
                    self.doc_distinct_words = []               

                
                if hasattr(self, 'doc_words_dict'):
                    pass
                else:
                    self.doc_words_dict = {}
                                    
                #print "raw word list:",data.split(" ")
                
                for word in data.split(" "):
                    if word != "":
                        # for later use.
                        lowerCaseWord = word.lower()
                        
                        # for current use
                        # lowerCaseWord = word
                        self.doc_words.append(lowerCaseWord)
                        if lowerCaseWord not in self.doc_words_dict:
                            self.doc_words_dict[lowerCaseWord] = 1
                        else:
                            self.doc_words_dict[lowerCaseWord] += 1
                        
                        # doc_distinct_words is an ordered list, so this
                        # membership test is O(len(list)) per word.
                        if lowerCaseWord not in self.doc_distinct_words:
                            self.doc_distinct_words.append(lowerCaseWord)
                        
                #print "doc_words:",self.doc_words
                #print "doc_distinct_words:",self.doc_distinct_words
                  
                #print "text length      :", len(data)
        else:
            pass
    def handle_comment(self, data):
        """Ignore HTML comments."""
        pass
        #print "Comment  :", data
    def handle_entityref(self, name):
        """Ignore named entities (e.g. &amp;)."""
        pass
        #c = unichr(name2codepoint[name])
        #print "Named ent:", c
    def handle_charref(self, name):
        """Ignore numeric character references (e.g. &#65;)."""
        pass
        #if name.startswith('x'):
        #    c = unichr(int(name[1:], 16))
        #else:
        #    c = unichr(int(name))
        #print "Num ent  :", c
    def handle_decl(self, data):
        """Ignore declarations such as <!DOCTYPE ...>."""
        pass
        #print "Decl     :", data
    
    def generate_statistics_report(self,status):
        """Emit the six document-level features for this parsed page.

        status: 1 if the page parsed completely, 0 if parsing aborted
        (caller passes 0 after HTMLParseError/UnicodeDecodeError).

        Writes, space-separated, to the module-level
        outputFileFeaturePart1Handler: word count, distinct word count,
        text_size, script_size, script/text ratio and outlink count.
        Attributes that were never created during parsing are
        default-initialised first so the report always has values.
        """
        print "completed_parsed:",status
        # attrs check or init
        if hasattr(self, 'doc_words'):
            pass
        else:
            self.doc_words = []

        if hasattr(self, 'doc_distinct_words'):
            pass
        else:
            self.doc_distinct_words = []
        
        if hasattr(self, 'text_size'):
            pass
        else:
            self.text_size = 0
            
        if hasattr(self, 'script_size'):
            pass
        else:
            self.script_size = 0       

        if hasattr(self, 'doc_outlinks'):
            pass
        else:
            self.doc_outlinks = 0
            
        # NOTE(review): doc_waterloo_spam is initialised here but never
        # read or written anywhere in this block -- confirm whether the
        # spam score is meant to be emitted too.
        if hasattr(self, 'doc_waterloo_spam'):
            pass
        else:
            self.doc_waterloo_spam = 0
                
        #print "len(doc_words):",len(self.doc_words)
        #print "len(doc_distinct_words):",len(self.doc_distinct_words)
        #print "text_size:",self.text_size
        #print "script_size:",self.script_size
        # Guard against division by zero for pages with no text at all
        # (true division is active via __future__ import).
        if self.text_size != 0:
            script_text_ratio = self.script_size / self.text_size
        else:
            script_text_ratio = 0.0
        #print "script_text_ratio:",script_text_ratio
        #print "doc_outlinks:",self.doc_outlinks
        
        outputFileFeaturePart1Handler.write(str( len(self.doc_words) ) + " ")
        outputFileFeaturePart1Handler.write(str( len(self.doc_distinct_words) )  + " ")
        outputFileFeaturePart1Handler.write(str(self.text_size)  + " ")
        outputFileFeaturePart1Handler.write(str(self.script_size)  + " ")
        outputFileFeaturePart1Handler.write(str(script_text_ratio)  + " ")
        outputFileFeaturePart1Handler.write(str(self.doc_outlinks)  + " ")
        
        
        
        #print "self.header_words:",self.header_words
        #print "self.header_words_dict:",self.header_words_dict

        #print
        #print "self.title_words:",self.title_words
        #print
        #print "self.title_words_dict:",self.title_words_dict
    
        #print
        #print "self.b_or_strong_words:",self.b_or_strong_words
        #print
        #print "self.b_or_strong_words_dict:",self.b_or_strong_words_dict
    
        #print
        #print "self.a_words:",self.a_words
        #print
        #print "self.a_words_dict:",self.a_words_dict
    
        #print
        #print "self.i_or_em_words:",self.i_or_em_words
        #print
        #print "self.i_or_em_words_dict:",self.i_or_em_words_dict 
        
        #print
        #print "self.doc_words:",self.doc_words
        
        #print
        #print "self.doc_distinct_words:",self.doc_distinct_words
        
        #print 
        #print "self.doc_words_dict:",self.doc_words_dict
###########################################################################################################################

###########################################################################################################################
queryTermCollectionFreqDict = {}
inputQueryTermFreqFileName = "/data5/team/weijiang/the_new_trip_of_feature_generation/final-queryTermsWithMetaInfo.txt"
inputQueryTermFreqHandler = open(inputQueryTermFreqFileName,"r")

for line in inputQueryTermFreqHandler.readlines():
    elements = line.strip().split(" ")
    queryTerm = elements[0]
    queryTermIDF = int(elements[1])
    if queryTerm not in queryTermCollectionFreqDict:
        queryTermCollectionFreqDict[queryTerm] = queryTermIDF

print "----->","len(queryTermCollectionFreqDict):",len(queryTermCollectionFreqDict)
inputQueryTermFreqHandler.close()

###########################################################################################################################

###########################################################################################################################
# load the 150 queries into a dict for lookup
queryDict = {}
inputQueryFileName = "/data5/team/weijiang/the_new_trip_of_feature_generation/final-150Clueweb09Queries.txt"
inputQueryHandler = open(inputQueryFileName,"r")
for line in inputQueryHandler.readlines():
    elements = line.strip().split(":")
    queryID = int(elements[0])
    queryContent = elements[1]
    if queryID not in queryDict:
        queryDict[queryID] = queryContent

print "----->","len(queryDict):",len(queryDict)
###########################################################################################################################



###########################################################################################################################
# load the 1M popular site lists.
inputAuxFileName = "/data5/team/weijiang/the_new_trip_of_feature_generation/top-1m.csv"
inputAuxFileHandler = open(inputAuxFileName, "r")

top1MPopularSitesDict = {}

for line in inputAuxFileHandler.readlines():
    siteNumber = int( line.strip().split(",")[0] )
    siteDomain = line.strip().split(",")[1]
    if siteDomain not in top1MPopularSitesDict:
        top1MPopularSitesDict[siteDomain] = siteNumber
        
print "----->","len(top1MPopularSitesDict):",len(top1MPopularSitesDict)
###########################################################################################################################

print "Begin Parsing Documents"
print "Updated 2012/10/08 night by Wei, Let's make it better."
#inputFileName1 = "/data5/team/weijiang/the_new_trip_of_feature_generation/wholeTrainingFileTempleteV3.txt"
# Input: one "<queryID> <trecID> ..." line per query/document pair
# (this run processes split part 10 of the full template file).
inputFileName1 = "/data5/team/weijiang/the_new_trip_of_feature_generation/wholeTrainingFileTempleteV3.txt.input.part10"

# NOTE(review): these handles are deliberately left open here -- they are
# written to by the main loop below; confirm they are closed after it.
inputFileHandler1 = open(inputFileName1,"r")

# Output: one space-separated feature row per (queryID, trecID) pair,
# preceded by a header row naming every feature column emitted below.
outputFileNameFeaturePart1 = "/data5/team/weijiang/the_new_trip_of_feature_generation/wholeTrainingFileTempleteV3_output_feature_part10.txt"
outputFileFeaturePart1Handler = open(outputFileNameFeaturePart1,"w")
outputFileFeaturePart1Handler.write("queryID" + " " + "trecID" + " " + "url_length" + " " + "url_in_top_million" + " " + "url_nesting" + " " + "url_domain" + " " + "doc_size" + " " + "doc_words" + " " + "doc_distinct_words" + " " + "text_size" + " " + "script_size" + " "+ "script_text_ratio" + " " + "doc_outlinks" + " " + "sum_term_col_freq" + " " + "max_term_col_freq" + " " + "min_term_col_freq" + " " + "avg_term_col_freq" + " " + "terms_in_header" + " " + "terms_in_title" + " " + "terms_in_bold" + " " + "terms_in_url" + " " + "terms_in_italic" + " " + "frac_terms_in_header" + " " + "frac_terms_in_title" + " " + "frac_terms_in_bold" + " " + "frac_terms_in_url" + " " + "frac_terms_in_italic" + " " + "sum_term_freq_in_doc" + " " + "min_term_freq_in_doc" + " " + "max_term_freq_in_doc" + " " + "avg_term_freq_in_doc" + " " + "sum_term_rel_freq_in_doc" + " " + "min_term_rel_freq_in_doc" + " " + "max_term_rel_freq_in_doc" + " " + "avg_term_rel_freq_in_doc" + " " + "sum_term_BM25" + " " + "avg_term_BM25" + " " + "min_term_BM25" + " " + "max_term_BM25" + "\n")                             

# Parse failures (HTMLParseError / UnicodeDecodeError) are logged here.
outputErrorMessageFileName = "/data5/team/weijiang/the_new_trip_of_feature_generation/errorMessage10.txt"
outputErrorMessageFileHandle = open(outputErrorMessageFileName,"w")

# Root directory holding the judged ClueWeb09 pages, organised by
# segment number (walked below to find each trecID's file).
human_judge_query_location_base_path = "/data4/team/weijiang/human_judge_web_pages/"

queryIDWithItsDocs = {}

for index,line in enumerate(inputFileHandler1.readlines()):
    
    #if index == 50:
    #    break

    elements = line.strip().split(" ")
    queryID = int(elements[0])
    trecID = elements[1]
    
    print "----->","queryID:",queryID,"trecID:",trecID
    outputFileFeaturePart1Handler.write(str(queryID) + " " + str(trecID) + " ")
    
    trecIDElements = trecID.split("-")
    cluewebTag = trecIDElements[0]
    segmentNumber = trecIDElements[1]
    compressedFileNumber = trecIDElements[2]
    sequenceNumber = trecIDElements[3]
    
    pathLookFor = human_judge_query_location_base_path + segmentNumber
    # print "----->pathLookFor:",pathLookFor
    
    fileNamePrefixLookingFor = trecID

    foundTag = False
    
    for dirname, dirnames, filenames in os.walk(pathLookFor):
        for filename in filenames:
            if filename.startswith(fileNamePrefixLookingFor):
                foundTag = True
                # print filename
                absolutePathForWebPageFileName = os.path.join(dirname, filename)
                print "----->",absolutePathForWebPageFileName
                absolutePathForWebPageFileHandler = open(absolutePathForWebPageFileName,"r")
                file_content = absolutePathForWebPageFileHandler.read()
                absolutePathForWebPageFileHandler.close()
                # print "**********"
                # print "check file_content...PASSED"
                # print file_content
                # print "**********"
                currentLine = ""
                for i in range(0,len(file_content)):
                    if file_content[i] != "\n":
                        currentLine += file_content[i]
                    else:
                        # WARC-TREC-ID: clueweb09-en0040-54-00000
                        if currentLine.strip().split(":")[0].strip() == "WARC-TREC-ID":
                            print "WARC-TREC-ID:",currentLine.strip().split(":")[1].strip()
                        # WARC-Target-URI: http://www.smartwebby.com/DreamweaverTemplates/templates/business_general_template59.asp
                        if currentLine.strip().split(":")[0].strip() == "WARC-Target-URI":
                            target_URI = currentLine.strip()[17:]
                            
                            print "WARC-Target-URI:",target_URI
                            # the logic of generating the url related features.
                            url_length = len(target_URI)
                            
                            #print "url_length:",url_length
                            outputFileFeaturePart1Handler.write(str(url_length) + " ")
                            
                            o = urlparse(target_URI)
                            # print "str(o.path):",o.path
                            # print "str(o.path).split('/'):",str(o.path).split('/')
                            
                            tempNumber = len(str(o.path).split('/'))
                            if tempNumber == 0:
                                url_nesting = 0
                            elif tempNumber == 1:
                                url_nesting = 0
                            elif tempNumber == 2:
                                url_nesting = 0
                            else:
                                url_nesting = tempNumber - 2
                            
                            url_host = str(o.netloc)[4:]
                            
                            url_in_top_million = False
                            
                            if url_host in top1MPopularSitesDict:
                                url_in_top_million = True
                            else:
                                url_in_top_million = False
                            
                            #print "url_in_top_million:",url_in_top_million
                            #print "url_nesting:",url_nesting
                            url_domain = str(o.netloc).split(".")[-1]
                            #print "url_domain:",url_domain
                            
                            outputFileFeaturePart1Handler.write(str(url_in_top_million) + " ")
                            outputFileFeaturePart1Handler.write(str(url_nesting) + " ")
                            outputFileFeaturePart1Handler.write(str(url_domain) + " ")
                            
                            
                            
                            
                            
                            contentLengthSpecialCounter = 2
                        if currentLine.strip().split(":")[0].strip() == "Content-Length":
                            contentLengthSpecialCounter -= 1
                            if contentLengthSpecialCounter == 0:
                                web_page_content_length = int(currentLine.strip()[16:])
                                doc_size = currentLine.strip()[16:]
                                #print "doc_size:",doc_size
                                outputFileFeaturePart1Handler.write(str(doc_size) + " ")
                                
                                
                                while file_content[i] == "\n":
                                    i += 1
                                    
                                web_page_content = file_content[i:i+web_page_content_length]
                                
                                #print "**********"
                                #print "check web_page_content...PASSED"
                                # print web_page_content
                                #print "**********"
                                
                                
                                
                                parser = MyHTMLParser()
                                
                                
                                # new version, currently in debug mode.
                                # parser.feed(web_page_content)
                                # parser.generate_statistics_report(1)
                                
                                # old version, you are NOT directly facing the problem. 2012/10/08
                                                              
                                try:
                                    parser.feed(web_page_content)
                                    parser.generate_statistics_report(1)
                                except HTMLParseError,e:
                                    print "----->",trecID,":",e.msg,":",e.lineno,":",e.offset
                                    outputErrorMessageFileHandle.write("----->" + trecID + ":" + e.msg + ":" + str(e.lineno) + ":" + str(e.offset) + "\n")
                                    # do not handle the error message.
                                    parser.generate_statistics_report(0)
                                except UnicodeDecodeError,e:
                                    print "----->",e
                                    outputErrorMessageFileHandle.write("----->" + "UnicodeDecodeError Error." "\n")
                                    # do not handle the error message.
                                    parser.generate_statistics_report(0)                                    
                                
                                
                                #print
                                
                                queryContent = queryDict[queryID]
                                
                                # collection freq for the query terms.
                                veryLargeNumber = 999999999
                                sum_term_col_freq = 0
                                avg_term_col_freq = 0
                                min_term_col_freq = veryLargeNumber
                                max_term_col_freq = 0
                                
                                terms_in_header = 0
                                frac_terms_in_header = 0.0
                                
                                terms_in_title = 0
                                frac_terms_in_title = 0.0
                                
                                terms_in_bold = 0
                                frac_terms_in_bold = 0.0
                                
                                terms_in_url = 0
                                frac_terms_in_url = 0.0
                                
                                terms_in_italic = 0
                                frac_terms_in_italic = 0.0
                                
                                
                                sum_term_freq_in_doc = 0
                                avg_term_freq_in_doc = 0
                                
                                
                                min_term_freq_in_doc = veryLargeNumber
                                max_term_freq_in_doc = 0
                                
                                queryTerms = queryContent.split(" ")
                                queryTermsLength = len(queryTerms)
                                
                                if hasattr(parser, 'header_words_dict'):
                                    pass
                                else:
                                    parser.header_words_dict = {}


                                if hasattr(parser, 'title_words_dict'):
                                    pass
                                else:
                                    parser.title_words_dict = {}

                                if hasattr(parser, 'b_or_strong_words_dict'):
                                    pass
                                else:
                                    parser.b_or_strong_words_dict = {}                                  

                                if hasattr(parser, 'a_words_dict'):
                                    pass
                                else:
                                    parser.a_words_dict = {}

                                if hasattr(parser, 'i_or_em_words'):
                                    pass
                                else:
                                    parser.i_or_em_words = []

                                if hasattr(parser, 'doc_words_dict'):
                                    pass
                                else:
                                    parser.doc_words_dict = {}
                                
                                # Let's do the BM25 feature.
                                sum_term_BM25 = 0.0
                                avg_term_BM25 = 0.0
                                min_term_BM25 = veryLargeNumber
                                max_term_BM25 = 0.0
                                # BM25 parameters: see 'http://en.wikipedia.org/wiki/Okapi_BM25'.
                                kBm25K1 =  2.0  # k1
                                kBm25B = 0.75   #b
                                # We can precompute a few of the BM25 values here.
                                kBm25NumeratorMul = kBm25K1 + 1
                                kBm25DenominatorAdd = kBm25K1 * (1 - kBm25B)
                                collection_average_doc_len = 1102 # This number comes from the cluweb2009 CatB index info
                                collection_total_num_docs = 50220420 # This number comes from the cluweb2009 CatB index info
                                kBm25DenominatorDocLenMul = kBm25K1 * kBm25B / collection_average_doc_len
                                
                                
                                idf_t_dict = {} # This dict is for storing the idf value for each query term
                                                                                                                                                   
                                for queryTerm in queryTerms:
                                    idf_t = math.log10(1 + (collection_total_num_docs - queryTermCollectionFreqDict[queryTerm] + 0.5) / (queryTermCollectionFreqDict[queryTerm] + 0.5))
                                    idf_t_dict[queryTerm] = idf_t
                                    
                                    if queryTerm in parser.doc_words_dict:
                                        partial_BM25 = idf_t_dict[queryTerm] * (parser.doc_words_dict[queryTerm] * kBm25NumeratorMul) / (parser.doc_words_dict[queryTerm] + kBm25DenominatorAdd + kBm25DenominatorDocLenMul * len(parser.doc_words))
                                        sum_term_BM25 += partial_BM25
                                        if partial_BM25 < min_term_BM25:
                                            min_term_BM25 = partial_BM25
                                        if partial_BM25 > max_term_BM25:
                                            max_term_BM25 = partial_BM25
                                    
                                    if queryTerm in parser.header_words_dict:
                                        terms_in_header += 1
                                    if queryTerm in parser.title_words_dict:
                                        terms_in_title += 1                                 
                                    if queryTerm in parser.b_or_strong_words_dict:
                                        terms_in_bold += 1
                                    if queryTerm in parser.a_words_dict:
                                        terms_in_url += 1                                  
                                    if queryTerm in parser.i_or_em_words:
                                        terms_in_italic += 1
                                    
                                    if queryTerm in parser.doc_words_dict:
                                        sum_term_freq_in_doc += parser.doc_words_dict[queryTerm]
                                        if parser.doc_words_dict[queryTerm] > max_term_freq_in_doc:
                                            max_term_freq_in_doc = parser.doc_words_dict[queryTerm]
                                        if parser.doc_words_dict[queryTerm] < min_term_freq_in_doc:
                                            # Track the rarest matched term: running minimum of in-document frequency.
                                            min_term_freq_in_doc = parser.doc_words_dict[queryTerm]
                                            
                                    # Accumulate collection-frequency statistics for terms known to the corpus;
                                    # terms missing from the dictionary contribute nothing.
                                    if queryTerm in queryTermCollectionFreqDict:
                                        sum_term_col_freq += queryTermCollectionFreqDict[queryTerm]
                                        
                                        if queryTermCollectionFreqDict[queryTerm] > max_term_col_freq:
                                            max_term_col_freq = queryTermCollectionFreqDict[queryTerm]
                                        if queryTermCollectionFreqDict[queryTerm] < min_term_col_freq:
                                            min_term_col_freq = queryTermCollectionFreqDict[queryTerm]
                                
                                # handle special case: no term ever lowered the minimum, so it still
                                # holds the veryLargeNumber sentinel -- report 0.0 instead.
                                if min_term_BM25 == veryLargeNumber:
                                    min_term_BM25 = 0.0

                                # handle special case: same sentinel check for collection frequency.
                                if min_term_col_freq == veryLargeNumber:
                                    min_term_col_freq = 0
                                                                                              
                                # handle special case: same sentinel check for in-document frequency.
                                if min_term_freq_in_doc == veryLargeNumber:
                                    min_term_freq_in_doc = 0
                                
                                
                                # Per-term averages over the full query length (true division via
                                # the __future__ import at the top of the file).
                                avg_term_freq_in_doc = sum_term_freq_in_doc / queryTermsLength
                                avg_term_col_freq = sum_term_col_freq / queryTermsLength
                                
                                                                                            
                                # Fraction of query terms appearing in each structural region of the page.
                                frac_terms_in_header = terms_in_header / queryTermsLength
                                frac_terms_in_title = terms_in_title / queryTermsLength
                                frac_terms_in_bold = terms_in_bold / queryTermsLength
                                frac_terms_in_url = terms_in_url / queryTermsLength
                                frac_terms_in_italic = terms_in_italic / queryTermsLength
                                
                                #print "terms_in_header:",terms_in_header
                                #print "terms_in_title:",terms_in_title
                                #print "terms_in_bold:",terms_in_bold
                                #print "terms_in_url:",terms_in_url
                                #print "terms_in_italic:",terms_in_italic
                                
                                #print "frac_terms_in_header:",frac_terms_in_header
                                #print "frac_terms_in_title:",frac_terms_in_title
                                #print "frac_terms_in_bold:",frac_terms_in_bold
                                #print "frac_terms_in_url:",frac_terms_in_url
                                #print "frac_terms_in_italic:",frac_terms_in_italic
                                
                                #print "sum_term_freq_in_doc:",sum_term_freq_in_doc
                                #print "min_term_freq_in_doc:",min_term_freq_in_doc
                                #print "max_term_freq_in_doc:",max_term_freq_in_doc
                                #print "avg_term_freq_in_doc:",avg_term_freq_in_doc
                                

                                # The "relative" frequency features are currently aliased to the plain
                                # frequency features -- I currently think that they are almost the same.
                                # need to ask Maria for advice before differentiating them.
                                sum_term_rel_freq_in_doc = sum_term_freq_in_doc
                                min_term_rel_freq_in_doc = min_term_freq_in_doc
                                max_term_rel_freq_in_doc = max_term_freq_in_doc
                                avg_term_rel_freq_in_doc = avg_term_freq_in_doc
                                                                
                                #print "sum_term_rel_freq_in_doc:",sum_term_rel_freq_in_doc
                                #print "min_term_rel_freq_in_doc:",min_term_rel_freq_in_doc
                                #print "max_term_rel_freq_in_doc:",max_term_rel_freq_in_doc
                                #print "avg_term_rel_freq_in_doc:",avg_term_rel_freq_in_doc
                                

                                #print "sum_term_col_freq:",sum_term_col_freq
                                #print "max_term_col_freq:",max_term_col_freq
                                #print "min_term_col_freq:",min_term_col_freq
                                #print "avg_term_rel_freq:",avg_term_col_freq                                
                                
                                # Emit the 26 Part-1 features for this query/document pair as one
                                # space-separated line; the column order below is fixed and must not
                                # change, since downstream consumers read features positionally.
                                part1_feature_values = (
                                    sum_term_col_freq, max_term_col_freq, min_term_col_freq, avg_term_col_freq,
                                    terms_in_header, terms_in_title, terms_in_bold, terms_in_url, terms_in_italic,
                                    frac_terms_in_header, frac_terms_in_title, frac_terms_in_bold, frac_terms_in_url, frac_terms_in_italic,
                                    sum_term_freq_in_doc, min_term_freq_in_doc, max_term_freq_in_doc, avg_term_freq_in_doc,
                                    sum_term_rel_freq_in_doc, min_term_rel_freq_in_doc, max_term_rel_freq_in_doc, avg_term_rel_freq_in_doc,
                                    sum_term_BM25, avg_term_BM25, min_term_BM25, max_term_BM25,
                                )
                                for feature_value in part1_feature_values:
                                    outputFileFeaturePart1Handler.write(str(feature_value) + " ")
                                outputFileFeaturePart1Handler.write("\n")
                                

                                
                                
                                
                                
                                # And finally, output the term vector (the parser's extracted word
                                # list) of this document to the per-corpus plain-text directory given
                                # to Juan.  Path layout: <base>/<corpus-part of trecID>/<file stem>_plain_text.txt
                                outputTermVectorFileName = "/data4/team/weijiang/human_judge_web_pages_plain_text_only_words/" + trecID.split("-")[1] + "/" + filename.split(".")[0] + "_plain_text.txt"
                                # Use a with-block so the handle is always closed, even if a write
                                # raises (the original open()/close() pair leaked the handle on error).
                                with open(outputTermVectorFileName, "w") as outputTermVectorFileHandler:
                                    for word in parser.doc_words:
                                        outputTermVectorFileHandler.write(word + " ")
                                
                                print
                                                                                                                      
                        # Reset the line buffer for the next record -- presumably accumulated
                        # while scanning the compressed stream above; confirm against the
                        # reading loop outside this view.
                        currentLine = ""
                # print "compressed file content end." 
                #print "**********"

    if not foundTag:
        # The requested TREC document id was never located in the archive:
        # report it on stdout and record it in the error-message log file.
        print "----->",trecID,"NOT Found."
        outputErrorMessageFileHandle.write("----->" + trecID + " is NOT Found." + "\n")

# All documents processed: release the feature-output, query-input and
# error-log file handles opened earlier in the script.
outputFileFeaturePart1Handler.close()
inputFileHandler1.close()
outputErrorMessageFileHandle.close()
print "End Processing Documents"