# 2012/10/07
# This program is written for generating features between queries and documents.
# Basically, the very pink area of the xls file except the LM model related.
from __future__ import division
import gzip
from HTMLParser import HTMLParser
from htmlentitydefs import name2codepoint
import os
from urlparse import urlparse
###########################################################################################################################
# load the 150 queries into a dict for lookup
queryDict = {}
inputQueryFileName = "/data5/team/weijiang/the_new_trip_of_feature_generation/final-150Clueweb09Queries.txt"
inputQueryHandler = open(inputQueryFileName,"r")
for line in inputQueryHandler.readlines():
    elements = line.strip().split(":")
    queryID = int(elements[0])
    queryContent = elements[1]
    if queryID not in queryDict:
        queryDict[queryID] = queryContent

print "len(queryDict):",len(queryDict)
###########################################################################################################################



###########################################################################################################################
# load the 1M popular site lists.
inputAuxFileName = "/data5/team/weijiang/the_new_trip_of_feature_generation/top-1m.csv"
inputAuxFileHandler = open(inputAuxFileName, "r")

top1MPopularSitesDict = {}

for line in inputAuxFileHandler.readlines():
    siteNumber = int( line.strip().split(",")[0] )
    siteDomain = line.strip().split(",")[1]
    if siteDomain not in top1MPopularSitesDict:
        top1MPopularSitesDict[siteDomain] = siteNumber
        
print "len(top1MPopularSitesDict):",len(top1MPopularSitesDict)
###########################################################################################################################
class MyHTMLParser(HTMLParser):
    def handle_starttag(self, tag, attrs):
        # print "Start tag:", tag
        # set the current_tag to tag
        self.current_tag = tag
        
        if hasattr(self, 'doc_outlinks'):
            pass
        else:
            self.doc_outlinks = 0
        
        for attr in attrs:
            (attr_name, attr_value) = attr
            if attr_name == "href":
                #print "     attr:", attr
                self.doc_outlinks += 1
    def handle_endtag(self, tag):
        #print "End tag  :", tag
        # set back the current_tag to ""
        self.current_tag = ""
        
    def handle_data(self, data):
        if hasattr(self, 'current_tag'):
            # step0: pre-processing
            for i in range(0,len(data)):
                # print "data[i]:",ord(data[i])
                if not ( (ord(data[i]) >= 48 and ord(data[i]) < 58) or (ord(data[i]) >= 65 and ord(data[i]) < 91) or (ord(data[i]) >= 97 and ord(data[i]) < 123) or (ord(data[i]) == 32) ):
                    # Just replace them with a space.
                    data = data[:i] + " " + data[i+1:]
            
            if hasattr(self, 'header_words'):
                pass
            else:
                self.header_words = []
                self.header_words_dict = {}

            if hasattr(self, 'title_words'):
                pass
            else:
                self.title_words = []
                self.title_words_dict = {}
            
            if hasattr(self, 'b_or_strong_words'):
                pass
            else:
                self.b_or_strong_words = []
                self.b_or_strong_words_dict = {}          
            
            if hasattr(self, 'a_words'):
                pass
            else:
                self.a_words = []
                self.a_words_dict = {}
            
            if hasattr(self, 'i_or_em_words'):
                pass
            else:
                self.i_or_em_words = []
                self.i_or_em_words_dict = {}                       
            
                        
            # step1: some context feature
            if self.current_tag == "head":
                for word in data.split(" "):
                    if word != "":
                        lowerCaseWord = word.lower()
                        self.header_words.append(lowerCaseWord)
                        if lowerCaseWord not in self.header_words_dict:
                            self.header_words_dict[lowerCaseWord] = 1
                        else:
                            self.header_words_dict[lowerCaseWord] += 1
            
            if self.current_tag == "title":
                for word in data.split(" "):
                    if word != "":
                        lowerCaseWord = word.lower()
                        self.title_words.append(lowerCaseWord)
                        if lowerCaseWord not in self.title_words_dict:
                            self.title_words_dict[lowerCaseWord] = 1
                        else:
                            self.title_words_dict[lowerCaseWord] += 1
            
            if self.current_tag == "b" or self.current_tag == "strong":
                for word in data.split(" "):
                    if word != "":
                        lowerCaseWord = word.lower()
                        self.b_or_strong_words.append(lowerCaseWord)
                        if lowerCaseWord not in self.b_or_strong_words_dict:
                            self.b_or_strong_words_dict[lowerCaseWord] = 1
                        else:
                            self.b_or_strong_words_dict[lowerCaseWord] += 1
                                     
            if self.current_tag == "a":
                for word in data.split(" "):
                    if word != "":
                        lowerCaseWord = word.lower()
                        self.a_words.append(lowerCaseWord)
                        if lowerCaseWord not in self.a_words_dict:
                            self.a_words_dict[lowerCaseWord] = 1
                        else:
                            self.a_words_dict[lowerCaseWord] += 1
            
            if self.current_tag == "i" or self.current_tag == "em":
                for word in data.split(" "):
                    if word != "":
                        lowerCaseWord = word.lower()
                        self.i_or_em_words.append(lowerCaseWord)
                        if lowerCaseWord not in self.i_or_em_words_dict:
                            self.i_or_em_words_dict[lowerCaseWord] = 1
                        else:
                            self.i_or_em_words_dict[lowerCaseWord] += 1
            
       
            # step2: text or script
            if self.current_tag == "script" or self.current_tag == "style":
                #print "The following content should NOT be indexed."
                #print "script content     :", data
                #print "script length     :",len(data)
                if hasattr(self, 'script_size'):
                    self.script_size += len(data)
                else:
                    self.script_size = len(data)
            else:
                if hasattr(self, 'text_size'):
                    self.text_size += len(data)
                else:
                    self.text_size = len(data)
                #print "text content     :", data

                for i in range(0,len(data)):
                    # print "data[i]:",ord(data[i])
                    if not ( (ord(data[i]) >= 48 and ord(data[i]) < 58) or (ord(data[i]) >= 65 and ord(data[i]) < 91) or (ord(data[i]) >= 97 and ord(data[i]) < 123) or (ord(data[i]) == 32) ):
                        # Just replace them with a space.
                        data = data[:i] + " " + data[i+1:]
                
                # can index the data content now
                if hasattr(self, 'doc_words'):
                    pass
                else:
                    self.doc_words = []
                
                if hasattr(self, 'doc_distinct_words'):
                    pass
                else:
                    self.doc_distinct_words = []                
                    
                #print "raw word list:",data.split(" ")
                
                for word in data.split(" "):
                    if word != "":
                        # for later use.
                        lowerCaseWord = word.lower()
                        
                        # for current use
                        # lowerCaseWord = word
                        self.doc_words.append(lowerCaseWord)
                        if lowerCaseWord not in self.doc_distinct_words:
                            self.doc_distinct_words.append(lowerCaseWord)
                        
                #print "doc_words:",self.doc_words
                #print "doc_distinct_words:",self.doc_distinct_words
                  
                #print "text length      :", len(data)
        else:
            pass
    def handle_comment(self, data):
        pass
        #print "Comment  :", data
    def handle_entityref(self, name):
        pass
        #c = unichr(name2codepoint[name])
        #print "Named ent:", c
    def handle_charref(self, name):
        pass
        #if name.startswith('x'):
        #    c = unichr(int(name[1:], 16))
        #else:
        #    c = unichr(int(name))
        #print "Num ent  :", c
    def handle_decl(self, data):
        pass
        #print "Decl     :", data
    
    def generate_statistics_report(self,status):
        print "completed_parsed:",status
        # attrs check or init
        if hasattr(self, 'doc_words'):
            pass
        else:
            self.doc_words = []

        if hasattr(self, 'doc_distinct_words'):
            pass
        else:
            self.doc_distinct_words = []
        
        if hasattr(self, 'text_size'):
            pass
        else:
            self.text_size = 0
            
        if hasattr(self, 'script_size'):
            pass
        else:
            self.script_size = 0       

        if hasattr(self, 'doc_outlinks'):
            pass
        else:
            self.doc_outlinks = 0
            
        if hasattr(self, 'doc_waterloo_spam'):
            pass
        else:
            self.doc_waterloo_spam = 0
                
        print "len(doc_words):",len(self.doc_words)
        print "len(doc_distinct_words):",len(self.doc_distinct_words)
        print "text_size:",self.text_size
        print "script_size:",self.script_size
        script_text_ratio = self.script_size / self.text_size
        print "script_text_ratio:",script_text_ratio
        print "doc_outlinks:",self.doc_outlinks
        
        
        print "self.header_words:",self.header_words
        print "self.header_words_dict:",self.header_words_dict

    
        print "self.title_words:",self.title_words
        print "self.title_words_dict:",self.title_words_dict
    
    
        print "self.b_or_strong_words:",self.b_or_strong_words
        print "self.b_or_strong_words_dict:",self.b_or_strong_words_dict
    
    
        print "self.a_words:",self.a_words
        print "self.a_words_dict:",self.a_words_dict
    
    
        print "self.i_or_em_words:",self.i_or_em_words
        print "self.i_or_em_words_dict:",self.i_or_em_words_dict 
        
        print "self.doc_words:",self.doc_words
        print "self.doc_distinct_words:",self.doc_distinct_words
###########################################################################################################################
# Main driver: for each (queryID, trecID) row of the training template,
# locate the saved WARC page under the human-judge corpus, scan its WARC
# headers, extract the HTML payload, parse it with MyHTMLParser, and print
# query/document features.
inputFileName1 = "/data5/team/weijiang/the_new_trip_of_feature_generation/wholeTrainingFileTempleteV3.txt"
inputFileHandler1 = open(inputFileName1,"r")

human_judge_query_location_base_path = "/data4/team/weijiang/human_judge_web_pages/"

# queryID -> list of trecIDs.
# NOTE(review): never populated — the `break` near the bottom of the loop
# runs before the population code, making it unreachable.
queryIDWithItsDocs = {}

# skip the header line of the template file
inputFileHandler1.readline()

for line in inputFileHandler1.readlines():
    # each row: "<queryID> <trecID> ..."
    elements = line.strip().split(" ")
    queryID = int(elements[0])
    trecID = elements[1]
    print queryID,trecID
    # trecID format: "clueweb09-en0040-54-00000" =
    # <cluewebTag>-<segmentNumber>-<compressedFileNumber>-<sequenceNumber>
    trecIDElements = trecID.split("-")
    cluewebTag = trecIDElements[0]
    segmentNumber = trecIDElements[1]
    compressedFileNumber = trecIDElements[2]
    sequenceNumber = trecIDElements[3]

    # pages are stored under a per-segment directory
    pathLookFor = human_judge_query_location_base_path + segmentNumber
    print "pathLookFor:",pathLookFor

    fileNamePrefixLookingFor = trecID

    # walk the segment tree looking for files named after this trecID
    for dirname, dirnames, filenames in os.walk(pathLookFor):
        for filename in filenames:
            if filename.startswith(fileNamePrefixLookingFor):
                print filename
                absolutePathForWebPageFileName = os.path.join(dirname, filename)
                print absolutePathForWebPageFileName
                # NOTE(review): this file handle is never closed
                absolutePathForWebPageFileHandler = open(absolutePathForWebPageFileName,"r")
                file_content = absolutePathForWebPageFileHandler.read()
                print "**********"
                print "check file_content...PASSED"
                #print file_content
                print "**********"
                # scan the record character by character, accumulating one
                # header line at a time in currentLine
                currentLine = ""
                for i in range(0,len(file_content)):
                    if file_content[i] != "\n":
                        currentLine += file_content[i]
                    else:
                        # WARC-TREC-ID: clueweb09-en0040-54-00000
                        if currentLine.strip().split(":")[0].strip() == "WARC-TREC-ID":
                            print "WARC-TREC-ID:",currentLine.strip().split(":")[1].strip()
                        # WARC-Target-URI: http://www.smartwebby.com/DreamweaverTemplates/templates/business_general_template59.asp
                        if currentLine.strip().split(":")[0].strip() == "WARC-Target-URI":
                            # [17:] skips the literal "WARC-Target-URI: " prefix
                            target_URI = currentLine.strip()[17:]

                            print "WARC-Target-URI:",target_URI
                            # the logic of generating the url related features.
                            url_length = len(target_URI)
                            print "url_length:",url_length
                            o = urlparse(target_URI)
                            # print "str(o.path):",o.path
                            # print "str(o.path).split('/'):",str(o.path).split('/')

                            # nesting depth of the URL path: "/a/b/c" splits
                            # into 4 parts -> nesting 2; "/" or "" -> 0
                            tempNumber = len(str(o.path).split('/'))
                            if tempNumber == 0:
                                url_nesting = 0
                            elif tempNumber == 1:
                                url_nesting = 0
                            elif tempNumber == 2:
                                url_nesting = 0
                            else:
                                url_nesting = tempNumber - 2

                            # NOTE(review): unconditionally drops the first 4
                            # chars of netloc, assuming a "www." prefix —
                            # hosts without "www." get mangled; confirm intended
                            url_host = str(o.netloc)[4:]

                            url_in_top_million = False

                            if url_host in top1MPopularSitesDict:
                                url_in_top_million = True
                            else:
                                url_in_top_million = False

                            print "url_in_top_million:",url_in_top_million

                            print "url_nesting:",url_nesting

                            # top-level domain = last dot-separated netloc part
                            print "url_domain:",str(o.netloc).split(".")[-1]

                            # arm the counter so that the SECOND
                            # Content-Length header after this point (the
                            # HTML payload's, not the WARC record's) triggers
                            # the page extraction below.
                            # NOTE(review): if a Content-Length line appears
                            # before any WARC-Target-URI line, the branch
                            # below hits a NameError (unbound name)
                            contentLengthSpecialCounter = 2
                        if currentLine.strip().split(":")[0].strip() == "Content-Length":
                            contentLengthSpecialCounter -= 1
                            if contentLengthSpecialCounter == 0:
                                # [16:] skips the literal "Content-Length: " prefix
                                web_page_content_length = int(currentLine.strip()[16:])
                                print "doc_size:",currentLine.strip()[16:]

                                # skip the blank line(s) separating headers
                                # from the payload.
                                # NOTE(review): reassigning i here does NOT
                                # affect the enclosing for-range loop — after
                                # this branch the outer loop resumes from its
                                # old position and re-scans the page body as
                                # if it were header lines; confirm intended
                                while file_content[i] == "\n":
                                    i += 1

                                web_page_content = file_content[i:i+web_page_content_length]

                                print "**********"
                                print "check web_page_content...PASSED"
                                # print web_page_content
                                print "**********"

                                # parse the page; a fresh parser per document
                                parser = MyHTMLParser()

                                try:
                                    parser.feed(web_page_content)
                                    parser.generate_statistics_report(1)
                                except:
                                    # do not handle the error message.
                                    # NOTE(review): bare except swallows every
                                    # error (even KeyboardInterrupt); the
                                    # report is still printed with status 0
                                    parser.generate_statistics_report(0)
                                print

                                queryContent = queryDict[queryID]

                                # query/document overlap features: how many
                                # query terms appear in each page region
                                terms_in_header = 0
                                frac_terms_in_header = 0.0

                                terms_in_title = 0
                                frac_terms_in_title = 0.0

                                terms_in_bold = 0
                                frac_terms_in_bold = 0.0

                                terms_in_url = 0
                                frac_terms_in_url = 0.0

                                terms_in_italic = 0
                                frac_terms_in_italic = 0.0

                                queryTerms = queryContent.split(" ")
                                queryTermsLength = len(queryTerms)

                                for queryTerm in queryTerms:
                                    if queryTerm in parser.header_words_dict:
                                        terms_in_header += 1
                                    if queryTerm in parser.title_words_dict:
                                        terms_in_title += 1
                                    if queryTerm in parser.b_or_strong_words_dict:
                                        terms_in_bold += 1
                                    if queryTerm in parser.a_words_dict:
                                        terms_in_url += 1
                                    # NOTE(review): siblings test the *_dict;
                                    # this tests the i_or_em_words list — same
                                    # membership result but O(n) and
                                    # inconsistent; likely meant the dict
                                    if queryTerm in parser.i_or_em_words:
                                        terms_in_italic += 1

                                # fractions use true division (__future__ at
                                # top of file)
                                frac_terms_in_header = terms_in_header / queryTermsLength
                                frac_terms_in_title = terms_in_title / queryTermsLength
                                frac_terms_in_bold = terms_in_bold / queryTermsLength
                                frac_terms_in_url = terms_in_url / queryTermsLength
                                frac_terms_in_italic = terms_in_italic / queryTermsLength

                                print "terms_in_header:",terms_in_header
                                print "terms_in_title:",terms_in_title
                                print "terms_in_bold:",terms_in_bold
                                print "terms_in_url:",terms_in_url
                                print "terms_in_italic:",terms_in_italic

                                print "frac_terms_in_header:",frac_terms_in_header
                                print "frac_terms_in_title:",frac_terms_in_title
                                print "frac_terms_in_bold:",frac_terms_in_bold
                                print "frac_terms_in_url:",frac_terms_in_url
                                print "frac_terms_in_italic:",frac_terms_in_italic

                                # It is time to grap the pages.
                        currentLine = ""
                # print "compressed file content end."
                print "**********"

    # NOTE(review): deliberate-looking early exit — only the first template
    # row is processed, and the population of queryIDWithItsDocs below is
    # unreachable
    break

    if queryID not in queryIDWithItsDocs:
        queryIDWithItsDocs[queryID] = []
        queryIDWithItsDocs[queryID].append(trecID)
    else:
        queryIDWithItsDocs[queryID].append(trecID)


'''
sum = 0
numberOfQueries = 148

for i in range(1,151):
    # check what happend to these queryIDs?
    if i != 95 and i != 100:
        #print i,len(queryIDWithItsDocs[i])
        sum += len(queryIDWithItsDocs[i])
        
print "average human judge documents / per query:",sum/numberOfQueries
'''



inputFileHandler1.close()