import os
import gzip
import re
import sys
import htmldata
import settings 
import time
import htmlentitydefs
import model
import logging
from settings import log

def ExtractSites(contents):
    ''' Split a crawl dump into per-page entries.

    Features:
      - Ignores pages without a content-type header
      - Ignores pages without an <html> tag
      - Ignores robots.txt entries

    Every URL seen in a page header is inserted into the database.
    Returns a list of [html_start, html_end, header] triples, where the
    offsets index into `contents` and `header` maps lower-cased header
    names to lower-cased values.
    '''
    db = model.Database()
    page_list = []   # return value
    try:
        for m in re.finditer(settings.docSeperator, contents):  # one match per document

            # Identify start of the header and position of the opening <html tag
            # (searched only within the first 1000 bytes after the separator).
            header_start = m.start()
            header_end = contents[header_start:header_start+1000].lower().find('<html')
            if header_end == -1:
                continue    # no <html> tag -> skip this document

            header_end += header_start
            html_start = header_end
            close_tag = contents[html_start:].lower().find('</html>')
            if close_tag == -1:
                # Original computed html_start+6 here (find() returned -1),
                # producing a garbage 6-char span; skip truncated pages instead.
                continue
            html_end = html_start + close_tag + 7   # include '</html>' itself

            # Parse header into a dictionary. Layout of one document:
            #
            # ==P=>>>>=i===<<<<=T===>=A===<=!Junghoo!==>
            # URL: http://www.klainfo.com/
            # Date:
            # Position: 0
            # DocId: 0
            #                       <- blank line 1
            # HTTP/1.1 200 OK
            # Content-Length: 74
            # Content-Type: text/plain
            # ...
            #                       <- blank line 2
            # <html>...</html>
            header = {}
            header_txt = contents[header_start:header_end].split('\n')[1:]  # drop separator line

            blank_count = 0     # two blank ('' or '\r') lines terminate the header
            for h in header_txt:
                if h in ('', '\r'):
                    blank_count += 1
                    if blank_count == 2:
                        break
                else:
                    # Split "Key: value" -- key keeps everything before the
                    # first space minus the trailing ':'.
                    key = h[0:h.find(' ')][:-1].lower()
                    value = h[h.find(' ')+1:].strip().lower()

                    # Insert document url to DB
                    if key == 'url':
                        db.addurl(value)

                    # Normalize the status line (e.g. 'HTTP/1.1 200 OK')
                    # under a single canonical key.
                    if 'http' in key:
                        header['http/1.1'] = value
                    else:
                        header[key] = value

            # Reject pages with no content-type, and robots files.
            # .get() avoids a KeyError when the header had no URL line
            # (the original crashed on header['url'] in that case).
            if 'content-type' in header and 'robot' not in header.get('url', ''):
                page_list.append([html_start, html_end, header])

            # ExtractURLs is intentionally not called here; see test().
    finally:
        # Close the DB even if parsing raises part-way through.
        db.close()
    return page_list

def ExtractURLs(contents, url, db):
    '''Extract anchor (<a href=...>) URLs from an HTML page.

    contents -- the page HTML
    url      -- the page's own URL, used to resolve relative links
    db       -- database handle with an addurl() method, or None

    Every extracted absolute URL is added to `db` (when given).
    Returns the number of <a href> URLs found; 0 when the page
    contains no 'href' at all.
    '''
    if re.search(r'href', contents, re.IGNORECASE) is None:
        return 0

    urls = htmldata.urlextract(contents, url)
    urllist = []

    # De-tagged, entity-decoded copy of the page. This is loop-invariant,
    # so build it once instead of once per link as the original did.
    clean_html = htmldecode(remove_html_tags(contents))

    for u in urls:          # u has {url, start, end, in_html, tag_name}
        if u.tag_name == 'a' and u.in_html:
            if db is not None:
                db.addurl(u.url)
            urllist.append(u.url)

            # Anchor text lies between the closing '>' of the <a ...> tag
            # and the matching '</a>' (searched in bounded windows).
            anchor_start = u.end + contents[u.end+1:u.end+300].find('>') + 2
            anchor_end = u.end + contents[u.end:u.end+400].lower().find('</a>')
            anchor = contents[anchor_start:anchor_end]

            # Surrounding text. Clamp the lower bound: for links within the
            # first 300 chars the original's u.start-300 went negative and
            # silently wrapped to the END of the page.
            prefix = remove_extra_spaces(clean_html[max(0, u.start-300):u.start])
            suffix = remove_extra_spaces(clean_html[anchor_end:anchor_end+1000])
            # NOTE(review): anchor/prefix/suffix are computed but never used
            # or returned -- presumably work in progress; kept as-is.

    return len(urllist)

def checkContentTypes(content_type):
    '''Return True if `content_type` matches any pattern in
    settings.validContentTypes (case-insensitive regex search).

    The original used a bare except around .group(0) purely as flow
    control (and shadowed the builtin `type`); an explicit None check
    is equivalent and does not swallow unrelated errors.
    '''
    for pattern in settings.validContentTypes:
        if re.search(pattern, content_type, re.IGNORECASE) is not None:
            return True
    return False

def remove_html_tags(contents):
    '''Strip all <...> tags from `contents`, returning the remaining text.

    re.DOTALL makes '.' match newlines too, so tags whose attributes
    span multiple lines (common in real HTML) are removed as well --
    the original pattern left those in place.
    '''
    p = re.compile(r'<.*?>', re.DOTALL)
    return p.sub('', contents)

def remove_extra_spaces(contents):
    '''Delete ALL whitespace (spaces, tabs, newlines) from `contents`.

    Note: despite the name, whitespace runs are removed outright rather
    than collapsed to a single space.
    '''
    return re.sub(r'\s+', '', contents)

def htmldecode(contents):
    '''Replace named HTML entities (e.g. &amp;) in `contents` with their
    characters, using htmlentitydefs.entitydefs.

    Unknown entity names are left in the text untouched.
    '''
    entity_re = re.compile(r'&(\w+?);')

    def _expand(match):
        replacement = htmlentitydefs.entitydefs.get(match.group(1))
        # Fall back to the raw '&name;' text for unknown entities.
        return replacement if replacement is not None else match.group(0)

    return entity_re.sub(_expand, contents)

def test():
    contents = open('../data/adsabs.harvard.edu','r').read()
    #contents = open('../data/myhero.com','r').read()
    start_t = time.time()
    page_list = ExtractSites(contents)
    url_count = 0
    print 'ExtractDoc',time.time()-start_t
    for starthtml,endhtml,header in page_list:
        start_tt = time.time()
        url_count += ExtractURLs(contents[starthtml:endhtml],header['url'],None)
        print '\t',time.time()-start_tt,url_count,' urls'
    print time.time()-start_t,url_count,len(contents)

# Run the smoke test when executed as a script (no effect on import).
if __name__ == "__main__" :
    test()
