'''
Created on 2011-12-27

@author: ajwang
'''

import sgmllib
import urllib
import BeautifulSoup
from urlparse import urljoin, urlparse
import socket
from const import CONST
import re
import sys
import chardet

# Global 5 s socket timeout so urllib.urlopen() calls below cannot hang
# indefinitely on a dead host.
socket.setdefaulttimeout(5.0)

class PageLinks(sgmllib.SGMLParser):
    """Fetch a web page and collect its hyperlinks (<a>, <frame>, <iframe>).

    Only http links under CONST.DOMAIN whose path looks like an HTML page
    are kept.  Collected (percent-encoded) links are available through
    get_html_links().
    """

    # Path extensions considered to be (server-rendered) HTML pages.
    HTML_SUFFIXES = ('htm', 'html', 'shtml', 'xhtml',
                     'asp', 'aspx', 'jsp', 'php', 'action')

    def __init__(self, _url, verbose=0):
        """Download _url, parse it and populate self.hyperlinks.

        _url    -- page to fetch (network I/O happens here)
        verbose -- passed through to sgmllib.SGMLParser
        """
        sgmllib.SGMLParser.__init__(self, verbose)
        self.hyperlinks = []
        fp = urllib.urlopen(_url)
        try:
            # geturl() reflects any redirect, so relative links resolve
            # against the URL actually served.
            self.base_url = fp.geturl()
            _html = fp.read()
        finally:
            fp.close()  # was leaked in the original
        self.charset = self.get_page_charset(_html)
        try:
            _text = _html.decode(self.charset)
        except (LookupError, UnicodeDecodeError):
            # Declared charset was wrong or unknown to Python: fall back
            # to lenient UTF-8 instead of crashing the crawl.
            _text = _html.decode('utf-8', 'replace')
        # BeautifulSoup normalizes sloppy markup; str(soup) re-encodes
        # the cleaned document to UTF-8 bytes for the SGML parser.
        soup = BeautifulSoup.BeautifulSoup(_text)
        self._parse(str(soup))

    def _parse(self, s):
        """Feed the (cleaned) HTML to the SGML parser; start_* hooks fire."""
        self.feed(s)
        self.close()

    def get_page_charset(self, _html):
        """Return the page's character encoding.

        Prefers the <meta ... charset=...> declaration; falls back to
        chardet auto-detection, and finally to 'utf-8'.
        """
        # Matches charset=utf-8, charset="utf-8" and charset='utf-8',
        # case-insensitively.  The previous pattern  charset=(.*)\"
        # captured the *opening* quote of <meta charset="utf-8"> (greedy
        # .* backtracks only to the last quote), producing the invalid
        # codec name '"utf-8'.
        ptn = re.compile(r'<meta[^>]+charset\s*=\s*["\']?\s*([\w\-]+)', re.I)
        m = ptn.search(_html)
        if m:
            return m.group(1)
        _cs = chardet.detect(_html)['encoding']
        return _cs if _cs else 'utf-8'

    def _get_valid_url(self, link):
        """Return the final (post-redirect) URL if link serves HTML, else None."""
        try:
            fp = urllib.urlopen(link)
            try:
                if fp.headers['Content-Type'].lower().rfind('html') != -1:
                    return fp.geturl()
                return None
            finally:
                fp.close()
        except (IOError, OSError, KeyError):
            # Network error / timeout, or missing Content-Type header:
            # treat the link as not crawlable.  (Was a bare except, which
            # also swallowed KeyboardInterrupt/SystemExit.)
            return None

    def url_encode(self, _url):
        """Percent-encode the non-ASCII runs in _url.

        _url is a UTF-8 byte string (output of str(soup)); non-ASCII
        segments are transcoded to the page charset before quoting,
        ASCII segments are left untouched.
        """
        ptn = re.compile(r'([^\x00-\x7f]+)')
        parts = []
        for seg in ptn.split(_url):
            if ptn.match(seg):
                try:
                    seg = seg.decode('utf-8').encode(self.charset)
                except (LookupError, UnicodeError):
                    pass  # keep the raw UTF-8 bytes if transcoding fails
                parts.append(urllib.quote(seg))
            else:
                parts.append(seg)
        return ''.join(parts)  # join once instead of quadratic +=

    def _add_link(self, value):
        """Normalize value and record it if it is an in-domain HTML link."""
        value = urljoin(self.base_url, value)
        _u = urlparse(value)  # parse once; reused for scheme/fragment/path

        if _u.scheme.lower() != 'http':
            return

        # Drop the fragment -- it names an anchor inside the same page.
        if _u.fragment != '':
            value = value[:value.index('#')]

        # Skip URLs whose extension cannot be an HTML page (images, css, ...).
        _path = _u.path
        if _path != '' and _path.rfind('.') != -1:
            _suffix = _path[_path.rfind('.') + 1:].lower()
            if _suffix not in self.HTML_SUFFIXES:
                return

        # Only links under the crawl domain are kept.
        if value.rfind(CONST.DOMAIN) != -1:
            self.hyperlinks.append(self.url_encode(value))

    #--------------------------------------------------------------
    # tag handlers invoked by sgmllib during feed()
    def start_a(self, attributes):
        """Collect the href of every <a> tag."""
        for name, value in attributes:
            if name == "href":
                self._add_link(value)

    def start_frame(self, attributes):
        """Collect the src of every <frame> tag."""
        for name, value in attributes:
            if name == "src":
                self._add_link(value)

    def start_iframe(self, attributes):
        """Collect the src of every <iframe> tag."""
        for name, value in attributes:
            if name == "src":
                self._add_link(value)

    #---------------------------------------------------------------
    def get_html_links(self):
        """Return the list of collected, percent-encoded hyperlinks."""
        return self.hyperlinks