#!/usr/bin/python
# -*- coding: utf-8 -*-
# Description: Base template spider for grabbing web pages
# Created at 2008-11-01 11:02:14 AM
# Author: MK2[fengmk2@gmail.com]
"""A web spider can just define a template, and it will give what you want.
"""

import urllib2
#import urllib3
import urlparse
import re

DEFAULT_TIMEOUT = 300 # default timeout: 5 minutes

class Spider(object):
    """Template-driven web spider.

    Configure a list of per-level ``templates`` (dicts of regular
    expressions plus optional callback *source strings*), and ``walk()``
    yields the matched info dicts and download URLs for every page it
    visits, recursing level by level.
    """

    # Fallback encoding used when neither the HTTP Content-Type header
    # nor the page itself declares a charset.
    DEFAULT_CHARSET = 'utf-8'
    # Extracts the charset token either from a Content-Type header value
    # or from an HTML <meta ... charset=...> tag.
    CHARSET_RE = re.compile(ur'(?:<meta\s[^<>]*?\s)?charset=([\w\d\-]*)(?:[^<>]*?>)?', 
                                 re.I)
    
    def __init__(self, url, templates, referer=None, timeout=DEFAULT_TIMEOUT, 
                 user_agent=None,
                 http_headers=None):
        """Init a spider.

        url       -- start URL of the crawl.
        templates -- list of template dicts, one per crawl level;
                     len(templates) bounds the recursion depth of walk().
        referer   -- optional Referer for the first request, None default.
        timeout   -- request timeout in seconds (default DEFAULT_TIMEOUT,
                     i.e. 300 s / 5 minutes).
        user_agent, http_headers -- stored on the instance but
                     NOTE(review): never read by request(); either wire
                     them into the request headers or drop them.
        """
        self.start_url = url
        self.templates = templates
        self.maxlevel = len(templates)
        self.referer = referer
        self.timeout = timeout
        self.user_agent = user_agent
        self.http_headers = http_headers
        
        self._init_spider()
        
    def _init_spider(self):
        """Create the urllib2 opener and the visited-URL cache."""
#        self._old_timeout = urllib2.socket.getdefaulttimeout()
#        urllib2.socket.setdefaulttimeout(self.timeout)
#        self.http_pool = urllib3.HTTPConnectionPool.from_url(self.start_url)
        self.opener = urllib2.build_opener()
        # URLs already visited; walk() skips any URL found here.
        self._caches = set()
    
    def __del__(self):
        """Destructor; the timeout restore below is disabled, so this is a no-op."""
#        urllib2.socket.setdefaulttimeout(self._old_timeout)
    
    def walk(self, url=None, parent_url=None, level=1):
        """Crawl from *url* (default: start_url); a generator, like os.walk().

        Yields tuples of:
            (level, url, parent_url, response, content, downurls, info)

        content is unicode: the spider tries to decode with the page's
        charset, falling back to DEFAULT_CHARSET ('utf-8').

        Recognized template keys (per level):
            downurl / infos / info / next_level / same_level
        plus optional ``*_callback`` / ``*_finish_callback`` entries whose
        values are *source code strings* run with ``exec``, expected to
        define a function of the same name in the local scope.
        NOTE(review): exec'ing template strings is a code-injection risk
        if templates ever come from untrusted input.
        """
        if url is None:
            url = self.start_url
        else:
            assert(isinstance(url, basestring))
        if parent_url is not None:
            assert(isinstance(parent_url, basestring))
        # Percent-encode the URL so non-ASCII characters are request-safe.
        url = self.urlencode(url)
        if url in self._caches:
            # Already visited: generator ends for this branch.
            return
        print url  # NOTE(review): debug print left in production path
        self._current_url = url
        self._caches.add(url)
        # Templates are 1-indexed by level.
        template = self.templates[level-1]
        downurls, softinfo = [], {}
        try:
            response = self.request(url, parent_url)
            content = self.get_content(response)
        except Exception, e:
            # NOTE(review): swallows every error with a bare print; the
            # failed URL yields nothing and the walk continues.
            print e, url
        else:
            if 'downurl' in template:
                if 'downurl_callback' in template:
                    # exec is expected to define downurl_callback locally.
                    exec template['downurl_callback']
                    callback = downurl_callback
                else:
                    callback = None
                downurls = set(self.get_urls(content, template['downurl'], callback))
                if 'downurl_finish_callback' in template:
                    exec template['downurl_finish_callback']
                    downurls = downurl_finish_callback(self._current_url, 
                                                                 downurls)
                if not downurls:
                    print '###### %s: no download urls ######' % url
            if 'infos' in template:
                # Multi-record page: one yield per matched info, then
                # recurse into each record's next URL.
                infos = self.get_infos(content, template['infos'])
                for info in infos:
                    yield level, url, parent_url, response, content, downurls, info
                    # NOTE(review): get_infos() returns plain dicts, which
                    # have no .nexturl attribute -- confirm against real
                    # templates/callers.
                    for r in self.walk(info.nexturl, url, level + 1):
                        yield r
            else:
                # Single-record (or record-less) page.
                if 'info' in template:
                    softinfo = self.get_info(content, template['info'])
                yield level, url, parent_url, response, content, downurls, softinfo
            
            if 'next_level' in template:
                if 'next_level_callback' in template:
                    exec template['next_level_callback']
                    callback = next_level_callback
                else:
                    callback = None
                next_level_urls = self.get_urls(content, template['next_level'], 
                                                callback)
                if 'next_level_finish_callback' in template:
                    exec template['next_level_finish_callback']
                    next_level_urls = next_level_finish_callback(self._current_url, 
                                                                 next_level_urls)
                next_level = level + 1
                # Stop descending once past the last template.
                if next_level <= self.maxlevel:
                    for u in next_level_urls:
                        for r in self.walk(u, url, next_level):
                            yield r
            if 'same_level' in template:
                # e.g. pagination links: recurse at the SAME level.
                if 'same_level_callback' in template:
                    exec template['same_level_callback']
                    callback = same_level_callback
                else:
                    callback = None
                save_level_urls = self.get_urls(content, template['same_level'], callback)
                if 'same_level_finish_callback' in template:
                    exec template['same_level_finish_callback']
                    save_level_urls = same_level_finish_callback(self._current_url, 
                                                                 save_level_urls)
                for u in save_level_urls:
                    for r in self.walk(u, url, level):
                        yield r
            
        
    def request(self, url, referer=None):
        """Fetch *url* and return the response object.

        NOTE(review): the first two lines short-circuit the method --
        everything below the first ``return`` (custom User-Agent,
        Referer header, 302 handling) is dead code. The unreachable
        redirect branch is also buggy: it reads ``self.info`` where the
        local ``info`` was meant, and calls ``self.get_webinfo`` which
        does not exist (presumably should be ``self.request``).
        """
        result = urllib2.urlopen(url)
        return result
        self._current_url = url
        req = urllib2.Request(url)
        req.add_header('User-Agent', 
                       # 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.6) Gecko/2009011913 Firefox/3.0.6')
                       'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 2.0.50727; MAXTHON 2.0)')
        if referer is not None:
            req.add_header('Referer', referer)
        try:
            return self.opener.open(req)
        except urllib2.URLError, e:
            print 'error url: %s' % url
            if hasattr(e, 'reason'):
                print 'We failed to reach a server.'
                print 'Reason: ', e.reason
                raise
            elif hasattr(e, 'code'): # manual second-hop redirect
                if e.code == 302:
                    info = e.info()
                    print '302: restart new url:', self.info.dict['location']
                    redirect_url = info.dict['location']
                    return self.get_webinfo(redirect_url, url)
            raise
        
    def get_content(self, response):
        """Return the response body as unicode, decoded with the detected charset.

        Charset lookup order: Content-Type header, then an HTML <meta>
        tag in the body, then DEFAULT_CHARSET. Decoding uses
        errors='replace', so undecodable bytes become U+FFFD.
        """
        content = response.read()
        charset = self.CHARSET_RE.search(response.headers['content-type']) # auto-detect charset; decode with it if found
        if not charset:
            charset = self.CHARSET_RE.search(content)
        if charset:
            charset = charset.groups()[0]
        else:
            charset = self.DEFAULT_CHARSET
        content = content.decode(charset, 'replace')
        return content
    
    # Patterns for get_html_detail(): <meta name="description"> and <title>.
    DESC_RE = re.compile(ur'<meta[^<>]*?name="description"[^<>]*?content="([^<>]*?)"', re.I)
    TITLE_RE = re.compile(ur'<title[^<>]*>([^<>]*?)</title>', re.I)
    def get_html_detail(self, url, referer=None):
        """Fetch *url* and return a dict with its 'title' and/or 'description'.

        Returns {} for non-http:// URLs or on any fetch/decode error
        (best-effort: all exceptions are swallowed).
        """
        detail = {}
        if not url.startswith('http://'):
            return detail
        try:
            response = self.request(url, referer)
            content = self.get_content(response)
        except:
            return detail
        m = self.DESC_RE.search(content)
        if m:
            detail['description'] = m.group(1).replace('\n', '')
        m = self.TITLE_RE.search(content)
        if m:
            detail['title'] = m.group(1).replace('\n', '')
        return detail
    
    def get_urls(self, content, express, callback=None):
        """Find all URLs in *content* matching the regex *express*.

        Each raw match is optionally transformed by callback(current_url,
        url), then resolved against the current page URL if relative.
        Returns a list (may contain duplicates).
        """
        assert isinstance(content, basestring)
        assert isinstance(express, basestring)
        url_pattern = re.compile(express, re.I | re.VERBOSE)
        rawurls = url_pattern.findall(content)
        urls = []
        for url in rawurls:
            if callback is not None and callable(callback):
                url = callback(self._current_url, url)
            if not url.startswith('http://'):
                # Relative link: resolve against the page it came from.
                url = urlparse.urljoin(self._current_url, url)
            urls.append(url)
        return urls
    
    def get_info(self, content, express):
        """Return the first info dict matched by *express*, or None if no match."""
        infos = self.get_infos(content, express)
        if infos:
            return infos[0]
    
    def get_infos(self, content, express):
        """Return a list of groupdicts, one per non-overlapping match of *express*.

        NOTE(review): the compiled pattern is memoized on the FIRST call
        (hasattr check below), so a different *express* passed later is
        silently ignored -- a bug when templates use several patterns.
        The debug prints and the misleading "duplicate match" message
        (printed when there are simply no more matches) should go too.
        """
        assert isinstance(content, basestring)
        assert isinstance(express, basestring)
        if not hasattr(self, '_info_pattern'):
            self._info_pattern = re.compile(express, re.I | re.VERBOSE)
        infos = []
        pos = 0
        print self._info_pattern.findall(content)
        while True:
            m = self._info_pattern.search(content, pos)
            if not m:
                print "duplicate match"
                break
            pos = m.end(0)
            print pos
            print m.groupdict()
            infos.append(m.groupdict())
        return infos
    
    @staticmethod
    def urlencode(url):
        """Percent-encode *url* as UTF-8, leaving URL structural chars intact."""
        return urllib2.quote(url.encode('utf-8'), ':/&=;@?+$,#')

def html_detail(url, *arg, **kargs):
    """Convenience wrapper: fetch *url* and return its title/description dict.

    Builds a throwaway Spider (no templates) purely to reuse
    Spider.get_html_detail(); extra args are passed straight through.
    """
    throwaway = Spider('test sd', [])
    return throwaway.get_html_detail(url, *arg, **kargs)

if __name__ == '__main__':
    # Smoke test: print the detail dict for a sample shortened URL.
    detail = html_detail('http://suourl.appspot.com/41a')
    print(detail)