#!/usr/bin/env python
#
#   pywebspider.py - web spider, based on curl
#   
#   dependent: curl
#

from subprocess import Popen, PIPE
from urldb2 import UrlDB
from hashlib import md5
import re
import sys

# Module version. The conventional spelling is __version__ (PEP 396);
# the original misspelled name is kept as an alias so existing
# importers do not break.
__version__ = "0.1"
__version = __version__


class PyWebSpider():
    '''
    PyWebSpider: a web spider front-end around the ``curl`` command-line
    tool.

    Pages are fetched by spawning curl subprocesses; discovered urls are
    stored in an UrlDB database and crawled breadth-first until the
    queue is empty.
    '''
    def __init__(self, urldb_filename):
        '''
        Open the url database and prepare the url-extraction patterns.

        urldb_filename -- path of the UrlDB database file
        '''
        self.urldb = UrlDB(urldb_filename)

        # Fallback url used by get_code()/get_url_info() when they are
        # called without an explicit url argument.
        self.__url = None

        # Characters that may legally appear in the urls we harvest.
        self.__url_common_pattern = r"[\w\.\:\;/\~\%\-\?\=#_&]+"
        # Patterns that locate urls inside fetched page source:
        # absolute urls (incl. the defanged "hxxp" spelling) and the
        # href=, src= and background= attributes.
        self.__url_filters = (
            r"(?mi)((?:http|https|ftp|hxxp)://" + self.__url_common_pattern + r")",
            r"(?mi)href=(?:\"|\'|)(" + self.__url_common_pattern + r")(?:\"|\'|)",
            r"(?mi)src=(?:\"|\'|)(" + self.__url_common_pattern + r")(?:\"|\'|)",
            r"(?mi)background=(?:\"|\'|)(" + self.__url_common_pattern + r")(?:\"|\'|)",
        )

        # url regex -> handling parameters (dict) or dotted callback
        # name (string); consumed by handle_url().
        self.urlpatterns = { r'.*': {'type': None}, }

    def get_code(self, url=None, curl_opts=''):
        '''
        Fetch a document with curl and return its body.

        url       -- url to fetch; defaults to the last remembered url
        curl_opts -- one extra curl option (e.g. '-I'), or '' for none

        Raises ValueError when neither *url* nor a fallback url is
        available.

        curl default options:
        -L:   if the initial web server response indicates that the requested page
              has moved to a new location (redirect), CURL's default behaviour is
              not to request the page at that new location, but just print the HTTP
              error message. This switch instructs CURL to make another request
              asking for the page at the new location whenever the web server returns
              a 3xx HTTP code.
        '''
        curl_def_opts = '-L'
        if not url: url = self.__url
        if not url:
            raise ValueError("get_code: no url given and no previous url available")
        cmd = ["curl", curl_def_opts]
        # An empty curl_opts must not reach curl's argv: curl would try
        # to interpret the empty argument as a url and fail.
        if curl_opts:
            cmd.append(curl_opts)
        cmd.append(url)
        curl = Popen(cmd, stdout=PIPE, stderr=PIPE)
        # communicate() drains both pipes; reading only stdout could
        # deadlock once curl fills the stderr pipe buffer.
        (stdout_data, _stderr_data) = curl.communicate()
        return stdout_data

    def get_url_info(self, url=None, type=''):
        '''
        Return the server's HTTP response headers for *url*.

        The *type* parameter is unused; it is kept so existing callers
        keep working.
        '''
        if not url: url = self.__url

        # curl option, -I: CURL prints only the server response's HTTP headers
        url_info = self.get_code(url, '-I')
        return url_info

    def extract_info(self, url=None):
        '''
        Fetch a page and extract its info.

        Returns {'md5': <hex digest of the page body>,
                 'urls': <set of urls found in the body, made absolute>}

        Relative urls are resolved against the directory of *url*.
        Prints an error and exits the process when *url* is not an
        absolute url (no scheme or netloc).
        '''
        try:
            from urlparse import urlparse, urlunparse          # python 2
        except ImportError:
            from urllib.parse import urlparse, urlunparse      # python 3
        from os.path import dirname
        from os.path import join as join_path

        result_urls = []

        base_params = urlparse(url)
        if base_params.scheme == '' or base_params.netloc == '':
            print("Incorrect url parameter: %s" % url)
            sys.exit(1)

        data = self.get_code(url)
        # Not named "md5": that would shadow hashlib.md5 imported at
        # module level.
        page_md5 = self.md5calc(data)
        for url_filter in self.__url_filters:
            urls = re.findall(url_filter, data)
            for u in set(urls):
                # "hxxp" is the common defanged spelling of http.
                u = u.replace("hxxp://", "http://")
                (scheme, netloc, path, params, query, fragment) = urlparse(u)
                if scheme and netloc:
                    # Already absolute: keep as-is.
                    result_urls.append(u)
                else:
                    try:
                        # Relative path: resolve against the base url's
                        # directory unless it is root- or dot-relative.
                        if not path[0] == '/' and not path[0] == '.':
                            path = join_path(dirname(base_params.path), path)
                    except IndexError:
                        # Empty path (e.g. pure "?query" / "#fragment" urls).
                        pass
                    result_urls.append(urlunparse([base_params.scheme, base_params.netloc, path,
                                        params, query, fragment]))
        result_urls.sort()
        return {'md5': page_md5, 'urls': set(result_urls)}

    def md5calc(self, code):
        '''
        Return the hexadecimal md5 digest of *code*.
        '''
        return md5(code).hexdigest()

    def handle_url(self, url):
        '''
        Dispatch *url* through self.urlpatterns: add it to the database,
        skip it, or ask a user-supplied callback how to handle it.
        '''
        def allocation(url, **kwargs):
            '''
            Allocate the url according to kwargs['type']: skip it, or
            store it as a normal / check-once / edge url.
            '''
            if kwargs['type'] == 'skip':
                pass
            elif kwargs['type'] == 'normal':
                self.urldb.add(url=url, url_type=None)
            elif kwargs['type'] == 'check_once':
                self.urldb.add(url=url, url_type='check_once')
            elif kwargs['type'] == 'edge':
                self.urldb.add(url=url, url_type='edge')

        def get_mod_func(callback):
            '''
            Split 'webspider.function' into ('webspider', 'function');
            a dotless name yields (callback, '').
            '''
            try:
                dot = callback.rindex('.')
            except ValueError:
                return callback, ''
            return callback[:dot], callback[dot+1:]

        def url_callback(method):
            '''
            Import the module of a dotted callback name, call the
            callback with the url and return its parameter dict.
            '''
            (module_name, func_name) = get_mod_func(method)
            try:
                f_module = __import__(module_name)
            except ImportError:
                # f_module is unbound when the import fails; report the
                # module name that failed to import instead.
                print("Error! Cannot import module: %s" % module_name)
                sys.exit()
            return getattr(f_module, func_name)(url)

        def url_conv(url):
            '''
            Decode html character entities appearing in urls.
            '''
            res = url.replace('&amp;', '&')
            return res

        url = url_conv(url)
        for pattern in self.urlpatterns.keys():
            if re.search(pattern, url):
                if isinstance(self.urlpatterns[pattern], dict):
                    # in case when parameters are defined
                    allocation(url, **self.urlpatterns[pattern])
                elif isinstance(self.urlpatterns[pattern], basestring):
                    # in case when url callback is defined
                    url_params = url_callback(self.urlpatterns[pattern])
                    allocation(url, **url_params)
                else:
                    print("Error! Unknown type of url handling")
                    sys.exit()

    def next_url(self):
        '''
        Return the next url awaiting a check, or None when the queue is
        empty. Edge urls are stored but never fetched.
        '''
        try:
            url = self.urldb.select(where='(status="" or status is null) and (type <> "edge" or type is null)',
                                    limit=1)[0][1]
        except IndexError:
            # select() returned no rows: nothing left to crawl.
            return None
        return url

    def run(self, start_url):
        '''
        Run the spider: seed the database with *start_url*, then loop
        fetching the next unchecked url, harvesting its links and
        marking it checked, until no urls remain.

        Syncs the database and exits cleanly on Ctrl-C.
        '''
        # NOTE(review): handle_url() always returns None, so the status
        # reset below always runs; kept for compatibility.
        if not self.handle_url(start_url): self.urldb.update(start_url, status='')
        try:
            while 1:
                url = self.next_url()
                if not url: break

                urls_db_stat = self.urldb.stat()
                print("[%d/%d] %s" % (urls_db_stat['total'],
                                        urls_db_stat['total'] - urls_db_stat['checked'] - urls_db_stat['edge'], url))

                page_info = self.extract_info(url)
                for n_url in page_info['urls']:
                    self.handle_url(n_url)
                self.urldb.update(url, status='checked', md5=page_info['md5'])

            self.urldb.sync()
        except KeyboardInterrupt:
            print("Keyboard interrupted")
            self.urldb.sync()
            sys.exit()

        
    
