#!/usr/bin/env python
#
#   pywebspider.py - web spider, based on curl
#   
#   dependency: curl (must be on PATH)
#

from subprocess import Popen, PIPE
from urldb import UrlDB
from hashlib import md5
import re

__version="0.1"


class PyWebSpider():
    '''
    PyWebSpider: a simple web spider that shells out to curl for HTTP
    transfers and tracks discovered urls in a UrlDB database.
    '''
    def __init__(self, url=None):
        '''
        Initialize the spider.

        url -- optional start/default url; used as the fallback by
               get_code(), get_url_info() and run() when no url argument
               is given.  (The original constructor accepted no arguments,
               so the __main__ block's PyWebSpider(url=...) call crashed
               with a TypeError, and self.__url was never set despite
               being read by get_code()/get_url_info().)
        '''
        # start/default url; may stay None until run(start_url) is called
        self.__url = url

        # url filters: regexes for finding urls in page source.
        # "hxxp" matches defanged urls; they are normalized back to
        # "http" in extract_info().
        self.__url_filters = (
            r"(?mi)((?:http|https|ftp|hxxp)://[a-z0-9\.\:/\~\%\-\?\=#_&]+)",
            r"(?mi)href=(?:\"|\')([a-z0-9\.\:/\~\%\-\?\=#_&]+)(?:\"|\')",
            r"(?mi)src=(?:\"|\')([a-z0-9\.\:/\~\%\-\?\=#_&]+)(?:\"|\')",
        )

        self.urls_db = UrlDB()
        # optional path of a json dump used by run() to load/save the db
        self.urls_db_datafile = None

        # url template regex -> url type, consumed by add_url2db();
        # the default catch-all template assigns the empty type
        self.url_templates = { r'.*': '', }

    def get_code(self, url=None, curl_opts=''):
        '''
        Fetch the raw body of `url` by running curl; returns whatever
        curl writes to stdout (page body, or headers with -I).

        curl default options:
        -L:   if the initial web server response indicates that the requested
              page has moved to a new location (redirect), curl's default
              behaviour is not to request the page at that new location, but
              just print the HTTP error message.  This switch instructs curl
              to make another request asking for the page at the new location
              whenever the web server returns a 3xx HTTP code.

        Bug fixed: an empty curl_opts used to be passed to curl as a
        literal empty argument (curl rejects an empty argv element);
        empty option strings are now filtered out of the command line.
        '''
        curl_def_opts = '-L'
        if not url:
            url = self.__url
        cmd = ["curl", curl_def_opts]
        if curl_opts:
            cmd.append(curl_opts)
        cmd.append(url)
        curl = Popen(cmd, stdout=PIPE, stderr=PIPE)
        return curl.stdout.read()

    def get_url_info(self, url=None, type=''):
        '''
        Return the server's HTTP response headers for `url`.

        type -- kept for backward compatibility with existing callers;
                currently unused.
        '''
        if not url:
            url = self.__url
        # curl option -I: print only the server response's HTTP headers
        return self.get_code(url, '-I')

    def extract_info(self, url=None):
        '''
        Download `url` and extract page info.

        Returns a dict with:
          'md5'  -- hex md5 digest of the page body
          'urls' -- set of urls found in the page; relative urls are
                    resolved against `url`'s scheme and netloc

        Exits the process (status 1) when `url` is not an absolute url.
        '''
        import sys
        from urlparse import urlparse, urlunparse

        result_urls = []

        base_params = urlparse(url)
        if base_params.scheme == '' or base_params.netloc == '':
            print("Incorrect url parameter: %s" % url)
            sys.exit(1)

        data = self.get_code(url)
        # renamed from `md5`: the old local shadowed the hashlib.md5 import
        digest = self.md5calc(data)
        for url_filter in self.__url_filters:
            urls = re.findall(url_filter, data)
            for u in set(urls):
                # normalize defanged urls back to fetchable ones
                u = u.replace("hxxp://", "http://")
                (scheme, netloc, path, params, query, fragment) = urlparse(u)
                if scheme and netloc:
                    result_urls.append(u)
                else:
                    # relative url: resolve against the base url's location
                    result_urls.append(urlunparse([base_params.scheme,
                                                   base_params.netloc,
                                                   path, params, query,
                                                   fragment]))
        result_urls.sort()
        return {'md5': digest, 'urls': set(result_urls)}

    def md5calc(self, code):
        '''
        Return the hex md5 digest of `code` (the raw page body).
        '''
        return md5(code).hexdigest()

    def add_url2db(self, url):
        '''
        Match `url` against the configured url templates and add it to
        the urls db with each matching template's type.  A url matching
        several templates is added once per match; UrlDB presumably
        de-duplicates on key -- TODO confirm against urldb module.
        '''
        for tmpl, url_type in self.url_templates.items():
            if re.search(tmpl, url):
                self.urls_db.add(url, type=url_type, status='', md5='')

    def show_urls(self):
        '''
        Print every url in the database as "url:record".
        '''
        for url in self.urls_db.keys():
            print("%s:%s" % (url, self.urls_db[url]))

    # backward-compatible alias: the __main__ block calls the singular name
    show_url = show_urls

    def next_url(self):
        '''
        Return the next unchecked url -- status '' with type '' or
        'check_once' -- or None when nothing is left to crawl.
        '''
        pending = set(self.urls_db.filter(status='', type='')) \
                      .union(self.urls_db.filter(status='', type='check_once'))
        try:
            return list(pending)[0]
        except IndexError:
            return None

    def run(self, start_url=None):
        '''
        Crawl starting from `start_url` (falls back to the url given to
        __init__) until no unchecked urls remain.  The urls db is saved
        to urls_db_datafile both on normal completion and on Ctrl-C.
        (The original signature required start_url, so the __main__
        block's bare sp.run() call crashed; it now defaults to the
        constructor's url.)
        '''
        if not start_url:
            start_url = self.__url
        self.urls_db.load_from_json(self.urls_db_datafile)
        self.add_url2db(start_url)
        try:
            while True:
                next_url = self.next_url()
                if not next_url:
                    break

                urls_db_stat = self.urls_db.stat()
                print("[%d/%d] %s" % (urls_db_stat['total'],
                                      urls_db_stat['not_checked'] - urls_db_stat['edge'],
                                      next_url))

                page_info = self.extract_info(next_url)
                for url in page_info['urls']:
                    self.add_url2db(url)
                self.urls_db.update(next_url, status='checked', md5=page_info['md5'])
        except KeyboardInterrupt:
            # interrupted by the user: fall through and save progress
            pass
        self.urls_db.save_to_json(self.urls_db_datafile)

        

if __name__ == "__main__":

    from optparse import OptionParser

    parser = OptionParser(version="%prog, v." + __version)
    parser.add_option("-u", "--url", dest="url",
                      help="url, links will be extracted")
    (options, args) = parser.parse_args()

    if options.url is None:
        # parser.error() prints to stderr and exits with status 2 by
        # itself, so the old sys.exit() after it was unreachable
        parser.error("Incorrect number of arguments, \n\nuse -h or --help for detail\n")

    # Bug fixes vs the original entry point:
    #   * PyWebSpider's constructor takes no required url -- the start
    #     url is passed to run() instead (the old PyWebSpider(url=...)
    #     call raised TypeError)
    #   * sp.run() was called without the required start_url argument
    #   * the display method is show_urls(), not show_url()
    sp = PyWebSpider()
    sp.run(options.url)
    sp.show_urls()