#!/usr/bin/env python3

import gzip
import os.path
import queue
import re
import sqlite3
import threading
import time
import urllib.error as ue
import urllib.parse as up
import urllib.request as ur

from socket import timeout as SocketTimeout

from bs4 import BeautifulSoup

from utils import SpiderBase



class Core(SpiderBase):
    '''Core engine and shared data structures.

    Holds the request frontier (`todo`), the downloaded-page queue
    (`resp`) and the set of already-crawled urls (`done`).
    '''

    def __init__(self, depth):
        '''`depth`: maximum crawl depth; deeper requests are dropped.'''
        SpiderBase.__init__(self)
        self.depth = depth
        self.todo = queue.Queue()   # pending (depth, url) requests
        self.resp = queue.Queue()   # downloaded (depth, url, page) responses
        self.done = set()           # urls already crawled

    def add_timer(self, interval):
        '''Start a daemon thread that logs progress every `interval` seconds.'''
        def _log(msg, *args):
            while True:
                # Invoke the getters on every pass so each log line
                # reports fresh counts.
                self.logger.info(msg, *(a() for a in args))
                time.sleep(interval)
        msg = ':: done %d, todo %d, pages %d'
        args = self.done.__len__, self.todo.qsize, self.resp.qsize
        timer = threading.Thread(target=_log, args=(msg,) + args)
        timer.daemon = True
        timer.start()

    def put_requests(self, urls):
        '''Queue new `(depth, url)` requests, skipping already-crawled urls.

        Every request in one batch shares the same depth, so checking the
        first element is enough to enforce the depth limit.
        '''
        if not urls or urls[0][0] > self.depth:
            return
        # BUGFIX: `done` stores bare url strings while requests are
        # (depth, url) tuples, so the old set difference against `done`
        # never removed anything; compare on the url component instead.
        for request in set(urls):
            if request[1] not in self.done:
                self.todo.put(request)

    def put_done(self, url):
        '''Mark `url` as crawled.'''
        self.done.add(url)

    def get_request(self):
        '''Block until a pending (depth, url) request is available.'''
        return self.todo.get()

    def put_response(self, resp):
        '''Queue a downloaded (depth, url, page) tuple for the pipeline.'''
        self.resp.put(resp)

    def get_response(self):
        '''Block until a downloaded response is available.'''
        return self.resp.get()

    def is_finished(self):
        '''True when both queues are drained (workers may still be running).'''
        return self.todo.empty() and self.resp.empty()


class Downloader(SpiderBase):
    '''Fetch pages for requests taken from the core's frontier queue.'''

    # Impersonate a desktop browser; some sites reject the default
    # urllib user agent.
    HEADERS = {
        'User-Agent': ('Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) '
                       'AppleWebKit/537.36 (KHTML, like Gecko) '
                       'Chrome/45.0.2454.85 Safari/537.36'),
        'Accept': "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
        'Accept-Language': "zh-cn,zh;q=0.8,en-us;q=0.5,en;q=0.3",
        'Accept-Encoding': "gzip, deflate"
    }

    def __init__(self, core, timeout):
        '''`core`: shared Core instance; `timeout`: socket timeout in seconds.'''
        SpiderBase.__init__(self)
        self.core = core
        self.timeout = timeout

    def work(self):
        '''Take one request, download the page and hand it to the core.'''
        depth, url = self.core.get_request()
        self.logger.debug('Downloader get request %s' % url)
        page = self.retrieve(url)
        self.core.put_response((depth, url, page))

    def retrieve(self, url):
        '''Get the content of the `url`, return null bytes if not HTML page'''
        opener = ur.build_opener()
        request = ur.Request(url, None, Downloader.HEADERS)
        # Only advertise gzip: it is the sole encoding we decompress below.
        request.add_header('Accept-Encoding', 'gzip')
        data = b''
        try:
            with opener.open(request, timeout=self.timeout) as conn:
                # BUGFIX: Content-Type may be missing (getheader returns
                # None); treat that as non-text instead of raising.
                ctype = conn.getheader('Content-Type') or ''
                if 'text' in ctype:  # text/html
                    data = conn.read()
        except SocketTimeout:
            # logger.warn is deprecated in favour of logger.warning
            self.logger.warning('socket.timeout: %s' % url)
        except ue.HTTPError as e:
            self.logger.error('urllib.error.HTTPError: %d - %s: %s'
                              % (e.code, e.msg, url))
        except ue.URLError as e:
            self.logger.warning('urllib.error.URLError: %s' % str(e))
        except Exception as e:
            # Best-effort download: log unexpected failures instead of
            # silently swallowing them.
            self.logger.warning('unexpected error for %s: %r' % (url, e))
        # Cooperating servers answer with gzip bodies; sniff the magic bytes.
        return gzip.decompress(data) if data.startswith(b'\x1f\x8b') else data


class Pipeline(SpiderBase):
    '''parse, clean up, verify keyword, remove duplicate and db storage.'''

    def __init__(self, core, dbfile, keyword=None):
        '''`core`: shared Core; `dbfile`: sqlite path; `keyword`: text to match.'''
        SpiderBase.__init__(self)
        self.core = core
        self.keyword = keyword
        self.dbfile = dbfile
        self._init_database()

    def _init_database(self):
        '''Open the sqlite db and ensure the result table exists.'''
        self.db = sqlite3.connect(self.dbfile)
        self.cur = self.db.cursor()
        self.cur.execute('CREATE TABLE IF NOT EXISTS KnownsecSpider ('
                         'id INTEGER PRIMARY KEY AUTOINCREMENT, '
                         'keyword NVARCHAR(20), '
                         'url VARCHAR(200) UNIQUE, '
                         'page BLOB)')
        self.db.commit()

    def pipeline(self):
        '''Process one downloaded page: extract links, queue them,
        store the page when it matches the keyword.'''
        depth, self.baseurl, page = self.core.get_response()
        urls = self.cleanup(self.extract(page))
        self.core.put_requests([(depth + 1, self.regularize(u)) for u in urls])
        if self.keyword and self.has_key(page, self.keyword):
            self.db_insert(self.keyword, self.baseurl, page)
        self.core.put_done(self.baseurl)

    def extract(self, page):
        '''Parse the page and get the urls'''
        try:
            soup = BeautifulSoup(page, 'lxml')
        except Exception:
            # Fall back for pages whose encoding trips the auto-detection;
            # presumably gbk-encoded Chinese sites — TODO confirm coverage.
            soup = BeautifulSoup(page.decode('gbk'), 'lxml')
        links = [a['href'] for a in soup.find_all('a') if a.has_attr('href')]
        self.logger.debug('extract -> %d urls' % len(links))
        return links

    def cleanup(self, urls):
        '''Filter the useless links including js scripts, stylesheets,
        fragments, and binaries'''
        _ignores = {'.js', '.css', '.zip', '.rar', '.7z', '.bz', '.gz',
                    '.png', '.jpg', '.gif', '.flv', '.mp4', '.mp3'}

        def _ext(ref):
            # Extension of the path component only (query strings ignored).
            return os.path.splitext(up.urlparse(ref).path)[-1]

        urls = [u for u in urls
                if u and not u.startswith('javascript:')
                and not u.startswith('#')
                and _ext(u) not in _ignores]
        self.logger.debug('cleanup -> %d urls' % len(urls))
        return urls

    def regularize(self, url):
        '''URL regularization to `http(s)://example.com`'''
        url = url.strip()
        # Redirect-style links embed the real target after `=http...`.
        if '=http' in url:
            url = up.unquote('http' + url.split('=http')[-1])
        # Fragments never change the fetched resource.
        if '#' in url:
            url = url.rsplit('#', 1)[0]
        if not re.match(r'http(s)?://', url):
            # BUGFIX: the old hand-rolled branches resolved multiple '../'
            # prefixes by stripping only a single path level; urljoin
            # resolves '?', '/', '../' and plain relative refs correctly.
            url = up.urljoin(self.baseurl, url)
        return url

    def has_key(self, page, keyword):
        '''Test if `page` contains the `keyword`'''
        if not keyword:
            return False
        soup = BeautifulSoup(page, 'lxml')
        lines = (l.strip() for l in soup.get_text().splitlines())
        return any(keyword in line for line in lines if line)

    def db_insert(self, keyword, url, page):
        '''Store one matching page; duplicate urls are skipped, not fatal.'''
        # BUGFIX: a plain INSERT raised sqlite3.IntegrityError the second
        # time the same url was stored (url column is UNIQUE); OR IGNORE
        # keeps the first copy.  Name the columns so the autoincrement id
        # no longer needs an explicit None placeholder.
        self.cur.execute('INSERT OR IGNORE INTO KnownsecSpider '
                         '(keyword, url, page) VALUES (?,?,?)',
                         (keyword, url, page))
        self.db.commit()
