# -*- coding:utf-8 -*-

"""
celery gevent wbcrawler
"""

from celery.task import task, group
from celery_worker import app
from bloom_filter import BloomFilter
from eventlet import Timeout
import requests
from requests import exceptions
import urlparse
import traceback
from lxml.html import etree
from config import BLOOMFILTER_SETTINGS

__author__ = 'ghostviper'

bf = BloomFilter(BLOOMFILTER_SETTINGS)


def extract_links(base_url, html):
    """Return every anchor href found in *html*, resolved against *base_url*."""
    tree = etree.HTML(html)
    return [urlparse.urljoin(base_url, anchor.attrib['href'])
            for anchor in tree.xpath('//a[@href]')]


def domain(base_url):
    return urlparse.urlsplit(base_url).netloc


def is_seen(url):
    """Return True when *url* is already in the bloom filter; otherwise
    record it and return False (so the first caller "claims" the URL)."""
    if not bf.isContains(url):
        bf.insert(url)
        return False
    return True

@app.task(ignore_result=True, serializer='pickle', compression='zlib')
def crawler(url, crawler_config):
    """
    抓取页面所有链接
    :param url:
    :param seen:
    :return:
    """
    is_seen(url)
    domian_url = domain(url)
    with Timeout(5, False):
        try:
            response = requests.get(url)
            wanted_urls = list()
            # TODO: 加载解析规则
            if crawler_config is not None:
                print len(response.text), crawler_config
                if response.status_code == 200 and crawler_config:
                    app.send_task('webparser.parser', args=(response.text, crawler_config))
                    for new_url in extract_links(url, response.text):
                        if domian_url in new_url:
                            print new_url
                            wanted_urls.append(new_url)
                            is_seen(new_url)
                    subtasks = group(crawler.s(url, crawler_config) for url in wanted_urls)
                    subtasks.delay()
                elif response.status_code == 404:
                    print url, "can't fetch page"
                else:
                    print "else reason!"
            else:
                print "no crawler rule"
        except exceptions.RequestException:
            print exceptions.RequestException, traceback.print_exc()
        except Exception as e:
            print Exception, e, traceback.print_exc()



