# -*- coding: utf-8 -*-
from scrapy.contrib.spiders import Rule
from scrapy.contrib.spiders import CrawlSpider
from scrapy.contrib.linkextractors import LinkExtractor
from scrapy.utils.log import logger

from jobbole.bloom.bloom import BloomFilter
from jobbole.service.article_service import ArticleInfoService
from jobbole.utils.header_util import HeaderUtil


class BlogJobboleSpider(CrawlSpider):
    """Crawl spider for jobbole.com.

    Follows every in-domain link, deduplicates URLs with a Bloom filter,
    skips static-asset references (.css/.js), and yields article items for
    pages that contain a blog-entry body.
    """
    name = 'blog_jobbole'
    allowed_domains = ['jobbole.com']
    start_urls = [  # 'http://jobbole.com/',
        'http://top.jobbole.com/',
        'http://group.jobbole.com/',
        'http://top.jobbole.com/category/news/',
        'http://blog.jobbole.com/all-posts/',
        'http://blog.jobbole.com/',
        'http://web.jobbole.com/',
        'http://python.jobbole.com/',
        'http://android.jobbole.com/',
        'http://ios.jobbole.com/',
        'http://hao.jobbole.com/',
        'http://blog.jobbole.com/category/career/',
        'http://web.jobbole.com/category/javascript-2/',
        'http://web.jobbole.com/category/html5/',
        'http://web.jobbole.com/category/css/',
        'http://web.jobbole.com/category/basic-tech/',
        'http://hao.jobbole.com/category/web-front-end/',
        'http://group.jobbole.com/category/tech/webfront/',
    ]

    rules = [
        # Follow every link within 'jobbole.com'; extracted links and the
        # resulting requests are routed through the two hooks below for
        # deduplication and header/meta injection.
        Rule(LinkExtractor(
            allow=(),
            allow_domains=allowed_domains,
        ),
            follow=True,
            process_request="process_request",
            process_links="process_links"
        )
    ]

    # Shared URL dedup filter: 0.1% false-positive rate, capacity 10M URLs.
    bf = BloomFilter(0.001, 10000000)

    def process_links(self, links):
        """Filter extracted links before they are scheduled.

        Drops URLs already recorded in the Bloom filter and references to
        static assets. NOTE: the original code only filtered ``.css`` even
        though its comment promised ``.css`` and ``.js``; both are now
        filtered as intended. Filtered assets are deliberately NOT inserted
        into the Bloom filter (matching the original behavior).
        """
        for link in links:
            url = link.url
            if self.bf.is_element_exist(url):
                continue
            if url.endswith((".css", ".js")):
                logger.info(">>>>>>>>>>>>>>>>>>>>>>>>>Static resource filtered: " + url)
            else:
                # First sighting: remember it and let the crawler follow it.
                self.bf.insert_element(url)
                yield link

    def process_request(self, request):
        """Decorate each request with common headers and persist its URL.

        The URL is carried in ``meta['blog_url']`` so ``parse_start_url``
        can recover the originating page address.
        """
        host = ArticleInfoService.get_host_from_url(request.url)
        return request.replace(url=request.url,
                               headers=HeaderUtil.get_common_request_header(host),
                               callback=self.parse,
                               meta={'blog_url': request.url})

    def parse_start_url(self, response):
        """Yield an article item when the response is a blog content page.

        A page counts as article content when it contains a
        ``<div class="p-entry">`` or ``<div class="entry">`` element;
        anything else (listing/category pages) is ignored.
        """
        p_entry_list = response.xpath(".//div[@class='p-entry']")
        entry_list = response.xpath(".//div[@class='entry']")
        if p_entry_list or entry_list:
            # Responses for start_urls never pass through process_request,
            # so 'blog_url' may be missing; fall back to the response URL
            # instead of raising KeyError.
            blog_url = response.meta.get('blog_url', response.url)
            item = ArticleInfoService.compose_article_item(response, blog_url)
            if item["id"]:
                item["content"] = response.body_as_unicode()
                yield item
