# -*- coding: utf-8 -*-
import time
from scrapy.contrib.spiders import Rule
from scrapy.contrib.spiders import CrawlSpider
from scrapy.contrib.linkextractors import LinkExtractor
from selenium import webdriver
from hexunblog.bloom.bloom import BloomFilter
from hexunblog.dao.article_dao import ArticleInfoDao
from hexunblog.service.article_service import ArticleInfoService
from hexunblog.utils.header_util import HeaderUtil


class SpiderHexunblogSpider(CrawlSpider):
    """Crawl blog.hexun.com, render article pages with Selenium, and persist
    composed article items (CSV bootstrap via ArticleInfoDao).

    Link flow: LinkExtractor -> process_links (Bloom-filter dedup + asset
    filtering) -> process_request (headers/meta) -> parse_start_url.
    """

    name = 'spider_hexunblog'
    allowed_domains = ['blog.hexun.com']
    csv_file_path = "./hexun_blog.csv"
    start_urls = [
        'http://blog.hexun.com/',
        'http://f.blog.hexun.com/',
        'http://blog.hexun.com/group/shehui.html',
        'http://blog.hexun.com/group/life.html',
        'http://blog.hexun.com/group/blogstars.html',
        'http://blog.hexun.com/finance.htm',
        'http://blog.hexun.com/group/bojingtou.html',
    ]

    rules = [
        # Follow every link inside the allowed domains; links and requests
        # are pre-processed by the two hooks referenced by name below.
        Rule(LinkExtractor(
            allow=(),
            allow_domains=allowed_domains,
        ),
            follow=True,
            process_request="process_request",
            process_links="process_links"
        )
    ]

    # NOTE(review): the three statements below run once at class-definition
    # (i.e. import) time — they allocate the Bloom filter, launch a Firefox
    # instance, and create the CSV file as side effects. Consider moving them
    # into __init__/from_crawler so importing this module stays side-effect free.
    bf = BloomFilter(0.001, 10000000)

    browser = webdriver.Firefox()

    ArticleInfoDao.create_csv(csv_file_path)

    def process_links(self, links):
        """Filter extracted links before they are scheduled.

        Drops URLs already seen (Bloom filter), static assets (.css/.js),
        the login page and robots.txt; every surviving URL is recorded in
        the filter and yielded.
        """
        for link in links:
            url = link.url
            # Bloom-filter dedup: skip anything we have (probably) seen.
            if self.bf.is_element_exist(url):
                continue
            # Skip stylesheets/scripts and non-content utility URLs.
            # (Original used `url.find(...) <= 0`, which is the wrong idiom:
            # find() returns -1 when absent, so `not in` is the correct test.)
            if url.endswith((".css", ".js")) or "login.aspx" in url or "robots.txt" in url:
                continue
            self.bf.insert_element(url)
            yield link

    def process_request(self, request):
        """Rewrite each request before download: attach the common headers,
        route the response to self.parse, and carry the URL in meta so the
        final callback can recover it after redirects."""
        return request.replace(url=request.url,
                               headers=HeaderUtil.get_common_request_header(request.url),
                               callback=self.parse,
                               meta={'blog_url': request.url})

    def parse_start_url(self, response):
        """Yield an article item when the response is a blog-content page.

        A page is treated as an article iff it contains
        <div class="ArticleBlogText">; in that case the page is re-rendered
        with Selenium (to execute JavaScript) before item composition.
        """
        articleblogtext_node_list = response.xpath(".//div[@class='ArticleBlogText']")
        if articleblogtext_node_list:
            blog_url = response.meta['blog_url']
            self.browser.get(blog_url)  # load the page in the real browser
            time.sleep(1)  # crude fixed wait for dynamic content to render
            try:
                html_content = self.browser.page_source
                item = ArticleInfoService.compose_article_item(html_content, blog_url)
                yield item
            except Exception as err:
                # Best-effort: log the failing URL and keep the crawl alive.
                print("程序在从互联网获取内容时发生未知异常，URL:" + blog_url)
                print(err)
