# -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from ..items import ZonghengItem, ContentItem
import re


class Zh1CrawlSpider(CrawlSpider):
    """Crawl zongheng.com's book store.

    Rule pipeline (each followed response is re-matched against all rules):
      1. book detail page   -> parse_item    (book name + word count)
      2. chapter index page -> parse_chapter (followed so rule 3 sees its links)
      3. chapter page       -> get_content   (chapter title + body text)

    Yields:
        ZonghengItem from parse_item, ContentItem from get_content.
    """
    name = 'zh1_crawl'
    page = 1
    # Only this book id survives process_links — crawling every book would
    # hammer the site and risk an IP ban.
    ALLOWED_BOOK_ID = 852121
    allowed_domains = ['book.zongheng.com']
    start_urls = ['http://book.zongheng.com/store/c0/c0/b0/u0/p{}/v9/s9/t0/u0/i1/ALL.html'.format(page)]

    rules = (
        # Book detail page -> parse_item. follow=True: the detail page links to
        # the chapter index, which rule 2 must get a chance to match.
        # process_links filters the extracted links before they are requested.
        Rule(LinkExtractor(allow=r'http://book.zongheng.com/book/\d+\.html'), callback='parse_item', follow=True,
             process_links='process_links'),
        # Chapter index -> parse_chapter. follow=True because this response
        # contains the chapter links rule 3 needs. Don't over-use follow:
        # every extracted link becomes a download request.
        # NOTE: '.' before 'html' is now escaped (was r'\d+.html', which
        # matched any character in that position).
        Rule(LinkExtractor(allow=r'http://book.zongheng.com/showchapter/\d+\.html'), callback='parse_chapter',
             follow=True),
        # Chapter page -> get_content. restrict_xpaths limits extraction to the
        # chapter list (<li><a>); without it, the "latest chapter" links shown
        # on the book intro page (//div[@class='tit']/a) would also be crawled.
        # follow=False: a chapter page links to nothing else we need.
        Rule(LinkExtractor(allow=r'http://book.zongheng.com/chapter/\d+/\d+\.html', restrict_xpaths=("//li/a")),
             callback='get_content', follow=False)
    )
    # Reference notes (translated from the original Chinese):
    #
    # Rule(link_extractor=None, callback=None, cb_kwargs=None, follow=None,
    #      process_links=None, process_request=None, errback=None)
    #   link_extractor: the LinkExtractor object
    #   callback:       callback for each matched response
    #   cb_kwargs:      extra kwargs passed to the callback
    #   follow:         re-match the followed response against all rules
    #   process_links:  function that filters the extracted Link objects
    #   process_request:function that filters the generated Requests
    #   errback:        error callback
    #
    # LinkExtractor(allow=(), deny=(), allow_domains=(), deny_domains=(),
    #               restrict_xpaths=(), tags=('a', 'area'), attrs=('href',),
    #               canonicalize=False, unique=True, process_value=None,
    #               deny_extensions=None, restrict_css=(), strip=True,
    #               restrict_text=None)
    #   allow/deny:     regex (or list) to accept/reject URLs; deny wins
    #   allow_domains / deny_domains: str or list of domains
    #   restrict_xpaths / restrict_css: only extract links inside these regions
    #   canonicalize:   normalize each extracted URL
    #   unique:         de-duplicate matched links
    #   process_value:  hook receiving each raw attribute value
    #   deny_extensions: file extensions to ignore
    #   restrict_text:  regex the link text must match

    # A CrawlSpider subclass must NOT define parse() — CrawlSpider uses it
    # internally for rule dispatch.
    def parse_item(self, response):
        """Extract book name and word count from a book detail page."""
        self.logger.info("爬虫")
        item = ZonghengItem()
        # extract_first('') instead of extract()[0]: an unexpected page layout
        # yields an empty field rather than an IndexError that kills the spider.
        book_name = response.xpath("//div[@class='book-info']/div[@class='book-name']/text()").extract_first('').strip()
        novel_num = response.xpath("//div[@class='nums']/span[1]/i/text()").extract_first('').strip()
        print(book_name, novel_num)
        item['小说名'] = book_name
        item['字数'] = novel_num
        return item

    # Alternative pagination: override parse_start_url to schedule the next
    # store listing page.
    # def parse_start_url(self, response):
    #     self.page += 1
    #     next_page = 'http://book.zongheng.com/store/c0/c0/b0/u0/p{}/v9/s9/t0/u0/i1/ALL.html'.format(self.page)
    #     if self.page == 1:
    #         return
    #     return scrapy.Request(next_page)

    def process_links(self, links):
        """Filter book links down to ALLOWED_BOOK_ID.

        Crawling too many books gets the crawler's IP banned, so only the one
        whitelisted book id is allowed through.

        Args:
            links: iterable of scrapy Link objects matched by rule 1.

        Yields:
            The Link objects whose first numeric URL segment (the book id)
            equals ALLOWED_BOOK_ID.
        """
        for link in links:
            print(link.url)
            match = re.search(r"\d+", link.url)
            if match and int(match.group()) == self.ALLOWED_BOOK_ID:
                yield link

    def parse_chapter(self, response):
        """Chapter-index callback; intentionally a no-op.

        The rule exists only with follow=True so that the chapter links on this
        page get matched by rule 3.
        """
        pass

    def get_content(self, response):
        """Extract one chapter's text plus its book and chapter names."""
        item = ContentItem()
        paragraphs = response.xpath("//div[@class='content']/p/text()").extract()
        content = "".join(p.strip() for p in paragraphs)
        # Last breadcrumb link is the book title; guard against a missing
        # breadcrumb bar instead of letting [-1] raise IndexError.
        crumbs = response.xpath("//div[@class='reader_crumb']/a/text()").extract()
        book_name = crumbs[-1] if crumbs else ''
        chapter_name = response.xpath("//div[@class='title_txtbox']/text()").extract_first('')
        print(book_name, chapter_name, content)
        item['name'] = book_name
        item['chapter'] = chapter_name
        item['content'] = content
        return item
