# -*- coding: utf-8 -*-
import scrapy,re
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from a17k.items import bqglspiderItem

class BqgspiderSpider(CrawlSpider):
    """Crawl the "all novels" index of www.xbiquge.la and scrape, for every
    novel found there, its metadata plus the full text of each chapter.

    One ``bqglspiderItem`` is yielded per chapter (from ``parse_body``).
    """
    name = 'bqgSpider'
    allowed_domains = ['xbiquge.la']
    start_urls = ['http://www.xbiquge.la/xiaoshuodaquan/']

    rules = (
        # Follow index links that look like a novel's page ("...la/<digits>...").
        # follow=False: do not extract further links from the novel page itself;
        # chapter requests are scheduled explicitly in parse_item.
        Rule(LinkExtractor(allow=r'la/\d+', unique=True),
             callback='parse_item', follow=False),
    )

    def parse_item(self, response):
        """Parse a novel's index page: read the book metadata and schedule
        one request per chapter.

        :param response: the novel's index page (contains ``#info``/``#list``).
        :yields: a ``scrapy.Request`` per chapter; a partially-filled item is
                 carried along in ``request.meta['key']`` for ``parse_body``.
        """
        chap_lists = response.xpath('//*[@id="list"]/dl/dd')
        # NOTE(review): no trailing /text() here, so these hold the raw HTML
        # of the elements (tags included) — presumably cleaned downstream in
        # the item pipeline; confirm.
        name = response.xpath('//*[@id="info"]/h1').extract_first()
        author = response.xpath('//*[@id="info"]/p[1]').extract_first()
        intro = response.xpath('//*[@id="intro"]/p[2]').extract_first()
        for chapter in chap_lists:
            chapter_name = chapter.xpath('./a/text()').extract_first()
            chapter_link = chapter.xpath('./a/@href').extract_first()
            # Skip <dd> entries that carry no anchor text.
            if chapter_name:
                item = bqglspiderItem(name=name, author=author, intro=intro,
                                      chapter_name=chapter_name)
                # Chapter hrefs are relative; resolve against this page's URL.
                url = response.urljoin(chapter_link)
                request = scrapy.Request(url=url, callback=self.parse_body)
                # Hand the partially-filled item over to parse_body.
                request.meta['key'] = item
                yield request

    def parse_body(self, response):
        """Parse a chapter page: extract the chapter text and complete the
        item started in ``parse_item``.

        :param response: the chapter page (text lives under ``#content``).
        :yields: the finished ``bqglspiderItem`` with ``body`` filled in.
        """
        item = response.meta['key']
        # Regex-extract the chapter text straight from the #content markup:
        # each match is a single CJK character or one literal "<br>" tag
        # (the lazy "+?" makes the group match one unit at a time).
        # NOTE(review): this drops punctuation, digits and Latin text —
        # confirm that is intentional.
        content_list = response.xpath('.//*[@id="content"]').re(r'([\u4e00-\u9fa5]|<br>)+?')
        content_str = ''.join(content_list)
        # "<br><br>" marks a paragraph break on the page; replace it with a
        # newline plus a two-space paragraph indent.
        content = re.sub('<br><br>', '\n  ', content_str)
        item['body'] = content
        yield item
