import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule

from doubanScrapy.bqgNovel.bqgNovel.items import BqgnovelItem


class BqgsexySpider(CrawlSpider):
    """Crawl all chapter pages of novel 61808 on m.c3719.lol.

    Starts from the chapter list page and follows every chapter link,
    yielding one ``BqgnovelItem`` (title, content, chapter_url) per chapter.
    """

    name = "bqgSexy"
    allowed_domains = ["m.c3719.lol"]
    start_urls = ["https://m.c3719.lol/book/61808/list.html"]

    # Crawl rules (the regex decides which links get extracted):
    #   allow          -> chapter URLs such as /book/61808/123.html
    #   callback       -> each matched page is parsed by parse_item
    #   follow=False   -> stop: do not extract further links from chapter pages
    rules = (
        # BUG FIX: the dot before "html" was previously unescaped
        # (r"/book/61808/\d+.html"), so "." matched ANY character and the
        # rule could also pick up URLs like /book/61808/123xhtml.
        # Escaping it restricts the match to a literal ".html".
        Rule(LinkExtractor(allow=r"/book/61808/\d+\.html"), callback="parse_item", follow=False),
    )

    def parse_item(self, response):
        """Extract one chapter from a matched page.

        Args:
            response: the Scrapy response for a chapter page.

        Yields:
            BqgnovelItem: populated with 'title', 'content' and 'chapter_url'.
        """
        # Chapter URL of the page being parsed.
        chapter_url = response.url

        # Title: the page may split the title across several text nodes
        # inside <span class="title">; join them into one string.
        title = ''.join(response.xpath('//span[@class="title"]/text()').getall())

        # Body text comes back as a list of raw text nodes; strip the
        # surrounding whitespace from each fragment, then concatenate.
        content_list = response.xpath('//div[@class="Readarea ReadAjax_content"]/text()').getall()
        content = ''.join(fragment.strip() for fragment in content_list)

        # Build and emit the item.
        item = BqgnovelItem()
        item['title'] = title
        item['content'] = content
        item['chapter_url'] = chapter_url
        yield item