import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule


class SdmzCrawlSpider(CrawlSpider):
    """Crawl the Four Great Classical Novels from shicimingju.com.

    Starting from the "sidamingzhu" bookmark page, follows the four book
    index pages, then visits every chapter page and yields one item per
    chapter containing the chapter title, the chapter text, and the book
    slug taken from the URL.
    """

    name = "sdmz_crawl"
    # allowed_domains = ["www.shicimingju.com"]
    start_urls = ["https://www.shicimingju.com/bookmark/sidamingzhu.html"]

    rules = (
        # Rule extracting the four book index pages.
        # FIX: the alternatives are grouped so that every one is anchored to
        # "/book/" — the original pattern's un-grouped "|" anchored only the
        # first alternative, matched the others anywhere, and had a stray
        # leading slash in "(/hongloumeng.html)". The dot is escaped so it
        # matches a literal ".".
        Rule(
            LinkExtractor(
                allow=r"/book/(sanguoyanyi|shuihuzhuan|xiyouji|hongloumeng)\.html"
            ),
            follow=True,
        ),

        # Rule extracting individual chapter pages, e.g. /book/xiyouji/12.html.
        # FIX: the original pattern duplicated "hongloumeng", omitted
        # "shuihuzhuan" entirely, and — because of alternation precedence —
        # applied "/\d+.html" only to the last alternative. The corrected
        # pattern requires "/book/<slug>/<number>.html" for all four books.
        Rule(
            LinkExtractor(
                allow=r"/book/(sanguoyanyi|shuihuzhuan|xiyouji|hongloumeng)/\d+\.html"
            ),
            callback="parse_sidmz",
            follow=True,
        ),
    )

    # Must not be called "parse": CrawlSpider defines parse() internally and
    # overriding it breaks rule processing.
    def parse_sidmz(self, response):
        """Parse one chapter page into an item dict.

        Yields a dict with:
          - book_mulu_name: chapter title from the page's <h1> header
          - con: the chapter's full text content, joined into one string
          - url: the book slug from the URL path (identifies which book
            this chapter belongs to), e.g. "xiyouji"
        """
        item = {}
        item['book_mulu_name'] = response.xpath(
            '//div[@class="card bookmark-list"]/h1/text()').extract_first()
        item['con'] = ''.join(
            response.xpath('//div[@id="main_left"]//text()').extract())
        item['url'] = response.url.split('/')[-2]  # book slug passed along with the item
        yield item
