import scrapy


class SdmzSpider(scrapy.Spider):
    """Crawl the Four Great Classical Novels from shicimingju.com.

    Crawl flow:
        bookmark page (start_urls) -> one page per book (``parse``)
        -> one request per chapter (``parse_book``)
        -> one item per chapter (``parse_book_mulu``).
    """

    name = "sdmz"
    # allowed_domains = ["www.shicimingju.com"]
    start_urls = ["https://www.shicimingju.com/bookmark/sidamingzhu.html"]

    def parse(self, response, **kwargs):
        """Parse the bookmark page and request each book's index page.

        Yields one ``scrapy.Request`` per book, handled by ``parse_book``.
        """
        url_list = response.xpath('//div[@class="book-item"]/a/@href').extract()
        for url in url_list:
            # Hrefs are site-relative; resolve against the current page URL.
            new_url = response.urljoin(url)
            yield scrapy.Request(new_url, callback=self.parse_book)

    def parse_book(self, response, **kwargs):
        """Parse a book index page and request every chapter.

        The chapter title is forwarded to the chapter callback through
        request ``meta`` so it can be emitted alongside the chapter text.
        """
        a_list = response.xpath('//div[@class="book-mulu"]/ul/li/a')
        for a in a_list:
            url = response.urljoin(a.xpath('./@href').extract_first())
            book_mulu_name = a.xpath('./text()').extract_first()
            yield scrapy.Request(
                url,
                callback=self.parse_book_mulu,
                meta={'book_mulu_name': book_mulu_name},
            )

    def parse_book_mulu(self, response, **kwargs):
        """Parse a chapter page and yield one item with its title and text."""
        item = {}
        # Chapter title, forwarded from parse_book via request meta.
        item['book_mulu_name'] = response.meta['book_mulu_name']
        # Join every text node under the main column into a single string.
        item['con'] = ''.join(
            response.xpath('//div[@id="main_left"]//text()').extract()
        )
        # Second-to-last URL path segment identifies the book (its slug),
        # so downstream pipelines can group chapters by book.
        item['url'] = response.url.split('/')[-2]
        yield item




