import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule

from scrapy_dldl_47.items import ScrapyDldl47Item


class DldlSpider(CrawlSpider):
    """Crawl a novel on shuhaige.net chapter by chapter.

    Starts from a single chapter URL and follows the "next chapter"
    anchor (``a#A3`` inside ``div.bottem2``) on every page, so the crawl
    walks the book linearly instead of fanning out.
    """

    name = "dldl"
    allowed_domains = ["shuhaige.net"]
    start_urls = ["https://www.shuhaige.net/61753/381318.html"]

    # Only follow the "next chapter" link; each matched page is handed
    # to parse_item and also followed further (follow=True).
    rules = (
        Rule(
            LinkExtractor(restrict_xpaths='//div[@class="bottem2"]//a[@id="A3"]'),
            callback="parse_item",
            follow=True,
        ),
    )

    def parse_item(self, response):
        """Extract the chapter title and body from one chapter page.

        Yields one ``ScrapyDldl47Item`` per ``div.content_read`` block:
          * ``name`` -- chapter title (first ``<h1>`` text, ``None`` if absent)
          * ``txt``  -- body paragraphs joined into one newline-separated string
        """
        # NOTE: renamed from `list` — the original shadowed the builtin.
        for chapter in response.xpath('//div[@class="content_read"]'):
            title = chapter.xpath('.//h1/text()').get()
            paragraphs = chapter.xpath('.//div[@id="content"]/p/text()').getall()
            # The paragraphs arrive as a list of text fragments; merge them
            # into a single string so they can be stored in one DB field.
            body = "\n".join(paragraphs)
            yield ScrapyDldl47Item(name=title, txt=body)
