import scrapy
from scrapy.linkextractors import LinkExtractor
from baidu.items import BaiduItem


class BaidubaikeSpider(scrapy.Spider):
    """Crawl Baidu Baike encyclopedia pages.

    For each page, yields one ``BaiduItem`` with the lemma title, the
    summary text, and the page URL, then follows every in-site
    ``/item/...`` link recursively with the same callback.
    """

    name = "baidubaike"
    allowed_domains = ["baike.baidu.com"]
    start_urls = ["https://baike.baidu.com/item/%E7%AC%AC60%E5%B1%8A%E9%BB%84%E9%87%91%E6%97%B6%E6%AE%B5%E8%89%BE%E7%BE%8E%E5%A5%96/20595321?fromModule=lemma_inlink"]

    # Built once per spider instance instead of once per parsed response.
    link_extractor = LinkExtractor(allow=(r"/item/",))

    def parse(self, response):
        """Extract title/summary from the lemma wrapper and follow /item/ links.

        Yields:
            BaiduItem: one item per ``J-lemma-main-wrapper`` div.
            scrapy.Request: one request per in-site encyclopedia link.
        """
        for wrapper in response.xpath('//div[@id="J-lemma-main-wrapper"]'):
            item = BaiduItem()
            # The site's CSS class names carry hashed suffixes (e.g.
            # "lemmaTitle__69Po"), so match on the stable prefix via contains().
            # Use relative ".//" paths so the query is scoped to this wrapper;
            # a bare "//" would search the entire document and ignore `wrapper`.
            title = wrapper.xpath(
                './/h1[contains(@class, "lemmaTitle") and contains(@class, "J-lemma-title")]/text()'
            ).get()
            info = ''.join(
                wrapper.xpath('.//div[contains(@class, "lemmaSummary")]//text()').getall()
            )
            item['title'] = title
            item['info'] = info
            item['url'] = response.url
            yield item

        # Recursively follow every link whose URL matches /item/ (other
        # encyclopedia entries on the same domain).
        for link in self.link_extractor.extract_links(response):
            yield scrapy.Request(link.url, callback=self.parse)
