from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule

from Midde.items import MiddeItem


class TextdemoSpider(CrawlSpider):
    """Crawl the xiaohua/ziliao listing pages and yield one item per image entry.

    Follows pagination links matching ``index_\\d+.html`` and extracts an image
    title and source URL from each ``<dl>`` entry on every listing page.
    """

    name = 'TextDemo'
    start_urls = ["https://news.daxues.cn/xiaohua/ziliao/"]

    # Follow every pagination link (index_2.html, index_3.html, ...) and run
    # parse_item on each page it leads to.
    rules = (
        Rule(LinkExtractor(allow=r'index_\d+\.html'), callback='parse_item', follow=True),
    )

    def parse_start_url(self, response, **kwargs):
        # CrawlSpider rules only apply to links *extracted from* responses, so
        # the start page itself would otherwise be skipped. Force it through
        # parse_item so the whole site (including page 1) is scraped.
        return self.parse_item(response)

    def parse_item(self, response):
        """Extract image name/src pairs from a listing page.

        Yields:
            MiddeItem: one item per entry, with ``name`` (title + ".jpg")
            and ``src`` (absolute image URL).
        """
        print(
            f"""The target URL information is crawled successfully, the URL is{response.url}, and the return status code is {response.status}""")
        img_list = response.xpath("""/html/body/div[2]/div[2]/dl""")

        for li in img_list:
            # extract_first() returns None when the node is missing; guard
            # against it so one malformed <dl> cannot raise a TypeError
            # ("None + str") and abort parsing of the entire page.
            title = li.xpath("./dt/a/text()").extract_first()
            src = li.xpath("./a/img/@src").extract_first()
            if title is None or src is None:
                continue

            items = MiddeItem()
            items["name"] = title + ".jpg"
            # NOTE(review): plain concatenation assumes src is a root-relative
            # path without a leading slash — consider response.urljoin(src),
            # but confirm against actual page markup first.
            items["src"] = "https://news.daxues.cn/" + src

            yield items
