import scrapy


class PenpaiSpider(scrapy.Spider):
    """Spider for The Paper (m.thepaper.cn) mobile news list.

    Crawls up to 10 list pages. For each article it yields an "info" item
    (title, thumbnail URL, article URL) and, when a thumbnail exists, a
    follow-up request whose callback yields the raw image bytes as an
    "img" item.
    """

    name = 'penpai'
    allowed_domains = ['thepaper.cn']
    start_urls = ['https://m.thepaper.cn/list_page.jsp?nodeid=25600&isList=1&pageidx=1']
    # Index of the list page currently being crawled; bumped once per parsed
    # page, pagination stops after page 10.
    page_num = 1

    def parse(self, response):
        """Parse one list page: yield article info items, thumbnail requests,
        and the request for the next list page (up to page 10)."""
        print("----回调函数----", response.url)
        div_list = response.xpath('//div[@class="list_item"]')
        for div_temp in div_list:
            title = div_temp.xpath('.//div[@class="list_item_title"]/span/a/text()').extract_first()
            img_src = div_temp.xpath('.//a[@class="list_item_thumb"]/img/@src').extract_first()
            link = div_temp.xpath('.//div[@class="list_item_title"]/span/a/@href').extract_first()
            # Skip malformed entries: the original string concatenation raised
            # TypeError whenever the <a href> was missing (link is None).
            if not link:
                continue
            # urljoin resolves relative hrefs against the page URL and avoids
            # the double slash that "https://m.thepaper.cn/" + "/foo" produced.
            link_url = response.urljoin(link)

            yield {
                "type": "info",
                "title": title,
                "img_src": img_src,
                "link_url": link_url
            }

            # Only follow the thumbnail when it exists — Request(url=None)
            # raises — and only when there is a title to name the file with.
            if img_src and title:
                yield scrapy.Request(url=img_src, callback=self.parse_img,
                                     cb_kwargs={"img_name": title})

        # NOTE(review): page_num is shared spider state; with the default
        # sequential follow-up of the single "next page" request this is fine,
        # but it would misbehave if multiple list pages were parsed concurrently.
        self.page_num += 1
        if self.page_num <= 10:
            next_page_url = "https://m.thepaper.cn/list_page.jsp?nodeid=25600&isList=1&pageidx=%s" % (self.page_num)
            # Generate the request that crawls the next list page.
            yield scrapy.Request(url=next_page_url, callback=self.parse)

    def parse_img(self, response, img_name):
        """Yield an item carrying the downloaded image bytes.

        img_name is the article title passed via cb_kwargs; the item pipeline
        is expected to write img_bytes to "<img_name>.png".
        """
        print("----图片url----", response.url)
        yield {
            'type': "img",
            # NOTE(review): titles may contain characters that are invalid in
            # filenames (e.g. "/"); the consuming pipeline should sanitize.
            'img_name': img_name + ".png",
            'img_bytes': response.body
        }