import scrapy


class PpSpider(scrapy.Spider):
    """Spider for m.thepaper.cn: walks the paginated article list for node
    25600 and emits two kinds of items:

      - ``{'type': 'info', ...}``  article metadata (title, image URL, link)
      - ``{'type': 'img', ...}``   downloaded cover-image bytes

    A downstream pipeline is expected to dispatch on the ``type`` key.
    """
    name = 'pp'
    allowed_domains = ['thepaper.cn']
    start_urls = ['https://m.thepaper.cn/list_page.jsp?nodeid=25600&isList=1&pageidx=1']
    # Index of the most recently requested list page; capped at 10 in parse().
    page = 1

    def parse(self, response):
        """Parse one list page.

        Emits an info item per article, requests each article's cover image,
        then queues the next list page (up to page 10).
        """
        a_list = response.xpath("//div[@class='list_item']")
        for a_temp in a_list:
            img_url = a_temp.xpath("./a/img/@src").extract_first()
            title_url = a_temp.xpath("./a/@href").extract_first()
            title = a_temp.xpath(".//div[@class='list_item_title']/span/a/text()").extract_first()
            # urljoin handles relative and absolute hrefs correctly, and
            # avoids producing 'https://m.thepaper.cn/None' when the href
            # attribute is missing.
            title_url_s = response.urljoin(title_url) if title_url else None
            yield {
                'type': 'info',
                '标题': title,
                '图片链接': img_url,
                '文章链接': title_url_s,
            }
            # Guard: some list items carry no <img>; scrapy.Request raises
            # on a None URL.
            if img_url:
                yield scrapy.Request(
                    url=response.urljoin(img_url),
                    callback=self.parse_img,
                    cb_kwargs={"img_name": title or "untitled"},
                )
        # BUG FIX: pagination previously lived inside the item loop, so the
        # counter advanced once per *article* instead of once per *page* —
        # the <=10 cap was exhausted after 10 items on page 1, and duplicate
        # next-page requests were yielded for every remaining item.
        # Increment once per parsed page instead.
        self.page += 1
        if self.page <= 10:
            next_url = 'https://m.thepaper.cn/list_page.jsp?nodeid=25600&isList=1&pageidx=%s' % self.page
            yield scrapy.Request(url=next_url, callback=self.parse)

    def parse_img(self, response, img_name):
        """Emit the downloaded image body as an item for the pipeline.

        ``img_name`` is the article title passed via cb_kwargs; the pipeline
        presumably uses it as the output filename.
        """
        yield {
            "type": "img",
            "img_name": img_name + ".png",
            "img_bytes": response.body,
        }
