import scrapy


class PengpaiNewsSpider(scrapy.Spider):
    """Spider for The Paper (澎湃新闻) mobile list pages.

    Crawls pages 1 through 4 of the nodeid=25600 list. For each news entry
    it yields an ``info`` item (title, image URL, article URL) and, for
    entries that have an image, a follow-up request whose callback yields
    an ``img`` item containing the raw image bytes.
    """

    name = 'pengpai_news'
    allowed_domains = ['thepaper.cn']
    start_urls = ['https://m.thepaper.cn/list_page.jsp?nodeid=25600&isList=1&pageidx=1']
    # Base URL for pagination; the 1-based page index is appended in parse().
    url = 'https://m.thepaper.cn/list_page.jsp?nodeid=25600&isList=1&pageidx='
    page_num = 1  # index of the page currently being crawled

    def parse(self, response):
        """Parse one list page.

        Yields the one-time header row (first page only), one info dict per
        news entry, an image-download request per entry with an image, and
        the request for the next page (up to page 4).
        """
        # BUG FIX: parse() is also the pagination callback (see the Request
        # at the bottom), so the header row used to be emitted once per page.
        # Emit it only for the first page.
        if self.page_num == 1:
            yield {
                'type': 'info',
                'title': '标题',
                # BUG FIX: these two labels were swapped in the original —
                # img_src holds the image URL, news_src the article URL.
                'img_src': '图片链接',
                'news_src': '文章链接',
            }
        news_list = response.xpath('//div[@class="list_item"]')
        for news in news_list:
            title = news.xpath('./div/div[1]//a/text()').extract_first()
            img_src = news.xpath('./a/img/@src').extract_first()
            news_src = news.xpath('./div/div[1]/span/a/@href').extract_first()
            if news_src:
                # urljoin resolves both relative and root-relative hrefs
                # correctly; naive string concatenation produced '//' for
                # hrefs starting with '/'.
                news_src = response.urljoin(news_src)

            # Entries without an image are skipped entirely, matching the
            # original behavior (both yields were guarded on img_src).
            if img_src:
                yield {
                    'type': 'info',
                    'title': title,
                    'img_src': img_src,
                    'news_src': news_src,
                }
                yield scrapy.Request(url=img_src, callback=self.img_parse,
                                     cb_kwargs={'title': title})
        print('第%s页爬取完成，共30条信息' % str(self.page_num))
        if self.page_num < 4:
            self.page_num += 1
            yield scrapy.Request(url=self.url + str(self.page_num), callback=self.parse)

    def img_parse(self, response, title):
        """Yield an ``img`` item carrying the downloaded image bytes.

        ``title`` is forwarded from parse() via cb_kwargs so the image can
        be matched back to its news entry.
        """
        item = {
            'type': 'img',
            'title': title,
            'content': response.body,
        }
        yield item

