import json

import scrapy
from jsonpath import jsonpath


class CtSpider(scrapy.Spider):
    """Spider for the chouti.com hot-links JSON feed.

    Yields two kinds of items:
      * ``type == 'info'`` -- title / image url / ups / news id per entry.
      * ``type == 'img'``  -- raw downloaded image bytes keyed by news id.

    Follows pagination via the ``afterTime`` query parameter for up to
    three additional pages beyond the first.
    """

    name = 'ct'
    allowed_domains = ['chouti.com']
    start_urls = ['https://dig.chouti.com/link/hot']
    # Count of pages fetched so far; pagination stops once it exceeds 3.
    page_num = 0

    def parse(self, response):
        """Parse one JSON page: yield info items, image requests, next page.

        :param response: JSON API response for one page of hot links.
        """
        payload = json.loads(response.body)

        # jsonpath() returns False -- not an empty list -- when a query
        # matches nothing; normalize to [] so the loop degrades gracefully
        # instead of raising TypeError/IndexError on a sparse page.
        titles = jsonpath(payload, '$.data..title') or []
        # Query original_img_url specifically: the plain img_url key is
        # duplicated elsewhere in the payload and would over-match.
        img_urls = jsonpath(payload, '$..original_img_url') or []
        ups = jsonpath(payload, '$..ups') or []
        news_ids = jsonpath(payload, '$..id') or []

        # Timestamp of the last entry, used as the afterTime cursor for the
        # next page. NOTE(review): hard-coded index 24 assumes the API always
        # returns 25 entries per page -- confirm against the live feed.
        last_times = jsonpath(payload, '$.data[24].operateTime')

        self.logger.debug('parsed %d entries', len(titles))
        # zip keeps the parallel result lists aligned and simply stops at the
        # shortest one, instead of indexing past the end of a short list.
        for title, img_url, up_count, news_id in zip(titles, img_urls, ups, news_ids):
            yield {
                'type': 'info',
                'title': title,
                'img_url': img_url,
                'ups': up_count,
                'news_id': news_id,
            }
            # Download the image itself; carry the news id through cb_kwargs
            # so img_parse can tag the bytes with their source entry.
            yield scrapy.Request(
                url=img_url,
                callback=self.img_parse,
                cb_kwargs={"news_id": news_id},
            )

        self.page_num += 1
        # Only paginate while under the page cap AND a cursor timestamp was
        # actually found on this page.
        if self.page_num <= 3 and last_times:
            next_page_url = "https://dig.chouti.com/link/hot?afterTime={}".format(last_times[0])
            self.logger.debug('following next page: %s', next_page_url)
            yield scrapy.Request(url=next_page_url, callback=self.parse)

    def img_parse(self, response, news_id):
        """Yield the downloaded image bytes tagged with their news id.

        :param response: binary image response scheduled by :meth:`parse`.
        :param news_id: id of the news entry the image belongs to.
        """
        yield {
            'type': 'img',
            'news_id': news_id,
            'img_bytes': response.body,
        }
