# -*- coding: utf-8 -*-
import hashlib
import json

import scrapy
from redis import StrictRedis


class ShishiSpider(scrapy.Spider):
    """Spider for thepaper.cn "shishi" (current affairs) mobile list pages.

    Crawls up to 4 list pages, yields one ``{"type": "info", ...}`` item per
    article, and follows each article's thumbnail URL to yield a
    ``{"type": "img", ...}`` item with the raw image bytes.  Articles already
    seen (tracked by an MD5 hash stored in Redis) are skipped.
    """
    name = 'shishi'
    # allowed_domains entries must be bare domain names, not URLs: the
    # original second entry ' https://imagecloud.thepaper.cn' (a full URL with
    # a leading space) could never match, so OffsiteMiddleware would drop the
    # image-download requests.
    allowed_domains = ['thepaper.cn', 'imagecloud.thepaper.cn']
    start_urls = ['https://m.thepaper.cn/list_25600']
    # Current list-page index; incremented after each parsed page, capped at 4.
    page = 1

    def parse(self, response):
        """Parse one list page: yield new article items + image requests,
        then schedule the next list page (up to page 4)."""
        # Create the Redis connection once per page rather than once per item
        # (the original rebuilt it inside the loop).
        redis_client = StrictRedis(host='localhost', port=6379, db=0)
        for entry in response.xpath('//div[@class="list_item"]'):
            title = entry.xpath('.//div[@class="list_item_title"]//a/text()').extract_first()
            img_url = entry.xpath('./a/img/@src').extract_first()
            news_url = entry.xpath('.//div[@class="list_item_title"]//a/@href').extract_first()
            print(title, img_url, news_url)
            item = {
                "type": "info",
                "title": title,
                "news_url": news_url,
            }
            # Fingerprint the item.  Dict literal order is stable, so the
            # digest is reproducible; the serialization is kept byte-identical
            # to the original scheme so existing Redis entries remain valid.
            hash_str = json.dumps(item)
            hash_val = hashlib.md5(hash_str.encode()).hexdigest()
            # Skip articles already recorded in Redis.
            # NOTE(review): nothing in this spider ever SETs the key —
            # presumably a pipeline stores it after a successful yield; confirm.
            if redis_client.get(hash_val):
                continue
            yield item
            # Guard: some entries lack a thumbnail (img_url is None) or a
            # title; scrapy.Request raises on a None url, and img_parse would
            # fail concatenating a None title.
            if img_url and title:
                yield scrapy.Request(url=img_url, callback=self.img_parse,
                                     cb_kwargs={'img_name': title})
        self.page += 1
        if self.page <= 4:
            next_page = 'https://m.thepaper.cn/list_page.jsp?nodeid=25600&isList=1&pageidx=' + str(self.page)
            yield scrapy.Request(url=next_page, callback=self.parse)

    def img_parse(self, response, img_name):
        """Yield the downloaded image bytes, named after the article title."""
        print("----图片url----", response.url)
        yield {
            'type': "img",
            'img_name': img_name + ".png",
            'img_bytes': response.body
        }
