# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html


# useful for handling different item types with a single interface
import csv
import hashlib
import json

from itemadapter import ItemAdapter

# Deduplication support: check Redis for previously-seen items
from redis import StrictRedis
from scrapy.exceptions import DropItem


class InsDataisRe():
    """Deduplication pipeline backed by Redis.

    Every non-image item is fingerprinted (MD5 of its canonical JSON form)
    and checked against Redis; items whose fingerprint already exists are
    dropped, new ones are recorded and passed through.  Items with
    ``type == "images"`` bypass the check entirely.
    """

    def open_spider(self, spider):
        # Open one Redis connection for the whole spider run.
        self.redis_client = StrictRedis(host='localhost', port=6379, db=0)

    def close_spider(self, spider):
        # Release the Redis connection when the spider finishes.
        self.redis_client.close()

    def process_item(self, item, spider):
        """Pass new (and all image) items through; drop duplicates.

        Raises:
            DropItem: when an identical item is already recorded in Redis.
        """
        if item.get("type") != "images":
            print("=----redis检查管道")
            # sort_keys makes the fingerprint independent of key insertion
            # order, so logically-equal items always produce the same hash.
            payload = json.dumps(item, sort_keys=True)
            fingerprint = hashlib.md5(payload.encode()).hexdigest()

            # SET with nx=True is atomic: it stores the value only when the
            # key is absent and reports which case happened, avoiding the
            # get-then-set race between two concurrent writers.
            is_new = self.redis_client.set(fingerprint, payload, nx=True)
            if not is_new:
                raise DropItem("----数据存在-----------")

        return item


# 保存数据
class PenpaiPipeline:
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        self.isonew = True
        self.img_index = 0

    def open_spider(self, spider):  # 在爬虫开启的时候仅执行一次

        self.f = open("C:/Users\/admin\Desktop\Scrapy周考\data\penpai-100.csv", "a+", encoding="utf-8")
        self.csv_f = csv.DictWriter(self.f, ["标题", "链接"])
        self.csv_f.writeheader()
        print("open")

    def close_spider(self, spider):  # 在爬虫关闭的时候仅执行一次
        print("clpse")
        self.f.close()

    def process_item(self, item, spider):
        if item.get("type") == "info":
            item.pop("type")
            self.csv_f.writerows([item])
            print("数据保存成功")
        else:
            print("--------写入图片-------------")
            with open("C:\\Users\\admin\Desktop\Scrapy周考\data\images\\" + str(self.img_index) + ".jpg", 'wb') as f:
                f.write(item.get("content"))
                self.img_index += 1
                print("图片写入成功")

        return item
