# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html


# useful for handling different item types with a single interface
import csv
import hashlib
import json
import os

from itemadapter import ItemAdapter
from redis import StrictRedis
from scrapy.exceptions import DropItem


class CheckExist:
    """Deduplication pipeline.

    Keeps an MD5 fingerprint of every item in a local Redis instance and
    drops any item whose fingerprint has been seen before.
    """

    def open_spider(self, spider):
        # One connection per spider run; closed in close_spider.
        self.redis_client = StrictRedis(host='localhost', port=6379, db=0)

    def close_spider(self, spider):
        self.redis_client.close()

    def process_item(self, item, spider):
        """Drop *item* if its fingerprint is already stored in Redis.

        Raises:
            DropItem: when the item's hash is already present (duplicate).
        """
        print("---------Redis检查管道")
        # Bug fix: Scrapy Item objects are not directly JSON-serializable;
        # convert through ItemAdapter (already imported for this purpose).
        # sort_keys makes the hash independent of field order, so identical
        # items always produce the same fingerprint.
        waitting_hash_str = json.dumps(ItemAdapter(item).asdict(), sort_keys=True)
        md5 = hashlib.md5()
        md5.update(waitting_hash_str.encode())
        hash_val = md5.hexdigest()

        if self.redis_client.get(hash_val):
            raise DropItem("---------此数据不保存，因为Redis中查询到")
        self.redis_client.set(hash_val, waitting_hash_str)
        return item


class penpai_1Pipeline:
    """Pipeline that appends "news" items to ./download/澎湃.csv.

    Items with any other ``type`` value pass through untouched.
    """

    # Column order for the CSV output.
    FIELD_NAMES = ['title', 'href', 'img_src']

    def process_item(self, item, spider):
        """Append *item* to the CSV file when its ``type`` is ``"news"``.

        Side effects: creates ./download/ on first use and removes the
        ``type`` key from saved items. Always returns the item so later
        pipelines still receive it.
        """
        print("-----------管道被调用")

        # Directory next to the current working dir for all downloads.
        download_path = os.path.join(os.getcwd(), 'download')
        os.makedirs(download_path, exist_ok=True)

        type_1 = item.get("type")
        print(type_1)
        if type_1 == "news":
            csv_path = os.path.join(download_path, '澎湃.csv')
            # Write the header only when creating the file, so repeated
            # appends stay parseable by csv.DictReader.
            need_header = not os.path.exists(csv_path)
            # Bug fix: explicit UTF-8 — the platform default encoding
            # (e.g. GBK on Chinese Windows) can corrupt or reject the text.
            with open(csv_path, 'a', newline="", encoding='utf-8') as f:
                f_csv = csv.DictWriter(f, self.FIELD_NAMES)
                if need_header:
                    f_csv.writeheader()
                item.pop("type")  # 'type' is routing metadata, not CSV data
                f_csv.writerows([item])
                print("保存信息到CSV....成功")
        # NOTE(review): original had a commented-out 'images' branch that
        # wrote item["img_bytes"] to disk; intentionally left unimplemented.
        return item



