# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html


# useful for handling different item types with a single interface
import csv
import hashlib
import json
import os

from itemadapter import ItemAdapter
from redis import StrictRedis
from scrapy.exceptions import DropItem


class CtnewsPipeline:
    """Append each scraped item as one row to ``download/ct.csv``.

    The original implementation accumulated every item in memory and
    re-wrote the header plus ALL previous rows on every call (append
    mode), duplicating data quadratically; it also never returned the
    item, which drops it for any downstream pipeline.
    """

    # Column order for the CSV output (keys match the item fields).
    FIELDS = ['标题', '来源', '发布人', '图片链接']

    def process_item(self, item, spider):
        """Write *item* as a single CSV row and pass it on.

        The header is emitted only when the file is new or empty, so
        repeated runs keep appending rows under a single header.
        """
        row = {field: item.get(field) for field in self.FIELDS}

        download_dir = os.path.join(os.getcwd(), 'download')
        os.makedirs(download_dir, exist_ok=True)  # no-op if it already exists
        csv_path = os.path.join(download_dir, 'ct.csv')

        # Header only once: when the file does not exist yet or is empty.
        need_header = (not os.path.exists(csv_path)
                       or os.path.getsize(csv_path) == 0)

        with open(csv_path, 'a', newline='', encoding='utf-8') as f:
            writer = csv.DictWriter(f, self.FIELDS)
            if need_header:
                writer.writeheader()
            writer.writerow(row)

        # Pipelines must return the item so later pipelines receive it.
        return item


class CheckExist:
    """Redis-backed deduplication pipeline.

    Fingerprints each item (MD5 of its JSON form) and drops items whose
    fingerprint is already stored in Redis.  Fixes over the original:
    ``json.dumps(item)`` raises ``TypeError`` for Scrapy ``Item``
    objects, so the item is converted via ``ItemAdapter`` first; the
    JSON is rendered with ``sort_keys=True`` so field insertion order
    cannot change the fingerprint; and the get-then-set check is
    replaced by an atomic ``SET NX``.
    """

    def open_spider(self, spider):
        # One connection per crawl; db=1 keeps dedup keys out of the default db.
        self.redis_client = StrictRedis(host='localhost', port=6379, db=1)

    def close_spider(self, spider):
        self.redis_client.close()

    def process_item(self, item, spider):
        """Return *item* if unseen; raise DropItem on a duplicate."""
        # ItemAdapter handles dict, Item and dataclass items uniformly.
        serialized = json.dumps(ItemAdapter(item).asdict(), sort_keys=True)
        fingerprint = hashlib.md5(serialized.encode()).hexdigest()

        # SET with nx=True is atomic: it returns a falsy value when the
        # key already exists, avoiding the get/set race of the original.
        if not self.redis_client.set(fingerprint, serialized, nx=True):
            raise DropItem('----此数据丢弃，因为Redis中查询到了----')

        return item