# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html


# useful for handling different item types with a single interface
import csv
import hashlib
import json
import os

from itemadapter import ItemAdapter
from redis import StrictRedis
from scrapy.exceptions import DropItem


class PpnewsPipeline:
    """Persist scraped news items under ./download/.

    Dispatches on item['type']:
      * 'info' — the row is accumulated in memory and the complete list is
        rewritten to download/pp.csv, so the CSV is always up to date even
        if the crawl is interrupted.
      * 'img'  — raw image bytes are written to download/<img_name>.
    """

    def __init__(self):
        # Rows collected so far for the CSV ('info' items only).
        self.rows = []

    def process_item(self, item, spider):
        # The 'download' folder under the current working directory.
        download_path = os.path.join(os.getcwd(), 'download')
        # exist_ok avoids the check-then-create race of the original code.
        os.makedirs(download_path, exist_ok=True)

        msg_type = item.get('type')
        if msg_type == 'info':
            # BUG FIX: only 'info' items contribute a CSV row. Previously the
            # append ran for every item, so 'img' items injected all-None rows
            # into the CSV. (The Chinese keys are runtime data — unchanged.)
            self.rows.append({
                '标题': item.get('标题'),
                '图片链接': item.get('图片链接'),
                '文章链接': item.get('文章链接'),
            })
            # Rewrite the whole file each time so it is complete after any item.
            with open(os.path.join(download_path, 'pp.csv'), 'w',
                      newline='', encoding='utf-8') as f:
                f_csv = csv.DictWriter(f, ['标题', '图片链接', '文章链接'])
                f_csv.writeheader()
                f_csv.writerows(self.rows)
        elif msg_type == 'img':
            with open(os.path.join(download_path, item.get('img_name')), 'wb') as f:
                f.write(item.get('img_bytes'))

        # BUG FIX: return the item so downstream pipelines still receive it
        # (a pipeline that returns None silently swallows every item).
        return item

class CheckExist:
    """Redis-backed deduplication pipeline.

    Fingerprints each 'info' item with an MD5 of its JSON form and drops the
    item if the fingerprint is already in Redis; otherwise the fingerprint is
    stored so later duplicates are dropped. Non-'info' items (e.g. images)
    pass through untouched.
    """

    def open_spider(self, spider):
        # NOTE(review): connection parameters are hard-coded; consider reading
        # them from the spider settings instead.
        self.redis_client = StrictRedis(host='localhost', port=6379, db=0)

    def close_spider(self, spider):
        self.redis_client.close()

    def process_item(self, item, spider):
        msg_type = item.get('type')
        if msg_type == 'info':
            # BUG FIX: scrapy Item objects are not JSON-serializable directly
            # (json.dumps(item) raises TypeError); convert to a plain dict via
            # ItemAdapter first. sort_keys makes the fingerprint independent
            # of key insertion order.
            w_hash_str = json.dumps(ItemAdapter(item).asdict(), sort_keys=True)
            hash_val = hashlib.md5(w_hash_str.encode()).hexdigest()
            if self.redis_client.get(hash_val):
                # Duplicate seen before — drop it (message is runtime data).
                raise DropItem('----此数据丢弃，因为Redis中查询到了----')
            self.redis_client.set(hash_val, w_hash_str)
        return item
