# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html


# useful for handling different item types with a single interface
import csv
import hashlib
import json
import os

from itemadapter import ItemAdapter
from redis import StrictRedis
from scrapy.exceptions import DropItem


class UpdateInfo:
    """Pipeline step that normalizes the scraped ``title`` field.

    Strips spaces, pipe characters and all line-break/tab characters
    from the title in a single pass.
    """

    # Translation table deleting every unwanted character in one
    # C-level pass (equivalent to five chained .replace() calls).
    _STRIP_TABLE = str.maketrans('', '', ' |\n\t\r')

    def process_item(self, item, spider):
        """Clean ``item['title']`` in place and return the item.

        Missing or ``None`` titles are left untouched (the previous
        version raised AttributeError on ``None``).
        """
        title = item.get('title')
        if title is not None:
            item['title'] = title.translate(self._STRIP_TABLE)
        return item


class CheckExist:
    """Deduplication pipeline backed by Redis.

    Hashes each ``info`` item and drops it when the hash was already
    seen; ``img`` and any other item types pass through unchanged.
    """

    def open_spider(self, spider):
        # db=1 keeps dedup keys separate from the default Redis db 0.
        self.redis_client = StrictRedis(host='localhost', port=6379, db=1)

    def process_item(self, item, spider):
        """Drop duplicate ``info`` items, pass everything else through.

        Raises:
            DropItem: when an identical ``info`` item was seen before.
        """
        type_ = item.get('type')
        if type_ == 'info':
            # Serialize via ItemAdapter so Scrapy Item objects work too
            # (json.dumps on a raw Item raises TypeError). sort_keys
            # makes the hash independent of field insertion order.
            wait_hash_str = json.dumps(
                ItemAdapter(item).asdict(), sort_keys=True, ensure_ascii=False
            )
            hash_val = hashlib.md5(wait_hash_str.encode()).hexdigest()
            if self.redis_client.get(hash_val):
                raise DropItem("丢弃数据")
            self.redis_client.set(hash_val, wait_hash_str)
        # Always return the item so downstream pipelines receive it;
        # the previous version returned None for unknown types, which
        # silently dropped them.
        return item


class PengpaiPipeline:
    """Persists items: ``info`` rows go to a CSV, ``img`` items to JPEG files."""

    # CSV column order for 'info' items.
    FIELDS = ['title', 'news_src', 'img_src']

    def __init__(self):
        self.download_path = os.path.join(os.getcwd(), 'download')
        # makedirs(..., exist_ok=True) avoids the race between
        # os.path.exists and os.mkdir.
        os.makedirs(self.download_path, exist_ok=True)

        csv_path = os.path.join(self.download_path, '澎湃新闻.csv')
        need_header = not os.path.exists(csv_path) or os.path.getsize(csv_path) == 0
        # newline='' is required by the csv module; explicit utf-8 so
        # Chinese content survives regardless of the system locale.
        self.f = open(csv_path, 'a', encoding='utf-8', newline='')
        # Build the writer once instead of per item.
        self.writer = csv.DictWriter(self.f, self.FIELDS)
        if need_header:
            self.writer.writeheader()

        self.download_img_path = os.path.join(os.getcwd(), 'images')
        os.makedirs(self.download_img_path, exist_ok=True)

    def process_item(self, item, spider):
        """Write the item to disk and return it for downstream pipelines."""
        # Remove the routing marker so it does not leak into the CSV;
        # default avoids KeyError when 'type' is absent.
        type_ = item.pop('type', None)
        if type_ == 'info':
            self.writer.writerow(item)
        elif type_ == 'img':
            # 'content' is expected to hold the raw image bytes.
            img_file = os.path.join(self.download_img_path, item.get('title') + '.jpg')
            with open(img_file, 'wb') as fh:
                fh.write(item.get('content'))
        # The previous version returned None, silently dropping the item.
        return item

    def close_spider(self, spider):
        self.f.close()
