# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html


# useful for handling different item types with a single interface
import csv
import os
from itemadapter import ItemAdapter
from redis import StrictRedis


class OnePipeline:
    """Scrapy item pipeline that persists scraped items.

    Items with ``type == 'img'`` are written as binary files under
    ``./download/``; items with ``type == 'text'`` are appended as rows
    to ``pengpai.csv``.
    """

    # Translation table for sanitizing filenames: unsafe/unwanted characters
    # are deleted (mapped to None); ASCII ':' and full-width '：' become '、',
    # and '|' becomes full-width '｜' so the name stays valid on all platforms.
    _FILENAME_TABLE = str.maketrans({
        '/': None, '\\': None, ' ': None, '，': None,
        '“': None, '”': None, '！': None,
        ':': '、', '：': '、', '|': '｜',
    })

    @staticmethod
    def _sanitize_filename(name):
        """Return *name* with filesystem-unsafe characters removed/replaced."""
        return name.translate(OnePipeline._FILENAME_TABLE)

    def open_spider(self, spider):
        """Called once when the spider starts: prepare output dir and CSV file."""
        # Redis connection (disabled):
        # self.redis_client = StrictRedis(host='localhost', port=6379, db=0)
        os.makedirs('./download', exist_ok=True)
        # Only write the header when the CSV is new or empty; the file is
        # opened in append mode, so an unconditional writeheader() would
        # duplicate the header row on every spider run.
        need_header = (not os.path.exists('pengpai.csv')
                       or os.path.getsize('pengpai.csv') == 0)
        # newline='' is required by the csv module to avoid extra blank
        # lines between rows on Windows.
        self.f = open('pengpai.csv', 'a', encoding='utf-8', newline='')
        self.f_csv = csv.DictWriter(self.f, ['title', 'text_url'])
        if need_header:
            self.f_csv.writeheader()

    def close_spider(self, spider):
        """Called when the spider closes: flush and release the CSV file."""
        self.f.close()

    def process_item(self, item, spider):
        """Route an item to disk (img) or CSV (text); return it unchanged.

        :param item: dict-like item; expected keys: 'type', 'title', and
            either 'info' (binary payload, for img) or 'text_url' (for text).
        :param spider: the spider that produced the item (unused).
        """
        item_type = item.get('type')
        if item_type == 'img':
            # Save the binary payload under ./download/ using the sanitized
            # title as the filename.
            filename = self._sanitize_filename(item.get('title'))
            with open('./download/' + filename, 'wb') as f:
                f.write(item.get('info'))
        elif item_type == 'text':
            self.f_csv.writerow({
                'title': item.get('title'),
                'text_url': item.get('text_url'),
            })
        return item
