# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html


# useful for handling different item types with a single interface
from itemadapter import ItemAdapter
from scrapy.exporters import CsvItemExporter


class ItcastCsvPipeline:
    """Export every scraped item to ``itcast.csv`` using Scrapy's CsvItemExporter."""

    def open_spider(self, spider):
        # Binary mode is required: CsvItemExporter writes encoded bytes.
        self.file = open("itcast.csv", "wb")
        # utf-8-sig prepends a BOM so spreadsheet apps (Excel) detect the encoding.
        self.csv_exporter = CsvItemExporter(self.file, encoding='utf-8-sig')
        # Emit the CSV header / start marker before any rows.
        self.csv_exporter.start_exporting()

    def process_item(self, item, spider):
        """Write one item as a CSV row, then hand it to the next pipeline stage."""
        self.csv_exporter.export_item(item)
        return item

    def close_spider(self, spider):
        # Finalize the export, then release the underlying file handle.
        self.csv_exporter.finish_exporting()
        self.file.close()


import redis
import json


class ItcastRedisPipeline:
    """Push each scraped item onto a Redis list as a JSON string."""

    def open_spider(self, spider):
        # Connect to the local Redis server (default database 0).
        self.redis_cli = redis.Redis(host="127.0.0.1", port=6379)

    def process_item(self, item, spider):
        """Serialize the item to JSON and LPUSH it onto the "ITCAST_List" key."""
        # ensure_ascii=False keeps non-ASCII (e.g. Chinese) text human-readable.
        payload = json.dumps(dict(item), ensure_ascii=False)
        self.redis_cli.lpush("ITCAST_List", payload)
        return item


import pymongo


class ItcastMongoPipeline:
    """Store each scraped item as a document in a local MongoDB collection."""

    def open_spider(self, spider):
        # Connect to the local MongoDB server; the database and collection
        # below are created lazily by MongoDB on the first insert.
        self.mongo_cli = pymongo.MongoClient(host="127.0.0.1", port=27017)
        self.db = self.mongo_cli["itcast"]
        self.sheet = self.db["itcast_item"]

    def process_item(self, item, spider):
        """Insert the item as a plain dict, then pass it downstream unchanged."""
        self.sheet.insert_one(dict(item))
        return item

class RedisPipeline:
    """Placeholder pipeline: forwards every item untouched."""

    def process_item(self, item, spider):
        # No-op stage — simply hand the item to the next pipeline.
        return item