# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html


# useful for handling different item types with a single interface
from itemadapter import ItemAdapter
from .items import MovieCrapyItem, BookCrapyItem
import pymongo


# 保存数据到文件中
# Pipeline that appends each item as a "<number>-<name>" line to a local text file.
class FileScrapyPipeline:

    # Open the output file when the crawl starts. Using Scrapy's
    # open_spider/close_spider lifecycle hooks is more reliable than
    # __init__/__del__: __del__ is not guaranteed to run promptly (or at
    # all), so the file could be left unflushed.
    def open_spider(self, spider):
        # Explicit encoding so output does not depend on the platform default.
        self.f = open('./test.txt', 'w', encoding='utf-8')

    # Write one line per item and return the item so later pipelines
    # (per ITEM_PIPELINES ordering) still receive it.
    def process_item(self, item, spider):
        self.f.write(item['number'] + '-' + item['name'] + '\n')
        return item

    # Close the file when the crawl finishes.
    def close_spider(self, spider):
        self.f.close()



# Save data to MongoDB --------------------------------------------------------

# Module-level MongoDB client shared by the pipelines below.
# NOTE(review): credentials and host are hard-coded in the URI; these
# should come from Scrapy settings or environment variables — confirm
# before deploying. The client is created at import time, so importing
# this module configures a connection pool even if no spider runs.
client = pymongo.MongoClient("mongodb://admin:123456@192.168.149.100:27017/")

# 电影爬虫管道
# Pipeline that stores movie items in the "douban.movie" MongoDB collection.
class MovieScrapyPipeline:

    # Resolve the target collection once per crawl instead of on every item.
    def open_spider(self, spider):
        self.collection = client["douban"]["movie"]

    # Insert movie items into MongoDB; return the item so later pipelines
    # still receive it.
    def process_item(self, item, spider):
        # Only handle MovieCrapyItem; other item types (e.g. books) pass
        # through untouched for their own pipeline.
        if isinstance(item, MovieCrapyItem):
            res = self.collection.insert_one(dict(item))
            # Use the spider's logger rather than print() so output goes
            # through Scrapy's logging configuration.
            spider.logger.debug("inserted movie _id=%s", res.inserted_id)
        return item

# 图书爬虫管道
# Pipeline that stores book items in the "douban.book" MongoDB collection.
class BookScrapyPipeline:

    # Resolve the target collection once per crawl instead of on every item.
    def open_spider(self, spider):
        self.collection = client["douban"]["book"]

    # Insert book items into MongoDB; return the item so later pipelines
    # still receive it.
    def process_item(self, item, spider):
        # Only handle BookCrapyItem; other item types (e.g. movies) pass
        # through untouched for their own pipeline.
        if isinstance(item, BookCrapyItem):
            res = self.collection.insert_one(dict(item))
            # Use the spider's logger rather than print() so output goes
            # through Scrapy's logging configuration.
            spider.logger.debug("inserted book _id=%s", res.inserted_id)
        return item



