# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html


# useful for handling different item types with a single interface
import MySQLdb
import pymongo
from itemadapter import ItemAdapter
from scrapy.exceptions import DropItem


class DuplicatesPipeline:
    """Drop any book whose author was already seen earlier in the crawl."""

    def __init__(self):
        # Authors encountered so far; membership test is O(1).
        self.author_set = set()

    def process_item(self, item, spider):
        author = item['author']
        if author not in self.author_set:
            self.author_set.add(author)
            return item
        raise DropItem("查找到重复作者的项目：%s" % item)

class QidianHotPipeline:
    """Normalize the serialization-status field to a two-letter code."""

    def process_item(self, item, spider):
        # '连载' (serializing) becomes 'LZ'; everything else maps to 'WJ'.
        item['form'] = 'LZ' if item['form'] == '连载' else 'WJ'
        return item


class SaveToTxtPipeline:
    """Append each scraped novel as one semicolon-separated text line."""

    file = None  # opened in open_spider, closed in close_spider

    @classmethod
    def from_crawler(cls, crawler):
        # Resolve the output path from the project settings up front.
        cls.file_name = crawler.settings.get("FILE_NAME", 'hot2.txt')
        return cls()

    def open_spider(self, spider):
        self.file = open(self.file_name, 'a', encoding='utf-8')

    def process_item(self, item, spider):
        fields = (item['name'], item['author'], item['type'], item['form'])
        self.file.write(';'.join(fields) + '\n')
        return item

    def close_spider(self, spider):
        self.file.close()

class MySQLPipeline:
    """Persist scraped items into the MySQL table ``hot``.

    Settings used (with defaults): MYSQL_DB_NAME ("qidian_db"),
    MYSQL_HOST ("localhost"), MYSQL_USER ("root"), MYSQL_PASSWORD ("").

    Fix: the original committed only in ``close_spider``, so every row
    inserted during the crawl was lost if the process died before the
    spider closed cleanly. Each item is now committed as it is stored.
    """

    def open_spider(self, spider):
        # Read the MySQL connection settings, falling back to local defaults.
        db_name = spider.settings.get("MYSQL_DB_NAME", "qidian_db")
        host = spider.settings.get("MYSQL_HOST", "localhost")
        user = spider.settings.get("MYSQL_USER", "root")
        pwd = spider.settings.get("MYSQL_PASSWORD", "")
        # NOTE(review): MySQL "utf8" is the 3-byte subset; consider
        # "utf8mb4" if item text may contain emoji/supplementary chars.
        self.db_conn = MySQLdb.connect(
            db=db_name,
            host=host,
            user=user,
            password=pwd,
            charset="utf8"
        )
        # Cursor used for all inserts during this spider run.
        self.db_cursor = self.db_conn.cursor()

    def process_item(self, item, spider):
        """Insert one item and commit immediately for durability."""
        values = (item['name'], item['author'], item['type'], item['form'])
        # Parameterized query: the driver escapes the values (no SQL injection).
        sql = 'insert into hot(name, author, type, form) values (%s, %s, %s, %s)'
        self.db_cursor.execute(sql, values)
        self.db_conn.commit()
        return item

    def close_spider(self, spider):
        # All rows are already committed per item; just release resources.
        self.db_cursor.close()
        self.db_conn.close()


class MongoDBPipeline:
    """Store each scraped item as a document in a MongoDB collection."""

    def open_spider(self, spider):
        # All connection parameters come from settings, with local defaults.
        settings = spider.settings
        host = settings.get("MONGODB_HOST", "localhost")
        port = settings.get("MONGODB_PORT", 27017)
        # Client -> database -> collection, resolved in one chain.
        self.db_client = pymongo.MongoClient(host=host, port=port)
        self.db = self.db_client[settings.get("MONGODB_NAME", "qidian")]
        self.db_collection = self.db[settings.get("MONGODB_COLLECTION", "hot")]

    def process_item(self, item, spider):
        # insert_one wants a plain mapping, so coerce the item to a dict first.
        self.db_collection.insert_one(dict(item))
        return item

    def close_spider(self, spider):
        # Release the client (and its connection pool) when the spider ends.
        self.db_client.close()