# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html


# useful for handling different item types with a single interface
import json
from itemadapter import ItemAdapter
from scrapy.cmdline import execute
from Freepatentsonline.databases.redis_handler import Redis_handler



from Freepatentsonline.utils.path_file import get_config
from Freepatentsonline.settings import CONFIG_DIR, DB_CONFIG_FILENAME





# Database configuration loaded from the project's config directory.
DB_CONFIG = get_config(CONFIG_DIR, DB_CONFIG_FILENAME)

# Connection config for the Redis instance used by the scheduler.
REDIS_SCHEDULER = DB_CONFIG["REDIS_SCHEDULER"]
# Connection config for the Tendis instance used as temporary storage.
REDIS_STORE = DB_CONFIG["REDIS_STORE"]

# Redis key (list name) under which scraped data is temporarily stored.
REDIS_STORE_DATA = DB_CONFIG["REDIS_STORE_DATA"]


class FreepatentsonlinePipeline:
    """Pass-through pipeline that re-launches the crawl when the spider closes."""

    def process_item(self, item, spider):
        """Hand each item through unchanged to the next pipeline stage."""
        return item

    def close_spider(self, spider):
        """Start a fresh ``scrapy crawl freepatentsonline`` run on spider close.

        NOTE(review): scrapy.cmdline.execute normally does not return (it
        terminates the process) — confirm this restart-on-close behaviour
        is intentional.
        """
        execute(["scrapy", "crawl", "freepatentsonline"])

class RedisPipeline:
    """Pipeline that pushes every scraped item onto a Redis list for
    temporary storage (see REDIS_STORE / REDIS_STORE_DATA config)."""

    def __init__(self) -> None:
        pass

    def open_spider(self, spider):
        """Open the Redis (Tendis) connection when the spider starts."""
        self.redis_store = Redis_handler(REDIS_STORE)

    def close_spider(self, spider):
        """Close the underlying Redis connection when the spider finishes."""
        self.redis_store.redis.close()

    def process_item(self, item, spider):
        """Serialize *item* to JSON and LPUSH it onto the temporary-storage list.

        Returns the item so that later pipelines in ITEM_PIPELINES still
        receive it — the previous version implicitly returned None, which
        would have handed None to any downstream pipeline.
        """
        # ItemAdapter handles scrapy.Item, dataclass and plain dict items
        # alike; json.dumps on a raw scrapy.Item raises TypeError.
        self.redis_store.redis.lpush(
            REDIS_STORE_DATA,
            json.dumps(ItemAdapter(item).asdict(), ensure_ascii=False),
        )
        return item
