from scrapy import signals  
import logging

from Freepatentsonline.databases.redis_handler import Redis_handler


import time
import scrapy
from scrapy.exceptions import DontCloseSpider
from Freepatentsonline.databases.redis_handler import Redis_handler
from Freepatentsonline.utils.path_file import get_config
from Freepatentsonline.settings import CONFIG_DIR, DB_CONFIG_FILENAME
from Freepatentsonline.utils.group_handler import Group_handler
from Freepatentsonline.project_utils.settings import GROUP_COUNT, GROUP_LENGTH
from Freepatentsonline.project_utils.utils import gen_group_task,gen_group_finished



from scrapy.utils.project import get_project_settings



# Load the database configuration file once at import time.
DB_CONFIG = get_config(CONFIG_DIR, DB_CONFIG_FILENAME)

# Connection settings for the Redis instance backing the task scheduler.
CONFIG_REDIS = DB_CONFIG["REDIS_SCHEDULER"]

# Module-level logger, named after this module per logging convention.
logger = logging.getLogger(__name__)



class RedisTaskSchedulerExtension:
    """Scrapy extension that refills the scheduler from Redis.

    On every ``spider_idle`` signal it pops pending task ids from each
    group's Redis task set and schedules one request per task.  If any
    request was scheduled it raises ``DontCloseSpider`` so the crawl
    stays alive and polls again on the next idle signal.
    """

    name = "freepatentsonline"

    # article_url_format = "https://www.freepatentsonline.com/{}.html"

    # Shared handles, created once when the class body is executed.
    redis = Redis_handler(CONFIG_REDIS)
    group_handler = Group_handler(GROUP_LENGTH, GROUP_COUNT)

    # Maps group identifier -> group contents; only the group key is
    # used here (to build the Redis task / finished key names).
    group_map = group_handler.gen_fake_group_map()

    @classmethod
    def from_crawler(cls, crawler):
        """Scrapy extension entry point: instantiate and wire up signals."""
        ext = cls()
        # Connect spider_opened too — it was previously defined but never
        # wired, so it could not fire.
        crawler.signals.connect(ext.spider_opened, signal=signals.spider_opened)
        crawler.signals.connect(ext.spider_idle, signal=signals.spider_idle)
        return ext

    def spider_opened(self, spider):
        # Lazy %-style logging; the old print("... %s", name) never
        # interpolated the spider name.
        logger.info("opened spider %s", spider.name)

    def spider_idle(self, spider):
        """Fired when the spider goes idle; pull new tasks from Redis.

        Raises:
            DontCloseSpider: when at least one request was scheduled, so
                the spider waits for the next idle signal instead of
                closing.
        """
        scheduled = False
        for group in self.group_map:
            # Build the pending-task key and the finished key for this group.
            group_task = gen_group_task(group)
            group_finish = gen_group_finished(group)

            # NOTE(review): assumes the wrapper's spop returns an iterable
            # of task ids (bare redis-py spop without a count returns a
            # single member) — confirm against Redis_handler.
            tasks = self.redis.redis.spop(group_task)
            if not tasks:
                continue

            for task in tasks:
                url = spider.article_url_format.format(task)
                info = {
                    "task_id": task,
                    "group_name": str(group),
                    "group_task": group_task,
                    "group_finish": group_finish,
                }
                # Hand the request straight to the engine — an idle handler
                # has no other way to enqueue new work.
                spider.crawler.engine.crawl(scrapy.Request(url, meta={"info": info}), spider)
                scheduled = True

        # Raise ONCE, after all popped tasks are scheduled.  The original
        # raised inside the inner loop (dropping every remaining popped
        # task) and spun in `while 1` — blocking the reactor — whenever
        # all group sets were empty.
        if scheduled:
            raise DontCloseSpider
