import logging
import pickle
import json
from typing import Dict, Any, Optional
from datetime import datetime
import uuid

from celery import current_app
from celery.schedules import crontab
from celery.beat import Service
import redis

from sqlalchemy.orm import Session
from app.crud.shipment import shipment
from app.crud.sys_config import sys_config
from app.utils.cron_utils import convert_quartz_to_apscheduler, parse_cron_expression
from app.core.config import settings

logger = logging.getLogger(__name__)

class TaskManagerService:
    """
    货物监控任务管理服务
    负责为每个货物创建单独的定时任务
    """
    
    def __init__(self, db: Session):
        """
        Set up the manager: keep the DB session, grab the current Celery app,
        and try to connect to Redis and restore previously persisted tasks.
        """
        self.db = db
        self._task_registry: Dict[str, Any] = {}  # tracks registration state per task id
        self._celery_app = current_app

        # Connect to Redis (same database as the Celery result backend) and
        # restore any task registrations persisted by an earlier run. On any
        # failure we degrade gracefully to a Redis-less, in-memory-only mode.
        try:
            self.redis_client = redis.from_url(settings.REDIS_BACKEND_URL)
            self._load_task_registry_from_redis()
        except Exception as e:
            logger.error(f"初始化Redis连接失败: {str(e)}")
            self.redis_client = None
    
    def _load_task_registry_from_redis(self):
        """
        Restore the task registry and Beat schedule previously persisted to Redis.

        Rebuilds celery ``crontab`` objects from their serialized dict form
        (the inverse of ``_convert_crontab_to_dict``) and merges the restored
        schedule into the running Celery app's ``beat_schedule``. Best-effort:
        any failure is logged and swallowed so startup can proceed without
        Redis state.
        """
        try:
            if self.redis_client:
                # Restore the task registry (plain JSON dict).
                registry_data = self.redis_client.get('task_registry')
                if registry_data:
                    self._task_registry = json.loads(registry_data)
                    logger.info(f"从Redis加载了 {len(self._task_registry)} 个任务注册信息")
                
                # Restore the Beat schedule.
                beat_schedule_data = self.redis_client.get('beat_schedule')
                if beat_schedule_data:
                    # Schedules were stored as dicts ({'type': 'crontab', ...});
                    # crontab objects must be rebuilt from those fields.
                    beat_schedule = json.loads(beat_schedule_data)
                    for task_id, task_config in beat_schedule.items():
                        if 'schedule' in task_config and isinstance(task_config['schedule'], dict):
                            schedule_data = task_config['schedule']
                            if schedule_data.get('type') == 'crontab':
                                # Rebuild the crontab object, converting stringified
                                # set literals back into comma-separated lists.
                                try:
                                    # str(crontab.minute) yields a set literal such as
                                    # "{0, 1, 2}"; crontab() wants "0,1,2", so strip
                                    # the braces, split and re-join.
                                    def parse_set_str(s):
                                        if isinstance(s, str) and s.startswith('{') and s.endswith('}'):
                                            # Drop the braces, split on commas, trim whitespace.
                                            return ','.join([item.strip() for item in s[1:-1].split(',')])
                                        return s
                                    
                                    task_config['schedule'] = crontab(
                                        minute=parse_set_str(schedule_data.get('minute', '*')),
                                        hour=parse_set_str(schedule_data.get('hour', '*')),
                                        day_of_month=parse_set_str(schedule_data.get('day_of_month', '*')),
                                        month_of_year=parse_set_str(schedule_data.get('month_of_year', '*')),
                                        day_of_week=parse_set_str(schedule_data.get('day_of_week', '*'))
                                    )
                                except Exception as e:
                                    logger.error(f"重建crontab对象失败: {str(e)}")
                                    # Fall back to an every-5-minutes schedule.
                                    task_config['schedule'] = crontab(minute='*/5')
                    
                    # Merge the restored schedule into the live Celery app config.
                    self._celery_app.conf.beat_schedule.update(beat_schedule)
                    logger.info(f"从Redis加载了 {len(beat_schedule)} 个任务调度配置")
                    
                    # Nudge the Beat service (if reachable) to pick up the restored schedule.
                    self._restart_beat_if_available()
        except Exception as e:
            logger.error(f"从Redis加载任务注册信息失败: {str(e)}")
    
    def _convert_crontab_to_dict(self, crontab_obj):
        """
        Serialize a celery ``crontab`` schedule into a JSON-friendly dict.

        The dict carries ``type: 'crontab'`` plus the five stringified time
        fields, which ``_load_task_registry_from_redis`` uses to rebuild the
        object. Anything that is not a crontab is coerced to ``str`` so it
        can still be stored in Redis.
        """
        if isinstance(crontab_obj, crontab):
            serialized = {'type': 'crontab'}
            for field in ('minute', 'hour', 'day_of_month', 'month_of_year', 'day_of_week'):
                serialized[field] = str(getattr(crontab_obj, field))
            return serialized
        return str(crontab_obj)
        
    def _save_task_registry_to_redis(self, send_notification=True):
        """
        Persist the task registry and Beat schedule to Redis.

        Only tasks whose registry status is 'active' are written; crontab
        schedules and other non-JSON values are converted to serializable
        forms first. Best-effort: failures are logged, never raised.

        Args:
            send_notification: whether to publish a 'beat_schedule_updated'
                message so the Beat process reloads; defaults to True.
        """
        try:
            if self.redis_client:
                # 1. Build a JSON-serializable registry containing only active tasks.
                registry_data = {}
                for task_id, task_info in self._task_registry.items():
                    # Skip anything not marked active.
                    if task_info.get('status') == 'active':
                        registry_copy = task_info.copy()
                        # Coerce the schedule into a JSON-serializable value.
                        if 'schedule' in registry_copy:
                            if isinstance(registry_copy['schedule'], crontab):
                                registry_copy['schedule'] = self._convert_crontab_to_dict(registry_copy['schedule'])
                            elif not isinstance(registry_copy['schedule'], (str, dict)):
                                registry_copy['schedule'] = str(registry_copy['schedule'])
                        registry_data[task_id] = registry_copy
                
                # 2. Write the registry.
                self.redis_client.set('task_registry', json.dumps(registry_data))
                logger.info(f"已将 {len(registry_data)} 个活跃任务注册信息保存到Redis")
                
                # 3. Build the serializable Beat schedule — crontab objects need conversion.
                beat_schedule = {}
                for task_id, task_config in self._celery_app.conf.beat_schedule.items():
                    # Persist only shipment-monitor tasks that are still active.
                    if task_id.startswith('shipment-monitor-') and task_id in registry_data:
                        task_copy = task_config.copy()
                        
                        # Convert the crontab schedule to its dict form.
                        if 'schedule' in task_copy:
                            if isinstance(task_copy['schedule'], crontab):
                                task_copy['schedule'] = self._convert_crontab_to_dict(task_copy['schedule'])
                            elif not isinstance(task_copy['schedule'], (str, dict)):
                                task_copy['schedule'] = str(task_copy['schedule'])
                        
                        # Stringify any non-primitive nested values so json.dumps succeeds.
                        if 'kwargs' in task_copy and isinstance(task_copy['kwargs'], dict):
                            task_copy['kwargs'] = {k: str(v) if not isinstance(v, (str, int, float, bool, type(None))) else v 
                                                 for k, v in task_copy['kwargs'].items()}
                        
                        if 'options' in task_copy and isinstance(task_copy['options'], dict):
                            task_copy['options'] = {k: str(v) if not isinstance(v, (str, int, float, bool, type(None))) else v 
                                                  for k, v in task_copy['options'].items()}
                        
                        beat_schedule[task_id] = task_copy
                
                # 4. Write the schedule.
                self.redis_client.set('beat_schedule', json.dumps(beat_schedule))
                logger.info(f"已将 {len(beat_schedule)} 个任务调度配置保存到Redis")
                
                # 5. Optionally tell the Beat process the config changed.
                if send_notification and beat_schedule:  # notify only when requested and there are tasks
                    update_id = uuid.uuid4()
                    self.redis_client.publish('beat_schedule_updated', f"update-{update_id}")
                    logger.info(f"已发送Beat调度更新通知 (ID: {update_id})")
                
                # 6. Log the active task ids for traceability.
                logger.info(f"当前活跃任务列表: {list(registry_data.keys())}")
        except Exception as e:
            logger.error(f"保存任务注册信息到Redis失败: {str(e)}")
    
    
    def register_shipment_task(self, shipment_id: int, mac_id: str, shipment_number: str) -> Dict[str, Any]:
        """
        Register a periodic monitoring task for a single shipment.

        Creates (or replaces) a 'shipment-monitor-<id>' entry in the Celery
        Beat schedule, records it in the in-memory registry, and mirrors both
        into Redis so a separate Beat process can pick it up. Deliberately
        does NOT send a Beat notification — callers batching many
        registrations send one notification afterwards (see
        ``init_all_active_shipment_tasks``).

        Args:
            shipment_id: shipment ID
            mac_id: device ID
            shipment_number: shipment number (for log traceability)

        Returns:
            Result dict with ``success``, ``message`` and, on success, ``task_id``.
        """
        try:
            # An existing task for the same shipment is simply overwritten.
            task_id = f"shipment-monitor-{shipment_id}"
            if task_id in self._task_registry:
                logger.info(f"货物 {shipment_number} 的监控任务已存在，将覆盖")
                
            # Read the configured task frequency (a cron expression).
            frequency_config = sys_config.get_value_by_code(
                self.db, 
                code='shipmentTaskFrequency', 
                default_value='*/5 * * * *'
            )
            
            # Parse it into a crontab; fall back to every 5 minutes on failure.
            schedule = self._parse_cron_expression(frequency_config)
            if not schedule:
                logger.error(f"解析任务频率失败: {frequency_config}，将使用默认频率 */5 * * * *")
                schedule = crontab(minute='*/5')
                
            # Unique ID for each task run (avoids result-id collisions).
            unique_task_run_id = f"{task_id}-{uuid.uuid4()}"
            
            # Build the task config; the consistent 'shipment-monitor-' naming
            # lets Beat identify monitoring tasks by prefix.
            task_config = {
                'task': 'app.tasks.shipment_tasks.process_single_shipment_task',
                'schedule': schedule,
                'args': (shipment_id,),
                'options': {
                    'expires': 60 * 5,  # task expires after 5 minutes
                    'queue': 'shipment_monitor',  # dedicated queue
                    'task_id': unique_task_run_id
                },
                'kwargs': {
                    'shipment_number': shipment_number,
                    'mac_id': mac_id
                }
            }
            
            # Remove any existing schedule entry with the same ID first.
            if task_id in self._celery_app.conf.beat_schedule:
                del self._celery_app.conf.beat_schedule[task_id]
                logger.info(f"移除了现有的任务配置: {task_id}")
            
            # Install the new schedule entry.
            self._celery_app.conf.beat_schedule[task_id] = task_config
            
            # Track the task in the registry (schedule stored in serializable form).
            self._task_registry[task_id] = {
                'shipment_id': shipment_id,
                'schedule': self._convert_crontab_to_dict(schedule),  # JSON-serializable form
                'registered_at': datetime.now().isoformat(),
                'status': 'active'
            }
            
            logger.info(f"已为货物 {shipment_number} (ID: {shipment_id}) 注册定时监控任务，频率: {schedule}")
            
            # Persist to Redis WITHOUT notifying Beat; the outer caller sends
            # a single notification after all registrations are done.
            try:
                # Write this single task straight into the Redis beat_schedule.
                if self.redis_client:
                    # Load the current beat_schedule (if any).
                    beat_schedule_data = self.redis_client.get('beat_schedule')
                    if beat_schedule_data:
                        beat_schedule = json.loads(beat_schedule_data)
                    else:
                        beat_schedule = {}
                    
                    # Add or replace this task's entry.
                    serialized_config = task_config.copy()
                    serialized_config['schedule'] = self._convert_crontab_to_dict(schedule)
                    beat_schedule[task_id] = serialized_config
                    
                    # Write the schedule back to Redis.
                    self.redis_client.set('beat_schedule', json.dumps(beat_schedule))
                    logger.info(f"已直接更新Redis中的单个任务配置: {task_id}")
                    
                    # Persist the registry too (still without notification).
                    self._save_task_registry_to_redis(send_notification=False)
                    
                    # Verify the task actually landed in Redis.
                    import time
                    time.sleep(0.1)  # brief pause to let the write settle
                    
                    # Re-read and confirm the task is present.
                    beat_schedule_check = self.redis_client.get('beat_schedule')
                    if beat_schedule_check:
                        beat_schedule_json = json.loads(beat_schedule_check)
                        if task_id in beat_schedule_json:
                            logger.info(f"确认任务已成功保存到Redis: {task_id}")
            except Exception as e:
                logger.error(f"更新Redis中的任务配置失败: {str(e)}")
            
            return {
                'success': True,
                'task_id': task_id,
                'message': f"已为货物 {shipment_number} 注册定时监控任务"
            }
            
        except Exception as e:
            logger.error(f"为货物 {shipment_number} 注册监控任务失败: {str(e)}")
            return {
                'success': False,
                'message': f"注册监控任务失败: {str(e)}"
            }
    
    def unregister_shipment_task(self, shipment_id: int) -> Dict[str, Any]:
        """
        Remove the periodic monitoring task for a shipment.

        Deletes the task from the local Beat schedule and registry, then from
        the Redis copies of both, persists the updated state with a Beat
        notification, and finally re-reads Redis to force-remove the entry if
        it somehow survived.

        Args:
            shipment_id: shipment ID

        Returns:
            Result dict with ``success`` and ``message``; ``success`` is False
            when no task for the shipment was found anywhere.
        """
        try:
            task_id = f"shipment-monitor-{shipment_id}"
            task_removed = False
            
            # 1. Remove from local in-memory state first.
            # 1.1 Drop the Beat schedule entry.
            if task_id in self._celery_app.conf.beat_schedule:
                del self._celery_app.conf.beat_schedule[task_id]
                task_removed = True
                logger.info(f"已从本地Celery Beat调度中移除任务 {task_id}")
            
            # 1.2 Drop the registry entry.
            if task_id in self._task_registry:
                # Delete outright rather than just marking it as removed.
                del self._task_registry[task_id]
                task_removed = True
                logger.info(f"已从本地任务注册表中移除任务 {task_id}")
            
            # 2. Remove directly from Redis as well.
            if self.redis_client:
                try:
                    # 2.1 Remove from the Redis beat_schedule.
                    beat_schedule_data = self.redis_client.get('beat_schedule')
                    if beat_schedule_data:
                        beat_schedule = json.loads(beat_schedule_data)
                        if task_id in beat_schedule:
                            del beat_schedule[task_id]
                            self.redis_client.set('beat_schedule', json.dumps(beat_schedule))
                            logger.info(f"已从Redis中的beat_schedule移除任务 {task_id}")
                            task_removed = True
                    
                    # 2.2 Remove from the Redis task_registry.
                    registry_data = self.redis_client.get('task_registry')
                    if registry_data:
                        registry = json.loads(registry_data)
                        if task_id in registry:
                            del registry[task_id]
                            self.redis_client.set('task_registry', json.dumps(registry))
                            logger.info(f"已从Redis中的task_registry移除任务 {task_id}")
                            task_removed = True
                except Exception as e:
                    logger.error(f"直接从Redis中移除任务 {task_id} 失败: {str(e)}")
            
            # 3. Persist the updated registry to Redis (covers any other pending changes).
            # Only notify Beat when something was actually removed.
            if task_removed:
                self._save_task_registry_to_redis(send_notification=True)
                
                # 4. Verify the task is really gone from Redis.
                try:
                    import time
                    time.sleep(0.5)  # give Redis a moment to settle
                    
                    # Re-read the schedule; force-remove if the entry survived.
                    if self.redis_client:
                        beat_schedule_data = self.redis_client.get('beat_schedule')
                        if beat_schedule_data:
                            beat_schedule = json.loads(beat_schedule_data)
                            if task_id in beat_schedule:
                                logger.warning(f"警告: 任务 {task_id} 仍然存在于Redis的beat_schedule中")
                                # Force-remove it once more.
                                del beat_schedule[task_id]
                                self.redis_client.set('beat_schedule', json.dumps(beat_schedule))
                                # Extra notification only in this corrective path.
                                self.redis_client.publish('beat_schedule_updated', f"force-remove-{uuid.uuid4()}")
                except Exception as e:
                    logger.error(f"验证任务移除失败: {str(e)}")
                
                logger.info(f"已移除货物 ID {shipment_id} 的监控任务，并通知Beat进程更新")
                return {
                    'success': True,
                    'message': f"已移除货物 ID {shipment_id} 的监控任务"
                }
            else:
                logger.warning(f"未找到货物 ID {shipment_id} 的监控任务")
                return {
                    'success': False,
                    'message': f"未找到货物 ID {shipment_id} 的监控任务"
                }
                
        except Exception as e:
            logger.error(f"移除货物 ID {shipment_id} 的监控任务失败: {str(e)}")
            return {
                'success': False,
                'message': f"移除监控任务失败: {str(e)}"
            }
    
    def init_all_active_shipment_tasks(self) -> Dict[str, Any]:
        """
        Register monitoring tasks for every active shipment.

        Intended to be called once at system startup. Registers each active
        shipment individually (per-task Beat notifications are suppressed),
        then persists everything to Redis with a single notification and
        verifies the Redis copy contains every active task.

        Returns:
            Result dict with ``success``, ``message``, ``total``,
            ``success_count`` and (when registrations occurred) ``failed``.
        """
        try:
            # Fetch every shipment that needs monitoring.
            active_shipments = shipment.get_active_shipments(self.db)
            
            if not active_shipments:
                logger.info("没有需要监控的货物")
                return {
                    'success': True,
                    'message': '没有需要监控的货物',
                    'total': 0,
                    'success_count': 0
                }
            
            # Register a task per active shipment, collecting failures.
            success_count = 0
            failed_shipments = []
            
            for shipment_obj in active_shipments:
                result = self.register_shipment_task(
                    shipment_id=shipment_obj.id,
                    mac_id=shipment_obj.mac_id,
                    shipment_number=shipment_obj.number_no
                )
                
                if result['success']:
                    success_count += 1
                else:
                    failed_shipments.append({
                        'id': shipment_obj.id,
                        'number_no': shipment_obj.number_no,
                        'error': result['message']
                    })
            
            # Persist to Redis and send ONE consolidated Beat notification.
            self._save_task_registry_to_redis(send_notification=True)
            
            # Verify the Beat process's Redis view contains every active task.
            try:
                import time
                time.sleep(0.5)  # give Redis a moment to settle
                
                if self.redis_client:
                    # Compare the Redis registry against the Redis schedule.
                    beat_schedule_data = self.redis_client.get('beat_schedule')
                    registry_data = self.redis_client.get('task_registry')
                    
                    if beat_schedule_data and registry_data:
                        beat_schedule = json.loads(beat_schedule_data)
                        registry = json.loads(registry_data)
                        
                        # Every active registry entry should appear in beat_schedule.
                        active_task_ids = [task_id for task_id, info in registry.items() 
                                        if info.get('status') == 'active']
                        missing_tasks = [task_id for task_id in active_task_ids 
                                        if task_id not in beat_schedule]
                        
                        if missing_tasks:
                            logger.warning(f"警告: 以下活跃任务不在Redis的beat_schedule中: {missing_tasks}")
                        else:
                            logger.info(f"确认所有 {len(active_task_ids)} 个活跃任务都已在Redis的beat_schedule中")
            except Exception as e:
                logger.error(f"确认Redis配置更新失败: {str(e)}")
            
            logger.info(f"已初始化 {len(active_shipments)} 个货物的监控任务，成功 {success_count} 个")
            
            return {
                'success': True,
                'message': f"已初始化 {len(active_shipments)} 个货物的监控任务，成功 {success_count} 个",
                'total': len(active_shipments),
                'success_count': success_count,
                'failed': failed_shipments
            }
            
        except Exception as e:
            logger.error(f"初始化货物监控任务失败: {str(e)}")
            return {
                'success': False,
                'message': f"初始化货物监控任务失败: {str(e)}"
            }
    
    def update_all_task_schedules(self) -> Dict[str, Any]:
        """
        Update the schedule frequency of every shipment monitoring task.

        Called when the 'shipmentTaskFrequency' system configuration changes;
        re-reads the cron expression, applies it to every
        'shipment-monitor-*' entry in the Beat schedule, persists the new
        state to Redis and asks the Beat process to reload.

        Returns:
            Result dict with ``success``, ``message`` and, on success,
            ``updated_count`` and ``new_schedule``.
        """
        try:
            # Fetch the latest frequency configuration (a cron expression).
            frequency_config = sys_config.get_value_by_code(
                self.db, 
                code='shipmentTaskFrequency', 
                default_value='*/5 * * * *'
            )
            
            # Parse it into a crontab; fall back to every 5 minutes on failure.
            schedule = self._parse_cron_expression(frequency_config)
            if not schedule:
                logger.error(f"解析任务频率失败: {frequency_config}，将使用默认频率 */5 * * * *")
                schedule = crontab(minute='*/5')
            
            # Apply the new schedule to every shipment monitoring task.
            updated_count = 0
            for task_id, task_config in self._celery_app.conf.beat_schedule.items():
                # Only shipment monitor tasks are touched.
                if task_id.startswith('shipment-monitor-'):
                    task_config['schedule'] = schedule
                    
                    # Keep the registry entry in the same serializable dict
                    # format used by register_shipment_task (the previous
                    # str(schedule) form was inconsistent with it).
                    if task_id in self._task_registry:
                        self._task_registry[task_id]['schedule'] = self._convert_crontab_to_dict(schedule)
                    
                    updated_count += 1
            
            # Persist registry + schedule to Redis (sends a Beat notification).
            self._save_task_registry_to_redis()
            
            # Ask the Beat service to reload so the change takes effect.
            self._restart_beat_if_available()
            
            logger.info(f"已更新 {updated_count} 个货物监控任务的频率为 {frequency_config}")
            
            return {
                'success': True,
                'message': f"已更新 {updated_count} 个货物监控任务的频率",
                'updated_count': updated_count,
                'new_schedule': frequency_config
            }
            
        except Exception as e:
            logger.error(f"更新任务频率失败: {str(e)}")
            return {
                'success': False,
                'message': f"更新任务频率失败: {str(e)}"
            }
    
    def get_registered_tasks(self) -> Dict[str, Any]:
        """
        List every monitoring task currently known to the in-memory registry.

        Returns:
            Dict with a ``success`` flag, ``total`` count and a ``tasks``
            list; each entry carries the task id, shipment id, schedule,
            registration time, and status (``'unknown'`` when none recorded).
        """
        tasks = [
            {
                'task_id': task_id,
                'shipment_id': info.get('shipment_id'),
                'schedule': info.get('schedule'),
                'registered_at': info.get('registered_at'),
                'status': info.get('status', 'unknown'),
            }
            for task_id, info in self._task_registry.items()
        ]

        return {
            'success': True,
            'total': len(tasks),
            'tasks': tasks,
        }
    
    def _parse_cron_expression(self, cron_expression: str) -> Optional[crontab]:
        """
        Parse a cron expression into a Celery crontab object.

        The expression is first normalized from Quartz to APScheduler format.
        A leading seconds field (6-field format), if present, is ignored —
        Celery's crontab has minute resolution at best. (The previous
        implementation had two byte-identical branches for the 5- and
        6-field cases; they are collapsed into one here.)

        Args:
            cron_expression: cron expression (5-field, or 6-field/Quartz)

        Returns:
            A celery ``crontab``, or None when the expression cannot be parsed.
        """
        try:
            # Normalize Quartz-style expressions to APScheduler format.
            apscheduler_expression = convert_quartz_to_apscheduler(cron_expression)
            
            # Split the expression into its named fields.
            cron_parts = parse_cron_expression(apscheduler_expression)
            
            # Build the crontab from the minute-and-coarser fields; any
            # "second" field is deliberately dropped.
            return crontab(
                minute=self._convert_cron_part(cron_parts["minute"]),
                hour=self._convert_cron_part(cron_parts["hour"]),
                day_of_month=self._convert_cron_part(cron_parts["day"]),
                month_of_year=self._convert_cron_part(cron_parts["month"]),
                day_of_week=self._convert_cron_part(cron_parts["day_of_week"])
            )
        except Exception as e:
            logger.error(f"解析cron表达式 {cron_expression} 失败: {str(e)}")
            return None
    
    def _convert_cron_part(self, cron_part):
        """
        Normalize one field of a cron expression into a Celery-friendly form.

        Args:
            cron_part: a single cron field, e.g. '*', '7', '*/5', '1-10/2',
                '1,2,3' or '3-9'.

        Returns:
            The field in a format celery's crontab accepts (for well-formed
            input this is the value unchanged; step expressions are rebuilt
            as base/step).
        """
        # Wildcards and plain numeric values pass straight through.
        if cron_part == '*' or cron_part.isdigit():
            return cron_part

        # Step expressions ("*/5", "1-10/2"): rebuild as base/step, keeping
        # the wildcard form canonical.
        if '/' in cron_part:
            base, step = cron_part.split('/')
            return f'*/{step}' if base == '*' else f'{base}/{step}'

        # Lists ("1,2,3"), ranges ("3-9") and anything else are already in a
        # format crontab understands — return them unchanged.
        return cron_part
    
    def _restart_beat_if_available(self):
        """
        Ask the Celery Beat process to pick up schedule changes.

        Publishes a 'beat_schedule_updated' message over Redis pub/sub, and —
        when a Beat service happens to live in this very process — also resets
        its scheduler in place. Finally re-reads Redis to confirm every active
        task made it into the persisted schedule. Best-effort: all failures
        are logged, never raised.
        """
        try:
            # 1. Publish a reload notification over Redis pub/sub.
            if self.redis_client:
                # Unique ID so each message is distinguishable from the last.
                update_id = uuid.uuid4()
                self.redis_client.publish('beat_schedule_updated', f"update-{update_id}")
                logger.info(f"已通过Redis发送Beat调度更新通知 (ID: {update_id})")
                
                # Log which queue each active monitor task targets.
                for task_id, task_config in self._celery_app.conf.beat_schedule.items():
                    if task_id.startswith('shipment-monitor-'):
                        queue = task_config.get('options', {}).get('queue', 'celery')
                        logger.info(f"任务 {task_id} 使用队列: {queue}")
            
            # 2. If a Beat service runs in this process, try resetting it directly.
            if hasattr(self._celery_app, 'beat') and self._celery_app.beat:
                logger.info("检测到本地Beat服务实例，尝试直接重新加载")
                # 2.1 A Beat Service instance exposes its scheduler.
                if hasattr(self._celery_app.beat, 'scheduler'):
                    scheduler = self._celery_app.beat.scheduler
                    
                    # 2.2 Reset the scheduler's internal state.
                    if hasattr(scheduler, 'schedule'):
                        try:
                            # Wipe the current schedule...
                            scheduler.schedule = {}
                            # ...then merge the app's config back in.
                            scheduler.merge_inplace(self._celery_app.conf.beat_schedule)
                            
                            # Flush scheduler state to its backing store.
                            if hasattr(scheduler, 'sync') and callable(scheduler.sync):
                                scheduler.sync()
                                logger.info("已成功重置本地Beat调度器并同步")
                            
                            # Reset the internal timestamp so ticking restarts cleanly.
                            if hasattr(scheduler, '_last_timestamp'):
                                import time
                                scheduler._last_timestamp = time.time()
                        except Exception as e:
                            logger.error(f"重置Beat调度器失败: {str(e)}")
            
            # 3. Confirm the Redis copy of the schedule is complete.
            try:
                import time
                time.sleep(0.5)  # wait for the updates to land
                
                if self.redis_client:
                    # Compare the Redis registry against the Redis schedule.
                    beat_schedule_data = self.redis_client.get('beat_schedule')
                    registry_data = self.redis_client.get('task_registry')
                    
                    if beat_schedule_data and registry_data:
                        beat_schedule = json.loads(beat_schedule_data)
                        registry = json.loads(registry_data)
                        
                        # Every active registry entry should appear in beat_schedule.
                        active_task_ids = [task_id for task_id, info in registry.items() 
                                        if info.get('status') == 'active']
                        missing_tasks = [task_id for task_id in active_task_ids 
                                        if task_id not in beat_schedule]
                        
                        if missing_tasks:
                            logger.warning(f"警告: 以下活跃任务不在Redis的beat_schedule中: {missing_tasks}")
                        else:
                            logger.info(f"确认所有 {len(active_task_ids)} 个活跃任务都已在Redis的beat_schedule中")
                            
                        # Log the queue each persisted task targets.
                        for task_id, task_config in beat_schedule.items():
                            queue = task_config.get('options', {}).get('queue', 'celery')
                            logger.info(f"Redis中的任务 {task_id} 使用队列: {queue}")
            except Exception as e:
                logger.error(f"确认Redis配置更新失败: {str(e)}")
                
        except Exception as e:
            logger.warning(f"重新加载Beat服务失败: {str(e)}")
