"""
Celery分布式任务队列配置
负责处理异步任务和分布式消息传递
"""

import os
import logging
from celery import Celery
from celery.signals import setup_logging
from typing import Optional, Dict, Any
from datetime import timedelta

# Module-level logger
logger = logging.getLogger(__name__)

# Redis connection settings, overridable through environment variables
REDIS_HOST = os.getenv('REDIS_HOST', '192.168.64.100')
REDIS_PORT = int(os.getenv('REDIS_PORT', 6379))
REDIS_DB = int(os.getenv('REDIS_DB', 0))
REDIS_PASSWORD = os.getenv('REDIS_PASSWORD', '')

# Assemble the Redis URL, embedding the password segment only when one is set
_auth = f':{REDIS_PASSWORD}@' if REDIS_PASSWORD else ''
REDIS_URL = f'redis://{_auth}{REDIS_HOST}:{REDIS_PORT}/{REDIS_DB}'

# Create the Celery application instance.
# Broker and result backend share the same Redis instance; ``include``
# lists the modules containing task definitions so workers register
# them at startup.
celery_app = Celery(
    'maess',
    broker=REDIS_URL,
    backend=REDIS_URL,
    include=[
        'apps.agents.tasks',
        'apps.monitor.tasks',
        'apps.search.tasks',
        'app.core.task_scheduler'
    ]
)

# Celery configuration
celery_app.conf.update(
    # Serialization: JSON only (language-agnostic, no arbitrary code execution)
    task_serializer='json',
    accept_content=['json'],
    result_serializer='json',

    # Timezone: local Shanghai time; UTC normalization disabled
    timezone='Asia/Shanghai',
    enable_utc=False,

    # Task results
    result_expires=3600,  # results expire after 1 hour
    result_persistent=True,

    # Route tasks from each app package to its dedicated queue
    task_routes={
        'apps.agents.tasks.*': {'queue': 'agents'},
        'apps.monitor.tasks.*': {'queue': 'monitor'},
        'apps.search.tasks.*': {'queue': 'search'},
    },

    # Task execution
    task_acks_late=True,  # acknowledge only after the task finishes
    task_reject_on_worker_lost=True,  # reject (requeue) tasks if the worker dies
    worker_prefetch_multiplier=1,  # prefetch a single task per worker process
    worker_max_tasks_per_child=1000,  # recycle workers to bound memory growth

    # Concurrency
    worker_concurrency=4,  # number of worker processes
    worker_pool='prefork',  # process-based pool

    # Monitoring: emit task events (consumed by tools such as Flower)
    worker_send_task_events=True,
    task_send_sent_event=True,

    # Acknowledgement / state-tracking behavior
    task_acks_on_failure_or_timeout=False,  # do not ack failed or timed-out tasks
    task_track_started=True,  # report the STARTED state

    # Periodic (beat) schedule
    beat_schedule={
        'monitor-system-health': {
            'task': 'apps.monitor.tasks.monitor_system_health',
            'schedule': timedelta(minutes=1),  # every minute
            'options': {'queue': 'monitor'}
        },
        'cleanup-old-tasks': {
            'task': 'apps.monitor.tasks.cleanup_old_tasks',
            'schedule': timedelta(hours=1),  # every hour
            'options': {'queue': 'monitor'}
        },
        'update-search-index': {
            'task': 'apps.search.tasks.update_search_index',
            'schedule': timedelta(minutes=30),  # every 30 minutes
            'options': {'queue': 'search'}
        },
        # Refresh the knowledge-base index every 30 minutes
        'update-knowledge-index': {
            'task': 'app.core.task_scheduler.update_knowledge_index_task',
            'schedule': timedelta(minutes=30),
            'options': {'queue': 'default'}
        },
        # Clean up old logs daily
        'cleanup-old-logs': {
            'task': 'app.core.task_scheduler.cleanup_old_logs_task',
            'schedule': timedelta(days=1),
            'args': (7,),  # keep 7 days of logs
            'options': {'queue': 'default'}
        },
        # Optimize the task queue every 15 minutes
        'optimize-task-queue': {
            'task': 'app.core.task_scheduler.optimize_task_queue_task',
            'schedule': timedelta(minutes=15),
            'options': {'queue': 'default'}
        },
        # Pre-warm models hourly
        'prewarm-models': {
            'task': 'app.core.task_scheduler.prewarm_models_task',
            'schedule': timedelta(hours=1),
            'options': {'queue': 'default'}
        }
    },
    beat_scheduler='celery.beat.PersistentScheduler',  # persist schedule state to disk
    beat_schedule_filename='celerybeat-schedule',  # schedule state file

    # Logging
    worker_hijack_root_logger=False,  # keep our own root-logger configuration
    worker_log_color=True,  # colored worker log output

    # Security
    # NOTE(review): falls back to a hardcoded key when CELERY_SECURITY_KEY is
    # unset — ensure the environment variable is provided in production.
    security_key=os.getenv('CELERY_SECURITY_KEY', 'maess-secure-key-2024'),

    # Compress payloads to reduce broker/backend traffic
    task_compression='gzip',
    result_compression='gzip',
)


@setup_logging.connect
def setup_celery_logging(**kwargs):
    """Configure logging for Celery workers.

    Connected to Celery's ``setup_logging`` signal so this configuration
    is used instead of Celery's defaults. Logs go to both stderr and
    ``logs/celery.log``.
    """
    # Bug fix: FileHandler does not create missing directories and would
    # raise FileNotFoundError on a fresh checkout — ensure logs/ exists.
    os.makedirs('logs', exist_ok=True)
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
        handlers=[
            logging.StreamHandler(),
            logging.FileHandler('logs/celery.log', encoding='utf-8')
        ]
    )


class CeleryManager:
    """High-level helper around the Celery app for task management.

    Wraps common control-plane operations: submitting tasks, querying
    status, revoking tasks, and inspecting queues and workers. All
    methods log failures and return a safe fallback value rather than
    propagating broker errors, except ``submit_task`` which re-raises
    so callers know submission failed.
    """

    def __init__(self) -> None:
        self.app = celery_app
        self.logger = logging.getLogger(__name__)

    def submit_task(self, task_name: str, args: tuple = (), kwargs: Optional[dict] = None,
                    queue: str = 'default', priority: int = 5) -> str:
        """Submit a task to a queue.

        Args:
            task_name: Fully-qualified task name.
            args: Positional arguments for the task.
            kwargs: Keyword arguments for the task.
            queue: Target queue name.
            priority: Task priority (1-10, 10 is highest).

        Returns:
            The submitted task's ID.

        Raises:
            Exception: Re-raised when submission to the broker fails.
        """
        try:
            if kwargs is None:
                kwargs = {}

            # Build a signature so the queue/priority options travel with it
            task_signature = self.app.signature(
                task_name,
                args=args,
                kwargs=kwargs,
                queue=queue,
                priority=priority
            )

            # Dispatch asynchronously
            result = task_signature.apply_async()

            self.logger.info(f"任务已提交: {task_name}, 任务ID: {result.id}, 队列: {queue}")
            return result.id

        except Exception as e:
            self.logger.error(f"提交任务失败: {task_name}, 错误: {str(e)}")
            raise

    def get_task_status(self, task_id: str) -> Dict[str, Any]:
        """Return the status of a task.

        Args:
            task_id: Task ID.

        Returns:
            Dict with status, result (only when finished), traceback
            (only on failure) and completion time; on lookup failure a
            dict with ``status='ERROR'`` and the error message.
        """
        try:
            result = self.app.AsyncResult(task_id)

            return {
                'task_id': task_id,
                'status': result.status,
                'result': result.result if result.ready() else None,
                'traceback': result.traceback if result.failed() else None,
                'date_done': result.date_done.isoformat() if result.date_done else None
            }

        except Exception as e:
            self.logger.error(f"获取任务状态失败: {task_id}, 错误: {str(e)}")
            return {
                'task_id': task_id,
                'status': 'ERROR',
                'error': str(e)
            }

    def revoke_task(self, task_id: str, terminate: bool = False) -> bool:
        """Revoke a task.

        Args:
            task_id: Task ID.
            terminate: Forcefully terminate a running task.

        Returns:
            True when the revoke command was sent, False on error.
        """
        try:
            self.app.control.revoke(task_id, terminate=terminate)
            self.logger.info(f"任务已撤销: {task_id}, 强制终止: {terminate}")
            return True

        except Exception as e:
            self.logger.error(f"撤销任务失败: {task_id}, 错误: {str(e)}")
            return False

    def get_queue_info(self) -> Dict[str, Any]:
        """Collect queue statistics from the workers.

        Returns:
            Dict with active queues and scheduled/active/reserved/revoked
            tasks; on failure a dict with an ``error`` key.
        """
        try:
            inspector = self.app.control.inspect()

            stats: Dict[str, Any] = {
                'active_queues': {},
                'scheduled': {},
                'active': {},
                'reserved': {},
                'revoked': {}
            }

            # Each inspect call may return None when no worker replies;
            # keep the empty default in that case.
            for key, getter in (
                ('active_queues', inspector.active_queues),
                ('scheduled', inspector.scheduled),
                ('active', inspector.active),
                ('reserved', inspector.reserved),
                ('revoked', inspector.revoked),
            ):
                data = getter()
                if data:
                    stats[key] = data

            return stats

        except Exception as e:
            self.logger.error(f"获取队列信息失败: {str(e)}")
            return {'error': str(e)}

    def purge_queue(self, queue: Optional[str] = None) -> int:
        """Purge pending tasks.

        Args:
            queue: Queue name; None purges all queues.

        Returns:
            Number of purged tasks (0 on failure).
        """
        try:
            if queue:
                # Bug fix: Control.purge() accepts no per-queue argument —
                # the original ``purge(queue_name=queue)`` raised TypeError.
                # Purge the named queue directly at the broker channel level.
                with self.app.connection_for_write() as conn:
                    count = conn.default_channel.queue_purge(queue) or 0
                self.logger.info(f"队列已清空: {queue}, 清空任务数: {count}")
            else:
                count = self.app.control.purge()
                self.logger.info(f"所有队列已清空, 清空任务数: {count}")

            return count

        except Exception as e:
            self.logger.error(f"清空队列失败: {str(e)}")
            return 0

    def get_worker_status(self) -> Dict[str, Any]:
        """Collect worker status from the cluster.

        Returns:
            Dict with ping replies, worker stats and registered tasks;
            on failure a dict with an ``error`` key.
        """
        try:
            inspector = self.app.control.inspect()

            status: Dict[str, Any] = {
                'ping': {},
                'stats': {},
                'registered': {}
            }

            # As above, a None reply means no workers responded.
            for key, getter in (
                ('ping', inspector.ping),
                ('stats', inspector.stats),
                ('registered', inspector.registered),
            ):
                data = getter()
                if data:
                    status[key] = data

            return status

        except Exception as e:
            self.logger.error(f"获取Worker状态失败: {str(e)}")
            return {'error': str(e)}

# Global Celery manager instance shared across the application
celery_manager = CeleryManager()


if __name__ == '__main__':
    # Start a Celery worker in-process (for local testing only)
    celery_app.start()