"""
基于APScheduler的任务调度器
提供类似xxl-job-admin的调度功能
支持K8s集群分布式锁避免重复执行
"""
import logging
import asyncio
import traceback
from typing import Dict, Any, Optional, Callable
from datetime import datetime
from uuid import UUID

from apscheduler.schedulers.asyncio import AsyncIOScheduler
from apscheduler.triggers.cron import CronTrigger
from apscheduler.triggers.interval import IntervalTrigger
from apscheduler.triggers.date import DateTrigger
from apscheduler.events import EVENT_JOB_EXECUTED, EVENT_JOB_ERROR, EVENT_JOB_MISSED
from apscheduler.job import Job
from apscheduler.executors.pool import ThreadPoolExecutor
from apscheduler.jobstores.memory import MemoryJobStore

from sqlalchemy.orm import Session
from app.database.base import get_db
from app.models.scheduled_job import JobStatus, JobType, ScheduledJob
from app.schemas.scheduled_job import JobLogCreate, ExecutionStatus, TriggerType
from app.services.scheduled_job import ScheduledJobService
from app.services.distributed_lock import DistributedLock, LockContext
from app.services.shard_task import shard_task_service

# 配置日志
logger = logging.getLogger(__name__)


class JobScheduler:
    """Cron-style job scheduler built on APScheduler.

    Mirrors the xxl-job-admin feature set: jobs are persisted in the
    database via ``ScheduledJobService``, mirrored into an in-memory
    AsyncIOScheduler, and executed either as single-node tasks guarded
    by a database-backed distributed lock, or as sharded tasks spread
    across the K8s cluster via ``shard_task_service``.
    """

    def __init__(self):
        # Created lazily by initialize(); None until then.
        self.scheduler: Optional[AsyncIOScheduler] = None
        self.job_service: ScheduledJobService = ScheduledJobService()
        # handler_name -> callable (sync or async) that runs the job body.
        self.job_handlers: Dict[str, Callable] = {}
        # str(job_id) -> log id of the execution currently in flight.
        self._running_jobs: Dict[str, UUID] = {}

    def initialize(self):
        """Create and configure the AsyncIOScheduler (idempotent)."""
        if self.scheduler is not None:
            return

        # Job store and executor configuration: jobs live only in memory
        # (the database is the source of truth; see load_all_jobs_from_db).
        jobstores = {
            'default': MemoryJobStore()
        }

        executors = {
            'default': ThreadPoolExecutor(20),
        }

        job_defaults = {
            'coalesce': False,
            'max_instances': 3,
            'misfire_grace_time': 30  # seconds of tolerated lateness before a run is "missed"
        }

        self.scheduler = AsyncIOScheduler(
            jobstores=jobstores,
            executors=executors,
            job_defaults=job_defaults,
            timezone='Asia/Shanghai'
        )

        # Listen for lifecycle events so completions/errors/misses get logged.
        self.scheduler.add_listener(self._job_executed_listener, EVENT_JOB_EXECUTED)
        self.scheduler.add_listener(self._job_error_listener, EVENT_JOB_ERROR)
        self.scheduler.add_listener(self._job_missed_listener, EVENT_JOB_MISSED)

        logger.info("任务调度器初始化完成")

    def start(self):
        """Start the scheduler, initializing it first if needed."""
        if self.scheduler is None:
            self.initialize()

        if not self.scheduler.running:
            self.scheduler.start()
            logger.info("任务调度器已启动")

    def shutdown(self):
        """Shut the scheduler down if it is running."""
        if self.scheduler and self.scheduler.running:
            self.scheduler.shutdown()
            logger.info("任务调度器已关闭")

    def register_handler(self, handler_name: str, handler_func: Callable):
        """Register (or replace) a job handler under *handler_name*."""
        self.job_handlers[handler_name] = handler_func
        logger.info(f"注册任务处理器: {handler_name}")

    def add_job_from_db(self, job: ScheduledJob) -> bool:
        """Add a persisted job record to the live scheduler.

        Returns True on success, False if the trigger could not be built
        or APScheduler rejected the job.
        """
        try:
            # Build the trigger from the job's type/config.
            trigger = self._create_trigger(job)
            if not trigger:
                logger.error(f"任务 {job.job_name} 触发器创建失败")
                return False

            # The DB primary key doubles as the APScheduler job id, so
            # re-adding the same job replaces the previous schedule.
            self.scheduler.add_job(
                func=self._execute_job,
                trigger=trigger,
                id=str(job.id),
                name=job.job_name,
                kwargs={
                    'job_id': job.id,
                    'handler_name': job.executor_handler,
                    'params': job.executor_params or {}
                },
                replace_existing=True
            )

            logger.info(f"任务 {job.job_name} 已添加到调度器")
            return True

        except Exception as e:
            logger.error(f"添加任务 {job.job_name} 失败: {str(e)}")
            return False

    def remove_job(self, job_id: UUID) -> bool:
        """Remove a job from the scheduler; False if it was not scheduled."""
        try:
            self.scheduler.remove_job(str(job_id))
            logger.info(f"任务 {job_id} 已从调度器移除")
            return True
        except Exception as e:
            logger.error(f"移除任务 {job_id} 失败: {str(e)}")
            return False

    def pause_job(self, job_id: UUID) -> bool:
        """Pause a scheduled job (keeps it registered, stops firing)."""
        try:
            self.scheduler.pause_job(str(job_id))
            logger.info(f"任务 {job_id} 已暂停")
            return True
        except Exception as e:
            logger.error(f"暂停任务 {job_id} 失败: {str(e)}")
            return False

    def resume_job(self, job_id: UUID) -> bool:
        """Resume a previously paused job."""
        try:
            self.scheduler.resume_job(str(job_id))
            logger.info(f"任务 {job_id} 已恢复")
            return True
        except Exception as e:
            logger.error(f"恢复任务 {job_id} 失败: {str(e)}")
            return False

    def trigger_job(self, job_id: UUID, params: Optional[Dict[str, Any]] = None) -> bool:
        """Fire a job immediately, outside its schedule.

        *params*, when given, overrides the job's stored executor params
        for this run only. Returns False if the job does not exist or
        the trigger could not be recorded.

        NOTE(review): asyncio.create_task requires a running event loop;
        this method is expected to be called from async (FastAPI) context.
        """
        try:
            db = next(get_db())
            job = self.job_service.get_job(db, job_id)
            if not job:
                logger.error(f"任务 {job_id} 不存在")
                return False

            # Record the manual trigger in the execution log first.
            log_data = JobLogCreate(
                job_id=job_id,
                trigger_type=TriggerType.MANUAL,
                executor_handler=str(job.executor_handler),  # type: ignore
                executor_params=params or (job.executor_params or {}),  # type: ignore
                trigger_time=datetime.now()
            )
            log = self.job_service.create_job_log(db, log_data)

            # Run the job in the background; don't block the caller.
            asyncio.create_task(self._execute_job_async(
                job_id=job_id,
                handler_name=str(job.executor_handler),  # type: ignore
                params=params or (job.executor_params or {}),  # type: ignore
                log_id=log.id  # type: ignore
            ))

            logger.info(f"手动触发任务 {job.job_name}")
            return True

        except Exception as e:
            logger.error(f"手动触发任务 {job_id} 失败: {str(e)}")
            return False

    def reload_job_from_db(self, job_id: UUID) -> bool:
        """Drop the live schedule and rebuild it from the DB record.

        Returns False when the job is missing or no longer ACTIVE, in
        which case it stays removed from the scheduler.
        """
        try:
            # Remove first so a deactivated job does not keep firing.
            self.remove_job(job_id)

            db = next(get_db())
            job = self.job_service.get_job(db, job_id)
            job_status_str = str(job.job_status) if job else None  # type: ignore
            if not job or job_status_str != JobStatus.ACTIVE.value:
                return False

            return self.add_job_from_db(job)

        except Exception as e:
            logger.error(f"重新加载任务 {job_id} 失败: {str(e)}")
            return False

    def load_all_jobs_from_db(self):
        """Load every ACTIVE job from the database into the scheduler."""
        try:
            db = next(get_db())
            active_jobs = self.job_service.get_active_jobs(db)

            success_count = 0
            for job in active_jobs:
                if self.add_job_from_db(job):
                    success_count += 1

            logger.info(f"从数据库加载任务完成，成功: {success_count}/{len(active_jobs)}")

        except Exception as e:
            logger.error(f"从数据库加载任务失败: {str(e)}")

    def get_scheduler_status(self) -> Dict[str, Any]:
        """Return a status snapshot: run state, job counts, next fire times."""
        if not self.scheduler:
            return {"status": "not_initialized"}

        jobs = self.scheduler.get_jobs()
        return {
            "status": "running" if self.scheduler.running else "stopped",
            "job_count": len(jobs),
            "running_jobs": len(self._running_jobs),
            "jobs": [
                {
                    "id": job.id,
                    "name": job.name,
                    "next_run_time": job.next_run_time.isoformat() if job.next_run_time else None
                }
                for job in jobs
            ]
        }

    def _create_trigger(self, job: ScheduledJob):
        """Build an APScheduler trigger from the job's type and config.

        Returns None (instead of raising) when the config is incomplete
        or the job type is unknown; callers treat None as a failure.
        """
        try:
            job_type_str = str(job.job_type)  # type: ignore

            if job_type_str == JobType.CRON.value:
                cron_expression = str(job.cron_expression) if job.cron_expression else None  # type: ignore
                if not cron_expression:
                    return None
                # Standard 5-field crontab syntax.
                return CronTrigger.from_crontab(cron_expression)

            elif job_type_str == JobType.INTERVAL.value:
                interval_seconds = int(job.interval_seconds) if job.interval_seconds else None  # type: ignore
                if not interval_seconds:
                    return None
                return IntervalTrigger(
                    seconds=interval_seconds,
                    start_date=job.start_time,  # type: ignore
                    end_date=job.end_time  # type: ignore
                )

            elif job_type_str == JobType.DATE.value:
                # One-shot job: start_time is the single run date.
                start_time = job.start_time  # type: ignore
                if not start_time:
                    return None
                return DateTrigger(run_date=start_time)

            return None

        except Exception as e:
            logger.error(f"创建触发器失败: {str(e)}")
            return None

    async def _execute_job(self, job_id: UUID, handler_name: str, params: Dict[str, Any]):
        """Scheduler entry point: create an execution log, then run the job."""
        db = next(get_db())
        log_data = JobLogCreate(
            job_id=job_id,
            trigger_type=TriggerType.AUTO,
            executor_handler=handler_name,
            executor_params=params,
            trigger_time=datetime.now()
        )
        log = self.job_service.create_job_log(db, log_data)

        await self._execute_job_async(job_id, handler_name, params, log.id)

    async def _execute_job_async(self, job_id: UUID, handler_name: str, params: Dict[str, Any], log_id: UUID):
        """Dispatch a run to sharded or single-node execution.

        Sharded mode is opted into via ``params['enable_sharding']``;
        otherwise the run is serialized cluster-wide with a distributed lock.
        """
        db = next(get_db())

        job = self.job_service.get_job(db, job_id)
        if not job:
            logger.error(f"任务 {job_id} 不存在，无法执行")
            return

        is_shard_task = params.get('enable_sharding', False)

        if is_shard_task:
            await self._execute_shard_task(job_id, handler_name, params, log_id)
        else:
            await self._execute_single_task(job_id, handler_name, params, log_id)

    async def _execute_shard_task(self, job_id: UUID, handler_name: str, params: Dict[str, Any], log_id: UUID):
        """Execute a sharded task via shard_task_service.

        Each node tries to claim shards; claiming none is not an error.
        """
        # BUGFIX: the original referenced an undefined local ``db`` in the
        # update_job_status calls below, raising NameError on every shard run.
        db = next(get_db())
        try:
            # Mark this job as in flight on this node.
            self._running_jobs[str(job_id)] = log_id

            self.job_service.update_job_status(db, job_id, JobStatus.RUNNING)

            logger.info(f"节点开始执行分片任务 {job_id}")

            success = await shard_task_service.execute_shard_task(
                job_id=job_id,
                handler_name=handler_name,
                params=params,
                log_id=log_id,
                max_shards=params.get('max_shards', 10)
            )

            if success:
                logger.info(f"分片任务 {job_id} 执行成功")
            else:
                logger.info(f"分片任务 {job_id} 未获取到分片，跳过执行")

        except Exception as e:
            logger.error(f"分片任务 {job_id} 执行失败: {str(e)}")

        finally:
            # Clear the in-flight marker and restore the job's status.
            if str(job_id) in self._running_jobs:
                del self._running_jobs[str(job_id)]

            self.job_service.update_job_status(db, job_id, JobStatus.ACTIVE)

    async def _execute_single_task(self, job_id: UUID, handler_name: str, params: Dict[str, Any], log_id: UUID):
        """Execute a non-sharded task, serialized cluster-wide.

        A distributed lock keyed on the job id ensures only one node runs
        the job; losers record a SKIPPED log entry and return.
        """
        db = next(get_db())
        lock_key = f"job:{job_id}"

        job = self.job_service.get_job(db, job_id)
        if not job:
            logger.error(f"任务 {job_id} 不存在，无法执行")
            return

        # Lock TTL = job timeout + 5 min buffer, floor of 10 min, so a
        # crashed holder's lock eventually expires.
        timeout_seconds = int(job.timeout_seconds) if job.timeout_seconds else 3600  # type: ignore
        lock_ttl = max(timeout_seconds + 300, 600)

        distributed_lock = DistributedLock(db)
        lock_token = distributed_lock.acquire_lock(
            lock_key=lock_key,
            ttl_seconds=lock_ttl,
            wait_timeout=5  # wait up to 5s before conceding to another node
        )

        if lock_token is None:
            # Another node holds the lock: skip this run, but log it.
            logger.info(f"任务 {job_id} 正在其他节点执行，跳过本次执行")

            self.job_service.update_job_log(db, log_id, {
                "start_time": datetime.now(),
                "end_time": datetime.now(),
                "status": "SKIPPED",
                "result_code": 0,
                "result_msg": "任务已在其他节点执行，跳过本次执行"
            })
            return

        try:
            # Mark this job as in flight on this node.
            self._running_jobs[str(job_id)] = log_id

            self.job_service.update_job_status(db, job_id, JobStatus.RUNNING)

            self.job_service.update_job_log(db, log_id, {
                "start_time": datetime.now(),
                "status": ExecutionStatus.RUNNING
            })

            logger.info(f"节点开始执行任务 {job_id}，锁令牌: {lock_token[:8]}...")

            handler = self.job_handlers.get(handler_name)
            if not handler:
                raise Exception(f"未找到处理器: {handler_name}")

            # Support both coroutine and plain-function handlers.
            if asyncio.iscoroutinefunction(handler):
                result = await handler(**params)
            else:
                result = handler(**params)

            self.job_service.complete_job_execution(
                db, log_id, ExecutionStatus.SUCCESS, 0, str(result)
            )

            logger.info(f"任务 {job_id} 执行成功")

        except Exception as e:
            # Record the failure with the full traceback for diagnosis.
            error_msg = str(e)
            error_log = traceback.format_exc()

            self.job_service.complete_job_execution(
                db, log_id, ExecutionStatus.FAILED, -1, error_msg, None, error_log
            )

            logger.error(f"任务 {job_id} 执行失败: {error_msg}")

        finally:
            # Clear the in-flight marker, restore status, release the lock.
            if str(job_id) in self._running_jobs:
                del self._running_jobs[str(job_id)]

            self.job_service.update_job_status(db, job_id, JobStatus.ACTIVE)

            if lock_token:
                released = distributed_lock.release_lock(lock_key, lock_token)
                if released:
                    logger.info(f"任务 {job_id} 锁已释放")
                else:
                    logger.warning(f"任务 {job_id} 锁释放失败")

    def _job_executed_listener(self, event):
        """APScheduler listener: a job finished normally."""
        logger.debug(f"任务执行完成: {event.job_id}")

    def _job_error_listener(self, event):
        """APScheduler listener: a job raised an exception."""
        logger.error(f"任务执行出错: {event.job_id}, 异常: {event.exception}")

    def _job_missed_listener(self, event):
        """APScheduler listener: a run was missed (past misfire grace time)."""
        logger.warning(f"任务错过执行: {event.job_id}")


# Module-level singleton scheduler, shared by the app and the helper
# functions below (register_builtin_handlers / start_scheduler / stop_scheduler).
job_scheduler = JobScheduler()


# ==================== 内置任务处理器 ====================

async def demo_task_handler(**kwargs):
    """Async demo handler: simulates 2s of work, then reports its kwargs."""
    # Removed an unused local ``import time`` — only asyncio.sleep is used here.
    await asyncio.sleep(2)  # simulate the task doing work
    return f"演示任务执行成功，参数: {kwargs}"


def sync_demo_task_handler(**kwargs):
    """Blocking demo handler: simulates 1s of work, then reports its kwargs."""
    import time

    # Simulate a unit of blocking work before reporting success.
    time.sleep(1)
    return f"同步演示任务执行成功，参数: {kwargs}"


def device_health_check_handler(**kwargs):
    """Device health-check handler stub: echoes the target device id.

    Real health-check logic would go here; currently a placeholder.
    """
    target = kwargs.get('device_id')  # may be None when not supplied
    return f"设备 {target} 健康检查完成"


def data_backup_handler(**kwargs):
    """Data-backup handler stub: reports the requested backup type.

    Real backup logic would go here; currently a placeholder.
    """
    kind = kwargs.get('backup_type', 'full')  # defaults to a full backup
    return f"数据备份完成，类型: {kind}"


def report_generation_handler(**kwargs):
    """Report-generation handler stub: reports the requested report type.

    Real report generation would go here; currently a placeholder.
    """
    kind = kwargs.get('report_type', 'daily')  # defaults to the daily report
    return f"报表生成完成，类型: {kind}"


# 注册内置处理器
def register_builtin_handlers():
    """Register every built-in handler with the global scheduler instance."""
    builtin = (
        ("demo_task", demo_task_handler),
        ("sync_demo_task", sync_demo_task_handler),
        ("device_health_check", device_health_check_handler),
        ("data_backup", data_backup_handler),
        ("report_generation", report_generation_handler),
    )
    for name, handler in builtin:
        job_scheduler.register_handler(name, handler)

    # Shard handlers live in their own module; imported here (not at module
    # top) exactly as in the original, keeping the lazy-import behavior.
    from app.services.shard_task import register_builtin_shard_handlers
    register_builtin_shard_handlers()


# 调度器启动和关闭函数
def start_scheduler():
    """Bring the global scheduler fully online.

    Order matters: initialize the scheduler, register handlers so loaded
    jobs can resolve them, start ticking, then load persisted jobs.
    """
    job_scheduler.initialize()
    register_builtin_handlers()
    job_scheduler.start()
    job_scheduler.load_all_jobs_from_db()


def stop_scheduler():
    """Shut down the global scheduler (no-op if it is not running)."""
    job_scheduler.shutdown()