# encoding: utf-8
# @File  : task_scheduler.py
# @Author: shaoyun
# @Date  :  2025/05/11
import asyncio
import json
import logging
import uuid
from datetime import datetime
from typing import Dict, Optional, Any, Type

from croniter import croniter
from sqlalchemy.orm import Session

from app.db.redis_manager import RedisClient
from app.models.node import Node, NodeStatus
from app.models.task import Task, TaskStatus, TaskNodeAssociation
from app.services.ai_optimizer import AIOptimizer

logger = logging.getLogger(__name__)


class TaskScheduler:
    """Task scheduler.

    Creates tasks, dispatches them to worker nodes via a Redis queue,
    expands cron schedules into runs, and retries failed tasks.
    """

    # Maximum number of automatic retries for a failed task.
    MAX_RETRIES = 3

    @staticmethod
    async def create_task(
            db: Session,
            user_id: str,
            spider_id: str,
            name: str,
            parameters: Optional[Dict[str, Any]] = None,
            priority: int = 0,
            cron: Optional[str] = None
    ) -> Task:
        """Create a new task.

        One-off tasks (no ``cron``) are pushed onto the Redis queue
        immediately; cron tasks wait for the scheduler loop to enqueue
        them when due.

        Args:
            db: Database session.
            user_id: Owner of the task.
            spider_id: Spider this task will run.
            name: Human-readable task name.
            parameters: Optional task parameters, persisted as JSON.
            priority: Scheduling priority (default 0).
            cron: Optional cron expression for recurring execution.

        Returns:
            The persisted Task instance.
        """
        task_id = str(uuid.uuid4())
        # NOTE: an empty dict is stored as NULL, same as "no parameters".
        params_json = json.dumps(parameters) if parameters else None

        task = Task(
            id=task_id,
            user_id=user_id,
            spider_id=spider_id,
            name=name,
            parameters=params_json,
            priority=priority,
            cron=cron,
            status=TaskStatus.PENDING
        )

        db.add(task)
        db.commit()
        db.refresh(task)

        if not cron:  # non-cron task: enqueue right away
            RedisClient.push_task(task_id)
            logger.info(f"Task {task_id} added to queue")
        else:
            logger.info(f"Cron task {task_id} created with schedule: {cron}")

        return task

    @staticmethod
    async def get_task(db: Session, task_id: str) -> Optional[Task]:
        """Return the task with the given id, or None if it does not exist."""
        return db.query(Task).filter(Task.id == task_id).first()

    @staticmethod
    async def get_tasks(
            db: Session,
            user_id: Optional[str] = None,
            spider_id: Optional[str] = None,
            status: Optional[str] = None,
            skip: int = 0,
            limit: int = 100
    ) -> list[Task]:
        """List tasks, newest first, with optional filters and pagination.

        An unrecognized ``status`` string is logged and ignored rather
        than raising, so the remaining filters still apply.
        """
        query = db.query(Task)

        if user_id:
            query = query.filter(Task.user_id == user_id)

        if spider_id:
            query = query.filter(Task.spider_id == spider_id)

        if status:
            try:
                query = query.filter(Task.status == TaskStatus(status))
            except ValueError:
                logger.warning(f"Invalid task status: {status}")

        return query.order_by(Task.created_at.desc()).offset(skip).limit(limit).all()

    @staticmethod
    async def update_task_status(db: Session, task_id: str, status: TaskStatus) -> bool:
        """Set a task's status and maintain its start/finish timestamps.

        Returns:
            True on success, False when the task does not exist.
        """
        task = db.query(Task).filter(Task.id == task_id).first()
        if not task:
            return False

        task.status = status

        # Timestamps stay naive UTC to remain consistent with existing rows.
        if status == TaskStatus.RUNNING:
            task.started_at = datetime.utcnow()
        elif status in (TaskStatus.COMPLETED, TaskStatus.FAILED, TaskStatus.CANCELED):
            task.finished_at = datetime.utcnow()

        db.commit()
        return True

    @staticmethod
    async def cancel_task(db: Session, task_id: str) -> bool:
        """Cancel a task.

        Only tasks still in PENDING state can be cancelled; returns False
        for missing tasks or tasks in any other state.
        """
        task = db.query(Task).filter(Task.id == task_id).first()
        if not task:
            return False

        if task.status != TaskStatus.PENDING:
            return False

        task.status = TaskStatus.CANCELED
        task.finished_at = datetime.utcnow()
        db.commit()
        return True

    @staticmethod
    async def assign_task(db: Session, task_id: str, node_id: str) -> bool:
        """Assign a task to a specific node and mark it RUNNING.

        Returns False when either side is missing or the node is offline.
        """
        task = db.query(Task).filter(Task.id == task_id).first()
        node = db.query(Node).filter(Node.id == node_id).first()

        if not task or not node:
            return False

        # Only online nodes may receive work.
        if node.status != NodeStatus.ONLINE:
            return False

        # Record the task/node association.
        association = TaskNodeAssociation(
            task_id=task_id,
            node_id=node_id
        )

        db.add(association)
        db.commit()

        await TaskScheduler.update_task_status(db, task_id, TaskStatus.RUNNING)

        logger.info(f"Task {task_id} assigned to node {node_id}")
        return True

    @staticmethod
    async def schedule_pending_tasks(db: Session) -> int:
        """Enqueue every pending one-off (non-cron) task.

        NOTE(review): calling this repeatedly re-pushes tasks that are
        still PENDING — confirm the queue deduplicates or workers
        tolerate duplicate entries.
        """
        tasks = db.query(Task).filter(Task.status == TaskStatus.PENDING, Task.cron.is_(None)).all()
        scheduled_count = 0

        for task in tasks:
            RedisClient.push_task(task.id)
            scheduled_count += 1

        logger.info(f"Scheduled {scheduled_count} pending tasks")
        return scheduled_count

    @staticmethod
    async def process_cron_tasks(db: Session) -> int:
        """Enqueue cron tasks whose next run time has arrived.

        A COMPLETED cron task is cloned into a fresh PENDING task so the
        finished run keeps its history; a still-PENDING cron task is
        enqueued directly.

        Returns:
            The number of tasks scheduled in this pass.
        """
        now = datetime.utcnow()
        cron_tasks = db.query(Task).filter(Task.cron.isnot(None), Task.status.in_([
            TaskStatus.PENDING, TaskStatus.COMPLETED
        ])).all()

        scheduled_count = 0
        for task in cron_tasks:
            if not croniter.is_valid(task.cron):
                logger.warning(f"Invalid cron expression for task {task.id}: {task.cron}")
                continue

            # Compute the next fire time from the last finish (or creation).
            cron = croniter(task.cron, task.finished_at or task.created_at)
            next_run = cron.get_next(datetime)

            if next_run <= now:
                if task.status == TaskStatus.COMPLETED:
                    # Completed run: clone it so the new run gets its own record.
                    new_task = TaskScheduler.copy_cron_task(db, task)
                    RedisClient.push_task(new_task.id)
                    logger.info(f"New cron task {new_task.id} created and scheduled")
                elif task.status == TaskStatus.PENDING:
                    RedisClient.push_task(task.id)
                    logger.info(f"Cron task {task.id} scheduled")
                scheduled_count += 1

        return scheduled_count

    @staticmethod
    def copy_cron_task(db: Session, task: Task) -> Task:
        """Clone a cron task into a new PENDING task for the next run."""
        new_task_id = str(uuid.uuid4())
        new_task = Task(
            id=new_task_id,
            user_id=task.user_id,
            spider_id=task.spider_id,
            name=f"{task.name} (scheduled)",
            parameters=task.parameters,
            priority=task.priority,
            cron=task.cron,
            status=TaskStatus.PENDING
        )

        db.add(new_task)
        db.commit()
        db.refresh(new_task)
        return new_task

    @staticmethod
    async def find_suitable_node(db: Session, task_id: str) -> Optional[str]:
        """Pick a node to run the given task on.

        Prefers the AI optimizer's recommendation; otherwise falls back
        to the online node with the lowest CPU usage.

        Returns:
            A node id, or None when no node is online.
        """
        nodes = db.query(Node).filter(Node.status == NodeStatus.ONLINE).all()
        if not nodes:
            logger.warning("No online nodes available")
            return None

        # First ask the AI optimizer for a recommendation.
        recommended_node_id = await AIOptimizer.recommend_node_for_task(task_id, [node.id for node in nodes])
        if recommended_node_id:
            logger.info(f"AI optimizer recommended node {recommended_node_id} for task {task_id}")
            return recommended_node_id

        # Fallback: least-loaded node (O(n) min instead of a full sort).
        return min(nodes, key=lambda node: node.cpu_usage).id

    @staticmethod
    async def process_failed_tasks(db: Session) -> int:
        """Retry failed tasks up to ``MAX_RETRIES`` times.

        The retry counter lives inside the task's JSON parameters under
        the ``retry_count`` key.

        Returns:
            The number of tasks re-queued.
        """
        failed_tasks = db.query(Task).filter(Task.status == TaskStatus.FAILED).all()
        retry_count = 0

        for task in failed_tasks:
            # Parse the parameters once; the retry counter is stored there.
            params = json.loads(task.parameters) if task.parameters else {}
            retries = params.get("retry_count", 0)

            if retries < TaskScheduler.MAX_RETRIES:
                params["retry_count"] = retries + 1
                task.parameters = json.dumps(params)

                # Flip back to PENDING and re-enqueue.
                task.status = TaskStatus.PENDING
                db.commit()
                RedisClient.push_task(task.id)
                retry_count += 1
                logger.info(f"Failed task {task.id} rescheduled for retry {retries + 1}/{TaskScheduler.MAX_RETRIES}")

        return retry_count

    @staticmethod
    async def distribute_task(db: Session, task_id: str) -> bool:
        """Route a task to a suitable node; returns False if none is found."""
        node_id = await TaskScheduler.find_suitable_node(db, task_id)
        if not node_id:
            logger.warning(f"No suitable node found for task {task_id}")
            return False

        return await TaskScheduler.assign_task(db, task_id, node_id)

    @staticmethod
    async def run_scheduler_loop():
        """Background loop: each minute enqueue due cron tasks and retry failures.

        Runs forever; on error it logs and backs off briefly before retrying.
        """
        # Local import avoids a circular dependency at module load time.
        from app.db.mysql import SessionLocal

        while True:
            try:
                db = SessionLocal()
                try:
                    cron_count = await TaskScheduler.process_cron_tasks(db)
                    if cron_count > 0:
                        logger.info(f"Processed {cron_count} cron tasks")

                    retry_count = await TaskScheduler.process_failed_tasks(db)
                    if retry_count > 0:
                        logger.info(f"Rescheduled {retry_count} failed tasks")
                finally:
                    # BUG FIX: previously the session leaked whenever an
                    # iteration raised, because close() was success-path only.
                    db.close()
                await asyncio.sleep(60)  # run once per minute
            except Exception as e:
                logger.error(f"Error in scheduler loop: {e}")
                await asyncio.sleep(10)  # short back-off after an error