import asyncio
from collections import OrderedDict
from datetime import datetime
from functools import wraps
from typing import Any, Dict, Optional, Tuple

from redis.exceptions import RedisError
from tortoise.transactions import in_transaction

from app.core.config import settings
from app.core.constants import ChangeType, TaskStatus
from app.core.redis_conn import redis_helper
from app.core.task.scheduler import Scheduler
from app.core.task.store.redis import RedisStore
from app.core.task.task_log import scheduler_logger
from app.models.job_model import JobChangeLogModel, JobExecutionLogModel, JobModel
from app.models.user_model import UserModel
from app.schemas.common import OrderSchema, PaginationParams, PaginationResult
from app.schemas.job import (
    ChangeLogFilters,
    ExecutionHistoryFilters,
    JobChangeLog,
    JobCreate,
    JobExecutionLog,
    JobGet,
    JobItem,
    JobListFilters,
    JobUpdate,
    SchedulerJob,
)
from app.core.task.time_wheel import RedisTimeWheel
from app.utils.timezone import now


# 自定义异常类
class TaskManagerException(Exception):
    """Base exception for all task-manager errors."""


class TaskNotFoundException(TaskManagerException):
    """Raised when the requested task does not exist."""


class TaskValidationError(TaskManagerException):
    """Raised when task data fails validation."""


class TaskSchedulingError(TaskManagerException):
    """Raised when a task cannot be scheduled."""


# 重试装饰器
# Retry decorator
def retry_on_redis_error(max_retries: int = 3, delay: float = 0.1):
    """Retry an async callable on RedisError with exponential backoff.

    Args:
        max_retries: total number of attempts (must be >= 1).
        delay: base sleep in seconds; attempt ``i`` sleeps ``delay * 2**i``.

    Raises:
        ValueError: if ``max_retries`` is less than 1 (decoration time).
        RedisError: the error from the final attempt, with its traceback intact.
    """
    # Fail fast at decoration time: with max_retries <= 0 the original code
    # skipped the loop entirely and did `raise None`, which is a TypeError.
    if max_retries < 1:
        raise ValueError("max_retries must be >= 1")

    def decorator(func):
        @wraps(func)
        async def wrapper(*args, **kwargs):
            for attempt in range(max_retries):
                try:
                    return await func(*args, **kwargs)
                except RedisError:
                    # Re-raise in place on the last attempt so the original
                    # traceback and exception context are preserved.
                    if attempt == max_retries - 1:
                        raise
                    await asyncio.sleep(delay * (2**attempt))

        return wrapper

    return decorator


class LRUCache:
    """
    Async-safe LRU cache with per-entry TTL.

    Entries are stored as ``(value, inserted_at)`` tuples in an OrderedDict;
    the least-recently-used entry sits at the front and is evicted first.
    All operations are serialized through a single asyncio.Lock.
    """

    def __init__(self, capacity: int, ttl: int):
        self._capacity = capacity
        self._ttl = ttl
        self._cache: OrderedDict[str, Tuple[Any, datetime]] = OrderedDict()
        self._lock = asyncio.Lock()
        self._hits = 0
        self._misses = 0

    async def get(self, key: str) -> Optional[Any]:
        """Return the cached value, or None if absent or expired."""
        async with self._lock:
            try:
                value, stored_at = self._cache[key]
            except KeyError:
                self._misses += 1
                return None

            age = (now() - stored_at).total_seconds()
            if age >= self._ttl:
                # Entry is stale: drop it and count as a miss.
                del self._cache[key]
                self._misses += 1
                return None

            # Mark as most recently used.
            self._cache.move_to_end(key)
            self._hits += 1
            return value

    async def put(self, key: str, value: Any) -> None:
        """Insert or replace an entry, evicting the LRU item when full."""
        async with self._lock:
            existing = self._cache.pop(key, None)
            if existing is None and len(self._cache) >= self._capacity:
                # At capacity: evict the least-recently-used entry.
                self._cache.popitem(last=False)

            self._cache[key] = (value, now())
            self._cache.move_to_end(key)

    async def delete(self, key: str) -> None:
        """Remove an entry if present; no-op otherwise."""
        async with self._lock:
            self._cache.pop(key, None)

    async def clear(self) -> None:
        """Drop every cached entry."""
        async with self._lock:
            self._cache.clear()

    async def get_stats(self) -> Dict[str, int]:
        """Return current size, capacity, and hit/miss counters."""
        async with self._lock:
            return {"size": len(self._cache), "capacity": self._capacity, "hits": self._hits, "misses": self._misses}


class TaskService:
    """
    Core service for managing scheduled jobs.

    Responsibilities:
    1. Sync jobs from the database into Redis so they run correctly in the Scheduler.
    2. CRUD operations on jobs stored in Redis.
    3. Lifecycle operations: pause, resume, terminate.
    4. Sync job execution records back to the database.

    # API surface
    - Job CRUD
        - create_job: create a job from a JobCreate payload
        - update_job: update a job from a JobUpdate payload
        - delete_job: delete a job by job_id
        - get_task / detail_job: fetch job detail by job_id
        - list_jobs: paginated job listing with filters
    - Sync
        - sync_db_with_redis: push DB jobs into Redis
        - sync_specified_job: sync one job to Redis (mirrors update_job)
    - Lifecycle
        - pause_job / resume_job: take a job_id and the operating user
    """

    def __init__(self):
        """Initialize the task manager and its Redis-backed collaborators.

        Raises:
            TaskManagerException: if any collaborator fails to initialize.
        """
        try:
            self.store = RedisStore()
            self.time_wheel = RedisTimeWheel()
            self.scheduler = Scheduler().get_instance()
            self.redis_client = redis_helper.get_connection(settings.TASK_REDIS_DB)
            # LRU cache for JobItem lookups: capacity 1000 entries, 300s TTL.
            self._task_cache = LRUCache(capacity=1000, ttl=300)
        except Exception as e:
            scheduler_logger.error(f"TaskManager初始化失败: {str(e)}", exc_info=True)
            raise TaskManagerException("TaskManager初始化失败") from e

    # Job CRUD
    @retry_on_redis_error()
    async def create_job(self, job_create: JobCreate, creator: UserModel) -> JobModel:
        """Create a new job; active jobs are also pushed to Redis and scheduled.

        Raises:
            TaskManagerException: on any failure (DB, Redis, scheduling).
        """
        try:
            async with in_transaction():
                # Persist the job record.
                job = await JobModel.create(**job_create.model_dump(), creator=creator)
                # Active jobs are mirrored to Redis and queued on the time wheel.
                if job.status == TaskStatus.ACTIVE:
                    job_schema = SchedulerJob.model_validate(job)
                    await self.store.add_job(job_schema)
                    next_run_time = await job_schema.trigger.get_next_fire_time(now())
                    if next_run_time:
                        await self.time_wheel.add_task(job.id, next_run_time)

                # Audit log: record only the fields the caller actually set.
                await self._record_action(
                    job_id=job.id,
                    change_type=ChangeType.CREATE,
                    details=job_create.model_dump(exclude_unset=True),
                    user=creator,
                )

                return job

        except Exception as e:
            scheduler_logger.error(f"创建任务失败: {str(e)}", exc_info=True)
            raise TaskManagerException("创建任务失败") from e

    @retry_on_redis_error()
    async def update_job(self, job_id: str, job_update: JobUpdate, operator: UserModel) -> JobModel:
        """Update a job, resync it with Redis, and audit the changed fields.

        Raises:
            TaskNotFoundException: if the job does not exist.
            TaskManagerException: on any other failure.
        """
        try:
            async with in_transaction():
                # Snapshot the job before the update so we can diff it.
                original_job = await self.get_task(job_id, use_cache=False)
                original_data = original_job.model_dump()

                # Only fields explicitly set by the caller.
                update_data = job_update.model_dump(exclude_unset=True)

                # Diff against the snapshot to find the fields that really changed.
                changed_fields = {}
                for field, new_value in update_data.items():
                    if field in original_data and original_data[field] != new_value:
                        changed_fields[field] = new_value

                # Apply the update. Guard against an empty payload: calling
                # .update() with no kwargs is invalid.
                if update_data:
                    await JobModel.filter(id=job_id).update(**update_data)
                updated_job = await self.get_task(job_id, use_cache=False)

                # Resync with Redis according to the new status.
                if updated_job.status == TaskStatus.ACTIVE:
                    job_schema = SchedulerJob.model_validate(updated_job)
                    await self.store.update_job(job_id, job_schema)
                    next_run_time = await job_schema.trigger.get_next_fire_time(now())
                    if next_run_time:
                        await self.time_wheel.add_task(job_id, next_run_time)
                else:
                    # Inactive jobs are removed from Redis and the time wheel.
                    await self.store.delete_job(job_id)
                    await self.time_wheel.remove_task(job_id)

                # Audit log: only the fields that actually changed.
                if changed_fields:
                    await self._record_action(
                        job_id=job_id, change_type=ChangeType.UPDATE, details=changed_fields, user=operator
                    )

                # Refresh the cache with the updated job.
                await self._task_cache.put(job_id, updated_job)
                return updated_job

        except TaskNotFoundException:
            raise
        except Exception as e:
            scheduler_logger.error(f"更新任务 {job_id} 失败: {str(e)}", exc_info=True)
            raise TaskManagerException("更新任务失败") from e

    @retry_on_redis_error()
    async def delete_job(self, job_id: str, operator: UserModel) -> bool:
        """Delete a job from Redis, the scheduler, and the database.

        Returns True on success, False on failure (logged, not raised).

        Raises:
            TaskNotFoundException: if the job does not exist.
        """
        try:
            job = await self.get_task(job_id)

            async with in_transaction():
                # Remove from Redis and the time wheel.
                await self.store.delete_job(job_id)
                await self.time_wheel.remove_task(job_id)

                # Cancel the job if it is currently running.
                await self.scheduler.cancel_job(job_id)

                # Audit log for the deletion.
                delete_details = {"name": job.name, "status": job.status, "deleted_at": now().isoformat()}
                await self._record_action(
                    job_id=job_id, change_type=ChangeType.DELETE, details=delete_details, user=operator
                )

                # Remove from the database.
                await job.delete()

                # Drop the cached entry.
                await self._task_cache.delete(job_id)

                return True

        except TaskNotFoundException:
            raise
        except Exception as e:
            scheduler_logger.error(f"删除任务 {job_id} 失败: {str(e)}", exc_info=True)
            return False

    async def get_task(self, job_id: str, use_cache: bool = True) -> JobItem:
        """Fetch a job from the database, preferring the LRU cache.

        Raises:
            TaskNotFoundException: if the job does not exist.
        """
        if use_cache:
            cached_job = await self._task_cache.get(job_id)
            if cached_job is not None:
                # Return a copy so callers can't mutate the cached object.
                return cached_job.model_copy()

        # Cache miss (or cache bypassed): hit the database.
        job = await JobModel.get_or_none(id=job_id)
        if not job:
            raise TaskNotFoundException(f"任务 {job_id} 不存在")

        job_item = JobItem.model_validate(job)
        # Populate the cache for subsequent lookups.
        if use_cache:
            await self._task_cache.put(job_id, job_item)
        return job_item

    async def detail_job(self, job_id: str) -> JobGet:
        """Return job detail including the 5 most recent execution and change logs."""
        job = await self.get_task(job_id)

        # Latest 5 execution records.
        recent_logs = await JobExecutionLogModel.filter(job_id=job_id).order_by("-created_at").limit(5).all()

        # Latest 5 change records.
        change_logs = await JobChangeLogModel.filter(job_id=job_id).order_by("-created_at").limit(5).all()

        return JobGet(**job.model_dump(), recent_logs=recent_logs, change_logs=change_logs)

    async def list_jobs(
        self, filters: JobListFilters, order: OrderSchema, pagination: PaginationParams
    ) -> PaginationResult[JobItem]:
        """Return a paginated, filtered job listing."""
        special_filter_conditions = {
            "name": {"name__icontains": filters.name},
        }
        return await self.list_items(
            model=JobModel,
            filters=filters,
            order=order,
            pagination=pagination,
            special_filter_conditions=special_filter_conditions,
            list_schema=PaginationResult[JobItem],
        )

    # Job synchronization
    @retry_on_redis_error()
    async def sync_db_with_redis(self) -> None:
        """Mirror active DB jobs into Redis and purge stale Redis entries.

        Raises:
            TaskManagerException: on any failure.
        """
        try:
            # All jobs that should be scheduled.
            active_tasks = await JobModel.filter(status=TaskStatus.ACTIVE).all()
            jobs = [SchedulerJob.model_validate(task) for task in active_tasks]

            # Job IDs currently present in Redis.
            redis_task_ids = await self.store.get_all_job_ids()

            # Push DB jobs that are missing from Redis.
            for task in jobs:
                if task.id not in redis_task_ids:
                    await self.store.add_job(task)
                    next_run_time = await task.trigger.get_next_fire_time(now())
                    if next_run_time:
                        await self.time_wheel.add_task(task.id, next_run_time)

            # Purge Redis jobs that no longer exist (or are inactive) in the DB.
            # Build the id set once instead of rebuilding a list per iteration.
            active_ids = {task.id for task in jobs}
            for task_id in redis_task_ids:
                if task_id not in active_ids:
                    await self.store.delete_job(task_id)
                    await self.time_wheel.remove_task(task_id)
                    await self.scheduler.cancel_job(task_id)

            scheduler_logger.info("数据同步完成")

        except Exception as e:
            scheduler_logger.error(f"数据同步失败: {str(e)}", exc_info=True)
            raise TaskManagerException("数据同步失败") from e

    @retry_on_redis_error()
    async def sync_specified_job(self, job_id: str) -> bool:
        """Sync a single job to Redis; mirrors update_job's Redis handling.

        Returns True on success, False on failure (logged, not raised).
        """
        try:
            job = await self.get_task(job_id)
            job_schema = SchedulerJob.model_validate(job)

            if job.status == TaskStatus.ACTIVE:
                await self.store.update_job(job_id, job_schema)
                # Use the validated scheduler schema's trigger, consistent
                # with create_job/update_job.
                next_run_time = await job_schema.trigger.get_next_fire_time(now())
                if next_run_time:
                    await self.time_wheel.add_task(job_id, next_run_time)
            else:
                # Inactive jobs are removed; the store's removal API is
                # delete_job (remove_job does not exist on the store).
                await self.store.delete_job(job_id)
                await self.time_wheel.remove_task(job_id)

            scheduler_logger.info(f"任务 {job_id} 同步完成")
            return True

        except Exception as e:
            scheduler_logger.error(f"同步任务 {job_id} 失败: {str(e)}", exc_info=True)
            return False

    async def _record_action(self, job_id: str, change_type: str, details: dict, user: UserModel) -> None:
        """Best-effort audit-log write; failures are logged, never raised."""
        try:
            await JobChangeLogModel.create(
                job_id=job_id, operator=user, change_type=change_type, changed_fields=details, created_at=now()
            )
        except Exception as e:
            scheduler_logger.error(f"记录任务操作失败: {str(e)}", exc_info=True)

    async def list_execution_history(
        self, filters: ExecutionHistoryFilters, order: OrderSchema, pagination: PaginationParams
    ) -> PaginationResult[JobExecutionLog]:
        """Return a paginated listing of job execution records."""
        special_filter_conditions = {
            "created_at__gte": {"created_at__gte": filters.start_time},
            "created_at__lte": {"created_at__lte": filters.end_time},
        }
        return await self.list_items(
            model=JobExecutionLogModel,
            filters=filters,
            order=order,
            pagination=pagination,
            special_filter_conditions=special_filter_conditions,
            # Pass the result schema explicitly, consistent with
            # list_jobs and list_change_logs.
            list_schema=PaginationResult[JobExecutionLog],
        )

    async def list_change_logs(
        self, filters: ChangeLogFilters, order: OrderSchema, pagination: PaginationParams
    ) -> PaginationResult[JobChangeLog]:
        """Return a paginated listing of job change records."""
        special_filter_conditions = {
            "created_at__gte": {"created_at__gte": filters.start_time},
            "created_at__lte": {"created_at__lte": filters.end_time},
        }
        return await self.list_items(
            model=JobChangeLogModel,
            filters=filters,
            order=order,
            pagination=pagination,
            special_filter_conditions=special_filter_conditions,
            list_schema=PaginationResult[JobChangeLog],
        )

    async def pause_job(self, job_id: str, operator: UserModel) -> bool:
        """Pause an active job; no-op if the job is not active."""
        job = await self.get_task(job_id)
        if job.status == TaskStatus.ACTIVE:
            job.status = TaskStatus.PAUSED
            job_update = JobUpdate.model_validate(job)
            await self.update_job(job_id, job_update, operator)
        return True

    async def resume_job(self, job_id: str, operator: UserModel) -> bool:
        """Resume a paused job; no-op if the job is not paused."""
        job = await self.get_task(job_id)
        if job.status == TaskStatus.PAUSED:
            job.status = TaskStatus.ACTIVE
            job_update = JobUpdate.model_validate(job)
            await self.update_job(job_id, job_update, operator)
        return True

    async def get_cache_stats(self) -> Dict[str, int]:
        """Expose the LRU cache's size/capacity/hit/miss statistics."""
        return await self._task_cache.get_stats()