import asyncio
import functools
import inspect
import os
from datetime import datetime, timezone
from typing import List

from apscheduler.events import JobExecutionEvent, EVENT_JOB_EXECUTED, EVENT_JOB_ERROR
from apscheduler.job import Job
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.triggers.cron import CronTrigger
from apscheduler.triggers.interval import IntervalTrigger
from fastapi import APIRouter, HTTPException, Body

from Logs.log_handle import get_log
from schemas.scheduled_tasks_schemas import ScheduledTaskVO, get_scheduled_task_list, ScheduledTaskDTO, \
    create_scheduled_task, update_scheduled_task, delete_scheduled_tasks, get_by_ids
from schemas.tool.tool_schemas import PageDTO
from tools.decorators import return_response

# Scheduler / router / logger module-level setup
scheduler = BackgroundScheduler(timezone="UTC")  # BackgroundScheduler runs jobs in a background thread, off the request path
router = APIRouter(prefix="/scheduled-tasks", tags=["调度任务管理"])
log = get_log(os.path.basename(__file__))


# logging.basicConfig(
#     level=logging.DEBUG,
#     format='[定时任务] - %(asctime)s - %(levelname)s - %(message)s - 文件: %(filename)s - 函数: %(funcName)s - 行号: %(lineno)d',
#     handlers=[logging.FileHandler('scheduler.log'), logging.StreamHandler()]
# )
#
# # 获取日志记录器
# logger = logging.getLogger('apscheduler')


# 获取调度任务列表接口
@router.post('/list')
@return_response()
async def res_get_scheduled_task_list(query: PageDTO):
    """
    Return the scheduled-task list with paging, sorting and filtering.

    Args:
        query: paging/sorting/filter parameters.

    Example request body::

        {
            "page": 1,                 # 1-based page number
            "page_size": 10,           # rows per page (max 100)
            "sort_by": "id",           # field to sort on
            "sort_order": "ASC",       # ASC or DESC
            "filters": {}              # optional filter conditions
        }
    """
    # Delegate straight to the schema-layer query helper.
    task_page = get_scheduled_task_list(query)
    return task_page


# 创建调度任务接口
@router.post('/create')
@return_response()
async def res_create_scheduled_task(scheduled_task: ScheduledTaskDTO):
    """
    Create a new scheduled task record.

    Args:
        scheduled_task: task payload to persist.

    Example request body::

        {
            "name": "任务1",
            "invoke_target": "target1",
            "invoke_args": {},          # arguments for the job, may be empty
            "cron_expr": "* * * * *",
            "interval_seconds": 60,
            "strategy": "simple",
            "status": 1
        }
    """
    # Persistence is handled entirely by the schema-layer helper.
    created = create_scheduled_task(scheduled_task)
    return created


# 更新调度任务接口
@router.post('/update')
@return_response()
async def res_update_scheduled_task(scheduled_task: ScheduledTaskVO):
    """
    Update an existing scheduled task, then re-sync the scheduler so the
    change takes effect immediately.

    Args:
        scheduled_task: the task fields to update (must carry ``id``).

    Example request body::

        {
            "id": 1,
            "name": "更新后的任务1",
            "invoke_target": "target2",
            "invoke_args": {},
            "cron_expr": "0 * * * *",
            "interval_seconds": 30,
            "strategy": "advanced",
            "status": 1
        }
    """
    update_scheduled_task(scheduled_task)
    # Propagate the DB change into APScheduler right away.
    await res_sync_scheduler_status()
    return "更新任务成功"


# 删除调度任务接口
@router.post('/delete')
@return_response()
async def res_delete_scheduled_task(task_ids: list[int]):
    """
    Soft-delete a batch of scheduled tasks.

    Args:
        task_ids: IDs of the tasks to delete, e.g. ``[1, 2, 3]``.

    Returns:
        Whatever ``delete_scheduled_tasks`` reports for the batch.
    """
    deletion_result = delete_scheduled_tasks(task_ids)
    return deletion_result


# 添加调度任务接口
@router.post('/add-job')
@return_response()
async def res_add_job(add_jobs: list[ScheduledTaskVO]):
    """
    Register one or more tasks with the APScheduler job queue.

    Each ``ScheduledTaskVO`` supplies the dotted path of the target callable,
    its positional/keyword arguments and a scheduling ``strategy``:

    - ``once``      — run a single time, now.
    - ``immediate`` — run now, then keep running on ``cron_expr`` if given.
    - ``scheduled`` — run on ``cron_expr``, else every ``interval_seconds``;
                      skipped silently when neither is set (original behavior).
    - ``abandon``   — cron schedule with ``misfire_grace_time=0`` so missed
                      runs are dropped instead of replayed.

    Example request body::

        [{
            "id": 1,
            "name": "task 1",
            "invoke_target": "pkg.module.func",
            "invoke_args": {},
            "invoke_kwargs": {},
            "cron_expr": "0 * * * *",
            "interval_seconds": 30,
            "strategy": "scheduled",
            "status": 1
        }]
    """
    for add_job in add_jobs:
        job_id = str(add_job.id)
        job_func = import_function(add_job.invoke_target)

        job_params = {
            "id": job_id,
            "name": add_job.name,
            "args": add_job.invoke_args.get("args", []),
            "kwargs": add_job.invoke_kwargs.get("kwargs", {}),
            "replace_existing": True
        }
        # BUGFIX: only forward next_run_time when it is actually set.
        # APScheduler interprets an explicit next_run_time=None as "add the
        # job paused", which silently kept such jobs from ever firing.
        if add_job.next_run_time is not None:
            job_params["next_run_time"] = add_job.next_run_time

        # Strategy decides how the job is scheduled; default is "scheduled".
        strategy = add_job.strategy or "scheduled"

        if strategy == "once":
            scheduler.add_job(job_func, trigger="date", run_date=datetime.now(timezone.utc), **job_params)

        elif strategy == "immediate":
            # BUGFIX: the one-shot "run now" job needs its own id. The original
            # reused job_id with replace_existing=True, so the cron registration
            # below replaced (i.e. cancelled) the immediate run.
            immediate_params = {**job_params, "id": f"{job_id}_immediate"}
            scheduler.add_job(job_func, trigger="date", run_date=datetime.now(timezone.utc), **immediate_params)
            if add_job.cron_expr:
                trigger = CronTrigger.from_crontab(add_job.cron_expr)
                scheduler.add_job(job_func, trigger=trigger, **job_params)

        elif strategy == "scheduled":
            if add_job.cron_expr:
                trigger = CronTrigger.from_crontab(add_job.cron_expr)
            elif add_job.interval_seconds:
                trigger = IntervalTrigger(seconds=add_job.interval_seconds)
            else:
                # Neither a cron expression nor an interval: nothing to schedule.
                continue
            scheduler.add_job(job_func, trigger=trigger, **job_params)

        elif strategy == "abandon":
            if add_job.cron_expr:
                trigger = CronTrigger.from_crontab(add_job.cron_expr)
                # Grace time 0: a missed run is discarded, never replayed.
                job_params["misfire_grace_time"] = 0
                scheduler.add_job(job_func, trigger=trigger, **job_params)

        else:
            log.warning(f"未知策略 {strategy}，任务 {job_id} 被跳过")

    return "定时任务添加成功"


# 移除调度任务接口
@router.post('/remove-job')
@return_response()
async def res_remove_job(job_ids: list[int]):
    """
    Remove a batch of jobs from the running scheduler and disable them in
    the database (status=0).

    Example request body::

        [1, 2, 3]

    Returns:
        dict with ``success`` / ``failed`` id lists and a summary message.
        Each id appears in exactly one of the two lists.
    """
    success_ids = []
    failed_ids = []

    for job_id in job_ids:
        try:
            # Remove from the scheduler first; if this raises we leave the
            # database row untouched.
            scheduler.remove_job(str(job_id))
            # Disable the task in the database.
            update_scheduled_task(ScheduledTaskVO(id=job_id, status=0))
            # BUGFIX: only record success once BOTH steps are done. The
            # original appended before the DB update, so a failing update put
            # the same id in both the success and failed lists.
            success_ids.append(job_id)
        except Exception as e:
            log.warning(f"移除任务 {job_id} 失败: {str(e)}")
            failed_ids.append(job_id)

    return {
        "success": success_ids,
        "failed": failed_ids,
        "message": f"成功删除 {len(success_ids)} 个任务，失败 {len(failed_ids)} 个"
    }


# 获取正在执行的调度任务列表接口
@router.post('/scheduled-tasks')
@return_response()
async def res_scheduled_tasks(query: PageDTO):
    """
    Return the jobs currently registered in APScheduler, paged and sorted.

    Example request body::

        {
            "page": 1,
            "page_size": 9999,
            "sort_by": "id",
            "sort_order": "ASC"
        }
    """
    all_jobs: List[Job] = scheduler.get_jobs()
    job_list = []

    # Snapshot each job into a plain dict for sorting/serialization.
    for job in all_jobs:
        job_info = {
            "id": job.id,
            "name": job.name,
            "next_run_time": job.next_run_time.strftime("%Y-%m-%d %H:%M:%S") if job.next_run_time else None,
            "trigger": str(job.trigger),
            "args": job.args,
            "kwargs": job.kwargs
        }
        job_list.append(job_info)

    sort_by = query.sort_by if query.sort_by else "id"  # default: sort by id
    sort_order = query.sort_order if query.sort_order else "ASC"  # default: ascending
    reverse = sort_order == "DESC"

    try:
        if sort_by == "id":
            # Job ids are stringified ints; sort them numerically.
            job_list.sort(key=lambda info: int(info.get(sort_by)), reverse=reverse)
        else:
            job_list.sort(key=lambda info: info.get(sort_by), reverse=reverse)
    except (TypeError, ValueError):
        # BUGFIX: the original caught KeyError, which dict.get never raises —
        # that handler was dead code. An unknown/unsortable field actually
        # surfaces as TypeError (comparing None / mixed types) or ValueError
        # (non-numeric id), which previously escaped as a 500.
        return {
            "code": 400,
            "message": f"无法按 {sort_by} 字段排序，字段无效",
            "data": None
        }

    # Manual paging based on PageDTO.
    page = query.page
    page_size = query.page_size
    start = (page - 1) * page_size
    end = start + page_size
    paged_jobs = job_list[start:end]

    return {
        "total": len(job_list),
        "list": paged_jobs,
        "page": page,
        "page_size": page_size
    }


# 暂停或恢复调度任务接口
@router.post('/pause-resume')
@return_response()
async def res_pause_resume(tasks: list[ScheduledTaskVO]):
    """
    Batch pause or resume scheduled tasks.

    - status=1: resume the job; if it is not registered in the scheduler it
      is added automatically from the task's trigger definition.
    - status=2: pause the job; a job missing from the scheduler cannot be
      paused and is reported as failed.

    Returns:
        dict with per-task ``success`` / ``failed`` entries and a summary.
    """
    success = []
    failed = []

    for task in tasks:
        job_id = str(task.id)
        job = scheduler.get_job(job_id)

        if task.status not in (1, 2):
            reason = f"任务状态不支持: {task.status}"
            log.warning(f"{reason} (任务 {job_id})")
            failed.append({"id": job_id, "reason": reason})
            continue

        if task.status == 1:
            # Resume; auto-add when the job is missing from the scheduler.
            if not job:
                try:
                    job_func = import_function(task.invoke_target)

                    if task.cron_expr:
                        trigger = CronTrigger.from_crontab(task.cron_expr)
                    elif task.interval_seconds:
                        trigger = IntervalTrigger(seconds=task.interval_seconds)
                    else:
                        raise ValueError("无有效调度表达式")

                    scheduler.add_job(
                        job_func,
                        trigger=trigger,
                        args=task.invoke_args.get("args", []),
                        kwargs=task.invoke_kwargs.get("kwargs", {}),
                        id=job_id,
                        name=task.name,
                        replace_existing=True
                    )

                    update_scheduled_task(task)
                    log.info(f"任务 {job_id} 不存在，已自动添加并恢复")
                    success.append({"id": job_id, "action": "任务不存在，已自动添加并恢复"})
                except Exception as e:
                    msg = f"任务 {job_id} 自动添加失败: {str(e)}"
                    log.error(msg)
                    failed.append({"id": job_id, "reason": msg})
                continue

            # Job exists: actually resume it.
            try:
                # BUGFIX: in the original, resume_job() sat inside the
                # `if not job:` branch after unconditional continues and was
                # unreachable — existing jobs were never resumed, only logged
                # and marked as success.
                scheduler.resume_job(job_id)
                log.info(f"任务 {job_id} 已恢复")
                update_scheduled_task(task)
                success.append({"id": job_id, "action": "已恢复"})
            except Exception as e:
                err_msg = f"恢复任务时出错: {str(e)}"
                log.error(f"{err_msg} (任务 {job_id})")
                failed.append({"id": job_id, "reason": err_msg})

        elif task.status == 2:
            # Pause; the job must already exist in the scheduler.
            if not job:
                msg = "任务不存在于调度器中，无法暂停"
                log.warning(f"{msg} (任务 {job_id})")
                failed.append({"id": job_id, "reason": msg})
                continue

            try:
                scheduler.pause_job(job_id)
                update_scheduled_task(task)
                log.info(f"任务 {job_id} 已暂停")
                success.append({"id": job_id, "action": "已暂停"})
            except Exception as e:
                err_msg = f"暂停任务时出错: {str(e)}"
                log.error(f"{err_msg} (任务 {job_id})")
                failed.append({"id": job_id, "reason": err_msg})

    return {
        "success": success,
        "failed": failed,
        "message": f"成功操作 {len(success)} 个任务，失败 {len(failed)} 个"
    }


@router.post('/sync-scheduler-status')
@return_response()
async def res_sync_scheduler_status():
    """
    Reconcile APScheduler's job table with the tasks stored in the database.

    Tasks with status=1 are (re)registered; any other status removes the job
    from the scheduler if it is currently registered. Tasks whose target
    cannot be imported or whose trigger definition is invalid are skipped
    with a warning.
    """
    try:
        log.info("开始同步任务状态...")

        # Fetch every task in one page (no real pagination).
        query = PageDTO(
            page_size=9999,
            page=1,
            sort_by="id",
            sort_order="ASC",
            filters={}
        )
        result = get_scheduled_task_list(query)

        db_tasks = result.get('result') or []
        total = result.get('total', 0)
        log.info(f"共获取到 {total} 条任务数据")

        if not db_tasks:
            return {"synced": 0, "message": "没有需要同步的任务"}

        # Ids of all jobs currently registered with the scheduler.
        existing_jobs = {job.id for job in scheduler.get_jobs()}

        synced = 0
        for task in db_tasks:
            job_id = str(task.id)

            try:
                job_func = import_function(task.invoke_target)
            except Exception as e:
                log.warning(f"任务 {job_id} 导入函数失败: {e}")
                continue

            # Build the trigger: cron takes precedence, then interval.
            if task.cron_expr:
                try:
                    trigger = CronTrigger.from_crontab(task.cron_expr)
                except Exception as e:
                    log.warning(f"任务 {job_id} cron 表达式无效: {e}")
                    continue
            elif task.interval_seconds:
                trigger = IntervalTrigger(seconds=task.interval_seconds)
            else:
                log.warning(f"任务 {job_id} 无有效触发器，跳过")
                continue

            if task.status == 1:
                log.info(f"添加/更新任务 {job_id}")
                scheduler.add_job(
                    job_func,
                    trigger=trigger,
                    args=task.invoke_args.get("args", []),
                    kwargs=task.invoke_kwargs.get("kwargs", {}),
                    id=job_id,
                    name=task.name,
                    replace_existing=True
                )
                synced += 1
            elif job_id in existing_jobs:
                log.info(f"移除任务 {job_id}")
                scheduler.remove_job(job_id)

        log.info(f"状态同步完成，共同步 {synced} 条任务")
        return {"synced": synced}

    except Exception as e:
        log.error(f"状态同步异常: {e}")
        raise


@router.post('/execute-now/')
@return_response()
async def execute_tasks_now(
        task_ids: list[int] = Body(...)

):
    """
    Execute the given tasks immediately and concurrently, bypassing the
    scheduler.

    Example request body::

        [1, 3]

    Returns:
        dict mapping ``success`` task ids to their results and ``failed``
        task ids to the error message.
    """
    tasks = get_by_ids(task_ids)
    coros = []

    for task in tasks:
        job_func = import_function(task.invoke_target)
        args = task.invoke_args.get("args", [])
        kwargs = task.invoke_kwargs.get("kwargs", {})

        if inspect.iscoroutinefunction(job_func):
            coros.append(job_func(*args, **kwargs))
        else:
            # Run sync callables in the default thread pool so they don't
            # block the event loop.
            coros.append(asyncio.to_thread(job_func, *args, **kwargs))

    # Run everything concurrently; exceptions come back as values.
    raw_results = await asyncio.gather(*coros, return_exceptions=True)
    success, failed = {}, {}
    for task, res in zip(tasks, raw_results):
        if isinstance(res, Exception):
            failed[task.id] = str(res)
            log.error(f"任务 {task.id} 执行失败：{res}")
        else:
            # BUGFIX: the original did res['data']['results'] unguarded, so
            # any task not returning that exact shape crashed the whole
            # endpoint AFTER the tasks had already run. Fall back to the raw
            # return value instead.
            try:
                success[task.id] = res['data']['results']
            except (TypeError, KeyError):
                success[task.id] = res
            log.info(f"任务 {task.id} 执行成功：{res}")

    return {"success": success, "failed": failed}


def run_async_func_and_return(func, args, kwargs):
    """
    Run coroutine function *func* to completion and return its result.

    Intended for worker threads (e.g. APScheduler executors) that have no
    running event loop.

    Args:
        func: a coroutine function.
        args: positional arguments tuple/list for *func*.
        kwargs: keyword arguments dict for *func*.

    Returns:
        Whatever the coroutine returns.
    """
    # asyncio.run() replaces the manual new_event_loop / set_event_loop /
    # run_until_complete / close dance of the original, and additionally
    # shuts down async generators before closing the loop.
    return asyncio.run(func(*args, **kwargs))


@functools.lru_cache(maxsize=128)
def import_function(invoke_target: str):
    """
    Resolve a dotted path ("pkg.module.func") to a callable.

    Coroutine functions are wrapped so they can be invoked synchronously
    (e.g. from an APScheduler worker thread) and still yield their return
    value. Results are LRU-cached, so repeated lookups of the same target
    are cheap.

    Args:
        invoke_target: dotted module path ending in the attribute name.

    Raises:
        HTTPException: 500 when the module or attribute cannot be imported.
    """
    import importlib  # local import: avoids touching the module's top-level import block

    try:
        module_path, func_name = invoke_target.rsplit(".", 1)
        # importlib.import_module is the documented replacement for the
        # __import__(module_path, fromlist=[...]) idiom used previously.
        func = getattr(importlib.import_module(module_path), func_name)
        log.info(f"成功导入函数: {invoke_target}")

        if inspect.iscoroutinefunction(func):
            log.info(f"目标函数 {invoke_target} 是异步函数，将用事件循环包装")
            # Wrap so the scheduler can call it like a plain function.
            return lambda *args, **kwargs: run_async_func_and_return(func, args, kwargs)

        log.info(f" 目标函数 {invoke_target} 是同步函数，直接返回")
        return func

    except (ImportError, AttributeError) as e:
        log.error(f" 函数导入失败: {e}")
        # Chain the original cause for easier debugging.
        raise HTTPException(status_code=500, detail="目标函数导入失败") from e


# ------------------ 调度器控制 ------------------
def start_scheduler():
    """Start the background scheduler unless it is already running."""
    if scheduler.running:
        log.info("调度器已在运行中")
        return
    scheduler.start()
    log.info("调度器已启动")


def stop_scheduler():
    """
    Shut the scheduler down safely.

    Waits for in-flight jobs to finish; any shutdown error is logged rather
    than propagated.
    """
    try:
        if not scheduler.running:
            log.info("调度器当前没有运行")
            return
        scheduler.shutdown(wait=True)
        log.info("调度器已安全停止")
    except Exception as e:
        log.error(f"调度器停止异常: {str(e)}")


def job_event_listener(event: JobExecutionEvent):
    """
    APScheduler callback fired after every job run (success or error).

    Builds a structured execution record from the event. When the job's
    return value is a dict carrying 'start_time'/'end_time' datetimes, they
    are used for the actual run time and duration; otherwise those fields
    degrade gracefully (run time falls back to "now" for non-dict results,
    runtime to None).
    """
    # Hoist the repeated isinstance/.get() probing of the return value.
    retval = event.retval if isinstance(event.retval, dict) else None
    start_time = retval.get('start_time') if retval is not None else None
    end_time = retval.get('end_time') if retval is not None else None

    log_data = {
        "job_id": event.job_id,
        "scheduled_run_time": event.scheduled_run_time,
        "actual_run_time": start_time if retval is not None else datetime.now(timezone.utc),
        "success": not bool(event.exception),
        "exception": str(event.exception) if event.exception else None,
        "runtime": (end_time - start_time).total_seconds() if start_time and end_time else None,
    }
    # BUGFIX: removed leftover debug output — a bare print() and a
    # log.info("1111111111111") marker — keeping only the structured record.
    log.info(log_data)
    # TODO: persist execution history, e.g.
    # save_execution_log(JobExecutionLogDTO(**log_data))


# Register the listener so job_event_listener runs after every job
# execution, both successful (EVENT_JOB_EXECUTED) and failed (EVENT_JOB_ERROR).
scheduler.add_listener(
    job_event_listener,
    EVENT_JOB_EXECUTED | EVENT_JOB_ERROR
)
