"""
---------------------------------------------------------------------
- File: d1.py
- Date: 2025/5/28
- Author: yangzhide
- Email: xxzjyzd@126.com
- Description:
---------------------------------------------------------------------
"""
import asyncio
import logging
from datetime import datetime, timezone
from functools import partial # 用于包装带参数的函数给APScheduler
from typing import Dict, List, Any, Optional

from fastapi import FastAPI, HTTPException
from pydantic import BaseModel, Field
from apscheduler.schedulers.asyncio import AsyncIOScheduler
from apscheduler.triggers.cron import CronTrigger
from apscheduler.triggers.interval import IntervalTrigger
from apscheduler.jobstores.base import JobLookupError
from contextlib import asynccontextmanager

# --- Configuration ---
# Root logging setup: timestamped, per-module logger names.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
APS_SCHEDULER_TIMEZONE = "Asia/Shanghai"  # scheduler timezone; change to your target timezone

# --- Mock message-queue publisher ---
class MessageQueuePublisher:
    """Stand-in for a real broker client (RabbitMQ, Kafka, Redis Streams, ...)."""

    async def publish_task(self, exchange_name: str, routing_key: str, task_id: str):
        """Log the would-be publish and simulate a short async round-trip."""
        # A production implementation would talk to the actual broker here.
        logger.info(f"向消息队列发布: Exchange='{exchange_name}', RoutingKey='{routing_key}', TaskID='{task_id}'")
        # Pretend there is a little network latency.
        await asyncio.sleep(0.01)

# Instantiate a publisher (could be a singleton or provided via dependency injection)
mq_publisher = MessageQueuePublisher()

# --- Task-configuration data models ---
class DBConnectionConfig(BaseModel):
    """Connection parameters for one database endpoint."""
    host: str
    port: int
    user: str
    password: str  # NOTE(review): should be handled more securely in a real application (secrets manager / env)
    database: str

class TaskConfig(BaseModel):
    """Full definition of one scheduled data-sync task (source/target DBs, SQL, trigger, MQ routing)."""
    id: str = Field(..., description="任务的唯一ID, 例如 'erp_to_tpl_sales_orders'")
    description: Optional[str] = None
    source_db: DBConnectionConfig
    target_db: DBConnectionConfig
    source_sql: str
    target_insert_sql: str
    target_update_sql: Optional[str] = None  # optional write-back SQL
    trigger_type: str = Field(..., description="APScheduler触发器类型: 'interval' 或 'cron'")
    trigger_args: Dict[str, Any] = Field(..., description="触发器参数, 例如 {'seconds': 60} 或 {'hour': 1, 'minute': 30}")
    is_enabled_on_startup: bool = True  # schedule this task automatically at application startup?
    mq_exchange: str = "tasks_exchange"  # message-queue exchange name
    mq_routing_key_prefix: str = "tasks.trigger"  # routing-key prefix; full key is "<prefix>.<task id>"

# --- Mock task-config store (in a real application this would come from a database or config file) ---
TASK_CONFIGURATIONS: Dict[str, TaskConfig] = {
    "erp_to_tpl_sales_orders": TaskConfig(
        id="erp_to_tpl_sales_orders",
        description="从ERP同步销售订单到TPL系统",
        source_db={"host": "erp_db_host", "port": 5432, "user": "erp_user", "password": "erp_password", "database": "erp_db"},
        target_db={"host": "tpl_db_host", "port": 3306, "user": "tpl_user", "password": "tpl_password", "database": "tpl_db"},
        source_sql="SELECT id, order_data FROM erp_sales_orders WHERE status='new';",
        target_insert_sql="INSERT INTO tpl_incoming_orders (erp_order_id, data) VALUES (%s, %s);",
        trigger_type="interval",
        trigger_args={"seconds": 300},  # every 5 minutes
        is_enabled_on_startup=True,
        mq_exchange="erp_tpl_exchange",
        mq_routing_key_prefix="erp.tpl.sales_orders"
    ),
    "tpl_to_wms_shipments": TaskConfig(
        id="tpl_to_wms_shipments",
        description="从TPL同步发货信息到WMS系统",
        source_db={"host": "tpl_db_host", "port": 3306, "user": "tpl_user", "password": "tpl_password", "database": "tpl_db"},
        target_db={"host": "wms_db_host", "port": 1433, "user": "wms_user", "password": "wms_password", "database": "wms_db"},
        source_sql="SELECT shipment_id, details FROM tpl_shipments WHERE status='ready_to_ship';",
        target_insert_sql="INSERT INTO wms_shipment_requests (tpl_shipment_id, shipment_data) VALUES (%s, %s);",
        trigger_type="cron",
        trigger_args={"hour": "2", "minute": "0"},  # daily at 02:00
        is_enabled_on_startup=True
    ),
    "wms_to_tpl_inventory_update": TaskConfig(
        id="wms_to_tpl_inventory_update",
        description="从WMS回写库存到TPL系统",
        source_db={"host": "wms_db_host", "port": 1433, "user": "wms_user", "password": "wms_password", "database": "wms_db"},
        target_db={"host": "tpl_db_host", "port": 3306, "user": "tpl_user", "password": "tpl_password", "database": "tpl_db"},
        source_sql="SELECT product_id, quantity FROM wms_inventory_changes WHERE last_updated > DATE_SUB(NOW(), INTERVAL 1 HOUR);",
        target_insert_sql="-- This might be an update rather than insert, placeholder --",
        target_update_sql="UPDATE tpl_product_stock SET quantity = %s WHERE wms_product_id = %s;",  # assumes a write-back path exists
        trigger_type="interval",
        trigger_args={"minutes": 15},
        is_enabled_on_startup=False  # not scheduled at startup by default
    ),
    # ... more task types (tpl-erp, other) can be added here
}

# --- APScheduler instance ---
# Single process-wide async scheduler; jobs run on the asyncio event loop.
scheduler = AsyncIOScheduler(timezone=APS_SCHEDULER_TIMEZONE)

# --- Function executed by the scheduler ---
async def send_task_id_to_mq(task_config_id: str):
    """
    Invoked by APScheduler: publish the given task-config ID to the message queue.

    Looks up the config in TASK_CONFIGURATIONS, builds the routing key
    "<prefix>.<task id>", and delegates to the module-level ``mq_publisher``.
    Publish failures are logged but never propagated, so a broken broker
    cannot crash the scheduler's executor.
    """
    task_config = TASK_CONFIGURATIONS.get(task_config_id)
    if not task_config:
        logger.error(f"APScheduler: 尝试为不存在的任务配置ID '{task_config_id}' 发送消息")
        return

    routing_key = f"{task_config.mq_routing_key_prefix}.{task_config.id}"
    logger.info(f"APScheduler 触发: 准备发送任务ID '{task_config_id}' 到 MQ (Exchange: {task_config.mq_exchange}, Key: {routing_key})")
    try:
        await mq_publisher.publish_task(
            exchange_name=task_config.mq_exchange,
            routing_key=routing_key,  # task-specific routing key
            task_id=task_config_id
        )
        logger.info(f"APScheduler 成功发送任务ID '{task_config_id}' 到 MQ.")
    except Exception as e:
        # logger.exception keeps the traceback that plain logger.error discarded.
        logger.exception(f"APScheduler 发送任务ID '{task_config_id}' 到 MQ 失败: {e}")

# --- Helper: load task configs and (re)schedule them ---
def load_and_schedule_tasks_from_config(task_configs: Dict[str, TaskConfig]):
    """
    Register every enabled task config with the module-level scheduler.

    For each config: any previously scheduled job with the same ID is removed
    first (this also unschedules tasks whose config was switched to disabled),
    then a fresh job is added with the configured interval/cron trigger.

    A faulty individual config (unknown trigger type, invalid trigger_args)
    is logged and skipped so that one bad entry cannot abort scheduling of
    the remaining tasks — previously a bad config raised out of the loop.
    """
    logger.info("开始加载和调度任务...")
    for task_id, config in task_configs.items():
        aps_job_id = f"scheduled_task_{task_id}"  # internal APScheduler job ID

        # If the job already exists, drop it so the newest config wins
        # (and so now-disabled tasks actually get unscheduled).
        existing_job = scheduler.get_job(aps_job_id)
        if existing_job:
            logger.info(f"任务 '{aps_job_id}' 已存在，将先移除再添加以确保配置最新。")
            try:
                scheduler.remove_job(aps_job_id)
            except JobLookupError:
                logger.warning(f"尝试移除任务 '{aps_job_id}' 时未找到，可能已被移除。")

        if not config.is_enabled_on_startup:
            logger.info(f"任务 '{task_id}' 配置为启动时不启用，跳过调度。")
            continue

        try:
            if config.trigger_type == "interval":
                trigger = IntervalTrigger(**config.trigger_args, timezone=APS_SCHEDULER_TIMEZONE)
            elif config.trigger_type == "cron":
                trigger = CronTrigger(**config.trigger_args, timezone=APS_SCHEDULER_TIMEZONE)
            else:
                logger.warning(f"任务 '{task_id}' 的触发器类型 '{config.trigger_type}' 不支持。跳过。")
                continue

            # functools.partial carries task_config_id into the scheduled callback.
            job_function = partial(send_task_id_to_mq, task_config_id=task_id)

            scheduler.add_job(
                job_function,
                trigger,
                id=aps_job_id,
                name=config.description or task_id,
                replace_existing=True  # replace if the ID somehow still exists
            )
            logger.info(f"已调度任务: '{task_id}' (APS Job ID: {aps_job_id}) 使用 {config.trigger_type} 触发器, 参数: {config.trigger_args}")
        except Exception:
            # e.g. invalid trigger_args; skip this task, keep scheduling the rest.
            logger.exception(f"调度任务 '{task_id}' 失败，已跳过。")
    logger.info("任务加载和调度完成。")


# --- FastAPI lifespan (starts/stops the scheduler with the app) ---
@asynccontextmanager
async def lifespan(app: FastAPI):
    """Schedule configured tasks on startup and run APScheduler for the app's lifetime."""
    logger.info("FastAPI 应用启动...")
    load_and_schedule_tasks_from_config(TASK_CONFIGURATIONS)
    if scheduler.running:
        logger.info("APScheduler 已在运行中.")
    else:
        scheduler.start()
        logger.info("APScheduler 已启动.")
    try:
        yield
    finally:
        # Shut down cleanly even if the app body raised.
        logger.info("FastAPI 应用关闭...")
        if scheduler.running:
            scheduler.shutdown()
            logger.info("APScheduler 已关闭.")

# Application instance; the lifespan handler ties the scheduler's lifecycle to the app's.
app = FastAPI(lifespan=lifespan, title="APScheduler Task Manager API")

# --- API endpoints ---
class JobStatus(BaseModel):
    """Response model describing one scheduled APScheduler job."""
    job_id: str
    task_config_id: str
    name: str
    trigger: str
    next_run_time: Optional[datetime] = None  # UTC; None when paused / not yet computed
    is_paused: bool

@app.get("/scheduler/jobs", response_model=List[JobStatus], summary="获取所有调度任务的状态")
async def get_all_jobs():
    """List every scheduled job with its trigger, next run time (UTC) and pause state."""
    if not scheduler.running:
        return []

    prefix = "scheduled_task_"
    statuses: List[JobStatus] = []
    for job in scheduler.get_jobs():
        config_id = job.id.replace(prefix, "", 1) if job.id.startswith(prefix) else "N/A"

        # Heuristic pause detection: APScheduler's Job does not expose an
        # is_paused flag, but a paused job keeps its trigger while its
        # next_run_time becomes None. A freshly added, not-yet-run job can
        # look the same — track pause state yourself if you need it exact.
        paused = job.next_run_time is None and job.trigger is not None

        # Normalize the next run time to UTC for a consistent API surface.
        next_run = job.next_run_time.astimezone(timezone.utc) if job.next_run_time else None

        statuses.append(
            JobStatus(
                job_id=job.id,
                task_config_id=config_id,
                name=job.name,
                trigger=str(job.trigger),
                next_run_time=next_run,
                is_paused=paused,
            )
        )
    return statuses

def get_aps_job_id(task_config_id: str) -> str:
    """Map a task-config ID to its internal APScheduler job ID."""
    return "scheduled_task_" + task_config_id

@app.post("/scheduler/tasks/{task_config_id}/pause", summary="暂停指定任务的调度")
async def pause_task_schedule(task_config_id: str):
    """
    Pause the scheduled job for the given task config.

    Returns 404 when the job is not scheduled, a no-op message when it is
    already paused, and 500 on unexpected scheduler errors.

    Fix: the original raised the 404 inside the ``try`` block, so the broad
    ``except Exception`` swallowed it and re-raised it as a 500. The checks
    now run before the try so client errors keep their status codes.
    """
    aps_job_id = get_aps_job_id(task_config_id)
    job = scheduler.get_job(aps_job_id)
    if not job:
        raise HTTPException(status_code=404, detail=f"任务配置ID '{task_config_id}' (APS Job ID: {aps_job_id}) 未找到或未被调度。")

    # Rough already-paused check: a paused job keeps its trigger but has no next_run_time.
    if job.next_run_time is None and job.trigger is not None:
        logger.info(f"任务 '{aps_job_id}' 已经被暂停。")
        return {"message": f"任务 '{task_config_id}' (APS Job ID: {aps_job_id}) 已经被暂停。"}

    try:
        scheduler.pause_job(aps_job_id)
    except JobLookupError:
        # Job vanished between get_job and pause_job.
        raise HTTPException(status_code=404, detail=f"APS Job ID '{aps_job_id}' 对应的任务未找到。")
    except Exception as e:
        logger.error(f"暂停任务 '{aps_job_id}' 失败: {e}")
        raise HTTPException(status_code=500, detail=f"暂停任务失败: {str(e)}")

    logger.info(f"任务 '{aps_job_id}' 已暂停。")
    return {"message": f"任务 '{task_config_id}' (APS Job ID: {aps_job_id}) 已成功暂停。"}

@app.post("/scheduler/tasks/{task_config_id}/resume", summary="恢复指定任务的调度")
async def resume_task_schedule(task_config_id: str):
    """
    Resume a paused job and report its new next run time.

    Returns 404 when the job is not scheduled and a no-op message when it is
    not paused.

    Fix: the original raised the 404 inside the ``try`` block, so the broad
    ``except Exception`` swallowed it and re-raised it as a 500. The checks
    now run before the try so client errors keep their status codes.
    """
    aps_job_id = get_aps_job_id(task_config_id)
    job = scheduler.get_job(aps_job_id)
    if not job:
        raise HTTPException(status_code=404, detail=f"任务配置ID '{task_config_id}' (APS Job ID: {aps_job_id}) 未找到或未被调度。")

    # A non-paused job has a next_run_time, so there is nothing to resume.
    if job.next_run_time is not None:
        logger.info(f"任务 '{aps_job_id}' 未被暂停，无需恢复。")
        return {"message": f"任务 '{task_config_id}' (APS Job ID: {aps_job_id}) 未被暂停，无需恢复。"}

    try:
        scheduler.resume_job(aps_job_id)
    except JobLookupError:
        raise HTTPException(status_code=404, detail=f"APS Job ID '{aps_job_id}' 对应的任务未找到。")
    except Exception as e:
        # APScheduler raises when resuming a job that is not actually paused.
        if "is not paused" in str(e).lower():
            logger.info(f"任务 '{aps_job_id}' 未被暂停，无需恢复。")
            return {"message": f"任务 '{task_config_id}' (APS Job ID: {aps_job_id}) 未被暂停，无需恢复。"}
        logger.error(f"恢复任务 '{aps_job_id}' 失败: {e}")
        raise HTTPException(status_code=500, detail=f"恢复任务失败: {str(e)}")

    logger.info(f"任务 '{aps_job_id}' 已恢复。")
    # Fetch again: resuming recomputes the next run time.
    updated_job = scheduler.get_job(aps_job_id)
    next_run = updated_job.next_run_time.isoformat() if updated_job and updated_job.next_run_time else "N/A"
    return {"message": f"任务 '{task_config_id}' (APS Job ID: {aps_job_id}) 已成功恢复。下次运行时间: {next_run}"}

@app.post("/scheduler/tasks/{task_config_id}/trigger", summary="立即触发指定任务一次")
async def trigger_task_now(task_config_id: str):
    """
    Run a task immediately: a scheduled job gets its next_run_time moved to
    "now"; an unscheduled (but configured) task has its callback executed
    directly once.

    Fix: the original raised the 404 inside the ``try`` block, so the broad
    ``except Exception`` swallowed it and re-raised it as a 500. The lookup
    now happens before the try so client errors keep their status codes.
    """
    aps_job_id = get_aps_job_id(task_config_id)
    job = scheduler.get_job(aps_job_id)
    if not job:
        # Not scheduled (e.g. is_enabled_on_startup=False) — still allow a manual one-shot run.
        task_config = TASK_CONFIGURATIONS.get(task_config_id)
        if not task_config:
            raise HTTPException(status_code=404, detail=f"任务配置ID '{task_config_id}' 未找到。")
        logger.info(f"任务 '{aps_job_id}' 未被调度，将直接执行一次其关联函数。")
        await send_task_id_to_mq(task_config_id)
        return {"message": f"任务 '{task_config_id}' (未调度) 已手动触发一次。"}

    try:
        # For a scheduled job, pull its next run time forward to "now";
        # scheduler.timezone keeps the datetime timezone-correct.
        now_with_tz = datetime.now(scheduler.timezone)
        scheduler.modify_job(aps_job_id, next_run_time=now_with_tz)
    except JobLookupError:
        # Job disappeared between get_job and modify_job; fall back to a direct run.
        task_config = TASK_CONFIGURATIONS.get(task_config_id)
        if not task_config:
            raise HTTPException(status_code=404, detail=f"任务配置ID '{task_config_id}' (APS Job ID: {aps_job_id}) 未找到或未被调度。")
        logger.info(f"任务 '{aps_job_id}' 未找到于调度器，将直接执行一次其关联函数。")
        await send_task_id_to_mq(task_config_id)
        return {"message": f"任务 '{task_config_id}' (未调度) 已手动触发一次。"}
    except Exception as e:
        logger.error(f"立即触发任务 '{aps_job_id}' 失败: {e}")
        raise HTTPException(status_code=500, detail=f"立即触发任务失败: {str(e)}")

    logger.info(f"任务 '{aps_job_id}' 已被设置为立即运行。")
    return {"message": f"任务 '{task_config_id}' (APS Job ID: {aps_job_id}) 已设置为立即运行。"}

@app.post("/scheduler/reload_tasks", summary="重新加载并调度所有任务配置")
async def reload_all_tasks():
    """
    Re-register every task from TASK_CONFIGURATIONS with the scheduler.

    In a real deployment the configurations would be re-read from a database
    or file first; here the in-memory dict is simply re-applied. The loader
    removes/replaces existing jobs itself (and ``add_job`` passes
    ``replace_existing=True``), so stale entries are handled.
    """
    logger.info("收到重新加载任务的请求...")
    load_and_schedule_tasks_from_config(TASK_CONFIGURATIONS)
    return {"message": "任务配置已重新加载和调度。"}

@app.get("/")
async def root():
    """Liveness probe: confirms the API process is up."""
    payload = {"message": "APScheduler Task Management API is running."}
    return payload

# --- 运行 (使用 uvicorn) ---
# uvicorn d1:app --reload

