"""
DataPlexus Backend API
数据治理平台后端服务
"""

import os
from contextlib import asynccontextmanager
from datetime import datetime, timedelta

import uvicorn
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from fastapi.staticfiles import StaticFiles
from sqlalchemy import bindparam, text

from app.api.v1.api import api_router
from app.core.config import settings
from app.core.database import engine, Base
from app.core.logger import logger
from app.utils.timezone_utils import get_shanghai_now


async def cleanup_zombie_sync_tasks(max_runtime_hours: int = 24) -> None:
    """
    Clean up zombie sync executions stuck in 'running'/'paused' state.

    Cleanup rules, all derived from ``max_runtime_hours`` (default 24, which
    reproduces the historical 48/24/12-hour thresholds):
    1. Force-clean tasks running longer than twice the base limit.
    2. Clean tasks over the base limit with no activity in the last 30 minutes.
    3. Clean tasks over half the base limit with zero progress and no activity
       in the last 30 minutes.

    "Activity" is the newest dp_sync_logs row for the execution, falling back
    to started_at when the execution has no log rows.

    Args:
        max_runtime_hours: Base maximum runtime in hours (default 24).

    Raises:
        Exception: Re-raises any database error after logging it.
    """
    # Derived thresholds; with the default of 24 these are 48 / 24 / 12 hours.
    hard_limit_hours = max_runtime_hours * 2
    base_limit_hours = max_runtime_hours
    zero_progress_limit_hours = max_runtime_hours // 2

    try:
        # Executions with no log entry in the last 30 minutes count as inactive.
        cutoff_recent_activity = get_shanghai_now() - timedelta(minutes=30)

        async with engine.begin() as conn:
            # Find genuinely stuck executions (NOTE: TIMESTAMPDIFF / HAVING on
            # select aliases is MySQL syntax — assumed MySQL backend).
            select_stmt = text("""
                SELECT
                    e.id,
                    e.task_id,
                    e.started_at,
                    e.updated_at,
                    e.progress_percentage,
                    TIMESTAMPDIFF(HOUR, e.started_at, NOW()) as runtime_hours,
                    COALESCE(MAX(l.created_at), e.started_at) as last_activity
                FROM dp_sync_executions e
                LEFT JOIN dp_sync_logs l ON e.id = l.execution_id
                WHERE e.status IN ('running', 'paused')
                GROUP BY e.id, e.task_id, e.started_at, e.updated_at, e.progress_percentage
                HAVING (
                    -- Case 1: over the hard limit (force clean)
                    runtime_hours > :hard_limit
                    OR
                    -- Case 2: over the base limit and inactive for 30 minutes
                    (runtime_hours > :base_limit AND last_activity < :cutoff_recent_activity)
                    OR
                    -- Case 3: over half the base limit, zero progress, inactive
                    (runtime_hours > :zero_progress_limit AND e.progress_percentage = 0 AND last_activity < :cutoff_recent_activity)
                )
            """)
            result = await conn.execute(select_stmt, {
                "hard_limit": hard_limit_hours,
                "base_limit": base_limit_hours,
                "zero_progress_limit": zero_progress_limit_hours,
                "cutoff_recent_activity": cutoff_recent_activity,
            })

            zombie_tasks = result.fetchall()

            if not zombie_tasks:
                logger.info("✅ 没有发现僵尸任务")
                return

            logger.info(f"🧹 发现 {len(zombie_tasks)} 个僵尸任务，正在清理...")

            for task in zombie_tasks:
                exec_id, task_id, started_at, updated_at, progress, runtime_hours, last_activity = task

                # Explain which rule triggered the cleanup, mirroring the SQL
                # conditions above (same thresholds, same order).
                if runtime_hours > hard_limit_hours:
                    reason = f"运行时间超过{hard_limit_hours}小时（{runtime_hours:.1f}小时）"
                elif runtime_hours > base_limit_hours:
                    reason = f"运行时间超过{base_limit_hours}小时（{runtime_hours:.1f}小时）且30分钟内无活动"
                else:
                    reason = f"运行时间超过{zero_progress_limit_hours}小时（{runtime_hours:.1f}小时）且进度为0且30分钟内无活动"

                logger.info(f"清理执行ID {exec_id}（任务ID {task_id}）: {reason}")

            # Mark all zombies as failed in one statement. expanding=True is
            # required for a bound IN-list with SQLAlchemy text(); passing a
            # plain tuple to `IN :param` is not portable across DBAPI drivers.
            zombie_ids = [task[0] for task in zombie_tasks]
            update_stmt = text("""
                UPDATE dp_sync_executions
                SET
                    status = 'failed',
                    completed_at = :now,
                    updated_at = :now,
                    error_message = '任务异常终止：疑似僵尸进程（长时间运行且无活动）'
                WHERE id IN :zombie_ids
            """).bindparams(bindparam("zombie_ids", expanding=True))
            await conn.execute(update_stmt, {
                "now": get_shanghai_now(),
                "zombie_ids": zombie_ids,
            })

            logger.info(f"✅ 成功清理 {len(zombie_tasks)} 个僵尸任务")

    except Exception as e:
        logger.error(f"❌ 清理僵尸任务失败: {str(e)}")
        raise


@asynccontextmanager
async def lifespan(app: FastAPI):
    """Startup/shutdown hook wired into the FastAPI application."""
    # ---- startup ----
    logger.info("🚀 DataPlexus Backend API 启动中...")

    # Best-effort table creation: a database outage must not block API startup,
    # since the connection-test endpoints can then be used for diagnosis.
    try:
        async with engine.begin() as conn:
            await conn.run_sync(Base.metadata.create_all)
    except Exception as e:
        logger.warning(f"⚠️ 数据库连接失败: {str(e)}")
        logger.info("🔄 API将在没有数据库连接的情况下启动，连接测试功能可用于诊断问题")
    else:
        logger.info("✅ 数据库连接成功")

    logger.info(f"🌐 API文档地址: http://{settings.API_HOST}:{settings.API_PORT}/docs")

    # Sweep executions left over from a previous run; failure here is non-fatal.
    try:
        await cleanup_zombie_sync_tasks()
    except Exception as e:
        logger.warning(f"⚠️ 僵尸任务清理失败: {str(e)}")
    else:
        logger.info("✅ 僵尸任务清理完成")

    yield

    # ---- shutdown ----
    logger.info("🛑 DataPlexus Backend API 关闭中...")


# FastAPI application, bound to the lifespan hook defined above.
app = FastAPI(
    title="DataPlexus API",
    description="数据血缘分析系统后端API",
    version="1.0.0",
    docs_url="/docs",
    redoc_url="/redoc",
    lifespan=lifespan,
)

# Local development front-end origins allowed to call the API with credentials.
_DEV_ORIGINS = [
    "http://localhost:3000",
    "http://127.0.0.1:3000",
    "http://localhost:3001",
    "http://127.0.0.1:3001",
]
app.add_middleware(
    CORSMiddleware,
    allow_origins=_DEV_ORIGINS,
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Versioned API routes.
app.include_router(api_router, prefix=f"{settings.API_PREFIX}/v1")

# Serve exported CSV files, but only when the export directory already exists.
if os.path.exists(settings.CSV_EXPORT_PATH):
    app.mount("/exports", StaticFiles(directory=settings.CSV_EXPORT_PATH), name="exports")


@app.get("/")
async def root():
    """Service banner with a pointer to the interactive API docs."""
    docs_url = f"http://{settings.API_HOST}:{settings.API_PORT}/docs"
    return {
        "message": "DataPlexus Backend API",
        "version": "1.0.0",
        "docs": docs_url,
    }


@app.get("/health")
async def health_check():
    """Liveness probe: reports healthy whenever the process is serving."""
    payload = {"status": "healthy", "message": "API is running"}
    return payload


if __name__ == "__main__":
    # Development entry point: run under uvicorn with settings-driven
    # host/port; auto-reload is tied to the DEBUG flag.
    uvicorn.run(
        "main:app",
        host=settings.API_HOST,
        port=settings.API_PORT,
        reload=settings.DEBUG,
        log_level=settings.LOG_LEVEL.lower()
    )
