from contextlib import asynccontextmanager

import torch
import uvicorn
import os
from fastapi import FastAPI, Depends, BackgroundTasks
from fastapi.exceptions import RequestValidationError, HTTPException
from fastapi.responses import JSONResponse
from fastapi.middleware.cors import CORSMiddleware
from starlette.middleware.gzip import GZipMiddleware
from models import ResponseData
from utils import BusinessException

from app.core.config import settings
from app.core.database import init_db_pool, close_db_pool



from services.nn_day import QuantileNetEnergy  # nn_day business logic (quantile net for energy forecasting)
from services.cnn_lstm_api import router



# FastAPI initialization ---------------------------------------------------
@asynccontextmanager
async def lifespan(app: FastAPI):
    """Application lifespan hook.

    Startup: initialize the database connection pool and the singleton
    nn_day model (stored on ``app.state``).  Shutdown: close the pool.

    NOTE(review): startup failures are only printed and then swallowed,
    so the service keeps running even when a critical component failed
    to initialize — confirm this best-effort behavior is intentional.
    """
    # --- startup ---------------------------------------------------------
    try:
        await init_db_pool()
        print("数据库连接池初始化完成")
    except Exception as e:
        print(f"数据库连接池初始化失败: {str(e)}")

    try:
        # Singleton model instance, shared with request handlers via app.state.
        app.state.nn_day_model = QuantileNetEnergy()
        print("模型初始化完成")
    except Exception as e:
        print(f"模型初始化失败: {str(e)}")

    yield

    # --- shutdown --------------------------------------------------------
    try:
        await close_db_pool()
        print("数据库连接池已关闭")
    except Exception as e:
        print(f"关闭数据库连接池时出错: {str(e)}")
app = FastAPI(
    title="电量预测API",
    description="基于深度学习模型的电量预测服务",
    version="1.0.0",
    root_path="/api/v1",
    docs_url="/docs" if settings.ENVIRONMENT == "dev" else None,  # hide Swagger UI outside dev
    debug=settings.DEBUG,
    redoc_url=None,
    lifespan=lifespan
)

# Middleware ---------------------------------------------------------------
app.add_middleware(GZipMiddleware, minimum_size=1000)

# Origins come from a comma-separated env var; strip whitespace so entries
# like "https://a.com, https://b.com" still match (fix: previously the raw
# split left leading spaces, so every origin after the first never matched).
# NOTE(review): the default "*" combined with allow_credentials=True is
# rejected by browsers per the CORS spec — set ALLOWED_ORIGINS explicitly
# in production.
_allowed_origins = [
    origin.strip()
    for origin in os.getenv("ALLOWED_ORIGINS", "*").split(",")
    if origin.strip()
]
app.add_middleware(
    CORSMiddleware,
    allow_origins=_allowed_origins,
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)


# Global exception handlers ------------------------------------------------
@app.exception_handler(BusinessException)
async def biz_exception_handler(request, exc: BusinessException):
    """Translate a BusinessException into the shared error envelope.

    In dev the status is 418 (easy to spot in logs) and the exception's
    extra info is attached; in prod it is a plain 400 with no internals.
    """
    is_dev = settings.ENVIRONMENT == "dev"
    payload = ResponseData.error(
        message=exc.message,
        debug_info=exc.extra_info if is_dev else None,
    )
    return JSONResponse(
        status_code=418 if is_dev else 400,
        content=payload.dict(),
    )

@app.exception_handler(HTTPException)
async def http_exception_handler(request, exc: HTTPException):
    """Wrap FastAPI HTTPExceptions in the shared ResponseData envelope,
    preserving the original status code both in the HTTP response and in
    the body's ``code`` field."""
    body = ResponseData.error(message=exc.detail, code=exc.status_code)
    return JSONResponse(status_code=exc.status_code, content=body.dict())

@app.exception_handler(RequestValidationError)
async def validation_exception_handler(request, exc: RequestValidationError):
    """Return 422 with the shared envelope; raw pydantic validation
    errors are exposed only in the dev environment."""
    details = exc.errors() if settings.ENVIRONMENT == "dev" else None
    body = ResponseData.error(message="参数校验失败", errors=details)
    return JSONResponse(status_code=422, content=body.dict())



# Routing ------------------------------------------------------------------
# Mount the routes defined in cnn_lstm_api.py under the /cnn_lstm prefix.
app.include_router(router, prefix="/cnn_lstm")


@app.get("/info", response_model=ResponseData[str], summary="服务信息")
async def info():
    """Return a short human-readable service name."""
    service_name = '电量预测'
    return ResponseData.success(service_name)


@app.get("/healthy", response_model=ResponseData[dict], summary="健康检查")
async def healthy():
    """Health probe: reports status, app version, and model readiness.

    Fix: ``response_model`` previously declared ``ResponseData[str]``
    while the payload is a dict, so response validation could not match
    the declared model.  The envelope now declares a dict payload.
    """
    return ResponseData.success(data={
        "status": "OK",
        "version": os.getenv("APP_VERSION", "1.0.0"),
        # True once lifespan has successfully built the singleton model.
        "model_ready": hasattr(app.state, "nn_day_model")
    })

@app.get("/system/gpu", summary="添加GPU内存监控")
async def check_gpu():
    """Report CUDA availability plus current and peak allocated GPU memory
    (bytes, as returned by torch.cuda)."""
    if not torch.cuda.is_available():
        return {"gpu_available": False}
    return {
        "gpu_available": True,
        "memory_allocated": torch.cuda.memory_allocated(),
        "max_memory_allocated": torch.cuda.max_memory_allocated(),
    }

# nn_day training endpoint
@app.get("/nn_day/train", response_model=ResponseData[str], summary="启动模型训练")
async def nn_day_tran(background_tasks: BackgroundTasks):
    """Submit an asynchronous training job for the nn_day model.

    The job runs in a FastAPI background task so the request returns
    immediately; the singleton model is expected to publish a new
    version when training completes.

    NOTE(review): the function name keeps the original 'tran' typo so
    the OpenAPI operation id does not change.
    """
    try:
        # Only scheduling failures are caught here — errors raised during
        # training itself occur after the response has been sent.
        train_fn = app.state.nn_day_model.train
        background_tasks.add_task(train_fn, publish=True)
        return ResponseData.success("训练任务已提交")
    except Exception as e:
        raise BusinessException(f"训练任务启动失败: {str(e)}")

# nn_day incremental-update endpoint
@app.get("/nn_day/update", response_model=ResponseData[str], summary="更新预测模型")
async def nn_day_update(background_tasks: BackgroundTasks):
    """Queue an incremental model-data update as a background task and
    return immediately."""
    update_fn = app.state.nn_day_model.update
    background_tasks.add_task(update_fn)
    return ResponseData.success("模型更新任务已提交")

if __name__ == "__main__":
    print(f"当前ENVIRONMENT: {settings.ENVIRONMENT}")
    is_dev = settings.ENVIRONMENT == "dev"
    # Base server options; PORT / WORKERS may be overridden via env vars.
    # Note: uvicorn ignores "workers" when reload is enabled (dev mode).
    server_config = {
        "host": "0.0.0.0",
        "port": int(os.getenv("PORT", 8000)),
        "reload": is_dev,
        "workers": int(os.getenv("WORKERS", 1)),
    }
    if settings.ENVIRONMENT == "prod":
        # Production hardening: quieter logs, bounded keep-alive and concurrency.
        server_config["access_log"] = False
        server_config["timeout_keep_alive"] = 30
        server_config["limit_concurrency"] = 100

    uvicorn.run("main:app", **server_config)

