#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""
昇腾大模型服务
提供大模型推理的RESTful API服务
"""

import os
import sys
import json
import time
import logging
import argparse
from typing import Dict, List, Any, Optional

import torch
from fastapi import FastAPI, Request, HTTPException, BackgroundTasks
from fastapi.responses import JSONResponse
from fastapi.middleware.cors import CORSMiddleware
import uvicorn
from pydantic import BaseModel, Field

# 导入工具模块
from ascend_llm_demo.utils.ascend_utils import init_ascend, release_ascend, get_available_npu_devices
from ascend_llm_demo.utils.model_utils import ModelManager

# 配置日志
# Configure logging: INFO and above, streamed to stdout.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[
        logging.StreamHandler(sys.stdout)
    ]
)
logger = logging.getLogger(__name__)

# Create the FastAPI application.
app = FastAPI(
    title="Ascend LLM API",
    description="昇腾大语言模型推理服务API",
    version="1.0.0"
)

# Add CORS middleware. NOTE(review): fully open (any origin, with
# credentials) — acceptable for a demo, tighten for production.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # allow all origins
    allow_credentials=True,
    allow_methods=["*"],  # allow all methods
    allow_headers=["*"],  # allow all headers
)

# Global model manager; stays None until the startup hook creates it.
model_manager = None


# 请求模型定义
class ChatMessage(BaseModel):
    """One chat turn: a role ("user", "assistant" or "system") and its text."""
    role: str = Field(..., description="消息角色，可以是user、assistant或system")
    content: str = Field(..., description="消息内容")


class ChatRequest(BaseModel):
    """Request body for POST /chat: message history plus sampling parameters."""
    messages: List[ChatMessage] = Field(..., description="聊天消息列表")
    max_length: Optional[int] = Field(2048, description="最大生成长度")
    temperature: Optional[float] = Field(0.7, description="温度参数，控制随机性")
    top_p: Optional[float] = Field(0.9, description="top-p参数")
    top_k: Optional[int] = Field(40, description="top-k参数")
    show_performance: Optional[bool] = Field(False, description="是否返回性能信息")


# 健康检查和指标路由
@app.get("/health")
async def health_check():
    """Report service readiness.

    Returns a status dict: "unavailable" (no manager), "loading"
    (manager exists but model not ready), or "ok" with model info.
    """
    mgr = model_manager

    if mgr is None:
        return {"status": "unavailable", "reason": "模型管理器未初始化"}

    if not mgr.is_loaded:
        return {"status": "loading", "reason": "模型正在加载中"}

    return {
        "status": "ok",
        "model_path": mgr.model_path,
        "device": mgr.device,
    }


@app.get("/metrics")
async def metrics():
    """Return model-service statistics; 503 until a model is loaded."""
    mgr = model_manager
    if mgr is None or not mgr.is_loaded:
        raise HTTPException(status_code=503, detail="模型未加载")

    return {
        "model_path": mgr.model_path,
        "device": mgr.device,
        "metrics": mgr.get_stats(),
    }


# 聊天接口
@app.post("/chat")
async def chat(request: ChatRequest):
    """Run one chat completion against the loaded model.

    Args:
        request: Message history plus sampling parameters (ChatRequest).

    Returns:
        {"response": str}, plus {"performance": ...} when
        request.show_performance is set and the manager reported it.

    Raises:
        HTTPException: 503 when no model is loaded; 500 when generation
            fails or the manager reports an error.
    """
    global model_manager

    if model_manager is None or not model_manager.is_loaded:
        raise HTTPException(status_code=503, detail="模型未加载")

    try:
        # Convert the Pydantic models to plain dicts for the manager.
        messages = [msg.dict() for msg in request.messages]

        # Run generation.
        result = model_manager.chat(
            messages=messages,
            max_length=request.max_length,
            temperature=request.temperature,
            top_p=request.top_p,
            top_k=request.top_k
        )

        # Manager-reported failure -> 500 carrying its error message.
        if "error" in result:
            raise HTTPException(status_code=500, detail=result["error"])

        # Build the response payload.
        response = {
            "response": result["response"]
        }

        # Attach performance info only when explicitly requested.
        if request.show_performance and "performance" in result:
            response["performance"] = result["performance"]

        return response

    except HTTPException:
        # BUG FIX: the blanket handler below used to catch the
        # HTTPException raised for manager errors and re-wrap it with
        # str(e) as the detail, mangling the error payload. Propagate
        # HTTPExceptions unchanged instead.
        raise
    except Exception as e:
        logger.error(f"聊天接口出错: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))


# 启动时加载模型
@app.on_event("startup")
async def startup_event():
    """On startup: read env config, init the NPU, and load the model
    in a background thread so the API starts responding immediately."""
    global model_manager

    # Configuration comes from the environment (set by start_service).
    model_path = os.environ.get("MODEL_PATH")
    device_type = os.environ.get("DEVICE_TYPE", "npu")
    device_id = int(os.environ.get("DEVICE_ID", "0"))

    # Without a model path there is nothing to load.
    if not model_path:
        logger.warning("未设置MODEL_PATH环境变量，模型将不会自动加载")
        return

    # Initialize the Ascend runtime when targeting an NPU.
    if device_type.lower() == "npu":
        init_ascend(device_id)

    model_manager = ModelManager(model_path, device_type, device_id)

    import threading

    def _load_in_background():
        """Load the model off the event loop; only logs the outcome."""
        try:
            if model_manager.load():
                logger.info(f"模型 '{model_path}' 加载成功")
            else:
                logger.error(f"模型 '{model_path}' 加载失败")
        except Exception as exc:
            logger.error(f"加载模型时发生错误: {str(exc)}")

    threading.Thread(target=_load_in_background, daemon=True).start()


# 应用关闭时释放资源
@app.on_event("shutdown")
async def shutdown_event():
    """On shutdown: unload the model and release Ascend resources."""
    global model_manager

    # Unload only when a model actually finished loading.
    if model_manager and model_manager.is_loaded:
        logger.info("正在卸载模型...")
        model_manager.unload()

    # Release the NPU if that is the configured device type.
    if os.environ.get("DEVICE_TYPE", "").lower() == "npu":
        release_ascend(int(os.environ.get("DEVICE_ID", "0")))

    logger.info("资源已释放")


def start_service(model_path: str,
                 device_type: str = "npu",
                 device_id: int = 0,
                 host: str = "0.0.0.0",
                 port: int = 8000):
    """Start the model service.

    Publishes the configuration through environment variables (read back
    by the startup hook) and then blocks in uvicorn.

    Args:
        model_path: Model path (local dir or Hugging Face model ID).
        device_type: Device type: "npu", "cuda" or "cpu".
        device_id: Device index for multi-device hosts.
        host: Bind address.
        port: Bind port.
    """
    env = os.environ
    env["MODEL_PATH"] = model_path
    env["DEVICE_TYPE"] = device_type
    env["DEVICE_ID"] = str(device_id)

    # Blocks until the server is stopped.
    uvicorn.run(app, host=host, port=port)


def parse_args(argv: Optional[List[str]] = None):
    """Parse command-line arguments.

    Args:
        argv: Argument list to parse. Defaults to None, which makes
            argparse read sys.argv[1:] — backward compatible with the
            original no-argument call; passing an explicit list makes
            the function testable and reusable.

    Returns:
        argparse.Namespace with model_path, device, device_id, host, port.
    """
    parser = argparse.ArgumentParser(description="昇腾大模型推理服务")

    parser.add_argument("--model_path", type=str, required=True,
                        help="大模型的路径，可以是本地路径或Hugging Face模型ID")

    parser.add_argument("--device", type=str, default="npu", choices=["npu", "cuda", "cpu"],
                        help="运行设备，支持npu（昇腾）、cuda（英伟达GPU）和cpu")

    parser.add_argument("--device_id", type=int, default=0,
                        help="设备ID，当有多个NPU或GPU时使用")

    parser.add_argument("--host", type=str, default="0.0.0.0",
                        help="服务主机地址")

    parser.add_argument("--port", type=int, default=8000,
                        help="服务端口号")

    return parser.parse_args(argv)


if __name__ == "__main__":
    # Parse CLI options.
    args = parse_args()

    # Validate the device choice: fall back to CPU when no NPU is
    # present, and to device 0 when the requested NPU ID is invalid.
    if args.device == "npu":
        npus = get_available_npu_devices()
        if not npus:
            logger.warning("未检测到昇腾设备，将使用CPU替代")
            args.device = "cpu"
        elif args.device_id not in npus:
            logger.warning(f"指定的昇腾设备ID {args.device_id} 不可用，将使用设备0")
            args.device_id = 0

    # Launch the service (blocks until shutdown).
    start_service(
        model_path=args.model_path,
        device_type=args.device,
        device_id=args.device_id,
        host=args.host,
        port=args.port,
    )