import asyncio
import json
import logging
import time
import uuid
from contextlib import asynccontextmanager
from typing import Any, Dict, List, Optional

import uvicorn
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from vllm import LLM, SamplingParams
from vllm.engine.arg_utils import AsyncEngineArgs
from vllm.engine.async_llm_engine import AsyncLLMEngine

# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


# Request/response data models
class ChatMessage(BaseModel):
    """One chat turn; role is expected to be "system", "user", or "assistant"."""

    role: str
    content: str


class ChatRequest(BaseModel):
    """Body of a chat completion request."""

    messages: List[ChatMessage]
    temperature: float = 0.7
    max_tokens: int = 2048
    model: Optional[str] = None  # optional model key; NOTE(review): not read by any handler in this file


class ChatResponse(BaseModel):
    """Shape of a successful completion payload.

    NOTE(review): the endpoints declare response_model=Dict and build plain
    dicts, so this model is currently unused — confirm before removing.
    """

    response: str
    model_used: str
    generation_time: float


# Per-model vLLM engine settings, keyed by the short model name used in the
# API routes. "port" is never read anywhere in this file — it appears to
# mirror the legacy "/8001"-style route names; verify before relying on it.
MODEL_CONFIGS = {
    "qwen2-7b": {
        "model_name": "Qwen/Qwen2-7B-Instruct",
        "tensor_parallel_size": 1,
        "gpu_memory_utilization": 0.3,
        "max_model_len": 4096,
        "port": 8001
    },
    "qwen3-32b": {
        "model_name": "Qwen/Qwen2.5-32B-Instruct",  # Qwen2.5-32B used as a stand-in for Qwen3
        "tensor_parallel_size": 2,  # a 32B model may need multiple GPUs
        "gpu_memory_utilization": 0.8,
        "max_model_len": 4096,
        "port": 8002
    },
    "llama-3-8b": {
        "model_name": "meta-llama/Meta-Llama-3-8B-Instruct",
        "tensor_parallel_size": 1,
        "gpu_memory_utilization": 0.3,
        "max_model_len": 4096,
        "port": 8003
    }
}


class ModelManager:
    """Owns the vLLM AsyncLLMEngine instances and tracks which one is active.

    NOTE(review): no lock guards these methods, so two concurrent requests
    can race to load/unload the same model — confirm the expected
    concurrency level before relying on this under load.
    """

    def __init__(self):
        # Loaded engines, keyed by their MODEL_CONFIGS key.
        self.engines: Dict[str, AsyncLLMEngine] = {}
        # Key of the most recently loaded model, or None when nothing is loaded.
        self.current_model: Optional[str] = None

    async def load_model(self, model_key: str) -> None:
        """Load the engine for ``model_key``; no-op if already loaded.

        Raises:
            ValueError: if ``model_key`` is not present in MODEL_CONFIGS.
            Exception: re-raised from vLLM if engine construction fails.
        """
        if model_key in self.engines:
            logger.info(f"Model {model_key} already loaded")
            return

        if model_key not in MODEL_CONFIGS:
            raise ValueError(f"Unknown model: {model_key}")

        config = MODEL_CONFIGS[model_key]
        logger.info(f"Loading model {model_key}: {config['model_name']}")

        try:
            # Build async engine arguments from the static config.
            engine_args = AsyncEngineArgs(
                model=config["model_name"],
                tensor_parallel_size=config["tensor_parallel_size"],
                gpu_memory_utilization=config["gpu_memory_utilization"],
                max_model_len=config["max_model_len"],
                trust_remote_code=True,
                enforce_eager=True,  # skip CUDA graph capture to avoid compilation issues
            )

            engine = AsyncLLMEngine.from_engine_args(engine_args)
            self.engines[model_key] = engine
            self.current_model = model_key
            logger.info(f"Successfully loaded model {model_key}")

        except Exception as e:
            logger.error(f"Failed to load model {model_key}: {str(e)}")
            raise

    async def unload_model(self, model_key: str) -> None:
        """Drop the engine for ``model_key``; no-op if it is not loaded.

        AsyncLLMEngine exposes no explicit shutdown method, so GPU memory is
        reclaimed only when garbage collection releases the engine object.
        """
        if model_key in self.engines:
            del self.engines[model_key]
            logger.info(f"Unloaded model {model_key}")

            if self.current_model == model_key:
                self.current_model = None

    async def switch_model(self, model_key: str) -> None:
        """Make ``model_key`` the only loaded model.

        Unloads every other engine first to free GPU memory, then loads the
        target model.

        Raises:
            ValueError: if ``model_key`` is not present in MODEL_CONFIGS.
        """
        if model_key not in MODEL_CONFIGS:
            raise ValueError(f"Unknown model: {model_key}")

        # Evict everything else before loading to keep GPU memory bounded.
        for key in list(self.engines.keys()):
            if key != model_key:
                await self.unload_model(key)

        await self.load_model(model_key)

    def get_engine(self, model_key: Optional[str] = None) -> AsyncLLMEngine:
        """Return the engine for ``model_key`` (defaults to the current model).

        Raises:
            ValueError: if the requested (or current) model is not loaded.
        """
        if model_key is None:
            model_key = self.current_model

        if model_key is None or model_key not in self.engines:
            raise ValueError(f"Model {model_key} not loaded")

        return self.engines[model_key]


# Global model manager shared by all request handlers.
model_manager = ModelManager()


@asynccontextmanager
async def lifespan(app: FastAPI):
    """Application lifecycle: load the default model on startup, unload all
    engines on shutdown. A failed default load is logged but does not
    prevent the server from starting."""
    logger.info("Starting FastAPI server with vLLM...")
    try:
        # qwen2-7b is the default model served out of the box.
        await model_manager.load_model("qwen2-7b")
    except Exception as exc:
        logger.error(f"Failed to load default model: {str(exc)}")
    else:
        logger.info("Default model loaded successfully")

    yield

    # Release every engine that is still resident.
    logger.info("Shutting down FastAPI server...")
    for loaded_key in list(model_manager.engines.keys()):
        await model_manager.unload_model(loaded_key)


# Create the FastAPI application; model load/unload is handled by `lifespan`.
app = FastAPI(
    title="Multi-Model LLM Server",
    description="FastAPI server with vLLM acceleration supporting multiple models",
    version="1.0.0",
    lifespan=lifespan
)


def format_messages_for_model(messages: List["ChatMessage"], model_key: str) -> str:
    """Render chat messages into the prompt format expected by the model.

    Args:
        messages: Ordered chat turns; only ``role`` and ``content`` are read.
        model_key: Key from MODEL_CONFIGS; the template is chosen by prefix
            ("qwen" -> ChatML, "llama" -> Llama 3, anything else -> a plain
            "role: content" transcript).

    Returns:
        A prompt string ending with the assistant turn opener so the model
        continues as the assistant. For the qwen/llama templates, messages
        with unrecognized roles are skipped; the fallback keeps them.
    """
    if model_key.startswith("qwen"):
        # ChatML format used by Qwen instruct models.
        formatted_text = ""
        for msg in messages:
            if msg.role in ("system", "user", "assistant"):
                formatted_text += f"<|im_start|>{msg.role}\n{msg.content}<|im_end|>\n"
        formatted_text += "<|im_start|>assistant\n"
        return formatted_text

    if model_key.startswith("llama"):
        # Llama 3 instruct format. <|begin_of_text|> must appear exactly once
        # at the very start of the prompt; the previous version emitted it only
        # inside the system-message branch, so it was missing when no system
        # message was given and duplicated when several were.
        formatted_text = "<|begin_of_text|>"
        for msg in messages:
            if msg.role in ("system", "user", "assistant"):
                formatted_text += (
                    f"<|start_header_id|>{msg.role}<|end_header_id|>\n\n{msg.content}<|eot_id|>"
                )
        formatted_text += "<|start_header_id|>assistant<|end_header_id|>\n\n"
        return formatted_text

    # Fallback: simple "role: content" transcript.
    formatted_text = ""
    for msg in messages:
        formatted_text += f"{msg.role}: {msg.content}\n"
    return formatted_text


@app.post("/8001", response_model=Dict)
async def chat_qwen2_7b(request: ChatRequest):
    """Chat completion endpoint for the Qwen2-7B model."""
    return await process_chat_request(request, "qwen2-7b")


@app.post("/8002", response_model=Dict)
async def chat_qwen3_32b(request: ChatRequest):
    """Chat completion endpoint for the Qwen3-32B model (served by Qwen2.5-32B)."""
    return await process_chat_request(request, "qwen3-32b")


@app.post("/8003", response_model=Dict)
async def chat_llama3_8b(request: ChatRequest):
    """Chat completion endpoint for the Llama-3-8B model."""
    return await process_chat_request(request, "llama-3-8b")


async def process_chat_request(request: ChatRequest, model_key: str) -> Dict:
    """Run one chat completion against the given model.

    Loads/switches to ``model_key`` on demand (evicting other models to free
    GPU memory), formats the prompt for that model family, and streams the
    generation to completion.

    Returns:
        ``{"data": {...}, "response_code": 200}`` on success, or
        ``{"response_code": 500, "error_info": str}`` on any failure.
    """
    start_time = time.time()

    try:
        # Lazily load the target model if it is not resident yet.
        if model_key not in model_manager.engines:
            logger.info(f"Loading model {model_key}...")
            await model_manager.switch_model(model_key)

        engine = model_manager.get_engine(model_key)

        prompt = format_messages_for_model(request.messages, model_key)

        sampling_params = SamplingParams(
            temperature=request.temperature,
            max_tokens=request.max_tokens,
            # Stop tokens covering both the Qwen (ChatML) and Llama 3 templates.
            stop=["<|im_end|>", "<|eot_id|>"],
        )

        # Use a UUID so concurrent requests never share a request_id. The
        # previous f"req_{int(time.time())}" had one-second resolution, so
        # simultaneous requests collided on the same vLLM request id.
        request_id = f"req_{uuid.uuid4().hex}"

        # engine.generate yields incremental RequestOutputs; only the final
        # (complete) one matters, so don't accumulate the whole stream.
        final_output = None
        async for request_output in engine.generate(prompt, sampling_params, request_id=request_id):
            final_output = request_output

        if final_output is None:
            raise HTTPException(status_code=500, detail="No response generated")

        generated_text = final_output.outputs[0].text.strip()

        generation_time = time.time() - start_time

        logger.info(f"Generated response in {generation_time:.2f} seconds using {model_key}")

        response_data = {
            "response": generated_text,
            "model_used": model_key,
            "generation_time": generation_time
        }

        return {"data": response_data, "response_code": 200}

    except Exception as e:
        # NOTE(review): HTTPException is swallowed here too — clients always
        # receive HTTP 200 with response_code=500 in the body. Confirm this
        # envelope convention is intentional.
        logger.error(f"Error processing request with {model_key}: {str(e)}")
        return {"response_code": 500, "error_info": str(e)}


@app.get("/models")
async def list_models():
    """Report configured models, resident models, and the active one."""
    configured = list(MODEL_CONFIGS.keys())
    resident = list(model_manager.engines.keys())
    return {
        "available_models": configured,
        "loaded_models": resident,
        "current_model": model_manager.current_model,
    }


@app.post("/models/{model_key}/load")
async def load_model_endpoint(model_key: str):
    """Explicitly load the given model; 500 with the error detail on failure."""
    try:
        await model_manager.load_model(model_key)
    except Exception as exc:
        raise HTTPException(status_code=500, detail=str(exc))
    return {"message": f"Model {model_key} loaded successfully"}


@app.post("/models/{model_key}/unload")
async def unload_model_endpoint(model_key: str):
    """Explicitly unload the given model; 500 with the error detail on failure."""
    try:
        await model_manager.unload_model(model_key)
    except Exception as exc:
        raise HTTPException(status_code=500, detail=str(exc))
    return {"message": f"Model {model_key} unloaded successfully"}


@app.get("/health")
async def health_check():
    """Liveness probe: reports resident models and the current timestamp."""
    resident = list(model_manager.engines.keys())
    return {"status": "healthy", "loaded_models": resident, "timestamp": time.time()}


if __name__ == "__main__":
    uvicorn.run(
        "model_api:app",  # import string: assumes this file is saved as model_api.py
        host="127.0.0.1",
        port=8000,
        workers=1,  # vLLM does not support multiple worker processes; use a single process
        log_level="info"
    )