import os

import torch
from fastapi import FastAPI, HTTPException
from peft import PeftModel
from pydantic import BaseModel
from transformers import AutoTokenizer, AutoModelForCausalLM
from vllm import LLM, SamplingParams


# ======================
# Configuration
# ======================

# Path to the base model (must match the model used during fine-tuning)
base_model_path = "/home/yangxing/.cache/modelscope/hub/models/LLM-Research/Llama-3___2-1B-Instruct"

# Path to the fine-tuned adapter (LoRA weights)
lora_adapter_path = "./car_llama2_model"

# Directory where the merged (base + LoRA) model is saved
output_dir = "./deployed_model"


# ======================
# 合并 LoRA 权重到基础模型
# ======================

def load_and_merge_model():
    """Merge the LoRA adapter into the base model and save the result.

    The base model is loaded in plain fp16 — NOT 4-bit. The original code
    passed ``load_in_4bit=True`` and then called ``merge_and_unload()``,
    but merging LoRA deltas into bitsandbytes-quantized weights is lossy
    and ``merge_and_unload`` is only well-defined on full-precision
    weights. vLLM re-quantizes the saved fp16 checkpoint at load time via
    its bitsandbytes load path, so 4-bit serving is preserved downstream.

    Returns:
        str: Directory containing the merged model and its tokenizer
        (``output_dir``).
    """
    # Reuse a previously merged checkpoint instead of redoing the costly
    # merge on every startup; delete output_dir to force a re-merge.
    if os.path.isdir(output_dir) and os.listdir(output_dir):
        print(f"♻️ Reusing previously merged model at {output_dir}")
        return output_dir

    print("🔄 开始加载基础模型...")
    base_model = AutoModelForCausalLM.from_pretrained(
        base_model_path,
        torch_dtype=torch.float16,
        device_map="auto",
    )

    print("🔄 加载 LoRA 适配器...")
    lora_model = PeftModel.from_pretrained(
        base_model,
        lora_adapter_path,
        torch_dtype=torch.float16
    )

    print("🔄 合并 LoRA 到基础模型...")
    # merge_and_unload folds the LoRA deltas into the base weights and
    # returns a plain transformers model (no PEFT wrapper).
    merged_model = lora_model.merge_and_unload()

    print("💾 保存合并后的模型...")
    # Tokenizer comes from the base model path so it matches training.
    tokenizer = AutoTokenizer.from_pretrained(base_model_path)
    merged_model.save_pretrained(output_dir)
    tokenizer.save_pretrained(output_dir)

    print(f"✅ 模型已合并并保存至 {output_dir}")
    return output_dir


# ======================
# 初始化 vLLM 模型
# ======================

# Module-level startup: merge the model, then bring up the vLLM engine.
print("📦 正在加载并合并模型...")
model_path = load_and_merge_model()

print("🚀 初始化 vLLM 引擎...")
# NOTE(review): quantization/load_format "bitsandbytes" makes vLLM load the
# checkpoint through its bitsandbytes 4-bit path — confirm the checkpoint
# written to `model_path` is compatible with this load path.
llm = LLM(
    model=model_path,
    tensor_parallel_size=1,      # adjust to the number of available GPUs
    dtype="float16",
    max_model_len=512,          # maximum sequence length (prompt + completion)
    quantization="bitsandbytes",  # 4-bit quantized weight support
    load_format="bitsandbytes" 
)

# NOTE(review): vLLM tokenizes internally and this tokenizer is not used
# anywhere in the visible code — presumably kept for future pre/post-processing.
tokenizer = AutoTokenizer.from_pretrained(model_path)


# ======================
# FastAPI inference API
# ======================

app = FastAPI(title="Llama3.2-1B-Instruct Car Model API")

class GenerationRequest(BaseModel):
    """Request body for POST /generate."""

    prompt: str                # input prompt text
    max_tokens: int = 256      # maximum number of tokens to generate
    temperature: float = 0.7   # sampling temperature
    top_p: float = 0.9         # nucleus-sampling probability mass


class GenerationResponse(BaseModel):
    """Response body for POST /generate."""

    response: str  # generated completion text


@app.post("/generate", response_model=GenerationResponse)
def generate(request: GenerationRequest):
    """Generate a completion for the given prompt via the vLLM engine.

    Args:
        request: Prompt text plus sampling parameters.

    Returns:
        dict: ``{"response": <generated text>}``, validated against
        ``GenerationResponse``.

    Raises:
        HTTPException: 500 with the underlying error message when inference
        fails. (The original raised a bare ``RuntimeError``, which FastAPI
        surfaces as an opaque 500 with no detail for the client.)
    """
    try:
        sampling_params = SamplingParams(
            temperature=request.temperature,
            top_p=request.top_p,
            max_tokens=request.max_tokens
        )
        outputs = llm.generate([request.prompt], sampling_params)
        # LLM.generate returns a list of RequestOutput objects; the text
        # lives on the nested CompletionOutput (outputs[0].outputs[0]),
        # not on RequestOutput itself — `outputs[0].text` raised
        # AttributeError at runtime.
        return {"response": outputs[0].outputs[0].text}
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"推理失败: {str(e)}") from e


if __name__ == "__main__":
    import uvicorn

    print("🌐 启动 FastAPI 服务...")
    # Bind to all interfaces so the service is reachable from other hosts.
    uvicorn.run(app, port=8000, host="0.0.0.0")