import os
import torch
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel
import warnings
import uvicorn
from fastapi.middleware.cors import CORSMiddleware

# Silence noisy library warnings (transformers/peft are chatty at load time).
warnings.filterwarnings("ignore")

app = FastAPI(title="Mini LLM API - Finetuned Model", description="基于Qwen的微调模型服务")

# CORS middleware so browser clients on other origins can call the API.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # in production, restrict this to specific origins
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Make sure the offload folder exists (passed to from_pretrained as offload_folder).
os.makedirs("./offload", exist_ok=True)

# Module-level state: model/tokenizer are populated by the startup hook;
# model_load_error records the failure message so endpoints can report it.
model = None
tokenizer = None
model_load_error = None

# Project-local helper used to locate a downloaded base-model directory.
from utils.model_utils import find_local_model_path

# Directory containing this file, used as the project root for all paths.
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))

# In-project model locations: the base Qwen model and the finetuned adapter.
PROJECT_MODEL_PATH = os.path.join(PROJECT_ROOT, "models", "Qwen1.5-1.8B")
FINETUNED_MODEL_PATH = os.path.join(PROJECT_ROOT, "training", "models", "finetuned_model_tianqi")

# Resolve the local base-model path.
# NOTE(review): presumably returns None when no model is found (load_model
# treats a falsy value as "not found") — confirm against utils.model_utils.
LOCAL_MODEL_PATH = find_local_model_path(PROJECT_ROOT)

class GenerationRequest(BaseModel):
    """Request body for POST /generate."""
    # Text prompt to complete.
    prompt: str
    # Number of NEW tokens to generate; max_new_tokens is used instead of
    # max_length to avoid conflicting with the prompt length during generate().
    max_new_tokens: int = 200
    # Sampling temperature (higher = more random).
    temperature: float = 0.7
    # Nucleus-sampling probability mass.
    top_p: float = 0.9

class GenerationResponse(BaseModel):
    """Response body for POST /generate: the original prompt plus the completion."""
    # Echo of the request prompt.
    prompt: str
    # Generated continuation only (prompt tokens stripped).
    generated_text: str

@app.on_event("startup")
async def load_model():
    """Load the base Qwen model and the PEFT finetuned adapter at app startup.

    On success, populates the module-level ``model`` and ``tokenizer`` globals.
    On any failure, stores the error message in ``model_load_error`` instead of
    raising, so the app still starts and the endpoints can report the problem.
    """
    global model, tokenizer, model_load_error, LOCAL_MODEL_PATH
    
    print("正在加载基础模型...")
    try:
        # Require a locally downloaded base model — no network download fallback.
        if LOCAL_MODEL_PATH and os.path.exists(LOCAL_MODEL_PATH):
            print(f"从本地路径加载基础模型: {LOCAL_MODEL_PATH}")
            base_model_path = LOCAL_MODEL_PATH
        else:
            raise ValueError("未找到本地模型路径。请确保已下载Qwen1.5-1.8B模型到本地")
        
        # Load the tokenizer first.
        tokenizer = AutoTokenizer.from_pretrained(base_model_path, local_files_only=True)
        
        # Ensure a pad token exists (generate() needs pad_token_id; Qwen-style
        # tokenizers may not define one).
        if tokenizer.pad_token is None:
            tokenizer.pad_token = tokenizer.eos_token
        
        # Load the base model.
        base_model = AutoModelForCausalLM.from_pretrained(
            base_model_path,
            device_map="balanced",  # spread layers across available devices
            offload_folder="./offload",
            dtype=torch.float16,    # NOTE(review): newer transformers accept dtype=;
                                    # older versions expect torch_dtype= — confirm the
                                    # installed version (PeftModel below uses torch_dtype)
            low_cpu_mem_usage=True,
            local_files_only=True  # never hit the network
        )
        
        # Attach the finetuned PEFT adapter on top of the base model.
        if not os.path.exists(FINETUNED_MODEL_PATH):
            raise ValueError(f"微调模型路径不存在: {FINETUNED_MODEL_PATH}")
        
        print(f"正在加载微调模型: {FINETUNED_MODEL_PATH}")
        model = PeftModel.from_pretrained(
            base_model,
            FINETUNED_MODEL_PATH,
            torch_dtype=torch.float16
        )
        
        # Clear any max_new_tokens baked into the saved generation_config so the
        # per-request value passed to generate() is not overridden/conflicting.
        if hasattr(model, 'generation_config'):
            if hasattr(model.generation_config, 'max_new_tokens'):
                model.generation_config.max_new_tokens = None
        
        print("微调模型加载完成!")
    except Exception as e:
        # Record the failure for the endpoints; keep the server running.
        model_load_error = str(e)
        print(f"模型加载失败: {e}")
        print("提示: 请检查网络连接或确保模型已下载到本地")

@app.get("/")
async def root():
    """Service metadata: model identity, paths, and current load status."""
    # Resolve load status explicitly instead of a nested conditional expression.
    if model is not None:
        status = "loaded"
    elif model_load_error:
        status = "load failed"
    else:
        status = "loading"

    return {
        "message": "Mini LLM API 微调模型已启动", 
        "model": "Qwen/Qwen1.5-1.8B",
        "finetuned_model": FINETUNED_MODEL_PATH,
        "model_status": status,
        "model_path": LOCAL_MODEL_PATH or "not found",
    }

@app.post("/generate", response_model=GenerationResponse)
async def generate_text(request: GenerationRequest):
    """Generate a completion for ``request.prompt`` using the finetuned model.

    Only the newly generated continuation is returned: the prompt is removed by
    slicing the output at the prompt's token length rather than by string
    matching, which is robust to tokenizer re-spacing.

    Raises:
        HTTPException 500: the model failed to load, or generation failed.
        HTTPException 503: the model has not finished loading yet.
    """
    global model, tokenizer, model_load_error
    
    if model_load_error:
        raise HTTPException(status_code=500, detail=f"模型加载失败: {model_load_error}")
    
    if model is None or tokenizer is None:
        raise HTTPException(status_code=503, detail="模型未加载，请稍后重试或检查服务状态")
    
    try:
        # Tokenize with attention_mask included; cap the prompt length so an
        # oversized prompt cannot exhaust memory.
        inputs = tokenizer(
            request.prompt, 
            return_tensors="pt", 
            padding=True,
            truncation=True,
            max_length=min(2048, tokenizer.model_max_length),
            add_special_tokens=True
        )
        
        # Move input tensors to wherever the (possibly sharded) model lives.
        inputs = {k: v.to(model.device) for k, v in inputs.items()}
        
        with torch.no_grad():
            outputs = model.generate(
                **inputs,  # input_ids + attention_mask
                max_new_tokens=request.max_new_tokens,
                temperature=request.temperature,
                top_p=request.top_p,
                do_sample=True,
                pad_token_id=tokenizer.pad_token_id,
                eos_token_id=tokenizer.eos_token_id,
                repetition_penalty=1.1,      # discourage verbatim repetition
                no_repeat_ngram_size=3       # block repeated 3-grams
            )
        
        # Decode ONLY the generated tokens. (Fix: previously the full sequence
        # was also decoded first and that result immediately discarded.)
        input_length = inputs["input_ids"].shape[1]
        generated_tokens = outputs[0][input_length:]
        generated_text = tokenizer.decode(generated_tokens, skip_special_tokens=True).strip()
        
        return GenerationResponse(prompt=request.prompt, generated_text=generated_text)
    except Exception as e:
        # Chain the original cause so tracebacks show what actually failed.
        raise HTTPException(status_code=500, detail=f"文本生成失败: {str(e)}") from e

@app.get("/health")
async def health_check():
    """Health probe: 503 while loading or after a load failure, 200 otherwise."""
    if model is None:
        # No model yet — distinguish a hard failure from still-loading.
        if model_load_error:
            raise HTTPException(status_code=503, detail=f"模型加载失败: {model_load_error}")
        raise HTTPException(status_code=503, detail="模型仍在加载中，请稍后重试")

    return {"status": "healthy", "model_loaded": True, "model_path": LOCAL_MODEL_PATH}

if __name__ == "__main__":
    # Serve the app directly with uvicorn on all interfaces, port 4062.
    uvicorn.run(app, host="0.0.0.0", port=4062)