import os
import torch
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from transformers import AutoModelForCausalLM, AutoTokenizer
import warnings
import uvicorn
from fastapi.middleware.cors import CORSMiddleware

# Suppress library warnings (e.g. transformers/torch deprecation notices)
warnings.filterwarnings("ignore")

# Create the FastAPI application
app = FastAPI(title="Mini LLM API - Base Model", description="基于Qwen的基础模型服务")

# Add CORS middleware so browsers can call the API cross-origin
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # NOTE(review): restrict to specific domains in production
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Ensure the offload folder exists (used by from_pretrained for weight offloading)
os.makedirs("./offload", exist_ok=True)

# Global state: model/tokenizer are populated by the startup hook;
# a load failure is recorded in model_load_error for endpoints to report.
model = None
tokenizer = None
model_load_error = None

# Project-local helper that locates a downloaded model on disk
from utils.model_utils import find_local_model_path

# Project root = directory containing this file
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))

# Resolve the local model path (None if no model has been downloaded)
LOCAL_MODEL_PATH = find_local_model_path(PROJECT_ROOT)

class GenerationRequest(BaseModel):
    """Request body for POST /generate."""
    prompt: str
    max_length: int = 512  # total sequence length (prompt + generation); 512 is a sane default
    temperature: float = 0.7  # sampling temperature
    top_p: float = 0.9  # nucleus-sampling cutoff
    # NOTE(review): a max_new_tokens field (bounding only the generated part)
    # could be added later without breaking existing callers.

class GenerationResponse(BaseModel):
    """Response body for POST /generate: the original prompt plus the completion."""
    prompt: str
    generated_text: str

@app.on_event("startup")
async def load_model():
    """Load the tokenizer and base model when the application starts.

    Only local files are used (no network download). On failure the
    exception message is stored in ``model_load_error`` so the HTTP
    endpoints can report it instead of the server crashing.
    """
    global model, tokenizer, model_load_error, LOCAL_MODEL_PATH

    print("正在加载基础模型...")
    try:
        # Require a resolved local model path; abort with a clear error otherwise
        if LOCAL_MODEL_PATH and os.path.exists(LOCAL_MODEL_PATH):
            print(f"从本地路径加载模型: {LOCAL_MODEL_PATH}")
            model_path = LOCAL_MODEL_PATH
        else:
            raise ValueError("未找到本地模型路径。请确保已下载Qwen1.5-1.8B模型到本地")

        # Load the tokenizer first
        tokenizer = AutoTokenizer.from_pretrained(model_path, local_files_only=True)

        # Ensure the tokenizer has a pad token (fall back to EOS)
        if tokenizer.pad_token is None:
            tokenizer.pad_token = tokenizer.eos_token

        model = AutoModelForCausalLM.from_pretrained(
            model_path,
            device_map="balanced",  # spread layers across available devices
            offload_folder="./offload",
            dtype=torch.float16,    # NOTE(review): newer transformers use `dtype`; older releases expect `torch_dtype` — confirm installed version
            low_cpu_mem_usage=True,
            local_files_only=True  # never fall back to downloading
        )

        print("基础模型加载完成!")
    except Exception as e:
        model_load_error = str(e)
        print(f"模型加载失败: {e}")
        print("提示: 请检查网络连接或确保模型已下载到本地")

@app.get("/")
async def root():
    """Service-info endpoint: reports model identity, load status and path."""
    # Derive the human-readable load status from the module globals.
    if model is not None:
        status = "loaded"
    elif model_load_error:
        status = "load failed"
    else:
        status = "loading"
    return {
        "message": "Mini LLM API 基础模型已启动",
        "model": "Qwen/Qwen1.5-1.8B",
        "model_status": status,
        "model_path": LOCAL_MODEL_PATH or "not found",
    }

@app.post("/generate", response_model=GenerationResponse)
async def generate_text(request: GenerationRequest):
    """Generate a text completion for ``request.prompt``.

    Returns only the newly generated portion (the prompt is stripped
    by slicing off the input tokens before decoding).

    Raises:
        HTTPException 500: model failed to load, or generation errored.
        HTTPException 503: model is still loading.
    """
    global model, tokenizer, model_load_error

    if model_load_error:
        raise HTTPException(status_code=500, detail=f"模型加载失败: {model_load_error}")

    if model is None or tokenizer is None:
        raise HTTPException(status_code=503, detail="模型未加载，请稍后重试或检查服务状态")

    try:
        # Tokenize the prompt, capped at the model's maximum context length
        inputs = tokenizer(
            request.prompt,
            return_tensors="pt",
            padding=True,
            truncation=True,
            max_length=min(request.max_length, tokenizer.model_max_length),
            add_special_tokens=True,
        )

        # Move input tensors to whichever device the model lives on
        inputs = {k: v.to(model.device) for k, v in inputs.items()}

        with torch.no_grad():
            outputs = model.generate(
                **inputs,  # input_ids and attention_mask
                max_length=request.max_length,
                temperature=request.temperature,
                max_new_tokens=None,  # explicit: max_length bounds the TOTAL length
                top_p=request.top_p,
                do_sample=True,
                pad_token_id=tokenizer.pad_token_id,
                eos_token_id=tokenizer.eos_token_id,
                repetition_penalty=1.1,   # discourage verbatim repetition
                no_repeat_ngram_size=3,   # forbid repeating any 3-gram
            )

        # Decode only the newly generated tokens; slicing by input length is
        # safer than string-prefix stripping. (The previous version also
        # decoded the full sequence first and threw that result away.)
        input_length = inputs["input_ids"].shape[1]
        generated_tokens = outputs[0][input_length:]
        generated_text = tokenizer.decode(generated_tokens, skip_special_tokens=True).strip()

        return GenerationResponse(prompt=request.prompt, generated_text=generated_text)
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"文本生成失败: {str(e)}")

@app.get("/health")
async def health_check():
    """Health-check endpoint: 503 while loading or after a load failure."""
    if model is None:
        # Distinguish a failed load from one that is still in progress.
        if model_load_error:
            raise HTTPException(status_code=503, detail=f"模型加载失败: {model_load_error}")
        raise HTTPException(status_code=503, detail="模型仍在加载中，请稍后重试")
    return {"status": "healthy", "model_loaded": True, "model_path": LOCAL_MODEL_PATH}

if __name__ == "__main__":
    # Development entry point: serve on all interfaces, port 4061
    uvicorn.run(app, host="0.0.0.0", port=4061)