#!/usr/bin/env python3
"""
Mini LLM服务主入口

此项目支持多种模型启动方式：
1. 基础模型服务: python main_base.py
2. 微调模型服务: python main_finetuned.py

推荐使用以下npm命令启动服务：
- npm run start-base     # 启动基础模型服务(端口4061)
- npm run start-finetuned # 启动微调模型服务(端口4062)

注意：直接运行此文件将显示使用说明。
"""

from fastapi import FastAPI, HTTPException
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import uvicorn
from pydantic import BaseModel
import os
from fastapi.middleware.cors import CORSMiddleware
from peft import PeftModel
import warnings
warnings.filterwarnings("ignore")

app = FastAPI(title="Mini LLM API", description="基于Qwen的小型LLM服务")

# Add CORS middleware so browser front-ends on other origins can call the API.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # wide open for development; restrict to known domains in production
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Ensure the offload folder exists (used by accelerate/device_map to spill
# model weights that do not fit in GPU/CPU memory).
os.makedirs("./offload", exist_ok=True)

# Module-level state shared by the startup hook and the request handlers.
model = None            # currently active model (base or PEFT-wrapped)
tokenizer = None        # tokenizer matching the base model
finetuned_model = None  # cached PEFT model so switching back is cheap
model_load_error = None  # last load failure message, reported by / and /health

# Model paths inside the project tree.
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
PROJECT_MODEL_PATH = os.path.join(PROJECT_ROOT, "models", "Qwen1.5-1.8B")
FINETUNED_MODEL_PATH = os.path.join(PROJECT_ROOT, "training", "models", "finetuned_model_tianqi")

# Hugging Face cache location for the same base model.
HF_MODEL_PATH = os.path.expanduser("~/.cache/huggingface/hub/models--Qwen--Qwen1.5-1.8B")

# Resolved local model path (project copy preferred over the HF cache);
# stays None when neither location holds a usable model.
LOCAL_MODEL_PATH = None

# Prefer a model checked into the project tree. A directory only counts as a
# usable model when it contains a config.json directly (plain model files,
# not the Hugging Face hub cache layout).
if os.path.exists(PROJECT_MODEL_PATH):
    if os.path.exists(os.path.join(PROJECT_MODEL_PATH, "config.json")):
        LOCAL_MODEL_PATH = PROJECT_MODEL_PATH

# Fall back to the Hugging Face cache: resolve refs/main to a commit hash,
# then use the matching snapshot directory if it exists.
if LOCAL_MODEL_PATH is None and os.path.exists(HF_MODEL_PATH):
    refs_file = os.path.join(HF_MODEL_PATH, "refs", "main")
    if os.path.exists(refs_file):
        with open(refs_file, 'r') as fh:
            commit = fh.read().strip()
        snapshot_dir = os.path.join(HF_MODEL_PATH, "snapshots", commit)
        if os.path.exists(snapshot_dir):
            LOCAL_MODEL_PATH = snapshot_dir

class GenerationRequest(BaseModel):
    """Request body for POST /generate."""
    prompt: str                  # input text to complete
    max_length: int = 100        # total length budget passed to generate() (includes prompt tokens)
    temperature: float = 0.7     # sampling temperature
    top_p: float = 0.9           # nucleus sampling threshold
    use_finetuned: bool = False  # when True, serve from the PEFT fine-tuned model

class GenerationResponse(BaseModel):
    """Response body for POST /generate: the prompt and its completion."""
    prompt: str          # echo of the input prompt
    generated_text: str  # generated continuation with the prompt removed

@app.on_event("startup")
async def load_model():
    """Load the base model and tokenizer when the application starts.

    Only a locally available copy of the model is accepted (no network
    download). On failure the error message is stored in
    ``model_load_error`` instead of raising, so the API still starts and
    can report its degraded status via ``/`` and ``/health``.
    """
    global model, tokenizer, model_load_error, LOCAL_MODEL_PATH

    print("正在加载模型...")
    try:
        # Guard clause: refuse to proceed without a resolved local path.
        if not (LOCAL_MODEL_PATH and os.path.exists(LOCAL_MODEL_PATH)):
            raise ValueError("未找到本地模型路径。请确保已下载Qwen1.5-1.8B模型到本地")

        model_path = LOCAL_MODEL_PATH
        print(f"从本地路径加载模型: {model_path}")

        model = AutoModelForCausalLM.from_pretrained(
            model_path,
            device_map="balanced",      # spread layers across CPU/GPU automatically
            offload_folder="./offload", # spill weights that don't fit in memory
            torch_dtype=torch.float16,
            low_cpu_mem_usage=True,
            local_files_only=True,      # never hit the network
        )
        tokenizer = AutoTokenizer.from_pretrained(model_path, local_files_only=True)
        print("基础模型加载完成!")
    except Exception as e:
        # Record the failure but let the app come up so clients get a
        # meaningful status instead of a dead server.
        model_load_error = str(e)
        print(f"模型加载失败: {e}")
        print("提示: 请检查网络连接或确保模型已下载到本地")

def load_finetuned_model():
    """Switch the active model to the fine-tuned (PEFT/LoRA) model.

    The PEFT-wrapped model is cached in ``finetuned_model`` so repeated
    switches do not reload the adapter weights from disk.

    Raises:
        ValueError: if the fine-tuned model directory does not exist.
        RuntimeError: if the base model has not been loaded yet.
    """
    global model, finetuned_model, tokenizer

    if not os.path.exists(FINETUNED_MODEL_PATH):
        raise ValueError(f"微调模型路径不存在: {FINETUNED_MODEL_PATH}")

    # Fast path: reuse the cached PEFT model from a previous switch.
    if finetuned_model is not None:
        model = finetuned_model
        print("使用已缓存的微调模型")
        return

    print(f"正在加载微调模型: {FINETUNED_MODEL_PATH}")
    # BUG FIX: the original called the *async* startup hook ``load_model()``
    # here without awaiting it, which merely created a coroutine object and
    # never loaded anything — PeftModel.from_pretrained(None, ...) would then
    # fail confusingly. The base model is loaded by the FastAPI startup
    # event; fail fast with a clear error if that has not happened.
    if model is None:
        raise RuntimeError("基础模型尚未加载，无法加载微调模型")

    # Wrap the loaded base model with the fine-tuned adapter weights.
    finetuned_model = PeftModel.from_pretrained(
        model,
        FINETUNED_MODEL_PATH,
        torch_dtype=torch.float16
    )

    model = finetuned_model
    print("微调模型加载完成!")

@app.get("/")
async def root():
    """Landing endpoint: report service, model status, and available paths."""
    # Derive a human-readable load state from the module-level globals.
    if model is not None:
        status = "loaded"
    elif model_load_error:
        status = "load failed"
    else:
        status = "loading"

    return {
        "message": "Mini LLM API 已启动",
        "model": "Qwen/Qwen1.5-1.8B",
        "model_status": status,
        "model_path": LOCAL_MODEL_PATH if LOCAL_MODEL_PATH else "not found",
        "finetuned_model_available": os.path.exists(FINETUNED_MODEL_PATH),
    }

@app.post("/generate", response_model=GenerationResponse)
async def generate_text(request: GenerationRequest):
    """Generate a text completion for ``request.prompt``.

    Switches between the base and fine-tuned model on demand according to
    ``request.use_finetuned``, then samples a continuation.

    Raises:
        HTTPException: 500 if a model failed to load or generation errored,
            503 if the model is still loading.
    """
    global model, tokenizer, model_load_error, finetuned_model

    if model_load_error:
        raise HTTPException(status_code=500, detail=f"模型加载失败: {model_load_error}")

    if model is None or tokenizer is None:
        raise HTTPException(status_code=503, detail="模型未加载，请稍后重试或检查服务状态")

    # Switch to the fine-tuned model if requested and not already active.
    if request.use_finetuned and not isinstance(model, PeftModel):
        try:
            load_finetuned_model()
        except Exception as e:
            raise HTTPException(status_code=500, detail=f"微调模型加载失败: {str(e)}")

    # Switch back to the base model if requested and a PEFT model is active.
    if not request.use_finetuned and isinstance(model, PeftModel):
        try:
            print("切换回基础模型...")
            # Reload the base model from disk (the PEFT wrapper mutated it).
            base_model = AutoModelForCausalLM.from_pretrained(
                LOCAL_MODEL_PATH,
                device_map="balanced",
                offload_folder="./offload",
                torch_dtype=torch.float16,
                low_cpu_mem_usage=True,
                local_files_only=True
            )
            model = base_model
            print("已切换回基础模型!")
        except Exception as e:
            raise HTTPException(status_code=500, detail=f"基础模型加载失败: {str(e)}")

    try:
        # Encode the prompt and move it to the model's device.
        inputs = tokenizer.encode(request.prompt, return_tensors="pt", add_special_tokens=False)
        inputs = inputs.to(model.device)

        # BUG FIX: Qwen tokenizers may not define a pad token; fall back to
        # EOS so generate() does not fail on a None pad_token_id.
        pad_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id

        with torch.no_grad():
            outputs = model.generate(
                inputs,
                max_length=request.max_length,  # NOTE: budget includes prompt tokens
                temperature=request.temperature,
                top_p=request.top_p,
                do_sample=True,
                pad_token_id=pad_id,
                eos_token_id=tokenizer.eos_token_id
            )

        # BUG FIX: strip the prompt at the *token* level. The old
        # ``generated_text[len(request.prompt):]`` character slice is wrong
        # whenever decode(encode(prompt)) does not reproduce the prompt
        # string byte-for-byte (common with BPE tokenizers), which cut the
        # completion at the wrong position.
        new_tokens = outputs[0][inputs.shape[-1]:]
        generated_text = tokenizer.decode(new_tokens, skip_special_tokens=True).strip()

        return GenerationResponse(prompt=request.prompt, generated_text=generated_text)
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"文本生成失败: {str(e)}")

@app.get("/health")
async def health_check():
    """Liveness probe: 200 when the model is ready, 503 otherwise."""
    if model is None:
        # Distinguish a permanent load failure from a load still in progress.
        if model_load_error:
            raise HTTPException(status_code=503, detail=f"模型加载失败: {model_load_error}")
        raise HTTPException(status_code=503, detail="模型仍在加载中，请稍后重试")
    return {"status": "healthy", "model_loaded": True, "model_path": LOCAL_MODEL_PATH}

if __name__ == "__main__":
    # Running this file directly only prints usage instructions; the actual
    # services are started via main_base.py / main_finetuned.py.
    usage = [
        "Mini LLM服务",
        "=" * 50,
        "请使用以下方式启动服务:",
        "1. 基础模型服务: python main_base.py (端口4061)",
        "2. 微调模型服务: python main_finetuned.py (端口4062)",
        "",
        "或者使用npm命令:",
        "- npm run start-base     # 启动基础模型服务",
        "- npm run start-finetuned # 启动微调模型服务",
        "",
        "服务启动后，可通过以下方式测试:",
        "- 基础模型: curl -X POST http://localhost:4061/generate -H \"Content-Type: application/json\" -d '{\"prompt\":\"你好\"}'",
        "- 微调模型: curl -X POST http://localhost:4062/generate -H \"Content-Type: application/json\" -d '{\"prompt\":\"你好\"}'",
    ]
    print("\n".join(usage))
