import argparse
from contextlib import asynccontextmanager

import torch
import uvicorn
from fastapi import FastAPI, HTTPException  # , Request
from fastapi.responses import JSONResponse
from pydantic import BaseModel
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import GenerationConfig


@asynccontextmanager
async def lifespan(_: FastAPI):
    """Application lifespan: load the model on startup, release it on shutdown.

    Startup configuration (checkpoint path, CPU-only flag) comes from the
    command line via parse_args().
    """
    global model, tokenizer, gen_config

    try:
        args = parse_args()
        print(f"Loading model from {args.checkpoint}...")
        load_model(args.checkpoint, args.cpu_only)
        print("Model loaded successfully")
        yield
    finally:
        print("Cleaning up resources...")
        # Rebind the globals to None rather than `del`-ing them: `del` removes
        # the global *names*, so any request still in flight during shutdown
        # (e.g. /health) would raise NameError instead of taking the intended
        # "model not loaded" path. Dropping the references frees the objects
        # just the same.
        model = None
        tokenizer = None
        gen_config = None
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
        print("Resources released")


app = FastAPI(lifespan=lifespan)

# Global model, tokenizer, and generation config; populated by load_model()
# during the lifespan startup phase and cleared again on shutdown.
model = None
tokenizer = None
gen_config = None


# Pydantic request models
class ChatRequest(BaseModel):
    """Request body for /modelchat: a query plus prior conversation turns."""

    query: str
    history: list = []  # list of (query, response) pairs, e.g. [(q1, r1), (q2, r2), ...]


class TopicRequest(BaseModel):
    """Request body for the content-generation endpoints: a single topic string."""

    topic: str


# System prompts keyed by task type. Each generation endpoint prepends the
# matching prompt to the user-supplied topic before calling the model:
#   generateppt     - structured PPT outline (Markdown, pages start with '##')
#   generateoutline - detailed content outline (title/subtitles/key points)
#   generateexam    - exam paper (multiple choice, fill-in, short answer)
SYSTEM_PROMPTS = {
    "generateppt": "你是一个专业的PPT设计师，请根据用户提供的内容生成一份结构化的PPT大纲，包含标题页、目录和各章节的主要内容。格式要求：使用Markdown格式，每页内容以'##'开头",
    "generateoutline": "你是一个专业的内容策划师，请根据用户提供的主题生成一份详细的内容大纲。要求：包含主标题、子标题和核心要点",
    "generateexam": "你是一个资深教育专家，请根据用户提供的知识点生成一份完整的考试试卷。要求：包含选择题、填空题和简答题",
}


def load_model(checkpoint_path="Qwen/Qwen-7B-Chat", cpu_only=False):
    """Load tokenizer, model, and generation config into the module globals.

    Args:
        checkpoint_path: Hugging Face hub name or local path of the checkpoint.
        cpu_only: when True, force everything onto the CPU; otherwise let
            transformers place the weights automatically ("auto" device map).
    """
    global model, tokenizer, gen_config

    print(f"Loading tokenizer from {checkpoint_path}...")
    tokenizer = AutoTokenizer.from_pretrained(
        checkpoint_path, trust_remote_code=True, resume_download=True
    )

    print(f"Loading model from {checkpoint_path}...")
    model = AutoModelForCausalLM.from_pretrained(
        checkpoint_path,
        device_map="cpu" if cpu_only else "auto",
        trust_remote_code=True,
        resume_download=True,
    ).eval()

    print("Loading generation config...")
    gen_config = GenerationConfig.from_pretrained(
        checkpoint_path, trust_remote_code=True, resume_download=True
    )
    print("Model and tokenizer loaded successfully")


def _ensure_model_loaded():
    """Return (model, tokenizer, gen_config), raising if any is still unset."""
    if any(obj is None for obj in (model, tokenizer, gen_config)):
        raise RuntimeError(
            "Model not loaded. Please check if model was loaded during startup."
        )
    return model, tokenizer, gen_config


@app.post("/modelchat")
async def model_chat(chat_request: ChatRequest):
    """Multi-turn chat endpoint: run one query against the loaded model."""
    mdl, tok, cfg = _ensure_model_loaded()

    if not chat_request.query:
        raise HTTPException(status_code=400, detail="Query cannot be empty")

    prior_turns = chat_request.history or []
    try:
        # Delegate generation to the model's built-in chat helper.
        reply, new_history = mdl.chat(
            tok, chat_request.query, history=prior_turns, generation_config=cfg
        )
    except Exception as e:
        # Surface any generation failure as a 500 with the error text.
        raise HTTPException(status_code=500, detail=str(e))
    return {"response": reply, "history": new_history}


@app.post("/generateppt")
async def generate_ppt(ppt_request: TopicRequest):
    """Generate a structured PPT outline for the requested topic."""
    result = await _generate_content("generateppt", ppt_request.topic)
    return result


@app.post("/generateoutline")
async def generate_outline(outline_request: TopicRequest):
    """Generate a detailed content outline for the requested topic."""
    result = await _generate_content("generateoutline", outline_request.topic)
    return result


@app.post("/generateexam")
async def generate_exam(exam_request: TopicRequest):
    """Generate an exam paper for the requested topic."""
    result = await _generate_content("generateexam", exam_request.topic)
    return result


async def _generate_content(task_type: str, topic: str):
    """Generate content for *topic* using the system prompt for *task_type*.

    Args:
        task_type: key into SYSTEM_PROMPTS ("generateppt", "generateoutline",
            or "generateexam").
        topic: user-supplied subject; must be non-empty.

    Returns:
        dict with the task type, the topic, and the generated content.

    Raises:
        HTTPException: 400 for an empty topic or unknown task type,
            500 if generation fails.
    """
    # NOTE: the original placed the docstring *after* this call, making it a
    # dead string expression; it now sits first so it is a real docstring.
    model_obj, tokenizer_obj, gen_config_obj = _ensure_model_loaded()

    if not topic:
        raise HTTPException(status_code=400, detail="Topic cannot be empty")

    # Guard against an unknown task type instead of letting the KeyError
    # surface as an unhandled 500.
    system_prompt = SYSTEM_PROMPTS.get(task_type)
    if system_prompt is None:
        raise HTTPException(status_code=400, detail=f"Unknown task type: {task_type}")

    # Combine the system prompt and the user topic into one query.
    query = f"{system_prompt}\n主题：{topic}"

    try:
        response, _ = model_obj.chat(
            tokenizer_obj, query, history=[], generation_config=gen_config_obj
        )
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e)) from e
    return {"task": task_type, "topic": topic, "content": response}


@app.get("/health")
def health_check():
    """Health-check endpoint reporting whether the model is loaded."""
    try:
        _ensure_model_loaded()
    except Exception as e:
        # Returning a `(body, 500)` tuple does NOT set the status code in
        # FastAPI — the tuple would be serialized as a 200 JSON array.
        # Use JSONResponse to return the 500 explicitly.
        return JSONResponse(
            status_code=500,
            content={"status": "unhealthy", "model_loaded": False, "error": str(e)},
        )
    return {"status": "healthy", "model_loaded": True}


def parse_args():
    """Parse the command-line options for the API service.

    Returns:
        argparse.Namespace with checkpoint, cpu_only, port, and host.
    """
    parser = argparse.ArgumentParser(description="Qwen-Chat API Service")
    parser.add_argument(
        "-c", "--checkpoint",
        default="Qwen/Qwen-7B-Chat",
        help="Checkpoint name or path",
    )
    parser.add_argument("--cpu-only", action="store_true", help="Run with CPU only")
    parser.add_argument("--port", type=int, default=45678, help="Port to run the server")
    parser.add_argument("--host", type=str, default="0.0.0.0", help="Host to run the server")
    return parser.parse_args()


if __name__ == "__main__":
    # Only host/port are used here; parse_args() is called a second time
    # inside the lifespan handler to obtain the checkpoint settings.
    args = parse_args()
    uvicorn.run(app, host=args.host, port=args.port)
