import argparse
import os
import shutil
import subprocess
import uuid
from contextlib import asynccontextmanager

import torch
import uvicorn
import whisper
from fastapi import FastAPI, File, HTTPException, UploadFile
from llama_index.core import (Settings, SimpleDirectoryReader, StorageContext,
                              VectorStoreIndex, load_index_from_storage)
from llama_index.core.node_parser import SentenceSplitter  # document chunking parser
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.llms.huggingface import HuggingFaceLLM
from llama_index.readers.file import PyMuPDFReader
from pydantic import BaseModel
from sse_starlette.sse import EventSourceResponse
from transformers import (AutoModelForCausalLM, AutoTokenizer,
                          TextIteratorStreamer, TextStreamer)
from transformers.generation.configuration_utils import GenerationConfig


@asynccontextmanager
async def lifespan(_: FastAPI):
    """FastAPI lifespan hook: load the LLM and RAG resources on startup and
    release them on shutdown.
    """
    global data_dir

    # Read CLI options (checkpoint, device, RAG directories, chunking).
    args = parse_args()

    # FIX: honor --rag-data-dir. It was parsed but never applied, so the
    # module-level default "./rag_data/" was always used.
    data_dir = args.rag_data_dir

    # Load the main chat model, tokenizer and generation config.
    print(f"Loading main model from {args.checkpoint}...")
    load_main_model(args.checkpoint, args.cpu_only)

    # Build or reload the vector index and retriever.
    print("Loading RAG resources...")
    load_rag_resources(
        persist_dir=args.rag_persist_dir,
        device="cpu" if args.cpu_only else "cuda",
        chunk_size=args.chunk_size,
        chunk_overlap=args.chunk_overlap,
    )

    print("All resources loaded successfully")
    yield

    # Shutdown: release model/RAG resources and clear the CUDA cache.
    print("Cleaning up resources...")
    cleanup_resources()
    print("Resources released")


app = FastAPI(lifespan=lifespan)

# Global state, populated by lifespan() at startup.
model = None  # main causal LM (set by load_main_model)
tokenizer = None  # tokenizer for the main model
gen_config = None  # generation config for the main model
retriever = None  # LlamaIndex retriever (set by load_rag_resources)
rag_embed_model = None  # embedding model backing the RAG index
data_dir = "./rag_data/"  # directory scanned for RAG source documents


# Pydantic request models
class ChatRequest(BaseModel):
    """Request body for /modelchat and /stream_modelchat."""

    query: str  # the user's message
    history: list = []  # format: [(query1, response1), (query2, response2), ...]


class TopicRequest(BaseModel):
    """Request body for the content-generation endpoints."""

    topic: str  # subject to generate PPT/outline/exam content for


# System prompt configuration
# Maps task type -> (Chinese) system prompt prepended to the user's topic.
SYSTEM_PROMPTS = {
    "generateppt": "你是一个专业的PPT设计师，请根据用户提供的内容生成一份结构化的PPT大纲，包含标题页、目录和各章节的主要内容。格式要求：使用Markdown格式，每页内容以'##'开头",
    "generateoutline": "你是一个专业的内容策划师，请根据用户提供的主题生成一份详细的内容大纲。要求：包含主标题、子标题和核心要点",
    "generateexam": "你是一个资深教育专家，请根据用户提供的知识点生成一份完整的考试试卷。要求：保证格式一致，格式: {问题内容} A. {选项} B. {选项} C. {选项} D. {选项}",
    # Alternative exam prompt, kept for reference:
    # "generateexam": "你是一个资深教育专家，请根据用户提供的知识点生成一份完整的考试试卷。要求：包含选择题、判断题和填空题，并给出正确答案",
}


def load_main_model(checkpoint_path="Qwen/Qwen-7B-Chat", cpu_only=False):
    """Load the main chat model, its tokenizer and generation config.

    Populates the module-level ``model``, ``tokenizer`` and ``gen_config``
    globals from the given checkpoint.
    """
    global model, tokenizer, gen_config

    print(f"Loading tokenizer from {checkpoint_path}...")
    tokenizer = AutoTokenizer.from_pretrained(
        checkpoint_path, trust_remote_code=True, resume_download=True
    )
    # Decoder-only generation: pad with EOS on the left.
    tokenizer.pad_token = tokenizer.eos_token
    tokenizer.padding_side = "left"

    print(f"Loading model from {checkpoint_path}...")
    placement = "cpu" if cpu_only else "auto"
    model = AutoModelForCausalLM.from_pretrained(
        checkpoint_path,
        device_map=placement,
        trust_remote_code=True,
        resume_download=True,
    ).eval()

    print("Loading generation config...")
    gen_config = GenerationConfig.from_pretrained(
        checkpoint_path, trust_remote_code=True, resume_download=True
    )
    print("Main model and tokenizer loaded successfully")


def load_rag_resources(
    persist_dir: str,
    device: str = "cuda",
    chunk_size: int = 256,
    chunk_overlap: int = 100,
):
    """Load RAG resources: embedding model, document index and retriever.

    Reuses the already-loaded main LLM/tokenizer, builds (or reloads from
    *persist_dir*) a vector index over the documents in the module-level
    ``data_dir``, and stores the retriever in the ``retriever`` global.

    Args:
        persist_dir: directory where the vector index is persisted.
        device: device for the embedding model ("cuda" or "cpu").
        chunk_size: maximum chunk size (tokens) for document splitting.
        chunk_overlap: overlap (tokens) between consecutive chunks.
    """
    global model, tokenizer, retriever, rag_embed_model

    print("Initializing RAG resources...")
    print(
        f"Using chunk_size={chunk_size}, chunk_overlap={chunk_overlap}"
    )

    # Make sure both the document and index directories exist.
    os.makedirs(data_dir, exist_ok=True)
    os.makedirs(persist_dir, exist_ok=True)

    # 1. Load the embedding model.
    # FIX: dropped the hard-coded user-specific absolute cache path
    # ("/home/rthetapi/.cache/..."), which broke on any other machine; the
    # default Hugging Face cache (configurable via HF_HOME) is used instead.
    print(f"Loading RAG embedding model on {device}...")
    rag_embed_model = HuggingFaceEmbedding(
        model_name="BAAI/bge-small-zh",
        device=device,
    )

    # 2. Apply global LlamaIndex settings.
    Settings.embed_model = rag_embed_model

    # Configure the document chunking strategy.
    print("Setting up document chunking...")
    node_parser = SentenceSplitter(
        chunk_size=chunk_size,
        chunk_overlap=chunk_overlap,
        paragraph_separator="\n\n",
        tokenizer=tokenizer,  # use the main model's tokenizer for consistency
    )
    Settings.node_parser = node_parser

    # Reuse the main service's LLM (avoids loading the large model twice).
    Settings.llm = HuggingFaceLLM(
        model=model,
        tokenizer=tokenizer,
        max_new_tokens=512,
        device_map="auto",
        model_kwargs={"trust_remote_code": True},
        tokenizer_kwargs={"trust_remote_code": True},
    )

    # 3. Load the index from disk if it exists, otherwise build it.
    index_file = os.path.join(persist_dir, "docstore.json")

    if os.path.exists(index_file):
        print(f"Loading existing RAG index from {persist_dir}...")
        storage_context = StorageContext.from_defaults(persist_dir=persist_dir)
        index = load_index_from_storage(storage_context)
    else:
        print(f"Creating new RAG index from documents in {data_dir}...")
        loader = PyMuPDFReader()
        reader = SimpleDirectoryReader(
            input_dir=data_dir,
            file_extractor={".pdf": loader},
            recursive=True,
            required_exts=[
                ".pdf",
                ".txt",
                ".docx",
                ".pptx",
                ".xlsx",
            ],  # supported file types
            filename_as_id=True,  # use the file name as the document ID
        )
        documents = reader.load_data(show_progress=True)

        print(f"Splitting {len(documents)} documents into chunks...")
        for doc in documents:
            # FIX: tolerate documents whose metadata lacks "file_name"
            # (previously a KeyError would abort indexing).
            name = doc.metadata.get("file_name", "<unknown>")
            print(f"Document: {name} - {len(doc.text)} characters")

        # Build the index (node_parser chunking is applied automatically).
        index = VectorStoreIndex.from_documents(documents, show_progress=True)

        # Persist the index for future startups.
        index.storage_context.persist(persist_dir=persist_dir)
        print(f"RAG index saved to {persist_dir}")

    # 4. Create the retriever.
    retriever = index.as_retriever(similarity_top_k=5)
    print("RAG retrieval engine ready")


def cleanup_resources():
    """Release model and RAG resources and clear the CUDA cache.

    Resets every module-level resource global to ``None`` so that
    ``_ensure_model_loaded`` reports the service as unloaded afterwards.
    """
    # BUG FIX: ``gen_config`` was missing from the ``global`` declaration, so
    # the old ``gen_config = None`` assignment only created a function-local
    # variable and the module-level generation config was never cleared.
    global model, tokenizer, gen_config, retriever, rag_embed_model

    print("Releasing resources...")
    # Rebinding to None drops the last strong references; a separate ``del``
    # before the rebind (as the original did) added nothing.
    model = None
    tokenizer = None
    gen_config = None
    retriever = None
    rag_embed_model = None

    # Return freed GPU memory to the driver.
    if torch.cuda.is_available():
        torch.cuda.empty_cache()


def _ensure_model_loaded():
    """Return the loaded ``(model, tokenizer, gen_config)`` triple.

    Raises:
        RuntimeError: if any of the three globals is still ``None``.
    """
    if any(obj is None for obj in (model, tokenizer, gen_config)):
        raise RuntimeError(
            "Main model not loaded. Please check if model was loaded during startup."
        )
    return model, tokenizer, gen_config


@app.post("/modelchat")
async def model_chat(chat_request: ChatRequest):
    """Non-streaming chat endpoint.

    Returns the model's reply plus the updated conversation history.

    Raises:
        HTTPException: 400 for an empty query, 500 on generation failure.
    """
    model_obj, tokenizer_obj, gen_config_obj = _ensure_model_loaded()
    query = chat_request.query
    history = chat_request.history or []

    if not query:
        raise HTTPException(status_code=400, detail="Query cannot be empty")

    try:
        # Qwen-style chat API: returns (response, updated_history).
        response, updated_history = model_obj.chat(
            tokenizer_obj, query, history=history, generation_config=gen_config_obj
        )
        return {"response": response, "history": updated_history}
    except Exception as e:
        # FIX: chain the original exception (ruff B904) for debuggability.
        raise HTTPException(status_code=500, detail=str(e)) from e


async def predict(query, history):
    """Async generator yielding new response text from the streaming chat API."""
    model_obj, tokenizer_obj, gen_config_obj = _ensure_model_loaded()

    streamer = TextIteratorStreamer(tokenizer_obj, skip_prompt=True)
    emitted = ""
    # chat_stream yields cumulative text; forward only the newly added suffix.
    for partial in model_obj.chat_stream(
        tokenizer_obj,
        query=query,
        history=history,
        generation_config=gen_config_obj,
        streamer=streamer,
    ):
        delta = partial[len(emitted):]
        emitted = partial
        yield delta


@app.post("/stream_modelchat")
async def stream_model_chat(chat_request: ChatRequest):
    """Stream chat completions to the client over Server-Sent Events."""
    token_stream = predict(chat_request.query, chat_request.history)
    return EventSourceResponse(token_stream)


@app.post("/generateppt")
async def generate_ppt(ppt_request: TopicRequest):
    """Generate a PPT outline for the given topic (RAG-augmented)."""
    return await _generate_content("generateppt", ppt_request.topic)


@app.post("/generateoutline")
async def generate_outline(outline_request: TopicRequest):
    """Generate a content outline for the given topic (RAG-augmented)."""
    return await _generate_content("generateoutline", outline_request.topic)


@app.post("/generateexam")
async def generate_exam(exam_request: TopicRequest):
    """Generate an exam paper for the given topic (no RAG retrieval)."""
    return await _generate_content_without_rag("generateexam", exam_request.topic)


async def _generate_content_without_rag(task_type: str, topic: str):
    """Generate content for *task_type* about *topic* without RAG retrieval.

    FIX: the original docstring sat after the first statement, making it a
    dead string literal rather than a docstring.

    Args:
        task_type: key into ``SYSTEM_PROMPTS`` selecting the system prompt.
        topic: user-supplied subject; must be non-empty.

    Returns:
        dict with the task type, topic and generated content.

    Raises:
        HTTPException: 400 for an empty topic, 500 on generation failure.
    """
    # Validate the cheap precondition before checking model state.
    if not topic:
        raise HTTPException(status_code=400, detail="Topic cannot be empty")

    model_obj, tokenizer_obj, gen_config_obj = _ensure_model_loaded()

    # Combine the task's system prompt with the user's topic.
    system_prompt = SYSTEM_PROMPTS[task_type]
    query = f"{system_prompt}\n主题：{topic}"

    try:
        print(f"Generating content for {task_type}...")
        response, _ = model_obj.chat(
            tokenizer_obj, query, history=[], generation_config=gen_config_obj
        )
        print("Content generation successful")
        return {"task": task_type, "topic": topic, "content": response}
    except Exception as e:
        # FIX: chain the original exception (ruff B904) for debuggability.
        raise HTTPException(status_code=500, detail=str(e)) from e


async def _generate_content(task_type: str, topic: str):
    """Generate content for *task_type* about *topic*, augmented with RAG.

    Retrieves the top-k chunks most relevant to *topic*, prepends them to the
    task's system prompt and asks the main model to generate.

    Args:
        task_type: key into ``SYSTEM_PROMPTS`` selecting the system prompt.
        topic: user-supplied subject; must be non-empty.

    Returns:
        dict with the task type, topic, number of context snippets used and
        the generated content.

    Raises:
        HTTPException: 400 for an empty topic, 500 if RAG is not loaded or
            generation fails.
    """
    # FIX: removed the needless ``global`` statement — this function only
    # reads the module globals, never assigns them.
    model_obj, tokenizer_obj, gen_config_obj = _ensure_model_loaded()

    if not topic:
        raise HTTPException(status_code=400, detail="Topic cannot be empty")

    # RAG must be initialized before we can retrieve context.
    if retriever is None:
        raise HTTPException(status_code=500, detail="RAG resources not loaded")

    print(f"Retrieving relevant information for topic: {topic}")
    # RAG retrieval: fetch the most relevant knowledge snippets.
    retrieved_nodes = retriever.retrieve(topic)
    context_text = "\n\n".join(
        f"[相关文档 {i+1}]: {node.node.get_content()}"
        for i, node in enumerate(retrieved_nodes)
    )
    print(f"Retrieved {len(retrieved_nodes)} relevant chunks from documents")

    # Combine the system prompt, topic and retrieved context.
    system_prompt = SYSTEM_PROMPTS[task_type]
    enhanced_prompt = (
        f"{system_prompt}\n"
        f"主题：{topic}\n"
        f"### 相关背景知识：\n{context_text}\n"
        "### 请根据以上信息生成内容："
    )

    try:
        print(f"Generating content for {task_type}...")
        response, _ = model_obj.chat(
            tokenizer_obj, enhanced_prompt, history=[], generation_config=gen_config_obj
        )
        print("Content generation successful")

        return {
            "task": task_type,
            "topic": topic,
            "context_snippets": len(retrieved_nodes),
            "content": response,
        }
    except Exception as e:
        print(f"Error during content generation: {str(e)}")
        # FIX: chain the original exception (ruff B904) for debuggability.
        raise HTTPException(status_code=500, detail=str(e)) from e


def extract_audio(video_path: str, output_path: str):
    """Extract mono 16 kHz 16-bit PCM audio from *video_path* into *output_path*.

    Raises:
        HTTPException: 500 if the ffmpeg binary is not installed.
        RuntimeError: if ffmpeg exits with a non-zero status (stderr included).
    """
    cmd = [
        "ffmpeg",
        "-i",
        video_path,
        "-vn",  # drop the video stream
        "-acodec",
        "pcm_s16le",  # 16-bit PCM
        "-ar",
        "16000",  # 16 kHz sample rate
        "-ac",
        "1",  # mono
        output_path,
    ]
    try:
        subprocess.run(cmd, capture_output=True, text=True, check=True)
    except FileNotFoundError:
        raise HTTPException(status_code=500, detail="FFmpeg not installed.")
    except subprocess.CalledProcessError as e:
        # FIX: with check=True the old ``if result.returncode != 0`` branch
        # was dead code, and CalledProcessError escaped unhandled; surface
        # ffmpeg's stderr in the error instead.
        raise RuntimeError(f"FFmpeg failed: {e.stderr}") from e


@app.post("/upload")
async def upload_file(file: UploadFile = File(...)):
    """Accept a media or text upload and add its text content to ``data_dir``.

    Audio/video files are transcribed with Whisper; txt/md/pdf files are
    copied as-is. The resulting .txt/.pdf lands in ``data_dir`` for later
    RAG indexing.

    Raises:
        HTTPException: 400 for a missing filename or unsupported extension,
            500 for any other processing failure.
    """
    media_exts = {"mp4", "mp3", "flac", "aac", "m4a", "mov", "avi", "flv", "mkv"}
    text_exts = {"txt", "md", "pdf"}

    try:
        upload_dir = "uploads"
        os.makedirs(upload_dir, exist_ok=True)

        filename = file.filename
        if not filename:
            raise HTTPException(status_code=400, detail="Missing filename")

        extension = filename.split(".")[-1].lower()
        file_id = str(uuid.uuid4())
        original_path = os.path.join(upload_dir, f"{file_id}.{extension}")
        with open(original_path, "wb") as buffer:
            shutil.copyfileobj(file.file, buffer)

        print(
            f"Saved uploaded file: {original_path} ({os.path.getsize(original_path) / 1024:.2f} KB)"
        )

        audio_path = os.path.join(upload_dir, f"{file_id}.wav")
        output_path = os.path.join(data_dir, f"{file_id}.txt")
        if extension in media_exts:
            extract_audio(original_path, audio_path)
            # FIX: the Whisper model was bound to a local named ``model``,
            # shadowing the module-level LLM global — confusing and fragile.
            whisper_model = whisper.load_model("small")
            result = whisper_model.transcribe(audio_path)
            if isinstance(result["text"], str):
                with open(output_path, "w", encoding="utf-8") as txt_file:
                    txt_file.write(result["text"])
        elif extension in text_exts:
            shutil.copy(original_path, output_path)
        else:
            raise HTTPException(
                status_code=400, detail=f"Unsupported file type: {extension}"
            )
    except HTTPException:
        # BUG FIX: the 400s raised above were previously swallowed by the
        # generic handler below and re-raised as 500s; let them through.
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e)) from e


@app.get("/health")
def health_check():
    """Health-check endpoint reporting model and RAG readiness.

    Returns a healthy payload with status 200, or raises a 500
    HTTPException whose detail carries the unhealthy diagnostics.
    """
    try:
        # Verify the main model triple is loaded.
        _ensure_model_loaded()

        # Verify the RAG resources are loaded.
        if retriever is None or rag_embed_model is None:
            raise RuntimeError("RAG resources not loaded")

        return {"status": "healthy", "model_loaded": True, "rag_ready": True}
    except Exception as e:
        # BUG FIX: the original returned a Flask-style ``(dict, 500)`` tuple,
        # which FastAPI serializes as a JSON array with status 200. Raise an
        # HTTPException so the client actually receives a 500.
        raise HTTPException(
            status_code=500,
            detail={
                "status": "unhealthy",
                "model_loaded": model is not None,
                "rag_ready": retriever is not None,
                "error": str(e),
            },
        ) from e


def parse_args(argv=None):
    """Parse command-line arguments for the service.

    Args:
        argv: optional explicit argument list; ``None`` (the default, and the
            previous behavior) means ``sys.argv[1:]``. Accepting a list makes
            the function testable without touching ``sys.argv``.

    Returns:
        argparse.Namespace with server, model and RAG chunking options.
    """
    parser = argparse.ArgumentParser(description="Qwen-Chat with RAG API Service")
    parser.add_argument(
        "-c",
        "--checkpoint",
        default="Qwen/Qwen-7B-Chat",
        help="模型检查点名称或路径，默认为 'Qwen/Qwen-7B-Chat'",
    )
    parser.add_argument("--cpu-only", action="store_true", help="仅使用CPU模式运行")
    parser.add_argument(
        "--port", type=int, default=45678, help="服务运行的端口号，默认为45678"
    )
    parser.add_argument(
        "--host",
        type=str,
        default="0.0.0.0",
        help="服务绑定的主机地址，默认为 '0.0.0.0'",
    )

    # RAG-related options
    parser.add_argument(
        "--rag-data-dir",
        default="./rag_data/",
        help="RAG文档存储目录，默认为 './rag_data/'",
    )
    parser.add_argument(
        "--rag-persist-dir",
        default="./rag_storage/",
        help="RAG向量索引存储目录，默认为 './rag_storage/'",
    )

    # Document-chunking options
    parser.add_argument(
        "--chunk-size",
        type=int,
        default=256,
        help="文档分块的最大大小（单位：token），默认为256",
    )
    parser.add_argument(
        "--chunk-overlap",
        type=int,
        default=100,
        help="文档分块之间的重叠量（单位：token），默认为100",
    )
    return parser.parse_args(argv)


if __name__ == "__main__":
    # NOTE: parse_args() runs again inside lifespan() at startup; here it
    # only supplies the host/port for uvicorn.
    args = parse_args()
    uvicorn.run(app, host=args.host, port=args.port)
