# -*- coding: utf-8 -*-
# ----------------------------
# @Time    : 2025/4/3 14:52
# @Author  : acedar
# @FileName: rag_server.py
# ----------------------------

from mcp.server.fastmcp import FastMCP

import asyncio
import os
from dotenv import load_dotenv
from typing import List, Dict, Any
from langchain_community.document_loaders import PyPDFLoader, TextLoader
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_openai import OpenAIEmbeddings, ChatOpenAI
from langchain_community.vectorstores import Chroma
from langchain.schema import Document
from langchain.chains import RetrievalQA
from langchain.embeddings.huggingface import HuggingFaceEmbeddings

# Load environment variables from a .env file (e.g. OPENAI_API_KEY, BASE_URL, MODEL, EMBED_MODEL)
load_dotenv()


class RAGSystem:
    """Retrieval-augmented generation over local PDF/TXT documents.

    Wraps an OpenAI-compatible chat model (Qwen via DashScope), a local
    HuggingFace embedding model and a persistent Chroma vector store behind
    two operations: building the knowledge base from files and answering
    questions against it.
    """

    def __init__(self, config):
        """Initialize the LLM, embeddings, vector store and retriever.

        :param config: dict with keys "collection_name", "persist_dir",
            "chunk_size", "chunk_overlap" and optional "retrieve_top_k"
            (defaults to 5).
        """
        self.config = config
        # Qwen (Tongyi Qianwen) chat model through the OpenAI-compatible endpoint.
        self.llm = ChatOpenAI(
            model=os.getenv("MODEL", "qwen-plus"),
            base_url=os.getenv("BASE_URL", "https://dashscope.aliyuncs.com/compatible-mode/v1"),
            api_key=os.getenv("OPENAI_API_KEY")
        )

        # Local HuggingFace embedding model; EMBED_MODEL must name a
        # sentence-transformers-compatible checkpoint.
        self.embeddings = HuggingFaceEmbeddings(
            model_name=os.getenv("EMBED_MODEL")
        )

        # Persistent Chroma collection used as the vector index.
        self.vectorstore = Chroma(
            collection_name=config["collection_name"],
            embedding_function=self.embeddings,
            persist_directory=config["persist_dir"]
        )
        # MMR retrieval trades off relevance against diversity of the hits.
        self.retriever = self.vectorstore.as_retriever(
            search_type="mmr",
            search_kwargs={"k": config.get("retrieve_top_k", 5)}
        )

    def _load_documents(self, file_paths: List[str]) -> List[Document]:
        """Load local PDF/TXT files into LangChain documents.

        Plain method now: it was ``async`` without any awaits and was called
        without ``await``, so callers received a coroutine object instead of
        the document list. Unsupported extensions are skipped with a notice;
        extensions are matched case-insensitively.
        """
        docs: List[Document] = []
        for path in file_paths:
            lower = path.lower()
            if lower.endswith(".pdf"):
                docs.extend(PyPDFLoader(path).load())
            elif lower.endswith(".txt"):
                docs.extend(TextLoader(path, encoding="utf-8").load())
            else:
                print(f"跳过不支持的文件类型: {path}")
        return docs

    def _chunk_documents(self, docs: List[Document]) -> List[Document]:
        """Split documents into overlapping chunks sized from ``self.config``."""
        text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=self.config["chunk_size"],
            chunk_overlap=self.config["chunk_overlap"],
            length_function=len,
            is_separator_regex=False,
        )
        return text_splitter.split_documents(docs)

    def build_knowledge_base(self, file_paths: List[str]):
        """Load, chunk, embed and persist the given files into Chroma.

        Made synchronous: the original ``async`` version was invoked at
        module level without ``await``, so the knowledge base was never
        actually built (only an un-awaited coroutine was created).

        :param file_paths: paths to .pdf/.txt files to index.
        """
        raw_docs = self._load_documents(file_paths)
        chunks = self._chunk_documents(raw_docs)
        if chunks:  # Chroma rejects an empty add batch
            self.vectorstore.add_documents(chunks)
            self.vectorstore.persist()
        print(f"知识库构建完成，存储文档块数: {len(chunks)}")

    async def query(self, question: str) -> Dict[str, Any]:
        """Answer *question* with a RetrievalQA chain over the vector store.

        :param question: user question in natural language.
        :returns: dict with "answer" (str) and "sources" (list of
            {"source", "page"} metadata dicts for the retrieved chunks).
        """
        qa_chain = RetrievalQA.from_chain_type(
            self.llm,
            retriever=self.retriever,
            return_source_documents=True
        )

        result = qa_chain.invoke({"query": question})
        return {
            "answer": result["result"],
            "sources": [
                {
                    "source": doc.metadata.get("source", "unknown"),
                    "page": doc.metadata.get("page", "N/A")
                }
                for doc in result["source_documents"]
            ]
        }

# ---- Module-level wiring: build the RAG index, then expose it via MCP ----

# Vector-store location/name plus chunking and retrieval parameters.
config = dict(
    persist_dir="./data/rag_db",
    collection_name="rag_demo",
    chunk_size=200,
    chunk_overlap=50,
    retrieve_top_k=5,
)

# Bring up the RAG system with that configuration.
rag = RAGSystem(config)

# Populate the knowledge base from local files (example path).
rag.build_knowledge_base(
    file_paths=[
        "data/doupocangqiong.txt"  # TXT document path
    ]
)

mcp = FastMCP("rag_mcp")
USER_AGENT = "rag_mcp-app/1.0"


@mcp.tool()
async def rag_query(query: str) -> str:
    """
    Provide supplementary knowledge about the novel "Battle Through the
    Heavens" (Dou Po Cang Qiong) via RAG.
    :param query: the user's question
    :return: the generated answer text
    """
    result = await rag.query(query)
    answer = result["answer"]

    print(f"问题: {query}")
    print(f"回答: {answer}")
    return answer


async def search_demo():
    """Ad-hoc smoke test: run a single question through the rag_query tool."""
    query = '萧炎的女性朋友有哪些？'
    response = await rag_query(query)
    print("response", type(response), response)


if __name__ == "__main__":
    # 以标准 I/O 方式运行
    # asyncio.run(search_demo())

    mcp.run(transport='stdio')
