# -*- encoding: utf-8 -*-
"""
@author: acedar  
@time: 2025/4/3 23:28
@file: rag.py 
"""
# Install dependencies:
# pip install langchain langchain-openai chromadb tiktoken python-dotenv pypdf

import os
from dotenv import load_dotenv
from typing import List, Dict, Any
from langchain_community.document_loaders import PyPDFLoader, TextLoader
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_openai import OpenAIEmbeddings, ChatOpenAI
from langchain_community.vectorstores import Chroma
from langchain.schema import Document
from langchain.chains import RetrievalQA

# Load environment variables (store DASHSCOPE_API_KEY / OPENAI_API_KEY etc. in a .env file)
load_dotenv()


class RAGSystem:
    """Minimal retrieval-augmented generation pipeline backed by Chroma.

    Loads local PDF/TXT files, splits them into overlapping chunks, embeds
    the chunks into a persistent Chroma collection, and answers questions
    with a RetrievalQA chain over an OpenAI-compatible chat model.
    """

    def __init__(self, config: Dict[str, Any]):
        """
        Args:
            config: expects keys ``collection_name``, ``persist_dir``,
                ``chunk_size``, ``chunk_overlap``; optional ``model_name``
                (chat model id) and ``retrieve_top_k`` (default 5).
        """
        self.config = config

        # Chat model through the OpenAI-compatible endpoint (e.g. DashScope/Qwen).
        # Honor config["model_name"] when provided (it was previously ignored),
        # falling back to the MODEL env var, then to "qwen-plus".
        self.llm = ChatOpenAI(
            model=config.get("model_name") or os.environ.get("MODEL", "qwen-plus"),
            base_url=os.environ.get("BASE_URL"),
            api_key=os.getenv("OPENAI_API_KEY"),  # issued by the model provider
        )

        # Embedding model; api_key intentionally omitted — OpenAIEmbeddings
        # reads the OPENAI_API_KEY env var by default.
        self.embeddings = OpenAIEmbeddings(
            model=os.environ.get("EMBED_MODEL", "text-embedding-v2"),
            base_url=os.environ.get(
                "EMBED_BASE_URL",
                "https://dashscope.aliyuncs.com/compatible-mode/v1",
            ),
        )

        # Persistent vector store and an MMR retriever over it.
        self.vectorstore = Chroma(
            collection_name=config["collection_name"],
            embedding_function=self.embeddings,
            persist_directory=config["persist_dir"],
        )
        self.retriever = self.vectorstore.as_retriever(
            search_type="mmr",
            search_kwargs={"k": config.get("retrieve_top_k", 5)},
        )

    def _load_documents(self, file_paths: List[str]) -> List[Document]:
        """Load local PDF/TXT documents; other file types are skipped with a notice."""
        docs: List[Document] = []
        for path in file_paths:
            # Lowercase so .PDF / .TXT extensions are recognized too.
            lower = path.lower()
            if lower.endswith(".pdf"):
                docs.extend(PyPDFLoader(path).load())
            elif lower.endswith(".txt"):
                docs.extend(TextLoader(path, encoding="utf-8").load())
            else:
                print(f"跳过不支持的文件类型: {path}")
        return docs

    def _chunk_documents(self, docs: List[Document]) -> List[Document]:
        """Split documents into overlapping character chunks for embedding."""
        text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=self.config["chunk_size"],
            chunk_overlap=self.config["chunk_overlap"],
            length_function=len,
            is_separator_regex=False,
        )
        return text_splitter.split_documents(docs)

    def build_knowledge_base(self, file_paths: List[str]):
        """Load, chunk, embed, and persist the given files into the vector store."""
        # 1. Load raw documents.
        raw_docs = self._load_documents(file_paths)

        # 2. Split into chunks. (Debug print of full chunk contents removed —
        #    it flooded stdout with the entire corpus.)
        chunks = self._chunk_documents(raw_docs)

        # 3. Embed and store. Chroma >= 0.4 persists automatically and no
        #    longer exposes persist(); call it only when available.
        self.vectorstore.add_documents(chunks)
        if hasattr(self.vectorstore, "persist"):
            self.vectorstore.persist()
        print(f"知识库构建完成，存储文档块数: {len(chunks)}")

    def query(self, question: str) -> Dict[str, Any]:
        """Answer a question over the knowledge base.

        Returns:
            Dict with ``answer`` (the model's reply) and ``sources`` — a list
            of ``{"source": ..., "page": ...}`` entries taken from the
            metadata of the retrieved documents.
        """
        qa_chain = RetrievalQA.from_chain_type(
            self.llm,
            retriever=self.retriever,
            return_source_documents=True,
        )

        result = qa_chain.invoke({"query": question})
        return {
            "answer": result["result"],
            "sources": [
                {
                    "source": doc.metadata.get("source", "unknown"),
                    "page": doc.metadata.get("page", "N/A"),
                }
                for doc in result["source_documents"]
            ],
        }


if __name__ == "__main__":
    # Pipeline settings: storage location, chunking, and retrieval depth.
    config = {
        "persist_dir": "./qwen_rag_db",
        "collection_name": "qwen_demo",
        "chunk_size": 1000,
        "chunk_overlap": 200,
        "model_name": "qwen-max",  # alternatives: qwen-plus / qwen-turbo
        "retrieve_top_k": 5,
    }

    # Bring up the RAG system.
    rag = RAGSystem(config)

    # Index the sample corpus into the vector store.
    rag.build_knowledge_base(file_paths=["data/doupocangqiong.txt"])

    # Ask a question and display the answer together with its provenance.
    question = "萧炎的女性朋友有哪些？"
    response = rag.query(question)

    print(f"问题: {question}")
    print(f"回答: {response['answer']}")
    print("\n来源文档:")
    for src in response["sources"]:
        print(f"- 文件: {src['source']}, 页码: {src['page']}")