import os
from langchain_community.document_loaders import PyPDFLoader
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_community.vectorstores import Chroma
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.chat_models import ChatZhipuAI
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough
from langchain_core.output_parsers import StrOutputParser
from dotenv import load_dotenv
import logging
import shutil

# Path to the local embedding model (an absolute path is recommended)
MODEL_PATH = "D:/ideaSpace/MyPython/models/bge-small-zh-v1.5"

# Configure root logging: timestamp, logger name, level, and message
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)

# Load environment variables from a .env file (e.g. ZHIPUAI_API_KEY)
load_dotenv()

class StablePDFRAGSystem:
    def __init__(self, pdf_path: str, persist_directory: str = "./chroma_db"):
        """
        Stable PDF RAG system.

        Builds the full pipeline on construction: models, document loading,
        vector store, and the RAG chain.

        Args:
            pdf_path: path to the PDF document (Chinese paths supported).
            persist_directory: directory used to persist the Chroma vector
                store. Defaults to "./chroma_db", matching previous behavior.

        Raises:
            RuntimeError: if any initialization step fails.
        """
        self.pdf_path = pdf_path
        self.persist_directory = persist_directory
        self._initialize_system()

    def _initialize_system(self):
        """Run the initialization pipeline in order: models -> docs -> store -> chain."""
        # 1. Initialize the LLM and the embedding model
        self._init_models()

        # 2. Load and split the PDF document
        documents = self._load_and_process_documents()

        # 3. Create the vector store and retriever
        self._create_vector_store(documents)

        # 4. Build the RAG chain
        self._build_rag_chain()

        logger.info("RAG系统初始化完成")

    def _init_models(self):
        """Initialize the Zhipu chat model and the local embedding model.

        Raises:
            RuntimeError: if either model fails to initialize.
        """
        try:
            # Zhipu chat model; the API key comes from the environment
            self.llm = ChatZhipuAI(
                api_key=os.getenv("ZHIPUAI_API_KEY"),
                model="glm-4",
                temperature=0.3,
                top_p=0.8
            )

            # Local embedding model, CPU-only
            self.embeddings = HuggingFaceEmbeddings(
                model_name=MODEL_PATH,
                model_kwargs={'device': 'cpu'},
                encode_kwargs={
                    'normalize_embeddings': True,
                    'batch_size': 4  # small batch size to avoid running out of memory
                }
            )

            logger.info("模型初始化成功")
        except Exception as e:
            # Chain the original exception so the root cause stays visible.
            raise RuntimeError(f"模型初始化失败: {str(e)}") from e

    def _load_and_process_documents(self):
        """Load the PDF and split it into Chinese-aware text chunks.

        Returns:
            list: the split document chunks.

        Raises:
            RuntimeError: if loading or splitting fails.
        """
        try:
            logger.info("正在加载文档: %s", self.pdf_path)

            # PyPDFLoader handles non-ASCII (e.g. Chinese) file paths
            loader = PyPDFLoader(self.pdf_path)
            documents = loader.load_and_split()

            # Splitter tuned for Chinese text: break on paragraph marks and
            # Chinese sentence-ending punctuation before falling back to length.
            text_splitter = RecursiveCharacterTextSplitter(
                chunk_size=600,  # slightly smaller chunks for better retrieval
                chunk_overlap=80,
                length_function=len,
                separators=["\n\n", "\n", "。", "！", "？", "；", "......"]
            )

            splits = text_splitter.split_documents(documents)
            logger.info("文档处理完成，共%d个文本块", len(splits))
            return splits

        except Exception as e:
            raise RuntimeError(f"文档处理失败: {str(e)}") from e

    def _create_vector_store(self, documents):
        """Create a fresh Chroma vector store and an MMR retriever over it.

        Args:
            documents: split document chunks to embed and index.

        Raises:
            RuntimeError: if the vector store cannot be created.
        """
        try:
            # Remove any stale database so the index reflects only this PDF
            if os.path.exists(self.persist_directory):
                shutil.rmtree(self.persist_directory)
                logger.info("已清理旧向量数据库")

            # Build a new persistent vector store with cosine similarity
            self.vectorstore = Chroma.from_documents(
                documents=documents,
                embedding=self.embeddings,
                persist_directory=self.persist_directory,
                collection_metadata={
                    "hnsw:space": "cosine",
                    "description": "PDF RAG System"
                }
            )

            # MMR retrieval balances relevance and diversity of the top chunks
            self.retriever = self.vectorstore.as_retriever(
                search_type="mmr",
                search_kwargs={
                    "k": 3,
                    "fetch_k": 5,
                    "lambda_mult": 0.7
                }
            )

            logger.info("向量存储创建成功")
        except Exception as e:
            raise RuntimeError(f"向量存储创建失败: {str(e)}") from e

    def _build_rag_chain(self):
        """Assemble the retrieval-augmented generation chain: retrieve -> prompt -> LLM -> text."""
        template = """你是一个专业的信息助手，请根据以下上下文回答问题。
        
        PDF文档内容：
        {context}
        
        用户问题：{question}
        
        回答要求：
        1. 必须基于提供的上下文内容
        2. 保持回答准确、简洁
        3. 如果上下文不包含相关信息，请回答"根据文档内容无法回答该问题"
        4. 对于操作指南类问题，请分步骤说明
        
        最终回答："""

        prompt = ChatPromptTemplate.from_template(template)

        # The retriever fills {context}; the raw question passes through unchanged.
        self.rag_chain = (
                {"context": self.retriever, "question": RunnablePassthrough()}
                | prompt
                | self.llm
                | StrOutputParser()
        )

        logger.info("RAG链构建完成")

    def query(self, question: str) -> str:
        """Answer a user question via the RAG chain.

        Args:
            question: the user's question.

        Returns:
            str: the model's answer, or a fixed fallback message on failure.
        """
        try:
            logger.info("用户查询: %s...", question[:100])
            response = self.rag_chain.invoke(question)
            logger.info("查询处理成功")
            return response
        except Exception as e:
            # logger.exception keeps the traceback; the user sees a generic message
            logger.exception("查询失败: %s", str(e))
            return "系统处理查询时出错，请稍后再试"

if __name__ == "__main__":
    # Commands that terminate the interactive loop
    EXIT_COMMANDS = {'退出', 'exit', 'quit'}

    try:
        # Smoke-test the ZhipuAI API before building the full pipeline
        from zhipuai import ZhipuAI

        api_client = ZhipuAI(api_key=os.getenv("ZHIPUAI_API_KEY"))
        ping = api_client.chat.completions.create(
            model="glm-4",
            messages=[{"role": "user", "content": "测试连接"}]
        )
        print("API连接测试成功:", ping.choices[0].message.content)

        # Build the RAG system over the target PDF
        print("\n正在初始化RAG系统...")
        rag_system = StablePDFRAGSystem(
            pdf_path="D:/document/阿里巴巴Java开发手册-嵩山版.pdf"
        )

        # Interactive query loop
        print("\n系统已就绪，输入问题开始查询（输入'退出'结束）")
        while True:
            user_question = input("\n请输入问题: ").strip()
            if user_question.lower() in EXIT_COMMANDS:
                break
            if not user_question:
                print("问题不能为空")
                continue
            print(f"\n回答：{rag_system.query(user_question)}")

    except Exception as e:
        # Top-level boundary: log the full traceback, show a short hint to the user
        logger.exception("系统运行失败")
        print(f"错误: {str(e)}")
        print("可能原因：1. API密钥无效 2. PDF路径错误 3. 模型路径不正确")