"""
Self-RAG 优化实现
基于LangChain和LangGraph构建的具有自反思能力的检索增强生成系统

主要功能：
1. 异步文档加载与处理
2. 模块化评分器设计
3. 可配置的工作流程
4. 完善的错误处理和日志记录
"""

import os
import json
import logging
import asyncio
from typing import List, Optional, Dict, Any
from concurrent.futures import ThreadPoolExecutor
from typing_extensions import TypedDict
from pathlib import Path

# 第三方库导入
from dotenv import load_dotenv
from pydantic import BaseModel, Field
from pydantic_settings import BaseSettings  # 修改后的导入
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.document_loaders import WebBaseLoader
from langchain_community.vectorstores import Chroma
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser
from langchain import hub
from langchain_community.chat_models import ChatZhipuAI
from langgraph.graph import END, StateGraph, START

# Load environment variables from .env (consumed by Settings below).
load_dotenv()

# Configure root logging once at import time.
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
)
logger = logging.getLogger(__name__)

# ==================== 配置类 ====================
class Settings(BaseSettings):
    """Application configuration, loaded from environment variables / .env."""

    # Model configuration.
    embedding_model: str = "D:/ideaSpace/MyPython/models/bge-small-zh-v1.5"
    llm_model: str = "glm-4"
    llm_temperature: float = 0

    # Vector-store configuration.
    collection_name: str = "rag-chroma"
    persist_directory: str = "./chroma_db"

    # Text-splitting and retry configuration.
    chunk_size: int = 250
    chunk_overlap: int = 0
    max_retries: int = 3

    # Required API key. pydantic-settings matches environment variables
    # case-insensitively by field name, so this is populated from
    # ZHIPUAI_API_KEY. The pydantic-v1 `Field(env=...)` keyword used before
    # is not supported by pydantic v2 and has been removed.
    zhipuai_api_key: str

    # Optional API keys for other services.
    tavily_api_key: Optional[str] = None
    deepseek_api_key: Optional[str] = None

    class Config:
        env_file = ".env"
        env_file_encoding = "utf-8"
        extra = "ignore"  # ignore undeclared extra fields

# ==================== 数据模型 ====================
class GradeDocuments(BaseModel):
    """Relevance grade for a retrieved document (structured grader output)."""
    # Integer relevance score in [1, 5]; downstream filtering keeps score >= 3.
    score: int = Field(..., ge=1, le=5, description="相关性评分1-5分")
    # Free-text justification for the score.
    reason: str = Field(description="评分理由")

class GradeHallucinations(BaseModel):
    """Hallucination grade: whether the answer is grounded in the documents."""
    # "是" (yes) when the answer is fully grounded in the documents, else "否" (no).
    binary_score: str = Field(description="答案是否基于事实，'是'或'否'")
    # Supporting evidence snippets; may be an empty list.
    evidence: List[str] = Field(default_factory=list, description="支持证据")

class GradeAnswer(BaseModel):
    """Answer-quality grade: whether the answer resolves the question."""
    # "是" (yes) when the answer resolves the question; defaults to "否" (no).
    binary_score: str = Field(default="否", description="答案是否解决问题，'是'或'否'")
    # Optional suggestions for improving a rejected answer.
    improvement_suggestions: Optional[str] = Field(default=None, description="改进建议")

class GraphState(TypedDict):
    """
    State carried between workflow nodes.

    Attributes:
        question: current question (may be rewritten between retries)
        generation: LLM-generated answer, if any
        documents: retrieved document list
        retry_count: number of query rewrites performed so far
        metadata: miscellaneous extra data
    """
    question: str
    generation: Optional[str]
    documents: List[Any]
    retry_count: int
    metadata: Dict[str, Any]

# ==================== 核心组件 ====================
class DocumentProcessor:
    """Loads web documents, splits them into chunks, and builds embeddings."""

    def __init__(self, settings: Settings):
        self.settings = settings
        self.text_splitter = RecursiveCharacterTextSplitter.from_tiktoken_encoder(
            chunk_size=settings.chunk_size,
            chunk_overlap=settings.chunk_overlap
        )

    async def load_documents(self, urls: List[str]) -> List[Any]:
        """Load documents from the given URLs without blocking the event loop.

        Re-raises any loader exception after logging it.
        """
        logger.info(f"开始加载 {len(urls)} 个URL的文档")
        try:
            loader = WebBaseLoader(urls)
            # The loader's synchronous `load()` returns Document objects;
            # run it in a worker thread so this coroutine does not block the
            # event loop (calling `aload()` here would nest asyncio.run()).
            documents = await asyncio.to_thread(loader.load)
            logger.info(f"成功加载 {len(documents)} 个文档")
            return documents
        except Exception as e:
            logger.error(f"文档加载失败: {e}")
            raise

    def split_documents(self, documents: List[Any]) -> List[Any]:
        """Split documents into chunks; plain strings are wrapped as Documents."""
        logger.info("开始分割文档")
        # Defensive: accept a list of raw strings as well as Document objects.
        if documents and isinstance(documents[0], str):
            from langchain_core.documents import Document
            documents = [Document(page_content=doc) for doc in documents]
        return self.text_splitter.split_documents(documents)

    def get_embedding_model(self) -> HuggingFaceEmbeddings:
        """Build the CPU-only HuggingFace embedding model (normalized vectors)."""
        return HuggingFaceEmbeddings(
            model_name=self.settings.embedding_model,
            model_kwargs={"device": "cpu"},
            encode_kwargs={"normalize_embeddings": True}
        )

class VectorStoreManager:
    """Manages loading and creation of the Chroma vector store."""

    def __init__(self, settings: Settings, embedding_model: HuggingFaceEmbeddings):
        self.settings = settings
        self.embedding_model = embedding_model
        # May be None when no persisted store exists yet (see _init_vectorstore).
        self.vectorstore = self._init_vectorstore()

    def _init_vectorstore(self) -> Optional[Chroma]:
        """Load an existing persisted vector store, or return None if absent."""
        persist_path = Path(self.settings.persist_directory)
        if persist_path.exists():
            logger.info("加载已有向量存储")
            return Chroma(
                persist_directory=str(persist_path),
                embedding_function=self.embedding_model,
                collection_name=self.settings.collection_name
            )
        return None

    async def create_vectorstore(self, documents: List[Any]) -> Chroma:
        """Create and persist a new vector store from `documents`.

        NOTE(review): the new store is returned but not assigned to
        self.vectorstore — callers (see SelfRAGApp.initialize) keep their
        own reference; confirm this split ownership is intentional.
        """
        logger.info("创建新向量存储")
        vectorstore = await Chroma.afrom_documents(
            documents=documents,
            collection_name=self.settings.collection_name,
            embedding=self.embedding_model,
            persist_directory=self.settings.persist_directory
        )
        logger.info("向量存储创建完成")
        return vectorstore

# ==================== 评分器工厂 ====================
class GraderFactory:
    """Factory for the retrieval / hallucination / answer graders."""

    def __init__(self, llm: ChatZhipuAI):
        self.llm = llm
        self.grader_configs = self._load_grader_configs()

    def _load_grader_configs(self) -> Dict[str, Any]:
        """Return grader configs, overlaying prompts from an optional JSON file.

        The JSON file can only carry prompt strings — it cannot serialize the
        pydantic model classes used for structured output. The original code
        returned the raw JSON as-is, so any existing config file made
        create_grader fail with KeyError("grader_model"). We now merge the
        file's prompt text over the defaults and always keep the model class.
        """
        defaults: Dict[str, Any] = {
            "retrieval": {
                "system": """你是一个评估检索文档与用户问题相关性的评分器...""",
                "human": "检索到的文档: \n\n {document} \n\n 用户问题: {question}",
                "grader_model": GradeDocuments
            },
            "hallucination": {
                "system": """你是一个严格的幻觉评分器。必须按照以下格式响应：
                {"binary_score": "是/否", "evidence": ["支持证据1", ...]}
                
                评分规则：
                1. 答案完全基于给定文档 → "是"
                2. 答案包含文档外的信息 → "否"
                
                即使没有证据也要返回空列表""",
                "human": "文档:\n{documents}\n\n生成内容:\n{generation}",
                "grader_model": GradeHallucinations
            },
            "answer": {
                "system": """你是一个答案质量评分器。必须包含 binary_score 字段：
                {"binary_score": "是/否", "improvement_suggestions": "..."}
                
                评分标准：
                1. 答案准确解决问题 → "是"
                2. 答案不完整/不准确 → "否" """,
                "human": "问题: {question}\n答案: {generation}",
                "grader_model": GradeAnswer
            }
        }

        config_path = Path("config/grader_configs.json")
        if config_path.exists():
            with open(config_path, "r", encoding="utf-8") as f:
                file_configs = json.load(f)
            for name, overrides in file_configs.items():
                # Unknown grader types have no model class; skip them.
                if name not in defaults:
                    continue
                merged = dict(defaults[name])
                # Never let the JSON replace the model class.
                merged.update(
                    {k: v for k, v in overrides.items() if k != "grader_model"}
                )
                defaults[name] = merged
        return defaults

    def create_grader(self, grader_type: str):
        """Build a prompt | structured-output chain for the given grader type.

        Raises:
            ValueError: if `grader_type` is not a configured grader.
        """
        config = self.grader_configs.get(grader_type)
        if not config:
            raise ValueError(f"未知评分器类型: {grader_type}")

        prompt = ChatPromptTemplate.from_messages([
            ("system", config["system"]),
            ("human", config["human"])
        ])
        return prompt | self.llm.with_structured_output(config["grader_model"])

# ==================== 工作流节点 ====================
class WorkflowNodes:
    """Implementations of the Self-RAG workflow nodes and edge deciders.

    Each async node takes the current GraphState and returns a partial state
    update; the edge deciders (grade_answer, decide_to_generate) return the
    name of the next node.
    """

    def __init__(self, settings: Settings, llm: ChatZhipuAI, retriever: Any):
        self.settings = settings
        self.llm = llm
        # May be None until the vector store is built (see SelfRAGApp).
        self.retriever = retriever
        self.graders = GraderFactory(llm)

        # Build the prompt chains once at construction time.
        self._init_chains()

    def _init_chains(self):
        """Initialize the question-rewriter and RAG generation chains."""
        # Question rewriter: turns the input question into a retrieval-friendly one.
        re_write_prompt = ChatPromptTemplate.from_messages([
            ("system", "你是一个问题重写器，将输入问题转换为更适合检索的版本。"),
            ("human", "初始问题: \n\n {question} \n 改进问题:")
        ])
        self.question_rewriter = re_write_prompt | self.llm | StrOutputParser()

        # RAG generation chain based on the shared rlm/rag-prompt.
        self.rag_chain = hub.pull("rlm/rag-prompt") | self.llm | StrOutputParser()

    async def retrieve(self, state: GraphState) -> Dict[str, Any]:
        """Retrieve candidate documents for the current question."""
        logger.info("执行文档检索")
        question = state["question"]
        try:
            # NOTE(review): `config` is not the standard place for `k` /
            # `score_threshold`; most retrievers read these from
            # `search_kwargs` at construction time — confirm this retriever
            # actually honors them here.
            documents = await self.retriever.ainvoke(
                question,
                config={"k": 10, "score_threshold": 0.6}
            )
            logger.info(f"检索到 {len(documents)} 个文档")
            return {"documents": documents, "question": question}
        except Exception as e:
            logger.error(f"检索失败: {e}")
            return {"documents": [], "question": question}

    async def grade_documents(self, state: GraphState) -> Dict[str, Any]:
        """Grade each retrieved document and keep those scoring >= 3."""
        logger.info("评估文档相关性")
        question = state["question"]
        documents = state["documents"]

        if not documents:
            return {"documents": [], "question": question}

        # Build the grader chain once (the original rebuilt it per document)
        # and grade all documents concurrently via the async API instead of
        # blocking the event loop with a ThreadPoolExecutor.
        grader = self.graders.create_grader("retrieval")

        async def grade_one(doc):
            try:
                return await grader.ainvoke({
                    "question": question,
                    "document": doc.page_content
                })
            except Exception as e:
                logger.error(f"文档评分出错: {e}")
                return None

        results = await asyncio.gather(*(grade_one(doc) for doc in documents))

        # A score of 3 or higher counts as relevant.
        filtered_docs = [
            doc for doc, score in zip(documents, results)
            if score and score.score >= 3
        ]
        logger.info(f"过滤后剩余 {len(filtered_docs)} 个相关文档")
        return {"documents": filtered_docs, "question": question}

    async def generate(self, state: GraphState) -> Dict[str, Any]:
        """Generate an answer from the filtered documents."""
        logger.info("生成答案")
        question = state["question"]
        documents = state["documents"]

        try:
            generation = await self.rag_chain.ainvoke({
                "context": documents,
                "question": question
            })
            logger.info(f"生成内容: {generation[:200]}...")
            return {"generation": generation, "documents": documents, "question": question}
        except Exception as e:
            logger.error(f"生成失败: {e}")
            # Signal failure with generation=None so grade_answer retries.
            return {"generation": None, "documents": documents, "question": question}

    async def transform_query(self, state: GraphState) -> Dict[str, Any]:
        """Rewrite the question for better retrieval; increments retry_count."""
        logger.info("重写问题")
        question = state["question"]
        retry_count = state.get("retry_count", 0) + 1

        try:
            better_question = await self.question_rewriter.ainvoke({"question": question})
            logger.info(f"问题重写: {question} -> {better_question}")
            return {
                "question": better_question,
                "retry_count": retry_count,
                "documents": []
            }
        except Exception as e:
            # On failure, keep the original question but still count the retry.
            logger.error(f"问题重写失败: {e}")
            return {
                "question": question,
                "retry_count": retry_count,
                "documents": []
            }

    async def grade_answer(self, state: GraphState) -> str:
        """Edge decider: "useful", "transform_query", or "end"."""
        logger.info("评估答案质量")
        question = state["question"]
        generation = state["generation"]
        retry_count = state.get("retry_count", 0)

        if not generation:
            return "transform_query"

        # Lenient pass: a non-trivial answer that doesn't refuse is accepted
        # without invoking the LLM grader.
        if len(generation) > 50 and "无法回答" not in generation:
            logger.info(f"宽松评分通过：{generation[:200]}...")
            return "useful"

        # Strict pass: ask the answer grader.
        try:
            answer_score = await self.graders.create_grader("answer").ainvoke({
                "question": question,
                "generation": generation
            })
            if answer_score.binary_score == "是":
                return "useful"
        except Exception as e:
            logger.error(f"评分失败，使用宽松模式: {e}")

        if retry_count >= self.settings.max_retries:
            return "end"
        return "transform_query"

    async def decide_to_generate(self, state: GraphState) -> str:
        """Edge decider after grading: "generate", "transform_query", or "end"."""
        logger.info("决定下一步操作")
        documents = state["documents"]
        retry_count = state.get("retry_count", 0)

        if retry_count >= self.settings.max_retries:
            logger.warning("达到最大重试次数，终止流程")
            return "end"
        elif not documents:
            logger.info("无相关文档，将重写问题")
            return "transform_query"
        else:
            logger.info("有相关文档，将生成答案")
            return "generate"

    async def end_node(self, state: GraphState) -> Dict[str, Any]:
        """Terminal node: substitute an apology if no answer was produced."""
        logger.info("流程结束")
        if "generation" not in state or not state["generation"]:
            return {"generation": "抱歉，我无法找到合适的答案。"}
        return state

# ==================== 工作流构建 ====================
class WorkflowBuilder:
    """Assembles the Self-RAG StateGraph from the node implementations."""

    def __init__(self, settings: Settings, nodes: WorkflowNodes):
        self.settings = settings
        self.nodes = nodes

    def build(self) -> StateGraph:
        """Wire up all nodes and edges and return the (uncompiled) graph."""
        graph = StateGraph(GraphState)

        # Register every node under its label.
        handlers = {
            "retrieve": self.nodes.retrieve,
            "grade_documents": self.nodes.grade_documents,
            "generate": self.nodes.generate,
            "transform_query": self.nodes.transform_query,
            "end": self.nodes.end_node,
        }
        for label, handler in handlers.items():
            graph.add_node(label, handler)

        # Unconditional transitions.
        graph.add_edge(START, "retrieve")
        graph.add_edge("retrieve", "grade_documents")
        graph.add_edge("transform_query", "retrieve")
        graph.add_edge("end", END)

        # After grading: rewrite the query, generate, or give up.
        graph.add_conditional_edges(
            "grade_documents",
            self.nodes.decide_to_generate,
            {
                "transform_query": "transform_query",
                "generate": "generate",
                "end": "end",
            },
        )

        # After generation: accept, retry with a rewritten query, or give up.
        graph.add_conditional_edges(
            "generate",
            self.nodes.grade_answer,
            {
                "transform_query": "transform_query",
                "useful": END,
                "end": "end",
            },
        )

        return graph

# ==================== 主应用 ====================
class SelfRAGApp:
    """Top-level Self-RAG application wiring all components together."""

    def __init__(self):
        self.settings = Settings()
        self.llm = ChatZhipuAI(
            api_key=self.settings.zhipuai_api_key,
            model=self.settings.llm_model,
            temperature=self.settings.llm_temperature
        )

        # Core components.
        self.document_processor = DocumentProcessor(self.settings)
        self.vectorstore_manager = VectorStoreManager(
            self.settings,
            self.document_processor.get_embedding_model()
        )

        # Compile a placeholder workflow (no retriever yet).
        self._init_workflow()

    async def initialize(self, urls: List[str]):
        """Load documents from `urls`, build the vector store and workflow."""
        # Load and split the source documents.
        docs = await self.document_processor.load_documents(urls)
        splits = self.document_processor.split_documents(docs)

        # Build the vector store and a retriever over it.
        self.vectorstore = await self.vectorstore_manager.create_vectorstore(splits)
        self.retriever = self.vectorstore.as_retriever()

        # Recompile the workflow with the real retriever.
        nodes = WorkflowNodes(self.settings, self.llm, self.retriever)
        workflow = WorkflowBuilder(self.settings, nodes).build()
        self.app = workflow.compile()

    def _init_workflow(self):
        """Compile an initial workflow before any vector store exists."""
        nodes = WorkflowNodes(self.settings, self.llm, None)
        workflow = WorkflowBuilder(self.settings, nodes).build()
        self.app = workflow.compile()

    async def run(self, question: str) -> str:
        """Run the QA workflow for `question` and return the final answer."""
        logger.info(f"开始处理问题: {question}")

        inputs = {"question": question}
        final_answer = None

        async for output in self.app.astream(inputs):
            # astream yields {node_name: state_update}; the answer lives in
            # the "generation" field of a node's state update, not in the
            # mapping key (the original compared the key to "generation" and
            # could never capture the answer).
            for node_name, state_update in output.items():
                if isinstance(state_update, dict) and state_update.get("generation"):
                    final_answer = state_update["generation"]
                    logger.info(f"生成答案: {final_answer}")

        return final_answer or "抱歉，我无法回答这个问题。"

# ==================== 测试运行 ====================
async def main():
    """Demo entry point: build the app on sample pages and ask test questions."""
    # Pages to ingest.
    demo_urls = [
        "https://lilianweng.github.io/posts/2023-06-23-agent/",
        "https://lilianweng.github.io/posts/2023-03-15-prompt-engineering/",
        "https://lilianweng.github.io/posts/2023-10-25-adv-attack-llm/",
    ]

    # Questions to run through the pipeline.
    demo_questions = (
        "解释不同类型的智能体记忆是如何工作的？",
        "什么是思维链提示？",
        "如何对LLM进行对抗攻击？",
    )

    rag_app = SelfRAGApp()
    await rag_app.initialize(demo_urls)

    for query in demo_questions:
        print(f"\n{'='*50}")
        print(f"问题: {query}")
        reply = await rag_app.run(query)
        print(f"\n答案: {reply}")
        print(f"{'='*50}")

# Script entry point: run the async demo in a fresh event loop.
if __name__ == "__main__":
    asyncio.run(main())