import os
from pydantic import BaseModel, Field
from langgraph.graph import StateGraph
from src.common.logger import getLogger
from langgraph.constants import START, END
from typing import Literal, TypedDict, List
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate, PromptTemplate

# Module-level logger from the project's common logging helper.
logger = getLogger()

class AdaptiveState(TypedDict):
    # Shared LangGraph state threaded through every node of the Adaptive-RAG workflow.
    query: str  # the user question driving retrieval/search and generation
    decide: str  # routing decision label ("vector_store" / "web_search")
    # NOTE(review): declared List[str], but retrieve_node stores Document objects and
    # search_node stores a plain string — confirm the intended element type.
    documents: List[str]
    grade: str  # document-relevance grade ("yes" / "no")
    answer: str  # LLM-generated answer

# Structured-output schema for routing: the LLM must choose between the vector
# store and web search. The Chinese Field description is sent to the LLM as part
# of the schema, so it is left untouched.
class DecideRoute(BaseModel):
    decide: Literal["vector_store", "web_search"] = Field(description = "根据问题的相关性，选择'vector_store'或'web_search'")

# Structured-output schema: binary grade of whether a retrieved document is
# relevant to the question ("yes" / "no").
class GradeDocument(BaseModel):
    grade: Literal["yes", "no"] = Field(description = "文档与问题的相关性，相关为'yes'，不相关为'no'")

# Structured-output schema: binary grade of whether the generated answer is
# grounded in the retrieved documents ("yes" = supported by the facts).
class GradeHallucination(BaseModel):
    grade: Literal["yes", "no"] = Field(description = "答案是否基于检索到的事实，是为'yes'，否为'no'")

# Structured-output schema: binary grade of whether the answer actually
# addresses the user's question ("yes" / "no").
class GradeAnswer(BaseModel):
    grade: Literal["yes", "no"] = Field(description = "答案是否回答了问题，是为'yes'，否为'no'")

class AdaptiveRAG:
    """Adaptive-RAG workflow built on LangGraph.

    A query is routed either to a local vector store (with relevance grading,
    query rewriting and hallucination/answer checks) or to a web-search tool,
    and the retrieved context is used to generate a grounded Chinese answer.
    """

    def __init__(self, llm_model, vector_store, agent_tools, document_summary):
        # llm_model: chat model supporting `.with_structured_output()`
        # vector_store: LangChain vector store exposing `.as_retriever()`
        # agent_tools: iterable of tools; the one named "search_web" is used for web search
        # document_summary: short description of the vector store's contents, used for routing
        self.llm_model = llm_model
        self.vector_store = vector_store
        self.agent_tools = agent_tools
        self.document_summary = document_summary

    def decide_node(self, state: AdaptiveState):
        """Conditional-edge selector: return 'vector_store' or 'web_search' for the query."""
        logger.info("AdaptiveRAG decide_node start")
        query = state["query"]
        # document_summary is injected as a template variable (not an f-string) so
        # that literal braces inside the summary cannot break the prompt template.
        decide_template = """
            您精通将用户问题路由至向量存储或网络搜索的技巧。
            该向量存储库收录了{document_summary}的文档，针对这些主题的问题，请使用 vector_store。否则，请使用 web_search。
        """
        decide_prompt = ChatPromptTemplate.from_messages([("system", decide_template), ("human", "{question}")])
        decide_chain = decide_prompt | self.llm_model.with_structured_output(DecideRoute)
        decide_result = decide_chain.invoke({ "question": query, "document_summary": self.document_summary })
        logger.info(f"AdaptiveRAG decide_node decide_result： {decide_result}")
        return decide_result.decide

    def retrieve_node(self, state: AdaptiveState):
        """Retrieve the top-3 documents for the current query from the vector store."""
        logger.info("AdaptiveRAG retrieve_node start")
        query = state["query"]
        retriever = self.vector_store.as_retriever(search_kwargs = {"k": 3})
        retrieve_docs = retriever.invoke(query)
        # Joined text is used only to log the total retrieved length; the Document
        # objects themselves go into state. NOTE(review): AdaptiveState declares
        # documents as List[str] — confirm whether page_content strings are expected.
        retrieve_doc = "".join(doc.page_content for doc in retrieve_docs)
        logger.info(f"AdaptiveRAG retrieve_node retrieve_doc len: {len(retrieve_doc)}")
        return { "documents": retrieve_docs }

    def grade_node(self, state: AdaptiveState):
        """Grade the retrieved documents' relevance to the query ('yes'/'no') into state."""
        logger.info("AdaptiveRAG grade_node start")
        query = state["query"]
        documents = state["documents"]
        template = """
            您是评分员，负责评估检索到的文档与用户问题的相关性。
            如果文档包含与问题相关的关键词或语义含义，请将其评为相关。
            给出二元评分“yes”或“no”，以表明文档是否与问题相关。
        """
        prompt = ChatPromptTemplate.from_messages([
            ("system", template), ("human", "检索的文档: \n {documents}\n用户输入的问题: {question}")
        ])
        structure_llm = self.llm_model.with_structured_output(GradeDocument)
        grade_result = (prompt | structure_llm).invoke({ "documents": documents, "question": query })
        logger.info(f"AdaptiveRAG grade_node grade_result: {grade_result}")
        return { "grade": grade_result.grade }

    def route_node(self, state: AdaptiveState):
        """Conditional-edge selector: 'generate' when documents were relevant, else 'transform'."""
        logger.info("AdaptiveRAG route_node start")
        grade = state["grade"]
        return "generate" if grade == "yes" else "transform"

    def transform_node(self, state: AdaptiveState):
        """Rewrite the query into a better-phrased version for another retrieval pass."""
        # Fixed: this node previously logged itself as "route_node" (copy-paste).
        logger.info("AdaptiveRAG transform_node start")
        query = state["query"]
        # The length limit is passed as a template variable so the system template
        # stays a plain (non-f) string and prompt-template braces render correctly.
        template = """
            您是一个问题重写器，可将输入的问题转换为优化后的更好版本。
            用于网络搜索。查看输入内容，尝试理解其潜在的语义意图或含义。
            尽量简短的回答问题，答案长度不超过{max_length}字。
        """
        prompt = ChatPromptTemplate.from_messages([
            ("system", template), ("human", "用户输入的问题：{question}\n创建更佳问题")
        ])
        transform_result = (prompt | self.llm_model | StrOutputParser()).invoke({
            "question": query, "max_length": len(query) * 2
        })
        logger.info(f"AdaptiveRAG transform_node transform_result: {transform_result}")
        # Fixed: the rewritten question must overwrite "query" in state. The original
        # returned a "keyword" key that is not part of AdaptiveState, so every retry
        # re-ran retrieval/search with the unchanged query.
        return { "query": transform_result }

    def search_node(self, state: AdaptiveState):
        """Run the 'search_web' tool with the current query and store its result as context."""
        logger.info("AdaptiveRAG search_node start")
        keyword = state["query"]
        search_result = ""
        for tool in self.agent_tools:
            if tool.name == "search_web":
                search_result = tool.func(keyword)
                break  # only one web-search tool is expected
        # NOTE(review): stored as a plain string although AdaptiveState declares List[str].
        return { "documents": search_result }

    def hallucination_node(self, state: AdaptiveState):
        """Conditional-edge selector after generation.

        Returns:
            "useful"        -- answer is grounded AND addresses the question (finish)
            "useless"       -- grounded but does not answer the question (rewrite query)
            "hallucination" -- not supported by the retrieved documents (regenerate)
        """
        logger.info("AdaptiveRAG hallucination_node start")
        query = state["query"]
        answer = state["answer"]
        documents = state["documents"]
        if self._grade_hallucination(documents, answer) != "yes":
            return "hallucination"
        if self._grade_answer(query, answer) == "yes":
            return "useful"
        return "useless"

    def _grade_hallucination(self, documents, answer):
        """Grade whether *answer* is grounded in *documents*; returns 'yes' or 'no'."""
        hallucination_template = """
            您是一名评分员，负责评估一个LLM生成是否基于检索到的事实。
            请给出二元评分“是”或“否”。“是”表示答案基于/由事实支持。
        """
        # Fixed label: the human message previously said "用户输入的问题" while
        # actually passing the generated answer.
        hallucination_prompt = ChatPromptTemplate.from_messages([
            ("system", hallucination_template), ("human", "检索的文档: \n {documents}\nLLM生成的答案: {answer}")
        ])
        structure_llm = self.llm_model.with_structured_output(GradeHallucination)
        hallucination_result = (hallucination_prompt | structure_llm).invoke({ "documents": documents, "answer": answer })
        logger.info(f"AdaptiveRAG hallucination_node hallucination_result: {hallucination_result}")
        return hallucination_result.grade

    def _grade_answer(self, query, answer):
        """Grade whether *answer* actually addresses *query*; returns 'yes' or 'no'."""
        answer_template = """
            您是评分员，负责评估答案是否回答了问题。
            请输入二元评分“是”或“否”。“是”表示答案能解答问题。
        """
        answer_prompt = ChatPromptTemplate.from_messages([
            ("system", answer_template), ("human", "用户输入的问题: {question}\n答案：{answer}")
        ])
        structure_llm = self.llm_model.with_structured_output(GradeAnswer)
        answer_result = (answer_prompt | structure_llm).invoke({ "question": query, "answer": answer })
        logger.info(f"AdaptiveRAG hallucination_node answer_result: {answer_result}")
        return answer_result.grade

    def generate_node(self, state: AdaptiveState):
        """Generate the final Chinese answer from the query and the gathered context."""
        logger.info("AdaptiveRAG generate_node start")
        query = state["query"]
        documents = state["documents"]

        template = """
            您是用于问答任务的助手。
            请使用下列文件回答问题。
            如果不知道答案，就直接说不知道。
            问题：{question}
            文件：{documents}
            必须用中文详尽的回答问题。
        """
        prompt = PromptTemplate(template = template, input_variables = [ "question", "documents" ])
        generate_chain = prompt | self.llm_model | StrOutputParser()
        generate_result = generate_chain.invoke({ "documents": documents, "question": query })
        logger.info(f"AdaptiveRAG generate_node generate_result len: {len(generate_result)}")
        return { "answer": generate_result }

    def build_graph(self):
        """Assemble and compile the LangGraph workflow; best-effort save a diagram PNG."""
        logger.info("AdaptiveRAG build_graph start")
        graph = StateGraph(AdaptiveState)
        graph.add_node("retrieve", self.retrieve_node)
        graph.add_node("search", self.search_node)
        graph.add_node("grade", self.grade_node)
        graph.add_node("transform", self.transform_node)
        graph.add_node("generate", self.generate_node)

        graph.add_conditional_edges(START, self.decide_node, { "vector_store": "retrieve", "web_search": "search" })
        graph.add_edge("search", "generate")
        graph.add_edge("retrieve", "grade")
        graph.add_conditional_edges("grade", self.route_node, { "transform": "transform", "generate": "generate" })
        graph.add_edge("transform", "retrieve")
        graph.add_conditional_edges("generate", self.hallucination_node, { "useful": END, "useless": "transform", "hallucination": "generate" })

        workflow = graph.compile()

        # Persisting the diagram is a debug aid only: a missing/read-only directory
        # or a failed mermaid render must never break the RAG pipeline itself.
        # The previously hard-coded Windows path is now just the default.
        save_path = os.getenv("RAG_GRAPH_IMAGE_DIR", "D:/Downloads/taixu/images/agentics")
        try:
            os.makedirs(save_path, exist_ok = True)
            image_path = os.path.join(save_path, "RAG_Adaptive_Workflow.png")
            with open(image_path, 'wb') as file:
                file.write(workflow.get_graph().draw_mermaid_png())
        except Exception as error:
            logger.warning(f"AdaptiveRAG build_graph failed to save workflow image: {error}")

        return workflow

    def invoke(self, query):
        """Run the full workflow for *query*; return retrieved context and the final answer."""
        logger.info(f"AdaptiveRAG invoke query: {query}")
        workflow = self.build_graph()
        response = workflow.invoke({ "query": query })
        answer = response.get("answer", None)
        logger.info(f"AdaptiveRAG invoke answer len: {len(str(answer))}")
        return { "retrieve_docs": response.get("documents", None), "chain_result": answer }
