import os
import numpy as np
from typing import TypedDict, List
from src.common import commonUtils
from langgraph.graph import StateGraph
from src.common.logger import getLogger
from langgraph.constants import START, END
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser
from langchain_community.utils.math import cosine_similarity
from src.agentic.rag.program.CommunityGraph import CommunityGraph
from langchain_text_splitters import RecursiveCharacterTextSplitter

# Module-level logger shared by ProgramRAG; obtained from the project's logging helper.
logger = getLogger()

class ProgramState(TypedDict):
    """Shared state flowing through the LangGraph workflow built by ProgramRAG.build_graph.

    Each node returns a partial dict updating one or more of these keys.
    """
    query: str          # the user's question, passed to retrieval and answer generation
    pattern: str        # tutorial topic slug placed between tutorial_prefix and tutorial_suffix
    tutorial_docs: List  # text chunks split from the scraped tutorial page (list of str)
    search_docs: List    # text chunks split from the web-search result (list of str)
    vector_docs: List    # (document, similarity) pairs selected by retrieve_vector
    graph_docs: List     # output of CommunityGraph.invoke — exact shape not visible here; verify against CommunityGraph
    answer: str          # final LLM answer produced by generate_answer

class ProgramRAG:
    """RAG pipeline over programming tutorials plus live web search.

    Builds a LangGraph workflow with two parallel ingestion branches
    (tutorial scraping and web search) feeding two parallel retrieval
    branches (dense vector similarity and community-graph retrieval),
    whose results are merged into a single LLM answer.
    """

    def __init__(self, llm_model, embed_model, agent_tools, max_row):
        """
        Args:
            llm_model: chat model used for answer generation (and CommunityGraph).
            embed_model: embedding model exposing embed_documents / embed_query.
            agent_tools: iterable of tools; one named "search_web" is expected.
            max_row: number of top documents to keep in vector retrieval.
        """
        self.llm_model = llm_model
        self.embed_model = embed_model
        self.agent_tools = agent_tools
        # Tutorial URL is assembled as prefix + pattern + suffix.
        self.tutorial_prefix = "https://www.runoob.com/"
        self.tutorial_suffix = "-tutorial.html"
        self.max_row = max_row

    def search_tutorial(self, state: ProgramState):
        """Scrape the tutorial page for state["pattern"] and split it into chunks."""
        logger.info("ProgramRAG search_tutorial start")
        pattern = state["pattern"]
        texts = commonUtils.extract_website_content(self.tutorial_prefix, pattern, self.tutorial_suffix)
        website_text = "\n".join(texts)
        logger.info(f"ProgramRAG search_tutorial website_text len: {len(website_text)}")

        # Token-based splitting so chunk_size is measured in tokens, not characters.
        text_splitter = RecursiveCharacterTextSplitter.from_tiktoken_encoder(chunk_size = 768, chunk_overlap = 50)
        doc_texts = text_splitter.split_text(website_text)
        logger.info(f"ProgramRAG search_tutorial doc_texts len: {len(doc_texts)}")
        return { "tutorial_docs": doc_texts }

    def retrieve_vector(self, state: ProgramState):
        """Rank tutorial + search chunks by cosine similarity to the query.

        Returns:
            {"vector_docs": [(document, similarity), ...]} — the max_row MOST
            similar documents, best first.
        """
        logger.info("ProgramRAG retrieve_vector start")
        query = state["query"]
        documents = state.get("tutorial_docs", []) + state.get("search_docs", [])
        # Guard: embedding/cosine similarity on an empty corpus would fail.
        if not documents:
            logger.info("ProgramRAG retrieve_vector no documents to rank")
            return { "vector_docs": [] }

        embedding_documents = self.embed_model.embed_documents(documents)
        embedding_query = self.embed_model.embed_query(query)
        similarities = cosine_similarity([embedding_query], embedding_documents)[0]
        # BUG FIX: np.argsort is ascending — slicing it directly picked the LEAST
        # similar documents. Reverse first to take the top-k most similar.
        indices = np.argsort(similarities)[::-1][:self.max_row]
        logger.info(f"ProgramRAG retrieve_vector indices: {indices}")
        vector_docs = [(documents[i], similarities[i]) for i in indices]
        return { "vector_docs": vector_docs }

    def retrieve_graph(self, state: ProgramState):
        """Retrieve context via the community-graph index built over all chunks."""
        logger.info("ProgramRAG retrieve_graph start")
        query = state["query"]
        documents = state.get("tutorial_docs", []) + state.get("search_docs", [])
        community_graph = CommunityGraph(self.llm_model, self.embed_model, False)
        response = community_graph.invoke(query, documents)
        return { "graph_docs": response }

    def search_web(self, state: ProgramState):
        """Run the "search_web" agent tool on the query and split the result into chunks."""
        logger.info("ProgramRAG search_web start")
        query = state["query"]
        # BUG FIX: search_result was unbound when no matching tool existed
        # (UnboundLocalError); also stop scanning once the tool is found.
        search_result = None
        for tool in self.agent_tools:
            if "search_web" == tool.name:
                search_result = tool.func(query)
                break
        if search_result is None:
            logger.warning("ProgramRAG search_web tool not found in agent_tools")
            return { "search_docs": [] }
        logger.info(f"ProgramRAG search_web search_result len: {len(search_result)}")

        text_splitter = RecursiveCharacterTextSplitter(chunk_size = 768, chunk_overlap = 50)
        split_texts = text_splitter.split_text(search_result)
        logger.info(f"ProgramRAG search_web split_texts len: {len(split_texts)}")
        return { "search_docs": split_texts }

    def generate_answer(self, state: ProgramState):
        """Answer the query with the LLM, grounded in all retrieved context."""
        logger.info("ProgramRAG generate_answer start")
        query = state["query"]
        # BUG FIX: defaults were "" — list + str raises TypeError when a key is
        # missing; the keys hold lists, so default to [].
        context = state.get("vector_docs", []) + state.get("graph_docs", []) + state.get("search_docs", [])
        logger.info(f"ProgramRAG generate_answer context len: {len(context)}")
        template = """
            请根据提供的上下文回答用户问题，务必按上下文事实回答，不得自行杜撰和扩展。
            
            上下文： {context}
            用户问题： {question}
            
            请务必用中文详尽的回答问题。
        """
        prompt = ChatPromptTemplate.from_template(template)
        chain = prompt | self.llm_model | StrOutputParser()
        response = chain.invoke({ "context": context, "question": query })
        logger.info(f"ProgramRAG generate_answer response len: {len(response)}")
        return { "answer": response }

    def build_graph(self):
        """Assemble and compile the workflow; best-effort save of its diagram.

        Topology: START fans out to search_tutorial and search_web; both feed
        retrieve_vector and retrieve_graph (which therefore wait for both
        ingestion branches); both retrievals join at generate_answer.
        """
        logger.info("ProgramRAG build_graph start")
        graph = StateGraph(ProgramState)
        graph.add_node("search_tutorial", self.search_tutorial)
        graph.add_node("retrieve_vector", self.retrieve_vector)
        graph.add_node("retrieve_graph", self.retrieve_graph)
        graph.add_node("search_web", self.search_web)
        graph.add_node("generate_answer", self.generate_answer)

        graph.add_edge(START, "search_tutorial")
        graph.add_edge("search_tutorial", "retrieve_vector")
        graph.add_edge("search_tutorial", "retrieve_graph")
        graph.add_edge(START, "search_web")
        graph.add_edge("search_web", "retrieve_vector")
        graph.add_edge("search_web", "retrieve_graph")
        graph.add_edge("retrieve_vector", "generate_answer")
        graph.add_edge("retrieve_graph", "generate_answer")
        graph.add_edge("generate_answer", END)

        workflow = graph.compile()

        # Diagram export is a debugging aid only (draw_mermaid_png typically
        # needs an external renderer) — never let it break the pipeline.
        try:
            save_path = "D:/Downloads/taixu/images/agentics"
            image_path = os.path.join(save_path, "RAG_Program_Workflow.png")
            # exist_ok avoids the check-then-create race of the old code.
            os.makedirs(save_path, exist_ok=True)
            with open(image_path, 'wb') as file:
                file.write(workflow.get_graph().draw_mermaid_png())
        except Exception:
            logger.exception("ProgramRAG build_graph failed to save workflow diagram")

        return workflow

    def invoke(self, query, pattern):
        """Run the full workflow for one query.

        Returns:
            dict with "retrieve_docs" (vector + graph retrieval results) and
            "chain_result" (the generated answer, or None).
        """
        logger.info(f"ProgramRAG invoke query: {query}")
        workflow = self.build_graph()
        response = workflow.invoke({ "query": query, "pattern": pattern })
        answer = response.get("answer", None)
        logger.info(f"ProgramRAG invoke answer len: {len(str(answer))}")
        # BUG FIX: defaults were "" — these keys hold lists; use [] so the
        # concatenation cannot raise TypeError when a branch produced nothing.
        return { "retrieve_docs": response.get("vector_docs", []) + response.get("graph_docs", []), "chain_result": answer }
