import os
import arxiv
import numpy as np
from typing import TypedDict, List
from src.common import commonUtils
from langgraph.graph import StateGraph
from src.common.logger import getLogger
from langgraph.constants import START, END
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser
from langchain_community.utils.math import cosine_similarity
from src.agentic.rag.program.CommunityGraph import CommunityGraph
from langchain_text_splitters import RecursiveCharacterTextSplitter

logger = getLogger()

class ArxivState(TypedDict):
    """Shared state flowing between the nodes of the ArxivRAG LangGraph workflow."""
    # The user's question; translated to English inside search_arxiv when detected as Chinese.
    query: str
    # NOTE(review): no node below ever writes or reads state["documents"] — possibly vestigial; confirm before removing.
    documents: List
    # Chunked arXiv paper summaries (plain strings) produced by search_arxiv.
    arxiv_docs: List
    # Chunked web-search result text (plain strings) produced by search_web.
    search_docs: List
    # (chunk, cosine-similarity) pairs produced by retrieve_vector.
    vector_docs: List
    # Retrieval output of CommunityGraph.invoke — structure defined in CommunityGraph; presumably a list (it is concatenated with lists in generate_answer).
    graph_docs: List
    # Final LLM answer produced by generate_answer.
    answer: str

class ArxivRAG:
    """Multi-source RAG pipeline over arXiv and web search, orchestrated with LangGraph.

    The workflow fans out from START into an arXiv search and a web search,
    pools both sets of chunks into a vector retriever and a community-graph
    retriever (run in parallel), then joins everything in one LLM answer node.
    """

    def __init__(self, llm_model, embed_model, agent_tools, max_row):
        # llm_model: chat model used for answer generation and CommunityGraph.
        # embed_model: embedding model used for vector-similarity retrieval.
        # agent_tools: tool list; "translate_text" and "search_web" are looked up by name.
        # max_row: cap on arXiv results fetched and on retrieved chunks kept.
        self.llm_model = llm_model
        self.embed_model = embed_model
        self.agent_tools = agent_tools
        self.max_row = max_row

    def search_arxiv(self, state: ArxivState):
        """Query arXiv for the user question and return summary text chunks.

        Returns {"arxiv_docs": [chunk_str, ...]} with title/summary/url metadata
        attached during splitting.
        """
        logger.info("ArxivRAG search_arxiv start")
        query = state["query"]
        language = commonUtils.robust_detect_language(query)
        logger.info(f"ArxivRAG search_arxiv language: {language}")
        if language == "zh":
            # arXiv is indexed in English, so translate Chinese queries first.
            for tool in self.agent_tools:
                if "translate_text" == tool.name:
                    query = tool.func(query)
                    break  # tool names are unique; stop after the first match
        arxiv_client = arxiv.Client()
        arxiv_result = arxiv.Search(query = query, max_results = self.max_row, sort_by = arxiv.SortCriterion.Relevance)

        documents = []
        document_summaries = []
        for result in arxiv_client.results(arxiv_result):
            documents.append({ "title": result.title, "summary": result.summary, "url": result.entry_id })
            document_summaries.append(result.summary)
        logger.info(f"ArxivRAG search_arxiv document_summaries len: {len(document_summaries)}")

        # Token-based splitting keeps each chunk within the embedder's context budget.
        text_splitter = RecursiveCharacterTextSplitter.from_tiktoken_encoder(chunk_size = 768, chunk_overlap = 50)
        doc_texts = text_splitter.create_documents(document_summaries, metadatas = documents)
        logger.info(f"ArxivRAG search_arxiv doc_texts len: {len(doc_texts)}")
        return { "arxiv_docs": [doc.page_content for doc in doc_texts] }

    def retrieve_vector(self, state: ArxivState):
        """Rank pooled chunks by cosine similarity to the query; keep the top max_row.

        Returns {"vector_docs": [(chunk, similarity), ...]} sorted most-similar first.
        """
        logger.info("ArxivRAG retrieve_vector start")
        query = state["query"]
        documents = state.get("arxiv_docs", []) + state.get("search_docs", [])
        if not documents:
            # Nothing to rank — avoid calling the embedder with an empty batch.
            return { "vector_docs": [] }

        embedding_documents = self.embed_model.embed_documents(documents)
        embedding_query = self.embed_model.embed_query(query)
        similarities = cosine_similarity([embedding_query], embedding_documents)[0]
        # BUG FIX: np.argsort sorts ascending, so the original `[:self.max_row]`
        # selected the LEAST similar chunks. Reverse to take the most similar.
        indices = np.argsort(similarities)[::-1][:self.max_row]
        logger.info(f"ArxivRAG retrieve_vector indices: {indices}")
        vector_docs = [(documents[i], similarities[i]) for i in indices]
        return { "vector_docs": vector_docs }

    def retrieve_graph(self, state: ArxivState):
        """Run community-graph retrieval over the pooled chunks.

        Returns {"graph_docs": <CommunityGraph.invoke result>}.
        """
        logger.info("ArxivRAG retrieve_graph start")
        query = state["query"]
        documents = state.get("arxiv_docs", []) + state.get("search_docs", [])
        community_graph = CommunityGraph(self.llm_model, self.embed_model, False)
        response = community_graph.invoke(query, documents)
        return { "graph_docs": response }

    def search_web(self, state: ArxivState):
        """Run the 'search_web' tool for the query and split the result into chunks.

        Returns {"search_docs": [chunk_str, ...]} (empty if the tool is absent).
        """
        logger.info("ArxivRAG search_web start")
        query = state["query"]
        # BUG FIX: initialize so a missing 'search_web' tool no longer raises
        # UnboundLocalError at the logging line below.
        search_result = ""
        for tool in self.agent_tools:
            if "search_web" == tool.name:
                search_result = tool.func(query)
                break
        logger.info(f"ArxivRAG search_web search_result len: {len(search_result)}")

        # Character-based splitting here (web text), unlike the tiktoken splitter
        # used for arXiv summaries.
        text_splitter = RecursiveCharacterTextSplitter(chunk_size = 768, chunk_overlap = 50)
        split_texts = text_splitter.split_text(search_result)
        logger.info(f"ArxivRAG search_web split_texts len: {len(split_texts)}")
        return { "search_docs": split_texts }

    def generate_answer(self, state: ArxivState):
        """Compose the retrieved context and ask the LLM for a grounded Chinese answer."""
        logger.info("ArxivRAG generate_answer start")
        query = state["query"]
        # BUG FIX: defaults must be lists — the original "" defaults raise
        # TypeError (list + str) whenever one of these keys is missing.
        context = state.get("vector_docs", []) + state.get("graph_docs", []) + state.get("search_docs", [])
        logger.info(f"ArxivRAG generate_answer context len: {len(context)}")
        template = """
            请根据提供的上下文回答用户问题，务必按上下文事实回答，不得自行杜撰和扩展。
            
            上下文： {context}
            用户问题： {question}
            
            请务必用中文详尽的回答问题。
        """
        prompt = ChatPromptTemplate.from_template(template)
        chain = prompt | self.llm_model | StrOutputParser()
        response = chain.invoke({ "context": context, "question": query })
        logger.info(f"ArxivRAG generate_answer response len: {len(response)}")
        return { "answer": response }

    def build_graph(self):
        """Assemble and compile the workflow; saving a PNG diagram is best-effort."""
        logger.info("ArxivRAG build_graph start")
        graph = StateGraph(ArxivState)
        graph.add_node("search_arxiv", self.search_arxiv)
        graph.add_node("retrieve_vector", self.retrieve_vector)
        graph.add_node("retrieve_graph", self.retrieve_graph)
        graph.add_node("search_web", self.search_web)
        graph.add_node("generate_answer", self.generate_answer)

        # START fans out to both searches; each retriever has edges from both
        # searches (so it waits for both), and generate_answer joins the two
        # retrievers before END.
        graph.add_edge(START, "search_arxiv")
        graph.add_edge("search_arxiv", "retrieve_vector")
        graph.add_edge("search_arxiv", "retrieve_graph")
        graph.add_edge(START, "search_web")
        graph.add_edge("search_web", "retrieve_vector")
        graph.add_edge("search_web", "retrieve_graph")
        graph.add_edge("retrieve_vector", "generate_answer")
        graph.add_edge("retrieve_graph", "generate_answer")
        graph.add_edge("generate_answer", END)

        workflow = graph.compile()

        # ROBUSTNESS: the diagram is diagnostic only — rendering the mermaid PNG
        # may depend on an external renderer and the hard-coded path is
        # machine-specific, so failures here must not break the pipeline.
        save_path = "D:/Downloads/taixu/images/agentics"
        image_path = os.path.join(save_path, "RAG_Arxiv_Workflow.png")
        try:
            os.makedirs(save_path, exist_ok = True)
            with open(image_path, 'wb') as file:
                file.write(workflow.get_graph().draw_mermaid_png())
        except Exception as error:
            logger.warning(f"ArxivRAG build_graph diagram export failed: {error}")

        return workflow

    def invoke(self, query):
        """Run the full workflow for `query`.

        Returns {"retrieve_docs": vector_docs + graph_docs, "chain_result": answer}.
        """
        logger.info(f"ArxivRAG invoke query: {query}")
        workflow = self.build_graph()
        response = workflow.invoke({"query": query})
        answer = response.get("answer", None)
        logger.info(f"ArxivRAG invoke answer len: {len(str(answer))}")
        # List defaults for the same reason as in generate_answer (list + str raises).
        return { "retrieve_docs": response.get("vector_docs", []) + response.get("graph_docs", []), "chain_result": answer }
