import os
import threading
import numpy as np
from arxiv import arxiv
from typing import TypedDict, List
from pydantic import BaseModel, Field
from langgraph.graph import StateGraph
from src.common.logger import getLogger
from langgraph.constants import START, END
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser
from concurrent.futures import ThreadPoolExecutor, as_completed
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_experimental.graph_transformers import LLMGraphTransformer

# Module-wide logger obtained from the project's logger factory.
logger = getLogger()

class ArxivState(TypedDict):
    """Shared state flowing through the LangGraph workflow.

    Each node returns a partial dict that LangGraph merges into this state;
    the parallel branches write disjoint keys so the fan-in at
    generate_answer sees all three retrieval contexts.
    """
    # Original user question driving retrieval and answer generation.
    query: str
    # Chunked arXiv summaries produced by search_arxiv (langchain Document objects).
    documents: List
    # NOTE(review): despite the List annotation, retrieve_vector stores a
    # newline-joined str here — consumers treat it as text.
    vector_docs: List
    # NOTE(review): stored as a newline-joined str of "source - rel - target"
    # triples by retrieve_graph, not a List.
    graph_docs: List
    # NOTE(review): stored as a newline-joined str by search_web, not a List.
    search_docs: List
    # Final LLM-generated answer.
    answer: str

class GraphEntity(BaseModel):
    """Structured-output schema for LLM entity extraction.

    Bound via ``with_structured_output`` in extract_graph_entity so the model
    returns the named entities found in the query text as a plain list.
    """
    names: List[str] = Field(description = "文本中的实体")

class ArxivStoreRAG:
    """RAG pipeline over arXiv combining vector, graph and web retrieval.

    A LangGraph workflow fans out from START to (a) arXiv ingestion followed
    by parallel vector-store and graph-store retrieval and (b) a web-search
    tool; generate_answer merges the three retrieved contexts into one
    LLM-written answer.
    """

    def __init__(self, llm_model, embed_model, agent_tools, vector_store, graph_store, prefix_library, max_row):
        """Store collaborators; no I/O happens here.

        llm_model      -- chat model used for graph transformation, entity extraction and answering
        embed_model    -- embedding model backing the vector collection
        agent_tools    -- iterable of tools; one named "search_web" is used if present
        vector_store   -- factory exposing new_vector_store(embed_model, collection_name)
        graph_store    -- graph DB handle (Neo4j-style query / add_graph_documents)
        prefix_library -- namespace prefix applied to collection names and graph labels
        max_row        -- max arXiv results fetched, also the retriever's top-k
        """
        self.llm_model = llm_model
        self.embed_model = embed_model
        self.agent_tools = agent_tools
        self.vector_store = vector_store
        self.graph_store = graph_store
        self.prefix_library = prefix_library
        self.max_row = max_row

    def search_arxiv(self, state: ArxivState):
        """Fetch up to max_row relevant arXiv papers and chunk their summaries."""
        logger.info("ArxivStoreRAG search_arxiv start")
        query = state["query"]
        arxiv_client = arxiv.Client()
        arxiv_result = arxiv.Search(query = query, max_results = self.max_row, sort_by = arxiv.SortCriterion.Relevance)

        documents = []
        for result in arxiv_client.results(arxiv_result):
            documents.append({ "title": result.title, "summary": result.summary, "url": result.entry_id })
        logger.info(f"ArxivStoreRAG search_arxiv documents len: {len(documents)}")
        logger.info(f"ArxivStoreRAG search_arxiv documents: \n{documents}")

        # Token-based splitting keeps chunks within the embedder's window;
        # each chunk carries its full paper dict (title/summary/url) as metadata.
        text_splitter = RecursiveCharacterTextSplitter.from_tiktoken_encoder(chunk_size = 768, chunk_overlap = 50)
        doc_texts = text_splitter.create_documents([doc["summary"] for doc in documents], metadatas = documents)
        logger.info(f"ArxivStoreRAG search_arxiv doc_texts len: {len(doc_texts)}")
        return { "documents": doc_texts }

    def retrieve_vector(self, state: ArxivState):
        """Index the chunked documents and retrieve the top-k for the query.

        Returns the hits as one newline-joined string under "vector_docs".
        """
        logger.info("ArxivStoreRAG retrieve_vector start")
        query = state["query"]
        documents = state["documents"]

        collection_name = self.prefix_library + "arxiv_vector"
        vector_store = self.vector_store.new_vector_store(self.embed_model, collection_name)
        # NOTE(review): documents are re-added on every invocation; if the
        # backing collection persists across calls this accumulates
        # duplicates — confirm against the vector_store factory semantics.
        vector_store.add_documents(documents)

        retriever = vector_store.as_retriever(search_kwargs = { "k": self.max_row })
        vector_docs = retriever.invoke(query)
        return { "vector_docs": "\n".join([doc.page_content for doc in vector_docs]) }

    def retrieve_graph(self, state: ArxivState):
        """Convert chunks to graph documents in parallel, persist them, then
        query the graph for triples around entities extracted from the query."""
        logger.info("ArxivStoreRAG retrieve_graph start")
        graph_transformer = LLMGraphTransformer(llm = self.llm_model, ignore_tool_usage = True)

        batch_size = 5
        documents = state["documents"]
        # np.array_split yields batch_size groups of near-equal size (NOT
        # groups of batch_size elements); some groups may be empty when there
        # are fewer documents than workers, which the worker handles fine.
        array_documents = np.array(documents)
        group_documents = np.array_split(array_documents, batch_size)
        logger.info(f"ArxivStoreRAG retrieve_graph group_documents len: {len(group_documents)}")

        graph = self.graph_store
        library_label = self.prefix_library + "arxiv_graph"
        with ThreadPoolExecutor(max_workers = batch_size) as executor:
            future_tasks = [
                executor.submit(self.add_document_task, graph_transformer, sub_docs, library_label, graph)
                for sub_docs in group_documents
            ]
            # Consuming as_completed inside the pool logs progress as each
            # batch finishes (instead of only after executor shutdown);
            # .result() also re-raises any worker exception here.
            for index, future_task in enumerate(as_completed(future_tasks)):
                result = future_task.result()
                logger.info(f"ArxivStoreRAG retrieve_graph future_task: {index} finished, result: {result}")

        query = state["query"]
        graph_entities = self.extract_graph_entity(query)
        graph_docs = self.select_graph_document(graph, graph_entities, library_label)
        return { "graph_docs": graph_docs }

    def add_document_task(self, graph_transformer, documents, prefix_label, graph):
        """Worker: LLM-convert one batch of documents to graph documents,
        namespace their node types with prefix_label, and store them.

        Returns the worker thread's name (used only for progress logging).
        """
        current_thread_name = threading.current_thread().name
        logger.info(f"ArxivStoreRAG add_document_task thread: {current_thread_name}")
        graph_documents = graph_transformer.convert_to_graph_documents(documents)
        logger.info(f"ArxivStoreRAG add_document_task graph_documents len: {len(graph_documents)}")

        # Prefix every node type so this library's subgraph can later be
        # selected by "label STARTS WITH library_label".
        prefix = prefix_label + "_"
        for graph_document in graph_documents:
            for node in graph_document.nodes:
                node.type = prefix + node.type
        logger.info(f"ArxivStoreRAG add_document_task thread: {current_thread_name}, prefix label finished")

        graph.add_graph_documents(graph_documents = graph_documents, baseEntityLabel = True, include_source = True)
        logger.info(f"ArxivStoreRAG add_document_task thread: {current_thread_name}, neo4j add_graph_documents finished")
        return current_thread_name

    def extract_graph_entity(self, query):
        """Extract named entities from the query via structured LLM output.

        Returns a list of entity-name strings (GraphEntity.names).
        """
        logger.info("ArxivStoreRAG extract_graph_entity start")
        template = """
            你是一个专业的信息抽取系统。请从以下文本中精确识别并提取所有有意义的命名实体。

            要求：
            1. 只提取明确提及的实体，不要推测或生成不存在的内容。
            2. 实体类型包括但不限于：人物（Person）、组织（Organization）、地点（Location）、日期/时间（Date）、技术术语（Technology）、产品（Product）、事件（Event）。
            3. 对每个实体，提供实体在文本中的原始形式（保持大小写和标点）
            4. 不要包含任何解释、前缀、后缀或 Markdown。

            文本： {context}
        """
        prompt = ChatPromptTemplate.from_template(template)
        extract_chain = prompt | self.llm_model.with_structured_output(GraphEntity)
        extract_result = extract_chain.invoke({"context": query})
        logger.info(f"ArxivStoreRAG extract_graph_entity extract_result: {extract_result}")
        return extract_result.names

    def select_graph_document(self, graph, entities, library_label):
        """Collect "source - relationship - target" triples around each entity.

        Entity names come from an LLM and are interpolated into a Cypher
        label position (labels cannot be parameterized), so backticks and
        double quotes are stripped to block Cypher injection.
        """
        logger.info("ArxivStoreRAG select_graph_document start")
        graph_docs = []
        for entity in entities:
            # Sanitize untrusted, LLM-derived text before f-string interpolation.
            safe_entity = entity.replace("`", "").replace('"', "")
            response = graph.query(
                f"""
                    MATCH (n)
                    WHERE any(lbl IN labels(n) WHERE lbl STARTS WITH "{library_label}")
                    OPTIONAL MATCH (n:`{safe_entity}`)-[r]->(m)
                    RETURN n.id AS source_id, type(r) AS relationship, m.id AS target_id
                    LIMIT 50
                """
            )
            graph_docs.extend([f"{ele['source_id']} - {ele['relationship']} - {ele['target_id']}" for ele in response])
        logger.info(f"ArxivStoreRAG select_graph_document graph_docs len: {len(graph_docs)}")
        return "\n".join(graph_docs)

    def search_web(self, state: ArxivState):
        """Run the optional "search_web" tool for the query.

        Returns an empty string under "search_docs" when no such tool is
        configured (the original raised NameError on an unbound local).
        """
        logger.info("ArxivStoreRAG search_web start")
        query = state["query"]
        search_results = []
        for tool in self.agent_tools:
            if "search_web" == tool.name:
                search_results = tool.func(query)
                break
        return { "search_docs": "\n".join(search_results) }

    def generate_answer(self, state: ArxivState):
        """Merge the three retrieval contexts and ask the LLM for the answer."""
        logger.info("ArxivStoreRAG generate_answer start")
        query = state["query"]
        # Newline-join so the last line of one context cannot fuse with the
        # first line of the next inside the prompt.
        context = "\n".join([state.get("vector_docs", ""), state.get("graph_docs", ""), state.get("search_docs", "")])
        logger.info(f"ArxivStoreRAG generate_answer context len: {len(context)}")
        template = """
            请根据提供的上下文回答用户问题，务必按上下文事实回答，不得自行杜撰和扩展。
            
            上下文： {context}
            用户问题： {question}
            
            请务必用中文详尽的回答问题。
        """
        prompt = ChatPromptTemplate.from_template(template)
        chain = prompt | self.llm_model | StrOutputParser()
        response = chain.invoke({ "context": context, "question": query })
        logger.info(f"ArxivStoreRAG generate_answer response len: {len(response)}")
        return { "answer": response }

    def build_graph(self, image_dir = "D:/Downloads/taixu/images/agentics"):
        """Assemble and compile the LangGraph workflow; export a PNG diagram.

        image_dir -- directory for the workflow diagram (default kept from the
        original hard-coded path for backward compatibility).
        """
        logger.info("ArxivStoreRAG build_graph start")
        graph = StateGraph(ArxivState)
        graph.add_node("search_arxiv", self.search_arxiv)
        graph.add_node("retrieve_vector", self.retrieve_vector)
        graph.add_node("retrieve_graph", self.retrieve_graph)
        graph.add_node("search_web", self.search_web)
        graph.add_node("generate_answer", self.generate_answer)

        # Two parallel branches out of START; all three retrieval paths
        # converge on generate_answer.
        graph.add_edge(START, "search_arxiv")
        graph.add_edge("search_arxiv", "retrieve_vector")
        graph.add_edge("search_arxiv", "retrieve_graph")
        graph.add_edge(START, "search_web")
        graph.add_edge("retrieve_vector", "generate_answer")
        graph.add_edge("retrieve_graph", "generate_answer")
        graph.add_edge("search_web", "generate_answer")
        graph.add_edge("generate_answer", END)

        workflow = graph.compile()

        # exist_ok avoids the check-then-create race of an exists()/makedirs pair.
        os.makedirs(image_dir, exist_ok = True)
        image_path = os.path.join(image_dir, "RAG_Arxiv_Workflow.png")
        with open(image_path, 'wb') as file:
            file.write(workflow.get_graph().draw_mermaid_png())

        return workflow

    def invoke(self, query):
        """Entry point: run the full workflow for one query.

        Returns a dict with the combined vector+graph contexts
        ("retrieve_docs") and the final answer ("chain_result").
        """
        logger.info(f"ArxivStoreRAG invoke query: {query}")
        workflow = self.build_graph()
        response = workflow.invoke({"query": query})
        answer = response.get("answer", None)
        logger.info(f"ArxivStoreRAG invoke answer len: {len(str(answer))}")
        return { "retrieve_docs": response.get("vector_docs", "") + response.get("graph_docs", ""), "chain_result": answer }
