import threading
import numpy as np
from src.common.config import config
from langchain_neo4j import Neo4jGraph
from src.common.logger import getLogger
from langchain_core.documents import Document
from concurrent.futures import ThreadPoolExecutor, as_completed
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_experimental.graph_transformers import LLMGraphTransformer

# Module-wide logger shared by all GraphStore operations.
logger = getLogger()

class GraphStore:
    """Builds a knowledge graph in Neo4j from raw text.

    The text is chunked, each chunk batch is converted to graph documents by an
    LLM-backed graph transformer, node labels are namespaced with a caller
    prefix, and the result is persisted through ``Neo4jGraph``.
    """

    def __init__(self):
        """Load Neo4j connection settings from the application config."""
        config_dict = config.parse_config_key(["neo4j"])
        # NOTE: never log config_dict wholesale — it contains the password.
        logger.info(f"GraphStore __init__ url: {config_dict.get('url', '')}, username: {config_dict.get('username', '')}")
        self.url = config_dict.get("url", "")
        self.username = config_dict.get("username", "")
        self.password = config_dict.get("password", "")

    def new_graph_store(self):
        """Create and return a fresh ``Neo4jGraph`` connection using the stored credentials."""
        graph = Neo4jGraph(
            url = self.url,
            username = self.username,
            password = self.password
        )
        logger.info(f"GraphStore new_graph_store graph: {graph}")
        return graph

    @staticmethod
    def _split_into_batches(items, num_groups):
        """Split ``items`` into at most ``num_groups`` contiguous, near-equal chunks.

        Mirrors ``numpy.array_split`` sizing (earlier chunks receive the extra
        elements) but returns plain lists and drops empty chunks, so no worker
        is ever dispatched with nothing to do.

        :param items: list to partition.
        :param num_groups: maximum number of chunks to produce.
        :return: list of non-empty list chunks (empty list for empty input).
        """
        if not items:
            return []
        base, extra = divmod(len(items), num_groups)
        batches = []
        start = 0
        for i in range(num_groups):
            size = base + (1 if i < extra else 0)
            if size == 0:
                # All remaining chunks would be empty — stop here.
                break
            batches.append(items[start:start + size])
            start += size
        return batches

    def add_document_graph(self, llm_model, prefix_label, content):
        """Chunk ``content``, extract a graph with ``llm_model``, and write it to Neo4j.

        :param llm_model: chat model handed to ``LLMGraphTransformer``.
        :param prefix_label: namespace prepended to every extracted node label.
        :param content: raw text to ingest.
        :raises: re-raises any exception from a worker via ``Future.result()``.
        """
        logger.info(f"GraphStore add_document_graph prefix_label: {prefix_label}")

        text_splitter = RecursiveCharacterTextSplitter(chunk_size = 768, chunk_overlap = 50)
        split_texts = text_splitter.split_text(content)
        documents = [Document(page_content = text) for text in split_texts]
        logger.info(f"GraphStore add_document_graph documents len: {len(documents)}")

        if not documents:
            # Nothing to ingest: skip opening a Neo4j connection and
            # dispatching no-op worker tasks.
            return

        batch_size = 5
        graph = self.new_graph_store()
        llm_graph_transformer = LLMGraphTransformer(llm = llm_model, ignore_tool_usage = True)
        group_documents = self._split_into_batches(documents, batch_size)
        logger.info(f"GraphStore add_document_graph group_documents len: {len(group_documents)}")

        with ThreadPoolExecutor(max_workers=batch_size) as executor:
            future_tasks = [
                executor.submit(self.add_document_task, llm_graph_transformer, sub_docs, prefix_label, graph)
                for sub_docs in group_documents
            ]
            # Iterate inside the with-block so progress is logged as each
            # batch finishes; outside it, the pool has already joined every
            # worker and as_completed degenerates to a post-hoc dump.
            for index, future_task in enumerate(as_completed(future_tasks)):
                result = future_task.result()
                logger.info(f"GraphStore add_document_graph future_task: {index} finished, result: {result}")

    def add_document_task(self, llm_graph_transformer, documents, prefix_label, graph):
        """Worker: convert one batch of documents to graph documents and persist them.

        :param llm_graph_transformer: transformer used for entity/relation extraction.
        :param documents: batch of ``Document`` chunks to process.
        :param prefix_label: namespace prepended to every node label.
        :param graph: shared ``Neo4jGraph`` connection to write into.
        :return: name of the worker thread (used only for progress logging).
        """
        current_thread_name = threading.current_thread().name
        logger.info(f"GraphStore add_document_task thread: {current_thread_name}")
        graph_documents = llm_graph_transformer.convert_to_graph_documents(documents)
        logger.info(f"GraphStore add_document_task graph_documents len: {len(graph_documents)}")

        # Namespace every node label so graphs extracted from different
        # sources do not collide inside the shared Neo4j instance.
        prefix = prefix_label + "_"
        for graph_document in graph_documents:
            for node in graph_document.nodes:
                node.type = prefix + node.type
        logger.info(f"GraphStore add_document_task thread: {current_thread_name}, prefix label finished")

        graph.add_graph_documents(graph_documents = graph_documents, baseEntityLabel = True, include_source = True)
        logger.info(f"GraphStore add_document_task thread: {current_thread_name}, neo4j add_graph_documents finished")
        return current_thread_name
