# -*- coding: utf-8 -*-
# @Author: Cursor
# @Date: 2025-01-08
# @Last Modified by: Tim Liu
# @Last Modified time: 2025-01-14
# @Description: Knowledge Graph Builder for managing multiple graph engines and graph upsert operations

import json
from typing import Dict, List, Optional
from sqlalchemy.ext.asyncio import AsyncSession

from langchain_core.documents import Document
from langchain_core.vectorstores.base import VectorStore
from langchain_core.language_models import BaseLanguageModel
from langchain_community.graphs.graph_document import GraphDocument

from crewplus.apps.rag.models.ontology import OntologyDB
from crewplus.apps.rag.models.document import DocumentDB

from crewplus.apps.rag.schemas.ingest_request import IngestRequest

from crewplus.services.graph_engines.graph_engine import GraphEngine
from crewplus.services.chat_openai_service import ChatOpenAIService

from crewplus.custom.custom_function_registry import custom_function_registry
from .schema_graph_transformer import SchemaGraphTransformer
from .schema_manager import SchemaManager

class KnowledgeGraphBuilder:
    """Builds and maintains knowledge graphs from ingested documents.

    Manages per-user ``GraphEngine`` instances, registered ontologies, and the
    vector stores bound to each graph. Graph construction is driven by a JSON
    schema (either supplied by the caller or classified from document content
    via an LLM) and a ``SchemaGraphTransformer`` that converts retrieved
    document content into graph documents.
    """

    # Accepted schema graph build policies (see set_build_policy).
    VALID_BUILD_POLICIES = ("auto-schema", "schema-based")

    def __init__(self, db: AsyncSession):
        self.db = db  # Async database session, forwarded to SchemaManager
        self.graph_instances: Dict[int, GraphEngine] = {}  # user_id -> graph engine
        self.ontologies: Dict[int, OntologyDB] = {}  # ontology_id -> ontology
        self.vector_stores: Dict[int, VectorStore] = {}  # user_id -> bound vector store
        self.llm: Optional[BaseLanguageModel] = None  # Optional bound LLM
        self.embedder = None  # Optional bound embedder; must expose embed_query()
        self.build_policy = "auto-schema"  # Default build policy

    def register_graph_instance(self, user_id: int, graph_instance: GraphEngine):
        """Register a graph instance for a specific user."""
        self.graph_instances[user_id] = graph_instance

    def register_ontology(self, ontology_id: int, ontology: OntologyDB):
        """Register an ontology instance."""
        self.ontologies[ontology_id] = ontology

    def bind_vstore(self, user_id: int, vector_store: VectorStore):
        """Bind a vector store to a user's graph instance.

        Raises:
            ValueError: If no graph instance is registered for user_id.
        """
        if user_id not in self.graph_instances:
            raise ValueError("Graph instance not found for user_id")
        self.vector_stores[user_id] = vector_store

    def bind_llm(self, llm: BaseLanguageModel):
        """Bind a language model to the graph builder."""
        self.llm = llm

    def bind_embedder(self, embedder):
        """Bind an embedder to the graph builder."""
        self.embedder = embedder

    def set_build_policy(self, policy: str):
        """Set the schema graph build policy.

        Raises:
            ValueError: If policy is not one of VALID_BUILD_POLICIES.
        """
        if policy not in self.VALID_BUILD_POLICIES:
            raise ValueError("Invalid build policy")
        self.build_policy = policy

    def _get_llm(self) -> BaseLanguageModel:
        """Return the bound LLM, or fall back to the default Azure GPT-4o deployment."""
        return self.llm or ChatOpenAIService(callbacks=[]).get_azure_llm_deployment(
            deployment_id='GPT4o', temperature=0.0
        )

    async def upsert_graph(self, user_id: int, message: IngestRequest, document: DocumentDB, ontology_name: Optional[str] = None, schema_content: Optional[str] = None) -> List[GraphDocument]:
        """Upsert a graph for a user from an ingested document.

        Removes any nodes previously created from ``message.url``, then
        rebuilds the graph from content retrieved out of the bound vector
        store, constrained by the (supplied or classified) schema.

        Args:
            user_id: Owner of the target graph instance.
            message: Ingest request; ``message.url`` identifies the source file.
            document: Ingested document; its content may be used to classify a schema.
            ontology_name: Optional ontology used during schema classification.
            schema_content: Optional JSON schema; when omitted it is derived
                from the document content via the LLM (auto-schema policy only).

        Returns:
            The graph documents written to the graph (empty if no schema applies).

        Raises:
            ValueError: If no graph instance is registered for user_id, or if
                schema_content is missing under the schema-based policy.
        """
        if user_id not in self.graph_instances:
            raise ValueError("Graph instance not found for user_id")

        graph_instance = self.graph_instances[user_id]
        vector_store = self.vector_stores.get(user_id)

        # Determine the schema content when the caller did not supply one.
        if not schema_content:
            if self.build_policy == "schema-based":
                raise ValueError("Schema content is mandatory for schema-based build policy")
            if document.content:
                # Only instantiate the schema manager and the (possibly remote)
                # LLM when classification is actually needed.
                schema_manager = SchemaManager(db=self.db, ontology_name=ontology_name)
                schema_content = await schema_manager.classify_w_llm(document.content, self._get_llm())
            else:
                schema_content = None

        # Remove existing nodes related to the file_url before re-inserting.
        graph_instance.remove_existing_nodes(file_url=message.url)

        return await self._upsert_graph_instance(graph_instance, message.url, schema_content, vector_store)

    def _build_question(self, allowed_nodes) -> str:
        """Build a retrieval question from the allowed node names.

        Args:
            allowed_nodes (list): List of allowed node names.

        Returns:
            str: The constructed question.
        """
        return f"What are {', '.join(allowed_nodes)}?"

    async def _upsert_graph_instance(self, graph_instance: GraphEngine, file_url: str, schema_content: Optional[str], vector_store: Optional[VectorStore]) -> List[GraphDocument]:
        """Convert content retrieved for file_url into schema-constrained graph
        documents and add them to graph_instance.

        Returns an empty list when there is no schema, the retrieved content is
        too short, or the transformer produced no graph documents.

        Raises:
            ValueError: If a schema is present but no vector store is bound.
        """
        if not schema_content:
            # TODO: build simple graph nodes with some default schema
            return []

        # Fail fast with a clear message: the original code dereferenced a
        # possibly-None vector store and raised an opaque AttributeError.
        if vector_store is None:
            raise ValueError("Vector store not bound for this graph instance")

        # Convert schema content to graph meta.
        schema_json = json.loads(schema_content)
        allowed_nodes, allowed_relationships, node_properties, _ = SchemaGraphTransformer.convert_schema_to_graphmeta(schema_json)

        # Build a question using allowed nodes.
        query = self._build_question(allowed_nodes)

        print(f"File url: {file_url}")

        # Similarity search scoped to this file via a Zilliz/Milvus boolean
        # expression. Escape single quotes so a quote inside the URL cannot
        # break out of the string literal (expression-injection hardening).
        escaped_url = file_url.replace("'", "\\'")
        expr = f"source_url == '{escaped_url}'"
        docs_with_score = vector_store.similarity_search_with_score(query, k=5, expr=expr)

        # NOTE(review): str() keeps scores and metadata in the text handed to
        # the LLM; joining page_content alone may be intended — confirm:
        # concatenated_content = " ".join(doc.page_content for doc, _ in docs_with_score)
        concatenated_content = str(docs_with_score)

        print(f"Schema content: {schema_content}")
        print(f"Docs extracted from vstore: {concatenated_content}")

        # Skip the expensive LLM conversion when retrieval found nothing useful.
        content_length_threshold = 10
        if len(concatenated_content) < content_length_threshold:
            print(f"Concatenated content is too short (length: {len(concatenated_content)}). Skipping graph document conversion.")
            return []

        # Transform the retrieved text into graph documents constrained by the
        # schema (SchemaGraphTransformer instead of LLMGraphTransformer).
        llm_transformer_props = SchemaGraphTransformer(
            llm=self._get_llm(),
            allowed_nodes=allowed_nodes,
            allowed_relationships=allowed_relationships,
            node_properties=node_properties,
        )

        # A single Document object carrying the concatenated content.
        document = Document(page_content=concatenated_content)
        graph_documents = llm_transformer_props.convert_to_graph_documents([document])

        if not graph_documents:
            return []

        graph_documents_str = "\n".join(str(doc) for doc in graph_documents)
        print(f"Graph documents: {graph_documents_str}")

        # TODO: hook custom_function_registry ('after_add_graph_document') to
        # post-process graph_documents before insertion.
        graph_instance.add_graph_documents(graph_documents)
        # TODO: try-catch for add_graph_documents; detect full vs. partial node creation.

        return graph_documents

    def _upsert_vector_index(self, graph_instance: GraphEngine, index_name: str, node_type: str, vprop: str, dimensions: int = 1536):
        """Create a cosine-similarity vector index for graph nodes.

        Args:
            graph_instance: Target graph engine.
            index_name: Name of the vector index.
            node_type: Node label the index covers.
            vprop: Node property holding the embedding vector.
            dimensions: Embedding dimensionality (default 1536).
        """
        graph_instance.create_vector_index(
            index_name,
            label=node_type,
            embedding_property=vprop,
            dimensions=dimensions,
            similarity_fn="cosine",
            fail_if_exists=False
        )

    def flush_vector_index(self, user_id: int, schema_content: Optional[str]):
        """Create/refresh all vector indexes described by schema_content.

        Raises:
            ValueError: If no graph instance is registered for user_id, or if
                schema_content is empty.
        """
        if user_id not in self.graph_instances:
            raise ValueError("Graph instance not found for user_id")
        graph_instance = self.graph_instances[user_id]

        if not schema_content:
            raise ValueError("Schema content is required to flush vector indexes")

        schema_json = json.loads(schema_content)
        _, _, _, indexes = SchemaGraphTransformer.convert_schema_to_graphmeta(schema_json)

        for node_type, index_info in indexes.items():
            for index_name, index_props in index_info.items():
                self._upsert_vector_index(graph_instance, index_name, node_type, index_props["vprop"])

    def upsert_embeddings_for_nodes(self, user_id: int, schema_content: Optional[str]):
        """Compute and store embeddings for graph nodes described by schema_content.

        For every indexed node type, each node's non-technical properties are
        concatenated, embedded, and written back to the node; the vector index
        is (re)created after all nodes of a type are processed. A falsy
        schema_content is a no-op.

        Raises:
            ValueError: If no graph instance is registered for user_id, or if
                no embedder is bound.
        """
        if user_id not in self.graph_instances:
            raise ValueError("Graph instance not found for user_id")
        graph_instance = self.graph_instances[user_id]

        if not schema_content:
            return

        # Fail fast instead of AttributeError on a missing embedder.
        if self.embedder is None:
            raise ValueError("Embedder is not bound; call bind_embedder() first")

        schema_json = json.loads(schema_content)
        _, _, _, indexes = SchemaGraphTransformer.convert_schema_to_graphmeta(schema_json)

        # Bookkeeping properties excluded from the embedding text.
        technical_keys = {"source_url", "page", "node_embedding"}

        for node_type, index_info in indexes.items():
            for index_name, index_props in index_info.items():
                vprop = index_props["vprop"]
                nodes = graph_instance.get_nodes_by_type(node_type)

                for node in nodes:
                    properties = {k: v for k, v in node.items() if k not in technical_keys}
                    if not properties:
                        continue

                    # Coerce values to str — non-string properties made the
                    # original ", ".join(...) raise TypeError.
                    text_for_embedding = ", ".join(str(v) for v in properties.values())
                    vector = self.embedder.embed_query(text_for_embedding)

                    print(f"Graph node: {str(node)} ")

                    graph_instance.upsert_vector(
                        node_id=node.element_id,
                        embedding_property=vprop,
                        vector=vector,
                    )

                # Upsert vector index after all nodes' vectors are upserted.
                self._upsert_vector_index(graph_instance, index_name, node_type, vprop)