import json
import logging
import os
import uuid

from neo4j import GraphDatabase

# import openai  # Or any other LLM API client

# Set up logging.
# NOTE(review): basicConfig() mutates the root logger at import time, which
# affects any application that merely imports this module; consider moving it
# into main() if this module is reused as a library.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)


class DomainAwareGraphBuilder:
    """Build a domain-aware knowledge graph in Neo4j from document chunks.

    Expects Document and Chunk nodes (linked by CONTAINS) to already exist
    in the database. For each chunk, knowledge (entities, relationships,
    concepts) is extracted -- the LLM call itself is currently stubbed
    out -- and written back to Neo4j as typed nodes and relationships.
    """

    def __init__(self, uri, user, password, database="neo4j", domain="simulation_science"):
        """Create the Neo4j driver and select the domain schema.

        Args:
            uri: Bolt/neo4j URI of the database server.
            user: Database user name.
            password: Database password.
            database: Database to run sessions against. The Neo4j Python
                driver accepts this per session, not as a driver option,
                so it is stored and forwarded by every session() call.
            domain: Schema key; unknown values fall back to "general".
        """
        # NOTE: the original code passed database= to GraphDatabase.driver(),
        # but in the official driver `database` is a session-level option
        # (driver-level config rejects unknown keys), hence self.database.
        self.driver = GraphDatabase.driver(uri, auth=(user, password))
        self.database = database
        # self.openai_client = openai.OpenAI()  # Initialize your LLM client
        self.domain = domain
        self.domain_schemas = self._initialize_domain_schemas()
        logger.info("Initialized Domain-Aware Graph Builder for %s domain", domain)

    def _initialize_domain_schemas(self):
        """Return the schema (entity types, relationship types, context prose)
        for ``self.domain``, falling back to the "general" schema."""
        schemas = {
            "simulation_science": {
                "entity_types": [
                    "SOFTWARE", "FISH", "TASK", "PRECONFIGURATION",
                    "FILE_TYPE", "ENVIRONMENT", "WEBSITE", "TUTORIAL",
                    "FMU", "SIMULATION", "PARAMETER", "MODEL", "RESULT"
                ],
                "relationship_types": [
                    "SIMULATES", "PART_OF", "USED_FOR", "CONFIGURED_BY",
                    "INPUTS_TO", "OUTPUTS_FROM", "DEPENDS_ON", "TYPE_OF",
                    "RUNS_IN", "DOCUMENTS", "REFERS_TO"
                ],
                "domain_context": """
                This domain is about simulation science, particularly related to:
                - Fish growth simulation models
                - Software tools like Kopl for simulation
                - Simulation parameters and configurations
                - File formats used in simulations (XML, PDF)
                - Results and analysis from simulations
                """
            },
            "general": {
                "entity_types": [
                    "PERSON", "ORGANIZATION", "LOCATION", "DATE", "EVENT",
                    "PRODUCT", "TECHNOLOGY", "CONCEPT", "DOCUMENT"
                ],
                "relationship_types": [
                    "RELATED_TO", "PART_OF", "CREATED_BY", "LOCATED_IN",
                    "WORKS_FOR", "HAPPENED_AT", "HAPPENED_ON", "USES"
                ],
                "domain_context": """
                General domain covering common entity types and relationships.
                """
            }
        }

        # Default to general if specified domain not found
        return schemas.get(self.domain, schemas["general"])

    def close(self):
        """Release the driver and its connection pool."""
        self.driver.close()

    def get_document_context(self, doc_id):
        """Return ``{"title", "source"}`` for a Document node.

        Falls back to "Unknown" values when the document does not exist.
        """
        with self.driver.session(database=self.database) as session:
            result = session.run("""
                MATCH (d:Document {id: $doc_id})
                RETURN d.title as title, d.source as source
            """, doc_id=doc_id)
            record = result.single()
            if record:
                return {"title": record["title"], "source": record["source"]}
            return {"title": "Unknown", "source": "Unknown"}

    def get_chunks(self):
        """Retrieve all document chunks from Neo4j with document context.

        Returns:
            list of dicts with keys doc_id, chunk_id, text, context.

        Document context is fetched once per document and cached, so a
        document with many chunks no longer triggers one context query
        per chunk (the original did an N+1 round-trip).
        """
        chunks = []
        context_cache = {}
        with self.driver.session(database=self.database) as session:
            result = session.run("""
                MATCH (d:Document)-[:CONTAINS]->(c:Chunk)
                RETURN d.id AS doc_id, c.id AS chunk_id, c.text AS text
            """)
            for record in result:
                doc_id = record["doc_id"]
                if doc_id not in context_cache:
                    context_cache[doc_id] = self.get_document_context(doc_id)
                chunks.append({
                    "doc_id": doc_id,
                    "chunk_id": record["chunk_id"],
                    "text": record["text"],
                    "context": context_cache[doc_id]
                })
        logger.info("Retrieved %d chunks from Neo4j", len(chunks))
        return chunks

    def extract_knowledge(self, text, context):
        """Extract domain-specific knowledge from ``text``.

        Builds the LLM extraction prompt from the domain schema. The LLM
        call itself is currently disabled (see TODO below); until it is
        wired up this returns an empty-but-valid result so that downstream
        graph building does not crash. (The original version implicitly
        returned None here because the whole try/except was commented out,
        which made create_knowledge_graph raise AttributeError.)

        Returns:
            dict with "entities", "relationships" and "concepts" lists.
        """
        # Format the domain schema as a string for the prompt
        entity_types_str = ", ".join(self.domain_schemas["entity_types"])
        relationship_types_str = ", ".join(self.domain_schemas["relationship_types"])
        domain_context = self.domain_schemas["domain_context"]

        # Create the prompt with domain context
        prompt = f"""
        # Domain Context
        {domain_context}

        # Document Context
        Title: {context['title']}
        Source: {context['source']}

        # Task
        Analyze the following text and extract domain-specific knowledge:

        Text: "{text}"

        Extract the following:
        1. Entities: Identify important entities in the text.
           Valid entity types: {entity_types_str}

        2. Relationships: Identify relationships between the entities.
           Valid relationship types: {relationship_types_str}

        3. Concepts: Identify higher-level concepts or themes.

        # Output Format
        Provide your analysis in JSON format:

        {{
          "entities": [
            {{
              "id": "unique_id",
              "text": "entity_text",
              "type": "entity_type",
              "properties": {{
                "key1": "value1"
              }}
            }}
          ],
          "relationships": [
            {{
              "source": "source_entity_id",
              "target": "target_entity_id",
              "type": "relationship_type",
              "properties": {{
                "key1": "value1"
              }}
            }}
          ],
          "concepts": [
            {{
              "name": "concept_name",
              "description": "brief description",
              "related_entities": ["entity_id1", "entity_id2"]
            }}
          ]
        }}

        Only return the JSON object, no additional text.
        """

        # TODO: enable LLM extraction once a client is configured, e.g.:
        #   try:
        #       response = self.openai_client.chat.completions.create(
        #           model="gpt-4",
        #           messages=[{"role": "user", "content": prompt}],
        #           response_format={"type": "json_object"},
        #       )
        #       return json.loads(response.choices[0].message.content)
        #   except Exception as e:
        #       logger.error(f"Error extracting knowledge with LLM: {str(e)}")
        #       return {"entities": [], "relationships": [], "concepts": []}
        del prompt  # prompt is ready for the LLM call above; unused until then
        return {"entities": [], "relationships": [], "concepts": []}

    @staticmethod
    def _safe_identifier(candidate, fallback):
        """Return ``candidate`` if it is safe to splice into a Cypher query
        as a label or relationship type, else ``fallback``.

        Cypher cannot parameterize labels/relationship types, so they are
        interpolated into the query text; this guards against Cypher
        injection from untrusted (LLM-produced) extraction output.
        """
        if isinstance(candidate, str) and candidate.isidentifier():
            return candidate
        logger.warning("Rejected unsafe identifier %r; using %s", candidate, fallback)
        return fallback

    def create_knowledge_graph(self, doc_id, chunk_id, knowledge):
        """Materialize extracted knowledge as a graph in Neo4j.

        Args:
            doc_id: id of the owning Document node.
            chunk_id: id of the Chunk the knowledge came from.
            knowledge: dict with "entities", "relationships", "concepts"
                as produced by extract_knowledge().
        """
        with self.driver.session(database=self.database) as session:
            # Create entities, linked to both their chunk and document.
            for entity in knowledge.get("entities", []):
                # Sanitize the label: it is spliced into the query text.
                neo4j_label = self._safe_identifier(entity["type"], "Entity")
                properties = dict(entity.get("properties", {}))
                # Add the entity text as a name property if not already present
                properties.setdefault("name", entity["text"])

                session.run(f"""
                    MATCH (d:Document {{id: $doc_id}})
                    MATCH (c:Chunk {{id: $chunk_id}})
                    MERGE (e:{neo4j_label} {{id: $entity_id}})
                    SET e += $properties
                    MERGE (c)-[:MENTIONS]->(e)
                    MERGE (d)-[:CONTAINS_ENTITY]->(e)
                    """,
                            doc_id=doc_id,
                            chunk_id=chunk_id,
                            entity_id=entity["id"],
                            properties=properties
                            )

            # Create relationships between entities (type is sanitized for
            # the same injection reason as labels above).
            for rel in knowledge.get("relationships", []):
                rel_type = self._safe_identifier(rel["type"], "RELATED_TO")
                session.run(f"""
                    MATCH (source) WHERE source.id = $source_id
                    MATCH (target) WHERE target.id = $target_id
                    MERGE (source)-[r:{rel_type}]->(target)
                    SET r += $properties
                    """,
                            source_id=rel["source"],
                            target_id=rel["target"],
                            properties=rel.get("properties", {})
                            )

            # Create concept nodes. MERGE on name so repeated runs reuse the
            # same Concept node; the original merged on a freshly generated
            # uuid, which could never match and created duplicates each run.
            for concept in knowledge.get("concepts", []):
                session.run("""
                    MATCH (c:Chunk {id: $chunk_id})
                    MERGE (con:Concept {name: $concept_name})
                    ON CREATE SET con.id = $concept_id
                    SET con.description = $description
                    MERGE (c)-[:DISCUSSES]->(con)
                    """,
                            chunk_id=chunk_id,
                            concept_id=str(uuid.uuid4()),
                            concept_name=concept["name"],
                            description=concept["description"]
                            )

                # Link concepts to related entities (by the merge key, name).
                for entity_id in concept.get("related_entities", []):
                    session.run("""
                        MATCH (con:Concept {name: $concept_name})
                        MATCH (e) WHERE e.id = $entity_id
                        MERGE (con)-[:ENCOMPASSES]->(e)
                        """,
                                concept_name=concept["name"],
                                entity_id=entity_id
                                )

    def build_knowledge_graph(self):
        """Build the domain-aware knowledge graph for every stored chunk."""
        chunks = self.get_chunks()

        for chunk in chunks:
            doc_id = chunk["doc_id"]
            chunk_id = chunk["chunk_id"]

            # Extract domain-specific knowledge, then persist it.
            knowledge = self.extract_knowledge(chunk["text"], chunk["context"])
            self.create_knowledge_graph(doc_id, chunk_id, knowledge)

            logger.info("Processed chunk %s for document %s", chunk_id, doc_id)

    def generate_domain_insights(self):
        """Run domain-specific analytics queries against the graph.

        Returns:
            dict mapping query name -> list of result records (empty list
            when a query fails; the failure is logged).
        """
        with self.driver.session(database=self.database) as session:
            # Domain-specific queries based on the chosen domain
            if self.domain == "simulation_science":
                queries = {
                    # type(r) (not the property r.type) is how Cypher
                    # exposes a relationship's type.
                    "simulation_tools": """
                        MATCH (s:SOFTWARE)-[r]->(t)
                        WHERE NOT type(r) IN ['CONTAINS', 'MENTIONS']
                        RETURN s.name as Software, type(r) as Relationship, t.name as Target,
                               labels(t) as TargetType
                        LIMIT 20
                    """,

                    # Cypher list comprehensions use [x IN list | expr];
                    # the original used Python-style [expr for x in list],
                    # which is a Cypher syntax error.
                    "fish_simulation_models": """
                        MATCH path = (f:FISH)-[*1..3]->(m)
                        WHERE m:MODEL OR m:SIMULATION
                        RETURN [node IN nodes(path) | node.name] as SimulationPath,
                               [rel IN relationships(path) | type(rel)] as RelationshipTypes
                        LIMIT 15
                    """,

                    "simulation_configurations": """
                        MATCH (p:PRECONFIGURATION)-[*1..2]-(related)
                        RETURN p.name as Configuration,
                               collect(distinct related.name) as RelatedEntities,
                               collect(distinct labels(related)) as EntityTypes
                        LIMIT 10
                    """
                }
            else:
                # General domain insights
                queries = {
                    "central_entities": """
                        MATCH (e)-[r]-()
                        WHERE NOT e:Document AND NOT e:Chunk AND NOT e:Concept
                        RETURN e.name as Entity, labels(e) as Types, count(r) as ConnectionCount
                        ORDER BY ConnectionCount DESC
                        LIMIT 10
                    """,

                    "key_concepts": """
                        MATCH (c:Concept)-[:ENCOMPASSES]->(e)
                        RETURN c.name as Concept, c.description as Description,
                               collect(distinct e.name) as RelatedEntities,
                               count(e) as EntityCount
                        ORDER BY EntityCount DESC
                        LIMIT 10
                    """,

                    "relationship_summary": """
                        MATCH ()-[r]->()
                        WHERE type(r) <> 'CONTAINS' AND type(r) <> 'MENTIONS'
                              AND type(r) <> 'CONTAINS_ENTITY' AND type(r) <> 'DISCUSSES'
                              AND type(r) <> 'ENCOMPASSES'
                        RETURN type(r) as RelationshipType, count(r) as Count
                        ORDER BY Count DESC
                    """
                }

            results = {}
            for name, query in queries.items():
                try:
                    results[name] = list(session.run(query))
                except Exception as e:
                    logger.error("Error running query %s: %s", name, e)
                    results[name] = []

            return results

    def get_natural_language_insights(self, graph_insights):
        """Turn raw graph-query results into natural-language text.

        The LLM call is currently disabled (see TODO below); until it is
        wired up this returns the formatted raw insights so callers get a
        useful string instead of None. (The original implicitly returned
        None because the try/except was commented out.)
        """
        # Format the insights as text
        insights_text = ""
        for name, results in graph_insights.items():
            insights_text += f"\n--- {name} ---\n"
            for record in results:
                insights_text += f"{record}\n"

        prompt = f"""
        # Task
        Analyze the following insights from a knowledge graph about {self.domain} and provide a natural language summary
        of the key findings, patterns, and interesting observations.

        # Graph Insights
        {insights_text}

        # Output Format
        Provide a concise summary (3-5 paragraphs) highlighting:
        1. Key entities and their relationships
        2. Important patterns or clusters
        3. Surprising or unexpected connections
        4. Potential applications or uses of this knowledge

        Make your summary insightful and focused on domain-specific knowledge.
        """

        # TODO: enable LLM summarization once a client is configured, e.g.:
        #   try:
        #       response = self.openai_client.chat.completions.create(
        #           model="gpt-4",
        #           messages=[{"role": "user", "content": prompt}],
        #       )
        #       return response.choices[0].message.content
        #   except Exception as e:
        #       logger.error(f"Error generating natural language insights: {str(e)}")
        #       return "Error generating insights."
        del prompt  # prompt is ready for the LLM call above; unused until then
        return "LLM summarization is disabled; raw graph insights:\n" + insights_text


# Example usage
def main():
    """Example driver: build the knowledge graph and print NL insights.

    Connection settings are read from the environment so credentials are
    never committed to source control:
        NEO4J_URI      (required) e.g. "neo4j+s://<id>.databases.neo4j.io"
        NEO4J_PASSWORD (required)
        NEO4J_USER     (optional, default "neo4j")
        NEO4J_DATABASE (optional, default "neo4j")

    Raises:
        KeyError: if a required environment variable is missing.
    """
    # SECURITY: the original version hard-coded a live-looking Aura URI and
    # password here; those credentials should be considered leaked and rotated.
    uri = os.environ["NEO4J_URI"]
    user = os.environ.get("NEO4J_USER", "neo4j")
    password = os.environ["NEO4J_PASSWORD"]
    database = os.environ.get("NEO4J_DATABASE", "neo4j")

    # Initialize the domain-aware knowledge graph builder for simulation science
    kg_builder = DomainAwareGraphBuilder(
        uri, user, password, database=database, domain="simulation_science"
    )

    try:
        # Build the knowledge graph
        kg_builder.build_knowledge_graph()

        # Generate graph insights
        graph_insights = kg_builder.generate_domain_insights()

        # Generate natural language summary
        nl_insights = kg_builder.get_natural_language_insights(graph_insights)

        # Print natural language insights
        print("\n=== Natural Language Insights ===\n")
        print(nl_insights)

    finally:
        # Always release the driver, even if graph building fails.
        kg_builder.close()


if __name__ == "__main__":
    main()
