import os
import re

import numpy as np
import spacy
from neo4j import GraphDatabase
from spacy.training import offsets_to_biluo_tags
from spacy.training.example import Example
from spacy.util import minibatch, compounding
from transformers import pipeline

class EnhancedKnowledgeGraphBuilder:
    """Build and enrich a Neo4j knowledge graph from document chunks.

    Pipeline: read (Document)-[:CONTAINS]->(Chunk) rows from Neo4j, run
    spaCy NER plus noun-phrase extraction over each chunk, classify
    entity-pair relations with a transformers pipeline, and write Entity
    nodes and typed relationships back to the graph.  The spaCy NER model
    can also be fine-tuned on examples harvested from the graph itself.
    """

    # Whitelist of characters allowed in a Cypher relationship type.  The
    # type cannot be sent as a query parameter, so it must be sanitized
    # before interpolation (see create_relationship_between_entities).
    _REL_TYPE_RE = re.compile(r"[^A-Z0-9_]")

    def __init__(self, uri, user, password, database_name="neo4j"):
        """Connect to Neo4j and load the NLP components.

        Args:
            uri: Bolt/neo4j URI of the database server.
            user: Database username.
            password: Database password.
            database_name: Target database; applied per session.
        """
        # `database` is a *session* setting in the neo4j Python driver, not
        # a driver config key (passing it to GraphDatabase.driver() is
        # rejected), so store it and apply it when opening sessions.
        self.driver = GraphDatabase.driver(uri, auth=(user, password))
        self.database = database_name
        self.nlp = None
        self.relation_extractor = None
        self.initialize_nlp_components()

    def _session(self):
        """Open a Neo4j session bound to the configured database."""
        return self.driver.session(database=self.database)

    def initialize_nlp_components(self):
        """Load the spaCy model (fine-tuned copy if present) and the relation extractor."""
        try:
            self.nlp = spacy.load("fine_tuned_model")
            print("Loaded fine-tuned model")
        except OSError:
            # spacy.load raises OSError when the model directory/package is
            # missing; fall back to the stock large English model.
            self.nlp = spacy.load("en_core_web_lg")
            print("Loaded base model")

        try:
            self.relation_extractor = pipeline(
                "text-classification",
                # NOTE(review): rebel-large is a seq2seq model; confirm it
                # behaves under the text-classification task or swap in a
                # dedicated relation-classification checkpoint.
                model="Babelscape/rebel-large",
                device=0 if spacy.prefer_gpu() else -1,
            )
            print("Initialized relation extraction model")
        except Exception as e:
            # Relation extraction is optional downstream; degrade gracefully.
            print(f"Could not load relation extraction model: {e}")
            self.relation_extractor = None

    def close(self):
        """Release the Neo4j driver and its connection pool."""
        self.driver.close()

    def get_chunks_and_docs(self):
        """Retrieve all chunks and their associated documents from Neo4j.

        Returns:
            list of dicts with keys "doc_id", "chunk_id", "text".
        """
        chunks_data = []
        with self._session() as session:
            result = session.run("""
                MATCH (d:Document)-[:CONTAINS]->(c:Chunk)
                RETURN d.id AS doc_id, c.id AS chunk_id, c.text AS text
            """)
            for record in result:
                chunks_data.append({
                    "doc_id": record["doc_id"],
                    "chunk_id": record["chunk_id"],
                    "text": record["text"],
                })
        return chunks_data

    def extract_entities(self, text):
        """Run spaCy NER over *text* and return entity dicts with char offsets."""
        doc = self.nlp(text)
        return [
            {
                "text": ent.text,
                "label": ent.label_,
                "start": ent.start_char,
                "end": ent.end_char,
            }
            for ent in doc.ents
        ]

    def extract_noun_phrases(self, text):
        """Extract noun phrases that plain NER might miss.

        Pronouns and very short fragments are filtered out.
        """
        doc = self.nlp(text)
        pronouns = {"i", "you", "he", "she", "it", "we", "they", "this", "that"}
        noun_phrases = []
        for chunk in doc.noun_chunks:
            if len(chunk.text.strip()) > 3 and chunk.text.lower() not in pronouns:
                noun_phrases.append({
                    "text": chunk.text,
                    "start": chunk.start_char,
                    "end": chunk.end_char,
                })
        return noun_phrases

    def extract_relationships(self, text, entities):
        """Classify a relation for each ordered entity pair sharing a sentence.

        Returns dicts with source/target surface forms, the source label,
        the predicted relation label and its confidence.  Pairs scoring at
        or below 0.7 are dropped.  Empty list when no extractor is loaded.
        """
        if self.relation_extractor is None:
            return []

        relationships = []
        for i, entity1 in enumerate(entities):
            for j, entity2 in enumerate(entities):
                if i == j:  # skip self-pairs
                    continue
                # Simplified: use the first sentence containing both
                # mentions rather than proper sentence segmentation.
                sentence = self.extract_sentence_with_entities(text, entity1, entity2)
                if not sentence:
                    continue
                marked_text = self.mark_entities_in_text(sentence, entity1, entity2)
                try:
                    rel_result = self.relation_extractor(marked_text)
                    if rel_result and rel_result[0]['score'] > 0.7:  # confidence threshold
                        relationships.append({
                            "source": entity1["text"],
                            "source_type": entity1["label"],
                            "target": entity2["text"],
                            "relation_type": rel_result[0]['label'],
                            "confidence": rel_result[0]['score'],
                        })
                except Exception as e:
                    print(f"Error extracting relationship: {e}")

        return relationships

    def extract_sentence_with_entities(self, text, entity1, entity2):
        """Return the first sentence of *text* containing both entity texts.

        Sentences are split with a regex on end punctuation; spaCy's
        sentence segmenter would be more robust.  Returns None if no
        sentence contains both surface forms.
        """
        sentences = re.split(r'(?<=[.!?])\s+', text)
        for sentence in sentences:
            if entity1["text"] in sentence and entity2["text"] in sentence:
                return sentence
        return None

    def mark_entities_in_text(self, text, entity1, entity2):
        """Wrap the first occurrence of each entity in [E1]/[E2] markers."""
        marked_text = text.replace(entity1["text"], f"[E1]{entity1['text']}[/E1]", 1)
        marked_text = marked_text.replace(entity2["text"], f"[E2]{entity2['text']}[/E2]", 1)
        return marked_text

    def find_or_create_entity_node(self, session, entity, chunk_id):
        """MERGE an Entity node keyed on normalized text + label and link it to its chunk.

        Returns:
            The entity node's `id` property.
        """
        # Normalize the surface form into a stable identifier.
        clean_text = re.sub(r'[^a-zA-Z0-9]', '_', entity["text"]).lower()
        entity_id = f"{clean_text}_{entity['label']}"

        result = session.run("""
            MATCH (c:Chunk {id: $chunk_id})
            MERGE (e:Entity {id: $entity_id})
            ON CREATE SET e.text = $text, e.type = $label
            MERGE (c)-[:CONTAINS_ENTITY]->(e)
            RETURN e.id as id
        """, chunk_id=chunk_id, entity_id=entity_id, text=entity["text"], label=entity["label"])

        return result.single()["id"]

    def create_relationship_between_entities(self, session, source_id, target_id, rel_type, properties=None):
        """MERGE a typed relationship between two Entity nodes.

        Cypher cannot parameterize relationship types, so *rel_type* is
        interpolated into the query text; it is sanitized first so that
        model-produced labels cannot inject arbitrary Cypher.
        """
        if properties is None:
            properties = {}

        # Uppercase, replace every disallowed character, and make sure the
        # type does not start with a digit.
        safe_type = self._REL_TYPE_RE.sub('_', rel_type.upper())
        if not safe_type or safe_type[0].isdigit():
            safe_type = f"REL_{safe_type}"

        session.run(f"""
            MATCH (source:Entity {{id: $source_id}}), (target:Entity {{id: $target_id}})
            MERGE (source)-[r:{safe_type}]->(target)
            SET r += $properties
        """, source_id=source_id, target_id=target_id, properties=properties)

    def process_and_enhance_graph(self):
        """Process all chunks: add Entity/Concept nodes, relation and co-occurrence edges."""
        chunks_data = self.get_chunks_and_docs()
        print(f"Processing {len(chunks_data)} chunks...")

        with self._session() as session:
            for idx, chunk in enumerate(chunks_data):
                if idx % 10 == 0:
                    print(f"Processing chunk {idx+1}/{len(chunks_data)}")

                text = chunk["text"]
                chunk_id = chunk["chunk_id"]

                entities = self.extract_entities(text)
                noun_phrases = self.extract_noun_phrases(text)

                # Map entity surface form -> node id for relationship wiring.
                entity_ids = {}
                for entity in entities:
                    entity_ids[entity["text"]] = self.find_or_create_entity_node(session, entity, chunk_id)

                # Noun phrases become CONCEPT nodes unless they overlap an
                # entity.  (Loop variable renamed from `np`, which shadowed
                # the numpy import.)
                for phrase in noun_phrases:
                    if any(phrase["text"] in entity["text"] or entity["text"] in phrase["text"]
                           for entity in entities):
                        continue
                    concept = {"text": phrase["text"], "label": "CONCEPT"}
                    entity_ids[phrase["text"]] = self.find_or_create_entity_node(session, concept, chunk_id)

                # Persist model-extracted relationships.
                for rel in self.extract_relationships(text, entities):
                    if rel["source"] in entity_ids and rel["target"] in entity_ids:
                        rel_type = rel["relation_type"].upper().replace(" ", "_")
                        self.create_relationship_between_entities(
                            session,
                            entity_ids[rel["source"]],
                            entity_ids[rel["target"]],
                            rel_type,
                            {"confidence": rel["confidence"]},
                        )

                # Co-occurrence edges between all entity pairs in the chunk.
                # Distinct index names here: the original reused `i`, which
                # clobbered the outer progress counter.
                entity_list = list(entity_ids.items())
                for a in range(len(entity_list)):
                    for b in range(a + 1, len(entity_list)):
                        _, source_id = entity_list[a]
                        _, target_id = entity_list[b]
                        self.create_relationship_between_entities(session, source_id, target_id, "CO_OCCURS_WITH")

    def fine_tune_model(self, training_data):
        """Fine-tune the spaCy NER component and save it to 'fine_tuned_model'.

        Args:
            training_data: list of (text, {"entities": [(start, end, label), ...]}).
        """
        # Register every label seen in the training data with the NER pipe.
        ner = self.nlp.get_pipe("ner")
        for _, annotations in training_data:
            for ent in annotations.get("entities", []):
                ner.add_label(ent[2])

        # Train only the NER component; freeze everything else.
        other_pipes = [pipe for pipe in self.nlp.pipe_names if pipe != "ner"]
        with self.nlp.disable_pipes(*other_pipes):
            optimizer = self.nlp.resume_training()

            batch_sizes = compounding(4, 32, 1.001)
            for iteration in range(30):  # 30 iterations
                losses = {}
                for batch in minibatch(training_data, size=batch_sizes):
                    examples = []
                    for text, annotations in batch:
                        doc = self.nlp.make_doc(text)
                        examples.append(Example.from_dict(doc, annotations))
                    self.nlp.update(examples, drop=0.2, sgd=optimizer, losses=losses)
                print(f"Iteration {iteration+1}, Losses: {losses}")

        self.nlp.to_disk("fine_tuned_model")
        print("Model fine-tuned and saved to 'fine_tuned_model'")

    def generate_training_data_from_graph(self):
        """Build spaCy NER training examples from entities already in the graph.

        Overlapping spans are filtered (longest-first) because spaCy's
        Example.from_dict rejects examples with overlapping entity offsets.
        """
        training_data = []

        with self._session() as session:
            result = session.run("""
                MATCH (e:Entity)<-[:CONTAINS_ENTITY]-(c:Chunk)
                RETURN e.text AS entity_text, e.type AS entity_type, c.text AS chunk_text
                LIMIT 1000
            """)

            # Group spans per chunk text; every occurrence of each entity
            # surface form in its chunk becomes a candidate span.
            chunk_entities = {}
            for record in result:
                chunk_text = record["chunk_text"]
                spans = chunk_entities.setdefault(chunk_text, [])
                for match in re.finditer(re.escape(record["entity_text"]), chunk_text):
                    spans.append((match.start(), match.end(), record["entity_type"]))

            # Convert to spaCy training format, dropping overlapping spans
            # (keep the longest span starting at each position).
            for chunk_text, spans in chunk_entities.items():
                spans.sort(key=lambda s: (s[0], -(s[1] - s[0])))
                kept = []
                last_end = 0
                for start, end, label in spans:
                    if start >= last_end:
                        kept.append((start, end, label))
                        last_end = end
                if kept:
                    training_data.append((chunk_text, {"entities": kept}))

        return training_data

    def run_insight_queries(self):
        """Run summary queries over the enhanced graph and return them as a dict."""
        insights = {}
        with self._session() as session:
            # Most common entity types.
            result = session.run("""
                MATCH (e:Entity)
                RETURN e.type AS type, COUNT(e) AS count
                ORDER BY count DESC
                LIMIT 10
            """)
            insights["entity_types"] = [{"type": record["type"], "count": record["count"]} for record in result]

            # Most connected entities.
            result = session.run("""
                MATCH (e:Entity)-[r]-()
                RETURN e.text AS entity, e.type AS type, COUNT(r) AS connections
                ORDER BY connections DESC
                LIMIT 20
            """)
            insights["connected_entities"] = [
                {"entity": record["entity"], "type": record["type"], "connections": record["connections"]}
                for record in result
            ]

            # Relationship type frequencies.
            result = session.run("""
                MATCH ()-[r]->()
                RETURN type(r) AS relationship, COUNT(r) AS count
                ORDER BY count DESC
            """)
            insights["relationship_types"] = [
                {"relationship": record["relationship"], "count": record["count"]}
                for record in result
            ]

        return insights

def main():
    """Enhance the knowledge graph, fine-tune the NER model, and print insights."""
    # Connection settings come from the environment so credentials are never
    # committed to source control (a previous revision embedded a live
    # password here — rotate that credential).
    uri = os.environ.get("NEO4J_URI", "bolt://localhost:7687")
    user = os.environ.get("NEO4J_USER", "neo4j")
    password = os.environ.get("NEO4J_PASSWORD")
    if not password:
        raise SystemExit("Set NEO4J_PASSWORD (and optionally NEO4J_URI / NEO4J_USER)")

    # Initialize the enhanced KG builder.
    kg_builder = EnhancedKnowledgeGraphBuilder(uri, user, password)

    try:
        # Extract entities/relationships and write them into the graph.
        kg_builder.process_and_enhance_graph()

        # Generate training data from the graph and fine-tune the model.
        training_data = kg_builder.generate_training_data_from_graph()
        if training_data:
            print(f"Generated {len(training_data)} training examples from the graph")
            kg_builder.fine_tune_model(training_data)

        # Run insight queries to analyze the graph.
        insights = kg_builder.run_insight_queries()

        print("\n=== Graph Insights ===")

        print("\nEntity Types:")
        for et in insights.get("entity_types", []):
            print(f"  {et['type']}: {et['count']} entities")

        print("\nMost Connected Entities:")
        for ce in insights.get("connected_entities", []):
            print(f"  {ce['entity']} ({ce['type']}): {ce['connections']} connections")

        print("\nRelationship Types:")
        for rt in insights.get("relationship_types", []):
            print(f"  {rt['relationship']}: {rt['count']} relationships")

        print("\nEntity Clusters (Communities):")
        for ec in insights.get("entity_clusters", []):
            entities_sample = ec['entities'][:5]  # Show just a few entities from each cluster
            print(f"  Community {ec['community_id']} ({ec['count']} entities): {', '.join(entities_sample)}...")

    finally:
        # Always release the driver, even on failure.
        kg_builder.close()

if __name__ == "__main__":
    main()
