import logging
import os
import random
import re
import uuid

import spacy
from neo4j import GraphDatabase
from spacy.training import Example, offsets_to_biluo_tags
from spacy.util import compounding, minibatch
from transformers import pipeline

class SimulationScienceKnowledgeGraphBuilder:
    def __init__(self, uri, user, password, database="neo4j"):
        """Connect to Neo4j, configure logging, and load the NLP stack.

        Args:
        - uri: Bolt/Neo4j connection URI
        - user: database user name
        - password: database password
        - database: target database name (defaults to "neo4j")
        """
        logging.basicConfig(level=logging.INFO,
                            format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        self.logger = logging.getLogger(__name__)

        # Database handle (lazy: the driver connects on first use)
        self.driver = GraphDatabase.driver(uri, auth=(user, password), database=database)

        # Entity/relationship vocabulary for the simulation-science domain
        self.domain_schemas = self._initialize_domain_schemas()

        # spaCy pipeline and HF relation classifier, populated by the call below
        self.nlp = None
        self.relation_extractor = None
        self.initialize_nlp_components()

    def _initialize_domain_schemas(self):
        """Return the fixed domain schema: entity labels, relationship labels, and context text."""
        entity_labels = [
            "SOFTWARE", "FISH", "TASK", "PRECONFIGURATION",
            "FILE_TYPE", "ENVIRONMENT", "WEBSITE", "TUTORIAL",
            "FMU", "SIMULATION", "PARAMETER", "MODEL", "RESULT"
        ]
        relationship_labels = [
            "SIMULATES", "PART_OF", "USED_FOR", "CONFIGURED_BY",
            "INPUTS_TO", "OUTPUTS_FROM", "DEPENDS_ON", "TYPE_OF",
            "RUNS_IN", "DOCUMENTS", "REFERS_TO"
        ]
        context = """
            Domain focused on simulation science, particularly:
            - Fish growth simulation models
            - Simulation software tools 
            - Simulation parameters and configurations
            - File formats in simulations
            - Simulation results and analysis
            """
        return {
            "entity_types": entity_labels,
            "relationship_types": relationship_labels,
            "domain_context": context,
        }

    def prepare_training_data(self):
        """
        Build spaCy-style NER training examples from Chunk/Entity pairs in Neo4j.

        Returns:
        - list of (text, {'entities': [(start, end, label), ...]}) tuples
        """
        span_map = {}

        with self.driver.session() as session:
            # Pull every chunk text together with its annotated entities.
            rows = session.run("""
                MATCH (c:Chunk)-[:CONTAINS_ENTITY]->(e:Entity)
                RETURN c.text AS text, 
                       e.text AS entity_text, 
                       e.type AS entity_type
            """)

            for row in rows:
                chunk_text = row['text']
                surface = row['entity_text']
                label = row['entity_type']

                spans = span_map.setdefault(chunk_text, [])
                # Annotate every occurrence of the entity's surface form.
                for hit in re.finditer(re.escape(surface), chunk_text):
                    spans.append((hit.start(), hit.end(), label))

        training_data = [(chunk_text, {'entities': spans})
                         for chunk_text, spans in span_map.items()]

        self.logger.info(f"Prepared {len(training_data)} training examples")
        return training_data

    def train_ner_model(self,
                        output_path='simulation_science_ner_model',
                        base_model='en_core_web_lg',
                        iterations=50):
        """
        Train a domain-specific NER model.

        Args:
        - output_path: Path to save the trained model
        - base_model: Base spaCy model to start from
        - iterations: Number of training iterations

        Returns:
        - the trained spaCy pipeline, or None when no training data exists
        """
        nlp = spacy.load(base_model)

        # Register domain entity labels with the NER component.
        # BUG FIX: previously read the nonexistent attribute `self.entity_types`
        # (AttributeError); the labels live in self.domain_schemas.
        ner = nlp.get_pipe('ner')
        for entity_type in self.domain_schemas["entity_types"]:
            ner.add_label(entity_type)

        training_data = self.prepare_training_data()
        if not training_data:
            self.logger.error("No training data available. Cannot train NER model.")
            return None

        # Freeze every component except the NER head during updates.
        other_pipes = [pipe for pipe in nlp.pipe_names if pipe != 'ner']
        with nlp.disable_pipes(*other_pipes):
            # resume_training keeps the pretrained weights; begin_training would
            # re-initialize the model and discard them (matches fine_tune_model).
            optimizer = nlp.resume_training()

            for i in range(iterations):
                # Shuffle so each epoch sees a different batch order.
                random.shuffle(training_data)
                losses = {}

                for batch in minibatch(training_data, size=4):
                    texts, annotations = zip(*batch)
                    examples = [
                        Example.from_dict(nlp.make_doc(text), annotation)
                        for text, annotation in zip(texts, annotations)
                    ]
                    nlp.update(examples, drop=0.2, losses=losses, sgd=optimizer)

                self.logger.info(f"Iteration {i + 1}/{iterations}, Losses: {losses}")

        nlp.to_disk(output_path)
        self.logger.info(f"NER model saved to {output_path}")
        return nlp

    def initialize_nlp_components(self):
        """Load the spaCy pipeline (fine-tuned if present) and the relation extractor."""
        try:
            # Prefer a previously fine-tuned domain model saved on disk.
            self.nlp = spacy.load("simulation_science_model")
            self.logger.info("Loaded fine-tuned simulation science model")
        except OSError:
            # spacy.load raises OSError when the model package/path is missing.
            # FIX: was a bare `except:`, which also swallowed KeyboardInterrupt
            # and unrelated errors.
            self.nlp = spacy.load("en_core_web_lg")

            # Extend the stock NER with the simulation-science labels.
            ner = self.nlp.get_pipe("ner")
            for entity_type in self.domain_schemas["entity_types"]:
                # FIX: use the class logger instead of a stray print().
                self.logger.info(f"Adding new label from domain schemas: {entity_type}")
                ner.add_label(entity_type)

            self.logger.info("Loaded base model with custom simulation science entities")

        # Relation extraction via an HF text-classification pipeline; optional —
        # extraction degrades gracefully when the model cannot be loaded.
        try:
            self.relation_extractor = pipeline(
                "text-classification",
                model="Babelscape/rebel-large",
                device=0 if spacy.prefer_gpu() else -1
            )
            self.logger.info("Initialized relation extraction model")
        except Exception as e:
            self.logger.error(f"Could not load relation extraction model: {e}")
            self.relation_extractor = None

    def get_chunks_from_graph(self):
        """Retrieve document chunks from Neo4j.

        Returns:
        - list of {"doc_id", "chunk_id", "text"} dicts (possibly empty)
        """
        chunks = []
        with self.driver.session() as session:
            result = session.run("""
                MATCH (c:Chunk)-[r]->(d:Document) 
                WHERE type(r) IN ['PART_OF', 'FIRST_CHUNK', 'NEXT_CHUNK']
                RETURN d.id AS doc_id, c.id AS chunk_id, c.text AS text
            """)
            for record in result:
                chunks.append({
                    "doc_id": record["doc_id"],
                    "chunk_id": record["chunk_id"],
                    "text": record["text"]
                })

        # FIX: the retrieval count used to be logged twice (once unconditionally
        # after the if/else, even when zero chunks were found); log exactly once.
        if not chunks:
            self.logger.warning("No chunks found. Please check if data exists in the database.")
        else:
            self.logger.info(f"Retrieved {len(chunks)} chunks from Neo4j")
        return chunks

    def extract_domain_knowledge(self, text):
        """
        Extract domain-specific knowledge using NLP and relation extraction.

        Returns:
        - dict with "entities", "relationships", and "concepts" lists
        """
        doc = self.nlp(text)

        # Keep only entities whose label belongs to the simulation-science schema.
        entities = []
        for ent in doc.ents:
            if ent.label_ in self.domain_schemas["entity_types"]:
                entities.append({
                    "text": ent.text,
                    "label": ent.label_,
                    "start": ent.start_char,
                    "end": ent.end_char
                })

        # Pairwise relation extraction over entity mentions that co-occur
        # in the same sentence (skipped when no extractor is loaded).
        relationships = []
        if self.relation_extractor:
            for i, entity1 in enumerate(entities):
                for j, entity2 in enumerate(entities):
                    if i != j:
                        sentence = self._extract_sentence_with_entities(text, entity1, entity2)
                        if sentence:
                            marked_text = self._mark_entities_in_text(sentence, entity1, entity2)
                            try:
                                rel_results = self.relation_extractor(marked_text)
                                for rel_result in rel_results:
                                    if rel_result['score'] > 0.7:  # Confidence threshold
                                        relationships.append({
                                            "source": entity1["text"],
                                            "source_type": entity1["label"],
                                            "target": entity2["text"],
                                            "target_type": entity2["label"],
                                            "relation_type": rel_result['label'],
                                            "confidence": rel_result['score']
                                        })
                            except Exception as e:
                                self.logger.error(f"Relationship extraction error: {e}")

        # Noun chunks become candidate domain concepts (short strings and
        # pronouns are filtered out).
        concepts = []
        for chunk in doc.noun_chunks:
            if (len(chunk.text.strip()) > 3 and
                    not chunk.text.lower() in ["i", "you", "he", "she", "it", "we", "they", "this", "that"]):
                concepts.append({
                    "text": chunk.text,
                    # BUG FIX: entities are dicts, so the former `ent.text`
                    # attribute access raised AttributeError here.
                    "related_entities": [
                        ent["text"] for ent in entities
                        if ent["text"] in chunk.text or chunk.text in ent["text"]
                    ]
                })

        return {
            "entities": entities,
            "relationships": relationships,
            "concepts": concepts
        }

    def _extract_sentence_with_entities(self, text, entity1, entity2):
        """Return the first sentence of *text* containing both entity mentions, or None."""
        needle_a = entity1["text"]
        needle_b = entity2["text"]
        for candidate in re.split(r'(?<=[.!?])\s+', text):
            if needle_a in candidate and needle_b in candidate:
                return candidate
        return None

    def _mark_entities_in_text(self, text, entity1, entity2):
        """Wrap the first occurrence of each entity with [E1]/[E2] markers for the relation model."""
        first = entity1["text"]
        second = entity2["text"]
        tagged = text.replace(first, "[E1]" + first + "[/E1]", 1)
        return tagged.replace(second, "[E2]" + second + "[/E2]", 1)

    def create_knowledge_graph_nodes(self, chunk_id, knowledge):
        """
        Create nodes and relationships in Neo4j based on extracted knowledge.

        Args:
        - chunk_id: id of the Chunk node the knowledge came from
        - knowledge: dict with "entities", "relationships", "concepts" lists
          (as produced by extract_domain_knowledge)
        """
        with self.driver.session() as session:
            # Upsert entity nodes, remembering their graph ids by surface text.
            entity_ids = {}
            for entity in knowledge.get("entities", []):
                entity_id = self._create_or_update_entity_node(session, chunk_id, entity)
                entity_ids[entity["text"]] = entity_id

            # Create typed relationships between known entities.
            for rel in knowledge.get("relationships", []):
                if rel["source"] in entity_ids and rel["target"] in entity_ids:
                    source_id = entity_ids[rel["source"]]
                    target_id = entity_ids[rel["target"]]

                    # The relationship type must be interpolated into the Cypher
                    # text (it cannot be bound as a parameter). SECURITY FIX: the
                    # label comes from model output, so strip anything outside
                    # [A-Z0-9_] to prevent broken queries / Cypher injection.
                    rel_type = re.sub(r'[^A-Z0-9_]', '_',
                                      rel["relation_type"].upper().replace(" ", "_"))

                    session.run("""
                        MATCH (source:Entity {id: $source_id}), 
                              (target:Entity {id: $target_id})
                        MERGE (source)-[r:%s]->(target)
                        SET r.confidence = $confidence
                    """ % rel_type,
                                source_id=source_id,
                                target_id=target_id,
                                confidence=rel.get("confidence", 1.0)
                                )

            # Create concept nodes and link them to the chunk.
            for concept in knowledge.get("concepts", []):
                concept_id = str(uuid.uuid4())
                session.run("""
                    MATCH (c:Chunk {id: $chunk_id})
                    MERGE (con:Concept {id: $concept_id, name: $concept_name})
                    SET con.text = $concept_text
                    MERGE (c)-[:DISCUSSES]->(con)
                """,
                            chunk_id=chunk_id,
                            concept_id=concept_id,
                            concept_name=concept["text"],
                            concept_text=concept["text"]
                            )

                # Link concepts to the entities they mention.
                for entity_text in concept.get("related_entities", []):
                    session.run("""
                        MATCH (con:Concept {id: $concept_id})
                        MATCH (e:Entity {text: $entity_text})
                        MERGE (con)-[:RELATES_TO]->(e)
                    """,
                                concept_id=concept_id,
                                entity_text=entity_text
                                )

    def _create_or_update_entity_node(self, session, chunk_id, entity):
        """Upsert an Entity node, link it to its chunk, and return its graph id."""
        # Deterministic id: normalized surface text plus label, so the same
        # mention always maps onto the same node.
        slug = re.sub(r'[^a-zA-Z0-9]', '_', entity["text"]).lower()
        node_id = f"{slug}_{entity['label']}"

        record = session.run("""
            MATCH (c:Chunk {id: $chunk_id})
            MERGE (e:Entity {id: $entity_id})
            ON CREATE SET 
                e.text = $text, 
                e.type = $label, 
                e.first_seen_in_chunk = $chunk_id
            ON MATCH SET 
                e.last_seen_in_chunk = $chunk_id
            MERGE (c)-[:CONTAINS_ENTITY]->(e)
            RETURN e.id as id
        """,
                             chunk_id=chunk_id,
                             entity_id=node_id,
                             text=entity["text"],
                             label=entity["label"]
                             ).single()

        return record["id"]

    def fine_tune_model(self, training_data):
        """Fine-tune the loaded NER pipeline with additional annotated examples.

        Args:
        - training_data: list of (text, {"entities": [(start, end, label), ...]})
        """
        # Make sure every label appearing in the annotations is known to the NER.
        ner = self.nlp.get_pipe("ner")
        for _, annotation in training_data:
            for span in annotation.get("entities", []):
                ner.add_label(span[2])

        # Freeze every other component while the NER head is updated.
        frozen = [name for name in self.nlp.pipe_names if name != "ner"]
        with self.nlp.disable_pipes(*frozen):
            optimizer = self.nlp.resume_training()
            batch_sizes = compounding(4, 32, 1.001)

            for iteration in range(30):  # 30 iterations
                losses = {}
                for batch in minibatch(training_data, size=batch_sizes):
                    examples = [
                        Example.from_dict(self.nlp.make_doc(text), annotation)
                        for text, annotation in batch
                    ]
                    self.nlp.update(examples, drop=0.2, sgd=optimizer, losses=losses)
                self.logger.info(f"Iteration {iteration + 1}, Losses: {losses}")

        # Persist so initialize_nlp_components can pick the model up next run.
        self.nlp.to_disk("simulation_science_model")
        self.logger.info("Model fine-tuned and saved to 'simulation_science_model'")

    def build_simulation_knowledge_graph(self):
        """
        Main entry point: extract knowledge from every chunk, populate the
        graph, then fine-tune the NER model on the collected annotations.
        """
        chunks = self.get_chunks_from_graph()
        self.logger.info(f"Retrieved {len(chunks)} chunks for processing")

        collected_examples = []

        for chunk in chunks:
            try:
                knowledge = self.extract_domain_knowledge(chunk["text"])
                self.create_knowledge_graph_nodes(chunk["chunk_id"], knowledge)

                # Reuse the extracted entities as silver-standard NER annotations.
                spans = [(ent["start"], ent["end"], ent["label"])
                         for ent in knowledge.get("entities", [])]
                if spans:
                    collected_examples.append((chunk["text"], {"entities": spans}))

            except Exception as e:
                self.logger.error(f"Error processing chunk {chunk['chunk_id']}: {e}")

        if collected_examples:
            self.fine_tune_model(collected_examples)

    def close(self):
        """Close the Neo4j driver, releasing its connection pool."""
        self.driver.close()


def main():
    """Entry point: connect to Neo4j and build the simulation science knowledge graph."""
    # SECURITY FIX: read connection parameters from the environment instead of
    # hard-coding them. The literal fallbacks preserve the previous behavior;
    # NOTE(security): the old hard-coded password is in VCS history and should
    # be rotated.
    uri = os.environ.get("NEO4J_URI", "neo4j+s://ff2d3c4d.databases.neo4j.io")
    user = os.environ.get("NEO4J_USER", "neo4j")
    password = os.environ.get("NEO4J_PASSWORD", "nurE98tTctgv2c3RYzduho7xTdog1o3xX7uQ9ZKj1qw")

    # Initialize the simulation science knowledge graph builder
    kg_builder = SimulationScienceKnowledgeGraphBuilder(uri, user, password)

    try:
        # Build the knowledge graph
        kg_builder.build_simulation_knowledge_graph()

        print("Simulation Science Knowledge Graph built successfully!")

    except Exception as e:
        print(f"Error building knowledge graph: {e}")

    finally:
        # Always release the driver, even on failure.
        kg_builder.close()


if __name__ == "__main__":
    main()