import os

import spacy
from neo4j import GraphDatabase
# NOTE: offsets_to_biluo_tags is only referenced by commented-out debug code.
from spacy.training import offsets_to_biluo_tags
from spacy.training.example import Example

## Entities identified by the base en_core_web_lg model on the Neo4j chunks:
'''
[('first', 'ORDINAL'), ('Kopl', 'PERSON')]
[('Salmon', 'PRODUCT'), ('PDF', 'ORG'), ('Kopl', 'FAC'), ('Salmon', 'PRODUCT')]
[]
[('Salmon AdlibFeeding', 'PRODUCT'), ('Salmon AdaptedFeeding', 'LOC'), ('two', 'CARDINAL'), ('first', 'ORDINAL')]
[('30-day', 'DATE'), ('720', 'CARDINAL'), ('720 hours', 'TIME'), ('Salman', 'PERSON')]
[('14 days', 'DATE'), ('fed', 'ORG'), ('fed', 'ORG'), ('AdlibFeeding', 'ORG'), ('the day', 'DATE'), ('the day', 'DATE')]
[]
[('Kopl', 'PERSON'), ('Kopl', 'PERSON'), ('first', 'ORDINAL')]
[('3.0', 'CARDINAL'), ('Virtual Laboratory      Home Tutorials', 'ORG'), ('the Dynamic Energy Budget', 'ORG'), ('Atlantic', 'LOC'), ('Kopl', 'PERSON')]
[('Kopl', 'PERSON'), ('Open Simulation Platform', 'PRODUCT'), ('Kopl', 'PERSON'), ('CoSimulation', 'ORG'), ('30 days', 'DATE')]
[('Salmon_30days_adlib.xml', 'PRODUCT'), ('0', 'CARDINAL'), ('1.0', 'CARDINAL'), ('720', 'CARDINAL'), ('24 hours', 'TIME'), ('30 days', 'DATE'), ('Atlantic Salmon under Setup', 'LOC'), ('100', 'CARDINAL'), ('100 grams', 'QUANTITY'), ('Adlib', 'ORG'), ('8 hours', 'TIME'), ('Videos', 'ORG')]
[('Salmon', 'LOC'), ('Kopl', 'PERSON'), ('first', 'ORDINAL')]
[('Kopl', 'PERSON'), ('2.0', 'CARDINAL'), ('Seabream 30 days', 'DATE'), ('Seabream 30 days', 'DATE'), ('30 days', 'DATE'), ('30 days', 'DATE'), ('30 days', 'DATE'), ('30 days', 'DATE'), ('Pdf   Debgrowth            ', 'PERSON')]
'''


# Check alignment and correct misaligned entities
# Check alignment and correct misaligned entities
def check_and_correct_alignment(nlp, text, entities):
    """Snap character-offset entity annotations to token boundaries.

    spaCy rejects training spans whose character offsets do not line up
    with token boundaries, so misaligned spans are expanded outward to the
    nearest tokens via ``alignment_mode="expand"``.

    Args:
        nlp: a loaded spaCy pipeline; only its tokenizer is used (make_doc).
        text: the raw annotated text.
        entities: iterable of (start, end, label) character-offset triples.

    Returns:
        A list of (start, end, label) triples aligned to token boundaries.
        Entities that cannot be aligned even after expansion are dropped,
        with a warning printed (the original silently discarded them).
    """
    doc = nlp.make_doc(text)
    aligned_entities = []
    for start, end, label in entities:
        span = doc.char_span(start, end, label=label)
        if span is not None:
            # Already token-aligned; keep as-is.
            aligned_entities.append((start, end, label))
            continue
        print(f"Misaligned entity in text: {text}")
        print(f"Original entity: {text[start:end]} ({start}, {end})")
        # Adjust the span to the closest token boundaries
        span = doc.char_span(start, end, label=label, alignment_mode="expand")
        if span is not None:
            print(f"Adjusted entity: {span.text} ({span.start_char}, {span.end_char})")
            aligned_entities.append((span.start_char, span.end_char, label))
        else:
            # Even expansion failed (e.g. offsets outside the text): drop
            # the entity, but say so instead of losing it silently.
            print(f"Dropped unalignable entity: ({start}, {end}, {label})")
    return aligned_entities


def train_core_web(model_name, base_model="en_core_web_lg", n_iter=50):
    """Fine-tune the NER component of a pretrained spaCy pipeline.

    Registers the project-specific labels (SOFTWARE, FILE_TYPE, FISH, ...)
    with the base model's NER, trains on the small in-line corpus below,
    and saves the resulting pipeline to disk.

    Args:
        model_name: directory the fine-tuned pipeline is written to.
        base_model: name of the pretrained pipeline to start from
            (default keeps the original hard-coded "en_core_web_lg").
        n_iter: number of passes over the training data (default 50,
            matching the original loop).
    """
    # Define your training data
    # Feeding Comparison is a co-simulation task
    # AdaptedFeeding is a preconfiguration in the simulation tool
    # Videos
    TRAIN_DATA = [
        ("Kopl is a software tool.", {"entities": [(0, 4, "SOFTWARE")]}),
        ("We use XML files.", {"entities": [(7, 10, "FILE_TYPE")]}),
        ("PDF is a file type.", {"entities": [(0, 3, "FILE_TYPE")]}),
        ("Sea bream are a group of compact, medium-sized fish known as Sparidae.",
         {"entities": [(0, 9, "FISH"), (52, 60, "FAMILY")]}),
        ("Salmon is a type of fish.", {"entities": [(0, 6, "FISH")]}),
        ("Atlantic Salmon is a type of fish.", {"entities": [(0, 15, "FISH")]}),
        ("Feeding Comparison is a co-simulation task.", {"entities": [(0, 18, "TASK")]}),
        ("AdaptedFeeding is a preconfiguration in the simulation tool.", {"entities": [(0, 14, "PRECONFIGURATION")]}),
        ("Videos are tutorials showing how to use the simulation tool.", {"entities": [(0, 6, "TUTORIAL")]}),
        ("Virtual Laboratory is a simulation environment or a digital twin tool.",
         {"entities": [(0, 18, "ENVIRONMENT")]}),
        ("Home Tutorials is the website where links on tutorials are available.", {"entities": [(0, 14, "WEBSITE")]}),
        ("xml is a file type where we put the preconfiguration setup.", {"entities": [(0, 3, "FILE_TYPE")]}),
        ("Debgrowth is the FMU which stands for Functional Mock-up unit.", {"entities": [(0, 9, "FMU")]}),
    ]

    nlp = spacy.load(base_model)
    # Snap annotation offsets to token boundaries so spaCy accepts them.
    TRAIN_DATA_CORRECTED = [
        (text, {"entities": check_and_correct_alignment(nlp, text, annotations["entities"])})
        for text, annotations in TRAIN_DATA
    ]

    ner = nlp.get_pipe("ner")

    # Add new labels to the NER before training so it can predict them.
    for _, annotations in TRAIN_DATA_CORRECTED:
        for _start, _end, label in annotations["entities"]:
            ner.add_label(label)

    # Disable other pipes to only train NER
    other_pipes = [pipe for pipe in nlp.pipe_names if pipe != "ner"]
    with nlp.disable_pipes(*other_pipes):
        # resume_training keeps the pretrained weights; pass its optimizer
        # to nlp.update explicitly (the original assigned it but never used it).
        optimizer = nlp.resume_training()
        for itn in range(n_iter):
            losses = {}
            for text, annotations in TRAIN_DATA_CORRECTED:
                example = Example.from_dict(nlp.make_doc(text), annotations)
                nlp.update([example], drop=0.5, sgd=optimizer, losses=losses)
            print('losses: ', losses)

    # Save the updated model
    nlp.to_disk(model_name)  # "fine_tuned_model"


def load_model(model_name):
    """Load a spaCy pipeline previously saved with ``nlp.to_disk``."""
    pipeline = spacy.load(model_name)
    return pipeline


def extract_entities(nlp, text):
    """Run *nlp* over *text* and return its entities as (text, label) pairs."""
    return [(entity.text, entity.label_) for entity in nlp(text).ents]


# Step 2: Connect to Neo4j and retrieve chunks
def get_chunks(uri, user, password, database_name):
    """Return the ``text`` property of every ``Chunk`` node in the database.

    Args:
        uri: Neo4j connection URI (bolt/neo4j scheme).
        user, password: credentials for basic auth.
        database_name: name of the database to query.

    Returns:
        A list of chunk text strings (possibly empty).

    The original version only closed the driver on the success path; using
    the driver as a context manager guarantees cleanup even if the query
    raises.
    """
    chunks = []
    with GraphDatabase.driver(uri, auth=(user, password), database=database_name) as driver:
        print('connected to database!')
        with driver.session() as session:
            result = session.run("MATCH (c:Chunk) RETURN c.text AS text")
            print(result)
            for record in result:
                chunks.append(record["text"])
    return chunks


# Step 3: Process chunks to extract entities
def process_chunks(nlp, chunks):
    """Extract entities from each chunk; one (text, label) list per chunk."""
    return [extract_entities(nlp, chunk) for chunk in chunks]


def main():
    """Fine-tune the NER model, then tag every chunk stored in Neo4j."""
    model_name = "fine_tuned_model"
    train_core_web(model_name)

    # SECURITY: credentials were hard-coded in source. Prefer environment
    # variables; the original literals remain as fallbacks so existing
    # behavior is unchanged when the variables are not set. Rotate this
    # password — it has been committed to version control.
    uri = os.environ.get("NEO4J_URI", "neo4j+s://ff2d3c4d.databases.neo4j.io")
    # uri = "bolt://localhost:7687"
    user = os.environ.get("NEO4J_USER", "neo4j")
    password = os.environ.get("NEO4J_PASSWORD", "nurE98tTctgv2c3RYzduho7xTdog1o3xX7uQ9ZKj1qw")
    # password = "12345678"
    database_name = os.environ.get("NEO4J_DATABASE", "neo4j")
    # Example text to extract entities from
    chunks = get_chunks(uri, user, password, database_name)
    nlp = load_model(model_name)
    entities = process_chunks(nlp, chunks)
    for entity in entities:
        print(entity)


if __name__ == "__main__":
    # Manual smoke test for a loaded pipeline, kept for reference:
    # doc = nlp("This is a test sentence.")
    # for ent in doc.ents:
    #    print(ent.text, ent.label_)
    main()
