import spacy
import pandas as pd
from spacy import displacy
from pathlib import Path

# Load the transformer-based model at import time so every analysis call
# reuses the same pipeline.
# NOTE: "en_core_web_trf" is not bundled with spaCy — it must be installed
# first (python -m spacy download en_core_web_trf) or loading will raise.
print("Loading spaCy model...")
nlp = spacy.load("en_core_web_trf")

# Example sentences fed to the demo; each comment below notes which
# linguistic features that group of sentences is meant to showcase.
examples = [
    # Famous quotes with named entities, complex grammar, and sentiment
    "In a hole in the ground there lived a hobbit named Bilbo Baggins.",
    "Harry Potter received his letter from Hogwarts while staying at 4 Privet Drive.",
    "Tony Stark built his first Iron Man suit in a cave with a box of scraps!",
    # Multiple entity types and relationships
    "Apple CEO Tim Cook announced new iPhones will be manufactured in India starting 2024.",
    "The NASA rover landed on Mars in February 2021, sending amazing photos back to Earth.",
    # Complex sentences with various linguistic features
    "Despite being exhausted, she couldn't help but smile when her dog brought her the newspaper.",
    "The ancient manuscript, written in Latin, was discovered beneath the Vatican Library in 1985.",
]


def analyze_text(text):
    """Run the spaCy pipeline on *text* and collect its key annotations.

    Returns:
        dict with the following keys:
            "entities":     list of (entity text, entity label) pairs
            "pos_tags":     list of (token text, coarse POS tag) pairs
            "dependencies": list of (token text, dependency relation) pairs
            "noun_chunks":  list of base noun-phrase spans
            "doc":          the processed spaCy Doc itself
    """
    doc = nlp(text)

    # Assemble every view of the document in one literal rather than
    # through intermediate locals.
    return {
        "entities": [(ent.text, ent.label_) for ent in doc.ents],
        "pos_tags": [(tok.text, tok.pos_) for tok in doc],
        "dependencies": [(tok.text, tok.dep_) for tok in doc],
        "noun_chunks": list(doc.noun_chunks),
        "doc": doc,
    }


def display_analysis(text, analysis, index):
    """Print a readable report of *analysis* and save its dependency tree.

    Args:
        text: The original sentence that was analyzed.
        analysis: Dict produced by ``analyze_text`` with keys "entities",
            "pos_tags", "dependencies", "noun_chunks", and "doc".
        index: 1-based example number, used to name the output SVG file.
    """
    print("\n" + "=" * 80)
    print(f"Analyzing: '{text}'")
    print("=" * 80)

    # Display named entities
    print("\n1. Named Entities:")
    if analysis["entities"]:
        # Use a distinct loop variable: the original reused `text`,
        # silently shadowing the function parameter.
        for ent_text, label in analysis["entities"]:
            print(f"   • {ent_text} -> {label}")
    else:
        print("   No named entities found.")

    # Display interesting POS patterns
    print("\n2. Notable Parts of Speech:")
    verbs = [word for word, pos in analysis["pos_tags"] if pos == "VERB"]
    nouns = [word for word, pos in analysis["pos_tags"] if pos == "NOUN"]
    adj = [word for word, pos in analysis["pos_tags"] if pos == "ADJ"]

    if verbs:
        print(f"   • Verbs: {', '.join(verbs)}")
    if nouns:
        print(f"   • Nouns: {', '.join(nouns)}")
    if adj:
        print(f"   • Adjectives: {', '.join(adj)}")

    # Display noun chunks
    print("\n3. Noun Chunks (base noun phrases):")
    for chunk in analysis["noun_chunks"]:
        print(f"   • {chunk.text}")

    # Generate and save the dependency-parse visualization.
    doc = analysis["doc"]
    svg = displacy.render(doc, style="dep", jupyter=False)
    output_path = Path(f"dependency_tree_{index}.svg")
    # Write explicitly as UTF-8: the rendered SVG contains non-ASCII
    # characters, and the platform default encoding (e.g. cp1252 on
    # Windows) could raise UnicodeEncodeError.
    output_path.write_text(svg, encoding="utf-8")
    print(f"\n4. Dependency Parse Tree saved as: {output_path}")


def main():
    """Run the showcase: analyze each example sentence and print its report."""
    print("\n🎉 Welcome to the SpaCy Feature Showcase! 🎉")
    print("This demo will analyze various texts to demonstrate spaCy's capabilities.")

    total = len(examples)
    for idx, sentence in enumerate(examples, start=1):
        print(f"\n\nProcessing Example {idx}/{total}...")
        display_analysis(sentence, analyze_text(sentence), idx)

    print("\n\n🎈 Analysis Complete! 🎈")
    print("Check out the generated SVG files to see the dependency parse trees!")


# Run the demo only when executed as a script, not when imported.
if __name__ == "__main__":
    main()
