import os
import pickle
import re
from collections import defaultdict
from functools import lru_cache

import matplotlib.pyplot as plt
import networkx as nx
import pandas as pd
import spacy

# Constants for file paths
DATA_DIR = "data"  # directory holding the raw GOT{1..5}.txt book files
CACHE_DIR = "cache1"  # Different cache directory from code2.py
DF_CACHE = os.path.join(CACHE_DIR, "chapters_df.pkl")  # pickled chapters DataFrame
NER_CACHE = os.path.join(CACHE_DIR, "chapter_characters.pkl")  # title -> set of characters
GRAPH_CACHE = os.path.join(CACHE_DIR, "character_graph.pkl")  # pickled networkx graph

# Create cache directory if it doesn't exist
os.makedirs(CACHE_DIR, exist_ok=True)


def load_or_process_books():
    """Load the cached chapters DataFrame if present, otherwise parse the books.

    Reads ``GOT1.txt`` .. ``GOT5.txt`` from ``DATA_DIR``.  Each file is split
    on runs of two or more newlines; the resulting chunks alternate between a
    chapter title and the chapter body.

    Returns:
        pd.DataFrame with columns ``book`` (1-5), ``title`` and ``text``.
    """
    if os.path.exists(DF_CACHE):
        print("Loading cached DataFrame...")
        return pd.read_pickle(DF_CACHE)

    print("Processing books...")
    books = []
    chapters_text = []
    chapters_title = []

    # Two or more consecutive newlines separate chunks.  (The original
    # re.IGNORECASE flag was a no-op: the pattern contains no letters.)
    pattern = re.compile(r"\n{2,}")

    for i in range(1, 6):  # Loops through books 1-5
        path = os.path.join(DATA_DIR, f"GOT{i}.txt")  # use DATA_DIR, not a hard-coded "data"
        try:
            with open(path, "r", encoding="utf-8") as fp:
                chapters = pattern.split(fp.read())
        except FileNotFoundError:
            print(f"Warning: Book {i} not found")
            continue

        # Chunks alternate title/text; walk them pairwise so the three column
        # lists always stay the same length.  (Previously, empty titles were
        # filtered out AFTER the loop while their texts were kept, which could
        # desync the lists and make the DataFrame constructor below raise.)
        for title, text in zip(chapters[::2], chapters[1::2]):
            title = title.strip()
            if not title:
                continue  # skip blank titles together with their text
            chapters_title.append(title.title())
            chapters_text.append(text.strip())
            books.append(i)

    # Create a pandas DataFrame with the chapters data
    df = pd.DataFrame.from_dict(
        {"book": books, "title": chapters_title, "text": chapters_text}
    )

    # Cache the DataFrame for the next run
    df.to_pickle(DF_CACHE)
    return df


def initialize_spacy():
    """Initialize the transformer-based spaCy pipeline, preferring the GPU.

    Returns:
        The loaded ``en_core_web_trf`` pipeline.

    Raises:
        OSError: if the model package is not installed (re-raised after
            printing installation instructions).
    """
    print("Initializing spaCy NER pipeline...")

    # GPU allocation must be requested BEFORE the model is loaded, otherwise
    # the pipeline weights are already allocated on the CPU.  prefer_gpu()
    # returns True only when a GPU backend (CuPy) is actually usable, so the
    # previous manual `import cupy` probe is unnecessary.
    if spacy.prefer_gpu():
        print("Using device: GPU")
    else:
        print("GPU acceleration not available (CuPy not installed)")
        print("Using device: CPU")

    try:
        return spacy.load("en_core_web_trf")
    except OSError:
        print("Model 'en_core_web_trf' not found. Please install it using:")
        print("python -m spacy download en_core_web_trf")
        raise


@lru_cache(maxsize=1)
def _load_valid_characters():
    """Read chapters_title.txt once and return the valid character names.

    Titles whose first word is 'prologue'/'epilogue' are not POV characters
    and are excluded.

    Returns:
        dict mapping lower-cased title -> canonical title, enabling O(1)
        case-insensitive lookup per entity.
    """
    valid = {}
    with open('chapters_title.txt', 'r', encoding='utf-8') as f:
        for line in f:
            title = line.strip()
            words = title.split()
            if words and words[0].lower() not in ('prologue', 'epilogue'):
                valid[title.lower()] = title
    return valid


def extract_persons(text, nlp):
    """Extract known character names mentioned in *text* via spaCy NER.

    Keeps only PERSON entities that exactly match (case-insensitively) one
    of the chapter-title character names.

    Args:
        text: Raw chapter text.
        nlp: A loaded spaCy pipeline.

    Returns:
        Set of canonical character names found in the text.
    """
    # Cached: the title file is now read once per process instead of once
    # per chapter, and each entity is matched with a dict lookup instead of
    # the previous linear scan over all valid names.
    valid_characters = _load_valid_characters()

    persons = set()
    for ent in nlp(text).ents:
        if ent.label_ == "PERSON":
            canonical = valid_characters.get(ent.text.lower())
            if canonical is not None:
                persons.add(canonical)
    return persons


def process_chapters_ner(df):
    """Run NER over every chapter, using the on-disk cache when available.

    Args:
        df: DataFrame with ``title`` and ``text`` columns.

    Returns:
        Mapping of chapter title -> set of character names found in it.
    """
    if os.path.exists(NER_CACHE):
        print("Loading cached character mentions...")
        with open(NER_CACHE, "rb") as f:
            return pickle.load(f)

    print("Processing chapters with NER...")
    nlp = initialize_spacy()
    total = len(df)
    chapter_characters = defaultdict(set)

    for idx, chapter in df.iterrows():
        print(f"Processing chapter {idx + 1}/{total}")
        chapter_characters[chapter["title"]].update(
            extract_persons(chapter["text"], nlp)
        )

    # Persist as a plain dict so the cache does not depend on defaultdict.
    with open(NER_CACHE, "wb") as f:
        pickle.dump(dict(chapter_characters), f)

    return chapter_characters


def build_character_network(chapter_characters):
    """Construct the co-occurrence graph, loading the cached copy if any.

    Nodes are characters; an edge links two characters appearing in the same
    chapter, with ``weight`` counting the number of shared chapters.

    Args:
        chapter_characters: mapping of chapter title -> set of characters.

    Returns:
        networkx.Graph with weighted edges.
    """
    if os.path.exists(GRAPH_CACHE):
        print("Loading cached character network...")
        with open(GRAPH_CACHE, "rb") as f:
            return pickle.load(f)

    print("Building character network...")
    G = nx.Graph()

    # Every character becomes a node, even one that never co-occurs.
    for members in chapter_characters.values():
        G.add_nodes_from(members)

    # Each unordered pair sharing a chapter gains +1 edge weight.
    for members in chapter_characters.values():
        roster = list(members)
        for pos, first in enumerate(roster):
            for second in roster[pos + 1:]:
                if G.has_edge(first, second):
                    G[first][second]["weight"] += 1
                else:
                    G.add_edge(first, second, weight=1)

    # Cache the graph for subsequent runs.
    with open(GRAPH_CACHE, "wb") as f:
        pickle.dump(G, f)

    return G


def analyze_and_visualize_network(G):
    """Print summary statistics for the graph and save a PNG rendering.

    Args:
        G: weighted networkx.Graph of character co-occurrences.
    """
    print(f"\nNumber of characters (nodes): {G.number_of_nodes()}")
    print(f"Number of relationships (edges): {G.number_of_edges()}")

    # Ten characters with the most distinct co-occurrence partners.
    top_10 = sorted(
        dict(G.degree()).items(), key=lambda item: item[1], reverse=True
    )[:10]

    print("\nTop 10 most connected characters:")
    for name, degree in top_10:
        print(f"{name}: {degree} connections")

    # Render the network.
    plt.figure(figsize=(15, 15))
    layout = nx.spring_layout(G, k=0.15)

    nx.draw_networkx_nodes(G, layout, node_size=50, node_color="blue", alpha=0.7)

    # Edge width scales with co-occurrence count.
    edge_widths = [0.1 * data["weight"] for _, _, data in G.edges(data=True)]
    nx.draw_networkx_edges(G, layout, alpha=0.3, width=edge_widths)

    # Label only the top-10 characters to keep the plot readable.
    labels = {name: name for name, _ in top_10}
    nx.draw_networkx_labels(G, layout, labels, font_size=10, font_color="black")

    plt.title("Game of Thrones Character Co-occurrence Network")
    plt.axis("off")
    # Save the plot instead of showing it
    plot_path = os.path.join(CACHE_DIR, "character_network.png")
    plt.savefig(plot_path, bbox_inches='tight', dpi=300)
    plt.close()  # release the figure's memory
    print(f"Network visualization saved as '{plot_path}'")


def main():
    """Run the full pipeline: parse books, NER, build and visualize the graph."""
    # Load or process books
    df = load_or_process_books()
    print("Initial DataFrame:")
    print(df.head())

    # Guard: without chapters there is nothing to analyze, and df.iloc[...]
    # below would raise IndexError.
    if df.empty:
        print("No chapters found; nothing to analyze.")
        return

    # Process chapters with NER
    chapter_characters = process_chapters_ner(df)

    # Display extracted characters from a sample chapter.  Clamp the index
    # so a single-chapter corpus does not crash, and use .get() because the
    # mapping loaded from the pickle cache is a plain dict (not a
    # defaultdict) and might lack the title.
    sample_chapter = df.iloc[min(1, len(df) - 1)]["title"]
    print(f"\nCharacters in chapter '{sample_chapter}':")
    print(chapter_characters.get(sample_chapter, set()))

    # Build and analyze character network
    G = build_character_network(chapter_characters)

    # Analyze and visualize the network
    analyze_and_visualize_network(G)

    # Save the graph in GEXF format for further analysis
    gexf_path = os.path.join(CACHE_DIR, "got_character_network.gexf")
    nx.write_gexf(G, gexf_path)
    print(f"\nCharacter network graph saved as '{gexf_path}'.")


if __name__ == "__main__":
    main()
