import os
import pickle
import re
from collections import defaultdict
from itertools import combinations

import matplotlib.pyplot as plt
import networkx as nx
import pandas as pd
import torch
from transformers import AutoTokenizer, AutoModelForTokenClassification
from transformers import pipeline

# Constants for file paths
DATA_DIR = "data"  # directory holding the raw GOT{1..5}.txt book files
CACHE_DIR = "cache2"  # all derived artifacts (pickles, plot, GEXF) land here
DF_CACHE = os.path.join(CACHE_DIR, "chapters_df.pkl")  # parsed chapters DataFrame
NER_CACHE = os.path.join(CACHE_DIR, "chapter_characters.pkl")  # {chapter title: set of names}
GRAPH_CACHE = os.path.join(CACHE_DIR, "character_graph.pkl")  # pickled networkx co-occurrence graph

# Create cache directory if it doesn't exist
os.makedirs(CACHE_DIR, exist_ok=True)


def load_or_process_books():
    """Load the preprocessed chapters DataFrame if cached, otherwise build it.

    Reads GOT1..GOT5 from DATA_DIR, splits each book on runs of 2+ blank
    lines, and treats the resulting chunks as alternating title/body pairs.
    The assembled DataFrame is cached to DF_CACHE as a pickle.

    Returns:
        pd.DataFrame: one row per chapter, columns "book", "title", "text".
    """
    if os.path.exists(DF_CACHE):
        print("Loading cached DataFrame...")
        return pd.read_pickle(DF_CACHE)

    print("Processing books...")
    books = []
    chapters_text = []
    chapters_title = []

    # Chunks separated by 2+ newlines alternate between a chapter title
    # (even index) and its body (odd index). Compile once, outside the loop;
    # the original IGNORECASE flag was a no-op on a newline-only pattern.
    blank_run = re.compile(r"\n{2,}")

    for i in range(1, 6):  # Loops through books 1-5
        try:
            # Use the DATA_DIR constant (not a hard-coded "data/" string)
            # so the path stays in sync with the module configuration.
            path = os.path.join(DATA_DIR, f"GOT{i}.txt")
            with open(path, "r", encoding="utf-8") as fp:
                book = fp.read()
                for e, chunk in enumerate(blank_run.split(book)):
                    if e % 2 == 0:
                        chapters_title.append(chunk.strip().title())
                    else:
                        chapters_text.append(chunk.strip())
                        books.append(i)
        except FileNotFoundError:
            print(f"Warning: Book {i} not found")

    # Remove any empty titles (e.g. a leading blank split at the top of a
    # book). NOTE(review): this trims only the title list; if an empty title
    # slot had a non-empty body the three lists fall out of sync and the
    # DataFrame constructor below raises ValueError — the input files
    # evidently avoid that case, but it is worth confirming.
    chapters_title = [c for c in chapters_title if c]

    # Create a pandas DataFrame with the chapters data
    df = pd.DataFrame.from_dict(
        {"book": books, "title": chapters_title, "text": chapters_text}
    )

    # Cache the DataFrame for subsequent runs
    df.to_pickle(DF_CACHE)
    return df


def initialize_ner_pipeline():
    """Build the BERT NER pipeline, placing the model on GPU when available."""
    print("Initializing BERT NER pipeline...")

    # transformers pipelines take -1 for CPU and a CUDA device index for GPU.
    device = 0 if torch.cuda.is_available() else -1
    print(f"Using device: {'GPU' if device == 0 else 'CPU'}")

    model_name = "dslim/bert-large-NER"
    return pipeline(
        "ner",
        model=AutoModelForTokenClassification.from_pretrained(model_name),
        tokenizer=AutoTokenizer.from_pretrained(model_name),
        aggregation_strategy="simple",  # merge sub-word pieces into whole entities
        device=device,
    )


# Process-level cache for the character-name lookup: chapters_title.txt does
# not change while the script runs, so read it once instead of once per
# chapter (the original re-read and re-parsed the file on every call).
_VALID_CHARACTERS_CACHE = None


def _load_valid_characters():
    """Return a {lowercased name: canonical name} dict from chapters_title.txt.

    Chapter titles double as character names; titles whose first word is
    'prologue' or 'epilogue' have no character and are skipped.
    """
    global _VALID_CHARACTERS_CACHE
    if _VALID_CHARACTERS_CACHE is None:
        with open('chapters_title.txt', 'r') as f:
            titles = [line.strip() for line in f]
        _VALID_CHARACTERS_CACHE = {
            title.lower(): title
            for title in titles
            if title.split()
            and title.split()[0].lower() not in ('prologue', 'epilogue')
        }
    return _VALID_CHARACTERS_CACHE


def extract_persons(text, ner_pipeline):
    """Extract person entities from text using BERT NER.

    The text is processed in ~500-word chunks (BERT has a 512-token input
    limit) and only high-confidence PER entities that match a known
    character name (from chapters_title.txt) are kept.

    Args:
        text: chapter text to scan.
        ner_pipeline: callable returning a list of entity dicts with
            'entity_group', 'score' and 'word' keys (a transformers NER
            pipeline with aggregation_strategy="simple").

    Returns:
        set[str]: canonical character names found in the text.
    """
    # O(1) case-insensitive lookup (was an O(n) scan per entity).
    valid_by_lower = _load_valid_characters()

    # Process text in chunks due to BERT's token limit.
    chunk_size = 500  # words
    words = text.split()
    chunks = [' '.join(words[i:i + chunk_size])
              for i in range(0, len(words), chunk_size)]

    persons = set()
    CONFIDENCE_THRESHOLD = 0.98

    for chunk in chunks:
        try:
            with torch.no_grad():
                for entity in ner_pipeline(chunk):
                    if (entity['entity_group'] == 'PER' and
                            entity['score'] >= CONFIDENCE_THRESHOLD):
                        canonical = valid_by_lower.get(entity['word'].lower())
                        if canonical is not None:
                            persons.add(canonical)  # add the canonical name
        except Exception as e:
            # Best-effort: report the bad chunk and keep going.
            print(f"Error processing chunk: {e}")

    return persons


def process_chapters_ner(df):
    """Map each chapter title to the set of character names NER finds in it.

    Results are cached to NER_CACHE so the (slow) model pass runs only once;
    on a cache hit no pipeline is ever initialized.
    """
    if os.path.exists(NER_CACHE):
        print("Loading cached character mentions...")
        with open(NER_CACHE, "rb") as f:
            return pickle.load(f)

    print("Processing chapters with NER...")
    pipe = initialize_ner_pipeline()
    chapter_characters = defaultdict(set)

    for idx, row in df.iterrows():
        print(f"Processing chapter {idx + 1}/{len(df)}")
        chapter_characters[row["title"]].update(extract_persons(row["text"], pipe))

    # Persist as a plain dict for the next run.
    with open(NER_CACHE, "wb") as f:
        pickle.dump(dict(chapter_characters), f)

    return chapter_characters


def build_character_network(chapter_characters):
    """Build (or load from cache) the character co-occurrence graph.

    Nodes are characters; an edge's "weight" attribute counts how many
    chapters the two endpoint characters appear in together.

    Args:
        chapter_characters: mapping of chapter title -> set of characters.

    Returns:
        nx.Graph: the co-occurrence network.
    """
    if os.path.exists(GRAPH_CACHE):
        print("Loading cached character network...")
        with open(GRAPH_CACHE, "rb") as f:
            return pickle.load(f)

    print("Building character network...")
    G = nx.Graph()

    # One node per character seen in any chapter.
    for characters in chapter_characters.values():
        G.add_nodes_from(characters)

    # One edge per co-occurring pair; weight = number of shared chapters.
    # combinations() replaces the hand-rolled index pair loop, and sorted()
    # makes the pair iteration order deterministic across runs.
    for characters in chapter_characters.values():
        for a, b in combinations(sorted(characters), 2):
            if G.has_edge(a, b):
                G[a][b]["weight"] += 1
            else:
                G.add_edge(a, b, weight=1)

    # Cache the graph for subsequent runs.
    with open(GRAPH_CACHE, "wb") as f:
        pickle.dump(G, f)

    return G


def analyze_and_visualize_network(G):
    """Print summary statistics for the network and save a PNG visualization."""
    print(f"\nNumber of characters (nodes): {G.number_of_nodes()}")
    print(f"Number of relationships (edges): {G.number_of_edges()}")

    # Rank characters by degree (number of distinct co-occurring characters).
    ranked = sorted(dict(G.degree()).items(), key=lambda pair: pair[1], reverse=True)
    top_10 = ranked[:10]

    print("\nTop 10 most connected characters:")
    for character, degree in top_10:
        print(f"{character}: {degree} connections")

    # Lay out and draw the network.
    plt.figure(figsize=(15, 15))
    pos = nx.spring_layout(G, k=0.15)
    nx.draw_networkx_nodes(G, pos, node_size=50, node_color="blue", alpha=0.7)

    # Edge width scales with co-occurrence weight; alpha keeps dense areas legible.
    edge_weights = [data["weight"] for _, _, data in G.edges(data=True)]
    nx.draw_networkx_edges(G, pos, alpha=0.3, width=[0.1 * w for w in edge_weights])

    # Label only the top-10 characters to keep the plot readable.
    labels = {name: name for name, _ in top_10}
    nx.draw_networkx_labels(G, pos, labels, font_size=10, font_color="black")

    plt.title("Game of Thrones Character Co-occurrence Network")
    plt.axis("off")

    # Save the plot instead of showing it.
    plot_path = os.path.join(CACHE_DIR, "character_network.png")
    plt.savefig(plot_path, bbox_inches="tight", dpi=300)
    plt.close()  # release the figure's memory
    print(f"Network visualization saved as '{plot_path}'")


def main():
    """End-to-end driver: load books, run NER, build and analyze the network."""
    df = load_or_process_books()
    print("Initial DataFrame:")
    print(df.head())

    chapter_characters = process_chapters_ner(df)

    # Sanity check: show the characters found in the second chapter.
    sample_chapter = df.iloc[1]["title"]
    print(f"\nCharacters in chapter '{sample_chapter}':")
    print(chapter_characters[sample_chapter])

    G = build_character_network(chapter_characters)
    analyze_and_visualize_network(G)

    # GEXF export lets the graph be explored in external tools (e.g. Gephi).
    gexf_path = os.path.join(CACHE_DIR, "got_character_network.gexf")
    nx.write_gexf(G, gexf_path)
    print(f"\nCharacter network graph saved as '{gexf_path}'.")


if __name__ == "__main__":
    main()
