"""
Visualization utilities for EEG data and model results
"""
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
from sklearn.metrics import confusion_matrix
import torch
import pywt


def plot_eeg_signal(signal, fs=100, title='EEG Signal', figsize=(12, 4), save_path=None):
    """
    Plot a single-channel EEG signal against time.

    Args:
        signal (numpy.ndarray): EEG signal
        fs (int): Sampling rate
        title (str): Plot title
        figsize (tuple): Figure size
        save_path (str, optional): Path to save the figure
    """
    # Time axis in seconds, derived from the sampling rate
    t = np.arange(len(signal)) / fs

    plt.figure(figsize=figsize)
    plt.plot(t, signal)
    plt.title(title)
    plt.xlabel('Time (s)')
    plt.ylabel('Amplitude')
    plt.grid(True)

    # Persist to disk when a path is given, otherwise show interactively
    if not save_path:
        plt.show()
    else:
        plt.savefig(save_path)
        plt.close()


def plot_sleep_stages(labels, figsize=(15, 4), save_path=None):
    """
    Plot a hypnogram: the sequence of sleep stages over epochs.

    Args:
        labels (numpy.ndarray): Sleep stage labels (0=W .. 4=REM)
        figsize (tuple): Figure size
        save_path (str, optional): Path to save the figure
    """
    # Y-axis tick names, indexed by their numeric stage value
    stage_names = np.array(['W', 'N1', 'N2', 'N3', 'REM'])
    tick_positions = np.array([0, 1, 2, 3, 4])

    plt.figure(figsize=figsize)
    plt.step(np.arange(len(labels)), labels)
    plt.yticks(tick_positions, stage_names)
    plt.xlabel('Epoch')
    plt.ylabel('Sleep Stage')
    plt.title('Sleep Stages Hypnogram')
    plt.grid(True)

    if not save_path:
        plt.show()
    else:
        plt.savefig(save_path)
        plt.close()


def plot_confusion_matrix(y_true, y_pred, class_names=['W', 'N1', 'N2', 'N3', 'REM'],
                          normalize=True, figsize=(10, 8), save_path=None):
    """
    Plot confusion matrix.

    Args:
        y_true (numpy.ndarray): Ground truth labels
        y_pred (numpy.ndarray): Predicted labels
        class_names (list): List of class names
        normalize (bool): Whether to normalize each row to proportions
        figsize (tuple): Figure size
        save_path (str, optional): Path to save the figure
    """
    # Pin the label set so the matrix always has one row/column per entry in
    # class_names. Without labels=, a class absent from y_true/y_pred shrinks
    # the matrix and the tick labels no longer line up with the cells.
    cm = confusion_matrix(y_true, y_pred, labels=np.arange(len(class_names)))

    if normalize:
        row_sums = cm.sum(axis=1, keepdims=True).astype('float')
        # Guard against division by zero for classes with no true samples
        row_sums[row_sums == 0] = 1.0
        cm = cm.astype('float') / row_sums

    plt.figure(figsize=figsize)
    sns.heatmap(
        cm, annot=True, fmt='.2f' if normalize else 'd',
        cmap='Blues', xticklabels=class_names, yticklabels=class_names
    )
    plt.xlabel('Predicted Label')
    plt.ylabel('True Label')
    plt.title('Confusion Matrix')

    if save_path:
        plt.savefig(save_path)
        plt.close()
    else:
        plt.show()


def plot_frequency_bands(signal, fs=100, figsize=(15, 10), save_path=None):
    """
    Plot frequency bands of an EEG signal.

    Decomposes the signal into the classic EEG bands (delta..gamma) by
    zeroing FFT coefficients outside each band and inverse-transforming.

    Args:
        signal (numpy.ndarray): EEG signal
        fs (int): Sampling rate
        figsize (tuple): Figure size
        save_path (str, optional): Path to save the figure
    """
    # Band name -> (low, high) cutoff frequencies in Hz
    bands = {
        'Delta': (0.5, 4),
        'Theta': (4, 8),
        'Alpha': (8, 13),
        'Beta': (13, 30),
        'Gamma': (30, 50)
    }

    # Real FFT of the signal and the matching frequency axis
    n = len(signal)
    fft_vals = np.fft.rfft(signal)
    fft_freq = np.fft.rfftfreq(n, 1 / fs)

    plt.figure(figsize=figsize)

    # Original signal on top
    plt.subplot(len(bands) + 1, 1, 1)
    plt.plot(np.arange(len(signal)) / fs, signal)
    plt.title('Original EEG Signal')
    plt.xlabel('Time (s)')
    plt.ylabel('Amplitude')

    # One subplot per band
    for i, (band_name, (low, high)) in enumerate(bands.items()):
        # Zero out all coefficients outside [low, high], then invert
        mask = np.logical_and(fft_freq >= low, fft_freq <= high)
        band_fft = fft_vals.copy()
        band_fft[~mask] = 0
        # Pass n explicitly: np.fft.irfft defaults to an even output length
        # (2 * (len(input) - 1)), which would drop a sample for odd-length
        # signals and desynchronize the time axis.
        band_signal = np.fft.irfft(band_fft, n=n)

        plt.subplot(len(bands) + 1, 1, i + 2)
        plt.plot(np.arange(len(band_signal)) / fs, band_signal)
        plt.title(f'{band_name} Band ({low}-{high} Hz)')
        plt.xlabel('Time (s)')
        plt.ylabel('Amplitude')

    plt.tight_layout()

    if save_path:
        plt.savefig(save_path)
        plt.close()
    else:
        plt.show()


def plot_wavelet_transform(signal, wavelet='db4', level=5, fs=100, figsize=(15, 10), save_path=None):
    """
    Plot Wavelet Packet Transform of an EEG signal.

    Args:
        signal (numpy.ndarray): EEG signal
        wavelet (str): Wavelet family
        level (int): Decomposition level
        fs (int): Sampling rate
        figsize (tuple): Figure size
        save_path (str, optional): Path to save the figure
    """
    # Perform wavelet packet transform
    wp = pywt.WaveletPacket(data=signal, wavelet=wavelet, mode='symmetric', maxlevel=level)

    # Leaf nodes at the requested level, ordered from low to high frequency
    nodes = wp.get_level(level, order='freq')

    # Coefficient matrix: one row per frequency band (ascending order)
    values = np.array([n.data for n in nodes])

    plt.figure(figsize=figsize)

    # Original signal
    plt.subplot(2, 1, 1)
    plt.plot(np.arange(len(signal)) / fs, signal)
    plt.title('Original EEG Signal')
    plt.xlabel('Time (s)')
    plt.ylabel('Amplitude')

    # Wavelet packet coefficients as a time-frequency image.
    # origin='lower' keeps row 0 (the lowest-frequency node) at the bottom
    # of the axes; the previous default (origin='upper') combined with this
    # extent drew low-frequency content at the top, mislabeled as fs/2.
    plt.subplot(2, 1, 2)
    plt.imshow(
        values, aspect='auto', cmap='viridis', origin='lower',
        extent=[0, len(signal) / fs, 0, fs / 2]
    )
    plt.colorbar(label='Coefficient Value')
    plt.title(f'Wavelet Packet Transform (Level {level})')
    plt.xlabel('Time (s)')
    plt.ylabel('Frequency (Hz)')

    plt.tight_layout()

    if save_path:
        plt.savefig(save_path)
        plt.close()
    else:
        plt.show()


def visualize_attention_weights(attention_weights, figsize=(12, 6), save_path=None):
    """
    Visualize attention weights.

    Args:
        attention_weights (numpy.ndarray): Attention weights, either 1D
            (one weight per position) or 2D (heads x positions)
        figsize (tuple): Figure size
        save_path (str, optional): Path to save the figure
    """
    plt.figure(figsize=figsize)

    if attention_weights.ndim == 1:
        # 1D: one weight per sequence position -> bar chart
        plt.bar(range(attention_weights.shape[0]), attention_weights)
        plt.xlabel('Position')
        plt.ylabel('Attention Weight')
        plt.title('Attention Weights')
    elif attention_weights.ndim == 2:
        # 2D: one row per attention head -> heatmap
        plt.imshow(attention_weights, aspect='auto', cmap='viridis')
        plt.colorbar(label='Attention Weight')
        plt.xlabel('Position')
        plt.ylabel('Attention Head')
        plt.title('Multi-head Attention Weights')
    else:
        # Unsupported rank: render an explanatory message instead of failing
        plt.text(0.5, 0.5, f"Cannot visualize attention weights with shape {attention_weights.shape}",
                horizontalalignment='center', verticalalignment='center')
        plt.title('Attention Visualization Error')

    if save_path:
        plt.savefig(save_path)
        plt.close()
    else:
        plt.show()


def visualize_se_weights(se_weights, channel_names=None, figsize=(12, 6), save_path=None):
    """
    Visualize SE-Block channel weights as a bar chart.

    Args:
        se_weights (numpy.ndarray): SE-Block weights, shape (batch_size, channels)
        channel_names (list, optional): List of channel names
        figsize (tuple): Figure size
        save_path (str, optional): Path to save the figure
    """
    # A batched input is reduced to its first sample
    weights = se_weights[0] if len(se_weights.shape) > 1 else se_weights

    # Fall back to generic channel names when none were supplied
    if channel_names is None:
        channel_names = [f'Channel {idx + 1}' for idx in range(len(weights))]

    plt.figure(figsize=figsize)
    plt.bar(channel_names, weights)
    plt.title('SE-Block Channel Weights')
    plt.xlabel('Channel')
    plt.ylabel('Weight')
    plt.xticks(rotation=45)
    plt.grid(True)

    if not save_path:
        plt.show()
    else:
        plt.savefig(save_path)
        plt.close()


def visualize_training_history(history, figsize=(15, 10), save_path=None):
    """
    Visualize training history as a 2x2 grid of metric curves.

    Args:
        history (dict): Training history with 'train_<metric>' and
            'val_<metric>' series for loss, acc, f1 and kappa
        figsize (tuple): Figure size
        save_path (str, optional): Path to save the figure
    """
    # (history key suffix, subplot title, y-axis label) for each panel,
    # drawn in reading order: loss, accuracy, F1, kappa
    panels = [
        ('loss', 'Model Loss', 'Loss'),
        ('acc', 'Model Accuracy', 'Accuracy'),
        ('f1', 'Macro F1 Score', 'F1 Score'),
        ('kappa', "Cohen's Kappa", 'Kappa'),
    ]

    plt.figure(figsize=figsize)

    for position, (suffix, title, ylabel) in enumerate(panels, start=1):
        plt.subplot(2, 2, position)
        plt.plot(history['train_' + suffix], label='Train')
        plt.plot(history['val_' + suffix], label='Validation')
        plt.title(title)
        plt.xlabel('Epoch')
        plt.ylabel(ylabel)
        plt.legend()
        plt.grid(True)

    plt.tight_layout()

    if not save_path:
        plt.show()
    else:
        plt.savefig(save_path)
        plt.close()


def visualize_feature_maps(feature_maps, num_maps=16, figsize=(15, 10), save_path=None):
    """
    Visualize feature maps from convolutional layers as a grid of line plots.

    Args:
        feature_maps (torch.Tensor): Feature maps, shape (batch_size, channels, length)
        num_maps (int): Maximum number of feature maps to visualize
        figsize (tuple): Figure size
        save_path (str, optional): Path to save the figure
    """
    # Move tensors to CPU and convert to numpy before plotting
    if isinstance(feature_maps, torch.Tensor):
        feature_maps = feature_maps.detach().cpu().numpy()

    # Only the first sample of the batch is shown
    sample = feature_maps[0]
    count = min(num_maps, sample.shape[0])

    # Smallest square grid that fits `count` panels
    side = int(np.ceil(np.sqrt(count)))

    plt.figure(figsize=figsize)
    for idx in range(count):
        plt.subplot(side, side, idx + 1)
        plt.plot(sample[idx])
        plt.title(f'Feature Map {idx + 1}')
        plt.xticks([])
        plt.yticks([])

    plt.tight_layout()

    if not save_path:
        plt.show()
    else:
        plt.savefig(save_path)
        plt.close()


def visualize_hypnogram_comparison(y_true, y_pred, figsize=(15, 6), save_path=None):
    """
    Visualize true vs. predicted hypnograms in two stacked panels.

    Args:
        y_true (numpy.ndarray): Ground truth labels
        y_pred (numpy.ndarray): Predicted labels
        figsize (tuple): Figure size
        save_path (str, optional): Path to save the figure
    """
    # Tensors become flat numpy arrays
    if isinstance(y_true, torch.Tensor):
        y_true = y_true.cpu().numpy()
    if isinstance(y_pred, torch.Tensor):
        y_pred = y_pred.cpu().numpy()
    y_true = y_true.flatten()
    y_pred = y_pred.flatten()

    # Stage names indexed by their numeric label
    stage_names = ['W', 'N1', 'N2', 'N3', 'REM']

    plt.figure(figsize=figsize)

    # Top panel: ground truth (default color); bottom panel: predictions (orange)
    panels = [
        (1, y_true, 'True Hypnogram', {}),
        (2, y_pred, 'Predicted Hypnogram', {'color': 'orangered'}),
    ]
    for position, seq, title, extra_kwargs in panels:
        plt.subplot(2, 1, position)
        plt.step(np.arange(len(seq)), seq, where='post', linewidth=1.5, **extra_kwargs)
        # Only the bottom panel carries the shared x label
        if position == 2:
            plt.xlabel('Epoch')
        plt.ylabel('Sleep Stage')
        plt.title(title)
        plt.yticks(range(len(stage_names)), stage_names)
        plt.xlim(0, len(seq))
        plt.ylim(-0.5, len(stage_names) - 0.5)
        plt.grid(True, alpha=0.3)

    plt.tight_layout()

    if not save_path:
        plt.show()
    else:
        plt.savefig(save_path)
        plt.close()


def visualize_class_distribution(y, class_names=['W', 'N1', 'N2', 'N3', 'REM'], figsize=(10, 6), save_path=None):
    """
    Visualize class distribution in the dataset.

    Args:
        y (numpy.ndarray): Labels
        class_names (list): List of class names
        figsize (tuple): Figure size
        save_path (str, optional): Path to save the figure
    """
    # Convert to a flat numpy array
    if isinstance(y, torch.Tensor):
        y = y.cpu().numpy()
    y = y.flatten()

    # Count classes. np.unique returns labels in ascending numeric order, so
    # the dataframe is already ordered by class index; the previous
    # sort_values('Class') sorted alphabetically by name, which put W (index 0)
    # last instead of first.
    unique, counts = np.unique(y, return_counts=True)

    df = pd.DataFrame({
        'Class': [class_names[int(c)] if c < len(class_names) else f'Unknown-{c}' for c in unique],
        'Count': counts
    })

    # Calculate percentages
    total = df['Count'].sum()
    df['Percentage'] = (df['Count'] / total) * 100

    plt.figure(figsize=figsize)

    # Bar plot
    plt.bar(df['Class'], df['Count'])
    plt.xlabel('Sleep Stage')
    plt.ylabel('Count')
    plt.title('Class Distribution')
    plt.grid(True, alpha=0.3, axis='y')

    # Add count and percentage labels above bars
    for i, (count, percentage) in enumerate(zip(df['Count'], df['Percentage'])):
        plt.text(
            i, count + 0.1 * max(df['Count']),
            f'{count}\n({percentage:.1f}%)',
            ha='center'
        )

    if save_path:
        plt.savefig(save_path)
        plt.close()
    else:
        plt.show()


def visualize_model_performance_by_stage(report, figsize=(12, 8), save_path=None):
    """
    Visualize precision/recall/F1 per sleep stage as grouped bars.

    Args:
        report (dict): Evaluation report from metrics.generate_evaluation_report;
            report['per_class'][stage] must hold 'precision', 'recall', 'f1'
        figsize (tuple): Figure size
        save_path (str, optional): Path to save the figure
    """
    per_class = report['per_class']
    classes = list(per_class.keys())

    # One score list per metric, in the order the bars are drawn
    scores = {
        metric: [per_class[cls][metric] for cls in classes]
        for metric in ('precision', 'recall', 'f1')
    }

    # Grouped-bar geometry: three bars of width 0.25 around each class tick
    x = np.arange(len(classes))
    width = 0.25

    plt.figure(figsize=figsize)

    plt.bar(x - width, scores['precision'], width, label='Precision')
    plt.bar(x, scores['recall'], width, label='Recall')
    plt.bar(x + width, scores['f1'], width, label='F1-Score')

    plt.xlabel('Sleep Stage')
    plt.ylabel('Score')
    plt.title('Model Performance by Sleep Stage')
    plt.xticks(x, classes)
    plt.legend()
    plt.grid(True, alpha=0.3, axis='y')

    # Annotate every bar with its value, rotated to avoid overlap
    for col, triple in enumerate(zip(scores['precision'], scores['recall'], scores['f1'])):
        for offset, value in enumerate(triple):
            plt.text(
                col + (offset - 1) * width,
                value + 0.01,
                f'{value:.3f}',
                ha='center',
                va='bottom',
                fontsize=8,
                rotation=90
            )

    plt.tight_layout()

    if not save_path:
        plt.show()
    else:
        plt.savefig(save_path)
        plt.close()


def visualize_temporal_vs_spectral_features(temporal_features, spectral_features,
                                            pca_components=2, figsize=(10, 8), save_path=None):
    """
    Visualize comparison between temporal and spectral features using PCA

    Fits a separate PCA to each feature set and scatter-plots the projected
    samples. Layout depends on pca_components: 2 -> one 2D scatter,
    3 -> one 3D scatter, otherwise a grid of pairwise 2D scatters.

    Args:
        temporal_features (numpy.ndarray): Temporal features, shape
            (batch, features) or (batch, channels, length)
        spectral_features (numpy.ndarray): Spectral features, same layout
        pca_components (int): Number of PCA components to use
        figsize (tuple): Figure size
        save_path (str, optional): Path to save the figure
    """
    # Lazy import: sklearn is only needed when this function is called
    from sklearn.decomposition import PCA

    # Convert to numpy if tensors
    if isinstance(temporal_features, torch.Tensor):
        temporal_features = temporal_features.detach().cpu().numpy()
    if isinstance(spectral_features, torch.Tensor):
        spectral_features = spectral_features.detach().cpu().numpy()

    # Flatten (batch, channels, length) inputs to (batch, channels * length)
    # so PCA sees one feature vector per sample
    if temporal_features.ndim == 3:
        batch_size, channels, length = temporal_features.shape
        temporal_features = temporal_features.reshape(batch_size, channels * length)

    if spectral_features.ndim == 3:
        batch_size, channels, length = spectral_features.shape
        spectral_features = spectral_features.reshape(batch_size, channels * length)

    # Each feature set gets its own PCA fit, so the two scatters live in
    # different projected spaces — the comparison is of cluster structure,
    # not of absolute coordinates
    pca_temporal = PCA(n_components=pca_components)
    pca_spectral = PCA(n_components=pca_components)

    temporal_pca = pca_temporal.fit_transform(temporal_features)
    spectral_pca = pca_spectral.fit_transform(spectral_features)

    # Plot
    plt.figure(figsize=figsize)

    if pca_components == 2:
        # 2D scatter plot of the first two components of each feature set
        plt.scatter(
            temporal_pca[:, 0], temporal_pca[:, 1],
            alpha=0.7, label='Temporal Features'
        )
        plt.scatter(
            spectral_pca[:, 0], spectral_pca[:, 1],
            alpha=0.7, label='Spectral Features'
        )

        plt.xlabel('PC1')
        plt.ylabel('PC2')
        plt.title('PCA of Temporal vs Spectral Features')
        plt.legend()
        plt.grid(True, alpha=0.3)

    elif pca_components == 3:
        # 3D scatter plot; the import registers the '3d' projection on
        # older matplotlib versions even though Axes3D is not used directly
        from mpl_toolkits.mplot3d import Axes3D

        ax = plt.subplot(111, projection='3d')
        ax.scatter(
            temporal_pca[:, 0], temporal_pca[:, 1], temporal_pca[:, 2],
            alpha=0.7, label='Temporal Features'
        )
        ax.scatter(
            spectral_pca[:, 0], spectral_pca[:, 1], spectral_pca[:, 2],
            alpha=0.7, label='Spectral Features'
        )

        ax.set_xlabel('PC1')
        ax.set_ylabel('PC2')
        ax.set_zlabel('PC3')
        ax.set_title('PCA of Temporal vs Spectral Features')
        plt.legend()

    else:
        # Multiple 2D scatter plots for pairs of components, laid out in a
        # (pca_components-1) x (pca_components-1) grid indexed by (i, j);
        # only the first panel (PC1 vs PC2) carries the legend
        for i in range(pca_components):
            for j in range(i + 1, pca_components):
                plt.subplot(pca_components - 1, pca_components - 1, (i) * (pca_components - 1) + j)
                plt.scatter(
                    temporal_pca[:, i], temporal_pca[:, j],
                    alpha=0.7, label='Temporal'
                )
                plt.scatter(
                    spectral_pca[:, i], spectral_pca[:, j],
                    alpha=0.7, label='Spectral'
                )

                plt.xlabel(f'PC{i + 1}')
                plt.ylabel(f'PC{j + 1}')

                if i == 0 and j == 1:
                    plt.legend()

    plt.tight_layout()

    if save_path:
        plt.savefig(save_path)
        plt.close()
    else:
        plt.show()


def visualize_model_comparison(models_results, metrics=['accuracy', 'macro_f1', 'kappa'],
                               figsize=(12, 8), save_path=None):
    """
    Visualize performance comparison between different models as grouped bars.

    Args:
        models_results (dict): Dictionary of model results
                             {model_name: {'accuracy': value, 'macro_f1': value, ...}}
        metrics (list): List of metrics to compare
        figsize (tuple): Figure size
        save_path (str, optional): Path to save the figure
    """
    # One row per model, keeping only the requested metrics that are present
    rows = []
    for name, results in models_results.items():
        entry = {'Model': name}
        entry.update({metric: results[metric] for metric in metrics if metric in results})
        rows.append(entry)

    df = pd.DataFrame(rows)

    # Grouped-bar geometry: the group of len(metrics) bars spans width 0.8
    # centered on each model's tick
    models = df['Model']
    x = np.arange(len(models))
    width = 0.8 / len(metrics)

    plt.figure(figsize=figsize)

    for i, metric in enumerate(metrics):
        offsets = x + (i - len(metrics) / 2 + 0.5) * width
        bars = plt.bar(offsets, df[metric], width, label=metric.capitalize())

        # Annotate each bar with its value
        for rect in bars:
            top = rect.get_height()
            plt.text(
                rect.get_x() + rect.get_width() / 2., top + 0.01,
                f'{top:.3f}', ha='center', va='bottom', fontsize=8
            )

    plt.xlabel('Model')
    plt.ylabel('Score')
    plt.title('Performance Comparison Between Models')
    plt.xticks(x, models)
    plt.legend()
    plt.grid(True, alpha=0.3, axis='y')

    plt.tight_layout()

    if not save_path:
        plt.show()
    else:
        plt.savefig(save_path)
        plt.close()


def plot_feature_importance(attention_weights, feature_names=None, top_n=20, figsize=(12, 8), save_path=None):
    """
    Plot feature importance based on attention weights.

    Args:
        attention_weights (numpy.ndarray): Attention weights
        feature_names (list, optional): List of feature names
        top_n (int): Number of top features to show
        figsize (tuple): Figure size
        save_path (str, optional): Path to save the figure
    """
    # Tensors become flat numpy arrays of per-feature weights
    if isinstance(attention_weights, torch.Tensor):
        attention_weights = attention_weights.detach().cpu().numpy()
    importance = attention_weights.flatten()

    # Default to generic feature names when none were supplied
    if feature_names is None:
        feature_names = [f'Feature {idx + 1}' for idx in range(len(importance))]

    # Rank features by importance and keep the strongest top_n
    ranked = pd.DataFrame({
        'Feature': feature_names[:len(importance)],
        'Importance': importance
    }).sort_values('Importance', ascending=False).head(top_n)

    plt.figure(figsize=figsize)
    bars = plt.barh(ranked['Feature'], ranked['Importance'], color='skyblue')
    plt.xlabel('Importance')
    plt.ylabel('Feature')
    plt.title(f'Top {top_n} Features by Importance')
    plt.grid(True, alpha=0.3, axis='x')

    # Annotate each bar with its numeric importance
    for rect in bars:
        bar_width = rect.get_width()
        plt.text(
            bar_width + 0.01, rect.get_y() + rect.get_height() / 2.,
            f'{bar_width:.4f}', va='center'
        )

    plt.tight_layout()

    if not save_path:
        plt.show()
    else:
        plt.savefig(save_path)
        plt.close()


def plot_learning_rate_finder(learning_rates, losses, figsize=(10, 6), save_path=None):
    """
    Plot results from a learning rate finder.

    Left panel: raw loss vs. learning rate (log x-axis). Right panel: the
    same curve smoothed with a short centered moving average, which makes
    the descent/divergence pattern easier to read on noisy runs.

    Args:
        learning_rates (list): List of learning rates
        losses (list): Corresponding losses
        figsize (tuple): Figure size
        save_path (str, optional): Path to save the figure
    """
    plt.figure(figsize=figsize)

    # Raw curve
    plt.subplot(1, 2, 1)
    plt.plot(learning_rates, losses)
    plt.xlabel('Learning Rate')
    plt.ylabel('Loss')
    plt.xscale('log')
    plt.grid(True, which="both", alpha=0.3)
    plt.title('Learning Rate vs. Loss')

    # Smoothed curve. The previous version drew the identical raw curve
    # twice (plt.semilogx is plt.plot with a log x-scale), so the second
    # panel added no information; apply an actual moving average here.
    window = max(1, min(5, len(losses)))
    kernel = np.ones(window) / window
    smoothed = np.convolve(losses, kernel, mode='same')

    plt.subplot(1, 2, 2)
    plt.semilogx(learning_rates, smoothed)
    plt.xlabel('Learning Rate')
    plt.ylabel('Loss')
    plt.grid(True, which="both", alpha=0.3)
    plt.title('Learning Rate vs. Loss (Smoothed)')

    plt.tight_layout()

    if save_path:
        plt.savefig(save_path)
        plt.close()
    else:
        plt.show()


def plot_stage_transition_matrix(y_pred, y_true=None, class_names=['W', 'N1', 'N2', 'N3', 'REM'],
                                 figsize=(10, 8), save_path=None):
    """
    Plot stage transition matrix

    Heatmap color encodes the row-normalized probability of moving from the
    stage at epoch t (row) to the stage at epoch t+1 (column); the cell
    annotations are the raw transition counts. If y_true is given, true and
    predicted transition matrices are drawn side by side for comparison.

    Args:
        y_pred (numpy.ndarray): Predicted stages sequence
        y_true (numpy.ndarray, optional): True stages sequence for comparison
        class_names (list): List of class names
        figsize (tuple): Figure size
        save_path (str, optional): Path to save the figure
    """
    # Convert to numpy if tensors
    if isinstance(y_pred, torch.Tensor):
        y_pred = y_pred.cpu().numpy()
    if y_true is not None and isinstance(y_true, torch.Tensor):
        y_true = y_true.cpu().numpy()

    # Ensure we have 1D arrays
    y_pred = y_pred.flatten()
    if y_true is not None:
        y_true = y_true.flatten()

    # Calculate transition matrix
    num_classes = len(class_names)

    def get_transition_matrix(y):
        # Count transitions from the stage at epoch t to the stage at t+1
        transitions = np.zeros((num_classes, num_classes), dtype=int)

        for i in range(len(y) - 1):
            from_stage = int(y[i])
            to_stage = int(y[i + 1])

            # Skip if any of the stages is out of range
            if from_stage < 0 or from_stage >= num_classes or to_stage < 0 or to_stage >= num_classes:
                continue

            transitions[from_stage, to_stage] += 1

        # Normalize each row so it sums to 1 (transition probabilities)
        row_sums = transitions.sum(axis=1, keepdims=True)
        # Avoid division by zero for stages that never occur as a source
        row_sums[row_sums == 0] = 1
        transitions_norm = transitions / row_sums

        # Returns (raw counts, row-normalized probabilities)
        return transitions, transitions_norm

    # Get transition matrices
    if y_true is not None:
        # Two-panel comparison: true transitions on the left, predicted on
        # the right. Color shows probabilities; annotations show raw counts.
        fig, axes = plt.subplots(1, 2, figsize=figsize)

        # True transitions
        true_transitions, true_norm = get_transition_matrix(y_true)

        sns.heatmap(
            true_norm, annot=true_transitions, fmt='d',
            xticklabels=class_names, yticklabels=class_names,
            cmap='Blues', ax=axes[0]
        )
        axes[0].set_title('True Stage Transitions')
        axes[0].set_xlabel('To Stage')
        axes[0].set_ylabel('From Stage')

        # Predicted transitions
        pred_transitions, pred_norm = get_transition_matrix(y_pred)

        sns.heatmap(
            pred_norm, annot=pred_transitions, fmt='d',
            xticklabels=class_names, yticklabels=class_names,
            cmap='Blues', ax=axes[1]
        )
        axes[1].set_title('Predicted Stage Transitions')
        axes[1].set_xlabel('To Stage')
        axes[1].set_ylabel('From Stage')

    else:
        # Only predicted transitions
        pred_transitions, pred_norm = get_transition_matrix(y_pred)

        plt.figure(figsize=figsize)
        sns.heatmap(
            pred_norm, annot=pred_transitions, fmt='d',
            xticklabels=class_names, yticklabels=class_names,
            cmap='Blues'
        )
        plt.title('Stage Transitions')
        plt.xlabel('To Stage')
        plt.ylabel('From Stage')

    plt.tight_layout()

    if save_path:
        plt.savefig(save_path)
        plt.close()
    else:
        plt.show()