"""
Dataset class for loading and processing sleep EEG data
"""
import os
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
from sklearn.model_selection import KFold
import torch.nn.functional as F


class SleepEDFDataset(Dataset):
    """
    Map-style dataset over single Sleep-EDF epochs.

    Each item pairs one EEG epoch with its sleep-stage label, converted to
    torch tensors on access.
    """

    def __init__(self, x, y, transform=None):
        """
        Args:
            x (numpy.ndarray): EEG signals, shape (n_samples, input_length, n_channels)
            y (numpy.ndarray): Sleep stage labels, shape (n_samples,)
            transform (callable, optional): Augmentation applied to each signal
                before tensor conversion
        """
        self.x = x
        self.y = y
        self.transform = transform

    def __len__(self):
        """Number of epochs in the dataset."""
        return len(self.y)

    def __getitem__(self, idx):
        """
        Fetch one epoch.

        Args:
            idx (int): Index of the epoch

        Returns:
            dict: {'eeg': FloatTensor signal, 'label': LongTensor stage}
        """
        signal = self.x[idx]
        stage = self.y[idx]

        # Optional augmentation runs on the raw numpy signal
        if self.transform:
            signal = self.transform(signal)

        return {
            'eeg': torch.from_numpy(signal).float(),
            'label': torch.tensor(stage, dtype=torch.long),
        }


class SequentialSleepEDFDataset(Dataset):
    """
    Map-style dataset yielding windows of consecutive Sleep-EDF epochs.

    Each item is a length-`seq_length` window of epochs plus all labels in
    the window; the classification target is the middle epoch's label, so a
    model sees symmetric past/future context.
    """

    def __init__(self, x, y, seq_length=25, transform=None):
        """
        Args:
            x (numpy.ndarray): EEG signals, shape (n_samples, input_length, n_channels)
            y (numpy.ndarray): Sleep stage labels, shape (n_samples,)
            seq_length (int): Window size in consecutive epochs
            transform (callable, optional): Per-epoch augmentation
        """
        self.x = x
        self.y = y
        self.seq_length = seq_length
        self.transform = transform

        # Only window starts that leave room for a full window are valid
        self.indices = np.arange(len(y) - seq_length + 1)

    def __len__(self):
        """Number of full windows available."""
        return len(self.indices)

    def __getitem__(self, idx):
        """
        Fetch one window of consecutive epochs.

        Args:
            idx (int): Index of the window start

        Returns:
            dict: {'eeg_seq': FloatTensor, 'label_seq': LongTensor,
                   'target': LongTensor label of the centre epoch}
        """
        start = self.indices[idx]
        stop = start + self.seq_length

        window_x = self.x[start:stop]
        window_y = self.y[start:stop]

        # The centre epoch's label is the prediction target
        target_label = window_y[self.seq_length // 2]

        if self.transform:
            window_x = np.array([self.transform(epoch) for epoch in window_x])

        eeg = torch.from_numpy(window_x).float()
        labels = torch.tensor(window_y, dtype=torch.long)
        target = torch.tensor(target_label, dtype=torch.long)

        # [seq_length, signal_length, channels] -> [seq_length, channels, signal_length]
        if eeg.ndim == 3:
            eeg = eeg.permute(0, 2, 1)

        return {'eeg_seq': eeg, 'label_seq': labels, 'target': target}


def load_sleep_edf_npz(file_path):
    """
    Load Sleep-EDF data from .npz file

    Args:
        file_path (str): Path to the .npz file

    Returns:
        tuple: (x, y, fs, ch_label) where:
            x (numpy.ndarray): EEG signals
            y (numpy.ndarray): Sleep stage labels
            fs (int): Sampling rate
            ch_label (str): EEG channel label ('Unknown' if absent from the file)
    """
    # np.load on an .npz returns a lazy NpzFile that keeps the underlying zip
    # handle open; use a context manager so the file is closed after the
    # arrays have been materialized.
    with np.load(file_path) as data:
        x = data['x']
        y = data['y']
        fs = data['fs']
        ch_label = data.get('ch_label', 'Unknown')

    # Filter out unknown sleep stages (label 5)
    valid_idx = y != 5
    return x[valid_idx], y[valid_idx], fs, ch_label


def get_subject_files(data_dir):
    """Group the .npz files in *data_dir* by subject ID.

    The subject ID is read from characters 3-4 of the filename (Sleep-EDF
    naming convention). Returns a dict mapping subject ID -> list of full
    file paths.
    """
    subject_files = {}
    for entry in os.listdir(data_dir):
        if not entry.endswith('.npz'):
            continue
        try:
            sid = entry[3:5]
            subject_files.setdefault(sid, []).append(os.path.join(data_dir, entry))
        except (IndexError, ValueError):
            print(f"Warning: Could not extract subject ID from filename {entry}")

    return subject_files


def create_kfold_splits(data_dir, n_folds=20, verbose=True):
    """
    Create subject-wise k-fold cross validation splits with randomized validation subject selection

    Splits are made at the subject level, so all recordings of a subject stay
    in the same partition. Both the fold assignment and the per-fold
    validation-subject draw use fixed seed 42, so repeated calls on the same
    directory produce identical splits.

    Args:
        data_dir (str): Directory containing the .npz files
        n_folds (int): Number of folds
        verbose (bool): Whether to print detailed information about the splits

    Returns:
        list: List of (train_files, val_files, test_files) tuples for each fold
    """
    subject_files = get_subject_files(data_dir)
    subjects = list(subject_files.keys())
    subjects.sort()  # Sort to ensure reproducibility

    # Ensure we have enough subjects for the requested number of folds
    if len(subjects) < n_folds:
        print(f"Warning: Only {len(subjects)} subjects available, but {n_folds} folds requested")
        n_folds = len(subjects)

    # Set random seed for reproducibility (NOTE: mutates numpy's global RNG state)
    np.random.seed(42)

    # Create k-fold splits at subject level
    kf = KFold(n_splits=n_folds, shuffle=True, random_state=42)
    splits = []

    if verbose:
        print(f"\n{'=' * 50}")
        print(f"Creating {n_folds}-fold cross-validation splits")
        print(f"Total subjects: {len(subjects)}")
        print(f"{'=' * 50}")

    for fold_idx, (train_val_idx, test_idx) in enumerate(kf.split(subjects)):
        fold = fold_idx + 1

        # Get subjects for this fold
        train_val_subject_ids = [subjects[i] for i in train_val_idx]
        test_subject_ids = [subjects[i] for i in test_idx]

        # Randomly select validation subjects (15% of train_val subjects, at least 1)
        n_val_subjects = max(1, int(0.15 * len(train_val_subject_ids)))
        val_subject_indices = np.random.choice(len(train_val_subject_ids), size=n_val_subjects, replace=False)
        val_subject_ids = [train_val_subject_ids[i] for i in val_subject_indices]
        train_subject_ids = [s for i, s in enumerate(train_val_subject_ids) if i not in val_subject_indices]

        # Convert subject IDs to file lists
        train_files = [file for subject_id in train_subject_ids for file in subject_files[subject_id]]
        val_files = [file for subject_id in val_subject_ids for file in subject_files[subject_id]]
        test_files = [file for subject_id in test_subject_ids for file in subject_files[subject_id]]

        splits.append((train_files, val_files, test_files))

        if verbose:
            print(f"\n----- Fold {fold} of {n_folds} -----")
            print(f"Test Subjects ({len(test_subject_ids)}): {sorted(test_subject_ids)}")
            print(f"Validation Subjects ({len(val_subject_ids)}): {sorted(val_subject_ids)}")
            print(f"Training Subjects ({len(train_subject_ids)}): {sorted(train_subject_ids)}")
            print(f"Files: Train ({len(train_files)}), Val ({len(val_files)}), Test ({len(test_files)})")

    if verbose:
        # Add an overall summary to check subject allocation
        subject_allocation = {subject: {"train": 0, "val": 0, "test": 0} for subject in subjects}

        for fold_idx, (train_files, val_files, test_files) in enumerate(splits):
            # Extract subject IDs from filenames (characters 3-4, mirroring get_subject_files)
            train_subjects = set([os.path.basename(f)[3:5] for f in train_files])
            val_subjects = set([os.path.basename(f)[3:5] for f in val_files])
            test_subjects = set([os.path.basename(f)[3:5] for f in test_files])

            for subject in train_subjects:
                subject_allocation[subject]["train"] += 1
            for subject in val_subjects:
                subject_allocation[subject]["val"] += 1
            for subject in test_subjects:
                subject_allocation[subject]["test"] += 1

        print(f"\n{'=' * 50}")
        print("Subject Allocation Summary")
        print(f"{'=' * 50}")

        for subject, counts in subject_allocation.items():
            print(f"Subject {subject}: Train ({counts['train']}), Val ({counts['val']}), Test ({counts['test']})")

        # Verify that each subject appears in test exactly once
        test_counts = [counts["test"] for counts in subject_allocation.values()]
        if all(count == 1 for count in test_counts):
            print("\nVerification PASSED: Each subject appears in test set exactly once")
        else:
            print("\nVerification FAILED: Some subjects don't appear in test set exactly once")
            subjects_with_issues = [subject for subject, counts in subject_allocation.items() if counts["test"] != 1]
            print(f"Subjects with issues: {subjects_with_issues}")

    return splits


def prepare_dataloaders(train_files, val_files, test_files, batch_size=128, seq_length=25, num_workers=2,
                        prefetch_factor=2, pin_memory=True, persistent_workers=False, transform=None):
    """
    Prepare DataLoaders for training, validation, and testing

    Args:
        train_files (list): List of training file paths
        val_files (list): List of validation file paths
        test_files (list): List of test file paths
        batch_size (int): Batch size
        seq_length (int): Sequence length for sequential data
        num_workers (int): Number of workers for data loading (REDUCED from default 8)
        prefetch_factor (int): Prefetch factor for data loading
        pin_memory (bool): Whether to pin memory
        persistent_workers (bool): Whether to keep worker processes alive (SET TO FALSE)
        transform (callable, optional): Augmentation applied to TRAINING samples only

    Returns:
        dict: {
            'feature_extraction': (train_loader, val_loader, test_loader) over
                single epochs,
            'sequence_learning': (seq_train_loader, seq_val_loader,
                seq_test_loader) over windows of `seq_length` epochs,
            'class_weights': torch.FloatTensor of inverse-frequency class
                weights computed from the training labels
        }
    """
    def _load_concat(files):
        # Load every .npz file and concatenate epochs along the first axis.
        xs, ys = [], []
        for file in files:
            x, y, _, _ = load_sleep_edf_npz(file)
            xs.append(x)
            ys.append(y)
        return np.concatenate(xs), np.concatenate(ys)

    x_train, y_train = _load_concat(train_files)
    x_val, y_val = _load_concat(val_files)
    x_test, y_test = _load_concat(test_files)

    # Epoch-level datasets (augmentation only on the training split)
    train_dataset = SleepEDFDataset(x_train, y_train, transform=transform)
    val_dataset = SleepEDFDataset(x_val, y_val)
    test_dataset = SleepEDFDataset(x_test, y_test)

    # Sequential datasets for sequence learning
    seq_train_dataset = SequentialSleepEDFDataset(x_train, y_train, seq_length=seq_length, transform=transform)
    seq_val_dataset = SequentialSleepEDFDataset(x_val, y_val, seq_length=seq_length)
    seq_test_dataset = SequentialSleepEDFDataset(x_test, y_test, seq_length=seq_length)

    def _make_loader(dataset, shuffle):
        # Single place for the shared loader configuration (reduced workers,
        # no persistent workers by default).
        return DataLoader(
            dataset,
            batch_size=batch_size,
            shuffle=shuffle,
            num_workers=num_workers,
            pin_memory=pin_memory,
            prefetch_factor=prefetch_factor,
            persistent_workers=persistent_workers
        )

    # Only the training loaders shuffle; val/test keep epoch order
    train_loader = _make_loader(train_dataset, shuffle=True)
    val_loader = _make_loader(val_dataset, shuffle=False)
    test_loader = _make_loader(test_dataset, shuffle=False)

    seq_train_loader = _make_loader(seq_train_dataset, shuffle=True)
    seq_val_loader = _make_loader(seq_val_dataset, shuffle=False)
    seq_test_loader = _make_loader(seq_test_dataset, shuffle=False)

    # Inverse-frequency class weights for handling imbalance, normalized so
    # they sum to the number of classes.
    class_counts = np.bincount(y_train)
    # Guard against classes absent from this split: 1.0 / 0 would yield inf
    # weights and poison the loss.
    class_weights = 1.0 / np.maximum(class_counts, 1)
    class_weights = class_weights / np.sum(class_weights) * len(class_counts)
    class_weights = torch.tensor(class_weights, dtype=torch.float)

    return {
        'feature_extraction': (train_loader, val_loader, test_loader),
        'sequence_learning': (seq_train_loader, seq_val_loader, seq_test_loader),
        'class_weights': class_weights
    }


# Data augmentation functions
def add_gaussian_noise(x, mean=0, std=0.01):
    """Return a copy of *x* with Gaussian noise (given mean/std) added."""
    noise = np.random.normal(mean, std, x.shape)
    return x + noise


def time_shift(x, shift_range=0.1):
    """Randomly shift the signal along the time axis, zero-filling the gap.

    Args:
        x (numpy.ndarray): Signal; the shift is applied along axis 0
        shift_range (float): Fraction of the signal length to shift by

    Returns:
        numpy.ndarray: Shifted copy of x (the input is never modified)
    """
    shift = int(shift_range * x.shape[0])

    # For short signals the computed shift can be 0; the original code then
    # evaluated x[:-0] (an empty slice) and crashed on the assignment. A zero
    # shift is simply a no-op.
    if shift == 0:
        return x.copy()

    direction = np.random.choice([-1, 1])
    shifted_x = np.zeros_like(x)

    if direction == 1:  # shift right
        shifted_x[shift:] = x[:-shift]
    else:  # shift left
        shifted_x[:-shift] = x[shift:]

    return shifted_x


def scaling(x, scale_range=(0.9, 1.1)):
    """Multiply the whole signal by one random factor drawn from scale_range."""
    factor = np.random.uniform(scale_range[0], scale_range[1])
    return x * factor


class SleepEDFAugmentation:
    """Stochastic augmentation pipeline for Sleep-EDF signals.

    Each transform (Gaussian noise, time shift, amplitude scaling) is applied
    independently with its own probability, always in that fixed order.
    """

    def __init__(self, p_noise=0.5, p_shift=0.5, p_scale=0.5):
        # Per-transform application probabilities in [0, 1]
        self.p_noise = p_noise
        self.p_shift = p_shift
        self.p_scale = p_scale

    def __call__(self, x):
        """Return the (possibly) augmented signal."""
        roll = np.random.rand

        if roll() < self.p_noise:
            x = add_gaussian_noise(x)

        if roll() < self.p_shift:
            x = time_shift(x)

        if roll() < self.p_scale:
            x = scaling(x)

        return x