"""
Split CIFAR datasets for continual learning.

This module provides implementations of CIFAR-10 and CIFAR-100 datasets
specially adapted for continual learning scenarios. The datasets are split into
multiple tasks based on class partitioning:

- Each task contains a distinct subset of classes
- Tasks are presented sequentially during training
- Earlier tasks are typically inaccessible during later tasks

These implementations support both task-incremental learning (where task identity
is known during inference) and class-incremental learning (where task identity
is unknown during inference).
"""

import os
import numpy as np
from typing import Dict, List, Optional, Tuple, Union, Any, Callable
import torch
import torchvision
import torchvision.transforms as transforms

from .base import ContinualDataset, TaskDataset
from continuallearning.registry import DATASETS


@DATASETS.register("SplitCIFAR10")
class SplitCIFAR10(ContinualDataset):
    """
    Split CIFAR-10 dataset for continual learning.

    This class partitions the CIFAR-10 dataset into multiple disjoint tasks,
    where each task contains a contiguous block of classes. For example, with
    num_tasks=5, each task would contain 2 classes:
    - Task 0: Classes 0-1
    - Task 1: Classes 2-3
    - Task 2: Classes 4-5
    - Task 3: Classes 6-7
    - Task 4: Classes 8-9

    The class mapping is reset for each task, so classes always start from 0
    within each task. This implementation follows a task-incremental learning
    approach, where each task has its own output space.

    Args:
        root (str): Root directory for the dataset
        num_tasks (int): Number of tasks to split the dataset into
        train (bool): Whether to use the training set
        download (bool): Whether to download the dataset if not available
        transform (Optional[Callable]): Data transformation function. If None,
            a standard augmentation pipeline (train) or normalization-only
            pipeline (eval) is used.
        target_transform (Optional[Callable]): Target transformation function

    Raises:
        ValueError: If num_tasks exceeds the number of classes (10)
    """

    # Channel-wise mean / std of CIFAR-10, used by the default transforms.
    _MEAN = (0.4914, 0.4822, 0.4465)
    _STD = (0.2470, 0.2435, 0.2616)

    def __init__(
        self,
        root: str = "./data",
        num_tasks: int = 5,
        train: bool = True,
        download: bool = True,
        transform: Optional[Callable] = None,
        target_transform: Optional[Callable] = None,
    ):
        super().__init__(num_tasks)

        if transform is None:
            transform = self._default_transform(train)

        # Load the raw dataset; the transform is attached per-task below.
        cifar_dataset = torchvision.datasets.CIFAR10(
            root=root, train=train, download=download, transform=None
        )

        num_classes = 10
        # Validate with a real exception: `assert` is stripped under `-O`.
        if num_tasks > num_classes:
            raise ValueError(
                f"Number of tasks {num_tasks} cannot exceed number of classes {num_classes}"
            )
        classes_per_task = num_classes // num_tasks
        # NOTE: if num_classes is not evenly divisible by num_tasks, the
        # trailing num_classes % num_tasks classes are excluded entirely
        # (this matches the historical behavior of this class).

        data = np.asarray(cifar_dataset.data)
        targets = np.asarray(cifar_dataset.targets)

        # Split the dataset into tasks
        for task_id in range(num_tasks):
            # Contiguous class block assigned to this task
            task_classes = list(
                range(task_id * classes_per_task, (task_id + 1) * classes_per_task)
            )

            # Vectorized selection of this task's samples; preserves the
            # original sample order (avoids an O(n * k) Python-level scan).
            mask = np.isin(targets, task_classes)
            task_data = data[mask]
            task_targets = targets[mask]

            # Remap original class IDs to 0..classes_per_task-1 within the task
            mapping = {
                original_class: i for i, original_class in enumerate(task_classes)
            }

            task_dataset = TaskDataset(
                data=task_data,
                targets=task_targets.tolist(),
                task_id=task_id,
                task_classes=task_classes,
                transform=transform,
                target_transform=target_transform,
            )
            task_dataset.set_class_mapping(mapping)

            self.task_datasets[task_id] = task_dataset

    @staticmethod
    def _default_transform(train: bool) -> Callable:
        """Build the default pipeline: crop/flip augmentation for training,
        normalization only for evaluation."""
        ops: List[Any] = []
        if train:
            ops = [
                transforms.RandomCrop(32, padding=4),
                transforms.RandomHorizontalFlip(),
            ]
        ops += [
            transforms.ToTensor(),
            transforms.Normalize(SplitCIFAR10._MEAN, SplitCIFAR10._STD),
        ]
        return transforms.Compose(ops)

    def get_cumulative_dataset(self, up_to_task: Optional[int] = None) -> TaskDataset:
        """
        Get a dataset containing data from all tasks seen so far.

        This method creates a unified dataset combining samples from multiple tasks,
        which is useful for:
        - Joint training or fine-tuning on all available data
        - Evaluating a model's performance across all previously learned tasks
        - Analyzing how knowledge from different tasks might transfer or interfere

        The resulting dataset preserves the original class IDs and combines all
        classes from the included tasks.

        Args:
            up_to_task (Optional[int]): Include tasks up to this task ID (inclusive).
                                       If None, include all tasks the model has seen.

        Returns:
            TaskDataset: Combined dataset containing samples from all included tasks.
                        The task_id is set to -1 to indicate a combined dataset.

        Raises:
            ValueError: If no tasks are available to include
        """
        tasks_to_include = list(self.seen_tasks)
        if up_to_task is not None:
            tasks_to_include = [t for t in tasks_to_include if t <= up_to_task]

        if not tasks_to_include:
            raise ValueError("No tasks to include in cumulative dataset")

        included = [self.task_datasets[t] for t in tasks_to_include]
        first_task = included[0]

        all_targets: List[int] = []
        all_classes: List[int] = []
        for task_dataset in included:
            all_targets.extend(task_dataset.targets)
            all_classes.extend(task_dataset.task_classes)

        return TaskDataset(
            # Stack image arrays directly, skipping intermediate Python lists
            data=np.concatenate([np.asarray(td.data) for td in included]),
            targets=all_targets,
            task_id=-1,  # -1 marks a combined, multi-task dataset
            task_classes=sorted(set(all_classes)),
            transform=first_task.transform,
            target_transform=first_task.target_transform,
        )


@DATASETS.register("SplitCIFAR100")
class SplitCIFAR100(ContinualDataset):
    """
    Split CIFAR-100 dataset for continual learning.

    This class partitions the CIFAR-100 dataset into multiple disjoint tasks,
    where each task contains a distinct subset of classes. Unlike SplitCIFAR10,
    this implementation:

    1. Randomly permutes class assignments to tasks (using a fixed seed)
    2. Supports both task-incremental and class-incremental learning:
       - Task-incremental: Classes are remapped to start from 0 within each task
       - Class-incremental: Original class IDs are preserved across tasks

    For example, with num_tasks=10, each task would contain 10 randomly selected
    classes. This allows evaluating continual learning algorithms on a more
    challenging dataset with more classes and tasks.

    Args:
        root (str): Root directory for the dataset
        num_tasks (int): Number of tasks to split the dataset into
        train (bool): Whether to use the training set
        download (bool): Whether to download the dataset if not available
        transform (Optional[Callable]): Data transformation function. If None,
            a standard augmentation pipeline (train) or normalization-only
            pipeline (eval) is used.
        target_transform (Optional[Callable]): Target transformation function
        task_incremental (bool): If True, use task-incremental learning setup
                              where class IDs restart from 0 for each task.
                              If False, use class-incremental learning setup
                              where original class IDs are preserved.

    Raises:
        ValueError: If num_tasks exceeds the number of classes (100)
    """

    # Channel-wise mean / std of CIFAR-100, used by the default transforms.
    _MEAN = (0.5071, 0.4865, 0.4409)
    _STD = (0.2673, 0.2564, 0.2762)

    # Fixed seed for the class-to-task permutation (reproducibility).
    _SPLIT_SEED = 42

    def __init__(
        self,
        root: str = "./data",
        num_tasks: int = 10,
        train: bool = True,
        download: bool = True,
        transform: Optional[Callable] = None,
        target_transform: Optional[Callable] = None,
        task_incremental: bool = False,
    ):
        super().__init__(num_tasks)

        self.task_incremental = task_incremental

        if transform is None:
            transform = self._default_transform(train)

        # Load the raw dataset; the transform is attached per-task below.
        cifar_dataset = torchvision.datasets.CIFAR100(
            root=root, train=train, download=download, transform=None
        )

        num_classes = 100
        # Validate with a real exception: `assert` is stripped under `-O`.
        if num_tasks > num_classes:
            raise ValueError(
                f"Number of tasks {num_tasks} cannot exceed number of classes {num_classes}"
            )
        classes_per_task = num_classes // num_tasks
        # NOTE: if num_classes is not evenly divisible by num_tasks, the
        # trailing num_classes % num_tasks classes are excluded entirely
        # (this matches the historical behavior of this class).

        # Use a private RNG so the constructor does not clobber NumPy's
        # global random state. RandomState(42) produces the exact same
        # permutation as the previous `np.random.seed(42)` approach.
        rng = np.random.RandomState(self._SPLIT_SEED)
        permuted_classes = rng.permutation(num_classes).tolist()

        data = np.asarray(cifar_dataset.data)
        targets = np.asarray(cifar_dataset.targets)

        # Split the dataset into tasks
        for task_id in range(num_tasks):
            # Slice of the permuted class list assigned to this task
            task_classes = permuted_classes[
                task_id * classes_per_task : (task_id + 1) * classes_per_task
            ]

            # Vectorized selection of this task's samples; preserves the
            # original sample order (avoids an O(n * k) Python-level scan).
            mask = np.isin(targets, task_classes)
            task_data = data[mask]
            task_targets = targets[mask]

            if self.task_incremental:
                # Task-incremental: remap to 0..classes_per_task-1 per task
                mapping = {
                    original_class: i for i, original_class in enumerate(task_classes)
                }
            else:
                # Class-incremental: keep the original class IDs
                mapping = {cls: cls for cls in task_classes}

            task_dataset = TaskDataset(
                data=task_data,
                targets=task_targets.tolist(),
                task_id=task_id,
                task_classes=task_classes,
                transform=transform,
                target_transform=target_transform,
            )
            task_dataset.set_class_mapping(mapping)

            self.task_datasets[task_id] = task_dataset

    @staticmethod
    def _default_transform(train: bool) -> Callable:
        """Build the default pipeline: crop/flip augmentation for training,
        normalization only for evaluation."""
        ops: List[Any] = []
        if train:
            ops = [
                transforms.RandomCrop(32, padding=4),
                transforms.RandomHorizontalFlip(),
            ]
        ops += [
            transforms.ToTensor(),
            transforms.Normalize(SplitCIFAR100._MEAN, SplitCIFAR100._STD),
        ]
        return transforms.Compose(ops)

    def get_cumulative_dataset(self, up_to_task: Optional[int] = None) -> TaskDataset:
        """
        Get a dataset containing data from all tasks seen so far.

        This method creates a unified dataset combining samples from multiple tasks,
        which is particularly important for evaluating continual learning models on
        all previously seen knowledge. For CIFAR-100 specifically, this can be used to:

        - Measure catastrophic forgetting across a larger number of classes
        - Evaluate knowledge transfer between randomly assigned class groups
        - Test class-incremental learning scenarios when task_incremental=False

        The behavior of the combined dataset depends on the task_incremental setting:
        - With task_incremental=True: Class IDs are task-specific, potentially overlapping
        - With task_incremental=False: Original class IDs are preserved, ensuring uniqueness

        Args:
            up_to_task (Optional[int]): Include tasks up to this task ID (inclusive).
                                       If None, include all tasks the model has seen.

        Returns:
            TaskDataset: Combined dataset containing samples from all included tasks.
                        The task_id is set to -1 to indicate a combined dataset.

        Raises:
            ValueError: If no tasks are available to include
        """
        tasks_to_include = list(self.seen_tasks)
        if up_to_task is not None:
            tasks_to_include = [t for t in tasks_to_include if t <= up_to_task]

        if not tasks_to_include:
            raise ValueError("No tasks to include in cumulative dataset")

        included = [self.task_datasets[t] for t in tasks_to_include]
        first_task = included[0]

        all_targets: List[int] = []
        all_classes: List[int] = []
        for task_dataset in included:
            all_targets.extend(task_dataset.targets)
            all_classes.extend(task_dataset.task_classes)

        return TaskDataset(
            # Stack image arrays directly, skipping intermediate Python lists
            data=np.concatenate([np.asarray(td.data) for td in included]),
            targets=all_targets,
            task_id=-1,  # -1 marks a combined, multi-task dataset
            task_classes=sorted(set(all_classes)),
            transform=first_task.transform,
            target_transform=first_task.target_transform,
        )
