"""
Base classes for continual learning datasets.

This module defines the abstract base classes and interfaces for continual learning datasets,
providing standardized ways to access data across multiple tasks.
"""

import random
from abc import ABC, abstractmethod
from typing import Any, Callable, Dict, Iterator, List, Optional, Tuple, TypeVar, Union

import numpy as np
import torch
from torch.utils.data import ConcatDataset, Dataset, Subset, random_split


# Covariant type variable for generic dataset element types.
T_co = TypeVar("T_co", covariant=True)


class TaskDataset(Dataset[Tuple[Any, int, int]]):
    """
    Dataset class for a single task in continual learning.

    Extends PyTorch's Dataset to serve the samples of one task, remapping
    original class IDs to contiguous task-local IDs and applying optional
    transforms. Items are ``(data, mapped_target, task_id)`` tuples.

    Args:
        data: Data samples (indexable; typically a numpy array)
        targets: Target labels, one per sample
        task_id: Task identifier
        task_classes: Original class IDs present in this task
        transform: Optional data transformation function
        target_transform: Optional target transformation function

    Raises:
        ValueError: If ``data`` and ``targets`` differ in length
    """

    def __init__(
        self,
        data: np.ndarray,
        targets: List[int],
        task_id: int,
        task_classes: List[int],
        transform: Optional[Callable] = None,
        target_transform: Optional[Callable] = None,
    ) -> None:
        # Raise instead of assert: asserts are stripped under ``python -O``
        # and must not be used for input validation.
        if len(data) != len(targets):
            raise ValueError("Data and targets must have the same length")

        self.data = data
        self.targets = targets
        self.task_id = task_id
        self.task_classes = task_classes
        self.transform = transform
        self.target_transform = target_transform

        # Map original class IDs to contiguous task-local IDs (0, 1, ...).
        self.class_mapping = {cls: i for i, cls in enumerate(task_classes)}

    def __len__(self) -> int:
        """Return the number of data samples."""
        return len(self.data)

    def __getitem__(self, index: int) -> Tuple[Any, int, int]:
        """
        Get a data item.

        Args:
            index: Index of the data item

        Returns:
            Tuple containing (transformed_data, mapped_target, task_id)
        """
        img, target = self.data[index], self.targets[index]

        # Image-like numpy samples are converted to PIL so torchvision-style
        # transforms can be applied. PIL is imported lazily so it stays an
        # optional dependency for non-image data.
        if isinstance(img, np.ndarray):
            from PIL import Image

            img = Image.fromarray(img)

        if self.transform is not None:
            img = self.transform(img)

        # Classes absent from the mapping fall through unchanged rather
        # than raising.
        mapped_target = self.class_mapping.get(target, target)

        if self.target_transform is not None:
            mapped_target = self.target_transform(mapped_target)

        return img, mapped_target, self.task_id

    def get_sample_by_class(
        self, class_id: int, n_samples: int = 1
    ) -> List[Tuple[Any, int, int]]:
        """
        Get samples from a specific class.

        Args:
            class_id: Original class identifier
            n_samples: Number of samples to retrieve

        Returns:
            Up to ``n_samples`` items for the class (empty list if the class
            is not present). Selection is random (unseeded) when more
            samples exist than requested.
        """
        indices = [i for i, t in enumerate(self.targets) if t == class_id]
        if not indices:
            return []

        # Subsample without replacement when the class is over-represented.
        if len(indices) > n_samples:
            indices = random.sample(indices, n_samples)

        return [self[i] for i in indices]

    def set_class_mapping(self, mapping: Dict[int, int]) -> None:
        """
        Set mapping from original class IDs to task-specific IDs.

        Args:
            mapping: Mapping from original to task-specific IDs
        """
        self.class_mapping = mapping

    def get_class_distribution(self) -> Dict[int, int]:
        """
        Get the distribution of classes in the dataset.

        Returns:
            Dictionary mapping original class IDs to sample counts
        """
        distribution: Dict[int, int] = {}
        for target in self.targets:
            distribution[target] = distribution.get(target, 0) + 1
        return distribution

    def get_subset(self, indices: List[int]) -> "TaskDataset":
        """
        Create a subset of this dataset.

        Args:
            indices: Indices to include in the subset

        Returns:
            A new TaskDataset containing only the specified indices; the
            task ID, class list, and transforms are shared with this dataset.
        """
        # Fancy indexing: assumes ``data`` supports list-based indexing
        # (true for numpy arrays).
        subset_data = self.data[indices]
        subset_targets = [self.targets[i] for i in indices]

        return TaskDataset(
            data=subset_data,
            targets=subset_targets,
            task_id=self.task_id,
            task_classes=self.task_classes,
            transform=self.transform,
            target_transform=self.target_transform,
        )


class ContinualDataset(ABC):
    """
    Abstract base class for continual learning datasets.

    Defines the interface for accessing per-task datasets in a continual
    learning scenario, with support for task switching, cumulative dataset
    creation, and train/validation splitting.

    Args:
        num_tasks: Number of tasks in the dataset
    """

    def __init__(self, num_tasks: int) -> None:
        self.num_tasks = num_tasks
        # Lazily populated cache of per-task datasets.
        self.task_datasets: Dict[int, TaskDataset] = {}
        # -1 means "no current task selected yet".
        self.current_task: int = -1
        # IDs of tasks that have been requested or activated so far.
        self.seen_tasks: set = set()

        # Common transforms that can be applied to all tasks.
        self.common_transform: Optional[Callable] = None
        self.common_target_transform: Optional[Callable] = None

    def __len__(self) -> int:
        """Return the total number of tasks."""
        return self.num_tasks

    @abstractmethod
    def prepare_task_dataset(self, task_id: int) -> TaskDataset:
        """
        Prepare the dataset for a specific task.

        Subclasses implement this to load or generate the data for one task.

        Args:
            task_id: Task identifier

        Returns:
            Dataset for the specified task
        """
        pass

    def _is_valid_task(self, task_id: int) -> bool:
        """Return True if ``task_id`` lies in ``[0, num_tasks)``."""
        return 0 <= task_id < self.num_tasks

    def _load_task(self, task_id: int) -> TaskDataset:
        """Return the cached dataset for ``task_id``, preparing it on first use."""
        if task_id not in self.task_datasets:
            self.task_datasets[task_id] = self.prepare_task_dataset(task_id)
        return self.task_datasets[task_id]

    def get_task_dataset(self, task_id: int) -> TaskDataset:
        """
        Get the dataset for a specific task.

        If the task dataset hasn't been loaded yet, this will call
        prepare_task_dataset() to load it. The task is also marked as seen.

        Args:
            task_id: Task identifier

        Returns:
            Dataset for the specified task

        Raises:
            KeyError: If the task ID is not valid
        """
        if not self._is_valid_task(task_id):
            raise KeyError(
                f"Task {task_id} is not valid (valid range: 0-{self.num_tasks - 1})"
            )

        dataset = self._load_task(task_id)
        self.seen_tasks.add(task_id)
        return dataset

    def get_current_task_dataset(self) -> TaskDataset:
        """
        Get the dataset for the current task.

        Returns:
            Dataset for the current task

        Raises:
            ValueError: If no current task is set
        """
        if self.current_task < 0:
            raise ValueError("No current task set. Call set_current_task() first.")

        return self.get_task_dataset(self.current_task)

    def set_current_task(self, task_id: int) -> None:
        """
        Set the current task, mark it as seen, and ensure its dataset is loaded.

        Args:
            task_id: Task identifier

        Raises:
            ValueError: If the task ID is not valid
        """
        if not self._is_valid_task(task_id):
            raise ValueError(
                f"Task {task_id} is not valid (valid range: 0-{self.num_tasks - 1})"
            )

        self.current_task = task_id
        self.seen_tasks.add(task_id)
        self._load_task(task_id)

    def get_cumulative_dataset(self, up_to_task: Optional[int] = None) -> Dataset:
        """
        Get a dataset containing data from all tasks seen so far.

        Args:
            up_to_task: Include tasks up to this task ID (inclusive).
                        If None, include all seen tasks.

        Returns:
            Combined dataset for all qualifying seen tasks: a single
            TaskDataset when only one task qualifies, otherwise a
            ConcatDataset ordered by task ID.

        Raises:
            ValueError: If no tasks have been seen yet
        """
        tasks_to_include = set(self.seen_tasks)

        if up_to_task is not None:
            tasks_to_include = {t for t in tasks_to_include if t <= up_to_task}

        if not tasks_to_include:
            raise ValueError("No tasks have been seen yet.")

        # Avoid the ConcatDataset wrapper for the single-task case.
        if len(tasks_to_include) == 1:
            return self.get_task_dataset(next(iter(tasks_to_include)))

        datasets = [
            self.get_task_dataset(task_id) for task_id in sorted(tasks_to_include)
        ]
        return ConcatDataset(datasets)

    def set_transforms(
        self,
        transform: Optional[Callable] = None,
        target_transform: Optional[Callable] = None,
    ) -> None:
        """
        Set transforms to be applied to all task datasets.

        Note that only datasets already loaded are updated here; nothing in
        this base class applies the common transforms to datasets prepared
        after this call.

        Args:
            transform: Data transformation function
            target_transform: Target transformation function
        """
        self.common_transform = transform
        self.common_target_transform = target_transform

        # Propagate to already-loaded datasets (keys are not needed).
        for dataset in self.task_datasets.values():
            dataset.transform = transform
            dataset.target_transform = target_transform

    def get_class_to_task_mapping(self) -> Dict[int, int]:
        """
        Get mapping from class IDs to task IDs.

        Loads every task dataset through the cache, but deliberately does
        not mark tasks as seen: building this mapping is a metadata query,
        not training progress.

        Returns:
            Dictionary mapping class IDs to their respective task IDs
        """
        mapping: Dict[int, int] = {}
        for task_id in range(self.num_tasks):
            dataset = self._load_task(task_id)
            for class_id in dataset.task_classes:
                mapping[class_id] = task_id
        return mapping

    def split_task_dataset(
        self, task_id: int, train_ratio: float = 0.8, random_seed: int = 42
    ) -> Tuple[TaskDataset, TaskDataset]:
        """
        Randomly split a task dataset into training and validation sets.

        Args:
            task_id: Task identifier
            train_ratio: Ratio of data to use for training
            random_seed: Random seed for reproducibility

        Returns:
            Tuple containing (train_dataset, val_dataset)
        """
        dataset = self.get_task_dataset(task_id)
        total_size = len(dataset)
        train_size = int(train_ratio * total_size)

        # Use a local generator so torch's global RNG state is untouched,
        # and actually shuffle: the seed alone does not randomize a plain
        # head/tail slice.
        generator = torch.Generator().manual_seed(random_seed)
        permutation = torch.randperm(total_size, generator=generator).tolist()

        train_indices = permutation[:train_size]
        val_indices = permutation[train_size:]

        return dataset.get_subset(train_indices), dataset.get_subset(val_indices)


class BaseTransform(ABC):
    """
    Abstract interface for data transformations in continual learning.

    Concrete transforms implement ``__call__``; instances can therefore be
    used anywhere a plain callable transform is expected.
    """

    @abstractmethod
    def __call__(self, x: Any) -> Any:
        """
        Apply the transformation to the input.

        Args:
            x: Input data

        Returns:
            Transformed data
        """
        ...

    def __repr__(self) -> str:
        """Return the concrete class name as the string representation."""
        return self.__class__.__name__


class CompositeTransform(BaseTransform):
    """
    Chain several transforms into a single callable.

    Args:
        transforms: Transformations to apply in sequence
    """

    def __init__(self, transforms: List[BaseTransform]) -> None:
        self.transforms = transforms

    def __call__(self, x: Any) -> Any:
        """
        Feed the input through every transform in order.

        Args:
            x: Input data

        Returns:
            Transformed data
        """
        result = x
        for step in self.transforms:
            result = step(result)
        return result

    def __repr__(self) -> str:
        """Return a representation listing the contained transforms."""
        inner = ", ".join(repr(step) for step in self.transforms)
        return f"{self.__class__.__name__}([{inner}])"
