"""
Data module for continual learning scenarios.

This module provides data handling functionality specifically designed for continual learning,
enabling task-by-task training and evaluation with proper data separation.
"""

from typing import Dict, List, Optional, Any, Union, Protocol, runtime_checkable
import torch
from torch.utils.data import DataLoader, Dataset
import pytorch_lightning as pl
import logging

from continuallearning.registry import TaskAdaptProtocol


class ContinualDataModule(pl.LightningDataModule, TaskAdaptProtocol):
    """
    PyTorch Lightning data module for continual learning.

    This class manages data operations for continual learning scenarios:
    - Handles multiple task datasets
    - Provides task-specific data loaders
    - Supports switching between tasks during training
    - Implements TaskAdaptProtocol for compatibility with the task boundary system

    Args:
        task_datasets (Dict[int, Dict[str, Dataset]]): Dictionary mapping task IDs to
            dataset dictionaries containing 'train', 'val', and 'test' splits
        batch_size (int): Batch size for data loaders
        num_workers (int): Number of workers for data loading
        pin_memory (bool): Whether to pin memory for faster GPU transfer
        shuffle (bool): Whether to shuffle training data
    """

    def __init__(
        self,
        task_datasets: Dict[int, Dict[str, Dataset]],
        batch_size: int = 32,
        num_workers: int = 4,
        pin_memory: bool = True,
        shuffle: bool = True,
    ):
        super().__init__()
        # Fail fast on malformed input before storing anything.
        self._validate_task_datasets(task_datasets)

        self.task_datasets = task_datasets
        self.batch_size = batch_size
        self.num_workers = num_workers
        self.pin_memory = pin_memory
        self.shuffle = shuffle

        # Current task tracking; callers advance it via set_task().
        self.current_task_id = 0
        self.num_tasks = len(task_datasets)

        # NOTE(review): assigning `self.logger` may collide with attributes
        # pytorch_lightning manages on its modules — confirm against the
        # installed Lightning version before renaming or relying on it.
        self.logger = logging.getLogger(__name__)

    def _validate_task_datasets(
        self, task_datasets: Dict[int, Dict[str, Dataset]]
    ) -> None:
        """
        Validate the task datasets structure.

        Args:
            task_datasets: Dictionary mapping task IDs to dataset dictionaries

        Raises:
            ValueError: If the task datasets format is invalid
        """
        if not isinstance(task_datasets, dict) or not task_datasets:
            raise ValueError("task_datasets must be a non-empty dictionary")

        for task_id, splits in task_datasets.items():
            if not isinstance(splits, dict):
                raise ValueError(f"Task {task_id} data must be a dictionary of splits")

            # 'test' is optional (test_dataloader falls back to 'val'),
            # but 'train' and 'val' are mandatory.
            for required_split in ["train", "val"]:
                if required_split not in splits:
                    raise ValueError(
                        f"Task {task_id} missing required '{required_split}' split"
                    )

            # Verify all splits are Dataset objects
            for split_name, dataset in splits.items():
                if not isinstance(dataset, Dataset):
                    raise ValueError(
                        f"Split '{split_name}' in task {task_id} must be a Dataset object"
                    )

    def _check_task_id(self, task_id: int) -> None:
        """
        Raise ValueError if *task_id* is not a known task.

        Args:
            task_id (int): Task identifier to validate

        Raises:
            ValueError: If the task ID is not found
        """
        if task_id not in self.task_datasets:
            available_tasks = sorted(self.task_datasets.keys())
            raise ValueError(
                f"Task {task_id} not found in available tasks: {available_tasks}"
            )

    def _make_dataloader(self, dataset: Dataset, shuffle: bool) -> DataLoader:
        """
        Build a DataLoader over *dataset* with this module's shared settings.

        Args:
            dataset (Dataset): Dataset to wrap
            shuffle (bool): Whether this loader shuffles

        Returns:
            DataLoader: Configured data loader
        """
        return DataLoader(
            dataset,
            batch_size=self.batch_size,
            shuffle=shuffle,
            num_workers=self.num_workers,
            pin_memory=self.pin_memory,
        )

    def set_task(self, task_id: int) -> None:
        """
        Switch to a specific task (implements TaskAdaptProtocol).

        Args:
            task_id (int): Task identifier

        Raises:
            ValueError: If the task ID is not found
        """
        self._check_task_id(task_id)
        self.current_task_id = task_id
        # Lazy %-formatting: only rendered if INFO is enabled.
        self.logger.info("Data module switched to task %s", task_id)

    def train_dataloader(self) -> DataLoader:
        """
        Get the training data loader for the current task.

        Returns:
            DataLoader: Training data loader

        Raises:
            KeyError: If the current task or its 'train' split is missing
            RuntimeError: If the DataLoader cannot be constructed
        """
        try:
            return self._make_dataloader(
                self.task_datasets[self.current_task_id]["train"], self.shuffle
            )
        except KeyError as e:
            # Chain the cause so the original missing-key traceback survives.
            raise KeyError(
                f"Error creating training dataloader for task {self.current_task_id}: {str(e)}"
            ) from e
        except Exception as e:
            raise RuntimeError(f"Failed to create training dataloader: {str(e)}") from e

    def val_dataloader(self) -> DataLoader:
        """
        Get the validation data loader for the current task.

        Returns:
            DataLoader: Validation data loader

        Raises:
            KeyError: If the current task or its 'val' split is missing
            RuntimeError: If the DataLoader cannot be constructed
        """
        try:
            # Validation data is never shuffled.
            return self._make_dataloader(
                self.task_datasets[self.current_task_id]["val"], False
            )
        except KeyError as e:
            raise KeyError(
                f"Error creating validation dataloader for task {self.current_task_id}: {str(e)}"
            ) from e
        except Exception as e:
            raise RuntimeError(f"Failed to create validation dataloader: {str(e)}") from e

    def test_dataloader(self) -> List[DataLoader]:
        """
        Get test data loaders for all tasks.

        Tasks without a 'test' split fall back to their 'val' split. Tasks whose
        loader cannot be built are logged and skipped (best-effort).

        Returns:
            List[DataLoader]: One test data loader per task, in sorted task order

        Raises:
            RuntimeError: If no test dataloader could be created for any task
        """
        test_dataloaders = []
        for task_id in sorted(self.task_datasets.keys()):
            try:
                if "test" not in self.task_datasets[task_id]:
                    self.logger.warning(
                        "Task %s has no test split, using validation split instead",
                        task_id,
                    )
                    test_split = "val"
                else:
                    test_split = "test"

                test_dataloaders.append(
                    self._make_dataloader(
                        self.task_datasets[task_id][test_split], False
                    )
                )
            except Exception as e:
                # Best-effort: a single broken task should not prevent testing
                # the others; the failure is surfaced via the log.
                self.logger.error(
                    f"Failed to create test dataloader for task {task_id}: {str(e)}"
                )

        if not test_dataloaders:
            raise RuntimeError("No test dataloaders could be created")

        return test_dataloaders

    def get_task_dataloader(self, task_id: int, split: str = "train") -> DataLoader:
        """
        Get a data loader for a specific task and split.

        Args:
            task_id (int): Task identifier
            split (str): Data split ('train', 'val', or 'test')

        Returns:
            DataLoader: Task-specific data loader

        Raises:
            ValueError: If the task or split is not found
            RuntimeError: If the DataLoader cannot be constructed
        """
        self._check_task_id(task_id)

        if split not in self.task_datasets[task_id]:
            available_splits = list(self.task_datasets[task_id].keys())
            raise ValueError(
                f"Split '{split}' not found in task {task_id}. Available splits: {available_splits}"
            )

        # Only the training split is ever shuffled, and only if configured.
        shuffle = self.shuffle and split == "train"

        try:
            return self._make_dataloader(self.task_datasets[task_id][split], shuffle)
        except Exception as e:
            raise RuntimeError(
                f"Failed to create {split} dataloader for task {task_id}: {str(e)}"
            ) from e

    def get_all_task_ids(self) -> List[int]:
        """
        Get all available task IDs.

        Returns:
            List[int]: Sorted list of all task IDs
        """
        return sorted(self.task_datasets.keys())

    def get_num_classes(self, task_id: Optional[int] = None) -> int:
        """
        Get the number of classes for a task.

        Inspection order on the task's 'train' dataset: a `classes` attribute,
        a `num_classes` attribute, then the unique values of a `targets`
        tensor or list.

        Args:
            task_id (Optional[int]): Task ID (uses current task if None)

        Returns:
            int: Number of classes in the task

        Raises:
            ValueError: If the task ID is not found
            NotImplementedError: If the dataset doesn't provide class information
        """
        task_id = self.current_task_id if task_id is None else task_id
        self._check_task_id(task_id)

        # Class information is inferred from the training split only.
        dataset = self.task_datasets[task_id]["train"]

        # Method 1: Check for classes attribute (torchvision-style datasets)
        if hasattr(dataset, "classes"):
            return len(dataset.classes)

        # Method 2: Check for num_classes attribute
        # NOTE(review): assumed to be an int attribute, not a method — confirm
        # against the dataset implementations used with this module.
        if hasattr(dataset, "num_classes"):
            return dataset.num_classes

        # Method 3: Check targets and infer number of classes (if available)
        if hasattr(dataset, "targets"):
            targets = dataset.targets
            if isinstance(targets, torch.Tensor):
                return len(torch.unique(targets))
            elif isinstance(targets, list):
                return len(set(targets))

        # If we reach here, we can't determine the number of classes
        raise NotImplementedError(
            "Dataset doesn't provide class information. Consider extending this method for your dataset."
        )