"""
Split ImageNet dataset implementation for continual learning.
"""

import os
from typing import Dict, List, Tuple, Optional, Union, Callable
import numpy as np
from PIL import Image
import json

import torch
import torchvision
from torchvision import transforms
from torch.utils.data import Dataset

from .base import ContinualDataset, TaskDataset
from continuallearning.registry import DATASETS


class ImageNetSubset(Dataset):
    """
    ImageNet subset for handling a subset of the full ImageNet dataset.

    Expects the standard ImageNet directory layout
    ``root/split/<synset>/<image>`` where ``<synset>`` is a WordNet ID
    such as ``n02084071``.

    Args:
        root (str): Root directory of ImageNet data
        split (str): 'train' or 'val' split
        class_subset (Optional[List[int]]): List of class indices to include.
            If None, all classes found under the split directory are used.
        transform (Optional[Callable]): Optional transform to be applied to samples
    """

    def __init__(
        self,
        root: str,
        split: str = "train",
        class_subset: Optional[List[int]] = None,
        transform: Optional[Callable] = None,
    ):
        self.root = root
        self.split = split
        self.class_subset = class_subset
        self.transform = transform

        # ImageNet data directory structure: root/split/class/image.JPEG
        # class is named as "n02084071" etc.
        self.split_dir = os.path.join(root, split)

        # Synset folder name -> integer class index.
        self.synsets = self._load_synsets()

        # Flat list of (image_path, class_idx), restricted to class_subset.
        self.samples = self._collect_samples()

    def _load_synsets(self) -> Dict[str, int]:
        """Load (or create and cache) the synset -> class index mapping.

        The mapping is persisted to ``root/synsets.json`` so class indices
        stay stable across runs (and across the train/val splits, which
        must agree on the same indices).
        """
        synset_path = os.path.join(self.root, "synsets.json")

        if os.path.exists(synset_path):
            with open(synset_path, "r") as f:
                return json.load(f)

        # No cached mapping: derive one from the folders present in this
        # split.  Sorting makes the index assignment deterministic.
        class_dirs = sorted(
            d
            for d in os.listdir(self.split_dir)
            if os.path.isdir(os.path.join(self.split_dir, d))
        )
        synsets = {d: i for i, d in enumerate(class_dirs)}

        # Cache for future use.
        with open(synset_path, "w") as f:
            json.dump(synsets, f)

        return synsets

    def _collect_samples(self) -> List[Tuple[str, int]]:
        """Collect (image_path, class_idx) tuples for the selected classes."""
        samples: List[Tuple[str, int]] = []

        # Set gives O(1) membership tests; None means "keep every class".
        subset = None if self.class_subset is None else set(self.class_subset)

        # Iterate the directories that actually exist on disk (rather than
        # reconstructing paths from the mapping), and sort both directory
        # and file listings so sample order is deterministic regardless of
        # filesystem enumeration order.
        for class_dir in sorted(os.listdir(self.split_dir)):
            class_path = os.path.join(self.split_dir, class_dir)
            if not os.path.isdir(class_path):
                continue

            # Skip folders absent from the synset mapping or outside the
            # requested subset.
            class_idx = self.synsets.get(class_dir)
            if class_idx is None:
                continue
            if subset is not None and class_idx not in subset:
                continue

            for img_file in sorted(os.listdir(class_path)):
                if img_file.lower().endswith((".jpeg", ".jpg", ".png")):
                    samples.append((os.path.join(class_path, img_file), class_idx))

        return samples

    def __len__(self) -> int:
        return len(self.samples)

    def __getitem__(self, idx: int) -> Tuple[torch.Tensor, int]:
        """
        Get a sample from the dataset.

        Args:
            idx (int): Index

        Returns:
            Tuple[torch.Tensor, int]: (image, target).  Note the image is a
            PIL Image (not a tensor) when no transform is set.
        """
        img_path, target = self.samples[idx]

        # Open inside a context manager so the handle is closed even if
        # decoding fails; convert to RGB to normalize the channel count.
        with open(img_path, "rb") as f:
            img = Image.open(f).convert("RGB")

        if self.transform is not None:
            img = self.transform(img)

        return img, target


@DATASETS.register()
class SplitImageNet(ContinualDataset):
    """
    Split ImageNet dataset for continual learning.

    Creates tasks by splitting ImageNet classes into multiple tasks.

    Args:
        data_root (str): Root directory for ImageNet dataset
        n_tasks (int): Number of tasks to split the dataset into
        subset_size (int): Number of classes to use from ImageNet (max 1000)
        task_shuffle (bool): Whether to shuffle the order of classes across tasks
        per_task_classes (Optional[int]): Number of classes per task. If None, computed from n_tasks.
        train_transform (Optional[Callable]): Transform for training data
        test_transform (Optional[Callable]): Transform for test data
        seed (int): Random seed for reproducibility
    """

    def __init__(
        self,
        data_root: str = "./data/imagenet",
        n_tasks: int = 10,
        subset_size: int = 100,
        task_shuffle: bool = False,
        per_task_classes: Optional[int] = None,
        train_transform: Optional[Callable] = None,
        test_transform: Optional[Callable] = None,
        seed: int = 42,
    ):
        self.data_root = data_root
        self.subset_size = min(subset_size, 1000)  # ImageNet has 1000 classes

        # Default to the standard ImageNet augmentation pipeline (224px
        # random crop + flip, ImageNet mean/std normalization).
        if train_transform is None:
            train_transform = transforms.Compose(
                [
                    transforms.RandomResizedCrop(224),
                    transforms.RandomHorizontalFlip(),
                    transforms.ToTensor(),
                    transforms.Normalize(
                        mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
                    ),
                ]
            )

        # Default eval pipeline: resize + center crop, same normalization.
        if test_transform is None:
            test_transform = transforms.Compose(
                [
                    transforms.Resize(256),
                    transforms.CenterCrop(224),
                    transforms.ToTensor(),
                    transforms.Normalize(
                        mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
                    ),
                ]
            )

        # Base class is expected to call _setup_datasets().
        super().__init__(
            n_tasks=n_tasks,
            task_shuffle=task_shuffle,
            per_task_classes=per_task_classes,
            train_transform=train_transform,
            test_transform=test_transform,
            seed=seed,
        )

    def _setup_datasets(self) -> None:
        """Set up ImageNet datasets for each task.

        Splits the selected class subset into ``n_tasks`` contiguous groups
        and builds a train/test TaskDataset pair per task.
        """
        # Generate class order for subset selection.
        class_order = list(range(1000))

        # NOTE(review): this uses the global numpy RNG state; presumably the
        # base class seeds it with `seed` before calling this method —
        # confirm, otherwise shuffled task splits are not reproducible.
        if self.task_shuffle:
            np.random.shuffle(class_order)

        # Select subset of classes.
        subset_classes = class_order[: self.subset_size]

        if self.per_task_classes is None:
            # Integer division: if subset_size is not divisible by n_tasks,
            # the trailing remainder classes are never assigned to a task.
            self.per_task_classes = self.subset_size // self.n_tasks

        # Load each base split exactly once (no transform here; the
        # per-task transform is applied by TaskDataset below).
        train_dataset = ImageNetSubset(
            root=self.data_root,
            split="train",
            class_subset=subset_classes,
        )

        test_dataset = ImageNetSubset(
            root=self.data_root,
            split="val",
            class_subset=subset_classes,
        )

        # Split classes into tasks.
        for task_id in range(self.n_tasks):
            start_idx = task_id * self.per_task_classes
            end_idx = min(start_idx + self.per_task_classes, self.subset_size)

            # Classes belonging to this task.
            task_classes = subset_classes[start_idx:end_idx]
            self.task_classes[task_id] = task_classes

            # Update class -> task mapping.
            for class_id in task_classes:
                self.class_to_task[class_id] = task_id

            # Map original class IDs to task-local IDs (0..len-1).
            target_map = {class_id: i for i, class_id in enumerate(task_classes)}

            # Filter on the (path, label) metadata in `dataset.samples`
            # rather than enumerating the Dataset itself: iterating the
            # Dataset would call __getitem__ and decode every image from
            # disk once per task.  A set makes the membership test O(1).
            task_class_set = set(task_classes)
            train_indices = [
                i for i, (_, y) in enumerate(train_dataset.samples)
                if y in task_class_set
            ]
            test_indices = [
                i for i, (_, y) in enumerate(test_dataset.samples)
                if y in task_class_set
            ]

            # Wrap the shared base datasets with per-task views that apply
            # the split-appropriate transform and remapped targets.
            self.task_data[task_id] = {
                "train": TaskDataset(
                    dataset=train_dataset,
                    indices=train_indices,
                    transform=self.train_transform,
                    target_map=target_map,
                    task_id=task_id,
                ),
                "test": TaskDataset(
                    dataset=test_dataset,
                    indices=test_indices,
                    transform=self.test_transform,
                    target_map=target_map,
                    task_id=task_id,
                ),
            }