"""
Memory replay methods for continual learning.

This module implements experience replay-based approaches for continual learning,
which mitigate catastrophic forgetting by storing and replaying examples from
previous tasks during training on new tasks.
"""

from typing import Dict, List, Optional, Union, Any, Tuple
import torch
import random
from torch.utils.data import DataLoader

from .base import ReplayMethod


class MemoryBuffer:
    """
    Buffer for storing samples from previous tasks.

    Samples are grouped by task and the total count is capped at
    ``memory_size``; when the cap is exceeded, the oldest samples of
    over-represented tasks are evicted to keep an (approximately)
    balanced per-task distribution.
    """

    def __init__(self, memory_size: int = 200):
        """
        Initialize memory buffer.

        Args:
            memory_size: Maximum number of samples to store across all tasks
        """
        self.memory_size = memory_size
        # task_id -> {sample_idx -> {"x": input tensor, "y": target tensor}}
        self.buffer: Dict[int, Dict[int, Dict[str, torch.Tensor]]] = {}
        # task_id -> next insertion index; never decremented, so sample
        # indices double as insertion order for oldest-first eviction.
        self.task_sample_count: Dict[int, int] = {}
        self.total_samples = 0

    def add_sample(
        self, sample: Tuple[torch.Tensor, torch.Tensor], task_id: int
    ) -> None:
        """
        Add a sample to the memory buffer.

        The tensors are detached and cloned so the buffer neither keeps
        autograd graphs alive nor aliases caller memory.

        Args:
            sample: Tuple of (input_tensor, target_tensor)
            task_id: Task identifier
        """
        # Initialize task in buffer if needed
        if task_id not in self.buffer:
            self.buffer[task_id] = {}
            self.task_sample_count[task_id] = 0

        # Get next sample index for this task
        sample_idx = self.task_sample_count[task_id]
        self.task_sample_count[task_id] += 1

        # Store the sample
        x_tensor, y_tensor = sample
        self.buffer[task_id][sample_idx] = {
            "x": x_tensor.detach().clone(),
            "y": y_tensor.detach().clone(),
        }
        self.total_samples += 1

        # Evict if over capacity
        self._enforce_memory_limit()

    def _enforce_memory_limit(self) -> None:
        """
        Ensure the buffer doesn't exceed ``memory_size``.

        Targets an even per-task allocation (remainder spread over the
        first tasks in insertion order) and evicts the oldest samples of
        any task above its target. Tasks already below their target are
        left untouched, so the final distribution is balanced only
        approximately; the total is always brought back under the cap.
        """
        if self.total_samples <= self.memory_size:
            return

        # How many samples must go to get back under the cap
        samples_to_remove = self.total_samples - self.memory_size

        # Even split of the capacity across tasks, remainder to the first few
        num_tasks = len(self.buffer)
        samples_per_task = self.memory_size // num_tasks
        remainder = self.memory_size % num_tasks

        target_counts = {}
        for i, task_id in enumerate(self.buffer):
            target_counts[task_id] = samples_per_task + (1 if i < remainder else 0)

        # Trim over-quota tasks, oldest samples first
        for task_id, samples in self.buffer.items():
            current_count = len(samples)
            if current_count > target_counts[task_id]:
                to_remove = current_count - target_counts[task_id]

                # Lowest indices are the oldest insertions
                for idx in sorted(samples.keys())[:to_remove]:
                    del samples[idx]

                self.total_samples -= to_remove
                samples_to_remove -= to_remove

            # Enough removed to satisfy the cap; stop early
            if samples_to_remove <= 0:
                break

    def get_samples(
        self, task_id: Optional[int] = None, n_samples: Optional[int] = None
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Get samples from memory as tensors.

        Args:
            task_id: If provided, return samples only from this task
            n_samples: Number of samples to return; if None returns all samples

        Returns:
            Tuple of (inputs_tensor, targets_tensor), concatenated along the
            batch dimension. Empty tensors when no samples are available.
        """
        # Determine which tasks to sample from
        if task_id is not None:
            tasks_to_sample = [task_id] if task_id in self.buffer else []
        else:
            tasks_to_sample = list(self.buffer.keys())

        if not tasks_to_sample:
            return torch.Tensor(), torch.Tensor()

        # Per-task quotas: distribute n_samples evenly and hand the remainder
        # out one task at a time. Bug fix: the previous floor division gave
        # every task a quota of 0 whenever n_samples < number of tasks, so a
        # small request returned empty tensors despite stored samples.
        quotas: Dict[int, int] = {}
        if n_samples is not None:
            base, extra = divmod(n_samples, len(tasks_to_sample))
            for i, t_id in enumerate(tasks_to_sample):
                quotas[t_id] = base + (1 if i < extra else 0)

        # Collect samples task by task
        all_x = []
        all_y = []

        for t_id in tasks_to_sample:
            samples = list(self.buffer[t_id].values())

            # Randomly subsample this task down to its quota if needed
            if n_samples is not None and quotas[t_id] < len(samples):
                samples = random.sample(samples, quotas[t_id])

            for sample in samples:
                all_x.append(sample["x"])
                all_y.append(sample["y"])

        if not all_x:
            return torch.Tensor(), torch.Tensor()

        # Concatenate along the batch dimension (each stored sample already
        # carries a leading batch axis of size 1)
        x_tensor = torch.cat(all_x, dim=0)
        y_tensor = torch.cat(all_y, dim=0)

        # Shuffle and trim in case quotas over-delivered
        if n_samples is not None and x_tensor.size(0) > n_samples:
            indices = torch.randperm(x_tensor.size(0))[:n_samples]
            x_tensor = x_tensor[indices]
            y_tensor = y_tensor[indices]

        return x_tensor, y_tensor

    def get_sample_count(self, task_id: Optional[int] = None) -> int:
        """
        Get number of samples stored.

        Args:
            task_id: If provided, return count only for this task

        Returns:
            Number of samples
        """
        if task_id is not None:
            return len(self.buffer.get(task_id, {}))
        else:
            return self.total_samples


class ExperienceReplay(ReplayMethod):
    """
    Experience replay method for continual learning.

    Stores examples from previous tasks in a ``MemoryBuffer`` and mixes them
    with current-task examples during training to mitigate catastrophic
    forgetting.

    Args:
        memory_size: Maximum number of samples to store
        replay_ratio: Ratio of replay samples to current task samples
        **kwargs: Additional arguments forwarded to ``ReplayMethod``
    """

    def __init__(self, memory_size: int = 200, replay_ratio: float = 0.5, **kwargs):
        """
        Initialize experience replay method.

        Args:
            memory_size: Maximum number of samples to store
            replay_ratio: Ratio of replay samples to current task samples
            **kwargs: Additional arguments forwarded to ``ReplayMethod``
        """
        super().__init__(memory_size=memory_size, **kwargs)
        self.replay_ratio = replay_ratio
        self.memory_buffer = MemoryBuffer(memory_size=memory_size)

    def compute_loss(
        self,
        model: torch.nn.Module,
        criterion: torch.nn.Module,
        output: Any,
        targets: torch.Tensor,
        task_id: int,
        **kwargs,
    ) -> Dict[str, torch.Tensor]:
        """
        Compute the loss incorporating replay samples.

        For basic experience replay this is just the criterion applied to the
        (possibly replay-augmented) batch; advanced variants would add extra
        loss terms here.

        Args:
            model: The model being trained (unused here; kept for the
                method-family interface)
            criterion: Loss function
            output: Model output; either raw logits or a dict with a
                ``"logits"`` entry
            targets: Target labels
            task_id: Current task ID (unused here)
            **kwargs: Additional arguments

        Returns:
            Dictionary with a single ``"loss"`` entry
        """
        # Extract logits if output is a dictionary
        if isinstance(output, dict) and "logits" in output:
            logits = output["logits"]
        else:
            logits = output

        task_loss = criterion(logits, targets)
        return {"loss": task_loss}

    def update_memory(self, task_id: int, dataloader: DataLoader) -> None:
        """
        Update memory buffer with samples from the current task.

        Stores up to ``memory_size // (task_id + 1)`` samples, taken in
        dataloader order (rely on the loader's own shuffling for randomness).

        Args:
            task_id: Task identifier (assumed to be contiguous from 0, so
                ``task_id + 1`` counts the tasks seen so far)
            dataloader: DataLoader with task data
        """
        # Even share of the buffer for each task seen so far
        num_tasks = task_id + 1  # Including current task
        samples_per_task = self.memory_size // num_tasks

        samples_stored = 0

        for batch in dataloader:
            # Support both (inputs, targets, ...) tuples and dict batches
            if isinstance(batch, (list, tuple)) and len(batch) >= 2:
                inputs, targets = batch[0], batch[1]
            else:
                inputs, targets = batch["inputs"], batch["targets"]

            # Store individual samples, each with a leading batch axis of 1
            for i in range(inputs.size(0)):
                if samples_stored >= samples_per_task:
                    break

                x = inputs[i].unsqueeze(0)
                y = targets[i].unsqueeze(0)

                self.memory_buffer.add_sample((x, y), task_id)
                samples_stored += 1

            if samples_stored >= samples_per_task:
                break

    def on_batch_start(self, batch: Any, batch_idx: int) -> Any:
        """
        Augment the batch with replay samples at the start of batch training.

        Bug fix: the previous implementation rebound only the local ``batch``
        variable and returned nothing, so the augmented batch was silently
        discarded and replay never reached the training loop. The batch is
        now returned; callers that ignore the return value keep the old
        (no-op) behavior, so this is backward compatible.

        Args:
            batch: The current batch data
            batch_idx: Index of the current batch (unused)

        Returns:
            The batch, augmented with replay samples when past task 0.
        """
        # self.seen_tasks / self.current_task are assumed to be maintained by
        # the ReplayMethod base class -- not visible in this module.
        if self.seen_tasks and self.current_task > 0:
            batch = self._augment_batch_with_replay(batch)
        return batch

    def _augment_batch_with_replay(self, batch: Any) -> Any:
        """
        Augment the current batch with replay samples.

        Args:
            batch: Current batch; an (inputs, targets, ...) sequence or a
                dict with ``"inputs"``/``"targets"`` keys

        Returns:
            Augmented batch in the same format, or the original batch when
            the format is unknown or no replay samples are available
        """
        # Extract inputs and targets from either supported batch format
        if isinstance(batch, (list, tuple)) and len(batch) >= 2:
            inputs, targets = batch[0], batch[1]
            task_ids = batch[2] if len(batch) > 2 else None
        elif isinstance(batch, dict):
            inputs = batch["inputs"]
            targets = batch["targets"]
            task_ids = batch.get("task_ids", None)
        else:
            return batch  # Can't augment unknown format

        # Replay batch size is proportional to the current batch
        current_batch_size = inputs.size(0)
        replay_batch_size = int(current_batch_size * self.replay_ratio)

        if replay_batch_size <= 0:
            return batch

        # Get replay samples from previous tasks only
        replay_x, replay_y = self.get_memory_samples(
            batch_size=replay_batch_size, exclude_task=self.current_task
        )

        if replay_x.numel() == 0 or replay_x.size(0) == 0:
            return batch  # No replay samples

        # Append replay samples after the current-task samples
        combined_x = torch.cat([inputs, replay_x], dim=0)
        combined_y = torch.cat([targets, replay_y], dim=0)

        # Rebuild the batch in its original format
        if isinstance(batch, (list, tuple)):
            result = [combined_x, combined_y]
            # Preserve any extra elements (e.g. task ids) unchanged; note
            # their length no longer matches the augmented tensors.
            for i in range(2, len(batch)):
                result.append(batch[i])
            # NOTE(review): type(batch)(result) would fail for namedtuples,
            # whose constructor takes positional fields -- confirm callers
            # only pass plain lists/tuples.
            return type(batch)(result)
        else:
            # Dictionary format: shallow-copy and overwrite the tensors
            result = dict(batch)
            result["inputs"] = combined_x
            result["targets"] = combined_y
            return result

    def get_memory_samples(
        self, batch_size: int = 64, exclude_task: Optional[int] = None
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Get a batch of examples from memory for replay.

        Args:
            batch_size: Number of examples to retrieve
            exclude_task: Optional task to exclude from replay (usually the
                current task)

        Returns:
            Tuple of (inputs_tensor, targets_tensor); empty tensors when no
            eligible tasks or samples exist
        """
        # Sample from every previously seen task except the excluded one
        tasks_to_sample = [t for t in self.seen_tasks if t != exclude_task]

        if not tasks_to_sample:
            return torch.Tensor(), torch.Tensor()

        # Balanced share of the batch per task (floor division; small
        # batch_size relative to the task count may under-deliver)
        samples_per_task = batch_size // len(tasks_to_sample)

        all_x = []
        all_y = []

        for t in tasks_to_sample:
            x, y = self.memory_buffer.get_samples(task_id=t, n_samples=samples_per_task)
            if x.numel() > 0:
                all_x.append(x)
                all_y.append(y)

        if not all_x:
            return torch.Tensor(), torch.Tensor()

        # Combine samples from all tasks along the batch dimension
        x = torch.cat(all_x, dim=0)
        y = torch.cat(all_y, dim=0)

        # Randomly trim so we never exceed the requested batch size
        if x.size(0) > batch_size:
            indices = torch.randperm(x.size(0))[:batch_size]
            x = x[indices]
            y = y[indices]

        return x, y
