
from abc import ABC, abstractmethod
from typing import Tuple, Optional, Any, Dict, List
import numpy as np
import torch
import copy
import logging
from torch.utils.data import DataLoader


class MemoryManager(ABC):
    """Abstract contract for episodic (rehearsal) memory in continual learning.

    Implementations decide how exemplars are built, pruned, and exposed as
    new tasks arrive.
    """

    @abstractmethod
    def build_rehearsal_memory(self, data_manager: Any, per_class: int) -> None:
        """Create or refresh the rehearsal buffer for the current task.

        Args:
            data_manager: Provides access to the task's data.
            per_class: Number of exemplars to keep for each class.
        """

    @abstractmethod
    def get_memory(self) -> Optional[Tuple[np.ndarray, np.ndarray]]:
        """Return the buffer contents as ``(data, targets)``, or ``None`` when empty."""

    @abstractmethod
    def get_exemplar_size(self) -> int:
        """Return the total number of exemplars currently stored."""

    @abstractmethod
    def get_samples_per_class(self) -> int:
        """Return how many exemplars are kept for each class."""

    @abstractmethod
    def reduce_exemplar(self, data_manager: Any, m: int) -> None:
        """Shrink every class's exemplar set down to ``m`` samples.

        Args:
            data_manager: Provides access to the data.
            m: Target number of exemplars per class.
        """

    @abstractmethod
    def construct_exemplar(self, data_manager: Any, m: int) -> None:
        """Build exemplar sets for classes introduced by the current task.

        Args:
            data_manager: Provides access to the data.
            m: Number of exemplars to construct per class.
        """


class ExemplarSelector(ABC):
    """Abstract strategy for choosing representative exemplars of one class."""

    @abstractmethod
    def select_exemplars(
        self,
        data: np.ndarray,
        features: np.ndarray,
        num_exemplars: int
    ) -> Tuple[np.ndarray, np.ndarray]:
        """Pick ``num_exemplars`` representatives from a class's samples.

        Args:
            data: Raw samples of a single class.
            features: Feature vectors aligned row-for-row with ``data``.
            num_exemplars: How many exemplars to pick.

        Returns:
            Tuple of (selected raw samples, their feature vectors).
        """


class HerdingExemplarSelector(ExemplarSelector):
    """Herding selection: greedily pick samples whose running average best
    approximates the class mean in feature space (iCaRL-style)."""

    def select_exemplars(
        self,
        data: np.ndarray,
        features: np.ndarray,
        num_exemplars: int
    ) -> Tuple[np.ndarray, np.ndarray]:
        """
        Select exemplars using herding selection - an iterative process to select
        samples that best approximate the class mean in feature space.

        Fix vs. previous version: when ``num_exemplars >= len(data)`` the old
        loop bound ``min(num_exemplars + 1, len(data))`` silently dropped the
        last sample (off-by-one); all available samples are now returned in
        that case. Empty input returns empty arrays instead of warning on
        ``np.mean``.

        Args:
            data: Raw data samples [n, ...]
            features: Feature representations of the data [n, feature_dim]
            num_exemplars: Number of exemplars to select

        Returns:
            Tuple of (selected exemplars, selected features)
        """
        if len(data) == 0:
            # No candidates at all: return empty selections.
            return np.array([]), np.array([])

        class_mean = np.mean(features, axis=0)
        selected_exemplars = []
        selected_features = []

        # Cannot select more exemplars than there are samples.
        num_to_select = min(num_exemplars, len(data))

        for k in range(1, num_to_select + 1):
            # Sum of the feature vectors already selected (0 on the first step).
            S = np.sum(selected_features, axis=0) if selected_features else 0

            # Mean of the selected set if each remaining candidate were added.
            mu_p = (features + S) / k  # [n_remaining, feature_dim]

            # Candidate whose inclusion keeps the running mean closest to the class mean.
            i = np.argmin(np.sqrt(np.sum((class_mean - mu_p) ** 2, axis=1)))

            selected_exemplars.append(np.array(data[i]))
            selected_features.append(np.array(features[i]))

            # Remove the chosen sample so it cannot be selected twice.
            features = np.delete(features, i, axis=0)
            data = np.delete(data, i, axis=0)

        return np.array(selected_exemplars), np.array(selected_features)


class MemoryStrategy(ABC):
    """Abstract policy deciding what stays in memory as classes accumulate."""

    @abstractmethod
    def update_memory(
        self,
        new_data: np.ndarray,
        new_targets: np.ndarray,
        current_memory: Optional[Tuple[np.ndarray, np.ndarray]] = None
    ) -> Tuple[np.ndarray, np.ndarray]:
        """Merge ``new_data`` into memory according to the policy.

        Args:
            new_data: Candidate samples to store.
            new_targets: Labels aligned with ``new_data``.
            current_memory: Existing ``(data, targets)`` buffer, if any.

        Returns:
            The updated ``(data, targets)`` buffer.
        """

    @abstractmethod
    def get_memory_budget(self, total_classes: int) -> int:
        """Return the exemplar allowance for ``total_classes`` seen classes.

        NOTE(review): implementations currently disagree on whether this is a
        total count or a per-class quota — confirm with callers before relying
        on one interpretation.

        Args:
            total_classes: Number of classes observed so far.

        Returns:
            The exemplar allowance under this policy.
        """


class FixedMemoryStrategy(MemoryStrategy):
    """Keeps at most a fixed number of exemplars for every class seen."""

    def __init__(self, memory_per_class: int):
        """
        Args:
            memory_per_class: Fixed number of samples to keep per class.
        """
        self.memory_per_class = memory_per_class

    def update_memory(
        self,
        new_data: np.ndarray,
        new_targets: np.ndarray,
        current_memory: Optional[Tuple[np.ndarray, np.ndarray]] = None
    ) -> Tuple[np.ndarray, np.ndarray]:
        """Pool old memory with new data and cap every class at the fixed quota.

        Args:
            new_data: Samples to add.
            new_targets: Labels aligned with ``new_data``.
            current_memory: Existing ``(data, targets)`` buffer, if any.

        Returns:
            The updated ``(data, targets)`` buffer.
        """
        # First call: nothing to merge, store the new data as-is.
        if current_memory is None:
            return new_data, new_targets

        old_data, old_targets = current_memory

        # Pool the existing buffer with the incoming batch.
        pooled_data = new_data if len(old_data) == 0 else np.concatenate((old_data, new_data))
        pooled_targets = new_targets if len(old_targets) == 0 else np.concatenate((old_targets, new_targets))

        # Per class, keep the first `memory_per_class` pooled samples.
        kept_data: List[np.ndarray] = []
        kept_targets: List[np.ndarray] = []
        for label in np.unique(pooled_targets):
            mask = pooled_targets == label
            quota = min(self.memory_per_class, int(mask.sum()))
            kept_data.append(pooled_data[mask][:quota])
            kept_targets.append(pooled_targets[mask][:quota])

        if not kept_data:
            return np.array([]), np.array([])
        return np.concatenate(kept_data), np.concatenate(kept_targets)

    def get_memory_budget(self, total_classes: int) -> int:
        """Return the total budget: ``memory_per_class * total_classes``.

        Args:
            total_classes: Total number of classes.

        Returns:
            Total number of exemplars allowed.
        """
        return self.memory_per_class * total_classes


class DynamicMemoryStrategy(MemoryStrategy):
    """Memory strategy that splits a fixed total budget evenly among all seen classes."""

    def __init__(self, total_memory_size: int):
        """
        Initialize dynamic memory strategy.

        Args:
            total_memory_size: Total memory budget to distribute among classes.
        """
        self.total_memory_size = total_memory_size

    def update_memory(
        self,
        new_data: np.ndarray,
        new_targets: np.ndarray,
        current_memory: Optional[Tuple[np.ndarray, np.ndarray]] = None
    ) -> Tuple[np.ndarray, np.ndarray]:
        """
        Merge new data into memory, then trim every class to an equal share
        of the total budget.

        Fix vs. previous version: the budget is now also enforced on the very
        first call (``current_memory is None``), which previously returned all
        new samples untrimmed and could exceed the fixed budget; an empty
        input no longer raises ``ZeroDivisionError``.

        Args:
            new_data: New data to add
            new_targets: Targets for new data
            current_memory: Current memory tuple (data, targets)

        Returns:
            Updated memory (data, targets)
        """
        if current_memory is None:
            combined_data, combined_targets = new_data, new_targets
        else:
            memory_data, memory_targets = current_memory
            combined_data = np.concatenate((memory_data, new_data)) if len(memory_data) > 0 else new_data
            combined_targets = np.concatenate((memory_targets, new_targets)) if len(memory_targets) > 0 else new_targets

        unique_classes = np.unique(combined_targets)
        num_classes = len(unique_classes)
        if num_classes == 0:
            # Nothing to store; also avoids dividing the budget by zero.
            return combined_data, combined_targets

        # Distribute the fixed budget evenly across all classes seen so far.
        samples_per_class = self.total_memory_size // num_classes

        kept_data: List[np.ndarray] = []
        kept_targets: List[np.ndarray] = []
        for cls in unique_classes:
            cls_mask = combined_targets == cls
            cls_data = combined_data[cls_mask]
            cls_targets = combined_targets[cls_mask]

            # A class may hold fewer samples than its share allows.
            samples_to_keep = min(samples_per_class, len(cls_data))
            kept_data.append(cls_data[:samples_to_keep])
            kept_targets.append(cls_targets[:samples_to_keep])

        return np.concatenate(kept_data), np.concatenate(kept_targets)

    def get_memory_budget(self, total_classes: int) -> int:
        """
        Get samples per class based on the total memory budget.

        NOTE(review): the ``MemoryStrategy`` interface documents this as a
        *total* budget, but this implementation returns a *per-class* quota
        (original behavior preserved) — confirm against callers.

        Args:
            total_classes: Total number of classes seen so far

        Returns:
            Samples per class (``total_memory_size // total_classes``);
            0 when no classes have been seen yet.
        """
        if total_classes == 0:
            return 0
        return self.total_memory_size // total_classes


class ClassMeanManager(ABC):
    """Abstract interface for tracking per-class mean feature representations."""

    @abstractmethod
    def compute_class_means(self, data_manager: Any) -> torch.Tensor:
        """Recompute the class means from data.

        Args:
            data_manager: Provides access to the data.

        Returns:
            Tensor of class means with shape [num_classes, feature_dim].
        """

    @abstractmethod
    def get_class_means(self) -> torch.Tensor:
        """Return the currently stored class means."""

    @abstractmethod
    def update_class_means(self, class_means: torch.Tensor) -> None:
        """Replace the stored class means with ``class_means``."""


class StandardClassMeanManager(ClassMeanManager):
    """Computes and caches per-class mean feature vectors (e.g. for NME classification).

    Feature vectors are L2-normalized before averaging and the resulting mean
    is L2-normalized again.
    """

    def __init__(self, feature_extractor: Any, feature_dim: int, total_classes: int, device: str = "cuda"):
        """
        Initialize class mean manager.

        Args:
            feature_extractor: Callable mapping a batch of inputs to feature tensors.
            feature_dim: Dimension of feature vectors.
            total_classes: Total number of classes whose means are tracked.
            device: Device the inputs are moved to before feature extraction.
        """
        self.feature_extractor = feature_extractor
        self.feature_dim = feature_dim
        self.device = device
        # One row per class; rows stay zero until their mean is computed.
        self.class_means = torch.zeros((total_classes, feature_dim))
        self.EPSILON = 1e-8  # Small constant to avoid division by zero

    def compute_class_means(self, data_manager: Any, batch_size: int = 64) -> torch.Tensor:
        """
        Compute (and store) the normalized mean feature vector of every class.

        Classes with no training samples are skipped and keep their previous mean.

        Args:
            data_manager: Data manager for accessing class data (must expose
                ``get_dataset``).
            batch_size: Batch size for processing.

        Returns:
            Tensor of class means [num_classes, feature_dim].
        """
        for class_idx in range(self.class_means.shape[0]):
            # Dataset restricted to this single class, in eval ("test") mode.
            class_dataset = data_manager.get_dataset(
                np.arange(class_idx, class_idx + 1),
                source="train",
                mode="test"
            )

            if len(class_dataset) == 0:
                continue

            class_loader = DataLoader(
                class_dataset,
                batch_size=batch_size,
                shuffle=False,
                num_workers=4
            )

            vectors, _ = self._extract_vectors(class_loader)
            self.class_means[class_idx, :] = torch.from_numpy(self._normalized_mean(vectors)).float()

        return self.class_means

    def compute_class_mean_from_exemplars(
        self,
        exemplars: np.ndarray,
        targets: np.ndarray,
        class_idx: int,
        data_manager: Any,
        batch_size: int = 64
    ) -> None:
        """
        Compute and store the class mean for ``class_idx`` from its exemplars only.

        Args:
            exemplars: Exemplars for the class.
            targets: Targets for the exemplars.
            class_idx: Class index to update.
            data_manager: Data manager for creating the exemplar dataset.
            batch_size: Batch size for processing.
        """
        # Empty index list + `appendent` yields a dataset of just the exemplars.
        exemplar_dataset = data_manager.get_dataset(
            [],
            source="train",
            mode="test",
            appendent=(exemplars, targets)
        )

        exemplar_loader = DataLoader(
            exemplar_dataset,
            batch_size=batch_size,
            shuffle=False,
            num_workers=4
        )

        vectors, _ = self._extract_vectors(exemplar_loader)
        self.class_means[class_idx, :] = torch.from_numpy(self._normalized_mean(vectors)).float()

    def _normalized_mean(self, vectors: np.ndarray) -> np.ndarray:
        """
        L2-normalize each feature vector, average them, and L2-normalize the mean.

        Fix vs. previous version: EPSILON now also guards the final mean
        normalization (previously only the per-vector step was guarded, so an
        all-zero mean produced NaNs).

        Args:
            vectors: Feature matrix [n, feature_dim].

        Returns:
            Unit-length mean feature vector [feature_dim].
        """
        vectors = (vectors.T / (np.linalg.norm(vectors.T, axis=0) + self.EPSILON)).T
        mean = np.mean(vectors, axis=0)
        return mean / (np.linalg.norm(mean) + self.EPSILON)

    def get_class_means(self) -> torch.Tensor:
        """
        Get current class mean representations.

        Returns:
            Tensor of class means [num_classes, feature_dim].
        """
        return self.class_means

    def update_class_means(self, class_means: torch.Tensor) -> None:
        """
        Update class mean representations.

        Args:
            class_means: New class means to set.
        """
        self.class_means = class_means

    def _extract_vectors(self, loader: DataLoader) -> Tuple[np.ndarray, np.ndarray]:
        """
        Extract feature vectors for every sample yielded by ``loader``.

        The loader is expected to yield ``(index, inputs, labels)`` triples,
        as the unpacking below shows.

        Args:
            loader: DataLoader containing samples to extract features from.

        Returns:
            Tuple of (feature vectors [n, feature_dim], targets [n]).
        """
        vectors, targets = [], []
        for _, (_, inputs, labels) in enumerate(loader):
            inputs = inputs.to(self.device)
            with torch.no_grad():  # inference only; no gradients needed
                features = self.feature_extractor(inputs)
            vectors.append(features.cpu().numpy())
            targets.append(labels.numpy())

        vectors = np.concatenate(vectors, axis=0)
        targets = np.concatenate(targets, axis=0)
        return vectors, targets
