from typing import Tuple, Optional, Any, Dict, List, Callable
import numpy as np
import torch
import copy
import logging
from torch.utils.data import DataLoader

from .memory import (
    MemoryManager,
    ExemplarSelector,
    MemoryStrategy,
    HerdingExemplarSelector,
    FixedMemoryStrategy,
    DynamicMemoryStrategy,
    ClassMeanManager,
    StandardClassMeanManager,
)


class EpisodicMemory(MemoryManager):
    """
    Episodic memory for continual learning.

    Maintains a bounded buffer of raw exemplars and their targets, selected
    per class via herding on L2-normalized features, together with per-class
    feature means (delegated to a ``ClassMeanManager``).  Memory is budgeted
    either with a fixed number of slots per class or split dynamically
    across all known classes.
    """

    def __init__(
        self,
        feature_extractor: Callable,
        feature_dim: int,
        memory_size: int,
        memory_per_class: Optional[int] = None,
        fixed_memory: bool = False,
        total_classes: int = 0,
        device: str = "cuda",
        batch_size: int = 64,
        num_workers: int = 4,
    ):
        """
        Initialize episodic memory

        Args:
            feature_extractor: Function to extract features from inputs
            feature_dim: Dimension of feature vectors
            memory_size: Total memory size limit (dynamic strategy)
            memory_per_class: Number of samples per class (for fixed memory strategy)
            fixed_memory: Whether to use fixed memory per class or dynamic allocation
            total_classes: Current total number of classes
            device: Device to use for computation
            batch_size: Batch size for processing
            num_workers: Worker processes for the feature-extraction loaders
        """
        self.feature_extractor = feature_extractor
        self.feature_dim = feature_dim
        self.device = device
        self.batch_size = batch_size
        self.num_workers = num_workers
        self.total_classes = total_classes
        self.known_classes = 0

        # Exemplar buffers; empty until the first task is consolidated.
        self._data_memory = np.array([])
        self._targets_memory = np.array([])

        # Memory budgeting strategy.
        self.fixed_memory = fixed_memory
        if self.fixed_memory and memory_per_class is not None:
            self.memory_strategy = FixedMemoryStrategy(memory_per_class)
            self.memory_per_class = memory_per_class
        else:
            self.memory_strategy = DynamicMemoryStrategy(memory_size)
            self.memory_per_class = 0  # Will be calculated dynamically

        # Herding keeps exemplars whose running mean tracks the class mean.
        self.exemplar_selector = HerdingExemplarSelector()

        # Per-class feature means (e.g. for nearest-mean classification).
        self.class_mean_manager = StandardClassMeanManager(
            feature_extractor=feature_extractor,
            feature_dim=feature_dim,
            total_classes=total_classes,
            device=device,
        )

        self.EPSILON = 1e-8  # Small constant to avoid division by zero

    def build_rehearsal_memory(self, data_manager: Any, per_class: int) -> None:
        """
        Build or update the rehearsal memory for the current task

        Args:
            data_manager: Data manager providing access to task data
            per_class: Number of exemplars per class to maintain
        """
        if self.fixed_memory:
            # Fixed budget: old classes keep their slots, only new classes added.
            self.construct_exemplar_unified(data_manager, per_class)
        else:
            # Dynamic budget: shrink old classes first, then add new ones.
            self.reduce_exemplar(data_manager, per_class)
            self.construct_exemplar(data_manager, per_class)

    def get_memory(self) -> Optional[Tuple[np.ndarray, np.ndarray]]:
        """
        Retrieve current memory contents

        Returns:
            Tuple of (data, targets) if memory exists, None otherwise
        """
        if len(self._data_memory) == 0:
            return None
        return (self._data_memory, self._targets_memory)

    def get_exemplar_size(self) -> int:
        """
        Get the total number of exemplars in memory

        Returns:
            Total number of exemplars
        """
        assert len(self._data_memory) == len(
            self._targets_memory
        ), "Exemplar size error."
        return len(self._targets_memory)

    def get_samples_per_class(self) -> int:
        """
        Get the number of samples per class in memory

        Returns:
            Number of samples per class
        """
        if self.fixed_memory:
            return self.memory_per_class
        else:
            assert self.total_classes != 0, "Total classes is 0"
            return self.memory_strategy.get_memory_budget(self.total_classes)

    def reduce_exemplar(self, data_manager: Any, m: int) -> None:
        """
        Reduce the number of exemplars per class to m

        Keeps the first m exemplars of every known class (herding stores
        them in priority order) and recomputes each class mean from the
        surviving exemplars.

        Args:
            data_manager: Data manager for accessing data
            m: Target number of exemplars per class
        """
        logging.info("Reducing exemplars...({} per classes)".format(m))
        dummy_data, dummy_targets = copy.deepcopy(self._data_memory), copy.deepcopy(
            self._targets_memory
        )

        # Reset memory and rebuild it class by class.
        self._data_memory, self._targets_memory = np.array([]), np.array([])

        class_means = torch.zeros((self.total_classes, self.feature_dim))

        for class_idx in range(self.known_classes):
            mask = np.where(dummy_targets == class_idx)[0]
            dd, dt = dummy_data[mask][:m], dummy_targets[mask][:m]

            self._append_to_memory(dd, dt)

            # Recompute the class mean from the retained exemplars.
            self._compute_class_mean_from_exemplars(
                dd, dt, class_idx, data_manager, class_means
            )

        self.class_mean_manager.update_class_means(class_means)

    def construct_exemplar(self, data_manager: Any, m: int) -> None:
        """
        Construct exemplars for new classes

        Args:
            data_manager: Data manager for accessing data
            m: Number of exemplars per class to construct
        """
        logging.info("Constructing exemplars...({} per classes)".format(m))

        # Ensure the class-mean tensor exists before writing into it below
        # (the class_means=None path writes into the manager directly).
        if not hasattr(self.class_mean_manager, "class_means"):
            self.class_mean_manager.class_means = torch.zeros(
                (self.total_classes, self.feature_dim)
            )

        for class_idx in range(self.known_classes, self.total_classes):
            selected_exemplars, exemplar_targets = self._build_class_exemplars(
                data_manager, class_idx, m
            )
            self._append_to_memory(selected_exemplars, exemplar_targets)

            # Update the class mean stored in the manager (class_means=None).
            self._compute_class_mean_from_exemplars(
                selected_exemplars, exemplar_targets, class_idx, data_manager
            )

    def construct_exemplar_unified(self, data_manager: Any, m: int) -> None:
        """
        Unified approach to construct exemplars for all classes

        Recomputes the means of old classes with the newly trained network,
        then selects exemplars for the new classes, and commits all class
        means in one update.

        Args:
            data_manager: Data manager for accessing data
            m: Number of exemplars per class to maintain
        """
        logging.info(
            "Constructing exemplars for new classes...({} per classes)".format(m)
        )

        class_means = torch.zeros((self.total_classes, self.feature_dim))

        # Refresh old-class means: features drift as the network is trained.
        for class_idx in range(self.known_classes):
            mask = np.where(self._targets_memory == class_idx)[0]
            class_data, class_targets = (
                self._data_memory[mask],
                self._targets_memory[mask],
            )
            self._compute_class_mean_from_exemplars(
                class_data, class_targets, class_idx, data_manager, class_means
            )

        # Select and store exemplars for each new class.
        for class_idx in range(self.known_classes, self.total_classes):
            selected_exemplars, exemplar_targets = self._build_class_exemplars(
                data_manager, class_idx, m
            )
            self._append_to_memory(selected_exemplars, exemplar_targets)

            self._compute_class_mean_from_exemplars(
                selected_exemplars,
                exemplar_targets,
                class_idx,
                data_manager,
                class_means,
            )

        self.class_mean_manager.update_class_means(class_means)

    def update_total_classes(self, total_classes: int) -> None:
        """
        Update total number of classes and adjust class means

        Args:
            total_classes: New total number of classes
        """
        old_total = self.total_classes
        self.total_classes = total_classes

        # Grow the class-mean tensor, preserving the already-computed means.
        if total_classes > old_total:
            old_means = self.class_mean_manager.get_class_means()
            new_means = torch.zeros((total_classes, self.feature_dim))

            if old_means is not None and old_means.shape[0] > 0:
                new_means[:old_total] = old_means[:old_total]

            self.class_mean_manager.update_class_means(new_means)

    def update_known_classes(self, known_classes: int) -> None:
        """
        Update number of known classes

        Args:
            known_classes: New number of known classes
        """
        self.known_classes = known_classes

    def _append_to_memory(self, data: np.ndarray, targets: np.ndarray) -> None:
        """
        Append (data, targets) to the exemplar buffers, handling the case
        where the buffers are still empty (np.concatenate on an empty float
        array would otherwise upcast/misbehave).

        Args:
            data: Exemplar samples to append
            targets: Matching targets to append
        """
        self._data_memory = (
            np.concatenate((self._data_memory, data))
            if len(self._data_memory) != 0
            else data
        )
        self._targets_memory = (
            np.concatenate((self._targets_memory, targets))
            if len(self._targets_memory) != 0
            else targets
        )

    def _normalize_rows(self, vectors: np.ndarray) -> np.ndarray:
        """
        L2-normalize each row of a (num_samples, feature_dim) matrix,
        guarding against zero-norm rows with EPSILON.

        Args:
            vectors: Feature matrix to normalize

        Returns:
            Row-normalized copy of the input
        """
        return (vectors.T / (np.linalg.norm(vectors.T, axis=0) + self.EPSILON)).T

    def _build_class_exemplars(
        self, data_manager: Any, class_idx: int, m: int
    ) -> Tuple[np.ndarray, np.ndarray]:
        """
        Select m herding exemplars for one class from the training split.

        Args:
            data_manager: Data manager for accessing data
            class_idx: Class to select exemplars for
            m: Number of exemplars to select

        Returns:
            Tuple of (selected exemplars, matching targets)
        """
        data, targets, class_dset = data_manager.get_dataset(
            np.arange(class_idx, class_idx + 1),
            source="train",
            mode="test",
            ret_data=True,
        )

        class_loader = DataLoader(
            class_dset,
            batch_size=self.batch_size,
            shuffle=False,
            num_workers=self.num_workers,
        )

        # Herding operates on normalized features.
        vectors, _ = self._extract_vectors(class_loader)
        vectors = self._normalize_rows(vectors)

        selected_exemplars, selected_features = (
            self.exemplar_selector.select_exemplars(data, vectors, m)
        )
        exemplar_targets = np.full(len(selected_exemplars), class_idx)
        return selected_exemplars, exemplar_targets

    def _compute_class_mean_from_exemplars(
        self,
        exemplars: np.ndarray,
        targets: np.ndarray,
        class_idx: int,
        data_manager: Any,
        class_means: Optional[torch.Tensor] = None,
    ) -> None:
        """
        Compute class mean from exemplars for a specific class

        The mean of the normalized exemplar features is itself renormalized
        to unit length before being stored.

        Args:
            exemplars: Exemplars for the class
            targets: Targets for the exemplars
            class_idx: Class index
            data_manager: Data manager for creating dataset
            class_means: Optional tensor to update directly; if None, the
                mean is written into the class-mean manager's tensor
        """
        exemplar_dataset = data_manager.get_dataset(
            [], source="train", mode="test", appendent=(exemplars, targets)
        )
        exemplar_loader = DataLoader(
            exemplar_dataset,
            batch_size=self.batch_size,
            shuffle=False,
            num_workers=self.num_workers,
        )

        vectors, _ = self._extract_vectors(exemplar_loader)
        vectors = self._normalize_rows(vectors)

        # Renormalize the mean; EPSILON guards the previously unguarded
        # division, which produced NaNs for a (near-)zero mean vector.
        mean = np.mean(vectors, axis=0)
        mean = mean / (np.linalg.norm(mean) + self.EPSILON)

        if class_means is not None:
            class_means[class_idx] = torch.from_numpy(mean).float()
        else:
            self.class_mean_manager.get_class_means()[class_idx] = torch.from_numpy(
                mean
            ).float()

    def _extract_vectors(self, loader: DataLoader) -> Tuple[np.ndarray, np.ndarray]:
        """
        Extract feature vectors from data loader

        Args:
            loader: DataLoader yielding (index, inputs, labels) triples

        Returns:
            Tuple of (feature vectors, targets); empty arrays if the loader
            yields no batches
        """
        vectors, targets = [], []
        for _, (_, inputs, labels) in enumerate(loader):
            inputs = inputs.to(self.device)
            # No gradients needed: features are only used for herding/means.
            with torch.no_grad():
                features = self.feature_extractor(inputs)
            vectors.append(features.cpu().numpy())
            targets.append(labels.numpy())

        if len(vectors) == 0:
            return np.array([]), np.array([])

        vectors = np.concatenate(vectors, axis=0)
        targets = np.concatenate(targets, axis=0)
        return vectors, targets
