import abc
import logging
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from typing import Dict, Type


class DriftCompensation(abc.ABC):
    """
    Interface for feature drift compensation strategies.

    When a model is updated incrementally, its feature space can shift
    ("drift"), degrading representations learned for earlier tasks.
    Concrete subclasses implement :meth:`compensate` to counteract that
    shift, typically by realigning stored class prototypes.
    """

    @abc.abstractmethod
    def compensate(
        self,
        network: nn.Module,
        old_network: nn.Module,
        train_loader: DataLoader,
        class_means: torch.Tensor,
        known_classes: int,
        device: str,
    ) -> None:
        """
        Apply drift compensation.

        Args:
            network: Current network.
            old_network: Network snapshot from the previous task.
            train_loader: DataLoader over the current task's training data.
            class_means: Tensor of stored class means (may be updated in place).
            known_classes: Number of previously known classes.
            device: Computation device identifier (e.g. "cpu" or "cuda").
        """
        ...


class NoCompensation(DriftCompensation):
    """Strategy that performs no drift compensation (baseline)."""

    def compensate(
        self,
        network: nn.Module,
        old_network: nn.Module,
        train_loader: DataLoader,
        class_means: torch.Tensor,
        known_classes: int,
        device: str,
    ) -> None:
        """
        No-op implementation of drift compensation.

        All arguments are accepted to satisfy the base-class interface but
        are otherwise ignored; nothing is modified.

        Args:
            network: Current network (unused).
            old_network: Network from previous task (unused).
            train_loader: DataLoader for current task's training data (unused).
            class_means: Tensor of class means (left untouched).
            known_classes: Number of previously known classes (unused).
            device: Computation device (unused).
        """
        # Only record the decision; there is deliberately no other effect.
        logging.info("No drift compensation applied")


class LinearDriftCompensation(DriftCompensation):
    """
    Linear transformation based drift compensation.

    Learns a linear transformation to align old feature space with new.
    """

    # Hyperparameters for training the alignment projector.
    _NUM_EPOCHS = 20
    _LEARNING_RATE = 0.001

    def compensate(
        self,
        network: nn.Module,
        old_network: nn.Module,
        train_loader: DataLoader,
        class_means: torch.Tensor,
        known_classes: int,
        device: str,
    ) -> None:
        """
        Apply linear drift compensation.

        Trains a bias-free linear projector mapping features of
        ``old_network`` onto the feature space of ``network``, then applies
        it in place to the stored prototypes of previously known classes
        (``class_means[:known_classes]``).

        Args:
            network: Current network; assumed to expose ``feature_dim`` and
                to return a dict with a "features" entry from forward().
            old_network: Network from previous task (same forward contract).
                If None, compensation is skipped with a warning.
            train_loader: DataLoader for current task's training data,
                yielding (index, inputs, targets) triples.
            class_means: Tensor of class means; rows [0, known_classes) are
                updated in place (kept in float64).
            known_classes: Number of previously known classes.
            device: Computation device.
        """
        if old_network is None:
            logging.warning("No old network available for drift compensation")
            return

        logging.info("Applying linear drift compensation")

        # Bias-free linear map from the old feature space to the new one.
        feature_dim = network.feature_dim
        projector = nn.Linear(feature_dim, feature_dim, bias=False).to(device)
        optimizer = torch.optim.Adam(projector.parameters(), lr=self._LEARNING_RATE)
        criterion = nn.MSELoss()  # hoisted: loop-invariant, no need to rebuild per batch

        # Train the transformation to regress new features from old ones.
        for epoch in range(self._NUM_EPOCHS):
            epoch_loss = 0.0
            batch_count = 0

            for _, inputs, _ in train_loader:
                inputs = inputs.to(device)

                # Both extractions serve only as fixed inputs/targets for the
                # projector, so no gradients are needed through the networks.
                with torch.no_grad():
                    old_feats = old_network(inputs)["features"]
                    new_feats = network(inputs)["features"]

                # Gradients flow through the projector alone.
                transformed_feats = projector(old_feats)
                loss = criterion(transformed_feats, new_feats)

                optimizer.zero_grad()
                loss.backward()
                optimizer.step()

                epoch_loss += loss.item()
                batch_count += 1

            avg_loss = epoch_loss / batch_count if batch_count > 0 else 0
            # Lazy %-style args: message is only built if INFO is enabled.
            logging.info(
                "Drift compensation epoch %d, Loss: %.6f", epoch + 1, avg_loss
            )

        # Project the stored prototypes of old classes into the new space.
        # class_means is kept in float64; the projector works in float32.
        with torch.no_grad():
            old_means = class_means[:known_classes].clone()
            old_means = old_means.to(torch.float32).to(device)
            transformed_means = projector(old_means).detach().clone().cpu()
            class_means[:known_classes] = transformed_means.to(torch.float64)

        logging.info("Applied drift compensation to %d classes", known_classes)


# Factory giving name-based access to the available compensation strategies.
class DriftCompensationFactory:
    """Factory for creating drift compensation strategies."""

    # Registry mapping lowercase strategy names to their implementing classes.
    _strategies: Dict[str, Type[DriftCompensation]] = {
        "none": NoCompensation,
        "linear": LinearDriftCompensation,
    }

    @classmethod
    def get_strategy(cls, name: str) -> DriftCompensation:
        """
        Instantiate the drift compensation strategy registered under *name*.

        Lookup is case-insensitive.

        Args:
            name: Strategy name

        Returns:
            DriftCompensation: Strategy instance

        Raises:
            ValueError: If strategy name is not recognized
        """
        key = name.lower()
        if key not in cls._strategies:
            raise ValueError(f"Unknown drift compensation strategy: {name}")
        return cls._strategies[key]()

    @classmethod
    def register_strategy(
        cls, name: str, strategy_class: Type[DriftCompensation]
    ) -> None:
        """
        Register a new drift compensation strategy.

        The name is stored lowercased, so later lookups via
        :meth:`get_strategy` are case-insensitive.

        Args:
            name: Strategy name
            strategy_class: Strategy class
        """
        key = name.lower()
        cls._strategies[key] = strategy_class
        logging.info(f"Registered drift compensation strategy: {name}")
