"""
Interfaces for task routing strategies.

This module defines interfaces for routing inputs across multiple
task-specific components based on various strategies such as hard routing,
soft routing, and top-k expert selection.
"""

import torch
from torch import nn
from abc import abstractmethod
from typing import Dict, Any, Optional, List
from continuallearning.interfaces.core.component import ComponentInterface
from continuallearning.interfaces.core.task_related_interface import (
    TaskAwareInterface,
    TaskIrrelevantInterface,
)


class RouterInterface(ComponentInterface, TaskAwareInterface, TaskIrrelevantInterface):
    """Interface for routing inputs across multiple task-specific components.

    This interface defines the contract for routers that determine how inputs
    should be distributed among different task components based on configurable
    routing strategies. Supports hard routing, soft routing, and top-k routing.

    Core Attributes:
        router_weights: Learnable parameters for routing decisions
        num_experts: Total number of expert components managed
        embed_dim: Dimensionality of the routed input embeddings (default: 768)
        routing_strategy: Active routing strategy ('hard', 'soft', 'top_k')
        weights_initializer: Name of the initializer used for router weights
            (default: 'kaiming_uniform_'; presumably resolved against
            ``torch.nn.init`` — implementations should confirm)

    Configuration Attributes:
        top_k: Number of experts to select for top-k routing (default: 1)
        temperature: Softmax temperature for soft routing (default: 1.0)
        noise_std: Gaussian noise standard deviation for exploration (default: 0.0)

    State Tracking Attributes:
        routing_history: Optional history of routing decisions for analysis,
            stored as per-step mappings of expert index to routing weight
        load_balancing_loss_weight: Weight for load balancing loss (default: 0.01)
    """

    # Core routing components
    router_weights: nn.ParameterDict  # learnable routing parameters, keyed by name
    num_experts: int  # total number of expert components this router manages
    embed_dim: int = 768  # expected embedding dimension of routed inputs
    routing_strategy: str = "soft"  # one of 'hard', 'soft', 'top_k'
    weights_initializer: str = "kaiming_uniform_"  # initializer name for router weights

    # Configuration parameters
    top_k: int = 1  # experts selected per input under top-k routing
    temperature: float = 1.0  # softmax temperature; lower = sharper routing
    noise_std: float = 0.0  # stddev of exploration noise; 0.0 disables it

    # State tracking
    routing_history: Optional[List[Dict[int, float]]] = None  # expert_id -> weight per step
    load_balancing_loss_weight: float = 0.01  # scales compute_load_balancing_loss()

    @abstractmethod
    def route(
        self, x: torch.Tensor, context: Optional[Dict[str, Any]] = None, *args, **kwargs
    ) -> torch.Tensor:
        """Compute routing weights for distributing input across experts.

        Args:
            x: Input tensor to route with shape (batch_size, ...)
            context: Optional contextual information for routing decisions
            *args: Additional positional strategy-specific routing parameters
            **kwargs: Additional keyword strategy-specific routing parameters

        Returns:
            Tensor of per-expert routing weights (note: the return type is
            ``torch.Tensor``, not a task_id-to-weight mapping). Per input,
            weights should sum to 1.0 for soft routing or be one-hot/binary
            for hard routing.
            NOTE(review): presumably shaped (batch_size, num_experts) —
            confirm against concrete implementations.
        """
        pass

    @abstractmethod
    def compute_load_balancing_loss(self) -> torch.Tensor:
        """Compute load balancing loss to encourage uniform expert utilization.

        The result is expected to be scaled by ``load_balancing_loss_weight``
        when added to the training objective.

        Returns:
            Scalar tensor representing the load balancing penalty.
        """
        pass

    @abstractmethod
    def get_routing_statistics(self) -> Dict[str, Any]:
        """Extract comprehensive statistics about routing behavior.

        Returns:
            Dictionary containing routing statistics including expert utilization,
            routing entropy, load balance coefficient, and active experts count.
        """
        pass

    @abstractmethod
    def reset_routing_state(self) -> None:
        """Reset internal routing state and clear decision history.

        Implementations should clear ``routing_history`` and any other
        accumulated per-step state so statistics start fresh.
        """
        pass

    @abstractmethod
    def update_routing_config(self, config: Dict[str, Any]) -> None:
        """Update routing configuration parameters dynamically.

        Args:
            config: Dictionary containing configuration updates for
                   routing_strategy, top_k, temperature, noise_std, etc.
        """
        pass
