"""
Interfaces for task routing strategies.
Interface Definition: continuallearning/interfaces/models/pefts/routing/router.py

This module defines interfaces for routing inputs across multiple
task-specific components based on various strategies such as hard routing,
soft routing, and top-k expert selection.
"""

import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Dict, Any, Optional, List
from collections import defaultdict
from continuallearning.registry import ROUTER_REGISTRY
from continuallearning.models.pefts.routers._base_router import BaseRouter


@ROUTER_REGISTRY.register("task_inter_router")
class TaskInterRouter(BaseRouter):
    """Implementation of RouterInterface for routing inputs across task-specific components.

    This router supports multiple routing strategies:
    - 'hard': Select single best expert
    - 'soft': Weighted combination of experts
    - 'top_k': Select top-k experts

    Note:
        When routing over multiple tasks, the strategy is applied over the
        flattened (task, expert) axis, so soft/top_k weights are normalized
        jointly across all selected tasks' experts, not per task.
    """

    def __init__(
        self,
        num_experts: int,
        embed_dim: int = 768,
        routing_strategy: str = "soft",
        weights_initializer: str = "kaiming_uniform_",
        top_k: int = -1,
        temperature: float = 1.0,
        noise_std: float = 0.0,
        load_balancing_loss_weight: float = -1.0,
        track_routing_history: bool = False,
        **kwargs,
    ):
        """Initialize Router with specified configuration.

        Args:
            num_experts: Number of expert components to route between for each task
            embed_dim: Embedding dimension of routed inputs (per-token feature size)
            routing_strategy: Strategy for routing ('hard', 'soft', 'top_k')
            weights_initializer: Name of the ``nn.init`` scheme used for new
                per-task router weights ('kaiming_uniform_', 'xavier_uniform_',
                'normal_')
            top_k: Number of experts to select in top_k routing (-1 for soft)
            temperature: Temperature for softmax in soft/top_k routing
            noise_std: Standard deviation of exploration noise added to scores
                during training (0 disables noise)
            load_balancing_loss_weight: Weight for load balancing loss; a
                non-positive value disables the loss (see ``loss_flag``)
            track_routing_history: Whether to track routing decisions
            **kwargs: Additional configuration parameters (unused here)
        """
        super().__init__()
        self._current_task = -1

        # Core attributes; router_weights gains one [E, D] parameter per
        # registered task (keyed by str(task_id)).
        self.router_weights = nn.ParameterDict()
        self.num_experts = num_experts
        self.embed_dim = embed_dim
        self.routing_strategy = routing_strategy
        self.weights_initializer = weights_initializer

        # Configuration parameters
        self.top_k = top_k
        self.temperature = temperature
        self.noise_std = noise_std
        self.load_balancing_loss_weight = load_balancing_loss_weight
        self.loss_flag = load_balancing_loss_weight > 0

        # State tracking. _expert_counts accumulates per-task expert usage;
        # entries are also pre-created in register_task, but the defaultdict
        # keeps statistics robust if a task is routed without registration of
        # its counter.
        self.routing_history = [] if track_routing_history else None
        self._expert_counts = defaultdict(
            lambda: torch.zeros(self.num_experts, dtype=torch.float32)
        )
        self._total_routed = 0

    def route(
        self,
        x: torch.Tensor,
        task_ids: List[int],
        context: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> torch.Tensor:
        """Compute routing weights for distributing input across multiple tasks and experts.

        Args:
            x: Input tensor with shape [B, L, D] where:
                - B: Batch size (number of samples)
                - L: Sequence length (number of tokens)
                - D: Embedding dimension (feature size)
            task_ids: List of task IDs to activate routing for.
            context: Optional contextual information for routing decisions
                (currently unused).
            **kwargs: Additional strategy-specific routing parameters.

        Returns:
            torch.Tensor: Routing weights with shape [B, T, E] where:
                - B: Batch size (same as input)
                - T: Number of specified tasks (len(task_ids))
                - E: Number of experts per task

        Raises:
            ValueError: If no tasks are specified, unknown routing strategy,
                or specified task_ids are not registered

        Example:
            >>> router = TaskInterRouter(num_experts=4, routing_strategy='soft')
            >>> router.register_task(0)
            >>> router.register_task(1)
            >>> x = torch.randn(32, 128, 768)
            >>> weights = router.route(x, task_ids=[0, 1])
            >>> weights.shape  # torch.Size([32, 2, 4])
        """
        # Reject empty selections up front, before per-task validation.
        if len(task_ids) == 0:
            raise ValueError("No tasks specified for routing")

        # Validate all specified tasks are registered
        for task_id in sorted(task_ids):
            if task_id not in self.registered_tasks:
                raise ValueError(f"Task {task_id} is not registered in router")

        # Compute routing scores only for specified tasks
        routing_scores = self._compute_routing_scores(x, task_ids)
        B, T, E = routing_scores.shape
        # Flatten the (task, expert) axes so the strategy normalizes jointly
        # across all selected tasks' experts.
        scores_flatten = routing_scores.view(B, T * E)

        # Add noise for exploration if specified (training only)
        if self.training and self.noise_std > 0:
            noise = torch.randn_like(scores_flatten) * self.noise_std
            scores_flatten = scores_flatten + noise

        # Apply routing strategy
        routing_weights = self._apply_routing_strategy(scores_flatten)
        routing_weights = routing_weights.view(B, T, E)

        # Update routing statistics (detached so stats never hold the graph)
        self._update_statistics(routing_weights.clone().detach(), task_ids)

        # Track routing history if enabled
        if self.routing_history is not None:
            self._track_routing_decisions(routing_weights.clone().detach(), task_ids)

        return routing_weights

    def _compute_routing_scores(
        self, x: torch.Tensor, task_ids: List[int]
    ) -> torch.Tensor:
        """Compute routing scores for specified tasks.
        # FIXME add more similarity measures

        Args:
            x: Input tensor of shape (batch_size, seq_length, embed_dim)
            task_ids: List of task IDs to compute scores for

        Returns:
            Tensor of shape (batch_size, num_tasks, num_experts) containing routing scores.
        """
        cls_tokens = x[:, 0, :]  # [B, D] — use CLS token for routing

        # Collect weights only for specified tasks; each is [E, D]
        selected_weights = [self.router_weights[str(tid)] for tid in task_ids]

        # Stack selected task weights: [T, E, D]
        all_weights = torch.stack(selected_weights, dim=0)

        # BUG FIX: the previous matmul of [B, 1, D] with [T, D, E] required the
        # leading batch dims B and T to broadcast, which raises whenever B != T
        # (and neither is 1). einsum contracts over D directly:
        # [B, D] x [T, E, D] -> [B, T, E].
        scores = torch.einsum("bd,ted->bte", cls_tokens, all_weights)

        return scores

    def _apply_routing_strategy(self, scores: torch.Tensor) -> torch.Tensor:
        """Apply the configured routing strategy to compute weights.

        Args:
            scores: Flattened scores of shape [B, T * E].

        Raises:
            ValueError: If ``self.routing_strategy`` is not recognized.
        """
        if self.routing_strategy == "hard":
            return self._hard_routing(scores)
        elif self.routing_strategy == "soft":
            return self._soft_routing(scores)
        elif self.routing_strategy == "top_k":
            return self._top_k_routing(scores)
        else:
            raise ValueError(f"Unknown routing strategy: {self.routing_strategy}")

    def _hard_routing(self, scores: torch.Tensor) -> torch.Tensor:
        """Apply hard routing - select single best expert.

        The selected expert's weight is the sigmoid of its raw score (keeps a
        gradient path through the winning score); all others are zero.
        """
        max_values, indices = scores.max(dim=-1, keepdim=True)
        max_values = torch.sigmoid(max_values)
        routing_weights = torch.zeros_like(scores)
        routing_weights.scatter_(dim=-1, index=indices, src=max_values)
        return routing_weights

    def _soft_routing(self, scores: torch.Tensor) -> torch.Tensor:
        """Apply soft routing - weighted combination using softmax."""
        return F.softmax(scores / self.temperature, dim=-1)

    def _top_k_routing(self, scores: torch.Tensor) -> torch.Tensor:
        """Apply top-k routing - select top-k experts.

        The k selected experts receive tempered-softmax weights (renormalized
        over the selected subset); all other experts are zero.
        """
        topk_scores, topk_indices = scores.topk(self.top_k, dim=-1)
        routing_weights = torch.zeros_like(scores)
        topk_weights = F.softmax(topk_scores / self.temperature, dim=-1)
        routing_weights.scatter_(-1, topk_indices, topk_weights)
        return routing_weights

    def _update_statistics(
        self, routing_weights: torch.Tensor, task_ids: List[int]
    ) -> None:
        """Update internal routing statistics for specified tasks.

        Args:
            routing_weights: Detached weights of shape [B, T, E].
            task_ids: Task IDs aligned with the T axis of ``routing_weights``.
        """
        with torch.no_grad():
            expert_usage = routing_weights.sum(dim=0)  # [T, E]

            for i, task_id in enumerate(task_ids):
                # Accumulators live on CPU; move batch usage to match.
                self._expert_counts[str(task_id)] += expert_usage[i].to(
                    self._expert_counts[str(task_id)].device
                )

            self._total_routed += routing_weights.shape[0]

    def _track_routing_decisions(
        self, routing_weights: torch.Tensor, task_ids: List[int]
    ) -> None:
        """Track routing decisions in history for analysis and debugging.

        This method records the routing weights for each sample in the batch,
        organizing them by task and expert. The history can be used for:
        - Analyzing routing patterns over time
        - Debugging routing behavior
        - Visualizing expert utilization across tasks

        Args:
            routing_weights: Tensor of shape [B, T, E] containing routing weights
                - B: Batch size (number of samples)
                - T: Number of tasks
                - E: Number of experts per task
            task_ids: List of task IDs corresponding to the routing weights

        Note:
            - Only tracks routing if self.routing_history is initialized (not None)
            - Stores weights as Python floats for JSON serialization compatibility
            - Tasks whose experts all received zero weight are omitted per sample
            - Memory usage grows linearly with the number of tracked batches

        Example:
            Given routing_weights with shape [2, 3, 4] (2 samples, 3 tasks, 4 experts),
            the history will be updated with entries like:
            [
                {0: [0.1, 0.7, 0.2, 0.0], 1: [0.0, 1.0, 0.0, 0.0], 2: [0.3, 0.3, 0.4, 0.0]},
                {0: [0.5, 0.5, 0.0, 0.0], 1: [0.0, 0.0, 0.8, 0.2], 2: [1.0, 0.0, 0.0, 0.0]}
            ]
            where each dict represents a sample, keys are task IDs, and values are expert weights.
        """
        if self.routing_history is None:
            return

        batch_routing = []
        B, T, E = routing_weights.shape

        for i in range(B):
            sample_routing = {}
            for t_idx, task_id in enumerate(task_ids):
                task_experts = routing_weights[i, t_idx, :].tolist()
                # Skip tasks with no active experts to keep entries compact.
                if any(w > 0 for w in task_experts):
                    sample_routing[task_id] = task_experts
            batch_routing.append(sample_routing)

        self.routing_history.extend(batch_routing)

    def compute_load_balancing_loss(self) -> torch.Tensor:
        """FIXME Compute load balancing loss to encourage uniform expert utilization."""
        raise NotImplementedError(
            "Load balancing loss computation is not implemented in TaskInterRouter."
        )

    def get_routing_statistics(self) -> Dict[str, Any]:
        """Extract comprehensive statistics about routing behavior.

        Returns:
            Dict with keys:
                - expert_utilization: per-expert share of total routed weight
                - routing_entropy: entropy of utilization, normalized to [0, 1]
                - load_balance_coefficient: 1.0 means perfectly uniform usage
                - active_experts_count: experts with any recorded usage
                - avg_weight_per_sample: mean total weight emitted per sample
                - target_utilization: per-expert utilization under perfect balance
        """
        if self._total_routed == 0:
            # Keep the same keys as the populated path so callers can rely on
            # a stable schema.
            return {
                "expert_utilization": torch.zeros(self.num_experts).tolist(),
                "routing_entropy": 0.0,
                "load_balance_coefficient": 0.0,
                "active_experts_count": 0,
                "avg_weight_per_sample": 0.0,
                "target_utilization": 0.0,
            }

        # Aggregate expert counts across all tasks
        total_expert_counts = torch.zeros(self.num_experts, dtype=torch.float32)
        total_weights_sum = 0.0  # Track total weight sum for normalization

        for task_id in self.registered_tasks:
            task_key = str(task_id)
            if (
                task_key in self._expert_counts
                and self._expert_counts[task_key] is not None
            ):
                task_counts = self._expert_counts[task_key]
                total_expert_counts += task_counts
                total_weights_sum += task_counts.sum().item()

        # Compute normalized utilization.
        # This accounts for different routing strategies having different scales.
        if total_weights_sum > 0:
            utilization = total_expert_counts / total_weights_sum
        else:
            utilization = torch.zeros(self.num_experts, dtype=torch.float32)

        # Compute target utilization based on actual average weight per sample.
        # This is more accurate than assuming 1.0 / num_experts.
        avg_weight_per_sample = (
            total_weights_sum / self._total_routed if self._total_routed > 0 else 1.0
        )
        target_utilization = avg_weight_per_sample / self.num_experts

        # Compute routing entropy
        eps = 1e-8
        # Filter out zero utilization to avoid log(0)
        non_zero_utilization = utilization[utilization > 0]
        if len(non_zero_utilization) > 0:
            entropy = (
                -(non_zero_utilization * torch.log(non_zero_utilization + eps))
                .sum()
                .item()
            )
            max_entropy = torch.log(
                torch.tensor(len(non_zero_utilization), dtype=torch.float32)
            ).item()
            normalized_entropy = entropy / max_entropy if max_entropy > 0 else 0.0
        else:
            normalized_entropy = 0.0

        # Compute load balance coefficient (1.0 = perfect balance).
        # Use relative deviation from target utilization.
        if target_utilization > 0:
            relative_deviation = (
                torch.abs(utilization - target_utilization) / target_utilization
            )
            load_balance = 1.0 - relative_deviation.mean().item()
            load_balance = max(0.0, load_balance)  # Ensure non-negative
        else:
            load_balance = 0.0

        # Count active experts
        active_experts = (total_expert_counts > 0).sum().item()

        return {
            "expert_utilization": utilization.tolist(),
            "routing_entropy": normalized_entropy,
            "load_balance_coefficient": load_balance,
            "active_experts_count": active_experts,
            "avg_weight_per_sample": avg_weight_per_sample,  # Additional info for debugging
            "target_utilization": target_utilization,  # Additional info for debugging
        }

    def reset_routing_state(self) -> None:
        """Reset internal routing state and clear decision history."""
        for task_key in self._expert_counts:
            self._expert_counts[task_key].zero_()

        self._total_routed = 0

        if self.routing_history is not None:
            self.routing_history.clear()

    def update_routing_config(self, config: Dict[str, Any]) -> None:
        """Update routing configuration parameters dynamically.

        Only attributes that already exist on the router are updated; unknown
        keys are silently ignored.
        """
        for key, value in config.items():
            if hasattr(self, key):
                setattr(self, key, value)
                # Keep the derived flag in sync with the loss weight.
                if key == "load_balancing_loss_weight":
                    self.loss_flag = value > 0

    def _init_weights(self, weights: nn.Parameter) -> None:
        """Initialize router weights in-place using the configured strategy.

        Args:
            weights: Parameter of shape [num_experts, embed_dim] to initialize.

        Raises:
            ValueError: If ``self.weights_initializer`` is not supported.
        """
        initializers = {
            "kaiming_uniform_": lambda w: nn.init.kaiming_uniform_(w, a=math.sqrt(5)),
            "xavier_uniform_": nn.init.xavier_uniform_,
            "normal_": lambda w: nn.init.normal_(w, mean=0.0, std=1.0),
        }

        if self.weights_initializer not in initializers:
            raise ValueError(f"Unknown weights initializer: {self.weights_initializer}")

        # BUG FIX: the initializer was previously looked up and validated but
        # never applied, leaving every task's router weights at zero.
        initializers[self.weights_initializer](weights)

    # TaskAwareInterface
    def register_task(self, task_id: int, **kwargs) -> None:
        """
        Register a new task with this hook.

        Creates and initializes a fresh [num_experts, embed_dim] weight matrix
        for the task. Registering an already-known task is a no-op.

        Args:
            task_id: Identifier for the new task
            **kwargs: Optional task-specific configuration (unused)
        """
        if task_id in self.registered_tasks:
            print(f"Task {task_id} is already registered.")
            return

        new_weights = nn.Parameter(
            torch.zeros(self.num_experts, self.embed_dim, dtype=torch.float32)
        )
        self._init_weights(new_weights)
        self.router_weights[str(task_id)] = new_weights

        # Pre-create the usage accumulator so statistics cover every
        # registered task even before it has been routed to.
        self._expert_counts[str(task_id)] = torch.zeros(
            self.num_experts, dtype=torch.float32
        )

    def set_task(self, task_id: int) -> None:
        """Configure the router for a specific task.

        Raises:
            ValueError: If the task has not been registered.
        """
        if task_id not in self.registered_tasks:
            raise ValueError(f"Task {task_id} is not registered")
        self._current_task = task_id

    def set_trainable_tasks(self, task_ids: int | List[int], **kwargs) -> None:
        """Set which tasks should be trainable (all others are frozen)."""
        task_ids = [task_ids] if isinstance(task_ids, int) else task_ids

        # First freeze all tasks
        for tid in self.registered_tasks:
            self._freeze(tid)

        # Then unfreeze specified tasks
        for tid in task_ids:
            self._unfreeze(tid)

    def _freeze(self, task_ids: int | List[int]) -> None:
        """Freeze parameters for specific tasks (unknown tasks are ignored)."""
        task_ids = [task_ids] if isinstance(task_ids, int) else task_ids

        for tid in task_ids:
            task_key = str(tid)
            if task_key in self.router_weights:
                self.router_weights[task_key].requires_grad = False

    def _unfreeze(self, task_ids: int | List[int]) -> None:
        """Unfreeze parameters for specific tasks.

        Raises:
            ValueError: If a task has no registered router weights.
        """
        task_ids = [task_ids] if isinstance(task_ids, int) else task_ids

        for tid in task_ids:
            task_key = str(tid)
            if task_key in self.router_weights:
                self.router_weights[task_key].requires_grad = True
            else:
                raise ValueError(f"Task {tid} is not registered in router weights")

    def process_with_task(
        self, x: torch.Tensor, task_id: int, **kwargs
    ) -> torch.Tensor:
        """Process input with a specific task; returns weights of shape [B, E]."""
        return self.route(x, task_ids=[task_id], **kwargs).squeeze(1)

    def process_with_tasks(
        self, x: torch.Tensor, task_ids: Optional[List[int]] = None, **kwargs
    ) -> torch.Tensor:
        """Process input with multiple tasks (all registered tasks if None)."""
        task_ids = self.registered_tasks if task_ids is None else task_ids
        return self.route(x, task_ids=task_ids, **kwargs)

    @property
    def current_task(self) -> int:
        """Get the current task ID (-1 if none set)."""
        return self._current_task

    @property
    def registered_tasks(self) -> List[int]:
        """Get sorted list of registered task IDs."""
        return sorted([int(task_id) for task_id in self.router_weights.keys()])

    # TaskIrrelevantInterface implementation
    def get_parameters(self) -> List[nn.Parameter]:
        """Get all parameters managed by this module."""
        return list(self.parameters())

    def get_trainable_parameters(self) -> List[nn.Parameter]:
        """Get trainable parameters managed by this module."""
        return [param for param in self.parameters() if param.requires_grad]


# FIXME: needs further optimization
@ROUTER_REGISTRY.register("task_specific_router")
class TaskSpecificRouter(BaseRouter):
    """Parameter-free router with a fixed one-to-one task-to-expert mapping.

    The simplest possible router: every registered task is bound to exactly
    one expert, so routing reduces to emitting a one-hot vector for the
    active task.

    Example:
        - Task 0 -> Expert 0
        - Task 1 -> Expert 1
        - Task 2 -> Expert 2

    Note:
        The number of experts must be greater than or equal to the number of tasks.
    """

    def __init__(self, **kwargs):
        """Initialize TaskSpecificRouter with no tasks and no active task."""
        super().__init__()
        self._current_task = -1
        self._registered_task_ids = []

    def route(
        self, x: torch.Tensor, context: Optional[Dict[str, Any]] = None, **kwargs
    ) -> torch.Tensor:
        """Route input to the expert corresponding to current task.

        Args:
            x: Input tensor with shape [B, L, D]
            context: Optional context (can contain 'task_id')
            **kwargs: Additional parameters (ignored)

        Returns:
            One-hot routing weights of shape [B, E] where only the expert
            corresponding to current task has weight 1.0

        Raises:
            ValueError: If no task is active or the task is unregistered.
        """
        # A task id supplied via context overrides the stored current task.
        active_task = self._current_task
        if context and "task_id" in context:
            active_task = context["task_id"]

        if active_task < 0:
            raise ValueError("No task ID specified for routing")

        if active_task not in self._registered_task_ids:
            raise ValueError(f"Task {active_task} is not registered")

        # Build one-hot weights on the input's device/dtype; one column per
        # registered task, with the active task's column set to 1.
        one_hot = x.new_zeros(x.shape[0], len(self._registered_task_ids))
        one_hot[:, self._registered_task_ids.index(active_task)] = 1.0

        return one_hot

    # TaskAwareInterface
    def register_task(self, task_id: int, **kwargs) -> None:
        """Register a new task (idempotent; duplicates are ignored)."""
        if task_id in self._registered_task_ids:
            return
        self._registered_task_ids.append(task_id)

    def set_task(self, task_id: int) -> None:
        """Select the task whose expert future route() calls will target.

        Raises:
            ValueError: If the task has not been registered.
        """
        if task_id not in self._registered_task_ids:
            raise ValueError(f"Task {task_id} is not registered")
        self._current_task = task_id

    def set_trainable_tasks(self, task_ids: int | List[int], **kwargs) -> None:
        """Unsupported: this router holds no trainable parameters."""
        raise NotImplementedError(
            "TaskSpecificRouter does not require task preparation"
        )

    def _freeze(self, task_ids: int | List[int]) -> None:
        """No-op: there are no parameters to freeze."""
        pass

    def _unfreeze(self, task_ids: int | List[int]) -> None:
        """No-op: there are no parameters to unfreeze."""
        pass

    def process_with_task(
        self, x: torch.Tensor, task_id: int, **kwargs
    ) -> torch.Tensor:
        """Route *x* for an explicitly given task ID."""
        return self.route(x, context={"task_id": task_id}, **kwargs)

    def process_with_tasks(
        self, x: torch.Tensor, task_ids: Optional[List[int]] = None, **kwargs
    ) -> torch.Tensor:
        """Unsupported: this router handles exactly one task at a time."""
        raise NotImplementedError(
            "TaskSpecificRouter does not support multi-task routing; use process_with_task instead"
        )

    @property
    def current_task(self) -> int:
        """Currently active task ID (-1 when unset)."""
        return self._current_task

    @property
    def registered_tasks(self) -> List[int]:
        """Copy of the registered task IDs, in registration order."""
        return self._registered_task_ids.copy()

    @property
    def num_experts(self) -> int:  # type: ignore[override]
        """Number of experts equals number of registered tasks."""
        return len(self._registered_task_ids)

    # TaskIrrelevantInterface implementation
    def get_parameters(self) -> List[nn.Parameter]:
        """This router owns no parameters."""
        return []

    def get_trainable_parameters(self) -> List[nn.Parameter]:
        """This router owns no trainable parameters."""
        return []
