from typing import Dict, Optional, Any
import torch
import torch.nn as nn
from continuallearning.registry import COMBINER_REGISTRY
from continuallearning.interfaces import CombinerInterface


@COMBINER_REGISTRY.register("task_inter_weightedsum")
class TaskInterWeightedSumCombiner(CombinerInterface):
    """Task-inter combiner that merges per-task outputs via a weighted sum."""

    def combine(
        self,
        outputs: Dict[str, torch.Tensor],
        weights: torch.Tensor,
        context: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> Dict[str, torch.Tensor]:
        """Combine per-task outputs using a weighted sum over the task axis.

        Args:
            outputs: Dictionary mapping task_id to output tensor. All tensors
                must share the same shape so they can be stacked.
            weights: 1-D tensor of shape [T], one weight per task, ordered to
                match the iteration order of ``outputs``.
            context: Optional context for combination (unused here).
            **kwargs: Additional parameters (not used).

        Returns:
            The same ``outputs`` dict, mutated in place: the combined tensor
            of shape [B, ...] is added under the key ``"combiner_output"``.

        Raises:
            ValueError: If ``outputs`` is empty, or ``weights`` does not have
                shape ``(len(outputs),)``.
        """
        if not outputs:
            raise ValueError("No outputs provided to combine")

        # Stack all outputs along a new leading task dimension: [T, B, ...]
        stacked_outputs = torch.stack(tuple(outputs.values()), dim=0)
        num_tasks = stacked_outputs.size(0)

        # Ensure weights has the right shape [T]
        if weights.shape != (num_tasks,):
            raise ValueError(
                f"Expected weights shape ({num_tasks},), got {weights.shape}"
            )

        # Reshape weights for broadcasting against [T, B, ...]: [T, 1, 1, ...]
        weight_view_shape = [num_tasks] + [1] * (stacked_outputs.dim() - 1)
        weights = weights.view(*weight_view_shape)

        # Apply weights and sum out the task dimension -> [B, ...]
        result = (stacked_outputs * weights).sum(dim=0)
        outputs.update({"combiner_output": result})

        return outputs


@COMBINER_REGISTRY.register("concatenation")
class ConcatCombiner(CombinerInterface):
    """Task-inter combiner that concatenates the top-k outputs by weight."""

    def __init__(self, topk: int):
        """
        Initialize concatenation combiner.

        Args:
            topk: Number of top (task, expert) outputs to concatenate.
        """
        super().__init__()
        self.topk = topk

    def combine(
        self,
        outputs: Dict[str, Any],
        weights: torch.Tensor,
        context: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> Dict[str, Any]:
        """Combine outputs by concatenating top-k selections based on weights.

        Args:
            outputs: Dictionary mapping task_id to output tensor
                {task_id: Tensor of shape [B, prompt_length, dim]}
            weights: Tensor of shape [B, num_tasks, num_experts] containing
                routing weights; num_experts must equal prompt_length.
            context: Optional context for combination (unused here).
            **kwargs: Additional parameters; ``topk`` here overrides
                ``self.topk`` for this call.

        Returns:
            The same ``outputs`` dict, mutated in place, with the combined
            tensor under 'combiner_output' ([B, topk, dim]) plus the selected
            flat/task/expert indices, their weights, and the effective topk.

        Raises:
            ValueError: If ``outputs`` is empty, or the shapes of ``weights``
                and the stacked outputs are inconsistent.
        """
        if not outputs:
            raise ValueError("No outputs provided to combine")

        # Allow a per-call topk override from kwargs
        topk = kwargs.get("topk", self.topk)

        # Get task IDs and stack outputs
        task_ids = list(outputs.keys())
        # Stack outputs: from Dict[str, [B, pL, D]] to [B, T, pL, D]
        stacked_outputs = torch.stack([outputs[tid] for tid in task_ids], dim=1)
        batch_size, num_tasks, prompt_length, dim = stacked_outputs.shape
        batch_size_w, num_tasks_w, num_experts = weights.shape
        # Each expert weight selects one prompt row, so E must equal pL for
        # the flat-index gather below to line up.
        if (
            batch_size_w != batch_size
            or num_tasks_w != num_tasks
            or num_experts != prompt_length
        ):
            raise ValueError(
                f"Shape mismatch: weights {weights.shape} vs outputs "
                f"[{batch_size}, {num_tasks}, {prompt_length}, {dim}]"
            )
        # Flatten weights to [B, T*E] where T*E represents all task-expert combinations
        flat_weights = weights.reshape(batch_size, num_tasks * num_experts)
        # Never request more entries than exist
        topk = min(topk, num_tasks * num_experts)

        # Get top-k indices for each batch element
        # [B, T*E] -> [B, topk]
        topk_values, topk_flat_indices = torch.topk(flat_weights, k=topk, dim=1)
        # Flatten outputs the same way: [B, T*pL, D]
        outputs_flat = stacked_outputs.reshape(
            batch_size, num_tasks * prompt_length, dim
        )
        gather_indices = topk_flat_indices.unsqueeze(-1).expand(
            -1, -1, dim
        )  # [B, topk, D]
        # Gather the selected rows
        topk_outputs = torch.gather(
            outputs_flat, dim=1, index=gather_indices
        )  # [B, topk, D]
        # Convert flat indices back to (task_idx, expert_idx) pairs
        # topk_flat_indices: [B, topk]
        task_indices = topk_flat_indices // num_experts  # [B, topk]
        expert_indices = topk_flat_indices % num_experts  # [B, topk]
        # Store result and selection metadata alongside the raw outputs
        outputs.update(
            {
                "combiner_output": topk_outputs,
                "topk_indices": topk_flat_indices,  # [B, topk] - flat indices
                "topk_task_indices": task_indices,  # [B, topk] - which tasks
                "topk_expert_indices": expert_indices,  # [B, topk] - which experts/prompts
                "topk_weights": topk_values,  # [B, topk] - corresponding weights
                "topk": topk,
            }
        )

        return outputs
