import math
from typing import Dict, List, Optional, Any, Tuple, Union
import torch
import torch.nn as nn

from continuallearning.interfaces import HookType, RouterInterface, CombinerInterface
from continuallearning.models.pefts.common.config.config import BypassLoRAConfig
from continuallearning.models.pefts.common.utils.error_handlers import (
    handle_peft_errors,
    validate_tensor_operation,
)
from continuallearning.models.pefts.common.utils.exceptions import (
    ForwardPassError,
)
from continuallearning.models.pefts.common.context import (
    get_backbone_feature,
    get_current_task_ids,
)
from continuallearning.models.pefts.hooks._base_hook import (
    BaseHook,
    BaseTaskAwareHook,
    BaseHookFactory,
)
from continuallearning.registry import HOOK_REGISTRY, HOOK_FACTORY_REGISTRY
from continuallearning.utils.logging import get_logger

logger = get_logger(__name__)


class BypassLoRAHook(BaseHook[BypassLoRAConfig]):
    """
    Hook for applying bypass transformation at output layers.

    This hook retrieves the stored intermediate input and applies a low-rank
    transformation to create a bypass connection to the output.
    """

    @handle_peft_errors(error_type="hook", collect_context=True)
    def __init__(
        self,
        *,
        module: nn.Module,
        hook_config: BypassLoRAConfig,
        source_layer: str,  # e.g., "encoder.layer.0.intermediate.dense_input"
        **kwargs,
    ):
        """Build the low-rank bypass parameters for ``module``.

        Args:
            module: Module whose forward output the bypass is added to.
            hook_config: Configuration providing ``rank``, ``in_features``,
                ``out_features``, ``scaling``, ``dropout``, and
                ``weights_initializer``.
            source_layer: Context key under which the bypass input tensor was
                stored by the corresponding intermediate hook.
            **kwargs: Forwarded to ``BaseHook.__init__``.
        """
        super().__init__(module=module, hook_config=hook_config, **kwargs)

        self.source_layer = source_layer
        self._hook_type = HookType.FORWARD_REPLACE

        # Feature sizes of the low-rank map: in_features -> rank -> out_features.
        self.in_features = self.hook_config.in_features
        self.out_features = self.hook_config.out_features
        # Low-rank factors. lora_A is re-initialized in _init_weights; lora_B
        # stays zero so the bypass contributes nothing at the start of training.
        self.lora_A = nn.Parameter(torch.zeros(self.hook_config.rank, self.in_features))
        self.lora_B = nn.Parameter(
            torch.zeros(self.out_features, self.hook_config.rank)
        )
        self.scaling = self.hook_config.scaling
        # Optional dropout applied to the bypass input (None disables it).
        self.dropout = (
            nn.Dropout(self.hook_config.dropout)
            if self.hook_config.dropout > 0.0
            else None
        )
        self.weights_initializer = self.hook_config.weights_initializer
        self._init_weights()

    def _init_weights(
        self,
    ) -> None:
        """Initialize the low-rank factors.

        ``lora_A`` is initialized according to ``self.weights_initializer``;
        ``lora_B`` is zeroed so the initial bypass output is zero.

        Raises:
            ValueError: If ``self.weights_initializer`` is not one of
                ``"kaiming_uniform_"``, ``"xavier_uniform_"``, or ``"normal_"``.
        """
        if self.weights_initializer == "kaiming_uniform_":
            nn.init.kaiming_uniform_(self.lora_A, a=math.sqrt(5))
        elif self.weights_initializer == "xavier_uniform_":
            nn.init.xavier_uniform_(self.lora_A)
        elif self.weights_initializer == "normal_":
            nn.init.normal_(self.lora_A, mean=0.0, std=1.0)
        else:
            raise ValueError(f"Unknown weights initializer: {self.weights_initializer}")

        nn.init.zeros_(self.lora_B)

    @handle_peft_errors(error_type="forward", collect_context=True)
    @validate_tensor_operation("BypassLoRA forward", expected_dims=None)
    def _forward(self, x: torch.Tensor) -> torch.Tensor:
        """Apply the bypass transformation to the stored intermediate input.

        Note: ``x`` is accepted for interface compatibility but the
        transformation operates on the feature stored under
        ``self.source_layer``, not on ``x``.

        Raises:
            ForwardPassError: If no bypass input was stored for
                ``self.source_layer``.
        """
        # Get stored bypass input
        bypass_input = get_backbone_feature(self.source_layer)

        if bypass_input is None:
            raise ForwardPassError(
                f"No bypass input found for layer {self.source_layer}. "
                "Ensure BypassLoRAIntermediateHook is attached and executed before this hook.",
                adapter_type="BypassLoRA",
            ).add_context(
                source_layer=self.source_layer,
                module_type=type(self.module).__name__,
            )

        # Optional regularization on the bypass path only.
        if self.dropout is not None:
            bypass_input = self.dropout(bypass_input)

        # Low-rank transformation: input @ A^T @ B^T, then scale.
        bypass_output = bypass_input @ self.lora_A.T @ self.lora_B.T
        bypass_output = bypass_output * self.scaling

        return bypass_output

    @handle_peft_errors(error_type="forward", collect_context=True)
    def _hook_forward(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor:
        """Apply bypass and combine with original output.

        Raises:
            ForwardPassError: If the hook has not been attached to a module.
        """
        if not self.is_attached or self._original_forward is None:
            raise ForwardPassError(
                "Hook not properly attached",
                adapter_type="BypassLoRA",
            ).add_context(
                module_type=type(self.module).__name__,
                module_path=getattr(self.module, "_module_path", "unknown"),
            )

        # Get original output
        original_output = self._original_forward(x, *args, **kwargs)

        # Apply bypass transformation
        bypass_output = self._forward(x)

        # Combine additively (residual-style)
        return original_output + bypass_output


# Task-aware wrapper: routes inputs among per-task BypassLoRA hooks.
@HOOK_REGISTRY.register("bypass_lora")
class BypassLoRATaskAwareHook(BaseTaskAwareHook[BypassLoRAHook]):
    """Task-aware wrapper that routes between per-task BypassLoRA hooks."""

    _hook_func = BypassLoRAHook

    @handle_peft_errors(error_type="hook", collect_context=True)
    def __init__(
        self,
        router: RouterInterface,
        combiner: CombinerInterface,
        module: nn.Module,
        hook_config: BypassLoRAConfig,
        source_layer: str,
    ):
        """Store routing components and the bypass source-layer key."""
        super().__init__(
            router=router, combiner=combiner, module=module, hook_config=hook_config
        )
        self.source_layer = source_layer

    def _create_hook(
        self,
    ) -> BypassLoRAHook:
        """Instantiate a per-task BypassLoRAHook bound to this module."""
        return self._hook_func(
            module=self._module,
            hook_config=self.hook_config,
            source_layer=self.source_layer,
        )

    @handle_peft_errors(error_type="forward", collect_context=True)
    def _hook_forward(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor:
        """
        Run the wrapped module and add the task-routed bypass contribution.

        Conceptually this realizes W' = W + BA scaling, where W is the
        original weight matrix and B, A are the low-rank factors.

        Args:
            x: Input tensor.
            *args, **kwargs: Additional arguments for the original layer.

        Returns:
            torch.Tensor: Original output plus the routed bypass output.

        Raises:
            ForwardPassError: If the hook is not properly attached.
        """
        if not self.is_attached or self._original_forward is None:
            raise ForwardPassError(
                "Hook not properly attached",
                adapter_type="BypassLoRA",
            ).add_context(
                module_type=type(self.module).__name__,
                module_path=getattr(self.module, "_module_path", "unknown"),
            )

        active_task_ids = get_current_task_ids()
        base_output = self._original_forward(x, *args, **kwargs)
        routed_output = self.forward(x, *args, task_ids=active_task_ids, **kwargs)

        # Additive combination of backbone output and bypass output.
        return base_output + routed_output

    def process_with_task(
        self, x: torch.Tensor, task_id: int, *args, **kwargs
    ) -> torch.Tensor:
        """Run the single hook registered for ``task_id``.

        Raises:
            ValueError: If no hook is registered for ``task_id``.
        """
        key = str(task_id)
        if key not in self._hooks:
            raise ValueError(f"Task {task_id} is not registered")

        # A single task bypasses the router/combiner entirely.
        return self._hooks[key].forward(x, *args, **kwargs)

    def process_with_tasks(
        self, x: torch.Tensor, task_ids: List[int], *args, **kwargs
    ) -> torch.Tensor:
        """Route among several tasks' hooks and combine their outputs.

        Raises:
            ForwardPassError: If the stored bypass input for
                ``self.source_layer`` is missing.
        """
        # Routing is conditioned on the stored bypass feature, not on x.
        routing_input = get_backbone_feature(self.source_layer)
        if routing_input is None:
            raise ForwardPassError(
                f"No bypass input found for layer {self.source_layer}. "
                "Ensure BypassLoRAIntermediateHook is attached and executed before this hook.",
                adapter_type="BypassLoRATaskAwareHook",
            ).add_context(
                source_layer=self.source_layer,
                module_type=type(self.module).__name__,
            )

        routing_weights = self.router.route(
            routing_input, *args, task_ids=task_ids, **kwargs
        )

        # Evaluate only the hooks that exist for the requested tasks,
        # in deterministic (sorted) task order.
        per_task_outputs = {
            tid: self._hooks[str(tid)].forward(x, *args, **kwargs)
            for tid in sorted(task_ids)
            if str(tid) in self._hooks
        }

        combined = self.combiner.combine(
            per_task_outputs, routing_weights, *args, task_ids=task_ids, **kwargs
        )
        return combined["combiner_output"]


@HOOK_FACTORY_REGISTRY.register("bypass_lora")
class BypassLoRAHookFactory(BaseHookFactory[BypassLoRATaskAwareHook, BypassLoRAConfig]):
    """Factory for creating BypassLoRA hooks."""

    # Default router configuration. Copied per-instance in __init__ so that a
    # caller mutating its factory's config cannot affect other factories.
    _DEFAULT_ROUTER_CONFIG: Dict[str, Any] = {
        "num_experts": 1,
        "embed_dim": 768,
        "routing_strategy": "soft",
        "weights_initializer": "kaiming_uniform_",
        "top_k": -1,
        "temperature": 1.0,
        "noise_std": 0.0,
        "load_balancing_loss_weight": -1.0,
        "track_routing_history": False,
    }

    @handle_peft_errors(error_type="config")
    def __init__(
        self,
        hook_name: str = "bypass_lora",
        router_name: str = "task_inter_router",
        combiner_name: str = "task_inter_weightedsum",
        hook_config: Optional[Union[Dict[str, Any], BypassLoRAConfig]] = None,
        router_config: Optional[Dict[str, Any]] = None,
        combiner_config: Optional[Dict[str, Any]] = None,
    ):
        """Configure the factory.

        Args:
            hook_name: Registry name of the hook to create.
            router_name: Registry name of the router class.
            combiner_name: Registry name of the combiner class.
            hook_config: Hook configuration; defaults to a fresh
                ``BypassLoRAConfig()``.
            router_config: Router constructor kwargs; defaults to a copy of
                ``_DEFAULT_ROUTER_CONFIG``.
            combiner_config: Combiner constructor kwargs; defaults to ``{}``.
        """
        # None sentinels avoid the shared-mutable-default pitfall: the
        # previous defaults ({} / BypassLoRAConfig()) were created once at
        # import time and shared by every factory instance.
        if hook_config is None:
            hook_config = BypassLoRAConfig()
        if router_config is None:
            router_config = dict(self._DEFAULT_ROUTER_CONFIG)
        if combiner_config is None:
            combiner_config = {}

        super().__init__(
            hook_name=hook_name,
            router_name=router_name,
            combiner_name=combiner_name,
            hook_config=hook_config,
            router_config=router_config,
            combiner_config=combiner_config,
        )

    def create_hook_instance(
        self,
        module: nn.Module,
        router: RouterInterface,
        combiner: CombinerInterface,
        config: BypassLoRAConfig,
        source_layer: str,
    ) -> BypassLoRATaskAwareHook:
        """Create hook with source feature mapping."""

        return BypassLoRATaskAwareHook(
            router=router,
            combiner=combiner,
            module=module,
            hook_config=config,
            source_layer=source_layer,
        )

    def __call__(self, module_path: str, module: nn.Module) -> BypassLoRATaskAwareHook:
        """
        Create a hook for a specific module.

        Args:
            module_path: Dotted path of the module inside the backbone; also
                used as the bypass source-layer key.
            module: Module the hook will be attached to.
        """
        # Create router and combiner instances
        router = self.router_cls(**self.router_config)
        combiner = self.combiner_cls(**self.combiner_config)

        # Create hook using the abstract method
        hook = self.create_hook_instance(
            module=module,
            router=router,
            combiner=combiner,
            config=self.hook_config,
            source_layer=module_path,  # Use module path as source layer
        )

        logger.debug(f"Created hook for module at path: {module_path}")
        return hook
