"""
Bypass Low-Rank Adaptation (LoRA) implementation for intermediate to output connections.

This module implements a specialized LoRA variant that creates bypass connections between
intermediate and output layers within Transformer blocks, taking the input from the
intermediate layer and adding LoRA-transformed results to the output layer's output.
This approach may offer advantages over standard LoRA in certain scenarios.
"""

from typing import Dict, List, Optional, Union, Any, Tuple
import math

import torch
import torch.nn as nn
import torch.nn.functional as F

from continuallearning.models.pefts.hooks._base_hook import (
    BaseHook,
    HookType,
    HookAttachError,
)
from continuallearning.utils.logging import get_logger
from continuallearning.models.pefts.common.utils.utils import is_linear_module
from continuallearning.models.pefts.hooks.hook_manager.hook_manager import (
    UnifiedHookManager,
)
from continuallearning.models.pefts.common.config.config import BypassLoRAConfig
from continuallearning.models.pefts.common.config.config_factory import (
    create_peft_config,
)
from continuallearning.interfaces import HookFactoryInterface, ModuleFinder
from continuallearning.registry import HOOK_FACTORY_REGISTRY, HOOK_MANAGER_REGISTRY

# Use custom logging system
logger = get_logger(__name__)


class IntermediateInputCapturingHook(BaseHook):
    """
    Forward-pre hook that records the input fed to an intermediate layer.

    The intermediate layer's behavior is left untouched; the hook merely
    stores the most recent input tensor so that a paired output hook can
    read it later.

    Args:
        module: The intermediate module to observe
    """

    def __init__(self, module: nn.Module):
        super().__init__(module=module, hook_type=HookType.FORWARD_PRE)
        # Most recently observed input tensor; None until the first forward pass.
        self.latest_input = None

    def _pre_forward_hook(
        self, module: nn.Module, inputs: Tuple[torch.Tensor]
    ) -> Union[None, Tuple[torch.Tensor]]:
        """Record the intermediate layer's input without altering it."""
        # Positional args arrive as a tuple; the tensor of interest is first.
        captured = inputs
        if isinstance(inputs, tuple) and inputs:
            captured = inputs[0]
        self.latest_input = captured

        # Returning None leaves the original inputs unchanged.
        return None

    def _hook_forward(self, *args, **kwargs) -> torch.Tensor:
        """
        Stub required by the abstract base class.

        FORWARD_PRE hooks never invoke this method, so reaching it indicates
        a misuse of the hook.

        Raises:
            NotImplementedError: Always — this hook never replaces forward
        """
        raise NotImplementedError(
            "IntermediateInputCapturingHook is a FORWARD_PRE type hook and does not replace the forward method"
        )

    def get_latest_input(self) -> Optional[torch.Tensor]:
        """Return the most recently captured input, or None if nothing was seen yet."""
        return self.latest_input


class OutputLoRABypassHook(BaseHook):
    """
    Hook to apply LoRA bypass to the output layer's output.

    This hook takes the input from the intermediate module, applies the LoRA
    transformation (``B @ A @ x`` scaled by ``alpha / rank``), and adds it to
    the output layer's output.

    Args:
        module: The output module to hook (must be a linear module)
        intermediate_hook: The hook capturing the input to the intermediate layer
        config: BypassLoRA configuration (rank, alpha, dropout)

    Raises:
        HookAttachError: If the output module is not linear, or the
            intermediate module exposes no ``weight`` to derive dimensions from
    """

    def __init__(
        self,
        module: nn.Module,
        intermediate_hook: IntermediateInputCapturingHook,
        config: "BypassLoRAConfig",
    ):
        super().__init__(module=module, hook_type=HookType.FORWARD_POST)

        self.output_module = module
        self.intermediate_hook = intermediate_hook
        self.config = config

        # Validate module type
        if not is_linear_module(module):
            raise HookAttachError(
                f"Output module {type(module).__name__} is not a linear module, cannot apply LoRA"
            )

        # Get dimension information from the paired intermediate module
        intermediate_module = intermediate_hook.module
        if not hasattr(intermediate_module, "weight"):
            raise HookAttachError("Intermediate module has no weight attribute")

        # The bypass maps from the intermediate layer's input space to the
        # output layer's output space.
        self.in_features = intermediate_module.weight.shape[1]
        self.out_features = module.weight.shape[0]

        # Fix: allocate the LoRA matrices on the same device and dtype as the
        # hooked module's weight. Default CPU/float32 parameters would make
        # F.linear fail (and silently disable the bypass) for models placed on
        # GPU or running in reduced precision.
        device = module.weight.device
        dtype = module.weight.dtype
        self.lora_A = nn.Parameter(
            torch.zeros(self.config.rank, self.in_features, device=device, dtype=dtype)
        )
        self.lora_B = nn.Parameter(
            torch.zeros(self.out_features, self.config.rank, device=device, dtype=dtype)
        )

        # Optional dropout applied to the captured intermediate input
        self.dropout = (
            nn.Dropout(self.config.dropout) if self.config.dropout > 0.0 else None
        )

        # Standard LoRA scaling: alpha / rank
        self.scaling = self.config.alpha / self.config.rank

        # Initialize parameters (A: Kaiming, B: zeros -> identity at start)
        self._reset_parameters()

    def _reset_parameters(self) -> None:
        """Initialize LoRA matrix parameters.

        A gets Kaiming-uniform init and B is zeroed, so the bypass contributes
        nothing until training updates B — the model starts unchanged.
        """
        nn.init.kaiming_uniform_(self.lora_A, a=math.sqrt(5))
        nn.init.zeros_(self.lora_B)

    def _post_forward_hook(
        self, module: nn.Module, inputs: Tuple[torch.Tensor], output: torch.Tensor
    ) -> torch.Tensor:
        """Apply LoRA bypass to the output layer's output.

        Returns the original output unchanged when no intermediate input has
        been captured yet or when applying the bypass fails.
        """
        # Get the input captured at the intermediate layer
        x = self.intermediate_hook.get_latest_input()

        if x is None:
            logger.warning("No intermediate input captured, returning original output")
            return output

        try:
            if self.dropout is not None:
                x = self.dropout(x)

            # Compute LoRA contribution: (B @ A) x, scaled by alpha / rank
            lora_weight = self.lora_B @ self.lora_A
            lora_output = F.linear(x, lora_weight) * self.scaling

            # Add LoRA output to the original output
            return output + lora_output

        except Exception as e:
            # logger.exception records the traceback, which the previous
            # logger.error call dropped — aids debugging without changing flow.
            logger.exception(f"Error applying LoRA bypass: {str(e)}")
            # Best-effort: fall back to the original output on failure
            return output

    def _hook_forward(self, *args, **kwargs) -> torch.Tensor:
        """
        Placeholder method to satisfy the abstract method requirement.

        For FORWARD_POST type hooks, this method will not be called,
        but it must be implemented to satisfy the abstract base class.

        Raises:
            NotImplementedError: This method should not be called
        """
        raise NotImplementedError(
            "OutputLoRABypassHook is a FORWARD_POST type hook and does not replace the forward method"
        )

    def get_parameters(self) -> List[nn.Parameter]:
        """Return the trainable LoRA parameters [A, B]."""
        return [self.lora_A, self.lora_B]


@HOOK_FACTORY_REGISTRY.register("bypass_lora")
class BypassLoRAHookFactory(HookFactoryInterface):
    """
    Factory class for creating bypass LoRA hooks.

    Bypass LoRA wires intermediate layers to output layers, so hooks cannot be
    created one module at a time. This factory therefore only carries the
    configuration and module patterns; the BypassLoRAHookManager performs the
    actual pairing and hook creation.

    Args:
        **kwargs: Configuration parameters, including rank, alpha, dropout, etc.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        # Resolve the configuration: accept a ready-made BypassLoRAConfig,
        # otherwise build one from the keyword arguments.
        supplied_config = kwargs.get("config")
        if isinstance(supplied_config, BypassLoRAConfig):
            self.bypass_lora_config = supplied_config
        else:
            # Use "bypass_lora" instead of "lora" to ensure correct type
            self.bypass_lora_config = create_peft_config("bypass_lora", kwargs)

        # Module matching patterns; empty lists fall back to the config's defaults.
        self.intermediate_patterns = (
            kwargs.get("intermediate_patterns", [])
            or self.bypass_lora_config.intermediate_modules
        )
        self.output_patterns = (
            kwargs.get("output_patterns", [])
            or self.bypass_lora_config.output_modules
        )

    def __call__(self, module_path: str, module: nn.Module) -> BaseHook:
        """
        Create a hook for the given module.

        Bypass LoRA hooks come in intermediate/output pairs and must be built
        by the BypassLoRAHookManager; per-module creation is unsupported. This
        method exists only for HookFactoryInterface compatibility.

        Args:
            module_path: The path of the module within the model
            module: The module to hook

        Returns:
            BaseHook: Never returns; always raises

        Raises:
            NotImplementedError: Always, directing callers to BypassLoRAHookManager
        """
        logger.warning(
            "BypassLoRAHookFactory.__call__ should not be directly invoked. "
            "Use BypassLoRAHookManager instead."
        )
        raise NotImplementedError(
            "BypassLoRA hooks cannot be created individually through the factory. "
            "Please use BypassLoRAHookManager with appropriate patterns."
        )

    @property
    def config(self) -> BypassLoRAConfig:
        """
        Get the Bypass LoRA configuration.

        Returns:
            BypassLoRAConfig: The BypassLoRA configuration used by this factory
        """
        return self.bypass_lora_config

    def get_patterns(self) -> Tuple[List[str], List[str]]:
        """
        Get the patterns for identifying intermediate and output modules.

        Returns:
            Tuple[List[str], List[str]]: A tuple of (intermediate patterns, output patterns)
        """
        return self.intermediate_patterns, self.output_patterns


@HOOK_MANAGER_REGISTRY.register("bypass_lora")
class BypassLoRAHookManager(UnifiedHookManager):
    """
    Hook manager for managing intermediate-to-output bypass LoRA connections.

    This manager pairs intermediate and output layers within Transformer
    blocks (matched by a shared layer index in their module paths) and
    attaches one capture hook plus one LoRA bypass hook per pair.

    Args:
        model: The model to apply hooks to
        intermediate_patterns: List of patterns to identify intermediate layers
        output_patterns: List of patterns to identify output layers
        hook_factory: Optional hook factory or configuration
        config: BypassLoRA configuration
    """

    def __init__(
        self,
        model: nn.Module,
        intermediate_patterns: Optional[List[str]] = None,
        output_patterns: Optional[List[str]] = None,
        hook_factory: Optional[Union[str, HookFactoryInterface, Dict[str, Any]]] = None,
        config: Optional[Union[Dict[str, Any], "BypassLoRAConfig"]] = None,
    ):
        # Create or validate BypassLoRA configuration
        if config is None:
            bypass_config = create_peft_config("bypass_lora", {})
        elif isinstance(config, dict):
            bypass_config = create_peft_config("bypass_lora", config)
        elif isinstance(config, BypassLoRAConfig):
            bypass_config = config
        else:
            # Convert other config types to BypassLoRAConfig
            bypass_config = create_peft_config("bypass_lora", config.to_dict())

        # Set default patterns from config if not provided
        if intermediate_patterns is None:
            intermediate_patterns = bypass_config.intermediate_modules
        if output_patterns is None:
            output_patterns = bypass_config.output_modules

        # Create hook factory if not provided
        if hook_factory is None:
            hook_factory = BypassLoRAHookFactory(
                config=bypass_config,
                intermediate_patterns=intermediate_patterns,
                output_patterns=output_patterns,
            )

        # Initialize base class with empty config to avoid conflicts
        # The actual module finding is handled in _initialize_resources
        super().__init__(model, hook_factory, {})

        # Store patterns and configuration
        self.bypass_config = bypass_config
        self.intermediate_patterns = intermediate_patterns
        self.output_patterns = output_patterns

        # Pairing information: list of (i_path, i_module, o_path, o_module)
        self.intermediate_to_output_pairs = []

        # Initialization state
        self._initialized = False
        self._is_attached = False

    def _initialize_resources(self) -> None:
        """Identify intermediate and output layer pairs in the model.

        Modules are matched by name pattern, filtered to linear modules, and
        paired when their paths share the same layer index component
        (e.g. both under ``...layer.3...``). The result is cached in
        ``self.intermediate_to_output_pairs``; subsequent calls are no-ops.
        """
        if self._initialized:
            return

        finder = ModuleFinder(self.model)

        # Find all intermediate and output modules matching the patterns
        intermediate_modules = {}
        for pattern in self.intermediate_patterns:
            intermediate_modules.update(finder.find_by_name_pattern([pattern]))

        output_modules = {}
        for pattern in self.output_patterns:
            output_modules.update(finder.find_by_name_pattern([pattern]))

        logger.info(
            f"Found {len(intermediate_modules)} intermediate modules and {len(output_modules)} output modules"
        )

        # Pair intermediate and output layers
        matched_pairs = []

        for i_path, i_module in intermediate_modules.items():
            if not is_linear_module(i_module):
                logger.debug(f"Skipping non-linear intermediate module: {i_path}")
                continue

            # Extract layer ID (usually in the format "layer.N" or similar):
            # the first path component that is a number or "layer<N>"
            layer_parts = i_path.split(".")
            layer_id = None
            layer_idx = None

            for idx, part in enumerate(layer_parts):
                if part.isdigit() or (part.startswith("layer") and part[5:].isdigit()):
                    layer_id = part
                    layer_idx = idx
                    break

            if layer_id is None:
                logger.debug(f"Could not identify layer ID in path: {i_path}")
                continue

            # Find the first output module in the same layer
            found_match = False
            for o_path, o_module in output_modules.items():
                if not is_linear_module(o_module):
                    continue

                o_parts = o_path.split(".")

                # Same layer <=> same component at the same path position
                assert layer_idx is not None
                if len(o_parts) > layer_idx and o_parts[layer_idx] == layer_id:
                    matched_pairs.append((i_path, i_module, o_path, o_module))
                    logger.info(f"Matched pair: {i_path} -> {o_path}")
                    found_match = True
                    break

            if not found_match:
                logger.debug(f"No matching output module for: {i_path}")

        self.intermediate_to_output_pairs = matched_pairs
        logger.info(
            f"Found {len(matched_pairs)} intermediate-output pairs for bypass LoRA"
        )

        self._initialized = True

    def add_hooks(self) -> None:
        """Add intermediate-to-output bypass hooks for every matched pair.

        Failures on one pair are logged and do not stop the remaining pairs.
        If a pair fails after its intermediate hook was attached, that hook is
        detached again so no orphaned hook remains on the model.
        """
        if not self._initialized:
            self._initialize_resources()

        if not self.intermediate_to_output_pairs:
            logger.warning(
                "No intermediate-output pairs found, cannot add bypass hooks"
            )
            return

        logger.info(
            f"Adding bypass LoRA hooks to {len(self.intermediate_to_output_pairs)} pairs"
        )

        # Add hooks for each intermediate-output module pair
        for i_path, i_module, o_path, o_module in self.intermediate_to_output_pairs:
            intermediate_hook = None
            try:
                # Create input capturing hook for intermediate module
                intermediate_hook = IntermediateInputCapturingHook(i_module)
                intermediate_hook.attach()

                # Create LoRA bypass hook for output module
                output_hook = OutputLoRABypassHook(
                    o_module, intermediate_hook, self.bypass_config
                )
                output_hook.attach()

                # Save hooks under "<pair>:intermediate" / "<pair>:output" keys
                pair_id = f"{i_path}->{o_path}"
                self._hooks[f"{pair_id}:intermediate"] = intermediate_hook
                self._hooks[f"{pair_id}:output"] = output_hook

                logger.info(
                    f"✓ Successfully attached bypass LoRA: {i_path} -> {o_path}"
                )

            except Exception as e:
                logger.error(
                    f"Failed to add bypass LoRA between {i_path} and {o_path}: {e}"
                )
                # Fix: roll back a partially-attached pair. Previously, when
                # the output hook failed after the intermediate hook attached,
                # the intermediate hook was left on the model and was never
                # recorded in self._hooks, so remove_hooks() could not detach it.
                if intermediate_hook is not None:
                    try:
                        intermediate_hook.detach()
                    except Exception as detach_error:
                        logger.error(
                            f"Failed to roll back intermediate hook for {i_path}: {detach_error}"
                        )

        if self._hooks:
            self._is_attached = True
            logger.info(
                f"Successfully attached {len(self._hooks) // 2} bypass LoRA connections"
            )
        else:
            logger.warning("No hooks were successfully attached")

    def remove_hooks(self) -> None:
        """Remove all hooks attached by this manager.

        Output hooks are detached before intermediate hooks because each
        output hook holds a reference to its intermediate counterpart.
        """
        if not self._is_attached:
            return

        # Remove output hooks first, then intermediate hooks to avoid reference issues
        output_hooks = [h for k, h in self._hooks.items() if k.endswith(":output")]
        intermediate_hooks = [
            h for k, h in self._hooks.items() if k.endswith(":intermediate")
        ]

        # Record the number of connections before clearing hooks to ensure correct logging
        num_connections = len(self._hooks) // 2

        # Detach output hooks first
        for hook in output_hooks:
            try:
                hook.detach()
            except Exception as e:
                logger.error(f"Error detaching output hook: {e}")

        # Detach intermediate hooks next
        for hook in intermediate_hooks:
            try:
                hook.detach()
            except Exception as e:
                logger.error(f"Error detaching intermediate hook: {e}")

        # Clear hooks dictionary
        self._hooks.clear()
        self._is_attached = False
        logger.info(f"Removed {num_connections} bypass LoRA connections")

    def get_trainable_parameters(self) -> List[nn.Parameter]:
        """Get all trainable LoRA parameters (from output-side hooks only)."""
        params = []
        for key, hook in self._hooks.items():
            if key.endswith(":output"):  # Only need LoRA parameters from output layers
                params.extend(hook.get_parameters())
        return params
