import torch.nn as nn
from continuallearning.models.pefts.hook_managers import (
    create_hook_manager,
    UnifiedHookManager,
)
from continuallearning.models.pefts.modules._base_module import BaseHookAdapter
from continuallearning.registry import HOOK_ADAPTER_REGISTRY
from continuallearning.utils.logging import get_logger

from continuallearning.models.backbones.base import BaseBackbone

from continuallearning.models.pefts.common.config import (
    ContextManagerConfig,
    HookManagerConfig,
)
from typing import Dict, Any, Optional

logger = get_logger(__name__)


@HOOK_ADAPTER_REGISTRY.register("l2p")
class HookBasedL2pAdapter(BaseHookAdapter):
    """Hook-based L2P (Learning-to-Prompt) adapter, registered as ``"l2p"``.

    Thin specialization of :class:`BaseHookAdapter` that (a) supplies a
    default context-hook configuration targeting transformer encoder layers
    when the caller does not provide one, and (b) builds its hook manager via
    ``create_hook_manager`` with ``manager_type="unified"``.
    """

    # Enables feature collection in the base adapter.
    # NOTE(review): exact semantics are defined in BaseHookAdapter — confirm there.
    _collect_features: bool = True

    def __init__(
        self,
        backbone: BaseBackbone,
        hook_manager_config: dict[str, Any] | HookManagerConfig,
        context_hook_manager_config: ContextManagerConfig | dict[str, Any] | None = None,
    ):
        """Initialize the adapter around ``backbone``.

        Args:
            backbone: Model backbone the hooks will be attached to.
            hook_manager_config: Configuration for the unified hook manager,
                either as a plain dict or an already-built ``HookManagerConfig``.
            context_hook_manager_config: Configuration for the context hooks.
                If ``None``, a default is built that extracts the *outputs*
                (not inputs) of every ``encoder.layer.<N>`` module.
        """
        if context_hook_manager_config is None:
            context_hook_manager_config = ContextManagerConfig.from_dict(
                {
                    "discovery": {
                        "target_patterns": [
                            # Match every encoder layer (encoder.layer.0, encoder.layer.1, ...).
                            # NOTE(review): pattern is unanchored — if discovery uses
                            # re.search it will also match submodules such as
                            # "encoder.layer.0.intermediate"; confirm the matching mode.
                            r"encoder\.layer\.\d+",
                        ],
                    },
                    "extract_output": True,
                    "extract_input": False,
                }
            )

        super().__init__(backbone, hook_manager_config, context_hook_manager_config)

    def _create_hook_manager(self, model: nn.Module) -> UnifiedHookManager:
        """Build the unified hook manager for ``model`` from the stored config."""
        return create_hook_manager(
            model, self._hook_manager_config, manager_type="unified"
        )
