"""
Parameter-Efficient Fine-Tuning (PEFT) methods for continual learning.

This package provides implementations of various parameter-efficient fine-tuning
methods for adapting pre-trained models to continual learning scenarios.
"""

# Import base classes
from continuallearning.models.pefts.hooks._base_hook import (
    BaseHook,
    BaseTaskAwareHook,
    BaseHookFactory,
)

# Import LoRA components
from continuallearning.models.pefts.hooks.lora_hooks import (
    LoRALayerHook,
    LoRATaskAwareHook,
    LoRAHookFactory,
)

# Import unified hook manager
from continuallearning.models.pefts.hook_managers import (
    UnifiedHookManager,
    HookManagerFactory,
    create_hook_manager,
)

from continuallearning.models.pefts.routers import (
    BaseRouter,
    TaskInterRouter,
    TaskSpecificRouter,
    TaskInterWeightedSumCombiner,
)

# Import configuration classes
from continuallearning.models.pefts.common.config import (
    BasePEFTConfig,
    LoRAConfig,
    BypassLoRAConfig,
    PromptConfig,
    L2pConfig,
)


# Public API of the PEFT package: one group of names per subpackage,
# mirroring the import groups above. `from ... import *` and API docs
# see exactly these names.
__all__ = [
    # Hook base classes
    *("BaseHook", "BaseTaskAwareHook", "BaseHookFactory"),
    # LoRA hook implementations
    *("LoRALayerHook", "LoRATaskAwareHook", "LoRAHookFactory"),
    # Unified hook management
    *("UnifiedHookManager", "HookManagerFactory", "create_hook_manager"),
    # Task routers and combiners
    *(
        "BaseRouter",
        "TaskInterRouter",
        "TaskSpecificRouter",
        "TaskInterWeightedSumCombiner",
    ),
    # PEFT configuration dataclasses
    *(
        "BasePEFTConfig",
        "LoRAConfig",
        "BypassLoRAConfig",
        "PromptConfig",
        "L2pConfig",
    ),
]
