"""
Event System Interface for Continual Learning

This interface defines methods for event-driven programming in continual learning,
allowing for decoupled communication between different components.
"""

import logging
from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from enum import Enum
from typing import Any, Dict, List, Optional, Set


class EventType(Enum):
    """Training event types following PyTorch Lightning hooks naming convention.

    Events are triggered in the following order during continual learning:

    1. **Training Initialization**:
       - ON_FIT_START: Called at the beginning of the entire training process
       - ON_TRAIN_START: Called when training phase begins (after validation sanity check)

    2. **Task-Level Lifecycle** (repeated for each task):
       - ON_BEFORE_NEW_TASK: Called before starting a new incremental task
       - ON_TASK_START: Called when a specific task begins
       - ON_BEFORE_TASK_TRAINING: Called just before training on the current task

       3. **Epoch-Level Lifecycle** (repeated for each epoch within a task):
          - ON_TRAIN_EPOCH_START: Called at the beginning of each training epoch

          4. **Batch-Level Lifecycle** (repeated for each batch within an epoch):
             - ON_TRAIN_BATCH_START: Called before processing each training batch
             - ON_AFTER_LOSS_COMPUTATION: Called after loss computation but before backprop
             - ON_HIGH_LOSS_DETECTED: Called when unusually high loss is detected
             - ON_TRAIN_BATCH_END: Called after processing each training batch

          - ON_TRAIN_EPOCH_END: Called at the end of each training epoch

          5. **Validation Lifecycle** (optional, per epoch or per task):
             - ON_VALIDATION_START: Called when validation phase begins
             - ON_VALIDATION_EPOCH_START: Called at the beginning of validation epoch
             - ON_VALIDATION_BATCH_START: Called before each validation batch
             - ON_VALIDATION_BATCH_END: Called after each validation batch
             - ON_VALIDATION_EPOCH_END: Called at the end of validation epoch
             - ON_VALIDATION_END: Called when validation phase ends

       - ON_AFTER_TASK_TRAINING: Called after training on the current task is complete
       - ON_AFTER_MEMORY_UPDATE: Called after updating episodic memory/replay buffer
       - ON_TASK_END: Called when a specific task ends

    6. **Testing/Inference Lifecycle**:
       - ON_BEFORE_INFERENCE: Called before inference/testing begins
       - ON_TEST_START: Called when testing phase begins
       - ON_TEST_EPOCH_START: Called at the beginning of test epoch
       - ON_TEST_BATCH_START: Called before each test batch
       - ON_TEST_BATCH_END: Called after each test batch
       - ON_TEST_EPOCH_END: Called at the end of test epoch
       - ON_TEST_END: Called when testing phase ends
       - ON_AFTER_INFERENCE: Called after inference/testing is complete

    7. **Training Completion**:
       - ON_TRAIN_END: Called when training phase ends
       - ON_FIT_END: Called at the end of the entire training process

    8. **Model State Management** (can occur at various points):
       - ON_MODEL_UPDATE: Called when model parameters are updated
       - ON_MODEL_SAVE: Called when model checkpoint is saved
       - ON_MODEL_LOAD: Called when model checkpoint is loaded

    Note:
        - Events follow PyTorch Lightning's naming convention with 'on_' prefix
        - Continual learning specific events (task-level) are integrated with standard training events
        - Some events may not be triggered depending on the training configuration
        - Event handlers can access training context through the event system
    """

    # =====================================================================
    # 1. Overall Training Lifecycle Events
    # =====================================================================
    ON_FIT_START = "on_fit_start"                  # Entire training process begins (typically fired at the start of the incremental_train method)
    ON_FIT_END = "on_fit_end"                      # Entire training process ends (typically fired at the end of the incremental_train method)

    # =====================================================================
    # 2. Task-level Events - the core events in continual learning
    # =====================================================================
    ON_BEFORE_NEW_TASK = "on_before_new_task"      # Before starting a new incremental task (typically fired at the start of the before_task method)
    ON_TASK_START = "on_task_start"                # A specific task begins (typically fired at the end of the before_task method)
    ON_BEFORE_TASK_TRAINING = "on_before_task_training"  # Before training on the current task starts (typically at the start of _train_task)
    ON_AFTER_TASK_TRAINING = "on_after_task_training"    # After training on the current task completes (typically in the after_train method)
    ON_AFTER_MEMORY_UPDATE = "on_after_memory_update"     # After updating the memory buffer (e.g. after experience replay)
    ON_TASK_END = "on_task_end"                    # A specific task ends (typically at the end of the after_task method)

    # =====================================================================
    # 3. Training Phase Events
    # =====================================================================
    ON_TRAIN_START = "on_train_start"              # Training phase begins (typically at the start of the _train method)
    ON_TRAIN_END = "on_train_end"                  # Training phase ends (typically at the end of the _train method)

    # ---------------------------------------------------------------------
    # 3.1 Epoch-level Training Events
    # ---------------------------------------------------------------------
    ON_TRAIN_EPOCH_START = "on_train_epoch_start"  # Start of each training epoch (inside the loop in _train_func)
    ON_TRAIN_EPOCH_END = "on_train_epoch_end"      # End of each training epoch

    # ---------------------------------------------------------------------
    # 3.2 Batch-level Training Events
    # ---------------------------------------------------------------------
    ON_TRAIN_BATCH_START = "on_train_batch_start"  # Before each training batch is processed (in the _train_batch method)
    ON_AFTER_LOSS_COMPUTATION = "on_after_loss_computation"  # After loss computation, before backpropagation
    ON_HIGH_LOSS_DETECTED = "on_high_loss_detected"  # An abnormally high loss was detected (useful for debugging)
    ON_TRAIN_BATCH_END = "on_train_batch_end"      # After each training batch is processed

    # =====================================================================
    # 4. Validation Phase Events
    # =====================================================================
    ON_VALIDATION_START = "on_validation_start"    # Validation phase begins
    ON_VALIDATION_EPOCH_START = "on_validation_epoch_start"  # Validation epoch begins
    ON_VALIDATION_BATCH_START = "on_validation_batch_start"  # Validation batch begins
    ON_VALIDATION_BATCH_END = "on_validation_batch_end"      # Validation batch ends
    ON_VALIDATION_EPOCH_END = "on_validation_epoch_end"      # Validation epoch ends
    ON_VALIDATION_END = "on_validation_end"        # Validation phase ends

    # =====================================================================
    # 5. Testing/Inference Events
    # =====================================================================
    ON_BEFORE_INFERENCE = "on_before_inference"    # Before inference begins
    ON_TEST_START = "on_test_start"                # Testing phase begins
    ON_TEST_EPOCH_START = "on_test_epoch_start"    # Test epoch begins
    ON_TEST_BATCH_START = "on_test_batch_start"    # Test batch begins
    ON_TEST_BATCH_END = "on_test_batch_end"        # Test batch ends
    ON_TEST_EPOCH_END = "on_test_epoch_end"        # Test epoch ends
    ON_TEST_END = "on_test_end"                    # Testing phase ends
    ON_AFTER_INFERENCE = "on_after_inference"      # After inference completes

    # =====================================================================
    # 6. Model State Events - may fire at any stage
    # =====================================================================
    ON_MODEL_UPDATE = "on_model_update"            # Model parameters were updated
    ON_MODEL_SAVE = "on_model_save"                # A model checkpoint was saved
    ON_MODEL_LOAD = "on_model_load"                # A model checkpoint was loaded

    # =====================================================================
    # 7. Method-specific Events - mapped to concrete method names
    # =====================================================================
    # 7.1 Events around the before_train method
    ON_BEFORE_TRAIN_START = "on_before_train_start"  # The before_train method starts
    ON_BEFORE_TRAIN_END = "on_before_train_end"      # The before_train method ends

    # 7.2 Events around the after_train method
    ON_AFTER_TRAIN_START = "on_after_train_start"    # The after_train method starts
    ON_AFTER_TRAIN_END = "on_after_train_end"        # The after_train method ends

    # 7.3 Events around the before_task method (complement ON_BEFORE_NEW_TASK and ON_TASK_START)
    ON_BEFORE_TASK_START = "on_before_task_start"    # Execution inside the before_task method starts
    ON_BEFORE_TASK_END = "on_before_task_end"        # Execution inside the before_task method ends

    # 7.4 Events around the after_task method (complement ON_TASK_END)
    ON_AFTER_TASK_START = "on_after_task_start"      # The after_task method starts
    ON_AFTER_TASK_END = "on_after_task_end"          # The after_task method ends

    # 7.5 Events around the _train_task method (complement ON_BEFORE_TASK_TRAINING and ON_AFTER_TASK_TRAINING)
    ON_TRAIN_TASK_START = "on_train_task_start"      # The _train_task method starts
    ON_TRAIN_TASK_END = "on_train_task_end"          # The _train_task method ends

    # 7.6 Events around the _train_func method
    ON_TRAIN_FUNC_START = "on_train_func_start"      # The _train_func method starts
    ON_TRAIN_FUNC_END = "on_train_func_end"          # The _train_func method ends


class EventPriority(Enum):
    """
    Event handler priority levels.

    The names establish the intended ordering: lower numeric values denote
    higher priority (HIGHEST = 0 through LOWEST = 4). How the ordering is
    applied is left to the consuming EventManager implementation; this enum
    only defines the scale.

    NOTE(review): five levels may be over-engineered -- most systems get by
    with three (HIGH, NORMAL, LOW). Consider simplifying if HIGHEST and
    LOWEST turn out to be rarely used.
    """

    HIGHEST = 0
    HIGH = 1
    NORMAL = 2
    LOW = 3
    LOWEST = 4  # Potentially redundant granularity; see the class NOTE.


@dataclass
class EventContext:
    """
    Context information passed to event handlers during training events.

    This class serves as a standardized data container that carries information
    from event emitters to event handlers. It includes both structured fields
    for common training metadata and flexible key-value storage for custom data.

    The class supports both direct learner references and lazy loading through
    weak references to reduce memory overhead and avoid circular references.

    Attributes:
        event: The type of event that triggered this context
        learner: Reference to the learner instance that emitted the event
                (can be direct reference or lazy-loaded via weak reference)
        epoch: Current training epoch (None if not applicable)
        batch_idx: Current batch index within the epoch (None if not applicable)
        task_id: Current task ID in continual learning (None if not applicable)
        metrics: Dictionary storing training metrics (e.g., accuracy, loss)
        data: General purpose key-value store for additional context data
        method_name: Name of the emitting method, if any
        method_args: Positional-argument metadata of the emitting method
        method_kwargs: Keyword-argument metadata of the emitting method
        result: Result of the event handler, if any
        _learner_ref: Weak reference to learner (internal use only)
        _blocked_attrs: Set of attribute names that should not be accessed directly
        _monitored_attrs: Set of attribute names to log when accessed

    Example:
        >>> context = EventContext(
        ...     event=EventType.ON_TRAIN_BATCH_END,
        ...     learner=trainer,
        ...     epoch=10,
        ...     batch_idx=25,
        ...     task_id=3
        ... )
        >>> context.set('learning_rate', 0.001)
        >>> context.update_metrics(batch_loss=0.45, batch_accuracy=0.87)
        >>>
        >>> # Access data
        >>> lr = context.get('learning_rate')
        >>> accuracy = context.metrics['batch_accuracy']
        >>>
        >>> # Create optimized context with lazy loading
        >>> from weakref import ref
        >>> optimized_context = EventContext.create_optimized(
        ...     event=EventType.ON_TRAIN_BATCH_END,
        ...     learner_ref=ref(trainer),
        ...     blocked_attrs={"_dataset", "train_loader"},
        ...     epoch=10
        ... )
    """

    event: EventType
    learner: Any = None  # The learner instance, or this context itself acting as a lazy proxy
    epoch: Optional[int] = None
    batch_idx: Optional[int] = None
    task_id: Optional[int] = None
    metrics: Dict[str, Any] = field(default_factory=dict)
    data: Dict[str, Any] = field(default_factory=dict)

    # Metadata about the method that emitted the event
    method_name: Optional[str] = None
    method_args: Optional[Dict[str, Any]] = None
    method_kwargs: Optional[Dict[str, Any]] = None
    # Result of the event handler, if any
    result: Optional[Any] = None

    # Internal fields for the lazy-loading machinery. Dataclass fields with
    # defaults always appear in the generated __init__, but callers are
    # expected to leave these alone: they are populated by __post_init__ and
    # by the create_optimized() factory method.
    _learner_ref: Any = field(default=None, repr=False)
    _blocked_attrs: Set[str] = field(default_factory=set, repr=False)
    _monitored_attrs: Set[str] = field(default_factory=set, repr=False)
    _logger: Any = field(default=None, repr=False)
    _use_lazy_loading: bool = field(default=False, repr=False)
    _accessed_attrs: Set[str] = field(default_factory=set, repr=False)

    def __post_init__(self):
        """Initialize the per-instance logger (derived state, not caller data)."""
        self._logger = logging.getLogger(self.__class__.__name__)

    @classmethod
    def create_optimized(cls, event: EventType, learner_ref,
                         blocked_attrs: Optional[Set[str]] = None,
                         monitored_attrs: Optional[Set[str]] = None,
                         **kwargs) -> 'EventContext':
        """
        Factory method to create an optimized EventContext with lazy loading.

        Instead of passing the entire learner instance directly, this method
        creates a context that will load learner attributes only when accessed.

        Args:
            event: The event type
            learner_ref: Weak reference (a zero-argument callable) to the learner
            blocked_attrs: Set of attribute names that should not be accessed directly
            monitored_attrs: Set of attribute names to log when accessed
            **kwargs: Additional event context data

        Returns:
            An EventContext configured for lazy loading
        """
        # Create a context with standard fields but without a direct learner reference
        context = cls(event=event, learner=None, **kwargs)

        # Configure for lazy loading
        context._use_lazy_loading = True
        context._learner_ref = learner_ref
        context._blocked_attrs = blocked_attrs or set()
        context._monitored_attrs = monitored_attrs or set()

        # The context itself acts as the proxy: attribute lookups that miss
        # on the context fall through to __getattr__, which resolves them
        # against the weakly-referenced learner.
        context.learner = context

        # Record the learner class name for type checking by handlers
        learner = learner_ref()
        if learner is not None:
            context.set("learner_class", learner.__class__.__name__)

        return context

    def __getattr__(self, name):
        """
        Lazily access attributes from the referenced learner instance.

        This method is called when an attribute is not found in the context itself,
        allowing transparent access to the underlying learner's attributes when
        lazy loading is enabled. Dunder names are never forwarded, so protocol
        probes (copy, pickle, inspect) behave normally.

        Args:
            name: Name of the attribute to access

        Returns:
            The attribute value from the original learner

        Raises:
            AttributeError: If the attribute doesn't exist or is blocked
            RuntimeError: If the learner has been garbage collected
        """
        # Never forward dunder lookups: copy/pickle/inspect probe names such
        # as __deepcopy__ and __getstate__, and resolving them against the
        # learner would corrupt those protocols.
        if name.startswith("__") and name.endswith("__"):
            raise AttributeError(f"'{self.__class__.__name__}' has no attribute '{name}'")

        # Read the flag via __dict__ so a partially initialized instance
        # (e.g. mid-copy/unpickle) cannot re-enter __getattr__ and recurse.
        if not self.__dict__.get("_use_lazy_loading", False):
            raise AttributeError(f"'{self.__class__.__name__}' has no attribute '{name}'")

        # Check if attribute is blocked
        if name in self._blocked_attrs:
            self._logger.warning(
                f"Access to large attribute '{name}' was blocked for performance reasons. "
                "Consider using an explicit accessor method instead."
            )
            raise AttributeError(f"Access to '{name}' is blocked for performance reasons")

        # Get the original learner; the weak reference returns None once the
        # learner has been garbage collected.
        learner = self._learner_ref()
        if learner is None:
            raise RuntimeError("Learner instance has been garbage collected")

        # Track which attributes are being accessed (useful for optimization)
        self._accessed_attrs.add(name)

        # Log monitored attribute access
        if name in self._monitored_attrs:
            self._logger.debug(f"Lazy access to monitored attribute '{name}'")

        # Get the attribute from the original learner; re-raise with the
        # learner's type name so the error points at the real target.
        try:
            return getattr(learner, name)
        except AttributeError:
            raise AttributeError(f"'{type(learner).__name__}' has no attribute '{name}'") from None

    def get(self, key: str, default: Any = None) -> Any:
        """
        Get data from the context's data store.

        Args:
            key: The key to look up in the data store
            default: Default value to return if key is not found

        Returns:
            The value associated with the key, or default if not found
        """
        return self.data.get(key, default)

    def set(self, key: str, value: Any) -> None:
        """
        Set data in the context's data store.

        Args:
            key: The key to store the value under
            value: The value to store
        """
        self.data[key] = value

    def update_metrics(self, **kwargs: Any) -> None:
        """
        Update the metrics dictionary with new key-value pairs.

        This is a convenience method for batch updating metrics without
        having to directly manipulate the metrics dictionary.

        Args:
            **kwargs: Key-value pairs to add to the metrics dictionary

        Example:
            >>> context.update_metrics(accuracy=0.95, loss=0.12, f1_score=0.88)
        """
        self.metrics.update(kwargs)

    def has_data(self, key: str) -> bool:
        """
        Check if the context has a specific data key.

        Args:
            key: The key to check for existence

        Returns:
            True if the key exists in the data store, False otherwise
        """
        return key in self.data

    def get_accessed_attributes(self) -> Set[str]:
        """
        Get the set of attributes that have been accessed via lazy loading.

        This is useful for monitoring and optimization to understand which
        attributes are actually being used by event handlers.

        Returns:
            Set of attribute names that have been accessed (a copy; empty
            when lazy loading is disabled)
        """
        return self._accessed_attrs.copy() if self._use_lazy_loading else set()

    def set_blocked_attributes(self, attrs: Set[str]) -> None:
        """
        Set or update the list of blocked attributes.

        Args:
            attrs: Set of attribute names to block from direct access
        """
        self._blocked_attrs = attrs


class EventHandler(ABC):
    """Base interface for event handlers.

    Subclasses implement ``handle`` and ``can_handle``; instances are
    callable and dispatch to ``handle`` only when ``can_handle`` accepts
    the context's event type.
    """

    # Human-readable handler name; subclasses are expected to provide it.
    name: str

    def __init__(self) -> None:
        # Per-instance list of event types this handler is interested in.
        # BUG FIX: the previous version assigned dataclasses.field(...) at
        # class level, which is only meaningful inside a @dataclass and left
        # a shared Field object as the class attribute instead of a list.
        self._events: List[EventType] = []

    def __call__(self, context: EventContext, *args: Any, **kwds: Any) -> Any:
        """Dispatch to handle() if this handler accepts the context's event.

        Returns the result of handle(), or None when the event is not handled.
        """
        if self.can_handle(context.event):
            return self.handle(context, *args, **kwds)
        return None

    @abstractmethod
    def handle(self, context: EventContext) -> Optional[EventContext]:
        """
        Handle an event

        Args:
            context: Event context containing relevant information
        """
        pass

    @abstractmethod
    def can_handle(self, event: EventType) -> bool:
        """
        Check if this handler can handle the given event type

        Args:
            event: Type of event

        Returns:
            True if handler can handle this event type
        """
        pass

    @property
    def events(self) -> List[EventType]:
        """Get the list of event types this handler can handle.

        Falls back to an empty list for subclasses that never called
        ``super().__init__()``.
        """
        return getattr(self, "_events", [])


class EventManager(ABC):
    """Contract for objects that route events to registered handlers."""

    @abstractmethod
    def register(self, event: EventType, handler: EventHandler) -> None:
        """Attach *handler* so that it receives events of type *event*."""
        ...

    @abstractmethod
    def unregister(self, event: EventType, handler: str | EventHandler) -> bool:
        """Detach a handler, identified by name or instance; returns a bool outcome."""
        ...

    @abstractmethod
    def emit(self, context: EventContext) -> Any:
        """Deliver *context* to every handler registered for its event type."""
        ...

    @abstractmethod
    def get_handlers(
        self,
        event: Optional[EventType] = None,
        return_dict: bool = False
    ) -> set[EventHandler] | list[EventHandler] | Dict[str, list[str]]:
        """
        Look up handlers for one event type, or for all of them.

        Args:
            event: Specific event type to query; None means every event type.
            return_dict: When True, return handler names instead of handler objects.

        Returns:
            With return_dict=False: the EventHandler objects.
            With return_dict=True: a dict mapping event type names to handler names.
        """
        ...

    @abstractmethod
    def clear_handlers(self, event: Optional[EventType] = None) -> None:
        """Remove handlers for *event*, or every handler when *event* is None."""
        ...


class EventEmitter(ABC):
    """Contract for components that publish events through an event manager."""

    # Manager responsible for fanning emitted events out to handlers.
    _event_manager: EventManager

    @abstractmethod
    def emit_event(self, event: EventType, **kwargs) -> None:
        """
        Publish an event.

        Args:
            event: Type of event to emit
            **kwargs: Additional event data
        """
        ...

    @abstractmethod
    def register_handler(self, event: EventType, handler: EventHandler) -> None:
        """
        Attach a handler for the given event type.

        Args:
            event: Type of event to handle
            handler: Handler instance
        """
        ...

    @abstractmethod
    def unregister_handler(self, event: EventType, handler: EventHandler) -> None:
        """
        Detach a previously attached handler.

        Args:
            event: Type of event
            handler: Handler instance to remove
        """
        ...


class EventFilter(ABC):
    """Contract for predicates that decide whether an event is processed."""

    @abstractmethod
    def should_process(self, context: EventContext) -> bool:
        """
        Decide whether the event described by *context* should be processed.

        Args:
            context: Event context

        Returns:
            True if the event should be processed
        """
        ...


class EventProcessor(ABC):
    """Contract for pipelines that run events through a set of filters."""

    @abstractmethod
    def add_filter(self, filter_instance: EventFilter) -> None:
        """Attach *filter_instance* to the processing pipeline."""
        ...

    @abstractmethod
    def remove_filter(self, filter_instance: EventFilter) -> None:
        """Detach *filter_instance* from the processing pipeline."""
        ...

    @abstractmethod
    def process_event(self, context: EventContext) -> bool:
        """
        Run an event through every attached filter.

        Args:
            context: Event context

        Returns:
            True if the event passed all filters and was processed
        """
        ...


class AsyncEventEmitter(ABC):
    """Contract for emitters that deliver events asynchronously."""

    @abstractmethod
    async def emit_async(self, event: EventType, **kwargs) -> None:
        """Deliver *event* asynchronously, forwarding **kwargs as event data."""
        ...

    @abstractmethod
    def emit_deferred(self, event: EventType, **kwargs) -> None:
        """Queue *event* for later processing instead of emitting immediately."""
        ...

    @abstractmethod
    async def process_deferred_events(self) -> None:
        """Drain the queue, delivering every deferred event."""
        ...


class EventConfig(ABC):
    """Contract for configuration of the event system."""

    @abstractmethod
    def get_event_settings(self) -> Dict[str, Any]:
        """Return the settings dictionary for the event system."""
        ...

    @abstractmethod
    def is_event_enabled(self, event: EventType) -> bool:
        """Return True when events of type *event* are enabled."""
        ...

    @abstractmethod
    def get_emission_interval(self) -> int:
        """Return the interval at which events are emitted."""
        ...
