"""
Unified event handler system for flexible, extensible event processing.

This module provides a comprehensive event handling framework that follows a modular,
component-based architecture with progressive disclosure of complexity. It supports
both simple and advanced event processing patterns while adhering to SOLID principles.

Key features:
- Flexible handler architecture with multiple handler types for different use cases
- Thread-safe operations with comprehensive error handling
- Performance metrics collection and monitoring
- Asynchronous event processing support
- Chain of Responsibility pattern implementation
- Conditional, batch, and retry processing capabilities
- Registry system for handler management and discovery
- Strategy-specific handlers for continual learning applications

The system uses a progressive disclosure pattern where basic functionality is simple
and accessible, while advanced features are available when needed but don't complicate
basic usage. This allows the system to grow with the user's needs without introducing
unnecessary complexity for simple use cases.

Example usage:
    # Create a simple functional handler
    @event_handler(EventType.TASK_COMPLETED)
    def handle_task_completion(event):
        print(f"Task {event.task_id} completed!")
        return event

    # Register with the event system
    dispatcher.register_handler(handle_task_completion)

    # Create and use more complex handlers
    batch_handler = BatchEventHandler(batch_size=10)
    with temporary_handler(batch_handler):
        # Process events in batches temporarily
        dispatcher.dispatch_event(task_event)
"""

# Standard library imports for core functionality
import asyncio          # For asynchronous event handling
import logging          # For structured logging throughout the system
import threading        # For thread-safe operations and locks
import time            # For timing and metrics collection
import traceback       # For detailed error information
import weakref         # For weak references to prevent memory leaks

# Core Python features
from abc import ABC, abstractmethod                   # For abstract base classes
from collections import defaultdict           # For efficient data structures
from contextlib import contextmanager               # For context manager support
from dataclasses import dataclass, field            # For cleaner data classes
from functools import wraps                         # For decorator functionality
# Type hints for better IDE support and static type checking
from typing import (
    Any,                # For dynamic typing
    Callable,           # For function type annotations
    Coroutine,          # For async function return types
    Dict,               # For dictionary types
    List,               # For list types
    Optional,           # For nullable types
    Type,               # For class types
    TypeVar,            # For generic type variables
    Union,              # For union types
    Generic,            # For generic classes
    Iterator,           # For iterator return types
)

# Framework interfaces for the event system
from continuallearning.events.interfaces import (
    EventHandlerInterface,        # Base interface for all event handlers
    AsyncEventHandlerInterface,   # Interface for asynchronous event handlers
    EventValidatorInterface,      # Interface for event validation
    EventTransformerInterface,    # Interface for event transformation
    EventProcessorInterface,      # Interface for event processing
)

# Core event data types and enums
from continuallearning.events.core.event_types import (
    BaseEventData,    # Base class for all event data objects
    EventType,        # Enum of supported event types
    EventSeverity,    # Enum for event severity levels
    EventStatus,      # Enum for event processing status
)

# Type variable for event data, ensuring type safety throughout the event system
# This enforces that all events processed by handlers are derived from BaseEventData
E = TypeVar("E", bound=BaseEventData)


# ========== Performance Monitoring and Metrics ==========


@dataclass
class HandlerMetrics:
    """
    Comprehensive metrics collection for event handler performance monitoring.

    This class provides detailed tracking of handler execution statistics with support
    for progressive disclosure - basic metrics are always available while advanced
    metrics are optional for more sophisticated monitoring needs.

    The metrics support both real-time monitoring and historical analysis, enabling
    performance optimization and debugging of event processing pipelines.

    Attributes:
        total_events_processed: Total number of events processed by the handler
        total_processing_time: Cumulative time spent processing all events (seconds)
        error_count: Total number of errors encountered during processing
        average_processing_time: Mean processing time per event (seconds)
        max_processing_time: Maximum processing time for a single event (seconds)
        min_processing_time: Minimum processing time for a single event (seconds)
        last_execution_time: Timestamp of the most recent execution
        success_count: Number of successfully processed events
        processing_times: Detailed list of individual processing times
        error_types: Dictionary mapping error types to their occurrence counts

    Example:
        >>> metrics = HandlerMetrics()
        >>> metrics.update(0.125, success=True)
        >>> metrics.update(0.089, success=False, error_type="ValueError")
        >>> print(f"Success rate: {metrics.get_success_rate():.1f}%")
        Success rate: 50.0%

    Note:
        This class performs no internal locking; callers that update metrics
        from multiple threads must synchronize externally.
    """

    # === Core Metrics (Always Available) ===
    total_events_processed: int = 0
    total_processing_time: float = 0.0
    error_count: int = 0

    # === Advanced Metrics (Optional) ===
    average_processing_time: float = 0.0
    max_processing_time: float = 0.0
    # Starts at +inf so the first update() call always becomes the minimum.
    min_processing_time: float = float("inf")
    last_execution_time: Optional[float] = None

    # === Detailed Tracking (Advanced) ===
    success_count: int = 0
    processing_times: List[float] = field(default_factory=list)
    # defaultdict(int) lets update() use `+= 1` without membership checks.
    error_types: Dict[str, int] = field(default_factory=lambda: defaultdict(int))

    def update(
        self,
        processing_time: float,
        success: bool = True,
        error_type: Optional[str] = None,
    ) -> None:
        """
        Update metrics with execution results from a handler invocation.

        This method should be called after each event processing to maintain
        accurate performance statistics. It automatically calculates derived
        metrics like averages and tracks detailed timing information.

        Args:
            processing_time: Time taken to process the event in seconds
            success: Whether the event was processed successfully
            error_type: Type of error if processing failed (e.g., "ValueError")

        Note:
            This method is NOT thread-safe. When handlers process events
            concurrently, callers must guard update() with a lock.
        """
        self.total_events_processed += 1
        self.total_processing_time += processing_time

        # Update timing statistics. The counter was just incremented, so it
        # is always >= 1 here and the division is safe.
        self.average_processing_time = (
            self.total_processing_time / self.total_events_processed
        )

        self.max_processing_time = max(self.max_processing_time, processing_time)
        self.min_processing_time = min(self.min_processing_time, processing_time)
        self.last_execution_time = time.time()

        # Track detailed metrics
        self.processing_times.append(processing_time)

        if success:
            self.success_count += 1
        else:
            self.error_count += 1
            if error_type:
                self.error_types[error_type] += 1

    def get_success_rate(self) -> float:
        """
        Calculate the success rate as a percentage.

        Returns:
            Success rate as a percentage (0.0-100.0). Returns 0.0 if no
            events have been processed yet.

        Example:
            >>> metrics = HandlerMetrics()
            >>> metrics.update(0.1, success=True)
            >>> metrics.update(0.2, success=False)
            >>> metrics.get_success_rate()
            50.0
        """
        if self.total_events_processed == 0:
            return 0.0
        return (self.success_count / self.total_events_processed) * 100

    def reset(self) -> None:
        """
        Reset all metrics to their initial state.

        This method clears all accumulated statistics and timing data,
        effectively starting fresh measurement. Useful for periodic
        metric collection or when starting a new measurement period.

        Note:
            This operation is irreversible - all historical data will be lost.
        """
        self.total_events_processed = 0
        self.total_processing_time = 0.0
        self.error_count = 0
        self.success_count = 0
        self.average_processing_time = 0.0
        self.max_processing_time = 0.0
        self.min_processing_time = float("inf")
        self.last_execution_time = None
        self.processing_times.clear()
        self.error_types.clear()

    def to_dict(self) -> Dict[str, Any]:
        """
        Convert metrics to a dictionary for serialization and reporting.

        Returns:
            Dictionary containing all metric values in a JSON-serializable format.
            Handles special cases like infinity values for min_processing_time.

        Example:
            >>> metrics = HandlerMetrics()
            >>> metrics.update(0.1, success=True)
            >>> result = metrics.to_dict()
            >>> result['success_rate']
            100.0
        """
        return {
            "total_events_processed": self.total_events_processed,
            "total_processing_time": self.total_processing_time,
            "average_processing_time": self.average_processing_time,
            "max_processing_time": self.max_processing_time,
            # inf is not JSON-serializable; report 0 when nothing was recorded.
            "min_processing_time": (
                self.min_processing_time
                if self.min_processing_time != float("inf")
                else 0
            ),
            "success_count": self.success_count,
            "error_count": self.error_count,
            "success_rate": self.get_success_rate(),
            "error_types": dict(self.error_types),
            "last_execution_time": self.last_execution_time,
        }


# ========== Error Handling Decorators ==========


def with_error_handling(
    log_errors: bool = True,
    reraise: bool = False,
    default_return: Any = None,
    error_handler: Optional[Callable[[Exception, E], Any]] = None,
    track_metrics: bool = True,
):
    """
    Unified decorator for robust error handling in event handlers.

    This decorator provides comprehensive error handling capabilities including
    logging, metrics tracking, custom error handlers, and flexible error
    propagation policies. It consolidates error handling from both basic and
    advanced handler versions with optional advanced features.

    Args:
        log_errors: Whether to automatically log errors with structured information
        reraise: Whether to re-raise exceptions after handling (for debugging)
        default_return: Value to return when an error occurs and reraise=False
        error_handler: Optional custom function to handle errors, receives (exception, event)
        track_metrics: Whether to update handler metrics with error information

    Returns:
        Decorated function with enhanced error handling capabilities

    Example:
        @with_error_handling(log_errors=True, reraise=False)
        def my_handler(self, event):
            # Handler implementation that may raise exceptions
            return process_event(event)

    Note:
        Durations are measured with time.perf_counter(), a monotonic clock,
        so metrics cannot record negative times when the system clock is
        adjusted mid-call. Structured logging includes event context
        (event_id/event_type) when the event exposes those attributes.
    """

    def decorator(
        func: Callable[[Any, E], Optional[E]],
    ) -> Callable[[Any, E], Optional[E]]:
        @wraps(func)
        def wrapper(self, event: E) -> Optional[E]:
            # Monotonic clock for durations; time.time() can jump backwards.
            start_time = time.perf_counter() if track_metrics else 0.0

            try:
                result = func(self, event)

                # Update metrics on success (only if the handler carries a
                # metrics object, i.e. track_metrics was enabled on it).
                if track_metrics and getattr(self, "metrics", None):
                    self.metrics.update(
                        time.perf_counter() - start_time, success=True
                    )

                return result

            except Exception as e:
                processing_time = (
                    time.perf_counter() - start_time if track_metrics else 0.0
                )
                error_type = e.__class__.__name__

                # Update metrics on failure
                if track_metrics and getattr(self, "metrics", None):
                    self.metrics.update(
                        processing_time, success=False, error_type=error_type
                    )

                # Resolve the logger once; fall back to the module logger if
                # the handler has no (or a None) _logger attribute.
                logger = getattr(self, "_logger", None) or logging.getLogger(
                    __name__
                )

                # Log error with structured information
                if log_errors:
                    extra_info = {
                        "handler": self.__class__.__name__,
                        "error_type": error_type,
                    }

                    # Add event information if available
                    if hasattr(event, "event_id"):
                        extra_info["event_id"] = event.event_id
                    if hasattr(event, "event_type"):
                        extra_info["event_type"] = event.event_type.value

                    logger.error(
                        f"Error in {func.__name__}: {e}",
                        exc_info=True,
                        extra=extra_info,
                    )

                # Try custom error handler; its failures must not mask the
                # configured reraise/default_return policy.
                if error_handler:
                    try:
                        return error_handler(e, event)
                    except Exception as handler_error:
                        if log_errors:
                            logger.error(f"Error in error handler: {handler_error}")

                # Reraise the original exception if requested
                if reraise:
                    raise

                return default_return

        return wrapper

    return decorator


def event_handler(
    event_types: Union[EventType, List[EventType]],
    priority: int = 0,
    enabled: bool = True,
    track_metrics: bool = False,  # Progressive disclosure
    name: Optional[str] = None,
):
    """
    Decorator factory that turns a plain function into an event handler.

    Wrapping a function with this decorator produces a FunctionalEventHandler
    instance, so the function can be registered with the event system exactly
    like a class-based handler. Simple usage needs only the event type(s);
    priority, metrics tracking, and a custom name are opt-in extras
    (progressive disclosure).

    Args:
        event_types: One EventType, or a list of them, that the handler accepts
        priority: Execution priority — handlers with higher values run first
        enabled: Whether the resulting handler starts out enabled
        track_metrics: Opt in to performance metrics collection
        name: Handler name; falls back to the wrapped function's __name__

    Returns:
        A FunctionalEventHandler wrapping the decorated function.

    Example:
        @event_handler(EventType.TASK_STARTED, priority=10, track_metrics=True)
        def handle_task_start(event):
            print(f"Task {event.task_id} started")
            return event

        result = handle_task_start.handle_event(task_start_event)

    Note:
        The wrapped function receives a single event argument and may return
        a (possibly modified) event to continue the processing pipeline.
    """

    def decorator(func: Callable[[E], Optional[E]]) -> "FunctionalEventHandler[E]":
        # Fall back to the function's own name when no explicit name is given.
        handler_name = name or func.__name__
        return FunctionalEventHandler(
            event_types=event_types,
            handler_func=func,
            name=handler_name,
            priority=priority,
            enabled=enabled,
            track_metrics=track_metrics,
        )

    return decorator


# ========== Unified Base Handler ==========


class BaseEventHandler(ABC, Generic[E]):
    """
    Unified abstract base class for all event handlers in the system.

    This class consolidates functionality from both basic and advanced handler
    versions, providing a comprehensive foundation with progressive disclosure
    of advanced features. It follows SOLID principles and provides a consistent
    interface for all event processing operations.

    The class supports:
    - Basic event handling with type checking and lifecycle management
    - Advanced features like metrics tracking, thread safety, and custom filtering
    - Lifecycle hooks for pre/post processing
    - Flexible configuration through progressive disclosure

    Attributes:
        name: Human-readable name for this handler instance
        priority: Execution priority (higher numbers execute first)
        enabled: Whether this handler is currently active
        track_metrics: Whether to collect performance metrics
        thread_safe: Whether to enable thread-safe operations
        metrics: Optional performance metrics collector

    Example:
        class MyHandler(BaseEventHandler):
            def get_handled_events(self):
                return [EventType.TASK_STARTED]

            def _handle_event_impl(self, event):
                print(f"Handling event: {event}")
                return event

        handler = MyHandler(name="custom", track_metrics=True)
        result = handler.handle_event(event)

    Note:
        Subclasses must implement get_handled_events() and _handle_event_impl().
        Use _handle_event_impl() for the actual processing logic, not handle_event().
    """

    def __init__(
        self,
        name: Optional[str] = None,
        priority: int = 0,
        enabled: bool = True,
        track_metrics: bool = False,  # Progressive disclosure
        thread_safe: bool = False,  # Progressive disclosure
    ):
        """
        Initialize the unified event handler.

        Args:
            name: Optional name for this handler (defaults to the class name)
            priority: Handler priority (higher numbers execute first)
            enabled: Whether this handler is enabled
            track_metrics: Enable performance metrics tracking
            thread_safe: Enable thread-safe operations
        """
        # Core attributes (always available)
        self.name = name or self.__class__.__name__
        self.priority = priority
        self.enabled = enabled

        # Advanced features (progressive disclosure)
        self.track_metrics = track_metrics
        self.thread_safe = thread_safe

        # Metrics collector, only allocated when tracking is requested
        self.metrics = HandlerMetrics() if track_metrics else None

        # Re-entrant lock, only allocated when thread safety is requested
        self._lock = threading.RLock() if thread_safe else None

        # Advanced filtering (optional); all filters must pass in can_handle()
        self._event_filters: List[Callable[[E], bool]] = []

        # Lifecycle hooks (optional), run around _handle_event_impl()
        self._pre_handle_hooks: List[Callable[[E], None]] = []
        self._post_handle_hooks: List[Callable[[E, Optional[E]], None]] = []

        # Per-instance logger named after both class and handler instance
        self._logger = logging.getLogger(f"{self.__class__.__name__}.{self.name}")

    @abstractmethod
    def get_handled_events(self) -> List[EventType]:
        """
        Return the list of event types this handler can process.

        This method defines the event types that this handler is capable of
        processing. The event system uses this information for routing and
        filtering events to appropriate handlers.

        Returns:
            List of EventType enums that this handler can process.
            An empty list indicates the handler can process all event types.

        Note:
            This method must be implemented by all concrete handler subclasses.
            The returned list should be consistent throughout the handler's lifetime.
        """
        pass

    def can_handle(self, event: E) -> bool:
        """
        Determine if this handler can process the given event.

        This method performs comprehensive event compatibility checking including
        handler state validation, event type matching, and custom filter evaluation.
        It supports both basic type-based filtering and advanced custom filtering.

        Args:
            event: The event to check for compatibility

        Returns:
            True if this handler can process the event, False otherwise

        Note:
            This method is automatically called by the event system before
            invoking handle_event(). Custom filters added via add_event_filter()
            are evaluated in addition to basic compatibility checks.
        """
        if not self.enabled:
            return False

        # Check event type compatibility; an empty handled-events list means
        # the handler accepts every event type
        handled_events = self.get_handled_events()
        if handled_events and event.event_type not in handled_events:
            return False

        # Apply custom filters (advanced feature); all must return True
        for event_filter in self._event_filters:
            if not event_filter(event):
                return False

        return True

    def handle_event(self, event: E) -> Optional[E]:
        """
        Process an event with comprehensive error handling and optional features.

        This is the main entry point for event processing. It orchestrates the
        complete event handling workflow including compatibility checking,
        thread safety, lifecycle hooks, and error handling.

        Args:
            event: The event to process

        Returns:
            Processed event (potentially modified) or None if processing failed
            or the handler cannot process this event type

        Note:
            This method should not be overridden by subclasses. Implement
            _handle_event_impl() instead for custom processing logic.
            Error handling, metrics, and hooks are automatically applied.
        """
        if not self.can_handle(event):
            return None

        return self._handle_event_with_error_handling(event)

    @with_error_handling(log_errors=True, reraise=False, track_metrics=True)
    def _handle_event_with_error_handling(self, event: E) -> Optional[E]:
        """
        Internal implementation wrapped with error handling.

        The with_error_handling decorator supplies logging and metrics
        updates; this method only decides whether to serialize processing
        through the instance lock.
        """
        # Thread-safety: take the re-entrant lock when enabled so only one
        # thread runs the hooks + implementation at a time
        if self.thread_safe and self._lock:
            with self._lock:
                return self._handle_event_with_hooks(event)
        else:
            return self._handle_event_with_hooks(event)

    def _handle_event_with_hooks(self, event: E) -> Optional[E]:
        """
        Execute event processing surrounded by pre and post lifecycle hooks.

        This internal method runs the registered pre-handle hooks, then the
        actual event processing, then the post-handle hooks. A failure inside
        a hook is logged and suppressed; however, if _handle_event_impl()
        itself raises, the exception propagates to the error-handling wrapper
        and the post-handle hooks are skipped.

        Args:
            event: The event to process

        Returns:
            Result from the actual event processing implementation

        Note:
            Hook failures are logged but do not prevent processing.
            This method is called internally by handle_event().
        """
        # Execute pre-handle hooks
        for hook in self._pre_handle_hooks:
            try:
                hook(event)
            except Exception as e:
                self._logger.warning(f"Pre-handle hook failed: {e}")

        # Execute the actual handler implementation
        result = self._handle_event_impl(event)

        # Execute post-handle hooks (only reached if the implementation
        # returned normally)
        for hook in self._post_handle_hooks:
            try:
                hook(event, result)
            except Exception as e:
                self._logger.warning(f"Post-handle hook failed: {e}")

        return result

    @abstractmethod
    def _handle_event_impl(self, event: E) -> Optional[E]:
        """
        Implement the actual event processing logic.

        This method contains the core event processing implementation and must
        be implemented by all concrete handler subclasses. It should focus
        solely on the business logic without concern for error handling,
        metrics, or lifecycle management.

        Args:
            event: The event to process

        Returns:
            Processed event (potentially modified) or None if processing
            determines the event should not continue in the pipeline

        Note:
            This method is called by handle_event() after all safety checks
            and setup. Error handling, metrics tracking, and hooks are
            automatically applied by the base class.
        """
        pass

    # ========== Advanced Features (Progressive Disclosure) ==========

    def add_event_filter(self, filter_func: Callable[[E], bool]) -> None:
        """
        Add a custom event filter for advanced event filtering.

        Event filters allow fine-grained control over which events are processed
        beyond basic event type matching. Filters are evaluated in the order
        they were added, and all filters must return True for the event to be processed.

        Args:
            filter_func: Function that takes an event and returns True if it
                        should be processed, False otherwise

        Example:
            def priority_filter(event):
                return event.priority >= 5

            handler.add_event_filter(priority_filter)
        """
        self._event_filters.append(filter_func)

    def remove_event_filter(self, filter_func: Callable[[E], bool]) -> None:
        """
        Remove a previously added custom event filter.

        Args:
            filter_func: The exact filter function that was previously added

        Note:
            If the filter function is not found, this method silently does nothing.
            List membership/removal uses equality (which for plain functions
            is identity), so pass the same function object that was added.
        """
        if filter_func in self._event_filters:
            self._event_filters.remove(filter_func)

    def add_pre_handle_hook(self, hook: Callable[[E], None]) -> None:
        """
        Add a lifecycle hook that executes before event processing.

        Pre-handle hooks are useful for logging, validation, or preprocessing
        that should occur before the main event processing logic.

        Args:
            hook: Function that takes an event and performs some action.
                 Should not modify the event or return any value.

        Example:
            def log_event(event):
                logger.info(f"Processing event {event.event_id}")

            handler.add_pre_handle_hook(log_event)
        """
        self._pre_handle_hooks.append(hook)

    def add_post_handle_hook(self, hook: Callable[[E, Optional[E]], None]) -> None:
        """
        Add a lifecycle hook that executes after event processing.

        Post-handle hooks are useful for cleanup, logging results, or
        triggering additional actions based on processing outcomes.

        Args:
            hook: Function that takes the original event and the processing
                 result. Should not return any value.

        Example:
            def log_result(original_event, result):
                if result:
                    logger.info(f"Event {original_event.event_id} processed successfully")
                else:
                    logger.warning(f"Event {original_event.event_id} processing failed")

            handler.add_post_handle_hook(log_result)
        """
        self._post_handle_hooks.append(hook)

    def enable(self) -> None:
        """
        Enable this handler to participate in event processing.

        Once enabled, the handler will process events that match its
        event type filters and custom filters.
        """
        self.enabled = True
        self._logger.debug(f"Handler {self.name} enabled")

    def disable(self) -> None:
        """
        Disable this handler to prevent it from processing events.

        Disabled handlers will not process any events, regardless of
        type matching or filter conditions.
        """
        self.enabled = False
        self._logger.debug(f"Handler {self.name} disabled")

    def get_metrics(self) -> Optional[HandlerMetrics]:
        """
        Get handler performance metrics if tracking is enabled.

        Returns:
            HandlerMetrics instance if metrics tracking is enabled,
            None otherwise

        Note:
            Metrics tracking must be enabled during handler initialization
            for this method to return meaningful data.
        """
        return self.metrics

    def reset_metrics(self) -> None:
        """
        Reset handler performance metrics to initial state.

        This method clears all accumulated performance data including
        timing statistics, error counts, and success rates.

        Note:
            Only effective if metrics tracking is enabled. The operation
            is irreversible and all historical data will be lost.
        """
        if self.metrics:
            self.metrics.reset()

    def __str__(self) -> str:
        """
        Return a concise string representation of the handler.

        Returns:
            String containing handler class name, instance name, and priority
        """
        return (
            f"{self.__class__.__name__}(name='{self.name}', priority={self.priority})"
        )

    def __repr__(self) -> str:
        """
        Return a detailed string representation for debugging.

        Returns:
            String containing comprehensive handler configuration information
            including name, priority, enabled state, and feature flags
        """
        return (
            f"{self.__class__.__name__}("
            f"name='{self.name}', "
            f"priority={self.priority}, "
            f"enabled={self.enabled}, "
            f"track_metrics={self.track_metrics})"
        )


# ========== Concrete Handler Implementations ==========


class FunctionalEventHandler(BaseEventHandler[E]):
    """
    Handler implementation that wraps a plain function as an event handler.

    This class converts a regular function into a full-featured event handler,
    bridging the functional and object-oriented approaches. The function
    participates in the event handling system without requiring a custom
    handler class, while still benefiting from priority ordering,
    enable/disable control, and optional metrics tracking inherited from
    BaseEventHandler.

    Example:
        def log_task_completion(event):
            print(f"Task {event.task_id} completed!")
            return event

        handler = FunctionalEventHandler(
            event_types=EventType.TASK_COMPLETED,
            handler_func=log_task_completion,
            priority=5,
            track_metrics=True
        )

        # Register with event system
        dispatcher.register_handler(handler)
    """

    def __init__(
        self,
        event_types: Union[EventType, List[EventType]],
        handler_func: Callable[[E], Optional[E]],
        name: Optional[str] = None,
        priority: int = 0,
        enabled: bool = True,
        track_metrics: bool = False,
        validate_events: bool = True,
        **kwargs,
    ):
        """
        Initialize the functional handler.

        Args:
            event_types: Single event type or list of event types to handle.
            handler_func: Function invoked for each matching event; receives
                the event and returns the (possibly modified) event or None.
            name: Optional handler name (defaults to the class name).
            priority: Execution priority relative to other handlers.
            enabled: Whether the handler starts enabled.
            track_metrics: Whether to collect performance metrics.
            validate_events: If True, call event.validate() before invoking
                the wrapped function and drop events that fail validation.
            **kwargs: Additional options forwarded to BaseEventHandler.
        """
        super().__init__(name, priority, enabled, track_metrics, **kwargs)

        # Normalize to a list and copy it so later mutation of the caller's
        # list cannot silently change which events this handler accepts.
        if isinstance(event_types, EventType):
            self._event_types = [event_types]
        else:
            self._event_types = list(event_types)
        self._handler_func = handler_func
        self.validate_events = validate_events

    def get_handled_events(self) -> List[EventType]:
        """
        Return the event types this handler can process.

        Returns:
            A copy of the event type list supplied at construction time.
            A copy is returned so callers cannot mutate internal state.
        """
        return list(self._event_types)

    def _handle_event_impl(self, event: E) -> Optional[E]:
        """
        Process an event by invoking the wrapped function.

        If validate_events is True, event.validate() is called first; events
        that fail validation are logged and dropped (None is returned)
        without reaching the wrapped function.

        Args:
            event: The event to process

        Returns:
            The wrapped function's result, or None if validation failed
        """
        # Optional event validation
        if self.validate_events:
            try:
                event.validate()
            except Exception as e:
                self._logger.warning(f"Event validation failed: {e}")
                return None

        return self._handler_func(event)


class ChainableEventHandler(BaseEventHandler[E]):
    """
    Handler implementation supporting the Chain of Responsibility pattern.

    Events flow through an internal list of handlers (executed in
    registration order) and then, optionally, to an external next handler
    set via set_next(). Each handler may modify the event before it is
    passed along to the next one.

    Features:
    - Fluent interface for constructing chains
    - Fail-fast option that aborts the chain on the first handler error
    - Aggregate event type reporting across all contained handlers

    Example:
        # Create chain with multiple handlers
        chain = ChainableEventHandler(
            handlers=[validation_handler, logging_handler],
            name="validation_chain",
            fail_fast=True
        )

        # Add more handlers to the chain using fluent interface
        chain.set_next(processing_handler).set_next(notification_handler)

        # Use the chain
        result = chain.handle_event(event)
    """

    def __init__(
        self,
        handlers: Optional[List[EventHandlerInterface[E]]] = None,
        name: Optional[str] = None,
        priority: int = 0,
        enabled: bool = True,
        fail_fast: bool = False,
        **kwargs,
    ):
        """
        Initialize the chain.

        Args:
            handlers: Optional initial handlers, executed in list order.
            name: Optional handler name (defaults to the class name).
            priority: Execution priority relative to other handlers.
            enabled: Whether the handler starts enabled.
            fail_fast: If True, stop processing on the first handler error.
            **kwargs: Additional options forwarded to BaseEventHandler.
        """
        super().__init__(name, priority, enabled, **kwargs)

        # Copy the incoming list so external mutation cannot alter the chain.
        self._handlers = list(handlers) if handlers else []
        self.fail_fast = fail_fast
        # Annotated as ChainableEventHandler (not BaseEventHandler) so the
        # stored value is consistent with what set_next() accepts/returns.
        self._next_handler: Optional["ChainableEventHandler[E]"] = None

    def add_handler(self, handler: EventHandlerInterface[E]) -> None:
        """
        Append a handler to the internal processing list.

        Handlers run in the order they were added, before the external next
        handler (if any) is invoked. Unlike set_next(), this method does not
        return the handler for chaining.

        Args:
            handler: The event handler to add to the chain
        """
        self._handlers.append(handler)

    def set_next(
        self, handler: "ChainableEventHandler[E]"
    ) -> "ChainableEventHandler[E]":
        """
        Link the next handler in the external chain.

        Args:
            handler: The handler to invoke after this chain's internal
                handlers have run.

        Returns:
            The given handler, enabling fluent chaining.

        Example:
            chain = ChainableEventHandler()
            chain.set_next(handler2).set_next(handler3)
        """
        self._next_handler = handler
        return handler

    def get_handled_events(self) -> List[EventType]:
        """
        Return the union of event types handled by the contained handlers.

        Returns:
            List of unique EventType enums in first-seen order across the
            internal handlers (deterministic, unlike an unordered set).

        Note:
            The external next handler is not considered in this calculation.
        """
        # dict preserves insertion order and de-duplicates, giving a stable,
        # deterministic ordering (a plain set would not).
        seen: Dict[EventType, None] = {}
        for handler in self._handlers:
            for event_type in handler.get_handled_events():
                seen[event_type] = None
        return list(seen)

    def _handle_event_impl(self, event: E) -> Optional[E]:
        """
        Process an event through all handlers in the chain.

        The event is passed through each internal handler in sequence (a
        handler returning None leaves the current event unchanged), then to
        the external next handler if one is set.

        Args:
            event: The event to process

        Returns:
            The event after being processed by the chain, potentially
            modified, or None if processing failed with fail_fast enabled

        Note:
            If fail_fast is True, processing stops at the first exception.
            Otherwise exceptions are logged and processing continues.
        """
        current_event = event

        # Process through internal handlers
        for handler in self._handlers:
            if handler.can_handle(current_event):
                try:
                    result = handler.handle_event(current_event)
                    if result is not None:
                        current_event = result
                except Exception as e:
                    self._logger.error(f"Handler {handler} failed: {e}")
                    if self.fail_fast:
                        return None

        # Process through next handler if present
        if self._next_handler is not None:
            return self._next_handler.handle_event(current_event)

        return current_event


class ConditionalEventHandler(BaseEventHandler[E]):
    """
    Wrapper handler that gates another handler behind a predicate.

    Only events for which the supplied condition returns True are forwarded
    to the wrapped handler; everything else is filtered out during the
    can_handle() check. This offers declarative, dynamic filtering beyond
    plain event type matching.

    Example:
        # Create a handler that only processes high-priority events
        def is_high_priority(event):
            return event.priority >= 8

        base_handler = SomeEventHandler()
        conditional = ConditionalEventHandler(
            condition=is_high_priority,
            handler=base_handler,
            name="high_priority_handler"
        )

        # Only high-priority events will be processed by base_handler
        result = conditional.handle_event(event)
    """

    def __init__(
        self,
        condition: Callable[[E], bool],
        handler: EventHandlerInterface[E],
        name: Optional[str] = None,
        priority: int = 0,
        enabled: bool = True,
        **kwargs,
    ):
        super().__init__(name, priority, enabled, **kwargs)

        # Predicate evaluated per event plus the handler it gates.
        self._condition = condition
        self._handler = handler

    def get_handled_events(self) -> List[EventType]:
        """
        Delegate to the wrapped handler's declared event types.

        Returns:
            The event types the wrapped handler can process.
        """
        return self._handler.get_handled_events()

    def can_handle(self, event: E) -> bool:
        """
        Check base compatibility first, then evaluate the custom predicate.

        The condition is only evaluated when the standard handler checks
        pass, preserving short-circuit behavior.

        Args:
            event: The event to check for compatibility

        Returns:
            True only if both the base checks and the condition pass.
        """
        if not super().can_handle(event):
            return False
        return self._condition(event)

    def _handle_event_impl(self, event: E) -> Optional[E]:
        """
        Forward the event to the wrapped handler.

        The predicate has already been evaluated in can_handle(), so any
        event reaching this point satisfies the condition.

        Args:
            event: The event to process

        Returns:
            Whatever the wrapped handler's handle_event() returns.
        """
        return self._handler.handle_event(event)


class BatchEventHandler(BaseEventHandler[E]):
    """
    Handler that accumulates events and processes them in batches.

    Events are buffered until either the batch size threshold is reached or
    the batch timeout has elapsed (checked when a new event arrives), at
    which point the whole batch is processed at once. Batch processing can
    substantially reduce overhead for operations such as bulk database
    writes.

    Features:
    - Configurable batch size and timeout
    - Thread-safe accumulation guarded by a lock
    - Support for a custom batch processor callable
    - Manual flush() for immediate processing

    Note:
        Timeout-based flushing is only evaluated when a new event arrives;
        there is no background timer. Call flush() to force out a batch
        that is waiting without new traffic (e.g. before shutdown).

    Example:
        class DatabaseBatchHandler(BatchEventHandler):
            def _process_single_event(self, event):
                # Database operation for a single event
                db.save_event(event)

        batch_handler = DatabaseBatchHandler(
            batch_size=50,
            batch_timeout=5.0,  # 5 seconds
            name="db_batch_handler"
        )

        # Events will be accumulated and periodically saved in batches
        dispatcher.register_handler(batch_handler)
    """

    def __init__(
        self,
        batch_size: int = 10,
        batch_timeout: float = 1.0,
        batch_processor: Optional[Callable[[List[E]], List[Optional[E]]]] = None,
        name: Optional[str] = None,
        priority: int = 0,
        enabled: bool = True,
        **kwargs,
    ):
        """
        Initialize the batch handler.

        Args:
            batch_size: Number of buffered events that triggers a flush.
            batch_timeout: Seconds since the previous flush beyond which
                the next incoming event triggers a flush.
            batch_processor: Optional callable that processes a whole batch;
                when omitted, _process_single_event() is called per event.
                Its return value is currently ignored.
            name: Optional handler name (defaults to the class name).
            priority: Execution priority relative to other handlers.
            enabled: Whether the handler starts enabled.
            **kwargs: Additional options forwarded to BaseEventHandler.
        """
        super().__init__(name, priority, enabled, **kwargs)

        self.batch_size = batch_size
        self.batch_timeout = batch_timeout
        self.batch_processor = batch_processor

        # Pending events and the time of the last flush, guarded by a lock
        # so events can arrive safely from multiple threads.
        self._batch: List[E] = []
        self._last_flush = time.time()
        self._batch_lock = threading.Lock()

    def get_handled_events(self) -> List[EventType]:
        """
        Return the event types this batch handler accepts.

        Returns:
            An empty list, meaning all event types are accepted by default;
            subclasses may override to restrict batching to specific types.
        """
        return []  # Handle all events by default

    def _handle_event_impl(self, event: E) -> Optional[E]:
        """
        Buffer an event and flush the batch if a threshold is reached.

        The whole operation (append, threshold check, possible flush) is
        performed under the batch lock for thread safety.

        Args:
            event: The event to add to the batch

        Returns:
            The original event, so it continues through the processing
            pipeline while also being queued for batch processing
        """
        with self._batch_lock:
            self._batch.append(event)

            size_reached = len(self._batch) >= self.batch_size
            timed_out = (time.time() - self._last_flush) >= self.batch_timeout
            if size_reached or timed_out:
                self._process_batch()

        return event  # Return original event for batch processing

    def _process_batch(self) -> None:
        """
        Process and clear the accumulated batch.

        Must be called with self._batch_lock held. Uses the custom
        batch_processor when provided, otherwise processes each event via
        _process_single_event(). Errors are logged; a failing event or
        batch never prevents subsequent batches from being processed.
        """
        if not self._batch:
            return

        batch_to_process = self._batch.copy()
        self._batch.clear()
        self._last_flush = time.time()

        if self.batch_processor:
            try:
                self.batch_processor(batch_to_process)
            except Exception as e:
                self._logger.error(f"Batch processing failed: {e}")
        else:
            # Default: process each event individually. Guard each call so
            # one failing event cannot abort the remainder of the batch
            # (matching the error handling of the batch_processor path).
            for event in batch_to_process:
                try:
                    self._process_single_event(event)
                except Exception as e:
                    self._logger.error(f"Batch event processing failed: {e}")

    @abstractmethod
    def _process_single_event(self, event: E) -> Optional[E]:
        """
        Process one event from a batch.

        Must be implemented by concrete subclasses; used only when no
        custom batch_processor was provided at initialization.

        Args:
            event: A single buffered event to process

        Returns:
            Processed event or None if processing failed
        """
        pass

    def flush(self) -> None:
        """
        Force immediate processing of any pending events.

        Thread-safe; useful before shutdown or whenever buffered events
        must be processed without waiting for the size/timeout thresholds.
        A no-op when the batch is empty.
        """
        with self._batch_lock:
            if self._batch:
                self._process_batch()


class RetryEventHandler(BaseEventHandler[E]):
    """
    Handler wrapper that adds automatic retry on processing failure.

    Wraps another handler and re-invokes it when an exception escapes,
    using a configurable retry strategy. Particularly useful for handlers
    that interact with external systems where transient failures are
    expected.

    Features:
    - Configurable maximum retry attempts
    - Fixed or exponential backoff between attempts
    - Conditional retries based on the raised exception
    - Logging of each retry attempt and of the final failure

    Example:
        # Create a handler that retries network operations
        network_handler = ApiCallHandler()

        def is_retriable_error(exception):
            return isinstance(exception, (ConnectionError, TimeoutError))

        retry_handler = RetryEventHandler(
            handler=network_handler,
            max_retries=5,
            retry_delay=2.0,
            exponential_backoff=True,
            retry_condition=is_retriable_error
        )

        # The handler will automatically retry failed API calls
        result = retry_handler.handle_event(event)
    """

    def __init__(
        self,
        handler: EventHandlerInterface[E],
        max_retries: int = 3,
        retry_delay: float = 1.0,
        exponential_backoff: bool = True,
        backoff_factor: float = 2.0,
        retry_condition: Optional[Callable[[Exception], bool]] = None,
        name: Optional[str] = None,
        priority: int = 0,
        enabled: bool = True,
        **kwargs,
    ):
        """
        Initialize the retry wrapper.

        Args:
            handler: The handler whose processing should be retried.
            max_retries: Maximum number of retries after the first attempt.
            retry_delay: Initial delay in seconds between attempts.
            exponential_backoff: If True, multiply the delay by
                backoff_factor after each failed attempt.
            backoff_factor: Multiplier applied to the delay when
                exponential_backoff is enabled.
            retry_condition: Predicate deciding whether a given exception
                should be retried; defaults to retrying every exception.
            name: Optional handler name (defaults to the class name).
            priority: Execution priority relative to other handlers.
            enabled: Whether the handler starts enabled.
            **kwargs: Additional options forwarded to BaseEventHandler.
        """
        super().__init__(name, priority, enabled, **kwargs)

        self._handler = handler
        self.max_retries = max_retries
        self.retry_delay = retry_delay
        self.exponential_backoff = exponential_backoff
        self.backoff_factor = backoff_factor
        self.retry_condition = retry_condition or (lambda e: True)

    def get_handled_events(self) -> List[EventType]:
        """
        Delegate to the wrapped handler's declared event types.

        Returns:
            The event types the wrapped handler can process.
        """
        return self._handler.get_handled_events()

    def _handle_event_impl(self, event: E) -> Optional[E]:
        """
        Process an event, retrying on failure per the configured strategy.

        Args:
            event: The event to process

        Returns:
            The wrapped handler's result on success, or None if the
            exception was non-retriable or every attempt failed
        """
        last_exception = None
        current_delay = self.retry_delay

        # One initial attempt plus up to max_retries retries.
        for attempt in range(self.max_retries + 1):
            try:
                return self._handler.handle_event(event)

            except Exception as e:
                last_exception = e

                # Non-retriable exceptions abort immediately; log the real
                # reason rather than a misleading "all retries failed".
                if not self.retry_condition(e):
                    self._logger.error(
                        f"Non-retriable error, giving up without retry: {e}"
                    )
                    return None

                # Don't sleep on the last attempt
                if attempt < self.max_retries:
                    self._logger.warning(
                        f"Retry attempt {attempt + 1}/{self.max_retries} after error: {e}"
                    )
                    time.sleep(current_delay)

                    if self.exponential_backoff:
                        current_delay *= self.backoff_factor

        # Every attempt (1 initial + max_retries retries) failed.
        self._logger.error(
            f"All {self.max_retries + 1} attempts failed. Last error: {last_exception}"
        )
        return None


# ========== Asynchronous Event Handlers ==========


class AsyncEventHandler(ABC, Generic[E]):
    """
    Abstract base class for asynchronous event handlers.

    Provides the async counterpart of the synchronous handler interface:
    the core methods are coroutines, making this base suitable for
    I/O-bound work (network calls, database access) inside asyncio-based
    applications and services.

    Example:
        class AsyncApiEventHandler(AsyncEventHandler):
            async def get_handled_events(self):
                return [EventType.API_REQUEST]

            async def handle_event_async(self, event):
                response = await http_client.post(event.endpoint, event.data)
                return event.with_response(response)

        # Usage with asyncio
        handler = AsyncApiEventHandler()
        result = await handler.handle_event_async(event)
    """

    def __init__(
        self,
        name: Optional[str] = None,
        priority: int = 0,
        enabled: bool = True,
        track_metrics: bool = False,
        **kwargs,
    ):
        self.name = name or self.__class__.__name__
        self.priority = priority
        self.enabled = enabled
        self.track_metrics = track_metrics

        # Metrics collection is opt-in; stays None when tracking is off.
        self.metrics = None
        if track_metrics:
            self.metrics = HandlerMetrics()
        self._logger = logging.getLogger(f"{self.__class__.__name__}.{self.name}")

    @abstractmethod
    async def get_handled_events(self) -> List[EventType]:
        """
        Asynchronously report the event types this handler can process.

        The coroutine form allows the supported types to be determined
        dynamically, e.g. fetched from configuration services or databases.

        Returns:
            List of EventType enums this handler processes; an empty list
            means the handler accepts every event type.

        Note:
            Must be implemented by all concrete async handler subclasses.
        """
        pass

    async def can_handle(self, event: E) -> bool:
        """
        Asynchronously decide whether this handler should see the event.

        A disabled handler never matches. Otherwise the event matches when
        the handler declares no specific types (accept-all) or the event's
        type is among the declared ones.

        Args:
            event: The event to check for compatibility

        Returns:
            True if this handler can process the event, False otherwise

        Note:
            Subclasses may override to implement more sophisticated checks
            while keeping the asynchronous interface.
        """
        if not self.enabled:
            return False

        supported = await self.get_handled_events()
        if not supported:
            # An empty declaration means "handle everything".
            return True
        return event.event_type in supported

    @abstractmethod
    async def handle_event_async(self, event: E) -> Optional[E]:
        """
        Process an event asynchronously.

        This is the main entry point for asynchronous event processing.

        Args:
            event: The event to process asynchronously

        Returns:
            The (possibly modified) event, or None if processing failed or
            the event should not continue through the pipeline

        Note:
            Must be implemented by concrete subclasses. Implementations
            should await all I/O so the event loop stays responsive.
        """
        pass


class AsyncFunctionalEventHandler(AsyncEventHandler[E]):
    """
    Asynchronous event handler that wraps a coroutine function.

    Async counterpart of FunctionalEventHandler: converts an async function
    into a full asynchronous event handler so coroutines can participate in
    the event handling system without a custom handler class.

    Example:
        async def process_api_event(event):
            result = await api_client.process(event.data)
            return event.with_result(result)

        async_handler = AsyncFunctionalEventHandler(
            event_types=[EventType.API_REQUEST],
            async_handler_func=process_api_event,
            priority=10
        )

        # Use with asyncio
        result = await async_handler.handle_event_async(event)
    """

    def __init__(
        self,
        event_types: Union[EventType, List[EventType]],
        async_handler_func: Callable[[E], Coroutine[Any, Any, Optional[E]]],
        name: Optional[str] = None,
        priority: int = 0,
        enabled: bool = True,
        **kwargs,
    ):
        """
        Initialize the async functional handler.

        Args:
            event_types: Single event type or list of types to handle.
            async_handler_func: Coroutine function invoked per event.
            name: Optional handler name (defaults to the class name).
            priority: Execution priority relative to other handlers.
            enabled: Whether the handler starts enabled.
            **kwargs: Additional options forwarded to AsyncEventHandler.
        """
        super().__init__(name, priority, enabled, **kwargs)

        # Normalize to a list and copy it so later mutation of the caller's
        # list cannot silently change which events this handler accepts.
        if isinstance(event_types, EventType):
            self._event_types = [event_types]
        else:
            self._event_types = list(event_types)
        self._async_handler_func = async_handler_func

    async def get_handled_events(self) -> List[EventType]:
        """
        Asynchronously report the event types this handler can process.

        Returns:
            A copy of the event type list supplied at construction time.
        """
        return list(self._event_types)

    async def handle_event_async(self, event: E) -> Optional[E]:
        """
        Asynchronously process an event via the wrapped coroutine.

        Exceptions are caught and logged rather than propagated, matching
        the robust error handling used throughout the event system. When
        metrics tracking is enabled, the call duration and outcome are
        recorded.

        Args:
            event: The event to process asynchronously

        Returns:
            The wrapped coroutine's result, or None on error
        """
        # perf_counter is monotonic; time.time() can jump (NTP adjustments,
        # DST) and produce negative or skewed durations in the metrics.
        start_time = time.perf_counter() if self.track_metrics else 0.0

        try:
            result = await self._async_handler_func(event)

        except Exception as e:
            if self.track_metrics and self.metrics:
                elapsed = time.perf_counter() - start_time
                self.metrics.update(
                    elapsed, success=False, error_type=e.__class__.__name__
                )

            self._logger.error(f"Async handler error: {e}", exc_info=True)
            return None

        if self.track_metrics and self.metrics:
            elapsed = time.perf_counter() - start_time
            self.metrics.update(elapsed, success=True)

        return result


# ========== Specialized Handlers ==========


class StrategyEventHandler(BaseEventHandler[E]):
    """
    Specialized event handler base for continual learning strategies.

    Adds strategy-specific state on top of the generic handler machinery:
    a strategy name, a strategy priority used to order multiple active
    strategies, and the ID of the task currently being learned. Strategies
    subclass this to react to lifecycle events such as loss computation,
    memory management, and parameter importance tracking, while staying
    compatible with the general event system.

    Example:
        class EWCStrategyHandler(StrategyEventHandler):
            def __init__(self):
                super().__init__(
                    strategy_name="EWC",
                    strategy_priority=200  # Higher priority than regular strategies
                )

            def _handle_event_impl(self, event):
                if event.event_type == EventType.PARAMETER_IMPORTANCE_UPDATE_REQUESTED:
                    # Implement EWC-specific parameter importance calculation
                    return event.with_importance_scores(self.calculate_fisher_matrix())
                return event
    """

    def __init__(
        self,
        strategy_name: str,
        strategy_priority: int = 100,
        name: Optional[str] = None,
        priority: int = 0,
        enabled: bool = True,
        **kwargs,
    ):
        super().__init__(name, priority, enabled, **kwargs)

        self.strategy_name = strategy_name
        self.strategy_priority = strategy_priority
        # No task context until update_task_context() is first called.
        self._current_task_id: Optional[int] = None

    def get_strategy_name(self) -> str:
        """Return the string identifier of this continual learning strategy."""
        return self.strategy_name

    def get_strategy_priority(self) -> int:
        """
        Return the priority of this strategy.

        Priority determines execution order when multiple strategies are
        active; higher values mean higher priority.

        Returns:
            Integer priority value for this strategy
        """
        return self.strategy_priority

    def update_task_context(self, task_id: int) -> None:
        """
        Record the task this strategy is currently operating on.

        Keeping the task context current matters in continual learning,
        where the active task changes over time.

        Args:
            task_id: Identifier of the task now being learned
        """
        self._current_task_id = task_id
        self._logger.debug(f"Updated task context to task {task_id}")

    def get_current_task_id(self) -> Optional[int]:
        """
        Return the current task ID, or None when no context has been set.
        """
        return self._current_task_id

    def get_handled_events(self) -> List[EventType]:
        """
        Return the default set of events relevant to learning strategies.

        Subclasses may override to add event types specific to their
        implementation.

        Returns:
            EventType enums covering loss computation, batch preprocessing,
            memory updates, parameter importance updates, and strategy
            state changes
        """
        relevant = (
            EventType.LOSS_COMPUTATION_REQUESTED,
            EventType.LOSS_COMPUTATION_COMPLETED,
            EventType.BATCH_PREPROCESSING_REQUESTED,
            EventType.BATCH_PREPROCESSING_COMPLETED,
            EventType.MEMORY_UPDATE_REQUESTED,
            EventType.MEMORY_UPDATE_COMPLETED,
            EventType.PARAMETER_IMPORTANCE_UPDATE_REQUESTED,
            EventType.PARAMETER_IMPORTANCE_UPDATE_COMPLETED,
            EventType.STRATEGY_STATE_CHANGED,
        )
        return list(relevant)

    @abstractmethod
    def _handle_event_impl(self, event: E) -> Optional[E]:
        """
        Implement strategy-specific reactions to lifecycle events.

        Args:
            event: The event to process with strategy-specific logic

        Returns:
            The (possibly modified) event, or None if processing failed or
            the event should not continue in the pipeline

        Note:
            Implementations typically branch on event.event_type and may
            consult get_current_task_id() for the active task context.
        """
        pass


# ========== Handler Registry and Management ==========


class HandlerRegistry(Generic[E]):
    """
    Central registry for managing event handlers with lifecycle support.

    This class provides a centralized mechanism for registering, organizing,
    and retrieving event handlers. It maintains a thread-safe repository of
    handlers with an event-type index for efficient lookup, along with
    per-handler metadata tracking and statistical reporting.

    Features:
    - Thread-safe handler registration and management
    - Efficient event type-based indexing for fast handler lookup
    - Idempotent registration (re-registering never duplicates a handler)
    - Handler metadata storage and tracking
    - Priority-based handler ordering
    - Statistical reporting on handler distribution

    Example:
        registry = HandlerRegistry()

        # Register handlers
        registry.register(logging_handler)
        registry.register(processing_handler, {"group": "processing"})

        # Get handlers for a specific event
        handlers = registry.get_handlers_for_event(EventType.TASK_STARTED)

        # Use the handlers
        for handler in handlers:
            result = handler.handle_event(event)
    """

    def __init__(self):
        # Primary store keyed by a per-instance handler id.
        self._handlers: Dict[str, EventHandlerInterface[E]] = {}
        # Secondary index: event type -> handlers sorted by priority (higher
        # first). The defaultdict simplifies appends in register(); read paths
        # must use .get() so lookups never insert empty buckets.
        self._handlers_by_event: Dict[EventType, List[EventHandlerInterface[E]]] = (
            defaultdict(list)
        )
        self._handler_metadata: Dict[str, Dict[str, Any]] = {}
        # RLock so registry operations can nest under a single lock scope.
        self._lock = threading.RLock()

        self._logger = logging.getLogger(f"{self.__class__.__name__}")

    @staticmethod
    def _handler_key(handler: EventHandlerInterface[E]) -> str:
        """Build the registry key for a handler: class name plus object identity."""
        return f"{handler.__class__.__name__}_{id(handler)}"

    def register(
        self,
        handler: EventHandlerInterface[E],
        metadata: Optional[Dict[str, Any]] = None,
    ) -> None:
        """
        Register an event handler in the registry.

        This method adds a handler to the registry and indexes it by the
        event types it reports via get_handled_events() for efficient
        lookup. Optional metadata about the handler is stored alongside.

        Args:
            handler: The event handler to register
            metadata: Optional dictionary of metadata about the handler
                (copied; the caller's dict is never mutated)

        Note:
            Registration is thread-safe and idempotent: re-registering an
            already-registered handler refreshes its metadata but does not
            duplicate it in the event-type index. Handlers for each event
            type are kept sorted by their ``priority`` attribute (higher
            first; missing priority counts as 0).
        """
        with self._lock:
            handler_id = self._handler_key(handler)
            already_registered = handler_id in self._handlers

            # Store (or refresh) the handler itself
            self._handlers[handler_id] = handler

            if not already_registered:
                # Index by event types; skip on re-registration so a handler
                # never appears twice in a bucket.
                for event_type in handler.get_handled_events():
                    bucket = self._handlers_by_event[event_type]
                    bucket.append(handler)
                    # Sort by priority (higher first)
                    bucket.sort(key=lambda h: getattr(h, "priority", 0), reverse=True)

            # Store metadata; copy so the caller's dict is not mutated
            meta = dict(metadata) if metadata else {}
            meta.update(
                {
                    "registration_time": time.time(),
                    "handler_class": handler.__class__.__name__,
                    "handler_name": getattr(handler, "name", "unknown"),
                }
            )
            self._handler_metadata[handler_id] = meta

            self._logger.debug(f"Registered handler: {handler}")

    def unregister(self, handler: EventHandlerInterface[E]) -> None:
        """
        Remove an event handler from the registry.

        This method removes a handler from the registry and all its indexes.
        Associated metadata is also removed. This operation is useful for
        cleanup or when a handler is no longer needed.

        Args:
            handler: The event handler to unregister

        Note:
            Unregistration is thread-safe and idempotent - unregistering a
            handler that isn't registered has no effect. The event index is
            read with .get() so the lookup never inserts empty buckets.
        """
        with self._lock:
            handler_id = self._handler_key(handler)

            # Remove from main registry (no-op when absent)
            self._handlers.pop(handler_id, None)

            # Remove from event type indices without creating new buckets
            for event_type in handler.get_handled_events():
                bucket = self._handlers_by_event.get(event_type)
                if bucket and handler in bucket:
                    bucket.remove(handler)

            # Remove metadata (no-op when absent)
            self._handler_metadata.pop(handler_id, None)

            self._logger.debug(f"Unregistered handler: {handler}")

    def get_handlers_for_event(
        self, event_type: EventType
    ) -> List[EventHandlerInterface[E]]:
        """
        Get all handlers that can process the given event type.

        Args:
            event_type: The event type to find handlers for

        Returns:
            List of handlers that can process the event type, sorted by
            priority (higher priority handlers first); empty when no
            handler is registered for the type

        Note:
            The returned list is a copy of the internal list to prevent
            concurrent modification issues. The index is read with .get()
            so a lookup for an unhandled event type does not grow the
            underlying defaultdict (the previous implementation inserted
            an empty entry on every miss).
        """
        with self._lock:
            return list(self._handlers_by_event.get(event_type, ()))

    def get_all_handlers(self) -> List[EventHandlerInterface[E]]:
        """
        Get all handlers currently registered in the registry.

        Returns:
            List of all registered handlers, regardless of event type

        Note:
            The returned list is a copy of the internal collection to
            prevent concurrent modification issues.
        """
        with self._lock:
            return list(self._handlers.values())

    def get_handler_count(self) -> int:
        """
        Get the total number of registered handlers.

        Returns:
            Integer count of registered handlers

        Note:
            Thread-safe; accurate even during concurrent registration and
            unregistration operations.
        """
        with self._lock:
            return len(self._handlers)

    def get_handlers_by_type(
        self, handler_type: Type
    ) -> List[EventHandlerInterface[E]]:
        """
        Get all handlers of a specific class type.

        Args:
            handler_type: The class type to filter handlers by

        Returns:
            List of handlers that are instances of the specified type
            or its subclasses

        Example:
            # Get all retry handlers
            retry_handlers = registry.get_handlers_by_type(RetryEventHandler)
        """
        with self._lock:
            return [h for h in self._handlers.values() if isinstance(h, handler_type)]

    def clear(self) -> None:
        """
        Remove all handlers from the registry.

        Completely clears the registry: all handlers, all event-type
        indexes, and all metadata. Useful for testing, reset operations,
        or rebuilding the handler configuration.

        Note:
            Thread-safe; all structures are cleared under one lock to
            keep the registry consistent.
        """
        with self._lock:
            self._handlers.clear()
            self._handlers_by_event.clear()
            self._handler_metadata.clear()
            self._logger.info("Cleared all handlers from registry")

    def get_registry_stats(self) -> Dict[str, Any]:
        """
        Get comprehensive statistics about the registry state.

        Returns:
            Dictionary containing statistics including:
            - total_handlers: Number of registered handlers
            - handler_types: Counter of handler classes
            - event_coverage: Counter of events covered
            - event_types_covered: Number of unique event types covered

        Note:
            The statistics are collected atomically (under the registry
            lock) to ensure consistency. Useful for monitoring, debugging,
            and analytics.
        """
        with self._lock:
            handler_types = defaultdict(int)
            event_coverage = defaultdict(int)

            for handler in self._handlers.values():
                handler_types[handler.__class__.__name__] += 1
                for event_type in handler.get_handled_events():
                    event_coverage[event_type.value] += 1

            return {
                "total_handlers": len(self._handlers),
                "handler_types": dict(handler_types),
                "event_coverage": dict(event_coverage),
                "event_types_covered": len(event_coverage),
            }


# ========== Global Registry Instance ==========

# Default registry instance for application-wide handler management.
# This module-level singleton provides a convenient shared registry for
# handlers when a custom registry is not needed; utilities such as
# temporary_handler() fall back to it when no registry is passed.
global_handler_registry: HandlerRegistry = HandlerRegistry()


# ========== Context Managers ==========


@contextmanager
def temporary_handler(
    handler: EventHandlerInterface[E], registry: Optional[HandlerRegistry] = None
) -> Iterator[EventHandlerInterface[E]]:
    """
    Register *handler* for the duration of a ``with`` block.

    On entry the handler is added to *registry* (falling back to the
    module-wide ``global_handler_registry`` when none is supplied); on
    exit it is removed again, even when the body raises.

    Args:
        handler: The event handler to register temporarily
        registry: Optional registry to use (defaults to global_handler_registry)

    Yields:
        The registered handler, for convenience inside the context

    Example:
        with temporary_handler(logging_handler) as handler:
            # The handler is registered only within this block
            process_events()
        # Handler is automatically unregistered here
    """
    target_registry = global_handler_registry if registry is None else registry

    target_registry.register(handler)
    try:
        yield handler
    finally:
        # Always unregister, even on exceptions raised inside the block.
        target_registry.unregister(handler)


@contextmanager
def handler_metrics_context(
    handler: BaseEventHandler[E],
) -> Iterator[Optional[HandlerMetrics]]:
    """
    Measure a handler's performance over a scoped operation.

    The handler's metrics are reset when the context is entered and a
    summary is logged when it exits, whether or not the body raised.
    When metrics tracking is disabled on the handler, the context yields
    None and performs no reset or logging.

    Args:
        handler: The handler whose metrics should be collected

    Yields:
        The handler's metrics object (or None if metrics tracking is disabled)

    Example:
        with handler_metrics_context(my_handler) as metrics:
            # Process events with the handler
            for event in events:
                my_handler.handle_event(event)
        # Metrics are automatically logged here
    """
    # Start from a clean slate so the report reflects only this scope.
    if handler.metrics:
        handler.reset_metrics()

    try:
        yield handler.metrics
    finally:
        if handler.metrics:
            log = logging.getLogger(__name__)
            log.info(
                f"Handler {handler.name} metrics: {handler.metrics.to_dict()}"
            )


# ========== Utility Functions ==========


def create_functional_handler(
    event_types: Union[EventType, List[EventType]],
    handler_func: Callable[[E], Optional[E]],
    **kwargs,
) -> FunctionalEventHandler[E]:
    """
    Build a FunctionalEventHandler from a plain callable.

    A thin factory offering a more concise syntax than instantiating
    FunctionalEventHandler directly.

    Args:
        event_types: Single event type or list of event types this handler processes
        handler_func: Function that takes an event and returns a processed event
        **kwargs: Additional arguments forwarded to the FunctionalEventHandler
            constructor (e.g. name, priority)

    Returns:
        Configured FunctionalEventHandler instance

    Example:
        def log_event(event):
            print(f"Event received: {event}")
            return event

        handler = create_functional_handler(
            event_types=[EventType.TASK_STARTED, EventType.TASK_COMPLETED],
            handler_func=log_event,
            name="logging_handler",
            priority=10
        )
    """
    handler = FunctionalEventHandler(
        handler_func=handler_func,
        event_types=event_types,
        **kwargs,
    )
    return handler


def create_async_functional_handler(
    event_types: Union[EventType, List[EventType]],
    async_handler_func: Callable[[E], Coroutine[Any, Any, Optional[E]]],
    **kwargs,
) -> AsyncFunctionalEventHandler[E]:
    """
    Build an AsyncFunctionalEventHandler from an async callable.

    A thin factory offering a more concise syntax than instantiating
    AsyncFunctionalEventHandler directly.

    Args:
        event_types: Single event type or list of event types this handler processes
        async_handler_func: Async function that takes an event and returns a processed event
        **kwargs: Additional arguments forwarded to the AsyncFunctionalEventHandler
            constructor (e.g. name, priority)

    Returns:
        Configured AsyncFunctionalEventHandler instance

    Example:
        async def process_event(event):
            result = await api_client.process(event.data)
            return event.with_result(result)

        async_handler = create_async_functional_handler(
            event_types=EventType.API_REQUEST,
            async_handler_func=process_event,
            name="api_handler"
        )
    """
    handler = AsyncFunctionalEventHandler(
        async_handler_func=async_handler_func,
        event_types=event_types,
        **kwargs,
    )
    return handler


def chain_handlers(*handlers: EventHandlerInterface[E]) -> ChainableEventHandler[E]:
    """
    Compose any number of handlers into a single ChainableEventHandler.

    A thin factory offering a more concise syntax than instantiating
    ChainableEventHandler directly.

    Args:
        *handlers: Variable number of handlers to chain together,
            in the order they should run

    Returns:
        ChainableEventHandler containing all the provided handlers

    Example:
        # Chain multiple handlers together
        chain = chain_handlers(
            validation_handler,
            logging_handler,
            processing_handler
        )

        # Use the chain
        result = chain.handle_event(event)
    """
    return ChainableEventHandler(handlers=[*handlers])
