"""
Unified event dispatcher with comprehensive features and progressive disclosure.

This module consolidates basic and advanced dispatcher functionality into a cohesive
system that supports multiple processing patterns while following SOLID principles.
"""

import logging
import threading
import weakref
import time
from collections import defaultdict, deque
from concurrent.futures import ThreadPoolExecutor, Future, as_completed
from contextlib import contextmanager
from dataclasses import dataclass, field
from enum import Enum
from typing import (
    Any,
    Callable,
    Dict,
    List,
    Optional,
    Set,
    TypeVar,
    Deque,
    ContextManager,
    Generic,
    Iterator,
)

from continuallearning.events.interfaces import (
    EventHandlerInterface,
    AsyncEventHandlerInterface,
)
from continuallearning.events.core.event_types import BaseEventData, EventType

# Type variable for event data
E = TypeVar("E", bound=BaseEventData)


# ========== Core Enumerations ==========


class DispatcherMode(Enum):
    """Dispatcher operation modes for different use cases.

    SYNCHRONOUS: simple in-thread processing.
    ASYNCHRONOUS: async-only processing.
    BATCH: queue events and process them in batches.
    HYBRID: mixed sync/async processing.
    """

    SYNCHRONOUS = "synchronous"
    ASYNCHRONOUS = "asynchronous"
    BATCH = "batch"
    HYBRID = "hybrid"


class ComponentStatus(Enum):
    """Lifecycle status of a registered dispatcher component.

    Components start as REGISTERED and may become ACTIVE, SUSPENDED,
    or FAILED over their lifetime.
    """

    REGISTERED = "registered"
    ACTIVE = "active"
    SUSPENDED = "suspended"
    FAILED = "failed"


# ========== Statistics and Monitoring ==========


@dataclass
class DispatcherStats:
    """
    Unified statistics for dispatcher performance monitoring.

    Consolidates basic and advanced metrics into a single comprehensive system.
    Counters are plain ints/floats; this dataclass performs no locking, so
    callers that update it from multiple threads must synchronize externally.
    """

    # === Core Event Processing Stats ===
    events_dispatched: int = 0
    events_completed: int = 0
    events_failed: int = 0
    events_filtered: int = 0

    # === Handler Stats ===
    handlers_registered: int = 0
    handlers_executed: int = 0
    handlers_failed: int = 0

    # === Component Stats ===
    filters_registered: int = 0
    middleware_registered: int = 0
    observers_registered: int = 0
    validators_registered: int = 0

    # === Performance Metrics ===
    total_processing_time: float = 0.0
    average_processing_time: float = 0.0
    max_processing_time: float = 0.0
    # Starts at +inf so the first recorded time becomes the minimum.
    min_processing_time: float = float("inf")

    # === Resource Usage ===
    memory_usage_mb: float = 0.0
    thread_pool_size: int = 0
    active_tasks: int = 0

    # === Time Tracking ===
    start_time: float = field(default_factory=time.time)
    last_event_time: Optional[float] = None

    def update_event_stats(self, processing_time: float, success: bool = True) -> None:
        """Record one dispatched event.

        Args:
            processing_time: Wall-clock seconds spent processing the event.
            success: Whether the event completed without error.

        Timing aggregates (avg/max/min) include both successful and failed
        events, since both consumed processing time.
        """
        self.events_dispatched += 1

        if success:
            self.events_completed += 1
        else:
            self.events_failed += 1

        # Update timing metrics
        self.total_processing_time += processing_time
        self.average_processing_time = (
            self.total_processing_time / self.events_dispatched
        )
        self.max_processing_time = max(self.max_processing_time, processing_time)
        self.min_processing_time = min(self.min_processing_time, processing_time)
        self.last_event_time = time.time()

    def get_uptime(self) -> float:
        """Get dispatcher uptime in seconds."""
        return time.time() - self.start_time

    def get_throughput(self) -> float:
        """Get events per second throughput (0.0 if uptime is not positive)."""
        uptime = self.get_uptime()
        return self.events_dispatched / uptime if uptime > 0 else 0.0

    def to_dict(self) -> Dict[str, Any]:
        """Convert stats to dictionary for serialization.

        Note: if no events were recorded, ``min_processing_time`` is still
        ``inf``, which is not JSON-serializable; callers should handle it.
        """
        # Loop variable renamed to ``f`` so it does not shadow the imported
        # ``dataclasses.field`` helper used in the class body above.
        return {
            f.name: getattr(self, f.name)
            for f in self.__dataclass_fields__.values()
        }


@dataclass
class EventMetrics:
    """Advanced metrics collector for detailed monitoring.

    All recording and reporting methods take an internal lock, so a single
    instance can be shared across threads.
    """

    event_counters: Dict[EventType, int] = field(
        default_factory=lambda: defaultdict(int)
    )
    handler_metrics: Dict[str, Dict[str, Any]] = field(
        default_factory=lambda: defaultdict(dict)
    )
    processing_times: List[float] = field(default_factory=list)
    _lock: threading.Lock = field(default_factory=threading.Lock)

    def record_event_processed(self, event_type: EventType, duration: float) -> None:
        """Record that an event of *event_type* took *duration* seconds."""
        with self._lock:
            self.event_counters[event_type] += 1
            self.processing_times.append(duration)

    def record_handler_execution(
        self, handler_name: str, duration: float, success: bool
    ) -> None:
        """Record one handler invocation, updating its aggregate metrics."""
        with self._lock:
            # Lazily create the per-handler record on first sight.
            entry = self.handler_metrics.setdefault(
                handler_name,
                {
                    "executions": 0,
                    "successes": 0,
                    "failures": 0,
                    "total_time": 0.0,
                    "avg_time": 0.0,
                },
            )

            entry["executions"] += 1
            entry["total_time"] += duration
            entry["avg_time"] = entry["total_time"] / entry["executions"]

            outcome_key = "successes" if success else "failures"
            entry[outcome_key] += 1

    def get_metrics_summary(self) -> Dict[str, Any]:
        """Get a snapshot summary of all collected metrics."""
        with self._lock:
            times = self.processing_times
            timing = {
                "count": len(times),
                "avg": (sum(times) / len(times)) if times else 0,
                "max": max(times) if times else 0,
                "min": min(times) if times else 0,
            }
            return {
                "event_counts": dict(self.event_counters),
                "handler_metrics": dict(self.handler_metrics),
                "processing_times": timing,
            }


# ========== Component Implementations ==========


class EventFilter(Generic[E]):
    """Unified event filter implementation.

    Wraps a predicate and an optional set of event types; events that do not
    match the type set or for which the predicate returns False (or raises)
    are rejected.
    """

    def __init__(
        self,
        filter_func: Callable[[E], bool],
        event_types: Optional[List[EventType]] = None,
        name: Optional[str] = None,
        priority: int = 0,
    ):
        self.filter_func = filter_func
        # Empty set means "no type restriction".
        self.event_types = set(event_types) if event_types else set()
        self.name = name or f"Filter_{id(self)}"
        self.priority = priority
        self.status = ComponentStatus.REGISTERED

    def should_process(self, event: E) -> bool:
        """Return True when *event* passes both the type check and predicate."""
        restricted = bool(self.event_types)
        if restricted and event.event_type not in self.event_types:
            return False

        try:
            verdict = self.filter_func(event)
        except Exception:
            # A crashing predicate is treated as a rejection.
            return False
        return verdict

    def get_filter_criteria(self) -> Dict[str, Any]:
        """Get filter criteria for debugging."""
        criteria: Dict[str, Any] = {
            "name": self.name,
            "event_types": list(self.event_types),
            "priority": self.priority,
            "status": self.status.value,
        }
        return criteria


class EventMiddleware(Generic[E]):
    """Unified event middleware implementation.

    Wraps a function of ``(event, next_handler) -> event | None`` that may
    transform the event before delegating to the rest of the chain.
    """

    def __init__(
        self,
        middleware_func: Callable[[E, Callable[[E], Optional[E]]], Optional[E]],
        priority: int = 0,
        name: Optional[str] = None,
    ):
        self.middleware_func = middleware_func
        self.priority = priority
        self.name = name or f"Middleware_{id(self)}"
        self.status = ComponentStatus.REGISTERED

    def process(
        self, event: E, next_handler: Callable[[E], Optional[E]]
    ) -> Optional[E]:
        """Run the wrapped middleware; on failure, skip it and continue."""
        try:
            result = self.middleware_func(event, next_handler)
        except Exception as e:
            logging.error(f"Middleware {self.name} failed: {e}")
            # A broken middleware must not halt the pipeline: pass the
            # original event straight to the next stage.
            result = next_handler(event)
        return result

    def get_priority(self) -> int:
        """Get middleware priority."""
        return self.priority

class EventValidator(Generic[E]):
    """Unified event validator implementation.

    Applies a boolean validation function to events, optionally restricted
    to particular event types, with optional custom error messages.
    """

    def __init__(
        self,
        validator_func: Callable[[E], bool],
        error_message_func: Optional[Callable[[E], str]] = None,
        event_types: Optional[List[EventType]] = None,
        name: Optional[str] = None,
    ):
        self.validator_func = validator_func
        self.error_message_func = error_message_func
        # Empty set means the validator applies to every event type.
        self.event_types = set(event_types) if event_types else set()
        self.name = name or f"Validator_{id(self)}"
        self.status = ComponentStatus.REGISTERED

    def can_validate(self, event: E) -> bool:
        """Return True if this validator applies to *event*'s type."""
        if not self.event_types:
            return True
        return event.event_type in self.event_types

    def validate(self, event: E) -> bool:
        """Validate an event; out-of-scope events pass trivially."""
        if not self.can_validate(event):
            return True

        try:
            verdict = self.validator_func(event)
        except Exception:
            # A crashing validator counts as a validation failure.
            return False
        return verdict

    def get_validation_errors(self, event: E) -> List[str]:
        """Return error messages for *event*, or an empty list if it is valid."""
        if self.validate(event):
            return []

        if self.error_message_func is not None:
            try:
                return [self.error_message_func(event)]
            except Exception:
                # Fall back to the generic message below.
                pass

        return [f"Validation failed for event {event.event_type} in {self.name}"]


class EventObserver(Generic[E]):
    """Unified event observer implementation.

    Observers receive events for side effects only; their return value is
    ignored and their failures never interrupt dispatching.
    """

    def __init__(
        self,
        observer_func: Callable[[E], None],
        event_types: Optional[List[EventType]] = None,
        name: Optional[str] = None,
        async_observer: bool = False,
    ):
        self.observer_func = observer_func
        # Empty set means this observer watches every event type.
        self.event_types = set(event_types) if event_types else set()
        self.name = name or f"Observer_{id(self)}"
        self.async_observer = async_observer
        self.status = ComponentStatus.REGISTERED

    def should_observe(self, event: E) -> bool:
        """Return True if this observer watches *event*'s type."""
        if not self.event_types:
            return True
        return event.event_type in self.event_types

    def observe(self, event: E) -> None:
        """Observe an event (non-blocking)."""
        if not self.should_observe(event):
            return

        try:
            if not self.async_observer:
                self.observer_func(event)
            else:
                # Run in a daemon thread so a slow observer cannot block
                # the dispatch path. NOTE: exceptions raised inside the
                # thread are not caught here, only thread-start failures.
                worker = threading.Thread(
                    target=self.observer_func, args=(event,), daemon=True
                )
                worker.start()
        except Exception as e:
            logging.warning(f"Observer {self.name} failed: {e}")

    def get_observed_events(self) -> List[EventType]:
        """Get list of observed event types."""
        return list(self.event_types)


# ========== Unified Event Dispatcher ==========


class EventDispatcher(Generic[E]):
    """
    Unified event dispatcher with comprehensive features and progressive disclosure.

    This dispatcher consolidates all functionality from basic and advanced versions
    while supporting multiple processing patterns through configurable modes.
    """

    def __init__(
        self,
        name: str = "EventDispatcher",
        mode: DispatcherMode = DispatcherMode.SYNCHRONOUS,
        max_workers: Optional[int] = None,
        enable_stats: bool = True,
        enable_metrics: bool = False,  # Progressive disclosure
        default_timeout: float = 30.0,
        batch_size: int = 100,
        batch_timeout: float = 1.0,
        auto_start: bool = True,
    ):
        """
        Initialize the unified event dispatcher.

        Args:
            name: Dispatcher instance name
            mode: Processing mode (sync/async/batch/hybrid)
            max_workers: Max thread pool workers
            enable_stats: Enable basic statistics
            enable_metrics: Enable advanced metrics collection
            default_timeout: Default async timeout
            batch_size: Batch processing size
            batch_timeout: Batch processing timeout
            auto_start: Auto-start dispatcher (BATCH mode also spawns a
                background flushing thread on start)
        """
        self.name = name
        self.mode = mode
        self.default_timeout = default_timeout
        self.batch_size = batch_size
        self.batch_timeout = batch_timeout
        self.enable_stats = enable_stats
        self.enable_metrics = enable_metrics

        # Component storage
        self._handlers: Dict[EventType, List[EventHandlerInterface[E]]] = defaultdict(
            list
        )
        self._async_handlers: Dict[EventType, List[AsyncEventHandlerInterface[E]]] = (
            defaultdict(list)
        )
        self._filters: List[EventFilter] = []
        self._middleware: List[EventMiddleware] = []
        self._observers: List[EventObserver] = []
        self._validators: List[EventValidator] = []

        # Thread safety (reentrant: registration methods may nest)
        self._lock = threading.RLock()

        # Performance monitoring
        self.stats = DispatcherStats() if enable_stats else None
        self.metrics = EventMetrics() if enable_metrics else None

        # Async support — a pool is created eagerly for every non-sync mode.
        self._thread_pool: Optional[ThreadPoolExecutor] = None
        if mode in [
            DispatcherMode.ASYNCHRONOUS,
            DispatcherMode.HYBRID,
            DispatcherMode.BATCH,
        ]:
            self._thread_pool = ThreadPoolExecutor(
                max_workers=max_workers, thread_name_prefix=f"{name}_executor"
            )

        # Batch processing
        self._event_queue: Deque[E] = deque()
        self._batch_processing_active = False

        # Lifecycle management
        self._running = False
        self._startup_hooks: List[Callable[[], None]] = []
        self._shutdown_hooks: List[Callable[[], None]] = []

        # Weak references for memory management
        self._handler_refs: Set[weakref.ref] = set()

        # Logger
        self._logger = logging.getLogger(f"{self.__class__.__name__}.{name}")

        if auto_start:
            self.start()

    # ========== Lifecycle Management ==========

    def start(self) -> None:
        """Start the dispatcher (idempotent)."""
        if self._running:
            return

        self._logger.info(f"Starting dispatcher: {self.name}")
        self._running = True

        # Execute startup hooks; a failing hook is logged but does not
        # prevent the dispatcher from starting.
        for hook in self._startup_hooks:
            try:
                hook()
            except Exception as e:
                self._logger.error(f"Startup hook failed: {e}")

        # Start batch processing if needed
        if self.mode == DispatcherMode.BATCH:
            self._start_batch_processing()

    def stop(self) -> None:
        """Stop the dispatcher (idempotent)."""
        if not self._running:
            return

        self._logger.info(f"Stopping dispatcher: {self.name}")
        self._running = False

        # Stop batch processing
        self._batch_processing_active = False

        # Flush any events still waiting in the batch queue so they are not
        # silently dropped on shutdown (must happen before the pool closes).
        if self._event_queue:
            self._process_batch()

        # Execute shutdown hooks
        for hook in self._shutdown_hooks:
            try:
                hook()
            except Exception as e:
                self._logger.error(f"Shutdown hook failed: {e}")

        # Shutdown thread pool
        if self._thread_pool:
            self._thread_pool.shutdown(wait=True)

    def add_startup_hook(self, hook: Callable[[], None]) -> None:
        """Add a hook invoked when the dispatcher starts."""
        self._startup_hooks.append(hook)

    def add_shutdown_hook(self, hook: Callable[[], None]) -> None:
        """Add a hook invoked when the dispatcher stops."""
        self._shutdown_hooks.append(hook)

    # ========== Handler Management ==========

    def register_handler(self, handler: EventHandlerInterface[E]) -> None:
        """Register an event handler for every event type it declares."""
        with self._lock:
            for event_type in handler.get_handled_events():
                self._handlers[event_type].append(handler)
                # Sort by priority (higher priority first)
                self._handlers[event_type].sort(
                    key=lambda h: getattr(h, "priority", 0), reverse=True
                )

            # Update stats
            if self.stats:
                self.stats.handlers_registered += 1

            self._logger.debug(f"Registered handler: {handler}")

    def register_async_handler(self, handler: AsyncEventHandlerInterface[E]) -> None:
        """Register an async event handler.

        Raises:
            ValueError: If the dispatcher runs in SYNCHRONOUS mode.
        """
        if self.mode == DispatcherMode.SYNCHRONOUS:
            raise ValueError("Cannot register async handler in synchronous mode")

        with self._lock:
            for event_type in handler.get_handled_events():
                self._async_handlers[event_type].append(handler)
                # Sort by priority
                self._async_handlers[event_type].sort(
                    key=lambda h: getattr(h, "priority", 0), reverse=True
                )

            if self.stats:
                self.stats.handlers_registered += 1

            self._logger.debug(f"Registered async handler: {handler}")

    def unregister_handler(self, handler: EventHandlerInterface[E]) -> None:
        """Unregister an event handler (no-op for types it is not registered on)."""
        with self._lock:
            for event_type in handler.get_handled_events():
                # Use .get() so looking up an unknown type does not create
                # an empty entry in the defaultdict.
                registered = self._handlers.get(event_type)
                if registered and handler in registered:
                    registered.remove(handler)

            if self.stats:
                self.stats.handlers_registered = max(
                    0, self.stats.handlers_registered - 1
                )

            self._logger.debug(f"Unregistered handler: {handler}")

    def register_filter(self, event_filter: EventFilter) -> None:
        """Register an event filter."""
        with self._lock:
            self._filters.append(event_filter)
            # Sort by priority
            self._filters.sort(key=lambda f: f.priority, reverse=True)

            if self.stats:
                self.stats.filters_registered += 1

            self._logger.debug(f"Registered filter: {event_filter.name}")

    def register_middleware(self, middleware: EventMiddleware) -> None:
        """Register event middleware."""
        with self._lock:
            self._middleware.append(middleware)
            # Sort by priority
            self._middleware.sort(key=lambda m: m.priority, reverse=True)

            if self.stats:
                self.stats.middleware_registered += 1

            self._logger.debug(f"Registered middleware: {middleware.name}")

    def register_observer(self, observer: EventObserver) -> None:
        """Register an event observer."""
        with self._lock:
            self._observers.append(observer)

            if self.stats:
                self.stats.observers_registered += 1

            self._logger.debug(f"Registered observer: {observer.name}")

    def register_validator(self, validator: EventValidator) -> None:
        """Register an event validator."""
        with self._lock:
            self._validators.append(validator)

            if self.stats:
                self.stats.validators_registered += 1

            self._logger.debug(f"Registered validator: {validator.name}")

    # ========== Event Processing ==========

    def dispatch(self, event: E) -> Optional[E]:
        """Dispatch an event using the configured mode.

        Returns:
            The (possibly transformed) event, or None if the event was
            rejected by validation/filters, the dispatcher is stopped,
            or processing failed.
        """
        if not self._running:
            self._logger.warning("Dispatcher not running, event ignored")
            return None

        start_time = time.time()

        try:
            # Validate event
            if not self._validate_event(event):
                return None

            # Apply filters
            if not self._apply_filters(event):
                if self.stats:
                    self.stats.events_filtered += 1
                return None

            # Notify observers
            self._notify_observers(event)

            # Process based on mode
            result = None
            if self.mode == DispatcherMode.BATCH:
                self._add_to_batch(event)
                result = event  # Return immediately for batch processing
            else:
                # Process through middleware and handlers
                result = self._process_middleware(event)

            # Update stats
            processing_time = time.time() - start_time
            if self.stats:
                self.stats.update_event_stats(processing_time, success=True)

            if self.metrics:
                self.metrics.record_event_processed(event.event_type, processing_time)

            return result

        except Exception as e:
            processing_time = time.time() - start_time
            if self.stats:
                self.stats.update_event_stats(processing_time, success=False)

            self._logger.error(f"Error dispatching event: {e}", exc_info=True)
            return None

    async def dispatch_async(self, event: E) -> Optional[E]:
        """Dispatch an event asynchronously.

        Runs validation, filters, observers and the middleware/sync-handler
        pipeline synchronously, then awaits each registered async handler
        in priority order. Falls back to plain dispatch() in SYNCHRONOUS mode.
        """
        if self.mode == DispatcherMode.SYNCHRONOUS:
            # Fall back to sync dispatch
            return self.dispatch(event)

        if not self._running:
            return None

        start_time = time.time()

        try:
            # Validation and filtering (sync)
            if not self._validate_event(event) or not self._apply_filters(event):
                return None

            # Notify observers
            self._notify_observers(event)

            # Process through middleware
            current_event = self._process_middleware(event)
            if current_event is None:
                return None

            # Process async handlers
            async_handlers = self._async_handlers.get(event.event_type, [])
            for handler in async_handlers:
                if handler.can_handle(current_event):
                    try:
                        result = await handler.handle_event(current_event)
                        # A handler may transform the event; None means
                        # "keep the current one".
                        if result is not None:
                            current_event = result

                        if self.stats:
                            self.stats.handlers_executed += 1

                    except Exception as e:
                        if self.stats:
                            self.stats.handlers_failed += 1
                        self._logger.error(f"Async handler failed: {e}")

            # Update stats
            processing_time = time.time() - start_time
            if self.stats:
                self.stats.update_event_stats(processing_time, success=True)

            return current_event

        except Exception as e:
            processing_time = time.time() - start_time
            if self.stats:
                self.stats.update_event_stats(processing_time, success=False)

            self._logger.error(f"Error in async dispatch: {e}", exc_info=True)
            return None

    def dispatch_batch(self, events: List[E]) -> List[Optional[E]]:
        """Dispatch a batch of events.

        Returns:
            A list of results where results[i] corresponds to events[i]
            (None for events that failed or timed out).
        """
        if self.mode == DispatcherMode.SYNCHRONOUS:
            return [self.dispatch(event) for event in events]

        if not self._thread_pool:
            return [self.dispatch(event) for event in events]

        # Submit all events to thread pool
        futures = [self._thread_pool.submit(self.dispatch, event) for event in events]

        # Collect results in SUBMISSION order so the output aligns with the
        # input list. (The previous as_completed() loop yielded futures in
        # completion order, silently misaligning results[i] and events[i].)
        results: List[Optional[E]] = []
        for future in futures:
            try:
                results.append(future.result(timeout=self.default_timeout))
            except Exception as e:
                self._logger.error(f"Batch processing error: {e}")
                results.append(None)

        return results

    def dispatch_background(self, event: E) -> Future[Optional[E]]:
        """Dispatch an event in a background thread and return its Future.

        Raises:
            RuntimeError: If no thread pool exists (SYNCHRONOUS mode).
        """
        if not self._thread_pool:
            raise RuntimeError("Thread pool not available for background dispatch")

        return self._thread_pool.submit(self.dispatch, event)

    # ========== Internal Processing Methods ==========

    def _validate_event(self, event: E) -> bool:
        """Validate an event using registered validators (first failure wins)."""
        for validator in self._validators:
            if not validator.validate(event):
                validation_errors = validator.get_validation_errors(event)
                self._logger.warning(f"Event validation failed: {validation_errors}")
                return False
        return True

    def _apply_filters(self, event: E) -> bool:
        """Apply filters to an event; False as soon as any filter rejects it."""
        for event_filter in self._filters:
            if not event_filter.should_process(event):
                self._logger.debug(f"Event filtered by {event_filter.name}")
                return False
        return True

    def _notify_observers(self, event: E) -> None:
        """Notify all observers about an event; observer failures are logged only."""
        for observer in self._observers:
            try:
                observer.observe(event)
            except Exception as e:
                self._logger.warning(f"Observer {observer.name} failed: {e}")

    def _process_middleware(self, event: E) -> Optional[E]:
        """Process event through the middleware chain, ending at the handlers.

        Builds a nested chain of closures so each middleware receives a
        `next_handler` callable representing the rest of the pipeline.
        """
        if not self._middleware:
            return self._dispatch_to_handlers(event)

        def create_next_handler(index: int) -> Callable[[E], Optional[E]]:
            if index >= len(self._middleware):
                return self._dispatch_to_handlers

            def next_handler(e: E) -> Optional[E]:
                middleware = self._middleware[index]
                return middleware.process(e, create_next_handler(index + 1))

            return next_handler

        return create_next_handler(0)(event)

    def _dispatch_to_handlers(self, event: E) -> Optional[E]:
        """Dispatch event to registered sync handlers in priority order.

        Each handler may return a transformed event that is fed to the next
        handler; returning None keeps the current event unchanged.
        """
        handlers = self._handlers.get(event.event_type, [])
        current_event = event

        for handler in handlers:
            if handler.can_handle(current_event):
                try:
                    result = handler.handle_event(current_event)
                    if result is not None:
                        current_event = result

                    if self.stats:
                        self.stats.handlers_executed += 1

                    if self.metrics:
                        # Per-handler duration is not measured here, hence 0.
                        self.metrics.record_handler_execution(
                            handler.__class__.__name__, 0, True
                        )

                except Exception as e:
                    if self.stats:
                        self.stats.handlers_failed += 1

                    if self.metrics:
                        self.metrics.record_handler_execution(
                            handler.__class__.__name__, 0, False
                        )

                    self._logger.error(f"Handler {handler} failed: {e}")

        return current_event

    def _add_to_batch(self, event: E) -> None:
        """Add event to the batch queue, flushing when batch_size is reached."""
        self._event_queue.append(event)

        if len(self._event_queue) >= self.batch_size:
            self._process_batch()

    def _start_batch_processing(self) -> None:
        """Start the background thread that periodically flushes the batch queue."""
        self._batch_processing_active = True

        def batch_processor():
            # Daemon loop: wake up every batch_timeout seconds and flush
            # whatever has accumulated.
            while self._batch_processing_active:
                time.sleep(self.batch_timeout)
                if self._event_queue:
                    self._process_batch()

        threading.Thread(target=batch_processor, daemon=True).start()

    def _process_batch(self) -> None:
        """Process queued events as a batch via the thread pool."""
        if not self._event_queue:
            return

        # Snapshot and clear the queue atomically.
        with self._lock:
            events = list(self._event_queue)
            self._event_queue.clear()

        # Process batch in thread pool
        if self._thread_pool:
            futures = [
                self._thread_pool.submit(self._dispatch_to_handlers, event)
                for event in events
            ]

            for future in as_completed(futures):
                try:
                    future.result()
                except Exception as e:
                    self._logger.error(f"Batch processing error: {e}")

    # ========== Utility Methods ==========

    def has_handlers_for(self, event_type: EventType) -> bool:
        """Check if any sync or async handlers are registered for event_type."""
        with self._lock:
            return bool(self._handlers.get(event_type)) or bool(
                self._async_handlers.get(event_type)
            )

    def get_handler_count(self) -> int:
        """Get total number of handler registrations (sync + async).

        A handler registered for N event types counts N times.
        """
        with self._lock:
            sync_count = sum(len(handlers) for handlers in self._handlers.values())
            async_count = sum(
                len(handlers) for handlers in self._async_handlers.values()
            )
            return sync_count + async_count

    def get_stats(self) -> Optional[DispatcherStats]:
        """Get dispatcher statistics (None when stats are disabled)."""
        return self.stats

    def get_metrics_summary(self) -> Optional[Dict[str, Any]]:
        """Get advanced metrics summary (None when metrics are disabled)."""
        return self.metrics.get_metrics_summary() if self.metrics else None

    def clear_handlers(self) -> None:
        """Clear all registered handlers and components, resetting their counters."""
        with self._lock:
            self._handlers.clear()
            self._async_handlers.clear()
            self._filters.clear()
            self._middleware.clear()
            self._observers.clear()
            self._validators.clear()

            if self.stats:
                self.stats.handlers_registered = 0
                self.stats.filters_registered = 0
                self.stats.middleware_registered = 0
                self.stats.observers_registered = 0
                self.stats.validators_registered = 0

    # ========== Context Managers ==========

    @contextmanager
    def batch_processing(self) -> Iterator[None]:
        """Temporarily switch to BATCH mode, flushing remaining events on exit."""
        original_mode = self.mode
        self.mode = DispatcherMode.BATCH

        try:
            yield
        finally:
            # Process any remaining events
            if self._event_queue:
                self._process_batch()
            self.mode = original_mode

    @contextmanager
    def temporary_handler(
        self, handler: EventHandlerInterface[E]
    ) -> Iterator[EventHandlerInterface[E]]:
        """Register *handler* for the duration of the with-block only."""
        self.register_handler(handler)
        try:
            yield handler
        finally:
            self.unregister_handler(handler)

    def __enter__(self):
        """Enter context manager: start the dispatcher."""
        self.start()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Exit context manager: stop the dispatcher."""
        self.stop()

    def __repr__(self) -> str:
        """String representation."""
        return (
            f"{self.__class__.__name__}("
            f"name='{self.name}', "
            f"mode={self.mode.value}, "
            f"handlers={self.get_handler_count()}, "
            f"running={self._running})"
        )


# ========== Global Dispatcher Instances ==========
#
# NOTE: these are constructed at import time with auto_start defaulting to
# True, so importing this module has side effects: the async/batch/hybrid
# dispatchers each create a ThreadPoolExecutor, and the batch dispatcher's
# start() additionally spawns a daemon flushing thread.

# Default synchronous dispatcher
global_dispatcher = EventDispatcher(
    name="GlobalDispatcher",
    mode=DispatcherMode.SYNCHRONOUS,
    enable_stats=True,
)

# High-performance async dispatcher
global_async_dispatcher = EventDispatcher(
    name="GlobalAsyncDispatcher",
    mode=DispatcherMode.ASYNCHRONOUS,
    enable_stats=True,
    enable_metrics=True,
)

# Batch processing dispatcher (flushes every 0.5s or every 50 events)
global_batch_dispatcher = EventDispatcher(
    name="GlobalBatchDispatcher",
    mode=DispatcherMode.BATCH,
    batch_size=50,
    batch_timeout=0.5,
    enable_stats=True,
)

# Hybrid dispatcher for complex workflows
global_hybrid_dispatcher = EventDispatcher(
    name="GlobalHybridDispatcher",
    mode=DispatcherMode.HYBRID,
    enable_stats=True,
    enable_metrics=True,
)


# ========== Convenience Functions ==========


def create_dispatcher(
    name: str = "CustomDispatcher",
    mode: DispatcherMode = DispatcherMode.SYNCHRONOUS,
    **kwargs,
) -> EventDispatcher:
    """Build and return a new EventDispatcher with the given configuration.

    Any extra keyword arguments are forwarded to the EventDispatcher
    constructor unchanged.
    """
    config = dict(kwargs)
    config["name"] = name
    config["mode"] = mode
    return EventDispatcher(**config)


@contextmanager
def temporary_handler(
    handler: EventHandlerInterface[E],
    dispatcher: Optional[EventDispatcher] = None,
) -> Iterator[EventHandlerInterface[E]]:
    """Register *handler* for the duration of the with-block, then remove it.

    Uses the module-level global_dispatcher when no dispatcher is supplied.
    """
    target = global_dispatcher if dispatcher is None else dispatcher
    target.register_handler(handler)
    try:
        yield handler
    finally:
        # Always unregister, even if the with-block raised.
        target.unregister_handler(handler)
