"""
Built-in event handlers for common use cases.

This module provides standard event handlers that can be used out-of-the-box
for typical continual learning scenarios. These handlers implement common
patterns like logging, memory monitoring, and loss tracking.

Built-in handlers:
- LoggingEventHandler: Logs events with configurable detail level
- MemoryMonitorHandler: Monitors system and GPU memory usage
- LossTrackerHandler: Tracks loss values and detects anomalies

Example:
    >>> from learners.events.config import HandlerConfig, EventConfig
    >>> from learners.events.handlers.builtin import LoggingEventHandler
    >>> from learners.interfaces.event import EventType
    >>>
    >>> # Create handler configuration
    >>> handler_config = HandlerConfig.create(
    ...     type="LoggingEventHandler",
    ...     events=[EventType.ON_TASK_START.value, EventType.ON_TASK_END.value],
    ...     config={"log_level": "INFO"},
    ...     enabled=True
    ... )
    >>>
    >>> # Create and use the handler
    >>> handler = LoggingEventHandler(handler_config)
    >>> handler.handle(context)  # Logs event information
"""

import logging
from learners.interfaces.event import EventContext, EventType
from learners.events.handlers.base import BaseEventHandler
from learners.events.config import HandlerConfig


class LoggingEventHandler(BaseEventHandler):
    """
    Event handler that writes one log line per received event.

    The log level and which context fields (task, epoch, batch) appear in
    the message are controlled through the handler configuration.

    Configuration options:
        log_level: Logging level name (DEBUG, INFO, WARNING, ERROR, CRITICAL)
        include_task: Whether to include task ID in log messages
        include_epoch: Whether to include epoch number in log messages
        include_batch: Whether to include batch index in log messages
    """

    def __init__(self, config: HandlerConfig, *args, **kwargs):
        """
        Initialize the logging handler and cache its formatting options.

        Args:
            config: Handler configuration
            *args, **kwargs: Additional arguments forwarded to the base class
        """
        super().__init__(config, *args, **kwargs)
        self.logger = logging.getLogger("EventSystem")

        # Resolve the configured level name to a logging constant,
        # falling back to INFO for unrecognized names.
        level_name = self.config.get("log_level", "INFO")
        self.log_level = getattr(logging, level_name.upper(), logging.INFO)
        self.include_task = self.config.get("include_task", True)
        self.include_epoch = self.config.get("include_epoch", False)
        self.include_batch = self.config.get("include_batch", False)

    def _handle_event(self, context: EventContext) -> None:
        """
        Log the event name plus any enabled, present context fields.

        Args:
            context: Event context containing event data
        """
        parts = [f"Event: {context.event.value}"]

        # Each optional field is appended only when enabled in the config
        # AND present with a non-None value on the context object.
        optional_fields = (
            (self.include_task, "task_id", "Task"),
            (self.include_epoch, "epoch", "Epoch"),
            (self.include_batch, "batch_idx", "Batch"),
        )
        for enabled, attr, label in optional_fields:
            if not enabled:
                continue
            value = getattr(context, attr, None)
            if value is not None:
                parts.append(f"{label}: {value}")

        self.logger.log(self.log_level, " | ".join(parts))


class MemoryMonitorHandler(BaseEventHandler):
    """
    Handler that reports memory usage when events fire.

    Collects system RAM usage (via psutil) and GPU memory usage (via
    torch.cuda) when those libraries are available, then logs a combined
    summary line for the triggering event.

    Configuration options:
        log_level: Logging level for memory reports
        track_gpu: Whether to track GPU memory (default: True)
        track_system: Whether to track system memory (default: True)
    """

    def __init__(self, config: HandlerConfig, *args, **kwargs):
        """
        Initialize the memory monitor and cache its configuration.

        Args:
            config: Handler configuration
            *args, **kwargs: Additional arguments forwarded to the base class
        """
        super().__init__(config, *args, **kwargs)
        self.logger = logging.getLogger("MemoryMonitor")

        # Resolve the level name to a logging constant; unknown names -> INFO.
        level_name = self.config.get("log_level", "INFO")
        self.log_level = getattr(logging, level_name.upper(), logging.INFO)
        self.track_gpu = self.config.get("track_gpu", True)
        self.track_system = self.config.get("track_system", True)

    def _system_memory(self) -> list:
        """Return system RAM usage readings, or [] when psutil is missing."""
        try:
            import psutil
        except ImportError:
            self.logger.debug("psutil not available for system memory monitoring")
            return []
        return [f"RAM: {psutil.virtual_memory().percent}%"]

    def _gpu_memory(self) -> list:
        """Return GPU memory usage readings, or [] when CUDA is unavailable."""
        try:
            import torch

            if not torch.cuda.is_available():
                return []
            device = torch.cuda.current_device()
            used_gb = torch.cuda.memory_allocated(device) / 1024**3
            total_gb = (
                torch.cuda.get_device_properties(device).total_memory / 1024**3
            )
            return [
                f"GPU: {used_gb:.2f}GB/{total_gb:.2f}GB ({used_gb/total_gb*100:.1f}%)"
            ]
        except (ImportError, AttributeError):
            self.logger.debug("torch.cuda not available for GPU memory monitoring")
            return []

    def _handle_event(self, context: EventContext) -> None:
        """
        Collect and log memory statistics for the current event.

        Args:
            context: Event context containing event data
        """
        readings = []
        if self.track_system:
            readings.extend(self._system_memory())
        if self.track_gpu:
            readings.extend(self._gpu_memory())

        if readings:
            self.logger.log(
                self.log_level,
                f"Memory Usage: {' | '.join(readings)} | Event: {context.event.value}",
            )
        else:
            self.logger.debug(
                f"No memory metrics available for event: {context.event.value}"
            )


class LossTrackerHandler(BaseEventHandler):
    """
    Handler for tracking loss values and detecting anomalies.

    Maintains a rolling window of recent loss values and flags a loss as
    anomalous when it exceeds a configurable multiple of the rolling average.

    Configuration options:
        track_history: Whether to maintain loss history (default: True)
        window_size: Size of the rolling window for loss history (default: 100)
        anomaly_threshold: Multiplier above average to consider a loss anomalous (default: 3.0)
        log_level_normal: Log level for regular loss tracking (default: "DEBUG")
        log_level_anomaly: Log level for anomaly reports (default: "WARNING")
    """

    def __init__(self, config: HandlerConfig, *args, **kwargs):
        """
        Initialize loss tracker handler.

        Args:
            config: Handler configuration with options
            *args, **kwargs: Additional arguments passed to base class
        """
        super().__init__(config, *args, **kwargs)
        self.logger = logging.getLogger("LossTracker")

        # Extract configuration options
        self.track_history = self.config.get("track_history", True)
        self.window_size = self.config.get("window_size", 100)
        self.anomaly_threshold = self.config.get("anomaly_threshold", 3.0)

        # Configure log levels; unknown names fall back to sensible defaults.
        log_level_normal = self.config.get("log_level_normal", "DEBUG")
        self.log_level_normal = getattr(
            logging, log_level_normal.upper(), logging.DEBUG
        )

        log_level_anomaly = self.config.get("log_level_anomaly", "WARNING")
        self.log_level_anomaly = getattr(
            logging, log_level_anomaly.upper(), logging.WARNING
        )

        # Rolling window of recent loss values, bounded by window_size.
        self.loss_history = []

    def _handle_event(self, context: EventContext) -> None:
        """
        Track loss values and detect anomalies.

        Args:
            context: Event context; loss is read from context.data["loss"]
                first, then context.metrics["loss"].
        """
        # Extract the loss, preferring context.data over context.metrics.
        # BUG FIX: the previous `data.get("loss") or metrics.get("loss")`
        # treated a legitimate loss of 0.0 as missing (0.0 is falsy) and
        # relied on `assert` for validation, which is stripped under -O.
        # Explicit None checks handle both problems.
        loss_value = context.data.get("loss")
        if loss_value is None:
            loss_value = context.metrics.get("loss")
        if loss_value is None:
            # No loss attached to this event; nothing to track.
            return

        # Update loss history if tracking is enabled (bounded window).
        if self.track_history:
            self.loss_history.append(loss_value)
            if len(self.loss_history) > self.window_size:
                self.loss_history.pop(0)

        # Calculate average loss for anomaly detection; with no history,
        # fall back to the current loss itself.
        avg_loss: float = (
            sum(self.loss_history) / len(self.loss_history)
            if self.loss_history
            else loss_value
        )
        # BUG FIX: guard against ZeroDivisionError when all recorded
        # losses are 0 (avg_loss == 0). Report the ratio as 0 in that case.
        ratio = loss_value / avg_loss if avg_loss else 0.0
        batch = getattr(context, "batch_idx", "N/A")

        # Log based on event type and anomaly detection
        if context.event == EventType.ON_HIGH_LOSS_DETECTED:
            self.logger.log(
                self.log_level_anomaly,
                f"High loss detected: {loss_value:.4f} "
                f"(avg: {avg_loss:.4f}, ratio: {ratio:.2f}x) "
                f"at batch {batch}",
            )
        elif context.event == EventType.ON_AFTER_LOSS_COMPUTATION:
            # Check if current loss is anomalous compared to history
            if self.loss_history and loss_value > avg_loss * self.anomaly_threshold:
                self.logger.log(
                    self.log_level_anomaly,
                    f"Anomalous loss detected: {loss_value:.4f} "
                    f"(avg: {avg_loss:.4f}, ratio: {ratio:.2f}x) "
                    f"at batch {batch}",
                )
            else:
                # Normal loss logging
                self.logger.log(
                    self.log_level_normal,
                    f"Loss: {loss_value:.4f} at batch {batch}",
                )
