"""
Task boundary callback for continual learning.

This callback manages task transitions in continual learning scenarios,
ensuring proper task boundary detection and notification to model and other components.
"""

from typing import Optional, Set, Dict, Any, List
import pytorch_lightning as pl
from pytorch_lightning.callbacks import Callback

from continuallearning.interfaces import TaskAdaptProtocol, MethodInterface
from ..events.event_types import TaskEvent, EventType
from ..events.dispatcher import EventDispatcher


class TaskBoundaryCallback(Callback):
    """
    Callback to handle task transitions in continual learning.

    This callback ensures that models and data modules are properly notified
    when a task changes, and handles the preparation for each new task. It integrates
    with PyTorch Lightning's event system by hooking into existing events and providing
    task-specific notifications.

    Args:
        verbose (bool): Whether to print task transition messages
        event_dispatcher (EventDispatcher, optional): Event dispatcher for broadcasting task events
    """

    def __init__(
        self, verbose: bool = True, event_dispatcher: Optional[EventDispatcher] = None
    ):
        super().__init__()
        self.verbose = verbose
        # Task 0 is the implicit starting task; updated via set_task().
        self.current_task_id: int = 0
        # IDs of tasks whose fit loop ran to completion (see on_fit_end).
        self.completed_tasks: Set[int] = set()
        # Reserved for per-task metric collection; nothing in this class
        # populates it yet.
        self.task_metrics: Dict[int, Dict[str, Any]] = {}

        # Use provided event dispatcher or create a new one.
        # NOTE(review): the dispatcher is stored but never used to emit
        # TaskEvent/EventType events anywhere in this class — confirm whether
        # _trigger_task_start/_trigger_task_end should dispatch through it.
        self.event_dispatcher = event_dispatcher or EventDispatcher()

    def on_fit_start(self, trainer: pl.Trainer, pl_module: pl.LightningModule) -> None:
        """
        Called when starting the training process. Sets up the initial task.

        Args:
            trainer: The trainer instance
            pl_module: The module instance (should support task adaptation)
        """
        # Propagate the current task to all task-adaptable components, then
        # announce the task start.
        self._set_current_task(trainer, pl_module)
        self._trigger_task_start(trainer, pl_module)

    def on_fit_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule) -> None:
        """
        Called when fit ends. Marks the current task as complete.

        Args:
            trainer: The trainer instance
            pl_module: The module instance
        """
        # Notify about task end before recording completion.
        self._trigger_task_end(trainer, pl_module)
        self.completed_tasks.add(self.current_task_id)

    def on_train_epoch_start(
        self, trainer: pl.Trainer, pl_module: pl.LightningModule
    ) -> None:
        """
        Called at the beginning of each training epoch.

        Re-applies the current task so components stay in sync even if
        something changed the task between epochs.

        Args:
            trainer: The trainer instance
            pl_module: The module instance
        """
        self._set_current_task(trainer, pl_module)

    def on_validation_epoch_start(
        self, trainer: pl.Trainer, pl_module: pl.LightningModule
    ) -> None:
        """
        Called at the beginning of each validation epoch.

        Args:
            trainer: The trainer instance
            pl_module: The module instance
        """
        # Ensure task is set correctly before validating.
        self._set_current_task(trainer, pl_module)

    def on_test_epoch_start(
        self, trainer: pl.Trainer, pl_module: pl.LightningModule
    ) -> None:
        """
        Called at the beginning of each test epoch.

        Args:
            trainer: The trainer instance
            pl_module: The module instance
        """
        # For testing, we keep the current task setting
        # (specific test loops should handle multi-task testing if needed).
        pass

    def set_task(
        self,
        task_id: int,
        trainer: Optional[pl.Trainer] = None,
        pl_module: Optional[pl.LightningModule] = None,
    ) -> None:
        """
        Explicitly set the current task.

        If both ``trainer`` and ``pl_module`` are given, this first ends the
        previous task, then applies and starts the new one. Without them, only
        the internal task id is updated (components are re-synced lazily by
        the next epoch-start hook).

        Args:
            task_id: The task identifier
            trainer: Optional trainer instance
            pl_module: Optional module instance
        """
        # Guard clause: nothing to do if the task is unchanged.
        if self.current_task_id == task_id:
            return

        # Use explicit None checks throughout (previously the start path used
        # truthiness, inconsistent with the end path).
        has_context = trainer is not None and pl_module is not None

        # If we were in the middle of a task, trigger task end for it first.
        # NOTE(review): this does NOT add the previous task to
        # completed_tasks — only on_fit_end does; confirm that is intended.
        if has_context and self.current_task_id >= 0:
            self._trigger_task_end(trainer, pl_module)

        self.current_task_id = task_id

        if self.verbose:
            print(f"\n=== Switching to Task {task_id} ===\n")

        if has_context:
            self._set_current_task(trainer, pl_module)
            self._trigger_task_start(trainer, pl_module)

    def _get_task_adaptable_components(
        self, trainer: pl.Trainer, pl_module: pl.LightningModule
    ) -> List[TaskAdaptProtocol]:
        """
        Find all components that implement the TaskAdaptProtocol.

        Checks the Lightning module itself, its ``model`` attribute (if any),
        and the trainer's datamodule (if any).

        Args:
            trainer: The trainer instance
            pl_module: The module instance

        Returns:
            List of components that implement TaskAdaptProtocol
        """
        task_adaptable_components: List[TaskAdaptProtocol] = []

        # The module itself.
        if isinstance(pl_module, TaskAdaptProtocol):
            task_adaptable_components.append(pl_module)

        # The wrapped model, if present.
        if hasattr(pl_module, "model") and isinstance(
            pl_module.model, TaskAdaptProtocol
        ):
            task_adaptable_components.append(pl_module.model)

        # The datamodule, if present.
        if hasattr(trainer, "datamodule") and isinstance(
            trainer.datamodule, TaskAdaptProtocol
        ):
            task_adaptable_components.append(trainer.datamodule)

        return task_adaptable_components

    def _get_method_interface(
        self, pl_module: pl.LightningModule
    ) -> Optional[MethodInterface]:
        """
        Get the continual learning method (strategy) if available.

        Args:
            pl_module: The module instance

        Returns:
            The continual learning method or None if the module has no
            ``cl_strategy`` implementing MethodInterface.
        """
        if hasattr(pl_module, "cl_strategy") and isinstance(
            pl_module.cl_strategy, MethodInterface
        ):
            return pl_module.cl_strategy
        return None

    def _set_current_task(
        self, trainer: pl.Trainer, pl_module: pl.LightningModule
    ) -> None:
        """
        Set the current task id on all TaskAdaptProtocol components.

        Args:
            trainer: The trainer instance
            pl_module: The module instance
        """
        for component in self._get_task_adaptable_components(trainer, pl_module):
            component.set_task(self.current_task_id)

    def _trigger_task_start(
        self, trainer: pl.Trainer, pl_module: pl.LightningModule
    ) -> None:
        """
        Trigger task start events - simplified single notification mechanism.

        Notifies the CL strategy first, then any sibling callbacks exposing
        an ``on_task_start`` hook.

        Args:
            trainer: The trainer instance
            pl_module: The module instance
        """
        if self.verbose:
            print(f"=== 任务 {self.current_task_id} 开始 ===")

        # Notify continual learning method if available.
        cl_strategy = self._get_method_interface(pl_module)
        if cl_strategy:
            cl_strategy.on_task_start(self.current_task_id)

        # Notify other task-aware callbacks (skip self to avoid recursion).
        for callback in trainer.callbacks:
            if hasattr(callback, "on_task_start") and callback is not self:
                callback.on_task_start(trainer, pl_module, self.current_task_id)

    def _trigger_task_end(
        self, trainer: pl.Trainer, pl_module: pl.LightningModule
    ) -> None:
        """
        Trigger task end events - simplified single notification mechanism.

        Fetches the ending task's training dataloader (if the datamodule
        exposes ``get_task_dataloader``) and passes it to the CL strategy and
        any sibling callbacks exposing an ``on_task_end`` hook.

        Args:
            trainer: The trainer instance
            pl_module: The module instance
        """
        if self.verbose:
            print(f"=== 任务 {self.current_task_id} 结束 ===")

        # Get the training dataloader for the current task, if the datamodule
        # supports it. getattr(None, ..., None) safely yields None, so the
        # previous hasattr + getattr + callable triple-check collapses to this.
        task_dataloader = None
        datamodule = getattr(trainer, "datamodule", None)
        get_dataloader_func = getattr(datamodule, "get_task_dataloader", None)
        if callable(get_dataloader_func):
            task_dataloader = get_dataloader_func(self.current_task_id, "train")

        # Notify continual learning method if available.
        cl_strategy = self._get_method_interface(pl_module)
        if cl_strategy:
            cl_strategy.on_task_end(self.current_task_id, task_dataloader)

        # Notify other task-aware callbacks (skip self to avoid recursion).
        for callback in trainer.callbacks:
            if hasattr(callback, "on_task_end") and callback is not self:
                callback.on_task_end(
                    trainer, pl_module, self.current_task_id, task_dataloader
                )