"""
PyTorch Lightning module for continual learning.
"""

from typing import Dict, List, Optional, Tuple, Any, Union, Callable, Type, cast
import torch
import torch.nn as nn
import torch.optim as optim
import pytorch_lightning as pl
from collections import defaultdict

from continuallearning.registry import MethodInterface
from continuallearning.interfaces import TaskAdaptProtocol
from ..config.typed_config import OptimizerConfig, SchedulerConfig
from pytorch_lightning.utilities.types import OptimizerLRScheduler

# FIXME: the current logging setup is chaotic — it mixes the model/PEFT logging system, Python's standard logging, and PyTorch Lightning's logging. Unify them in the future.
import logging

log = logging.getLogger(__name__)


class ContinualModule(pl.LightningModule):
    """
    PyTorch Lightning module for continual learning.

    This module focuses on defining the core interface needed for continual learning
    while leveraging Lightning's existing functionality. It manages task transitions,
    integrates continual learning strategies, and handles optimization configuration.

    Args:
        model (nn.Module): PyTorch model
        optimizer_cfg (OptimizerConfig or dict): Configuration for optimizer
        scheduler_cfg (Optional[SchedulerConfig or dict]): Configuration for learning rate scheduler
        cl_strategy (Optional[MethodInterface]): Continual learning strategy
        criterion (Optional[nn.Module or Callable]): Loss function, defaults to CrossEntropyLoss
    """

    def __init__(
        self,
        model: nn.Module,
        optimizer_cfg: Union[OptimizerConfig, Dict[str, Any]],
        scheduler_cfg: Optional[Union[SchedulerConfig, Dict[str, Any]]] = None,
        cl_strategy: Optional[MethodInterface] = None,
        criterion: Optional[nn.Module] = None,
    ):
        super().__init__()
        self.model = model

        # Promote plain-dict configs to their typed equivalents so the rest of
        # the module can rely on the typed interface (e.g. ``to_dict``). The
        # typed config classes are already imported at module level.
        if isinstance(optimizer_cfg, dict):
            optimizer_cfg = OptimizerConfig(**optimizer_cfg)
        self.optimizer_cfg = optimizer_cfg

        # ``isinstance(None, dict)`` is False, so no separate None check is needed.
        if isinstance(scheduler_cfg, dict):
            scheduler_cfg = SchedulerConfig(**scheduler_cfg)
        self.scheduler_cfg = scheduler_cfg

        # Validate cl_strategy implements MethodInterface if provided
        if cl_strategy is not None and not isinstance(cl_strategy, MethodInterface):
            raise TypeError(
                f"cl_strategy must implement MethodInterface, got {type(cl_strategy)}"
            )
        self.cl_strategy = cl_strategy

        # Task tracking; updates go through the validating ``current_task`` setter.
        self._current_task = 0
        self.seen_tasks = set()

        # Per-task metric storage: "task_<id>" -> metric name -> value
        self.task_metrics: Dict[str, Dict[str, float]] = defaultdict(dict)

        # Configurable loss function
        self.criterion = criterion if criterion is not None else nn.CrossEntropyLoss()

        # Use the TaskAdaptProtocol runtime check instead of a hasattr probe
        self._has_task_adaptation = isinstance(self.model, TaskAdaptProtocol)

        # Save hyperparameters for checkpoint restoration; non-serializable
        # objects (model, strategy, criterion) are excluded.
        self.save_hyperparameters(ignore=["model", "cl_strategy", "criterion"])

    @property
    def current_task(self) -> int:
        """Get the current task ID."""
        return self._current_task

    @current_task.setter
    def current_task(self, task_id: int) -> None:
        """Set the current task ID with validation."""
        # NOTE(review): ``isinstance(True, int)`` is True, so bools slip
        # through this check — confirm whether that matters for callers.
        if not isinstance(task_id, int) or task_id < 0:
            raise ValueError(f"Task ID must be a non-negative integer, got {task_id}")
        self._current_task = task_id
        self.seen_tasks.add(task_id)

    def set_task(self, task_id: int) -> None:
        """
        Update the current task for the module and notify the model if supported.

        Args:
            task_id (int): Task identifier
        """
        # FIXME: this interface diverges from the PEFT models — the method
        # implementing the same functionality there is ``prepare_task``.
        # FIXME: TaskAdaptProtocol is never mentioned in the model code either;
        # this must be reconciled.
        # Use property setter for validation
        self.current_task = task_id

        # Set task for model if it supports the TaskAdaptProtocol
        if self._has_task_adaptation:
            cast(TaskAdaptProtocol, self.model).set_task(task_id)

        # Log task change
        self.log("current_task", float(task_id), prog_bar=True)

    def forward(
        self, x: torch.Tensor, task_id: Optional[List[int]] = None
    ) -> Union[torch.Tensor, Dict[str, torch.Tensor]]:
        """
        Forward pass through the model.

        Args:
            x (torch.Tensor): Input tensor
            task_id (List[int], optional): Task identifier, defaults to None.
                    passing a None is equivalent to passing all task_ids.

        Returns:
            Union[torch.Tensor, Dict[str, torch.Tensor]]: Model output (either logits tensor or dictionary containing logits)
        """
        return self.model(x, task_id)

    def configure_optimizers(self) -> OptimizerLRScheduler:
        """
        Configure optimizers and schedulers based on provided configurations.

        This method creates the optimizer from the optimizer configuration,
        and optionally creates a learning rate scheduler if a scheduler
        configuration was provided. It also respects any parameter filtering
        applied by the continual learning strategy.

        Returns:
            Union[optim.Optimizer, Dict[str, Any]]: Optimizer or dictionary with
            optimizer and scheduler configuration compatible with PyTorch Lightning
        """
        # FIXME: the way optimizer parameters are forwarded here is problematic
        # and must be revised — double-check parameter passing carefully; there
        # also appear to be interface-inconsistency issues.

        # Get optimizer parameters from configuration
        opt_config = self.optimizer_cfg.to_dict()
        opt_type = opt_config.pop("type")

        # Validate optimizer type exists
        if not hasattr(optim, opt_type):
            raise ValueError(
                f"Unknown optimizer type: {opt_type}. Must be a class in torch.optim"
            )

        opt_cls = getattr(optim, opt_type)

        # Get parameters to optimize from strategy or model
        if self.cl_strategy is not None:
            parameters = self.cl_strategy.get_parameters_to_optimize(self.model)
        else:
            parameters = self.model.parameters()

        # Create optimizer; chain the cause so the original traceback survives
        try:
            optimizer = opt_cls(parameters, **opt_config)
        except TypeError as e:
            raise TypeError(f"Error creating optimizer '{opt_type}': {str(e)}") from e

        # Return optimizer if no scheduler is configured
        if self.scheduler_cfg is None:
            return optimizer

        # Configure scheduler if provided
        sched_config = self.scheduler_cfg.to_dict()
        sched_type = sched_config.pop("type")

        # Validate scheduler type exists
        if not hasattr(optim.lr_scheduler, sched_type):
            raise ValueError(
                f"Unknown scheduler type: {sched_type}. Must be a class in torch.optim.lr_scheduler"
            )

        sched_cls = getattr(optim.lr_scheduler, sched_type)

        # Create scheduler; chain the cause so the original traceback survives
        try:
            scheduler = sched_cls(optimizer, **sched_config)
        except TypeError as e:
            raise TypeError(f"Error creating scheduler '{sched_type}': {str(e)}") from e

        # Return Lightning-compatible optimizer and scheduler configuration
        return {
            "optimizer": optimizer,
            "lr_scheduler": {
                "scheduler": scheduler,
                "monitor": "val/loss",
                "interval": "epoch",
                "frequency": 1,
            },
        }

    def _extract_batch_data(
        self, batch: Any
    ) -> Tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor]]:
        """
        Extract inputs, targets, and optional task_ids from batch.

        This helper method handles different batch formats and provides consistent error handling.

        Args:
            batch (Any): Batch data in various formats (tuple, list, or dict)

        Returns:
            Tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor]]:
                Tuple containing (inputs, targets, task_ids)

        Raises:
            ValueError: If the batch format is unsupported
        """
        # Note: the previous blanket try/except converted the deliberate
        # ValueError below into a RuntimeError, contradicting this docstring.
        # The isinstance/len guards make the extraction itself safe, so the
        # ValueError is now raised directly as documented.
        if isinstance(batch, (list, tuple)) and len(batch) >= 2:
            inputs, targets = batch[0], batch[1]
            task_ids = batch[2] if len(batch) > 2 else None
        elif isinstance(batch, dict) and "inputs" in batch and "targets" in batch:
            inputs, targets = batch["inputs"], batch["targets"]
            task_ids = batch.get("task_ids", None)
        else:
            raise ValueError(
                f"Unsupported batch format: expected tuple/list with at least 2 elements or dict with 'inputs' and 'targets' keys. Got {type(batch)}"
            )
        return inputs, targets, task_ids

    def _extract_logits_from_outputs(self, outputs: Any) -> torch.Tensor:
        """
        Extract logits from model outputs.

        This helper method handles different output formats and provides consistent error handling.

        Args:
            outputs (Any): Model outputs (tensor or dict with 'logits' key)

        Returns:
            torch.Tensor: Logits tensor

        Raises:
            TypeError: If logits cannot be extracted or are not a tensor
        """
        if isinstance(outputs, dict) and "logits" in outputs:
            logits = outputs["logits"]
        else:
            logits = outputs

        # Ensure logits is a tensor
        if not isinstance(logits, torch.Tensor):
            raise TypeError(f"Expected logits to be a torch.Tensor, got {type(logits)}")

        return logits

    def training_step(self, batch: Any, batch_idx: int) -> Dict[str, torch.Tensor]:
        """
        Training step for PyTorch Lightning.

        Handles different batch formats and applies the continual learning strategy's
        loss computation if available. Otherwise falls back to standard cross-entropy loss.

        Args:
            batch (Any): Batch data in various formats (tuple, list, or dict)
            batch_idx (int): Batch index

        Returns:
            Dict[str, torch.Tensor]: Dictionary with loss values
        """
        # Extract data using helper method
        inputs, targets, task_ids = self._extract_batch_data(batch)

        # Forward pass; chain the cause so the original traceback survives.
        # NOTE(review): batch task_ids are not forwarded to the model here,
        # unlike test_step — confirm whether training should be task-aware.
        try:
            outputs = self(inputs)
        except Exception as e:
            raise RuntimeError(f"Error during forward pass: {str(e)}") from e

        # Compute loss using strategy if available, otherwise use default
        try:
            if self.cl_strategy is not None:
                loss_dict = self.cl_strategy.compute_loss(
                    model=self.model,
                    criterion=self.criterion,
                    output=outputs,
                    targets=targets,
                    task_id=self.current_task,
                    task_ids=task_ids,  # Pass through task IDs if available
                )
            else:
                # Default loss computation
                logits = self._extract_logits_from_outputs(outputs)
                loss = self.criterion(logits, targets)
                loss_dict = {"loss": loss}
        except Exception as e:
            raise RuntimeError(f"Error computing loss: {str(e)}") from e

        # Validate loss dictionary
        if not isinstance(loss_dict, dict) or "loss" not in loss_dict:
            raise ValueError("Loss dictionary must contain a 'loss' key")

        # Log tensor-valued losses; skip (but flag) NaNs rather than logging them
        for name, value in loss_dict.items():
            if not torch.is_tensor(value):
                continue

            # Check for NaN values
            if torch.isnan(value).any():
                self.log(f"train/{name}_nan_detected", 1.0)
                log.warning("NaN detected in %s loss", name)
                continue

            self.log(
                f"train/{name}",
                value,
                on_step=True,
                on_epoch=True,
                prog_bar=name == "loss",
                sync_dist=True,  # Enable proper distributed training support
            )

        return loss_dict

    def validation_step(self, batch: Any, batch_idx: int) -> Dict[str, Any]:
        """
        Validation step for PyTorch Lightning.

        Processes validation data, computes metrics, and logs results.

        Args:
            batch (Any): Batch data in various formats (tuple, list, or dict)
            batch_idx (int): Batch index

        Returns:
            Dict[str, Any]: Dictionary with validation metrics and outputs
        """
        # Extract data using helper method
        inputs, targets, _ = self._extract_batch_data(batch)

        # Forward pass; chain the cause so the original traceback survives
        try:
            outputs = self(inputs)
        except Exception as e:
            raise RuntimeError(f"Error during validation forward pass: {str(e)}") from e

        # Extract logits using helper method
        logits = self._extract_logits_from_outputs(outputs)

        # Compute loss and metrics
        try:
            loss = self.criterion(logits, targets)
            pred = torch.argmax(logits, dim=1)
            acc = torch.sum(pred == targets).item() / targets.size(0)
        except Exception as e:
            raise RuntimeError(f"Error computing validation metrics: {str(e)}") from e

        # Log metrics with distributed training support
        self.log(
            "val/loss",
            loss,
            on_step=False,
            on_epoch=True,
            prog_bar=True,
            sync_dist=True,
        )
        self.log(
            "val/acc", acc, on_step=False, on_epoch=True, prog_bar=True, sync_dist=True
        )

        return {
            "loss": loss,
            "acc": acc,
            "logits": logits,
            "targets": targets,
            "task_id": self.current_task,
        }

    def test_step(
        self, batch: Any, batch_idx: int, dataloader_idx: int = 0
    ) -> Dict[str, Any]:
        """
        Test step for PyTorch Lightning.

        Processes test data for continual learning evaluation across multiple tasks.
        The dataloader_idx corresponds to the task being evaluated, enabling
        per-task performance tracking.

        Args:
            batch (Any): Batch data in various formats (tuple, list, or dict)
            batch_idx (int): Batch index
            dataloader_idx (int): Index of the dataloader (corresponds to task)

        Returns:
            Dict[str, Any]: Dictionary with test metrics and outputs
        """
        # Extract data using helper method (task_ids not needed for testing)
        inputs, targets, _ = self._extract_batch_data(batch)

        # Use dataloader_idx as task ID for multi-task evaluation
        task_id = dataloader_idx

        # Forward pass with explicit task_id for evaluation
        try:
            outputs = self(inputs, task_id)
        except Exception as e:
            raise RuntimeError(
                f"Error during test forward pass for task {task_id}: {str(e)}"
            ) from e

        # Extract logits using helper method
        logits = self._extract_logits_from_outputs(outputs)

        # Compute metrics; chain the cause so the original traceback survives
        try:
            loss = self.criterion(logits, targets)
            pred = torch.argmax(logits, dim=1)
            acc = torch.sum(pred == targets).item() / targets.size(0)
        except Exception as e:
            raise RuntimeError(
                f"Error computing test metrics for task {task_id}: {str(e)}"
            ) from e

        # Log metrics with task-specific namespacing and distributed support
        self.log(
            f"test/task_{task_id}/loss",
            loss,
            on_step=False,
            on_epoch=True,
            sync_dist=True,
        )
        self.log(
            f"test/task_{task_id}/acc",
            acc,
            on_step=False,
            on_epoch=True,
            sync_dist=True,
        )

        # Store task performance in module state
        self.task_metrics[f"task_{task_id}"]["test_loss"] = loss.item()
        self.task_metrics[f"task_{task_id}"]["test_acc"] = acc

        return {
            "loss": loss,
            "acc": acc,
            "task_id": task_id,
            "logits": logits,
            "targets": targets,
        }

    def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
        """
        Add custom data to checkpoint for better resuming.

        This method ensures that task-related state is properly saved in the
        checkpoint to enable full restoration of the continual learning state
        when resuming training.

        Args:
            checkpoint (Dict[str, Any]): Checkpoint dictionary to be modified in-place
        """
        # Convert set to sorted list for deterministic serialization
        checkpoint["seen_tasks"] = sorted(self.seen_tasks)
        checkpoint["current_task"] = self.current_task
        checkpoint["task_metrics"] = dict(self.task_metrics)

        # Save continual learning strategy state if available; best-effort,
        # a failure here should not abort checkpointing.
        if self.cl_strategy is not None:
            try:
                checkpoint["cl_strategy_state"] = self.cl_strategy.state_dict()
            except Exception as e:
                log.warning("Failed to save continual learning strategy state: %s", e)

    def on_load_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
        """
        Load custom data from checkpoint.

        This method restores the continual learning state from a checkpoint,
        including seen tasks, current task, metrics, and strategy state.

        Args:
            checkpoint (Dict[str, Any]): Checkpoint dictionary containing saved state
        """
        # Restore task tracking state
        if "seen_tasks" in checkpoint:
            self.seen_tasks = set(checkpoint["seen_tasks"])

        if "current_task" in checkpoint:
            # Use property setter for validation
            self.current_task = checkpoint["current_task"]

        if "task_metrics" in checkpoint:
            self.task_metrics = defaultdict(dict, checkpoint["task_metrics"])

        # Restore continual learning strategy state if available; best-effort,
        # a failure here should not abort loading the rest of the checkpoint.
        if self.cl_strategy is not None and "cl_strategy_state" in checkpoint:
            try:
                self.cl_strategy.load_state_dict(checkpoint["cl_strategy_state"])
            except Exception as e:
                log.warning("Failed to load continual learning strategy state: %s", e)