import torch
import torch.nn as nn
import logging
from typing import Callable, Dict, Any, Optional
from learners.registry import register_training_strategy
from learners.strategies.TrainStrategy.training_strategy import TrainingStrategy


@register_training_strategy(name="standard")
class StandardTraining(TrainingStrategy):
    """Plain supervised training: cross-task-agnostic forward/backward per batch."""

    def _train_epoch(
        self,
        network,
        train_loader: torch.utils.data.DataLoader,
        criterion: Callable,
        optimizer: torch.optim.Optimizer,
        device: torch.device,
        hook_manager: Optional[Any],
        epoch: int,
        args: Dict[str, Any],
        cur_task: int,
        **kwargs,
    ) -> Dict[str, float]:
        """Run one epoch of standard supervised training.

        Args:
            network: Neural network to train; must return a dict with a
                "logits" entry from its forward pass.
            train_loader: DataLoader yielding (index, inputs, targets) triples.
            criterion: Loss function applied to (logits, targets).
            optimizer: Optimizer for parameter updates.
            device: Device to move batches onto.
            hook_manager: Optional hook manager; "before_batch" and
                "after_batch" hooks are invoked around each batch.
            epoch: Current epoch number (unused here; kept for interface).
            args: Training arguments (unused by this strategy).
            cur_task: Current task ID (unused by this strategy).
            **kwargs: Additional arguments (ignored).

        Returns:
            Dict with "loss" (mean batch loss) and "accuracy" (percent).
        """
        network.train()

        running_loss = 0.0
        num_correct = 0
        num_seen = 0

        # Loader yields (sample index, inputs, targets); the index is unused.
        for _, inputs, targets in train_loader:
            inputs = inputs.to(device)
            targets = targets.to(device)

            # Extensibility hook before the batch is processed.
            if hook_manager:
                hook_manager.call_hook(
                    "before_batch", network=network, inputs=inputs, targets=targets
                )

            # Forward pass; the network returns a dict of outputs.
            logits = network(inputs)["logits"]
            batch_loss = criterion(logits, targets)

            # Standard backward/step cycle.
            optimizer.zero_grad()
            batch_loss.backward()
            optimizer.step()

            # Accumulate loss and top-1 accuracy statistics.
            running_loss += batch_loss.item()
            predictions = logits.max(1)[1]
            num_seen += targets.size(0)
            num_correct += predictions.eq(targets).sum().item()

            # Extensibility hook after the batch is processed.
            if hook_manager:
                hook_manager.call_hook(
                    "after_batch", network=network, loss=batch_loss.item()
                )

        return {
            "loss": running_loss / len(train_loader),
            "accuracy": 100.0 * num_correct / num_seen,
        }


@register_training_strategy(name="distillation")
class DistillationTraining(TrainingStrategy):
    """Training strategy using knowledge distillation from a frozen old model."""

    def _train_epoch(
        self,
        network,
        train_loader: torch.utils.data.DataLoader,
        criterion: Callable,
        optimizer: torch.optim.Optimizer,
        device: torch.device,
        hook_manager: Optional[Any],
        epoch: int,
        args: Dict[str, Any],
        cur_task: int,
        **kwargs,
    ) -> Dict[str, float]:
        """Train for one epoch with knowledge distillation.

        The task loss is blended with a temperature-scaled KL-divergence
        distillation loss against the old network's logits on the previously
        known classes: ``loss = alpha * task + (1 - alpha) * distill``.
        Distillation is applied only when an old network is available and
        ``cur_task > 0``; otherwise the plain task loss is used unscaled.

        Args:
            network: Neural network to train; forward pass must return a dict
                with a "logits" entry.
            train_loader: DataLoader yielding (index, inputs, targets) triples.
            criterion: Loss function for the task loss.
            optimizer: Optimizer for parameter updates.
            device: Device to move batches onto.
            hook_manager: Optional hook manager; "before_batch" and
                "after_batch" hooks are invoked around each batch.
            epoch: Current epoch number (unused here; kept for interface).
            args: Training arguments; reads "distillation_temp" (default 2.0)
                and "distillation_alpha" (default 0.5).
            cur_task: Current task ID; distillation only applies when > 0.
            **kwargs: Reads "old_network" (frozen teacher) and
                "known_classes" (number of previously seen classes).

        Returns:
            Dict with "loss", "task_loss", "distill_loss", and "accuracy".
        """
        network.train()
        old_network = kwargs.get("old_network")

        if old_network is None and cur_task > 0:
            logging.warning(
                "DistillationTraining: No old network provided for distillation"
            )

        # Get distillation parameters.
        temp = args.get("distillation_temp", 2.0)
        alpha = args.get("distillation_alpha", 0.5)

        # Hoist loop-invariant work: teacher mode and class-count lookup.
        distill_active = old_network is not None and cur_task > 0
        old_classes = kwargs.get("known_classes", 0)
        if distill_active:
            old_network.eval()

        total_loss = 0.0
        total_task_loss = 0.0
        total_distill_loss = 0.0
        correct, total = 0, 0

        for _, inputs, targets in train_loader:
            inputs, targets = inputs.to(device), targets.to(device)

            # Call hook before batch.
            if hook_manager:
                hook_manager.call_hook(
                    "before_batch", network=network, inputs=inputs, targets=targets
                )

            # Forward pass with current model.
            outputs = network(inputs)
            logits = outputs["logits"]

            # Task loss on the current labels.
            task_loss = criterion(logits, targets)

            # Distillation loss if the teacher exists and not in the first task.
            distill_loss = torch.tensor(0.0, device=device)
            if distill_active:
                with torch.no_grad():
                    old_logits = old_network(inputs)["logits"]

                # Only consider the outputs for previous classes.
                if old_classes > 0:
                    # Temperature-scaled KL divergence; the temp**2 factor
                    # restores gradient magnitude (Hinton et al., 2015).
                    distill_loss = nn.KLDivLoss(reduction="batchmean")(
                        nn.functional.log_softmax(
                            logits[:, :old_classes] / temp, dim=1
                        ),
                        nn.functional.softmax(
                            old_logits[:, :old_classes] / temp, dim=1
                        ),
                    ) * (temp * temp)

            # Combined loss. Blend only when distillation is actually active;
            # previously the task loss was scaled by alpha even when no old
            # network was provided, silently shrinking the effective loss.
            if distill_active:
                loss = alpha * task_loss + (1 - alpha) * distill_loss
            else:
                loss = task_loss

            # Backward and optimize.
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # Track metrics; distill_loss is always a tensor here.
            total_loss += loss.item()
            total_task_loss += task_loss.item()
            total_distill_loss += distill_loss.item()

            _, predicted = logits.max(1)
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()

            # Call hook after batch.
            if hook_manager:
                hook_manager.call_hook("after_batch", network=network, loss=loss.item())

        avg_loss = total_loss / len(train_loader)
        avg_task_loss = total_task_loss / len(train_loader)
        avg_distill_loss = total_distill_loss / len(train_loader)
        accuracy = 100.0 * correct / total

        return {
            "loss": avg_loss,
            "task_loss": avg_task_loss,
            "distill_loss": avg_distill_loss,
            "accuracy": accuracy,
        }
