import logging
import copy
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from torch import optim
from typing import Optional, TypeVar

from learners.registry.decorators import register_classifier_alignment
from learners.interfaces import ClassifierAlignmentInterface
from learners.components.hook_manager import TrainHookEvents


# Type variables for better type hinting
# NOTE(review): T is not referenced anywhere in this chunk — candidate for
# removal, but confirm no other module imports it before deleting.
T = TypeVar("T", bound=nn.Module)  # Type variable for network models


@register_classifier_alignment("standard")
class ClassifierAlignment(ClassifierAlignmentInterface):
    """
    Module for classifier alignment in incremental learning.

    After each incremental task (except the first), the classifier head is
    finetuned on synthetic features sampled from per-class distributions
    (``learner.GD``), which counteracts the bias toward recently seen classes.

    Class Attributes:
        DEFAULT_EPOCHS: Default number of epochs for alignment
        DEFAULT_LR: Default learning rate for alignment
        DEFAULT_WEIGHT_DECAY: Default weight decay for optimizer
        DEFAULT_SAMPLES_PER_CLASS: Default number of synthetic samples per class
        DEFAULT_MOMENTUM: Default momentum for optimizer

    Attributes:
        epochs (int): Number of epochs for alignment
        lr (float): Learning rate for alignment
        weight_decay (float): Weight decay for optimizer
        samples_per_class (int): Number of synthetic samples generated per class
        momentum (float): Momentum parameter for optimizer
    """

    # Class-level defaults for better readability and maintainability
    DEFAULT_EPOCHS: int = 20
    DEFAULT_LR: float = 0.01
    DEFAULT_WEIGHT_DECAY: float = 0.0005
    DEFAULT_SAMPLES_PER_CLASS: int = 256
    DEFAULT_MOMENTUM: float = 0.9

    def __init__(
        self,
        epochs: Optional[int] = None,
        lr: Optional[float] = None,
        weight_decay: Optional[float] = None,
        samples_per_class: Optional[int] = None,
        momentum: Optional[float] = None,
    ):
        """
        Initialize the classifier alignment module with sensible defaults.

        Args:
            epochs: Number of epochs for alignment (defaults to DEFAULT_EPOCHS)
            lr: Learning rate for alignment (defaults to DEFAULT_LR)
            weight_decay: Weight decay for optimizer (defaults to DEFAULT_WEIGHT_DECAY)
            samples_per_class: Number of synthetic samples per class (defaults to DEFAULT_SAMPLES_PER_CLASS)
            momentum: Momentum for SGD optimizer (defaults to DEFAULT_MOMENTUM)
        """
        # Fall back to the class-level defaults for any unspecified argument.
        self.epochs = self.DEFAULT_EPOCHS if epochs is None else epochs
        self.lr = self.DEFAULT_LR if lr is None else lr
        self.weight_decay = (
            self.DEFAULT_WEIGHT_DECAY if weight_decay is None else weight_decay
        )
        self.samples_per_class = (
            self.DEFAULT_SAMPLES_PER_CLASS
            if samples_per_class is None
            else samples_per_class
        )
        self.momentum = self.DEFAULT_MOMENTUM if momentum is None else momentum

    def register_hooks(self, learner) -> None:
        """
        Register hooks with the learner's hook manager.

        Args:
            learner: The learner to register hooks with
        """
        if hasattr(learner, "hook_manager"):
            # Use the hook_manager if available (modern approach)
            learner.hook_manager.register(
                TrainHookEvents.AFTER_TRAIN, self._after_train_hook
            )
        elif hasattr(learner, "register_hook"):
            # Fall back to direct registration if needed
            learner.register_hook("after_train", self._after_train_hook)
        else:
            logging.warning(
                "Learner doesn't have hook_manager or register_hook method. "
                "Classifier alignment will not be automatically applied."
            )

    def _after_train_hook(self, learner, **kwargs) -> None:
        """
        Hook called after training to perform classifier alignment.

        Args:
            learner: The learner being trained
            **kwargs: Additional arguments (unused; accepted for hook compatibility)
        """
        # Skip if this is the first task: there is no old/new class imbalance
        # to align against yet.
        if learner._cur_task == 0:
            return

        # Log alignment start
        logging.info("\n" + "=" * 50)
        logging.info("Classifier Alignment Finetuning")
        logging.info("=" * 50 + "\n")

        # Perform alignment
        self._train_clf_alignment(learner)

    def _train_clf_alignment(self, learner) -> None:
        """
        Perform classifier alignment training.

        This method handles the entire process of:
        1. Preparing optimizer and parameters to train
        2. Generating synthetic samples from class distributions
        3. Training the classifier on these samples
        4. Tracking performance and saving the best model

        Args:
            learner: The learner being trained
        """
        # Configure trainable parameters (only classifier)
        optimizer, scheduler = self._configure_training(learner)

        # Setup network for training
        self._setup_network(learner)

        # Initialize tracking from the learner's running best so we never
        # overwrite a better earlier checkpoint.
        best_acc_cur = learner.best_acc_cur
        best_model = None

        # Training loop
        for ep in range(self.epochs):
            # Train for one epoch
            losses = self._train_epoch(learner, optimizer)

            # Step the scheduler
            if scheduler:
                scheduler.step()

            # Evaluate and log progress
            test_acc = self._evaluate_model(learner)
            self._log_progress(learner, ep, losses, test_acc)

            # Track best model (>= so later epochs win ties)
            if test_acc >= best_acc_cur:
                best_acc_cur = test_acc
                learner.best_acc[learner._cur_task] = best_acc_cur
                learner.best_epoch[learner._cur_task] = ep
                best_model = copy.deepcopy(learner._network.state_dict())

        # Log final results
        self._log_final_results(learner, best_acc_cur)

        # Apply early stopping if enabled
        self._apply_early_stopping(learner, best_model, best_acc_cur)

    def _configure_training(self, learner):
        """Configure training parameters, optimizer and scheduler.

        Only the classifier head (``learner._network.fc``) is unfrozen; the
        backbone keeps its current requires_grad state.

        Returns:
            Tuple of (SGD optimizer, cosine-annealing scheduler).
        """
        # Enable gradients for classifier parameters only
        for p in learner._network.fc.parameters():
            p.requires_grad = True

        # Collect the trainable classifier parameters for the optimizer.
        param_list = [p for p in learner._network.fc.parameters() if p.requires_grad]
        network_params = [
            {
                "params": param_list,
                "lr": self.lr,
                "weight_decay": self.weight_decay,
            }
        ]

        # Create optimizer
        optimizer = optim.SGD(
            network_params,
            lr=self.lr,
            momentum=self.momentum,
            weight_decay=self.weight_decay,
        )

        # Anneal the LR over the full alignment run.
        scheduler = optim.lr_scheduler.CosineAnnealingLR(
            optimizer=optimizer, T_max=self.epochs
        )

        return optimizer, scheduler

    def _setup_network(self, learner):
        """Prepare the network for alignment training.

        The network stays in eval mode on purpose: only the classifier head is
        trained, and eval mode keeps normalization/dropout layers frozen.
        """
        learner._network.eval()
        learner._network.to(learner._device)

        # Use DataParallel if multiple GPUs
        if len(learner._multiple_gpus) > 1:
            learner._network = nn.DataParallel(learner._network, learner._multiple_gpus)

    def _train_epoch(self, learner, optimizer):
        """Train for one epoch using freshly sampled synthetic data.

        Args:
            learner: The learner being trained
            optimizer: Optimizer over the classifier parameters

        Returns:
            Sum of per-batch losses for the epoch.
        """
        losses = 0.0

        # Generate (and shuffle) synthetic features for every class.
        inputs, targets = self._generate_synthetic_data(learner)

        # Consume the shuffled data in fixed-size chunks of
        # `samples_per_class`. Since the total sample count is
        # _total_classes * samples_per_class, iterating _total_classes chunks
        # covers the whole set exactly once. Each chunk is a mixed-class
        # minibatch — NOT the samples of one class — because the data was
        # shuffled above.
        for batch_idx in range(learner._total_classes):
            start_idx = batch_idx * self.samples_per_class
            end_idx = start_idx + self.samples_per_class

            inp = inputs[start_idx:end_idx]
            tgt = targets[start_idx:end_idx]

            # Forward pass with appropriate input format
            outputs = self._forward_model(learner, inp)

            # Optional logit scaling (temperature) from the learner config.
            logits = outputs["logits"]
            if hasattr(learner, "args") and "scale" in learner.args:
                logits = logits * learner.args["scale"]

            # Compute loss over the currently known classes and update weights.
            loss = F.cross_entropy(logits[:, : learner._total_classes], tgt)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            losses += loss.item()

        return losses

    def _generate_synthetic_data(self, learner):
        """Generate synthetic feature samples from per-class distributions.

        Samples ``samples_per_class`` feature vectors per class from
        ``learner.GD`` (falling back to an isotropic Gaussian around the class
        mean when a distribution is missing), then shuffles all samples.

        Returns:
            Tuple of (features, labels) tensors on the learner's device,
            shuffled with a shared permutation.
        """
        sampled_data = []
        sampled_label = []

        # Generate samples for each class
        for class_idx in range(learner._total_classes):
            if not hasattr(learner, "GD") or len(learner.GD) <= class_idx:
                # Lazy %-formatting: only rendered if the record is emitted.
                logging.warning(
                    "No distribution for class %s, using fallback", class_idx
                )
                # Fallback: Gaussian centered on the class mean with a small
                # isotropic covariance.
                class_mean = (
                    learner._class_means[class_idx].clone().cpu().to(torch.float32)
                )
                fallback_cov = torch.eye(class_mean.shape[-1]) * 0.1
                dist = torch.distributions.MultivariateNormal(class_mean, fallback_cov)
            else:
                dist = learner.GD[class_idx]

            # Sample from distribution
            samples = dist.sample(sample_shape=torch.Size((self.samples_per_class,)))
            sampled_data.append(samples)
            sampled_label.extend([class_idx] * self.samples_per_class)

        # Combine and move to the training device.
        combined_data = torch.cat(sampled_data).float().to(learner._device)
        combined_labels = torch.tensor(sampled_label).long().to(learner._device)

        # Shuffle features and labels with the same permutation.
        shuffle_idx = torch.randperm(combined_data.size(0))

        return combined_data[shuffle_idx], combined_labels[shuffle_idx]

    def _forward_model(self, learner, inputs):
        """Forward pass through the model with the appropriate interface.

        Prefers the learner's ``ca_forward`` when present, detecting whether it
        accepts an ``inputs_format`` argument; otherwise calls the network
        directly with ``ca=True``.
        """
        if hasattr(learner, "ca_forward"):
            # NOTE(review): co_varnames also contains local variable names, so
            # a local named `inputs_format` would match too — inspect.signature
            # would be stricter; confirm before changing.
            if (
                hasattr(learner.ca_forward, "__code__")
                and "inputs_format" in learner.ca_forward.__code__.co_varnames
            ):
                return learner.ca_forward(
                    learner._network, inputs, inputs_format="features"
                )
            else:
                # For multi-head models that expect a specific input format
                return learner.ca_forward(learner._network, inputs[:, None])
        else:
            return learner._network(inputs, ca=True)

    def _evaluate_model(self, learner):
        """Evaluate the model on the learner's test loader."""
        return learner._compute_accuracy_domain(learner._network, learner.test_loader)

    def _log_progress(self, learner, epoch, losses, test_acc):
        """Log training progress for one epoch (loss averaged per batch)."""
        info = (
            f"CA Task {learner._cur_task} => "
            f"Epoch {epoch+1}/{self.epochs}, "
            f"Loss {losses/learner._total_classes:.3f}, "
            f"Test_accy {test_acc:.3f}"
        )
        logging.info(info)

    def _log_final_results(self, learner, best_acc_cur):
        """Log final results after alignment."""
        report_str = (
            f"Task {learner._cur_task} => "
            f"Best accuracy: {best_acc_cur:.2f} [Epoch {learner.best_epoch[learner._cur_task]}], "
            f"Average accuracy: {np.mean(learner.best_acc):.2f}"
        )
        logging.info(report_str)

    def _apply_early_stopping(self, learner, best_model, best_acc_cur):
        """Restore the best checkpoint if early stopping is enabled.

        No-op unless the learner opts in via ``args['early_stop']`` and a
        better checkpoint was actually captured during the alignment loop.
        """
        if (
            hasattr(learner, "args")
            and learner.args.get("early_stop", False)
            and best_model is not None
        ):
            learner._network.load_state_dict(best_model)
            learner.best_model = best_model
            learner.best_acc_cur = best_acc_cur
