"""
Classifier alignment event handler for continual learning.

This module implements the classifier alignment functionality as an event handler,
allowing classifier fine-tuning to be triggered automatically at specific points
in the training pipeline. The implementation exactly follows the original
_train_clf_alignment method from multi_base_ca.py.

Key Features:
- Exact replication of original classifier alignment logic
- Event-driven architecture for modular integration
- Support for GD-based (Gaussian distribution) sampling
- Early stopping with best model preservation
- Compatible with multi_base_ca learner interface

Example:
    >>> handler_config = {
    ...     'ca_epochs': 50,
    ...     'ca_lr': 0.01,
    ...     'ca_wd': 0.0005,
    ...     'single_net': False,
    ...     'verbose': False
    ... }
    >>> class MockConfig:
    ...     def __init__(self, **kwargs):
    ...         self._cfg = dict(kwargs)
    ...     def __getitem__(self, key):
    ...         return self._cfg[key]
    ...     def validate(self): pass
    >>> handler = ClassifierAlignmentHandler(MockConfig(**handler_config))
    >>> result = handler(context)  # Called by event manager
"""

import torch
import torch.nn.functional as F
import logging
import copy
import numpy as np
from torch import optim
from typing import Dict, Any, Optional

from learners.interfaces.event import EventContext
from learners.events.handlers.base import BaseEventHandler
from learners.events.config import HandlerConfig


class ClassifierAlignmentHandler(BaseEventHandler):
    """
    Event handler for classifier alignment training that exactly replicates
    the original ``_train_clf_alignment`` method from ``multi_base_ca.py``.

    This handler is designed to work specifically with learners that expose
    GD (per-class Gaussian distributions) for sampling synthetic features
    during alignment.
    """

    def __init__(self, config: HandlerConfig, *args, **kwargs):
        """
        Initialize the classifier alignment handler.

        Args:
            config: Configuration object supporting item access with keys:
                   - ca_epochs: int, number of alignment epochs
                   - ca_lr: float, learning rate for alignment
                   - ca_wd: float, weight decay for alignment
                   - single_net: bool, controls classifier input shaping
                   - verbose: bool, whether to log trainable parameters
        """
        super().__init__(
            config=config, *args, name="ClassifierAlignmentHandler", **kwargs
        )

        self.ca_epochs: int = self.config["ca_epochs"]
        self.ca_lr: float = self.config["ca_lr"]
        # NOTE(review): ca_wd is read here but the optimizer below uses
        # learner.weight_decay instead (mirroring the original code exactly).
        # Confirm whether ca_wd was meant to be applied.
        self.ca_wd = self.config["ca_wd"]
        self.verbose: bool = self.config["verbose"]
        # TODO Control the classifier behaviour
        self.single_net: bool = self.config["single_net"]

        # Best checkpoint observed during alignment, mirrored from the learner.
        self._best_model_state: Optional[Dict[str, Any]] = None
        self._best_epoch: Optional[int] = None
        self._training_results: Dict[str, Any] = {}

    def _handle_event(self, context: EventContext) -> None:
        """
        Handle the after_task_training event by performing classifier alignment.

        This method implements the required abstract method from BaseEventHandler.

        Args:
            context: Event context containing learner instance and training state
        """
        if not self._enabled:
            logging.info("Classifier alignment is disabled")
            return

        if self.ca_epochs <= 0:
            logging.info("Classifier alignment epochs <= 0, skipping")
            return

        logging.info("=" * 60)
        logging.info("Starting Classifier Alignment Finetuning")
        logging.info("=" * 60)

        try:
            results = self._train_classifier_alignment(context.learner)
            self._training_results = results
        except Exception:
            # Log with the full traceback (not just the message) so alignment
            # failures are diagnosable; the event pipeline itself is
            # deliberately not interrupted (best-effort behaviour preserved).
            logging.exception("Error during classifier alignment")

    def _train_classifier_alignment(self, learner: Any) -> Dict[str, Any]:
        """
        Execute classifier alignment training exactly following the original
        _train_clf_alignment implementation from multi_base_ca.py.

        Args:
            learner: Learner implementing the interface checked by
                _validate_learner_interface.

        Returns:
            Dict with training statistics (best/final accuracy, histories).

        Raises:
            ValueError: If the learner does not expose the required interface.
        """
        if not self._validate_learner_interface(learner):
            raise ValueError("Learner does not implement required interface")

        # Make the classifier trainable; everything else stays as-is.
        for p in learner._network.fc.parameters():
            p.requires_grad = True

        # All fc parameters were just enabled above, so no requires_grad
        # filtering is needed here (the original filter was a no-op).
        param_list = list(learner._network.fc.parameters())
        network_params = [
            {
                "params": param_list,
                "lr": self.ca_lr,
                # NOTE(review): uses learner.weight_decay rather than
                # self.ca_wd, matching the original — confirm intent.
                "weight_decay": learner.weight_decay,
            }
        ]

        # Print trainable parameters for debugging (like original)
        if self.verbose:
            for name, param in learner._network.fc.named_parameters():
                if param.requires_grad:
                    logging.info("Trainable parameter: %s", name)

        # The per-group lr/weight_decay above override these top-level
        # defaults; both are kept for parity with the original call.
        optimizer = optim.SGD(
            network_params,
            lr=self.ca_lr,
            momentum=0.9,
            weight_decay=learner.weight_decay,
        )
        scheduler = optim.lr_scheduler.CosineAnnealingLR(
            optimizer=optimizer, T_max=self.ca_epochs
        )

        ca_forward = learner.ca_forward

        # Eval mode: only fc weights are updated through the optimizer;
        # normalization/dropout behaviour stays fixed during alignment.
        learner._network.eval()
        learner._network.to(learner._device)
        learner._setup_multi_gpu_training()

        # Training loop following original exactly
        results = self._training_loop_exact(learner, optimizer, scheduler, ca_forward)

        # Optionally restore the best checkpoint observed during alignment.
        self._apply_early_stopping(learner)

        return results

    def _validate_learner_interface(self, learner: Any) -> bool:
        """Validate that learner implements required interface for classifier alignment"""
        required_attrs = [
            "_network",
            "_device",
            "_total_classes",
            "GD",
            "test_loader",
            "best_acc_cur",
            "best_acc",
            "best_epoch",
            "_cur_task",
            "args",
            # Fix: both are accessed during alignment but were previously
            # unvalidated, so a missing one crashed instead of raising the
            # intended ValueError.
            "ca_forward",
            "weight_decay",
        ]
        required_methods = ["_setup_multi_gpu_training", "_compute_accuracy"]

        for attr in required_attrs:
            if not hasattr(learner, attr):
                logging.error("Learner missing required attribute: %s", attr)
                return False

        for method in required_methods:
            if not hasattr(learner, method) or not callable(getattr(learner, method)):
                logging.error("Learner missing required method: %s", method)
                return False

        # Check network has fc layer
        if not hasattr(learner._network, "fc"):
            logging.error("Network missing fc (classifier) layer")
            return False

        # Check GD is available and has one distribution per seen class
        if not learner.GD or len(learner.GD) < learner._total_classes:
            logging.error("GD (Gaussian distributions) not properly initialized")
            return False

        # Check args supports item access and has the required scale parameter
        if not hasattr(learner.args, "__getitem__") or "scale" not in learner.args:
            logging.error("Learner args missing 'scale' parameter")
            return False

        return True

    def _training_loop_exact(
        self,
        learner: Any,
        optimizer: optim.Optimizer,
        scheduler: Any,
        ca_forward,
    ) -> Dict[str, Any]:
        """Training loop that exactly follows the original _train_clf_alignment implementation"""
        losses_history = []
        accuracies_history = []
        num_sampled_pcls = 256  # synthetic samples drawn per class per epoch

        for ep in range(self.ca_epochs):
            losses = 0.0

            # Draw num_sampled_pcls synthetic feature vectors per class from
            # the stored Gaussian distributions (once per epoch, like original).
            sampled_data = []
            sampled_label = []
            for c_id in range(learner._total_classes):
                m = learner.GD[c_id]
                sampled_data.append(
                    m.sample(sample_shape=torch.Size((num_sampled_pcls,)))
                )
                sampled_label.extend([c_id] * num_sampled_pcls)

            inputs = torch.cat(sampled_data).float().to(learner._device)
            targets = torch.tensor(sampled_label).long().to(learner._device)

            # Shuffle so each mini-batch mixes classes (exactly like original)
            sf_indexes = torch.randperm(inputs.size(0))
            inputs = inputs[sf_indexes]
            targets = targets[sf_indexes]

            # One mini-batch of num_sampled_pcls samples per iteration
            # (exactly like original)
            for _iter in range(learner._total_classes):
                inp = inputs[_iter * num_sampled_pcls : (_iter + 1) * num_sampled_pcls]
                tgt = targets[_iter * num_sampled_pcls : (_iter + 1) * num_sampled_pcls]

                # Add a singleton dim unless running a single-net classifier
                if not self.single_net:
                    inp = inp[:, None]
                outputs = ca_forward(learner._network, inp)
                logits = outputs["logits"] * learner.args["scale"]
                loss = F.cross_entropy(logits[:, : learner._total_classes], tgt)

                # Backward pass (exactly like original)
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                losses += loss.item()

            # Update scheduler after epoch (exactly like original)
            scheduler.step()

            # Compute test accuracy (exactly like original)
            test_acc = learner._compute_accuracy(learner._network, learner.test_loader)

            logging.info(
                "CA Task %s => Loss %.3f, Test_accy %.3f",
                learner._cur_task,
                losses / learner._total_classes,
                test_acc,
            )

            # Save best model; >= keeps the latest epoch on ties, matching
            # the original logic.
            if test_acc >= learner.best_acc_cur:
                learner.best_acc_cur = test_acc
                learner.best_acc[learner._cur_task] = learner.best_acc_cur
                learner.best_epoch[learner._cur_task] = ep
                learner.best_model = copy.deepcopy(learner._network.state_dict())

                # Also save in handler
                self._best_model_state = copy.deepcopy(learner._network.state_dict())
                self._best_epoch = ep

            losses_history.append(losses / learner._total_classes)
            accuracies_history.append(test_acc)

        # Final report. Fix: the original used "{:2f}" (min-width 2, full
        # float precision) where ".2f" was clearly intended.
        logging.info(
            "Task %s => Best accuracy: %.2f[%s], Average accuracy: %.2f",
            learner._cur_task,
            learner.best_acc_cur,
            learner.best_epoch[learner._cur_task],
            np.mean(learner.best_acc),
        )

        return {
            "best_accuracy": learner.best_acc_cur,
            "final_accuracy": accuracies_history[-1] if accuracies_history else 0.0,
            "losses": losses_history,
            "accuracies": accuracies_history,
            "best_epoch": learner.best_epoch[learner._cur_task],
        }

    def _apply_early_stopping(self, learner: Any) -> None:
        """Apply early stopping by restoring best model (following original logic exactly)"""
        # Use learner's args for early_stop check (exactly like original)
        if learner.args["early_stop"]:
            learner._network.load_state_dict(learner.best_model)
            logging.info(
                f"Early stopping: restored best model from epoch {learner.best_epoch[learner._cur_task]}"
            )

    @property
    def best_model_state(self) -> Optional[Dict[str, Any]]:
        """Best model state_dict observed during alignment, or None if unset."""
        return self._best_model_state

    @property
    def training_results(self) -> Dict[str, Any]:
        """Results dict from the most recent alignment run (empty if never run)."""
        return self._training_results

    @property
    def best_epoch(self) -> Optional[int]:
        """Epoch index of the best checkpoint, or None if unset."""
        return self._best_epoch
