"""
Post-training functionality for continual learning.

This module implements post-training functionality as an event handler,
enabling automatic fine-tuning at specific points in the training pipeline.

The post-training handler is used for:
- Full network fine-tuning after main training
- Additional optimization with reduced learning rate
- Differential learning rates for backbone and FC layers

Key Features:
- Event-driven architecture for modular integration
- Full network fine-tuning with customizable learning rates
- Support for different learning rates for backbone and FC layers
- Early stopping with best model preservation
- Compatible with various learner interfaces

Example:
    >>> # Advanced usage with different learning rates for backbone and FC
    >>> handler_config = HandlerConfig.create(
    ...     type="PostTrainingHandler",
    ...     enabled=True,
    ...     config={
    ...         'pt_epochs': 30,
    ...         'pt_lr': 0.001,          # Default LR
    ...         'bcb_lr': 0.0001,   # Lower LR for backbone
    ...         'fc_lr': 0.005,          # Higher LR for FC layers
    ...         'verbose': True
    ...     }
    ... )
    >>> handler = PostTrainingHandler(handler_config)
"""

import torch
import torch.nn.functional as F
import torch.optim as optim
import logging
import copy
import numpy as np
import traceback
from typing import Dict, Any, Optional, List, Union

from learners.interfaces.event import EventContext, EventType
from learners.events.handlers.base import BaseEventHandler
from learners.events.config import HandlerConfig

from learners.components.training_manager import TrainingManager


class PostTrainingHandler(BaseEventHandler):
    """
    Event handler for post-training fine-tuning in continual learning.

    After the main training phase, this handler fine-tunes the full network,
    optionally with different learning rates for the backbone and the
    FC/classifier head. It tracks the best-performing model state across
    epochs and, when early stopping is enabled, restores that state at the
    end of the run.
    """

    def __init__(self, config: HandlerConfig, *args, **kwargs):
        """
        Initialize post-training handler with configuration.

        Args:
            config: Configuration object whose keys are read via ``config.get``:
                   - pt_epochs: int, number of post-training epochs (default 5)
                   - pt_lr: float, default learning rate (default 0.001)
                   - bcb_lr: float or None, learning rate for backbone
                     parameters; falls back to pt_lr when None
                   - fc_lr: float or None, learning rate for FC/head
                     parameters; falls back to pt_lr when None
                   - pt_wd: float, weight decay for the optimizer (default 1e-4)
                   - verbose: bool, enable verbose logging (default True)
                   - early_stop: bool, restore the best model state after
                     training (default False)
                   - scheduler_type: str, one of "cosine", "step", "none";
                     unknown values fall back to cosine (default "cosine")
        """
        super().__init__(config, *args, name="PostTrainingHandler", **kwargs)

        self.training_manager = TrainingManager(self.config)

        # Hyper-parameters for the post-training phase.
        self.pt_epochs = self.config.get("pt_epochs", 5)
        self.pt_lr = self.config.get("pt_lr", 0.001)
        # Optional per-group learning rates; None means "use pt_lr".
        self.bcb_lr = self.config.get("bcb_lr", None)
        self.fc_lr = self.config.get("fc_lr", None)
        self.pt_wd = self.config.get("pt_wd", 1e-4)
        self.verbose = self.config.get("verbose", True)
        self.early_stop = self.config.get("early_stop", False)
        self.scheduler_type = self.config.get("scheduler_type", "cosine")

        # Internal state tracking the best model observed during fine-tuning.
        self._best_model_state: Optional[Dict[str, Any]] = None
        self._best_accuracy: float = 0.0
        self._best_epoch: Optional[int] = None
        self._training_results: Dict[str, Any] = {}

    def _handle_event(self, context: EventContext) -> None:
        """
        Handle the post-training event by performing fine-tuning.

        Exceptions raised during fine-tuning are caught and logged so that a
        post-training failure does not abort the surrounding pipeline.

        Args:
            context: Event context containing learner instance and training state
        """
        if not self._enabled:
            logging.info("Post-training is disabled")
            return

        if self.pt_epochs <= 0:
            logging.info("Post-training epochs <= 0, skipping")
            return

        if not self._validate_learner_interface(context.learner):
            logging.error("Learner does not meet requirements for post-training")
            return

        # Resolve the training strategy for the current task. Kept on self for
        # external inspection; the training loop receives it explicitly.
        self.strategy = self.training_manager.get_strategy_for_task(
            context.learner._cur_task
        )
        logging.info("=" * 60)
        logging.info(f"Starting Post-Training Fine-tuning [{self.strategy.get_name()}]")
        logging.info("=" * 60)

        try:
            results = self._execute_post_training(context.learner)
            self._training_results = results

            if self.verbose:
                self._log_training_results(results)

        except Exception as e:
            # Best-effort: log and continue rather than crash the pipeline.
            logging.error(f"Error during post-training: {e}")
            if self.verbose:
                logging.error(traceback.format_exc())

    def _execute_post_training(self, learner: Any) -> Dict[str, Any]:
        """
        Execute the post-training procedure based on the selected strategy.

        Args:
            learner: The learner instance

        Returns:
            Dictionary containing training results and metrics
        """
        # When not verbose, reuse the learner's cached accuracy to avoid an
        # extra evaluation pass; when verbose, measure it freshly for logging.
        self._best_accuracy = learner.acc_cur
        initial_accuracy = learner.acc_cur
        if self.verbose:
            initial_accuracy = self._compute_initial_accuracy(learner)
            self._best_accuracy = initial_accuracy
            logging.info(
                f"Initial accuracy before post-training: {initial_accuracy:.2f}%"
            )

        # Setup optimizer and scheduler for the fine-tuning run.
        optimizer, scheduler = self._setup_optimization(learner)

        # Resolve the strategy for this task and pass it explicitly to the
        # loop (no reliance on handler attribute state).
        current_strategy = self.training_manager.get_strategy_for_task(
            learner._cur_task
        )
        if self.verbose:
            logging.info(f"Using training strategy: {current_strategy.get_name()}")
        training_history = self._training_loop(
            learner, optimizer, scheduler, current_strategy
        )

        # Apply early stopping if enabled: restore the best snapshot.
        if self.early_stop and self._best_model_state is not None:
            learner._network.load_state_dict(self._best_model_state)
            logging.info(
                f"Applied early stopping: restored model from epoch {self._best_epoch}"
            )

        # Compile results
        results = {
            "strategy": "full_tuning",
            "initial_accuracy": initial_accuracy,
            "final_accuracy": (
                training_history["accuracies"][-1]
                if training_history["accuracies"]
                else initial_accuracy
            ),
            "best_accuracy": self._best_accuracy,
            "best_epoch": self._best_epoch,
            "total_epochs": len(training_history["accuracies"]),
            "losses": training_history["losses"],
            "accuracies": training_history["accuracies"],
            "learning_rates": training_history["learning_rates"],
        }

        return results

    def _setup_optimization(self, learner: Any) -> tuple:
        """
        Setup optimizer and scheduler for full network fine-tuning.

        Parameters are split into backbone and FC/head groups (by parameter
        name) so each group can use its own learning rate.

        Args:
            learner: The learner instance

        Returns:
            Tuple of (optimizer, scheduler); scheduler is None when
            scheduler_type is "none".

        Raises:
            ValueError: If no trainable parameters are found.
        """
        bcb_params = []
        fc_params = []

        # Classify trainable parameters by name: anything containing
        # "fc"/"head"/"classifier" is treated as part of the classifier.
        for name, param in learner._network.named_parameters():
            if param.requires_grad:
                if any(
                    keyword in name.lower() for keyword in ["fc", "head", "classifier"]
                ):
                    fc_params.append(param)
                else:
                    bcb_params.append(param)

        # Per-group learning rates; None falls back to the default pt_lr.
        bcb_lr = self.bcb_lr if self.bcb_lr is not None else self.pt_lr
        fc_lr = self.fc_lr if self.fc_lr is not None else self.pt_lr

        param_groups = []
        if bcb_params:
            param_groups.append(
                {"params": bcb_params, "lr": bcb_lr, "weight_decay": self.pt_wd}
            )
        if fc_params:
            param_groups.append(
                {"params": fc_params, "lr": fc_lr, "weight_decay": self.pt_wd}
            )

        if not param_groups:
            raise ValueError("No parameters found for optimization")

        optimizer = optim.AdamW(param_groups)

        # Create scheduler; unknown types fall back to cosine annealing.
        if self.scheduler_type == "cosine":
            scheduler = optim.lr_scheduler.CosineAnnealingLR(
                optimizer, T_max=self.pt_epochs
            )
        elif self.scheduler_type == "step":
            # max(1, ...) guards against step_size=0 (invalid for StepLR)
            # when pt_epochs < 3.
            scheduler = optim.lr_scheduler.StepLR(
                optimizer, step_size=max(1, self.pt_epochs // 3), gamma=0.1
            )
        elif self.scheduler_type == "none":
            scheduler = None
        else:
            scheduler = optim.lr_scheduler.CosineAnnealingLR(
                optimizer, T_max=self.pt_epochs
            )

        if self.verbose:
            bcb_param_count = sum(p.numel() for p in bcb_params)
            fc_param_count = sum(p.numel() for p in fc_params)
            total_params = bcb_param_count + fc_param_count

            logging.info(f"Optimizing {total_params:,} parameters:")
            logging.info(f"  - Backbone: {bcb_param_count:,} params, LR: {bcb_lr:.6f}")
            logging.info(f"  - FC/Head: {fc_param_count:,} params, LR: {fc_lr:.6f}")

        return optimizer, scheduler

    def _training_loop(
        self, learner: Any, optimizer: optim.Optimizer, scheduler: Any, strategy
    ) -> Dict[str, List]:
        """
        Execute the post-training loop.

        Updates ``_best_accuracy``, ``_best_epoch`` and ``_best_model_state``
        whenever the test accuracy matches or exceeds the best seen so far.

        Args:
            learner: The learner instance
            optimizer: Optimizer for parameter updates
            scheduler: Learning rate scheduler (or None for constant LR)
            strategy: Training strategy whose ``process_batch`` computes the
                      per-batch loss

        Returns:
            Dictionary containing training history
        """
        losses_history = []
        accuracies_history = []
        lr_history = []

        criterion = torch.nn.CrossEntropyLoss()

        for epoch in range(self.pt_epochs):
            learner._network.train()
            epoch_loss = 0.0
            num_batches = 0

            train_loader = learner.train_loader
            for batch_idx, batch in enumerate(train_loader):
                # Use the strategy passed by the caller (the parameter was
                # previously ignored in favor of self.strategy, which hid a
                # dependency on _handle_event having run first).
                out, _ = strategy.process_batch(
                    batch,
                    batch_idx,
                    criterion,
                    learner._network,
                    learner._device,
                    learner._known_classes,
                    learner.forward_train,
                )
                loss = out["loss"]
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()

                epoch_loss += loss.item()
                num_batches += 1

            # One scheduler step per epoch.
            if scheduler is not None:
                scheduler.step()

            # max(..., 1) avoids division by zero on an empty loader.
            avg_loss = epoch_loss / max(num_batches, 1)
            # Learning rates for all parameter groups (backbone, FC).
            lrs = [param_group["lr"] for param_group in optimizer.param_groups]
            current_lr = lrs[0]  # Use first group's LR for compatibility

            # Evaluate on the test set and keep a snapshot of the best model.
            test_accuracy = learner._compute_accuracy(
                learner._network, learner.test_loader
            )
            if test_accuracy >= self._best_accuracy:
                self._best_accuracy = test_accuracy
                self._best_epoch = epoch
                self._best_model_state = copy.deepcopy(learner._network.state_dict())

            losses_history.append(avg_loss)
            accuracies_history.append(test_accuracy)
            lr_history.append(current_lr)

            # Log roughly 10 times over the run, plus the final epoch.
            if self.verbose and (
                epoch % max(1, self.pt_epochs // 10) == 0 or epoch == self.pt_epochs - 1
            ):
                if len(lrs) > 1:
                    lr_str = f"LR(B/FC): {lrs[0]:.6f}/{lrs[1]:.6f}"
                else:
                    lr_str = f"LR: {lrs[0]:.6f}"

                logging.info(
                    f"Post-Train Epoch {epoch + 1}/{self.pt_epochs} | "
                    f"Loss: {avg_loss:.4f} | "
                    f"Accuracy: {test_accuracy:.2f}% | "
                    f"{lr_str} | "
                    f"Best: {self._best_accuracy:.2f}%[{self._best_epoch}]"
                )

        return {
            "losses": losses_history,
            "accuracies": accuracies_history,
            "learning_rates": lr_history,
        }

    def _compute_initial_accuracy(self, learner: Any) -> float:
        """
        Compute initial accuracy before post-training.

        Args:
            learner: The learner instance

        Returns:
            Initial accuracy as percentage
        """
        learner._network.eval()
        with torch.no_grad():
            accuracy = learner._compute_accuracy(learner._network, learner.test_loader)
        return accuracy

    def _log_training_results(self, results: Dict[str, Any]) -> None:
        """
        Log comprehensive training results.

        Args:
            results: Training results dictionary
        """
        logging.info("=" * 60)
        logging.info("Post-Training Results Summary")
        logging.info("=" * 60)
        logging.info(f"Strategy: {results['strategy']}")
        logging.info(f"Initial Accuracy: {results['initial_accuracy']:.2f}%")
        logging.info(f"Final Accuracy: {results['final_accuracy']:.2f}%")
        logging.info(
            f"Best Accuracy: {results['best_accuracy']:.2f}% (Epoch {results['best_epoch']})"
        )
        logging.info(
            f"Improvement: {results['best_accuracy'] - results['initial_accuracy']:+.2f}%"
        )
        logging.info(f"Total Epochs: {results['total_epochs']}")

        if len(results["accuracies"]) > 1:
            accuracy_trend = (
                "↗" if results["accuracies"][-1] > results["accuracies"][0] else "↘"
            )
            logging.info(f"Accuracy Trend: {accuracy_trend}")

        logging.info("=" * 60)

    def _validate_learner_interface(self, learner: Any) -> bool:
        """
        Validate that learner implements required interface for post-training.

        Args:
            learner: The learner instance to validate

        Returns:
            True if learner is compatible, False otherwise
        """
        required_attrs = [
            "_network",
            "_device",
            "_cur_task",
            "args",
            "train_loader",
            "test_loader",
        ]

        # Check basic required attributes
        for attr in required_attrs:
            if not hasattr(learner, attr):
                logging.error(f"Learner missing required attribute: {attr}")
                return False

        # Check for methods needed for accuracy computation
        if not hasattr(learner, "_compute_accuracy") or not callable(
            getattr(learner, "_compute_accuracy")
        ):
            logging.error("Learner missing _compute_accuracy method")
            return False

        return True

    # Properties for accessing training state
    @property
    def best_model_state(self) -> Optional[Dict[str, Any]]:
        """Get the best model state from post-training."""
        return self._best_model_state

    @property
    def training_results(self) -> Dict[str, Any]:
        """Get the complete training results."""
        return self._training_results

    @property
    def best_accuracy(self) -> float:
        """Get the best accuracy achieved during post-training."""
        return self._best_accuracy

    @property
    def best_epoch(self) -> Optional[int]:
        """Get the epoch where best accuracy was achieved."""
        return self._best_epoch