from __future__ import annotations

import copy
import logging
import os
from typing import Callable, TypeVar, Any
import numpy as np
import torch
from torch import optim
from torch.distributions.multivariate_normal import MultivariateNormal
from torch.optim.adam import Adam
from torch.optim.adamw import AdamW
from torch.optim.sgd import SGD
from torch.utils.data import DataLoader

from learners.base import BaseLearner
from learners.components.training_manager import TrainingManager
from learners.events.mixins import EventCapableMixin
from models import BaseSingle, BaseMulti, Base
from utils.loss import AngularPenaltySMLoss, LossFactory
from utils.toolkit import AccMeter, LossMeter, NamespaceDict, TrainMP, time_execution
from learners.interfaces.event import EventType
from learners.events.decorators import emit_events

# Event-system integration notes:
# 1. Every key lifecycle method is wrapped with the @emit_events decorator so it fires events
# 2. Event types follow this naming convention:
#    - ON_XXX_START: fired before the method body runs
#    - ON_XXX_END: fired after the method completes
# 3. Each decorator may use the extract_args parameter to pass context to event handlers
# 4. Event handling is defined in configuration; no code changes are needed here

num_workers = 8

# Type variable restricted to subclasses of the model base classes
T = TypeVar("T", bound=BaseSingle | BaseMulti | Base)


class Learner(BaseLearner[T], EventCapableMixin):
    _network: T
    _old_network: T

    def __init__(self, args, data_manager, model_func: Callable[..., T] = BaseSingle):
        """Set up the learner, its training-strategy manager, and the event system."""
        super().__init__(args, data_manager, model_func)
        # Strategy pattern: per-task training behavior is delegated to a manager.
        self.training_manager = TrainingManager(args)
        # Wire the unified event system from the user-supplied configuration.
        self.configure_events(args["event_config"])

    """
    Incremental Training
    """

    def get_optimizer(self, lr):
        """Build the optimizer named in ``self.args["optimizer"]``.

        Args:
            lr: Learning rate to use.

        Returns:
            SGD, Adam, or AdamW over the network's ``requires_grad``
            parameters. Unknown names fall back to AdamW (same as original).
        """
        # Collect trainable parameters once instead of repeating the filter
        # in every branch (the original duplicated this four times).
        params = [p for p in self._network.parameters() if p.requires_grad]
        name = self.args["optimizer"]

        if name == "sgd":
            # SGD is the only branch that takes momentum.
            return SGD(params, momentum=0.9, lr=lr, weight_decay=self.weight_decay)
        if name == "adam":
            return Adam(params, lr=lr, weight_decay=self.weight_decay)
        # "adamw" and any unrecognized optimizer name both resolve to AdamW.
        return AdamW(params, lr=lr, weight_decay=self.weight_decay)

    def get_scheduler(self, optimizer, epoch):
        """Build the LR scheduler named in ``self.args["scheduler"]``.

        Args:
            optimizer: Optimizer whose learning rate will be scheduled.
            epoch: Total number of training epochs for the current task.

        Returns:
            A torch scheduler, or ``None`` for "constant"/unknown settings.
        """
        if self.args["scheduler"] == "cosine":
            scheduler = optim.lr_scheduler.CosineAnnealingLR(
                optimizer=optimizer, T_max=epoch, eta_min=self.min_lr
            )
        elif self.args["scheduler"] == "steplr":
            scheduler = optim.lr_scheduler.MultiStepLR(
                optimizer=optimizer,
                milestones=self.args["milestones"],
                gamma=self.args["lr_decay"],
            )
        elif self.args["scheduler"] == "linearwarmup_cosine":
            warmup = optim.lr_scheduler.LinearLR(
                optimizer,
                start_factor=0.01,
                total_iters=self.args.warmup_epochs,
            )
            cosine = optim.lr_scheduler.CosineAnnealingLR(
                optimizer,
                T_max=epoch - self.args.warmup_epochs,
                eta_min=self.min_lr,
            )
            # BUGFIX: ChainedScheduler steps BOTH schedulers on every epoch, so
            # the cosine decay already runs during warmup, and once its T_max is
            # exhausted the (periodic) cosine curve raises the LR again.
            # SequentialLR runs the warmup phase first, then switches to cosine
            # for the remaining epochs — the intended warmup-then-cosine shape.
            scheduler = optim.lr_scheduler.SequentialLR(
                optimizer,
                schedulers=[warmup, cosine],
                milestones=[self.args.warmup_epochs],
            )
        elif self.args["scheduler"] == "constant":
            scheduler = None
        else:
            # Default: no scheduler (constant learning rate).
            scheduler = None

        return scheduler

    def get_loss_function(self):
        """Build the task's loss function from configuration.

        Reads ``loss_type`` (default ``"cosface"``) plus its hyper-parameters
        from ``self.args`` and delegates construction to ``LossFactory``.
        Falls back to a cosface ``AngularPenaltySMLoss`` when the factory
        rejects the configuration.
        """
        # Warn loudly when the loss type is left implicit.
        if "loss_type" not in self.args:
            logging.warning(
                "No 'loss_type' specified in configuration arguments. "
                "Using default loss type 'cosface'. "
                "Consider adding 'loss_type' to your configuration for explicit control."
            )

        loss_type = self.args.get("loss_type", "cosface")

        # Superset of loss parameters: each concrete loss picks what it needs.
        loss_kwargs = {
            "scale": self.args.get("scale", 20),
            "margin": self.args.get("margin", 0.0),
            "eps": self.args.get("eps", 1e-7),
            "angular_type": self.args.get("angular_type", "cosface"),
            "alpha": self.args.get("focal_alpha", 1),
            "gamma": self.args.get("focal_gamma", 2),
            "num_classes": self.task_size,
            "smoothing": self.args.get("label_smoothing", 0.1),
            "temperature": self.args.get("distill_temperature", 4.0),
        }

        try:
            return LossFactory.create_loss(loss_type, **loss_kwargs)
        except ValueError as e:
            # BUGFIX: these warnings went to stdout via print(); route them
            # through logging like the rest of this module.
            logging.warning("Warning: %s", e)
            logging.warning("Falling back to default AngularPenaltySMLoss with cosface")
            return AngularPenaltySMLoss(
                loss_type="cosface",
                eps=1e-7,
                s=self.args.get("scale", 20),
                m=self.args.get("margin", 0.0),
            )

    @time_execution
    @emit_events(
        before=EventType.ON_FIT_START,
        after=EventType.ON_FIT_END,
    )
    def incremental_train(self):
        """Run one full incremental step: setup, training, evaluation, teardown.

        Returns:
            The ``(cnn_accy, nme_accy)`` pair produced by ``eval_task()``.
        """
        self.before_task()
        self.before_train()

        self._train(self.train_loader, self.test_loader)

        self.after_train()
        accuracies = self.eval_task()
        self.after_task()

        return accuracies

    @emit_events(
        before=EventType.ON_BEFORE_NEW_TASK,
        after=EventType.ON_TASK_START,
    )
    def before_task(self):
        """Advance task bookkeeping and notify the network of the new task."""
        self._cur_task += 1
        self.task_size = self.data_manager.get_task_size(self._cur_task)
        self.task_sizes.append(self.task_size)
        self._total_classes = self._known_classes + self.task_size

        # Zero-padded class range, e.g. "00-09".
        first = str(self._known_classes).rjust(2, "0")
        last = str(self._total_classes - 1).rjust(2, "0")
        logging.info("Learning on {}-{}".format(first, last))

        self._network.before_task()

    @emit_events(
        before=EventType.ON_BEFORE_TRAIN_START,
        after=EventType.ON_BEFORE_TRAIN_END,
    )
    def before_train(self):
        """Reset per-task best-model tracking and build this task's dataloaders."""
        # Fresh best-accuracy bookkeeping for the new task.
        self.best_acc_cur = 0.0
        self.best_model = {}
        self.best_acc.append(self.best_acc_cur)
        self.best_epoch.append(0)

        self.train_loader, self.test_loader = self._prepare_dataloaders()
        self._network.to(self._device)

    def _prepare_dataloaders(self):
        """Build the train loader (new classes only) and test loader (all seen classes)."""
        new_classes = np.arange(self._known_classes, self._total_classes)
        seen_classes = np.arange(0, self._total_classes)

        train_loader = DataLoader(
            self.data_manager.get_dataset(new_classes, source="train", mode="train"),
            batch_size=self.batch_size,
            shuffle=True,
            num_workers=num_workers,
        )
        test_loader = DataLoader(
            self.data_manager.get_dataset(seen_classes, source="test", mode="test"),
            batch_size=self.batch_size,
            shuffle=False,
            num_workers=num_workers,
        )
        return train_loader, test_loader

    """Training
    """

    @emit_events(
        before=EventType.ON_TRAIN_START,
        after=EventType.ON_TRAIN_END,
    )
    def _train(self, train_loader, test_loader):
        """Log model stats, reset the meters, then run the task training loop."""
        self._log_model_details()
        self._initialize_meters()
        self._train_task(train_loader, test_loader)

    def _log_model_details(self):
        """Print the trainable-parameter count; in verbose mode also profile MACs/FLOPs.

        Verbose profiling imports ptflops/fvcore lazily so they are only
        required when ``args["verbose"]`` is enabled.
        """
        n_parameters = sum(
            p.numel() for p in self._network.parameters() if p.requires_grad
        )
        print(f"number of trainable params: {n_parameters}")

        if self.args["verbose"]:
            # BUGFIX: "trainig" -> "training" in the alert message.
            logging.info(
                "ALERT: verbose mode is on, which may damage the training accuracy\n"
                + "ONLY ENABLE WHEN NEEDED"
            )
            from ptflops import get_model_complexity_info

            macs, params = get_model_complexity_info(
                self._network,
                (3, 224, 224),
                as_strings=True,
                print_per_layer_stat=True,
                verbose=True,
            )
            # BUGFIX: the original f-string formatted the (macs, params) tuple.
            print(f"MACs: {macs}, Params: {params}")

            from fvcore.nn import FlopCountAnalysis, flop_count_table

            dummy_input = torch.randn(1, 3, 224, 224)
            flops = FlopCountAnalysis(self._network, dummy_input.to(self._device))
            flops.total()
            print(flop_count_table(flops))

    def _initialize_meters(self):
        """Create the accuracy/loss meters and the epoch-level display processor."""
        self.acc_meter = AccMeter(names=["train_acc"], fmt=".2f", tail="\t")
        self.loss_meter = LossMeter(names=["cross_entropy"], fmt=".3f", tail="\t")
        self.meter_processor = TrainMP(
            name="Train",
            epochs=self.epoch,
            meters=[self.acc_meter, self.loss_meter],
            prefix="",
        )

        # Register any learner-specific loss names (subclasses extend loss_family).
        self.loss_meter.add(self.loss_family)

    @emit_events(
        before=EventType.ON_BEFORE_TASK_TRAINING,
        after=EventType.ON_AFTER_TASK_TRAINING,
    )
    def _train_task(self, train_loader, test_loader):
        """Assemble optimizer/scheduler/criterion/strategy and launch training."""
        epochs = self.epoch
        optimizer: SGD | Adam | AdamW = self.get_optimizer(lr=self.lr)
        scheduler = self.get_scheduler(optimizer, epochs)

        # Clean multi-GPU setup
        self._setup_multi_gpu_training()

        # Loss function comes from the configurable factory.
        criterion = self.get_loss_function()

        # Resolve which training strategy applies to the current task.
        strategy = self.training_manager.get_strategy_for_task(self._cur_task)
        logging.info(f"Using training strategy: {strategy.get_name()}")

        self._train_func(
            epochs,
            train_loader,
            test_loader,
            optimizer=optimizer,
            scheduler=scheduler,
            criterion=criterion,
            strategy=strategy,
        )

    @property
    def _train_batch(self):
        """Select the batch handler: ``_init_train_batch`` for the initial task
        (task 0) and ``_inc_train_batch`` for all later, incremental tasks."""
        return self._init_train_batch if self._cur_task == 0 else self._inc_train_batch

    # Batch handling is refactored to use the strategy pattern
    def _init_train_batch(
        self, batch, batch_idx, criterion, strategy=None
    ) -> tuple[dict[str, Any], dict[str, Any]]:
        """Process one batch, delegating to the strategy when one is provided."""
        if strategy is not None:
            return strategy.process_batch(
                batch,
                batch_idx,
                criterion,
                self._network,
                self._device,
                self._known_classes,
                self.forward_train,
            )
        # No strategy supplied: standard processing (backward compatibility).
        return self._standard_train_batch(batch, batch_idx, criterion)

    def _standard_train_batch(self, batch, batch_idx, criterion):
        """Handle a single training batch against the current task's logit slice.

        Targets are remapped into the current task's label space (samples from
        previously learned classes map to -1) and loss/accuracy are computed
        on the logits of the new classes only.

        Args:
            batch: ``(indices, inputs, targets)`` triple from the DataLoader.
            batch_idx: Unused; kept for the shared batch-hook signature.
            criterion: Loss callable applied to (sliced logits, remapped targets).

        Returns:
            Tuple of model outputs (``loss`` plus full ``logits``) and the
            metric dicts consumed by the accuracy/loss meters.
        """
        # NOTE: the original placed the docstring AFTER `del batch_idx`,
        # where it was a plain no-op string, not a docstring. Fixed here.
        del batch_idx  # unused, part of the shared interface
        _, inputs, targets = batch
        inputs, targets = inputs.to(self._device), targets.to(self._device)
        # Shift labels into the current task's range; old-class samples -> -1.
        aux_targets = targets.clone()
        aux_targets = torch.where(
            aux_targets - self._known_classes >= 0,
            aux_targets - self._known_classes,
            -1,
        )
        output = self.forward_train(self._network, inputs)
        logits = output["logits"][:, self._known_classes :]
        loss = criterion(logits, aux_targets)

        _, preds = torch.max(logits, dim=1)
        correct = preds.eq(aux_targets.expand_as(preds)).cpu().sum().item()
        total = len(aux_targets)

        out = {
            "loss": loss,
            "logits": output["logits"],
        }
        o_metrics = {
            "acc_metric": {"train_acc": {"correct": correct, "total": total}},
            "loss_metric": {"cross_entropy": loss.item()},
        }

        return out, o_metrics

    def _inc_train_batch(self, batch, batch_idx, criterion, strategy=None):
        """Incremental-task batch hook.

        Defaults to the initial-task handler; subclasses override this to
        provide task-specific incremental training behavior.
        """
        return self._init_train_batch(batch, batch_idx, criterion, strategy)

    @property
    def _train_epoch(self):
        """Select the epoch handler: ``_init_train_epoch`` for the initial task
        (task 0) and ``_inc_train_epoch`` for all later, incremental tasks."""
        return self._init_train_epoch if self._cur_task == 0 else self._inc_train_epoch

    def _init_train_epoch(
        self, train_loader, criterion, optimizer, scheduler, strategy=None
    ):
        """Run one training epoch: forward/backward per batch, then one scheduler step."""
        self._network.train()

        for step, batch in enumerate(train_loader):
            out, metrics = self._train_batch(batch, step, criterion, strategy)

            optimizer.zero_grad()
            out["loss"].backward()
            optimizer.step()

            self.acc_meter.update(metrics["acc_metric"])
            self.loss_meter.update(metrics["loss_metric"])

        # The scheduler (if any) advances once per epoch, not per batch.
        if scheduler is not None:
            scheduler.step()

    def _inc_train_epoch(
        self, train_loader, criterion, optimizer, scheduler, strategy=None
    ):
        """Incremental-task epoch hook.

        Defaults to the initial-task epoch loop; subclasses override this to
        provide task-specific incremental training behavior.
        """
        return self._init_train_epoch(
            train_loader, criterion, optimizer, scheduler, strategy
        )

    @property
    def _train_func(self):
        """Select the training loop: ``_init_train_func`` for the initial task
        (task 0) and ``_inc_train_func`` for all later, incremental tasks."""
        return self._init_train_func if self._cur_task == 0 else self._inc_train_func

    @time_execution
    @emit_events(
        before=EventType.ON_TRAIN_FUNC_START,
        after=EventType.ON_TRAIN_FUNC_END,
    )
    def _init_train_func(
        self,
        epochs,
        train_loader,
        test_loader,
        optimizer,
        scheduler,
        criterion,
        strategy=None,
    ):
        """Train for ``epochs`` epochs while tracking the best checkpoint.

        Test accuracy is recomputed every ``self.interval`` epochs and on the
        final epoch; in between, the last measured value is logged. When
        ``early_stop`` is configured, the best weights are restored at the end.
        """
        test_acc = 0.0
        self.meter_processor.reset_timer()
        for ep in range(epochs):
            self._train_epoch(train_loader, criterion, optimizer, scheduler, strategy)
            current_lrs = [group["lr"] for group in optimizer.param_groups]
            self.meter_processor.display(ep + 1, lr=current_lrs)

            # Periodic (and final-epoch) evaluation on the test set.
            if (ep + 1) % self.interval == 0 or ep == epochs - 1:
                test_acc = self._compute_accuracy(self._network, test_loader)

            logging.info(
                f"Task {self._cur_task}, Epoch {ep + 1}/{epochs}"
                f"=> Loss {self.loss_meter.avg['cross_entropy']:.3f}, "
                f"TrAcc {self.acc_meter.avg['train_acc']:.2f}, TeAcc {test_acc:.2f}"
            )

            # Track the best-so-far model for this task.
            self.acc_cur = test_acc
            if test_acc >= self.best_acc_cur:
                self.best_acc_cur = test_acc
                self.best_acc[self._cur_task] = self.best_acc_cur
                self.best_epoch[self._cur_task] = ep
                self.best_model = copy.deepcopy(self._network.state_dict())

        logging.info(
            f"Task {self._cur_task} => "
            f"Best accuracy: {self.best_acc_cur:.2f}[{self.best_epoch[self._cur_task]}], "
            f"Average accuracy: {np.mean(self.best_acc):.2f}"
        )

        # With early stopping enabled, roll back to the best checkpoint.
        if self.args["early_stop"]:
            self._network.load_state_dict(self.best_model)

    @time_execution
    @emit_events(
        before=EventType.ON_TRAIN_FUNC_START,
        after=EventType.ON_TRAIN_FUNC_END,
    )
    def _inc_train_func(
        self,
        epochs,
        train_loader,
        test_loader,
        optimizer,
        scheduler,
        criterion,
        strategy=None,
    ):
        """Incremental-task training loop.

        Defaults to the initial-task loop; subclasses override this to
        provide task-specific incremental training behavior.
        """
        return self._init_train_func(
            epochs,
            train_loader,
            test_loader,
            optimizer,
            scheduler,
            criterion,
            strategy,
        )

    """After training
    """

    @emit_events(
        before=EventType.ON_AFTER_TRAIN_START,
        after=EventType.ON_AFTER_TRAIN_END,
    )
    def after_train(self):
        """Tear down multi-GPU wrappers and refresh per-class statistics."""
        self._teardown_multi_gpu_training()
        self._compute_class_mean(self.data_manager)
        self._compute_class_distribution()
        self._network.after_train()

    """After task learning
    """

    @time_execution
    @emit_events(
        before=EventType.ON_AFTER_TASK_START,
        after=EventType.ON_TASK_END,
    )
    def after_task(self):
        """Persist checkpoints/backups and fold the new classes into the known set."""
        # NOTE(review): teardown also runs in after_train — presumably safe to
        # call twice; confirm against the teardown implementation.
        self._teardown_multi_gpu_training()
        self._save_ckps()
        self._backup()
        self._known_classes = self._total_classes
        self._network.after_task()

    def _save_ckps(self):
        """Save a CPU copy of the full network to ``args["ckp_path"]``.

        No-op when ``ckp_path`` is unset. The filename encodes the dataset,
        the class range of the current task, and the task's best accuracy.
        """
        dir_path = self.args["ckp_path"]
        if dir_path is None:
            return
        # exist_ok avoids the check-then-create race of the original.
        os.makedirs(dir_path, exist_ok=True)

        start_cls, end_cls = self.get_cls_range(self._cur_task)
        ckp_path = "{}/{}_{}_{}[{:.2f}].pth".format(
            dir_path,
            self.args["dataset"],
            start_cls,
            end_cls,
            self.best_acc_cur,
        )
        # Deep-copy so moving to CPU does not disturb the live (device) model.
        network_copy = copy.deepcopy(self._network)
        network_copy.to("cpu")
        torch.save(network_copy, ckp_path)

    def _backup(self):
        """Keep a frozen, eval-mode deep copy of the current network in ``_old_network``."""
        snapshot = copy.deepcopy(self._network)
        snapshot.requires_grad_(False)
        snapshot.eval()
        self._old_network = snapshot

    """General
    """

    @property
    def epoch(self):
        """Epoch budget for the current task (initial vs incremental setting)."""
        key = "init_epochs" if self._cur_task == 0 else "inc_epochs"
        return self.args[key]

    @property
    def lr(self):
        """Learning rate for the current task (initial vs incremental setting)."""
        key = "init_lr" if self._cur_task == 0 else "inc_lr"
        return self.args[key]

    @property
    def loss_family(self):
        """Names of the loss terms this learner reports; subclasses may extend."""
        return ["cross_entropy"]

    @property
    def inc_states(self):
        """Snapshot of the incremental-learning bookkeeping as a NamespaceDict."""
        return NamespaceDict(
            {
                "cur_task": self._cur_task,
                "task_size": self.task_size,
                "task_sizes": self.task_sizes,
                "known_classes": self._known_classes,
                "total_classes": self._total_classes,
            }
        )

    def set_states(self, states):
        """Restore incremental bookkeeping from a previously captured snapshot."""
        # Log every field transition (old => new) before applying it.
        logging.info(
            "Setting states:"
            f"\n\tcur_task({self._cur_task}) => {states.cur_task},"
            f"\n\ttask_size({self.task_size}) => {states.task_size},"
            f"\n\ttask_sizes({self.task_sizes}) => {states.task_sizes},"
            f"\n\tknown_classes({self._known_classes}) => {states.known_classes},"
            f"\n\ttotal_classes({self._total_classes}) => {states.total_classes}"
        )

        self._cur_task = states.cur_task
        self.task_size = states.task_size
        self.task_sizes = states.task_sizes
        self._known_classes = states.known_classes
        self._total_classes = states.total_classes

    def _compute_class_distribution(self, update=False):
        """(Re)build per-class Gaussian distributions from stored means/covariances.

        Args:
            update: When False, append distributions for the current task's
                classes to ``self.GD``; when True, reset ``self.GD`` and
                rebuild it for every class seen so far.
        """
        logging.info(
            f"Computing class distribution for class {self._known_classes} - {self._total_classes}"
        )
        # The original duplicated the entire per-class body across both
        # branches; only the class range (and the reset) differs.
        if update:
            self.GD = []
            class_range = range(self._total_classes)
        else:
            class_range = range(self._known_classes, self._total_classes)

        for class_idx in class_range:
            self.GD.append(self._build_class_gaussian(class_idx))
        # evaluate the Centered Kernel Alignment Similarity between class distribution and the network output

    def _build_class_gaussian(self, class_idx):
        """Return a MultivariateNormal for one class, regularizing non-PD covariances.

        The stored covariance is symmetrized and Cholesky-factorized; if the
        factorization fails (matrix not positive definite), a small epsilon is
        added to the diagonal before retrying.
        """
        class_mean = self._class_means[class_idx].clone().cpu()
        class_cov = self._class_covs[class_idx].clone().cpu()

        # Symmetrize to guard against numerical asymmetry.
        cov_matrix = (class_cov + class_cov.T) / 2
        try:
            L = torch.linalg.cholesky(cov_matrix)
        except Exception:
            # Decomposition failed: the matrix is not positive definite, so
            # add an epsilon to the diagonal first.
            print(f"Cholesky decomposition failed for class {class_idx}")
            epsilon = 1e-4
            cov_matrix = cov_matrix + epsilon * torch.eye(class_mean.size(-1))
            L = torch.linalg.cholesky(cov_matrix)

        # Rebuild the covariance from the factor so it is exactly PSD.
        return MultivariateNormal(class_mean, L @ L.T)
