import logging
from typing import Callable
import numpy as np
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader

from utils.toolkit import accuracy_cil, accuracy_dil, tensor2numpy, time_execution
from models.base import Base
from learners.strategies.multi_gpu_training import (
    create_multi_gpu_strategy,
    MultiGPUContext,
)
from typing import TypeVar, Generic, Any
from abc import abstractmethod

from functools import partial

# Type variable for the concrete network model; bound to Base so every
# learner's network exposes the Base interface.
T = TypeVar("T", bound=Base)

# Small constant guarding against division by zero in numerical code.
EPSILON = 1e-8
# NOTE(review): module-level default batch size; instances read
# args["batch_size"] in __init__ instead — confirm this global is still used.
batch_size = 64


class BaseLearner(Generic[T]):
    _network: T
    _old_network: T

    def __init__(self, args, data_manager, model_func: Callable = Base):
        """Initialize BaseLearner with decoupled multi-GPU support.

        Args:
            args: Configuration mapping. Keys read here: "interval",
                "batch_size", "init_lr", "inc_lr", "min_lr" (optional),
                "weight_decay", "init_cls", "inc_cls", "memory_size",
                "memory_per_class" (optional), "fixed_memory" (optional),
                "device" (list; first entry is the primary device), and
                optionally "cls_num" (via _set_up_CL).
            data_manager: Dataset manager exposing ``nb_tasks``.
            model_func: Factory building the network from ``args``
                (defaults to ``Base``).
        """

        """model predefine"""
        super().__init__()
        self._network = model_func(args)
        self.data_manager = data_manager
        # Forward hooks default to a plain forward pass; subclasses may swap
        # them to customize test/train/continual-adaptation behavior.
        self.forward_test = lambda model, inputs: model(inputs)
        self.forward_train = lambda model, inputs: model(inputs)
        self.ca_forward = lambda model, inputs: model(inputs, ca=True)
        self.GD = []

        # Configure the continual-learning setting (picks CIL or DIL accuracy).
        self._set_up_CL(args)
        """training config
        """
        self.nb_tasks = data_manager.nb_tasks
        self.args = args
        # TODO if evaluate using NME
        self.evalNME = True
        self.interval = args["interval"]
        self.batch_size = args["batch_size"]
        self.init_lr = args["init_lr"]
        self.inc_lr = args["inc_lr"]
        # Fall back to 1e-8 when "min_lr" is missing or explicitly None.
        self.min_lr = args["min_lr"] if args.get("min_lr", None) is not None else 1e-8
        # NOTE(review): unlike "min_lr" above, this requires the
        # "weight_decay" key to exist (KeyError when absent) — confirm intended.
        self.weight_decay = (
            args["weight_decay"] if args["weight_decay"] is not None else 0.0005
        )
        self.init_cls = args["init_cls"]
        self.inc_cls = args["inc_cls"]
        self.task_sizes = []

        """Best model storage
        """
        self.best_model: dict = {}
        self.best_acc_cur = 0
        self.best_acc = []
        self.best_epoch = []

        """Original
        """
        # -1 means "no task started yet"; advanced as tasks are trained.
        self._cur_task = -1
        self._known_classes = 0
        self._total_classes = 0
        # Exemplar (rehearsal) memory contents.
        self._data_memory, self._targets_memory = np.array([]), np.array([])

        self._memory_size = args["memory_size"]
        self._memory_per_class = args.get("memory_per_class", None)
        self._fixed_memory = args.get("fixed_memory", False)
        # First listed device acts as the primary device.
        self._device = args["device"][0]
        self._multiple_gpus = args["device"]

        # Initialize clean multi-GPU strategy
        self._multi_gpu_strategy = create_multi_gpu_strategy(self._multiple_gpus)
        self._multi_gpu_context = MultiGPUContext(self._multi_gpu_strategy)

    def _set_up_CL(self, args):
        """Select the accuracy metric for the continual-learning setting.

        ``cls_num == 0`` (the default) selects the class-incremental metric;
        any other value selects the domain-incremental metric bound to that
        class count.
        """
        cls_num = args.get("cls_num", 0)
        self.cls_num = cls_num
        self.accuracy = (
            accuracy_cil if cls_num == 0 else partial(accuracy_dil, cls_num=cls_num)
        )

    def extract_token(self, inputs):
        """Extract feature tokens from the network for ``inputs``.

        Dispatches through the multi-GPU strategy so any DataParallel-style
        wrapping stays transparent to callers.
        """
        return self._multi_gpu_strategy.execute_method(
            self._network, "extract_token", inputs
        )

    @property
    def feature_dim(self) -> int:
        """Dimensionality of the network's feature embedding."""
        dim = self._get_network_property("feature_dim")
        return int(dim)

    def save_checkpoint(self, filename):
        """Save the network weights for the current task to ``{filename}_{task}.pkl``.

        NOTE(review): the network is moved to CPU here and NOT moved back to
        ``self._device`` afterwards — confirm callers re-place it.
        """
        self._network.cpu()
        save_dict = {
            "tasks": self._cur_task,
            "model_state_dict": self._network.state_dict(),
        }
        torch.save(save_dict, "{}_{}.pkl".format(filename, self._cur_task))

    def _train(self, *args, **kwargs):
        """Per-task training hook; no-op here, intended for subclass override."""
        pass

    def _compute_accuracy(self, model, loader):
        """Compute top-1 accuracy of ``model`` on ``loader``.

        Returns:
            Accuracy as a percentage rounded to 2 decimals.
        """
        model.eval()
        correct = 0
        total = 0
        for _, inputs, targets in loader:
            inputs = inputs.to(self._device)
            with torch.no_grad():
                logits = self.forward_test(model, inputs)["logits"]
            preds = logits.argmax(dim=1)
            correct += (preds.cpu() == targets).sum()
            total += len(targets)

        return np.around(tensor2numpy(correct) * 100 / total, decimals=2)

    def _evaluate(self, y_pred, y_true):
        ret = {}
        grouped = self.accuracy(
            y_pred.T[0],
            y_true,
            self._known_classes,
            self.args["inc_cls"],
        )
        ret["grouped"] = grouped
        ret["top1"] = grouped["total"]
        ret["top5"] = np.around(
            (y_pred.T == np.tile(y_true, (self.topk, 1))).sum() * 100 / len(y_true),
            decimals=2,
        )
        # FIXME - self.topk is changing all the time, should be "top{}".format(self.topk)
        # ret["top{}".format(self.topk)] = np.around(
        #     (y_pred.T == np.tile(y_true, (self.topk, 1))).sum() * 100 / len(y_true),
        #     decimals=2,
        # )

        return ret

    @torch.no_grad()
    def _eval_cnn(self, loader):
        """Evaluate the CNN head on ``loader``.

        Returns:
            tuple: (y_pred [N, topk] top-k class predictions, y_true [N]
            ground-truth labels), both as numpy arrays.

        When ``self.inc_cls > 0``, additionally logs task-identification
        accuracy and task-conditioned (task-masked) class accuracy.
        """
        calc_task_acc = True if self.inc_cls > 0 else False

        # Initialize task accuracy variables unconditionally to avoid unbound variable errors
        task_correct, task_acc, total = 0, 0, 0

        self._network.eval()
        y_pred, y_true = [], []
        for _, (_, inputs, targets) in enumerate(loader):
            inputs = inputs.to(self._device)
            outputs = self.forward_test(self._network, inputs)["logits"]

            # Get top-k predictions
            predicts = torch.topk(
                outputs, k=self.topk, dim=1, largest=True, sorted=True
            )[
                1
            ]  # [bs, topk]
            y_pred.append(predicts.cpu().numpy())
            y_true.append(targets.cpu().numpy())

            # calculate the accuracy by using task_id
            if calc_task_acc:
                # Map each label to its task index (task 0 = first init_cls
                # classes). NOTE(review): labels below init_cls map to task 0
                # only when init_cls == inc_cls — confirm for unequal splits.
                task_ids = (targets - self.init_cls) // self.inc_cls + 1
                # Mask logits so each sample only competes within its own
                # task's class range.
                task_logits = torch.zeros(outputs.shape).to(self._device)
                for i, task_id in enumerate(task_ids):
                    if task_id == 0:
                        start_cls = 0
                        end_cls = self.init_cls
                    else:
                        start_cls = self.init_cls + (task_id - 1) * self.inc_cls
                        end_cls = self.init_cls + task_id * self.inc_cls
                    task_logits[i, start_cls:end_cls] += outputs[i, start_cls:end_cls]

                # calculate the accuracy of task_id
                pred_task_ids = (
                    torch.max(outputs, dim=1)[1] - self.init_cls
                ) // self.inc_cls + 1
                task_correct += (pred_task_ids.cpu() == task_ids).sum()
                # Calculate per-task class accuracy
                pred_task_y = torch.max(task_logits, dim=1)[1]
                task_acc += (pred_task_y.cpu() == targets).sum()
                total += len(targets)

        # Log task-specific accuracy if calculated
        if calc_task_acc:
            logging.info(
                "Task correct: {}".format(tensor2numpy(task_correct) * 100 / total)
            )
            logging.info("Task acc: {}".format(tensor2numpy(task_acc) * 100 / total))

        return np.concatenate(y_pred), np.concatenate(y_true)  # [N, topk]

    @torch.no_grad()
    def _eval_nme(self, loader, class_means):
        """Nearest-mean-of-exemplars evaluation.

        Scores each sample by the dot product of its L2-normalized feature
        vector with each L2-normalized class mean (cosine similarity), then
        returns the top-k predicted classes and the true labels.
        """
        self._network.eval()
        feats, y_true = self._extract_tokens(loader)
        means = class_means.to(dtype=torch.float32, device=self._device)
        normed_feats = F.normalize(feats, p=2, dim=1)
        normed_means = F.normalize(means, p=2, dim=1)
        scores = tensor2numpy(F.linear(normed_feats, normed_means))

        topk_pred = np.argsort(-scores, axis=1)[:, : self.topk]  # [N, topk]
        return topk_pred, tensor2numpy(y_true, dtype=np.int32)  # type: ignore

    def eval_task(self):
        """Evaluate the current task.

        Always evaluates with the CNN head; additionally evaluates with the
        NME classifier when class means exist and NME evaluation is enabled.

        Returns:
            tuple: (cnn_accuracy_dict, nme_accuracy_dict_or_None)
        """
        cnn_pred, cnn_true = self._eval_cnn(self.test_loader)  # type: ignore
        cnn_accy = self._evaluate(cnn_pred, cnn_true)

        nme_accy = None
        if self.evalNME and hasattr(self, "_class_means"):
            nme_pred, nme_true = self._eval_nme(self.test_loader, self._class_means)  # type: ignore
            nme_accy = self._evaluate(nme_pred, nme_true)

        return cnn_accy, nme_accy

    def _extract_tokens(self, loader):
        """Run the network over ``loader`` and collect feature tokens.

        Returns:
            tuple: (tokens [N, feature_dim], targets [N]) tensors on
            ``self._device``.
        """
        self._network.eval()
        all_tokens = []
        all_targets = []

        with torch.no_grad():
            for _, batch_inputs, batch_targets in loader:
                batch_inputs = batch_inputs.to(self._device)
                batch_targets = batch_targets.to(self._device)
                all_tokens.append(self.extract_token(batch_inputs))
                all_targets.append(batch_targets)

        return torch.cat(all_tokens, dim=0), torch.cat(all_targets, dim=0)

    @time_execution
    def _compute_class_mean(self, data_manager):
        """Compute per-class feature means and covariances for new classes.

        Extends (or creates) ``self._class_means`` [num_classes, feature_dim]
        and ``self._class_covs`` [num_classes, feature_dim, feature_dim],
        filling rows ``self._known_classes .. self._total_classes - 1`` from
        features extracted on each class's training data (test-mode
        transforms). Regularizes covariances to be positive definite and
        emits a summary of the regularization methods used plus any
        numerically problematic classes.

        Raises:
            AssertionError: if the existing mean bank size disagrees with
                ``self._known_classes`` or NaNs appear in the statistics.
            ValueError: via ``_validate_covariance_matrix`` when a matrix is
                not positive definite even after regularization.
        """
        logging.info(
            f"Compute cls means and covs for new classes: {self._known_classes} to {self._total_classes}"
        )

        if getattr(self, "_class_means", None) is not None:
            # Grow the banks, preserving statistics of already-known classes.
            ori_classes = self._class_means.shape[0]
            assert ori_classes == self._known_classes
            new_class_means = torch.zeros(
                (self._total_classes, self.feature_dim), dtype=torch.float64
            )
            new_class_means[: self._known_classes] = self._class_means
            self._class_means = new_class_means

            new_class_cov = torch.zeros(
                (self._total_classes, self.feature_dim, self.feature_dim),
                dtype=torch.float64,
            )
            new_class_cov[: self._known_classes] = self._class_covs
            self._class_covs = new_class_cov
        else:
            # First call: allocate fresh, zero-initialized banks.
            self._class_means = torch.zeros(
                (self._total_classes, self.feature_dim), dtype=torch.float64
            )
            self._class_covs = torch.zeros(
                (self._total_classes, self.feature_dim, self.feature_dim),
                dtype=torch.float64,
            )

        # Initialize tracking variables for summary logging
        regularization_methods_used = {}
        problematic_classes = {
            "det": [],  # Classes with very small determinants
            "cond": [],  # Classes with high condition numbers
        }
        problematic_vals = {
            "det_values": {},  # Store actual determinant values
            "cond_values": {},  # Store actual condition number values
        }

        for class_idx in range(self._known_classes, self._total_classes):
            # One class at a time: train split with test-mode transforms.
            _, _, idx_dataset = data_manager.get_dataset(
                np.arange(class_idx, class_idx + 1),
                source="train",
                mode="test",
                ret_data=True,
            )
            idx_loader = DataLoader(
                idx_dataset,
                batch_size=128,
                shuffle=False,
                num_workers=4,
            )
            # [nb_samples, feature_dim]
            vectors, _ = self._extract_tokens(idx_loader)
            # float64 on CPU for numerically stable covariance estimation.
            vectors = vectors.to("cpu", torch.float64)

            class_mean = torch.mean(vectors, dim=0)

            # Ensure covariance matrix is positive definite
            class_cov, method_used = self._compute_robust_covariance(
                vectors, class_mean
            )

            # Track regularization methods used
            if method_used not in regularization_methods_used:
                regularization_methods_used[method_used] = 0
            regularization_methods_used[method_used] += 1

            self._class_means[class_idx, :] = class_mean
            self._class_covs[class_idx, ...] = class_cov

            # Validate that matrices are positive definite and collect problematic info
            validation_result = self._validate_covariance_matrix(class_cov, class_idx)
            if validation_result["det_issue"]:
                problematic_classes["det"].append(class_idx)
                problematic_vals["det_values"][class_idx] = validation_result[
                    "det_value"
                ]
            if validation_result["cond_issue"]:
                problematic_classes["cond"].append(class_idx)
                problematic_vals["cond_values"][class_idx] = validation_result[
                    "cond_value"
                ]

            # Check for NaN values
            assert not torch.isnan(
                self._class_means
            ).any(), f"NaN detected in class means for class {class_idx}"
            assert not torch.isnan(
                self._class_covs
            ).any(), f"NaN detected in class covariance for class {class_idx}"

        # Summary logging
        total_new_classes = self._total_classes - self._known_classes
        logging.info(
            f"Covariance computation completed for {total_new_classes} classes:"
        )
        for method, count in regularization_methods_used.items():
            logging.info(
                f"  - {method}: {count} classes ({count/total_new_classes*100:.1f}%)"
            )

        # Consolidated problematic classes reporting
        if problematic_classes["det"] or problematic_classes["cond"]:
            logging.warning("Covariance matrix issues detected:")

            if problematic_classes["det"]:
                det_classes = problematic_classes["det"]
                det_summary = (
                    f"  - Small determinant classes ({len(det_classes)}): {det_classes}"
                )
                if len(det_classes) <= 5:  # Show details for few classes
                    det_details = [
                        f"class {cls}: {problematic_vals['det_values'][cls]:.2e}"
                        for cls in det_classes
                    ]
                    det_summary += f" (values: {', '.join(det_details)})"
                logging.warning(det_summary)

            if problematic_classes["cond"]:
                cond_classes = problematic_classes["cond"]
                cond_summary = f"  - High condition number classes ({len(cond_classes)}): {cond_classes}"
                if len(cond_classes) <= 5:  # Show details for few classes
                    cond_details = [
                        f"class {cls}: {problematic_vals['cond_values'][cls]:.2e}"
                        for cls in cond_classes
                    ]
                    cond_summary += f" (values: {', '.join(cond_details)})"
                logging.warning(cond_summary)

    def _compute_robust_covariance(
        self,
        vectors,
        mean_vec,
        regularization_methods=["tikhonov", "shrinkage", "eigenvalue_clipping"],
    ):
        """
        Compute a robust positive definite covariance matrix

        Args:
            vectors: Feature vectors [num_samples, feature_dim]
            mean_vec: Mean vector for the class
            regularization_methods: List of regularization methods to try

        Returns:
            tuple: (Positive definite covariance matrix, method_used)
        """
        num_samples, feature_dim = vectors.shape

        # Center the data
        centered_vectors = vectors - mean_vec.unsqueeze(0)

        # Basic covariance computation
        if num_samples > 1:
            cov_mat = torch.cov(vectors.T)
            # Ensure symmetry
            cov_mat = (cov_mat + cov_mat.T) / 2
        else:
            # Single sample case - use identity matrix with small scale
            cov_mat = torch.eye(feature_dim, dtype=torch.float64) * 1e-3
            logging.warning(
                "Only one sample available for covariance computation, using scaled identity matrix"
            )
            return cov_mat, "single_sample_identity"

        # Check if matrix is already positive definite
        if self._is_positive_definite(cov_mat):
            return cov_mat, "no_regularization"

        # Apply regularization methods
        for method in regularization_methods:
            if method == "tikhonov":
                # Tikhonov regularization (ridge regression style)
                regularized_cov = self._apply_tikhonov_regularization(
                    cov_mat, feature_dim
                )
            elif method == "shrinkage":
                # Ledoit-Wolf shrinkage
                regularized_cov = self._apply_shrinkage_regularization(
                    cov_mat, centered_vectors
                )
            elif method == "eigenvalue_clipping":
                # Eigenvalue clipping
                regularized_cov = self._apply_eigenvalue_clipping(cov_mat)
            else:
                continue

            if self._is_positive_definite(regularized_cov):
                # Only log once at the end with summary instead of per-class
                return regularized_cov, method

        # Fallback: use scaled identity matrix
        logging.warning(
            "All regularization methods failed, using scaled identity matrix"
        )
        return (
            torch.eye(feature_dim, dtype=torch.float64)
            * torch.trace(cov_mat)
            / feature_dim,
            "fallback_identity",
        )

    def _is_positive_definite(self, matrix, tolerance=1e-8):
        """Check if a matrix is positive definite"""
        try:
            # Try Cholesky decomposition
            torch.linalg.cholesky(matrix)
            return True
        except RuntimeError:
            # Check eigenvalues as backup
            eigenvals = torch.linalg.eigvals(matrix).real
            return torch.all(eigenvals > tolerance)

    def _apply_tikhonov_regularization(
        self, cov_mat, feature_dim, min_reg=1e-6, max_reg=1e-2
    ):
        """Apply Tikhonov regularization with adaptive regularization parameter"""
        # Start with a small regularization parameter and increase if needed
        reg_params = [1e-6, 1e-5, 1e-4, 1e-3, 1e-2]

        for reg_param in reg_params:
            regularized_cov = (
                cov_mat + torch.eye(feature_dim, dtype=torch.float64) * reg_param
            )
            if self._is_positive_definite(regularized_cov):
                return regularized_cov

        # If all fail, use maximum regularization
        return cov_mat + torch.eye(feature_dim, dtype=torch.float64) * max_reg

    def _apply_shrinkage_regularization(
        self, cov_mat, centered_vectors, target_type="identity"
    ):
        """Apply shrinkage regularization (Ledoit-Wolf style)"""
        num_samples, feature_dim = centered_vectors.shape

        if target_type == "identity":
            # Shrink towards identity matrix
            target = (
                torch.eye(feature_dim, dtype=torch.float64)
                * torch.trace(cov_mat)
                / feature_dim
            )
        elif target_type == "diagonal":
            # Shrink towards diagonal matrix
            target = torch.diag(torch.diag(cov_mat))
        else:
            target = torch.eye(feature_dim, dtype=torch.float64)

        # Estimate optimal shrinkage parameter
        if num_samples > feature_dim:
            # Use analytical estimate when possible
            shrinkage = min(1.0, (feature_dim + 1) / (num_samples - 1))
        else:
            # Use higher shrinkage for small sample sizes
            shrinkage = 0.5

        shrunk_cov = (1 - shrinkage) * cov_mat + shrinkage * target
        return shrunk_cov

    def _apply_eigenvalue_clipping(self, cov_mat, min_eigenval=1e-6):
        """Apply eigenvalue clipping to ensure positive definiteness"""
        try:
            # Eigenvalue decomposition
            eigenvals, eigenvecs = torch.linalg.eigh(cov_mat)

            # Clip negative eigenvalues
            eigenvals = torch.clamp(eigenvals, min=min_eigenval)

            # Reconstruct matrix
            regularized_cov = eigenvecs @ torch.diag(eigenvals) @ eigenvecs.T
            return regularized_cov
        except RuntimeError as e:
            logging.warning(f"Eigenvalue clipping failed: {e}")
            return cov_mat

    def _validate_covariance_matrix(self, cov_matrix, class_idx):
        """Validate covariance matrix properties with reduced logging"""
        validation_result = {
            "det_issue": False,
            "cond_issue": False,
            "det_value": 0.0,
            "cond_value": 0.0,
        }

        # Check positive definiteness
        if not self._is_positive_definite(cov_matrix):
            logging.error(
                f"Covariance matrix for class {class_idx} is not positive definite"
            )
            raise ValueError(
                f"Failed to create positive definite covariance matrix for class {class_idx}"
            )

        # Check determinant and store the value
        det = torch.linalg.det(cov_matrix)
        validation_result["det_value"] = float(det)
        if det <= 1e-12:  # Very small determinant indicates numerical issues
            validation_result["det_issue"] = True

        # Check for reasonable condition number
        try:
            cond_num = torch.linalg.cond(cov_matrix)
            validation_result["cond_value"] = float(cond_num)
            if cond_num > 1e12:
                validation_result["cond_issue"] = True
        except RuntimeError:
            logging.warning(f"Could not compute condition number for class {class_idx}")
            validation_result["cond_value"] = float("inf")

        # Only log detailed info for first and last class for debugging
        if class_idx == self._known_classes or class_idx == self._total_classes - 1:
            logging.debug(
                f"Covariance matrix for class {class_idx}: det={det:.2e}, cond={validation_result['cond_value']:.2e}"
            )

        return validation_result

    def get_cls_range(self, task_id):
        if task_id == 0:
            start_cls = 0
            end_cls = self.init_cls
        else:
            start_cls = self.init_cls + (task_id - 1) * self.inc_cls
            end_cls = start_cls + self.inc_cls

        return start_cls, end_cls

    @property
    def topk(self):
        """Get the top-k value for evaluation
        Returns:
            int: Top-k value, default is 5 or the number of classes if less
        """
        topk = min(5, self._total_classes)
        return topk

    # LifecycleManager

    @abstractmethod
    def before_task(self):
        """Hook invoked before a new task starts; subclasses must implement."""
        pass

    @abstractmethod
    def before_train(self):
        """Hook invoked right before training begins; subclasses must implement."""
        pass

    @abstractmethod
    def after_train(self):
        """Hook invoked right after training finishes; subclasses must implement."""
        pass

    @abstractmethod
    def after_task(self):
        """Hook invoked after a task completes; subclasses must implement."""
        pass

    @abstractmethod
    def incremental_train(self) -> Any:
        """Execute the complete incremental training pipeline for the current task.

        Returns:
            A dict containing training results and metrics.
        """
        pass

    # === Multi-GPU Strategy Methods (Clean Implementation) ===

    def _setup_multi_gpu_training(self):
        """Setup network for multi-GPU training using the configured strategy.

        The strategy may return a wrapped network (presumably DataParallel —
        see the strategy module), so ``self._network`` is reassigned.
        """
        self._network = self._multi_gpu_context.setup_training(self._network)  # type: ignore

    def _teardown_multi_gpu_training(self):
        """Teardown multi-GPU training, restoring whatever network object the
        strategy returns (``self._network`` is reassigned)."""
        self._network = self._multi_gpu_context.teardown_training(self._network)  # type: ignore

    def _call_network_method(self, method_name: str, *args, **kwargs):
        """Call a network method through the strategy (handles DataParallel
        wrapping transparently); extra args/kwargs are forwarded unchanged."""
        return self._multi_gpu_strategy.execute_method(
            self._network, method_name, *args, **kwargs
        )

    def _get_network_property(self, property_name: str, default=None):
        """Read a network property through the strategy (handles DataParallel
        transparently), returning ``default`` per the strategy's semantics."""
        return self._multi_gpu_strategy.get_property(
            self._network, property_name, default
        )

    def _set_network_property(self, property_name: str, value):
        """Write a network property through the strategy (handles DataParallel
        transparently)."""
        self._multi_gpu_strategy.set_property(self._network, property_name, value)

    def _is_multi_gpu_active(self) -> bool:
        """Return True when the strategy reports multi-GPU training is active."""
        return self._multi_gpu_strategy.is_multi_gpu_active()
