"""
Mahalanobis Distance OOD检测器
"""

import torch
import numpy as np
from sklearn.covariance import LedoitWolf, EmpiricalCovariance
from tqdm import tqdm


class MahalanobisDetector:
    """Mahalanobis-distance OOD detector.

    Fits class-conditional Gaussians (one mean per class, a single shared
    covariance) on features of in-distribution training data, then scores
    test samples by their Mahalanobis distance to the nearest class centroid.
    With ``use_relative=True`` the score is the Relative Mahalanobis
    Distance: nearest-class distance minus the distance under a single
    class-agnostic Gaussian, which discounts directions that are "far" for
    every class alike.
    """

    def __init__(
        self, num_classes, feat_dim, use_relative=True, normalize_features=True
    ):
        """
        Args:
            num_classes: number of known (in-distribution) classes.
            feat_dim: feature dimensionality (1024 for ViT-L, 768 for ViT-B).
            use_relative: if True, score with the Relative Mahalanobis Distance.
            normalize_features: stored for interface compatibility; it is not
                read anywhere in this class.
        """
        self.num_classes = num_classes
        self.feat_dim = feat_dim  # 1024 for ViT-L, 768 for ViT-B
        self.use_relative = use_relative
        self.normalize_features = normalize_features

        # Class-conditional parameters (populated by fit()).
        self.class_means = None
        self.class_precision = None

        # Class-agnostic parameters, used only for Relative Mahalanobis.
        self.global_mean = None
        self.global_precision = None

        self.fitted = False

    def _collect_features(self, model, dataloader, device):
        """Extract features for the whole loader, bucketed by class label.

        Samples with a negative label (unknown/OOD markers) are skipped.

        Returns:
            (class_features, all_features) where class_features maps
            class index -> list of 1-D feature arrays, and all_features is
            the flat list of every kept feature.
        """
        class_features = {i: [] for i in range(self.num_classes)}
        all_features = []

        with torch.no_grad():
            for data, target in tqdm(dataloader, desc="Collecting features"):
                data = data.to(device)
                # NOTE(review): assumes the model exposes a
                # `return_features=True` path returning pre-logit features.
                features = model(data, return_features=True).cpu().numpy()

                for i, label in enumerate(target):
                    if label >= 0:  # only keep known-class samples
                        class_features[label.item()].append(features[i])
                        all_features.append(features[i])

        return class_features, all_features

    def _safe_inverse(self, covariance, name):
        """Invert a covariance matrix, falling back to the pseudo-inverse.

        Args:
            name: short tag ("class"/"global") used in the log message.
        """
        try:
            return np.linalg.inv(covariance)
        except np.linalg.LinAlgError as e:
            print(f"Using pseudo-inverse for {name} precision matrix")
            print(f"Error: {e}")
            return np.linalg.pinv(covariance)

    def fit(self, model, dataloader, device):
        """Estimate class means and the shared (plus global) precision matrix.

        Must be called before get_ood_scores().
        """
        model.eval()

        class_features, all_features = self._collect_features(
            model, dataloader, device
        )

        # Per-class means.
        self.class_means = np.zeros((self.num_classes, self.feat_dim))
        for class_idx in range(self.num_classes):
            if class_features[class_idx]:
                self.class_means[class_idx] = np.mean(
                    np.array(class_features[class_idx]), axis=0
                )

        # Center each class's features at its own mean; pooling the centered
        # features gives the shared (tied) class-conditional covariance.
        centered_chunks = [
            np.array(class_features[class_idx]) - self.class_means[class_idx]
            for class_idx in range(self.num_classes)
            if class_features[class_idx]
        ]
        all_centered_features = np.concatenate(centered_chunks, axis=0)

        print("Computing class-conditional covariance matrix...")

        # Ridge strength added on top of shrinkage, to keep the matrix well
        # conditioned in high dimension.
        regularization = 0.1

        try:
            # Ledoit-Wolf shrinkage is the preferred, more stable estimator.
            lw = LedoitWolf(assume_centered=True)
            class_covariance = lw.fit(all_centered_features).covariance_
            print(f"Using Ledoit-Wolf shrinkage: {lw.shrinkage_:.4f}")
            # Scale-aware ridge: proportional to the mean variance so the
            # added diagonal does not swamp the signal.
            class_covariance += (
                regularization
                * np.trace(class_covariance)
                / self.feat_dim
                * np.eye(self.feat_dim)
            )
        except Exception:
            # Bug fix: previously caught only np.linalg.LinAlgError, but
            # LedoitWolf.fit can raise other exception types (e.g. ValueError),
            # which escaped the fallback; the global path below already caught
            # Exception.
            print("Ledoit-Wolf failed, using regularized empirical covariance")
            emp_cov = EmpiricalCovariance(assume_centered=True)
            class_covariance = emp_cov.fit(all_centered_features).covariance_
            # Plain (stronger) ridge for the fallback estimator.
            class_covariance += regularization * np.eye(self.feat_dim)

        self.class_precision = self._safe_inverse(class_covariance, "class")

        # Global (class-agnostic) statistics for Relative Mahalanobis.
        global_covariance = None
        if self.use_relative:
            print("Computing global statistics for Relative Mahalanobis...")
            all_features = np.array(all_features)

            self.global_mean = np.mean(all_features, axis=0)
            centered_features = all_features - self.global_mean

            try:
                lw_global = LedoitWolf(assume_centered=True)
                global_covariance = lw_global.fit(centered_features).covariance_
                print(f"Global Ledoit-Wolf shrinkage: {lw_global.shrinkage_:.4f}")
                global_covariance += (
                    regularization
                    * np.trace(global_covariance)
                    / self.feat_dim
                    * np.eye(self.feat_dim)
                )
            except Exception as e:
                print(
                    "Global Ledoit-Wolf failed, using regularized empirical covariance"
                )
                print(f"Error: {e}")
                emp_cov_global = EmpiricalCovariance(assume_centered=True)
                global_covariance = emp_cov_global.fit(centered_features).covariance_
                global_covariance += regularization * np.eye(self.feat_dim)

            self.global_precision = self._safe_inverse(global_covariance, "global")

        self.fitted = True
        print("Mahalanobis detector fitted successfully")

        # Summary statistics for sanity checking.
        print(f"Total features collected: {len(all_features)}")
        print(f"Feature dimension: {self.feat_dim}")
        print(
            f"Class covariance condition number: {np.linalg.cond(class_covariance):.2e}"
        )
        if self.use_relative:
            print(
                f"Global covariance condition number: {np.linalg.cond(global_covariance):.2e}"
            )

    def compute_distance(self, features, class_idx, precision_matrix, mean_vector):
        """Batch Mahalanobis distance of `features` to `mean_vector`.

        Args:
            features: (batch, feat_dim) array.
            class_idx: unused; kept for backward compatibility with callers.
            precision_matrix: (feat_dim, feat_dim) inverse covariance.
            mean_vector: (feat_dim,) distribution mean.

        Returns:
            (batch,) array of non-negative distances.
        """
        diff = features - mean_vector
        # Row-wise diff @ P @ diff^T without materializing the batch x batch
        # product: elementwise-multiply (diff @ P) by diff and sum over features.
        dist_squared = np.sum((diff @ precision_matrix) * diff, axis=1)
        # Clamp tiny negatives caused by floating-point error before sqrt.
        dist_squared = np.maximum(dist_squared, 0)
        return np.sqrt(dist_squared)

    def get_ood_scores(self, model, dataloader, device):
        """Score a dataloader; higher score means more in-distribution.

        Returns:
            (scores, labels) arrays, where labels are 1 for known
            (in-distribution) samples and 0 for OOD samples.

        Raises:
            RuntimeError: if fit() has not been called.
        """
        if not self.fitted:
            raise RuntimeError("Detector not fitted. Call fit() first.")

        model.eval()

        all_scores = []
        all_labels = []

        with torch.no_grad():
            for data, target in tqdm(
                dataloader, desc="Computing Mahalanobis distances"
            ):
                data = data.to(device)
                features = model(data, return_features=True).cpu().numpy()

                # Distance to every class centroid under the shared precision.
                class_distances = np.stack(
                    [
                        self.compute_distance(
                            features,
                            class_idx,
                            self.class_precision,
                            self.class_means[class_idx],
                        )
                        for class_idx in range(self.num_classes)
                    ],
                    axis=1,
                )
                min_class_distances = np.min(class_distances, axis=1)

                if self.use_relative:
                    # Relative Mahalanobis: subtract the distance under the
                    # class-agnostic Gaussian from the nearest-class distance.
                    global_distances = self.compute_distance(
                        features, 0, self.global_precision, self.global_mean
                    )
                    scores = -(min_class_distances - global_distances)
                else:
                    # Standard Mahalanobis: negated nearest-class distance.
                    scores = -min_class_distances

                all_scores.extend(scores)
                # Labels: 1 = known class (in-distribution), 0 = unknown (OOD).
                all_labels.extend(
                    1 if 0 <= t < self.num_classes else 0 for t in target
                )

        return np.array(all_scores), np.array(all_labels)
