# server.py
# Encapsulates all behavior of the central server.

import torch
import torch.nn as nn
import torch.optim as optim
from tqdm import tqdm
import numpy as np
from label_shift_estimator import MLLSEstimator
from utils import get_logits_and_labels
from sklearn.metrics import (
    precision_recall_fscore_support, balanced_accuracy_score, cohen_kappa_score, 
    log_loss, brier_score_loss, matthews_corrcoef, confusion_matrix
)

class Server:
    """Central server of the federated pipeline.

    Responsibilities: aggregating client classifier/calibration parameters,
    building the effective source prior, estimating target priors via EM
    (label-shift adaptation), and evaluating the final global model.
    """

    def __init__(self, global_model, server_unlabeled_loader, test_loader, config):
        self.global_model = global_model.to(config['device'])
        self.unlabeled_loader = server_unlabeled_loader
        self.test_loader = test_loader
        self.config = config
        self.device = config['device']

        # Global state maintained by the server across rounds.
        self.fused_calibrator = None
        self.effective_source_priors = None
        self.estimated_target_priors = None
        self.client_weighted_priors = None  # weighted average of client priors (used by Scheme 1)

    def aggregate_classifier_params(self, client_updates):
        """Aggregate the clients' classifier-head parameters.

        Uses a weighted average with weights proportional to each client's
        sample count (standard FedAvg).

        Args:
            client_updates: list of dicts, each with 'num_samples' (int) and
                'classifier_params' (dict of param name -> tensor).

        Returns:
            dict: aggregated parameter tensors on ``self.device``.
        """
        print("Server: Aggregating classifier parameters...")

        # Total number of samples across all clients (normalizes the weights).
        total_samples = sum(update['num_samples'] for update in client_updates)

        # Take the parameter structure from the first client.
        aggregated_params = {}
        first_classifier_params = client_updates[0]['classifier_params']

        # Initialize the accumulators with zeros of matching shape.
        for param_name, param_tensor in first_classifier_params.items():
            aggregated_params[param_name] = torch.zeros_like(param_tensor, device=self.device)

        # Weighted average over all clients.
        for update in client_updates:
            weight = update['num_samples'] / total_samples
            classifier_params = update['classifier_params']

            for param_name, param_tensor in classifier_params.items():
                aggregated_params[param_name] += weight * param_tensor.to(self.device)

        print(f"Server: Aggregated classifier from {len(client_updates)} clients")
        return aggregated_params


    def execute_scheme(self, client_updates, round_num=1):
        """Dispatch to the server-side logic of the configured scheme.

        Args:
            client_updates: updates uploaded by the clients.
            round_num: communication round (only Scheme 2 distinguishes rounds).

        Returns:
            dict: server-side result, possibly containing information to be
            distributed back to the clients.

        Raises:
            ValueError: if ``config['scheme']`` is unknown.
        """
        scheme = self.config['scheme']

        if scheme == 'scheme1':
            return self._execute_scheme1(client_updates)
        elif scheme == 'scheme2':
            if round_num == 1:
                return self._execute_scheme2_round1(client_updates)
            else:
                return self._execute_scheme2_round2(client_updates)
        elif scheme == 'scheme3':
            return self._execute_scheme3(client_updates)
        else:
            raise ValueError(f"Unknown scheme: {scheme}")

    def _execute_scheme1(self, client_updates):
        """Scheme 1: traditional FedAvg + EM adaptation.

        - Aggregates the classifier-head parameters.
        - Computes the sample-weighted average of client priors (used as the
          effective source prior).
        - Runs the EM algorithm for label-shift adaptation.
        """
        print("\n--- Server: Executing Scheme 1 (Traditional FedAvg + EM Adaptation) ---")

        # Aggregate the classifier-head parameters.
        aggregated_classifier_params = self.aggregate_classifier_params(client_updates)

        # Load the aggregated head into the global model.
        self.global_model.classifier.load_state_dict(aggregated_classifier_params)

        # Weighted average of client priors (used later for distribution
        # quality assessment and as the EM source prior).
        self._compute_client_weighted_priors(client_updates)

        # Run EM for label-shift adaptation.
        self._run_em_for_scheme1()

        print("Server: Global classifier updated with aggregated parameters")

        return {"status": "completed"}

    def _compute_client_weighted_priors(self, client_updates):
        """Compute the sample-weighted average of the clients' label priors.

        Used as the distribution estimate for Scheme 1. Re-iterates every
        client's data to count the true label distribution.
        """
        total_samples = sum(update['num_samples'] for update in client_updates)
        num_classes = self.config['num_classes']
        self.client_weighted_priors = torch.zeros(num_classes, device=self.device)

        print("Computing client weighted priors for Scheme 1...")

        # Re-prepare the federated split to obtain the label distributions.
        # NOTE(review): this assumes prepare_federated_data is deterministic so
        # the loaders match the ones the clients trained on — verify upstream.
        from data_handler_federated import prepare_federated_data

        client_dataloaders, _, _, _ = prepare_federated_data(self.config)

        for i, client_loader in enumerate(client_dataloaders):
            # Count this client's label distribution (vectorized: bincount
            # replaces the per-sample Python loop, identical counts).
            label_counts = torch.zeros(num_classes)
            total_client_samples = 0

            # Count the training-split labels.
            for _, labels in client_loader['train']:
                label_counts += torch.bincount(labels.cpu(), minlength=num_classes).to(label_counts.dtype)
                total_client_samples += labels.numel()

            # Include the calibration split if present.
            if client_loader['calib'] is not None:
                for _, labels in client_loader['calib']:
                    label_counts += torch.bincount(labels.cpu(), minlength=num_classes).to(label_counts.dtype)
                    total_client_samples += labels.numel()

            # Convert counts to a probability distribution.
            client_priors = label_counts / total_client_samples

            # Accumulate, weighted by the client's share of all samples.
            weight = total_client_samples / total_samples
            self.client_weighted_priors += weight * client_priors.to(self.device)

            print(f"Client {i} priors: {np.round(client_priors.detach().cpu().numpy(), 3)} (weight: {weight:.3f})")

        print(f"Client Weighted Priors: {np.round(self.client_weighted_priors.detach().cpu().numpy(), 3)}")

    def _run_em_for_scheme1(self):
        """Run the EM algorithm for Scheme 1's label-shift adaptation.

        Uses the aggregated classifier head WITHOUT calibration and the
        client-weighted priors as the source prior.
        """
        print("\n--- Running EM Algorithm for Scheme 1 ---")

        # The client-weighted priors act as the effective source prior.
        self.effective_source_priors = self.client_weighted_priors
        print(f"Using Client Weighted Priors as Source Priors: {np.round(self.effective_source_priors.detach().cpu().numpy(), 3)}")

        # Logits on the server's unlabeled data (uncalibrated, raw logits).
        print("Getting logits on server's unlabeled data with aggregated model...")
        target_logits, _ = get_logits_and_labels(self.global_model, self.unlabeled_loader, self.device)

        # Convert directly to probabilities (no calibration, i.e. T=1, b=0).
        target_probs = torch.softmax(target_logits, dim=1)
        print("Using uncalibrated probabilities (T=1, b=0) for EM algorithm...")

        # Estimate the target priors with EM.
        print("Running EM algorithm to estimate target priors...")
        estimator = MLLSEstimator(self.config['num_classes'], self.config['em_max_iter'], self.config['em_tol'])
        self.estimated_target_priors = estimator.estimate_target_priors(target_probs, self.effective_source_priors)

        print(f"Estimated Target Priors (q_hat): {np.round(self.estimated_target_priors.detach().cpu().numpy(), 3)}")

        # Assess estimation quality if the true target distribution is known.
        if 'true_target_priors' in self.config:
            self._evaluate_estimation_quality()

    def _execute_scheme2_round1(self, client_updates):
        """Scheme 2, round 1: aggregate the classifier head and distribute it."""
        print("\n--- Server: Executing Scheme 2 Round 1 (Aggregate and Distribute Classifier) ---")

        # Aggregate the classifier-head parameters.
        aggregated_classifier_params = self.aggregate_classifier_params(client_updates)

        # Load the aggregated head into the global model.
        self.global_model.classifier.load_state_dict(aggregated_classifier_params)

        print("Server: Global classifier ready for distribution")

        return {
            "status": "round1_completed",
            "global_classifier_params": aggregated_classifier_params
        }

    def _execute_scheme2_round2(self, client_updates):
        """Scheme 2, round 2: aggregate calibration parameters and run EM."""
        print("\n--- Server: Executing Scheme 2 Round 2 (Aggregate Calibration and EM) ---")

        return self._aggregate_calibration_and_em(client_updates)

    def _execute_scheme3(self, client_updates):
        """Scheme 3: one-round communication.

        Aggregates the classifier head and the calibration parameters in the
        same round, then runs EM.
        """
        print("\n--- Server: Executing Scheme 3 (One-Round Communication) ---")

        # 1. Aggregate the classifier-head parameters.
        aggregated_classifier_params = self.aggregate_classifier_params(client_updates)
        self.global_model.classifier.load_state_dict(aggregated_classifier_params)

        # 2. Aggregate calibration parameters and run EM.
        return self._aggregate_calibration_and_em(client_updates)

    def _aggregate_calibration_and_em(self, client_updates):
        """Shared routine: fuse calibration parameters and run the EM algorithm.

        Builds the fused calibrator f_fused, the effective source prior
        p_eff(y), then estimates the target prior q(y) on the server's
        unlabeled data.
        """
        print("Server: Aggregating calibration parameters and running EM...")

        # 1. Build the fused calibrator f_fused (sample-weighted average of
        #    the clients' temperature and bias).
        total_samples = sum(update['num_samples'] for update in client_updates)

        avg_temp = torch.zeros(1, device=self.device)
        avg_bias = torch.zeros(self.config['num_classes'], device=self.device)

        for update in client_updates:
            weight = update['num_samples'] / total_samples
            avg_temp += weight * update['calib_params']['temperature'].to(self.device)
            avg_bias += weight * update['calib_params']['bias'].to(self.device)

        self.fused_calibrator = nn.ParameterDict({
            'temperature': nn.Parameter(avg_temp),
            'bias': nn.Parameter(avg_bias)
        })
        print(f"Fused Calibrator: T={avg_temp.item():.3f}, Bias norm={torch.norm(avg_bias).item():.3f}")

        # 2. Build the effective source prior p_eff(y).
        self.effective_source_priors = torch.zeros(self.config['num_classes'], device=self.device)
        for update in client_updates:
            weight = update['num_samples'] / total_samples
            self.effective_source_priors += weight * update['local_priors'].to(self.device)

        print(f"Effective Source Priors (p_eff): {np.round(self.effective_source_priors.detach().cpu().numpy(), 3)}")

        # 3. Estimate the target prior q(y).
        print("Getting logits on server's unlabeled data with global model...")
        target_logits, _ = get_logits_and_labels(self.global_model, self.unlabeled_loader, self.device)

        print("Applying fused calibrator...")
        calibrated_target_logits = target_logits / self.fused_calibrator['temperature'] + self.fused_calibrator['bias']
        target_probs = torch.softmax(calibrated_target_logits, dim=1)

        print("Running EM algorithm to estimate target priors...")
        estimator = MLLSEstimator(self.config['num_classes'], self.config['em_max_iter'], self.config['em_tol'])
        self.estimated_target_priors = estimator.estimate_target_priors(target_probs, self.effective_source_priors)

        print(f"Estimated Target Priors (q_hat): {np.round(self.estimated_target_priors.detach().cpu().numpy(), 3)}")

        # Assess estimation quality if the true target distribution is known.
        if 'true_target_priors' in self.config:
            self._evaluate_estimation_quality()

        return {"status": "completed"}

    def _prior_distance_metrics(self, estimated_priors, true_priors):
        """Compute distance/divergence measures between two prior vectors.

        Shared by the quality-assessment printout and the comparison table.

        Args:
            estimated_priors: 1-D tensor of estimated class priors.
            true_priors: 1-D tensor of true class priors (same length).

        Returns:
            dict of Python floats keyed by metric name.
        """
        l2_distance = torch.norm(estimated_priors - true_priors, p=2).item()
        l1_distance = torch.norm(estimated_priors - true_priors, p=1).item()
        total_variation = l1_distance / 2

        # Small epsilon to avoid log(0) in the divergences.
        eps = 1e-12
        true_safe = torch.clamp(true_priors, min=eps)
        est_safe = torch.clamp(estimated_priors, min=eps)

        # KL(True || Estimated) and KL(Estimated || True).
        kl_true_to_est = torch.sum(true_safe * torch.log(true_safe / est_safe)).item()
        kl_est_to_true = torch.sum(est_safe * torch.log(est_safe / true_safe)).item()

        # Symmetric Jensen-Shannon divergence via the mixture m.
        m = (true_safe + est_safe) / 2
        js_divergence = (0.5 * torch.sum(true_safe * torch.log(true_safe / m)) +
                         0.5 * torch.sum(est_safe * torch.log(est_safe / m))).item()

        hellinger_distance = torch.norm(torch.sqrt(true_safe) - torch.sqrt(est_safe), p=2).item() / np.sqrt(2)
        max_abs_error = torch.max(torch.abs(estimated_priors - true_priors)).item()
        rmse = torch.sqrt(torch.mean((estimated_priors - true_priors) ** 2)).item()

        return {
            'l2_distance': l2_distance,
            'l1_distance': l1_distance,
            'total_variation': total_variation,
            'kl_true_to_est': kl_true_to_est,
            'kl_est_to_true': kl_est_to_true,
            'js_divergence': js_divergence,
            'hellinger_distance': hellinger_distance,
            'max_abs_error': max_abs_error,
            'rmse': rmse,
        }

    def _evaluate_estimation_quality(self):
        """Assess the quality of the EM-estimated target priors.

        Prints a battery of distances against the known true target
        distribution and returns the main metrics for later analysis.
        """
        true_priors = torch.tensor(self.config['true_target_priors'], device=self.device, dtype=torch.float32)
        estimated_priors = self.estimated_target_priors

        print("\n=== Prior Estimation Quality Assessment ===")
        print(f"True Target Priors:      {np.round(true_priors.detach().cpu().numpy(), 3)}")
        print(f"Estimated Priors (q_hat): {np.round(estimated_priors.detach().cpu().numpy(), 3)}")

        m = self._prior_distance_metrics(estimated_priors, true_priors)

        # 1. L2 norm (Euclidean distance)
        print(f"L2 Distance (Euclidean): {m['l2_distance']:.6f}")

        # 2. L1 norm and total variation distance
        print(f"L1 Distance (Manhattan):  {m['l1_distance']:.6f}")
        print(f"Total Variation Distance: {m['total_variation']:.6f}")

        # 3. KL divergences (both directions)
        print(f"KL Divergence (True||Est): {m['kl_true_to_est']:.6f}")
        print(f"KL Divergence (Est||True): {m['kl_est_to_true']:.6f}")

        # 4. JS divergence (symmetric)
        print(f"JS Divergence (Symmetric): {m['js_divergence']:.6f}")

        # 5. Hellinger distance
        print(f"Hellinger Distance:       {m['hellinger_distance']:.6f}")

        # 6. Maximum absolute error
        print(f"Max Absolute Error:       {m['max_abs_error']:.6f}")

        # 7. Root mean square error (RMSE)
        print(f"Root Mean Square Error:   {m['rmse']:.6f}")

        print("=" * 45)

        # Return the main metrics for downstream analysis.
        return {
            'l2_distance': m['l2_distance'],
            'total_variation': m['total_variation'],
            'kl_divergence': m['kl_est_to_true'],
            'js_divergence': m['js_divergence'],
            'hellinger_distance': m['hellinger_distance'],
            'rmse': m['rmse']
        }

    def evaluate(self):
        """Evaluate the final model on the test set.

        Dispatches to the scheme-specific evaluation and returns its metrics.

        Raises:
            ValueError: if ``config['scheme']`` is unknown.
        """
        scheme = self.config['scheme']
        print(f"\n--- Server: Evaluating Final Model (Scheme {scheme.upper()}) ---")

        self.global_model.eval()
        all_logits, all_labels = get_logits_and_labels(self.global_model, self.test_loader, self.device)

        if scheme == 'scheme1':
            return self._evaluate_scheme1(all_logits, all_labels)
        elif scheme in ['scheme2', 'scheme3']:
            return self._evaluate_with_adaptation(all_logits, all_labels)
        else:
            raise ValueError(f"Unknown scheme: {scheme}")

    def _evaluate_scheme1(self, all_logits, all_labels):
        """Scheme 1 evaluation: plain FedAvg model and EM-adapted model."""
        # 1. Original model (aggregated head, no adaptation).
        original_probs = torch.softmax(all_logits, dim=1)
        _, preds_original = torch.max(all_logits, 1)
        acc_original = torch.sum(preds_original == all_labels).item() / len(all_labels)
        print(f"Accuracy (Original FedAvg Model): {acc_original:.4f}")

        # 2. EM-adapted model: apply label-shift weights w(y) = q_hat(y) / p_eff(y).
        adaptation_weights = self.estimated_target_priors / (self.effective_source_priors + 1e-12)
        adapted_probs = original_probs * adaptation_weights.unsqueeze(0)  # broadcast to [batch_size, num_classes]

        # Renormalize so every row sums to 1.
        adapted_probs = adapted_probs / (adapted_probs.sum(dim=1, keepdim=True) + 1e-12)

        _, preds_adapted = torch.max(adapted_probs, 1)
        acc_adapted = torch.sum(preds_adapted == all_labels).item() / len(all_labels)
        print(f"Accuracy (FedAvg + EM Adapted Model): {acc_adapted:.4f}")

        # Report the adaptation weights.
        print(f"Adaptation Weights: {np.round(adaptation_weights.detach().cpu().numpy(), 3)}")

        # Detailed classification metrics for the plain FedAvg model.
        original_metrics = self._calculate_detailed_metrics(
            all_labels.detach().cpu().numpy(),
            preds_original.detach().cpu().numpy(),
            original_probs.detach().cpu().numpy()
        )

        # Detailed classification metrics for the EM-adapted model.
        adapted_metrics = self._calculate_detailed_metrics(
            all_labels.detach().cpu().numpy(),
            preds_adapted.detach().cpu().numpy(),
            adapted_probs.detach().cpu().numpy()
        )

        # KL divergence of each stage's prior against the true target prior.
        true_priors = torch.tensor(self.config['true_target_priors'], device=self.device, dtype=torch.float32) if 'true_target_priors' in self.config else None

        fedavg_kl = self._calculate_kl_divergence(self.client_weighted_priors, true_priors) if self.client_weighted_priors is not None else None
        adapted_kl = self._calculate_kl_divergence(self.estimated_target_priors, true_priors) if self.estimated_target_priors is not None else None

        return {
            'fedavg': acc_original,
            'adapted': acc_adapted,
            'fedavg_macro_f1': original_metrics['macro_f1'],
            'adapted_macro_f1': adapted_metrics['macro_f1'],
            'fedavg_kl_div': fedavg_kl,
            'adapted_kl_div': adapted_kl,
            **adapted_metrics  # remaining metrics come from the adapted model
        }

    def _evaluate_with_adaptation(self, all_logits, all_labels):
        """Schemes 2 and 3 evaluation: full calibration + adaptation pipeline."""
        # 1. Original model (aggregated head, uncalibrated).
        original_probs = torch.softmax(all_logits, dim=1)
        _, preds_original = torch.max(all_logits, 1)
        acc_original = torch.sum(preds_original == all_labels).item() / len(all_labels)
        print(f"Accuracy (Original Aggregated Model): {acc_original:.4f}")

        # 2. After the fused calibrator (calibrated, no label-shift adaptation).
        calibrated_logits = all_logits / self.fused_calibrator['temperature'] + self.fused_calibrator['bias']
        calibrated_probs = torch.softmax(calibrated_logits, dim=1)
        _, preds_calibrated = torch.max(calibrated_logits, 1)
        acc_calibrated = torch.sum(preds_calibrated == all_labels).item() / len(all_labels)
        print(f"Accuracy (After Fused Calibration): {acc_calibrated:.4f}")

        # 3. Final adapted model (calibration + label-shift adaptation):
        #    apply weights w(y) = q_hat(y) / p_eff(y).
        adaptation_weights = self.estimated_target_priors / (self.effective_source_priors + 1e-12)
        adapted_probs = calibrated_probs * adaptation_weights.unsqueeze(0)  # broadcast to [batch_size, num_classes]

        # Renormalize so every row sums to 1.
        adapted_probs = adapted_probs / (adapted_probs.sum(dim=1, keepdim=True) + 1e-12)

        _, preds_adapted = torch.max(adapted_probs, 1)
        acc_adapted = torch.sum(preds_adapted == all_labels).item() / len(all_labels)
        print(f"Accuracy (Final Label-Shift Adapted Model): {acc_adapted:.4f}")

        # Report the adaptation weights.
        print(f"Adaptation Weights: {np.round(adaptation_weights.detach().cpu().numpy(), 3)}")

        # Detailed classification metrics for each stage.
        original_metrics = self._calculate_detailed_metrics(
            all_labels.detach().cpu().numpy(),
            preds_original.detach().cpu().numpy(),
            original_probs.detach().cpu().numpy()
        )

        calibrated_metrics = self._calculate_detailed_metrics(
            all_labels.detach().cpu().numpy(),
            preds_calibrated.detach().cpu().numpy(),
            calibrated_probs.detach().cpu().numpy()
        )

        adapted_metrics = self._calculate_detailed_metrics(
            all_labels.detach().cpu().numpy(),
            preds_adapted.detach().cpu().numpy(),
            adapted_probs.detach().cpu().numpy()
        )

        # KL divergence of each stage's prior against the true target prior.
        true_priors = torch.tensor(self.config['true_target_priors'], device=self.device, dtype=torch.float32) if 'true_target_priors' in self.config else None

        # For Schemes 2/3 the original and calibrated stages share the same
        # effective source prior (calibration does not change the prior).
        original_kl = self._calculate_kl_divergence(self.effective_source_priors, true_priors) if self.effective_source_priors is not None else None
        calibrated_kl = self._calculate_kl_divergence(self.effective_source_priors, true_priors) if self.effective_source_priors is not None else None
        adapted_kl = self._calculate_kl_divergence(self.estimated_target_priors, true_priors) if self.estimated_target_priors is not None else None

        return {
            'original': acc_original,
            'calibrated': acc_calibrated,
            'adapted': acc_adapted,
            'original_macro_f1': original_metrics['macro_f1'],
            'calibrated_macro_f1': calibrated_metrics['macro_f1'],
            'adapted_macro_f1': adapted_metrics['macro_f1'],
            'original_kl_div': original_kl,
            'calibrated_kl_div': calibrated_kl,
            'adapted_kl_div': adapted_kl,
            **adapted_metrics  # remaining metrics come from the adapted model
        }

    def _calculate_detailed_metrics(self, y_true, y_pred, y_prob):
        """Compute detailed classification metrics.

        Args:
            y_true: 1-D array of true labels.
            y_pred: 1-D array of predicted labels.
            y_prob: 2-D array [n_samples, n_classes] of predicted probabilities.

        Returns:
            dict of aggregate and per-class metrics.
        """
        # Fix: under label shift the test set may not contain every class.
        # Passing an explicit label set keeps per-class arrays and the
        # confusion matrix at a stable num_classes shape and prevents
        # log_loss from raising when y_true misses a class.
        n_classes = y_prob.shape[1]
        class_labels = np.arange(n_classes)

        # Per-class precision, recall, F1.
        precision, recall, f1, _ = precision_recall_fscore_support(
            y_true, y_pred, labels=class_labels, average=None, zero_division=0)

        # Macro and micro averages.
        macro_precision, macro_recall, macro_f1, _ = precision_recall_fscore_support(y_true, y_pred, average='macro', zero_division=0)
        micro_precision, micro_recall, micro_f1, _ = precision_recall_fscore_support(y_true, y_pred, average='micro', zero_division=0)

        # Balanced accuracy.
        balanced_acc = balanced_accuracy_score(y_true, y_pred)

        # Top-k accuracy.
        top2_acc = self._calculate_topk_accuracy(y_prob, y_true, k=2)
        top3_acc = self._calculate_topk_accuracy(y_prob, y_true, k=3)

        # Cohen's Kappa.
        kappa = cohen_kappa_score(y_true, y_pred)

        # Cross-entropy loss.
        cross_entropy = log_loss(y_true, y_prob, labels=class_labels)

        # Brier score (multiclass).
        brier_score = self._calculate_multiclass_brier_score(y_true, y_prob)

        # Matthews correlation coefficient (multiclass).
        mcc = matthews_corrcoef(y_true, y_pred)

        # Confusion-matrix-derived metrics.
        cm = confusion_matrix(y_true, y_pred, labels=class_labels)
        per_class_tpr, per_class_fpr = self._calculate_tpr_fpr_from_cm(cm)

        return {
            'macro_precision': macro_precision,
            'micro_precision': micro_precision,
            'macro_recall': macro_recall,
            'micro_recall': micro_recall,
            'macro_f1': macro_f1,
            'micro_f1': micro_f1,
            'balanced_accuracy': balanced_acc,
            'top2_accuracy': top2_acc,
            'top3_accuracy': top3_acc,
            'cohen_kappa': kappa,
            'matthews_corrcoef': mcc,
            'cross_entropy_loss': cross_entropy,
            'brier_score': brier_score,
            'per_class_precision': precision,
            'per_class_recall': recall,
            'per_class_f1': f1,
            'per_class_tpr': per_class_tpr,
            'per_class_fpr': per_class_fpr,
            'confusion_matrix': cm
        }

    def _calculate_topk_accuracy(self, y_prob, y_true, k):
        """Fraction of samples whose true label is among the k most probable."""
        top_k_preds = np.argsort(y_prob, axis=1)[:, -k:]
        correct = 0
        for i, true_label in enumerate(y_true):
            if true_label in top_k_preds[i]:
                correct += 1
        return correct / len(y_true)

    def _calculate_multiclass_brier_score(self, y_true, y_prob):
        """Multiclass Brier score: mean squared distance to the one-hot target."""
        # One-hot encode the labels.
        n_classes = y_prob.shape[1]
        y_true_onehot = np.eye(n_classes)[y_true]

        # Mean over samples of the per-sample squared error sum.
        brier_score = np.mean(np.sum((y_prob - y_true_onehot) ** 2, axis=1))
        return brier_score

    def _calculate_tpr_fpr_from_cm(self, cm):
        """Per-class TPR and FPR from a (square) confusion matrix."""
        n_classes = cm.shape[0]
        tpr = np.zeros(n_classes)
        fpr = np.zeros(n_classes)

        for i in range(n_classes):
            # True positive rate (sensitivity / recall).
            tp = cm[i, i]
            fn = np.sum(cm[i, :]) - tp
            tpr[i] = tp / (tp + fn) if (tp + fn) > 0 else 0

            # False positive rate.
            fp = np.sum(cm[:, i]) - tp
            tn = np.sum(cm) - tp - fp - fn
            fpr[i] = fp / (fp + tn) if (fp + tn) > 0 else 0

        return tpr, fpr

    def _calculate_kl_divergence(self, estimated_priors, true_priors):
        """KL(Estimated || True) between two prior vectors.

        Returns None when the true priors are unknown.
        """
        if true_priors is None:
            return None

        eps = 1e-12
        true_priors_safe = torch.clamp(true_priors, min=eps)
        estimated_priors_safe = torch.clamp(estimated_priors, min=eps)

        # KL(Estimated || True).
        kl_divergence = torch.sum(estimated_priors_safe * torch.log(estimated_priors_safe / true_priors_safe)).item()
        return kl_divergence

    def get_distribution_quality_metrics(self):
        """Distribution-estimation quality metrics for the comparison table.

        Returns:
            dict of metrics, or None when the true target priors are unknown
            or no estimate is available for the configured scheme.
        """
        if 'true_target_priors' not in self.config:
            return None

        true_priors = torch.tensor(self.config['true_target_priors'], device=self.device, dtype=torch.float32)

        # Pick the estimated distribution according to the scheme.
        scheme = self.config['scheme']
        if scheme == 'scheme1':
            if self.client_weighted_priors is None:
                return None
            estimated_priors = self.client_weighted_priors
            method = "Client Weighted Priors"
        elif scheme in ['scheme2', 'scheme3']:
            if self.estimated_target_priors is None:
                return None
            estimated_priors = self.estimated_target_priors
            method = "EM Algorithm"
        else:
            return None

        # Compute the shared suite of distance measures.
        m = self._prior_distance_metrics(estimated_priors, true_priors)

        return {
            'method': method,
            'estimated_priors': estimated_priors.detach().cpu().numpy(),
            'l2_distance': m['l2_distance'],
            'total_variation': m['total_variation'],
            'kl_divergence': m['kl_est_to_true'],
            'js_divergence': m['js_divergence'],
            'hellinger_distance': m['hellinger_distance'],
            'max_abs_error': m['max_abs_error'],
            'rmse': m['rmse']
        }
