# main_federated.py
# Main entry point for the federated label-shift adaptation experiments;
# supports running the three schemes individually or as a comparison.

import torch
import numpy as np
import random
import time
import argparse
from datetime import datetime

# 导入自定义模块
from config import get_config
from data_handler_federated import prepare_federated_data
from models import ClipClassifier
from client import Client
from server import Server
from utils import display_results

def run_single_scheme(scheme_name, global_seed=None):
    """Run a single scheme's experiment and return its results.

    Thin wrapper around run_single_scheme_with_server that discards the
    server instance, for callers that only need the metrics.

    Args:
        scheme_name: One of 'scheme1', 'scheme2', 'scheme3'.
        global_seed: Optional seed overriding the config's seed so that
            several schemes in one run share identical randomness.

    Returns:
        (results, config): the metrics dict (including 'execution_time'
        and 'scheme') and the fully resolved config dict.
    """
    # The previous implementation duplicated run_single_scheme_with_server
    # line-for-line; delegate instead so the two cannot drift apart.
    results, config, _server = run_single_scheme_with_server(scheme_name, global_seed)
    return results, config

def run_single_scheme_with_server(scheme_name, global_seed=None):
    """Run one scheme end-to-end and return (results, config, server).

    The Server instance is returned alongside the metrics so callers can
    query it afterwards (e.g. for distribution-estimation quality).

    Args:
        scheme_name: One of 'scheme1', 'scheme2', 'scheme3'.
        global_seed: Optional seed overriding the config's seed so that
            several schemes in one run share identical randomness.
    """
    banner = '=' * 80
    print("\n" + banner)
    print(" " * 30 + f"RUNNING {scheme_name.upper()}")
    print(banner)

    start_time = time.time()

    # Load the scheme-specific config; an externally supplied seed wins so
    # that every scheme in a comparison run sees the same randomness.
    config = get_config(dataset_name='CIFAR10', scheme=scheme_name)
    if global_seed is not None:
        config['seed'] = global_seed
        print(f"🎲 Using global seed: {global_seed}")

    # Seed every RNG we rely on, for reproducibility.
    seed = config['seed']
    torch.manual_seed(seed)
    np.random.seed(seed)
    random.seed(seed)

    # Build the federated data loaders (config may gain derived fields here,
    # hence the reassignment).
    client_dataloaders, server_unlabeled_loader, test_loader, config = prepare_federated_data(config)

    # Global model shared by server and clients.
    global_model = ClipClassifier(
        model_name=config['base_model_name'],
        num_classes=config['num_classes'],
        device=config['device']
    )

    # One Client per partition, plus the coordinating Server.
    clients = [
        Client(client_id=cid, dataloaders=client_dataloaders[cid], config=config)
        for cid in range(config['num_clients'])
    ]
    server = Server(global_model, server_unlabeled_loader, test_loader, config)

    # Dispatch to the per-scheme workflow.
    runners = {
        'scheme1': execute_scheme1,
        'scheme2': execute_scheme2,
        'scheme3': execute_scheme3,
    }
    try:
        runner = runners[scheme_name]
    except KeyError:
        raise ValueError(f"Unknown scheme: {scheme_name}") from None
    results = runner(clients, server)

    # Record wall-clock time and which scheme produced these results.
    elapsed = time.time() - start_time
    results['execution_time'] = elapsed
    results['scheme'] = scheme_name

    print(f"\n{scheme_name.upper()} completed in {elapsed:.2f} seconds")

    return results, config, server

def execute_scheme1(clients, server):
    """Run Scheme 1: plain FedAvg — train, aggregate, evaluate (no EM)."""
    print("\n--- Scheme 1: Traditional FedAvg ---")
    print("- Clients train classifier on full local data")
    print("- Server aggregates classifiers")
    print("- Direct evaluation (no calibration or EM)")

    # Phase 1: each client trains locally and returns its classifier-head update.
    print("\n=== Phase 1: Client Training ===")
    client_updates = [c.execute_scheme(server.global_model) for c in clients]

    # Phase 2: the server aggregates the collected classifier heads.
    print("\n=== Phase 2: Server Aggregation ===")
    server.execute_scheme(client_updates)

    # Phase 3: evaluate the aggregated model on the test set.
    print("\n=== Phase 3: Evaluation ===")
    return server.evaluate()

def execute_scheme2(clients, server):
    """Run Scheme 2: label-shift adaptation with two communication rounds."""
    print("\n--- Scheme 2: Two-Round Label Shift Adaptation ---")
    print("- Round 1: Clients train classifiers → Server aggregates → Distribute global classifier")
    print("- Round 2: Clients calibrate with global classifier → Server runs EM algorithm")

    # Round 1: clients train local classifier heads; the server aggregates
    # them and hands back the global classifier parameters for distribution.
    print("\n=== ROUND 1: Classifier Training and Aggregation ===")

    print("\n--- Phase 1.1: Client Local Classifier Training ---")
    round1_updates = [c.execute_scheme(server.global_model) for c in clients]

    print("\n--- Phase 1.2: Server Classifier Aggregation ---")
    aggregation = server.execute_scheme(round1_updates, round_num=1)
    global_classifier_params = aggregation['global_classifier_params']

    # Round 2: clients calibrate against the distributed global classifier;
    # the server fuses the calibration parameters and runs the EM algorithm.
    print("\n=== ROUND 2: Calibration and EM Algorithm ===")

    print("\n--- Phase 2.1: Client Calibration with Global Classifier ---")
    round2_updates = [
        c.execute_scheme(server.global_model, global_classifier_params)
        for c in clients
    ]

    print("\n--- Phase 2.2: Server Calibration Aggregation and EM ---")
    server.execute_scheme(round2_updates, round_num=2)

    # Final evaluation of the adapted model.
    print("\n=== Phase 3: Final Evaluation ===")
    return server.evaluate()

def execute_scheme3(clients, server):
    """Run Scheme 3: label-shift adaptation in a single communication round."""
    print("\n--- Scheme 3: One-Round Label Shift Adaptation ---")
    print("- Clients train local classifiers + calibrate with local classifiers")
    print("- Server aggregates both classifier and calibration parameters")
    print("- Server runs EM algorithm for label shift adaptation")

    # Everything happens in one round: clients train AND calibrate locally,
    # then upload a combined update.
    print("\n=== SINGLE ROUND: Training, Calibration, and Aggregation ===")

    print("\n--- Phase 1: Client Local Training and Calibration ---")
    client_updates = [c.execute_scheme(server.global_model) for c in clients]

    # The server aggregates classifier + calibration parameters and runs EM.
    print("\n--- Phase 2: Server Aggregation and EM Algorithm ---")
    server.execute_scheme(client_updates)

    # Final evaluation of the adapted model.
    print("\n=== Phase 3: Final Evaluation ===")
    return server.evaluate()

def display_single_results(results, config):
    """Pretty-print the final metrics of a single scheme run.

    Args:
        results: metrics dict produced by the run (must include
            'execution_time' plus the scheme's accuracy keys).
        config: resolved config dict (must include 'scheme', 'dataset',
            'num_clients', 'true_target_priors').
    """
    scheme = config['scheme']
    rule = '=' * 70
    thin = '-' * 70

    print(f"\n\n{rule}")
    print(" " * 20 + f"{scheme.upper()} FINAL RESULTS")
    print(rule)
    print(f"Dataset: {config['dataset']}")
    print(f"Number of Clients: {config['num_clients']}")
    print(f"Execution Time: {results['execution_time']:.2f} seconds")

    if scheme == 'scheme1':
        # Scheme 1 reports FedAvg accuracy before and after EM adaptation.
        print("Method: Traditional FedAvg + EM Label Shift Adaptation")
        print(f"True Target Priors: {np.round(config['true_target_priors'], 3)}")
        print(thin)
        print(f"  - Accuracy (Original FedAvg Model):         {results['fedavg']:.4f}")
        print(f"  - Accuracy (FedAvg + EM Adapted Model):     {results['adapted']:.4f}")
        print(thin)
        delta = results['adapted'] - results['fedavg']
        print(f"  - Improvement over Original FedAvg: {delta:+.4f}")
    else:
        # Schemes 2/3 report three stages: original, calibrated, adapted.
        method = "Two-Round" if scheme == 'scheme2' else "One-Round"
        print(f"Method: {method} Label Shift Adaptation")
        print(f"True Target Priors: {np.round(config['true_target_priors'], 3)}")
        print(thin)
        print(f"  - Accuracy (Original Aggregated Model):    {results['original']:.4f}")
        print(f"  - Accuracy (After Fused Calibration):      {results['calibrated']:.4f}")
        print(f"  - Accuracy (Final Label-Shift Adapted):    {results['adapted']:.4f}")
        print(thin)
        delta = results['adapted'] - results['original']
        print(f"  - Improvement over Original: {delta:+.4f}")

    print(rule)

def _final_accuracy(results):
    """Return the best-available final accuracy from a scheme's results dict.

    Prefers the label-shift adapted accuracy, then the FedAvg accuracy
    (scheme 1), then the original aggregated accuracy (schemes 2/3).
    The previous code always fell back to results['fedavg'], which raises
    KeyError for schemes 2/3 whose pre-adaptation key is 'original'.
    """
    for key in ('adapted', 'fedavg', 'original'):
        if key in results:
            return results[key]
    return 0.0


def display_comparison_results(all_results, all_servers=None):
    """Print the full comparison report across the three schemes.

    Args:
        all_results: dict mapping 'scheme1'/'scheme2'/'scheme3' to the
            results dict returned by the corresponding run.
        all_servers: optional dict of Server instances, used for the
            distribution-estimation quality table (Table 1).
    """
    print(f"\n\n{'='*120}")
    print(f" " * 45 + "COMPREHENSIVE COMPARISON RESULTS")
    print(f"{'='*120}")
    print(f"Timestamp: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
    # NOTE(review): dataset/client count are hard-coded here — presumably they
    # should come from the run config; confirm before relying on this line.
    print(f"Dataset: CIFAR10, Clients: 3")
    print(f"{'='*120}")

    # Table 1: distribution estimation quality (needs the server instances).
    if all_servers:
        display_distribution_quality_table(all_servers)

    # Table 2: classification performance.
    display_classification_performance_table(all_results)

    # Quick headline summary.
    print(f"\n{'='*60}")
    print(f" " * 20 + "QUICK SUMMARY")
    print(f"{'='*60}")

    scheme1_acc = _final_accuracy(all_results['scheme1'])
    scheme2_acc = _final_accuracy(all_results['scheme2'])
    scheme3_acc = _final_accuracy(all_results['scheme3'])

    print(f"Accuracy: Scheme1={scheme1_acc:.4f}, Scheme2={scheme2_acc:.4f}, Scheme3={scheme3_acc:.4f}")
    print(f"Time: Scheme1={all_results['scheme1']['execution_time']:.1f}s, "
          f"Scheme2={all_results['scheme2']['execution_time']:.1f}s, "
          f"Scheme3={all_results['scheme3']['execution_time']:.1f}s")

    # Highlight the winner by final accuracy.
    best_acc_scheme = max([('Scheme1', scheme1_acc), ('Scheme2', scheme2_acc), ('Scheme3', scheme3_acc)], key=lambda x: x[1])
    print(f"Best Performance: {best_acc_scheme[0]} with {best_acc_scheme[1]:.4f} accuracy")

    # Table 3: per-stage breakdown.
    display_detailed_stage_comparison_table(all_results, all_servers)

    print(f"{'='*120}")

def display_distribution_quality_table(all_servers):
    """Print Table 1: how well each scheme estimated the target label distribution.

    Args:
        all_servers: dict mapping scheme name to its Server instance; each
            server's get_distribution_quality_metrics() supplies one row
            (servers returning a falsy value are skipped).
    """
    wide = '=' * 120
    thin = '-' * 120

    print(f"\n📊 TABLE 1: DISTRIBUTION ESTIMATION QUALITY")
    print(wide)

    # Column headers.
    print(f"{'Scheme':<10} {'Method':<20} {'L2 Dist':<10} {'Total Var':<10} {'KL Div':<10} {'JS Div':<10} {'Hellinger':<10} {'Max Error':<10} {'RMSE':<10}")
    print(thin)

    # One row per scheme, in the same column order as the header.
    metric_keys = ('l2_distance', 'total_variation', 'kl_divergence',
                   'js_divergence', 'hellinger_distance', 'max_abs_error', 'rmse')
    for scheme_name, server in all_servers.items():
        metrics = server.get_distribution_quality_metrics()
        if not metrics:
            continue
        numbers = " ".join(f"{metrics[key]:<10.6f}" for key in metric_keys)
        print(f"{scheme_name.upper():<10} {metrics['method']:<20} " + numbers)

    print(thin)
    print("📝 Note: Lower values indicate better distribution estimation quality")
    print("   - L2 Dist: Euclidean distance")
    print("   - Total Var: Total variation distance")  
    print("   - KL Div: Kullback-Leibler divergence")
    print("   - JS Div: Jensen-Shannon divergence")
    print("   - Hellinger: Hellinger distance")

def display_classification_performance_table(all_results):
    """Print Table 2: main and advanced classification metrics per scheme.

    Args:
        all_results: dict mapping scheme name to its results dict; missing
            metric keys default to 0 in the printed rows.
    """
    print(f"\n📈 TABLE 2: CLASSIFICATION PERFORMANCE COMPARISON")
    print(f"{'='*160}")

    # --- Main metrics table ---
    print(f"{'Scheme':<10} {'Method':<20} {'Accuracy':<10} {'Macro-P':<10} {'Micro-P':<10} "
          f"{'Macro-R':<10} {'Micro-R':<10} {'Macro-F1':<10} {'Micro-F1':<10} {'Balanced':<10}")
    print(f"{'-'*160}")

    method_names = {
        'scheme1': "FedAvg + EM Adapt",
        'scheme2': "Two-Round Adapt",
        'scheme3': "One-Round Adapt",
    }
    for scheme_name, results in all_results.items():
        # Final accuracy: prefer the adapted model; fall back to the scheme's
        # pre-adaptation key. (The old code's two if/else branches computed the
        # same value and always fell back to results['fedavg'], which raises
        # KeyError for schemes 2/3 whose pre-adaptation key is 'original'.)
        main_acc = results.get('adapted',
                               results.get('fedavg', results.get('original', 0)))
        method = method_names.get(scheme_name, "One-Round Adapt")

        print(f"{scheme_name.upper():<10} {method:<20} {main_acc:<10.4f} "
              f"{results.get('macro_precision', 0):<10.4f} {results.get('micro_precision', 0):<10.4f} "
              f"{results.get('macro_recall', 0):<10.4f} {results.get('micro_recall', 0):<10.4f} "
              f"{results.get('macro_f1', 0):<10.4f} {results.get('micro_f1', 0):<10.4f} "
              f"{results.get('balanced_accuracy', 0):<10.4f}")

    print(f"{'-'*160}")

    # --- Advanced metrics table ---
    print(f"\n📊 ADVANCED METRICS")
    print(f"{'='*120}")
    print(f"{'Scheme':<10} {'Top-2':<10} {'Top-3':<10} {'Kappa':<10} {'MCC':<10} "
          f"{'Cross-Ent':<12} {'Brier':<10} {'Avg-TPR':<10} {'Avg-FPR':<10}")
    print(f"{'-'*120}")

    for scheme_name, results in all_results.items():
        # Mean TPR/FPR across classes; 0 when the per-class lists are absent.
        avg_tpr = np.mean(results.get('per_class_tpr', [0])) if 'per_class_tpr' in results else 0
        avg_fpr = np.mean(results.get('per_class_fpr', [0])) if 'per_class_fpr' in results else 0

        print(f"{scheme_name.upper():<10} {results.get('top2_accuracy', 0):<10.4f} "
              f"{results.get('top3_accuracy', 0):<10.4f} {results.get('cohen_kappa', 0):<10.4f} "
              f"{results.get('matthews_corrcoef', 0):<10.4f} {results.get('cross_entropy_loss', 0):<12.4f} "
              f"{results.get('brier_score', 0):<10.4f} {avg_tpr:<10.4f} {avg_fpr:<10.4f}")

    print(f"{'-'*120}")

    # Optional per-class breakdown.
    display_per_class_metrics(all_results)

    print("📝 Note: Higher values are better except for Cross-Entropy Loss, Brier Score, and FPR (lower is better)")
    print("   - Macro-P/R/F1: Macro-averaged Precision/Recall/F1-Score")
    print("   - Micro-P/R/F1: Micro-averaged Precision/Recall/F1-Score")
    print("   - Balanced: Balanced accuracy for imbalanced datasets") 
    print("   - Top-k: Top-k accuracy (prediction in top k classes)")
    print("   - Kappa: Cohen's kappa coefficient")
    print("   - MCC: Matthews Correlation Coefficient")
    print("   - Cross-Ent: Cross-entropy loss")
    print("   - Brier: Brier score for probability calibration")
    print("   - TPR/FPR: True/False Positive Rate (averaged across classes)")

def display_per_class_metrics(all_results):
    """Print per-class precision/recall/F1 for every scheme that reports them.

    Schemes whose results lack 'per_class_f1' are skipped entirely.
    """
    print(f"\n📋 PER-CLASS METRICS SUMMARY")
    print(f"{'='*100}")

    # CIFAR10 class labels, in label-index order.
    class_names = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']

    for scheme_name, results in all_results.items():
        if 'per_class_f1' not in results:
            continue
        f1_scores = results['per_class_f1']
        precisions = results.get('per_class_precision', [0]*10)
        recalls = results.get('per_class_recall', [0]*10)

        print(f"\n{scheme_name.upper()} - Per-Class Performance:")
        print(f"{'Class':<8} {'Precision':<10} {'Recall':<10} {'F1-Score':<10}")
        print(f"{'-'*45}")

        # Only print rows for classes actually covered by the F1 list.
        for idx, label in enumerate(class_names):
            if idx < len(f1_scores):
                print(f"{label:<8} {precisions[idx]:<10.4f} "
                      f"{recalls[idx]:<10.4f} {f1_scores[idx]:<10.4f}")

    print(f"{'='*100}")

def display_detailed_stage_comparison_table(all_results, all_servers):
    """Print Table 3: accuracy / KL divergence / macro-F1 for every stage of every scheme.

    Args:
        all_results: dict mapping scheme name to its results dict. Scheme 1
            contributes two rows (FedAvg, FedAvg+EM); schemes 2/3 contribute
            three (original, calibrated, full pipeline).
        all_servers: accepted for interface compatibility; not used here.
    """
    print(f"\n📋 TABLE 3: DETAILED STAGE COMPARISON")
    print(f"{'='*100}")

    # Column headers.
    print(f"{'Scheme':<10} {'Method':<25} {'Accuracy':<12} {'KL Div':<12} {'Macro-F1':<12}")
    print(f"{'-'*100}")

    def print_stage(scheme_name, label, acc, kl, f1):
        # One table row; the KL divergence may be missing, rendered as "N/A".
        # (The old code duplicated this formatting five times.)
        kl_str = f"{kl:.6f}" if kl is not None else "N/A"
        print(f"{scheme_name.upper():<10} {label:<25} {acc:<12.4f} {kl_str:<12} {f1:<12.4f}")

    for scheme_name, results in all_results.items():
        if scheme_name == 'scheme1':
            # Scheme 1: before and after EM adaptation.
            print_stage(scheme_name, 'FedAvg', results['fedavg'],
                        results.get('fedavg_kl_div'),
                        results.get('fedavg_macro_f1', 0))
            print_stage(scheme_name, 'FedAvg+EM', results['adapted'],
                        results.get('adapted_kl_div'),
                        results.get('adapted_macro_f1', results.get('macro_f1', 0)))
        elif scheme_name in ['scheme2', 'scheme3']:
            # Schemes 2/3: aggregated model, calibrated-only, and full pipeline.
            print_stage(scheme_name, 'FedAvg(half data)', results['original'],
                        results.get('original_kl_div'),
                        results.get('original_macro_f1', 0))
            print_stage(scheme_name, 'Calib_T_b without EM', results['calibrated'],
                        results.get('calibrated_kl_div'),
                        results.get('calibrated_macro_f1', 0))
            print_stage(scheme_name, 'FULL', results['adapted'],
                        results.get('adapted_kl_div'),
                        results.get('adapted_macro_f1', results.get('macro_f1', 0)))

    print(f"{'-'*100}")
    print("📝 Note:")
    print("   - KL Div: Kullback-Leibler divergence between estimated and true target distribution")
    print("     • FedAvg stages: Uses client weighted priors vs true distribution")
    print("     • Calib stages: Uses effective source priors vs true distribution") 
    print("     • EM stages: Uses EM estimated priors vs true distribution")
    print("   - Macro-F1: Macro-averaged F1-score across all classes")
    print("   - Lower KL Div values indicate better distribution estimation")

def main():
    """CLI entry point: run one scheme, or all three for comparison.

    Command line:
        --scheme {scheme1,scheme2,scheme3,all}   default: 'all'

    When running 'all', every scheme shares one freshly generated random
    seed so the comparison is fair.
    """
    parser = argparse.ArgumentParser(description='联邦化标签偏移适应实验')
    parser.add_argument('--scheme', type=str, choices=['scheme1', 'scheme2', 'scheme3', 'all'], 
                        default='all', help='运行的方案 (scheme1, scheme2, scheme3, 或 all 进行对比)')

    args = parser.parse_args()

    # One shared seed per invocation so every scheme sees identical randomness.
    # (The old code redundantly re-imported `random` here; it is already
    # imported at module level.)
    global_seed = random.randint(1, 9999)
    print(f"🎲 Generated random seed for this run: {global_seed}")
    print(f"   All schemes in this run will use the same seed for fair comparison")

    if args.scheme == 'all':
        # Run all three schemes sequentially and compare them.
        print("Starting Comprehensive Comparison of Three Schemes...")
        print("This will run Scheme 1, Scheme 2, and Scheme 3 sequentially.")

        all_results = {}
        all_servers = {}  # servers kept for distribution-quality evaluation

        for scheme in ['scheme1', 'scheme2', 'scheme3']:
            try:
                results, config, server = run_single_scheme_with_server(scheme, global_seed)
                all_results[scheme] = results
                all_servers[scheme] = server
            except Exception as e:
                # Best-effort: a failing scheme should not abort the others.
                print(f"Error running {scheme}: {e}")
                continue

        # Only produce the comparison report when every scheme succeeded.
        if len(all_results) == 3:
            display_comparison_results(all_results, all_servers)
        else:
            print("Not all schemes completed successfully. Cannot generate comparison.")
            for scheme, results in all_results.items():
                print(f"{scheme}: {results}")

    else:
        # Run a single scheme and show its results.
        results, config = run_single_scheme(args.scheme, global_seed)
        display_single_results(results, config)

# Script guard: run the CLI only when executed directly, not on import.
if __name__ == '__main__':
    main()
