# data_handler_federated.py
# Loads, partitions, and simulates heterogeneous label shift for a federated setting.

import torch
import numpy as np
from torchvision import datasets, transforms
from torch.utils.data import DataLoader, Subset

def prepare_federated_data(config):
    """
    Prepare the datasets and dataloaders required for federated learning.

    Downloads CIFAR10, partitions the training set across clients according to
    per-client Dirichlet label skews (overlapping or non-overlapping, chosen by
    config['allow_data_overlap']), and splits the test set evenly into a
    server-side "unlabeled" pool (for the EM algorithm) and a final
    evaluation set.

    Args:
        config: dict with keys 'dataset', 'data_path', 'num_clients',
            'num_classes', 'batch_size', 'num_workers', 'scheme',
            'allow_data_overlap', 'clients_dirichlet_alphas', and
            'client_data_sizes'. On return it additionally carries
            'true_target_priors'.

    Returns:
        tuple: (client_dataloaders, server_unlabeled_loader, test_loader, config)
            where client_dataloaders is a list of {'train': DataLoader,
            'calib': DataLoader or None} dicts, one per client.

    Raises:
        NotImplementedError: if config['dataset'] is not 'CIFAR10'.
    """
    print("--- Preparing Federated Data ---")
    dataset_name = config['dataset']
    data_path = config['data_path']

    # CLIP-style preprocessing: 224x224 resize plus CLIP normalization stats.
    transform = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
    ])

    if dataset_name == 'CIFAR10':
        full_train_dataset = datasets.CIFAR10(root=data_path, train=True, download=True, transform=transform)
        test_dataset = datasets.CIFAR10(root=data_path, train=False, download=True, transform=transform)
    else:
        raise NotImplementedError(f"Dataset {dataset_name} is not implemented.")

    # --- Partition training data across clients (overlapping or not) ---
    train_labels = np.array(full_train_dataset.targets)
    num_train_data = len(full_train_dataset)

    if config['allow_data_overlap']:
        client_indices = _create_overlapping_client_data(config, train_labels, num_train_data)
    else:
        client_indices = _create_non_overlapping_client_data(config, train_labels, num_train_data)

    # --- Build per-client dataloaders ---
    client_dataloaders = []
    for i in range(config['num_clients']):
        indices = client_indices[i]
        np.random.shuffle(indices)

        if config['scheme'] == 'scheme1':
            # Scheme 1: all data trains the classifier head; no calibration split.
            train_subset = Subset(full_train_dataset, indices)
            train_loader = DataLoader(train_subset, batch_size=config['batch_size'], shuffle=True, num_workers=config['num_workers'])
            client_dataloaders.append({'train': train_loader, 'calib': None})
        else:
            # Schemes 2/3: split 50/50 into head-training and calibration halves.
            split_point = len(indices) // 2
            train_idx, calib_idx = indices[:split_point], indices[split_point:]

            train_subset = Subset(full_train_dataset, train_idx)
            calib_subset = Subset(full_train_dataset, calib_idx)

            train_loader = DataLoader(train_subset, batch_size=config['batch_size'], shuffle=True, num_workers=config['num_workers'])
            calib_loader = DataLoader(calib_subset, batch_size=config['batch_size'], shuffle=False, num_workers=config['num_workers'])

            client_dataloaders.append({'train': train_loader, 'calib': calib_loader})

    # --- Server-side data: evenly split the test set into two halves ---
    print("\nCreating server's data by evenly splitting test set...")

    # Shuffle all test indices, then cut in half.
    test_indices = np.arange(len(test_dataset))
    np.random.shuffle(test_indices)

    split_point = len(test_indices) // 2
    em_indices = test_indices[:split_point]      # first half: EM algorithm
    eval_indices = test_indices[split_point:]    # second half: final evaluation

    print(f"Total test samples: {len(test_indices)}")
    print(f"EM algorithm samples: {len(em_indices)}")
    print(f"Final evaluation samples: {len(eval_indices)}")

    # Server "unlabeled" loader for EM (labels exist but are not used by EM).
    server_unlabeled_subset = Subset(test_dataset, em_indices)
    server_unlabeled_loader = DataLoader(server_unlabeled_subset, batch_size=config['batch_size'], shuffle=False)

    # Final-evaluation loader.
    eval_subset = Subset(test_dataset, eval_indices)
    test_loader = DataLoader(eval_subset, batch_size=config['batch_size'], shuffle=False)

    # True target-domain priors: a random even split of a balanced test set is
    # uniform over classes.
    # FIX: derive 1/num_classes instead of hard-coding 0.1, which was only
    # correct for exactly 10 classes (priors would not sum to 1 otherwise).
    config['true_target_priors'] = np.full(config['num_classes'], 1.0 / config['num_classes'])

    print(f"Server EM data: {len(em_indices)} samples (has labels but won't use them for EM)")
    print(f"Server eval data: {len(eval_indices)} samples (will use labels for final evaluation)")
    print(f"True Target Priors: {config['true_target_priors']} (uniform distribution)")

    print("\nData preparation complete.")
    return client_dataloaders, server_unlabeled_loader, test_loader, config

def _create_non_overlapping_client_data(config, train_labels, num_train_data):
    """
    创建不重叠的客户端数据分割（原有逻辑）
    """
    print(f"Partitioning non-overlapping data for {config['num_clients']} clients...")
    
    # 首先为每个类别创建不重叠的索引池
    class_indices_pools = {}
    for class_idx in range(config['num_classes']):
        class_mask = (train_labels == class_idx)
        class_all_indices = np.arange(num_train_data)[class_mask]
        np.random.shuffle(class_all_indices)  # 随机打乱该类别的索引
        class_indices_pools[class_idx] = class_all_indices
    
    # 为每个客户端分配数据，确保不重叠
    client_indices = []
    used_indices_per_class = {class_idx: 0 for class_idx in range(config['num_classes'])}
    
    for i in range(config['num_clients']):
        alpha = config['clients_dirichlet_alphas'][i]
        proportions = np.random.dirichlet(alpha=[alpha] * config['num_classes'])
        
        # 确保每个类别至少有最小概率（避免某些类别完全为0）
        min_prob = 0.02  # 最小概率2%
        proportions = np.maximum(proportions, min_prob)
        proportions = proportions / proportions.sum()  # 重新归一化
        
        client_size = config['client_data_sizes'][i]
        indices_for_client = []
        
        print(f"Client {i+1} (alpha={alpha}) target proportions: {np.round(proportions, 3)}")
        
        for class_idx in range(config['num_classes']):
            num_samples_for_class = int(client_size * proportions[class_idx])
            
            # 从该类别的可用索引池中取样本（确保不重叠）
            start_idx = used_indices_per_class[class_idx]
            end_idx = start_idx + num_samples_for_class
            
            available_indices = class_indices_pools[class_idx]
            if end_idx <= len(available_indices):
                # 有足够的样本，无重叠分配
                chosen_indices = available_indices[start_idx:end_idx]
                used_indices_per_class[class_idx] = end_idx
            else:
                # 样本不足，分配剩余所有样本
                chosen_indices = available_indices[start_idx:]
                used_indices_per_class[class_idx] = len(available_indices)
                print(f"  Warning: Class {class_idx} exhausted, got {len(chosen_indices)} instead of {num_samples_for_class}")
            
            indices_for_client.extend(chosen_indices)
        
        client_indices.append(indices_for_client)
        print(f"Client {i+1}: assigned {len(indices_for_client)} samples (no overlap with other clients)")
    
    return client_indices

def _create_overlapping_client_data(config, train_labels, num_train_data):
    """
    创建允许重叠的客户端数据分割（新增逻辑）
    """
    print(f"Partitioning overlapping data for {config['num_clients']} clients...")
    
    # 为每个类别创建索引池（允许重复使用）
    class_indices_pools = {}
    for class_idx in range(config['num_classes']):
        class_mask = (train_labels == class_idx)
        class_all_indices = np.arange(num_train_data)[class_mask]
        class_indices_pools[class_idx] = class_all_indices
    
    client_indices = []
    
    for i in range(config['num_clients']):
        alpha = config['clients_dirichlet_alphas'][i]
        proportions = np.random.dirichlet(alpha=[alpha] * config['num_classes'])
        
        # 确保每个类别至少有最小概率（避免某些类别完全为0）
        min_prob = 0.02  # 最小概率2%
        proportions = np.maximum(proportions, min_prob)
        proportions = proportions / proportions.sum()  # 重新归一化
        
        client_size = config['client_data_sizes'][i]
        indices_for_client = []
        
        print(f"Client {i+1} (alpha={alpha}) target proportions: {np.round(proportions, 3)}")
        
        for class_idx in range(config['num_classes']):
            num_samples_for_class = int(client_size * proportions[class_idx])
            
            if num_samples_for_class > 0:
                available_indices = class_indices_pools[class_idx]
                
                if len(available_indices) >= num_samples_for_class:
                    # 有足够样本，随机抽取（允许重叠）
                    chosen_indices = np.random.choice(available_indices, num_samples_for_class, replace=False)
                else:
                    # 样本不足，有放回抽样
                    chosen_indices = np.random.choice(available_indices, num_samples_for_class, replace=True)
                    print(f"  Note: Class {class_idx} has only {len(available_indices)} samples, using replacement sampling for {num_samples_for_class} samples")
                
                indices_for_client.extend(chosen_indices)
        
        client_indices.append(indices_for_client)
        print(f"Client {i+1}: assigned {len(indices_for_client)} samples (overlap allowed)")
    
    return client_indices
