import numpy as np
import torch
from torchvision import datasets, transforms
from torch.utils.data import DataLoader, Subset, Dataset

def load_datasets(dataset_name, num_clients, non_iid_degree=0.8, val_ratio=0.1):
    """
    Load MNIST or CIFAR-10 and partition the training data into non-IID
    client shards via a Dirichlet distribution (paper's experimental setup).

    Args:
        dataset_name: 'MNIST' or 'CIFAR-10'.
        num_clients: number of federated clients.
        non_iid_degree: Dirichlet concentration parameter (0.0~1.0); lower
            values produce a more skewed per-client label distribution.
        val_ratio: fraction of the training split held out for validation.

    Returns:
        client_datasets: list of per-client training Subsets.
        val_dataset: validation Subset carved out of the training split.
        testset: the official test split.

    Raises:
        ValueError: if dataset_name is not one of the supported names.
    """
    # Dataset-specific preprocessing (standard normalization constants).
    if dataset_name == "MNIST":
        transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.1307,), (0.3081,))
        ])
        dataset = datasets.MNIST(root='./data', train=True, download=True, transform=transform)
        testset = datasets.MNIST(root='./data', train=False, download=True, transform=transform)
        num_classes = 10
    elif dataset_name == "CIFAR-10":
        transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
        ])
        dataset = datasets.CIFAR10(root='./data', train=True, download=True, transform=transform)
        testset = datasets.CIFAR10(root='./data', train=False, download=True, transform=transform)
        num_classes = 10
    else:
        raise ValueError("不支持的数据库名称")

    # Hold out the first `val_ratio` fraction of samples for validation.
    # NOTE(review): this split is positional, not shuffled — assumes the raw
    # training set is not ordered by label; confirm for new datasets.
    num_val = int(len(dataset) * val_ratio)
    all_indices = list(range(len(dataset)))
    val_indices = all_indices[:num_val]
    train_indices = all_indices[num_val:]

    # MNIST stores targets as a tensor, CIFAR-10 as a list; np.array handles both.
    labels = np.array(dataset.targets)
    train_labels = labels[train_indices]

    # Dirichlet-based non-IID partition. Retry the whole assignment until
    # every client receives at least 10 samples, so no client starves.
    min_size = 0
    while min_size < 10:
        idx_batch = [[] for _ in range(num_clients)]
        for k in range(num_classes):
            # Positions (within train_indices) of all class-k samples.
            idx_k = np.where(train_labels == k)[0]
            np.random.shuffle(idx_k)
            proportions = np.random.dirichlet(np.repeat(non_iid_degree, num_clients))
            # Zero out clients that already hold their fair share so the
            # partition stays roughly balanced in total size.
            proportions = np.array([
                p * (len(idx_j) < len(train_indices) / num_clients)
                for p, idx_j in zip(proportions, idx_batch)
            ])
            total = proportions.sum()
            if total == 0:
                # All clients are already full: fall back to a uniform split
                # instead of dividing by zero (which would yield NaNs and
                # crash at the astype(int) below).
                proportions = np.full(num_clients, 1.0 / num_clients)
            else:
                proportions = proportions / total
            # Cumulative counts become the split points for np.split.
            split_points = (np.cumsum(proportions) * len(idx_k)).astype(int)[:-1]
            idx_batch = [
                idx_j + idx.tolist()
                for idx_j, idx in zip(idx_batch, np.split(idx_k, split_points))
            ]
            min_size = min(len(idx_j) for idx_j in idx_batch)

    # Map per-client positions back to dataset indices and wrap as Subsets.
    client_datasets = []
    for i in range(num_clients):
        client_indices = [train_indices[j] for j in idx_batch[i]]
        client_datasets.append(Subset(dataset, client_indices))

    val_dataset = Subset(dataset, val_indices)

    return client_datasets, val_dataset, testset

def get_data_loaders(client_datasets, batch_size=64, val_dataset=None):
    """
    Build DataLoaders for the client datasets and an optional validation set.

    Args:
        client_datasets: list of per-client datasets.
        batch_size: mini-batch size for every loader.
        val_dataset: optional validation dataset.

    Returns:
        client_loaders: list of DataLoaders, one per client (shuffled).
        val_loader: DataLoader over val_dataset (unshuffled), or None when
            no validation set was supplied.
    """
    # Shuffle each client's training data every epoch.
    client_loaders = [
        DataLoader(ds, batch_size=batch_size, shuffle=True)
        for ds in client_datasets
    ]

    # Compare against None rather than relying on truthiness: a Dataset with
    # __len__() == 0 is falsy, so `if val_dataset:` would silently drop a
    # supplied-but-empty validation set.
    val_loader = None
    if val_dataset is not None:
        val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False)

    return client_loaders, val_loader
