"""Dataset partitioning for federated learning (CIFAR-10) with IID / non-IID options."""
from typing import Dict, List, Tuple
import numpy as np
import torch
from torch.utils.data import DataLoader, Subset, TensorDataset

try:  # Optional dependency handling
    from torchvision import datasets, transforms  # type: ignore
except Exception:  # pragma: no cover
    datasets = None  # type: ignore
    transforms = None  # type: ignore


def get_cifar10_datasets(root: str = "./data"):
    """Download (if needed) and return the CIFAR-10 train/test datasets, normalized.

    Raises RuntimeError when torchvision is unavailable (use the synthetic path instead).
    """
    if datasets is None or transforms is None:
        raise RuntimeError("torchvision not installed; use synthetic dataset with synthetic=True")
    # Per-channel mean/std of the CIFAR-10 training split.
    normalize = transforms.Normalize(
        (0.4914, 0.4822, 0.4465), (0.2470, 0.2435, 0.2616)
    )
    pipeline = transforms.Compose([transforms.ToTensor(), normalize])
    train_set = datasets.CIFAR10(root=root, train=True, download=True, transform=pipeline)
    test_set = datasets.CIFAR10(root=root, train=False, download=True, transform=pipeline)
    return train_set, test_set


def _dirichlet_partition(labels: np.ndarray, num_clients: int, alpha: float) -> List[List[int]]:
    num_classes = labels.max() + 1
    idx_by_class = [np.where(labels == c)[0] for c in range(num_classes)]
    client_indices = [[] for _ in range(num_clients)]
    for c, idxs in enumerate(idx_by_class):
        np.random.shuffle(idxs)
        proportions = np.random.dirichlet(alpha=[alpha]*num_clients)
        proportions = (np.cumsum(proportions) * len(idxs)).astype(int)[:-1]
        split = np.split(idxs, proportions)
        for client_id, part in enumerate(split):
            client_indices[client_id].extend(part.tolist())
    return client_indices


def partition_dataset(train_dataset, num_clients: int, non_iid: bool = False, alpha: float = 0.5):
    """Return a list of per-client index lists covering the whole training set.

    non_iid=True uses a Dirichlet(alpha) label-skewed split; otherwise all
    indices are shuffled and divided into near-equal IID shards.
    """
    label_array = np.array(train_dataset.targets)
    if non_iid:
        return _dirichlet_partition(label_array, num_clients, alpha)
    perm = np.arange(len(train_dataset))
    np.random.shuffle(perm)
    return [shard.tolist() for shard in np.array_split(perm, num_clients)]


def _synthetic_dataset(num_samples: int = 1024, num_classes: int = 10):
    X = torch.randn(num_samples, 3, 32, 32)
    y = torch.randint(0, num_classes, (num_samples,))
    dataset = TensorDataset(X, y)
    # add targets attribute to match CIFAR dataset expectations
    dataset.targets = y.tolist()  # type: ignore
    return dataset

def get_client_dataloaders(num_clients: int, batch_size: int = 32, non_iid: bool = False, alpha: float = 0.5, synthetic: bool = False, synth_train_per_client: int = 256, synth_test_size: int = 512):
    """Create per-client train DataLoaders plus a shared test DataLoader.

    synthetic: use random tensors instead of downloading CIFAR (fast smoke test)
    synth_train_per_client: number of synthetic samples per client
    """
    if synthetic:
        train_set = _synthetic_dataset(synth_train_per_client * num_clients)
        test_set = _synthetic_dataset(synth_test_size)
    else:
        train_set, test_set = get_cifar10_datasets()
    assignments = partition_dataset(train_set, num_clients, non_iid, alpha)
    client_loaders = {
        cid: DataLoader(Subset(train_set, idxs), batch_size=batch_size, shuffle=True)
        for cid, idxs in enumerate(assignments)
    }
    test_loader = DataLoader(test_set, batch_size=256, shuffle=False)
    return client_loaders, test_loader


def simulate_local_update(model, dataloader, epochs: int = 1, lr: float = 0.01, device: str = 'cpu'):
    """Run local SGD epochs and return ``(delta, num_samples)`` for FedAvg-style aggregation.

    The model is trained in place. ``delta`` maps each state-dict key to
    ``trained - initial`` so a server can aggregate client updates weighted by
    ``num_samples`` (the size of the client's dataset).
    """
    import copy
    import torch.nn as nn
    import torch.optim as optim

    # Move the model BEFORE snapshotting and before building the optimizer:
    # the original snapshotted on CPU and then moved, so for any non-CPU
    # device the delta subtraction below failed with a device mismatch.
    model.to(device)
    original_state = copy.deepcopy(model.state_dict())

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(), lr=lr, momentum=0.9)
    model.train()
    for _ in range(epochs):
        for images, labels in dataloader:
            images, labels = images.to(device), labels.to(device)
            optimizer.zero_grad()
            loss = criterion(model(images), labels)
            loss.backward()
            optimizer.step()

    # state_dict() tensors are detached by default; no_grad is cheap insurance
    # that the delta never drags autograd state along.
    with torch.no_grad():
        updated_state = model.state_dict()
        delta = {k: updated_state[k] - original_state[k] for k in original_state}
    num_samples = len(dataloader.dataset)
    return delta, num_samples
