from torchvision import transforms
from torchvision import datasets
from torch.utils.data import DataLoader
from torch.utils.data import DistributedSampler
import torch
import os

# Use distributed training
################################
from InitProcessGroup import ddp_setup
# gpu_id = 0
# world_size = torch.cuda.device_count()
# ddp_setup(gpu_id, world_size)
################################


batch_size = 64

# Training-time augmentation: random horizontal flip plus colour jitter,
# then resize to 32x32 and convert to a tensor.
# NOTE: a random Pad() step was tried previously but hurt generalisation
# (the padding degraded accuracy), so it remains disabled.
train_transform = transforms.Compose([
    transforms.RandomHorizontalFlip(),
    transforms.ColorJitter(),
    transforms.Resize(32),
    transforms.ToTensor(),
])

# Validation pipeline: no augmentation, tensor conversion only.
val_transform = transforms.Compose([transforms.ToTensor()])

# CIFAR-10 train/val splits; download=True fetches the archive on first use.
# NOTE(review): the root path repeats "code/dataset" twice — confirm this is
# the intended location and not a copy-paste slip.
_CIFAR10_ROOT = "~/code/dataset/code/dataset/cifar10/"

train_dataset = datasets.CIFAR10(
    root=_CIFAR10_ROOT,
    train=True,
    download=True,
    transform=train_transform,
)
val_dataset = datasets.CIFAR10(
    root=_CIFAR10_ROOT,
    train=False,
    download=True,
    transform=val_transform,
)

# Plain single-process loaders (no DistributedSampler).
train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size)
val_dataloader = DataLoader(val_dataset, shuffle=False, batch_size=batch_size)


def get_dataloaders(batch_size):
    """Build DDP-aware train/val DataLoaders over the module-level datasets.

    Each loader uses a ``DistributedSampler`` so every process in the
    process group sees a disjoint shard of the data. The caller is
    responsible for calling ``sampler.set_epoch(epoch)`` on the train
    sampler each epoch to reshuffle across epochs.

    Args:
        batch_size: per-process batch size.

    Returns:
        Tuple ``(train_dataloader, val_dataloader)``.
    """
    distributed_train_dataloader = DataLoader(
        train_dataset,
        batch_size=batch_size,
        pin_memory=True,
        # Shuffling is delegated to the sampler; DataLoader shuffle must
        # stay off when a sampler is supplied.
        shuffle=False,
        sampler=DistributedSampler(train_dataset),
    )
    distributed_val_dataloader = DataLoader(
        val_dataset,
        batch_size=batch_size,
        pin_memory=True,
        shuffle=False,
        # BUG FIX: the original passed `val_dataloader` (a DataLoader) to
        # DistributedSampler; the sampler must wrap the *dataset* itself.
        # shuffle=False: deterministic order for evaluation.
        sampler=DistributedSampler(val_dataset, shuffle=False),
    )
    return distributed_train_dataloader, distributed_val_dataloader
