# my_data_loader.py
import torch
import sys
import torch.utils.data
import torchvision
sys.path.append('./extra_utils')
from extra_utils.utils import read_split_data
from extra_utils.my_dataset import MyDataSet
from extra_utils.distributed_utils import reduce_value, is_main_process
from torchvision import datasets, transforms


def get_dataloader(args, conf, rank, dir='./data', name="mnist"):  # `dir` shadows the builtin, kept for caller compatibility
    """Build distributed train/eval dataloaders for CIFAR-10, the flower set, or MNIST.

    Args:
        args: namespace providing ``batch_size`` and ``nw`` (worker count per process).
        conf: dict providing ``world_size`` for the DistributedSampler.
        rank: this process's rank, forwarded to the DistributedSampler.
        dir:  dataset root directory (ignored for ``name == 'flower'``, which
              uses a hard-coded path — NOTE(review): consider honoring ``dir`` there too).
        name: 'cifar', 'flower', or anything else (falls back to MNIST).

    Returns:
        (train_loader, val_loader, train_sampler, train_length, eval_length)
    """

    def _build_loaders(train_dataset, eval_dataset, val_collate_fn=None):
        # Shared distributed-loader construction, previously duplicated in
        # all three dataset branches.
        batch_size = args.batch_size
        if is_main_process():
            print('Using {} dataloader workers every process'.format(args.nw))

        train_sampler = torch.utils.data.distributed.DistributedSampler(
            train_dataset, num_replicas=conf['world_size'], rank=rank)
        train_batch_sampler = torch.utils.data.BatchSampler(
            train_sampler, batch_size, drop_last=True)
        train_loader = torch.utils.data.DataLoader(
            train_dataset, batch_sampler=train_batch_sampler,
            num_workers=args.nw, pin_memory=True)

        # Evaluation iterates the full set once, so no shuffling is needed.
        val_kwargs = dict(batch_size=batch_size, shuffle=False,
                          num_workers=args.nw, pin_memory=True)
        if val_collate_fn is not None:
            val_kwargs['collate_fn'] = val_collate_fn
        val_loader = torch.utils.data.DataLoader(eval_dataset, **val_kwargs)
        return train_loader, val_loader, train_sampler

    val_collate_fn = None

    if name == 'cifar':
        torch.manual_seed(4)  # make the random augmentations reproducible
        # Per-channel (R, G, B) mean/std; Normalize applies (image - mean) / std.
        normalize = transforms.Normalize((0.4914, 0.4822, 0.4465),
                                         (0.2023, 0.1994, 0.2010))
        transform_train = transforms.Compose([
            transforms.RandomCrop(32, padding=4),   # pad 4 px, then random 32x32 crop
            transforms.RandomHorizontalFlip(),      # flip with probability 0.5
            transforms.ToTensor(),
            normalize,
        ])
        transform_test = transforms.Compose([
            transforms.ToTensor(),
            normalize,
        ])
        train_dataset = datasets.CIFAR10(dir, train=True, download=True, transform=transform_train)
        eval_dataset = datasets.CIFAR10(dir, train=False, download=True, transform=transform_test)

    elif name == 'flower':
        dir = "./data/flower_data/flower_data/flower_photos"
        train_info, val_info, _num_classes = read_split_data(dir)  # class count unused here
        train_images_path, train_images_label = train_info
        val_images_path, val_images_label = val_info

        torch.manual_seed(4)  # make the random augmentations reproducible
        data_transform = {
            "train": transforms.Compose([transforms.RandomResizedCrop(224),  # augmentation
                                         transforms.RandomHorizontalFlip(),
                                         transforms.ToTensor(),
                                         transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]),
            "val": transforms.Compose([transforms.Resize(256),
                                       transforms.CenterCrop(224),
                                       transforms.ToTensor(),
                                       transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])}

        train_dataset = MyDataSet(images_path=train_images_path,
                                  images_class=train_images_label,
                                  transform=data_transform["train"])
        eval_dataset = MyDataSet(images_path=val_images_path,
                                 images_class=val_images_label,
                                 transform=data_transform["val"])
        # MyDataSet supplies its own batch-assembly function for evaluation.
        val_collate_fn = eval_dataset.collate_fn

    else:
        # Default: MNIST. ToTensor alone — no augmentation, no normalization.
        train_dataset = datasets.MNIST(dir, train=True, download=True, transform=transforms.ToTensor())
        eval_dataset = datasets.MNIST(dir, train=False, transform=transforms.ToTensor())

    train_length = len(train_dataset)
    eval_length = len(eval_dataset)
    train_loader, val_loader, train_sampler = _build_loaders(
        train_dataset, eval_dataset, val_collate_fn)

    return train_loader, val_loader, train_sampler, train_length, eval_length

# transforms.ToTensor() converts a PIL.Image with values in [0, 255], or a
# numpy.ndarray of shape (H, W, C), into a torch.FloatTensor of shape
# [C, H, W] with values in [0, 1.0].
# See the PyTorch documentation for details on the constructs used above.
