"""create dataset"""
# import
import os
import mindspore.common.dtype as mstype
import mindspore.dataset.engine as de
import mindspore.dataset.vision.c_transforms as C
import mindspore.dataset.transforms.c_transforms as C2
from mindspore.communication.management import init, get_rank, get_group_size

# def create_dataset1(dataset_path, do_train, repeat_num=1, batch_size=32):
#     """
#     create a train or evaluate cifar10 dataset for resnet50
#     Args:
#         dataset_path(string): the path of dataset.
#         do_train(bool): whether dataset is used for train or eval.
#         repeat_num(int): the repeat times of dataset. Default: 1
#         batch_size(int): the batch size of dataset. Default: 32

#     Returns:
#         dataset
#     """
#     # init distributed backend, used before comm
#     init()
#     # get rank id for current device
#     rank_id = get_rank()
    
#     device_num = get_group_size()

#     if device_num == 1:
#         ds = de.Cifar10Dataset(dataset_path, num_parallel_workers=8, shuffle=True)
#     else:
#         ds = de.Cifar10Dataset(dataset_path, num_parallel_workers=8, shuffle=True, num_shards=device_num, shard_id=rank_id)

#     # map
#     trans = []
#     if do_train:
#         trans += [
#             # crop input image at certain position
#             # size:(height, width) padding: (left, top, right, bottom)
#             C.RandomCrop((32, 32), (4, 4, 4, 4)),
#             # flip horizontally with certain prob, to increase variation
#             C.RandomHorizontalFlip(prob=0.5)
#         ]
    
#     trans == [
#         C.Resize((224, 224)),
#         # rescale & shift
#         C.Rescale(1.0 / 255.0, 0.0), 
#         # normalize each channel, mean & std
#         C.Normalize([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010]),
#         # rearrange H W C
#         C.HWC2CHW()
#     ]

#     # set tensor op on given data type
#     type_cast_op = C2.TypeCast(mstype.int32)

#     # apply map to dataset
#     ds = ds.map(operations=type_cast_op, input_columns="label", num_parallel_workers=8)
#     ds = ds.map(operations=trans, input_columns="image", num_parallel_workers=8)

#     # apply batch operations
#     ds = ds.batch(batch_size, drop_remainder=True)
#     # apply dataset repeat operation
#     ds = ds.repeat(repeat_num)

#     return ds

def create_dataset2(dataset_path, do_train, repeat_num=1, batch_size=32):
    """
    create a train or eval imagenet2012 dataset for resnet50
    Args:
        dataset_path(string): the path of dataset.
        do_train(bool): whether dataset is used for train or eval.
        repeat_num(int): the repeat times of dataset. Default: 1
        batch_size(int): the batch size of dataset. Default: 32

    Returns:
        dataset
    """
    # init distributed backend; must be called before any other comm op
    init()
    # rank id of the current device within the communication group
    rank_id = get_rank()
    # total number of participating devices
    device_num = get_group_size()

    if device_num == 1:
        ds = de.ImageFolderDataset(dataset_path, num_parallel_workers=8, shuffle=True)
    else:
        # shard the dataset so each device reads a distinct, non-overlapping slice
        ds = de.ImageFolderDataset(dataset_path, num_parallel_workers=8, shuffle=True,
                                   num_shards=device_num, shard_id=rank_id)

    image_size = 224
    # per-channel mean/std in 0-255 pixel scale
    # NOTE(review): these differ from the common ImageNet stats
    # (0.485/0.456/0.406, 0.229/0.224/0.225) -- confirm they are intended
    mean = [0.417 * 255, 0.385 * 255, 0.299 * 255]
    std = [0.264 * 255, 0.241 * 255, 0.240 * 255]

    # build the image transform pipeline
    if do_train:
        trans = [
            # fused decode + random crop + resize for jpeg input
            C.RandomCropDecodeResize(image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
            # flip horizontally with prob 0.5, to increase variation
            C.RandomHorizontalFlip(prob=0.5),
            C.Normalize(mean=mean, std=std),
            # rearrange HWC -> CHW for the network
            C.HWC2CHW()
        ]
    else:
        trans = [
            # decode img to rgb mode
            C.Decode(),
            C.Resize(256),
            C.CenterCrop(image_size),
            C.Normalize(mean=mean, std=std),
            C.HWC2CHW()
        ]
    # BUGFIX: removed a dead `trans == [...]` statement that followed here.
    # `==` is a comparison, not an assignment, so it built a second transform
    # list and immediately discarded it -- a no-op that only obscured which
    # pipeline is actually applied.

    # cast labels to int32 as expected by the loss op
    type_cast_op = C2.TypeCast(mstype.int32)

    # apply map to dataset
    ds = ds.map(operations=type_cast_op, input_columns="label", num_parallel_workers=8)
    ds = ds.map(operations=trans, input_columns="image", num_parallel_workers=8)

    # drop the remainder so every batch has a fixed shape
    ds = ds.batch(batch_size, drop_remainder=True)
    # apply dataset repeat operation
    ds = ds.repeat(repeat_num)

    return ds


# def create_dataset3(dataset_path, do_train, repeat_num=1, batch_size=32):
#     """
#     create a train or eval imagenet2012 dataset for resnet101
#     Args:
#         dataset_path(string): the path of dataset.
#         do_train(bool): whether dataset is used for train or eval.
#         repeat_num(int): the repeat times of dataset. Default: 1
#         batch_size(int): the batch size of dataset. Default: 32

#     Returns:
#         dataset
#     """
#     device_num, rank_id = _get_rank_info()
    
#     device_num = get_group_size()

#     if device_num == 1:
#         ds = de.ImageFolderDataset(dataset_path, num_parallel_workers=8, shuffle=True)
#     else:
#         ds = de.ImageFolderDataset(dataset_path, num_parallel_workers=8, shuffle=True, num_shards=device_num, shard_id=rank_id)

#     image_size = 224
#     mean = [0.475 * 255, 0.451 * 255, 0.392 * 255]
#     std = [0.275 * 255, 0.267 * 255, 0.278 * 255]

#     # map
#     trans = []
#     if do_train:
#         trans += [
#             # combine crop, decode & resize for jpeg
#             C.RandomCropDecodeResize(image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
#             # flip horizontally with certain prob, to increase variation
#             C.RandomHorizontalFlip(rank_id / (rank_id + 1)),
#             C.Normalize(mean=mean, std=std),
#             C.HWC2CHW()
#         ]
#     else:
#         trans = [
#             # decode img to rgb mode
#             C.Decode(),
#             C.Resize(256),
#             C.CenterCrop(image_size),
#             C.Normalize(mean=mean, std=std),
#             C.HWC2CHW()
#         ]
    
#     trans == [
#         C.Resize((224, 224)),
#         # rescale & shift
#         C.Rescale(1.0 / 255.0, 0.0), 
#         # normalize each channel, mean & std
#         C.Normalize([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010]),
#         # rearrange H W C
#         C.HWC2CHW()
#     ]

#     # set tensor op on given data type
#     type_cast_op = C2.TypeCast(mstype.int32)

#     # apply map to dataset
#     ds = ds.map(operations=type_cast_op, input_columns="label", num_parallel_workers=8)
#     ds = ds.map(operations=trans, input_columns="image", num_parallel_workers=8)

#     # apply batch operations
#     ds = ds.batch(batch_size, drop_remainder=True)
#     # apply dataset repeat operation
#     ds = ds.repeat(repeat_num)

#     return ds


# def create_dataset4(dataset_path, do_train, repeat_num=1, batch_size=32):
#     """
#     create a train or eval imagenet2012 dataset for se-resnet50
#     Args:
#         dataset_path(string): the path of dataset.
#         do_train(bool): whether dataset is used for train or eval.
#         repeat_num(int): the repeat times of dataset. Default: 1
#         batch_size(int): the batch size of dataset. Default: 32

#     Returns:
#         dataset
#     """
#     device_num, rank_id = _get_rank_info()
    
#     device_num = get_group_size()

#     if device_num == 1:
#         ds = de.ImageFolderDataset(dataset_path, num_parallel_workers=12, shuffle=True)
#     else:
#         ds = de.ImageFolderDataset(dataset_path, num_parallel_workers=12, shuffle=True, num_shards=device_num, shard_id=rank_id)

#     image_size = 224
#     mean = [123.68, 116.78, 103.94]
#     std = [1.0, 1.0, 1.0]

#     # map
#     trans = []
#     if do_train:
#         trans += [
#             # combine crop, decode & resize for jpeg
#             C.RandomCropDecodeResize(image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
#             # flip horizontally with certain prob, to increase variation
#             C.RandomHorizontalFlip(prob=0.5),
#             C.Normalize(mean=mean, std=std),
#             C.HWC2CHW()
#         ]
#     else:
#         trans = [
#             # decode img to rgb mode
#             C.Decode(),
#             C.Resize(292),
#             C.CenterCrop(256),
#             C.Normalize(mean=mean, std=std),
#             C.HWC2CHW()
#         ]
    
#     trans == [
#         C.Resize((224, 224)),
#         # rescale & shift
#         C.Rescale(1.0 / 255.0, 0.0), 
#         # normalize each channel, mean & std
#         C.Normalize([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010]),
#         # rearrange H W C
#         C.HWC2CHW()
#     ]

#     # set tensor op on given data type
#     type_cast_op = C2.TypeCast(mstype.int32)

#     # apply map to dataset
#     ds = ds.map(operations=type_cast_op, input_columns="label", num_parallel_workers=12)
#     ds = ds.map(operations=trans, input_columns="image", num_parallel_workers=12)

#     # apply batch operations
#     ds = ds.batch(batch_size, drop_remainder=True)
#     # apply dataset repeat operation
#     ds = ds.repeat(repeat_num)

#     return ds


# def _get_rank_info():
#     """
#     get rank size and rank id
#     """
#     rank_size = int(os.environ.get("RANK_SIZE", 1))

#     if rank_size > 1:
#         rank_size = get_group_size()
#         rank_id = get_rank()
#     else:
#         rank_size = 1
#         rank_id = 0

#     return rank_size, rank_id