'''
Function:
    Build distributed dataloader
Author:
    Zhenchao Jin
'''
# import torch
import luojianet.dataset as ds


'''BuildDistributedDataloader'''
def BuildDistributedDataloader(dataset, dataloader_cfg, dataset_cfg):
    '''Build a batched training dataloader from a map-style dataset.

    Args:
        dataset: map-style dataset whose items unpack into
            (image, segmentation, edge) -- TODO confirm against callers.
        dataloader_cfg (dict): dataloader configuration. Only 'batch_size'
            (default 16) is honored here; 'num_workers', 'shuffle',
            'pin_memory' and 'drop_last' are accepted by callers but are not
            wired into the luojianet pipeline below -- confirm whether they
            should be.
        dataset_cfg (dict): dataset configuration; currently unused, kept in
            the signature for interface compatibility with callers.

    Returns:
        A luojianet dataset object: GeneratorDataset with columns
        ('image', 'segmentation', 'edge'), batched by 'batch_size'.
    '''
    batch_size = dataloader_cfg.get('batch_size', 16)
    # NOTE(review): no sampler/shuffle is applied -- the data is consumed in
    # source order. Confirm this is intended for distributed training.
    dataset = ds.GeneratorDataset(
        source=dataset,
        column_names=["image", "segmentation", "edge"],
        num_parallel_workers=1,
    )
    return dataset.batch(batch_size=batch_size)

'''BuildDistributedTestDataloader'''
def BuildDistributedTestDataloader(dataset, dataloader_cfg):
    '''Build a batched test/evaluation dataloader from a map-style dataset.

    Args:
        dataset: map-style dataset. For HRI datasets items unpack into
            (data, label); otherwise into (image, segmentation, edge) --
            TODO confirm against callers.
        dataloader_cfg (dict): dataloader configuration. Honored keys:
            'batch_size' (default 16) and 'HRI' (bool, default False) which
            selects the column layout. 'num_workers', 'shuffle',
            'pin_memory' and 'drop_last' are accepted by callers but are not
            wired into the luojianet pipeline below -- confirm whether they
            should be.

    Returns:
        A luojianet dataset object batched by 'batch_size'.
    '''
    # TODO(review): consider moving the column names into the dataloader
    # config and passing them straight to ds.GeneratorDataset; document the
    # expected layout in the handbook.
    batch_size = dataloader_cfg.get('batch_size', 16)
    if dataloader_cfg.get('HRI', False):
        # HRI datasets yield (data, label) pairs; uses more parallel workers
        # and disables Python multiprocessing.
        dataset = ds.GeneratorDataset(
            dataset,
            ["data", "label"],
            num_parallel_workers=8,
            python_multiprocessing=False,
        )
    else:
        dataset = ds.GeneratorDataset(
            source=dataset,
            column_names=["image", "segmentation", "edge"],
            num_parallel_workers=1,
        )
    return dataset.batch(batch_size=batch_size)