import torch
from copy import deepcopy
from torch.utils.data import DataLoader, distributed
from wwengine.registry import Registry, build_from_cfg

# Registries for the data pipeline: transform classes register themselves in
# TRANSFORMS and dataset classes in DATASETS; instances are later created
# from config dicts through build_from_cfg.
TRANSFORMS = Registry('transform')
DATASETS = Registry('dataset')


def build(cfg, registry, default_args=None):
    """Instantiate the object described by *cfg* from the given registry.

    Thin convenience wrapper around ``build_from_cfg``.
    """
    obj = build_from_cfg(cfg, registry, default_args)
    return obj


def build_transform(cfg):
    """Create a data-transform instance from its config dict via TRANSFORMS."""
    return build(cfg, TRANSFORMS)


def build_dataset(cfg, tasks=None, split='train'):
    """Build a dataset instance for the given tasks and data split.

    Args:
        cfg: dataset config dict; must contain one sub-dict per split group
            (e.g. ``cfg['train']``) holding at least a ``pipeline`` entry.
        tasks: iterable of task names, each one of 'DET' / 'SEG'.
            Defaults to ``['DET']``.
        split: which group to build — 'train', 'val', 'trainval' or 'test'.

    Returns:
        The dataset object built from the DATASETS registry.

    Raises:
        ValueError: if a task name or the split name is not recognized.
    """
    # Fix the shared-mutable-default pitfall: the default used to be
    # tasks=['DET'], a single list object shared by every call.
    tasks = ['DET'] if tasks is None else list(tasks)
    valid_tasks = ('DET', 'SEG')
    for task in tasks:
        if task not in valid_tasks:
            raise ValueError(
                f'unknown task {task!r}; expected one of {valid_tasks}')
    groups = ['train', 'val', 'trainval', 'test']
    if split not in groups:
        raise ValueError(
            f'unknown split {split!r}; expected one of {groups}')
    cfg2 = deepcopy(cfg)
    # The task list is not a key the registry knows about, so inject it
    # into the config explicitly for the dataset class to consume.
    cfg2['tasks'] = tasks
    # Select which group of data is used and its preprocessing pipeline.
    cfg2['split'] = split
    cfg2['pipeline'] = cfg2[split]['pipeline']
    # Drop the per-split sub-configs; the dataset class does not take them.
    for group in groups:
        cfg2.pop(group, None)
    return build(cfg2, DATASETS)


def collate_fn(list_data):
    """Collate a list of per-frame sample dicts into a single batch dict.

    Each frame has a different number of lidar points, so the per-frame
    tensors cannot be stacked; they are returned as lists instead.
    """
    pts, boxes, labels, names = [], [], [], []
    for sample in list_data:
        pts.append(torch.from_numpy(sample['lidar_pts']))
        boxes.append(torch.from_numpy(sample['gt_bboxes_3d']))
        labels.append(torch.from_numpy(sample['gt_labels']))
        names.append(sample['gt_names'])
    return {
        'batched_pts': pts,
        'batched_gt_bboxes': boxes,
        'batched_labels': labels,
        'batched_names': names,
    }


def build_dataloader(dataset,
                     samples_per_gpu,
                     workers_per_gpu,
                     num_gpus=1,
                     **kwargs):
    """Wrap *dataset* in a DataLoader using a DistributedSampler.

    Args:
        dataset: the dataset instance to load from.
        samples_per_gpu: batch size contributed by each GPU.
        workers_per_gpu: worker processes contributed by each GPU.
        num_gpus: GPU count; scales both batch size and worker count.
        **kwargs: forwarded verbatim to ``torch.utils.data.DataLoader``.

    Returns:
        A ``DataLoader`` that batches with :func:`collate_fn`.

    NOTE(review): ``DistributedSampler`` requires ``torch.distributed``
    to be initialized, and each process then sees only its own shard —
    multiplying batch_size by ``num_gpus`` on top of that may over-count;
    confirm against the launch script.
    """
    return DataLoader(
        dataset,
        batch_size=num_gpus * samples_per_gpu,
        sampler=distributed.DistributedSampler(dataset),
        num_workers=num_gpus * workers_per_gpu,
        collate_fn=collate_fn,
        pin_memory=False,
        **kwargs)
