
"""kitti dataset"""
from mind3d.dataset.kitti.src.builder import box_coder_builder
from mind3d.dataset.kitti.src.builder import dataset_builder
from mind3d.dataset.kitti.src.builder import target_assigner_builder
from mind3d.dataset.kitti.src.builder import voxel_builder


def KittiDetectionDataset(cfg, is_training=True):
    """
    Generate a KITTI dataset for 3D detection tasks.

    The KITTI dataset is a benchmark dataset used for autonomous driving tasks.
    This function builds a detection dataset using the voxel generator, box
    coder, and target assigner specified in the provided configuration. The
    dataset can be built in either training or evaluation mode.

    Args:
        cfg (dict): Configuration parameters for generating the dataset. It
                    must include configurations for the voxel generator, box
                    coder, target assigner, and input reader (either for
                    training or evaluation).
        is_training (bool, optional): Whether to generate a training dataset
                                      or an evaluation dataset. Defaults to True.

    Returns:
        dataset: The generated dataset (when ``is_training`` is True).
        (dataset, box_coder): A tuple of the generated dataset and the box
                              coder used to build it (when ``is_training``
                              is False); the box coder is needed at
                              evaluation time to decode predictions.

    The configuration (cfg) dictionary should follow this structure:

    {
        'model': {
            'voxel_generator': {voxel_generator_cfg},
            'box_coder': {box_coder_cfg},
            'target_assigner': {target_assigner_cfg},
        },
        'train_input_reader': {train_input_reader_cfg},
        'eval_input_reader': {eval_input_reader_cfg},
    }

    Examples:
    >>> from mind3d.dataset.kitti.src.builder import box_coder_builder
    >>> from mind3d.dataset.kitti.src.builder import dataset_builder
    >>> from mind3d.dataset.kitti.src.builder import target_assigner_builder
    >>> from mind3d.dataset.kitti.src.builder import voxel_builder
    >>> import mindspore.dataset as ds
    >>> config = {
    ...     'model': {
    ...         'voxel_generator': {'type': 'Voxel', 'voxel_size': [0.05, 0.05, 0.1],
    ...                             'max_number_of_points_per_voxel': 30},
    ...         'box_coder': {'type': 'GroundBox3dCoder'},
    ...         'target_assigner': {'anchor_generator_cfg': {'type': 'AnchorGeneratorBEV'},
    ...                             'similarity_cfg': {'type': 'NearestIoU'}}
    ...     },
    ...     'train_input_reader': {
    ...         'input_path': '/path/to/kitti/training/dataset',
    ...         'batch_size': 4,
    ...         'shuffle_buffer_size': 1024,
    ...     },
    ...     'eval_input_reader': {
    ...         'input_path': '/path/to/kitti/evaluation/dataset',
    ...         'batch_size': 4,
    ...         'shuffle_buffer_size': None,
    ...     }
    ... }
    >>> training_dataset = KittiDetectionDataset(config, is_training=True)
    >>> # Generate evaluation dataset and the box coder
    >>> evaluation_dataset, box_coder = KittiDetectionDataset(config, is_training=False)

    Note:
        The model config includes the configurations of voxel_generator,
        box_coder, and target_assigner. The voxel_generator voxelizes the
        input point clouds. The box_coder encodes the ground-truth boxes into
        the format the model consumes. The target_assigner assigns targets to
        the anchor boxes during training.

    Expected dataset directory layout::

        KITTI
        ├── training
        │   ├── image_2            <-- for visualization
        │   ├── calib
        │   ├── label_2
        │   ├── velodyne
        │   └── velodyne_reduced   <-- create this empty directory
        └── testing
            ├── image_2            <-- for visualization
            ├── calib
            ├── velodyne
            └── velodyne_reduced   <-- create this empty directory

    Citation:

        .. code-block::

           @article{geiger2013vision,
                    title={Vision meets robotics: The kitti dataset},
                    author={Geiger, Andreas and Lenz, Philip and Stiller, Christoph and Urtasun, Raquel},
                    journal={The International Journal of Robotics Research},
                    volume={32},
                    number={11},
                    pages={1231--1237},
                    year={2013},
                    publisher={Sage Publications Sage UK: London, England}
                    }
    """
    model_cfg = cfg['model']

    # Voxelizer for the raw LiDAR point clouds.
    voxel_cfg = model_cfg['voxel_generator']
    voxel_generator = voxel_builder.build(voxel_cfg)

    # Encoder/decoder between ground-truth boxes and the network's regression targets.
    box_coder_cfg = model_cfg['box_coder']
    box_coder = box_coder_builder.build(box_coder_cfg)

    # Assigns classification/regression targets to anchors; depends on the box coder.
    target_assigner_cfg = model_cfg['target_assigner']
    target_assigner = target_assigner_builder.build(target_assigner_cfg, box_coder)

    # Select the reader config for the requested mode.
    if is_training:
        input_cfg = cfg['train_input_reader']
    else:
        input_cfg = cfg['eval_input_reader']

    dataset = dataset_builder.build(
        input_reader_config=input_cfg,
        model_config=model_cfg,
        training=is_training,
        voxel_generator=voxel_generator,
        target_assigner=target_assigner
    )
    if is_training:
        return dataset
    # Evaluation also needs the box coder to decode model predictions.
    return dataset, box_coder

