"""PointPillarsNet"""

import numpy as np
from mindspore import nn
from mindspore import numpy as mnp
from mindspore import ops
from mindspore.common import dtype as mstype
from mind3d.models.backbones.pointpillars_RPN import RPN
from mind3d.models.blocks.pointpillars_PFNLayer import PFNLayer
from mind3d.models.blocks.pointpillars_PillarFeatureNet import PillarFeatureNet
from mind3d.models.blocks.pointpillars_PointPillarsScatter import PointPillarsScatter

class PointPillarsNet(nn.Cell):
    """
    PointPillars object-detection network for point clouds.

    Pipeline: a PillarFeatureNet (voxel feature extractor) encodes points
    grouped into pillars, a PointPillarsScatter spatially scatters the learned
    pillar features into a dense pseudo-image, and an RPN consumes that
    pseudo-image to produce the final detection predictions.

    Args:
        output_shape (tuple): Spatial shape of the scattered pseudo-image
            produced by the middle feature extractor.
        num_class (int): Number of object classes. Default: 2.
        num_input_features (int): Number of features per input point. Default: 4.
        vfe_num_filters (tuple): Filter sizes of the Voxel Feature Encoding
            layers; the last entry is the pillar feature dimension.
            Default: (32, 128).
        with_distance (bool): Whether to append the point-to-origin distance
            as an extra feature. Default: False.
        rpn_layer_nums (tuple): Number of conv layers per RPN stage.
            Default: (3, 5, 5).
        rpn_layer_strides (tuple): Downsample stride of each RPN stage.
            Default: (2, 2, 2).
        rpn_num_filters (tuple): Number of filters per RPN stage.
            Default: (128, 128, 256).
        rpn_upsample_strides (tuple): Upsample stride of each RPN stage.
            Default: (1, 2, 4).
        rpn_num_upsample_filters (tuple): Number of upsample filters per RPN
            stage. Default: (256, 256, 256).
        use_norm (bool): Whether to use normalization layers in both the
            feature extractor and the RPN. Default: True.
        use_direction_classifier (bool): Whether the RPN predicts a direction
            class per anchor. Default: True.
        encode_background_as_zeros (bool): Whether background is encoded as
            all-zero class scores. Default: True.
        num_anchor_per_loc (int): Anchors per spatial location. Default: 2.
        code_size (int): Size of the box encoding. Default: 7.
        use_bev (bool): Whether to fuse a Bird's Eye View feature map in the
            RPN. Default: False.
        voxel_size (tuple): Voxel (pillar) size in meters. Default: (0.2, 0.2, 4).
        pc_range (tuple): Point-cloud range as (x_min, y_min, z_min, x_max,
            y_max, z_max). Default: (0, -40, -3, 70.4, 40, 1).

    Citation:

    .. code-block::

        @inproceedings{2019PointPillars,
        title={PointPillars: Fast Encoders for Object Detection From Point Clouds},
        author={ Lang, Alex H.  and  Vora, Sourabh  and  Caesar, Holger  and  Zhou, Lubing  and  Beijbom, Oscar },
        booktitle={2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
        year={2019}}
    """
    def __init__(
            self,
            output_shape,
            num_class=2,
            num_input_features=4,
            vfe_num_filters=(32, 128),
            with_distance=False,
            rpn_layer_nums=(3, 5, 5),
            rpn_layer_strides=(2, 2, 2),
            rpn_num_filters=(128, 128, 256),
            rpn_upsample_strides=(1, 2, 4),
            rpn_num_upsample_filters=(256, 256, 256),
            use_norm=True,
            use_direction_classifier=True,
            encode_background_as_zeros=True,
            num_anchor_per_loc=2,
            code_size=7,
            use_bev=False,
            voxel_size=(0.2, 0.2, 4),
            pc_range=(0, -40, -3, 70.4, 40, 1)
    ):
        super().__init__()

        self.num_class = num_class
        self.encode_background_as_zeros = encode_background_as_zeros
        self.use_direction_classifier = use_direction_classifier
        self.use_bev = use_bev
        self.code_size = code_size
        self.num_anchor_per_loc = num_anchor_per_loc

        self.voxel_feature_extractor = PillarFeatureNet(
            num_input_features,
            use_norm,
            num_filters=vfe_num_filters,
            with_distance=with_distance,
            voxel_size=voxel_size,
            pc_range=pc_range
        )
        self.middle_feature_extractor = PointPillarsScatter(
            output_shape=output_shape,
            num_input_features=vfe_num_filters[-1]
        )
        # The RPN input width is dictated by the scatter layer's channel count.
        num_rpn_input_filters = self.middle_feature_extractor.n_channels

        self.rpn = RPN(
            # Bug fix: was hard-coded to True, silently ignoring the
            # `use_norm` constructor argument (which *was* honored by the
            # PillarFeatureNet above). Pass the parameter through so both
            # sub-networks agree on normalization.
            use_norm=use_norm,
            num_class=num_class,
            layer_nums=rpn_layer_nums,
            layer_strides=rpn_layer_strides,
            num_filters=rpn_num_filters,
            upsample_strides=rpn_upsample_strides,
            num_upsample_filters=rpn_num_upsample_filters,
            num_input_filters=num_rpn_input_filters,
            num_anchor_per_loc=num_anchor_per_loc,
            encode_background_as_zeros=encode_background_as_zeros,
            use_direction_classifier=use_direction_classifier,
            use_bev=use_bev,
            box_code_size=code_size
        )

    def construct(self, voxels, num_points, coors, bev_map=None):
        """
        Forward pass.

        Args:
            voxels (Tensor): Input voxels (pillars) of raw point features.
            num_points (Tensor): Number of valid points per voxel.
            coors (Tensor): Voxel coordinates used for scattering.
            bev_map (Tensor, optional): Bird's Eye View map, only consumed
                when ``use_bev`` is True. Default: None.

        Returns:
            Predictions produced by the RPN.
        """
        voxel_features = self.voxel_feature_extractor(voxels, num_points, coors)
        spatial_features = self.middle_feature_extractor(voxel_features, coors)
        if self.use_bev:
            preds = self.rpn(spatial_features, bev_map)
        else:
            preds = self.rpn(spatial_features)
        return preds

