"""PFNLayer"""

from mindspore import nn
from mindspore import ops

class PFNLayer(nn.Cell):
    """Pillar Feature Net (PFN) layer from PointPillars <https://arxiv.org/abs/1812.05784>.

    Applies a Dense (fully connected) layer, an optional BatchNorm and a ReLU
    to the per-point features, then takes the channel-wise max over the points
    of each voxel. If this is not the last layer, the per-voxel max features
    are broadcast back over the points axis and concatenated with the
    point-wise features along the channel axis.

    Args:
        in_channels (int): Number of channels in the input tensor.
        out_channels (int): Number of channels produced by the layer. When
            ``last_layer`` is False the Dense layer internally produces
            ``out_channels // 2`` channels so that the concatenated output
            has ``out_channels`` channels.
        use_norm (bool): If True, apply BatchNorm2d after the Dense layer
            (the Dense layer then carries no bias, as it would be cancelled
            by the normalization).
        last_layer (bool): If True, return only the per-voxel max features
            and skip the concatenation.

    Inputs:
        - **inputs** (Tensor) - Shape [batch, voxels, points, in_channels].

    Outputs:
        Tensor of shape [batch, voxels, points, out_channels] when
        ``last_layer`` is False; otherwise [batch, voxels, 1, out_channels]
        (the points axis is reduced to 1 by the max with ``keep_dims=True``).

    Supported Platforms:
        ``GPU``

    Examples:
        >>> import numpy as np
        >>> import mindspore as ms
        >>> from mindspore import Tensor, context
        >>> context.set_context(mode=context.GRAPH_MODE, device_target="GPU", save_graphs=False)
        >>> layer = PFNLayer(in_channels=64, out_channels=128, use_norm=True, last_layer=False)
        >>> inputs = Tensor(np.ones((1, 500, 100, 64)), ms.float32)
        >>> output = layer(inputs)
        >>> print(output.shape)
        (1, 500, 100, 128)

    Citation:

    .. code-block::

        @inproceedings{2019PointPillars,
        title={PointPillars: Fast Encoders for Object Detection From Point Clouds},
        author={ Lang, Alex H.  and  Vora, Sourabh  and  Caesar, Holger  and  Zhou, Lubing  and  Beijbom, Oscar },
        booktitle={2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
        year={2019}}
    """

    def __init__(self, in_channels, out_channels, use_norm, last_layer):
        super().__init__()

        self.last_vfe = last_layer
        # Intermediate layers compute only half the channels: the other half
        # is supplied by the broadcast per-voxel max in construct().
        if not self.last_vfe:
            out_channels = out_channels // 2

        self.units = out_channels
        self.use_norm = use_norm

        if use_norm:
            # NOTE(review): MindSpore momentum semantics are
            # new_mean = momentum * old + (1 - momentum) * current, so 0.99
            # corresponds to the usual PyTorch momentum of 0.01.
            self.norm = nn.BatchNorm2d(self.units, eps=1e-3, momentum=0.99)
        else:
            self.norm = ops.Identity()
        # Bias is redundant when immediately followed by BatchNorm.
        self.linear = nn.Dense(in_channels, self.units, has_bias=not use_norm)

        self.relu = ops.ReLU()
        self.tile = ops.Tile()
        # Concatenate point-wise and broadcast max features along the channel
        # axis of the [batch, voxels, points, channels] layout.
        self.concat = ops.Concat(axis=3)
        # Max over the last axis (points, in the NCHW layout used by the norm);
        # only the values ([1]) are used, the argmax indices are discarded.
        self.argmax_w_value = ops.ArgMaxWithValue(axis=-1, keep_dims=True)

    def construct(self, inputs):
        """Forward pass; ``inputs`` is [batch, voxels, points, in_channels]."""
        x = self.linear(inputs)  # [bs, V, P, units]
        # BatchNorm2d expects NCHW, so move channels to axis 1.
        x = self.norm(x.transpose((0, 3, 1, 2)))  # [bs, units, V, P]
        x = self.relu(x)
        # Max over points (last axis in NCHW): [bs, units, V, 1] -> [bs, V, 1, units]
        x_max = self.argmax_w_value(x)[1].transpose((0, 2, 3, 1))
        if self.last_vfe:
            return x_max
        # Restore the [bs, V, P, units] layout before concatenating.
        x = x.transpose((0, 2, 3, 1))
        # Broadcast the per-voxel max back over the points axis (shape[2]).
        x_repeat = self.tile(x_max, (1, 1, inputs.shape[2], 1))  # [bs, V, P, units]
        x_concatenated = self.concat([x, x_repeat])  # [bs, V, P, 2 * units]
        return x_concatenated


