# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PointPillarsScatter"""
import numpy as np
from mindspore import nn
from mindspore import ops

class PointPillarsScatter(nn.Cell):
    """
    Scatter sparse pillar features back onto a dense 2D pseudo-image.

    Implements the scatter stage of the PointPillars detector
    <https://arxiv.org/abs/1812.05784>: per-pillar feature vectors are
    written into a dense canvas at their grid coordinates via ``ScatterNd``,
    producing a (batch, channels, ny, nx) pseudo-image for the CNN backbone.

    Args:
        output_shape (tuple): Target canvas shape; index 2 is taken as the
            canvas height ``ny`` and index 3 as the width ``nx``.
        num_input_features (int): Number of feature channels per pillar.

    Inputs:
        - **voxel_features** (Tensor) - Pillar features to be scattered.
        - **coords** (Tensor) - Integer scatter indices for each feature;
          ``coords[..., 0]`` is overwritten with the batch index before
          scattering.  (Exact input shapes depend on the upstream voxel
          encoder — NOTE(review): confirm against the caller.)

    Outputs:
        Tensor of shape (batch, num_input_features, ny, nx).

    Supported Platforms:
        ``GPU``

    Examples:
        >>> import numpy as np
        >>> import mindspore as ms
        >>> from mindspore import Tensor, context
        >>> context.set_context(mode=context.GRAPH_MODE, device_target="GPU", save_graphs=False)
        >>> scatter = PointPillarsScatter(output_shape=(1, 64, 500, 500), num_input_features=64)
        >>> output = scatter(voxel_features, coords)
        >>> print(output.shape)
        (1, 64, 500, 500)

    Citation:

    .. code-block::

        @inproceedings{2019PointPillars,
        title={PointPillars: Fast Encoders for Object Detection From Point Clouds},
        author={ Lang, Alex H.  and  Vora, Sourabh  and  Caesar, Holger  and  Zhou, Lubing  and  Beijbom, Oscar },
        booktitle={2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
        year={2019}}
    """

    def __init__(self, output_shape, num_input_features):
        """Initialize the scatter cell from the canvas shape and channel count."""
        super().__init__()
        self.output_shape = output_shape
        # Canvas height/width taken from the NCHW-style output shape.
        self.ny = output_shape[2]
        self.nx = output_shape[3]
        self.n_channels = num_input_features
        self.scatter_nd = ops.ScatterNd()
        self.concat = ops.Concat(axis=1)
        self.expand_dims = ops.ExpandDims()
        self.transpose = ops.Transpose()

    def construct(self, voxel_features, coords):
        """Forward graph: scatter features onto the canvas and return NCHW.

        Args:
            voxel_features (Tensor): Features to scatter; axis 0 is batch.
            coords (Tensor): Scatter indices; the leading index of each entry
                is replaced by its batch number below.

        Returns:
            Tensor of shape (batch, n_channels, ny, nx).
        """
        batch_size = voxel_features.shape[0]
        # The z coordinate is unused: repurpose index 0 as the batch index
        # so ScatterNd places each sample on its own canvas slice.
        for i in range(batch_size):
            coords[i, :, 0] = i
        # The extra size-2 axis separates "empty" (index 0) from "filled"
        # (index 1) cells — presumably coords[..., 3] selects it; TODO confirm.
        shape = (batch_size, self.ny, self.nx, 2, self.n_channels)
        batch_canvas = self.scatter_nd(coords, voxel_features, shape)
        # Keep only the "filled" slice, then NHWC -> NCHW for the backbone.
        batch_canvas = batch_canvas[:, :, :, 1]
        batch_canvas = self.transpose(batch_canvas, (0, 3, 1, 2))
        return batch_canvas



