"""Groupfree 3D Backbone"""
import mindspore.nn as nn
from mind3d.models.blocks.groupfree_3d_pointnet2_sa import PointNet2SetAbstraction
from mind3d.models.blocks.groupfree_3d_pointnet2_fp import PointNetFeaturePropagation


class Pointnet2Backbone(nn.Cell):
    r"""
    Backbone network for point cloud feature learning.
    Based on the PointNet++ single-scale grouping architecture.

    Parameters
    ----------
    input_feature_dim : int
        Number of input channels in the per-point feature descriptor,
        in addition to the xyz coordinates (e.g. 3 for RGB).
    width : int
        Channel-width multiplier applied to every set-abstraction MLP layer.
    depth : int
        Number of equal-width hidden layers in each set-abstraction MLP
        (before the final widening layer).
    """

    def __init__(self, input_feature_dim=0, width=1, depth=2):
        super().__init__()
        self.depth = depth
        self.width = width

        # Four set-abstraction (SA) levels progressively downsample the
        # cloud: N -> 2048 -> 1024 -> 512 -> 256 points, with growing
        # grouping radii (0.2 -> 1.2) to enlarge the receptive field.
        self.sa1 = PointNet2SetAbstraction(
            npoint=2048,
            radius=0.2,
            nsample=64,
            in_channel=input_feature_dim + 3,
            mlp=[64 * width for _ in range(depth)] + [128 * width],
            group_all=False
        )

        self.sa2 = PointNet2SetAbstraction(
            npoint=1024,
            radius=0.4,
            nsample=32,
            in_channel=128 * width + 3,
            mlp=[128 * width for _ in range(depth)] + [256 * width],
            group_all=False
        )

        self.sa3 = PointNet2SetAbstraction(
            npoint=512,
            radius=0.8,
            nsample=16,
            in_channel=256 * width + 3,
            mlp=[128 * width for _ in range(depth)] + [256 * width],
            group_all=False
        )

        self.sa4 = PointNet2SetAbstraction(
            npoint=256,
            radius=1.2,
            nsample=16,
            in_channel=256 * width + 3,
            mlp=[128 * width for _ in range(depth)] + [256 * width],
            group_all=False
        )

        # Two feature-propagation (FP) levels upsample back to the 1024
        # sa2 points; fp2 projects to 288 output channels.
        self.fp1 = PointNetFeaturePropagation(in_channel=256 * width + 256 * width, mlp=[256 * width, 256 * width])

        self.fp2 = PointNetFeaturePropagation(in_channel=256 * width + 256 * width, mlp=[256 * width, 288])

    def construct(self, data):
        r"""
        Forward pass of the backbone.

        Parameters
        ----------
        data : mindspore.Tensor
            (B, N, 3 + input_feature_dim) float32 point cloud. Each point
            MUST be formatted as (x, y, z, features...).

        Returns
        -------
        features : mindspore.Tensor
            Seed-point features produced by the second feature-propagation
            layer (288 channels per seed; exact layout depends on
            PointNetFeaturePropagation — TODO confirm channel-first vs
            channel-last).
        xyz : mindspore.Tensor
            (B, num_seed, 3) seed-point coordinates (the sa2 points,
            num_seed = 1024).
        sample_inds : mindspore.Tensor
            (B, num_seed) indices of the seed points within the original
            N input points.
        """
        xyz0 = data[:, :, :3]  # [B, N, 3]
        features0 = None  # extra per-point features beyond xyz are unused here

        # sa1 samples from the raw cloud, so fps_inds1 indexes the original
        # N points. Deeper levels sample from already-subsampled sets, so
        # their indices are relative (just 0..npoint-1) and are discarded.
        xyz1, features1, fps_inds1 = self.sa1(xyz0, features0)
        xyz2, features2, _ = self.sa2(xyz1, features1)
        xyz3, features3, _ = self.sa3(xyz2, features2)
        xyz4, features4, _ = self.sa4(xyz3, features3)

        # Propagate features back up: 256 -> 512 -> 1024 points.
        features3 = self.fp1(xyz3, xyz4, features3, features4)
        features2 = self.fp2(xyz2, xyz3, features2, features3)

        # Map the num_seed sa2 points to their indices in the full input
        # cloud via the sa1 sampling order (sa2 takes the first num_seed
        # FPS picks of sa1's output).
        num_seed = xyz2.shape[1]
        backbone_sample_idx = fps_inds1[:, 0:num_seed]

        return features2, xyz2, backbone_sample_idx


if __name__ == '__main__':
    # Quick smoke test: run a random point cloud through the backbone.
    import numpy as np
    import mindspore as ms

    ms.set_context(mode=ms.PYNATIVE_MODE, device_target="GPU")
    np.random.seed(41)

    cloud = np.random.randn(4, 20000, 3)
    inputs = ms.Tensor(cloud, ms.float32)

    model = Pointnet2Backbone()
    print(model)

    outputs = model(inputs)
    print(outputs)
