import os, sys
sys.path.append(os.path.abspath(os.path.join(os.getcwd())))
import mindspore.ops as ops
import mindspore.numpy as mnp
import mindspore.nn as nn
from mind3d.utils.pointnet2_util import index_points
from mind3d.models.blocks.TransitionDown import Square_distance

class Transformer(nn.Cell):
    """
    Point Transformer block.

    Integrates a vector self-attention layer, linear projections that reduce
    dimensionality and accelerate processing, and a residual connection.
    The information aggregation adapts both to the content of the feature
    vectors and to their layout in 3D space.

    Args:
        d_points (int): Channel dimension of the input and output features.
        d_model (int): Internal feature dimension used by the attention layer.
        k (int): Number of nearest neighbours attended to per point.

    Inputs:
        xyz (Tensor): Point coordinates of shape (B, N, 3), where B is the
            batch size and N the number of points.
        features (Tensor): Point features of shape (B, N, d_points).

    Outputs:
        Tuple[Tensor, Tensor]:
            - res: Transformed features of shape (B, N, d_points).
            - attn: Attention weights of shape (B, N, k, d_model).

    Example:
        >>> x = mindspore.Tensor(np.random.rand(32, 1024, 3), dtype=mindspore.float32)
        >>> y = mindspore.Tensor(np.random.rand(32, 1024, 32), dtype=mindspore.float32)
        >>> block = Transformer(32, 512, 16)
        >>> output, attn = block(x, y)
        >>> print(output.shape)
        >>> print(attn.shape)
    """

    def __init__(self, d_points, d_model, k):
        super(Transformer, self).__init__()
        self.fc1 = nn.Dense(d_points, d_model, weight_init="Uniform", bias_init="Uniform")
        self.fc2 = nn.Dense(d_model, d_points, weight_init="Uniform", bias_init="Uniform")
        # Positional-encoding MLP: maps relative 3D offsets to d_model channels.
        self.fc_delta = nn.SequentialCell(
            nn.Dense(3, d_model, weight_init="Uniform", bias_init="Uniform"),
            nn.ReLU(),
            nn.Dense(d_model, d_model, weight_init="Uniform", bias_init="Uniform")
        )
        # Attention-weight MLP (the gamma mapping of the Point Transformer paper).
        self.fc_gamma = nn.SequentialCell(
            nn.Dense(d_model, d_model, weight_init="Uniform", bias_init="Uniform"),
            nn.ReLU(),
            nn.Dense(d_model, d_model, weight_init="Uniform", bias_init="Uniform")
        )
        self.w_qs = nn.Dense(d_model, d_model, has_bias=False, weight_init="Uniform")
        self.w_ks = nn.Dense(d_model, d_model, has_bias=False, weight_init="Uniform")
        self.w_vs = nn.Dense(d_model, d_model, has_bias=False, weight_init="Uniform")
        self.k = k
        # Operator instances hoisted out of construct(): building them on every
        # forward call is wasteful and is unsafe under MindSpore graph mode.
        self.sort = ops.Sort()
        self.softmax = nn.Softmax(axis=-2)
        self.einsum = ops.Einsum("bmnf, bmnf->bmf")

    def construct(self, xyz, features):
        """
        Apply local vector self-attention over each point's k nearest neighbours.
        """
        # Pairwise squared distances -> indices of the k nearest neighbours
        # (sorted distances themselves are not needed, only the ordering).
        dist = Square_distance(xyz, xyz)
        _, knn_idx = self.sort(dist)
        knn_idx = knn_idx[:, :, :self.k]
        knn_xyz = index_points(xyz, knn_idx)

        residual = features
        x = self.fc1(features)
        q = self.w_qs(x)
        key = index_points(self.w_ks(x), knn_idx)
        v = index_points(self.w_vs(x), knn_idx)

        # Relative positional encoding of each neighbour offset.
        pos_enc = self.fc_delta(xyz[:, :, None] - knn_xyz)

        # Vector attention: per-channel subtraction attention, normalized over
        # the neighbour axis and scaled by sqrt(d_model) as in scaled
        # dot-product attention.
        attn = self.fc_gamma(q[:, :, None] - key + pos_enc)
        channels = mnp.size(key, axis=-1)
        attn = self.softmax(attn / mnp.sqrt(float(channels)))

        # Aggregate values (with positional encoding) by the attention weights.
        res = self.einsum((attn, v + pos_enc))
        res = self.fc2(res) + residual
        return res, attn

if __name__ == "__main__":
    import numpy as np
    import mindspore

    # Smoke test: 32 clouds of 1024 points with 3D coordinates and
    # 32-dimensional features.
    x = mindspore.Tensor(np.random.rand(32, 1024, 3), dtype=mindspore.float32)
    y = mindspore.Tensor(np.random.rand(32, 1024, 32), dtype=mindspore.float32)
    # Bind the instance to a distinct name so the Transformer class itself
    # is not shadowed (the original rebound the class name to an instance).
    model = Transformer(32, 512, 16)
    output, attn = model(x, y)
    print(output.shape)
    print(attn.shape)