"""Transformer Decoder Layer"""
import mindspore.nn as nn
import mindspore.ops as ops
from mind3d.models.blocks.multi_head_attention import MultiheadAttention

class PositionEmbeddingLearned(nn.Cell):
    """Learned absolute position embedding for per-point coordinates.

    Maps raw coordinates of shape (B, P, C_in) or (B, C_in, P) to a
    (B, num_pos_feats, P) embedding via two pointwise (kernel_size=1)
    convolutions with a BatchNorm + ReLU in between.
    """

    def __init__(self, input_channel, num_pos_feats=288):
        super().__init__()
        self.unsqueeze = ops.ExpandDims()
        self.conv1 = nn.Conv1d(input_channel, num_pos_feats, kernel_size=1, has_bias=True, weight_init="xavier_uniform", bias_init="normal")
        # BatchNorm1d is emulated: the 3-D tensor gets a trailing dummy axis
        # so BatchNorm2d can normalize over the channel dimension.
        self.bn1 = nn.BatchNorm2d(num_pos_feats)
        self.conv2 = nn.Conv1d(num_pos_feats, num_pos_feats, kernel_size=1, has_bias=True, weight_init="xavier_uniform", bias_init="normal")
        self.relu = nn.ReLU()

    def construct(self, xyz):
        """Return the (B, num_pos_feats, P) embedding for coordinates `xyz`."""
        # Accept channel-last input by moving coordinates to dim 1.
        # NOTE(review): assumes a channel-last tensor never has P == 3 points.
        coords = xyz if xyz.shape[1] == 3 else xyz.transpose(0, 2, 1)
        embed = self.conv1(coords)
        # Add / drop the dummy spatial axis around the BatchNorm2d call.
        embed = self.bn1(self.unsqueeze(embed, -1)).squeeze(-1)
        return self.conv2(self.relu(embed))

class TransformerDecoderLayer(nn.Cell):
    """Single transformer decoder layer with learned position embeddings.

    Runs self-attention over the queries, cross-attention from queries to
    keys, and a feed-forward block — each followed by residual addition and
    LayerNorm (post-norm). Position embeddings are produced from raw 6-D
    (query) / 3-D (key) coordinates by `PositionEmbeddingLearned` and added
    to the attention inputs.

    Args:
        d_model (int): feature width of queries/keys. Default 288.
        nhead (int): number of attention heads. Default 8.
        dim_feedforward (int): hidden width of the FFN. Default 2048.
        dropout (float): value forwarded to `nn.Dropout` and
            `MultiheadAttention`. NOTE(review): whether this is a
            keep-probability (MindSpore 1.x `keep_prob`) or a
            drop-probability (MindSpore 2.x `p`) depends on the MindSpore
            version — 0.9 only makes sense as a keep-probability; verify
            against the pinned MindSpore release.
        activation (str): accepted for interface compatibility but not
            consulted; ReLU is always used (see `self.activation` below).
    """

    def __init__(self, d_model=288, nhead=8, dim_feedforward=2048, dropout=0.9, activation="relu"):
        super().__init__()
        self.self_attn = MultiheadAttention(d_model, nhead, dropout=dropout)
        self.multihead_attn = MultiheadAttention(d_model, nhead, dropout=dropout)
        # Implementation of Feedforward model
        self.linear1 = nn.Dense(d_model, dim_feedforward, weight_init="xavier_uniform", bias_init="normal")
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Dense(dim_feedforward, d_model, weight_init="xavier_uniform", bias_init="normal")

        self.norm1 = nn.LayerNorm([d_model])
        self.norm2 = nn.LayerNorm([d_model])
        self.norm3 = nn.LayerNorm([d_model])
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)
        self.dropout3 = nn.Dropout(dropout)

        self.activation = ops.ReLU()

        # Bug fix: the embedding width was hard-coded to 288, which breaks
        # the residual additions in construct() for any non-default d_model.
        # Defaults are unchanged, so existing callers behave identically.
        self.self_posembed = PositionEmbeddingLearned(6, d_model)
        self.cross_posembed = PositionEmbeddingLearned(3, d_model)

    def with_pos_embed(self, tensor, pos_embed):
        """Add `pos_embed` to `tensor`; identity when `pos_embed` is None."""
        return tensor if pos_embed is None else tensor + pos_embed

    def construct(self, query, key, query_pos, key_pos):
        """
        :param query: (B, C, Pq) query features
        :param key: (B, C, Pk) key features
        :param query_pos: (B, Pq, 6) query coordinates
        :param key_pos: (B, Pk, 3) key coordinates
        :return: (B, C, Pq) updated query features
        """
        # Build position embeddings and move them to sequence-first layout:
        # (B, C, P) -> (P, B, C), matching MultiheadAttention's expectation.
        if self.self_posembed is not None:
            query_pos_embed = self.self_posembed(query_pos).transpose(2, 0, 1)
        else:
            query_pos_embed = None
        if self.cross_posembed is not None:
            key_pos_embed = self.cross_posembed(key_pos).transpose(2, 0, 1)
        else:
            key_pos_embed = None

        # (B, C, P) -> (P, B, C)
        query = query.transpose(2, 0, 1)
        key = key.transpose(2, 0, 1)

        # Self-attention: position embedding is added to q, k AND v here
        # (DETR-style layers usually leave v plain — presumably intentional).
        q = k = v = self.with_pos_embed(query, query_pos_embed)
        query2 = self.self_attn(q, k, value=v)[0]
        query = query + self.dropout1(query2)
        query = self.norm1(query)

        # Cross-attention from queries to keys.
        query2 = self.multihead_attn(query=self.with_pos_embed(query, query_pos_embed),
                                     key=self.with_pos_embed(key, key_pos_embed),
                                     value=self.with_pos_embed(key, key_pos_embed))[0]
        query = query + self.dropout2(query2)
        query = self.norm2(query)

        # Feed-forward block with residual + norm.
        query2 = self.linear2(self.dropout(self.activation(self.linear1(query))))
        query = query + self.dropout3(query2)
        query = self.norm3(query)

        # (P, B, C) back to (B, C, P)
        query = query.transpose(1, 2, 0)
        return query
        
if __name__ == '__main__':
    # Smoke test: push random tensors through one decoder layer and print
    # the resulting shape (expected: (2, 288, 128)).
    import numpy as np
    import mindspore as ms

    ms.set_context(mode=ms.PYNATIVE_MODE, device_target="GPU")
    np.random.seed(41)

    batch, channels = 2, 288
    num_query, num_key = 128, 1024
    query = ms.Tensor(np.random.randn(batch, channels, num_query), ms.float32)
    key = ms.Tensor(np.random.randn(batch, channels, num_key), ms.float32)
    query_pos = ms.Tensor(np.random.randn(batch, num_query, 6), ms.float32)
    key_pos = ms.Tensor(np.random.randn(batch, num_key, 3), ms.float32)

    layer = TransformerDecoderLayer()
    output = layer(query, key, query_pos, key_pos)
    print(output.shape)