import math
# import torch
import mindspore as ms
import numpy as np
from mindspore import  nn,ops
from  mindspore import context
from src.ms_resnet import resnet45

class PositionalEncoding(nn.Cell):
    """Sinusoidal positional encoding (Vaswani et al., "Attention Is All You Need").

    Adds a fixed sine/cosine position signal to the input sequence and then
    applies dropout.

    Args:
        d_model (int): embedding dimension of the model. Default: 512.
        dropout (float): value passed to ``nn.Dropout(keep_prob=...)``.
            NOTE: despite the name, this is the *keep* probability under the
            old MindSpore Dropout API — 0.9 means 10% of activations are
            dropped. Default: 0.9.
        max_len (int): maximum supported sequence length. Default: 5000.

    Inputs to ``construct``:
        x: Tensor assumed to be laid out as [seq_len, batch, d_model]
           (only the first axis is used to slice the encoding table).
    """

    def __init__(self, d_model=512, dropout=0.9, max_len=5000):
        super(PositionalEncoding, self).__init__()
        # Old MindSpore nn.Dropout takes the KEEP probability, not drop rate.
        self.dropout = nn.Dropout(keep_prob=dropout)

        # Build the (max_len, d_model) table: even columns get sin, odd cos,
        # with wavelengths forming a geometric progression up to 10000 * 2*pi.
        pe = np.zeros((max_len, d_model), np.float32)
        position = np.expand_dims(np.arange(0, max_len, dtype=np.float32), 1)
        div_term = np.exp(
            np.arange(0, d_model, 2, dtype=np.float32)
            * (-math.log(10000.0) / d_model)
        )
        pe[:, 0::2] = np.sin(position * div_term)
        pe[:, 1::2] = np.cos(position * div_term)

        # Reshape to (max_len, 1, d_model) so it broadcasts over the batch
        # axis of a [seq_len, batch, d_model] input.
        pe = np.expand_dims(pe, 0).transpose(1, 0, 2)
        # Stored as a non-trainable Parameter: saved with the cell's
        # parameters but excluded from gradient updates.
        self.pe = ms.Parameter(
            ms.Tensor(pe).astype(dtype=ms.float32),
            name="pe1",
            requires_grad=False,
        )

    def construct(self, x):
        """Add positional encodings to ``x`` and apply dropout.

        Args:
            x: Tensor of shape [seq_len, batch, d_model] — assumed layout;
               TODO confirm against callers.

        Returns:
            Tensor of the same shape as ``x``.
        """
        seq_len = x.shape[0]
        x = x + self.pe[:seq_len, :]
        return self.dropout(x)

def _get_activation_fn(activation):
    """Map an activation name to the corresponding MindSpore primitive class.

    Args:
        activation (str): either ``"relu"`` or ``"gelu"``.

    Returns:
        The ``ms.ops`` primitive class for the requested activation.

    Raises:
        RuntimeError: if ``activation`` is not a supported name (the original
            silently returned ``None``, deferring the failure to call time).
    """
    if activation == "relu":
        return ms.ops.ReLU
    if activation == "gelu":
        # BUG FIX: previously returned ReLU for "gelu" as well.
        # NOTE(review): op is named GeLU in MindSpore >= 1.2 (Gelu before) —
        # confirm against the installed MindSpore version.
        return ms.ops.GeLU
    raise RuntimeError("activation should be relu/gelu, not %s" % activation)

# class TransformerEncoderLayer(nn.Cell):
#     r"""TransformerEncoderLayer is made up of self-attn and feedforward network.
#     This standard encoder layer is based on the paper "Attention Is All You Need".
#     Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez,
#     Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in
#     Neural Information Processing Systems, pages 6000-6010. Users may modify or implement
#     in a different way during application.

#     Args:
#         d_model: the number of expected features in the input (required).
#         nhead: the number of heads in the multiheadattention models (required).
#         dim_feedforward: the dimension of the feedforward network model (default=2048).
#         dropout: the dropout value (default=0.1).
#         activation: the activation function of intermediate layer, relu or gelu (default=relu).

#     Examples::
#         >>> encoder_layer = nn.TransformerEncoderLayer(d_model=512, nhead=8)
#         >>> src = torch.rand(10, 32, 512)
#         >>> out = encoder_layer(src)
#     """

#     def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, 
#                  activation="relu", debug=False):
#         super(TransformerEncoderLayer, self).__init__()
#         self.debug = debug
#         self.self_attn = MultiheadAttention(d_model, nhead, dropout=dropout)  # remember to adjust dropout
#         # Implementation of Feedforward model
#         self.linear1 = nn.Dense(d_model, dim_feedforward,weight_init ='uniform',bias_init='uniform')
#         self.dropout = nn.Dropout(1-dropout)
#         self.linear2 = nn.Dense(dim_feedforward, d_model,weight_init ='uniform',bias_init='uniform')

#         self.norm1 = nn.LayerNorm(d_model)
#         self.norm2 = nn.LayerNorm(d_model)
#         self.dropout1 = nn.Dropout(1-dropout)
#         self.dropout2 = nn.Dropout(1-dropout)

#         self.activation = _get_activation_fn(activation)

#     def __setstate__(self, state):
#         if 'activation' not in state:
#             state['activation'] = ms.ops.ReLU
#         super(TransformerEncoderLayer, self).__setstate__(state)

#     def construct(self, src, src_mask=None, src_key_padding_mask=None):
#         r"""Pass the input through the encoder layer.

#         Args:
#             src: the sequence to the encoder layer (required).
#             src_mask: the mask for the src sequence (optional).
#             src_key_padding_mask: the mask for the src keys per batch (optional).

#         Shape:
#             see the docs in Transformer class.
#         """
#         src2, attn = self.self_attn(src, src, src, attn_mask=src_mask,
#                               key_padding_mask=src_key_padding_mask)
#         if self.debug: self.attn = attn
#         src = src + self.dropout1(src2)
#         src = self.norm1(src)
#         src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))
#         src = src + self.dropout2(src2)
#         src = self.norm2(src)
        
#         return src


# class MultiheadAttention(nn.Cell):
#     r"""Allows the model to jointly attend to information
#     from different representation subspaces.
#     See reference: Attention Is All You Need
#     .. math::
#         \text{MultiHead}(Q, K, V) = \text{Concat}(head_1,\dots,head_h)W^O
#         \text{where} head_i = \text{Attention}(QW_i^Q, KW_i^K, VW_i^V)
#     Args:
#         embed_dim: total dimension of the model.
#         num_heads: parallel attention heads.
#         dropout: a Dropout layer on attn_output_weights. Default: 0.0.
#         bias: add bias as module parameter. Default: True.
#         add_bias_kv: add bias to the key and value sequences at dim=0.
#         add_zero_attn: add a new batch of zeros to the key and
#                        value sequences at dim=1.
#         kdim: total number of features in key. Default: None.
#         vdim: total number of features in value. Default: None.
#         Note: if kdim and vdim are None, they will be set to embed_dim such that
#         query, key, and value have the same number of features.
#     Examples::
#         >>> multihead_attn = nn.MultiheadAttention(embed_dim, num_heads)
#         >>> attn_output, attn_output_weights = multihead_attn(query, key, value)
#     """
#     # __annotations__ = {
#     #     'bias_k': torch._jit_internal.Optional[torch.Tensor],
#     #     'bias_v': torch._jit_internal.Optional[torch.Tensor],
#     # }
#     __constants__ = ['q_proj_weight', 'k_proj_weight', 'v_proj_weight', 'in_proj_weight']

#     def __init__(self, embed_dim, num_heads, dropout=0., bias=True, add_bias_kv=False, add_zero_attn=False, kdim=None, vdim=None):
#         super(MultiheadAttention, self).__init__()
#         self.embed_dim = embed_dim
#         self.kdim = kdim if kdim is not None else embed_dim
#         self.vdim = vdim if vdim is not None else embed_dim
#         self._qkv_same_embed_dim = self.kdim == embed_dim and self.vdim == embed_dim

#         self.num_heads = num_heads
#         self.dropout = dropout
#         self.head_dim = embed_dim // num_heads
#         assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"

#         if self._qkv_same_embed_dim is False:
#             self.q_proj_weight = ms.Parameter(ms.Tensor(embed_dim, embed_dim))
#             self.k_proj_weight = ms.Parameter(ms.Tensor(embed_dim, self.kdim))
#             self.v_proj_weight = ms.Parameter(ms.Tensor(embed_dim, self.vdim))
#             #self.register_parameter('in_proj_weight', None)
#             self.in_proj_weight = None
#         else:
#             self.in_proj_weight = ms.Parameter(ms.numpy.empty(3 * embed_dim, embed_dim))
#             #self.register_parameter('q_proj_weight', None)
#             self.q_proj_weight = None
#             #self.register_parameter('k_proj_weight', None)
#             self.k_proj_weight = None
#             #self.register_parameter('v_proj_weight', None)
#             self.v_proj_weight = None

#         if bias:
#             self.in_proj_bias = ms.Parameter(ms.numpy.empty(3 * embed_dim))
#         else:
#             self.register_parameter('in_proj_bias', None)
#         self.out_proj = nn.Dense(embed_dim, embed_dim, has_bias=bias,weight_init ='uniform',bias_init = 'uniform')
#         self.out_proj.bias = ms.numpy(self.out_proj.bias)

#         if add_bias_kv:
#             self.bias_k = ms.Parameter(ms.numpy.empty(1, 1, embed_dim))
#             self.bias_v = ms.Parameter(ms.numpy.empty(1, 1, embed_dim))
#         else:
#             self.bias_k = self.bias_v = None

#         self.add_zero_attn = add_zero_attn

#         self._reset_parameters()

#     def _reset_parameters(self):
#         if self._qkv_same_embed_dim:
#             self.in_proj_weight = ms.Tensor(self.in_proj_weight)
#             self.in_proj_weight = ms.common.initializer.XavierUniform(gain = 1)
#             #xavier_uniform_(self.in_proj_weight)
#         else:
#             self.q_proj_weight = ms.Tensor(self.q_proj_weight)
#             self.k_proj_weight = ms.Tensor(self.k_proj_weight)
#             self.v_proj_weight = ms.Tensor(self.v_proj_weight)
#             self.q_proj_weight = ms.common.initializer.XavierUniform(gain = 1)
#             self.k_proj_weight = ms.common.initializer.XavierUniform(gain = 1)
#             self.v_proj_weight = ms.common.initializer.XavierUniform(gain = 1)
#             #xavier_uniform_(self.q_proj_weight)
#             #xavier_uniform_(self.k_proj_weight)
#             #xavier_uniform_(self.v_proj_weight)

#         if self.in_proj_bias is not None:
#             constant_init = ms.common.initializer.Constant(value=0)
#             #constant_(self.in_proj_bias, 0.)
#             self.in_proj_bias = constant_init(self.in_proj_bias)
#             self.in_proj_bias = ms.Tensor(self.in_proj_bias)
#             #constant_(self.out_proj.bias, 0.)
#             self.out_proj.bias = constant_init(self.out_proj.bias)
#             self.out_proj.bias = ms.Tensor(self.out_proj.bias)
#         #if self.bias_k is not None:
#             #xavier_normal_(self.bias_k)
#         #if self.bias_v is not None:
#             #xavier_normal_(self.bias_v)

#     def __setstate__(self, state):
#         # Support loading old MultiheadAttention checkpoints generated by v1.1.0
#         if '_qkv_same_embed_dim' not in state:
#             state['_qkv_same_embed_dim'] = True

#         super(MultiheadAttention, self).__setstate__(state)

#     def construct(self, query, key, value, key_padding_mask=None,
#                 need_weights=True, attn_mask=None):
#         r"""
#     Args:
#         query, key, value: map a query and a set of key-value pairs to an output.
#             See "Attention Is All You Need" for more details.
#         key_padding_mask: if provided, specified padding elements in the key will
#             be ignored by the attention. This is an binary mask. When the value is True,
#             the corresponding value on the attention layer will be filled with -inf.
#         need_weights: output attn_output_weights.
#         attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all
#             the batches while a 3D mask allows to specify a different mask for the entries of each batch.
#     Shape:
#         - Inputs:
#         - query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is
#           the embedding dimension.
#         - key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is
#           the embedding dimension.
#         - value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is
#           the embedding dimension.
#         - key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length.
#           If a ByteTensor is provided, the non-zero positions will be ignored while the position
#           with the zero positions will be unchanged. If a BoolTensor is provided, the positions with the
#           value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged.
#         - attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length.
#           3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length,
#           S is the source sequence length. attn_mask ensure that position i is allowed to attend the unmasked
#           positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend
#           while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True``
#           is not allowed to attend while ``False`` values will be unchanged. If a FloatTensor
#           is provided, it will be added to the attention weight.
#         - Outputs:
#         - attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size,
#           E is the embedding dimension.
#         - attn_output_weights: :math:`(N, L, S)` where N is the batch size,
#           L is the target sequence length, S is the source sequence length.
#         """
#         if not self._qkv_same_embed_dim:
#             return multi_head_attention_forward(
#                 query, key, value, self.embed_dim, self.num_heads,
#                 self.in_proj_weight, self.in_proj_bias,
#                 self.bias_k, self.bias_v, self.add_zero_attn,
#                 self.dropout, self.out_proj.weight, self.out_proj.bias,
#                 training=self.training,
#                 key_padding_mask=key_padding_mask, need_weights=need_weights,
#                 attn_mask=attn_mask, use_separate_proj_weight=True,
#                 q_proj_weight=self.q_proj_weight, k_proj_weight=self.k_proj_weight,
#                 v_proj_weight=self.v_proj_weight)
#         else:
#             return multi_head_attention_forward(
#                 query, key, value, self.embed_dim, self.num_heads,
#                 self.in_proj_weight, self.in_proj_bias,
#                 self.bias_k, self.bias_v, self.add_zero_attn,
#                 self.dropout, self.out_proj.weight, self.out_proj.bias,
#                 training=self.training,
#                 key_padding_mask=key_padding_mask, need_weights=need_weights,
#                 attn_mask=attn_mask)




