# NOTE: everything from here down to the import block below is a superseded,
# fully commented-out draft of BCNLanguage kept for reference only.
# The active implementation starts after the real imports further down.
# import logging
# import mindspore as ms
# from mindspore import nn
# #import torch.nn as nn
# from src.PositionAttention import *
# from src.ms_Retsranformer import ResTranformer
# from src.ms_resnet import resnet45
# from src.ms_model import Model,_default_tfmer_cfg
# import numpy as np
# from utils.ms_utils import CharsetMapper
# from src.ms_transformer_encoder_decoder import TransformerDecoder
# # from modules.attention import *
# # from modules.backbone import ResTranformer
# # from modules.model import Model
# # from modules.resnet import resnet45
# class BCNLanguage(Model):
#     def __init__(self, config):
#         super().__init__(config)
#         d_model =  _default_tfmer_cfg['d_model']
#         nhead =  _default_tfmer_cfg['nhead']
#         d_inner =  _default_tfmer_cfg['d_inner']
#         dropout =  _default_tfmer_cfg['dropout']
#         activation =  _default_tfmer_cfg['activation']
#         num_layers = 4
#         self.d_model = d_model
#         self.detach = config.model_language_detach
#         self.use_self_attn = config.model_language_use_self_attn
#         self.loss_weight = config.model_language_loss_weight
#         self.max_length = config.dataset_max_length + 1  # additional stop token
#         self.debug =  False
#         # self.encoder_mask = ms.Tensor(np.ones((4, 256, 256)), ms.float16)
#         #self.transformer = nn.TransformerEncoder(batch_size=1,num_layers=num_layers,hidden_size=self.d_model, num_heads=nhead, 
#                 #ffn_hidden_size=d_inner, hidden_dropout_rate=dropout,attention_dropout_rate=dropout, hidden_act=activation,seq_length=256)
#         self.proj = nn.Dense(self.charset.num_classes, d_model, weight_init ='uniform',bias_init='uniform',has_bias =False)
#         self.token_encoder = PositionalEncoding(d_model, max_len=self.max_length)
#         self.pos_encoder = PositionalEncoding(d_model, dropout=1.0, max_len=self.max_length)

#         self.model = TransformerDecoder(batch_size = 72,num_layers=num_layers,hidden_size=self.d_model,num_heads=nhead,ffn_hidden_size=d_inner,
#                         hidden_dropout_rate=dropout,attention_dropout_rate=dropout,hidden_act='relu',src_seq_length=26,tgt_seq_length=26)



#         self.cls = nn.Dense(self.d_model, self.charset.num_classes, weight_init ='uniform',bias_init='uniform')

#         #if config.model_language_checkpoint is not None:
#             #logging.info(f'Read language model from {config.model_language_checkpoint}.')
#             #self.load(config.model_language_checkpoint)

#     def construct(self, tokens):
#         """
#         Args:
#             tokens: (N, T, C) where T is length, N is batch size and C is classes number
#             lengths: (N,)
#         """
#         #if self.detach: tokens = tokens.detach()
#         embed = self.proj(tokens)  # (N, T, E)
#         embed = embed.transpose(1,0,2)
#         #embed = embed.permute(1, 0, 2)  # (T, N, E)
#         embed = self.token_encoder(embed)  # (T, N, E)
#         embed = embed.transpose(1,0,2)
#         #padding_mask = self._get_padding_mask(lengths, self.max_length)#需要1，26，26。现在是1，26
#         padding_mask = ms.Tensor(np.ones((72, 26, 26)), dtype=ms.float32)
#         zeroo = ms.ops.Zeros()
#         zeros = zeroo((72,26,512),ms.float32)
#         #zeros = embed.new_zeros(*embed.shape)#需要是1 26 512，现在是 26 1 512
#         zeros = zeros.transpose(1,0,2)
#         qeury = self.pos_encoder(zeros) #需要是1 26 512，现在是26 1 512
#         qeury = qeury.transpose(1,0,2)
#         location_mask = ms.Tensor(np.ones((72, 26, 26)), dtype=ms.float32)
        
        
#         #location_mask = self._get_location_mask(self.max_length, tokens.device)#需要是1，26，26。现在是26，26
        
        
        
#         output = self.model(qeury, padding_mask,embed,location_mask)
#                 #memory_key_padding_mask=padding_mask  # (T, N, E)
#         #output = output.permute(1, 0, 2)  # (N, T, E)
        
#         # print(output.shape)
#         logits = self.cls(output)  # (N, T, C)
#         pt_lengths = self._get_length(logits)
        
#         res =  {'feature': output, 'logits': logits, 'pt_lengths': pt_lengths,
#                  'loss_weight':self.loss_weight, 'name': 'language'}
        
#         #print(output.shape)
#         return res
    
    
    
    
    
import logging
import mindspore as ms
from mindspore import nn
#import torch.nn as nn
from src.PositionAttention import *
from src.ms_Retsranformer import ResTranformer
from src.ms_resnet import resnet45
from src.ms_model import Model,_default_tfmer_cfg
import numpy as np
from utils.ms_utils import CharsetMapper
#from modules.attention import *
#from modules.backbone import ResTranformer
#from modules.model import Model
#from modules.resnet import resnet45
from src.ms_transformer_encoder_decoder import TransformerDecoder as ms_TransformerDecoder
class BCNLanguage(Model):
    """Bidirectional cloze network (BCN) language-model head (MindSpore port).

    Takes the per-character class probabilities produced by the vision
    branch, projects them into the transformer dimension, and refines them
    with a transformer decoder driven by learned positional queries.
    """

    def __init__(self, config):
        super().__init__(config)
        d_model = _default_tfmer_cfg['d_model']
        nhead = _default_tfmer_cfg['nhead']
        d_inner = _default_tfmer_cfg['d_inner']
        dropout = _default_tfmer_cfg['dropout']
        num_layers = 4
        self.d_model = d_model
        self.detach = config.model_language_detach
        self.use_self_attn = config.model_language_use_self_attn
        self.loss_weight = config.model_language_loss_weight
        self.max_length = config.dataset_max_length + 1  # +1 for the stop token
        self.debug = False

        # Project class probabilities (C) into the model dimension (E).
        self.proj = nn.Dense(self.charset.num_classes, d_model,
                             weight_init='uniform', bias_init='uniform',
                             has_bias=False)
        self.token_encoder = PositionalEncoding(d_model, max_len=self.max_length)
        self.pos_encoder = PositionalEncoding(d_model, dropout=1.0,
                                              max_len=self.max_length)

        # NOTE(review): batch size (96) and sequence lengths (26) are baked
        # into the decoder here; construct() assumes the same static shapes.
        self.model = ms_TransformerDecoder(
            batch_size=96, num_layers=num_layers, hidden_size=self.d_model,
            num_heads=nhead, ffn_hidden_size=d_inner,
            hidden_dropout_rate=dropout, attention_dropout_rate=dropout,
            hidden_act='relu', src_seq_length=26, tgt_seq_length=26)

        self.cls = nn.Dense(self.d_model, self.charset.num_classes,
                            weight_init='uniform', bias_init='uniform')

        # TODO(review): checkpoint loading (config.model_language_checkpoint)
        # was disabled in the original; re-enable once self.load is verified.

    def mindspore_decoder_mask(self, lengths):
        """Build the combined attention mask for the transformer decoder.

        ANDs the per-sample padding mask (positions within ``lengths`` are
        valid) with the location mask (diagonal blanked, so a position
        cannot attend to itself).

        Args:
            lengths: (N,) tensor of valid sequence lengths.

        Returns:
            (N, 26, 26) float16 tensor; 1.0 where attention is allowed.
        """
        expand = ms.ops.expand_dims
        pad_mask = expand(self._get_padding_mask(lengths, 26), -2)  # (N, 1, 26)
        loc_mask = expand(self._get_location_mask(26), 0)           # (1, 26, 26)
        # Broadcast AND of the two masks -> (N, 26, 26).
        combined = ms.ops.logical_and(pad_mask, loc_mask)
        # Cast to float16 — presumably what the decoder kernels expect on
        # Ascend; confirm against ms_TransformerDecoder.
        return combined.astype(ms.float16)

    def _get_padding_mask(self, length, max_length):
        """Return a (N, max_length) bool mask, True at positions < length."""
        expand = ms.ops.expand_dims
        length = expand(length, -1)                        # (N, 1)
        grid = expand(ms.numpy.arange(0, max_length), 0)   # (1, max_length)
        return grid < length

    def _get_location_mask(self, sz):
        """Return a (sz, sz) bool mask that is False only on the diagonal.

        Built with NumPy because the ms.ops.Eye-based variant reportedly
        failed on Ascend (per the original author's note).

        Fix: the all-ones matrix previously had a hard-coded (26, 26)
        shape, which broke for any other ``sz``; it now follows ``sz``.
        """
        mask = np.ones((sz, sz)) - np.eye(sz, sz)
        return ms.Tensor(mask).astype(ms.bool_)

    def construct(self, tokens, lengths):
        """
        Args:
            tokens: (N, T, C) where T is length, N is batch size and C is
                classes number.
            lengths: (N,) valid sequence lengths.

        Returns:
            dict with 'feature' (N, T, E), 'logits' (N, T, C),
            'pt_lengths' (N,), 'loss_weight' and 'name'.
        """
        embed = self.proj(tokens)          # (N, T, E)
        embed = embed.transpose(1, 0, 2)   # (T, N, E) for positional encoding
        embed = self.token_encoder(embed)
        embed = embed.transpose(1, 0, 2)   # back to (N, T, E)

        # Positional queries: positional encoding applied to a zero tensor.
        # Shape follows embed instead of a hard-coded (96, 26, 512) literal.
        zeroo = ms.ops.Zeros()
        zeros = zeroo(embed.shape, ms.float32)   # (N, T, E)
        zeros = zeros.transpose(1, 0, 2)         # (T, N, E)
        query = self.pos_encoder(zeros)
        query = query.transpose(1, 0, 2)         # (N, T, E)

        # Both mask arguments of the decoder are the same deterministic
        # tensor; build it once instead of twice.
        mask = self.mindspore_decoder_mask(lengths)

        output = self.model(query, mask, embed, mask)   # (N, T, E)

        logits = self.cls(output)            # (N, T, C)
        pt_lengths = self._get_length(logits)

        return {'feature': output, 'logits': logits, 'pt_lengths': pt_lengths,
                'loss_weight': self.loss_weight, 'name': 'language'}