#import torch
#import torch.nn as nn
import mindspore as ms
from mindspore import nn
from utils.ms_utils import CharsetMapper
import copy



_default_tfmer_cfg = dict(d_model=512, nhead=8, d_inner=2048, # 1024
                          dropout=0.1, activation='relu')
#charset1 = CharsetMapper("/home/data4/zyh/ABINet/data/charset_36.txt",30)
#print(charset1.null_label)
class Model(nn.Cell):
    """Base class for ABINet sub-models (MindSpore port of the PyTorch original).

    Holds the charset mapping and provides shared helpers for greedy
    length decoding and attention-mask construction.
    """

    def __init__(self, config):
        super().__init__()
        # +1 reserves a slot for the additional end (null) token.
        self.max_length = config.dataset_max_length + 1
        self.charset = CharsetMapper(config.dataset_charset_path, max_length=self.max_length)
        self.config = config

    def _get_length(self, logit, dim=-1):
        """Greedy decoder: derive per-sample sequence lengths from logits.

        Args:
            logit: class scores; argmax is taken over the last axis, so the
                expected layout is (batch, time, num_classes) — TODO confirm.
            dim: axis to scan for the end token after argmax (default -1).

        Returns:
            Tensor of lengths: index of the first predicted null (end) token
            plus one, or the full sequence length ``logit.shape[1]`` when no
            null token is predicted.

        NOTE: an earlier implementation round-tripped through numpy via
        ``asnumpy``, which broke gradient flow and prevented training from
        converging; this version stays on Tensors throughout.
        """
        pred = ms.ops.Argmax()(logit)                 # (batch, time) predicted class ids
        is_null = (pred == self.charset.null_label)   # True where the end token appears
        has_null = is_null.any(dim)                   # samples containing an end token
        # cumsum(dim) == 1 is True from the first null up to (but excluding)
        # the second; AND-ing with is_null isolates exactly the first null.
        # (No deepcopy needed: cumsum returns a new tensor, is_null is intact.)
        first_null = ms.ops.logical_and(is_null, is_null.cumsum(dim) == 1)
        length = first_null.argmax(-1) + 1            # +1 to include the end token itself
        # Samples without an end token are assigned the full sequence length.
        return ms.numpy.where(has_null, length, logit.shape[1])

    @staticmethod
    def _get_padding_mask(length, max_length):
        """Return a (batch, max_length) bool mask, True at padded positions."""
        length = ms.numpy.expand_dims(length, -1)                 # (batch, 1)
        grid = ms.numpy.expand_dims(ms.numpy.arange(0, max_length), 0)  # (1, max_length)
        # Broadcast compare: position index >= sample length -> padding.
        return grid >= length

    @staticmethod
    def _get_location_mask(sz, device=None):
        """Return a (1, sz, sz) float mask with -inf on the diagonal.

        Prevents each position from attending to itself. ``device`` is kept
        only for interface compatibility with the PyTorch original; it is
        unused in MindSpore.
        """
        diag = ms.ops.Eye()(sz, sz, ms.bool_)
        mask = ms.ops.Cast()(diag, ms.float32)
        mask = ms.ops.masked_fill(mask, diag, float('-inf'))
        return ms.ops.ExpandDims()(mask, 0)

    