from ms_resnet import resnet45 as ms_resnet50
from ms_transformer import PositionalEncoding 
from ms_Retsranformer import ResTranformer as ms_ResTranformer
from print_para import mindspore_params,pytorch_params_test
from modules.backbone import ResTranformer
from ms_BaseVision import BaseVision
import mindspore as ms
import numpy as np
from ms_model import _default_tfmer_cfg
import torch
from mindspore import nn
from utils import Config
from ms_bcnlanguage import BCNLanguage as ms_BCNLanguage
from ms_utils import CharsetMapper
from modules.model_language import BCNLanguage
from modules.model_alignment import BaseAlignment
from ms_alignment import BaseAlignment as ms_BaseAlignment
from ms_model_abinet_iter import ABINetIterModel as ms_ABINetIterModel
from modules.model_abinet_iter import ABINetIterModel
from mindspore.common import set_seed







# Run MindSpore in PyNative (eager) mode so intermediate tensors can be
# inspected directly while debugging the cross-framework comparison.
ms.set_context(mode=ms.PYNATIVE_MODE)



# concact_1 = ms.Tensor(np.random.rand(3,37))
# concact_2 = ms.Tensor(np.random.rand(2,37))
# concact_3 = ms.Tensor(np.random.rand(5,37))
# concat_op = ms.ops.Concat()
# input_x1 = ms.Tensor(np.array([[0, 1], [2, 1]]).astype(np.float32))
# input_x2 = ms.Tensor(np.array([[0, 1], [2, 1]]).astype(np.float32))
# print(input_x1.shape)
# print(input_x2.shape)
# print(concact_1.shape)
# print(concact_2.shape)
# concat_result = concat_op((concact_1,concact_2))
# print(concat_result.shape)



#config = Config("/home/data/zhangyh22/ABINet/ABINet_Mindspore/configs/train_abinet.yaml")
# net_ms = ms_ABINetIterModel(config=config)



# Numerical parity check between the PyTorch ABINet model and its MindSpore
# port: load the converted weights into both, feed the same random batch
# through each, and report the maximum absolute output difference.

set_seed(1)

config = Config("/home/data/zhangyh22/ABINet/ABINet_Mindspore/configs/train_abinet.yaml")
config.batch_size = 4

# Weight files: the original PyTorch checkpoint and its MindSpore conversion.
pth_path = "/home/data/zhangyh22/ABINet/ABINet_Mindspore/workdir/train-abinet/best-train-abinet.pth"
ms_ckpt = "/home/data/zhangyh22/ABINet/ABINet_Mindspore/ms_abinet_vision.ckpt"

# Both models in inference mode so dropout / batch-norm behave deterministically.
abinet_py = ABINetIterModel(config).eval()
abinet_ms = ms_ABINetIterModel(config).set_train(False)

# One shared random batch (N, C, H, W) converted into each framework's tensor.
data = np.random.rand(4, 3, 32, 128)
data_py = torch.tensor(data, dtype=torch.float32)
data_ms = ms.Tensor(data, dtype=ms.float32)

# Load the MindSpore weights and surface any parameters that were not
# matched — otherwise silent weight mismatches would masquerade as
# numerical differences between the frameworks.
param_dict = ms.load_checkpoint(ms_ckpt)
param_not_load = ms.load_param_into_net(abinet_ms, param_dict)
if param_not_load:
    print(f"MindSpore parameters not loaded: {param_not_load}")

# strict=False tolerates renamed/extra keys produced by the conversion helper.
abinet_py.load_state_dict(pytorch_params_test(pth_path), strict=False)

# Forward-only comparison: no gradients are needed on the PyTorch side.
with torch.no_grad():
    out_py = abinet_py(data_py)
out_ms = abinet_ms(data_ms)

print(out_py)
print(out_ms)

# Largest element-wise deviation between the two frameworks' outputs.
print(np.max(np.abs(out_py.detach().numpy() - out_ms.asnumpy())))

print("done")
# pt = ['abcdefg']
# print(pt)
# pt = str(pt)
# pt = pt.strip('[').strip(']').strip("'")
# print(pt[0])
# print(pt[2])

# #pt_scores = ms.Tensor(np.random.rand(2,2))
# pt_scores = torch.rand(2,2)
# pt_lengths = torch.rand(3,3)
# print(pt_lengths)
# print(pt_scores)

# pt_lengths = pt_scores.new_tensor(pt_lengths, dtype=torch.float16)
# print(pt_lengths)
# print(pt_scores)



# updates = ms.Tensor(np.ones((26,37)))



# label_expanddims = np.expand_dims()
# indices = ms.Tensor(np.array([0, 1, 2,0,0,5,3,6,8,3,2,36,35]), ms.int32)
# label_expand = label_expanddims(indices,-1)
# depth, on_value, off_value = 37, ms.Tensor(1.0, ms.float32), ms.Tensor(0.0, ms.float32)
# output = ms.ops.one_hot(indices ,depth, on_value, off_value, axis=-1)
# print(output)
# print(output.shape)

# ms_onehot_zeros = np.zeros()
    
# onehot_output = ms_onehot_zeros((26,depth),ms.float32) 


# onehot_output = ms.Tensor(onehot_output)
# label_expand = ms.Tensor(label_expand)
# updates = ms.Tensor(np.ones((26,37)))
# onehot_output = onehot_output.scatter_add(indices, updates)





# a = ms.Tensor(np.random.rand(2,3,2))
# print(a)





# a = ms.Tensor(np.random.rand(26))
# x = a.shape
# print(x)
# x = np.int0(x)
# x = x[0]
# print(x)
# b = 30
# ms_onehot_zeros = ms.ops.Zeros()
# #s_shape = a_shape + b
# onehot = ms_onehot_zeros((x,y,b),ms.float32)








# a = 30
# b = torch.Size([a])

# label = torch.rand(1,5)
# onehot = torch.zeros(label.size() + torch.Size([a]))
# print(onehot)




# a = torch.rand([])
# b = np.random.rand(1)[0]
# lenth = int(5)
# print(lenth.shape)
# len_64 = np.int64(lenth)

# len_32 = np.int32(lenth)

# print("done")

# # config = Config("/home/data4/zyh/ABINet/configs/train_abinet.yaml")
# # pth_path = "/home/data4/zyh/ABINet/best-train-abinet.pth"
# # pt_param = pytorch_params(pth_path)
# # print("="*20)
# # ms_param = mindspore_params(ms_ResTranformer(config))
# # #ms_param = mindspore_params(BaseVision(config))
# # #ms_param = mindspore_params(ms_BCNLanguage(config))
# # ms.set_context(mode=ms.PYNATIVE_MODE)

# # a = torch.rand(2,2,2)

# # print(a)

# # a1 = a.flatten(1,2)

# # print(a1)

# # a2 =a.view(2,-1)







# # abinet_py = ABINetIterModel(config)
# # abinet_ms = ms_ABINetIterModel(config)

# # image_py = torch.rand(1,3,32,128)

# # image_ms = ms.Tensor(np.random.rand(1,3,32,128),ms.float32)


# # after_image_py = abinet_py(image_py)

# # after_image_ms = abinet_ms(image_ms)





# """

# alignment_1_py = torch.rand(1,26,512)

# alignment_2_py = torch.rand(1,26,512)

# alignment_1_ms = ms.Tensor(np.random.rand(1,26,512),ms.float32)
# alignment_2_ms = ms.Tensor(np.random.rand(1,26,512),ms.float32)

# alignment_py = BaseAlignment(config)
# alignment_ms = ms_BaseAlignment(config)

# after_alignment_py = alignment_py(alignment_1_py,alignment_2_py)

# after_alignment_ms = alignment_ms(alignment_1_ms,alignment_2_ms)
# """
# """
# language = ms.Tensor(np.random.rand(1,26,37),ms.float32)
# language_py = torch.rand(1,26,37)
# print("language.shape")
# print(language.shape)
# print("after_language_ms.shape")
# ms_language = ms_BCNLanguage(config)
# py_language = BCNLanguage(config)
# after_language111 = ms_language(language)
# length_py = torch.tensor([6])
# print("after_language_py.shape")
# after_language_py = py_language(language_py,length_py)
# """
# """
# d_model =  _default_tfmer_cfg['d_model']
# nhead =  _default_tfmer_cfg['nhead']
# d_inner =  _default_tfmer_cfg['d_inner']
# dropout =  _default_tfmer_cfg['dropout']
# activation =  _default_tfmer_cfg['activation']
# num_layers = 4
# d_model = d_model
# detach = config.model_language_detach
# use_self_attn = config.model_language_use_self_attn
# loss_weight = config.model_language_loss_weight
# max_length = config.dataset_max_length + 1  # additional stop token
# debug =  False
        
# proj = nn.Dense(37, d_model, weight_init ='uniform',bias_init='uniform',has_bias =False)
# token_encoder = PositionalEncoding(d_model, max_len=max_length)
# pos_encoder = PositionalEncoding(d_model, dropout=1.0, max_len=max_length)
#         #decoder_layer = TransformerDecoderLayer(d_model, nhead, d_inner, dropout, 
#                 #activation, self_attn=self.use_self_attn, debug=self.debug)
#         #self.model = TransformerDecoder(decoder_layer, num_layers)
# model = nn.TransformerDecoder(batch_size=1,num_layers=num_layers,hidden_size=d_model,num_heads=nhead,ffn_hidden_size=d_inner,
#                         hidden_dropout_rate=dropout,attention_dropout_rate=dropout,hidden_act=activation,src_seq_length=26,tgt_seq_length=26)


# cls = nn.Dense(d_model, 37, weight_init ='uniform',bias_init='uniform')

# print("language.shape")
# print(language.shape)
# print("after_language.shape")
# #after_language1111 = ms_language(language)


# embed = proj(language)  # (N, T, E)
# embed = embed.transpose(1,0,2)
#         #embed = embed.permute(1, 0, 2)  # (T, N, E)
# embed = token_encoder(embed)  # (T, N, E)
# embed = embed.transpose(1,0,2)
#         #padding_mask = self._get_padding_mask(lengths, self.max_length)  # needs shape (1, 26, 26); currently (1, 26)
# padding_mask = ms.Tensor(ms.numpy.ones((1, 26, 26)), ms.dtype.float32)
# zeroo = ms.ops.Zeros()
# zeros = zeroo((1,26,512),ms.float16)  # needs to be (1, 26, 512); currently (26, 1, 512)
# zeros = zeros.transpose(1,0,2)
# qeury = pos_encoder(zeros)  # needs to be (1, 26, 512); currently (26, 1, 512)
# qeury = qeury.transpose(1,0,2)
# location_mask = ms.Tensor(ms.numpy.ones((1, 26, 26)), ms.dtype.float32)
#         #location_mask = self._get_location_mask(self.max_length, tokens.device)  # needs shape (1, 26, 26); currently (26, 26)
# output,past = model(hidden_states=qeury, encoder_output=embed,
#                 attention_mask = padding_mask,
#                 memory_mask=location_mask)
#                 #memory_key_padding_mask=padding_mask  # (T, N, E)
#         #output = output.permute(1, 0, 2)  # (N, T, E)


# logits = cls(output)  # (N, T, C)
# charset = CharsetMapper(config.dataset_charset_path, max_length=max_length)

# logit_argmax = ms.ops.Argmax()(logits)
# out = (logit_argmax == charset.null_label)
# abn = out.any(-1)

# out1 = (out.cumsum(-1) == 1) 
# out1 = ms.Tensor.asnumpy(out1)
# out = ms.Tensor.asnumpy(out)
# out = out & out1
# out[0][6] = True
# out1 = out.argmax(-1)
# out1 = out1 + 1  # additional end token
# logit_shape1 = logits.shape[1] 
# out1 = ms.Tensor(out1)
# logit_tensor = ms.Tensor(logit_shape1)
#         #out = torch.where(abn, out1, logit_tensor)
# # out1 = ms.Tensor.asnumpy(out1)
# # logit_tensor = ms.numpy(logit_tensor)
# out_np = ms.numpy.where(abn, out1, logit_tensor)
# out = ms.Tensor(out_np)

# print(output.shape)
# """

# """
# tran=ResTranformer(config)
# img = torch.rand(1,3,32,128)

# img_after = tran(img)

# print("img_after_pytorch.shape")
# print(img_after.shape)



# ms_vision = ms_ResTranformer(config)

# image = ms.Tensor(np.random.rand(1,3, 32, 128), ms.float32)
# print("image.shape")
# print(image.shape)
# after_image = ms_vision(image)
# print("after_image.shape")
# print(after_image.shape)
# """

# """
# x = ms.Tensor(np.ones((1,512,256), dtype=np.float32))
# print(x.shape)
# x = x.transpose(0,2,1)
# print(x.shape)
# """
# """
# def _get_location_mask(sz, device=None):
#     mask = torch.eye(sz, device=device)
#     mask = mask.float().masked_fill(mask == 1, float('-inf'))
#     return mask


# def _get_location_mask1(sz, device=None):
#     eyes = ms.ops.Eye()
#     a = 5
#     mask1 = eyes(sz,sz,ms.bool_)
#     cast = ms.ops.Cast()
#     mask = cast(mask1,ms.float32)
#     mask = ms.ops.masked_fill(mask,mask1,float('-inf') )
#     expand_dims = ms.ops.ExpandDims()
#     mask = expand_dims(mask,0)
#     mask[0] = mask[1]
#     #mask = mask.float().masked_fill(mask == 1, float('-inf'))
#     return mask

# def _get_padding_mask(length, max_length):
#     length = length.unsqueeze(-1)
#     grid = torch.arange(0, max_length, device=length.device).unsqueeze(0)
#     return grid >= length

# def _get_padding_mask1(length, max_length):
#     length = ms.numpy.expand_dims(length, -1)
#     #length = length.unsqueeze(-1)
#     grid = ms.numpy.arange(0, max_length,dtype=ms.float16)
#     grid = ms.numpy.expand_dims(grid, 0)
#     grid = ms.Tensor(grid)
#     return grid >= length

# #(N,T,C)384 26 1
# location_tensor = torch.rand(384,26,1)

# location1_tensor = ms.Tensor(np.random.rand(384,26,1), ms.float32)

# padding_tensor = torch.ones(2,)
# padding_tensor1 = ms.Tensor(np.ones(2,), ms.float32)
# location = _get_location_mask(3)
# location1 = _get_location_mask1(3)

# padding = _get_padding_mask(padding_tensor,3)

# print(location)

# padding1 = _get_padding_mask1(padding_tensor1,3)
# print(location1)
# """