
import numpy as np
import luojianet as luojianet
import torch
import torch.nn as nn
import torch.nn.functional as F

# from _legacy.nn.transformer import MultiheadAttention
from transformer.mha import MultiheadAttention
from _legacy.nn.dropout import Dropout2d
from dropout.droppath import DropPath
from activation.hardsigmoid import HardSigmoid
from convolution.apconv import AdptivePaddingConv2d
from convolution.dsconv import DepthwiseSeparableConv2d
from convolution.dyconv import DynamicConv2d
from convolution.irconv import InvertedResidualConv2d, InvertedResidualConv2dV3

# --- MultiheadAttention smoke test --------------------------------------
# Random (seq_len, batch, embed) inputs: query has length 20, key/value 10.
query = np.random.rand(20, 32, 512)
key = np.random.rand(10, 32, 512)
value = np.random.rand(10, 32, 512)
dropout_input = np.random.randn(20, 16)


def _to_f32_tensor(arr):
    """Wrap a numpy array as a float32 luojianet Tensor."""
    return luojianet.Tensor(arr, luojianet.float32)


luojia_query = _to_f32_tensor(query)
luojia_key = _to_f32_tensor(key)
luojia_value = _to_f32_tensor(value)
luojia_dropout_input = _to_f32_tensor(dropout_input)

luojianet_multihead_attn = MultiheadAttention(embed_dims=512, num_heads=8)
# luojianet_output, luojianet_attn_output_weights = luojianet_multihead_attn(query=luojia_query, key=luojia_key, value=luojia_value)
luojianet_output = luojianet_multihead_attn(
    query=luojia_query, key=luojia_key, value=luojia_value
)

print(luojianet_output.shape)
# print(luojianet_attn_output_weights.shape)

# luojianet_dropout = DropPath(p=0.2)
# print(luojianet_dropout(luojia_dropout_input))

# luojianet_hardsigmoid = HardSigmoid()
# print(luojianet_hardsigmoid(luojia_dropout_input))



# torch_query = torch.from_numpy(query).float()
# torch_key = torch.from_numpy(key).float()
# torch_value = torch.from_numpy(value).float()
# torch_dropout_input =  torch.from_numpy(dropout_input).float()


# torch_multihead_attn = torch.nn.MultiheadAttention(embed_dim=512, num_heads=8)
# torch_output, torch_attn_output_weights = torch_multihead_attn(torch_query, torch_key, torch_value)

# print("***********************")
# print(torch_output.shape)
# print(torch_attn_output_weights.shape)

# torch_dropout = torch.nn.Dropout2d(p=0.2)
# print(torch_dropout(torch_dropout_input))


# import luojianet.ops as ops

# src_pad = np.arange(1 * 2 * 2 * 2).reshape((1, 2, 2, 2))
# x = luojianet.Tensor(src_pad, dtype=luojianet.float32)
# output = ops.pad(x, [1, 0, 0, 1], mode='constant', value=6.0)
# print("luojianet padding output: ")
# print(output.shape)

# torch_x = torch.from_numpy(src_pad).float()
# torch_output = F.pad(torch_x, [1, 0, 0, 1], "constant", 6.0)
# print("pytorch padding output: ")
# print(torch_output.shape)

# --- InvertedResidualConv2dV3 smoke test ---------------------------------
# NCHW all-ones input: batch 20, 16 channels, 50x100 spatial (float64 numpy).
conv_input_orin = np.full((20, 16, 50, 100), 1.0)
conv_input = luojianet.Tensor(conv_input_orin, luojianet.float32)

# Earlier convolution variants tried against the same input:
# m = AdptivePaddingConv2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2), dilation=(3, 1))
# m = AdptivePaddingConv2d(16, 33, (3, 5), stride=(2, 1),  padding=(3,3), dilation=(3, 4))
# m = DynamicConv2d(16, 33, kernel_size=3, stride=(2, 1),  padding=(1,3), dilation=(3, 4))
# m = InvertedResidualConv2dV3(16, 33, stride=2, dilation=3)
m = InvertedResidualConv2dV3(16, 33, mid_channels=32, stride=2)

conv_output = m(conv_input)
print(conv_output.shape)


from misc.l2norm import L2Norm
from misc.scale import Scale
from misc.weightinit import TruncNormal, trunc_normal_
from luojianet.common import initializer

# --- L2Norm / Scale / trunc_normal_ smoke test ---------------------------
scale = Scale(10)
l2norm = L2Norm(channels=16)

# Normalize the conv input, then apply the learned scale.
l2norm_value = l2norm(conv_input)
l2norm_value_scale = scale(l2norm_value)

# l2norm_value_scale_trucknorm = TruncNormal()(conv_input_orin)
# NOTE(review): trunc_normal_ presumably re-fills the tensor with a
# truncated-normal sample — confirm against misc.weightinit.
l2norm_value_scale_trucknorm = luojianet.Tensor(trunc_normal_(l2norm_value_scale))


# l2norm_value_scale_trucknorm = initializer.initializer(TruncNormal())(l2norm_value_scale)

# print(l2norm_value)
# print(l2norm_value_scale)
# print(l2norm_value_scale_trucknorm.shape)


from transformer.embed import AdaptivePadding, PatchEmbed, PatchMerging
from transformer.ffn import FFN
from collections import OrderedDict

# adaptive_pad = PatchEmbed(in_channels=16, kernel_size=(3, 5), stride=(2, 1),  padding='corner', dilation=(3, 4))
# adaptive_pad_out = adaptive_pad(conv_input)
# print(adaptive_pad_out[0].shape)

# --- FFN smoke test -------------------------------------------------------
# (batch, tokens, channels) all-ones input for the transformer FFN.
PatchMerging_input_orin = np.ones((20, 9, 16))
PatchMerging_input = luojianet.Tensor(PatchMerging_input_orin, luojianet.float32)

# adaptive_pad = PatchMerging(in_channels=16, kernel_size=3, stride=2,  padding='corner', dilation=3)
# adaptive_pad_out = adaptive_pad(PatchMerging_input, (3, 3))
# print(adaptive_pad_out[0].shape)

# Activation config for the FFN; keyword form keeps insertion order (3.6+).
act_cfg = OrderedDict(type='relu')
test_ffn = FFN(act_cfg=act_cfg)
ffn_out = test_ffn(PatchMerging_input)
print(ffn_out.shape)


