import torch
from torch import nn
from torch.nn import functional as F
from nlpx.model import attention, ClassSelfAttention, MultiHeadClassSelfAttention, RNNAttention, \
	RNNCNNAttention,  ResRNNCNNAttention, CNNRNNAttention, RotaryAttention
from torch_model_hub.model.layer import TextCNNLayer, RNNLayer


if __name__ == '__main__':
	# Smoke test: run each attention / layer variant on a random batch of
	# "sentence" embeddings and print the resulting output shapes.
	batch_size = 4
	sent_length = 10
	word_dim = 16
	# X is batch-first: (batch, seq, embed). Every module below is fed this layout.
	X = torch.randn(batch_size, sent_length, word_dim)

	# mask = torch.ones(X.shape)
	# mask[:, 8:, :] = 0

	atten = RotaryAttention(word_dim)
	output = atten(X)
	print("RotaryAttention", output[0].shape, output[1].shape)

	# FIX: nn.MultiheadAttention defaults to batch_first=False, which reads a
	# 3-D input as (seq, batch, embed). X is batch-first, so without
	# batch_first=True the batch and sequence axes were silently swapped
	# (attention was computed across the batch instead of across tokens).
	# This also matches the batch_first=True Transformer modules below.
	atten = nn.MultiheadAttention(word_dim, 1, batch_first=True)
	output = atten(X, X, X)
	print("nn.MultiheadAttention:", output[0].shape, output[1].shape)

	# Unbatched (2-D) attention: a single (seq, embed) input is accepted,
	# so batch_first is irrelevant here.
	X_attn = torch.randn(batch_size, word_dim)
	atten = nn.MultiheadAttention(word_dim, 1)
	output = atten(X_attn, X_attn, X_attn)
	print("二维attention:", output[0].shape, output[1].shape)

	encoder_layer = nn.TransformerEncoderLayer(d_model=word_dim, nhead=2, batch_first=True)
	transformer_encoder = nn.TransformerEncoder(encoder_layer, num_layers=1)
	out = transformer_encoder(X)
	print("nn.TransformerEncoder:", out.shape)

	# Decoder consumes the target sequence X plus the encoder memory `out`.
	decoder_layer = nn.TransformerDecoderLayer(d_model=word_dim, nhead=2, batch_first=True)
	transformer_decoder = nn.TransformerDecoder(decoder_layer, num_layers=1)
	out = transformer_decoder(X, out)
	print("nn.TransformerDecoder:", out.shape)

	transformer_model = nn.Transformer(d_model=word_dim, nhead=2, num_encoder_layers=1, num_decoder_layers=1, batch_first=True)
	out = transformer_model(X, X)
	print("nn.Transformer:", out.shape)

	# print(attention(X, X, X))

	# atten = ClassSelfAttention(word_dim)
	# print(atten(X).shape)
	#
	atten = MultiHeadClassSelfAttention(word_dim, 2)
	print(atten(X).shape)

	atten = RNNAttention(embed_dim=word_dim, out_features=2, hidden_size=11, residual=True)
	print(atten(X).shape)

	atten = CNNRNNAttention(embed_dim=word_dim, out_features=2, hidden_size=11, residual=False)
	print(atten(X).shape)

	atten = RNNCNNAttention(embed_dim=word_dim, out_features=2, seq_length=11, cnn_channels=12)
	print(atten(X).shape)

	atten = TextCNNLayer(embed_dim=word_dim, seq_length=11, out_channels=12, layer_norm=True)
	print(atten(X).shape)

	atten = RNNLayer(embed_dim=word_dim, bidirectional=False, layer_norm=True)
	print(atten(X).shape)

	# atten = ResRNNCNNAttention(embed_dim=word_dim, seq_length=11, cnn_channels=12)
	# print(atten(X).shape)

	# Manual TextCNN sketch, kept for reference (fixed the duplicated
	# `convs = convs =` assignment typo from the original draft):
	# seq_length = 4
	# out_channels = 8
	# kernel_sizes = (2, 3, 4)
	# convs = nn.ModuleList([
	# 	nn.Sequential(
	# 		nn.Conv1d(in_channels=word_dim, out_channels=word_dim, kernel_size=kernel_size, bias=False),
	# 		# nn.ReLU(inplace=True),  # inplace=True mutates its input in place; otherwise a new output tensor is produced
	# 		# nn.AdaptiveMaxPool1d(seq_length)
	# 	) for kernel_size in kernel_sizes
	# ])
	# inputs = X.transpose(2, 1)
	# output = torch.cat([conv(inputs) for conv in convs], dim=-1)
	# output = F.relu(output)
	# output = F.adaptive_max_pool1d(output, seq_length)
	# output = output.transpose(2, 1)
	# print(output.shape)