import numpy as np
import torch
from torch import tensor, nn, cuda
from torch.nn import functional
# Default newly-created float tensors to the GPU, but only when one is actually
# present -- the original unconditional call crashes on CPU-only machines.
# NOTE(review): set_default_tensor_type is deprecated in modern PyTorch;
# torch.set_default_device / torch.set_default_dtype are the replacements.
if cuda.is_available():
    torch.set_default_tensor_type(torch.cuda.FloatTensor)

# Toy input: a fixed 6x6 score matrix (six 6-dim feature rows).  The original
# built a list of two byte-identical copies hard-coded to device='cuda:0';
# the device is now left to the global default so the script also runs on
# CPU-only machines, and the matrix is written out once.
scores = tensor([[ 1.1108,  2.1974,  3.4145, -1.9743, -1.4355, -0.7595],
                 [ 3.4210,  4.9091,  2.0919, -2.5529, -1.9208, -2.2419],
                 [ 3.1305,  5.3193,  3.9172, -2.4626, -2.1018, -2.4932],
                 [ 3.0258,  2.3903, -0.2149, -1.8888, -1.3622, -0.9463],
                 [ 4.7503,  3.3249,  3.2370, -2.4487, -2.1267, -1.1225],
                 [ 2.1766,  1.8763,  2.2034, -1.3738, -1.8671, -1.3329]])

x2 = [scores, scores.clone()]  # two identical matrices, as in the original experiment

x2 = x2[0]  # keep only the first matrix
print("x2.sum(dim=1)", x2.sum(dim=1))

# One linear layer: Q = x2 @ W.T  (a learned projection of the scores).
line1 = nn.Linear(6, 6)
Q = line1(x2)
print(Q, Q.device, Q.dtype, Q.size(), sep='\n')  # value, device, dtype, shape

# Random stand-in for an encoder output: [src_len=3, batch=6, feat=6].
t = torch.rand(3, 6, 6)
print(t, t.device, t.dtype, t.size(), sep='\n')  # value, device, dtype, shape

class Attention(nn.Module):
    """Additive (Bahdanau-style) attention.

    Scores every encoder position against the current decoder state and
    returns a softmax distribution over the source positions.
    """

    def __init__(self, enc_hid_dim, dec_hid_dim):
        """
        enc_hid_dim: encoder hidden size; encoder outputs are expected to
                     carry enc_hid_dim * 2 features (bidirectional encoder).
        dec_hid_dim: decoder hidden-state size.
        """
        super().__init__()
        # Projects the concatenated [decoder state ; encoder output] pair
        # down to dec_hid_dim.
        self.attn = nn.Linear((enc_hid_dim * 2) + dec_hid_dim, dec_hid_dim, bias=False)
        # Collapses each energy vector to a single scalar score.
        self.v = nn.Linear(dec_hid_dim, 1, bias=False)

    def forward(self, s, enc_output):
        """Return attention weights of shape [batch_size, src_len].

        s          = [batch_size, dec_hid_dim]
        enc_output = [src_len, batch_size, enc_hid_dim * 2]
        """
        src_len = enc_output.shape[0]

        # Repeat the decoder state once per source position:
        # s = [batch_size, src_len, dec_hid_dim]
        s = s.unsqueeze(1).repeat(1, src_len, 1)
        # enc_output = [batch_size, src_len, enc_hid_dim * 2]
        enc_output = enc_output.transpose(0, 1)

        # energy = [batch_size, src_len, dec_hid_dim]
        energy = torch.tanh(self.attn(torch.cat((s, enc_output), dim=2)))

        # attention = [batch_size, src_len]
        attention = self.v(energy).squeeze(2)

        return functional.softmax(attention, dim=1)

# Sanity run: attend over the random "encoder output" t using Q as the
# decoder state.  Attention(3, 6) matches the shapes built above:
#   enc_hid_dim=3  -> t's feature dim is 3 * 2 = 6
#   dec_hid_dim=6  -> Q's feature dim is 6
model = Attention(3, 6)

# score = [batch=6, src_len=3]; each row is a softmax over source positions.
score = model(Q, t)
print(score)

