import torch
import math
from torch import nn
import dltools

# Quick sanity check of dltools' multi-head attention helper:
# identical query/key/value tensors, masked by per-batch valid lengths.
print(dltools.try_gpu())

num_hiddens, num_heads, dropout = 100, 5, 0.2
attention = dltools.MultiHeadAttention(
    num_hiddens, num_hiddens, num_hiddens, num_hiddens, num_heads, dropout
)
attention.eval()

batch_size, num_queries = 2, 4
valid_lens = torch.tensor([3, 2])
queries = torch.ones((batch_size, num_queries, num_hiddens))
res = attention(queries, queries, queries, valid_lens)
print(res.shape)

# Print the two ingredients of the positional-encoding formula for inspection:
# a column of positions and the even feature indices.
print(torch.arange(1000, dtype=torch.float32).reshape(-1, 1))
print(torch.arange(0, num_hiddens, 2, dtype=torch.float32))

# Positional encoding
class PositionalEncoding(nn.Module):
    """Add fixed sinusoidal position information to token embeddings.

    Implements the encoding from "Attention Is All You Need":
        P[0, pos, 2i]   = sin(pos / 10000^(2i / d))
        P[0, pos, 2i+1] = cos(pos / 10000^(2i / d))

    Args:
        num_hiddens: embedding dimension d (odd values are supported).
        dropout: dropout probability applied after adding the encoding.
        max_len: maximum sequence length the table can cover.
    """

    def __init__(self, num_hiddens, dropout=0.0, max_len=1000, **kwargs):
        super().__init__(**kwargs)
        self.dropout = nn.Dropout(dropout)
        # Pre-computed table of shape (1, max_len, num_hiddens); the leading
        # singleton dim broadcasts over the batch dimension in forward().
        self.P = torch.zeros((1, max_len, num_hiddens))
        # Angle matrix of shape (max_len, ceil(num_hiddens / 2)).
        # BUGFIX: the base must be 10000 as in the original Transformer paper
        # (this previously used 1000, which skews every frequency).
        X = torch.arange(max_len, dtype=torch.float32).reshape(-1, 1) / torch.pow(
            10000, torch.arange(0, num_hiddens, 2, dtype=torch.float32) / num_hiddens
        )
        # Even columns take sin, odd columns take cos; together they cover all
        # num_hiddens columns. The cos part is sliced to num_hiddens // 2
        # columns so odd num_hiddens no longer raises a shape mismatch.
        self.P[:, :, 0::2] = torch.sin(X)
        self.P[:, :, 1::2] = torch.cos(X[:, : num_hiddens // 2])

    def forward(self, X):
        # Add the first X.shape[1] rows of the table, moved to X's device.
        X = X + self.P[:, :X.shape[1], :].to(X.device)
        return self.dropout(X)

# Plot a few columns of the sinusoidal position table: adding the encoding
# to an all-zero input just reads the table back out.
encoding_dim, num_steps = 32, 60
pos_encoding = PositionalEncoding(encoding_dim, 0)
pos_encoding.eval()
encoded = pos_encoding(torch.zeros(1, num_steps, encoding_dim))
table = pos_encoding.P[:, :encoded.shape[1], :]
print(f"X.shape:{encoded.shape}")
print(f"P.shape:{table.shape}")

cols = range(6, 10)
dltools.plot(
    torch.arange(num_steps),
    table[0, :, 6:10].T,
    xlabel="Row (position)",
    figsize=(6, 2.5),
    legend=["Col %d" % c for c in cols],
)
# dltools.plt.show()  # uncomment to display the figure interactively

# Tiny demo: averaging over the last axis with keepdim preserves the rank,
# leaving a trailing singleton dimension (useful for broadcasting later).
nested = [
    [[7, 8, 9], [10, 11, 12]],
    [[1, 2, 3], [4, 5, 6]],
]
test = torch.tensor(nested, dtype=torch.float32)
print(f'test.shape:{test.shape}')
mean = test.mean(dim=-1, keepdim=True)
print(f'mean:{mean}')
