import torch
import torch.nn as nn

# Sinusoidal positional encoding, variant 1: build a (position, frequency)
# angle table once, then interleave sin/cos along the feature axis.
max_len = 5000
base = 10000
num_features = 256

x = torch.randn(5 * 32, 250, 256)

position = torch.arange(max_len)  # (max_len,)
index = torch.arange(0, num_features, 2) / num_features  # (num_features // 2,)
freqs = base ** index  # per-pair frequency divisors, base ** (2k / d)

# Outer quotient: angle[i, j] = position[i] / freqs[j]
indices = position[:, None] / freqs[None, :]  # (max_len, num_features // 2)
sin = torch.sin(indices)
cos = torch.cos(indices)

# Stack as (..., 2) then flatten so even feature slots hold sin and odd
# slots hold cos.
positional_encoding = torch.stack([sin, cos], dim=-1)  # (max_len, num_features // 2, 2)
pos = positional_encoding.reshape(max_len, num_features)

# Broadcast-add over the batch dimension: (B, T, D) + (T, D).
out = x + pos[: x.shape[1]]

print(out.shape)

import math

# Sinusoidal positional encoding, variant 2: fill the even/odd feature
# columns of a preallocated table via strided assignment.
input_size = 256

# torch.zeros already defaults to requires_grad=False, so the explicit
# flag the original passed here was redundant.
pe = torch.zeros(max_len, input_size)
positions = torch.arange(0, max_len).unsqueeze(1).float()  # (max_len, 1)
# Equivalent to 1 / base ** (2k / d), computed in log-space for numerical
# stability.
denominator = torch.exp(
    torch.arange(0, input_size, 2).float() * -(math.log(10000.0) / input_size)
)

pe[:, 0::2] = torch.sin(positions * denominator)  # even columns: sin
pe[:, 1::2] = torch.cos(positions * denominator)  # odd columns: cos
pe = pe.unsqueeze(0)  # (1, max_len, input_size) for batch broadcasting

x = torch.randn(5 * 32, 250, 256)
# pe is not tracked by autograd, so the clone().detach() the original did
# here was a pointless extra copy; a plain slice broadcasts identically.
pes = pe[:, : x.size(1)]
out2 = pes + x
print(out2.shape)
