from model import make_model
from utils import get_key_padding_mask
from utils import get_subsequent_mask
from utils import get_key_padding_mask_with_subsequent_mask
import torch
import parameters as p
# a basic copy test for transformer
# e.g input [1,5,8,9,3] output [1,5,8,9,3]

# The encoder uses only the padding mask;
# the decoder applies both the padding mask and the subsequent (causal) mask.
# One row of the copy task: <SOS>, token ids, <EOS>, then padding.
SEQUENCE = [p.SOS_INDEX, 57, 60, 43, 44, 156, 111, 347,
            172, p.EOS_INDEX, p.PAD_INDEX, p.PAD_INDEX]

# Number of identical rows in the batch. 1 reproduces the original
# single-row test; raise it to exercise batching.
BATCH_SIZE = 1
x = torch.tensor([SEQUENCE] * BATCH_SIZE)

# Teacher forcing: the decoder input drops the last token, the gold
# target drops the first (shifted-by-one pairs).
tgt = x[:, :-1]
tgt_y = x[:, 1:]  # loss target; unused in this forward-only smoke test

src_mask = get_key_padding_mask(x)
tgt_mask = get_key_padding_mask_with_subsequent_mask(tgt)

# Tiny model (d_model=4) — just enough to run the copy test quickly.
model = make_model(src_vocab=400, tgt_vocab=400,
                   encoder_layers=3, decoder_layers=3,
                   d_model=4, d_ff=64, h=2, dropout=0.2)

y = model(src=x, target=tgt, src_mask=src_mask, target_mask=tgt_mask)

# Predicted token indices. torch.exp suggests `y` holds log-probabilities.
# NOTE(review): argmax over dim=-2 preserves the original behavior, but if
# the model output is (batch, seq, vocab) the vocabulary axis is -1 — confirm
# against make_model's generator output shape.
print(torch.argmax(torch.exp(y), dim=-2))