import torch
import copy

from utils import make_src_mask, make_tgt_mask
from make_model import make_model

if __name__ == '__main__':

    # Demo driver: builds a small Transformer and runs one forward pass on
    # random token batches, printing tensor/mask shapes along the way.
    # d_model / d_ff / num_head follow the "Attention Is All You Need" defaults;
    # the vocab sizes and batch size are arbitrary toy values.
    source_vocab_size = 10
    target_vocab_size = 10
    num_layer = 2
    d_model = 512
    d_ff = 2048
    num_head = 8
    dropout = 0.1
    batch_size = 2

    model = make_model(
        source_vocab=source_vocab_size,
        target_vocab=target_vocab_size,
        N=num_layer,
        feature_dim=d_model,
        d_ff=d_ff,
        num_head=num_head,
        dropout=dropout,
    )

    # Special token ids (must stay below min_token so random tokens never collide).
    PAD = 0  # <pad> padding marker: pads short sentences so a batch has uniform length
    BOS = 1  # <bos> beginning-of-sentence marker
    EOS = 2  # <eos> end-of-sentence marker

    # Random source tokens drawn from [min_token, source_vocab_size) so none
    # of them clash with PAD/BOS/EOS above. Shape: (batch_size, 4).
    min_token = 3
    src_tokens = torch.randint(min_token, source_vocab_size, (batch_size, 4))
    src_mask = make_src_mask(src_tokens, PAD)

    # Derive a fake target sequence from the source. Tensor.clone() is the
    # idiomatic (and cheaper) way to copy a tensor, vs. copy.deepcopy.
    origin_tgt_tokens = src_tokens.clone()
    origin_tgt_tokens[:, 0] = BOS
    origin_tgt_tokens[:, -1] = EOS

    # Teacher forcing ("shift right"): the decoder input drops the final
    # token; the training label drops the first one.
    tgt_tokens = origin_tgt_tokens[:, :-1]
    tgt_tokens_y = origin_tgt_tokens[:, 1:]
    tgt_mask = make_tgt_mask(tgt_tokens, PAD)

    print('src_tokens size:     ', src_tokens.size())
    print('src_mask size:       ', src_mask.size())
    print('src_mask:           \n', src_mask)

    print('tgt_tokens size:     ', tgt_tokens.size())
    print('tgt_mask size:       ', tgt_mask.size())
    print('tgt_mask:          \n', tgt_mask)

    # One forward pass; result shape depends on the project's make_model
    # implementation (not visible here).
    result = model(
        source=src_tokens,
        target=tgt_tokens,
        source_mask=src_mask,
        target_mask=tgt_mask,
    )
    print('result size:         ', result.size())
