import copy

import torch
import torch.nn as nn
from torch.autograd import Variable
import transformer as tf

# Model hyperparameters for the smoke test below.
head = 8          # number of attention heads
dropout = 0.2     # dropout probability shared by every sub-layer
N = 8             # number of stacked encoder / decoder layers

# Word-embedding dimensionality (d_model) is 512.
size = d_model = 512

# Hidden width of the position-wise feed-forward sub-layer.
d_ff = 1024

# Vocabulary size is 1000.
vocab = 1000

# Shorthand for deep-copying modules so each layer gets its own weights.
c = copy.deepcopy

# Input x: a 2 x 4 batch of token ids.
# torch.autograd.Variable has been deprecated since PyTorch 0.4 — a plain
# long tensor behaves identically, so build one directly.
x = torch.tensor(
    [
        [100, 2, 421, 508],
        [491, 998, 1, 221],
    ],
    dtype=torch.long,
)

# Token embedding layer: maps ids to d_model-dimensional vectors.
emb = tf.Embeddings(d_model, vocab)
emb_result = emb(x)
print(emb_result)
print(emb_result.shape)

# Positional encoding. Use the shared `dropout` constant instead of a
# hard-coded 0.2 so a single knob controls every layer (same value today).
pe = tf.PositionalEncoding(d_model, dropout)
pe_result = pe(emb_result)
print(pe_result)
print(pe_result.shape)

# Initialize inputs for the encoder smoke test.
# x = pe_result
# All-zero attention mask of shape (8, 4, 4). torch.autograd.Variable is
# deprecated since PyTorch 0.4; a plain tensor behaves identically.
mask = torch.zeros(8, 4, 4)
# Multi-headed attention layer.
mha = tf.MultiHeadedAttention(head, d_model, dropout)

# Position-wise feed-forward layer.
ff = tf.PositionwiseFeedForward(d_model, d_ff, dropout)

# A single encoder layer (deep-copied attention + feed-forward).
el = tf.EncoderLayer(size, c(mha), c(ff), dropout)
# Encoder: N stacked copies of the layer.
en = tf.Encoder(el, N)
# en_result = en(x, mask)
# print(en_result)
# print(en_result.shape)

# NOTE(review): these aliases are dead — they are reassigned near the bottom
# of the file before their only (uncommented) use.
source_mask = target_mask = mask
# Decoder layer: self-attention, source-attention and feed-forward,
# each sub-module deep-copied so it owns its weights.
dl = tf.DecoderLayer(size, c(mha), c(mha), c(ff), dropout)
# Decoder: N stacked copies of the layer.
de = tf.Decoder(dl, N)
# de_result = de(x, en_result, source_mask, target_mask)
# print(de_result)
# print(de_result.shape)

# Output generator: projects d_model features to vocab-sized logits.
gen = tf.Generator(d_model, vocab_size=vocab)
# gen_result = gen(de_result)
# print(gen_result)
# print(gen_result.shape)

# The "embed" modules handed to EncoderDecoder here are positional-encoding
# copies only; token embedding is applied manually via `emb` below.
# source_embed = nn.Embedding(vocab, d_model)
# target_embed = nn.Embedding(vocab, d_model)
source_embed = c(pe)
target_embed = c(pe)

# Source and target token ids (same 2 x 4 batch as `x` above).
# torch.autograd.Variable is deprecated; plain tensors behave identically.
source = target = torch.tensor(
    [
        [100, 2, 421, 508],
        [491, 998, 1, 221],
    ],
    dtype=torch.long,
)

# All-zero attention masks of shape (8, 4, 4).
source_mask = target_mask = torch.zeros(8, 4, 4)

# Full encoder-decoder model wired from the pieces built above.
ed = tf.EncoderDecoder(en, de, source_embed, target_embed, gen)
print(ed)
# NOTE(review): token ids are embedded with `emb` here because source_embed /
# target_embed only add positional encoding — confirm this matches what
# tf.EncoderDecoder expects as its src/tgt inputs.
ed_result = ed(emb(source), emb(target), source_mask, target_mask)
print(ed_result)
print(ed_result.shape)

