import sys
sys.path.append('/tmp/JJK/torch_areas/transformer_learning/transformers_network')
sys.path.append('/tmp/JJK/torch_areas/transformer_learning')

from text2text.datasetProcess import Iterator, datasets, src_vocab, trg_vocab
from transformers_network.decoder import Decoder
from transformers_network.encoder import Encoder
from transformers_network.transformer import Transformer


# super parameters
# Hyperparameters for data batching.
batch_size = 128
max_len = 41    # the maximum length of the longest sentence in the dataset
# NOTE(review): keyword is spelled `drop_reminder` in the project's Iterator API
# (likely a typo for `drop_remainder`) — keep the spelling to match its signature.
drop_reminder = True

# Transformer architecture hyperparameters.
d_model = 512
d_ff = 2048
n_layers = 6
n_heads = 8


# Pass the `drop_reminder` variable instead of a hard-coded True so the
# constant above actually controls the iterator's behavior.
train_iterator = Iterator(datasets, src_vocab, trg_vocab, batch_size=batch_size, max_len=max_len, drop_reminder=drop_reminder)


""" 实例化模型。
"""
src_vocab_size = len(src_vocab)
trg_vocab_size = len(trg_vocab)
src_pad_idx = src_vocab.pad_idx
trg_pad_idx = trg_vocab.pad_idx


encoder = Encoder(src_vocab_size, d_model, n_heads, d_ff, n_layers, dropout_p=0.1)
decoder = Decoder(trg_vocab_size, d_model, n_heads, d_ff, n_layers, dropout_p=0.1)
model = Transformer(encoder, decoder)


if __name__ == '__main__':
    # Smoke test: inspect a single batch from the iterator, then report
    # the model's trainable parameters.
    for src_idx, src_len, trg_idx in train_iterator():
        batch_report = f'src_idx.shape:{src_idx.shape}\n{src_idx}\nsrc_len.shape:{src_len.shape}\n{src_len}\ntrg_idx.shape:{trg_idx.shape}\n{trg_idx}'
        print(batch_report)
        break  # one batch is enough for a sanity check

    print(model.trainable_params())