import pickle

import torch
import torch.utils.data as Data

from constant.constant import idx2word, tgt_vocab, device
from datasets.data_loader import preprocess_data, MyDataSet
from models.model import train, greedy_decoder


def test_model():
    """Load the pickled checkpoint and greedily decode the sample sentences.

    Prints each encoder input tensor alongside the decoded target-language
    tokens produced by the model.
    """
    file_path = 'checkpoints/my_first_model.pkl'
    # NOTE(review): pickle.load executes arbitrary code embedded in the file —
    # only load checkpoints from trusted sources.
    with open(file_path, 'rb') as f:
        model = pickle.load(f)
    # Fix: switch to eval mode so dropout/batch-norm behave deterministically
    # during inference.
    model.eval()

    sentences = [
        # enc_input           dec_input         dec_output
        ['ich mochte ein bier P', 'S i want a beer .', 'i want a beer . E'],
        ['ich mochte ein cola P', 'S i want a coke .', 'i want a coke . E']
    ]

    # Data preprocessing: raw sentences -> index tensors.
    enc_inputs, dec_inputs, dec_outputs = preprocess_data(sentences)
    # Fix: shuffle=False — shuffling at test time only makes the printed
    # output order nondeterministic.
    loader = Data.DataLoader(MyDataSet(enc_inputs, dec_inputs, dec_outputs), 2, False)
    enc_inputs, _, _ = next(iter(loader))
    enc_inputs = enc_inputs.to(device)
    # Fix: no_grad() — inference needs no gradient bookkeeping.
    with torch.no_grad():
        for enc_input in enc_inputs:
            enc_input = enc_input.view(1, -1)
            greedy_dec_input = greedy_decoder(model, enc_input, start_symbol=tgt_vocab["S"])
            predict, _, _, _ = model(enc_input, greedy_dec_input)
            # Take the arg-max token index at every decoding position.
            predict = predict.data.max(1, keepdim=True)[1]
            print(enc_input.squeeze(0), '->', [idx2word[n.item()] for n in predict.squeeze()])


def train_model():
    """Build a DataLoader over the toy parallel corpus and run training.

    Token legend used in the corpus:
      S - symbol marking the start of a decoder input sequence
      E - symbol marking the end of a decoder output sequence
      P - padding symbol used when a sequence is shorter than the time steps
    """
    sentences = [
        # enc_input           dec_input         dec_output
        ['ich mochte ein bier P', 'S i want a beer .', 'i want a beer . E'],
        ['ich mochte ein cola P', 'S i want a coke .', 'i want a coke . E']
    ]

    # Data preprocessing: sentences -> index tensors, wrapped in a
    # shuffled loader of batch size 2.
    encoder_in, decoder_in, decoder_out = preprocess_data(sentences)
    dataset = MyDataSet(encoder_in, decoder_in, decoder_out)
    data_loader = Data.DataLoader(dataset, 2, True)
    train(data_loader)


def main():
    """Script entry point: evaluate the saved checkpoint on the sample data."""
    # train_model()  # uncomment to (re)train before testing
    test_model()


# Run the entry point only when executed as a script, not on import.
if __name__ == '__main__':
    main()
