import torch

from backbones.seq2seq import Seq2SeqEncoder, Seq2SeqDecoder
from backbones.encoder_decoder import EncoderDecoder
from dataset.vocab import Vocab
import re
from torch import nn

if __name__ == '__main__':
    # Toy single-pair corpus: an English sentence and its Chinese translation
    # separated by a tab (the common format of translation datasets).
    sentence = "I like play game.\t我喜欢打游戏。"
    s1, s2 = sentence.split("\t")
    # Source (English): strip non-letters, lowercase, whitespace-tokenize.
    input_tokens = [re.sub(r"[^a-zA-Z]+", " ", s1).lower().strip().split()]
    # Target (Chinese): keep only CJK characters, tokenize per character,
    # and wrap the sequence in <bos>/<eos> markers for the decoder.
    output_tokens = [['<bos>'] + list(re.sub(r"[^\u4e00-\u9fff]+", " ", s2).strip()) + ['<eos>']]

    # Seq2seq translation uses different languages on each side, so the
    # source and target each get their own vocabulary.
    vocab_encode = Vocab(input_tokens, 0)
    vocab_decode = Vocab(output_tokens, 0)

    encode_idx = [vocab_encode.to_idx(line) for line in input_tokens]
    decode_idx = [vocab_decode.to_idx(line) for line in output_tokens]
    encode_idx = torch.tensor(encode_idx)
    decode_idx = torch.tensor(decode_idx)

    # Teacher forcing: the decoder input drops the final <eos>, the training
    # target drops the leading <bos> (i.e. predict the next token).
    decode_input = decode_idx[:, :-1]
    decode_output = decode_idx[:, 1:]

    device = torch.device("cpu")
    encoder = Seq2SeqEncoder(len(vocab_encode), 3, 20, 2, device=device)
    decoder = Seq2SeqDecoder(len(vocab_decode), 10, 20, 2, device=device)
    model = EncoderDecoder(encoder, decoder)

    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)

    epochs = 1000
    model.train()
    for epoch in range(epochs):
        optimizer.zero_grad()
        # predict is assumed to be (batch * seq_len, vocab_size) so that it
        # lines up with the flattened targets -- TODO confirm against
        # EncoderDecoder's forward().
        predict = model(encode_idx, decode_input)
        loss = criterion(predict, decode_output.view(-1).long())
        loss.backward()
        optimizer.step()

        if (epoch + 1) % 100 == 0:
            print(f"epoch:{epoch + 1}/{epochs} -- loss:{loss.item():.4f}")

    # Greedy decoding: repeatedly feed the growing target prefix back into
    # the model and append the argmax token. The loop is capped at max_steps
    # so a model that never emits <eos> cannot hang the script.
    test_encode_sentence = "I like play game"
    test_encode_tokens = [re.sub(r"[^a-zA-Z<>]+", " ", test_encode_sentence).lower().strip().split()]
    test_encode_idx = torch.tensor([vocab_encode.to_idx(line) for line in test_encode_tokens])
    test_decode_idx = torch.tensor([[vocab_decode.to_idx('<bos>')]])
    model.eval()
    max_steps = 50  # hard cap on the number of generated tokens
    with torch.no_grad():  # inference only: no autograd graph needed
        for _ in range(max_steps):
            y = model(test_encode_idx, test_decode_idx)
            predict_idx = torch.argmax(y, dim=-1)[-1, None]
            test_decode_idx = torch.cat([test_decode_idx, predict_idx.unsqueeze(0)], dim=-1)
            if predict_idx == vocab_decode.to_idx('<eos>'):  # stop at end-of-sequence
                break
    print([vocab_decode[idx.item()] for idx in test_decode_idx[0]])
