from backbones.transformer import ChatNet
import torch
from dataset.dataloader import generate_vocab
from dataset.vocab import tokenizer
import time

# ===================== Inference =====================
if __name__ == '__main__':
    # Build source (Chinese) and target (English-side) vocabularies.
    vocab_zh, vocab_en = generate_vocab()
    vocab_zh_size = len(vocab_zh)
    vocab_en_size = len(vocab_en)
    embedding_dims = 512

    # Fall back to CPU when no GPU is present (original hard-coded "cuda"
    # and crashed on CPU-only machines).
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # model = ChatNet(vocab_zh_size, vocab_en_size, 512, num_decoder_layers=4, num_encoder_layers=4, device=device)
    # NOTE(review): weights_only=False unpickles arbitrary objects — only
    # load checkpoints from a trusted source.
    model = torch.load("./save/best.pt", weights_only=False)
    model.to(device)  # ensure the model lives on the selected device
    model.eval()

    # Hard cap on generated tokens so decoding cannot spin forever
    # when the model never emits <eos>.
    MAX_GEN_LEN = 256

    while True:
        sentence = input("用户:")
        if sentence == "q":
            break
        # The encoder input is loop-invariant: tokenize once per user
        # sentence, not once per generated token (the original redid
        # this work on every decoding step).
        tokens = tokenizer([sentence], mode="char")
        encode_idx = torch.tensor([vocab_zh.to_idx(line) for line in tokens]).to(device)
        # Decoder input starts from the <bos> token; shape (1, 1).
        outputs = torch.tensor([[vocab_en.to_idx("<bos>")]]).to(device)
        print("AI:")
        with torch.no_grad():  # inference only — skip gradient bookkeeping
            for _ in range(MAX_GEN_LEN):
                results = model(encode_idx, outputs, None, None)
                # Greedy decoding: take the highest-scoring token at the
                # last position; keepdims via [:, -1:] so idx is (1, 1).
                idx = torch.argmax(results[:, -1:], dim=-1)
                word = vocab_en[idx.squeeze(0).item()]
                time.sleep(0.1)  # deliberate "typing" effect for the console UI
                print(word, end=" ")
                if word == "<eos>":
                    break
                # Append the chosen token and feed it back to the decoder.
                outputs = torch.cat([outputs, idx], dim=-1)
        print("")
