from backbones.gru import GRU
import torch
from dataset.dataloader import generate_loader_vocab
import torch.nn.functional as F
import time

if __name__ == '__main__':
    # Rebuild the vocabulary so token indices match the trained checkpoint.
    _, vocab = generate_loader_vocab(batch_size=200)
    vocab_size = len(vocab)
    hidden_size = 256
    device = torch.device("cuda")
    model = GRU(vocab_size, hidden_size, num_layers=2, device=device)
    # map_location keeps the load robust if the checkpoint was saved on a
    # different device than the one we are running on.
    model.load_state_dict(
        torch.load("./gru_best.pt", weights_only=True, map_location=device)
    )

    # Seed prompt, encoded as vocabulary indices.
    test_idx = vocab.to_idx("the Time Machine".lower().split())
    test_idx = torch.tensor(test_idx, device=device)

    model.eval()
    print("续写内容：")
    # Continue the text for 256 tokens with greedy decoding.
    # Instead of re-running the whole (growing) sequence from a fresh zero
    # hidden state every step -- O(n^2) total work -- feed the prompt once,
    # then carry the recurrent state and feed only the newly generated
    # token each iteration; for a deterministic GRU the outputs are identical.
    with torch.no_grad():  # pure inference: skip autograd bookkeeping
        h = torch.zeros((2, 1, hidden_size), device=device)
        inp = test_idx.unsqueeze(0)  # (1, prompt_len) batch of one
        for _ in range(256):
            y_hat, h = model(F.one_hot(inp, vocab_size).float(), h)
            # Greedy choice: most likely token at the last time step.
            next_tok = torch.argmax(y_hat, dim=-1).squeeze(0)[-1]
            time.sleep(0.5)  # slow the output down for a typewriter effect
            # flush=True so each word appears immediately despite end=" "
            print(vocab[next_tok.item()], end=" ", flush=True)
            # Only the new token needs to pass through the network next step.
            inp = next_tok.view(1, 1)
